file_path
stringlengths
7
180
content
stringlengths
0
811k
repo
stringclasses
11 values
src/arith/ir_mutator_with_analyzer.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file tvm/arithmetic/ir_mutator_with_analyzer.h
 * \brief IR mutator base-class with an analyzer context.
 */
#ifndef TVM_ARITH_IR_MUTATOR_WITH_ANALYZER_H_
#define TVM_ARITH_IR_MUTATOR_WITH_ANALYZER_H_

#include <tvm/arith/analyzer.h>
#include <tvm/tir/stmt_functor.h>

#include <utility>

namespace tvm {
namespace arith {

/*!
 * \brief IRMutator with an analyzer context.
 *
 * This class can be sub-classed by ir mutators that need an analyzer.
 * It will populate scope-related info such as bounds of loop-variables and constraints
 * for the analyzer, so that the child class can do accurate context-dependent analysis.
 *
 * \sa src/arithmetic/ir_mutator_with_analyzer.cc
 */
class IRMutatorWithAnalyzer : public tir::StmtExprMutator {
 public:
  /*! \brief Construct the mutator around an externally owned analyzer (not owned here). */
  explicit IRMutatorWithAnalyzer(Analyzer* analyzer) : analyzer_(analyzer) {}

  using StmtExprMutator::VisitExpr_;
  using StmtExprMutator::VisitStmt_;

  // override functions that need to populate the context information.
  tir::Stmt VisitStmt_(const tir::ForNode* op) override;
  tir::Stmt VisitStmt_(const tir::BlockNode* op) override;
  tir::Stmt VisitStmt_(const tir::LetStmtNode* op) override;
  tir::Stmt VisitStmt_(const tir::IfThenElseNode* op) override;
  tir::Stmt VisitStmt_(const tir::AttrStmtNode* op) override;
  tir::Stmt VisitStmt_(const tir::AssertStmtNode* op) override;
  PrimExpr VisitExpr_(const tir::LetNode* op) override;
  PrimExpr VisitExpr_(const tir::SelectNode* op) override;
  PrimExpr VisitExpr_(const tir::CallNode* op) override;
  PrimExpr VisitExpr_(const tir::ReduceNode* op) override;

 protected:
  /*! \brief internal analyzer field. */
  Analyzer* analyzer_;
};

}  // namespace arith
}  // namespace tvm
#endif  // TVM_ARITH_IR_MUTATOR_WITH_ANALYZER_H_
https://github.com/zk-ml/tachikoma
src/arith/ir_visitor_with_analyzer.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/arithmetic/ir_visitor_with_analyzer.h * \brief IR visitor class with an analyzer context. */ #ifndef TVM_ARITH_IR_VISITOR_WITH_ANALYZER_H_ #define TVM_ARITH_IR_VISITOR_WITH_ANALYZER_H_ #include <tvm/arith/analyzer.h> #include <tvm/tir/expr.h> #include <tvm/tir/stmt_functor.h> namespace tvm { namespace arith { class IRVisitorWithAnalyzer : public tir::StmtExprVisitor { public: PrimExpr Simplify(const PrimExpr& expr) { return analyzer_.Simplify(expr); } using StmtExprVisitor::VisitExpr_; using StmtExprVisitor::VisitStmt_; void VisitStmt_(const tir::ForNode* op); void VisitStmt_(const tir::BlockNode* op); void VisitStmt_(const tir::LetStmtNode* op); void VisitStmt_(const tir::IfThenElseNode* op); void VisitStmt_(const tir::AttrStmtNode* op); void VisitStmt_(const tir::AssertStmtNode* op); void VisitExpr_(const tir::CallNode* op); void VisitExpr_(const tir::LetNode* op); void VisitExpr_(const tir::ReduceNode* op); // IRVisitorWithAnalyzer deliberately does not handle Select nodes, // because both sides of a Select node are visited regardless of the // condition. protected: /*! \brief internal analyzer field. 
*/ arith::Analyzer analyzer_; private: PrimExpr ExtractRealCondition(PrimExpr condition) const; }; } // namespace arith } // namespace tvm #endif // TVM_ARITH_IR_VISITOR_WITH_ANALYZER_H_
https://github.com/zk-ml/tachikoma
src/arith/narrow_predicate_expression.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file narrow_predicate_expression.h * \brief Utility for extracting and interacting with buffer touch points */ #include <tvm/ir/expr.h> #include <tvm/tir/var.h> #ifndef TVM_ARITH_NARROW_PREDICATE_EXPRESSION_H_ #define TVM_ARITH_NARROW_PREDICATE_EXPRESSION_H_ namespace tvm { namespace arith { /* \brief Narrow a true expression to remove free parameters * * This function provides two guarantees: * * 1. If the resulting expression evaluates to True, then the original * expression also evaluates to True. * * 2. The resulting expression does not contain any of the free * parameters. * * 3. The resulting expression does not contain any BufferLoad * * \param expr The expression to be examined. * * \param ranges The variables to be removed from the expression * * \returns An expression that, if true, implies that the original * expression is also true. */ PrimExpr NarrowPredicateExpression(PrimExpr expr, Map<tir::Var, Range> free_parameters); } // namespace arith } // namespace tvm #endif // TVM_ARITH_NARROW_PREDICATE_EXPRESSION_H_
https://github.com/zk-ml/tachikoma
src/arith/pattern_match.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file tvm/arithmetic/pattern_match.h
 *
 * \brief Internal tool for expression-template based pattern matching.
 *
 * It helps to simplify pattern matching and rewrites.
 * All the patterns are generated via expression template during compile time,
 * so the result code should be as efficient as manually written pattern match code.
 *
 * The code below shows how to use the pattern matcher.
 *
 * \code
 *
 *  // max(x + z, y + z) => max(x, y) + z
 *  arith::PVar<Expr> x, y, z;
 *
 *  // The following code tries to match the declared pattern.
 *  // Match will fill the result of match into PVar if successful.
 *  // Note that z occurs twice in the pattern,
 *  // an equality check is performed to ensure each occurrence of z
 *  // is equivalent to each other.
 *  if (max(x + z, y + z).Match(expr)) {
 *    // Eval evaluates a pattern with the current matched value.
 *    // The filled value is valid until the next call to Match.
 *    return (max(x, y) + z).Eval();
 *  }
 *
 *  tvm::tir::Var tx, ty;
 *  arith::PVar<IntImm> c;
 *  arith::PVar<Var> v;
 *  // We can match integer and Var, both of which are
 *  // special case container of Expr
 *  ICHECK((v * c).Match(tx * 3));
 *  ICHECK_EQ(c.Eval()->value, 3);
 *  // cannot match c to ty
 *  ICHECK(!(v * c).Match(tx * ty));
 *
 * \endcode
 *
 * \note The pattern matcher is not threadsafe,
 *       do not use the same PVar in multiple threads.
 *
 *       Please be aware that the filled value in a PVar
 *       can be overridden in the next call to Match.
 */
#ifndef TVM_ARITH_PATTERN_MATCH_H_
#define TVM_ARITH_PATTERN_MATCH_H_

#include <tvm/tir/analysis.h>
#include <tvm/tir/builtin.h>
#include <tvm/tir/expr.h>

#include <cmath>
#include <tuple>

#include "const_fold.h"

namespace tvm {
namespace arith {

/*!
 * \brief Base class of all the patterns.
 *
 * There are two major member functions supported by each pattern.
 * - Match: checks if value matches the pattern.
 * - Eval: construct a new value based on matched values in PVar.
 *
 * We use curiously recurring template pattern to construct
 * expression templates.
 *
 * \tparam Derived The type of the derived class.
 */
template <typename Derived>
class Pattern {
 public:
  /*!
   * \brief Nested storage type in the expression.
   *
   * Depending on the Derived class,
   * Nested can be Derived (nest by value) or
   * const Derived& (nest by reference).
   *
   * The trick of Nested typedef originates from Eigen.
   *
   * \note We use nest by value for intermediate expressions,
   *       and nest by reference for PVars.
   */
  using Nested = Derived;
  /*!
   * \brief Check if value matches the current pattern.
   *
   * This call also populates the PVars with matched value.
   * The values in PVars are valid until the next call to Match.
   *
   * \return whether value matches the pattern.
   */
  template <typename NodeType>
  bool Match(const NodeType& value) const {
    derived().InitMatch_();
    return derived().Match_(value);
  }
  /*! \return Derived instance of current class. */
  const Derived& derived() const { return *static_cast<const Derived*>(this); }
};

/*!
 * \brief Default deep equality checker
 * \tparam T the comparison point.
 */
template <typename T>
class PEqualChecker {
 public:
  bool operator()(const T& lhs, const T& rhs) const { return lhs == rhs; }
};

// Deep structural equality for expressions (cheap pointer check first).
template <>
class PEqualChecker<PrimExpr> {
 public:
  bool operator()(const PrimExpr& lhs, const PrimExpr& rhs) const {
    if (lhs.same_as(rhs)) return true;
    return tir::ExprDeepEqual()(lhs, rhs);
  }
};

template <>
class PEqualChecker<IntImm> {
 public:
  bool operator()(const IntImm& lhs, const IntImm& rhs) const { return lhs->value == rhs->value; }
};

template <>
class PEqualChecker<FloatImm> {
 public:
  bool operator()(const FloatImm& lhs, const FloatImm& rhs) const {
    return std::fabs(lhs->value - rhs->value) < 1e-20;
  }
};

// Vars compare by identity, not structure.
template <>
class PEqualChecker<tir::Var> {
 public:
  bool operator()(const tir::Var& lhs, const tir::Var& rhs) const { return lhs.same_as(rhs); }
};

/*!
 * \brief Pattern variable container.
 *
 * PVar is used as a "hole" in the pattern that can be matched.
 *
 * \tparam T the type of the hole.
 *
 * \note PVar is not thread safe.
 *       Do not use the same PVar in multiple threads.
 */
template <typename T>
class PVar : public Pattern<PVar<T>> {
 public:
  // Store PVars by reference in the expression.
  using Nested = const PVar<T>&;

  void InitMatch_() const { filled_ = false; }

  // First occurrence fills the hole; later occurrences must be equal to it.
  bool Match_(const T& value) const {
    if (!filled_) {
      value_ = value;
      filled_ = true;
      return true;
    } else {
      return PEqualChecker<T>()(value_, value);
    }
  }

  template <typename NodeRefType,
            typename = typename std::enable_if<std::is_base_of<NodeRefType, T>::value>::type>
  bool Match_(const NodeRefType& value) const {
    if (const auto* ptr = value.template as<typename T::ContainerType>()) {
      return Match_(GetRef<T>(ptr));
    } else {
      return false;
    }
  }

  T Eval() const {
    ICHECK(filled_);
    return value_;
  }

  T EvalOr(const T& default_value) const { return filled_ ? value_ : default_value; }

 protected:
  /*! \brief The matched value */
  mutable T value_;
  /*! \brief whether the variable has been filled */
  mutable bool filled_{false};
};

/*!
 * \brief Wrapper for pattern variable container with extra match logic.
 *
 * \tparam Derived the type of derived class.
 * \tparam T the type of the hole.
 */
template <typename Derived, typename T>
class PVarWithCheck : public arith::Pattern<PVarWithCheck<Derived, T>> {
 public:
  // Store by reference in the expression.
  using Nested = const PVarWithCheck<Derived, T>&;

  void InitMatch_() const { pvar_.InitMatch_(); }

  // Run the Derived check first, then delegate to the inner PVar.
  bool Match_(const T& value) const {
    if (!static_cast<const Derived*>(this)->Match_(value)) return false;
    return pvar_.Match_(value);
  }

  template <typename NodeRefType,
            typename = typename std::enable_if<std::is_base_of<NodeRefType, T>::value>::type>
  bool Match_(const NodeRefType& value) const {
    if (const auto* ptr = value.template as<typename T::ContainerType>()) {
      return Match_(GetRef<T>(ptr));
    } else {
      return false;
    }
  }

  T Eval() const { return pvar_.Eval(); }

 protected:
  arith::PVar<T> pvar_;
};

/*!
 * \brief Pattern variable container with expr type check.
 *
 * \tparam T the type of the hole.
 * \tparam DType the Pattern type of dtype.
 */
template <typename T, typename DType,
          typename = std::enable_if<std::is_base_of<T, PrimExpr>::value>>
class PVarWithDataType : public PVarWithCheck<PVarWithDataType<T, DType>, T> {
 public:
  explicit PVarWithDataType(const DType& dtype) : dtype_(dtype) {}

  bool Match_(const T& value) const { return dtype_.Match_(value->dtype); }

 protected:
  typename DType::Nested dtype_;
};

/*!
 * \brief Pattern variable container for data type with lanes.
 */
class PVecDataType : public PVarWithCheck<PVecDataType, DataType> {
 public:
  /*! \brief construct vector dtype placeholder with element type check */
  explicit PVecDataType(const DataType& elem_dtype) : elem_dtype_(elem_dtype) {}

  bool Match_(const DataType& dtype) const { return dtype.code() == elem_dtype_.code(); }

 protected:
  DataType elem_dtype_;
};

/*!
 * \brief Constant Pattern variable container.
 *
 * \tparam T the type of the hole.
 */
template <typename T>
class PConst : public Pattern<PConst<T>> {
 public:
  PConst(T value)  // NOLINT(*)
      : value_(value) {}

  void InitMatch_() const {}

  bool Match_(const T& value) const { return PEqualChecker<T>()(value_, value); }

  T Eval() const { return value_; }

 private:
  const T value_;
};

/*!
 * \brief Pattern binary expression.
 * \tparam OpType The AST noderef type.
 * \tparam TA The pattern type of the first operand.
 * \tparam TB The pattern type of the second operand.
 */
template <typename OpType, typename TA, typename TB>
class PBinaryExpr : public Pattern<PBinaryExpr<OpType, TA, TB>> {
 public:
  PBinaryExpr(const TA& a, const TB& b) : a_(a), b_(b) {}

  void InitMatch_() const {
    a_.InitMatch_();
    b_.InitMatch_();
  }

  bool Match_(const ObjectRef& node) const {
    using NodeType = typename OpType::ContainerType;
    if (const NodeType* ptr = node.as<NodeType>()) {
      if (!a_.Match_(ptr->a)) return false;
      if (!b_.Match_(ptr->b)) return false;
      return true;
    } else {
      return false;
    }
  }

  // Constant-fold on evaluation when both sides fold.
  PrimExpr Eval() const {
    PrimExpr lhs = a_.Eval();
    PrimExpr rhs = b_.Eval();
    if (auto ret = TryConstFold<OpType>(lhs, rhs)) return ret.value();
    return OpType(lhs, rhs);
  }

 private:
  typename TA::Nested a_;
  typename TB::Nested b_;
};

// Integer constant whose dtype is taken from another pattern on Eval.
template <typename TA>
class PConstWithTypeLike : public Pattern<PConstWithTypeLike<TA>> {
 public:
  PConstWithTypeLike(const TA& ref, int64_t value) : ref_(ref), value_(value) {}

  void InitMatch_() const {}

  bool Match_(const ObjectRef& node) const {
    if (const tir::IntImmNode* ptr = node.as<tir::IntImmNode>()) {
      return ptr->value == value_;
    } else {
      return false;
    }
  }

  PrimExpr Eval() const { return tir::make_const(ref_.Eval().dtype(), value_); }

 private:
  typename TA::Nested ref_;
  int64_t value_;
};

#define TVM_PATTERN_BINARY_OP_EX(FuncName, NodeName, CheckStep)                               \
  template <typename TA, typename TB>                                                         \
  inline PBinaryExpr<NodeName, TA, TB> FuncName(const Pattern<TA>& a, const Pattern<TB>& b) { \
    CheckStep;                                                                                \
    return PBinaryExpr<NodeName, TA, TB>(a.derived(), b.derived());                           \
  }                                                                                           \
  template <typename TA>                                                                      \
  inline PBinaryExpr<NodeName, TA, PConstWithTypeLike<TA>> FuncName(const Pattern<TA>& a,     \
                                                                    int64_t b) {              \
    CheckStep;                                                                                \
    return FuncName(a, PConstWithTypeLike<TA>(a.derived(), b));                               \
  }                                                                                           \
  template <typename TA>                                                                      \
  inline PBinaryExpr<NodeName, PConstWithTypeLike<TA>, TA> FuncName(int64_t b,                \
                                                                    const Pattern<TA>& a) {   \
    CheckStep;                                                                                \
    return FuncName(PConstWithTypeLike<TA>(a.derived(), b), a);                               \
  }

#define TVM_PATTERN_BINARY_OP(FuncName, NodeName) TVM_PATTERN_BINARY_OP_EX(FuncName, NodeName, )

// raise ambiguity error for operator overload of / and %
TVM_PATTERN_BINARY_OP_EX(operator/, tir::Div, DivAmbiguityError(a));
TVM_PATTERN_BINARY_OP_EX(operator%, tir::Mod, DivAmbiguityError(a));

// arithmetic expressions
TVM_PATTERN_BINARY_OP(operator+, tir::Add);
TVM_PATTERN_BINARY_OP(operator-, tir::Sub);
TVM_PATTERN_BINARY_OP(operator*, tir::Mul);
TVM_PATTERN_BINARY_OP(min, tir::Min);
TVM_PATTERN_BINARY_OP(max, tir::Max);
TVM_PATTERN_BINARY_OP(div, tir::Div);
TVM_PATTERN_BINARY_OP(truncdiv, tir::Div);
TVM_PATTERN_BINARY_OP(truncmod, tir::Mod);
TVM_PATTERN_BINARY_OP(floordiv, tir::FloorDiv);
TVM_PATTERN_BINARY_OP(floormod, tir::FloorMod);

// logical expressions
TVM_PATTERN_BINARY_OP(operator>, tir::GT);
TVM_PATTERN_BINARY_OP(operator>=, tir::GE);
TVM_PATTERN_BINARY_OP(operator<, tir::LT);
TVM_PATTERN_BINARY_OP(operator<=, tir::LE);
TVM_PATTERN_BINARY_OP(operator==, tir::EQ);
TVM_PATTERN_BINARY_OP(operator!=, tir::NE);
TVM_PATTERN_BINARY_OP(operator&&, tir::And);
TVM_PATTERN_BINARY_OP(operator||, tir::Or);

/*!
 * \brief Pattern not expression.
 * \tparam TA The pattern type of the true operand.
 */
template <typename TA>
class PNotExpr : public Pattern<PNotExpr<TA>> {
 public:
  explicit PNotExpr(const TA& value) : value_(value) {}

  void InitMatch_() const { value_.InitMatch_(); }

  bool Match_(const ObjectRef& node) const {
    if (const tir::NotNode* ptr = node.as<tir::NotNode>()) {
      if (!value_.Match_(ptr->a)) return false;
      return true;
    } else {
      return false;
    }
  }

  PrimExpr Eval() const { return tir::Not(value_.Eval()); }

 private:
  typename TA::Nested value_;
};

template <typename TA>
inline PNotExpr<TA> operator!(const Pattern<TA>& value) {
  return PNotExpr<TA>(value.derived());
}

// select
/*!
 * \brief Pattern select expression.
 * \tparam TCond The pattern type of the condition.
 * \tparam TA The pattern type of the true operand.
 * \tparam TB The pattern type of the false operand.
 */
template <typename TCond, typename TA, typename TB>
class PSelectExpr : public Pattern<PSelectExpr<TCond, TA, TB>> {
 public:
  PSelectExpr(const TCond& condition, const TA& true_value, const TB& false_value)
      : condition_(condition), true_value_(true_value), false_value_(false_value) {}

  void InitMatch_() const {
    condition_.InitMatch_();
    true_value_.InitMatch_();
    false_value_.InitMatch_();
  }

  bool Match_(const ObjectRef& node) const {
    if (const tir::SelectNode* ptr = node.as<tir::SelectNode>()) {
      if (!condition_.Match_(ptr->condition)) return false;
      if (!true_value_.Match_(ptr->true_value)) return false;
      if (!false_value_.Match_(ptr->false_value)) return false;
      return true;
    } else {
      return false;
    }
  }

  PrimExpr Eval() const {
    return tir::Select(condition_.Eval(), true_value_.Eval(), false_value_.Eval());
  }

 private:
  typename TCond::Nested condition_;
  typename TA::Nested true_value_;
  typename TB::Nested false_value_;
};

/*!
 * \brief Construct a select pattern.
 *
 * \param condition The condition expression.
 * \param true_value The value when condition is true.
 * \param false_value The value when condition is false.
 *
 * \return The result pattern.
 *
 * \tparam TCond The pattern type of the condition.
 * \tparam TA The pattern type of the true operand.
 * \tparam TB The pattern type of the false operand.
 */
template <typename TCond, typename TA, typename TB>
inline PSelectExpr<TCond, TA, TB> select(const Pattern<TCond>& condition,
                                         const Pattern<TA>& true_value,
                                         const Pattern<TB>& false_value) {
  return PSelectExpr<TCond, TA, TB>(condition.derived(), true_value.derived(),
                                    false_value.derived());
}

/*!
 * \brief Pattern cast expression.
 * \tparam DType The Pattern type of dtype.
 * \tparam TA The pattern type of the first operand.
 */
template <typename DType, typename TA>
class PCastExpr : public Pattern<PCastExpr<DType, TA>> {
 public:
  PCastExpr(const DType& dtype, const TA& value) : dtype_(dtype), value_(value) {}

  void InitMatch_() const {
    dtype_.InitMatch_();
    value_.InitMatch_();
  }

  bool Match_(const ObjectRef& node) const {
    if (const tir::CastNode* ptr = node.as<tir::CastNode>()) {
      if (!dtype_.Match_(ptr->dtype)) return false;
      if (!value_.Match_(ptr->value)) return false;
      return true;
    } else {
      return false;
    }
  }

  PrimExpr Eval() const { return tir::Cast(dtype_.Eval(), value_.Eval()); }

 private:
  typename DType::Nested dtype_;
  typename TA::Nested value_;
};

/*!
 * \brief Construct a cast pattern.
 *
 * \param dtype The target data type, can be PVar<DataType> or PConst<DataType>.
 * \param value The input type.
 *
 * \return The result pattern.
 *
 * \tparam DType The pattern type of type.
 * \tparam TA The pattern type of value.
 */
template <typename DType, typename TA>
inline PCastExpr<DType, TA> cast(const Pattern<DType>& dtype, const Pattern<TA>& value) {
  return PCastExpr<DType, TA>(dtype.derived(), value.derived());
}

/*!
 * \brief Pattern ramp expression.
 * \tparam TBase The pattern type of the base.
 * \tparam TStride The pattern type of the stride.
 * \tparam TLanes The pattern type of the lanes.
 */
template <typename TBase, typename TStride, typename TLanes>
class PRampExpr : public Pattern<PRampExpr<TBase, TStride, TLanes>> {
 public:
  PRampExpr(const TBase& base, const TStride& stride, const TLanes& lanes)
      : base_(base), stride_(stride), lanes_(lanes) {}

  void InitMatch_() const {
    base_.InitMatch_();
    stride_.InitMatch_();
    lanes_.InitMatch_();
  }

  bool Match_(const ObjectRef& node) const {
    if (const tir::RampNode* ptr = node.as<tir::RampNode>()) {
      if (!base_.Match_(ptr->base)) return false;
      if (!stride_.Match_(ptr->stride)) return false;
      if (!lanes_.Match_(ptr->lanes)) return false;
      return true;
    } else {
      return false;
    }
  }

  PrimExpr Eval() const { return tir::Ramp(base_.Eval(), stride_.Eval(), lanes_.Eval()); }

 private:
  typename TBase::Nested base_;
  typename TStride::Nested stride_;
  typename TLanes::Nested lanes_;
};

/*!
 * \brief Construct a ramp pattern.
 *
 * \param base The base pattern.
 * \param stride The stride pattern.
 * \param lanes The lanes pattern.
 *
 * \return The result pattern.
 *
 * \tparam TBase The pattern type of the base.
 * \tparam TStride The pattern type of the stride.
 * \tparam TLanes The pattern type of the lanes.
 */
template <typename TBase, typename TStride, typename TLanes>
inline PRampExpr<TBase, TStride, TLanes> ramp(const Pattern<TBase>& base,
                                              const Pattern<TStride>& stride,
                                              const Pattern<TLanes>& lanes) {
  return PRampExpr<TBase, TStride, TLanes>(base.derived(), stride.derived(), lanes.derived());
}

template <typename TBase>
inline PRampExpr<TBase, PConstWithTypeLike<TBase>, PConst<int>> ramp(const Pattern<TBase>& base,
                                                                     int stride, int lanes) {
  return PRampExpr<TBase, PConstWithTypeLike<TBase>, PConst<int>>(
      base.derived(), PConstWithTypeLike<TBase>(base.derived(), stride), PConst<int>(lanes));
}

/*!
 * \brief Pattern broadcast expression.
 * \tparam TA The pattern type of the value.
 * \tparam TLanes The pattern type of the lanes.
 */
template <typename TA, typename TLanes>
class PBroadcastExpr : public Pattern<PBroadcastExpr<TA, TLanes>> {
 public:
  PBroadcastExpr(const TA& value, const TLanes& lanes) : value_(value), lanes_(lanes) {}

  void InitMatch_() const {
    value_.InitMatch_();
    lanes_.InitMatch_();
  }

  bool Match_(const ObjectRef& node) const {
    if (const tir::BroadcastNode* ptr = node.as<tir::BroadcastNode>()) {
      if (!value_.Match_(ptr->value)) return false;
      if (!lanes_.Match_(ptr->lanes)) return false;
      return true;
    } else {
      return false;
    }
  }

  PrimExpr Eval() const { return tir::Broadcast(value_.Eval(), lanes_.Eval()); }

 private:
  typename TA::Nested value_;
  typename TLanes::Nested lanes_;
};

/*!
 * \brief Construct a broadcast pattern.
 *
 * \param value The value pattern.
 * \param lanes The lanes pattern.
 *
 * \return The result pattern.
 *
 * \tparam TA The pattern type of the value.
 * \tparam TLanes The pattern type of the lanes.
 */
template <typename TA, typename TLanes>
inline PBroadcastExpr<TA, TLanes> broadcast(const Pattern<TA>& value,
                                            const Pattern<TLanes>& lanes) {
  return PBroadcastExpr<TA, TLanes>(value.derived(), lanes.derived());
}

// internal namespace
namespace detail {
// implementation details for CallExpr
template <bool stop, std::size_t I, typename F>
struct tuple_for_each_dispatcher {
  template <typename TTuple>
  static void run(F& f, const TTuple& tuple) {  // NOLINT(*)
    f(I, std::get<I>(tuple));
    tuple_for_each_dispatcher<(I + 1) == std::tuple_size<TTuple>::value, (I + 1), F>::run(f, tuple);
  }
};

template <std::size_t I, typename F>
struct tuple_for_each_dispatcher<true, I, F> {
  template <typename TTuple>
  static void run(F& f, const TTuple& tuple) {}  // NOLINT(*)
};

template <typename F, typename TTuple>
inline void tuple_for_each(F& f, const TTuple& tuple) {  // NOLINT(*)
  tuple_for_each_dispatcher<std::tuple_size<TTuple>::value == 0, 0, F>::run(f, tuple);
}

struct PCallExprInitMatchFunctor {
  template <typename T>
  void operator()(size_t i, const T& pattern) const {
    pattern.InitMatch_();
  }
};

struct PCallExprMatchFunctor {
  const tir::CallNode* call_;
  bool matched_{true};

  explicit PCallExprMatchFunctor(const tir::CallNode* call) : call_(call) {}

  template <typename T>
  void operator()(size_t i, const T& pattern) {
    matched_ = matched_ && pattern.Match_(call_->args[i]);
  }
};

struct PCallExprEvalArgsFunctor {
  Array<PrimExpr> args_;

  template <typename T>
  void operator()(size_t i, const T& pattern) {
    args_.push_back(pattern.Eval());
  }
};
}  // namespace detail

/*!
 * \brief Pattern CallExpr expression.
 * \tparam Op The operator functor class.
 * \tparam TArgs The arguments.
 * \note Op functor contains the name of the function and
 *       the implementation of Eval.
 */
template <typename Op, typename... TArgs>
class PCallExpr : public Pattern<PCallExpr<Op, TArgs...>> {
 public:
  explicit PCallExpr(const TArgs&... args) : args_(args...) {}

  void InitMatch_() const {
    detail::PCallExprInitMatchFunctor finit;
    detail::tuple_for_each(finit, args_);
  }

  bool Match_(const ObjectRef& node) const {
    if (const tir::CallNode* ptr = node.as<tir::CallNode>()) {
      if (ptr->args.size() != sizeof...(TArgs)) return false;
      if (!ptr->op.same_as(Op::GetOp())) return false;
      detail::PCallExprMatchFunctor fmatch(ptr);
      detail::tuple_for_each(fmatch, args_);
      return fmatch.matched_;
    } else {
      return false;
    }
  }

  PrimExpr Eval() const {
    detail::PCallExprEvalArgsFunctor feval_args;
    detail::tuple_for_each(feval_args, args_);
    return Op::Eval(feval_args.args_);
  }

 private:
  std::tuple<typename TArgs::Nested...> args_;
};

// arithmetic intrinsics
#define TVM_PATTERN_BINARY_INTRIN(FuncName, OpName, IntrinOpName)                        \
  struct OpName {                                                                        \
    static PrimExpr Eval(Array<PrimExpr> args) {                                         \
      return tir::Call(args[0].dtype(), GetOp(), args);                                  \
    }                                                                                    \
    static const Op& GetOp() { return tir::builtin::IntrinOpName(); }                    \
  };                                                                                     \
  template <typename TA, typename TB>                                                    \
  inline PCallExpr<OpName, TA, TB> FuncName(const Pattern<TA>& a, const Pattern<TB>& b) { \
    return PCallExpr<OpName, TA, TB>(a.derived(), b.derived());                          \
  }

TVM_PATTERN_BINARY_INTRIN(operator<<, PLeftShiftOp, shift_left);
TVM_PATTERN_BINARY_INTRIN(operator>>, PRightShiftOp, shift_right);
TVM_PATTERN_BINARY_INTRIN(operator&, PBitwiseAndOp, bitwise_and);
TVM_PATTERN_BINARY_INTRIN(operator|, PBitwiseOrOp, bitwise_or);
TVM_PATTERN_BINARY_INTRIN(operator^, PBitwiseXorOp, bitwise_xor);

// unary intrinsics
#define TVM_PATTERN_UNARY_INTRIN(FuncName, OpName, IntrinOpName)      \
  struct OpName {                                                     \
    static PrimExpr Eval(Array<PrimExpr> args) {                      \
      return tir::Call(args[0].dtype(), GetOp(), args);               \
    }                                                                 \
    static const Op& GetOp() { return tir::builtin::IntrinOpName(); } \
  };                                                                  \
  template <typename TA>                                              \
  inline PCallExpr<OpName, TA> FuncName(const Pattern<TA>& a) {       \
    return PCallExpr<OpName, TA>(a.derived());                        \
  }

TVM_PATTERN_UNARY_INTRIN(operator~, PBitwiseNotOp, bitwise_not);

// if_then_else
struct PIfThenElseOp {
  static PrimExpr Eval(Array<PrimExpr> args) {
    return tir::Call(args[1].dtype(), GetOp(), args);
  }
  static const Op& GetOp() { return tir::builtin::if_then_else(); }
};

/*!
 * \brief Construct a if_then_else pattern.
 *
 * \param cond The condition expression.
 * \param true_value The value when condition is true.
 * \param false_value The value when condition is false.
 *
 * \return The result pattern.
 *
 * \tparam TCond The pattern type of the condition.
 * \tparam TA The pattern type of the true operand.
 * \tparam TB The pattern type of the false operand.
 */
template <typename TCond, typename TA, typename TB>
inline PCallExpr<PIfThenElseOp, TCond, TA, TB> if_then_else(const Pattern<TCond>& cond,
                                                            const Pattern<TA>& true_value,
                                                            const Pattern<TB>& false_value) {
  return PCallExpr<PIfThenElseOp, TCond, TA, TB>(cond.derived(), true_value.derived(),
                                                 false_value.derived());
}

}  // namespace arith
}  // namespace tvm
#endif  // TVM_ARITH_PATTERN_MATCH_H_
https://github.com/zk-ml/tachikoma
src/arith/rewrite_simplify.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file rewrite_simplify.h
 * \brief Rewrite-rule based simplification.
 */
#ifndef TVM_ARITH_REWRITE_SIMPLIFY_H_
#define TVM_ARITH_REWRITE_SIMPLIFY_H_

#include <tvm/arith/analyzer.h>
#include <tvm/tir/op.h>

#include <unordered_map>
#include <vector>

#include "const_fold.h"
#include "ir_mutator_with_analyzer.h"
#include "pattern_match.h"

namespace tvm {
namespace arith {

using namespace tir;

/*!
 * \brief Rewrite-based simplifier.
 *
 * This class can be inherited for other simplifiers.
 */
class RewriteSimplifier::Impl : public IRMutatorWithAnalyzer {
 public:
  using IRMutatorWithAnalyzer::VisitExpr_;

  explicit Impl(Analyzer* parent) : IRMutatorWithAnalyzer(parent) {}

  // Record var -> info binding; override_info controls whether an existing binding is replaced.
  void Update(const Var& var, const PrimExpr& info, bool override_info);
  PrimExpr VisitExpr_(const AddNode* op) override;
  PrimExpr VisitExpr_(const SubNode* op) override;
  PrimExpr VisitExpr_(const MulNode* op) override;
  PrimExpr VisitExpr_(const DivNode* op) override;
  PrimExpr VisitExpr_(const ModNode* op) override;
  PrimExpr VisitExpr_(const FloorDivNode* op) override;
  PrimExpr VisitExpr_(const FloorModNode* op) override;
  PrimExpr VisitExpr_(const MinNode* op) override;
  PrimExpr VisitExpr_(const MaxNode* op) override;
  PrimExpr VisitExpr_(const EQNode* op) override;
  PrimExpr VisitExpr_(const NENode* op) override;
  PrimExpr VisitExpr_(const LTNode* op) override;
  PrimExpr VisitExpr_(const LENode* op) override;
  PrimExpr VisitExpr_(const GTNode* op) override;
  PrimExpr VisitExpr_(const GENode* op) override;
  PrimExpr VisitExpr_(const AndNode* op) override;
  PrimExpr VisitExpr_(const OrNode* op) override;
  PrimExpr VisitExpr_(const NotNode* op) override;
  PrimExpr VisitExpr_(const SelectNode* op) override;
  PrimExpr VisitExpr_(const CallNode* op) override;
  PrimExpr VisitExpr_(const VarNode* op) override;
  PrimExpr VisitExpr_(const CastNode* op) override;
  PrimExpr VisitExpr_(const LetNode* op) override;

  // Push a constraint into scope; the returned callback pops it again.
  std::function<void()> EnterConstraint(const PrimExpr& constraint);

  /*! \brief Enable an optional extension or extensions
   *
   * \param flags A bitwise OR of all optional extensions that should
   * be enabled.
   */
  void SetEnabledExtensions(Extension flags);

  /*! \brief Return the currently enabled extensions */
  Extension GetEnabledExtensions() const;

 protected:
  // counter to record recursive rewrite depth.
  int recur_depth_{0};
  // internal variable map
  std::unordered_map<Var, PrimExpr, ObjectPtrHash, ObjectPtrEqual> var_map_;

  // constraints currently in scope, pushed/popped by EnterConstraint
  std::vector<PrimExpr> literal_constraints_;

  // Optionally enabled extensions
  Extension enabled_extensions_{kNone};

  /*! \brief Whether the simplifier is currently inside a recursively visited
   *  boolean expression (NOTE(review): original comment was truncated —
   *  presumably gates boolean-specific rewrites; confirm against the .cc). */
  bool recursively_visiting_boolean_{false};

  // maximum number of recursion allowed during a single pass.
  static const constexpr int kMaxRecurDepth = 5;

  /*!
   * \brief try to compare x against val.
   * \param x The expression to be evaluated.
   * \param val The constant value.
   * \return comparison result.
   */
  CompareResult TryCompare(const PrimExpr& x, int64_t val);

  /*! Try to compare x against y
   *
   * \param x The lhs of the comparison
   * \param y The rhs of the comparison
   * \return comparison result.
   */
  CompareResult TryCompare(const PrimExpr& x, const PrimExpr& y);

  /*!
   * \brief Internal function to check whether or not to inline let.
   * \param op The let expr.
   * \return The inline decision.
   */
  bool CanInlineLet(const LetNode* op);

  /*! \brief Internal function to apply constraints
   *
   * Tests whether the expression is known to be true or false based
   * on existing constraints.  If the expression or its negation
   * matches a constraint, return the boolean it should be replaced
   * with.  Otherwise, return false.
   */
  Optional<PrimExpr> TryMatchLiteralConstraint(const PrimExpr& expr) const;

  /*! \brief Rewrite rules for Less Than comparisons
   *
   * These are separate from the VisitExpr_(const LTNode*) method, as
   * they may be required from rewrites of LT or LE.
   */
  PrimExpr ApplyRewriteRules(LT node);

  /*! \brief Rewrite rules for Equal comparisons
   *
   * These are separate from the VisitExpr_(const EQNode*) method, as
   * they may be required from rewrites of LE or NE.
   */
  PrimExpr ApplyRewriteRules(EQ node);

  /*! \brief Rewrite rules for Equal comparisons
   *
   * These are separate from the VisitExpr_(const EQNode*) method, as
   * they may be required from rewrites of LT, LE, or NE.
   */
  PrimExpr ApplyRewriteRules(Not node);

 private:
  CompareResult TryCompareUsingKnownInequalities(const PrimExpr& x, const PrimExpr& y);
  // NOTE(review): `y` is passed as `const PrimExpr` by value — likely a typo for
  // `const PrimExpr&`; left unchanged here because the out-of-line definition
  // must match this declaration.
  CompareResult TryCompareUsingConstIntBounds(const PrimExpr& x, const PrimExpr y);

  // Whether x >= val
  bool CanProveGreaterEqual(const PrimExpr& x, int64_t val) {
    return analyzer_->CanProveGreaterEqual(x, val);
  }
  // Whether x < val
  bool CanProveLess(const PrimExpr& x, int64_t val) { return analyzer_->CanProveLess(x, val); }
  // Whether x == val
  bool CanProveEqual(const PrimExpr& x, int64_t val) {
    // TODO(tqchen) refer back to super-analyzer.
    return TryCompare(x, val) == CompareResult::kEQ;
  }

  // Recursive rewrite x
  // we limit maximum depth of recursive rewrite allowed to
  // avoid infinite loop
  PrimExpr RecursiveRewrite(const PrimExpr& x) {
    if (recur_depth_ >= kMaxRecurDepth) return x;
    ++recur_depth_;
    PrimExpr res = this->VisitExpr(x);
    --recur_depth_;
    return res;
  }

  template <typename TA>
  PConstWithTypeLike<TA> ZeroWithTypeLike(const Pattern<TA>& pattern) {
    return PConstWithTypeLike<TA>(pattern.derived(), 0);
  }

  template <typename TA>
  PConstWithTypeLike<TA> OneWithTypeLike(const Pattern<TA>& pattern) {
    return PConstWithTypeLike<TA>(pattern.derived(), 1);
  }
};

}  // namespace arith
}  // namespace tvm
#endif  // TVM_ARITH_REWRITE_SIMPLIFY_H_
https://github.com/zk-ml/tachikoma
src/auto_scheduler/search_policy/empty_policy.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file auto_scheduler/search_policy/empty_policy.h
 * \brief A simple example of the search policy which always returns the initial naive schedule
 * (state).
 */

#ifndef TVM_AUTO_SCHEDULER_SEARCH_POLICY_EMPTY_POLICY_H_
#define TVM_AUTO_SCHEDULER_SEARCH_POLICY_EMPTY_POLICY_H_

#include <tvm/auto_scheduler/loop_state.h>
#include <tvm/auto_scheduler/measure.h>
#include <tvm/auto_scheduler/search_policy.h>

#include <utility>

namespace tvm {
namespace auto_scheduler {

/*!
 * \brief A simple example of the search policy which always returns the initial naive schedule
 * (state).
 * The key implementation for this structure is `Search()`, check `empty_policy.cc` for more
 * details.
 */
class EmptyPolicyNode : public SearchPolicyNode {
 public:
  /*! \brief Run the full search loop; see SearchPolicyNode for parameter semantics. */
  State Search(int num_measure_trials, int early_stopping, int num_measures_per_round,
               ProgramMeasurer measurer) final;

  /*! \brief Run one round of search/measurement and return the measured inputs and results. */
  std::pair<Array<MeasureInput>, Array<MeasureResult>> ContinueSearchOneRound(
      int num_measure, ProgramMeasurer measurer) final;

  static constexpr const char* _type_key = "auto_scheduler.EmptyPolicy";
  TVM_DECLARE_FINAL_OBJECT_INFO(EmptyPolicyNode, SearchPolicyNode);

 private:
  /*!
   * \brief Use a sub function to generate several candidate states in each search round.
   * \returns The generated states
   */
  Array<State> SearchOneRound();
};

/*!
 * \brief Managed reference to EmptyPolicyNode.
 * \sa EmptyPolicyNode
 */
class EmptyPolicy : public SearchPolicy {
 public:
  /*!
   * \brief The constructor.
   * \param task The SearchTask for the computation declaration.
   * \param init_search_callbacks SearchCallbacks to be called before schedule search.
   */
  explicit EmptyPolicy(SearchTask task, Optional<Array<SearchCallback>> init_search_callbacks);

  TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(EmptyPolicy, SearchPolicy, EmptyPolicyNode);
};

}  // namespace auto_scheduler
}  // namespace tvm

#endif  // TVM_AUTO_SCHEDULER_SEARCH_POLICY_EMPTY_POLICY_H_
https://github.com/zk-ml/tachikoma
src/auto_scheduler/search_policy/sketch_policy.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file auto_scheduler/search_policy/sketch_policy.h
 * \brief This search policy constructs a search space according to the compute declaration.
 * It then randomly samples programs from the search space and uses evolutionary search with a
 * learned cost model to fine tune the sampled programs.
 * The final optimized programs are sent to actual hardware for measurement.
 * The above process is repeated until the auto-scheduler runs out of time budget.
 *
 * Reference:
 * L. Zheng, C. Jia, M. Sun, Z. Wu, C. Yu, et al. "Ansor : Generating High-Performance Tensor
 * Programs for Deep Learning." (OSDI 2020).
 */

#ifndef TVM_AUTO_SCHEDULER_SEARCH_POLICY_SKETCH_POLICY_H_
#define TVM_AUTO_SCHEDULER_SEARCH_POLICY_SKETCH_POLICY_H_

#include <tvm/auto_scheduler/cost_model.h>
#include <tvm/auto_scheduler/search_policy.h>

#include <memory>
#include <set>
#include <string>
#include <unordered_set>
#include <utility>
#include <vector>

#include "sketch_policy_rules.h"
#include "utils.h"

namespace tvm {
namespace auto_scheduler {

/*! \brief String keys used in parameter map of SketchPolicy. */
struct SketchParamKey {
  /*! \brief Always allocate this percentage of measurements to random sampled states. */
  static constexpr const char* eps_greedy = "eps_greedy";
  /*! \brief Retry several times if SearchOneRound gets no valid state. */
  static constexpr const char* empty_retry_count = "retry_search_one_round_on_empty";

  struct SampleInitPopulation {
    /*! \brief The minimal size of valid population in the initial sampling. */
    static constexpr const char* min_population = "sample_init_min_population";
    /*! \brief The maximum percentage of measured states in the initial sampling. */
    static constexpr const char* use_measured_ratio = "sample_init_use_measured_ratio";
  };

  struct EvolutionarySearch {
    /*! \brief The population size of evolutionary search. */
    static constexpr const char* population = "evolutionary_search_population";
    /*! \brief The number of iterations performed by the genetic algorithm. */
    static constexpr const char* num_iters = "evolutionary_search_num_iters";
    /*! \brief The mutation probability. */
    static constexpr const char* mutation_prob = "evolutionary_search_mutation_prob";
  };

  struct MultiLevelTiling {
    /*! \brief The structure of multi-level tiling for CPU. */
    static constexpr const char* cpu_structure = "cpu_multi_level_tiling_structure";
    /*! \brief The structure of multi-level tiling for GPU. */
    static constexpr const char* gpu_structure = "gpu_multi_level_tiling_structure";
  };

  /*! \brief The max inner most split factor. */
  static constexpr const char* max_innermost_split_factor = "max_innermost_split_factor";
  /*! \brief The max vectorize size. */
  static constexpr const char* max_vectorize_size = "max_vectorize_size";
  /*! \brief Whether disable compute location changing. */
  static constexpr const char* disable_change_compute_location = "disable_change_compute_location";
};

class SketchPolicy;

/*!
 * \brief The search policy that searches in a hierarchical search space defined by sketches.
 * The policy randomly samples programs from the space defined by sketches
 * and use evolutionary search to fine-tune them.
 */
class SketchPolicyNode : public SearchPolicyNode {
 public:
  /*! \brief The cost model to estimate the complete schedules. */
  CostModel program_cost_model;
  /*! \brief The parameters map for this search policy. */
  Map<String, ObjectRef> params;
  /*! \brief The rules to generate sketches.
   * NOTE(review): raw pointers — ownership/lifetime is managed elsewhere; confirm in sketch_policy.cc. */
  std::vector<SketchGenerationRule*> sketch_rules;
  /*! \brief The rules to generate initial population. */
  std::vector<PopulationGenerationRule*> init_rules;
  /*! \brief The rules to mutate states in the evolutionary search. */
  std::vector<std::shared_ptr<PopulationMutationRule>> mutation_rules;
  /*! \brief Random generator. */
  std::mt19937 rand_gen;
  /*! \brief Memorize split space for Split. */
  SplitFactorizationMemo split_memo;

  /*! \brief Run the full search loop; see SearchPolicyNode for parameter semantics. */
  State Search(int num_measure_trials, int early_stopping, int num_measures_per_round,
               ProgramMeasurer measurer) final;

  /*! \brief Run one round of search/measurement and return the measured inputs and results. */
  std::pair<Array<MeasureInput>, Array<MeasureResult>> ContinueSearchOneRound(
      int num_measure, ProgramMeasurer measurer) final;

  /*!
   * \brief Generate sketches.
   * \return The generated sketches(states).
   */
  Array<State> GenerateSketches();

  /*!
   * \brief Sample the init population.
   * \param sketches The initial sketches for the sampled population
   * \return The generated states (the initial population).
   */
  Array<State> SampleInitPopulation(const Array<State>& sketches);

  /*!
   * \brief Perform evolutionary search.
   * \param init_populations The states generated from init population.
   * \param out_size The number of expected output states.
   * \return The generated states after evolutionary search.
   */
  Array<State> EvolutionarySearch(const Array<State>& init_populations, int out_size);

  static constexpr const char* _type_key = "auto_scheduler.SketchPolicy";
  TVM_DECLARE_FINAL_OBJECT_INFO(SketchPolicyNode, SearchPolicyNode);

 private:
  /*!
   * \brief Run one round of the search pipeline.
   * \param num_random_states Number of states that are picked randomly, this is used for
   * eps-greedy policy.
   * \param random_states The picked random states, used as one of the output of this function.
   * \return The best several states generated in this search round.
   */
  Array<State> SearchOneRound(int num_random_states, Array<State>* random_states = nullptr);

  /*!
   * \brief Pick states from best states and random states with eps-greedy policy.
   * \param best_states States picked by cost model.
   * \param random_states States picked randomly.
   * \param remaining_n_trials The remaining number of states need to be generated.
   * \return The generated states to be measured, wrapped in MeasureInput.
   */
  Array<MeasureInput> PickStatesWithEpsGreedy(const Array<State>& best_states,
                                              const Array<State>& random_states,
                                              int remaining_n_trials);

  /*! \brief The number of states to measure per iteration. */
  int num_measure_per_iter_;

  /*! \brief The cached sketches */
  Array<State> sketch_cache_;

  /*! \brief The minimal output population of SampleInitPopulation */
  int sample_init_min_pop_;

  friend class SketchPolicy;
};

/*!
 * \brief Managed reference to SketchPolicyNode.
 * \sa SketchPolicyNode
 */
class SketchPolicy : public SearchPolicy {
 public:
  /*!
   * \brief The constructor.
   * \param task The SearchTask for the computation declaration.
   * \param program_cost_model The cost model for complete programs.
   * \param params The parameters map for this search process.
   * \param seed The random seed of this search process.
   * \param verbose Verbose level. 0 for silent, 1 to output information during schedule
   * search.
   * \param init_search_callbacks SearchCallback to be called before schedule search.
   */
  SketchPolicy(SearchTask task, CostModel program_cost_model, Map<String, ObjectRef> params,
               int seed, int verbose, Optional<Array<SearchCallback>> init_search_callbacks);

  TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(SketchPolicy, SearchPolicy, SketchPolicyNode);
};

/*! \brief Pre-search callback function to load custom rules for sketch generation */
class PreloadCustomSketchRuleNode : public SearchCallbackNode {
 public:
  /*! \brief The condition check function of this rule. */
  PackedFunc meet_condition_func;
  /*! \brief The apply function of this rule. */
  PackedFunc apply_func;
  /*! \brief The name of this rule. */
  String rule_name;

  /*! \brief Register the custom sketch rule on the given policy. */
  void Callback(SearchPolicyNode* policy) final;

  static constexpr const char* _type_key = "auto_scheduler.PreloadCustomSketchRule";
  TVM_DECLARE_FINAL_OBJECT_INFO(PreloadCustomSketchRuleNode, SearchCallbackNode);
};

/*!
 * \brief Managed reference to PreloadCustomSketchRuleNode.
 * \sa PreloadCustomSketchRuleNode
 */
class PreloadCustomSketchRule : public SearchCallback {
 public:
  /*!
   * \brief The constructor.
   * \param meet_condition_func The condition check function of this rule.
   * \param apply_func The apply function of this rule.
   * \param rule_name The name of this rule.
   */
  PreloadCustomSketchRule(PackedFunc meet_condition_func, PackedFunc apply_func, String rule_name);

  TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(PreloadCustomSketchRule, SearchCallback,
                                        PreloadCustomSketchRuleNode);
};

}  // namespace auto_scheduler
}  // namespace tvm

#endif  // TVM_AUTO_SCHEDULER_SEARCH_POLICY_SKETCH_POLICY_H_
https://github.com/zk-ml/tachikoma
src/auto_scheduler/search_policy/sketch_policy_rules.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file auto_scheduler/search_policy/sketch_policy_rules.h
 * \brief Rules for generating the sketches, sampling the initial population, and mutating the
 * population in SketchPolicy.
 */

#ifndef TVM_AUTO_SCHEDULER_SEARCH_POLICY_SKETCH_POLICY_RULES_H_
#define TVM_AUTO_SCHEDULER_SEARCH_POLICY_SKETCH_POLICY_RULES_H_

#include <tvm/auto_scheduler/loop_state.h>
#include <tvm/auto_scheduler/search_task.h>

#include <string>
#include <utility>
#include <vector>

#include "utils.h"

namespace tvm {
namespace auto_scheduler {

class SketchPolicyNode;

/********** Sketch Generation Rule **********/

/*! \brief The base class for derivation rules used in the sketch generation.
 * NOTE(review): polymorphic base with no virtual destructor — instances are held as raw
 * pointers in SketchPolicyNode::sketch_rules; confirm they are never deleted through this
 * base before relying on it. */
class SketchGenerationRule {
 public:
  /*! \brief Result enumeration of the condition function. */
  enum class ConditionKind : int {
    /*! \brief Skip this rule and continue to try the next rules. */
    kSkip = 0,
    /*! \brief Apply this rule and continue to try the next rules. */
    kApply = 1,
    /*! \brief Apply this rule and skip the rest rules. */
    kApplyAndSkipRest = 2
  };

  /*!
   * \brief Condition check function of this rule.
   * \param policy The SketchPolicyNode of this rule, some information may be used during
   * the condition checking.
   * \param state The original state to be checked.
   * \param stage_id The index of the stage to process this condition check.
   * \return The condition check result of this rule.
   */
  virtual ConditionKind MeetCondition(const SketchPolicyNode& policy, const State& state,
                                      int stage_id) const = 0;

  /*!
   * \brief Apply function of this rule.
   * \param policy The SketchPolicyNode of this rule, some information may be used during
   * the rule applying.
   * \param state The original state to apply this rule.
   * \param stage_id The index of the next stage to apply this rule.
   * \return The state after applying this rule, and index of the next stage.
   */
  virtual std::vector<std::pair<State, int>> Apply(const SketchPolicyNode& policy,
                                                   const State& state, int stage_id) const = 0;

  /*!
   * \brief Get the name of this rule.
   * \return A string of the rule name.
   */
  virtual std::string GetRuleName() const = 0;
};

// Helper macro: declare a concrete SketchGenerationRule whose GetRuleName()
// returns the stringized class name.
#define DEFINE_SKETCH_GENERATION_RULE(rule_name)                                                 \
  class rule_name : public SketchGenerationRule {                                                \
   public:                                                                                       \
    ConditionKind MeetCondition(const SketchPolicyNode& policy, const State& state,              \
                                int stage_id) const final;                                       \
    std::vector<std::pair<State, int>> Apply(const SketchPolicyNode& policy, const State& state, \
                                             int stage_id) const final;                          \
    std::string GetRuleName() const final { return #rule_name; }                                 \
  };

/*! \brief The rule that simply skips the current stage. It returns an unchanged state and move to
 * the next stage. */
DEFINE_SKETCH_GENERATION_RULE(RuleSkipStage);

/*! \brief The rule that inlines simple elementwise ops.
 * \note This rule only inlines the strictly inlineable stages. Stages marked as not strictly
 * inlineable will have a chance to try different compute at location in InitPopulation later.
 */
DEFINE_SKETCH_GENERATION_RULE(RuleAlwaysInline);

/*! \brief The rule that performs multi-level tiling. */
DEFINE_SKETCH_GENERATION_RULE(RuleMultiLevelTiling);

/*! \brief The rule that performs multi-level tiling and fuses later consumers. */
DEFINE_SKETCH_GENERATION_RULE(RuleMultiLevelTilingWithFusion);

/*! \brief The rule that adds a cache read stage. Mainly used for GPU cooperative fetching,
 * Currently only support 1 to 1 match cache read. */
DEFINE_SKETCH_GENERATION_RULE(RuleAddCacheRead);

/*! \brief The rule that adds a cache write stage. */
DEFINE_SKETCH_GENERATION_RULE(RuleAddCacheWrite);

/*! \brief The rule that adds rfactor stage. */
DEFINE_SKETCH_GENERATION_RULE(RuleAddRfactor);

/*! \brief The rule that deals with compute ops that perform "fake reduction" with const tensors.
 * This kind of op comes from winograd transformation. */
DEFINE_SKETCH_GENERATION_RULE(RuleSimplifyComputeWithConstTensor);

/*! \brief The rule that use cross thread reduction for GPU. */
DEFINE_SKETCH_GENERATION_RULE(RuleCrossThreadReduction);

/*! \brief Handle special cases in Winograd transformation for GPU. We need to change the compute
 * location of the producers of compute ops that perform "fake reduction" with const tensors. */
DEFINE_SKETCH_GENERATION_RULE(RuleSpecialComputeLocationGPU);

/*! \brief The rule that allows users to generate custom sketches. */
class RuleCustomSketch : public SketchGenerationRule {
 public:
  /*! \brief Wrap user-provided PackedFuncs as a sketch generation rule. */
  RuleCustomSketch(PackedFunc meet_condition_func, PackedFunc apply_func,
                   String rule_name = "CustomSketchRule")
      : meet_condition_func_(std::move(meet_condition_func)),
        apply_func_(std::move(apply_func)),
        rule_name_(std::move(rule_name)) {}

  ConditionKind MeetCondition(const SketchPolicyNode& policy, const State& state,
                              int stage_id) const final;

  std::vector<std::pair<State, int>> Apply(const SketchPolicyNode& policy, const State& state,
                                           int stage_id) const final;

  std::string GetRuleName() const final { return rule_name_; }

 private:
  PackedFunc meet_condition_func_;
  PackedFunc apply_func_;
  String rule_name_;
};

/********** Init Population **********/

/*! \brief The base class for rules used to annotate the sketches to get the initial population. */
class PopulationGenerationRule {
 public:
  /*! \brief Result enumeration of the apply function. */
  enum class ResultKind : int { kValid = 0, kInvalid = 1 };

  /*!
   * \brief Apply function of this rule.
   * \param policy The SketchPolicyNode of this rule, some member may get changed during the
   * rule applying. (e.g. random number generator)
   * \param state The state to apply this rule, update inplace.
   * \param rand_gen The random number generator to draw from.
   * \return The result of this rule, indicate if there's any valid state generated.
   */
  virtual ResultKind Apply(SketchPolicyNode* policy, State* state,
                           std::mt19937* rand_gen) const = 0;

  /*! \brief The virtual destructor. */
  virtual ~PopulationGenerationRule() = default;
};

// A helper to define population initialization rules
#define DEFINE_INIT_POPULATION_RULE(rule_name)                                                   \
  class rule_name : public PopulationGenerationRule {                                            \
   public:                                                                                       \
    ResultKind Apply(SketchPolicyNode* policy, State* state, std::mt19937* rand_gen) const final; \
  };

/*! \brief The rule that fills the incomplete SplitSteps. */
DEFINE_INIT_POPULATION_RULE(InitFillTileSize);

/*! \brief The rule that randomly changes the computation location for some stages that do not
 * need tiling and are not strictly inlineable(e.g. data padding). */
DEFINE_INIT_POPULATION_RULE(InitChangeComputeLocation);

/*! \brief The rule that annotates parallel for CPU. */
DEFINE_INIT_POPULATION_RULE(InitParallel);

/*! \brief The rule that annotates unroll. */
DEFINE_INIT_POPULATION_RULE(InitUnroll);

/*! \brief The rule that annotates vectorization. */
DEFINE_INIT_POPULATION_RULE(InitVectorization);

/*! \brief The rule that annotates thread binding for GPU. */
DEFINE_INIT_POPULATION_RULE(InitThreadBind);

/********** Mutation **********/

/*! \brief The base class for mutation rules used in the evolutionary search. */
class PopulationMutationRule : public PopulationGenerationRule {
 public:
  /* \brief The constructor
   * \param selection_weight the probability of applying this rule is
   * proportional to this weight
   */
  explicit PopulationMutationRule(double selection_weight) : weight(selection_weight) {}

  /* \brief The weight of this rule */
  double weight;
};

// A helper to define mutation rules used in the evolutionary search
#define DEFINE_MUTATE_POPULATION_RULE(rule_name)                                                 \
  class rule_name : public PopulationMutationRule {                                              \
   public:                                                                                       \
    explicit rule_name(double weight) : PopulationMutationRule(weight) {}                        \
    ResultKind Apply(SketchPolicyNode* policy, State* state, std::mt19937* rand_gen) const final; \
  };

/*! \brief The rule that mutates tile size by randomly dividing a tile size by a factor
 * and multiplying it to another tile size. */
DEFINE_MUTATE_POPULATION_RULE(MutateTileSize);

/*! \brief The rule that mutates the number of fused outer iterators annotated by parallel. */
DEFINE_MUTATE_POPULATION_RULE(MutateParallel);

/*! \brief The rule that randomly changes the computation location for some stages that do not
 * need tiling and are not strictly inlineable(e.g. data padding). */
DEFINE_MUTATE_POPULATION_RULE(MutateComputeLocation);

/*! \brief The rule that mutates the value of a randomly selected auto unroll pragma step. */
DEFINE_MUTATE_POPULATION_RULE(MutateAutoUnroll);

}  // namespace auto_scheduler
}  // namespace tvm

#endif  // TVM_AUTO_SCHEDULER_SEARCH_POLICY_SKETCH_POLICY_RULES_H_
https://github.com/zk-ml/tachikoma
src/auto_scheduler/search_policy/utils.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file auto_scheduler/search_policy/utils.h
 * \brief Common utilities for search policies.
 */

#ifndef TVM_AUTO_SCHEDULER_SEARCH_POLICY_UTILS_H_
#define TVM_AUTO_SCHEDULER_SEARCH_POLICY_UTILS_H_

#include <dmlc/common.h>
#include <tvm/auto_scheduler/loop_state.h>
#include <tvm/auto_scheduler/search_policy.h>
#include <tvm/ir/expr.h>
#include <tvm/te/operation.h>

#include <algorithm>
#include <condition_variable>
#include <set>
#include <string>
#include <tuple>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>

#include "../utils.h"

namespace tvm {
namespace auto_scheduler {

/*! \brief Return whether the search task is targeting a CPU. */
inline bool IsCPUTask(const SearchTask& task) {
  return (task)->target->GetTargetDeviceType() == kDLCPU;
}

/*! \brief Return whether the search task is targeting a GPU. */
inline bool IsGPUTask(const SearchTask& task) {
  int device_type = (task)->target->GetTargetDeviceType();
  return device_type == kDLCUDA || device_type == kDLOpenCL || device_type == kDLVulkan ||
         device_type == kDLMetal || device_type == kDLROCM || device_type == kOpenGL;
}

/*! \brief Return whether the search task is targeting a CUDA GPU. */
inline bool IsCUDATask(const SearchTask& task) {
  return (task)->target->GetTargetDeviceType() == kDLCUDA;
}

/*! \brief Return whether the search task is targeting a OpenCL GPU. */
inline bool IsOpenCLTask(const SearchTask& task) {
  return (task)->target->GetTargetDeviceType() == kDLOpenCL;
}

/*! \brief Argsort. Order: largest to smallest.
 * Returns the indices that would sort `scores` in descending order. */
template <typename T>
inline std::vector<int> Argsort(const std::vector<T>& scores) {
  std::vector<int> index;
  index.reserve(scores.size());
  for (size_t i = 0; i < scores.size(); ++i) {
    index.push_back(i);
  }
  // descending comparison on the referenced scores
  auto cmp = [&scores](int l, int r) { return scores[l] > scores[r]; };
  std::sort(index.begin(), index.end(), cmp);
  return index;
}

/*! \brief Convert operation to stage id.
 * Fatal error (LOG(FATAL)) if the op is not found in the state's stages. */
inline int OperationToStage(const te::Operation& op, const State& state) {
  for (size_t i = 0; i < state->stages.size(); ++i) {
    if (op == state->stages[i]->op) {
      return i;
    }
  }
  LOG(FATAL) << "Cannot find op: " << op;
  return -1;
}

/********** Get Parameters **********/

/*! \brief Get an integer from a tvm str Map.
 * NOTE(review): IntImmNode::value is int64_t and is narrowed to int here — confirm
 * callers never store parameters outside int range. */
inline int GetIntParam(const Map<String, ObjectRef>& attr_dict, const std::string& key) {
  ICHECK_GT(attr_dict.count(key), 0) << "Cannot find key: \"" << key << "\" in " << attr_dict;
  auto pint = attr_dict[key].as<IntImmNode>();
  ICHECK(pint != nullptr);
  return pint->value;
}

/*! \brief Get a double from a tvm str Map. */
inline double GetDoubleParam(const Map<String, ObjectRef>& attr_dict, const std::string& key) {
  ICHECK_GT(attr_dict.count(key), 0) << "Cannot find key: \"" << key << "\" in " << attr_dict;
  auto pdouble = attr_dict[key].as<FloatImmNode>();
  ICHECK(pdouble != nullptr);
  return pdouble->value;
}

/*! \brief Get a string from a tvm str Map.
 * Accepts either a StringImm node or a runtime String object. */
inline std::string GetStringParam(const Map<String, ObjectRef>& attr_dict, const std::string& key) {
  ICHECK_GT(attr_dict.count(key), 0) << "Cannot find key: \"" << key << "\" in " << attr_dict;
  const auto& target = attr_dict[key];
  if (auto pstr = target.as<StringImmNode>()) {
    return pstr->value;
  }
  auto pstr = target.as<StringObj>();
  ICHECK(pstr != nullptr);
  return pstr->data;
}

/*! \brief Get a iterator name set from a tvm str Map. */
inline std::set<std::string> GetIterNameSetParam(const Map<String, ObjectRef>& attr_dict,
                                                 const std::string& key) {
  std::set<std::string> ret;
  ICHECK_GT(attr_dict.count(key), 0) << "Cannot find key: \"" << key << "\" in " << attr_dict;
  auto names = attr_dict[key].as<ArrayNode>();
  ICHECK(names != nullptr);
  for (const auto& name : *names) {
    // NOTE(review): unchecked as<StringObj>() — a non-string array element would
    // dereference null; upstream relies on callers passing string arrays.
    ret.insert(name.as<StringObj>()->data);
  }
  return ret;
}

/********** Checks with ComputeDAG **********/

/*! \brief Return whether an op is strictly-inlineable.
 * Uses the state's current (possibly rewritten) ComputeDAG when present,
 * otherwise falls back to the task's original DAG. */
inline bool IsStrictlyInlineable(const SearchTask& task, const State& state, int stage_id) {
  if (state->current_compute_dag) {
    return state->current_compute_dag.as<ComputeDAGNode>()->access_analyzer.IsStrictlyInlineable(
        state->stages[stage_id]->op);
  } else {
    return task->compute_dag->access_analyzer.IsStrictlyInlineable(state->stages[stage_id]->op);
  }
}

/*! \brief Return whether an op is an output op. */
inline bool IsOutputOp(const SearchTask& task, const State& state, int stage_id) {
  if (state->current_compute_dag) {
    return state->current_compute_dag.as<ComputeDAGNode>()->access_analyzer.IsOutput(
        state->stages[stage_id]->op);
  } else {
    return task->compute_dag->access_analyzer.IsOutput(state->stages[stage_id]->op);
  }
}

/*! \brief Return whether an op needs multi level tiling. */
inline bool NeedsMultilevelTiling(const SearchTask& task, const State& state, int stage_id) {
  if (state->current_compute_dag) {
    return state->current_compute_dag.as<ComputeDAGNode>()->access_analyzer.NeedsMultiLevelTiling(
        state->stages[stage_id]->op);
  } else {
    return task->compute_dag->access_analyzer.NeedsMultiLevelTiling(state->stages[stage_id]->op);
  }
}

/*! \brief Get all consumers for a stage. This function propagates the relation for inlined ops. */
inline std::set<int> GetConsumers(const SearchTask& task, const State& state, int stage_id) {
  std::unordered_set<te::Operation, ObjectHash, ObjectEqual> consumers;
  std::set<int> ret;

  if (state->current_compute_dag) {
    consumers = state->current_compute_dag.as<ComputeDAGNode>()->access_analyzer.GetConsumers(
        state, state->stages[stage_id]->op);
  } else {
    consumers = task->compute_dag->access_analyzer.GetConsumers(state, state->stages[stage_id]->op);
  }

  for (const auto& op : consumers) {
    ret.insert(OperationToStage(op, state));
  }
  return ret;
}

/*! \brief Check if a stage has single consumer or all of its consumers share a common root, return
 * the target consumer root or -1. */
inline int GetSingleConsumerId(const SearchTask& task, const State& state, int stage_id) {
  const std::set<int>& consumers = GetConsumers(task, state, stage_id);
  if (consumers.empty()) {
    return -1;
  }

  if (consumers.size() == 1) {
    return *consumers.begin();
  } else {
    // Check all consumers share a common root
    int common_root_id = -1;
    bool mismatch = false;
    for (const auto& consumer_stage_id : consumers) {
      int root_id = -1;
      if (state->stages[consumer_stage_id]->compute_at == ComputeAtKind::kRoot) {
        root_id = consumer_stage_id;
      } else if (state->stages[consumer_stage_id]->compute_at == ComputeAtKind::kIter) {
        root_id = state->attach_map->stage_to_attach_iter.at(consumer_stage_id).first;
      } else {
        LOG(FATAL) << "Invalid case";
      }

      if (common_root_id == -1) {
        common_root_id = root_id;
      } else {
        if (common_root_id != root_id) {
          mismatch = true;
          break;
        }
      }
    }

    return mismatch ? -1 : common_root_id;
  }
}

/*! \brief Get all producers for a stage. This function propagates the relation for inlined ops. */
inline std::set<int> GetProducers(const SearchTask& task, const State& state, int stage_id) {
  std::unordered_set<te::Operation, ObjectHash, ObjectEqual> producers;
  std::set<int> ret;

  if (state->current_compute_dag) {
    producers = state->current_compute_dag.as<ComputeDAGNode>()->access_analyzer.GetProducers(
        state, state->stages[stage_id]->op);
  } else {
    producers = task->compute_dag->access_analyzer.GetProducers(state, state->stages[stage_id]->op);
  }

  for (const auto& op : producers) {
    ret.insert(OperationToStage(op, state));
  }
  return ret;
}

/*! \brief Get all producers for a stage. This function DOES NOT propagates the relation for
 * inlined ops. */
inline std::set<int> GetDirectProducers(const SearchTask& task, const State& state, int stage_id) {
  std::unordered_set<te::Operation, ObjectHash, ObjectEqual> producers;
  std::set<int> ret;

  if (state->current_compute_dag) {
    producers = state->current_compute_dag.as<ComputeDAGNode>()->access_analyzer.GetDirectProducers(
        state->stages[stage_id]->op);
  } else {
    producers = task->compute_dag->access_analyzer.GetDirectProducers(state->stages[stage_id]->op);
  }

  for (const auto& op : producers) {
    ret.insert(OperationToStage(op, state));
  }
  return ret;
}

/*! \brief Get the number of common outer iterators. This function propagates the relation for
 * chains with multiple ops. */
inline int GetNumCommonOuterIterator(const SearchTask& task, const State& state, int stage_id,
                                     int target_stage_id) {
  if (state->current_compute_dag) {
    return state->current_compute_dag.as<ComputeDAGNode>()
        ->access_analyzer.GetNumCommonOuterIterator(state->stages[stage_id]->op,
                                                    state->stages[target_stage_id]->op);
  } else {
    return task->compute_dag->access_analyzer.GetNumCommonOuterIterator(
        state->stages[stage_id]->op, state->stages[target_stage_id]->op);
  }
}

/*! \brief Return whether two ops are elementwise-matched. */
inline bool ElementwiseMatch(const SearchTask& task, const State& state, int stage_id,
                             int target_stage_id) {
  const auto& op = state->stages[stage_id]->op;
  const auto& target_op = state->stages[target_stage_id]->op;
  if (state->current_compute_dag) {
    return state->current_compute_dag.as<ComputeDAGNode>()->access_analyzer.ElementWiseMatch(
        op, target_op);
  } else {
    return task->compute_dag->access_analyzer.ElementWiseMatch(op, target_op);
  }
}

/********** Get informations from Stage/Iterator **********/

/*! \brief Return the extent of an iterator.
 * Returns -1 when the range is undefined or the extent is not a constant integer. */
inline int64_t GetExtent(const Iterator& it) {
  if (it->range.defined()) {
    if (auto pint = it->range->extent.as<IntImmNode>()) {
      return pint->value;
    }
  }
  return -1;
}

/*! \brief Compute the product of lengths of all space iters and all reduce iters, respectively.
 * NOTE(review): unknown extents contribute -1 via GetExtent, which can flip the sign of the
 * product — callers appear to rely on static shapes; confirm. */
inline std::pair<int64_t, int64_t> GetCumulativeSpaceAndReductionLength(const Stage& stage) {
  int64_t cum_space_len = 1, cum_reduce_len = 1;
  for (const auto& iter : stage->iters) {
    if (iter->iter_kind == IteratorKind::kSpatial) {
      cum_space_len *= GetExtent(iter);
    } else if (iter->iter_kind == IteratorKind::kReduction) {
      cum_reduce_len *= GetExtent(iter);
    }
  }
  return std::make_pair(cum_space_len, cum_reduce_len);
}

/*! \brief Return whether this stage needs rfactor. */
inline bool NeedsRfactor(const SearchTask& task, const State& state, int stage_id) {
  const auto& op = state->stages[stage_id]->op;
  if (op->IsInstance<te::ComputeOpNode>()) {
    // Compute the product of lengths of all space iters and all reduce iters
    // NOTE(review): int receives int64_t through std::tie — narrowing; confirm extents
    // stay within int range or widen these locals.
    int cum_space_len, cum_reduce_len;
    std::tie(cum_space_len, cum_reduce_len) =
        GetCumulativeSpaceAndReductionLength(state->stages[stage_id]);

    if (NeedsMultilevelTiling(task, state, stage_id)) {
      // Do not use rfactor if we have enough parallelism on space iters
      if (cum_space_len > cum_reduce_len || cum_space_len > task->hardware_params->num_cores * 16) {
        return false;
      } else {
        return true;
      }
    } else if (cum_reduce_len > 1) {
      // Always try rfactor for reduction ops
      return cum_reduce_len > task->hardware_params->num_cores;
    }
  }

  return false;
}

/*! \brief Return whether the stage has reduce iterators. */
inline bool HasReduceIter(const Stage& stage) {
  for (const auto& iter : stage->iters) {
    if (iter->iter_kind != IteratorKind::kSpatial) {
      return true;
    }
  }
  return false;
}

/*! \brief Return whether the stage has specific annotated iterators. */
inline bool HasAnnotatedIter(const Stage& stage, IteratorAnnotation type) {
  for (const auto& iter : stage->iters) {
    if (iter->annotation == type) {
      return true;
    }
  }
  return false;
}

/*! \brief Return whether the stage has only one consumer and they are elementwise-matched.
*/ inline bool HasSingleElementwiseMatchedConsumer(const SearchTask& task, const State& state, int stage_id, int* target_stage_id = nullptr) { // Temporal object to be used if the input pointer is nullptr int temp_target_stage_id; if (target_stage_id == nullptr) { target_stage_id = &temp_target_stage_id; } const std::set<int>& consumers = GetConsumers(task, state, stage_id); if (consumers.size() == 1) { *target_stage_id = *consumers.begin(); if (ElementwiseMatch(task, state, stage_id, *target_stage_id) && (!(HasReduceIter(state->stages[stage_id]) && HasReduceIter(state->stages[*target_stage_id]))) && (!StrEndsWith(state->stages[*target_stage_id]->op->name, ".shared"))) { return true; } } return false; } /*! \brief Return whether the step changes the number of stages */ inline bool IsStageNumberChangingStep(const Step& step) { return step->IsInstance<CacheWriteStepNode>() || step->IsInstance<CacheReadStepNode>() || step->IsInstance<RfactorStepNode>(); } /*! \brief Return whether the state does cache_read for stage_id. */ inline bool HasCacheReadStage(const State& s, int stage_id) { for (int i = static_cast<int>(s->transform_steps.size()) - 1; i >= 0; --i) { if (auto ps = s->transform_steps[i].as<CacheReadStepNode>()) { if (stage_id == ps->stage_id) { return true; } } if (IsStageNumberChangingStep(s->transform_steps[i])) { if (stage_id > s->transform_steps[i]->stage_id) { stage_id--; } } } return false; } /*! \brief Return whether the state does cache_write for stage_id. */ inline bool HasCacheWriteStage(const State& s, int stage_id) { for (int i = static_cast<int>(s->transform_steps.size()) - 1; i >= 0; --i) { if (auto ps = s->transform_steps[i].as<CacheWriteStepNode>()) { if (stage_id == ps->stage_id) { return true; } } if (IsStageNumberChangingStep(s->transform_steps[i])) { if (stage_id > s->transform_steps[i]->stage_id) { stage_id--; } } } return false; } /*! \brief Return whether the state does rfactor for stage_id. 
*/ inline bool HasRfactorStage(const State& s, int stage_id) { for (int i = static_cast<int>(s->transform_steps.size()) - 1; i >= 0; --i) { if (auto ps = s->transform_steps[i].as<RfactorStepNode>()) { if (stage_id == ps->stage_id) { return true; } } if (IsStageNumberChangingStep(s->transform_steps[i])) { if (stage_id > s->transform_steps[i]->stage_id) { stage_id--; } } } return false; } /*! \brief Return whether the stage does cross thread reduction. */ inline bool HasCrossThreadReduction(const State& state, int stage_id) { std::function<bool(const Stage&)> check_stage = [](const Stage& in_stage) { for (const auto& iter : in_stage->iters) { if (iter->annotation == IteratorAnnotation::kThreadX && iter->iter_kind == IteratorKind::kReduction) { return true; } } return false; }; // Check the stage itself if (check_stage(state->stages[stage_id])) { return true; } // Check the attached stages for (size_t iter_id = 0; iter_id < state->stages[stage_id]->iters.size(); iter_id++) { const auto& res = state->attach_map->iter_to_attached_stages.find(std::make_pair(stage_id, iter_id)); if (res != state->attach_map->iter_to_attached_stages.end()) { for (int attached_stage_id : res->second) { if (check_stage(state->stages[attached_stage_id])) { return true; } } } } return false; } /*! \brief Return whether the stage has been tiled already. */ inline bool IsTiled(const Stage& stage) { auto op = stage->op.as<te::ComputeOpNode>(); ICHECK(op != nullptr); return stage->iters.size() != op->axis.size() + op->reduce_axis.size(); } /*! \brief Extract primitive iterators from a nested fused or splitted iterator's name. */ inline void ExtractOriginalIterators(const std::string& name, std::set<std::string>* rets) { size_t last_pos = 0; for (size_t i = 0; i < name.size(); ++i) { if (name[i] == '@' || name[i] == '.') { // '@' for fuse and '.' 
for split if (!isdigit(name[last_pos]) && name[last_pos] != '@' && name[last_pos] != '.') { rets->insert(name.substr(last_pos, i - last_pos)); } last_pos = i + 1; } } if (last_pos < name.size() && !isdigit(name[last_pos]) && name[last_pos] != '@' && name[last_pos] != '.') { rets->insert(name.substr(last_pos, name.size() - last_pos)); } } /*! \brief Get the last reduce iterator in the outermost reduce tile. */ inline Iterator GetLastReduceIteratorInOutermostReduceTile(const Stage& stage) { auto pop = stage->op.as<te::ComputeOpNode>(); ICHECK(pop != nullptr); std::set<std::string> original_names; const std::set<std::string>& no_split_at_inner_name_set = stage->op->attrs.count(SearchPolicyKey::no_split_at_inner) ? GetIterNameSetParam(stage->op->attrs, SearchPolicyKey::no_split_at_inner) : std::set<std::string>(); size_t reduce_axis_size = 0; for (const auto axis : pop->reduce_axis) { if (!no_split_at_inner_name_set.count(axis->var->name_hint)) { reduce_axis_size++; } } if (reduce_axis_size) { for (const auto& iter : stage->iters) { if (iter->iter_kind == IteratorKind::kReduction) { ExtractOriginalIterators(iter->name, &original_names); if (original_names.size() == reduce_axis_size) { return iter; } } } } else { // Return the first reduce iterator for (const auto& iter : stage->iters) { if (iter->iter_kind == IteratorKind::kReduction) { return iter; } } } LOG(FATAL) << "Cannot find the iterator."; return stage->iters[0]; } /*! \brief Get the target stage id of a history step in the new state. * We need this because the stage_id in the history may be stale due to later steps */ inline int GetTargetStageIDInState(const State& s, int step_id) { int stage_inc = 0; for (size_t i = step_id + 1; i < s->transform_steps.size(); ++i) { if (IsStageNumberChangingStep(s->transform_steps[i])) { if (s->transform_steps[i]->stage_id <= s->transform_steps[step_id]->stage_id + stage_inc) stage_inc++; } } return s->transform_steps[step_id]->stage_id + stage_inc; } /*! 
\brief Get all split steps for one stage. */ inline void GetSplitStepIds(const State& s, int stage_id, std::vector<int>* split_step_ids) { for (int i = static_cast<int>(s->transform_steps.size()) - 1; i >= 0; --i) { if (auto ps = s->transform_steps[i].as<SplitStepNode>()) { if (stage_id == ps->stage_id) { split_step_ids->push_back(i); } } if (IsStageNumberChangingStep(s->transform_steps[i])) { if (stage_id > s->transform_steps[i]->stage_id) { stage_id--; } } } } /*! \brief Fuse all reduction iterators. */ inline State FuseAllReductionIterators(const State& state, int stage_id, Iterator* fused_iter, Array<Iterator>* space_iters, Array<Iterator>* reduce_iters) { space_iters->clear(); reduce_iters->clear(); for (const auto& iter : state->stages[stage_id]->iters) { if (iter->iter_kind == IteratorKind::kSpatial) { space_iters->push_back(iter); } else if (iter->iter_kind == IteratorKind::kReduction) { reduce_iters->push_back(iter); } } ICHECK(!reduce_iters->empty()); State tmp_s = state; if (reduce_iters->size() > 1) { *fused_iter = tmp_s.fuse(stage_id, *reduce_iters); } else { *fused_iter = (*reduce_iters)[0]; } return tmp_s; } /*! \brief Fuse all outer level space iterators. */ inline State FuseAllOuterSpaceIterators(const State& state, int stage_id, Iterator* fused_iter) { std::vector<Iterator> to_fuse; for (size_t iter_id = 0; iter_id < state->stages[stage_id]->iters.size(); ++iter_id) { const auto& it = state->stages[stage_id]->iters[iter_id]; // Stop at reduce iterator or annotated iterator if (it->iter_kind == IteratorKind::kReduction || it->annotation != IteratorAnnotation::kNone) { break; } // Stop at compute_at attach point if (state->attach_map->iter_to_attached_stages.count(std::make_pair(stage_id, iter_id - 1))) { break; } to_fuse.push_back(it); } State tmp_s = state; if (to_fuse.size() == 1) { *fused_iter = to_fuse[0]; } else { *fused_iter = tmp_s.fuse(stage_id, to_fuse); } return tmp_s; } /*! \brief Random sample states. 
*/ inline Array<State> RandomSampleStates(const Array<State>& in_states, std::mt19937* random_gen, size_t out_size) { Array<State> out_states; for (size_t i = 0; i < out_size; i++) { out_states.push_back(in_states[(*random_gen)() % in_states.size()]); } return out_states; } /*! \brief Compute prefix-sum probabiilty based on the given weights */ inline void ComputePrefixSumProb(const std::vector<float>& weights, std::vector<double>* prefix_sum_probs) { // Compute selection probabilities. float sum = 0.0; prefix_sum_probs->resize(weights.size()); for (size_t i = 0; i < weights.size(); ++i) { sum += std::max(weights[i], 0.0f); (*prefix_sum_probs)[i] = sum; } for (size_t i = 0; i < weights.size(); ++i) { (*prefix_sum_probs)[i] /= sum; } } /*! \brief Random choose an index according to a prefix sum probability. */ inline int RandomChoose(const std::vector<double>& prefix_sum_probs, std::mt19937* random_gen) { std::uniform_real_distribution<> dis(0.0, 1.0); double x = dis(*random_gen); ICHECK(!prefix_sum_probs.empty()); return std::lower_bound(prefix_sum_probs.begin(), prefix_sum_probs.end(), x) - prefix_sum_probs.begin(); } /*! \brief Print a title */ inline void PrintTitle(const std::string& title, int verbose) { StdCout(verbose) << Chars('-', 70) << "\n" << Chars('-', 30) << " [ " << title << " ]\n" << Chars('-', 70) << std::endl; } /*! * \brief Enumerate all possible factorization schemes for splitting an axes. * \note This class will memorize the results for reuse. 
 */
class SplitFactorizationMemo {
 public:
  /*! \brief Memoization key: (extent, n_lengths, max_innermost_factor). */
  using QueryKey = std::tuple<int, int, int>;

  /*!
   * \brief Enumerate (with memoization) the factorization schemes for splitting an axis.
   * \param extent The extent of the axis to split.
   * \param n_lengths The number of split lengths to produce.
   * \param max_innermost_factor The bound on the innermost factor.
   * \return A reference to the cached array of schemes.
   */
  const Array<Array<Integer>>& GetFactorizationSchemes(int extent, int n_lengths,
                                                       int max_innermost_factor);
  /*! \brief Get (and cache in factor_memory_) the factor list of an integer n. */
  const std::vector<int>& GetFactors(int n);

 private:
  // Recursive enumeration helper; semantics inferred from the name — the
  // implementation lives in the corresponding .cc file.
  void DfsEnumerate(int now, int remaining_length, int max_innermost_factor);

  /*! \brief Cache of previously computed factorization schemes. */
  std::unordered_map<QueryKey, Array<Array<Integer>>> memory_;

  // Scratch state used by DfsEnumerate while a query is in progress.
  int n_lengths_;
  Array<Integer> tmp_stack_;
  Array<Array<Integer>>* results_;
  /*! \brief Cache of factor lists for GetFactors. */
  std::unordered_map<int, std::vector<int>> factor_memory_;
};

/*! \brief Get the indexes of SplitStep that processes on spatial iterator. */
Array<Integer> GetSpatialSplitStepIds(const State& s, int stage_id);

/*! \brief Get the possible compute locations for a stage. */
std::vector<std::pair<int, int>> GetComputeLocationCandidates(const SearchTask& task,
                                                              const State& state, int stage_id);

// Apply multi-level tiling structure according to a string format,
// where "S" stands a space level, "R" stands for a reduction level.
// For example, if the format is "SSRSRS", then we will
// use tiling structure:  space_L0, space_L1, reduce_L0, space_L2, reduce_L1, space_L3
// For example, if apply "SSRSRS" to matrix multiplication,
// we have space iterators i and j, reduce iterator k.
// Then the tiling structure is : i0, j0, i1, j1, k0, i2, j2, k1, i3, j3
State DoMultiLevelTiling(const State& state, int stage_id, const std::string& format,
                         std::vector<int>* spatial_split_step_ids = nullptr);

// Apply tiling structure: space, space, space, ..., with tile sizes from other SplitStep
State FollowTiling(const State& state, int stage_id, const std::vector<int>& split_step_ids,
                   int n_split);

// Prune invalid states and return the results in-place.
void PruneInvalidState(const SearchTask& task, Array<State>* states);

}  // namespace auto_scheduler
}  // namespace tvm

#endif  // TVM_AUTO_SCHEDULER_SEARCH_POLICY_UTILS_H_
https://github.com/zk-ml/tachikoma
src/auto_scheduler/utils.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file auto_scheduler/utils.h * \brief Common utilities. */ #ifndef TVM_AUTO_SCHEDULER_UTILS_H_ #define TVM_AUTO_SCHEDULER_UTILS_H_ #include <dmlc/common.h> #include <tvm/tir/expr.h> #include <algorithm> #include <deque> #include <exception> #include <future> #include <iomanip> #include <numeric> #include <random> #include <string> #include <thread> #include <tuple> #include <utility> #include <vector> namespace std { /*! \brief Hash function for std::pair */ template <typename T1, typename T2> struct hash<std::pair<T1, T2>> { std::size_t operator()(const std::pair<T1, T2>& k) const { return ::dmlc::HashCombine(std::hash<T1>()(k.first), std::hash<T2>()(k.second)); } }; /*! \brief Hash function for std::tuple */ template <typename T1, typename T2, typename T3> struct hash<std::tuple<T1, T2, T3>> { std::size_t operator()(const std::tuple<T1, T2, T3>& k) const { return ::dmlc::HashCombine( ::dmlc::HashCombine(std::hash<T1>()(std::get<0>(k)), std::hash<T2>()(std::get<1>(k))), std::hash<T3>()(std::get<2>(k))); } }; } // namespace std namespace tvm { namespace auto_scheduler { /********** Utilities for Array, std::vector, std::string **********/ /*! 
\brief Get the first appearance index of elements in an Array */ template <typename T> inline void GetIndices(const Array<T>& array, const Array<T>& to_locate, Array<Integer>* indices) { for (const auto& v : to_locate) { auto it = std::find(array.begin(), array.end(), v); if (it != array.end()) { indices->push_back(it - array.begin()); } else { LOG(FATAL) << "Cannot find the item"; } } } /*! \brief Get the first appearance index of an element in an Array */ template <typename T> inline int GetIndex(const Array<T>& array, const T& to_locate) { for (size_t i = 0; i < array.size(); ++i) { if (array[i] == to_locate) { return i; } } LOG(FATAL) << "Cannot find the item"; return -1; } /*! \brief Delete the item in a std::vector if it exists. */ template <typename T> inline void FindAndDeleteItem(std::vector<T>* array, const T& to_delete) { auto iter = std::find(array->begin(), array->end(), to_delete); if (iter != array->end()) { array->erase(iter); } } /*! \brief Compute the product of all elements in a vector */ inline int64_t ElementProduct(const std::vector<int>& array) { int64_t ret = 1; for (auto x : array) { ret *= x; } return ret; } /*! \brief Move elements from multiple vectors to one vector */ template <typename T> std::vector<T>& ConcatenateMove(std::vector<T>* out, std::vector<T>* in) { out->insert(out->end(), std::make_move_iterator(in->begin()), std::make_move_iterator(in->end())); return *out; } /*! \brief Move elements from multiple vectors to one vector */ template <typename T, typename... Args> std::vector<T>& ConcatenateMove(std::vector<T>* out, std::vector<T>* first, Args... args) { ConcatenateMove(out, first); ConcatenateMove(out, args...); return *out; } /*! \brief Get a random permutation of integers [0, n-1] */ template <typename G> void RandomPermutation(int n, std::vector<int>* out, G* gen) { out->assign(n, 0); std::iota(out->begin(), out->end(), 0); std::shuffle(out->begin(), out->end(), *gen); } /*! 
\brief Replace a sub-string to another sub-string in a string */ inline void StrReplace(std::string* base, const std::string& from, const std::string& to) { auto pos = base->find(from); while (pos != std::string::npos) { base->replace(pos, from.size(), to); pos = base->find(from, pos + to.size()); } } /*! \brief Return whether two int arrays are elementwise-equal */ inline bool IntArrayEqual(const Array<PrimExpr>& arr1, const Array<PrimExpr>& arr2) { if (arr1.size() != arr2.size()) { return false; } for (size_t i = 0; i < arr1.size(); ++i) { auto int1 = arr1[i].as<IntImmNode>(); auto int2 = arr2[i].as<IntImmNode>(); ICHECK(int1 != nullptr); ICHECK(int2 != nullptr); if (int1->value != int2->value) { return false; } } return true; } /********** Utilities for TVM Containers / ByteArray **********/ /*! \brief Compute mean of a FloatImm array */ inline double FloatArrayMean(const Array<PrimExpr>& float_array) { double sum = 0; if (float_array.empty()) { return 0.0; } for (const auto& x : float_array) { auto floatimm = x.as<tir::FloatImmNode>(); ICHECK(floatimm != nullptr); sum += floatimm->value; } return sum / float_array.size(); } /*! \brief Return whether a string starts with another substring */ inline bool StrStartsWith(const String& a, const String& b) { if (b.size() > a.size()) return false; return std::equal(a.c_str(), a.c_str() + b.size(), b.c_str()); } /*! \brief Return whether a string ends with another substring */ inline bool StrEndsWith(const String& a, const String& b) { if (b.size() > a.size()) return false; return std::equal(a.c_str() + a.size() - b.size(), a.c_str() + a.size(), b.c_str()); } /********** Other Utilities **********/ /*! \brief Get an int value from an Expr */ inline int64_t GetIntImm(const PrimExpr& expr) { auto pint = expr.as<IntImmNode>(); if (pint == nullptr) { return 1; } return pint->value; } /*! 
\brief Compute the product of the lengths of axes */ inline int64_t AxisLengthProd(const Array<tir::IterVar>& axes) { int64_t ret = 1.0; for (const auto& x : axes) { if (const IntImmNode* imm = x->dom->extent.as<IntImmNode>()) { ret *= imm->value; } else { return -1.0; } } return ret; } /*! * \brief Clean the name of an iterator or an op to make it valid in python code. * \param str The original name. * \param prefix The name prefix to differentiate the same name (e.g., the same iterator names). * \return The cleaned name. */ inline std::string CleanName(const std::string& str, const std::string& prefix = "") { std::string ret = str; StrReplace(&ret, ".", "_"); StrReplace(&ret, "@", "_"); StrReplace(&ret, "outer", "o"); StrReplace(&ret, "inner", "i"); if (prefix != "") { return prefix + "_" + ret; } return ret; } /*! \brief An empty output stream */ class NullStream : public std::ostream { public: NullStream() : std::ostream(nullptr) {} NullStream(const NullStream&) : std::ostream(nullptr) {} static NullStream& Global(); }; template <class T> NullStream& operator<<(NullStream& os, const T& value) { return os; } /*! \brief Get std cout with verbose control */ inline std::ostream& StdCout(int verbose, int setting = 1) { return verbose >= setting ? std::cout : NullStream::Global(); } /*! \brief Print multiple chars */ inline std::string Chars(const char& str, int times) { std::stringstream ret; for (int i = 0; i < times; ++i) { ret << str; } return ret.str(); } /*! \brief Print the time elapsed */ inline void PrintTimeElapsed(std::chrono::time_point<std::chrono::high_resolution_clock> t_begin, const std::string& info, int verbose) { double duration = std::chrono::duration_cast<std::chrono::duration<double>>( std::chrono::high_resolution_clock::now() - t_begin) .count(); StdCout(verbose) << "Time elapsed for " << info << ": " << std::fixed << std::setprecision(2) << duration << " s" << std::endl; } /*! 
 * \brief Parse shape and axis names from layout string
 * \param layout The layout string: alternating runs of letters (axis names) and digits (factors).
 * \param shape Output: the numeric factors found in the layout, in order of appearance.
 * \param axes Output: the axis-name substrings found in the layout, in order of appearance.
 */
inline void ParseKernelLayout(const String& layout, Array<PrimExpr>* shape,
                              std::vector<std::string>* axes) {
  int32_t factor = 0;
  std::string axis = "";
  for (char c : std::string(layout)) {
    // NOTE(review): this ASCII range also admits '[' '\\' ']' '^' '_' '`'
    // (between 'Z' and 'a') — presumably only letters are intended; confirm.
    if (c >= 'A' && c <= 'z') {
      axis += c;
      if (factor != 0) {
        // A letter terminates the digit run accumulated so far.
        shape->push_back(factor);
        factor = 0;
      }
    } else if (c >= '0' && c <= '9') {
      factor = factor * 10 + c - '0';
      if (!axis.empty()) {
        // A digit terminates the axis name accumulated so far.
        axes->push_back(axis);
        axis = "";
      }
    } else {
      LOG(FATAL) << "Invalid layout " << layout;
    }
  }
  // Flush a trailing axis name. NOTE(review): a layout that ends in digits
  // silently discards the pending `factor` — confirm this is intended.
  if (!axis.empty()) {
    axes->push_back(axis);
  }
}

/*! \brief Get the base name before '_' of an axis */
inline std::string AxisBaseName(const std::string& str) { return str.substr(0, str.rfind("_")); }

}  // namespace auto_scheduler
}  // namespace tvm

#endif  // TVM_AUTO_SCHEDULER_UTILS_H_
https://github.com/zk-ml/tachikoma
src/autotvm/feature_visitor.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file feature_visitor.h * \brief Base class for feature extractor. * These features are used for machine learning cost model */ #ifndef TVM_AUTOTVM_FEATURE_VISITOR_H_ #define TVM_AUTOTVM_FEATURE_VISITOR_H_ #include <tvm/tir/expr.h> #include <tvm/tir/stmt.h> #include <tvm/tir/stmt_functor.h> #include <string> namespace tvm { namespace autotvm { using namespace tvm::tir; /*! * \brief Type of for loop, used as one-hot encoding in features */ enum AnnotationType { kBlockX, kBlockY, kBlockZ, kThreadX, kThreadY, kThreadZ, kUnrolled, kVectorized, kParallel, kSerial, kVirtualThread, kNum, }; /*! * \brief A base class for feature extractor, used for processing * for loop and memory access in the IR */ class FeatureVisitor : public StmtExprVisitor { public: // for loop void VisitStmt_(const ForNode* op) final; void VisitStmt_(const AttrStmtNode* op) final; // memory access void VisitExpr_(const BufferLoadNode* op) final; void VisitStmt_(const BufferStoreNode* op) final; using StmtExprVisitor::VisitExpr_; using StmtExprVisitor::VisitStmt_; protected: /*! * \brief Enter a for loop node * \param var The expression to be printed. 
   * \param length The length of the for loop (original comment said "The output
   *        stream", a copy-paste error)
   * \param ann_type The type for the for loop
   * \return skip Whether skip this node
   */
  virtual bool EnterItervar_(tir::Var var, int64_t length, AnnotationType ann_type) = 0;
  /*! \brief Exit a for loop subtree */
  virtual void ExitItervar_() = 0;
  /*!
   * \brief Enter a memory access node
   * \param buffer_var The buffer to access.
   * \param index Index expression
   */
  virtual void EnterMem_(tir::Var buffer_var, tvm::PrimExpr index) = 0;
  /*! \brief Exit a memory access node */
  virtual void ExitMem_() = 0;
};

}  // namespace autotvm
}  // namespace tvm

#endif  // TVM_AUTOTVM_FEATURE_VISITOR_H_
https://github.com/zk-ml/tachikoma
src/autotvm/touch_extractor.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file touch_extractor.h * \brief Extract feature of touch pattern of axes in lowered IR */ #ifndef TVM_AUTOTVM_TOUCH_EXTRACTOR_H_ #define TVM_AUTOTVM_TOUCH_EXTRACTOR_H_ #include <tvm/runtime/registry.h> #include <tvm/tir/expr.h> #include <tvm/tir/expr_functor.h> #include <deque> #include <map> #include <stack> #include <string> #include <unordered_map> #include <vector> #include "feature_visitor.h" namespace tvm { namespace autotvm { using TouchedBuffer = std::string; // touch pattern buf[(stride * var) % mod) + other] struct TouchPattern { int64_t stride{0}; int64_t mod{-1}; // -1 for +inf int64_t count{1}; int64_t reuse{1}; int64_t thread_count{0}; // count when move thread axis into innermost int64_t thread_reuse{0}; // reuse ratio move thread axis into innermost }; // all the feature of an iter var struct ItervarFeature { ItervarFeature(Var var, int64_t extent, int nest, AnnotationType ann_type, int64_t topdown, int counter) : length(extent), nest_level(nest), ann(ann_type), topdown_product(topdown), order(counter) {} ItervarFeature() {} // Axis Attributes int64_t length; int nest_level; AnnotationType ann; // one-hot axis type int64_t topdown_product; // accumulative product of axis 
// (tail of struct ItervarFeature; the comment below continues the previous
// field's description, which reads "...accumulative product of axis")
// length, in top-down order
int64_t bottomup_product;  // accumulative product of axis length, in bottom-up order
// bottomup_product = reuse * count for any touched buffer
int order;  // used for sorting axis

// Arithmetic feature
int add_ct{0};  // number of float additions/subtractions under this itervar
int mul_ct{0};  // number of float multiplications under this itervar
int div_ct{0};  // number of float divisions/modulos under this itervar

// Memory Touch Feature
std::unordered_map<TouchedBuffer, TouchPattern> touch_feature;
};

// extract iter vars and their touch pattern from ir
class TouchExtractor : public FeatureVisitor {
 public:
  // Entry point: traverse `stmt` and populate `itervar_map` with the
  // arithmetic and memory-touch features of every loop variable.
  void Analyze(const Stmt& stmt) { operator()(stmt); }

  // arithmetic stats
  // NOTE: only floating-point (incl. bfloat16) arithmetic is counted; integer
  // arithmetic is ignored, presumably because it is mostly index/address
  // computation rather than payload work.
  void VisitExpr_(const AddNode* op) final {
    if (op->dtype.is_float() || op->dtype.is_bfloat16()) {
      itervar_map[itervar_stack_.back()].add_ct++;
    }
    FeatureVisitor::VisitExpr_(op);
  }

  // Subtraction is folded into the addition counter.
  void VisitExpr_(const SubNode* op) final {
    if (op->dtype.is_float() || op->dtype.is_bfloat16()) {
      itervar_map[itervar_stack_.back()].add_ct++;
    }
    FeatureVisitor::VisitExpr_(op);
  }

  void VisitExpr_(const MulNode* op) final {
    if (op->dtype.is_float() || op->dtype.is_bfloat16()) {
      itervar_map[itervar_stack_.back()].mul_ct++;
    }
    FeatureVisitor::VisitExpr_(op);
  }

  void VisitExpr_(const DivNode* op) final {
    if (op->dtype.is_float() || op->dtype.is_bfloat16()) {
      itervar_map[itervar_stack_.back()].div_ct++;
    }
    FeatureVisitor::VisitExpr_(op);
  }

  // Modulo is folded into the division counter.
  void VisitExpr_(const ModNode* op) final {
    if (op->dtype.is_float() || op->dtype.is_bfloat16()) {
      itervar_map[itervar_stack_.back()].div_ct++;
    }
    FeatureVisitor::VisitExpr_(op);
  }

  // Feature accumulator, keyed by loop variable.
  std::unordered_map<Var, ItervarFeature, tvm::ObjectPtrHash, tvm::ObjectPtrEqual> itervar_map;

 private:
  // Scope-entry/exit hooks invoked by FeatureVisitor during traversal.
  bool EnterItervar_(Var var, int64_t length, AnnotationType ann_type);
  void ExitItervar_();
  void EnterMem_(Var buffer_var, PrimExpr index);
  void ExitMem_();

  // Accumulative product of the extents of the currently-open loops.
  int64_t topdown_product_{1};
  // Counts occurrences of each buffer name so duplicates get unique suffixes.
  std::map<std::string, size_t> buffer_counter_;
  size_t itervar_counter_{0};
  std::deque<Var> itervar_stack_;  // use deque instead of stack for indexing
  std::deque<size_t> skip_stack_size_;

  using FeatureVisitor::VisitExpr_;
};

}  // namespace autotvm
}  // namespace tvm

#endif  // TVM_AUTOTVM_TOUCH_EXTRACTOR_H_
https://github.com/zk-ml/tachikoma
src/contrib/ethosu/cascader/block_config.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file src/contrib/ethosu/cascader/block_config.h
 * \brief BlockConfig object for the NPU cascader
 */
#ifndef TVM_CONTRIB_ETHOSU_CASCADER_BLOCK_CONFIG_H_
#define TVM_CONTRIB_ETHOSU_CASCADER_BLOCK_CONFIG_H_

#include <tvm/node/reflection.h>
#include <tvm/runtime/object.h>

#include <functional>
#include <vector>

namespace tvm {
namespace contrib {
namespace ethosu {
namespace cascader {

class BlockConfig;

/*! \brief Node to represent a BlockConfig */
class BlockConfigNode : public Object {
 public:
  void VisitAttrs(AttrVisitor* v);

  /*!
   * \brief Get the shape of the input block.
   * \return The input shape of the block config.
   */
  inline std::vector<int> GetInputBlockShape() const { return input_shape_; }

  /*!
   * \brief Get the shape of the output block.
   * \return The output shape of the block config.
   */
  inline std::vector<int> GetOutputBlockShape() const { return output_shape_; }

  /*!
   * \brief Get the number of cycles required to output this block.
   * \return The output cycles.
   */
  inline int GetOutputCycles() const { return output_cycles_; }

  /*!
   * \brief Get the number of cycles required to compute this block.
   * \return The compute cycles.
   */
  inline int GetComputeCycles() const { return compute_cycles_; }

  static constexpr const char* _type_key = "contrib.ethosu.cascader.BlockConfig";
  TVM_DECLARE_FINAL_OBJECT_INFO(BlockConfigNode, Object);

 protected:
  friend class BlockConfig;

  /*! \brief The shape of the input block */
  std::vector<int> input_shape_;
  /*! \brief The shape of the output block */
  std::vector<int> output_shape_;
  /*! \brief Cycles required to compute this block */
  int compute_cycles_;
  /*! \brief Cycles required to output this block */
  int output_cycles_;
};

/*!
 * \brief An object that contains an output block shape as well as the output and compute cycles
 * required to compute this block.
 */
class BlockConfig : public ObjectRef {
 public:
  BlockConfig(const std::vector<int>& input_shape, const std::vector<int>& output_shape,
              int compute_cycles, int output_cycles);

  TVM_DEFINE_OBJECT_REF_METHODS(BlockConfig, ObjectRef, BlockConfigNode);
};

}  // namespace cascader
}  // namespace ethosu
}  // namespace contrib
}  // namespace tvm

#endif  // TVM_CONTRIB_ETHOSU_CASCADER_BLOCK_CONFIG_H_
https://github.com/zk-ml/tachikoma
src/contrib/ethosu/cascader/cascader_options.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file src/contrib/ethosu/cascader/cascader_options.h * \brief Class to store configuration options for the NPU cascader */ #ifndef TVM_CONTRIB_ETHOSU_CASCADER_CASCADER_OPTIONS_H_ #define TVM_CONTRIB_ETHOSU_CASCADER_CASCADER_OPTIONS_H_ #include <tvm/node/reflection.h> #include <tvm/runtime/object.h> #include "tensor_config.h" namespace tvm { namespace contrib { namespace ethosu { namespace cascader { /*! \brief Node to represent CascaderOptions */ class CascaderOptionsNode : public Object { public: void VisitAttrs(AttrVisitor* v); /*! \brief The MemoryRegion to place cascading buffer into. */ MemoryRegion cascade_region; /*! \brief The maximum number of Proposals to generate. */ int max_proposals; /*! \brief How many striping factors to try per axis. */ int stripe_factors; /*! \brief The maximum number of Parts in a Plan. */ int max_plan_size; /*! \brief The maximum number of open Plans saved for a Part Group */ int max_open_plans; /*! \brief The maximum number of closed Plans saved for a Part Group */ int max_closed_plans; /*! \brief The maximum size of Tensor that will always be copied into the cascade region. */ int always_copy_size; /*! 
\brief Flag to disable pareto culling for plans to allow non pareto-optimal plans */ bool disable_pareto_plans; /*! \brief Flag to disable pareto culling for proposals to allow non pareto-optimal proposals */ bool disable_pareto_proposals; /*! \brief Whether to consider multi-dimensional striping */ bool enable_multi_dimensional_striping; /*! \brief Flag to disable culling for block configs to allow non-dominant blocks */ bool disable_block_culling; /*! \brief A boolean option to enable striping. */ bool enable_striping; static constexpr const char* _type_key = "contrib.ethosu.cascader.CascaderOptions"; TVM_DECLARE_FINAL_OBJECT_INFO(CascaderOptionsNode, Object) }; /*! \brief A class to hold configuration options for the cascader. */ class CascaderOptions : public ObjectRef { public: CascaderOptions(const MemoryRegion& cascade_region, int max_proposals, int stripe_factors, int max_plan_size, int max_open_plans, int max_closed_plans, int always_copy_size, bool disable_pareto_plans, bool disable_pareto_proposals, bool enable_multi_dimensional_striping, bool disable_block_culling, bool multi_dimensional_striping); TVM_DEFINE_OBJECT_REF_METHODS(CascaderOptions, ObjectRef, CascaderOptionsNode); }; } // namespace cascader } // namespace ethosu } // namespace contrib } // namespace tvm #endif // TVM_CONTRIB_ETHOSU_CASCADER_CASCADER_OPTIONS_H_
https://github.com/zk-ml/tachikoma
src/contrib/ethosu/cascader/common.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file src/contrib/ethosu/cascader/common.h * \brief Common functions used in the NPU cascader */ #ifndef TVM_CONTRIB_ETHOSU_CASCADER_COMMON_H_ #define TVM_CONTRIB_ETHOSU_CASCADER_COMMON_H_ #include <tvm/ir/expr.h> #include <tvm/runtime/container/array.h> #include <functional> #include <numeric> #include <vector> namespace tvm { namespace contrib { namespace ethosu { namespace cascader { /*! * \brief Make a tvm::Array<Integer> from an int vector. * \param vec The int vector. * \return The Integer Array. * \note Array<Integer>(std::vector<int>) doesn't work as this implicit * type conversion fails. This is why this helper is required. */ inline Array<Integer> make_array(const std::vector<int>& vec) { Array<Integer> arr; arr.resize(vec.size()); for (unsigned int i = 0; i < vec.size(); ++i) { arr.Set(i, Integer(vec[i])); } return arr; } /*! * \brief Make a tvm::Array<Integer> from a size_t vector. * \param vec The size_t vector. * \return The Integer Array. * \note Array<Integer>(std::vector<size_t>) doesn't work as this implicit * type conversion fails. This is why this helper is required. 
*/ inline Array<Integer> make_array(const std::vector<size_t>& vec) { Array<Integer> arr; arr.resize(vec.size()); for (unsigned int i = 0; i < vec.size(); ++i) { arr.Set(i, Integer(vec[i])); } return arr; } /*! * \brief Make a tvm::Array<IntImm> from an int64_t vector. * \param vec The int64_t vector. * \return The IntImm Array. * \note Array<IntImm>(std::vector<int64_t>) doesn't work as this implicit * type conversion fails. This is why this helper is required. */ inline Array<IntImm> make_array(const std::vector<int64_t>& vec) { Array<IntImm> arr; arr.resize(vec.size()); for (unsigned int i = 0; i < vec.size(); ++i) { arr.Set(i, IntImm(DataType::Int(64), vec[i])); } return arr; } /*! * \brief Make a tvm::Array<FloatImm> from an float vector. * \param vec The float vector. * \return The FloatImm Array. */ inline Array<FloatImm> make_array(const std::vector<float>& vec) { Array<FloatImm> arr; arr.resize(vec.size()); for (unsigned int i = 0; i < vec.size(); ++i) { arr.Set(i, FloatImm(DataType::Float(32), static_cast<double>(vec[i]))); } return arr; } /*! * \brief Calculate the ceil of an Integer division * \param dividend The dividend of the division * \param divisor The divisor of the division * \return The quotient */ inline int round_up_divide(int dividend, int divisor) { return dividend / divisor + (dividend % divisor != 0); } /*! * \brief Make a vector from a tvm::Array. * \param arr The Array. * \return The vector. */ template <typename T, typename tvm_T> inline std::vector<T> make_vector(const Array<tvm_T>& arr) { std::vector<T> vec(arr.size()); for (unsigned int i = 0; i < arr.size(); ++i) { vec[i] = arr[i]->value; } return vec; } /*! * \brief Create a combined hash. * \param seed The current hash value. * \param v The value to combine into the hash. * \return The combined hash. */ template <class T> inline void hash_combine(std::size_t* seed, T const& v) { *seed ^= std::hash<T>()(v) + 0x9e3779b9 + (*seed << 6) + (*seed >> 2); } /*! * \brief Hash a vector. 
* \param vec The vector to hash. * \return The hash. */ template <class T> inline std::size_t hash_vector(const std::vector<T>& vec) { std::size_t seed = vec.size(); for (const auto& elem : vec) { hash_combine(&seed, elem); } return seed; } template <class T> inline T mul_reduce(const std::vector<T>& vec) { return std::accumulate(vec.begin(), vec.end(), 1, std::multiplies<T>()); } } // namespace cascader } // namespace ethosu } // namespace contrib } // namespace tvm #endif // TVM_CONTRIB_ETHOSU_CASCADER_COMMON_H_
https://github.com/zk-ml/tachikoma
src/contrib/ethosu/cascader/graph.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file src/contrib/ethosu/cascader/graph.h
 * \brief Graph objects (Tensor and Part) for the Ethos-U cascader
 */
#ifndef TVM_CONTRIB_ETHOSU_CASCADER_GRAPH_H_
#define TVM_CONTRIB_ETHOSU_CASCADER_GRAPH_H_

#include <tvm/runtime/data_type.h>
#include <tvm/runtime/object.h>
#include <tvm/te/operation.h>
#include <tvm/te/tensor.h>

#include <unordered_map>
#include <utility>
#include <vector>

#include "block_config.h"
#include "propagator.h"

namespace tvm {
namespace contrib {
namespace ethosu {
namespace cascader {

class Tensor;
class Part;
class StripeConfig;

/*!
 * \brief The buffering mode to use when realizing a tensor.
 * RECOMPUTE - The 'default' behaviour of TVM. Overlapping stripes will be recomputed.
 * ROLLING - Apply both the sliding window and storage folding optimizations to the tensor
 * realization.
 */
enum BufferMode { RECOMPUTE, ROLLING };

/*! \brief A struct to hold a Tensor Expression subgraph */
struct TESubgraph {
  /*! \brief The input te::Tensors to the subgraph */
  std::vector<te::Tensor> input_tensors;
  /*! \brief The output te::Tensor of the subgraph */
  te::Tensor output_tensor;
};

/*! \brief Node to hold performance information for a Part */
class PerformanceInfoNode : public Object {
 public:
  void VisitAttrs(AttrVisitor* v);

  /*! \brief The cycles to compute a block */
  int64_t compute_cycles;
  /*! \brief The number of bytes read per input tensor */
  std::vector<int64_t> read_bytes;
  /*! \brief The number of bytes written to the output tensor */
  int64_t write_bytes;
  /*! \brief The block config used for this performance point */
  BlockConfig block_config;

  static constexpr const char* _type_key = "contrib.ethosu.cascader.PerformanceInfo";
  TVM_DECLARE_FINAL_OBJECT_INFO(PerformanceInfoNode, Object);
};

/*!
 * \brief A class to hold the performance information for a Part.
 * \note The performance information for a Part is composed of 3 factors: the compute cycles,
 * the number of bytes read from each input tensor and the number of bytes written to the output
 * tensor. Bytes read/written is reported in favour of read/write bandwidth cycles so the
 * calculation of the performance information can be re-used with different memory homing.
 */
class PerformanceInfo : public ObjectRef {
 public:
  PerformanceInfo(int64_t compute_cycles, std::vector<int64_t> read_bytes, int64_t write_bytes,
                  BlockConfig block_config) {
    auto n = make_object<PerformanceInfoNode>();
    n->compute_cycles = compute_cycles;
    n->read_bytes = std::move(read_bytes);
    n->write_bytes = write_bytes;
    n->block_config = block_config;
    data_ = std::move(n);
  }

  TVM_DEFINE_OBJECT_REF_METHODS(PerformanceInfo, ObjectRef, PerformanceInfoNode);
};

/*! \brief Node to represent a Tensor */
class TensorNode : public Object {
 public:
  void VisitAttrs(AttrVisitor* v);

  /*! \return The shape of the tensor */
  std::vector<int> GetShape() const { return shape_; }

  /*! \return The data type of the tensor */
  DataType GetDataType() const { return dtype_; }

  /*! \return Whether the tensor stores a constant value */
  bool IsConstant() const { return is_constant_; }

  /*! \return The compression ratio of the tensor */
  float GetCompressionRatio() const { return compression_ratio_; }

  /*! \return The producers of the tensor */
  const std::vector<Part> GetProducers() const { return producers_; }

  /*! \return The consumers of the tensor */
  const std::vector<Part> GetConsumers() const { return consumers_; }

  /*! \return The size of the tensor in bytes (after applying the compression ratio) */
  // NOTE(review): the float product is truncated back to int here — confirm
  // truncation (rather than rounding) is the intended behaviour.
  int GetSize() const { return size_ * compression_ratio_; }

  /*! \brief Add a producer of the tensor */
  inline void AddProducer(const Part& part) { producers_.push_back(part); }

  /*! \brief Add a consumer of the tensor */
  inline void AddConsumer(const Part& part) { consumers_.push_back(part); }

  static constexpr const char* _type_key = "contrib.ethosu.cascader.Tensor";
  TVM_DECLARE_FINAL_OBJECT_INFO(TensorNode, Object);

 protected:
  friend class Tensor;

  /*! \brief The shape of the tensor */
  std::vector<int> shape_;
  /*! \brief The data type of the tensor */
  DataType dtype_;
  /*! \brief Whether the tensor stores a constant value */
  bool is_constant_;
  /*! \brief The compression ratio of the tensor */
  float compression_ratio_;
  /*! \brief The producers of the tensor */
  std::vector<Part> producers_;
  /*! \brief The consumers of the tensor */
  std::vector<Part> consumers_;
  /*! \brief The size of the tensor in bytes */
  int size_;
};

/*!
 * \brief A class to describe a Tensor in a Cascader graph.
 * \note Cascader graphs consist of two object types: Tensors and Parts. This class
 * defines the Tensors which represent the tensors that are consumed and produced
 * as part of the graph. They are augmented with information about their 'kind'
 * (input/output/constant/intermediate), their default memory home (which memory they
 * are expected to be allocated in) and a compression ratio where applicable (weights
 * for instance are compressed).
 */
class Tensor : public ObjectRef {
 public:
  Tensor(const std::vector<int>& shape, DataType dtype, bool is_constant, float compression_ratio);

  TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(Tensor, ObjectRef, TensorNode);
};

/*! \brief Node to represent a Part */
class PartNode : public Object {
 public:
  virtual void VisitAttrs(AttrVisitor* v);

  /*! \return The TE subgraph represented by the Part */
  const TESubgraph GetSubgraph() const { return subgraph_; }

  /*! \return The output->input propagators */
  const std::vector<Propagator> GetPropagators() const { return propagators_; }

  /*! \return Whether the Part is inline */
  bool IsInline() const { return in_line_; }

  /*! \return The input tensors */
  const std::vector<Tensor> GetInputTensors() const { return input_tensors_; }

  /*! \return The output tensor */
  const Tensor GetOutputTensor() const { return output_tensor_; }

  /*! \brief Set the input tensor of the Part at a given index */
  void SetInput(uint64_t input_index, const Tensor& input_tensor);

  /*! \brief Set the output tensor of the Part */
  void SetOutput(const Tensor& output_tensor) { output_tensor_ = output_tensor; }

  /*!
   * \brief Calculate the input stripe configs for a given output stripe config using the
   * Propagators.
   * \param output_stripe_config The output stripe config to propagate.
   * \return The calculated input stripe configs.
   */
  std::vector<StripeConfig> CalculateInputStripeConfigs(const StripeConfig& output_stripe_config);

  /*!
   * \brief Get the preferred alignment in each axis for a stripe of the Part.
   * \note This is used to bias the selection of StripeConfigs towards those that are integer
   * multiples of a tensor intrinsic used to compute the Part.
   */
  virtual const std::vector<int> GetStripeAlignHint() const;

  /*!
   * \brief Get the performance information for a given output stripe config.
   * \param output_stripe_config The output stripe config to compute the performance for.
   * \param buffer_mode The buffering mode (RECOMPUTE or ROLLING) to assume for the output.
   * \return The performance information containing the compute cycles and read/write bytes.
   */
  virtual const PerformanceInfo GetPerformanceInfo(const StripeConfig& output_stripe_config,
                                                   BufferMode buffer_mode) = 0;

  static constexpr const char* _type_key = "contrib.ethosu.cascader.Part";
  TVM_DECLARE_BASE_OBJECT_INFO(PartNode, Object);

 protected:
  friend class Part;

  /*! \brief The Tensor Expression subgraph represented by the Part */
  TESubgraph subgraph_;
  /*! \brief The output->input propagators */
  std::vector<Propagator> propagators_;
  /*! \brief Whether the Part is computed in-line */
  bool in_line_;
  /*! \brief The input tensors */
  std::vector<Tensor> input_tensors_;
  /*! \brief The output tensor */
  Tensor output_tensor_;
};

/*!
 * \brief A class to describe a Part in a Cascader graph.
 * \note Cascader graphs consist of two object types: Tensors and Parts. This class
 * defines the Parts which represent the operations which produce and consume Tensors.
 *
 * A Part can represent one or more Tensor Expression compute operations but the subgraph
 * it represents must have only a single output. Multiple TE compute operations should be
 * represented under a single Part if the intermediate tensors between them won't be
 * realized. This is a common pattern in Ethos-U where a sequence of TE compute operations
 * are used to represent a single hardware primitive operation.
 *
 * Parts contain a Propagator per input which describes how a given output stripe config
 * should be transformed into an input stripe config for each input. This is essential
 * to analyse both the performance of Parts (determining the data that will be read) and
 * in cascading Parts together (determining compatible stripe config choices).
 *
 * A Part can be marked as 'in_line', in which case it is assumed that it doesn't need to
 * allocate space for its output tensor.
 *
 * This is only a base class and concrete Parts must be derived from it, implementing a
 * function to model the performance of the Part as well as to determine its compute
 * quantum.
 */
class Part : public ObjectRef {
 public:
  TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(Part, ObjectRef, PartNode);
};

/*! \brief Node to represent a CascaderGraph */
class CascaderGraphNode : public Object {
 public:
  CascaderGraphNode() {}
  CascaderGraphNode(std::vector<Tensor> input_tensors, std::vector<Tensor> output_tensors);

  void VisitAttrs(AttrVisitor* v);

  /*! \return The input Tensors of the CascaderGraph */
  std::vector<Tensor> GetInputTensors() const { return input_tensors_; }

  /*! \return The output Tensors of the CascaderGraph */
  std::vector<Tensor> GetOutputTensors() const { return output_tensors_; }

  /*! \return The order of the Parts in the CascaderGraph */
  std::vector<Part> GetPartOrder() const { return part_order_; }

  /*!
   * \brief Get the ID of a Part in the CascaderGraph.
   * \param part The Part to get the ID of.
   * \return The ID of the Part in the CascaderGraph.
   * \note Each Part is given a unique ID within the CascaderGraph.
   */
  int GetPartID(const Part& part) const;

  /*!
   * \brief Get the ID of a Tensor in the CascaderGraph.
   * \param tensor The Tensor to get the ID of.
   * \return The ID of the Tensor in the CascaderGraph.
   * \note Each Tensor is given a unique ID within the CascaderGraph.
   */
  int GetTensorID(const Tensor& tensor) const;

  static constexpr const char* _type_key = "contrib.ethosu.cascader.CascaderGraph";
  TVM_DECLARE_FINAL_OBJECT_INFO(CascaderGraphNode, Object);

 protected:
  /*!
   * \brief Initialize the CascaderGraph by defining a topological ordering.
   * \note This will traverse the Parts and Tensors using a depth-first
   * visiting pattern and use the traversal order to initialize both the
   * 'order' vectors and the ID maps. The order vectors define the ordering
   * that the cascader expects the CascaderGraph to be executed in, but reversed.
   * The ID maps assign a unique integer ID to each Part and Tensor corresponding
   * to their position in their respective order vector.
   */
  void Init_();

  /*! \brief The input Tensors of the CascaderGraph */
  std::vector<Tensor> input_tensors_;
  /*! \brief The output Tensors of the CascaderGraph */
  std::vector<Tensor> output_tensors_;
  /*! \brief The order of the Tensors in the CascaderGraph */
  std::vector<Tensor> tensor_order_;
  /*! \brief The order of the Parts in the CascaderGraph */
  std::vector<Part> part_order_;
  /*! \brief A map between Parts in the CascaderGraph and their IDs */
  std::unordered_map<Part, int, ObjectPtrHash, ObjectPtrEqual> part_id_map_;
  /*! \brief A map between Tensors in the CascaderGraph and their IDs */
  std::unordered_map<Tensor, int, ObjectPtrHash, ObjectPtrEqual> tensor_id_map_;
};

/*!
 * \brief A class to describe a graph of Parts and Tensors used by the cascader.
 * \note This class describes a graph consisting of two object types: Tensors and Parts.
 * It defines a topological ordering on the graph such that each Part and Tensor has a
 * position in the ordering. This ordering is used by the Plan and Proposal generation
 * algorithms. It is also the ordering the Parts are expected to be executed in.
 *
 * In addition to defining an ordering, the Parts and Tensors are also all given unique
 * IDs which they can be referred to by.
 */
class CascaderGraph : public ObjectRef {
 public:
  CascaderGraph(std::vector<Tensor> input_tensors, std::vector<Tensor> output_tensors);

  TVM_DEFINE_OBJECT_REF_METHODS(CascaderGraph, ObjectRef, CascaderGraphNode);
};

}  // namespace cascader
}  // namespace ethosu
}  // namespace contrib
}  // namespace tvm

#endif  // TVM_CONTRIB_ETHOSU_CASCADER_GRAPH_H_
https://github.com/zk-ml/tachikoma
src/contrib/ethosu/cascader/pareto.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file src/contrib/ethosu/cascader/pareto.h
 * \brief Pareto optimisation functions for the NPU cascader
 */
#ifndef TVM_CONTRIB_ETHOSU_CASCADER_PARETO_H_
#define TVM_CONTRIB_ETHOSU_CASCADER_PARETO_H_

#include <tvm/ir/expr.h>
#include <tvm/runtime/container/array.h>

#include <algorithm>
#include <array>
#include <vector>

namespace tvm {
namespace contrib {
namespace ethosu {
namespace cascader {

class Plan;
class MemoryRegion;
class Proposal;

/*!
 * \brief Determine the Pareto optimal points.
 * \param costs The points as a vector of N-dimensional costs.
 * \return A vector that is true where a point is Pareto optimal and false otherwise.
 */
template <int N>
std::vector<bool> GetParetoFrontier(const std::vector<std::array<float, N>>& costs);

/*!
 * \brief Evenly sample items from a vector to reduce its size.
 * \param vec The vector to thin.
 * \param max_size The maximum size of the thinned vector.
 * \return The thinned vector.
 */
template <class T>
std::vector<T> ThinVector(const std::vector<T>& vec, size_t max_size);

/*!
 * \brief Cull plans which are not Pareto optimal then thin them down.
 * \param plans The plans to apply the Pareto culling to.
 * \param max_plans The maximum number of plans after the culling.
 * \param disable_pareto_metric Whether to only select from Pareto frontier or not.
 * \return The culled plans.
 * \note Plan Pareto-optimality is determined based upon a Plan's memory_usage
 * and cycles.
 */
std::vector<Plan> ParetoCullPlans(std::vector<Plan> plans, size_t max_plans,
                                  bool disable_pareto_metric);

/*!
 * \brief Cull proposals which are not Pareto optimal then thin them down.
 * \param proposals The proposals to apply the Pareto culling to.
 * \param max_proposals The maximum number of proposals after the culling.
 * \param disable_pareto_metric Whether to only select from Pareto frontier or not.
 * \return The culled proposals.
 */
std::vector<Proposal> ParetoCullProposals(std::vector<Proposal> proposals, size_t max_proposals,
                                          bool disable_pareto_metric);

}  // namespace cascader
}  // namespace ethosu
}  // namespace contrib
}  // namespace tvm

#endif  // TVM_CONTRIB_ETHOSU_CASCADER_PARETO_H_
https://github.com/zk-ml/tachikoma
src/contrib/ethosu/cascader/parts/ethosu.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file src/contrib/ethosu/cascader/parts/ethosu.h
 * \brief Arm(R) Ethos(TM)-U NPU Part object
 */
#ifndef TVM_CONTRIB_ETHOSU_CASCADER_PARTS_ETHOSU_H_
#define TVM_CONTRIB_ETHOSU_CASCADER_PARTS_ETHOSU_H_

#include <tvm/runtime/object.h>

#include <vector>

#include "../block_config.h"
#include "../graph.h"

namespace tvm {
namespace contrib {
namespace ethosu {
namespace cascader {

/*! \brief Node to represent an EthosuPart */
class EthosuPartNode : public PartNode {
 public:
  /*!
   * \brief Get the optimal BlockConfig to use given a StripeConfig.
   * \param output_stripe_config The output StripeConfig.
   */
  const BlockConfig GetBlockConfig(const StripeConfig& output_stripe_config);

  /*!
   * \brief Get the preferred alignment in each axis for a stripe of the Part.
   * \note This is used to bias the selection of StripeConfigs towards those that are integer
   * multiples of a tensor intrinsic used to compute the Part.
   */
  const std::vector<int> GetStripeAlignHint() const final { return output_quantum_; }

  /*!
   * \brief Get the performance information for a given output stripe config.
   * \param output_stripe_config The output stripe config to compute the performance for.
   * \param buffer_mode The mode of buffering, rolling or recompute.
   * \return The performance information containing the compute cycles and read/write bytes.
   */
  const PerformanceInfo GetPerformanceInfo(const StripeConfig& output_stripe_config,
                                           BufferMode buffer_mode) final;

  static constexpr const char* _type_key = "contrib.ethosu.cascader.EthosuPart";
  TVM_DECLARE_FINAL_OBJECT_INFO(EthosuPartNode, PartNode);

 protected:
  friend class EthosuPart;

  /*!
   * \brief Get the size of input required (per input tensor) to compute a stripe given a
   * block_shape.
   * \param block_shape The shape of the block(s) the stripe is split into.
   * \param full_shape The shape of the full stripe to compute.
   * \return The bytes required per input tensor.
   */
  const std::vector<int64_t> GetBytesRead(const std::vector<int>& block_shape,
                                          const std::vector<int>& full_shape);

  /*!
   * \brief Get cost heuristic of using a given block config with the associated stripe config.
   * \param block_config The block config that is being checked for the cost.
   * \param output_stripe_config The striping configuration associated with the operator.
   * \return A cost heuristic representative of the choice.
   */
  float CalculateCost(const BlockConfig& block_config, const StripeConfig& output_stripe_config);

  /*! \brief List of block configs that are valid for this part */
  std::vector<BlockConfig> valid_block_configs_;
  /*! \brief The output volume that is atomically computed */
  std::vector<int> output_quantum_;
  /*! \brief Index for output height dimension */
  int height_idx_;
  /*! \brief Index for output width dimension */
  int width_idx_;
  /*! \brief Index of weight tensor, -1 if the Part has no weights */
  int weight_tensor_idx_;
  /*! \brief Number of sub-kernels the kernel has been split into */
  int subkernels_;
};

/*!
 * \brief A class to describe a Part to be executed on an Arm(R) Ethos(TM)-U NPU.
 * \note EthosuParts must be provided with an output quantum and the cycles taken to
 * compute an output quantum which depend on the operator the NPU is computing.
 */
class EthosuPart : public Part {
 public:
  EthosuPart(const TESubgraph& subgraph, const std::vector<Propagator> propagators,
             const std::vector<int>& output_quantum, int subkernels,
             const std::vector<BlockConfig>& valid_block_configs, int weight_tensor_idx);

  TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(EthosuPart, Part, EthosuPartNode);
};

}  // namespace cascader
}  // namespace ethosu
}  // namespace contrib
}  // namespace tvm

#endif  // TVM_CONTRIB_ETHOSU_CASCADER_PARTS_ETHOSU_H_
https://github.com/zk-ml/tachikoma
src/contrib/ethosu/cascader/parts/inline.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file src/contrib/ethosu/cascader/parts/inline.h
 * \brief Inline Part object
 */
#ifndef TVM_CONTRIB_ETHOSU_CASCADER_PARTS_INLINE_H_
#define TVM_CONTRIB_ETHOSU_CASCADER_PARTS_INLINE_H_

#include <tvm/runtime/object.h>

#include <vector>

#include "../graph.h"

namespace tvm {
namespace contrib {
namespace ethosu {
namespace cascader {

/*! \brief Node to represent an inlined Part */
class InlinePartNode : public PartNode {
 public:
  /*!
   * \brief Get the performance information for a given output stripe config.
   * \param output_stripe_config The output stripe config to compute the performance for.
   * \param buffer_mode The mode of buffering, rolling or recompute.
   * \return The performance information containing the compute cycles and read/write bytes.
   */
  const PerformanceInfo GetPerformanceInfo(const StripeConfig& output_stripe_config,
                                           BufferMode buffer_mode) final;

  static constexpr const char* _type_key = "contrib.ethosu.cascader.InlinePart";
  TVM_DECLARE_FINAL_OBJECT_INFO(InlinePartNode, PartNode);

 protected:
  friend class InlinePart;
};

/*!
 * \brief A class to describe a inlined Part in a Cascader graph.
 * \note Inlined Parts have a few special properties. First by IsInline being true,
 * the Cascader will not allocate any space for the outputs of the Part. This is because
 * they will be directly consumed as they are produced by the following Part. Second, they
 * are assumed to be 'free' and require no cycles to execute. Lastly, as they are 'free'
 * the compute quantum is arbitrary, but by convention it is a single tensor element.
 *
 * Examples of inline Parts include strided_slice, reshape and concatenate - all of which
 * get absorbed into the DMA functionality of Ethos-U compute primitives.
 */
class InlinePart : public Part {
 public:
  InlinePart(const TESubgraph& subgraph, const std::vector<Propagator> propagators);

  TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(InlinePart, Part, InlinePartNode);
};

}  // namespace cascader
}  // namespace ethosu
}  // namespace contrib
}  // namespace tvm

#endif  // TVM_CONTRIB_ETHOSU_CASCADER_PARTS_INLINE_H_
https://github.com/zk-ml/tachikoma
src/contrib/ethosu/cascader/plan.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file src/contrib/ethosu/cascader/plan.h * \brief Plan object for the NPU cascader */ #ifndef TVM_CONTRIB_ETHOSU_CASCADER_PLAN_H_ #define TVM_CONTRIB_ETHOSU_CASCADER_PLAN_H_ #include <tvm/node/reflection.h> #include <tvm/runtime/object.h> #include <functional> #include <unordered_map> #include <unordered_set> #include <vector> #include "graph.h" #include "tensor_config.h" namespace tvm { namespace contrib { namespace ethosu { namespace cascader { /*! \brief Node to represent a Plan */ class PlanNode : public Object { public: void VisitAttrs(AttrVisitor* v); /*! \return The TensorConfigs specified by the Plan */ const std::vector<TensorConfig>& GetTensorConfigs() const { return tensor_configs_; } /*! \return The TensorConfigs which are 'open' meaning they are a Plan input/output but have * INTERIOR state */ const std::vector<TensorConfig>& GetOpenConfigs() const { return open_configs_; } /*! \return The TensorConfig of the Plan's output tensor */ const TensorConfig GetOutputConfig() const { return output_config_; } /*! \return The Parts which are covered by the Plan */ const std::vector<Part>& GetPartGroup() const { return part_group_; } /*! 
\return The memory region in which to store interior Plan buffers */ MemoryRegion const GetInteriorRegion() const { return interior_region_; } /*! * \return The interior memory used by the Plan in bytes. * \note The interior memory usage is defined as being the memory required in the interior region * to execute the Plan excluding input and output buffers. */ int GetMemoryUsage() const { return memory_usage_; } /*! \return The cycles taken to execute the Plan */ int GetCycles() const { return cycles_; } /*! \return Whether the Plan is 'closed' meaning it has no 'open' TensorConfigs */ bool IsClosed() const { return open_configs_.size() == 0; } static constexpr const char* _type_key = "contrib.ethosu.cascader.Plan"; TVM_DECLARE_FINAL_OBJECT_INFO(PlanNode, Object); protected: friend class Plan; /*! \brief The TensorConfigs specified by the Plan */ std::vector<TensorConfig> tensor_configs_; /*! \brief The TensorConfigs which are 'open' meaning they are a Plan input/output but have * INTERIOR state */ std::vector<TensorConfig> open_configs_; /*! \brief The TensorConfig of the Plan's output tensor */ TensorConfig output_config_; /*! \brief The Parts which are covered by the Plan */ std::vector<Part> part_group_; /*! \brief The memory region in which to store interior Plan buffers */ MemoryRegion interior_region_; /*! \brief The interior memory used by the Plan in bytes */ int memory_usage_; /*! \brief The cycles taken to execute the Plan */ int cycles_; }; /*! * \brief A class which describes how to schedule a subgraph of Parts together. * \note A Plan takes the form of a subgraph of connected Parts (recorded in part_group) with * TensorConfigs for all of the required Tensors (recorded in tensor_configs). This information can * be used to produce a Tensor Expression schedule with inter-operator scheduling. A Plan is * necessarily single-output such that all non-output Parts are 'computed_at'ed the scope of the * output Part. 
This is what achieves the technique referred to as 'cascading'. A Plan also has an * interior memory region which specifies the region of memory into which all the Plans intermediate * buffers should be allocated. * * Additionally, a Plan contains some other information used during the Plan generation and * selection algorithms. Both the memory and cycles required to run the Plan are accounted for so * that Plans can be ranked and Pareto-culled on these metrics. Furthermore, the TensorConfigs which * are 'open' is recorded indicating that these are valid points to merge with another Plan. A Plan * can only be turned into a schedule if it has no 'open' TensorConfigs - at which point the Plan is * said to be 'closed'. */ class Plan : public ObjectRef { public: Plan(const std::vector<TensorConfig>& tensor_configs, const std::vector<TensorConfig>& open_configs, const TensorConfig& output_config, const std::vector<Part>& part_group, const MemoryRegion& interior_region, int memory_usage, int cycles); /*! * \brief Merge two Plans which share an 'open' TensorConfig. * \param other The Plan to merge with. * \return The merged Plan. * \note The current Plan is referred to as the 'upper Plan' and the other Plan as the 'lower * Plan'. The 'open' output config of the upper Plan must be an 'open' input config of the lower * Plan. The Tensor referenced by these configs is the Tensor on which the two Plans will be * merged. The merge process does the following: * * The tensor config maps will be merged with TensorConfigs from the upper Plan taking priority. * The open configs will be merged with the TensorConfigs that are being merged having been * removed. The output config will be that of the lower Plan. The part groups will be merged. The * interior region is necessarily the same for both the upper and lower Plan. The cycles and * memory usage will be summed. 
*/ Plan Merge(const Plan& other) const; TVM_DEFINE_OBJECT_REF_METHODS(Plan, ObjectRef, PlanNode); }; } // namespace cascader } // namespace ethosu } // namespace contrib } // namespace tvm // Hash functions TensorConfig and Part sets namespace std { using TensorConfigSet = std::vector<::tvm::contrib::ethosu::cascader::TensorConfig>; using PartSet = std::vector<::tvm::contrib::ethosu::cascader::Part>; template <> struct hash<TensorConfigSet> { std::size_t operator()(const TensorConfigSet& tensor_config_set) const { size_t seed = 0; for (const auto& tensor_config : tensor_config_set) { seed ^= hash<::tvm::contrib::ethosu::cascader::TensorConfig>()(tensor_config); } return seed; } }; template <> struct equal_to<TensorConfigSet> { bool operator()(const TensorConfigSet& lhs, const TensorConfigSet& rhs) const { std::unordered_set<::tvm::contrib::ethosu::cascader::TensorConfig> lh_set(lhs.begin(), lhs.end()); std::unordered_set<::tvm::contrib::ethosu::cascader::TensorConfig> rh_set(rhs.begin(), rhs.end()); return lh_set == rh_set; } }; template <> struct hash<PartSet> { std::size_t operator()(const PartSet& part_set) const { size_t seed = 0; for (const auto& part : part_set) { seed ^= tvm::runtime::ObjectHash()(part); } return seed; } }; template <> struct equal_to<PartSet> { bool operator()(const PartSet& lhs, const PartSet& rhs) const { return lhs == rhs; } }; } // namespace std #endif // TVM_CONTRIB_ETHOSU_CASCADER_PLAN_H_
https://github.com/zk-ml/tachikoma
src/contrib/ethosu/cascader/plan_generator.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file src/contrib/ethosu/cascader/plan_generator.h * \brief Algorithm to generate possible Plans in the NPU cascader */ #ifndef TVM_CONTRIB_ETHOSU_CASCADER_PLAN_GENERATOR_H_ #define TVM_CONTRIB_ETHOSU_CASCADER_PLAN_GENERATOR_H_ #include <tvm/node/reflection.h> #include <tvm/runtime/object.h> #include <unordered_map> #include <unordered_set> #include <vector> namespace tvm { namespace contrib { namespace ethosu { namespace cascader { class CascaderGraph; class MemoryRegion; class Part; class Tensor; class StripeConfig; class Plan; class CascaderOptions; using HomeMap = std::unordered_map<Tensor, std::vector<MemoryRegion>, ObjectPtrHash, ObjectPtrEqual>; /*! * \brief Generate possible output StripeConfigs that could be applied to a Part's output. * \param part The Part to generate StripeConfigs for. * \param stripe_factors How many striping factors to try per axis. * \param enable_striping Whether striping is enabled * \param multi_dimensional Whether to stripe in more than one dimension. * \return The generated StripeConfigs for the Part's output. */ std::vector<StripeConfig> GenerateOutputStripeConfigs(const Part& part, int stripe_factors, bool enable_striping, bool multi_dimensional); /*! 
* \brief Generate single-Part Plans for a Part for a given list of output StripeConfigs. * \param part The Part to generate Plans for. * \param output_stripe_configs The output StripeConfigs to generate Plans with. * \param home_map The Tensor homing map defining valid memory homes for Tensors. * \param options The configuration options with which to run the generator. * \return The generated Plans covering the Part. * \note For each of the output StripeConfigs provided, this algorithm will produce a number * of Plans corresponding to different choices of Tensor homing/copying, buffer modes * and INTERIOR/BOUNDARY states. For each of these variants, the Part's performance will * be queried and the memory usage will be calculated. */ std::vector<Plan> GenerateSinglePlans(const Part& part, const std::vector<StripeConfig>& output_stripe_configs, const HomeMap& home_map, const CascaderOptions& options); /*! * \brief Generate pareto optimal Plans for a Graph. * \param graph The Graph to generate Plans for. * \param home_map The Tensor homing map defining valid memory homes for Tensors. * \param options The configuration options with which to run the generator. * \return A map between Part groups and a list of pareto optimal Plans which cover that group. * \note This algorithm does the following: * * Iterate Part-by-Part in a reversed topological ordering (starting at the output Parts and * working towards the input Parts). * * For each Part: * 1. Determine the possible StripeConfigs we might want to use to stripe the Part using * GenerateOutputStripeConfigs. * 2. Additionally, collect all the StripeConfigs of open Plans that could connect to this * Part (i.e. the Plan has an open TensorConfig for the Part's output Tensor). * 3. Use these two lists of StripeConfigs to produce single Part Plans with GenerateSinglePlans. * 4. For the generated Plans that have an open output TensorConfig, try and merge these into * existing Plans which share an open input TensorConfig. * 5. 
All Plans are then indexed by both the Part group they cover and their open TensorConfigs. * 6. Plans which cover the same Part group and share the same open TensorConfigs are culled * using ParetoCullPlans. * * Once every Part has been visited, return the Plans with no open TensorConfigs indexed by Part * group. */ std::unordered_map<std::vector<Part>, std::vector<Plan>> GenerateGraphPlans( const CascaderGraph& graph, const HomeMap& home_map, const CascaderOptions& options); } // namespace cascader } // namespace ethosu } // namespace contrib } // namespace tvm #endif // TVM_CONTRIB_ETHOSU_CASCADER_PLAN_GENERATOR_H_
https://github.com/zk-ml/tachikoma
src/contrib/ethosu/cascader/propagator.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file src/contrib/ethosu/cascader/propagator.h * \brief Propagator class for the NPU cascader */ #ifndef TVM_CONTRIB_ETHOSU_CASCADER_PROPAGATOR_H_ #define TVM_CONTRIB_ETHOSU_CASCADER_PROPAGATOR_H_ #include <tvm/node/reflection.h> #include <tvm/runtime/object.h> #include <vector> namespace tvm { namespace contrib { namespace ethosu { namespace cascader { class Propagator; class StripeConfig; /*! \brief Node to represent a Propagator */ class PropagatorNode : public Object { public: void VisitAttrs(AttrVisitor* v); /*! \return The transform matrix to apply to the StripeConfigs */ const std::vector<std::vector<float>> GetTransform() const { return transform_; } /*! \return The offset vector to apply to the StripeConfigs */ const std::vector<int> GetOffset() const { return offset_; } /*! \return The number of input dimensions */ size_t GetInputDims() const { return offset_.size(); } /*! \return The number of output dimensions */ size_t GetOutputDims() const { return transform_[0].size() - 1; } /*! * \brief Propagate a StripeConfig through the transform and offset matrices. * \param stripe_config The StripeConfig to propagate. * \return The transformed StripeConfig. 
* \note The propagation proceeds as follows: * * Both the stripe shape and extent have 1 appended to them (so they pick up * constant factors from the affine transform) and are then multiplied by the * transform matrix. The result is then ceil-rounded and has the trailing 1 * stripped to give the new shape and extent. * * The strides has 0 appended to it (so it doesn't pick up constant factors) * and is then multiplied by the transform matrix. The trailing 0 is stripped. * * For the remaining three values we introduce the concept of the 'binarized' * transform matrix. This is the transform matrix but with every non-zero element * set to 1. It represents how axes get re-ordered as part of the propagation. * * [2, 0, 0, 1] [1, 0, 0, 1] * [0, 0, 0.4, 2] binarize [0, 0, 1, 1] * [0, 1.5, 0, 0] ----> [0, 1, 0, 0] * [0, 0, 0, 1] [0, 0, 0, 1] * * The order has 0 appended to it and is multiplied by the 'binarized' transform * matrix. The trailing 0 is then stripped. * * The stripes has 0 appended to it and multiplied by the 'binarized' transform * matrix. The trailing 0 is then stripped and any remaining 0 elements that * were introduced by the transform are set instead to 1. * * The stripe offset is multiplied by the 'binarized' transform matrix and is * then summed with the propagator offset. */ StripeConfig propagate(const StripeConfig& stripe_config) const; static constexpr const char* _type_key = "contrib.ethosu.cascader.Propagator"; TVM_DECLARE_FINAL_OBJECT_INFO(PropagatorNode, Object); protected: friend class Propagator; /*! \brief The transform matrix to apply to the StripeConfigs */ std::vector<std::vector<float>> transform_; /*! \brief The offset vector to apply to the StripeConfigs */ std::vector<int> offset_; }; /*! * \brief A class to transform StripeConfigs according to the data dependencies between Part outputs and inputs. The dependency is represented as an affine transformation matrix + an offset vector. 
Using this, an output StripeConfig can be propagated through a Part to arrive at the input StripeConfigs. * \note The transform matrix should be a 2D affine transform matrix. * As an example, consider a (1, 1, 2, 32) output stripe for an NHWC pooling * operation with a 3x3 pool size: * * [1, 0, 0, 0, 0] [ 1] [ 1] * [0, 1, 0, 0, 2] [ 1] [ 3] * [0, 0, 1, 0, 2] x [ 2] = [ 4] * [0, 0, 0, 1, 0] [32] [32] * [0, 0, 0, 0, 1] [ 1] [ 1] * * Using the appropriate affine matrix we see that the required input data to * produce that output stripe is a (1, 3, 4, 32) stripe. These matrices should * be derived for the Parts to relate input and output data dependencies. * * The offset is a 1D vector representing the first tensor element to read. * Often this is just the 0 element, but for an operator such as pad it may be * negative. For instance, a symmetric padding by 1 of a 2D tensor would require * the offset vector [-1, -1]. Additionally, positive offsets may be required * for operators like strided_slice where only part of a tensor is read from. */ class Propagator : public ObjectRef { public: Propagator(const std::vector<std::vector<float>>& transform, const std::vector<int>& offset); TVM_DEFINE_OBJECT_REF_METHODS(Propagator, ObjectRef, PropagatorNode); }; } // namespace cascader } // namespace ethosu } // namespace contrib } // namespace tvm #endif // TVM_CONTRIB_ETHOSU_CASCADER_PROPAGATOR_H_
https://github.com/zk-ml/tachikoma
src/contrib/ethosu/cascader/proposal.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file src/contrib/ethosu/cascader/proposal.h * \brief Proposal object for the NPU cascader */ #ifndef TVM_CONTRIB_ETHOSU_CASCADER_PROPOSAL_H_ #define TVM_CONTRIB_ETHOSU_CASCADER_PROPOSAL_H_ #include <tvm/node/reflection.h> #include <tvm/runtime/object.h> #include <unordered_map> #include <unordered_set> #include <vector> #include "graph.h" #include "plan.h" #include "tensor_config.h" namespace tvm { namespace contrib { namespace ethosu { namespace cascader { using MemoryUsageMap = std::unordered_map<MemoryRegion, int, ObjectPtrHash, ObjectPtrEqual>; using TensorConfigMap = std::unordered_map<Tensor, TensorConfig, ObjectPtrHash, ObjectPtrEqual>; /*! \brief Node to represent a Proposal */ class ProposalNode : public Object { public: void VisitAttrs(AttrVisitor* v); /*! \return The CascaderGraph to which the Proposal applies */ const CascaderGraph GetGraph() const { return graph_; } /*! \return The Parts which are covered by the Proposal */ const std::vector<Part> GetPartGroup() const { return part_group_; } /*! \return The Plans used in the Proposal */ const std::vector<Plan> GetPlans() const { return plans_; } /*! 
\return The TensorConfigs indexed by Tensor in the Proposal which aren't produced by a Plan */ const TensorConfigMap GetInputTensorConfigs() const { return input_tensor_configs_; } /*! \return The MemoryRegion where cascading buffers should be homed */ const MemoryRegion GetCascadeRegion() const { return cascade_region_; } /*! \return The memory required to execute the Proposal in the cascading MemoryRegion */ const int GetMemoryUsage() const { return memory_usage_; } /*! \return The estimated cycles taken to execute the Proposal */ int GetCycles() const { return cycles_; } static constexpr const char* _type_key = "contrib.ethosu.cascader.Proposal"; TVM_DECLARE_FINAL_OBJECT_INFO(ProposalNode, Object); protected: friend class Proposal; /*! \brief The CascaderGraph to which the Proposal applies */ CascaderGraph graph_; /*! \brief The Parts which are covered by the Proposal */ std::vector<Part> part_group_; /*! \brief The Plans used in the Proposal */ std::vector<Plan> plans_; /*! \brief The TensorConfigs indexed by Tensor in the Proposal which aren't produced by a Plan */ TensorConfigMap input_tensor_configs_; /*! \brief The MemoryRegion where cascading buffers should be homed */ MemoryRegion cascade_region_; /*! \brief The memory required to execute the Proposal in the cascading MemoryRegion */ int memory_usage_; /*! \brief The estimated cycles taken to execute the Proposal */ int cycles_; }; /*! * \brief A class which describes how to schedule a CascaderGraph as a series of disjoint Plans. */ class Proposal : public ObjectRef { public: Proposal(const CascaderGraph& graph, const std::vector<Part>& part_group, const std::vector<Plan>& plans, const TensorConfigMap& input_tensor_configs, const MemoryRegion& cascade_region, int memory_usage, int cycles); TVM_DEFINE_OBJECT_REF_METHODS(Proposal, ObjectRef, ProposalNode); }; } // namespace cascader } // namespace ethosu } // namespace contrib } // namespace tvm #endif // TVM_CONTRIB_ETHOSU_CASCADER_PROPOSAL_H_
https://github.com/zk-ml/tachikoma
src/contrib/ethosu/cascader/proposal_generator.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file src/contrib/ethosu/cascader/proposal_generator.h * \brief Algorithm to generate possible Proposals in the NPU cascader */ #ifndef TVM_CONTRIB_ETHOSU_CASCADER_PROPOSAL_GENERATOR_H_ #define TVM_CONTRIB_ETHOSU_CASCADER_PROPOSAL_GENERATOR_H_ #include <tvm/node/reflection.h> #include <tvm/runtime/object.h> #include <unordered_map> #include <unordered_set> #include <vector> namespace tvm { namespace contrib { namespace ethosu { namespace cascader { class CascaderGraph; class MemoryRegion; class Tensor; class Proposal; class CascaderOptions; using HomeMap = std::unordered_map<Tensor, std::vector<MemoryRegion>, ObjectPtrHash, ObjectPtrEqual>; /*! * \brief Generate Pareto optimal Proposals for a CascaderGraph. * \param graph The CascaderGraph to generate Proposals for. * \param home_map The Tensor homing map defining valid memory homes for Tensors. * \param options The configuration options with which to run the generator. * \return A vector of Pareto optimal Proposals. * \note This algorithm takes a top-down dynamic programming approach to determining how * to optimally combine Plans into Proposals. 
It does the following: * * First, run GenerateGraphPlans to generate the Pareto optimal Plans that cover all the * Part groups in the CascaderGraph. * * Solve the problem recursively, generating optimal Proposals for increasingly small * portions of the overall graph. * * Take the first Part in the graph: * 1. Find all the Plans for which the Part is both in the Plan's Part group and has the * highest Part ID of any Part in the Part group (i.e. it's the 'first' Part in the * group). * For each Plan: * 2. Get the Part group covered by the Plan and subtract it from the 'total Part group' * covering all the Parts. This forms a 'residual Part group'. * 3. Recursively, determine the optimal Proposals for the 'residual Part group' (the graph * minus the Parts included in the Plan). Memoize the results. * For each residual Proposal: * 4. Create a new Proposal by adding the current Plan to the residual Proposal. * 5. Pareto cull all the newly created Proposals (which all share the same Part group). * 6. Return the Proposals which cover all the Parts in the CascaderGraph. * */ std::vector<Proposal> GenerateProposals(const CascaderGraph& graph, const HomeMap& home_map, const CascaderOptions& options); } // namespace cascader } // namespace ethosu } // namespace contrib } // namespace tvm #endif // TVM_CONTRIB_ETHOSU_CASCADER_PROPOSAL_GENERATOR_H_
https://github.com/zk-ml/tachikoma
src/contrib/ethosu/cascader/stripe_config.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file src/contrib/ethosu/cascader/stripe_config.h * \brief StripeConfig object for the NPU cascader */ #ifndef TVM_CONTRIB_ETHOSU_CASCADER_STRIPE_CONFIG_H_ #define TVM_CONTRIB_ETHOSU_CASCADER_STRIPE_CONFIG_H_ #include <tvm/node/reflection.h> #include <tvm/runtime/object.h> #include <functional> #include <map> #include <vector> namespace tvm { namespace contrib { namespace ethosu { namespace cascader { class StripeConfig; class PropagatorNode; /*! \brief Node to represent a StripeConfig */ class StripeConfigNode : public Object { public: void VisitAttrs(AttrVisitor* v); /*! * \brief Get the shape of the stripe config. * \return The shape of the stripe config. * \note The shape refers to the size of the stripes in each dimension. */ inline std::vector<int> GetShape() const { return shape_; } /*! * \brief Get the extent of the stripe config. * \return The extent of the stripe config. * \note The extent refers to the extent over which a StripeConfig operates. * Specifically, it is the extent in each axis between the lowest value read * by a stripe and the highest value read by a stripe. */ inline std::vector<int> GetExtent() const { return extent_; } /*! 
* \brief Get the strides of the stripe config. * \return The strides of the stripe config. * \note The strides refer to the stride between stripes in each axis. * The strides are represented as a float rather than an int to account for * cases of 'fractional striding'. The stride should therefore be interpreted * as the average striding in each axis. * * The starting offset of the i-th stripe in axis 'ax' is given by: * * stripe_offset_i[ax] = offset[ax] + floor(strides[ax]*i) * * As a concrete example, consider a 2x2 upscaling operation. If an output * stripe config with a stride of (3, 3) is chosen, then when this is * propagated to the input it will be reduced by a factor of two to become * (1.5, 1.5). * * This means the first stripe in axis 0 should begin at (floor(1.5*0), 0) = (0, 0), * the second at (floor(1.5*1), 0) = (1, 0), and the third at (floor(1.5*2), 0) = * (3, 0). This results in irregular striding where 'strides' is the average * striding value. */ inline std::vector<float> GetStrides() const { return strides_; } /*! * \brief Get the order of the stripe config. * \return The order of the stripe config. * \note The order refers to order in which the axes are iterated over. * The first (outermost) axis is labelled as 1 with the rest increasing * according to the axis' position. Any axis labelled with 0 isn't iterated over. * For example, [1, 3, 2] would mean axis 0 is the outermost iteration axis, * then axis 2, then finally axis 1. */ inline std::vector<int> GetOrder() const { return order_; } /*! * \brief Get the stripes of the stripe config. * \return The stripes of the stripe config. * \note The stripes refer to the number of stripes in each axis. * There must be at least one stripe in any given axis. */ inline std::vector<int> GetStripes() const { return stripes_; } /*! * \brief Get the offset of the stripe config. * \return The offset of the stripe config. 
   * \note The offset refers to the offset of the first stripe
   * from the first element of the tensor. For example, in a slice operation
   * which only returns the second (4, 8) half of a (8, 8) tensor, the offset
   * would need to be [4, 0].
   */
  inline std::vector<int> GetOffset() const { return offset_; }
  /*! \return The cached hash of the StripeConfigNode */
  size_t GetHash() const { return hash_; }

  static constexpr const char* _type_key = "contrib.ethosu.cascader.StripeConfig";
  TVM_DECLARE_FINAL_OBJECT_INFO(StripeConfigNode, Object);

 protected:
  friend class StripeConfig;
  friend class PropagatorNode;

  /*! \brief Compute the hash of the StripeConfigNode and store it in hash_ */
  void ComputeHash_();

  /*! \brief The shape of the stripes */
  std::vector<int> shape_;
  /*! \brief The extent of region to stripe over */
  std::vector<int> extent_;
  /*! \brief The strides of the stripes (may be fractional, see StripeConfig class docs) */
  std::vector<float> strides_;
  /*! \brief The order of the striping axes */
  std::vector<int> order_;
  /*! \brief The number of stripes in each axis */
  std::vector<int> stripes_;
  /*! \brief The offset of the first stripe */
  std::vector<int> offset_;
  /*! \brief The hash of the StripeConfigNode */
  std::size_t hash_{0};
};

/*!
 * \brief An object to describe how a tensor should be computed as a series of n-dimensional
 * tiles, or 'stripes'.
 * \note The StripeConfig is a verbose way of specifying how to tile a tensor.
 * We can imagine taking a 2D tensor of size (12, 12) and wanting to compute
 * it in tiles of (4, 4). The tile is referred to as a stripe here to generalize
 * this to n-dimensional tiles.
 *
 * The size of that stripe in each axis is the 'shape'. The strides is how far
 * you should move between stripes, so also (4, 4) for a simple non-overlappping
 * tiling. However, we explore some overlapping scheduling options so shape != strides
 * in general. Note that the striding may be fractional, for instance (1.5, 1.5).
 * This means the first stripe should begin at (floor(1.5*0), 0) = (0, 0), the second
 * at (floor(1.5*1), 0) = (1, 0), and the third at (floor(1.5*2), 0) = (3, 0). This results
 * in slightly irregular striding where 'strides' should be interpreted as the average
 * striding value.
 *
 * The 'extent' is simply (12, 12), the region over which we're conducting our tiling.
 *
 * The 'order' tells us which axis to iterate over first and which second and the
 * 'stripes' tells us how many stripes we need to compute in each of those axes.
 *
 * Finally, the 'offset' tells us where to start the first stripe. In this simple
 * case the offset is just (0, 0), but in something like a slice operation we
 * may want to start part way through a tensor.
 */
class StripeConfig : public ObjectRef {
 public:
  StripeConfig(const std::vector<int>& shape, const std::vector<int>& extent,
               const std::vector<float>& strides, const std::vector<int>& order,
               const std::vector<int>& stripes, const std::vector<int>& offset);
  /*!
   * \brief Check if two StripeConfigs are equal to each other.
   * \param other StripeConfig to be checked.
   * \return Whether the two StripeConfigs equal each other.
   */
  bool operator==(const StripeConfig& other) const;

  TVM_DEFINE_OBJECT_REF_METHODS(StripeConfig, ObjectRef, StripeConfigNode);
};

/*!
 * \brief Count the number of stripes of each shape that are executed for a given StripeConfig.
 * \param stripe_config The StripeConfig to count the stripes for.
 * \param enable_sliding_window Whether to assume the sliding window optimization.
 * \return A map between stripe shapes and the number of stripes of that shape that need
 * executing.
 * \note If the StripeConfig were to split an (8, 8) tensor into (4, 4) stripes with
 * (4, 4) striding, then this function will return {(4, 4): 4} indicating that 4 (4, 4)
 * stripes will be executed. If instead an (8, 8) were striped using (5, 5) stripes
 * with (5, 5) striding, this function would return:
 *
 * {
 *   (5, 5): 1,
 *   (3, 5): 1,
 *   (5, 3): 1,
 *   (3, 3): 1,
 * }
 *
 * This is because some of the stripes will exceed the extent of the tensor and so only part
 * of them will need executing. Therefore, CountStripes will return the exact number of each
 * shape of stripe that is executed, accounting for edge and overlap behaviour which is not
 * explicit in the StripeConfig alone.
 */
std::map<std::vector<int>, int> CountStripes(const StripeConfig& stripe_config,
                                             bool enable_sliding_window);

}  // namespace cascader
}  // namespace ethosu
}  // namespace contrib
}  // namespace tvm

// Hash and equal function for StripeConfig
namespace std {

/*! \brief The equal_to function for tvm::contrib::ethosu::cascader::StripeConfig */
template <>
struct equal_to<::tvm::contrib::ethosu::cascader::StripeConfig> {
  bool operator()(const ::tvm::contrib::ethosu::cascader::StripeConfig& lhs,
                  const ::tvm::contrib::ethosu::cascader::StripeConfig& rhs) const {
    return lhs == rhs;
  }
};

/*! \brief The hash function for tvm::contrib::ethosu::cascader::StripeConfig */
template <>
struct hash<::tvm::contrib::ethosu::cascader::StripeConfig> {
  std::size_t operator()(
      const ::tvm::contrib::ethosu::cascader::StripeConfig& stripe_config) const {
    return stripe_config->GetHash();
  }
};

}  // namespace std

#endif  // TVM_CONTRIB_ETHOSU_CASCADER_STRIPE_CONFIG_H_
https://github.com/zk-ml/tachikoma
src/contrib/ethosu/cascader/tensor_config.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file src/contrib/ethosu/cascader/tensor_config.h
 * \brief TensorConfig object for the NPU cascader
 */
#ifndef TVM_CONTRIB_ETHOSU_CASCADER_TENSOR_CONFIG_H_
#define TVM_CONTRIB_ETHOSU_CASCADER_TENSOR_CONFIG_H_

#include <tvm/node/reflection.h>
#include <tvm/runtime/object.h>

#include <functional>
#include <string>
#include <utility>
#include <vector>

#include "graph.h"
#include "stripe_config.h"

namespace tvm {
namespace contrib {
namespace ethosu {
namespace cascader {

/*! \brief Node to describe a memory region (name, capacity and performance characteristics) */
class MemoryRegionNode : public Object {
 public:
  void VisitAttrs(AttrVisitor* v);

  /*! \brief The name of the region */
  std::string name;
  /*! \brief The size of the region */
  int size;
  /*! \brief The read bandwidth of the region in bytes per cycle */
  int read_bandwidth;
  /*! \brief The write bandwidth of the region in bytes per cycle */
  int write_bandwidth;
  // NOTE(review): the original comments on the two latency fields were copy-pasted from the
  // bandwidth fields ("bandwidth ... bytes per cycle"); units are presumably cycles — confirm.
  /*! \brief The read latency of the region */
  int read_latency;
  /*! \brief The write latency of the region */
  int write_latency;
  /*! \brief Length of memory burst */
  int burst_length;

  static constexpr const char* _type_key = "contrib.ethosu.cascader.MemoryRegion";
  TVM_DECLARE_FINAL_OBJECT_INFO(MemoryRegionNode, Object)
};

/*! \brief Reference class for MemoryRegionNode */
class MemoryRegion : public ObjectRef {
 public:
  MemoryRegion(std::string name, int size, int read_bandwidth, int write_bandwidth,
               int read_latency, int write_latency, int burst_length) {
    auto n = make_object<MemoryRegionNode>();
    n->name = name;
    n->size = size;
    n->read_bandwidth = read_bandwidth;
    n->write_bandwidth = write_bandwidth;
    n->read_latency = read_latency;
    n->write_latency = write_latency;
    n->burst_length = burst_length;
    data_ = std::move(n);
  }

  TVM_DEFINE_OBJECT_REF_METHODS(MemoryRegion, ObjectRef, MemoryRegionNode);
};

/*! \brief The 'state' of a TensorConfig as used in the Plan generation algorithm.
 * BOUNDARY - Should describe a Plan input/output Tensor.
 * INTERIOR - Should describe an intermediate Tensor in a 'closed' Plan.
 */
enum TensorConfigState { BOUNDARY, INTERIOR };

/*! \brief Node to represent a TensorConfig */
class TensorConfigNode : public Object {
 public:
  void VisitAttrs(AttrVisitor* v);

  /*! \return The Tensor the config applies to */
  const Tensor GetTensor() const { return tensor_; }
  /*! \return The region where the tensor is allocated */
  MemoryRegion GetHomeRegion() const { return home_region_; }
  /*!
   * \return The state of the TensorConfig.
   * \note The TensorConfigState is only used as part of the Plan generation algorithm. For a Plan
   * to be 'closed' (and therefore not subject to any further merging), all the TensorConfigs that
   * describe Plan input or output Tensors must be in the 'BOUNDARY' state with the rest being
   * 'INTERIOR'. If any of the input or output tensors are described by an 'INTERIOR' TensorConfig,
   * then the Plan is 'open' and should be merged with other 'open' Plans until the result becomes
   * 'closed'.
   */
  TensorConfigState GetState() const { return state_; }
  /*!
   * \return The mode in which the buffer should be realized
   * \note There are multiple buffering strategies by which a tensor may be realized (computed).
   * These affect the amount of recomputation necessary as well as the size of buffer required to
   * store the tensor. See 'BufferMode' for a description of the allowable buffering modes.
   */
  BufferMode GetBufferMode() const { return buffer_mode_; }
  /*!
   * \return Whether to copy the tensor.
   * \note While a tensor will originally reside in its home region, the TensorConfig may
   * optionally specify that the tensor should be copied (according to the StripeConfigs) into
   * another MemoryRegion. As an example for where this may be used, if a weights tensor initially
   * resides in slow Flash memory then necessarily the home region will be Flash. However, if the
   * weights values are used multiple times by a Part, it may be more performant to choose to copy
   * the weights into a faster memory like SRAM.
   */
  bool DoCopy() const { return copy_tensor_; }
  /*! \return The region to copy the tensor to (the home region when no copy is requested) */
  MemoryRegion GetCopyRegion() const {
    if (!copy_tensor_) {
      return home_region_;
    }
    return copy_region_;
  }
  /*!
   * \return The StripeConfigs with which to compute the tensor.
   * \note The StripeConfigs determine the order in which the elements of the tensor should be
   * computed, including potentially computing them multiple times (recompute). Multiple
   * StripeConfigs are used over just a single StripeConfig for the case where the tensor is
   * consumed by two different Parts executing themselves with different StripeConfigs. In this
   * case, there is a StripeConfig per consumer of the tensor.
   */
  const std::vector<StripeConfig> GetStripeConfigs() const { return stripe_configs_; }
  /*!
   * \return The size of the buffer needed for the TensorConfig.
   * \note The size of buffer necessary to store a tensor being produced using the TensorConfig is
   * not necessarily just the size of the tensor. In Plans, a tensor may be being produced and
   * consumed in 'stripes' which are smaller than the full tensor. Therefore, the buffer necessary
   * to store the tensor may only need to be as large as the stripe. The precise size of the
   * buffer will depend both on the BufferMode and StripeConfigs (as well as, of course, the
   * Tensor).
   */
  int GetBufferSize() const;
  /*! \return The cached hash of the TensorConfigNode */
  size_t GetHash() const { return hash_; }

  static constexpr const char* _type_key = "contrib.ethosu.cascader.TensorConfig";
  TVM_DECLARE_FINAL_OBJECT_INFO(TensorConfigNode, Object);

 protected:
  friend class TensorConfig;

  /*! \brief Compute the hash of the TensorConfigNode and store it in hash_ */
  void ComputeHash_();

  /*! \return The size of the recompute buffer needed*/
  int GetRecomputeBufferSize_() const;
  /*! \return The size of the rolling buffer needed*/
  int GetRollingBufferSize_() const;

  /*! \brief The Tensor the config applies to */
  Tensor tensor_;
  /*! \brief The region where the tensor is allocated */
  MemoryRegion home_region_;
  /*! \brief The state of the TensorConfig */
  TensorConfigState state_;
  /*! \brief The mode in which the buffer should be realized */
  BufferMode buffer_mode_;
  /*! \brief The StripeConfigs with which to compute the tensor */
  std::vector<StripeConfig> stripe_configs_;
  /*! \brief Whether to copy the tensor */
  bool copy_tensor_;
  /*! \brief The region to copy the tensor to */
  MemoryRegion copy_region_;
  /*! \brief The hash of the TensorConfigNode */
  size_t hash_{0};
};

/*!
 * \brief A class which describes how to realize a Tensor.
 * \note The TensorConfig describes both how a Tensor is scheduled (the order in which it's
 * produced/consumed) and how its allocated in memory (which region it should reside in and
 * whether it should be copied). For further detail on how TensorConfig stores this information,
 * consult the documentation of TensorConfigNode.
 */
class TensorConfig : public ObjectRef {
 public:
  TensorConfig(const Tensor& tensor, const MemoryRegion& home_region, TensorConfigState state,
               BufferMode buffer_mode, const std::vector<StripeConfig>& stripe_configs,
               bool copy_tensor, const MemoryRegion& copy_region);
  /*!
   * \brief Check if two TensorConfigs are equal to each other.
   * \param other TensorConfig to be checked.
   * \return Whether the two TensorConfigs equal each other.
   */
  bool operator==(const TensorConfig& other) const;

  TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(TensorConfig, ObjectRef, TensorConfigNode);
};

}  // namespace cascader
}  // namespace ethosu
}  // namespace contrib
}  // namespace tvm

// Hash and equal function for TensorConfig
namespace std {

/*! \brief The equal_to function for tvm::contrib::ethosu::cascader::TensorConfig */
template <>
struct equal_to<::tvm::contrib::ethosu::cascader::TensorConfig> {
  bool operator()(const ::tvm::contrib::ethosu::cascader::TensorConfig& lhs,
                  const ::tvm::contrib::ethosu::cascader::TensorConfig& rhs) const {
    return lhs == rhs;
  }
};

/*! \brief The hash function for tvm::contrib::ethosu::cascader::TensorConfig */
template <>
struct hash<::tvm::contrib::ethosu::cascader::TensorConfig> {
  std::size_t operator()(
      const ::tvm::contrib::ethosu::cascader::TensorConfig& tensor_config) const {
    return tensor_config->GetHash();
  }
};

}  // namespace std

#endif  // TVM_CONTRIB_ETHOSU_CASCADER_TENSOR_CONFIG_H_
https://github.com/zk-ml/tachikoma
src/contrib/hybrid/codegen_hybrid.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file codegen_hybrid.h
 * \brief Common utilities to generate C style code.
 */
#ifndef TVM_CONTRIB_HYBRID_CODEGEN_HYBRID_H_
#define TVM_CONTRIB_HYBRID_CODEGEN_HYBRID_H_

#include <tvm/ir/name_supply.h>
#include <tvm/target/codegen.h>
#include <tvm/te/operation.h>
#include <tvm/te/schedule.h>
#include <tvm/tir/expr.h>
#include <tvm/tir/stmt_functor.h>

#include <map>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

namespace tvm {
namespace contrib {

using namespace te;
using namespace tir;

/*!
 * \brief A base class to generate Hybrid Script.
 *
 * **NOTE** CodeGenHybrid does not aim at generating Python scripts consumed by Python2/3.
 * For runtime support, please refer the decorator in ``tvm/python/hybrid/api.py``.
 */
class CodeGenHybrid : public ExprFunctor<void(const PrimExpr&, std::ostream&)>,
                      public StmtFunctor<void(const Stmt&)> {
 public:
  /*!
   * \brief Dump the given function body to hybrid script.
   * \param stmt The function body to be dumped to hybrid script.
   * \param inputs Input tensors of this schedule.
   * \param outputs Output tensors of this schedule.
   * \param name The name of the function.
   */
  void DumpStmt(const Stmt& stmt, const Array<ObjectRef>& inputs, const Array<Tensor>& outputs,
                const std::string& name = "hybrid_func");
  /*!
   * \brief Finalize the compilation and return the code.
   * \return The code.
   */
  std::string Finish();
  /*! \brief Reserve keywords to avoid name conflicts. */
  void ReserveKeywords();
  /*!
   * \brief Print the Stmt n to CodeGenHybrid->stream
   * \param n The statement to be printed.
   */
  void PrintStmt(const Stmt& n) { this->VisitStmt(n); }
  /*!
   * \brief Print the expression n(or its ssa id if in ssa mode) into os
   * \param n The expression to be printed.
   * \param os The output stream
   */
  void PrintExpr(const PrimExpr& n, std::ostream& os) { this->VisitExpr(n, os); }
  /*!
   * \brief Same as PrintExpr, but simply returns result string
   * \param n The expression to be printed.
   */
  std::string PrintExpr(const PrimExpr& n) {
    std::ostringstream os;
    PrintExpr(n, os);
    return os.str();
  }
  // expression
  void VisitExpr_(const VarNode* op, std::ostream& os) override;           // NOLINT(*)
  void VisitExpr_(const LoadNode* op, std::ostream& os) override;          // NOLINT(*)
  void VisitExpr_(const BufferLoadNode* op, std::ostream& os) override;    // NOLINT(*)
  void VisitExpr_(const LetNode* op, std::ostream& os) override;           // NOLINT(*)
  void VisitExpr_(const CallNode* op, std::ostream& os) override;          // NOLINT(*)
  void VisitExpr_(const ProducerLoadNode* op, std::ostream& os) override;  // NOLINT(*)
  void VisitExpr_(const AddNode* op, std::ostream& os) override;           // NOLINT(*)
  void VisitExpr_(const SubNode* op, std::ostream& os) override;           // NOLINT(*)
  void VisitExpr_(const MulNode* op, std::ostream& os) override;           // NOLINT(*)
  void VisitExpr_(const DivNode* op, std::ostream& os) override;           // NOLINT(*)
  void VisitExpr_(const ModNode* op, std::ostream& os) override;           // NOLINT(*)
  void VisitExpr_(const FloorDivNode* op, std::ostream& os) override;      // NOLINT(*)
  void VisitExpr_(const FloorModNode* op, std::ostream& os) override;      // NOLINT(*)
  void VisitExpr_(const MinNode* op, std::ostream& os) override;           // NOLINT(*)
  void VisitExpr_(const MaxNode* op, std::ostream& os) override;           // NOLINT(*)
  void VisitExpr_(const EQNode* op, std::ostream& os) override;            // NOLINT(*)
  void VisitExpr_(const NENode* op, std::ostream& os) override;            // NOLINT(*)
  void VisitExpr_(const LTNode* op, std::ostream& os) override;            // NOLINT(*)
  void VisitExpr_(const LENode* op, std::ostream& os) override;            // NOLINT(*)
  void VisitExpr_(const GTNode* op, std::ostream& os) override;            // NOLINT(*)
  void VisitExpr_(const GENode* op, std::ostream& os) override;            // NOLINT(*)
  void VisitExpr_(const AndNode* op, std::ostream& os) override;           // NOLINT(*)
  void VisitExpr_(const OrNode* op, std::ostream& os) override;            // NOLINT(*)
  void VisitExpr_(const CastNode* op, std::ostream& os) override;          // NOLINT(*)
  void VisitExpr_(const NotNode* op, std::ostream& os) override;           // NOLINT(*)
  void VisitExpr_(const SelectNode* op, std::ostream& os) override;        // NOLINT(*)
  void VisitExpr_(const RampNode* op, std::ostream& os) override;          // NOLINT(*)
  void VisitExpr_(const BroadcastNode* op, std::ostream& os) override;     // NOLINT(*)
  void VisitExpr_(const IntImmNode* op, std::ostream& os) override;        // NOLINT(*)
  void VisitExpr_(const FloatImmNode* op, std::ostream& os) override;      // NOLINT(*)
  void VisitExpr_(const StringImmNode* op, std::ostream& os) override;     // NOLINT(*)
  // statement
  void VisitStmt_(const LetStmtNode* op) override;
  void VisitStmt_(const StoreNode* op) override;
  void VisitStmt_(const BufferStoreNode* op) override;
  void VisitStmt_(const ProducerStoreNode* op) override;
  void VisitStmt_(const ForNode* op) override;
  void VisitStmt_(const IfThenElseNode* op) override;
  void VisitStmt_(const AllocateNode* op) override;
  void VisitStmt_(const ProducerRealizeNode* op) override;
  void VisitStmt_(const AttrStmtNode* op) override;
  void VisitStmt_(const AssertStmtNode* op) override;
  void VisitStmt_(const EvaluateNode* op) override;
  void VisitStmt_(const SeqStmtNode* op) override;
  /*!
   * \brief Print the type representation of type t.
   * \param t The type representation.
   * \param os The stream to print the ctype into
   */
  virtual void PrintType(DataType t, std::ostream& os);  // NOLINT(*)

 private:
  /*! \brief The current indent of the code dump. */
  int indent_{0};
  /*! \brief The tab size of code indent. */
  const int tab_{4};
  /*! \brief Print the current indent spaces. */
  inline void PrintIndent();
  /*! \brief NameSupply for allocated ids. */
  NameSupply ids_allocated = NameSupply("");
  /*!
   * \brief Keys are either (tensors, value_index) or (variables, 0).
   *        Values are the corresponding IDs.*/
  std::map<std::pair<const Object*, int>, std::string> id_map_;
  /*! \brief Variables (keys) bound to the threads (values). */
  std::map<const VarNode*, std::string> binds_;
  /*! \brief The output code string builder. */
  std::stringstream stream;
  /*!
   * \brief Get or allocate the ID for the given variable.
   * \param v The given variable.
   */
  std::string GetVarID(const VarNode* v);
  /*!
   * \brief Get or allocate the ID for the given tensor.
   * \param tensor The tensor to allocate a name.
   */
  std::string GetTensorID(const Tensor& tensor);
};

}  // namespace contrib
}  // namespace tvm

#endif  // TVM_CONTRIB_HYBRID_CODEGEN_HYBRID_H_
https://github.com/zk-ml/tachikoma
src/contrib/torch/base64.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file base64.h * \brief Util functions for converting plain bytes back to plain bytes */ #ifndef TVM_CONTRIB_TORCH_BASE64_H_ #define TVM_CONTRIB_TORCH_BASE64_H_ #include <tvm/runtime/logging.h> #include <cctype> #include <cstdio> #include <string> #include "../../support/base64.h" namespace tvm { namespace support { size_t b64strlen(const std::string b64str) { ICHECK(b64str.size() % 4 == 0) << "invalid base64 encoding"; size_t length = b64str.size() / 4 * 3; if (b64str[b64str.size() - 2] == '=') { length -= 2; } else if (b64str[b64str.size() - 1] == '=') { length -= 1; } return length; } void b64decode(const std::string b64str, u_char* ret) { size_t index = 0; const auto length = b64str.size(); for (size_t i = 0; i < length; i += 4) { int8_t ch0 = base64::DecodeTable[(int32_t)b64str[i]]; int8_t ch1 = base64::DecodeTable[(int32_t)b64str[i + 1]]; int8_t ch2 = base64::DecodeTable[(int32_t)b64str[i + 2]]; int8_t ch3 = base64::DecodeTable[(int32_t)b64str[i + 3]]; u_char st1 = (ch0 << 2) + (ch1 >> 4); ret[index++] = st1; if (b64str[i + 2] != '=') { u_char st2 = ((ch1 & 0b1111) << 4) + (ch2 >> 2); ret[index++] = st2; if (b64str[i + 3] != '=') { u_char st3 = ((ch2 & 0b11) << 6) + ch3; ret[index++] 
= st3; } } } ICHECK(b64strlen(b64str) == index) << "base64 decoding fails"; } } // namespace support } // namespace tvm #endif // TVM_CONTRIB_TORCH_BASE64_H_
https://github.com/zk-ml/tachikoma
src/contrib/torch/tvm_module_wrapper/runtime_bridge.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file runtime_bridge.h
 * \brief Util functions for pytorch tvm interaction.
 * \note This header declares a C ABI (everything inside `extern "C"`), so it can be
 *       consumed from both the PyTorch extension and the TVM runtime side.
 *       NOTE(review): it uses DLManagedTensor but does not itself include
 *       <dlpack/dlpack.h> — confirm that all includers provide that definition first.
 */
#ifndef TVM_CONTRIB_TORCH_TVM_MODULE_WRAPPER_RUNTIME_BRIDGE_H_
#define TVM_CONTRIB_TORCH_TVM_MODULE_WRAPPER_RUNTIME_BRIDGE_H_

extern "C" {

/*
 * DLPack data structure extended with an `is_bool` flag.
 * DLPack does not support boolean tensors yet
 * (https://github.com/pytorch/pytorch/blob/4618371da56c887195e2e1d16dad2b9686302800/aten/src/ATen/DLConvertor.cpp#L42),
 * thus a boolean tensor will be regarded as a UInt8 tensor
 * (https://github.com/apache/tvm/blob/de124862714e747764aa8b7f41a90bcb25f3c6a8/python/tvm/_ffi/runtime_ctypes.py#L91).
 */
struct DLPackTensorExt {
  DLManagedTensor* dl_managed_tensor;
  bool is_bool;
};

/*
 * An opaque wrapper pointing to a TVM runtime module.
 */
struct TVMContribTorchRuntimeModule;

/*
 * Obtain a saved runtime module passed by TVM FFI.
 * @return A TVM runtime module wrapper.
 */
TVMContribTorchRuntimeModule* tvm_contrib_torch_get_last_saved_runtime_module();

/*
 * Delete a TVMContribTorchRuntimeModule pointer.
 */
void tvm_contrib_torch_free_runtime_module(TVMContribTorchRuntimeModule* module_ptr);

/*
 * Obtain an ExecutorFactory runtime module from an ExecutorFactory class.
 * @param graph_executor_factory ExecutorFactory class
 * @param input_example For obtaining device information
 * @return ExecutorFactory TVM runtime module wrapper
 */
TVMContribTorchRuntimeModule* tvm_contrib_torch_create_graph_runtime_module(
    TVMContribTorchRuntimeModule* graph_executor_factory, DLManagedTensor* input_example);

/*
 * Forward method for OperatorModuleWrapper.
 * @param runtime_module TVM runtime module wrapper
 * @param inputs Array pointer of the input tensors
 * @param input_size The number of input tensors
 */
void tvm_contrib_torch_operator_module_forward(TVMContribTorchRuntimeModule* runtime_module,
                                               DLPackTensorExt* inputs, size_t input_size);

/*
 * Forward method for GraphExecutorFactoryWrapper.
 * @param graph_executor_factory TVM runtime module wrapper
 * @param inputs Array pointer of the input tensors
 * @param input_size The number of input tensors
 * @param outputs The resulting output tensors pointer
 * @return The number of output tensors
 */
size_t tvm_contrib_torch_graph_executor_module_forward(
    TVMContribTorchRuntimeModule* graph_executor_factory, DLPackTensorExt* inputs,
    size_t input_size, DLPackTensorExt** outputs);

/*
 * Encode a TVM runtime module.
 * @param runtime_module TVM runtime module wrapper
 * @return The encoding stream (char array)
 */
char* tvm_contrib_torch_encode(TVMContribTorchRuntimeModule* runtime_module);

/*
 * Decode a TVM runtime module.
 * @param state The encoding stream (char array) of TVM runtime module
 * @return TVM runtime module wrapper
 */
TVMContribTorchRuntimeModule* tvm_contrib_torch_decode(const char* state);

/*
 * Delete a DLPackTensorExt array pointer.
 */
void tvm_contrib_torch_free_dlpack_tensor_ext_array(DLPackTensorExt*);

/*
 * Delete a char array pointer (as returned by tvm_contrib_torch_encode).
 */
void tvm_contrib_torch_free_encoding(char* encoding);

/*
 * Check whether a DLPackTensorExt is boolean or cannot be copied in zero cost.
 */
bool tvm_contrib_torch_tensor_ability_of_zero_copy(DLPackTensorExt*);
}

#endif  // TVM_CONTRIB_TORCH_TVM_MODULE_WRAPPER_RUNTIME_BRIDGE_H_
https://github.com/zk-ml/tachikoma
src/contrib/torch/utils.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file utils.h
 * \brief Util functions for pytorch tvm interaction.
 */
#ifndef TVM_CONTRIB_TORCH_UTILS_H_
#define TVM_CONTRIB_TORCH_UTILS_H_

#include <dlpack/dlpack.h>
#include <torch/script.h>
#include <tvm/runtime/data_type.h>
#include <tvm/runtime/device_api.h>
#ifdef PT_TVMDSOOP_ENABLE_GPU
#include <cuda_runtime.h>
#endif

#include <string>
#include <vector>

namespace tvm {
namespace contrib {
namespace pytorch {

/*!
 * \brief Translate a PyTorch dtype into a DLPack DLDataType.
 * \param dtype The PyTorch element type.
 * \param res Output DLDataType, written only on success.
 * \return true when the dtype is supported, false otherwise.
 * \note torch::kBool maps to {kDLInt, 1, 1} — i.e. bool is represented as a
 *       1-bit int, mirroring the reverse mapping in GetTorchDtype below.
 */
inline bool GetTvmDtype(const caffe2::TypeMeta& dtype, DLDataType* res) noexcept {
  if (dtype == torch::kFloat16) {
    *res = {kDLFloat, 16, 1};
  } else if (dtype == torch::kFloat32) {
    *res = {kDLFloat, 32, 1};
  } else if (dtype == torch::kFloat64) {
    *res = {kDLFloat, 64, 1};
  } else if (dtype == torch::kInt8) {
    *res = {kDLInt, 8, 1};
  } else if (dtype == torch::kInt16) {
    *res = {kDLInt, 16, 1};
  } else if (dtype == torch::kInt32) {
    *res = {kDLInt, 32, 1};
  } else if (dtype == torch::kInt64) {
    *res = {kDLInt, 64, 1};
  } else if (dtype == torch::kUInt8) {
    *res = {kDLUInt, 8, 1};
  } else if (dtype == torch::kBool) {
    *res = {kDLInt, 1, 1};
  } else {
    return false;
  }
  return true;
}

/*!
 * \brief Translate a PyTorch dtype into a TVM runtime DataType.
 * \param dtype The PyTorch element type.
 * \param res Output DataType, written only on success.
 * \return true when the dtype is supported, false otherwise.
 */
inline bool GetTvmDtype(const caffe2::TypeMeta& dtype, tvm::runtime::DataType* res) noexcept {
  DLDataType dlpack_dtype;
  // Reuse the DLDataType mapping above, then wrap in TVM's DataType.
  if (!GetTvmDtype(dtype, &dlpack_dtype)) {
    return false;
  }
  *res = tvm::runtime::DataType(dlpack_dtype);
  return true;
}

/*!
 * \brief Translate a DLPack DLDataType into a PyTorch scalar type.
 * \param dtype The DLPack element type.
 * \param res Output scalar type, written only on success.
 * \return true when the dtype is supported, false otherwise.
 */
inline bool GetTorchDtype(const DLDataType& dtype, c10::ScalarType* res) noexcept {
  if (dtype.lanes != 1) {
    // only scalar type: vectorized (multi-lane) dtypes have no torch equivalent
    return false;
  }
  if (dtype.code == kDLFloat) {
    if (dtype.bits == 16) {
      *res = torch::kFloat16;
    } else if (dtype.bits == 32) {
      *res = torch::kFloat32;
    } else if (dtype.bits == 64) {
      *res = torch::kFloat64;
    } else {
      return false;
    }
  } else if (dtype.code == kDLInt) {
    if (dtype.bits == 16) {
      *res = torch::kInt16;
    } else if (dtype.bits == 32) {
      *res = torch::kInt32;
    } else if (dtype.bits == 64) {
      *res = torch::kInt64;
    } else if (dtype.bits == 1) {
      // int1 is the representation chosen for bool in GetTvmDtype above
      *res = torch::kBool;
    } else {
      return false;
    }
    // NOTE(review): kDLInt with bits == 8 is not handled here although
    // GetTvmDtype produces {kDLInt, 8, 1} for torch::kInt8 — confirm intended.
  } else if (dtype.code == kDLUInt) {
    if (dtype.bits == 8) {
      *res = torch::kUInt8;
    } else if (dtype.bits == 1) {
      *res = torch::kBool;
    } else {
      return false;
    }
  } else {
    return false;
  }
  return true;
}

/*!
 * \brief Translate a TVM runtime DataType into a PyTorch scalar type.
 * \param dtype The TVM element type.
 * \param res Output scalar type, written only on success.
 * \return true when the dtype is supported, false otherwise.
 */
inline bool GetTorchDtype(const tvm::runtime::DataType& dtype, c10::ScalarType* res) noexcept {
  using tvm::runtime::DataType;
  if (dtype == DataType::Float(16)) {
    *res = torch::kFloat16;
  } else if (dtype == DataType::Float(32)) {
    *res = torch::kFloat32;
  } else if (dtype == DataType::Float(64)) {
    *res = torch::kFloat64;
  } else if (dtype == DataType::Int(32)) {
    *res = torch::kInt32;
  } else if (dtype == DataType::Int(64)) {
    *res = torch::kInt64;
  } else if (dtype == DataType::Int(1)) {
    *res = torch::kBool;
  } else if (dtype == DataType::Int(8)) {
    *res = torch::kInt8;
  } else if (dtype == DataType::Int(16)) {
    *res = torch::kInt16;
  } else if (dtype == DataType::UInt(8)) {
    *res = torch::kUInt8;
  } else if (dtype == DataType::Bool()) {
    *res = torch::kBool;
  } else {
    return false;
  }
  return true;
}

// Buffer information used for actual computation.
// Each buffer is associated with one PyTorch tensor
// whose underlying buffer is record into "origin_buf".
// For input tensor, we copy data from origin_buf to buf
// and for output tensor, copy data from buf to origin_buf
class TensorAsBuf {
 public:
  /*!
   * \brief Wrap a PyTorch tensor as a TVM-alignable buffer.
   *
   * If the tensor's data pointer is already aligned to kAllocAlignment, the
   * tensor's own storage is used directly (zero copy). Otherwise a scratch
   * tensor with `alignment` extra bytes is allocated and an aligned offset
   * into it is used; data is then shuttled via CopyFromOrigin/CopyToOrigin.
   */
  explicit TensorAsBuf(const at::Tensor& tensor)
      : pt_device_type_(tensor.device().type()),
        device_id_(tensor.device().index()),
        origin_shape_(tensor.sizes().begin(), tensor.sizes().end()) {
    CHECK(pt_device_type_ == torch::kCUDA || pt_device_type_ == torch::kCPU);
    device_type_ = (pt_device_type_ == torch::kCUDA ? kDLCUDA : kDLCPU);

    char* buf = static_cast<char*>(tensor.data_ptr());
    this->origin_buf_ = buf;
    this->size_ = tensor.nbytes();

    // const int alignment = 64;
    const int alignment = tvm::runtime::kAllocAlignment;
    // Standard round-up-to-alignment of the original data pointer.
    char* aligned = reinterpret_cast<char*>(((uint64_t)buf + alignment - 1) & (~(alignment - 1)));
    if (buf == aligned) {
      // Already aligned: reuse the tensor's storage as-is.
      this->tensor_ = tensor;
      this->buf_ = buf;
      this->offset_ = 0;
    } else {
      // Allocate a scratch tensor with `alignment` slack bytes so an aligned
      // window of size_ bytes always fits.
      const auto options =
          torch::TensorOptions().dtype(tensor.dtype()).device(pt_device_type_, device_id_);
      this->inline_tensor_ =
          torch::empty({static_cast<int64_t>(tensor.nbytes() + alignment)}, options);
      this->tensor_ = this->inline_tensor_;

      buf = static_cast<char*>(this->tensor_.data_ptr());
      // NOTE(review): this rounds up using `+ alignment` rather than the
      // `+ alignment - 1` form above, so an already-aligned scratch pointer is
      // bumped by a full alignment step. Both stay within the slack bytes;
      // confirm whether the asymmetry is intentional.
      char* buf_aligned = reinterpret_cast<char*>(((uint64_t)buf + alignment) & (~(alignment - 1)));
      this->buf_ = buf;
      this->offset_ = buf_aligned - buf;
    }
  }

  /*! \brief Copy data from the (aligned) working buffer back to the tensor's original storage. */
  void CopyToOrigin() {
    if (buf_ == origin_buf_) {
      // Zero-copy case: working buffer is the original storage.
      return;
    }
    if (device_type_ == kDLCPU) {
      memcpy(origin_buf_, buf_ + offset_, size_);
#ifdef PT_TVMDSOOP_ENABLE_GPU
    } else if (device_type_ == kDLCUDA) {
      cudaMemcpy(origin_buf_, buf_ + offset_, size_, cudaMemcpyDeviceToDevice);
#endif
    } else {
      LOG(FATAL) << "Only support CPU and CUDA now. Device " << device_type_
                 << " is not implemented currently";
    }
  }

  /*! \brief Copy data from the tensor's original storage into the (aligned) working buffer. */
  void CopyFromOrigin() {
    if (buf_ == origin_buf_) {
      // Zero-copy case: working buffer is the original storage.
      return;
    }
    if (device_type_ == kDLCPU) {
      memcpy(buf_ + offset_, origin_buf_, size_);
#ifdef PT_TVMDSOOP_ENABLE_GPU
    } else if (device_type_ == kDLCUDA) {
      cudaMemcpy(buf_ + offset_, origin_buf_, size_, cudaMemcpyDeviceToDevice);
#endif
    } else {
      LOG(FATAL) << "Only support CPU and CUDA now. Device " << device_type_
                 << " is not implemented currently";
    }
  }

  // Create DLPack tensor from PyTorch tensor
  /*!
   * \brief Fill a DLTensor view over the aligned working buffer.
   * \param out The DLTensor to populate (shape points at origin_shape_, so
   *            `out` must not outlive this TensorAsBuf).
   */
  void MakeDLTensor(DLTensor* out) {
    const DLDevice dl_ctx{DLDeviceType(device_type_), device_id_};
    DLDataType dlpack_type;
    const auto& tensor = this->tensor_;

    CHECK(GetTvmDtype(tensor.dtype(), &dlpack_type));

    out->device = dl_ctx;
    out->ndim = origin_shape_.size();
    out->shape = origin_shape_.data();
    out->strides = nullptr;  // nullptr means compact row-major layout in DLPack
    out->byte_offset = 0;
    out->dtype = dlpack_type;
    out->data = buf_ + offset_;
  }

  /*! \brief Human-readable dump of the buffer metadata, for debugging. */
  std::string DebugString() {
    std::stringstream ss;
    ss << "dl device: " << device_type_ << "\npt device: " << static_cast<int>(pt_device_type_)
       << "\ndevice_id: " << device_id_ << "\nsize: " << size_ << "\noffset: " << offset_
       << "\nshape:";
    for (auto dim : origin_shape_) {
      ss << ' ' << dim;
    }
    ss << std::endl;
    return ss.str();
  }

 private:
  // DLPack device type (kDLCPU or kDLCUDA), derived from pt_device_type_.
  DLDeviceType device_type_;
  // Original PyTorch device type (torch::kCPU or torch::kCUDA).
  c10::DeviceType pt_device_type_;
  int device_id_;
  // Scratch tensor owning the aligned buffer (only set on the unaligned path).
  at::Tensor inline_tensor_;
  // Tensor whose storage backs buf_ (either the original or inline_tensor_).
  at::Tensor tensor_;
  size_t size_;
  // Byte offset from buf_ to the aligned working address.
  size_t offset_;
  std::vector<int64_t> origin_shape_;
  // Data pointer of the caller's original tensor.
  char* origin_buf_;
  // Base data pointer of tensor_'s storage.
  char* buf_;
};
}  // namespace pytorch
}  // namespace contrib
}  // namespace tvm
#endif  // TVM_CONTRIB_TORCH_UTILS_H_
https://github.com/zk-ml/tachikoma
src/driver/internal_driver_api.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file src/driver/driver_api.h
 * \brief Internal compiler driver APIs to drive the compilation.
 *
 * This module provides functionality that may be called internally
 * within TVM, but is not part of the public-facing API.
 */
#ifndef TVM_DRIVER_INTERNAL_DRIVER_API_H_
#define TVM_DRIVER_INTERNAL_DRIVER_API_H_

#include <tvm/ir/module.h>
#include <tvm/target/target.h>

namespace tvm {

/*!
 * \brief Build a device and host module for a specific target from a map
 * contains target to IRModule. This function is used
 * for heterogeneous build.
 * \param input The map contains target to an IRModule.
 * \param target_host The target for building host code. To use the default,
 *        pass Target().
 * \return The built module that contains code for different processors.
 */
runtime::Module TIRToRuntime(const Map<Target, IRModule>& input, const Target& target_host);

}  // namespace tvm

#endif  // TVM_DRIVER_INTERNAL_DRIVER_API_H_
https://github.com/zk-ml/tachikoma
src/ir/attr_functor.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file attr_functor.h
 * \brief A way to define arbitrary function signature
 *        with dispatch on common attributes.
 *
 * Common attributes include:
 *  - int, float, str constants
 *  - array of attributes
 *  - map of attributes
 */
#ifndef TVM_IR_ATTR_FUNCTOR_H_
#define TVM_IR_ATTR_FUNCTOR_H_

#include <tvm/node/functor.h>
#include <tvm/tir/expr.h>

#include <utility>

namespace tvm {

template <typename FType>
class AttrFunctor;

// Default handler body: route unhandled node types to VisitAttrDefault_.
#define ATTR_FUNCTOR_DEFAULT \
  { return VisitAttrDefault_(op, std::forward<Args>(args)...); }

// Register a vtable entry that downcasts the node to OP and forwards to the
// matching VisitAttr_ overload on the concrete functor (`self`).
#define ATTR_FUNCTOR_DISPATCH(OP)                                                          \
  vtable.template set_dispatch<OP>([](const ObjectRef& n, TSelf* self, Args... args) {     \
    return self->VisitAttr_(static_cast<const OP*>(n.get()), std::forward<Args>(args)...); \
  });

// A functor for common attribute information.
template <typename R, typename... Args>
class AttrFunctor<R(const ObjectRef& n, Args...)> {
 private:
  using TSelf = AttrFunctor<R(const ObjectRef& n, Args...)>;
  using FType = tvm::NodeFunctor<R(const ObjectRef& n, TSelf* self, Args...)>;

 public:
  /*! \brief the result type of this functor */
  using result_type = R;
  /*! \brief virtual destructor */
  virtual ~AttrFunctor() {}
  /*!
   * \brief The functor call.
   * \param n The expression node.
   * \param args Additional arguments.
   * \return The result of the call
   */
  virtual R VisitAttr(const ObjectRef& n, Args... args) {
    // Lazily-built static vtable shared by all instances of this instantiation.
    static FType vtable = InitVTable();
    if (vtable.can_dispatch(n)) {
      return vtable(n, this, std::forward<Args>(args)...);
    } else {
      return VisitAttrDefault_(n.get(), std::forward<Args>(args)...);
    }
  }
  virtual R VisitAttrDefault_(const Object* node, Args... args) = 0;
  virtual R VisitAttr_(const ArrayNode* op, Args... args) ATTR_FUNCTOR_DEFAULT;
  virtual R VisitAttr_(const tir::IntImmNode* op, Args... args) ATTR_FUNCTOR_DEFAULT;
  virtual R VisitAttr_(const tir::FloatImmNode* op, Args... args) ATTR_FUNCTOR_DEFAULT;
  virtual R VisitAttr_(const tir::StringImmNode* op, Args... args) ATTR_FUNCTOR_DEFAULT;
  // deep comparison of symbolic integer expressions.
  virtual R VisitAttr_(const tir::VarNode* op, Args... args) ATTR_FUNCTOR_DEFAULT;
  // SizeVar is treated as a plain Var by default.
  virtual R VisitAttr_(const tir::SizeVarNode* op, Args... args) {
    return VisitAttr_(static_cast<const tir::VarNode*>(op), std::forward<Args>(args)...);
  }
  virtual R VisitAttr_(const tir::AddNode* op, Args... args) ATTR_FUNCTOR_DEFAULT;
  virtual R VisitAttr_(const tir::SubNode* op, Args... args) ATTR_FUNCTOR_DEFAULT;
  virtual R VisitAttr_(const tir::MulNode* op, Args... args) ATTR_FUNCTOR_DEFAULT;
  virtual R VisitAttr_(const tir::DivNode* op, Args... args) ATTR_FUNCTOR_DEFAULT;
  virtual R VisitAttr_(const tir::ModNode* op, Args... args) ATTR_FUNCTOR_DEFAULT;
  virtual R VisitAttr_(const tir::FloorDivNode* op, Args... args) ATTR_FUNCTOR_DEFAULT;
  virtual R VisitAttr_(const tir::FloorModNode* op, Args... args) ATTR_FUNCTOR_DEFAULT;
  virtual R VisitAttr_(const tir::MinNode* op, Args... args) ATTR_FUNCTOR_DEFAULT;
  virtual R VisitAttr_(const tir::MaxNode* op, Args... args) ATTR_FUNCTOR_DEFAULT;
  virtual R VisitAttr_(const tir::GENode* op, Args... args) ATTR_FUNCTOR_DEFAULT;
  virtual R VisitAttr_(const tir::GTNode* op, Args... args) ATTR_FUNCTOR_DEFAULT;
  virtual R VisitAttr_(const tir::LTNode* op, Args... args) ATTR_FUNCTOR_DEFAULT;
  virtual R VisitAttr_(const tir::LENode* op, Args... args) ATTR_FUNCTOR_DEFAULT;
  virtual R VisitAttr_(const tir::EQNode* op, Args... args) ATTR_FUNCTOR_DEFAULT;
  virtual R VisitAttr_(const tir::NENode* op, Args... args) ATTR_FUNCTOR_DEFAULT;
  virtual R VisitAttr_(const tir::AndNode* op, Args... args) ATTR_FUNCTOR_DEFAULT;
  virtual R VisitAttr_(const tir::OrNode* op, Args... args) ATTR_FUNCTOR_DEFAULT;
  virtual R VisitAttr_(const tir::NotNode* op, Args... args) ATTR_FUNCTOR_DEFAULT;
  virtual R VisitAttr_(const tir::CastNode* op, Args... args) ATTR_FUNCTOR_DEFAULT;
  virtual R VisitAttr_(const tir::CallNode* op, Args... args) ATTR_FUNCTOR_DEFAULT;
  virtual R VisitAttr_(const tir::SelectNode* op, Args... args) ATTR_FUNCTOR_DEFAULT;

 private:
  // initialize the vtable.
  static FType InitVTable() {
    using namespace tir;
    FType vtable;
    // Set dispatch
    ATTR_FUNCTOR_DISPATCH(ArrayNode);
    ATTR_FUNCTOR_DISPATCH(IntImmNode);
    ATTR_FUNCTOR_DISPATCH(FloatImmNode);
    ATTR_FUNCTOR_DISPATCH(StringImmNode);
    ATTR_FUNCTOR_DISPATCH(VarNode);
    ATTR_FUNCTOR_DISPATCH(SizeVarNode);
    ATTR_FUNCTOR_DISPATCH(AddNode);
    ATTR_FUNCTOR_DISPATCH(SubNode);
    ATTR_FUNCTOR_DISPATCH(MulNode);
    ATTR_FUNCTOR_DISPATCH(DivNode);
    ATTR_FUNCTOR_DISPATCH(ModNode);
    ATTR_FUNCTOR_DISPATCH(FloorDivNode);
    ATTR_FUNCTOR_DISPATCH(FloorModNode);
    ATTR_FUNCTOR_DISPATCH(MinNode);
    ATTR_FUNCTOR_DISPATCH(MaxNode);
    ATTR_FUNCTOR_DISPATCH(GENode);
    ATTR_FUNCTOR_DISPATCH(GTNode);
    ATTR_FUNCTOR_DISPATCH(LENode);
    ATTR_FUNCTOR_DISPATCH(LTNode);
    ATTR_FUNCTOR_DISPATCH(EQNode);
    ATTR_FUNCTOR_DISPATCH(NENode);
    ATTR_FUNCTOR_DISPATCH(AndNode);
    ATTR_FUNCTOR_DISPATCH(OrNode);
    ATTR_FUNCTOR_DISPATCH(NotNode);
    ATTR_FUNCTOR_DISPATCH(CastNode);
    ATTR_FUNCTOR_DISPATCH(CallNode);
    ATTR_FUNCTOR_DISPATCH(SelectNode);
    return vtable;
  }
};

}  // namespace tvm
#endif  // TVM_IR_ATTR_FUNCTOR_H_
https://github.com/zk-ml/tachikoma
src/meta_schedule/module_equality.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
#ifndef TVM_META_SCHEDULE_MODULE_EQUALITY_H_
#define TVM_META_SCHEDULE_MODULE_EQUALITY_H_

#include <tvm/ir/module.h>

#include <memory>
#include <string>

namespace tvm {
namespace meta_schedule {

/*! \brief Method to compute hash and determine equality of modules */
class ModuleEquality {
 public:
  virtual ~ModuleEquality() = default;

  /*! \brief Compute a hash code for the given module. */
  virtual size_t Hash(IRModule mod) const = 0;
  /*! \brief Determine whether two modules are equal under this method. */
  virtual bool Equal(IRModule lhs, IRModule rhs) const = 0;

  /*!
   * \brief Create a ModuleEquality instance
   * \param mod_eq_name A string to specify the module equality testing and hashing method.
   *  It must be one of the followings:
   *    - "structural": Use StructuralEqual/Hash
   *    - "ignore-ndarray": Same as "structural", but ignore ndarray raw data during
   *                        equality testing and hashing.
   *    - "anchor-block": Apply equality testing and hashing on the anchor block extracted from a
   *                      given module. The "ignore-ndarray" varint is used for the extracted blocks
   *                      or in case no anchor block is found.
   *                      For the definition of the anchor block, see tvm/tir/analysis.h.
   * \return An owning pointer to the created instance
   */
  static std::unique_ptr<ModuleEquality> Create(const std::string& mod_eq_name);
};

/*! \brief Functor to compute hash a module using the provided method. */
// Suitable as the Hash template argument of std::unordered_map keyed by IRModule.
class ModuleHash {
 public:
  explicit ModuleHash(const ModuleEquality& mod_eq) : mod_eq_(mod_eq) {}
  size_t operator()(const IRModule& mod) const { return mod_eq_.Hash(mod); }

 private:
  // Non-owning reference; the ModuleEquality must outlive this functor.
  const ModuleEquality& mod_eq_;
};

/*! \brief Functor to determine equality of modules using the provided method. */
// Suitable as the KeyEqual template argument of std::unordered_map keyed by IRModule.
class ModuleEqual {
 public:
  explicit ModuleEqual(const ModuleEquality& mod_eq) : mod_eq_(mod_eq) {}
  bool operator()(const IRModule& lhs, const IRModule& rhs) const {
    return mod_eq_.Equal(lhs, rhs);
  }

 private:
  // Non-owning reference; the ModuleEquality must outlive this functor.
  const ModuleEquality& mod_eq_;
};

}  // namespace meta_schedule
}  // namespace tvm

#endif  // TVM_META_SCHEDULE_MODULE_EQUALITY_H_
https://github.com/zk-ml/tachikoma
src/meta_schedule/schedule_rule/multi_level_tiling.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
#ifndef TVM_META_SCHEDULE_SCHEDULE_RULE_MULTI_LEVEL_TILING_H_
#define TVM_META_SCHEDULE_SCHEDULE_RULE_MULTI_LEVEL_TILING_H_

#include <tvm/meta_schedule/schedule_rule.h>
#include <tvm/tir/schedule/schedule.h>

#include <unordered_map>
#include <utility>
#include <vector>

#include "../../support/array.h"

namespace tvm {
namespace meta_schedule {

/*!
 * \brief Configuration of data reuse type:
 * 0) kNoReuse: no reuse is allowed, then no cache_read/write is performed.
 * 1) kMayReuse: reuse is allowed; both the reuse and no-reuse options are explored.
 * 2) kMustReuse: reuse is allowed and the no-reuse option is not explored.
 */
enum class ReuseType : int32_t {
  kNoReuse = 0,
  kMayReuse = 1,
  kMustReuse = 2,
};

/*!
 * \brief Converts a string to ReuseType.
 * \param str The string to be converted.
 * \return The converted ReuseType.
 */
inline ReuseType Str2ReuseType(const String& str) {
  if (str == "no") {
    return ReuseType::kNoReuse;
  } else if (str == "may") {
    return ReuseType::kMayReuse;
  } else if (str == "must") {
    return ReuseType::kMustReuse;
  } else {
    LOG(FATAL) << "ValueError: Unknown ReuseType: " << str;
    throw;  // unreachable: LOG(FATAL) aborts; silences missing-return warnings
  }
}

/*! \brief Configuration of data reuse patterns */
struct ReuseConfig {
  /*! \brief Type of data reuse: no-reuse, may-reuse or must-reuse */
  ReuseType req;
  /*! \brief Which levels are caching stage inserted at */
  std::vector<int> levels;
  /*! \brief The storage scope */
  String scope;

  /*! \brief Default constructor: no data reuse */
  ReuseConfig() : req(ReuseType::kNoReuse) {}

  /*! \brief Construct from a configuration dictionary with keys "req", "levels", "scope" */
  explicit ReuseConfig(const Map<String, ObjectRef>& config)
      : req(Str2ReuseType(Downcast<String>(config.at("req")))),
        levels(support::AsVector<Integer, int>(Downcast<Array<Integer>>(config.at("levels")))),
        scope(Downcast<String>(config.at("scope"))) {
    ICHECK_EQ(config.size(), 3);
  }
};

// Forward declaration
class State;

/*! \brief The state of auto scheduling for the multi-level tiling rule */
class StateNode : public Object {
 public:
  /*! \brief The schedule to date */
  tir::Schedule sch;
  /*! \brief The block to be tiled */
  tir::BlockRV block_rv;
  /*! \brief The loop tiles */
  Array<Array<tir::LoopRV>> tiles;
  /*! \brief The mapping from buffer index to read cache block. */
  std::unordered_map<int, tir::BlockRV> read_reuse;
  /*! \brief The mapping from buffer index to write cache block. */
  std::unordered_map<int, tir::BlockRV> write_reuse;

  /*!
   * \brief Create a copy of the state. The underlying schedule is copied. Schedule rules that
   * produce multiple states should use this method to create new states.
   */
  virtual State Copy() const;

  static constexpr const char* _type_key = "meta_schedule.State";
  TVM_DECLARE_BASE_OBJECT_INFO(StateNode, Object);
};

/*! \brief Managed reference to StateNode */
class State : public ObjectRef {
 public:
  /*! \brief Default constructor */
  explicit State(tir::Schedule sch, tir::BlockRV block_rv, Array<Array<tir::LoopRV>> tiles = {});
  TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(State, ObjectRef, StateNode);
};

/*!
 * \brief Helper to apply a sub-rule to a list of auto scheduling states
 * \tparam FLambda The type of the sub-rule functor
 * \param states The list of states to be applied
 * \param sub_rule The sub-rule to apply; maps one state to zero or more successor states
 * \return The list of states after applying the sub-rule
 */
template <class FLambda>
std::vector<State> SubRule(std::vector<State> states, FLambda sub_rule) {
  std::vector<State> results;
  for (auto&& state : states) {
    std::vector<State> next = sub_rule(std::move(state));
    results.insert(results.end(),                          //
                   std::make_move_iterator(next.begin()),  //
                   std::make_move_iterator(next.end()));
  }
  return results;
}

/*!
 * \brief The mega rule: multi-level tiling with data reuse
 */
class MultiLevelTilingNode : public ScheduleRuleNode {
 public:
  virtual ~MultiLevelTilingNode() = default;

  // SubRule 1. add write cache
  std::vector<State> AddWriteReuse(State state) const;
  // SubRule 2. tile the loop nest
  std::vector<State> TileLoopNest(State state) const;
  // SubRule 3. add read cache
  std::vector<State> AddReadReuse(State state) const;

  // Do nothing; Inherited from ScheduleRuleNode
  void InitializeWithTuneContext(const TuneContext& context) final;

  // Entry of the mega rule; Inherited from ScheduleRuleNode
  Array<tir::Schedule> Apply(const tir::Schedule& sch, const tir::BlockRV& block_rv) override;

  // Inherited from ScheduleRuleNode
  ScheduleRule Clone() const override;

 protected:
  virtual std::vector<State> ApplySubRules(std::vector<State> states);

  virtual Array<tir::LoopRV> SplitLoop(const tir::Schedule& sch, tir::BlockRV block,
                                       tir::LoopRV loop, int n_tiles) const;

  // Annotate a block to use cooperative fetching
  void AnnotateCooperativeFetching(tir::Schedule* sch, const tir::BlockRV& block) const;

 public:
  /*!
   * \brief The tiling structure. Recommended:
   * - 'SSRSRS' on CPU
   * - 'SSSRRSRS' on GPU
   */
  String structure;
  /*! \brief For each level of tiles, which thread axis it is bound to */
  Array<String> tile_binds;
  /*! \brief The maximum size of the innermost factor */
  int max_innermost_factor;
  /*! \brief The length of vector lane in vectorized cooperative fetching */
  std::vector<int> vector_load_lens;
  /*! \brief Data reuse configuration for reading */
  ReuseConfig reuse_read_;
  /*! \brief Data reuse configuration for writing */
  ReuseConfig reuse_write_;
  /*! \brief The indices of spatial tiles in `structure` */
  std::vector<int> s_indices_;
  /*! \brief The indices of reduction tiles in `structure` */
  std::vector<int> r_indices_;
  /*! \brief The size of the thread warp */
  int thread_warp_size_;
  /*! \brief The maximum number of threads per block */
  int max_threads_per_block_;
  /*! \brief The logging function */
  PackedFunc logger;

  void VisitAttrs(tvm::AttrVisitor* v) {
    v->Visit("structure", &structure);
    v->Visit("tile_binds", &tile_binds);
    v->Visit("max_innermost_factor", &max_innermost_factor);
    // `vector_load_lens` is not visited
    // `reuse_read_` is not visited
    // `reuse_write_` is not visited
    // `s_indices_` is not visited
    // `r_indices_` is not visited
    // `thread_warp_size_` is not visited
    // `max_threads_per_block_` is not visited
  }

  static constexpr const char* _type_key = "meta_schedule.MultiLevelTiling";
  TVM_DECLARE_BASE_OBJECT_INFO(MultiLevelTilingNode, ScheduleRuleNode);
};

/*!
 * \brief Shared constructor helper: populate the common fields of a
 * MultiLevelTiling-derived node from the user-facing optional parameters.
 */
template <typename NodeType>
ObjectPtr<NodeType> MultiLevelTilingInitCommon(String structure, Optional<Array<String>> tile_binds,
                                               Optional<Integer> max_innermost_factor,
                                               Optional<Array<Integer>> vector_load_lens,
                                               Optional<Map<String, ObjectRef>> reuse_read,
                                               Optional<Map<String, ObjectRef>> reuse_write) {
  ObjectPtr<NodeType> n = make_object<NodeType>();
  n->structure = structure;
  n->tile_binds = tile_binds.value_or({});
  n->max_innermost_factor = max_innermost_factor.value_or(Integer(-1))->value;
  n->vector_load_lens = vector_load_lens.defined()
                            ? support::AsVector<Integer, int>(vector_load_lens.value())
                            : std::vector<int>();
  n->reuse_read_ = reuse_read.defined() ? ReuseConfig(reuse_read.value()) : ReuseConfig();
  n->reuse_write_ = reuse_write.defined() ? ReuseConfig(reuse_write.value()) : ReuseConfig();
  // Partition tile levels into spatial ('S') and reduction ('R') indices.
  for (int i = 0, len = structure.size(); i < len; ++i) {
    char c = structure.data()[i];
    if (c == 'S') {
      n->s_indices_.push_back(i);
    } else if (c == 'R') {
      n->r_indices_.push_back(i);
    } else {
      LOG(FATAL) << "ValueError: Invalid tiling structure: " << structure;
    }
  }
  // Hardware parameters are filled in later by InitializeWithTuneContext.
  n->thread_warp_size_ = -1;
  n->max_threads_per_block_ = -1;
  return n;
}

}  // namespace meta_schedule
}  // namespace tvm

#endif  // TVM_META_SCHEDULE_SCHEDULE_RULE_MULTI_LEVEL_TILING_H_
https://github.com/zk-ml/tachikoma
src/meta_schedule/trace_apply.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
#ifndef TVM_META_SCHEDULE_TRACE_APPLY_H_
#define TVM_META_SCHEDULE_TRACE_APPLY_H_

#include <tvm/meta_schedule/schedule_rule.h>
#include <tvm/target/target.h>
#include <tvm/tir/schedule/schedule.h>
#include <tvm/tir/schedule/trace.h>

#include <string>

namespace tvm {
namespace meta_schedule {

/*!
 * \brief Apply the trace from a TIR module whose anchor block is the same but fused elementwise
 * op blocks differ. This function can be used for transferring a trace tuned on a conv2d -> add
 * subgraph to other subgraphs having the same conv2d workload, for example. We call such trace
 * an "anchor trace". Those blocks that are not scheduled by the given anchor trace will be either
 * inlined or parallelized.
 * \param sch The schedule to apply the anchor trace.
 * \param anchor_trace The trace tuned on other subgraph with the same anchor-block workload.
 * \param target The target information needed for inlining and parallelization.
 */
void ScheduleUsingAnchorTrace(tir::Schedule sch, const tir::Trace& anchor_trace,
                              const tvm::Target& target);

}  // namespace meta_schedule
}  // namespace tvm

#endif  // TVM_META_SCHEDULE_TRACE_APPLY_H_
https://github.com/zk-ml/tachikoma
src/meta_schedule/utils.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ #ifndef TVM_META_SCHEDULE_UTILS_H_ #define TVM_META_SCHEDULE_UTILS_H_ #include <dmlc/memory_io.h> #include <tvm/arith/analyzer.h> #include <tvm/meta_schedule/arg_info.h> #include <tvm/meta_schedule/builder.h> #include <tvm/meta_schedule/cost_model.h> #include <tvm/meta_schedule/database.h> #include <tvm/meta_schedule/extracted_task.h> #include <tvm/meta_schedule/feature_extractor.h> #include <tvm/meta_schedule/measure_callback.h> #include <tvm/meta_schedule/profiler.h> #include <tvm/meta_schedule/runner.h> #include <tvm/meta_schedule/schedule_rule.h> #include <tvm/meta_schedule/search_strategy.h> #include <tvm/meta_schedule/space_generator.h> #include <tvm/meta_schedule/task_scheduler.h> #include <tvm/meta_schedule/tune_context.h> #include <tvm/node/node.h> #include <tvm/node/serialization.h> #include <tvm/runtime/container/optional.h> #include <tvm/support/parallel_for.h> #include <tvm/tir/schedule/schedule.h> #include <tvm/tir/transform.h> #include <algorithm> #include <string> #include <unordered_set> #include <utility> #include <vector> #include "../printer/text_printer.h" #include "../support/array.h" #include "../support/base64.h" #include "../support/nd_int_set.h" #include 
"../support/table_printer.h" #include "../support/utils.h" #include "../tir/schedule/primitive.h" #include "../tir/schedule/utils.h" #define TVM_PY_LOG(logging_level, logger) \ ::tvm::meta_schedule::PyLogMessage(__FILE__, __LINE__, logger, \ PyLogMessage::Level::logging_level) \ .stream() #define TVM_PY_LOG_CLEAR_SCREEN(logging_func) clear_logging(__FILE__, __LINE__, logging_func) namespace tvm { namespace meta_schedule { /*! * \brief Class to accumulate an log message on the python side. Do not use directly, instead use * TVM_PY_LOG(DEBUG), TVM_PY_LOG(INFO), TVM_PY_LOG(WARNING), TVM_PY_ERROR(ERROR). * \sa TVM_PY_LOG * \sa TVM_PY_LOG_CLEAR_SCREEN */ class PyLogMessage { public: enum class Level : int32_t { CLEAR = -10, DEBUG = 10, INFO = 20, WARNING = 30, ERROR = 40, // FATAL not included }; explicit PyLogMessage(const char* filename, int lineno, PackedFunc logger, Level logging_level) : filename_(filename), lineno_(lineno), logger_(logger), logging_level_(logging_level) {} TVM_NO_INLINE ~PyLogMessage() { ICHECK(logging_level_ != Level::CLEAR) << "Cannot use CLEAR as logging level in TVM_PY_LOG, please use TVM_PY_LOG_CLEAR_SCREEN."; if (this->logger_ != nullptr) { logger_(static_cast<int>(logging_level_), std::string(filename_), lineno_, stream_.str()); } else { if (logging_level_ == Level::INFO) { runtime::detail::LogMessage(filename_, lineno_, TVM_LOG_LEVEL_INFO).stream() << stream_.str(); } else if (logging_level_ == Level::WARNING) { runtime::detail::LogMessage(filename_, lineno_, TVM_LOG_LEVEL_WARNING).stream() << stream_.str(); } else if (logging_level_ == Level::ERROR) { runtime::detail::LogMessage(filename_, lineno_, TVM_LOG_LEVEL_ERROR).stream() << stream_.str(); } else if (logging_level_ == Level::DEBUG) { runtime::detail::LogMessage(filename_, lineno_, TVM_LOG_LEVEL_DEBUG).stream() << stream_.str(); } else { runtime::detail::LogFatal(filename_, lineno_).stream() << stream_.str(); } } } std::ostringstream& stream() { return stream_; } private: const char* 
filename_; int lineno_; std::ostringstream stream_; PackedFunc logger_; Level logging_level_; }; /*! * \brief Whether the tuning is running on ipython kernel. * \return A boolean indicating whether ipython kernel is used. */ inline bool using_ipython() { bool flag = false; const auto* f_using_ipython = runtime::Registry::Get("meta_schedule.using_ipython"); if (f_using_ipython) { flag = (*f_using_ipython)(); } return flag; } /*! * \brief Print out the performance table interactively in jupyter notebook. * \param str The serialized performance table. */ inline void print_interactive_table(const String& data) { const auto* f_print_interactive_table = runtime::Registry::Get("meta_schedule.print_interactive_table"); ICHECK(f_print_interactive_table->defined()) << "Cannot find print_interactive_table function in registry."; (*f_print_interactive_table)(data); } /*! * \brief A helper function to clear logging output for ipython kernel and console. * \param file The file name. * \param lineno The line number. * \param logging_func The logging function. */ inline void clear_logging(const char* file, int lineno, PackedFunc logging_func) { if (logging_func.defined() && using_ipython()) { logging_func(static_cast<int>(PyLogMessage::Level::CLEAR), file, lineno, ""); } else { // this would clear all logging output in the console runtime::detail::LogMessage(file, lineno, TVM_LOG_LEVEL_INFO).stream() << "\033c\033[3J\033[2J\033[0m\033[H"; } } /*! \brief The type of the random state */ using TRandState = support::LinearCongruentialEngine::TRandState; /*! * \brief Get the base64 encoded result of a string. * \param str The string to encode. * \return The base64 encoded string. */ inline std::string Base64Encode(std::string str) { std::string result; dmlc::MemoryStringStream m_stream(&result); support::Base64OutStream b64stream(&m_stream); static_cast<dmlc::Stream*>(&b64stream)->Write(str); b64stream.Finish(); return result; } /*! * \brief Get the base64 decoded result of a string. 
* \param str The string to decode. * \return The base64 decoded string. */ inline std::string Base64Decode(std::string str) { std::string result; dmlc::MemoryStringStream m_stream(&str); support::Base64InStream b64stream(&m_stream); b64stream.InitPosition(); static_cast<dmlc::Stream*>(&b64stream)->Read(&result); return result; } /*! * \brief Parses a json string into a json object. * \param json_str The json string. * \return The json object */ ObjectRef JSONLoads(std::string json_str); /*! * \brief Dumps a json object into a json string. * \param json_obj The json object. * \return The json string */ std::string JSONDumps(ObjectRef json_obj); /*! * \brief Converts a structural hash code to string * \param hash_code The hash code * \return The string representation of the hash code */ inline String SHash2Str(Workload::THashCode hash_code) { return std::to_string(hash_code); } /*! * \brief Converts an TVM object to the hex string representation of its structural hash. * \param obj The TVM object. * \return The hex string representation of the hash code. */ inline String SHash2Hex(const ObjectRef& obj) { std::ostringstream os; size_t hash_code = 0; if (obj.defined()) { hash_code = StructuralHash()(obj); } os << "0x" << std::setw(16) << std::setfill('0') << std::hex << hash_code; return os.str(); } /*! * \brief Fork a random state into another, i.e. PRNG splitting. * The given random state is also mutated. * \param rand_state The random state to be forked * \return The forked random state */ inline support::LinearCongruentialEngine::TRandState ForkSeed( support::LinearCongruentialEngine::TRandState* rand_state) { return support::LinearCongruentialEngine(rand_state).ForkSeed(); } /*! * \brief Fork a random state into another ones, i.e. PRNG splitting. * The given random state is also mutated. 
* \param rand_state The random state to be forked * \param n The number of forks * \return The forked random states */ inline std::vector<support::LinearCongruentialEngine::TRandState> ForkSeed( support::LinearCongruentialEngine::TRandState* rand_state, int n) { std::vector<support::LinearCongruentialEngine::TRandState> results; results.reserve(n); for (int i = 0; i < n; ++i) { results.push_back(support::LinearCongruentialEngine(rand_state).ForkSeed()); } return results; } /*! * \brief Get deep copy of an IRModule. * \param mod The IRModule to make a deep copy. * \return The deep copy of the IRModule. */ inline IRModule DeepCopyIRModule(IRModule mod) { return Downcast<IRModule>(LoadJSON(SaveJSON(mod))); } /*! * \brief Concatenate strings * \param strs The strings to concatenate * \param delim The delimiter * \return The concatenated string */ inline std::string Concat(const Array<String>& strs, const std::string& delim) { if (strs.empty()) { return ""; } std::ostringstream os; os << strs[0]; for (int i = 1, n = strs.size(); i < n; ++i) { os << delim << strs[i]; } return os.str(); } /*! * \brief Get the BlockRV from a block StmtSRef * \param sch The schedule * \param block_sref The block StmtSRef * \param global_var_name The global variable name * \return The BlockRV */ inline tir::BlockRV GetRVFromSRef(const tir::Schedule& sch, const tir::StmtSRef& block_sref, const String& global_var_name) { const tir::BlockNode* block = TVM_SREF_TO_BLOCK(block_sref); return sch->GetBlock(block->name_hint, global_var_name); } /*! * \brief A helper data structure that replays a trace and collects failure counts * for each postprocessor */ struct ThreadedTraceApply { /*! \brief Constructor */ explicit ThreadedTraceApply(const Array<Postproc>& postprocs) : n_(postprocs.size()), items_(new Item[n_]) { for (int i = 0; i < n_; ++i) { items_[i].postproc = postprocs[i]; items_[i].fail_counter = 0; } } /*! \brief Destructor */ ~ThreadedTraceApply() { delete[] items_; } /*! 
* \brief Apply the trace and postprocessors to an IRModule * \param mod The IRModule to be applied * \param trace The trace to apply to the IRModule * \param rand_state The random seed * \return The schedule created, or NullOpt if any postprocessor fails */ Optional<tir::Schedule> Apply(const IRModule& mod, const tir::Trace& trace, TRandState* rand_state) { tir::Schedule sch = tir::Schedule::Traced(mod, /*rand_state=*/ForkSeed(rand_state), /*debug_mode=*/0, /*error_render_level=*/tir::ScheduleErrorRenderLevel::kNone); trace->ApplyToSchedule(sch, /*remove_postproc=*/true); sch->EnterPostproc(); for (int i = 0; i < n_; ++i) { Item& item = items_[i]; if (!item.postproc->Apply(sch)) { item.fail_counter++; return NullOpt; } } return sch; } /*! \brief Returns a string summarizing the failures on each postprocessor */ std::string SummarizeFailures() const { std::ostringstream os; for (int i = 0; i < n_; ++i) { const Item& item = items_[i]; os << "Postproc #" << i << " [" << item.postproc // << "]: " << item.fail_counter.load() << " failure(s)"; if (i != n_ - 1) { os << "\n"; } } return os.str(); } private: /*! \brief A helper data structure that stores the fail count for each postprocessor. */ struct Item { /*! \brief The postprocessor. */ Postproc postproc{nullptr}; /*! \brief The thread-safe postprocessor failure counter. */ std::atomic<int> fail_counter{0}; }; /*! \brief The number of total postprocessors. */ int n_; /*! \brief The pointer to the list of postprocessor items. */ Item* items_; }; /*! * \brief Get the number of cores in CPU * \param target The target * \return The number of cores. 
*/ inline int GetTargetNumCores(const Target& target) { int num_cores = target->GetAttr<Integer>("num-cores").value_or(-1).IntValue(); if (num_cores == -1) { static const auto* f_cpu_count = runtime::Registry::Get("meta_schedule.cpu_count"); ICHECK(f_cpu_count) << "ValueError: Cannot find the packed function \"meta_schedule._cpu_count\""; num_cores = (*f_cpu_count)(false); LOG(FATAL) << "Target does not have attribute \"num-cores\", physical core number must be " "defined! For example, on the local machine, the target must be \"llvm -num-cores " << num_cores << "\""; } return num_cores; } /*! * \brief Get the median of the running time from RunnerResult in millisecond * \param results The results from RunnerResult * \return The median of the running time in millisecond */ inline double GetRunMsMedian(const RunnerResult& runner_result) { Array<FloatImm> run_secs = runner_result->run_secs.value(); ICHECK(!run_secs.empty()); std::vector<double> v; v.reserve(run_secs.size()); std::transform(run_secs.begin(), run_secs.end(), std::back_inserter(v), [](const FloatImm& f) -> double { return f->value; }); std::sort(v.begin(), v.end()); int n = v.size(); if (n % 2 == 0) { return (v[n / 2 - 1] + v[n / 2]) * 0.5 * 1000.0; } else { return v[n / 2] * 1000.0; } } /*! 
* \brief Convert the given object to an array of floating point numbers * \param obj The object to be converted * \return The array of floating point numbers */ inline Array<FloatImm> AsFloatArray(const ObjectRef& obj) { const ArrayNode* arr = obj.as<ArrayNode>(); ICHECK(arr) << "TypeError: Expect an array, but gets: " << obj->GetTypeKey(); Array<FloatImm> results; results.reserve(arr->size()); for (const ObjectRef& elem : *arr) { if (const auto* int_imm = elem.as<IntImmNode>()) { results.push_back(FloatImm(DataType::Float(32), int_imm->value)); } else if (const auto* float_imm = elem.as<FloatImmNode>()) { results.push_back(FloatImm(DataType::Float(32), float_imm->value)); } else { LOG(FATAL) << "TypeError: Expect an array of float or int, but gets: " << elem->GetTypeKey(); } } return results; } /*! * \brief Convert the given object to an array of integers * \param obj The object to be converted * \return The array of integers */ inline Array<Integer> AsIntArray(const ObjectRef& obj) { const ArrayNode* arr = obj.as<ArrayNode>(); ICHECK(arr) << "TypeError: Expect an array, but gets: " << obj->GetTypeKey(); Array<Integer> results; results.reserve(arr->size()); for (const ObjectRef& elem : *arr) { if (const auto* int_imm = elem.as<IntImmNode>()) { results.push_back(Integer(int_imm->value)); } else { LOG(FATAL) << "TypeError: Expect an array of integers, but gets: " << elem->GetTypeKey(); } } return results; } /*! \brief The struct defining comparison function of sorting by mean run seconds. */ struct SortTuningRecordByMeanRunSecs { static const constexpr double kMaxMeanTime = 1e10; static double Mean(const Array<FloatImm>& a) { if (a.empty()) { return kMaxMeanTime; } double sum = 0.0; for (const FloatImm& i : a) { sum += i->value; } return sum / a.size(); } bool operator()(const TuningRecord& a, const TuningRecord& b) const { double a_time = Mean(a->run_secs.value_or({})); double b_time = Mean(b->run_secs.value_or({})); return a_time < b_time; } }; /*! 
* \brief The helper function to clone schedule rules, postprocessors, and mutators. * \param src The source space generator. * \param dst The destination space generator. */ inline void CloneRules(const SpaceGeneratorNode* src, SpaceGeneratorNode* dst) { if (src->sch_rules.defined()) { Array<ScheduleRule> original = src->sch_rules.value(); Array<ScheduleRule> sch_rules; sch_rules.reserve(original.size()); for (const ScheduleRule& sch_rule : original) { sch_rules.push_back(sch_rule->Clone()); } dst->sch_rules = std::move(sch_rules); } if (src->postprocs.defined()) { Array<Postproc> original = src->postprocs.value(); Array<Postproc> postprocs; postprocs.reserve(original.size()); for (const Postproc& postproc : original) { postprocs.push_back(postproc->Clone()); } dst->postprocs = std::move(postprocs); } if (src->mutator_probs.defined()) { Map<Mutator, FloatImm> original = src->mutator_probs.value(); Map<Mutator, FloatImm> mutator_probs; for (const auto& kv : original) { mutator_probs.Set(kv.first->Clone(), kv.second); } dst->mutator_probs = std::move(mutator_probs); } } /*! \brief Returns true if the given target is one of the supported gpu targets. */ inline bool IsGPUTarget(const std::string& target_name) { static const std::unordered_set<std::string> gpu_targets{"cuda", "rocm", "vulkan", "metal"}; return gpu_targets.count(target_name); } /*! * \brief Create an AutoInline schedule rule for the given target. * \param target_name The name of the target ("llvm", "cuda", etc.) * \return The AutoInline schedule rule for the given target. 
*/ inline ScheduleRule GetDefaultAutoInline(const std::string& target_name) { Array<ScheduleRule> rules{nullptr}; if (target_name == "llvm") { rules = ScheduleRule::DefaultLLVM(); } else if (target_name == "hexagon") { rules = ScheduleRule::DefaultHexagon(); } else if (IsGPUTarget(target_name)) { rules = ScheduleRule::DefaultCUDA(); } else { LOG(FATAL) << "ValueError: Unsupported target: " << target_name; } for (const ScheduleRule& rule : rules) { if (rule->GetTypeKey() == "meta_schedule.AutoInline") { return rule; } } LOG(FATAL) << "ValueError: AutoInline rule is not found in the default rules for target: " << target_name; throw; } /*! * \brief Summarize the run time of the given FloatImm array. * \param arr The array of FloatImm. * \return The summary of the values in the given array. */ inline double Sum(const Array<FloatImm>& arr) { double sum = 0; for (const FloatImm& f : arr) { sum += f->value; } return sum; } } // namespace meta_schedule } // namespace tvm #endif // TVM_META_SCHEDULE_UTILS_H_
https://github.com/zk-ml/tachikoma
src/node/attr_registry.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/node/attr_registry.h * \brief Common global registry for objects that also have additional attrs. */ #ifndef TVM_NODE_ATTR_REGISTRY_H_ #define TVM_NODE_ATTR_REGISTRY_H_ #include <tvm/node/attr_registry_map.h> #include <tvm/runtime/packed_func.h> #include <memory> #include <mutex> #include <unordered_map> #include <utility> #include <vector> namespace tvm { /*! * \brief Implementation of registry with attributes. * * \tparam EntryType The type of the registry entry. * \tparam KeyType The actual key that is used to lookup the attributes. * each entry has a corresponding key by default. */ template <typename EntryType, typename KeyType> class AttrRegistry { public: using TSelf = AttrRegistry<EntryType, KeyType>; /*! * \brief Get an entry from the registry. * \param name The name of the item. * \return The corresponding entry. */ const EntryType* Get(const String& name) const { auto it = entry_map_.find(name); if (it != entry_map_.end()) return it->second; return nullptr; } /*! * \brief Get an entry or register a new one. * \param name The name of the item. * \return The corresponding entry. 
*/ EntryType& RegisterOrGet(const String& name) { auto it = entry_map_.find(name); if (it != entry_map_.end()) return *it->second; uint32_t registry_index = static_cast<uint32_t>(entries_.size()); auto entry = std::unique_ptr<EntryType>(new EntryType(registry_index)); auto* eptr = entry.get(); eptr->name = name; entry_map_[name] = eptr; entries_.emplace_back(std::move(entry)); return *eptr; } /*! * \brief List all the entry names in the registry. * \return The entry names. */ Array<String> ListAllNames() const { Array<String> names; for (const auto& kv : entry_map_) { names.push_back(kv.first); } return names; } /*! * \brief Update the attribute stable. * \param attr_name The name of the attribute. * \param key The key to the attribute table. * \param value The value to be set. * \param plevel The support level. */ void UpdateAttr(const String& attr_name, const KeyType& key, runtime::TVMRetValue value, int plevel) { using runtime::TVMRetValue; std::lock_guard<std::mutex> lock(mutex_); auto& op_map = attrs_[attr_name]; if (op_map == nullptr) { op_map.reset(new AttrRegistryMapContainerMap<KeyType>()); op_map->attr_name_ = attr_name; } uint32_t index = key->AttrRegistryIndex(); if (op_map->data_.size() <= index) { op_map->data_.resize(index + 1, std::make_pair(TVMRetValue(), 0)); } std::pair<TVMRetValue, int>& p = op_map->data_[index]; ICHECK(p.second != plevel) << "Attribute " << attr_name << " of " << key->AttrRegistryName() << " is already registered with same plevel=" << plevel; ICHECK(value.type_code() != kTVMNullptr) << "Registered packed_func is Null for " << attr_name << " of operator " << key->AttrRegistryName(); if (p.second < plevel && value.type_code() != kTVMNullptr) { op_map->data_[index] = std::make_pair(value, plevel); } } /*! * \brief Reset an attribute table entry. * \param attr_name The name of the attribute. * \param key The key to the attribute table. 
*/ void ResetAttr(const String& attr_name, const KeyType& key) { std::lock_guard<std::mutex> lock(mutex_); auto& op_map = attrs_[attr_name]; if (op_map == nullptr) { return; } uint32_t index = key->AttrRegistryIndex(); if (op_map->data_.size() > index) { op_map->data_[index] = std::make_pair(TVMRetValue(), 0); } } /*! * \brief Get an internal attribute map. * \param attr_name The name of the attribute. * \return The result attribute map. */ const AttrRegistryMapContainerMap<KeyType>& GetAttrMap(const String& attr_name) { std::lock_guard<std::mutex> lock(mutex_); auto it = attrs_.find(attr_name); if (it == attrs_.end()) { LOG(FATAL) << "Attribute \'" << attr_name << "\' is not registered"; } return *it->second.get(); } /*! * \brief Check of attribute has been registered. * \param attr_name The name of the attribute. * \return The check result. */ bool HasAttrMap(const String& attr_name) { std::lock_guard<std::mutex> lock(mutex_); return attrs_.count(attr_name); } /*! * \return a global singleton of the registry. */ static TSelf* Global() { static TSelf* inst = new TSelf(); return inst; } private: // mutex to avoid registration from multiple threads. std::mutex mutex_; // entries in the registry std::vector<std::unique_ptr<EntryType>> entries_; // map from name to entries. std::unordered_map<String, EntryType*> entry_map_; // storage of additional attribute table. std::unordered_map<String, std::unique_ptr<AttrRegistryMapContainerMap<KeyType>>> attrs_; }; } // namespace tvm #endif // TVM_NODE_ATTR_REGISTRY_H_
https://github.com/zk-ml/tachikoma
src/node/ndarray_hash_equal.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ #ifndef TVM_NODE_NDARRAY_HASH_EQUAL_H_ #define TVM_NODE_NDARRAY_HASH_EQUAL_H_ #include <tvm/runtime/ndarray.h> namespace tvm { class SEqualReducer; class SHashReducer; /*! * \brief Test two NDArrays for equality. * \param lhs The left operand. * \param rhs The right operand. * \param equal A Reducer class to reduce the structural equality result of two objects. * See tvm/node/structural_equal.h. * \param compare_data Whether or not to consider ndarray raw data in the equality testing. * \return The equality testing result. */ bool NDArrayEqual(const runtime::NDArray::Container* lhs, const runtime::NDArray::Container* rhs, SEqualReducer equal, bool compare_data); /*! * \brief Hash NDArray. * \param arr The NDArray to compute the hash for. * \param hash_reduce A Reducer class to reduce the structural hash value. * See tvm/node/structural_hash.h. * \param hash_data Whether or not to hash ndarray raw data. */ void NDArrayHash(const runtime::NDArray::Container* arr, SHashReducer* hash_reduce, bool hash_data); } // namespace tvm #endif // TVM_NODE_NDARRAY_HASH_EQUAL_H_
https://github.com/zk-ml/tachikoma
src/parser/meta_ref.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file meta_ref.h * \brief A reference into the metadata section of the Relay text format. */ #ifndef TVM_PARSER_META_REF_H_ #define TVM_PARSER_META_REF_H_ #include <tvm/ir/attrs.h> #include <tvm/parser/parser.h> #include <tvm/relay/expr.h> #include <tvm/relay/function.h> #include <string> namespace tvm { namespace parser { using namespace relay; /*! * \brief Options for allocating storage. */ struct MetaRefAttrs : public tvm::AttrsNode<MetaRefAttrs> { tvm::String node_type_key; uint64_t node_index; TVM_DECLARE_ATTRS(MetaRefAttrs, "relay.attrs.MetaRefAttrs") { TVM_ATTR_FIELD(node_type_key) .describe("The type_key representing the type of the node referenced."); TVM_ATTR_FIELD(node_index).describe("The index into the type specific node array."); } }; /*! \brief A reference to a "meta-expression". * * In the text format we allow referencing metadata which * uses a compact serialization that proceeds the main * program body. * * We can reference this table using an expression of * the form `meta[Type][index]`. 
* * We must later resolve these references to actual in-memory * AST nodes but this requires first parsing the full program * then expanding these temporary AST nodes into their corresponding * nodes. * * For example the nth large constant will be pretty-printed as meta[relay.Constant][n] * with its compact binary serialization residing in the metadata section at the end * of the program. * * \param type_key The type key of the object in the meta section. * \param node_index The index into that subfield. * \returns The meta table reference. */ Expr MetaRef(std::string type_key, uint64_t node_index); relay::Function ExpandMetaRefs(const MetaTable& meta_table, const relay::Function& func); IRModule ExpandMetaRefs(const MetaTable& meta_table, const IRModule& mod); } // namespace parser } // namespace tvm #endif // TVM_PARSER_META_REF_H_
https://github.com/zk-ml/tachikoma
src/parser/op_table.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file token.h * \brief A operator table for parsing. * * Provides symbolic token sequences to map to TVM operators, with a given associativity and arity. */ #ifndef TVM_PARSER_OP_TABLE_H_ #define TVM_PARSER_OP_TABLE_H_ #include <tvm/ir/op.h> #include <tvm/runtime/object.h> #include <fstream> #include <string> #include <unordered_map> #include <vector> #include "./tokenizer.h" namespace tvm { namespace parser { struct Rule { std::vector<TokenType> tokens; int precedence; int arity; tvm::Op op; bool left_assoc; Rule() : tokens(), precedence(0), arity(0), op(tvm::Op()), left_assoc(false) {} Rule(std::vector<TokenType> tokens, tvm::Op op, int precedence, int arity = 2, bool left_assoc = false) : tokens(tokens), precedence(precedence), arity(arity), op(op), left_assoc(left_assoc) {} Rule(const Rule& rule) { this->tokens = rule.tokens; this->op = rule.op; this->precedence = rule.precedence; this->arity = rule.arity; this->left_assoc = rule.left_assoc; } }; struct OperatorTable { std::vector<Rule> rules; std::unordered_map<std::string, Rule> this_is_a_hack; explicit OperatorTable(std::vector<Rule> rules) : rules(rules), this_is_a_hack() { for (auto rule : rules) { std::stringstream key; for (auto 
token : rule.tokens) { key << ToString(token); } this->this_is_a_hack.insert({key.str(), rule}); } } }; OperatorTable DefaultOpTable() { return OperatorTable( {Rule({TokenType::kStar}, Op::Get("multiply"), 12, 2, true), Rule({TokenType::kDivision}, Op::Get("divide"), 12, 2, true), Rule({TokenType::kPlus}, Op::Get("add"), 10, 2, true), Rule({TokenType::kMinus}, Op::Get("subtract"), 10, 2, true), Rule({TokenType::kLAngle}, Op::Get("less"), 8, 2, true), Rule({TokenType::kLAngle, TokenType::kEqual}, Op::Get("less_equal"), 8, 2, true), Rule({TokenType::kRAngle}, Op::Get("greater"), 8, 2, true), Rule({TokenType::kRAngle, TokenType::kEqual}, Op::Get("greater_equal"), 8, 2, true), Rule({TokenType::kEqual, TokenType::kEqual}, Op::Get("equal"), 7, 2, true), Rule({TokenType::kBang, TokenType::kEqual}, Op::Get("not_equal"), 7, 2, true)}); } } // namespace parser } // namespace tvm #endif // TVM_PARSER_OP_TABLE_H_
https://github.com/zk-ml/tachikoma
src/parser/span_check.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file span_check.h * \brief Check that the Relay IR has correctly attached span information. */ #ifndef TVM_PARSER_SPAN_CHECK_H_ #define TVM_PARSER_SPAN_CHECK_H_ #include <tvm/ir/transform.h> #include <tvm/ir/type_functor.h> #include <tvm/relay/expr.h> #include <tvm/relay/expr_functor.h> #include <tvm/runtime/logging.h> #include <tvm/runtime/object.h> #include <fstream> #include <string> #include <unordered_map> #include <vector> namespace tvm { namespace parser { using namespace tvm::relay; using tvm::transform::Pass; struct SpanChecker : ExprVisitor { Expr expression; DiagnosticContext diag_ctx; std::vector<Span> span_stack; explicit SpanChecker(DiagnosticContext diag_ctx) : diag_ctx(diag_ctx) {} void VisitExpr(const Expr& expr) override; void VisitExpr_(const VarNode* op) override; void VisitExpr_(const GlobalVarNode* op) override; void VisitExpr_(const ConstantNode* op) override; void VisitExpr_(const TupleNode* op) override; void VisitExpr_(const FunctionNode* op) override; void VisitExpr_(const CallNode* op) override; void VisitExpr_(const LetNode* op) override; void VisitExpr_(const IfNode* op) override; void VisitExpr_(const OpNode* op) override; void VisitExpr_(const 
TupleGetItemNode* op) override; void VisitExpr_(const RefCreateNode* op) override; void VisitExpr_(const RefReadNode* op) override; void VisitExpr_(const RefWriteNode* op) override; void VisitExpr_(const ConstructorNode* op) override; void VisitExpr_(const MatchNode* op) override; void VisitType(const Type& t) override; void VisitClause(const Clause& c) override; void VisitPattern(const Pattern& c) override; void VisitSpan(const Span& span) override; }; Pass SpanCheck(); } // namespace parser } // namespace tvm #endif // TVM_PARSER_SPAN_CHECK_H_
https://github.com/zk-ml/tachikoma
src/parser/token.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file token.h * \brief The definition of tokens for the TVM parser. */ #ifndef TVM_PARSER_TOKEN_H_ #define TVM_PARSER_TOKEN_H_ #include <tvm/ir/span.h> #include <tvm/runtime/object.h> #include <fstream> #include <string> #include <utility> namespace tvm { namespace parser { using namespace runtime; enum class TokenType { kCommentStart, kCommentEnd, kLineComment, kComment, kWhitespace, kNewline, kStringLiteral, kIdentifier, kLocal, kGlobal, kOp, kGraph, kOpenParen, kCloseParen, kAtSymbol, kPercent, kComma, kPeriod, kEqual, kSemicolon, kColon, kInteger, kFloat, kDivision, kBoolean, kPlus, kStar, kMinus, kRAngle, kLAngle, kRCurly, kLCurly, kRSquare, kLSquare, kBang, kAt, kQuestion, kIf, kElse, kUnderscore, kLet, kFn, kDefn, kTypeDef, kExtern, kMatch, kPartialMatch, kMetadata, kMetaReference, kFreeVar, kRef, kRefRead, kRefWrite, kVersion, kUnknown, kEndOfFile, kNull, }; std::string ToString(const TokenType& token_type) { switch (token_type) { case TokenType::kCommentStart: return "CommentStart"; case TokenType::kCommentEnd: return "CommentEnd"; case TokenType::kLineComment: return "LineComment"; case TokenType::kComment: return "Comment"; case TokenType::kWhitespace: return "WhiteSpace"; case 
TokenType::kNewline: return "Newline"; case TokenType::kStringLiteral: return "StringLiteral"; case TokenType::kIdentifier: return "Identifier"; case TokenType::kLocal: return "Local"; case TokenType::kGlobal: return "Global"; case TokenType::kGraph: return "Graph"; case TokenType::kOp: return "Op"; case TokenType::kOpenParen: return "OpenParen"; case TokenType::kCloseParen: return "CloseParen"; case TokenType::kAtSymbol: return "AtSymbol"; case TokenType::kPercent: return "Percent"; case TokenType::kComma: return "Comma"; case TokenType::kColon: return "Colon"; case TokenType::kSemicolon: return "Semicolon"; case TokenType::kPeriod: return "Period"; case TokenType::kEqual: return "Equal"; case TokenType::kInteger: return "Integer"; case TokenType::kFloat: return "Float"; case TokenType::kPlus: return "Plus"; case TokenType::kStar: return "Star"; case TokenType::kMinus: return "Minus"; case TokenType::kDivision: return "Division"; case TokenType::kRAngle: return "RAngle"; case TokenType::kLAngle: return "LAngle"; case TokenType::kRCurly: return "RCurly"; case TokenType::kLCurly: return "LCurly"; case TokenType::kRSquare: return "RSquare"; case TokenType::kLSquare: return "LSquare"; case TokenType::kBang: return "Bang"; case TokenType::kUnderscore: return "Underscore"; case TokenType::kAt: return "At"; case TokenType::kLet: return "Let"; case TokenType::kIf: return "If"; case TokenType::kElse: return "Else"; case TokenType::kFn: return "Fn"; case TokenType::kDefn: return "Defn"; case TokenType::kTypeDef: return "TypeDef"; case TokenType::kExtern: return "Extern"; case TokenType::kMatch: return "Match"; case TokenType::kPartialMatch: return "PartialMatch"; case TokenType::kQuestion: return "Question"; case TokenType::kBoolean: return "Boolean"; case TokenType::kMetadata: return "Metadata"; case TokenType::kMetaReference: return "MetaReference"; case TokenType::kFreeVar: return "FreeVar"; case TokenType::kVersion: return "Version"; case TokenType::kRef: return "Ref"; 
case TokenType::kRefRead: return "RefRead"; case TokenType::kRefWrite: return "RefWrite"; case TokenType::kUnknown: return "Unknown"; case TokenType::kEndOfFile: return "EndOfFile"; case TokenType::kNull: return "Null"; // Older compilers warn even though the above code is exhaustive. default: LOG(FATAL) << "unreachable code"; return ""; } } std::string Pretty(const TokenType& token_type) { switch (token_type) { case TokenType::kCommentStart: return "`/*`"; case TokenType::kCommentEnd: return "`*/`"; case TokenType::kLineComment: return "`//`"; case TokenType::kComment: return "comment"; case TokenType::kWhitespace: return "whitespace"; case TokenType::kNewline: return "newline"; case TokenType::kStringLiteral: return "string literal"; case TokenType::kIdentifier: return "identifier"; case TokenType::kLocal: return "local variable"; case TokenType::kGlobal: return "global variable"; case TokenType::kGraph: return "graph variable"; case TokenType::kOp: return "operator"; case TokenType::kOpenParen: return "`(`"; case TokenType::kCloseParen: return "`)`"; case TokenType::kAtSymbol: return "`@`"; case TokenType::kPercent: return "`%`"; case TokenType::kComma: return "`,`"; case TokenType::kColon: return "`:`"; case TokenType::kSemicolon: return "`;`"; case TokenType::kPeriod: return "`.`"; case TokenType::kEqual: return "`=`"; case TokenType::kInteger: return "integer"; case TokenType::kFloat: return "float"; case TokenType::kPlus: return "`+`"; case TokenType::kStar: return "`*`"; case TokenType::kMinus: return "`-`"; case TokenType::kDivision: return "`/`"; case TokenType::kRAngle: return "`<`"; case TokenType::kLAngle: return "`>`"; case TokenType::kRCurly: return "`}`"; case TokenType::kLCurly: return "`{`"; case TokenType::kRSquare: return "`]`"; case TokenType::kLSquare: return "`[`"; case TokenType::kBang: return "`!`"; case TokenType::kUnderscore: return "`_`"; case TokenType::kAt: return "`@`"; case TokenType::kLet: return "`let`"; case TokenType::kIf: return 
"`if`"; case TokenType::kElse: return "`else`"; case TokenType::kFn: return "`fn`"; case TokenType::kDefn: return "`def`"; case TokenType::kTypeDef: return "`type`"; case TokenType::kExtern: return "`extern`"; case TokenType::kBoolean: return "boolean"; case TokenType::kMetadata: return "metadata section"; case TokenType::kMetaReference: return "`meta`"; case TokenType::kFreeVar: return "`free_var`"; case TokenType::kMatch: return "`match`"; case TokenType::kPartialMatch: return "`match?`"; case TokenType::kQuestion: return "`?`"; case TokenType::kRef: return "`ref`"; case TokenType::kRefRead: return "`ref_read`"; case TokenType::kRefWrite: return "`ref_write`"; case TokenType::kUnknown: return "unknown"; case TokenType::kEndOfFile: return "end of file"; case TokenType::kNull: return "null"; case TokenType::kVersion: return "version attribute"; // Older compilers warn even though the above code is exhaustive. default: LOG(FATAL) << "unreachable code"; return ""; } } class Token; class TokenNode : public Object { public: Span span; TokenType token_type; mutable runtime::ObjectRef data; void VisitAttrs(AttrVisitor* v) {} static constexpr const char* _type_key = "parser.Token"; TVM_DECLARE_FINAL_OBJECT_INFO(TokenNode, Object); }; TVM_STATIC_IR_FUNCTOR(ReprPrinter, vtable) .set_dispatch<TokenNode>([](const ObjectRef& ref, ReprPrinter* p) { auto* node = static_cast<const TokenNode*>(ref.get()); p->stream << "Token(span=" << node->span << ", token_type=" << ToString(node->token_type) << ", data=" << node->data << ")"; }); TVM_REGISTER_NODE_TYPE(TokenNode); class Token : public ObjectRef { public: TVM_DLL explicit Token(Span span, TokenType token_type, ObjectRef data = ObjectRef()); static Token Null(); int64_t ToNumber() const; std::string ToString() const; Map<String, Array<ObjectRef>> ToMetadata() const; TVM_DEFINE_OBJECT_REF_METHODS(Token, ObjectRef, TokenNode); }; Token::Token(Span span, TokenType token_type, ObjectRef data) { ObjectPtr<TokenNode> n = 
make_object<TokenNode>(); n->span = span; n->token_type = token_type; n->data = data; data_ = std::move(n); } Token Token::Null() { return Token(Span(SourceName(), 0, 0, 0, 0), TokenType::kNull); } int64_t Token::ToNumber() const { return Downcast<tvm::Integer>(this->operator->()->data).IntValue(); } std::string Token::ToString() const { return Downcast<tvm::String>(this->operator->()->data); } Map<String, Array<ObjectRef>> Token::ToMetadata() const { ObjectRef data = this->operator->()->data; if (data.defined()) { return Downcast<Map<String, Array<ObjectRef>>>(data); } else { return Map<String, Array<ObjectRef>>({}); } } } // namespace parser } // namespace tvm #endif // TVM_PARSER_TOKEN_H_
https://github.com/zk-ml/tachikoma
src/parser/tokenizer.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file parser.h * \brief A parser for TVM IR. */ #ifndef TVM_PARSER_TOKENIZER_H_ #define TVM_PARSER_TOKENIZER_H_ #include <tvm/node/serialization.h> #include <tvm/runtime/object.h> #include <fstream> #include <limits> #include <string> #include <unordered_map> #include <utility> #include <vector> #include "../support/scalars.h" #include "./meta_ref.h" #include "./token.h" namespace tvm { namespace parser { using namespace runtime; // trim from start (in place) static inline void ltrim(std::string& s) { // NOLINT(*) s.erase(s.begin(), std::find_if(s.begin(), s.end(), [](int ch) { return !std::isspace(ch); })); } // trim from end (in place) static inline void rtrim(std::string& s) { // NOLINT(*) s.erase(std::find_if(s.rbegin(), s.rend(), [](int ch) { return !std::isspace(ch); }).base(), s.end()); } bool IsDigit(char c) { return '0' <= c && c <= '9'; } bool IsWhitespace(char c) { return ' ' == c || c == '\t' || c == '\n'; } bool IsNumeric(char c) { return (IsDigit(c) || c == '.' 
|| c == 'e' || c == '-' || c == '+' || c == 'E') && !IsWhitespace(c); } bool IsIdentLetter(char c) { return '_' == c || c == '/' || ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z'); } bool IsIdent(char c) { return IsIdentLetter(c) || IsDigit(c); } static std::unordered_map<std::string, TokenType> KEYWORD_TABLE = { {"let", TokenType::kLet}, {"fn", TokenType::kFn}, {"def", TokenType::kDefn}, {"if", TokenType::kIf}, {"else", TokenType::kElse}, {"type", TokenType::kTypeDef}, {"match", TokenType::kMatch}, {"extern", TokenType::kExtern}, {"free_var", TokenType::kFreeVar}, {"ref", TokenType::kRef}, {"ref_read", TokenType::kRefRead}, {"ref_write", TokenType::kRefWrite}}; struct Tokenizer { DiagnosticContext diag_ctx; const SourceName& source_name; size_t pos; int col; int line; char next_char; String source; std::vector<Token> tokens; char Next() { char c = this->source.at(this->pos); if (c == '\n') { this->line += 1; this->col = 1; } else { this->col += 1; } pos += 1; return c; } bool More() { return this->pos < this->source.size(); } char Peek() { ICHECK(pos < this->source.size()); return this->source.at(this->pos); } Token NewToken(TokenType token_type, ObjectRef data = ObjectRef(), int lines = 0, int cols = 1) { auto span = Span(this->source_name, this->line, this->line + lines, this->col, this->col + cols); return Token(span, token_type, data); } Span SpanFrom(int line, int column) { int end_line = this->line; int end_column = this->col; return Span(this->source_name, line, end_line, column, end_column); } enum CommentParserState { Proceed, Forward, Backward, }; void MatchComment(std::string* buffer) { // We only invoke this after we have matched the first start // token assume, we are proceeding the parse forward with // nesting = 1. // // When we are done we should be at nesting zero and be // in the stop state. 
CommentParserState state = CommentParserState::Proceed; int nesting = 1; while (More()) { switch (state) { case CommentParserState::Proceed: { if (Peek() == '/') { state = CommentParserState::Forward; } else if (Peek() == '*') { state = CommentParserState::Backward; } buffer->operator+=(Next()); continue; } case CommentParserState::Forward: { if (Peek() == '*') { nesting += 1; buffer->operator+=(Next()); } state = CommentParserState::Proceed; continue; } case CommentParserState::Backward: { if (Peek() == '/') { nesting -= 1; if (nesting == 0) { Next(); buffer->pop_back(); return; } } buffer->operator+=(Next()); state = CommentParserState::Proceed; continue; } } } } Token ParseNumber(bool is_pos, bool is_float, std::string number) { ICHECK(number.size() > 0) << "an empty string is an invalid number"; Token token = NewToken(is_float ? TokenType::kFloat : TokenType::kInteger); size_t suffix_pos = number.rfind(is_float ? 'f' : 'i'); if (suffix_pos == std::string::npos) { suffix_pos = number.size(); } std::string literal_text = number.substr(0, suffix_pos); std::string suffix; if (suffix_pos < number.size()) { suffix = number.substr(suffix_pos + 1, number.size() - suffix_pos); } int width = 32; if (suffix.size()) { try { width = std::stoi(suffix); } catch (const std::invalid_argument& err) { this->diag_ctx.Emit(Diagnostic::Error(token->span) << "invalid numeric suffix `" << suffix << "`"); } catch (const std::out_of_range& err) { this->diag_ctx.Emit(Diagnostic::Error(token->span) << "invalid numeric suffix `" << suffix << "`"); } } if (is_float) { double value = 0.0; size_t index = 0; try { value = stod(literal_text, &index); } catch (const std::invalid_argument& err) { this->diag_ctx.Emit(Diagnostic::Error(token->span) << "invalid floating point number `" << literal_text << "`"); } catch (const std::out_of_range& err) { this->diag_ctx.Emit(Diagnostic::Error(token->span) << "invalid floating point number `" << literal_text << "`"); } if (index < literal_text.size()) { 
this->diag_ctx.Emit(Diagnostic::Error(token->span) << "invalid floating point number `" << literal_text << "`"); } value = is_pos ? value : -value; token->data = support::ValueToFloatImm(value, width); if (!token->data.defined()) { this->diag_ctx.Emit(Diagnostic::Error(token->span) << "floating point number `" << literal_text << "` unrepresentable in width " << width); token->data = support::ValueToFloatImm(0.0, width); } } else { int64_t value = 0; size_t index = 0; try { value = std::stoll(literal_text, &index); } catch (const std::invalid_argument& err) { this->diag_ctx.Emit(Diagnostic::Error(token->span) << "invalid integer number `" << literal_text << "`"); } catch (const std::out_of_range& err) { this->diag_ctx.Emit(Diagnostic::Error(token->span) << "invalid integer number `" << literal_text << "`"); } if (index < literal_text.size()) { this->diag_ctx.Emit(Diagnostic::Error(token->span) << "invalid integer number `" << literal_text << "`"); } value = is_pos ? value : -value; token->data = support::ValueToIntImm(value, width); if (!token->data.defined() && suffix.empty()) { // Without any i suffix the legacy behavior was to default to int64 if out of range // for int32. 
width = 64; token->data = support::ValueToIntImm(value, width); } if (!token->data.defined()) { this->diag_ctx.Emit(Diagnostic::Error(token->span) << "integer number `" << literal_text << "` unrepresentable in width " << width); token->data = support::ValueToIntImm(0, width); } } return token; } Token ParseNumber(bool is_pos) { std::stringstream ss; while (More() && IsNumeric(Peek())) { ss << Next(); } bool is_float = false; if (More() && (Peek() == 'f' || Peek() == 'i')) { is_float = Peek() == 'f'; // Capture trailing width suffix ss << Next(); while (More() && IsNumeric(Peek())) { ss << Next(); } } return ParseNumber(is_pos, is_float, ss.str()); } bool MatchString(const std::string& string) { int start = this->pos; for (auto c : string) { if (Peek() != c) { this->pos = start; return false; } else { Next(); } } return true; } Token TokenizeMetaRef() { int line = this->line; int column = this->col; std::stringstream type_key; while (More() && Peek() != ']') { type_key << Next(); } ICHECK_EQ(Peek(), ']'); Next(); ICHECK_EQ(Peek(), '['); Next(); std::stringstream str_index; while (More() && Peek() != ']') { str_index << Next(); } ICHECK_EQ(Peek(), ']'); Next(); // todo: add error handling around bad indices auto index = ParseNumber(true, false, str_index.str()).ToNumber(); auto span = SpanFrom(line, column); return Token(span, TokenType::kMetaReference, MetaRef(type_key.str(), index)); } Token TokenizeAttr() { int line = this->line; int column = this->col; Next(); if (Peek() == '[') { Next(); std::stringstream raw_attribute; while (More() && Peek() != ']') { raw_attribute << Next(); } ICHECK_EQ(Next(), ']'); auto attribute = raw_attribute.str(); // Clean up the white-space on both sides. ltrim(attribute); rtrim(attribute); // Metadata can only appear at the bottom of a file and goes to EOF. 
if (attribute == "metadata") { std::stringstream metadata; while (More()) { metadata << Next(); } ObjectRef metadata_map = tvm::LoadJSON(metadata.str()); auto span = SpanFrom(line, column); return Token(span, TokenType::kMetadata, metadata_map); } if (attribute.rfind("version", 0) == 0) { std::string version = attribute.substr(attribute.find("=") + 1); ltrim(version); rtrim(version); auto span = SpanFrom(line, column); return Token(span, TokenType::kVersion, tvm::String(version)); } else { // TOOD(@jroesch): maybe make this a warning an continue parsing? auto span = SpanFrom(line, column); this->diag_ctx.EmitFatal(Diagnostic::Error(span) << "unsupported attribute " << attribute); return Token(); } } else { auto span = SpanFrom(line, column); this->diag_ctx .EmitFatal(Diagnostic::Error(span) << "`#` denotes the start of an attribute can only be followed by `[`" << " found `" << Peek() << "`"); return Token(); } } inline Token TokenizeOnce() { int line = this->line; int col = this->col; auto next = Peek(); VLOG(9) << "tvm::parser::TokenizeOnce: next=" << next; if (next == '\n') { auto token = NewToken(TokenType::kNewline); Next(); return token; } else if (next == '\r') { Next(); if (More() && Peek() == '\n') { auto token = NewToken(TokenType::kNewline); return token; } else { auto span = SpanFrom(line, col); this->diag_ctx.EmitFatal( Diagnostic::Error(span) << "\\r carriage returns must be followed by a \\n in the TVM text format"); return Token(); } } else if (next == '"') { // TODO(@jroesch): Properly tokenize escape sequences in strings. // see https://github.com/apache/tvm/issues/6153. 
Next(); std::stringstream string_content; while (More() && Peek() != '"') { string_content << Next(); } Next(); return NewToken(TokenType::kStringLiteral, tvm::String(string_content.str())); } else if (IsWhitespace(next)) { auto token = NewToken(TokenType::kWhitespace); Next(); return token; } else if (next == '-') { int negs = 0; while (More() && Peek() == '-') { Next(); negs++; } bool is_neg = negs % 2 == 1; if (More() && IsDigit(Peek())) { return ParseNumber(!is_neg); } else if (More() && MatchString("inff")) { return ParseNumber(!is_neg, true, "inff"); } else { // If there isn't a number right after either, // this is really slow for lexing, should replace // with multi-token return or something. pos = pos - (negs - 1); return NewToken(TokenType::kMinus); } } else if (IsDigit(next)) { return ParseNumber(true); } else if (MatchString("inff")) { return ParseNumber(true, true, "inff"); } else if (next == '.') { auto token = NewToken(TokenType::kPeriod); Next(); return token; } else if (next == ',') { auto token = NewToken(TokenType::kComma); Next(); return token; } else if (next == '=') { auto token = NewToken(TokenType::kEqual); Next(); return token; } else if (next == ';') { auto token = NewToken(TokenType::kSemicolon); Next(); return token; } else if (next == ':') { auto token = NewToken(TokenType::kColon); Next(); return token; } else if (next == '(') { auto token = NewToken(TokenType::kOpenParen); Next(); return token; } else if (next == ')') { auto token = NewToken(TokenType::kCloseParen); Next(); return token; } else if (next == '+') { auto token = NewToken(TokenType::kPlus); Next(); return token; } else if (next == '*') { auto token = NewToken(TokenType::kStar); Next(); return token; } else if (next == '<') { auto token = NewToken(TokenType::kLAngle); Next(); return token; } else if (next == '>') { auto token = NewToken(TokenType::kRAngle); Next(); return token; } else if (next == '{') { auto token = NewToken(TokenType::kLCurly); Next(); return token; } 
else if (next == '}') { auto token = NewToken(TokenType::kRCurly); Next(); return token; } else if (next == '[') { auto token = NewToken(TokenType::kLSquare); Next(); return token; } else if (next == ']') { auto token = NewToken(TokenType::kRSquare); Next(); return token; } else if (next == '!') { auto token = NewToken(TokenType::kBang); Next(); return token; } else if (next == '@') { auto token = NewToken(TokenType::kAt); Next(); return token; } else if (next == '?') { auto token = NewToken(TokenType::kQuestion); Next(); return token; } else if (MatchString("meta[")) { return TokenizeMetaRef(); } else if (next == '#') { return TokenizeAttr(); } else if (next == '%') { auto token = NewToken(TokenType::kPercent); Next(); std::stringstream number; while (More() && IsDigit(Peek())) { number << Next(); } auto number_str = number.str(); if (number_str.size()) { auto num_tok = ParseNumber(true, false, number_str); auto span = SpanFrom(token->span->line, token->span->column); token = Token(span, TokenType::kGraph, num_tok->data); } return token; } else if (next == '/') { Next(); if (Peek() == '/') { auto token = NewToken(TokenType::kLineComment); // Consume the / Next(); std::stringstream comment; while (More() && Peek() != '\n') { comment << Next(); } token->data = tvm::String(comment.str()); return token; } else if (Peek() == '*') { // Eat the first /* pair before entering the state machine. Next(); std::string comment; MatchComment(&comment); auto token = NewToken(TokenType::kComment, tvm::String(comment)); return token; } else { return NewToken(TokenType::kDivision); } } else if (IsIdentLetter(next)) { std::stringstream ss; // Due the below code we need to patch // the line/col info to the start of // token. 
int line = this->line; int col = this->col; while (More() && IsIdent(Peek())) { ss << Next(); } std::string keyword = ss.str(); auto it = KEYWORD_TABLE.find(keyword); TokenType token_type; if (it != KEYWORD_TABLE.end()) { token_type = it->second; if (token_type == TokenType::kMatch) { if (More() && Peek() == '?') { Next(); token_type = TokenType::kPartialMatch; } } } else { token_type = TokenType::kIdentifier; } auto span = SpanFrom(line, col); return Token(span, token_type, tvm::String(ss.str())); } else { std::stringstream ss; while (More() && !IsWhitespace(Peek())) { ss << Next(); } auto token = NewToken(TokenType::kUnknown); token->data = tvm::String(ss.str()); return token; } } void Tokenize() { VLOG(9) << "tvm::parser::Tokenize"; while (this->More()) { auto token = TokenizeOnce(); ICHECK(token.defined()); this->tokens.push_back(token); } this->tokens.push_back(NewToken(TokenType::kEndOfFile)); } explicit Tokenizer(const DiagnosticContext& ctx, const Source& source) : diag_ctx(ctx), source_name(source->source_name), pos(0), col(1), line(1), source(source->source), tokens() {} }; std::vector<Token> Condense(const std::vector<Token>& tokens, Token* table) { std::vector<Token> out; bool found_metadata = false; for (size_t i = 0; i < tokens.size(); i++) { auto current = tokens.at(i); switch (current->token_type) { case TokenType::kMetadata: { if (!found_metadata) { found_metadata = true; *table = current; } else { LOG(FATAL) << "duplicate metadata section"; } continue; } case TokenType::kPercent: { auto next = tokens.at(i + 1); if (next->token_type == TokenType::kIdentifier) { // Match this token. 
i += 1; // TODO(@jroesch): merge spans auto tok = Token(current->span, TokenType::kLocal, next->data); ICHECK(tok.defined()); out.push_back(tok); } else if (next->token_type == TokenType::kInteger) { i += 1; auto tok = Token(current->span, TokenType::kGraph, next->data); ICHECK(tok.defined()); out.push_back(tok); } else { ICHECK(current.defined()); out.push_back(current); } continue; } case TokenType::kAt: { auto next = tokens.at(i + 1); if (next->token_type == TokenType::kIdentifier) { // Match this token. i += 1; // TODO(@jroesch): merge spans auto tok = Token(current->span, TokenType::kGlobal, next->data); ICHECK(tok.defined()); out.push_back(tok); } else { ICHECK(current.defined()); out.push_back(current); } continue; } case TokenType::kIdentifier: { std::string str = Downcast<tvm::String>(current->data); Token tok; // TODO(@jroesch): merge spans if (str == "True") { auto data = tvm::Integer(1); tok = Token(current->span, TokenType::kBoolean, data); } else if (str == "False") { auto data = tvm::Integer(0); tok = Token(current->span, TokenType::kBoolean, data); } else if (str == "_") { tok = Token(current->span, TokenType::kUnderscore); } else { tok = current; } out.push_back(tok); continue; } default: { out.push_back(current); continue; } } } return out; } std::pair<std::vector<Token>, Token> Tokenize(const DiagnosticContext& ctx, const Source& source) { auto tokenizer = Tokenizer(ctx, source); tokenizer.Tokenize(); Token meta_table(Span(), TokenType::kUnknown, ObjectRef()); auto tokens = Condense(tokenizer.tokens, &meta_table); for (auto token : tokens) { ICHECK(token.defined()); } return {tokens, meta_table}; } } // namespace parser } // namespace tvm #endif // TVM_PARSER_TOKENIZER_H_
https://github.com/zk-ml/tachikoma
src/printer/doc.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/printer/doc.h * \brief Doc ADT used for pretty printing. * * Reference: Philip Wadler. A Prettier Printer. Journal of Functional Programming'98 */ #ifndef TVM_PRINTER_DOC_H_ #define TVM_PRINTER_DOC_H_ #include <tvm/node/node.h> #include <tvm/runtime/data_type.h> #include <tvm/runtime/object.h> #include <string> #include <type_traits> #include <vector> namespace tvm { /*! * \brief Doc atom node for the ADT. * \sa DocAtom */ class DocAtomNode : public Object { public: static constexpr const char* _type_key = "printer.DocAtom"; TVM_DECLARE_BASE_OBJECT_INFO(DocAtomNode, Object); }; /*! * \brief Managed reference to DocAtomNode. * \sa DocAtomNode. */ class DocAtom : public ObjectRef { public: TVM_DEFINE_OBJECT_REF_METHODS(DocAtom, ObjectRef, DocAtomNode); }; /*! * \brief Stream-like interface for Doc DSL. * * The Doc DSL de-couples the layout decision from the printing decision. * * The layout(code formating) decisions include: * - Change indentation. * - Break single line into multiple ones(subjected to future improvements). */ class Doc { public: /*! \brief default constructor */ Doc() {} /*! * \brief Append right to the end of the current doc stream. 
* \param right The doc to be appended. * \return reference to self. */ Doc& operator<<(const Doc& right); /*! * \brief Append right to the end of the current doc stream. * \param right The doc to be appended. * \return reference to self. * \note pass by value to allow copy elison optimization. */ Doc& operator<<(std::string right); /*! * \brief Append right to the end of the current doc stream. * \param right The doc to be appended. * \return reference to self. */ Doc& operator<<(const DocAtom& right); /*! * \brief Convert value to string via std::ostreamstream * the append to the current doc stream. * \param right The doc to be appended. * \tparam T the type of the value. * \return reference to self. */ template <typename T, typename = typename std::enable_if<!std::is_class<T>::value>::type> Doc& operator<<(const T& value) { std::ostringstream os; os << value; return *this << os.str(); } /*! * \brief Convert the doc stream into string. * \return The string representation. */ std::string str(); /*! * \brief Create a doc that represents text content. * \return The created doc. */ static Doc Text(std::string value); /*! * \brief Create a doc that represents raw text(can have new lines) * \return The created doc. */ static Doc RawText(std::string value); /*! * \brief Create a doc that represents a new line. * \return The created doc. */ static Doc NewLine(int indent = 0); /*! * \brief Create a new doc that adds indentation to everyline of the doc. * \param indent The indent to be added. * \param doc The doc to be indented. * \return The created doc. * \note pass by value to allow copy elison optimization. */ static Doc Indent(int indent, Doc doc); /*! * \brief Create a Doc that represents a string literal. * \param value The content of the string literal. * \param quote The quote in the literal. * \return The created doc. */ static Doc StrLiteral(const std::string& value, std::string quote = "\""); /*! 
* \brief Create a Doc that represents a boolean literal in python syntax. * \param value The bool value. * \return The created doc. */ static Doc PyBoolLiteral(bool value); /*! * \brief Enclose body by brace and add indent. * \param body The body * \param open The open brace. * \param close The close brace. * \param indent amount of indentation. * \return The created doc. */ static Doc Brace(std::string open, const Doc& body, std::string close, int indent = 2); /*! * \brief Create a doc by concatenating together with separator. * \param vec The docs to be concatenated. * \param sep The seperator. * \return The created doc. */ static Doc Concat(const std::vector<Doc>& vec, const Doc& sep = Text(", ")); private: /*! \brief Internal doc stream. */ std::vector<DocAtom> stream_; }; } // namespace tvm #endif // TVM_PRINTER_DOC_H_
https://github.com/zk-ml/tachikoma
src/printer/meta_data.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file tvm/printer/meta_data.h
 * \brief Meta data context for printers.
 */
#ifndef TVM_PRINTER_META_DATA_H_
#define TVM_PRINTER_META_DATA_H_

#include <tvm/node/serialization.h>

#include <string>
#include <unordered_map>

#include "doc.h"

namespace tvm {
/*!
 * \brief Meta data context for Printers
 *
 * This is an important part to enable bi-directional serializability.
 * We use tvm's Node system to build the current IR.
 * It can be hard to design a text format for all the possible nodes
 * as the set of nodes can grow when we do more extensions.
 *
 * Instead of trying to design readable text format for every node,
 * we support a meta data section in the text format.
 * We allow the text format to refer to a node in the meta data section.
 *
 * The meta data section is a json serialized string of an Map<string, Array<NodeRef>>.
 * Each element in the meta data section can be referenced by the text format.
 * Each meta data node is printed in the following format.
 *
 * meta[<type-key-of-node>][<index-in-meta-section>]
 *
 * Specifically, consider the following IR (constructed by python).
 *
 * \code
 *
 * n = tvm.var("n")
 * x = tvm.relay.var("x", shape=(n, 1))
 * f = tvm.relay.Function([x], x)
 * print(f.astext())
 *
 * \endcode
 *
 * The corresponding text format is shown in the following code block.
 *
 * \code
 *
 * fn (%x: Tensor[(meta[Variable][0],), float32]) {
 *   %x
 * }
 * # Meta data section is a json-serialized string
 * # of the following array.
 * # [tvm.var("n")]
 *
 * \endcode
 *
 * Note that we store tvm.var("n") in the meta data section.
 * Since it is stored in the index-0 in the meta data section,
 * we print it as meta[Variable][0].
 *
 * The text parser can recover this object by loading from the corresponding
 * location in the meta data section.
 *
 * This is a design trade-off.
 * It allows us to embed any meta data in the text format,
 * while still being able to tweak the text part of the printed IR easily.
 */
class TextMetaDataContext {
 public:
  /*!
   * \brief Get text representation of meta node.
   * \param node The node to be converted to meta node.
   * \return A string representation of the meta node.
   */
  Doc GetMetaNode(const ObjectRef& node) {
    // Memoized: a node already registered returns its cached doc, so the
    // same object always prints as the same meta[...] reference.
    auto it = meta_repr_.find(node);
    if (it != meta_repr_.end()) {
      return it->second;
    }
    std::string type_key = node->GetTypeKey();
    ICHECK(!type_key.empty());
    // operator[] default-constructs the array for a first-seen type key;
    // the node's index is its position within that per-type array.
    Array<ObjectRef>& mvector = meta_data_[type_key];
    int64_t index = static_cast<int64_t>(mvector.size());
    mvector.push_back(node);
    Doc doc;
    doc << "meta[" << type_key << "][" << index << "]";
    meta_repr_[node] = doc;
    return meta_repr_[node];
  }

  /*!
   * \brief Test whether a node has been put in meta
   * \param node The query node
   * \return whether the node has been put in meta
   */
  bool InMeta(const ObjectRef& node) { return meta_repr_.find(node) != meta_repr_.end(); }

  /*!
   * \brief Print a key value pair
   * \param str The key, emitted as a double-quoted JSON-style key.
   * \param v The value doc.
   * \return The combined `"key": value` doc.
   */
  Doc PrintKeyValue(const std::string& str, const Doc& v) const {
    return Doc() << "\"" << str << "\": " << v;
  }

  /*!
   * \brief Get the metadata section in json format.
   * \return the meta data string.
   */
  Doc GetMetaSection() const {
    // Empty context produces an empty doc so callers can skip the section.
    if (meta_data_.size() == 0) return Doc();
    return Doc::RawText(SaveJSON(Map<String, ObjectRef>(meta_data_.begin(), meta_data_.end())));
  }

  /*! \return whether the meta data context is empty. */
  bool empty() const { return meta_data_.empty(); }

 private:
  /*! \brief additional metadata stored in TVM json format, keyed by node type key */
  std::unordered_map<String, Array<ObjectRef>> meta_data_;
  /*! \brief map from meta data into its string representation (identity-based hashing) */
  std::unordered_map<ObjectRef, Doc, ObjectPtrHash, ObjectPtrEqual> meta_repr_;
};
}  // namespace tvm

#endif  // TVM_PRINTER_META_DATA_H_
https://github.com/zk-ml/tachikoma
src/printer/text_printer.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file text_printer.h * \brief Printer to print out the unified IR text format * that can be parsed by a parser. */ #ifndef TVM_PRINTER_TEXT_PRINTER_H_ #define TVM_PRINTER_TEXT_PRINTER_H_ #include <tvm/ir/module.h> #include <tvm/ir/type_functor.h> #include <tvm/relay/expr_functor.h> #include <tvm/relay/pattern_functor.h> #include <tvm/tir/expr_functor.h> #include <tvm/tir/function.h> #include <tvm/tir/op.h> #include <tvm/tir/stmt_functor.h> #include <tvm/tir/var.h> #include <string> #include <unordered_map> #include <unordered_set> #include <vector> #include "../ir/attr_functor.h" #include "../relay/analysis/dependency_graph.h" #include "doc.h" #include "meta_data.h" #include "text_printer.h" namespace tvm { class TextPrinter; } // namespace tvm namespace tvm { namespace relay { class RelayTextPrinter : public ExprFunctor<Doc(const Expr&)>, public PatternFunctor<Doc(const Pattern&)>, public TypeFunctor<Doc(const Type&)>, public AttrFunctor<Doc(const ObjectRef&)> { public: explicit RelayTextPrinter(bool show_meta_data, TextMetaDataContext* meta, runtime::TypedPackedFunc<std::string(ObjectRef)> annotate) : show_meta_data_(show_meta_data), annotate_(annotate), meta_(meta) {} Doc 
VisitExpr(const Expr& expr) override; virtual Doc VisitLeaf(const Expr& expr); virtual bool CheckVisited(const Expr& expr); /*! * \brief Print additional info about expr in comment. * \param expr The expression. */ Doc PrintOptionalInfo(const Expr& expr); // indent a new body Doc PrintBody(const ObjectRef& node, int indent = 2); // create a new scope by creating a new printer object. This allows temp var // numbers to be reused and prevents hoisted vars from escaping too far Doc PrintScope(const ObjectRef& node); Doc PrintFinal(const ObjectRef& node); /*! * \brief Returns \p attrs printed using the generic attribute visitor, as a sequence * of key=value entries, if any. */ void AppendGenericAttrs(std::vector<Doc>* docs, const Attrs& attrs, bool include_type_key); /*! * \brief Returns \p attrs printed as a sequence of key=value entries, if any. * This is used for call attributes. */ std::vector<Doc> PrintCallAttrs(const Attrs& attrs, const Expr& op); /*! * \brief Returns \p dict_attrs printed as a sequence of key=value entries, if any. * This is used for function definition attributes. */ std::vector<Doc> PrintDictAttrs(const DictAttrs& dict_attrs); std::vector<Doc> PrintDictAttrs(const Map<String, ObjectRef>& dict_attrs); /*! * \brief Returns \p value printed as the rhs of an attribute key=value entry. If \p force_meta * is true then value is printed in meta[...] for irrespective of the show_meta_data_ flag. */ Doc PrintAttributeValue(const ObjectRef& value, bool force_meta = false); /*! * \brief Returns \p attrs printed as a self-contained value, ie wrapped in braces. */ Doc PrintAttrsAsAttributeValue(const Attrs& attrs); /*! * \brief Returns \p map printed as a self-contained value, ie wrapped in braces. */ Doc PrintMapAsAttributeValue(const Map<ObjectRef, ObjectRef>& map); Doc PrintSpan(const Span& span); Doc Print(const ObjectRef& node, bool meta = false, bool try_inline = false); Doc TempVar(int n); Doc AllocTemp(); /*! 
* \brief get a unique name with the corresponding prefix * \param prefix The prefix of the name * \return The returned name. */ Doc GetUniqueName(const std::string& prefix); Doc Print(Kind k); /*! * \brief Allocate name to a type variable. * \param var The input type variable. * \return The corresponding name. */ Doc AllocTypeVar(const TypeVar& var); /*! * \brief Allocate name to a variable. * \param var The input variable. * \return The corresponding name. */ Doc AllocVar(const Var& var); bool IsUnique(const Expr& expr); bool AlwaysInline(const Expr& expr); Doc PrintFunc(const Doc& prefix, const relay::Function& fn); Doc PrintFunc(const Doc& prefix, const BaseFunc& base_func); Doc PrintMod(const IRModule& mod); //------------------------------------ // Overload of Expr printing functions //------------------------------------ Doc PrintExpr(const Expr& expr, bool meta, bool try_inline, bool optional_info = true); // Should only be triggered when op is a free variable being visited for the // first time. 
Doc VisitExpr_(const VarNode* op) final; Doc VisitExpr_(const ConstantNode* op) final; Doc VisitExpr_(const TupleNode* op) final; Doc VisitExpr_(const TupleGetItemNode* op) final; Doc VisitExpr_(const IfNode* op) final; Doc VisitExpr_(const LetNode* op) final; Doc VisitExpr_(const FunctionNode* op) final; Doc VisitExpr_(const GlobalVarNode* op) final; Doc VisitExpr_(const OpNode* op) final; Doc VisitExpr_(const CallNode* op) final; Doc VisitExpr_(const RefCreateNode* op) final; Doc VisitExpr_(const RefReadNode* op) final; Doc VisitExpr_(const RefWriteNode* op) final; Doc VisitExpr_(const MatchNode* op) final; Doc PrintPattern(const Pattern& pattern, bool meta); Doc VisitPattern_(const PatternConstructorNode* p) final; Doc VisitPattern_(const PatternTupleNode* pt) final; Doc VisitPattern_(const PatternWildcardNode* pw) final; Doc VisitPattern_(const PatternVarNode* pv) final; Doc VisitExpr_(const ConstructorNode* n) final; //------------------------------------ // Overload of Type printing functions //------------------------------------ Doc PrintType(const Type& type, bool meta); Doc VisitTypeDefault_(const Object* node) final; Doc VisitType_(const TypeVarNode* node) final; Doc VisitType_(const GlobalTypeVarNode* node) final; Doc VisitType_(const TypeCallNode* node) final; Doc PrintDType(DataType dtype); Doc VisitType_(const TensorTypeNode* node) final; Doc VisitType_(const TupleTypeNode* node) final; Doc VisitType_(const FuncTypeNode* node) final; Doc VisitType_(const RelayRefTypeNode* node) final; Doc VisitType_(const TypeDataNode* node) final; //------------------------------------ // Overload of Attr printing functions //------------------------------------ Doc VisitAttrDefault_(const Object* op) final; Doc VisitAttr_(const ArrayNode* op) final; Doc VisitAttr_(const tir::IntImmNode* op) final; Doc VisitAttr_(const tir::FloatImmNode* op) final; Doc VisitAttr_(const tir::StringImmNode* op) final; private: /*! \brief Whether to print meta data. 
*/ bool show_meta_data_; /*! \brief additional comment function */ runtime::TypedPackedFunc<std::string(ObjectRef)> annotate_; /*! \brief Stack of docs to implement scoped GNFing. */ std::vector<Doc> doc_stack_{}; /*! \brief Set for introduced vars */ std::unordered_set<Expr, ObjectPtrHash, ObjectPtrEqual> var_memo_; /*! \brief Set for exprs have been printed optional information */ std::unordered_set<Expr, ObjectPtrHash, ObjectPtrEqual> opt_info_memo_; /*! \brief Map for result and memo_ diffs for visited expression */ std::unordered_map<Expr, Doc, ObjectPtrHash, ObjectPtrEqual> result_memo_; /*! \brief Map from Expr to Doc */ std::unordered_map<Expr, Doc, ObjectPtrHash, ObjectPtrEqual> memo_; /*! \brief Map from Type to Doc */ std::unordered_map<Type, Doc, ObjectPtrHash, ObjectPtrEqual> memo_type_; /*! \brief Map from Type to Doc */ std::unordered_map<Pattern, Doc, ObjectPtrHash, ObjectPtrEqual> memo_pattern_; /*! \brief name allocation map */ std::unordered_map<std::string, int> name_alloc_map_; /*! \brief meta data context */ TextMetaDataContext* meta_; /*! \brief counter of temporary variable */ size_t temp_var_counter_{0}; /*! \brief whether the printer is currently in an ADT definition */ bool in_adt_def_; /*! \brief arena for dependency graph */ support::Arena arena_; /*! \brief dependency graph of the expr */ DependencyGraph dg_; class AttrPrinter; friend class AttrPrinter; friend class tvm::TextPrinter; }; } // namespace relay } // namespace tvm namespace tvm { namespace tir { /*! 
* \brief Meta node collector * If we decide to put some node into meta, then all the sub-nodes inside * it need to be put in meta as well, since when parsing we need to know * whether two refs are the same */ class MetaCollector : public StmtExprVisitor { public: explicit MetaCollector(TextMetaDataContext* meta) : meta_(meta) {} void Collect(const ObjectRef& n) { // these nodes can be print directly(StringLiteral or use identifier to identify) if (!n.defined() || n.as<StringImmNode>() || n.as<StringObj>() || n.as<SizeVarNode>() || n.as<VarNode>() || n.as<BufferNode>() || n.as<IterVarNode>()) { return; } if (n->IsInstance<StmtNode>()) { VisitStmt(Downcast<Stmt>(n)); } else if (n->IsInstance<PrimExprNode>()) { VisitExpr(Downcast<PrimExpr>(n)); } } void VisitStmt(const Stmt& n) override { meta_->GetMetaNode(n); StmtVisitor::VisitStmt(n); } void VisitExpr(const PrimExpr& n) override { meta_->GetMetaNode(n); ExprVisitor::VisitExpr(n); } private: TextMetaDataContext* meta_; }; class TIRTextPrinter : public StmtFunctor<Doc(const Stmt&)>, public ExprFunctor<Doc(const PrimExpr&)>, public TypeFunctor<Doc(const Type&)> { public: explicit TIRTextPrinter(bool show_meta, TextMetaDataContext* meta) : show_meta_(show_meta), meta_(meta), meta_collector_(meta) {} /*! \brief Print the node */ Doc Print(const ObjectRef& node); /*! \brief Place into `s` the name used in the preceding Print call for `v`. * \param v Var instance to check. Must point to a VarNode visited by Print. * \param s String to receive the name. * \return true when a name re-mapping was found. */ bool GetVarName(::tvm::tir::Var v, std::string* s); private: /*! \brief whether show meta data */ bool show_meta_; /*! \brief meta data context */ TextMetaDataContext* meta_; /*! \brief meta collector */ MetaCollector meta_collector_; /*! \brief Map from Var to Doc */ std::unordered_map<Var, Doc, ObjectPtrHash, ObjectPtrEqual> memo_var_; /*! 
\brief Map from Buffer to Doc */ std::unordered_map<Buffer, Doc, ObjectPtrHash, ObjectPtrEqual> memo_buf_; /*! \brief Map from Buffer to Doc */ std::unordered_map<DataProducer, Doc, ObjectPtrHash, ObjectPtrEqual> memo_producer_; /*! \brief name allocation map */ std::unordered_map<std::string, int> name_alloc_map_; friend class tvm::TextPrinter; Doc VisitExpr_(const IntImmNode* op) override; Doc VisitExpr_(const FloatImmNode* op) override; Doc VisitExpr_(const StringImmNode* op) override; Doc VisitExpr_(const CastNode* op) override; Doc VisitExpr_(const VarNode* op) override; Doc VisitExpr_(const AddNode* op) override; Doc VisitExpr_(const SubNode* op) override; Doc VisitExpr_(const MulNode* op) override; Doc VisitExpr_(const DivNode* op) override; Doc VisitExpr_(const ModNode* op) override; Doc VisitExpr_(const FloorDivNode* op) override; Doc VisitExpr_(const FloorModNode* op) override; Doc VisitExpr_(const MinNode* op) override; Doc VisitExpr_(const MaxNode* op) override; Doc VisitExpr_(const EQNode* op) override; Doc VisitExpr_(const NENode* op) override; Doc VisitExpr_(const LTNode* op) override; Doc VisitExpr_(const LENode* op) override; Doc VisitExpr_(const GTNode* op) override; Doc VisitExpr_(const GENode* op) override; Doc VisitExpr_(const AndNode* op) override; Doc VisitExpr_(const OrNode* op) override; Doc VisitExpr_(const NotNode* op) override; Doc VisitExpr_(const SelectNode* op) override; Doc VisitExpr_(const BufferLoadNode* op) override; Doc VisitExpr_(const ProducerLoadNode* op) override; Doc VisitExpr_(const LoadNode* op) override; Doc VisitExpr_(const RampNode* op) override; Doc VisitExpr_(const BroadcastNode* op) override; Doc VisitExpr_(const LetNode* op) override; Doc VisitExpr_(const CallNode* op) override; Doc VisitExpr_(const ShuffleNode* op) override; Doc VisitExpr_(const ReduceNode* op) override; Doc VisitExprDefault_(const Object* op) override; Doc VisitStmt_(const LetStmtNode* op) override; Doc VisitStmt_(const AttrStmtNode* op) override; 
Doc VisitStmt_(const AssertStmtNode* op) override; Doc VisitStmt_(const StoreNode* op) override; Doc VisitStmt_(const BufferStoreNode* op) override; Doc VisitStmt_(const ProducerStoreNode* op) override; Doc VisitStmt_(const BufferRealizeNode* op) override; Doc VisitStmt_(const ProducerRealizeNode* op) override; Doc VisitStmt_(const AllocateNode* op) override; Doc VisitStmt_(const AllocateConstNode* op) override; Doc VisitStmt_(const DeclBufferNode* op) override; Doc VisitStmt_(const IfThenElseNode* op) override; Doc VisitStmt_(const SeqStmtNode* op) override; Doc VisitStmt_(const EvaluateNode* op) override; Doc VisitStmt_(const ForNode* op) override; Doc VisitStmt_(const WhileNode* op) override; Doc VisitStmt_(const PrefetchNode* op) override; Doc VisitStmt_(const BlockRealizeNode* op) override; Doc VisitStmtDefault_(const Object* op) override; Doc VisitType_(const PrimTypeNode* node) override; Doc VisitType_(const PointerTypeNode* node) override; Doc VisitType_(const TupleTypeNode* node) override; Doc PrintIRModule(const IRModule& module); Doc PrintPrimFunc(const PrimFunc& primFunc); Doc PrintArray(const ArrayNode* op); Doc PrintIterVar(const IterVarNode* op); Doc PrintRange(const RangeNode* op); Doc PrintBuffer(const BufferNode* op); Doc PrintProducer(const DataProducerNode* op); Doc BufferNode2Doc(const BufferNode* op, Doc doc); Doc DataProducerNode2Doc(const DataProducerNode* op, Doc doc); Doc PrintString(const StringObj* op) { return Doc::StrLiteral(op->data); } Doc PrintBufferRegion(const BufferRegionNode* op); /*! * \brief special method to print out data type * \param dtype The data type */ static Doc PrintDType(DataType dtype); /*! * \brief special method to print out const scalar * \param dtype The data type * \param data The pointer to hold the data. 
*/ template <typename T> static Doc PrintConstScalar(DataType dtype, const T& data); Doc GetUniqueName(std::string prefix); Doc AllocVar(const Var& var); Doc AllocConst(const AllocateConst& var); Doc AllocBuf(const Buffer& buffer); Doc AllocProducer(const DataProducer& buffer); /*! * \brief special method to render vectors of docs with a separator * \param vec vector of docs * \param sep separator */ static Doc PrintSep(const std::vector<Doc>& vec, const Doc& sep); Doc PrintBody(const Stmt& body, bool indent = true); }; String AsTVMScript(const ObjectRef& mod, const String& tir_prefix = "T", bool show_meta = false); String AsTVMScriptWithDiagnostic(const ObjectRef& mod, const String& tir_prefix, bool show_meta, runtime::TypedPackedFunc<std::string(Stmt)> annotate); } // namespace tir } // namespace tvm namespace tvm { class TextPrinter { public: explicit TextPrinter(bool show_meta_data, const runtime::TypedPackedFunc<std::string(ObjectRef)>& annotate, bool show_warning = true) : show_meta_data_(show_meta_data), show_warning_(show_warning), annotate_(annotate), relay_text_printer_(show_meta_data, &meta_, annotate), tir_text_printer_(show_meta_data, &meta_) {} /*! \brief whether show meta data */ bool show_meta_data_; /*! \brief whether show the meta data warning message */ bool show_warning_; /*! \brief meta data context */ TextMetaDataContext meta_; /*! \brief additional comment function */ runtime::TypedPackedFunc<std::string(ObjectRef)> annotate_; /*! \brief Relay Text Printer */ relay::RelayTextPrinter relay_text_printer_; /*! 
\brief TIR Text Printer */ tir::TIRTextPrinter tir_text_printer_; bool GetVarName(::tvm::tir::Var v, std::string* s) { return tir_text_printer_.GetVarName(v, s); } Doc PrintFinal(const ObjectRef& node) { Doc doc; if (node.defined() && node->IsInstance<IRModuleNode>()) { doc << PrintMod(Downcast<IRModule>(node)); } else if (node.defined() && (node->IsInstance<tir::PrimFuncNode>() || node->IsInstance<PrimExprNode>() || node->IsInstance<tir::StmtNode>())) { doc << tir_text_printer_.Print(node); } else { doc << relay_text_printer_.PrintFinal(node); } if (!meta_.empty()) { doc << Doc::NewLine(); if (show_meta_data_) { doc << "#[metadata]" << Doc::NewLine() << meta_.GetMetaSection(); } else if (show_warning_) { doc << "/* For debugging purposes the metadata section has been omitted." << Doc::NewLine() << " * If you would like to see the full metadata section you can set the " << Doc::NewLine() << " * option to `True` when invoking `astext`. " << Doc::NewLine() << " */"; } } return doc; } Doc PrintMod(const IRModule& mod); }; } // namespace tvm #endif // TVM_PRINTER_TEXT_PRINTER_H_
https://github.com/zk-ml/tachikoma
src/relay/analysis/annotated_region_set.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/relay/transforms/annotated_region_set.h * \brief Define data structures to extract and manipulate regions from * a relay function. Regions are denoted by region_begin and region_end * annotations that exist on all the input and output edges of the region. */ #ifndef TVM_RELAY_ANALYSIS_ANNOTATED_REGION_SET_H_ #define TVM_RELAY_ANALYSIS_ANNOTATED_REGION_SET_H_ #include <tvm/ir/error.h> #include <tvm/relay/analysis.h> #include <tvm/relay/attrs/annotation.h> #include <tvm/relay/expr.h> #include <tvm/relay/expr_functor.h> #include <tvm/relay/transform.h> #include <list> #include <string> #include <unordered_set> #include <utility> #include <vector> namespace tvm { namespace relay { class AnnotatedRegion; class AnnotatedRegionSet; class AnnotatedRegionNode : public Object { public: void VisitAttrs(AttrVisitor* v) { v->Visit("id", &id_); v->Visit("target", &target_); Array<Expr> nodes_array(nodes_.begin(), nodes_.end()); v->Visit("nodes", &nodes_array); Array<Expr> args_array(ins_.begin(), ins_.end()); v->Visit("args", &args_array); Array<Expr> rets_array(outs_.begin(), outs_.end()); v->Visit("rets", &rets_array); } /*! \brief Get the region ID. */ int GetID() const { return id_; } /*! 
\brief Get the region name. */ std::string GetName() const { return func_name_; } /*! \brief Get the region target. */ std::string GetTarget() const { return target_; } /*! \brief Get the region's inputs. */ std::list<Expr> GetInputs() const { return ins_; } /*! \brief Get the region's outputs. */ std::list<Expr> GetOutputs() const { return outs_; } /*! \brief Get the region's nodes. */ std::unordered_set<Expr, ObjectPtrHash, ObjectPtrEqual> GetNodes() const { return nodes_; } static constexpr const char* _type_key = "relay.AnnotatedRegion"; TVM_DECLARE_FINAL_OBJECT_INFO(AnnotatedRegionNode, Object); protected: /*! \brief The region ID. */ int id_{-1}; /*! \brief The func name. */ std::string func_name_ = "default"; /*! \brief The target for this region. */ std::string target_ = "default"; /*! \brief The inputs to this region. */ std::list<Expr> ins_; /*! \brief The outputs of this region */ std::list<Expr> outs_; /*! \brief Nodes in this region. */ std::unordered_set<Expr, ObjectPtrHash, ObjectPtrEqual> nodes_; friend class AnnotatedRegionSet; friend class AnnotatedRegionSetNode; }; /*! * \brief An object to hold the properties of a region as used by the * AnnotatedRegionSet class. This should be considered read-only. */ class AnnotatedRegion : public ObjectRef { public: AnnotatedRegion() { auto n = make_object<AnnotatedRegionNode>(); data_ = std::move(n); } /*! * \brief Construct from an object pointer. * \param n The object pointer. */ explicit AnnotatedRegion(ObjectPtr<Object> n) : ObjectRef(n) {} /*! \return Mutable pointers to the node. */ AnnotatedRegionNode* operator->() const { auto* ptr = get_mutable(); ICHECK(ptr != nullptr); return static_cast<AnnotatedRegionNode*>(ptr); } }; class AnnotatedRegionSetNode : public Object { using UnorderedRegionSet = std::unordered_set<AnnotatedRegion, ObjectPtrHash, ObjectPtrEqual>; // Create iterator alias for a RegionSet object. 
using iterator = UnorderedRegionSet::iterator; using const_iterator = UnorderedRegionSet::const_iterator; public: /*! \brief Default constructor. */ AnnotatedRegionSetNode() = default; /*! \return The begin iterator */ iterator begin() { return regions_.begin(); } /*! \return The end iterator */ iterator end() { return regions_.end(); } /*! \return The const begin iterator */ const_iterator begin() const { return regions_.begin(); } /*! \return The const end iterator */ const_iterator end() const { return regions_.end(); } /*! * \brief Get the region that an expression belongs to. * * \param expr Which expr to get the region for. * * \return A pointer to the region, nullptr if the expression * doesn't belong to a region. */ AnnotatedRegion GetRegion(const Expr& expr) const; /*! * \brief Merge src region into dest region. * * \param src The region to merge - will be erased. * \param dest The region into which src will be merged. */ void MergeRegions(AnnotatedRegion src, AnnotatedRegion dest); void VisitAttrs(AttrVisitor* v) { Array<AnnotatedRegion> regions_array(regions_.begin(), regions_.end()); v->Visit("regions", &regions_array); } static constexpr const char* _type_key = "relay.AnnotatedRegionSet"; TVM_DECLARE_FINAL_OBJECT_INFO(AnnotatedRegionSetNode, Object); private: /*! * \brief Add an expression to a region. * * \param dest The region to add the expression to. * \param expr The expression. */ void AddToRegion(AnnotatedRegion dest, const Expr& expr); /*! * \brief Make a new region for a target. * * \return The new region. */ AnnotatedRegion MakeRegion(const std::string& func_name, const std::string& target); std::unordered_set<AnnotatedRegion, ObjectPtrHash, ObjectPtrEqual> regions_; /*! \brief The next region ID to assign. */ int region_id_{0}; friend class AnnotatedRegionSet; }; /*! * \brief A class to hold a set of regions produced from a relay expression * that contains 'region_begin' and 'region_end' style annotations. The * regions should be disjoint. 
The class provides both a method to construct * the region set of a given relay expression as well as additional methods * to update and query regions. */ class AnnotatedRegionSet : public ObjectRef { using UnorderedRegionSet = std::unordered_set<AnnotatedRegion, ObjectPtrHash, ObjectPtrEqual>; // Create iterator alias for a RegionSet object. using iterator = UnorderedRegionSet::iterator; using const_iterator = UnorderedRegionSet::const_iterator; public: AnnotatedRegionSet() { auto n = make_object<AnnotatedRegionSetNode>(); data_ = std::move(n); } /*! * \brief Construct from an object pointer. * * \param n The object pointer. */ explicit AnnotatedRegionSet(ObjectPtr<Object> n) : ObjectRef(n) {} /*! \return The begin iterator. */ iterator begin() { auto* n = operator->(); ICHECK(n); return n->begin(); } /*! \return The end iterator. */ iterator end() { auto* n = operator->(); ICHECK(n); return n->end(); } /*! \return The begin iterator. */ const_iterator begin() const { const auto* n = operator->(); ICHECK(n); return n->begin(); } /*! \return The end iterator. */ const_iterator end() const { const auto* n = operator->(); ICHECK(n); return n->end(); } /*! \return mutable pointers to the node. */ AnnotatedRegionSetNode* operator->() const { auto* ptr = get_mutable(); ICHECK(ptr != nullptr); return static_cast<AnnotatedRegionSetNode*>(ptr); } /*! \return The region an expression belongs to. */ AnnotatedRegion operator[](const Expr& expr) { const auto* n = operator->(); ICHECK(n); return n->GetRegion(expr); } /*! \brief Create a RegionSet from a relay expression. * * \param expr The relay expr from which to construct the set. * \param begin Region begin annotation operator. * \param end Region end annotation operator. * \param func_name function name * * \return The created RegionSet for the expression. */ static AnnotatedRegionSet Create(const Expr& expr, const Op& begin, const Op& end, const std::string& func_name = "default"); private: /*! 
\brief Helper class to construct a RegionSet from an expr.*/ class Creator; }; } // namespace relay } // namespace tvm #endif // TVM_RELAY_ANALYSIS_ANNOTATED_REGION_SET_H_
https://github.com/zk-ml/tachikoma
src/relay/analysis/call_graph.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file src/relay/analysis/call_graph.h * \brief Define data structures for the call graph of a IRModule. It borrows * the idea how LLVM constructs CallGraph. * * https://llvm.org/doxygen/CallGraph_8h_source.html */ #ifndef TVM_RELAY_ANALYSIS_CALL_GRAPH_H_ #define TVM_RELAY_ANALYSIS_CALL_GRAPH_H_ #include <tvm/ir/module.h> #include <tvm/relay/expr.h> #include <tvm/relay/function.h> #include <tvm/runtime/object.h> #include <memory> #include <string> #include <unordered_map> #include <unordered_set> #include <utility> #include <vector> namespace tvm { namespace relay { class CallGraphEntry; class CallGraph; class CallGraphNode : public Object { using CallGraphMap = std::unordered_map<GlobalVar, std::unique_ptr<CallGraphEntry>, ObjectPtrHash, ObjectPtrEqual>; // Create iterator alias for a CallGraphNode object. using iterator = CallGraphMap::iterator; using const_iterator = CallGraphMap::const_iterator; public: /*! \brief The IR module for creating a CallGraphNode. */ IRModule module; /*! \brief Default constructor. */ CallGraphNode() {} void VisitAttrs(AttrVisitor* v) { v->Visit("module", &module); } /*! * \brief Print the call graph. * * \param os The stream for printing. 
*/ void Print(std::ostream& os) const; /*! \return The begin iterator. */ iterator begin() { return call_graph_.begin(); } /*! \return The end iterator. */ iterator end() { return call_graph_.end(); } /*! \return The begin iterator. */ const_iterator begin() const { return call_graph_.begin(); } /*! \return The end iterator. */ const_iterator end() const { return call_graph_.end(); } /*! * \brief Get an element from the CallGraphNode using a GlobalVar. * * \param gv The GlobalVar used for indexing. * * \return The fetched element. */ const CallGraphEntry* operator[](const GlobalVar& gv) const; /*! * \brief Get an element from the CallGraphNode using a GlobalVar. * * \param gv The GlobalVar used for indexing. * * \return The fetched element. */ CallGraphEntry* operator[](const GlobalVar& gv); /*! * \brief Get an element from the CallGraphNode using the global function name. * * \param gvar_name The global function name used for indexing. * * \return The fetched element. */ const CallGraphEntry* operator[](const std::string& gvar_name) const { return (*this)[module->GetGlobalVar(gvar_name)]; } /*! * \brief Get an element from the CallGraphNode using the global function name. * * \param gvar_name The global function name used for indexing. * * \return The fetched element. */ CallGraphEntry* operator[](const std::string& gvar_name) { return (*this)[module->GetGlobalVar(gvar_name)]; } /*! * \brief Get the global function corresponding to the variable. * * \param var The global variable. * * \return The found global function. */ BaseFunc GetGlobalFunction(const GlobalVar& var) const; /*! * \brief Get the entries/root nodes of CallGraphNode. * * Entry functions are never referenced by other functions. * Note these functions can be recursive as well. * * \return The list of CallGraphEntry that represent entry nodes. */ std::vector<CallGraphEntry*> GetEntryGlobals() const; /*! * \brief Remove a GlobalVar in a given CallGraphEntry from the current * IR module. 
* * \param cg_node The CallGraphEntry that contains a global function to be * removed. * \param update_call_graph Indicate if we will update the CallGraph as well * since updating is costly. We are only able to remove a leaf function * when update_call_graph is disabled because the edges pointing to * functions being removed are not updated. * * \return The GlobalVar removed from the current module. */ GlobalVar RemoveGlobalVarFromModule(CallGraphEntry* cg_node, bool update_call_graph = false); /*! * \brief Lookup a GlobalVar for the CallGraphNode. It creates an entry for * the GlobalVar if it doesn't exist. * * \param gv The GlobalVar for query. * * \return The queried entry. */ CallGraphEntry* LookupGlobalVar(const GlobalVar& gv); /*! * \brief Get the entries from the CallGraphNode in the topological order. * * This is useful for various module-level optimizations/analysis. For example, * inlining requires the correct order of the functions being processed, i.e. * callee should be always handled before callers. * * \return The list of collected entries that are sorted in the topological order. */ std::vector<CallGraphEntry*> TopologicalOrder() const; static constexpr const char* _type_key = "relay.CallGraph"; TVM_DECLARE_FINAL_OBJECT_INFO(CallGraphNode, Object); private: /*! * \brief Create a CallGraphEntry for a global function and add it to the * CallGraphNode. * * \param gv The global var. * \param func The global function corresponding to `gv`. */ void AddToCallGraph(const GlobalVar& gv, const Function& func); /*! \brief A record contains GlobalVar to CallGraphEntry mapping. */ CallGraphMap call_graph_; friend CallGraph; }; /*! * \brief The class that represents the call graph of a Relay IR module. It also * provides a variety of utility functions for users to query, view, and update * a call graph. 
*/ class CallGraph : public ObjectRef { using CallGraphMap = std::unordered_map<GlobalVar, std::unique_ptr<CallGraphEntry>, ObjectPtrHash, ObjectPtrEqual>; // Create iterator alias for a CallGraph object. using iterator = CallGraphMap::iterator; using const_iterator = CallGraphMap::const_iterator; public: /*! * \brief Construct a CallGraph from a IR module. * * \param module The IR module */ explicit CallGraph(IRModule module); /*! * \brief Construct from an object pointer. * \param n The object pointer. */ explicit CallGraph(ObjectPtr<Object> n) : ObjectRef(n) {} /*! \return The begin iterator. */ iterator begin() { auto* n = operator->(); ICHECK(n); return n->begin(); } /*! \return The end iterator. */ iterator end() { auto* n = operator->(); ICHECK(n); return n->end(); } /*! \return The begin iterator. */ const_iterator begin() const { const auto* n = operator->(); ICHECK(n); return n->begin(); } /*! \return The end iterator. */ const_iterator end() const { const auto* n = operator->(); ICHECK(n); return n->end(); } /*! * \brief Get an element from the CallGraph using a GlobalVar. * * \param gv The GlobalVar used for indexing. * * \return The fetched element. */ const CallGraphEntry* operator[](const GlobalVar& gv) const { const auto* n = operator->(); ICHECK(n); return (*n)[gv]; } /*! * \brief Get an element from the CallGraph using a GlobalVar. * * \param gv The GlobalVar used for indexing. * * \return The fetched element. */ CallGraphEntry* operator[](const GlobalVar& gv) { auto* n = operator->(); ICHECK(n); return (*n)[gv]; } /*! * \brief Get an element from the CallGraph using the global function name. * * \param gvar_name The global function name used for indexing. * * \return The fetched element. */ const CallGraphEntry* operator[](const std::string& gvar_name) const { const auto* n = operator->(); ICHECK(n); return (*n)[gvar_name]; } /*! * \brief Get an element from the CallGraph using the global function name. 
* * \param gvar_name The global function name used for indexing. * * \return The fetched element. */ CallGraphEntry* operator[](const std::string& gvar_name) { auto* n = operator->(); ICHECK(n); return (*n)[gvar_name]; } /*! \return mutable pointers to the node. */ CallGraphNode* operator->() const { auto* ptr = get_mutable(); ICHECK(ptr != nullptr); return static_cast<CallGraphNode*>(ptr); } private: /*! \brief Overload the << operator to print a call graph. */ friend std::ostream& operator<<(std::ostream& os, const CallGraph&); }; /*! * \brief A node in the call graph. It maintains the edges from a caller to * all callees. */ class CallGraphEntry { public: using CallGraphEntryPair = std::pair<GlobalVar, CallGraphEntry*>; using CallGraphEntryVector = std::vector<CallGraphEntryPair>; using CallGraphEntrySet = std::unordered_set<const CallGraphEntry*>; // Create iterator alias for a CallGraphEntry object. using iterator = std::vector<CallGraphEntryPair>::iterator; using const_iterator = std::vector<CallGraphEntryPair>::const_iterator; /*! * \brief Construct from a GlobalVar. * * \param gv The GlobalVar to create a CallGraphEntry. */ explicit CallGraphEntry(const GlobalVar& gv) : global_(gv) {} /*! * \brief Delete copy constructor. */ CallGraphEntry(const CallGraphEntry&) = delete; /*! \brief Delete assignment. */ CallGraphEntry& operator=(const CallGraphEntry&) = delete; /*! \return The begin iterator */ iterator begin() { return called_globals_.begin(); } /*! \return The end iterator */ iterator end() { return called_globals_.end(); } /*! \return The const begin iterator */ const_iterator begin() const { return called_globals_.begin(); } /*! \return The const end iterator */ const_iterator end() const { return called_globals_.end(); } /*! * \brief Return if the list of called nodes is empty. * * \return true if the list is empty. Otherwise, false. */ bool empty() const { return called_globals_.empty(); } /*! 
* \brief Return the size of the list that represents the nodes are called by * the current node. * * \return The number of called nodes. */ uint32_t size() const { return static_cast<uint32_t>(called_globals_.size()); } /*! * \brief Fetch the i-th CallGraphEntry from the list of nodes that are called * by the current function. * * \param i The index. * * \return The fetched CallGraphEntry. */ CallGraphEntry* operator[](size_t i) const { ICHECK_LT(i, called_globals_.size()) << "Invalid Index"; return called_globals_[i].second; } /*! * \brief Print the call graph that is stemmed from the current CallGraphEntry. * * \param os The stream for printing. */ void Print(std::ostream& os) const; /*! * \brief Return the number of times the global function is referenced. * * \return The count. */ uint32_t GetRefCount() const { return ref_cnt_; } /*! * \brief Return the GlobalVar stored in the current CallGraphEntry. * * \return The GlobalVar. */ GlobalVar GetGlobalVar() const { return global_; } /*! * \brief Return the name hint of the GlobalVar stored in the CallGraphEntry. * * \return The name hint of the global function. */ std::string GetNameHint() const { return global_->name_hint; } /*! * \brief Return if the global function corresponding to the current * CallGraphEntry is a recursive function. * * \return true if it is recursive. Otherwise, false. */ bool IsRecursive() const { return is_recursive_; } /*! * \brief Return if the global function corresponding to the current * CallGraphEntry is both a recursive function and an entry function. This type * of function only has one reference which is called by itself. * * \return true if it is both a recursive function and an entry. Otherwise, false. */ bool IsRecursiveEntry() const { return GetRefCount() == 1 && IsRecursive(); } /*! * \brief Return the topological order of the CallGraphEntry. * * \param visited A set of CallGraphEntry objects that have been visited. 
* * \return The list of CallGraphEntry that is represented in topological order. */ std::vector<CallGraphEntry*> TopologicalOrder( CallGraphEntrySet* visited = new CallGraphEntrySet()) const; /*! * \brief Remove all edges from the current CallGraphEntry to any global * function it calls. */ void CleanCallGraphEntries(); /*! * \brief Add a node to the list of nodes that are being called by the current * global function. * * \param cg_node The CallGraphEntry that will be added to the call list. */ void AddCalledGlobal(CallGraphEntry* cg_node); /*! * \brief Remove a call edge to the global function from the current * function. * * \param callee The function that is being called. */ void RemoveCallTo(const GlobalVar& callee); /*! * \brief Remove all the edges that represent that calls to the global function * stored in a given CallGraphEntry. * * \param callee The function that is being called. */ void RemoveAllCallTo(CallGraphEntry* callee); private: /*! \brief Decrement the reference counter by 1. */ void DecRef() { ICHECK_GT(ref_cnt_, 0); --ref_cnt_; } /*! \brief Increment the reference counter by 1. */ void IncRef() { ++ref_cnt_; } /*! * \brief Mark if the global function stored in the CallGraphEntry is * recursive function. */ bool is_recursive_{false}; /*! \brief Count the number of times the global function is referenced. */ uint32_t ref_cnt_{0}; /*! \brief The GlobalVar stored in the current CallGraphEntry. */ GlobalVar global_; /*! \brief The list of entries called by the current CallGraphEntry. */ CallGraphEntryVector called_globals_; friend class CallGraph; /*! \brief Overload the << operator to print a call graph node. */ friend std::ostream& operator<<(std::ostream& os, const CallGraphEntry&); }; } // namespace relay } // namespace tvm #endif // TVM_RELAY_ANALYSIS_CALL_GRAPH_H_
https://github.com/zk-ml/tachikoma
src/relay/analysis/dependency_graph.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file src/relay/analysis/dependency_graph.h
 * \brief Create a dependency graph of Relay expressions.
 */
#ifndef TVM_RELAY_ANALYSIS_DEPENDENCY_GRAPH_H_
#define TVM_RELAY_ANALYSIS_DEPENDENCY_GRAPH_H_

#include <tvm/relay/expr.h>

#include <unordered_map>
#include <vector>

#include "../../support/arena.h"
#include "../transforms/let_list.h"

namespace tvm {
namespace relay {

using support::LinkedList;
using support::LinkNode;

/*
 * DependencyGraph tracks the inputs and outputs of each Expr.
 * Additionally, dummy nodes are created to model scope boundaries.
 * This allows us to traverse the graph in reverse (output-to-input) order.
 */
class DependencyGraph {
 public:
  /*! \brief A node in the graph. */
  struct Node {
    // Marks a scope boundary. Used only when calculating scopes, not when
    // constructing the dependency graph itself.
    bool new_scope = false;
    // incoming edges
    LinkedList<Node*> children;
    // outgoing edges
    LinkedList<Node*> parents;
  };

  /*! \brief Maps a Relay Expr to its node in the dependency graph. */
  std::unordered_map<Expr, Node*, ObjectPtrHash, ObjectPtrEqual> expr_node;

  /*! \brief The dependency graph nodes in post-DFS order. */
  std::vector<Node*> post_dfs_order;

  /*!
   * \brief Create a dependency graph.
   * \param arena The arena used for node allocation; must outlive the graph.
   * \param body The body of the expression from which to create the graph.
   * \return The constructed dependency graph.
   */
  static DependencyGraph Create(support::Arena* arena, const Expr& body);

 private:
  // Implementation detail: visitor that builds the graph (defined in the .cc file).
  class Creator;
};

}  // namespace relay
}  // namespace tvm
#endif  // TVM_RELAY_ANALYSIS_DEPENDENCY_GRAPH_H_
https://github.com/zk-ml/tachikoma
src/relay/analysis/type_solver.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file type_solver.h
 * \brief Solver logic for type inference.
 */
#ifndef TVM_RELAY_ANALYSIS_TYPE_SOLVER_H_
#define TVM_RELAY_ANALYSIS_TYPE_SOLVER_H_

#include <tvm/ir/error.h>
#include <tvm/relay/analysis.h>
#include <tvm/relay/expr.h>
#include <tvm/relay/type.h>

#include <queue>
#include <unordered_map>
#include <unordered_set>
#include <vector>

#include "../../support/arena.h"

namespace tvm {
namespace relay {

using support::LinkedList;
using support::LinkNode;

/*!
 * \brief Interface of the type solver used in type inference.
 *
 * TypeSolver works on a list of constraints among incomplete types.
 * The user populates the constraints via AddConstraint and Assign.
 * Then Solve can be called to try to resolve the unknowns.
 *
 * This can be viewed as a "type program" (computational graph) of types, where
 * the type constraints are operators of the graph and the incomplete
 * types are intermediate values of the graph.
 * If all the input types are concretely known, we should be able to
 * just run a forward pass on the "type program" to get all the types.
 *
 * The list-of-constraints representation means we are storing it as a bipartite
 * graph instead of a DAG. This is because some constraints can go in both directions.
 * TypeSolver can take advantage of bidirectional constraints to deduce input
 * values given output ones. Nevertheless, we should keep in mind that
 * there is a "forward direction" that the TypeSolver should take advantage of.
 */
class TypeSolver {
 public:
  /*!
   * \brief Construct a solver.
   * \param current_func The global var of the function currently being inferred.
   * \param diag_ctx The diagnostic context used to report errors.
   */
  TypeSolver(const GlobalVar& current_func, DiagnosticContext diag_ctx);
  ~TypeSolver();
  /*!
   * \brief Add a type constraint to the solver.
   * \param constraint The constraint to be added.
   * \param span The span at which the constraint was incurred.
   */
  void AddConstraint(const TypeConstraint& constraint, const Span& span);
  /*!
   * \brief Resolve type to the solution type in the solver.
   * \param type The type to be resolved.
   * \return The resolved type.
   */
  Type Resolve(const Type& type);
  /*!
   * \brief Start to solve the types using the currently known information.
   * \return Whether all the incomplete types have been fully resolved.
   */
  bool Solve();
  /*!
   * \brief Unify lhs and rhs.
   * \param lhs The left operand.
   * \param rhs The right operand.
   * \param span The span at which the unification problem arose.
   * \param assign_lhs Whether to assign the unified result back to lhs.
   * \param assign_rhs Whether to assign the unified result back to rhs.
   * \return The unified type.
   */
  Type Unify(const Type& lhs, const Type& rhs, const Span& span, bool assign_lhs = true,
             bool assign_rhs = true);
  /*!
   * \brief Report a diagnostic.
   * \param diag The diagnostic to report.
   */
  void Emit(const Diagnostic& diag) { diag_ctx_.Emit(diag); }

 private:
  class OccursChecker;
  class Unifier;
  class Resolver;
  class Propagator;
  class Merger;
  class Reporter;
  struct TypeNode;
  struct RelationNode;
  // Internally the solver maintains a bipartite graph of Relations and Types.
  // All the objects in the structure are managed by an arena allocator
  // which releases the memory upon destruction of the type solver.
  /*!
   * \brief type node struct
   * TypeNode implements a union-find data structure (via parent)
   * that unifies the same types to the same resolved_type.
   *
   * It also contains a collection of links to related Relations,
   * which is stored in rel_set.
   */
  struct TypeNode {
    /*! \brief The final resolved type */
    Type resolved_type;
    /*! \brief type node in the union find algorithm */
    TypeNode* parent{nullptr};
    /*! \brief set of relations that is related to this type node */
    std::unordered_set<RelationNode*> rel_set;
    /*!
     * \brief Find the root type node, performing path compression.
     * \return The root type node.
     */
    TypeNode* FindRoot() {
      // fast path
      if (this->parent == nullptr) return this;
      // slow path with path compression.
      TypeNode* root = this;
      while (root->parent != nullptr) {
        root = root->parent;
      }
      // second pass: point every node on the chain directly at the root.
      for (TypeNode* p = this; p != root;) {
        TypeNode* parent = p->parent;
        p->parent = root;
        p = parent;
      }
      return root;
    }
  };

  /*! \brief relation node */
  struct RelationNode {
    /*! \brief Whether the relation is in the queue to be solved */
    bool inqueue{false};
    /*! \brief Whether the relation is resolved */
    bool resolved{false};
    /*! \brief The corresponding type relation */
    TypeRelation rel;
    /*! \brief list of types related to this relation */
    LinkedList<TypeNode*> type_list;
    /*! \brief The location this type relation originated from. */
    Span span;
  };

  /*! \brief A simple union find between shapes. */
  tvm::Map<IndexExpr, IndexExpr> shape_uf_;
  /*! \brief List of all allocated type nodes */
  std::vector<TypeNode*> type_nodes_;
  /*! \brief List of all allocated relation nodes */
  std::vector<RelationNode*> rel_nodes_;
  /*! \brief Number of resolved relations */
  size_t num_resolved_rels_{0};
  /*! \brief map from types to type nodes. */
  std::unordered_map<Type, TypeNode*, ObjectPtrHash, ObjectPtrEqual> tmap_;
  /*! \brief Internal queue of relations pending update */
  std::queue<RelationNode*> update_queue_;
  /*! \brief allocator of all the internal node objects */
  support::Arena arena_;
  /*! \brief Reporter that reports back to self */
  TypeReporter reporter_;
  /*! \brief The global var representing the current function. */
  GlobalVar current_func_;
  /*! \brief The diagnostic context. */
  DiagnosticContext diag_ctx_;
  /*! \brief The module. */
  IRModule module_;

  /*!
   * \brief Get the TypeNode that corresponds to t;
   *  if it does not exist, create a new one.
   * \param t The type to look up.
   * \return The (root) type node for t.
   */
  TypeNode* GetTypeNode(const Type& t) {
    auto it = tmap_.find(t);
    if (it != tmap_.end()) {
      return it->second->FindRoot();
    } else {
      TypeNode* n = arena_.make<TypeNode>();
      type_nodes_.push_back(n);
      n->resolved_type = t;
      tmap_[t] = n;
      return n;
    }
  }
  /*!
   * \brief Add relation node rel to the update queue (idempotent).
   * \param rel The relation node.
   */
  void AddToQueue(RelationNode* rel) {
    if (rel->inqueue) return;
    ICHECK(!rel->resolved);
    rel->inqueue = true;
    update_queue_.push(rel);
  }
  /*!
   * \brief Merge the src type node into dst.
   * \param src The source operand.
   * \param dst The dst operand.
   */
  void MergeFromTo(TypeNode* src, TypeNode* dst);
};

}  // namespace relay
}  // namespace tvm
#endif  // TVM_RELAY_ANALYSIS_TYPE_SOLVER_H_
https://github.com/zk-ml/tachikoma
src/relay/backend/aot/aot_lower_main.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

#ifndef TVM_RELAY_BACKEND_AOT_AOT_LOWER_MAIN_H_
#define TVM_RELAY_BACKEND_AOT_AOT_LOWER_MAIN_H_

#include <tvm/ir/transform.h>
#include <tvm/target/compilation_config.h>

#include <tuple>
#include <unordered_map>
#include <vector>

#include "../utils.h"

namespace tvm {
namespace relay {
namespace backend {
namespace aot {

/*! \brief Maps each Relay expression to its storage information. */
using StorageMap =
    std::unordered_map<Expr, StorageInfo, runtime::ObjectPtrHash, runtime::ObjectPtrEqual>;

/*! \brief Exposed for testing, part of the implementation of AOTLowerMain.
 * \param func The function for which to create storage.
 * \return A tuple of the storage map and the storage IDs returned by the function.
 */
std::tuple<StorageMap, std::vector<int>> CreateStorage(const Function& func);

/*! \brief Lower the Relay main function into TIR for use with the AOT executor.
 *
 * This pass expects that all operators have already been lowered to TIR and
 * so only Calls to 'call_lowered' are present in main.
 *
 * \param mod_name The name of the module.
 * \param config The compilation config.
 * \param call_type The call type to use when calling functions.
 * \return The lowering pass.
 */
transform::Pass AOTLowerMain(String mod_name, tvm::CompilationConfig config, CallType call_type);

}  // namespace aot
}  // namespace backend
}  // namespace relay
}  // namespace tvm

#endif  // TVM_RELAY_BACKEND_AOT_AOT_LOWER_MAIN_H_
https://github.com/zk-ml/tachikoma
src/relay/backend/aot/create_executor_metadata.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

#ifndef TVM_RELAY_BACKEND_AOT_CREATE_EXECUTOR_METADATA_H_
#define TVM_RELAY_BACKEND_AOT_CREATE_EXECUTOR_METADATA_H_

#include <tvm/ir/module.h>
#include <tvm/relay/executor.h>
#include <tvm/runtime/container/string.h>

#include "../utils.h"

namespace tvm {
namespace relay {
namespace backend {
namespace aot {

/*! \brief Create the ExecutorCodegenMetadata needed for AOT execution.
 * \param mod The module.
 * \param mod_name The module name.
 * \param executor The executor configuration.
 * \param workspace_byte_alignment The alignment of the workspace pool.
 * \param constant_byte_alignment The alignment of the constant pool.
 * \return The ExecutorCodegenMetadata.
 */
ExecutorCodegenMetadata CreateExecutorMetadata(const IRModule& mod, String mod_name,
                                               Executor executor, Integer workspace_byte_alignment,
                                               Integer constant_byte_alignment);

}  // namespace aot
}  // namespace backend
}  // namespace relay
}  // namespace tvm

#endif  // TVM_RELAY_BACKEND_AOT_CREATE_EXECUTOR_METADATA_H_
https://github.com/zk-ml/tachikoma
src/relay/backend/aot/create_function_metadata.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

#ifndef TVM_RELAY_BACKEND_AOT_CREATE_FUNCTION_METADATA_H_
#define TVM_RELAY_BACKEND_AOT_CREATE_FUNCTION_METADATA_H_

#include <tvm/ir/module.h>
#include <tvm/runtime/container/map.h>
#include <tvm/runtime/container/string.h>

#include "../utils.h"

namespace tvm {
namespace relay {
namespace backend {
namespace aot {

/*! \brief Create FunctionInfo metadata for all the PrimFuncs in a module lowered
 * for AOT execution.
 * \param mod The module.
 * \param workspace_byte_alignment The alignment of the workspace pool.
 * \param constant_byte_alignment The alignment of the constant pool.
 * \return A map between function names and FunctionInfos.
 */
Map<String, FunctionInfo> CreateFunctionMetadata(const IRModule& mod,
                                                 Integer workspace_byte_alignment,
                                                 Integer constant_byte_alignment);

}  // namespace aot
}  // namespace backend
}  // namespace relay
}  // namespace tvm

#endif  // TVM_RELAY_BACKEND_AOT_CREATE_FUNCTION_METADATA_H_
https://github.com/zk-ml/tachikoma
src/relay/backend/contrib/cmsisnn/buffer_size.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file src/relay/backend/contrib/cmsisnn/buffer_size.h
 * \brief CMSIS-NN Buffer Size calculation functions
 */

#ifndef TVM_RELAY_BACKEND_CONTRIB_CMSISNN_BUFFER_SIZE_H_
#define TVM_RELAY_BACKEND_CONTRIB_CMSISNN_BUFFER_SIZE_H_

#include <tvm/ir/transform.h>

#include "compiler_attrs.h"

namespace tvm {
namespace relay {
namespace contrib {
namespace cmsisnn {

// Number of input channels processed per block by the MVE int16 convolution
// kernels (mirrors the constant used inside CMSIS-NN).
#define CH_IN_BLOCK_MVE (124)

/*!
 * \brief Calculates the appropriate buffer size for CMSIS-NN Convolutions
 * See:
 * https://github.com/ARM-software/CMSIS_5/blob/8c60448c0e1e50e426180b26db9bc31ddf774361/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_wrapper_s8.c#L108-L127
 *
 * \param is_int16 - type of conv2d
 * \param target - CMSIS-NN Target
 * \param padding_w - Width padding
 * \param padding_h - Height padding
 * \param input_n - Input batch size
 * \param input_h - Input height
 * \param input_c - Input channels
 * \param output_h - Output height
 * \param output_w - Output width
 * \param stride_w - Stride width
 * \param stride_h - Stride height
 * \param dilation_w - Dilation width
 * \param dilation_h - Dilation height
 * \param filter_w - Filter width
 * \param filter_h - Filter height
 *
 * \return Size of buffer to allocate for convolution
 */
int Conv2dBufferSize(bool is_int16, Target target, int32_t padding_w, int32_t padding_h,
                     int32_t input_n, int32_t input_h, int32_t input_c, int32_t output_h,
                     int32_t output_w, int32_t stride_w, int32_t stride_h, int32_t dilation_w,
                     int32_t dilation_h, int32_t filter_w, int32_t filter_h);

// int8 specialization of Conv2dBufferSize; same parameters minus is_int16.
int Conv2dBufferSizeInt8(Target target, int32_t padding_w, int32_t padding_h, int32_t input_n,
                         int32_t input_h, int32_t input_c, int32_t output_h, int32_t output_w,
                         int32_t stride_w, int32_t stride_h, int32_t dilation_w, int32_t dilation_h,
                         int32_t filter_w, int32_t filter_h);

// int16 specialization of Conv2dBufferSize; same parameters minus is_int16.
int Conv2dBufferSizeInt16(Target target, int32_t padding_w, int32_t padding_h, int32_t input_n,
                          int32_t input_h, int32_t input_c, int32_t output_h, int32_t output_w,
                          int32_t stride_w, int32_t stride_h, int32_t dilation_w,
                          int32_t dilation_h, int32_t filter_w, int32_t filter_h);

/*!
 * \brief Calculates the appropriate buffer size for CMSIS-NN Depthwise Convolutions
 * See:
 * https://github.com/ARM-software/CMSIS_5/blob/325443e52637b6c7eedbd160d238a6c462e89c9f/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_conv_wrapper_s8.c#L115-L129
 *
 * \param is_int16 - type of conv2d
 * \param target - CMSIS-NN Target
 * \param input_n - Input batch size
 * \param input_c - Input channels
 * \param output_c - Output channels
 * \param filter_w - Filter width
 * \param filter_h - Filter height
 * \param dilation_w - Dilation width
 * \param dilation_h - Dilation height
 * \param depth_multiplier - Depth Multiplier for Depthwise Convolution
 *
 * \return Size of buffer to allocate for depthwise convolution
 */
int DepthwiseConv2dBufferSize(bool is_int16, Target target, int32_t input_n, int32_t input_c,
                              int32_t output_c, int32_t filter_w, int32_t filter_h,
                              int32_t dilation_w, int32_t dilation_h, int32_t depth_multiplier);

// int8 specialization of DepthwiseConv2dBufferSize; same parameters minus is_int16.
int DepthwiseConv2dBufferSizeInt8(Target target, int32_t input_n, int32_t input_c,
                                  int32_t output_c, int32_t filter_w, int32_t filter_h,
                                  int32_t dilation_w, int32_t dilation_h,
                                  int32_t depth_multiplier);

// int16 specialization of DepthwiseConv2dBufferSize; same parameters minus is_int16.
int DepthwiseConv2dBufferSizeInt16(Target target, int32_t input_n, int32_t input_c,
                                   int32_t output_c, int32_t filter_w, int32_t filter_h,
                                   int32_t dilation_w, int32_t dilation_h,
                                   int32_t depth_multiplier);

/*!
 * \brief Calculates the appropriate buffer size for CMSIS-NN Average Pooling
 * See:
 * https://github.com/ARM-software/CMSIS_5/blob/bff28575f0c96a4ee9008947fea2b018a69b4900/CMSIS/NN/Source/PoolingFunctions/arm_avgpool_s8.c#L388-L398
 *
 * \param target - CMSIS-NN Target
 * \param input_c - Input channels
 *
 * \return Size of buffer to allocate for average pooling
 */
int AvgPoolBufferSize(Target target, int32_t input_c);

}  // namespace cmsisnn
}  // namespace contrib
}  // namespace relay
}  // namespace tvm

#endif  // TVM_RELAY_BACKEND_CONTRIB_CMSISNN_BUFFER_SIZE_H_
https://github.com/zk-ml/tachikoma
src/relay/backend/contrib/cmsisnn/compiler_attrs.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file src/relay/backend/contrib/cmsisnn/compiler_attrs.h
 * \brief CMSIS-NN Compiler Attribute functionality
 */

#ifndef TVM_RELAY_BACKEND_CONTRIB_CMSISNN_COMPILER_ATTRS_H_
#define TVM_RELAY_BACKEND_CONTRIB_CMSISNN_COMPILER_ATTRS_H_

#include <tvm/ir/transform.h>
#include <tvm/target/target.h>

namespace tvm {
namespace relay {
namespace contrib {
namespace cmsisnn {

/*! \brief Attributes to store the compiler options for CMSIS-NN. */
struct CMSISNNCompilerConfigNode : public tvm::AttrsNode<CMSISNNCompilerConfigNode> {
  // Target CPU string, e.g. "cortex-m55"; may carry feature suffixes.
  String mcpu;
  // Extra architecture attributes, e.g. "+nodsp".
  String mattr;

  TVM_DECLARE_ATTRS(CMSISNNCompilerConfigNode, "ext.attrs.CMSISNNCompilerConfigNode") {
    TVM_ATTR_FIELD(mcpu)
        .describe(
            "The CPU to configure CMSIS-NN for (i.e. cortex-m55, cortex-m4), can also include "
            "attributes (i.e. cortex-m55+nomve)")
        .set_default("");
    TVM_ATTR_FIELD(mattr)
        .describe("The attributes to configure CMSIS-NN (i.e. +nodsp, +nomve)")
        .set_default("");
  }
};

/*! \brief Reference class wrapping CMSISNNCompilerConfigNode. */
class CMSISNNCompilerConfig : public Attrs {
 public:
  TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(CMSISNNCompilerConfig, Attrs,
                                            CMSISNNCompilerConfigNode);
};

/*! \brief Convert External Code Generator options to a TVM Target.
 * \param ctx The pass context holding the CMSIS-NN compiler configuration.
 * \return The constructed Target.
 */
Target CreateTarget(const tvm::transform::PassContext& ctx);

}  // namespace cmsisnn
}  // namespace contrib
}  // namespace relay
}  // namespace tvm

#endif  // TVM_RELAY_BACKEND_CONTRIB_CMSISNN_COMPILER_ATTRS_H_
https://github.com/zk-ml/tachikoma
src/relay/backend/contrib/cmsisnn/convolutions.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file src/relay/backend/contrib/cmsisnn/convolutions.h
 * \brief CMSIS-NN utility functions for Convolutions
 */

#ifndef TVM_RELAY_BACKEND_CONTRIB_CMSISNN_CONVOLUTIONS_H_
#define TVM_RELAY_BACKEND_CONTRIB_CMSISNN_CONVOLUTIONS_H_

#include <tvm/relay/attrs/nn.h>
#include <tvm/relay/attrs/transform.h>
#include <tvm/relay/expr_functor.h>
#include <tvm/relay/transform.h>
#include <tvm/runtime/ndarray.h>

#include "../../../op/make_op.h"
#include "../../../qnn/utils.h"
#include "../../../transforms/pattern_utils.h"

namespace tvm {
namespace relay {
namespace contrib {
namespace cmsisnn {

/*!
 * \brief Checks if Relay Conv2D was originally CMSIS-NN compliant Depthwise Convolution
 * See:
 * https://github.com/apache/tvm/blob/6ed3ab3e33f8eafa4acaf53b7a671831de7587e9/python/tvm/relay/frontend/tflite.py#L2107
 *
 * \param conv2d_attrs - attributes of the Conv2D being inspected
 * \param input_shape - shape of the Conv2D input tensor
 * \param kernel_shape - shape of the Conv2D kernel tensor
 *
 * \return true if a Conv2D is a Depthwise Convolution based on Conv2D's inputs' shapes and
 * attributes
 */
bool IsCMSISNNDepthwise(const Conv2DAttrs* conv2d_attrs, const Array<PrimExpr>& input_shape,
                        const Array<PrimExpr>& kernel_shape);

}  // namespace cmsisnn
}  // namespace contrib
}  // namespace relay
}  // namespace tvm

#endif  // TVM_RELAY_BACKEND_CONTRIB_CMSISNN_CONVOLUTIONS_H_
https://github.com/zk-ml/tachikoma
src/relay/backend/contrib/codegen_c/codegen_c.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file src/relay/backend/contrib/codegen_c/codegen_c.h * \brief The base class for external codegen tools. */ #ifndef TVM_RELAY_BACKEND_CONTRIB_CODEGEN_C_CODEGEN_C_H_ #define TVM_RELAY_BACKEND_CONTRIB_CODEGEN_C_CODEGEN_C_H_ #include <tvm/relay/expr.h> #include <tvm/relay/function.h> #include <tvm/relay/op.h> #include <sstream> #include <string> #include <utility> #include <vector> namespace tvm { namespace relay { namespace contrib { struct Output { std::string name; std::string dtype; int size; bool need_copy; }; struct GenerateBodyOutput { std::string decl; std::vector<std::string> buffers; std::vector<Output> outputs; }; class CSourceModuleCodegenBase { public: CSourceModuleCodegenBase() = default; virtual ~CSourceModuleCodegenBase() = default; /*! * \brief Create a runtime module for the external library. For example, it * could be a CSourceModule that can be directly compiled and linked together * with a DSOModule, or a json style module that emitts a json artifact that * is able to be executed by a customized json runtime. * * \param ref The ext_func Relay expression/module to be executed using extern ops. * * \return A runtime module. 
*/ virtual runtime::Module CreateCSourceModule(const ObjectRef& ref) = 0; }; // The base class to generate the declaration functions in C. class CodegenCBase { public: virtual ~CodegenCBase() {} protected: /*! \brief Print indents using spaces. */ void PrintIndents() { for (int i = 0; i < indent_; i++) { code_stream_ << ' '; } } /*! * \brief Enter a new scope. */ void EnterScope() { indent_ += 2; } /*! * \brief Exit a scope. */ void ExitScope() { ICHECK_GE(indent_, 2U) << "Wrong ident found."; indent_ -= 2; } /*! * \brief Creates a runtime function header */ void PrintRuntimeFunctionHeader(std::string func_name) { code_stream_ << "#ifdef __cplusplus\n"; code_stream_ << "extern \"C\" {\n"; code_stream_ << "#endif\n"; code_stream_ << "TVM_DLL int32_t "; code_stream_ << func_name << "("; code_stream_ << "TVMValue* args, "; code_stream_ << "int* type_code, "; code_stream_ << "int num_args, "; code_stream_ << "TVMValue* out_value, "; code_stream_ << "int* out_type_code) {\n"; } /*! * \brief Adds a line to convert TVMValue args to DLTensors */ void PrintArgToData(int idx) { PrintIndents(); code_stream_ << "DLTensor* arg" << idx << " = "; code_stream_ << "(DLTensor*)(((TVMValue*)args)[" << idx << "].v_handle);\n"; } /*! * \brief Adds a line to convert TVMValue rets to DLTensors */ void PrintRetToData(int idx) { PrintIndents(); code_stream_ << "DLTensor* ret" << idx << " = "; code_stream_ << "(DLTensor*)(((TVMValue*)args)[" << idx << "].v_handle);\n"; } /*! * \brief Gerenate C code for the external function. * * \param func_name The name of the external function. * \param args arguments to the external function. * * \code * * Array<NDArray> foo_consts; * * // An example code for the generated C function. 
* int foo_wrapper_(DLTensor* arg0, * DLTensor* arg1, * DLTensor* out) { * foo_((float*)(arg0->data), * (float*)(arg1->data), * (float*)(out->data)); * return 0; * } * * TVM_DLL_EXPORT_TYPED_FUNC(foo, foo_wrapper_); * * int foo_init_wrapper_(Array<NDArray> arr) { * foo_consts = arr; * return 0; * } * * TVM_DLL_EXPORT_TYPED_FUNC(__init_foo, foo_init_wrapper_); * * \endcode */ void GenerateBackendCFunc(const std::string& func_name, const Array<Var>& args, const std::string& const_arr_name, const std::vector<Output>& outs, bool pass_dl_tensor = false) { // Print signature code_stream_ << "\n"; code_stream_ << "int " << func_name << "_wrapper_("; for (size_t i = 0; i < args.size(); i++) { code_stream_ << "DLTensor* arg" << i << ",\n"; code_stream_ << "\t"; } for (size_t i = 0; i < outs.size() - 1; i++) { code_stream_ << "DLTensor* out" << i << ",\n"; code_stream_ << "\t"; } code_stream_ << "DLTensor* out" << outs.size() - 1 << ") {\n"; EnterScope(); // Generate the internal call. PrintIndents(); code_stream_ << func_name << "_("; for (size_t i = 0; i < args.size(); i++) { if (pass_dl_tensor) { code_stream_ << "arg" << i << ",\n"; } else { const auto& dtype_str = GetDtypeString(args[i]); code_stream_ << "(" << dtype_str << "*)(arg" << i << "->data),\n"; } PrintIndents(); } for (size_t i = 0; i < outs.size() - 1; i++) { if (pass_dl_tensor) { code_stream_ << "out" << i << ",\n"; } else { code_stream_ << "(" << outs[i].dtype << "*)(out" << i << "->data),\n"; } PrintIndents(); } if (pass_dl_tensor) { code_stream_ << "out" << outs.size() - 1 << ");\n"; } else { code_stream_ << "(" << outs.back().dtype << "*)(out" << outs.size() - 1 << "->data));\n"; } PrintIndents(); code_stream_ << "return 0;\n"; ExitScope(); code_stream_ << "}\n\n"; // Create the external function PrintRuntimeFunctionHeader(func_name); EnterScope(); for (size_t i = 0; i < args.size(); i++) { PrintArgToData(i); } for (size_t i = 0; i < outs.size(); i++) { PrintRetToData(args.size() + i); } PrintIndents(); 
code_stream_ << func_name << "_wrapper_("; for (size_t i = 0; i < args.size(); i++) { code_stream_ << "arg" << i << ","; } for (size_t i = 0; i < outs.size() - 1; i++) { code_stream_ << "ret" << args.size() + i << ","; } code_stream_ << "ret" << args.size() + outs.size() - 1 << ");\n"; PrintIndents(); code_stream_ << "return 0;\n"; ExitScope(); code_stream_ << "}\n"; code_stream_ << "#ifdef __cplusplus\n"; code_stream_ << "}\n"; code_stream_ << "#endif\n"; if (!const_arr_name.empty()) { // If there are constants, insert the __init_ and the wrapper // This segment would be generated in C++ because of the usage // of tvm::runtime::Array. This is not ideal, but this to demonstrate // constant copying process used packed imports in other external // codegen. Moreover, in microTVM we dont expect this part to be generated. code_stream_ << "#ifdef __cplusplus\n"; code_stream_ << "int " << func_name << "_init_wrapper_(tvm::runtime::Array<tvm::runtime::NDArray> arr) {\n"; EnterScope(); PrintIndents(); code_stream_ << func_name << "_consts = arr;\n"; code_stream_ << "return 0;\n"; ExitScope(); code_stream_ << "}\n\n"; code_stream_ << "TVM_DLL_EXPORT_TYPED_FUNC(__init_" << func_name << ", " << func_name << "_init_wrapper_);\n\n"; code_stream_ << "#endif\n"; } } /*! * \brief Emit the code for external runtime. * * \param out The outputs. * * \return The code string. */ virtual std::string JIT(const std::vector<Output>& out) = 0; /*! * \brief A common interface that is used by various external runtime to * generate the wrapper to invoke external kernels. * * \param ext_func_id The unique id of an external function. It will be used * during runtime to pick the correct external function. * \param args The arguments used by the external function. * \param buf_decl The declaration of temporary buffers that used to store the * intermeidate of each external kernel. * \param body The statements of the external function. * \param out The name and id pairs for output. 
* * \return The emitted code string. */ std::string JitImpl(const std::string& ext_func_id, const Array<Var>& args, const std::vector<std::string>& buf_decl, const std::vector<std::string>& body, const std::string& const_arr_name, const std::vector<Output>& outs) { // Create a declaration for global ndarrays that contain constant data. if (!const_arr_name.empty()) { code_stream_ << "#ifdef __cplusplus\n"; code_stream_ << const_arr_name << "\n\n"; code_stream_ << "#endif\n"; } // Create the signature. For example, it could be: // void dnnl_0_(float* in0, float* in1, float* out0, float* out1) {} code_stream_ << "void " << ext_func_id << "_("; for (const auto& arg : args) { const auto& dtype_str = GetDtypeString(arg); code_stream_ << dtype_str << "* " << arg->name_hint() << ", "; } for (size_t i = 0; i < outs.size() - 1; ++i) { code_stream_ << outs[i].dtype << "* out" << i << ", "; } code_stream_ << outs.back().dtype << "* out" << outs.size() - 1 << ") {\n"; this->EnterScope(); // Function body for (auto decl : buf_decl) { this->PrintIndents(); code_stream_ << decl << "\n"; } code_stream_ << "\n"; for (auto stmt : body) { this->PrintIndents(); code_stream_ << stmt << "\n"; } // Copy output for (size_t i = 0; i < outs.size(); ++i) { if (!outs[i].need_copy) { continue; } this->PrintIndents(); code_stream_ << "memcpy(out" << i << ", " << outs[i].name << ", 4 * " << outs[i].size << ");\n"; } // Free buffers for (size_t i = 0; i < buf_decl.size(); i++) { this->PrintIndents(); code_stream_ << "free(buf_" << i << ");\n"; } this->ExitScope(); code_stream_ << "}\n"; // Create the wrapper to call the ext_func this->GenerateBackendCFunc(ext_func_id, args, const_arr_name, outs); return code_stream_.str(); } /*! * \brief Returns dtype string * * \param var Var to get the dtype of * * \return The dtype string. 
*/ std::string GetDtypeString(const Var& var) { auto ttype = var->checked_type().as<TensorTypeNode>(); ICHECK(ttype) << "Expect TensorTypeNode"; return GetDtypeString(ttype); } /*! * \brief Returns dtype string * * \param ttype TensorTypeNode* to get the dtype of * * \return The dtype string. */ std::string GetDtypeString(const TensorTypeNode* ttype) { std::string dtype; if (runtime::TypeMatch(ttype->dtype, kDLFloat, 32)) { dtype = "float"; } else if (runtime::TypeMatch(ttype->dtype, kDLFloat, 16)) { dtype = "half"; } else if (runtime::TypeMatch(ttype->dtype, kDLBfloat, 16)) { dtype = "bfloat"; } else if (runtime::TypeMatch(ttype->dtype, kDLInt, 32)) { dtype = "int"; } else if (runtime::TypeMatch(ttype->dtype, kDLInt, 64)) { dtype = "int64_t"; } else { LOG(FATAL) << "Unsupported dtype " << ttype->dtype; } return dtype; } /*! * \brief Creates a checker to check if the NDArray pool is initialized * * \param symobl The Symbol of the current function * * \return The created checker */ std::string CreateInitChecker(const std::string& symbol) const { std::ostringstream oss; oss << "ICHECK(!" << symbol << "_consts.empty()) << \"C source module hasn't been initialized.\";\n"; return oss.str(); } /*! * \brief Generates the global ndarray pool declaration * * \param symobl The Symbol of the current function * * \return The created declaration */ std::string CreateNDArrayPool(const std::string& symbol) const { return "tvm::runtime::Array<tvm::runtime::NDArray> " + symbol + "_consts;"; } /*! * \brief Generates the reference to the data of a constant ndarray * * \param symobl The Symbol of the current function * \param symobl const_id The index of the constant * * \return The created reference */ std::string CreateDataReference(const std::string& symbol, size_t const_id) const { return "(float*)(" + symbol + "_consts[" + std::to_string(const_id) + "]->data)"; } /*! 
 * \brief Returns the variable name for a constant variable
 *
 * \param symbol The symbol of the current function
 * \param const_id The index of the constant
 *
 * \return The created variable name
 */
std::string CreateConstVar(const std::string& symbol, size_t const_id) const {
  return symbol + "_const_" + std::to_string(const_id);
}

/*! \brief The external function source code stream. */
std::ostringstream code_stream_;

 private:
  /*! \brief Indent of the source code. */
  int indent_{0};
};

/*!
 * \brief A pass to translate all "Primitive" Relay functions with "Compiler=ccompiler" to
 * a \p CSourceModule.
 */
transform::Pass CCompilerPass();

}  // namespace contrib
}  // namespace relay
}  // namespace tvm
#endif  // TVM_RELAY_BACKEND_CONTRIB_CODEGEN_C_CODEGEN_C_H_
https://github.com/zk-ml/tachikoma
src/relay/backend/contrib/codegen_json/codegen_json.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file relay/backend/contrib/codegen_json.h * \brief Utilities for json codegen and runtime */ #ifndef TVM_RELAY_BACKEND_CONTRIB_CODEGEN_JSON_CODEGEN_JSON_H_ #define TVM_RELAY_BACKEND_CONTRIB_CODEGEN_JSON_CODEGEN_JSON_H_ #include <dmlc/any.h> #include <dmlc/json.h> #include <tvm/node/reflection.h> #include <tvm/tir/op.h> #include <cstdint> #include <limits> #include <memory> #include <string> #include <unordered_map> #include <utility> #include <vector> #include "../../../../runtime/contrib/json/json_node.h" #include "../../../../runtime/contrib/json/json_runtime.h" #include "../../utils.h" namespace tvm { namespace relay { namespace backend { namespace contrib { using namespace tvm::runtime::json; using ShapeVector = std::vector<std::vector<int64_t>>; using TypeVector = std::vector<std::string>; using JSONGraphObjectPtr = std::shared_ptr<JSONGraphNode>; /*! * \brief Helper class to extract all attributes of a certain op and save them * into text format. 
*/ class OpAttrExtractor : public AttrVisitor { public: explicit OpAttrExtractor(JSONGraphObjectPtr node) : node_(node) {} template <typename T = double, typename = std::enable_if_t<std::is_floating_point<T>::value>> std::string Fp2String(const T value) { std::ostringstream out; out.precision(std::numeric_limits<T>::max_digits10); out << value; return out.str(); } void SetNodeAttr(const char* key, const std::vector<std::string>& value) { std::vector<dmlc::any> attr; attr.emplace_back(value); node_->SetAttr(key, attr); } void Visit(const char* key, double* value) final { SetNodeAttr(key, {Fp2String(*value)}); } void Visit(const char* key, int64_t* value) final { SetNodeAttr(key, {std::to_string(*value)}); } void Visit(const char* key, uint64_t* value) final { SetNodeAttr(key, {std::to_string(*value)}); } void Visit(const char* key, int* value) final { SetNodeAttr(key, {std::to_string(*value)}); } void Visit(const char* key, bool* value) final { SetNodeAttr(key, {std::to_string(*value)}); } void Visit(const char* key, std::string* value) final { SetNodeAttr(key, {*value}); } void Visit(const char* key, DataType* value) final { if (!value->is_void()) { SetNodeAttr(key, {runtime::DLDataType2String(*value)}); } else { SetNodeAttr(key, {""}); } } void Visit(const char* key, runtime::ObjectRef* value) final { if (const auto* an = (*value).as<ArrayNode>()) { std::vector<std::string> attr; for (size_t i = 0; i < an->size(); ++i) { if (const auto* im = (*an)[i].as<IntImmNode>()) { attr.push_back(std::to_string(im->value)); } else if (const auto* fm = (*an)[i].as<FloatImmNode>()) { attr.push_back(Fp2String(fm->value)); } else if (const auto* str = (*an)[i].as<StringObj>()) { String s = GetRef<String>(str); attr.push_back(s); } else { LOG(FATAL) << "Not supported type: " << (*an)[i]->GetTypeKey(); } } SetNodeAttr(key, attr); } else if (!(*value).defined()) { // Skip NullValue SetNodeAttr(key, std::vector<std::string>{""}); } else if (const auto* im = (*value).as<IntImmNode>()) 
{ SetNodeAttr(key, std::vector<std::string>{std::to_string(im->value)}); } else if (const auto* fm = (*value).as<FloatImmNode>()) { SetNodeAttr(key, std::vector<std::string>{Fp2String(fm->value)}); } else if (const auto* str = (*value).as<StringObj>()) { String s = GetRef<String>(str); SetNodeAttr(key, std::vector<std::string>{s}); } else { LOG(FATAL) << "Not yet supported type: " << (*value)->GetTypeKey() << ": " << *value; } } void Visit(const char* key, runtime::NDArray* value) final { LOG(FATAL) << "NDArray is not allowed in op attribute"; } void Visit(const char* key, void** value) final { LOG(FATAL) << "void pointer is not allowed in op attribute"; } void Extract(Object* node) { if (node) { reflection_->VisitAttrs(node, this); } } private: JSONGraphObjectPtr node_; ReflectionVTable* reflection_ = ReflectionVTable::Global(); }; /*! \brief Serialize a Relay expression to JSON. */ class JSONSerializer : public MemoizedExprTranslator<std::vector<JSONGraphNodeEntry>> { public: /*! * \brief Constructor * * \param symbol The symbol that represents the graph being converted. * \param expr The Relay expression to be converted to the JSON form. */ JSONSerializer(std::string symbol, Expr expr) : symbol_(std::move(symbol)), func_(std::move(expr)) {} void serialize() { relay::Function func = Downcast<relay::Function>(func_); // First we convert all the parameters into input nodes. for (const auto& param : func->params) { auto node_ptr = std::make_shared<JSONGraphNode>(param->name_hint(), "input" /* op_type_ */); memo_[param] = AddNode(node_ptr, param); } heads_ = VisitExpr(func->body); } /*! * \brief Returns the accumulated map from constant names to the NDArray they must be bound to * at runtime. Also referred to a 'params' elsewhere in the code. */ const std::unordered_map<std::string, runtime::NDArray>& const_name_to_constant() const { return const_name_to_constant_; } /*! * \brief Return the constant names in order they were encountered during translation. 
*/ const Array<String>& const_names() const { return const_names_; } /*!\brief Return the generated json. */ std::string GetJSON() { std::ostringstream os; dmlc::JSONWriter writer(&os); Save(&writer); return os.str(); } protected: /*! * \brief Add a node to graph. * * \param node A graph node. It is a shared pointer. Some attributes of it * will be added, i.e. shape and type. These attributes are attached to * the JSON graph in the end. * \param expr The relay expression. * \return A list of graph entry nodes. It the relay expr is a tuple type, we * will flatten it. */ std::vector<JSONGraphNodeEntry> AddNode(JSONGraphObjectPtr node, const Expr& expr) { auto checked_type = expr->checked_type(); auto node_id = nodes_.size(); nodes_.push_back(node); std::vector<JSONGraphNodeEntry> ret; ShapeVector shape; TypeVector dtype; // Flatten tuple node. if (const auto* tuple_type = checked_type.as<TupleTypeNode>()) { for (size_t i = 0; i < tuple_type->fields.size(); ++i) { const auto* tensor_type = tuple_type->fields[i].as<TensorTypeNode>(); ICHECK(tensor_type) << "Expect TensorType, but received: ." 
<< tuple_type->fields[i]->GetTypeKey(); ret.push_back(JSONGraphNodeEntry(node_id, i)); shape.emplace_back(GetIntShape(tensor_type->shape)); dtype.emplace_back(DType2String(tensor_type->dtype)); } node->SetNumOutput(tuple_type->fields.size()); } else { const auto* tensor_type = checked_type.as<TensorTypeNode>(); ICHECK(tensor_type) << "Expect TensorType, but received: " << checked_type->GetTypeKey(); shape.emplace_back(GetIntShape(tensor_type->shape)); dtype.emplace_back(DType2String(tensor_type->dtype)); ret.push_back(JSONGraphNodeEntry(node_id, 0)); } std::vector<dmlc::any> shape_attrs; shape_attrs.emplace_back(shape); node->SetAttr("shape", shape_attrs); std::vector<dmlc::any> type_attrs; type_attrs.emplace_back(dtype); node->SetAttr("dtype", type_attrs); return ret; } void SetCallNodeAttribute(JSONGraphObjectPtr node, const CallNode* cn) { if (cn->op.as<OpNode>()) { OpAttrExtractor extractor(node); const Object* call_attr = cn->attrs.get(); extractor.Extract(const_cast<Object*>(call_attr)); } else if (const auto* fn = cn->op.as<FunctionNode>()) { auto pattern = fn->GetAttr<String>(attr::kPartitionedFromPattern); ICHECK(pattern.defined()); std::vector<std::string> values; values.push_back(pattern.value()); std::vector<dmlc::any> attr; attr.emplace_back(values); node->SetAttr("PartitionedFromPattern", attr); } } std::vector<JSONGraphNodeEntry> VisitExprDefault_(const Object* op) { LOG(FATAL) << "JSON runtime currently doesn't support " << op->GetTypeKey(); return {}; } std::vector<JSONGraphNodeEntry> VisitExpr_(const VarNode* vn) { ICHECK(memo_.count(GetRef<Expr>(vn))); return memo_[GetRef<Expr>(vn)]; } std::vector<JSONGraphNodeEntry> VisitExpr_(const ConstantNode* constant_node) { std::string name = symbol_ + "_const_" + std::to_string(const_names_.size()); VLOG(1) << "Will require parameter '" << name << "' to be supplied by the ConstLoaderModule at runtime"; ICHECK_EQ(const_name_to_constant_.count(name), 0); const_name_to_constant_.emplace(name, 
constant_node->data); const_names_.push_back(name); auto node = std::make_shared<JSONGraphNode>(name, /*op_type=*/"const"); return AddNode(node, GetRef<Expr>(constant_node)); } std::vector<JSONGraphNodeEntry> VisitExpr_(const TupleNode* tn) { std::vector<JSONGraphNodeEntry> fields; for (const auto& field : tn->fields) { auto ref = VisitExpr(field); fields.insert(fields.end(), ref.begin(), ref.end()); } return fields; } std::vector<JSONGraphNodeEntry> VisitExpr_(const CallNode* cn) { Expr expr = GetRef<Expr>(cn); std::string name; if (const auto* op_node = cn->op.as<OpNode>()) { name = op_node->name; } else if (const auto* fn = cn->op.as<FunctionNode>()) { auto comp = fn->GetAttr<String>(attr::kComposite); ICHECK(comp.defined()) << "JSON runtime only supports composite functions."; name = comp.value(); } else { LOG(FATAL) << "JSON runtime does not support calls to " << cn->op->GetTypeKey(); } std::vector<JSONGraphNodeEntry> inputs; for (const auto& arg : cn->args) { auto res = VisitExpr(arg); inputs.insert(inputs.end(), res.begin(), res.end()); } auto node = std::make_shared<JSONGraphNode>(name, /* name_ */ "kernel", /* op_type_ */ inputs, 1 /* num_outputs_ */); SetCallNodeAttribute(node, cn); return AddNode(node, GetRef<Expr>(cn)); } std::vector<JSONGraphNodeEntry> VisitExpr_(const LetNode* ln) { ICHECK_EQ(memo_.count(ln->var), 0); memo_[ln->var] = VisitExpr(ln->value); return VisitExpr(ln->body); } std::vector<JSONGraphNodeEntry> VisitExpr_(const TupleGetItemNode* gtn) { auto vtuple = VisitExpr(gtn->tuple); return {vtuple[gtn->index]}; } std::vector<JSONGraphNodeEntry> VisitExpr_(const FunctionNode* fn) { ICHECK(fn->GetAttr<String>(attr::kComposite).defined()) << "JSON runtime only supports composite functions"; // FunctionNode should be handled by the caller. return {}; } /*! 
* \brief Save to JSON graph * * \param writer A json writer */ void Save(dmlc::JSONWriter* writer) { std::vector<size_t> arg_nodes; for (size_t i = 0; i < nodes_.size(); ++i) { auto node = nodes_[i]; if (node->IsLeaf()) { arg_nodes.push_back(i); } } size_t num_entry = 0; std::vector<size_t> node_row_ptr{0}; for (auto node : nodes_) { num_entry += node->GetNumOutput(); node_row_ptr.push_back(num_entry); } writer->BeginObject(); writer->WriteObjectKeyValue("nodes", nodes_); writer->WriteObjectKeyValue("arg_nodes", arg_nodes); writer->WriteObjectKeyValue("heads", heads_); writer->WriteObjectKeyValue("node_row_ptr", node_row_ptr); writer->EndObject(); } private: /*! \brief The symbol that represents the json graph. */ std::string symbol_; /*! \brief The function to be serialized. */ const Expr func_; /*! \brief JSON graph nodes. */ std::vector<JSONGraphObjectPtr> nodes_; /*! \brief Output of the JSON graph. */ std::vector<JSONGraphNodeEntry> heads_; /*! * \brief A map from constant names to NDArrays for each Constant encountered during * translation to JSON. The JSON will record only the constant name. The actual NDArray must * be made available at runtime from a ConstLoaderModule. */ std::unordered_map<std::string, runtime::NDArray> const_name_to_constant_; /*! * \brief The domain of the above map, but in order the constants were encountered during * translation. */ Array<String> const_names_; }; } // namespace contrib } // namespace backend } // namespace relay } // namespace tvm #endif // TVM_RELAY_BACKEND_CONTRIB_CODEGEN_JSON_CODEGEN_JSON_H_
https://github.com/zk-ml/tachikoma
src/relay/backend/contrib/constant_transforms.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file src/relay/backend/contrib/constant_transforms.h
 * \brief Transforms applied to constant operations during codegen for BYOC backends.
 */

#ifndef TVM_RELAY_BACKEND_CONTRIB_CONSTANT_TRANSFORMS_H_
#define TVM_RELAY_BACKEND_CONTRIB_CONSTANT_TRANSFORMS_H_

#include <tvm/relay/expr.h>

#include <string>

namespace tvm {
namespace relay {
namespace contrib {

/*!
 * \brief Transpose weights from `source_layout` to `target_layout`
 *
 * \param data The constant expression to transpose.
 * \param source_layout The current layout of the constant e.g. "OHWI".
 * \param target_layout The target layout of the constant e.g. "HWIO".
 * \return The transposed constant.
 */
Constant TransposeWeights(const Constant& data, const std::string& source_layout,
                          const std::string& target_layout);

}  // namespace contrib
}  // namespace relay
}  // namespace tvm

#endif  // TVM_RELAY_BACKEND_CONTRIB_CONSTANT_TRANSFORMS_H_
https://github.com/zk-ml/tachikoma
src/relay/backend/contrib/cutlass/codegen.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file src/relay/backend/contrib/cutlass/codegen.h
 * \brief The 'custom' compilation pass for CUTLASS (invoked by the RelayToTIRTargetHook pass).
 */

#ifndef TVM_RELAY_BACKEND_CONTRIB_CUTLASS_CODEGEN_H_
#define TVM_RELAY_BACKEND_CONTRIB_CUTLASS_CODEGEN_H_

#include <tvm/ir/transform.h>

namespace tvm {
namespace relay {
namespace contrib {
namespace cutlass {

/*!
 * \brief Returns the pass which replaces all calls to "Primitive" functions with "Compiler"
 * attribute of "cutlass" with a call to an extern, and binds a \p runtime::StaticLibrary
 * to the IRModule's "external_mods" attribute containing compiled implementations of
 * those functions using the CUTLASS C++ template library.
 */
transform::Pass CompileForCutlass();

}  // namespace cutlass
}  // namespace contrib
}  // namespace relay
}  // namespace tvm

#endif  // TVM_RELAY_BACKEND_CONTRIB_CUTLASS_CODEGEN_H_
https://github.com/zk-ml/tachikoma
src/relay/backend/contrib/dnnl/comp_op_matcher.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file src/relay/backend/contrib/dnnl/comp_op_matcher.h * \brief Implement matcher based function to parse complex composite nodes. */ #ifndef TVM_RELAY_BACKEND_CONTRIB_DNNL_COMP_OP_MATCHER_H_ #define TVM_RELAY_BACKEND_CONTRIB_DNNL_COMP_OP_MATCHER_H_ #include <tvm/relay/function.h> #include <string> #include <unordered_map> #include <vector> #include "../../../ir/dataflow_matcher_impl.h" /*! 
* \brief Converter value to dmlc attr acceptable format * * \tparam T type of value (auto deduction) * \param val value to convert * \return resulting dmlc object */ template <typename T, std::enable_if_t<std::is_integral<T>::value, bool> = true> dmlc::any dmlc_attr(const T& val) { std::vector<dmlc::any> attr; attr.emplace_back(std::vector<std::string>{std::to_string(val)}); return dmlc::any{attr}; } template <typename T, std::enable_if_t<std::is_same<T, std::string>::value, bool> = true> dmlc::any dmlc_attr(const T& val) { std::vector<dmlc::any> attr; attr.emplace_back(std::vector<std::string>{val}); return dmlc::any{attr}; } template <typename T, std::enable_if_t<std::is_same<T, std::vector<std::string>>::value, bool> = true> dmlc::any dmlc_attr(const T& val) { std::vector<dmlc::any> attr; attr.emplace_back(val); return dmlc::any{attr}; } /*! \brief Constructor of const scalar expression with defined type */ tvm::relay::Expr constant(float val) { auto value = tvm::runtime::NDArray::Empty({}, tvm::DataType::Float(32), {kDLCPU, 0}); value.CopyFromBytes(&val, sizeof(val)); auto res = tvm::relay::Constant(value); tvm::relay::transform::InferTypeLocal(res); return res; } /*! * \brief Simple helper to accumulate composite function arguments and corresponding attributes * with indexes of them. 
*/ class ArgPacker { public: ArgPacker(std::unordered_map<std::string, dmlc::any>* attrs, std::vector<tvm::relay::Expr>* args) : attrs_(attrs), args_(args) {} int Put(const tvm::relay::Expr& arg, std::string tag_name = "") { if (!arg.defined()) return -1; int idx = args_->size(); args_->push_back(arg); if (!tag_name.empty()) { attrs_->operator[](tag_name) = dmlc_attr(idx); } return idx; } private: std::unordered_map<std::string, dmlc::any>* attrs_; std::vector<tvm::relay::Expr>* args_; }; const tvm::relay::CallNode* ParseQnnConvComp(const tvm::relay::FunctionNode& comp_fn, std::unordered_map<std::string, dmlc::any>* ext_attrs, std::vector<tvm::relay::Expr>* args) { using namespace tvm::relay; // Pattern auto src = IsWildcard(); auto wgh = IsWildcard(); auto sum_src = IsWildcard(); auto bias = IsConstant(); auto o_scl = IsConstant(); auto act_scl = IsConstant(); auto sum_scl = IsConstant(); auto dst_zp = IsConstant(); DFPattern cnv; DFPattern pat; cnv = IsOp("qnn.conv2d")({src, wgh, IsConstant(), IsConstant(), IsConstant(), IsConstant()}); pat = IsOp("cast")({cnv}); pat = IsOp("add")({pat, bias}) || pat; pat = IsOp("multiply")({pat, o_scl}); pat = IsOp("clip")({pat}); pat = IsOp("multiply")({pat, act_scl}) || pat; pat = IsOp("add")({pat, sum_scl * IsOp("cast")({sum_src})}) || pat; pat = IsOp("add")({pat, dst_zp}) || pat; pat = IsOp("cast")({pat}); // Check pattern match auto indexed_body = CreateIndexedGraph(comp_fn.body); DFPatternMatcher matcher(indexed_body.get()); auto res = matcher.Match(pat, comp_fn.body); ICHECK(res) << "Mismatch of DNNL partitioner and codegen logic"; // Handle arguments in deterministic order auto map = matcher.GetMemo(); auto find = [&map](const DFPattern& pat) -> tvm::relay::Expr { if (map.count(pat)) return map.at(pat)[0]; return {}; }; ArgPacker arg_holder(ext_attrs, args); arg_holder.Put(find(src)); arg_holder.Put(find(wgh)); arg_holder.Put(find(bias), "bias_idx"); arg_holder.Put(find(sum_src), "sum_idx"); arg_holder.Put(find(o_scl), 
"o_scl_idx"); arg_holder.Put(find(act_scl), "act_scl_idx"); arg_holder.Put(find(sum_scl), "sum_scl_idx"); arg_holder.Put(find(dst_zp), "dst_zp_idx"); // Activation. Default clip to simulate relu via uint8 cast std::vector<std::string> clip_attr{"clip"}; auto act_scl_val = map.count(act_scl) ? find(act_scl) : constant(1.0); clip_attr.push_back(std::to_string(arg_holder.Put(act_scl_val))); // act_scale clip_attr.push_back(std::to_string(arg_holder.Put(constant(0.0)))); // alpha clip_attr.push_back(std::to_string(arg_holder.Put(constant(255.0)))); // beta (*ext_attrs)["activation"] = dmlc_attr(clip_attr); return map.at(cnv)[0].as<CallNode>(); } const tvm::relay::CallNode* ParseQnnDenseComp(const tvm::relay::FunctionNode& comp_fn, std::unordered_map<std::string, dmlc::any>* ext_attrs, std::vector<tvm::relay::Expr>* args) { using namespace tvm::relay; // Pattern auto src = IsWildcard(); auto wgh = IsWildcard(); auto sum_src = IsWildcard(); auto bias = IsConstant(); auto o_scl = IsConstant(); auto act_scl = IsConstant(); auto sum_scl = IsConstant(); auto dst_zp = IsConstant(); DFPattern dns, act, pat; dns = IsOp("qnn.dense")({src, wgh, IsConstant(), IsConstant(), IsConstant(), IsConstant()}); pat = IsOp("cast")({dns}); pat = IsOp("add")({pat, bias}) || pat; pat = IsOp("multiply")({pat, o_scl}); pat = IsOp("clip")({pat}); pat = IsOp("multiply")({pat, act_scl}) || pat; pat = IsOp("add")({pat, sum_scl * IsOp("cast")({sum_src})}) || pat; pat = IsOp("add")({pat, dst_zp}) || pat; pat = IsOp("cast")({pat}); // Check pattern match auto indexed_body = CreateIndexedGraph(comp_fn.body); DFPatternMatcher matcher(indexed_body.get()); auto res = matcher.Match(pat, comp_fn.body); ICHECK(res) << "Mismatch of DNNL partitioner and codegen logic"; // Handle arguments in deterministic order auto memo = matcher.GetMemo(); auto find = [&memo](const DFPattern& pat) -> tvm::relay::Expr { if (memo.count(pat)) return memo.at(pat)[0]; return {}; }; ArgPacker arg_holder(ext_attrs, args); 
arg_holder.Put(find(src)); arg_holder.Put(find(wgh)); arg_holder.Put(find(bias), "bias_idx"); arg_holder.Put(find(sum_src), "sum_idx"); arg_holder.Put(find(o_scl), "o_scl_idx"); arg_holder.Put(find(act_scl), "act_scl_idx"); arg_holder.Put(find(sum_scl), "sum_scl_idx"); arg_holder.Put(find(dst_zp), "dst_zp_idx"); // Activation. Default clip to simulate relu via uint8 cast std::vector<std::string> clip_attr{"clip"}; auto act_scl_val = memo.count(act_scl) ? find(act_scl) : constant(1.0); clip_attr.push_back(std::to_string(arg_holder.Put(act_scl_val))); // act_scale clip_attr.push_back(std::to_string(arg_holder.Put(constant(0.0)))); // alpha clip_attr.push_back(std::to_string(arg_holder.Put(constant(255.0)))); // beta (*ext_attrs)["activation"] = dmlc_attr(clip_attr); return memo.at(dns)[0].as<CallNode>(); } /*! * Parse composite function and return real args, additional attributes and root call node * @param comp_fn composite function to parse * @param ext_attrs attr collection with additional attributes * @param args real arguments of node * @return root call node */ const tvm::relay::CallNode* ParseComposite(const tvm::relay::FunctionNode& comp_fn, std::unordered_map<std::string, dmlc::any>* ext_attrs, std::vector<tvm::relay::Expr>* args) { auto comp = comp_fn.GetAttr<tvm::String>(tvm::relay::attr::kComposite); ICHECK(comp.defined()) << "DNNL JSON runtime only supports composite functions."; auto name = comp.value(); const tvm::relay::CallNode* res = nullptr; if (name == "dnnl.qnn.conv2d") res = ParseQnnConvComp(comp_fn, ext_attrs, args); else if (name == "dnnl.qnn.dense") res = ParseQnnDenseComp(comp_fn, ext_attrs, args); return res; } #endif // TVM_RELAY_BACKEND_CONTRIB_DNNL_COMP_OP_MATCHER_H_
https://github.com/zk-ml/tachikoma
src/relay/backend/contrib/ethosn/codegen_ethosn.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file src/relay/backend/contrib/ethosn/codegen_ethosn.h * \brief The Relay -> Arm(R) Ethos(TM)-N command stream compiler. */ #ifndef TVM_RELAY_BACKEND_CONTRIB_ETHOSN_CODEGEN_ETHOSN_H_ #define TVM_RELAY_BACKEND_CONTRIB_ETHOSN_CODEGEN_ETHOSN_H_ #include <dmlc/memory_io.h> #include <tvm/relay/attrs/nn.h> #include <tvm/relay/expr_functor.h> #include <tvm/relay/transform.h> #include <tvm/relay/type.h> #include <tvm/runtime/module.h> #include <tvm/runtime/registry.h> #include <algorithm> #include <fstream> #include <map> #include <memory> #include <sstream> #include <string> #include <unordered_map> #include <utility> #include <vector> #include "../../../../runtime/contrib/ethosn/ethosn_runtime.h" #include "../codegen_c/codegen_c.h" #include "ethosn_api.h" #include "ethosn_support_library/Support.hpp" #include "ethosn_support_library/SupportQueries.hpp" namespace tvm { namespace relay { namespace contrib { namespace ethosn { namespace sl = ::ethosn::support_library; /*! * \brief A struct to hold an uncompiled support library network alongside * the desired order of input and output operation ids. 
*/ struct NetworkWithIDs { struct hash_pair { template <class T_0, class T_1> size_t operator()(const std::pair<T_0, T_1>& p) const { return std::hash<T_0>{}(p.first) ^ std::hash<T_1>{}(p.second); } }; std::shared_ptr<sl::Network> network; std::unordered_map<uint32_t, unsigned int> input_ids; std::unordered_map<std::pair<uint32_t, uint32_t>, unsigned int, hash_pair> output_ids; }; /*! * \brief A base class for error handling using ErrorReporter. */ class ErrorReportingPass { public: ErrorReportingPass(const IRModule& mod, const GlobalVar& var) : mod_(mod), var_(var) {} /*! * \brief Report fatal errors for an expression. * \param expr The expression to report errors at. * \param err The errors to report. */ void ReportFatalError(const ObjectRef& expr, const EthosnError& err) { for (const auto& msg : err.msgs) { error_reporter_.ReportAt(this->var_, expr, ErrorBuilder() << msg); } error_reporter_.RenderErrors(this->mod_); } protected: /*! \brief An ErrorReporter object to render the errors.*/ ErrorReporter error_reporter_; /*! \brief The module to report errors for. */ IRModule mod_; /*! \brief The GlobalVar to report errors for. */ GlobalVar var_; }; /*! * \brief A custom pass to infer the support library tensor information * for a Relay expression. * * Support Library requires that tensors are explicitly declared with * information on their size, data type, format (eg. NHWC) and quantisation * parameters. In Relay, size and data type are already determined when the * type_infer pass is run. However, format and quantisation parameters are * properties of the operators that consume the tensors. * * This pass works by having each node initialise the information of its * parents, essentially propagating the inferred information all the way up * to the inputs of the expression. 
* * Because the children initialise the information of the parents, it is * necessary to traverse the graph in such a way so as to ensure all the * children of a node are visited before the parent is. As Relay does not * keep a reference to child nodes, this pass goes in preorder but will * skip visiting a parent if all the children haven't yet been visited (see * VisitInferred for the logic that implements this). * * Inference only works for supported callnodes, for tuplenodes, tuplegetitem * nodes and free var nodes. Other nodes should not be off-loaded to Ethos-N. */ class InferTensorsVisitor : private ErrorReportingPass, private ExprVisitor { public: InferTensorsVisitor(const IRModule& mod, const GlobalVar& var) : ErrorReportingPass(mod, var) {} /*! * \brief Infer the support library tensor information for all the nodes * in an expression. * \param expr The expression for which to infer tensor information. * \return A map of expressions to tensor information. * \note This algorithm does not traverse into functions, so call it on * the body of the function you're interested in. */ std::map<Expr, std::vector<sl::TensorInfo>> Infer(const Expr& expr); private: // Infer a callnode if it's a supported operator/composite function void InferCall(const CallNode* cn); void VisitInferred(const Expr& expr); void VisitExpr_(const CallNode* cn) final; void VisitExpr_(const TupleNode* tn) final; void VisitExpr_(const TupleGetItemNode* tg) final; // Don't traverse into functions, the Ethos-N codegen isn't meant to support them. void VisitExpr_(const FunctionNode* fn) final {} /*! \brief A look-up table from Expr to tensor infos. */ std::map<Expr, std::vector<sl::TensorInfo>> tensor_table_; }; std::map<Expr, std::vector<sl::TensorInfo>> InferTensors(const IRModule& mod, const GlobalVar& var, const Expr& expr) { return InferTensorsVisitor(mod, var).Infer(expr); } /*! * \brief A pass to generate a support library network from a Relay function. 
* * This pass constructs an equivalent support library network from a Relay * function in two visits. One to infer the tensor information of all the nodes * and another in postorder to add the nodes as support library operands. * (Supported) Callnodes, tuplenodes, tuplegetitemnodes and (free) * varnodes are handled by this pass. * * As part of the pass, nodes in the function body are associated with both * type information in the 'tensor_table', and support library operands in the * 'operand_table'. Both of these are maps of vectors as a Relay node can have * tuple type and accordingly be associated with multiple tensors. For nodes * which are not tuple type, vectors of size 1 are used. */ class ConstructNetworkVisitor : public MixedModeVisitor, private ErrorReportingPass { public: explicit ConstructNetworkVisitor(const IRModule& mod, const GlobalVar& var) : ErrorReportingPass(mod, var) {} /*! * \brief Construct a support library network from a given Relay function. The * function should contain only nodes supported by Ethos-N. * \param func The Relay function for which to construct a support library network. * \return A support library network that performs the same operation as the Relay * function. 
*/ NetworkWithIDs Construct(const Function& func); private: // Translate from a callnode to the appropriate 'Make' method sl::TensorsAndId HandleCall(const CallNode*); void VisitExpr_(const CallNode* cn) final; void VisitExpr_(const TupleNode* op) final; void VisitExpr_(const TupleGetItemNode* tg) final; void VisitLeaf(const Expr& expr) final; // Make a support library operand from a Call EthosnError MakeConvolutionLayer(const Call& call, sl::TensorAndId<sl::Operand>* out); EthosnError MakeFullyConnectedLayer(const Call&, sl::TensorAndId<sl::Operand>* out); EthosnError MakeMaxPool2DLayer(const Call& call, sl::TensorAndId<sl::Operand>* out); EthosnError MakeAvgPool2DLayer(const Call& call, sl::TensorAndId<sl::Operand>* out); EthosnError MakeReshapeLayer(const Call& call, sl::TensorAndId<sl::Operand>* out); EthosnError MakeAdditionLayer(const Call& call, sl::TensorAndId<sl::Operand>* out); EthosnError MakeSigmoidLayer(const Call& call, sl::TensorAndId<sl::Operand>* out); EthosnError MakeMeanLayer(const Call& call, sl::TensorAndId<sl::Operand>* out); EthosnError MakeTanhLayer(const Call& call, sl::TensorAndId<sl::Operand>* out); EthosnError MakeConv2DTransposeLayer(const Call& call, sl::TensorAndId<sl::Operand>* out); EthosnError MakeConcatenateLayer(const Call& call, sl::TensorAndId<sl::Operand>* out); EthosnError MakeSplitLayer(const Call& call, sl::TensorsAndId* outs); EthosnError MakeDepthToSpaceLayer(const Call& call, sl::TensorAndId<sl::Operand>* out); EthosnError MakeReluLayer(const Call& call, sl::TensorAndId<sl::Operand>* out); EthosnError MakeLeakyReLULayer(const Call& call, sl::TensorAndId<sl::Operand>* out); EthosnError MakeRequantizeLayer(const Call& call, sl::TensorAndId<sl::Operand>* out); EthosnError MakeReinterpretQuantizeLayer(const Call& call, sl::TensorAndId<sl::Operand>* out); EthosnError MakeResizeLayer(const Call& call, sl::TensorAndId<sl::Operand>* out); /*! \brief A look-up table from Expr to layers. 
*/ std::map<Expr, std::vector<std::shared_ptr<sl::Operand>>> operand_table_; /*! \brief A look-up table from Expr to SL operation IDs. */ std::map<Expr, std::vector<std::pair<uint32_t, uint32_t>>> id_table_; /*! \brief A look-up table from Expr to tensor infos. */ std::map<Expr, std::vector<sl::TensorInfo>> tensor_table_; /*! \brief The support library network to compile. */ std::shared_ptr<sl::Network> network_; }; NetworkWithIDs ConstructNetwork(const IRModule& mod, const GlobalVar& var, const Function& func) { return ConstructNetworkVisitor(mod, var).Construct(func); } /*! \brief Attributes to store the compiler options for Ethos-N */ struct EthosnCompilerConfigNode : public tvm::AttrsNode<EthosnCompilerConfigNode> { String variant; String sram_size; String tops; String ple_ratio; bool strategy0; bool strategy1; bool strategy3; bool strategy4; bool strategy6; bool strategy7; bool dump_ram; bool initial_sram_dump; bool block_config_16x16; bool block_config_32x8; bool block_config_8x32; bool block_config_8x8; bool enable_intermediate_compression; bool disable_winograd; String debug_dir; bool inline_non_compute_intensive_partitions; TVM_DECLARE_ATTRS(EthosnCompilerConfigNode, "ext.attrs.EthosnCompilerConfigNode") { TVM_ATTR_FIELD(variant).describe("See Ethos-N documentation.").set_default("n78"); TVM_ATTR_FIELD(sram_size) .describe("Optionally override the default sram size. See Ethos(TM)-N documentation.") .set_default("0"); TVM_ATTR_FIELD(tops) .describe("Valid values 1, 2, 4 and 8. See Ethos(TM)-N documentation.") .set_default("1"); TVM_ATTR_FIELD(ple_ratio) .describe("Valid values 2 and 4. 
See Ethos(TM)-N documentation.") .set_default("2"); TVM_ATTR_FIELD(strategy0).set_default(true); TVM_ATTR_FIELD(strategy1).set_default(true); TVM_ATTR_FIELD(strategy3).set_default(true); TVM_ATTR_FIELD(strategy4).set_default(true); TVM_ATTR_FIELD(strategy6).set_default(true); TVM_ATTR_FIELD(strategy7).set_default(true); TVM_ATTR_FIELD(dump_ram).set_default(false); TVM_ATTR_FIELD(initial_sram_dump).set_default(false); TVM_ATTR_FIELD(block_config_16x16).set_default(true); TVM_ATTR_FIELD(block_config_32x8).set_default(true); TVM_ATTR_FIELD(block_config_8x32).set_default(true); TVM_ATTR_FIELD(block_config_8x8).set_default(true); TVM_ATTR_FIELD(enable_intermediate_compression).set_default(true); TVM_ATTR_FIELD(disable_winograd).set_default(false); TVM_ATTR_FIELD(debug_dir).set_default("."); TVM_ATTR_FIELD(inline_non_compute_intensive_partitions) .describe( "A heuristic to improve performance. Inlines functions partitioned for Arm(R) " "Ethos(TM)-N that are deemed 'non-compute-intensive'. The inlined functions will " "continue through TVM's standard compilation flow.") .set_default(true); } }; class EthosnCompilerConfig : public Attrs { public: TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(EthosnCompilerConfig, Attrs, EthosnCompilerConfigNode); }; TVM_REGISTER_NODE_TYPE(EthosnCompilerConfigNode); TVM_REGISTER_PASS_CONFIG_OPTION("relay.ext.ethos-n.options", EthosnCompilerConfig); EthosnCompilerConfig GetCompilerAttrs() { auto ctx = transform::PassContext::Current(); Optional<EthosnCompilerConfig> cfg = ctx->GetConfig<EthosnCompilerConfig>("relay.ext.ethos-n.options"); if (!cfg.defined()) { return AttrsWithDefaultValues<EthosnCompilerConfig>(); } return cfg.value(); } TVM_REGISTER_GLOBAL("relay.ext.ethos-n.get_compiler_attrs").set_body_typed(GetCompilerAttrs); /*! \brief The compiler for Ethos-N functions */ class EthosnCompiler { public: /*! 
* \brief Create an Ethos-N runtime module from a Relay Ethos-N function * \param ref An ObjectRef pointing to a Relay Ethos-N function * \return runtime_module An Ethos-N runtime module */ static runtime::Module CreateRuntimeModule(const ObjectRef& ref); /*! * \brief Initialise the is-supported functionality of the Ethos-N support library * with the target variant. * \return Error object */ static EthosnError SupportedSetup(); /*! * \brief Return the is-supported API of the Support Library * \return A reference to the API. */ static std::unique_ptr<sl::SupportQueries>& GetSupported() { ICHECK(m_Queries != nullptr); return m_Queries; } private: /*! * \brief Compile a single Relay Ethos-N function into an ordered compiled network. * Compilation options will be taken from the PassContext. * \param mod The module the function is stored in (for error reporting purposes) * \param gvar The global var corresponding to the function * \param func The function to be compiled * \return ordered_compiled_network A compiled network with additional information * to handle difference in input/output ordering between the TVM runtime and the * Ethos-N compiled network. */ static runtime::ethosn::OrderedCompiledNetwork CompileEthosnFunc(const IRModule& mod, const GlobalVar& gvar, const Function& func); /*! * \brief Get the Support Library compilation options from the PassContext * \return options The compilation options */ static sl::CompilationOptions CreateOptions(); /*! * \brief Determine the order in which inputs should be provided/outputs should be * read from a compiled network. This is required because when you compile a network * for Ethos-N, you don't have control over the order in which the inputs/outputs * are given. You can, however, query what order the compiler decided to give them in. * We therefore keep track of our desired order and the actual order and create a * small translation table between the two for use in the runtime. 
* \param network A network additionally with the desired input/output order * \param compiled_network The compiled network with an as yet undetermined input/output order * \return input_output_order The order in which to permute the inputs/outputs given * by the TVM runtime such that they map correctly to the compiled network. */ static std::pair<std::vector<uint32_t>, std::vector<uint32_t>> GetInputOutputOrder( NetworkWithIDs network, const std::unique_ptr<sl::CompiledNetwork>& compiled_network); /*! * \brief Determine the input and output sizes of a compiled network. * * These need to be queried from the compiled network as the compiler can choose * to add additional padding on the input/output in certain cases. * * \param compiled_network The network compiled by the NPU compiler. * \return Pair of vectors of buffer sizes for both the inputs and outputs of the * network. */ static std::pair<std::vector<uint32_t>, std::vector<uint32_t>> GetIOSizes( const std::unique_ptr<sl::CompiledNetwork>& compiled_network); /*! * \brief Query interface used to determine if the Ethos-N hardware supports an operation * with the supplied parameters. */ static std::unique_ptr<sl::SupportQueries> m_Queries; }; runtime::Module CompileEthosn(const ObjectRef& ref) { return EthosnCompiler::CreateRuntimeModule(ref); } TVM_REGISTER_GLOBAL("relay.ext.ethos-n").set_body_typed(CompileEthosn); TVM_REGISTER_GLOBAL("relay.ext.ethos-n.constant_updater") .set_body_typed([](Expr expr, std::string symbol) { return Map<String, runtime::NDArray>(); }); } // namespace ethosn } // namespace contrib } // namespace relay } // namespace tvm #endif // TVM_RELAY_BACKEND_CONTRIB_ETHOSN_CODEGEN_ETHOSN_H_
https://github.com/zk-ml/tachikoma
src/relay/backend/contrib/ethosn/ethosn_api.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file src/relay/backend/contrib/ethosn/ethosn_api.h * \brief The Relay -> Arm(R) Ethos(TM)-N command stream compiler. */ #ifndef TVM_RELAY_BACKEND_CONTRIB_ETHOSN_ETHOSN_API_H_ #define TVM_RELAY_BACKEND_CONTRIB_ETHOSN_ETHOSN_API_H_ #include <tvm/relay/attrs/nn.h> #include <tvm/relay/expr.h> #include <tvm/relay/expr_functor.h> #include <tvm/relay/transform.h> #include <tvm/tir/analysis.h> #include <tvm/tir/op.h> #include <algorithm> #include <limits> #include <map> #include <memory> #include <string> #include <utility> #include <vector> #include "ethosn_support_library/Support.hpp" #include "ethosn_support_library/SupportQueries.hpp" namespace tvm { namespace relay { namespace contrib { namespace ethosn { namespace sl = ::ethosn::support_library; struct ConvolutionParams { sl::ConvolutionInfo conv_info; sl::TensorInfo input_info; sl::TensorInfo weights_info; sl::TensorInfo bias_info; sl::TensorInfo output_info; void* raw_weights = nullptr; void* raw_bias = nullptr; bool is_depthwise = false; }; struct FullyConnectedParams { sl::FullyConnectedInfo fc_info; sl::TensorInfo input_info; sl::TensorInfo weights_info; sl::TensorInfo bias_info; sl::TensorInfo output_info; runtime::NDArray 
raw_weights; runtime::NDArray raw_bias; }; struct MaxPool2DParams { sl::PoolingInfo pool_info = sl::PoolingInfo(0, 0, 0, 0, sl::Padding(), sl::PoolingType::MAX); sl::TensorInfo input_info; sl::TensorInfo output_info; }; struct AvgPool2DParams { sl::PoolingInfo pool_info = sl::PoolingInfo(0, 0, 0, 0, sl::Padding(), sl::PoolingType::AVG); sl::TensorInfo input_info; sl::TensorInfo output_info; }; struct ReshapeParams { sl::TensorShape new_shape{}; sl::TensorInfo input_info; sl::TensorInfo output_info; }; struct AdditionParams { sl::QuantizationInfo output_quantization_info; sl::TensorInfo lhs_info; sl::TensorInfo rhs_info; sl::TensorInfo output_info; }; struct SigmoidParams { sl::TensorInfo input_info; sl::TensorInfo output_info; }; struct MeanParams { sl::TensorInfo input_info; sl::TensorInfo output_info; }; struct TanhParams { sl::TensorInfo input_info; sl::TensorInfo output_info; }; struct LeakyReLUParams { sl::LeakyReluInfo leaky_relu_info; sl::TensorInfo input_info; sl::TensorInfo output_info; }; struct QnnConv2dTransposeParams { sl::ConvolutionInfo conv_info; sl::TensorInfo input_info; sl::TensorInfo weights_info; sl::TensorInfo bias_info; sl::TensorInfo output_info; runtime::NDArray raw_weights; runtime::NDArray raw_bias; }; struct ConcatenateParams { sl::QuantizationInfo qInfo; sl::ConcatenationInfo concat_info = sl::ConcatenationInfo(1, qInfo); std::vector<sl::TensorInfo> input_infos; sl::TensorInfo output_info; }; struct SplitParams { sl::SplitInfo split_info = sl::SplitInfo(0, {}); sl::TensorInfo input_info; std::vector<sl::TensorInfo> output_infos; }; struct DepthToSpaceParams { sl::DepthToSpaceInfo depth_info = sl::DepthToSpaceInfo(0); sl::TensorInfo input_info; sl::TensorInfo output_info; }; struct ReluParams { sl::ReluInfo relu_info; sl::TensorInfo input_info; sl::TensorInfo output_info; }; struct RequantizeParams { sl::RequantizeInfo requantize_info; sl::TensorInfo input_info; sl::TensorInfo output_info; }; struct ReinterpretQuantizationParams { 
sl::ReinterpretQuantizationInfo reinterpret_quantize_info; sl::TensorInfo input_info; sl::TensorInfo output_info; }; struct ResizeParams { sl::ResizeInfo resize_info; sl::TensorInfo input_info; sl::TensorInfo output_info; }; /*! * \brief A wrapper around std::stringstream to build an EthosnError. */ class ErrStrm { public: template <typename T> ErrStrm& operator<<(const T& val) { // NOLINT(*) stream_ << val; return *this; } private: std::stringstream stream_; friend class EthosnError; }; /*! * \brief Custom error class for storing error messages produced * during compilation for Ethos-N. */ class EthosnError { public: /*! \brief Default constructor */ EthosnError() {} /*! * \brief Construct error from an Array of Strings * \param msgs The messages */ explicit EthosnError(const Array<String>& msgs) : msgs(msgs) {} /*! * \brief Construct error from a String * \param msg The message */ explicit EthosnError(const String& msg) { if (msg.size()) msgs.push_back(msg); } /*! * \brief Construct error from an ErrStrm * \param err The ErrStrm */ explicit EthosnError(const ErrStrm& err) : EthosnError(err.stream_.str()) {} /*! \return Whether there are any error messages */ explicit operator bool() const { return !msgs.empty(); } /*! \brief Add together two errors to give a single error with all the msgs */ EthosnError& operator+=(const EthosnError& other) { msgs.insert(msgs.end(), other.msgs.begin(), other.msgs.end()); return *this; } /*! \brief The error messages */ Array<String> msgs; }; /*! * \brief Functions to interact with Support Library's API including the * translation of Relay ops/composite functions into Support Library * equivalents. */ class EthosnAPI { public: /*! \brief Create a default input tensor */ static sl::TensorInfo DefaultInputTensor(const Expr& expr); /*! \brief Extract the Support Library convolution params from an ethos-n.qnn_conv2d func */ static EthosnError QnnConv2d(const Expr& expr, ConvolutionParams* params); /*! 
\brief Extract the Support Library dense params from an ethos-n.qnn_fc func */ static EthosnError QnnFullyConnected(const Expr& expr, FullyConnectedParams* params); /*! \brief Extract the Support Library max_pool2d params from a Relay max_pool2d call */ static EthosnError MaxPool2D(const Expr& expr, MaxPool2DParams* params); /*! \brief Extract the Support Library avg_pool params from a Relay ethos-n.qnn_avg_pool2d func */ static EthosnError AvgPool2D(const Expr& expr, AvgPool2DParams* params); /*! \brief Extract the Support Library reshape params from a Relay reshape call */ static EthosnError Reshape(const Expr& expr, ReshapeParams* params); /*! \brief Extract the Support Library addition params from a Relay qnn.addition call */ static EthosnError Addition(const Expr& expr, AdditionParams* params); /*! \brief Extract the Support Library sigmoid params from a Relay an ethos-n.qnn_sigmoid func */ static EthosnError Sigmoid(const Expr& expr, SigmoidParams* params); /*! \brief Extract the Support Library mean params from a mean func */ static EthosnError Mean(const Expr& expr, MeanParams* params); /*! \brief Extract the Support Library tanh params from a Relay an ethos-n tanh func */ static EthosnError Tanh(const Expr& expr, TanhParams* params); /*! \brief Extract the Support Library leaky relu params from an ethos-n leaky relu Relu call. */ static EthosnError LeakyReLU(const Expr& expr, LeakyReLUParams* params); /*! \brief Extract the Support Library transpose params from a Relay * ethos-n.qnn_conv2d_transpose func */ static EthosnError QnnConv2dTranspose(const Expr& expr, QnnConv2dTransposeParams* params); /*! \brief Extract the Support Library concatenate params from a Relay qnn.concatenate call */ static EthosnError Concatenate(const Expr& expr, ConcatenateParams* params); /*! \brief Extract the Support Library split params from a Relay split call */ static EthosnError Split(const Expr& expr, SplitParams* params); /*! 
\brief Extract the Support Library depth_to_space params from a Relay depth_to_space call */ static EthosnError DepthToSpace(const Expr& expr, DepthToSpaceParams* params); /*! \brief Extract the Support Library relu params from a Relay relu call */ static EthosnError Relu(const Expr& expr, ReluParams* params); /*! \brief Extract the Support Library requantize params from a Relay qnn.requantize call */ static EthosnError Requantize(const Expr& expr, RequantizeParams* params); /*! * \brief Extact the Support Library reinterpret quantization params from a Relay qnn.requantize * call. * * \note This is used for the conversion from add and mul to a reinterpret quantization operator. * This is effectively an identity operation, as not the same as 'requantize'. */ static EthosnError ReinterpretQuantize(const Expr& expr, ReinterpretQuantizationParams* params); /*! \brief Extract the Support Library resize params from a Relay resize call */ static EthosnError Resize(const Expr& expr, ResizeParams* params); private: /*! \brief Convert a TVM IndexExpr array to a SL tensor shape */ static EthosnError Tvm2Npu(const Array<IndexExpr>& shape, sl::TensorShape* npu_shape); /*! \brief Convert a TVM data type to a SL data type */ static EthosnError Tvm2Npu(const tvm::DataType& dtype, sl::DataType* data_type); /*! \brief Convert TVM 1D padding to SL padding */ static EthosnError Tvm2Npu(const Array<IndexExpr>& padding, sl::Padding* npu_padding); /*! \brief Convert TVM 1D striding to SL striding */ static EthosnError Tvm2Npu(const Array<IndexExpr>& strides, sl::Stride* npu_stride); /*! \brief Convert TVM data format to SL data format */ static EthosnError Tvm2Npu(const std::string& dformat, sl::DataFormat* data_format); /*! \brief Convert TVM size array for pooling size to x and y values */ static EthosnError Tvm2Npu(const Array<IndexExpr>& size, uint32_t* x, uint32_t* y); /*! 
\brief Convert TVM quantization info to SL quantization info */ static EthosnError Tvm2Npu(const int32_t zero_point, const float scale, sl::QuantizationInfo* npu_qinfo); static EthosnError Tvm2Npu(const int32_t zero_point, const std::valarray<float> scales, const unsigned int axis, sl::QuantizationInfo* npu_qinfo); /*! \brief Convert TVM 2D padding to SL padding */ static EthosnError Tvm2Npu(const Array<Array<Integer>>& padding, sl::Padding* npu_padding); /*! \brief Convert a TVM Integer array to a SL tensor shape */ static EthosnError Tvm2Npu(const Array<Integer>& shape, sl::TensorShape* npu_shape); /*! \brief Convert a TVM Type to SL tensor info. */ static EthosnError Tvm2Npu(const tvm::Type& type, sl::TensorInfo* npu_tinfo); /*! \brief Convert a TVM pooling call to SL pooling information */ static EthosnError Pool2d(const Call& input, const Call& output, Array<IndexExpr> size, Array<IndexExpr> strides, Array<IndexExpr> padding, sl::PoolingType pooling_type, sl::PoolingInfo* pool_info, sl::TensorInfo* input_info, sl::TensorInfo* output_info, std::string layout); // Convert an array of IntImmNodes into ValueT // IndexT type of Array indexing variable // ValueT type of resulting value template <typename IndexT, typename ValueT, size_t N> static EthosnError AsArray(const Array<IndexT>& arr, std::array<ValueT, N>* v); // Get a T from a constant represented by a NDArray. template <typename T> static EthosnError AsConstant(const Expr& expr, T* out); static EthosnError AsConstant(const Expr& expr, std::valarray<float>* out); }; } // namespace ethosn } // namespace contrib } // namespace relay } // namespace tvm #endif // TVM_RELAY_BACKEND_CONTRIB_ETHOSN_ETHOSN_API_H_
https://github.com/zk-ml/tachikoma
src/relay/backend/contrib/ethosu/utils.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file relay/backend/contrib/ethosu/utils.h * \brief Utilities for microNPU codegen */ #ifndef TVM_RELAY_BACKEND_CONTRIB_ETHOSU_UTILS_H_ #define TVM_RELAY_BACKEND_CONTRIB_ETHOSU_UTILS_H_ #include <tvm/ir/expr.h> #include <tvm/target/target.h> #include <tvm/tir/stmt.h> namespace tvm { namespace relay { namespace contrib { namespace ethosu { /*! * \brief Base addresses are input pointers to * the driver that get accessed by the command stream * using offsets to read/write data. */ struct BaseAddressNode : public Object { /*! \brief The identifier, usually it the param name of the PrimFunc that gets lowered */ String name; /*! \brief The index in the params array of the PrimFunc. This is needed to keep aligned * between the PrimFunc arguments ordering and argument ordering of generated code */ Integer primfunc_param_idx; /*! \brief The region used by the command stream. This needs to match with base address * index passed into the driver */ Integer region; /*! \brief The size of the buffer accessible by this base address */ Integer size; /*! 
\brief This is a runtime allocation that needs to be done in the function */ Bool is_runtime_allocation{Bool(false)}; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("name", &name); v->Visit("primfunc_param_idx", &primfunc_param_idx); v->Visit("region", &region); v->Visit("size", &size); v->Visit("is_runtime_allocation", &is_runtime_allocation); } bool SEqualReduce(const BaseAddressNode* other, SEqualReducer equal) const { return equal(name, other->name) && equal(primfunc_param_idx, other->primfunc_param_idx) && equal(region, other->region) && equal(size, other->size) && equal(is_runtime_allocation, other->is_runtime_allocation); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce(name); hash_reduce(primfunc_param_idx); hash_reduce(region); hash_reduce(size); hash_reduce(is_runtime_allocation); } static constexpr const char* _type_key = "relay.ext.ethos-u.BaseAddress"; TVM_DECLARE_FINAL_OBJECT_INFO(BaseAddressNode, Object); }; class BaseAddress : public ObjectRef { public: TVM_DLL BaseAddress(String name, Integer primfunc_param_idx, Integer region, Integer size, Bool is_runtime_allocation = Bool(false)); TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(BaseAddress, ObjectRef, BaseAddressNode); }; /*! * \brief Captures all the binary artifactes required to create * the C-source runtime module */ struct CompilationArtifactNode : public Object { /*! \brief The function name for this artifact belongs to */ String function_name; /*! \brief The binary command stream (CS) in hex format */ String command_stream; /*! \brief The encoded biases and weights in hex format */ String encoded_constants; /*! 
\brief The information regarding the base addresses */ Array<BaseAddress> base_addresses; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("function_name", &function_name); v->Visit("command_stream", &command_stream); v->Visit("encoded_constants", &encoded_constants); v->Visit("base_addresses", &base_addresses); } bool SEqualReduce(const CompilationArtifactNode* other, SEqualReducer equal) const { return equal(function_name, other->function_name) && equal(command_stream, other->command_stream) && equal(encoded_constants, other->encoded_constants) && equal(base_addresses, other->base_addresses); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce(function_name); hash_reduce(command_stream); hash_reduce(encoded_constants); hash_reduce(base_addresses); } static constexpr const char* _type_key = "relay.ext.ethos-u.CompilationArtifact"; TVM_DECLARE_FINAL_OBJECT_INFO(CompilationArtifactNode, Object); }; class CompilationArtifact : public ObjectRef { public: TVM_DLL CompilationArtifact(String function_name, String command_stream, String encoded_constants, Array<BaseAddress> base_addresses); TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(CompilationArtifact, ObjectRef, CompilationArtifactNode); }; } // namespace ethosu } // namespace contrib } // namespace relay } // namespace tvm #endif // TVM_RELAY_BACKEND_CONTRIB_ETHOSU_UTILS_H_
https://github.com/zk-ml/tachikoma
src/relay/backend/contrib/tachikoma/comp_op_matcher.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file src/relay/backend/contrib/tachikoma/comp_op_matcher.h * \brief Implement matcher based function to parse complex composite nodes. */ #ifndef TVM_RELAY_BACKEND_CONTRIB_TACHIKOMA_COMP_OP_MATCHER_H_ #define TVM_RELAY_BACKEND_CONTRIB_TACHIKOMA_COMP_OP_MATCHER_H_ #include <tvm/relay/function.h> #include <string> #include <unordered_map> #include <vector> #include "../../../ir/dataflow_matcher_impl.h" /*! 
* \brief Converter value to dmlc attr acceptable format * * \tparam T type of value (auto deduction) * \param val value to convert * \return resulting dmlc object */ template <typename T, std::enable_if_t<std::is_integral<T>::value, bool> = true> dmlc::any dmlc_attr(const T& val) { std::vector<dmlc::any> attr; attr.emplace_back(std::vector<std::string>{std::to_string(val)}); return dmlc::any{attr}; } template <typename T, std::enable_if_t<std::is_same<T, std::string>::value, bool> = true> dmlc::any dmlc_attr(const T& val) { std::vector<dmlc::any> attr; attr.emplace_back(std::vector<std::string>{val}); return dmlc::any{attr}; } template <typename T, std::enable_if_t<std::is_same<T, std::vector<std::string>>::value, bool> = true> dmlc::any dmlc_attr(const T& val) { std::vector<dmlc::any> attr; attr.emplace_back(val); return dmlc::any{attr}; } /*! \brief Constructor of const scalar expression with defined type */ tvm::relay::Expr constant(float val) { auto value = tvm::runtime::NDArray::Empty({}, tvm::DataType::Float(32), {kDLCPU, 0}); value.CopyFromBytes(&val, sizeof(val)); auto res = tvm::relay::Constant(value); tvm::relay::transform::InferTypeLocal(res); return res; } /*! * \brief Simple helper to accumulate composite function arguments and corresponding attributes * with indexes of them. 
*/ class ArgPacker { public: ArgPacker(std::unordered_map<std::string, dmlc::any>* attrs, std::vector<tvm::relay::Expr>* args) : attrs_(attrs), args_(args) {} int Put(const tvm::relay::Expr& arg, std::string tag_name = "") { if (!arg.defined()) return -1; int idx = args_->size(); args_->push_back(arg); if (!tag_name.empty()) { attrs_->operator[](tag_name) = dmlc_attr(idx); } return idx; } private: std::unordered_map<std::string, dmlc::any>* attrs_; std::vector<tvm::relay::Expr>* args_; }; const tvm::relay::CallNode* ParseQnnConvComp(const tvm::relay::FunctionNode& comp_fn, std::unordered_map<std::string, dmlc::any>* ext_attrs, std::vector<tvm::relay::Expr>* args) { using namespace tvm::relay; // Pattern auto src = IsWildcard(); auto wgh = IsWildcard(); auto sum_src = IsWildcard(); auto bias = IsConstant(); auto o_scl = IsConstant(); auto act_scl = IsConstant(); auto sum_scl = IsConstant(); auto dst_zp = IsConstant(); DFPattern cnv; DFPattern pat; cnv = IsOp("qnn.conv2d")({src, wgh, IsConstant(), IsConstant(), IsConstant(), IsConstant()}); pat = IsOp("cast")({cnv}); pat = IsOp("add")({pat, bias}) || pat; pat = IsOp("multiply")({pat, o_scl}); pat = IsOp("clip")({pat}); pat = IsOp("multiply")({pat, act_scl}) || pat; pat = IsOp("add")({pat, sum_scl * IsOp("cast")({sum_src})}) || pat; pat = IsOp("add")({pat, dst_zp}) || pat; pat = IsOp("cast")({pat}); // Check pattern match auto indexed_body = CreateIndexedGraph(comp_fn.body); DFPatternMatcher matcher(indexed_body.get()); auto res = matcher.Match(pat, comp_fn.body); ICHECK(res) << "Mismatch of Tachikoma partitioner and codegen logic"; // Handle arguments in deterministic order auto map = matcher.GetMemo(); auto find = [&map](const DFPattern& pat) -> tvm::relay::Expr { if (map.count(pat)) return map.at(pat)[0]; return {}; }; ArgPacker arg_holder(ext_attrs, args); arg_holder.Put(find(src)); arg_holder.Put(find(wgh)); arg_holder.Put(find(bias), "bias_idx"); arg_holder.Put(find(sum_src), "sum_idx"); 
arg_holder.Put(find(o_scl), "o_scl_idx"); arg_holder.Put(find(act_scl), "act_scl_idx"); arg_holder.Put(find(sum_scl), "sum_scl_idx"); arg_holder.Put(find(dst_zp), "dst_zp_idx"); // Activation. Default clip to simulate relu via uint8 cast std::vector<std::string> clip_attr{"clip"}; auto act_scl_val = map.count(act_scl) ? find(act_scl) : constant(1.0); clip_attr.push_back(std::to_string(arg_holder.Put(act_scl_val))); // act_scale clip_attr.push_back(std::to_string(arg_holder.Put(constant(0.0)))); // alpha clip_attr.push_back(std::to_string(arg_holder.Put(constant(255.0)))); // beta (*ext_attrs)["activation"] = dmlc_attr(clip_attr); return map.at(cnv)[0].as<CallNode>(); } const tvm::relay::CallNode* ParseQnnDenseComp(const tvm::relay::FunctionNode& comp_fn, std::unordered_map<std::string, dmlc::any>* ext_attrs, std::vector<tvm::relay::Expr>* args) { using namespace tvm::relay; // Pattern auto src = IsWildcard(); auto wgh = IsWildcard(); auto sum_src = IsWildcard(); auto bias = IsConstant(); auto o_scl = IsConstant(); auto act_scl = IsConstant(); auto sum_scl = IsConstant(); auto dst_zp = IsConstant(); DFPattern dns, act, pat; dns = IsOp("qnn.dense")({src, wgh, IsConstant(), IsConstant(), IsConstant(), IsConstant()}); pat = IsOp("cast")({dns}); pat = IsOp("add")({pat, bias}) || pat; pat = IsOp("multiply")({pat, o_scl}); pat = IsOp("clip")({pat}); pat = IsOp("multiply")({pat, act_scl}) || pat; pat = IsOp("add")({pat, sum_scl * IsOp("cast")({sum_src})}) || pat; pat = IsOp("add")({pat, dst_zp}) || pat; pat = IsOp("cast")({pat}); // Check pattern match auto indexed_body = CreateIndexedGraph(comp_fn.body); DFPatternMatcher matcher(indexed_body.get()); auto res = matcher.Match(pat, comp_fn.body); ICHECK(res) << "Mismatch of Tachikoma partitioner and codegen logic"; // Handle arguments in deterministic order auto memo = matcher.GetMemo(); auto find = [&memo](const DFPattern& pat) -> tvm::relay::Expr { if (memo.count(pat)) return memo.at(pat)[0]; return {}; }; ArgPacker 
arg_holder(ext_attrs, args); arg_holder.Put(find(src)); arg_holder.Put(find(wgh)); arg_holder.Put(find(bias), "bias_idx"); arg_holder.Put(find(sum_src), "sum_idx"); arg_holder.Put(find(o_scl), "o_scl_idx"); arg_holder.Put(find(act_scl), "act_scl_idx"); arg_holder.Put(find(sum_scl), "sum_scl_idx"); arg_holder.Put(find(dst_zp), "dst_zp_idx"); // Activation. Default clip to simulate relu via uint8 cast std::vector<std::string> clip_attr{"clip"}; auto act_scl_val = memo.count(act_scl) ? find(act_scl) : constant(1.0); clip_attr.push_back(std::to_string(arg_holder.Put(act_scl_val))); // act_scale clip_attr.push_back(std::to_string(arg_holder.Put(constant(0.0)))); // alpha clip_attr.push_back(std::to_string(arg_holder.Put(constant(255.0)))); // beta (*ext_attrs)["activation"] = dmlc_attr(clip_attr); return memo.at(dns)[0].as<CallNode>(); } /*! * Parse composite function and return real args, additional attributes and root call node * @param comp_fn composite function to parse * @param ext_attrs attr collection with additional attributes * @param args real arguments of node * @return root call node */ const tvm::relay::CallNode* ParseComposite(const tvm::relay::FunctionNode& comp_fn, std::unordered_map<std::string, dmlc::any>* ext_attrs, std::vector<tvm::relay::Expr>* args) { auto comp = comp_fn.GetAttr<tvm::String>(tvm::relay::attr::kComposite); ICHECK(comp.defined()) << "Tachikoma JSON runtime only supports composite functions."; auto name = comp.value(); const tvm::relay::CallNode* res = nullptr; if (name == "tachikoma.qnn.conv2d") res = ParseQnnConvComp(comp_fn, ext_attrs, args); else if (name == "tachikoma.qnn.dense") res = ParseQnnDenseComp(comp_fn, ext_attrs, args); return res; } #endif // TVM_RELAY_BACKEND_CONTRIB_TACHIKOMA_COMP_OP_MATCHER_H_
https://github.com/zk-ml/tachikoma
src/relay/backend/contrib/tensorrt/codegen.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file src/relay/backend/contrib/tensorrt/codegen.h
 * \brief The 'custom' compilation pass for TensorRT (invoked by the RelayToTIRTargetHook pass).
 */

#ifndef TVM_RELAY_BACKEND_CONTRIB_TENSORRT_CODEGEN_H_
#define TVM_RELAY_BACKEND_CONTRIB_TENSORRT_CODEGEN_H_

#include <tvm/ir/transform.h>

namespace tvm {
namespace relay {
namespace contrib {
namespace tensorrt {

/*!
 * \brief Returns the pass which replaces all calls to "Primitive" functions with a "Compiler"
 * attribute of "tensorrt" with calls to an extern which is implemented by a \p TensorRTRuntime
 * runtime module added to the IRModule's "external_mods" attribute.
 *
 * NOTE(review): only the entry point is exposed here; the pass body presumably lives in the
 * matching codegen.cc — confirm when wiring up callers.
 */
transform::Pass CompileForTensorRT();

}  // namespace tensorrt
}  // namespace contrib
}  // namespace relay
}  // namespace tvm

#endif  // TVM_RELAY_BACKEND_CONTRIB_TENSORRT_CODEGEN_H_
https://github.com/zk-ml/tachikoma
src/relay/backend/liveness_analysis.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file src/relay/backend/liveness_analysis.h * \brief Analysis that collects the live variables before and after each node. * NOTE: the input IR should be in ANF. */ #ifndef TVM_RELAY_BACKEND_LIVENESS_ANALYSIS_H_ #define TVM_RELAY_BACKEND_LIVENESS_ANALYSIS_H_ #include <tvm/relay/transform.h> #include <unordered_map> #include <unordered_set> #include <vector> #include "../../support/arena.h" #include "../op/memory/device_copy.h" #include "../transforms/device_aware_visitors.h" #include "../transforms/let_list.h" namespace tvm { namespace relay { namespace transform { using support::Arena; using VarSet = std::unordered_set<Var, ObjectPtrHash, ObjectPtrEqual>; // TODO(@altanh, @mbs, @mbrookhart): we should do a survey of all "*-flow graphs" in the codebase // to see what can be deduplicated. // TODO(@altanh): support Relay Refs once/if they are supported by the VM. /*! * \brief A representation of an input expression (typically a Function) as a directed graph of * basic blocks, with edges between basic blocks corresponding to control flow branching. */ class ControlFlowGraph { public: struct Node; struct BasicBlock; using NodePtr = Node*; using BasicBlockPtr = BasicBlock*; /*! 
* \brief A chunk of IR that does not have any control flow branching. At this stage in the IR, * basic blocks correspond to: * (1) a sequence of nested Let expressions, where each node in the block corresponds to a * binding and the last node is either the (non-Let) body or a binding that branches * (e.g. "let %x = if (%c) { true_block } else { false_block }"). * (2) an atomic expression representing the target expression of a control flow branch, e.g. * %v and %u in "let %x = if (%c) { %v } else { %u }". */ struct BasicBlock { // The nodes of the basic block. std::vector<NodePtr> nodes; // The predecessor basic blocks. std::vector<BasicBlockPtr> pred; // The successor basic blocks. std::vector<BasicBlockPtr> succ; static BasicBlockPtr Make(support::Arena* arena) { return arena->make<BasicBlock>(); } }; /*! * \brief Roughly corresponds to a "statement" in the IR, such as an individual binding in a * basic block or the "return value" of a block. Each node maps to a single corresponding expr in * the IR, but the converse is not true (e.g. in the case of variables). */ struct Node { /*! \brief The basic block this node belongs to. */ BasicBlockPtr parent; /*! \brief The index into the parent basic block where this node is. */ size_t index; /*! \brief The expr this node corresponds to. */ Expr expr; /*! \brief Returns whether or not this node is the first one in the parent basic block. */ bool IsFirst() const { return index == 0; } /*! \brief Returns whether or not this node is the last one in the parent basic block. */ bool IsLast() const { return index == parent->nodes.size() - 1; } /*! \brief Returns the predecessor nodes of this node. */ std::vector<NodePtr> GetPred() const { std::vector<NodePtr> pred; if (IsFirst()) { for (const BasicBlockPtr& pred_block : parent->pred) { pred.push_back(pred_block->nodes.back()); } } else { pred.push_back(parent->nodes[index - 1]); } return pred; } /*! \brief Returns the successor nodes of this node. 
*/ std::vector<NodePtr> GetSucc() const { std::vector<NodePtr> succ; if (IsLast()) { for (const BasicBlockPtr& succ_block : parent->succ) { succ.push_back(succ_block->nodes.front()); } } else { succ.push_back(parent->nodes[index + 1]); } return succ; } /*! \brief Creates a node with the given expr and appends it to the parent basic block. */ static NodePtr Make(Arena* arena, BasicBlockPtr parent, Expr expr) { NodePtr n = arena->make<Node>(); n->parent = parent; n->expr = expr; n->index = parent->nodes.size(); parent->nodes.push_back(n); return n; } }; /*! \brief The basic block where control flow begins. */ BasicBlockPtr entry; /*! * \brief Mapping from Let expressions to their corresponding nodes. Note that Let expressions * are never shared in ANF (unlike vars), so this is an injection. */ std::unordered_map<Expr, NodePtr, ObjectPtrHash, ObjectPtrEqual> let_map; /*! \brief The nodes of the CFG in reverse post order. */ std::vector<NodePtr> reverse_post_order; /*! \brief Creates and returns the CFG of the given expression. */ static ControlFlowGraph Create(Arena* arena, const Expr& body); private: class Creator; }; /*! \brief Helper class for building CFGs. */ class ControlFlowGraph::Creator : private ExprFunctor<void(const Expr&, BasicBlockPtr)> { public: Creator() {} ControlFlowGraph Create(Arena* arena, const Expr& body); private: /*! \brief The arena allocator. */ Arena* arena_; /*! \brief The CFG being built. */ ControlFlowGraph cfg_; /*! * \brief Whether or not we are in a function. CFGs do not support nested functions so this is * used to error out in such a case. */ bool in_func_ = false; /*! * \brief Link \p to as a successor block to \p from. 
*/ void Succ(BasicBlockPtr from, BasicBlockPtr to); #define DEFAULT_CFG(OP) \ void VisitExpr_(const OP* op, BasicBlockPtr parent) final { \ NodePtr n = Node::Make(arena_, parent, GetRef<Expr>(op)); \ cfg_.reverse_post_order.push_back(n); \ } void VisitExpr_(const FunctionNode* f, BasicBlockPtr parent) final; void VisitExpr_(const LetNode* let_node, BasicBlockPtr parent) final; void VisitExpr_(const IfNode* if_node, BasicBlockPtr parent); void VisitExpr_(const MatchNode* match_node, BasicBlockPtr parent); DEFAULT_CFG(VarNode); DEFAULT_CFG(GlobalVarNode); DEFAULT_CFG(ConstantNode); DEFAULT_CFG(CallNode); DEFAULT_CFG(OpNode); DEFAULT_CFG(TupleNode); DEFAULT_CFG(TupleGetItemNode); }; /*! * \brief Helper class for collecting the variables used/read by an expression. NOTE: for If exprs, * only the condition is included (not the branches). Similarly, for Match exprs only the value * being deconstructed is included. */ class VarUseCollector : public ExprFunctor<VarSet(const Expr& e)> { public: VarSet VisitExpr_(const VarNode* var_node); VarSet VisitExpr_(const CallNode* call_node); VarSet VisitExpr_(const TupleNode* tuple_node); VarSet VisitExpr_(const TupleGetItemNode* get_node); VarSet VisitExpr_(const IfNode* if_node); VarSet VisitExpr_(const MatchNode* match_node); VarSet VisitExpr_(const ConstructorNode* cons_node) { return {}; } VarSet VisitExpr_(const GlobalVarNode* gvar_node) { return {}; } VarSet VisitExpr_(const ConstantNode* const_node) { return {}; } VarSet VisitExpr_(const OpNode* op_node) { return {}; } VarSet VisitExpr_(const FunctionNode* func_node) { return {}; } }; /*! * \brief Analysis that collects the variables used and defined at each node. */ struct UseDefAnalysis { using CFG = ControlFlowGraph; /*! \brief Mapping of node -> variables used/read by node. */ std::unordered_map<CFG::NodePtr, VarSet> use; /*! \brief Mapping of node -> variable defined/written by node. 
*/ std::unordered_map<CFG::NodePtr, Var> def; VarUseCollector use_collector; static UseDefAnalysis Analyze(const CFG& cfg); }; /*! \brief Returns whether \p a and \p b are the same set of vars. */ bool SetEqual(const VarSet& a, const VarSet& b); /*! * \brief Analysis that collects the live variables before and after each node. */ struct LivenessAnalysis { using CFG = ControlFlowGraph; /*! \brief Mapping of node -> set of variables live before node. */ std::unordered_map<CFG::NodePtr, VarSet> live_in; /*! \brief Mapping of node -> set of variables live after node. */ std::unordered_map<CFG::NodePtr, VarSet> live_out; /*! * \brief Analyze the input \p cfg (using info from \p use_def). * * \param cfg The input control flow graph. * \param use_def Use-def analysis of \p cfg. * \return LivenessAnalysis */ static LivenessAnalysis Analyze(const ControlFlowGraph& cfg, const UseDefAnalysis& use_def); }; } // namespace transform } // namespace relay } // namespace tvm #endif // TVM_RELAY_BACKEND_LIVENESS_ANALYSIS_H_
https://github.com/zk-ml/tachikoma
src/relay/backend/name_transforms.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file relay/backend/name_transforms.h
 * \brief Transformations which are applied on names to generate appropriately named compiler
 * artifacts
 *
 * Example:
 *  ToCFunctionStyle(PrefixName(CombineNames({"Device", "target", "Invoke"})))
 *  // TVMDeviceTargetInvoke
 *
 *  ToCFunctionStyle(PrefixGeneratedName(CombineNames({"model", "Run"})))
 *  // TVMGenModelRun
 *
 *  ToCVariableStyle(PrefixName(CombineNames({"Device", "target", "t"})))
 *  // tvm_device_target_t
 *
 *  ToCVariableStyle(PrefixGeneratedName(CombineNames({"model", "Devices"})))
 *  // tvmgen_model_devices
 *
 *  ToCConstantStyle(PrefixGeneratedName(CombineNames({"model", "Devices"})))
 *  // TVMGEN_MODEL_DEVICES
 */

// Fix: the include guard previously sat AFTER the #include block, leaving the includes (and any
// future macros above it) unguarded; the guard now wraps the entire file per convention.
#ifndef TVM_RELAY_BACKEND_NAME_TRANSFORMS_H_
#define TVM_RELAY_BACKEND_NAME_TRANSFORMS_H_

#include <tvm/runtime/container/array.h>
#include <tvm/runtime/container/string.h>
#include <tvm/runtime/logging.h>

#include <algorithm>
#include <iostream>
#include <string>

namespace tvm {
namespace relay {
namespace backend {

/*!
 * \brief Transform a name to the C function style assuming it is
 * appropriately constructed using the prefixing functions
 * \param original_name Original name
 * \return Transformed name in the C function style
 */
std::string ToCFunctionStyle(const std::string& original_name);

/*!
 * \brief Transform a name to the C variable style assuming it is
 * appropriately constructed using the prefixing functions
 * \param original_name Original name
 * \return Transformed name in the C variable style
 */
std::string ToCVariableStyle(const std::string& original_name);

/*!
 * \brief Transform a name to the C constant style assuming it is
 * appropriately constructed using the prefixing functions
 * \param original_name Original name
 * \return Transformed name in the C constant style
 */
std::string ToCConstantStyle(const std::string& original_name);

/*!
 * \brief Combine names together for use as a generated name
 * \param names Vector of strings to combine
 * \return Combined together names
 */
std::string CombineNames(const Array<String>& names);

/*!
 * \brief Apply TVM-specific prefix to a name
 * \param names Vector of names to combine to form a combined name
 * \return Name with prefix applied or prefix-only if no name passed
 */
inline std::string PrefixName(const Array<String>& names) { return "TVM_" + CombineNames(names); }

/*!
 * \brief Apply generated TVM-specific prefix to a name
 * \param names Vector of names to combine to form a combined name
 * \return Name with prefix applied or prefix-only if no name passed
 */
inline std::string PrefixGeneratedName(const Array<String>& names) {
  return "TVMGen_" + CombineNames(names);
}

}  // namespace backend
}  // namespace relay
}  // namespace tvm
#endif  // TVM_RELAY_BACKEND_NAME_TRANSFORMS_H_
https://github.com/zk-ml/tachikoma
src/relay/backend/param_dict.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file param_dict.h
 * \brief Definitions for serializing and deserializing parameter dictionaries.
 */
#ifndef TVM_RELAY_BACKEND_PARAM_DICT_H_
#define TVM_RELAY_BACKEND_PARAM_DICT_H_

#include <tvm/node/node.h>
#include <tvm/runtime/ndarray.h>
#include <tvm/runtime/packed_func.h>
#include <tvm/tir/expr.h>

#include <string>

namespace tvm {
// NOTE(review): the relay namespace below is empty — the serialization declarations this header
// advertises appear to have moved elsewhere, and the header is presumably retained only for
// include compatibility. Confirm no callers rely on its includes before removing it.
namespace relay {}  // namespace relay
}  // namespace tvm
#endif  // TVM_RELAY_BACKEND_PARAM_DICT_H_
https://github.com/zk-ml/tachikoma
src/relay/backend/te_compiler.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file relay/backend/te_compiler.h * \brief Internal compilation layer which lowers Relay "primitive functions" to TIR PrimFns. * * * This represents the new design of the Relay compilation flow and will replace the interface * contained in compile_engine.h as we migrate towards a standard pass based lowering of * Relay functions. * * This files provides an internal API which lowers Relay programs to components which * can be combined with TVM produced kernels to compile an entire program. * * The result of lowering contains a combination of `runtime::Module`s produced by external * compilers and a set of lowered PrimFns which can be code generated for targets. 
*/ #ifndef TVM_RELAY_BACKEND_TE_COMPILER_H_ #define TVM_RELAY_BACKEND_TE_COMPILER_H_ #include <tvm/node/structural_equal.h> #include <tvm/node/structural_hash.h> #include <tvm/relay/analysis.h> #include <tvm/relay/attrs/memory.h> #include <tvm/relay/expr.h> #include <tvm/relay/op_strategy.h> #include <tvm/relay/transform.h> #include <tvm/runtime/module.h> #include <tvm/topi/elemwise.h> #include <functional> #include <string> #include <unordered_map> #include "../transforms/infer_layout_utils.h" #include "../transforms/pass_utils.h" #include "./te_compiler_cache.h" #include "./utils.h" namespace tvm { namespace relay { namespace tec { using ProcessFn = std::function<void(BaseFunc)>; /*! * \brief A compiler which lowers primitive Relay functions to tensor expressions * and schedules them into TIR functions. */ class TECompilerNode : public Object { public: /*! \brief destructor */ virtual ~TECompilerNode() {} /*! * \brief Get lowered result. * \param key The key to the cached function. * \return The result. */ virtual CachedFunc Lower(const CCacheKey& key) = 0; /*! * \brief Get lowered result. * \param key The key to the cached function. * \return The result. */ virtual CachedFunc Lower(const CCacheKey& key, const String mod_name) = 0; /* Return all functions which have been lowered by the compiler in an IRModule, annotated with * their target. */ virtual IRModule GetLoweredFunctions() = 0; /*! * \brief Just in time compile to get a PackedFunc. * \param key The key to the cached function. * \return The result. */ virtual PackedFunc JIT(const CCacheKey& key) = 0; /*! * \brief Lower the shape function. * \param key The key to the cached function. * \return The result. */ virtual CachedFunc LowerShapeFunc(const CCacheKey& key) = 0; /*! * \brief Lower the external function using external codegen tools. * \return The runtime modules for each needed external codegen tool. */ virtual tvm::Array<tvm::runtime::Module> LowerExternalFunctions() = 0; /*! 
 * \brief Update \p module to remove functions marked with the "Compiler" attribute and replace
 * them with their 'external' representation using the "ExternalSymbol" attribute.
 *
 * TODO(mbs): This is a stepping stone while we migrate to a more official representation
 * of 'external functions' in the IRModule and allow lowering to incrementally update the
 * module instead of forcing everything via the cache.
 *
 */
virtual void AddExterns(IRModule module) = 0;

/*!
 * \brief Get C Device API context mapping
 * \return Map of GlobalVar to associated C Device API context name (either Target or kCompiler
 * annotated)
 */
virtual Map<GlobalVar, String> GetDeviceContexts() = 0;
/*! \brief Set the C Device API context mapping returned by GetDeviceContexts. */
virtual void SetDeviceContexts(const Map<GlobalVar, String>& device_contexts) = 0;

// NOTE(review): presumably per-operator usage weights gathered during lowering — confirm
// against the implementation before relying on the semantics.
virtual Map<String, Integer> GetOpWeights() const = 0;

/*! \brief clear the cache. */
virtual void Clear() = 0;

// No reflected fields; all state lives in the concrete implementation.
void VisitAttrs(AttrVisitor*) {}

static constexpr const char* _type_key = "relay.TECompiler";
TVM_DECLARE_FINAL_OBJECT_INFO(TECompilerNode, Object);
};

/*! \brief Managed reference to TECompilerNode. */
class TECompiler : public ObjectRef {
 public:
  explicit TECompiler(Optional<IRModule> opt_mod = {}, Optional<String> mod_name = {});
  explicit TECompiler(ObjectPtr<Object> n) : ObjectRef(n) {}
  TECompilerNode* operator->() { return static_cast<TECompilerNode*>(get_mutable()); }
  using ContainerType = TECompilerNode;
  /*! \brief Returns the global TECompiler instance. */
  TVM_DLL static TECompiler& Global();
};

/*!
 * \brief A function to create the function metadata for an input function (ie calculate buffer
 * input/output sizes)
 * \param relay_func The function to calculate function metadata for
 * \param function_metadata The map that stores all the function metadatas
 * \param workspace_byte_alignment Byte alignment for allocations
 */
void UpdateFunctionMetadata(BaseFunc relay_func,
                            Map<String, backend::FunctionInfo>& function_metadata,  // NOLINT(*)
                            Integer workspace_byte_alignment = 16);

/*!
* \brief Update the "main" control function's metadata * * \param mod The module * \param config All the available targets. * \return function_infos Function info for each function in the module */ backend::FunctionInfo UpdateMainWorkspaceSize(const IRModule& mod, const CompilationConfig& config, Map<Expr, backend::StorageInfo> storage_info_map); /*! \brief Returns all the global \p PrimFunc functions in \p mod, but separated into an \p IRModule * per \p Target. * * \param mod The IRModule to extract the per target module from * \return The map from Target to IRModule */ Map<Target, IRModule> GetPerTargetModules(IRModule mod); inline void DefaultProcessFn(BaseFunc) {} /*! * \brief Pass to lower an IRModule's primitive functions to TIR. * * This is the "back half" of the Relay compiler which lowers "primitive functions" * to TE expressions, schedules them, and emits PrimFuncs. * * \param module_name The name of this module, used as a prefix for generated globals. * \param config All available targets. * \param process_fn Callback allowing one-level up code generators to process * each function that we lower (default is no-op). * \returns The pass which lowers primitive functions to TIR */ transform::Pass LowerTE(String module_name, CompilationConfig config, ProcessFn process_fn = DefaultProcessFn); } // namespace tec } // namespace relay } // namespace tvm #endif // TVM_RELAY_BACKEND_TE_COMPILER_H_
https://github.com/zk-ml/tachikoma
src/relay/backend/te_compiler_cache.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file relay/backend/tec_compiler_cache.h * \brief Utilities for compiling tensor expressions inside of the Relay compiler. */ #ifndef TVM_RELAY_BACKEND_TE_COMPILER_CACHE_H_ #define TVM_RELAY_BACKEND_TE_COMPILER_CACHE_H_ #include <tvm/ir/name_supply.h> #include <tvm/node/structural_equal.h> #include <tvm/node/structural_hash.h> #include <tvm/relay/analysis.h> #include <tvm/relay/attrs/memory.h> #include <tvm/relay/expr.h> #include <tvm/relay/op_strategy.h> #include <tvm/relay/transform.h> #include <tvm/runtime/module.h> #include <tvm/topi/elemwise.h> #include <functional> #include <string> #include <tuple> #include <unordered_map> #include <utility> #include "../transforms/infer_layout_utils.h" namespace tvm { namespace relay { namespace tec { /*! \brief Indicate whether the data or shape or both of a parameter is used in the shape func. */ enum ShapeFuncParamState { kNoNeed = 0, kNeedInputData = 1, kNeedInputShape = 2, kNeedBoth = 3, }; struct LoweredOutputNode : public Object { /*! \brief The outputs to the function */ tvm::Array<te::Tensor> outputs; /*! 
\brief The implementation used to compute the output */ OpImplementation implementation; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("outputs", &outputs); v->Visit("implementation", &implementation); } static constexpr const char* _type_key = "relay.LoweredOutput"; TVM_DECLARE_FINAL_OBJECT_INFO(LoweredOutputNode, Object); }; class LoweredOutput : public ObjectRef { public: TVM_DLL LoweredOutput(tvm::Array<te::Tensor> outputs, OpImplementation impl); TVM_DEFINE_OBJECT_REF_METHODS(LoweredOutput, ObjectRef, LoweredOutputNode); }; class CCacheKey; /*! \brief Compile cache key */ class CCacheKeyNode : public Object { public: /*! \brief The source function to be lowered. */ Function source_func; /*! \brief The hardware target.*/ Target target; /*! \brief The virtual device constrains.*/ VirtualDevice virtual_device; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("source_func", &source_func); v->Visit("target", &target); v->Visit("virtual_device", &virtual_device); } /*! \return The hash value of CCacheKey. */ inline size_t Hash() const; /*! * \brief check content equality * \param other The other value. * \return The result of equality check. */ inline bool Equal(const CCacheKeyNode* other) const; static constexpr const char* _type_key = "relay.CCacheKey"; TVM_DECLARE_FINAL_OBJECT_INFO(CCacheKeyNode, tvm::Object); private: /*! * \brief internal cached hash value. */ mutable size_t hash_{0}; }; /*! \brief cache entry used in compile engine */ class CCacheKey : public ObjectRef { public: CCacheKey() {} explicit CCacheKey(ObjectPtr<Object> n) : ObjectRef(n) {} /*! * \brief The constructor * \param source_func The source function. * \param target The target device. 
*/ TVM_DLL CCacheKey(Function source_func, Target target, VirtualDevice virtual_device = VirtualDevice::FullyUnconstrained()); const CCacheKeyNode* operator->() const { return static_cast<const CCacheKeyNode*>(get()); } // comparator inline bool operator==(const CCacheKey& other) const { ICHECK(defined() && other.defined()); return (*this)->Equal(other.operator->()); } using ContainerType = CCacheKeyNode; }; /*! \brief Node container to represent a cached function. */ struct CachedFuncNode : public Object { /*! \brief compiled target */ tvm::Target target; /*! \brief Primitive Function Name */ GlobalVar prim_fn_var; /*! \brief The inputs to the function */ tvm::Array<te::Tensor> inputs; /*! \brief The outputs to the function */ tvm::Array<te::Tensor> outputs; /*! \brief The schedule to the function */ te::Schedule schedule; /*! \brief The TIR function if lowering in the meta schedule path */ Optional<tir::PrimFunc> prim_func; /*! \brief Parameter usage states in the shape function. */ tvm::Array<Integer> shape_func_param_states; /*! \brief The lowered functions to support the function. 
*/ IRModule funcs = IRModule(Map<GlobalVar, BaseFunc>({})); std::unordered_map<const ConstantNode*, te::Tensor> constant_tensors; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("target", &target); v->Visit("prim_fn_var", &prim_fn_var); v->Visit("inputs", &inputs); v->Visit("outputs", &outputs); v->Visit("schedule", &schedule); v->Visit("prim_func", &prim_func); v->Visit("funcs", &funcs); v->Visit("shape_func_param_states", &shape_func_param_states); } static constexpr const char* _type_key = "relay.CachedFunc"; TVM_DECLARE_FINAL_OBJECT_INFO(CachedFuncNode, Object); }; class CachedFunc : public ObjectRef { public: CachedFunc(tvm::Target target, GlobalVar prim_fn_name, tvm::Array<te::Tensor> inputs, tvm::Array<te::Tensor> outputs, te::Schedule schedule, tir::PrimFunc prim_func, tvm::Array<Integer> shape_func_param_states, IRModule funcs = IRModule(Map<GlobalVar, BaseFunc>({})), std::unordered_map<const ConstantNode*, te::Tensor> constant_tensors = {}); public: TVM_DEFINE_OBJECT_REF_METHODS(CachedFunc, ObjectRef, CachedFuncNode); }; /*! \brief Node container for compile cache. */ class CCacheValueNode : public Object { public: /*! \brief The corresponding function */ CachedFunc cached_func; /*! \brief Result of Packed function generated by JIT */ PackedFunc packed_func; /*! \brief usage statistics */ int use_count{0}; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("cached_func", &cached_func); v->Visit("use_count", &use_count); } static constexpr const char* _type_key = "relay.CCacheValue"; TVM_DECLARE_FINAL_OBJECT_INFO(CCacheValueNode, tvm::Object); }; /*! 
 \brief cache entry used in compile engine
 */
class CCacheValue : public ObjectRef {
 public:
  CCacheValue() {}
  explicit CCacheValue(ObjectPtr<Object> n) : ObjectRef(n) {}
  CCacheValueNode* operator->() { return static_cast<CCacheValueNode*>(get_mutable()); }
  const CCacheValueNode* operator->() const { return static_cast<const CCacheValueNode*>(get()); }
  using ContainerType = CCacheValueNode;
};

Array<IndexExpr> GetShape(const Array<IndexExpr>& shape);

/*!
 * \brief Lowers Relay primitive Function to TE Compute
 * \param source_func The primitive function to be lowered.
 * \param target The target we want to create schedule for.
 * \param return_inputs If true, prepend input tensors to the output array of tensors.
 * \return Tuple of the lowered TE compute, constant raw data, and fused function name.
 */
std::tuple<Array<te::Tensor>, Array<runtime::NDArray>, std::string> LowerTECompute(
    const Function& source_func, Target target, bool return_inputs = true);

/*!
 * \brief Create schedule for target.
 * \param source_func The primitive function to be lowered.
 * \param target The target we want to create schedule for.
 * \param global_var_supply Supply of unique global vars for the generated function name.
 * \return Pair of schedule and cache.
 * The funcs field in cache is not yet populated.
 */
CachedFunc PrimFuncFor(const Function& source_func, const Target& target,
                       GlobalVarSupply global_var_supply);

// As PrimFuncFor, but presumably lowers the shape function of \p prim_func — confirm in the
// implementation.
CachedFunc ShapeFuncFor(const Function& prim_func, const Target& target,
                        GlobalVarSupply global_var_supply);

// implementations

inline size_t CCacheKeyNode::Hash() const {
  // Lazily computed and memoized; hash_ == 0 is reserved to mean "not yet computed".
  if (hash_ != 0) return hash_;
  // do structural hash, avoid 0.
  hash_ = tvm::StructuralHash()(this->source_func);
  hash_ = dmlc::HashCombine(hash_, std::hash<std::string>()(target->str()));
  if (hash_ == 0) hash_ = 1;
  // NOTE(review): virtual_device participates in Equal() but not in the hash, so keys differing
  // only by virtual_device collide. Still correct (equal keys hash equal) but worth confirming
  // it is intentional.
  return hash_;
}

inline bool CCacheKeyNode::Equal(const CCacheKeyNode* other) const {
  // Cheap hash comparison first to reject most mismatches before the structural comparison.
  if (Hash() != other->Hash()) return false;
  return this->target->str() == other->target->str() &&
         this->virtual_device == other->virtual_device &&
         tvm::StructuralEqual()(this->source_func, other->source_func);
}

}  // namespace tec
}  // namespace relay
}  // namespace tvm

namespace std {
// overload hash
template <>
struct hash<::tvm::relay::tec::CCacheKey> {
  size_t operator()(const ::tvm::relay::tec::CCacheKey& key) const {
    ICHECK(key.defined());
    return key->Hash();
  }
};
}  // namespace std

#endif  // TVM_RELAY_BACKEND_TE_COMPILER_CACHE_H_
https://github.com/zk-ml/tachikoma
src/relay/backend/token_allocator.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file relay/backend/token_allocator.h
 * \brief Token allocation classes for backend
 */

#ifndef TVM_RELAY_BACKEND_TOKEN_ALLOCATOR_H_
#define TVM_RELAY_BACKEND_TOKEN_ALLOCATOR_H_

#include <tvm/relay/type.h>
#include <tvm/target/virtual_device.h>

#include <map>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <vector>

#include "../../runtime/texture.h"

namespace tvm {
namespace relay {

/*! A representation of a block of memory required at runtime on some device. */
struct StorageToken {
  /*! \brief Reference counter */
  int ref_counter{0};
  /*! \brief number of bytes */
  size_t max_bytes{0};
  /*! \brief The corresponding tensor type. */
  TensorType ttype{nullptr};
  /*! \brief VirtualDevice on which the memory will reside. */
  VirtualDevice virtual_device = VirtualDevice::FullyUnconstrained();
  /*!
   * \brief The storage id
   */
  int64_t storage_id{-1};

  /*! \brief A token is valid only once its virtual device has been constrained. */
  bool is_valid() const { return !virtual_device->IsFullyUnconstrained(); }

  /*! \brief Two tokens may share storage only when they reside on the same virtual device. */
  bool is_compatible(const StorageToken& that) const {
    return virtual_device == that.virtual_device;
  }

  std::string ToString() const {
    std::ostringstream os;
    os << "{storage_id: " << storage_id << ", max_bytes: " << max_bytes
       << ", ttype: " << PrettyPrint(ttype) << ", virtual_device: " << virtual_device << "}";
    return os.str();
  }
};

/**
 * @brief Memory manager for flattened 1d memory (buffers)
 */
class TokenAllocator1D {
 public:
  /*!
   * \brief ceil(size/word_size) to get number of words.
   * \param size The original size.
   * \param word_size The element size.
   */
  static size_t DivRoundUp(size_t size, size_t word_size) {
    return (size + word_size - 1) / word_size;
  }
  /*!
   * \brief Get the memory requirement.
   * \param prototype The prototype token.
   * \return The required memory size.
   *
   * TODO(mbs): Cf. GetMemorySizeBytes in aot_executor_codegen.cc,
   * CalculateRelayExprSizeBytes in utils.cc
   */
  size_t GetMemorySize(StorageToken* prototype);
  /*!
   * \brief Request a storage token for a given prototype.
   * \param prototype The prototype storage token.
   * \return The result token.
   */
  StorageToken* Request(StorageToken* prototype);
  /*!
   * \brief Allocate a storage token by consuming prototype
   * \param prototype The prototype token.
   * \param storage_id The id to assign to the newly allocated storage.
   */
  StorageToken* Alloc(StorageToken* prototype, int64_t storage_id);
  /*!
   * \brief Check if we can release token.
   * \param tok The token to be released.
   */
  void CheckForRelease(StorageToken* tok);

 private:
  // scale used for rough match
  const size_t match_range_{16};
  // free list of storage entry
  std::multimap<size_t, StorageToken*> free_;
  // all the storage resources available
  std::vector<StorageToken*> data_;
};

/**
 * @brief Memory manager for 2d memory (textures)
 */
class TokenAllocator2D {
 public:
  /*!
   * \brief Request a storage token for a given prototype.
   * \param prototype The prototype storage token.
   * \return The result token.
   */
  StorageToken* Request(StorageToken* prototype);
  /*!
   * \brief Allocate a storage token by consuming prototype
   * \param prototype The prototype token.
   * \param storage_id The id to assign to the newly allocated storage.
   */
  StorageToken* Alloc(StorageToken* prototype, int64_t storage_id);
  /*!
   * \brief Check if we can release token.
   * \param tok The token to be released.
   */
  void CheckForRelease(StorageToken* tok);
  /*!
   * \brief Get the texture 2d size requirement
   * \param prototype The prototype token.
   * \return The required texture 2d memory size in (width, height, channel).
   */
  runtime::Texture2DShape<int64_t> GetSize2D(StorageToken* prototype);

 protected:
  // Bookkeeping for one allocated 2d block: the owning token and its extents
  // (x_/y_ are presumably width/height of the texture — confirm in the .cc).
  struct MemBlock {
    StorageToken* token_;
    int64_t x_;
    int64_t y_;
  };

  // storage_id -> allocated block
  std::unordered_map<int64_t, MemBlock> blocks_;
  // storage_ids currently available for reuse
  std::unordered_set<int64_t> free_list_;
};

}  // namespace relay
}  // namespace tvm
#endif  // TVM_RELAY_BACKEND_TOKEN_ALLOCATOR_H_
https://github.com/zk-ml/tachikoma
src/relay/backend/utils.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file relay/backend/utils.h * \brief Utils function for backend */ #ifndef TVM_RELAY_BACKEND_UTILS_H_ #define TVM_RELAY_BACKEND_UTILS_H_ #include <dmlc/json.h> #include <tvm/driver/driver_api.h> #include <tvm/relay/executor.h> #include <tvm/relay/expr.h> #include <tvm/relay/expr_functor.h> #include <tvm/relay/transform.h> #include <tvm/relay/type.h> #include <tvm/target/codegen.h> #include <tvm/target/virtual_device.h> #include <tvm/te/operation.h> #include <tvm/tir/usmp/utils.h> #include <iostream> #include <sstream> #include <string> #include <typeinfo> #include <unordered_map> #include <unordered_set> #include <utility> #include <vector> #include "../../runtime/meta_data.h" #include "../../target/metadata.h" #include "tvm/runtime/ndarray.h" namespace tvm { namespace relay { namespace tec { class TECompiler; } namespace backend { using Pass = tvm::transform::Pass; /*! \brief Describes the type of kernel call emitted. */ enum CallType { /*! * \brief Emit PackedFunc calls bound just-in-time using TVMBackend* functions. * * When this type is selected, assumes all operators must be called via TVMFuncCall. 
Given the * implementation of TVMFuncCall in the C++ runtime, this in practice implies that those * functions are of type TVMBackendPackedCFunc. * * The following code is emitted at call sites to call a function named `func`: * void* func_ptr = TVMBackendGetFuncFromEnv("func"); * TVMFuncCall(func_ptr, values, tcodes, num_args, ret_values, ret_tcodes) * * The arguments given to the tir::Call node are encoded into `values`, `tcodes`, and `num_args` * by LowerTVMBuiltin TIR transform. * * If `resource_handle` is passed to `func`, it is determined by TVMFuncCall (often, * `resource_handle` is registered with the C++ runtime to provide a `this` equivalent when * `func` is implemented in C). * * Compatible with both C++ and C runtimes, implemented with the C runtime only. */ kPacked, // Emit tir.call_packed and wrap all arguments in DLTensor. /*! * \brief Directly call a TVMBackendPackedCFunc named according to the tir::Call. * * When this type is selected, assumes all operators are implemented in functions of type * `TVMBackendPackedCFunc` and should be called directly. That is, presumes at the time of * downstream compilation that there is a symbol named after the 0th arg to tir::Call of * type `TVMBackendPackedCFunc`. This situation should occur when target_host == target. * * The following code is emitted at call sites to call a function named `func`: * func(values, tcodes, num_args, ret_values, ret_tcodes, resource_handle) * * The arguments given to the tir::Call node are encoded into `values`, `tcodes`, and `num_args` * by LowerTVMBuiltin TIR transform. * * `resource_handle` is encoded as the final argument to the tir::Call node. In practice, it is * always the device context parameter when not null. At present, the implementation does not * support forwarding device context parameters to CPacked. * * Compatible with the C runtime and C++ runtime (so long as target_host == target). Implemented * in the same scenarios. 
*/ kCPacked, // Emit tir.call_cpacked and wrap all arguments in DLTensor. /*! \brief Directly call a function accepting the `data` arrays as args. * * When this type is selected, assumes all operaotrs are implemented in C functions whose * arguments are 1-to-1 with those in the tir::Call. DLTensor arguments are encoded as just the * `data` parameters (i.e. no DLTensor object is passed along). * * The following code is emitted at call sites to a function named `func`: * func(void* arg0, void* arg1, ..., void* argN) // no resource_handle * -or- * func(void* arg0, void* arg1, ..., void* argN, void* resource_handle) // with resource_handle * * `resource_handle` is encoded as the final argument to the tir::Call node. In practice, it is * always the device context parameter when not null. * * Compatible with the C runtime and C++ runtime (so long as target_host == target). Implemented * with the C runtime only. */ kUnpacked, // Emit tir.call_extern passing only the `data` part of DLTensors. }; /*! * \brief Structure that can be optionally used by the executor codegen */ class ExecutorCodegenMetadataNode : public Object { public: /*! \brief input information for the main function */ Array<tir::Var> inputs; /*! \brief input tensor type information */ Array<TensorType> input_tensor_types; /*! \brief output information for the main function */ Array<String> outputs; /*! \brief output tensor type information */ Array<TensorType> output_tensor_types; /*! \brief pool information for the main function */ Array<tir::Var> pools; /*! \brief device contexts information for the main function */ Array<String> devices; /*! \brief the executor to be used to run the model */ String executor = runtime::kTvmExecutorGraph; /*! \brief The external API (packed or c) in use */ String interface_api; /*! \brief The internal API (packed or unpacked) in use */ bool unpacked_api; /*! \brief Alginment of the workspace in bytes */ Integer workspace_alignment; /*! 
\brief Alginment of the constants in bytes */ Integer constant_alignment; /*! \brief the input var names that correspond to pool_inputs */ Optional<Map<tir::Var, tir::usmp::AllocatedPoolInfo>> pool_inputs; /*! \brief the I/O tensor to PoolAllocations if any*/ Map<String, tir::usmp::PoolAllocation> io_pool_allocations; String mod_name = ""; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("inputs", &inputs); v->Visit("input_tensor_types", &input_tensor_types); v->Visit("outputs", &outputs); v->Visit("output_tensor_types", &output_tensor_types); v->Visit("pools", &pools); v->Visit("devices", &devices); v->Visit("executor", &executor); v->Visit("interface_api", &interface_api); v->Visit("unpacked_api", &unpacked_api); v->Visit("workspace_alignment", &workspace_alignment); v->Visit("constant_alignment", &constant_alignment); v->Visit("pool_inputs", &pool_inputs); v->Visit("io_pool_allocations", &io_pool_allocations); v->Visit("mod_name", &mod_name); } static constexpr const char* _type_key = "MetadataObj"; TVM_DECLARE_FINAL_OBJECT_INFO(ExecutorCodegenMetadataNode, Object); }; /*! * \brief Managed reference to ExecutorCodegenMetadataNode. */ class ExecutorCodegenMetadata : public ObjectRef { public: TVM_DLL ExecutorCodegenMetadata(Array<tir::Var> inputs, Array<TensorType> input_tensor_types, Array<String> outputs, Array<TensorType> output_tensor_types, Array<tir::Var> pools, Array<String> devices, String executor, String mod_name, String interface_api = "packed", bool unpacked_api = false, Integer workspace_alignment = 16, Integer constant_alignment = 16, Map<tir::Var, tir::usmp::AllocatedPoolInfo> pool_inputs = Map<tir::Var, tir::usmp::AllocatedPoolInfo>(), Map<String, tir::usmp::PoolAllocation> io_pool_allocations = {}); TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(ExecutorCodegenMetadata, ObjectRef, ExecutorCodegenMetadataNode); }; /*! * \brief The static storage information for each Tensor in the result of a Relay expression * (as per relay::FlattenTupleType). 
*/ class StorageInfoNode : public Object { public: // TODO(mbs): Switch from struct-of-array to array-of-struct repr throughout. /*! \brief The set of storage ids where the expression is stored. */ std::vector<int64_t> storage_ids; /* \brief The virtual devices these expressions are stored within. */ std::vector<VirtualDevice> virtual_devices; /* \brief The sizes of each storage element, in bytes. */ std::vector<int64_t> storage_sizes_in_bytes; // TODO(@jroesch): expose the fields void VisitAttrs(AttrVisitor* v) {} static constexpr const char* _type_key = "relay.StorageInfo"; TVM_DECLARE_FINAL_OBJECT_INFO(StorageInfoNode, Object); }; /*! \brief The storage information for a single expression. */ class StorageInfo : public ObjectRef { public: StorageInfo(std::vector<int64_t> storage_ids, std::vector<VirtualDevice> virtual_devices, std::vector<int64_t> storage_sizes_in_bytes); TVM_DEFINE_OBJECT_REF_METHODS(StorageInfo, ObjectRef, StorageInfoNode); }; /*! * \brief The result of static memory planning. */ class StaticMemoryPlanNode : public Object { public: Map<Expr, StorageInfo> expr_to_storage_info; void VisitAttrs(AttrVisitor* v) { v->Visit("expr_to_storage_info", &expr_to_storage_info); } static constexpr const char* _type_key = "relay.StaticMemoryPlan"; TVM_DECLARE_FINAL_OBJECT_INFO(StaticMemoryPlanNode, Object); }; /*! \brief The result of running static memory planning. 
*/ class StaticMemoryPlan : public ObjectRef { public: explicit StaticMemoryPlan(Map<Expr, StorageInfo> expr_to_storage_info); TVM_DEFINE_OBJECT_REF_METHODS(StaticMemoryPlan, ObjectRef, StaticMemoryPlanNode); }; struct FunctionInfoNode : public Object { Map<Target, Integer> workspace_sizes; Map<Target, Integer> io_sizes; Map<Target, Integer> constant_sizes; Map<Target, tir::PrimFunc> tir_primfuncs; Map<Target, Function> relay_primfuncs; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("workspace_sizes", &workspace_sizes); v->Visit("io_sizes", &io_sizes); v->Visit("constant_sizes", &constant_sizes); v->Visit("tir_primfuncs", &tir_primfuncs); v->Visit("relay_primfuncs", &relay_primfuncs); } static constexpr const char* _type_key = "relay.backend.FunctionInfo"; TVM_DECLARE_FINAL_OBJECT_INFO(FunctionInfoNode, Object); }; class FunctionInfo : public ObjectRef { public: FunctionInfo(Map<Target, Integer> workspace_sizes, Map<Target, Integer> io_sizes, Map<Target, Integer> constant_sizes, Map<Target, tir::PrimFunc> tir_primfuncs, Map<Target, Function> relay_primfuncs); TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(FunctionInfo, ObjectRef, FunctionInfoNode); }; /*! * \brief Calculate the bytes of memory needed to hold a tensor of a given shape and data type. * \param shape The shape of the tensor * \param dtype The data type of the tensor */ size_t GetMemorySizeBytes(const Array<PrimExpr>& shape, const DataType& dtype); /*! * \brief Calculate the storage required to store the type of relay.Expr * * \param func The relay expr for which the storage is calculated */ int64_t CalculateRelayExprSizeBytes(const Type& expr_type); /*! * \brief Executor generator artifacts. Those artifacts are subsequently * used by the relay build process. */ struct LoweredOutput { std::string graph_json; Map<Target, IRModule> lowered_funcs; Array<tvm::runtime::Module> external_mods; Map<String, FunctionInfo> function_metadata; /*! 
* \brief Map from constant names (allocated by the codegen as constants are encountered) * to the constant's value. */ std::unordered_map<std::string, tvm::runtime::NDArray> params; ExecutorCodegenMetadata metadata; }; /*! * \brief This class is needed to avoid a GCC 5 bug that prevents maps containing enums from being compiled. If i386 GCC version is increased, we can remove it. */ struct EnumClassHash { template <typename T> std::size_t operator()(T t) const { return static_cast<std::size_t>(t); } }; /*! * \brief A helper to expand the params by adding the ones used in a given expression. */ struct ConstantUpdater : public ExprVisitor { public: ConstantUpdater(const std::string& symbol, std::unordered_map<std::string, runtime::NDArray>* params) : symbol_(symbol), params_(params) {} void VisitExpr_(const ConstantNode* cn) final { std::string name = symbol_ + "_const_" + std::to_string(const_idx_++); VLOG(1) << "binding '" << name << "' to constant of type " << PrettyPrint(cn->checked_type()); (*params_)[name] = cn->data; } private: int const_idx_{0}; std::string symbol_; std::unordered_map<std::string, runtime::NDArray>* params_; }; /*! * \brief A function to update the params with constants found in an external function. * \param func The function from which to get the constant params. * \param params The params to update with the constants. */ inline void UpdateConstants(BaseFunc func, std::unordered_map<std::string, runtime::NDArray>* params) { VLOG_CONTEXT << "UpdateConstants"; VLOG(1) << "updating constants for:" << std::endl << PrettyPrint(func); auto codegen = func->GetAttr<String>(attr::kCompiler); ICHECK(codegen.defined()) << "No external codegen is set"; std::string codegen_name = codegen.value(); const auto name_node = func->GetAttr<String>(tvm::attr::kGlobalSymbol); std::string symbol = std::string(name_node.value()); std::string const_update_name = "relay.ext." 
+ codegen_name + ".constant_updater"; // Get the constant updater for the external codegen auto pf = tvm::runtime::Registry::Get(const_update_name); // If the backend hasn't registered a constant updater, use a default one if (pf == nullptr) { ConstantUpdater const_visit(symbol, params); const_visit(func); } else { Map<String, tvm::runtime::NDArray> constants = (*pf)(func, symbol); for (const auto& it : constants) { std::string const_name(it.first); // Constant names should begin this the compiler name (to avoid conflicts) ICHECK(const_name.find(codegen_name) == 0) << "External constant names must start with compiler name"; (*params)[const_name] = it.second; } } for (const auto& pair : *params) { VLOG(1) << "Constants: " << pair.first << " = " << PrettyPrint(pair.second); } } /*! * \brief A simple wrapper around ExprFunctor for a single argument case. * The result of visit is memoized. */ template <typename OutputType> class MemoizedExprTranslator : public ::tvm::relay::ExprFunctor<OutputType(const Expr&)> { using BaseFunctor = ::tvm::relay::ExprFunctor<OutputType(const Expr&)>; public: /*! \brief virtual destructor */ virtual ~MemoizedExprTranslator() {} /*! * \brief The memoized call. * \param n The expression node. * \return The result of the call */ virtual OutputType VisitExpr(const Expr& n) { ICHECK(n.defined()); auto it = memo_.find(n); if (it != memo_.end()) { return it->second; } auto res = BaseFunctor::VisitExpr(n); memo_[n] = res; return res; } protected: /*! \brief Internal map used for memoization. */ std::unordered_map<Expr, OutputType, ObjectPtrHash, ObjectPtrEqual> memo_; }; /*! * \brief Get the Packed Func * * \param func_name * \return const PackedFunc* */ inline const PackedFunc* GetPackedFunc(const std::string& func_name) { return tvm::runtime::Registry::Get(func_name); } /*! * \brief Get a typed packed function. * * \param func_name * \return const PackedFunc* */ template <typename R, typename... 
Args> inline const runtime::TypedPackedFunc<R(Args...)> GetTypedPackedFunc(const std::string& func_name) { auto* pf = GetPackedFunc(func_name); ICHECK(pf != nullptr) << "can not find packed function"; return runtime::TypedPackedFunc<R(Args...)>(*pf); } /*! * \brief Extract shape from an IndexExpr array to std::vector<int64_t> * * \param shape The shape in Array * \return The converted shape in std::vector<int64_t> */ inline std::vector<int64_t> GetIntShape(const Array<IndexExpr>& shape) { std::vector<int64_t> ret; for (const auto& dim : shape) { const int64_t* pval = tir::as_const_int(dim); ret.push_back(pval ? *pval : -1); } return ret; } /*! * \brief Convert type to string * * \param typ * \return std::string string format of type */ inline std::string DType2String(const tvm::DataType dtype) { std::ostringstream os; if (dtype.is_float()) { os << "float"; } else if (dtype.is_int()) { os << "int"; } else if (dtype.is_uint()) { os << "uint"; } else if (dtype.is_bfloat16()) { os << "bfloat"; } else if ((*GetPackedFunc("runtime._datatype_get_type_registered"))(dtype.code())) { os << "custom[" << (*GetPackedFunc("runtime._datatype_get_type_name"))(dtype.code()).operator std::string() << "]"; } else { LOG(FATAL) << "Unknown type with code " << static_cast<unsigned>(dtype.code()); } os << dtype.bits(); return os.str(); } /*! * \brief Bind params to function by using name * \param func Relay function * \param params params dict * \return relay::Function */ relay::Function BindParamsByName(relay::Function func, const std::unordered_map<std::string, runtime::NDArray>& params); /*! * \brief Bind params to the main function in Relay module, using BindParamsByName * \param mod Relay module * \param params params dict */ void BindParamsInModule(IRModule mod, const std::unordered_map<std::string, runtime::NDArray>& params); void BindParamsInModule(IRModule mod, Map<String, runtime::NDArray> params); /*! * \brief Extract the shape from a Relay tensor type. 
* \param type The provided type. * \return The extracted shape in a list. */ inline std::vector<int> GetShape(const Type& type) { const auto* ttype = type.as<TensorTypeNode>(); ICHECK(ttype) << "Expect TensorTypeNode"; std::vector<int> shape; for (size_t i = 0; i < ttype->shape.size(); ++i) { auto* val = ttype->shape[i].as<IntImmNode>(); ICHECK(val); shape.push_back(val->value); } return shape; } /*! * \brief Check if a call has the provided name. * \param call A Relay call node. * \param op_name The name of the expected call. * \return true if the call's name is equivalent to the given name. Otherwise, * false. */ inline bool IsOp(const CallNode* call, const std::string& op_name) { const auto* op_node = call->op.as<OpNode>(); ICHECK(op_node) << "Expects a single op."; Op op = GetRef<Op>(op_node); return op == Op::Get(op_name); } /*! * \brief Retrieve the "root" op nested inside a fused call, such as conv2d in relu(add(conv2d)) * \param call A Relay call node. Typically nn.relu when called the first time. * \param depth The number of calls before the root op, counting from current_call. * \param expected_op_names The names of ops in this fused call. 
Example: {"nn.conv2d", "add", * "nn.relu"} * \return A CallNode corresponding to the root op, whose name is expected_op_names[0] */ inline const CallNode* GetRootCall(const CallNode* current_call, int depth, const std::vector<std::string>& expected_op_names) { ICHECK(current_call && depth >= 0 && static_cast<size_t>(depth) < expected_op_names.size() && IsOp(current_call, expected_op_names[depth])); if (depth == 0) { return current_call; } ICHECK_GT(current_call->args.size(), 0); size_t valid_node_idx = 0; while (valid_node_idx < current_call->args.size() && current_call->args[valid_node_idx].as<VarNode>()) { valid_node_idx++; } while (valid_node_idx < current_call->args.size() && !(IsOp(current_call->args[valid_node_idx].as<CallNode>(), expected_op_names[depth - 1]))) { valid_node_idx++; } const auto* next_call = current_call->args[valid_node_idx].as<CallNode>(); return GetRootCall(next_call, depth - 1, expected_op_names); } /*! * \brief Retrieve the "root" op nested inside a fused call, such as conv2d in relu(add(conv2d)) * Unlike the previous definition, it does not verify operator names of intermediate nodes. Instead, * it recursively visit child nodes until it finds a call node with the given op_name. * \param call A Relay call node. * \param op_name The name of an op to look for, such as ""nn.conv2d". * \return A CallNode corresponding to the root op with the given op_name */ inline const CallNode* GetRootCall(const CallNode* current_call, const std::string& op_name) { if (current_call == nullptr) return nullptr; if (IsOp(current_call, op_name)) return current_call; ICHECK_GT(current_call->args.size(), 0); const auto* next_call = current_call->args[0].as<CallNode>(); return GetRootCall(next_call, op_name); } /*! * \brief Retrieve the expected "root" op nested inside a fused call, such as conv2d in * relu(add(conv2d)) * \param call A Relay call node. Typically nn.relu when called the first time. 
* \param max_depth The maximum number of calls before the root op, counting from current_call. * \param op_name The name of expected "root" op in this fused call. * \return A CallNode corresponding to the root op */ inline const CallNode* GetRootCall(const CallNode* current_call, int max_depth, const std::string& op_name) { ICHECK(current_call && max_depth >= 0); if (max_depth == 0) { ICHECK(current_call && IsOp(current_call, op_name)); return current_call; } if (IsOp(current_call, op_name)) { return current_call; } ICHECK_GT(current_call->args.size(), 0); size_t valid_node_idx = 0; while (valid_node_idx < current_call->args.size() && current_call->args[valid_node_idx].as<VarNode>()) { valid_node_idx++; } const auto* next_call = current_call->args[valid_node_idx].as<CallNode>(); return GetRootCall(next_call, max_depth - 1, op_name); } /*! * \brief Get the external symbol of the Relay function name. * * \param func The provided function. * \return An external symbol. */ inline std::string GetExtSymbol(const Function& func) { const auto name_node = func->GetAttr<String>(tvm::attr::kGlobalSymbol); ICHECK(name_node.defined()) << "Fail to retrieve external symbol."; return std::string(name_node.value()); } /*! * \brief Return whether the auto scheduler is enabled in the pass context. */ inline bool IsAutoSchedulerEnabled() { return transform::PassContext::Current() ->GetConfig<Bool>("relay.backend.use_auto_scheduler", Bool(false)) .value(); } /*! * \brief Return whether the meta schedule is enabled in the pass context. */ inline bool IsMetaScheduleEnabled() { return transform::PassContext::Current() ->GetConfig<Bool>("relay.backend.use_meta_schedule", Bool(false)) .value(); } /*! \brief Consider MetaSchedule's dispatch option. */ inline int UseMetaScheduleDispatch() { return transform::PassContext::Current() ->GetConfig<Integer>("relay.backend.use_meta_schedule_dispatch", Integer(0)) .value() ->value; } /*! 
* \brief Method in TECompiler to convert TE compute to scheduleable TIR * \param args The arguments of the TE compute * \param constants The constants used in AllocateConst * \return NullOpt if conversion fails; Otherwise the converted TIR * \note This method could be further used as a task filtering mechanism in task extraction */ using FTECompilerTIRConverter = runtime::TypedPackedFunc< // Optional<tir::PrimFunc>( // const Array<te::Tensor>& args, // const Array<runtime::NDArray>& constants)>; /*! \brief Return a task filter for AutoTIR according to `relay.backend.tir_converter` */ inline FTECompilerTIRConverter GetTIRConverter() { String name = transform::PassContext::Current() ->GetConfig<String>("relay.backend.tir_converter", "default") .value(); const PackedFunc* f = runtime::Registry::Get("relay.backend.tir_converter." + name); ICHECK(f != nullptr) << "IndexError: Cannot find TIR converter: " << name; return FTECompilerTIRConverter(*f); } /*! \brief Converts a PrimFunc to IRModule. */ inline IRModule PrimFuncToIRModule(tir::PrimFunc f) { f = WithAttrs(f, Map<String, ObjectRef>{ {tvm::attr::kGlobalSymbol, String("main")}, {tvm::tir::attr::kNoAlias, Bool(1)}, }); return IRModule({{GlobalVar("main"), f}}); } /*! * \brief Get the sequence of Relay optimization passes based on backend type. * The prefix of the Relay passes almost overlaps between the vm and graph backend, with some slight * difference. This function unifies the shared optimization pass prefix between vm and graph * runtime, and returns the pass prefix given the backend type. * * \param is_homogeneous True if all primitives are to be executed on the same device and target. * \param is_vm True if passes are to be used for the vm executor. * \return An array of passes. */ Array<Pass> GetPassPrefix(bool is_homogeneous, bool is_vm); /*! \brief Target hash function */ struct TargetStrHash { /*! * \brief Calculate the hash code of a Target based on the string value of the Target KIND. 
Note that this hash should NOT be used in new usecases, equality of targets based on their value is not well-defined. This will be removed when maps from Targets to IRModules are removed from the codebase. * \param target The Target to hash * \return String hash of the target */ size_t operator()(const Target& target) const { std::string s(target->kind->name); return String::HashBytes(s.c_str(), s.size()); } }; /*! \brief Target equality function based on the string value of Target Note that this equality function should NOT be used in new usecases, equality of targets based on their value is not well-defined. This will be removed when maps from Targets to IRModules are removed from the codebase.*/ struct TargetStrEqual { /*! * \brief Check if the two Targets are equal * \param target One Target * \param other_target The other Target * \return String equality of the targets */ const bool operator()(const Target& target, const Target& other_target) const { TargetStrHash target_hash = TargetStrHash(); return target_hash(target) == target_hash(other_target); } }; /*! * \brief Convert a Map<Target, IRModule> to std::unordered_map<Target, IRmodule, TargetStrHash, * TargetStrEqual> Target equality is currently based on pointer equality, which is a problem since * we have a lot of Map<Target, IRModule> in the codebase. This function converts the map to a * version that is keyed based on string value of the Target instead. Note that once we remove * Map<Target, IRModule>, this function will be removed. * \param input_map The map to convert * \return The converted map */ std::unordered_map<Target, IRModule, TargetStrHash, TargetStrEqual> TargetModuleMapToTargetStrModuleMap(Map<Target, IRModule> input_map); /*! * \brief Convert a std::unordered_map<Target, IRmodule, TargetStrHash, TargetStrEqual> to * Map<Target, IRModule> This function is a helper that undoes TargetModuleMapToTargetStr. Note that * once we remove Map<Target, IRModule>, this function will be removed. 
* \param input_map The map to convert * \return The converted map */ Map<Target, IRModule> TargetStrModuleMapToTargetModuleMap( std::unordered_map<Target, IRModule, TargetStrHash, TargetStrEqual> input_map); /*! * \brief Call "weight update callback" to communicate op weights seen during Relay module * lowering back to the auto scheduler. * Op weights refer to the number of times each distinct op/workload appears in a given module. * It is called "use_count" in TECompiler. * \param IRModule after lowering by LowerTEPass. */ void UpdateAutoSchedulerOpWeights(const IRModule& module); /*! * \brief Extract shape from expr to vector<int64_t> * * \param shape * \return std::vector<int64_t> */ std::vector<int64_t> ShapeToJSON(tvm::Array<IndexExpr> shape); } // namespace backend } // namespace relay } // namespace tvm #endif // TVM_RELAY_BACKEND_UTILS_H_
https://github.com/zk-ml/tachikoma
src/relay/backend/vm/compiler.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file src/relay/backend/vm/compiler.h
 * \brief A compiler from relay::Module to the VM byte code.
 */

#ifndef TVM_RELAY_BACKEND_VM_COMPILER_H_
#define TVM_RELAY_BACKEND_VM_COMPILER_H_

#include <tvm/ir/error.h>
#include <tvm/relay/expr_functor.h>
#include <tvm/relay/interpreter.h>
#include <tvm/relay/transform.h>
#include <tvm/runtime/logging.h>
#include <tvm/runtime/vm/vm.h>
#include <tvm/tir/function.h>

#include <iostream>
#include <memory>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>

#include "../../../runtime/vm/naive_allocator.h"
#include "../../../runtime/vm/profiler/vm.h"
#include "../../transforms/pass_utils.h"
#include "../te_compiler.h"
#include "../te_compiler_cache.h"

namespace tvm {
namespace relay {
namespace vm {

using namespace tvm::runtime;
using namespace tvm::runtime::vm;
using namespace relay::transform;

/*! \brief Map with object-identity hashing/equality, keyed by TVM object references. */
template <typename T, typename U>
using NodeMap = std::unordered_map<T, U, ObjectPtrHash, ObjectPtrEqual>;
using TagMap = NodeMap<tvm::relay::Constructor, Index>;
using TagNameMap = std::unordered_map<size_t, tvm::relay::Constructor>;
using GlobalMap = NodeMap<GlobalVar, Index>;
using ConstMap = NodeMap<Constant, Index>;
using ConstTensorShapeMap = NodeMap<TensorType, std::pair<Index, NDArray>>;

/*! \brief Shared state threaded through the whole VM compilation. */
struct VMCompilerContext {
  // The module context for the compilation
  IRModule module;
  // Error reporter
  ErrorReporter err_reporter;
  // Map from a unique integer to ADT constructor tag
  TagNameMap tag_index_map;
  // Map from ADT constructor tag to a unique integer
  TagMap tag_map;
  // Map from global var to a unique integer
  GlobalMap global_map;
  // List of constants
  std::vector<NDArray> constants;
  // Device indexes for constants
  std::vector<Index> const_device_indexes;
  // Map from names of primitive functions already allocated to their primitive function index.
  std::unordered_map<std::string, Index> primitive_map;
  // The virtual devices corresponding to each device index.
  std::vector<VirtualDevice> virtual_devices_;
};

/*! \brief Compiles a relay IRModule into a VM \p Executable (see \p Lower / \p Codegen). */
class VMCompiler : public runtime::ModuleNode {
 public:
  VMCompiler() = default;
  virtual ~VMCompiler() = default;

  virtual PackedFunc GetFunction(const std::string& name, const ObjectPtr<Object>& sptr_to_self);

  const char* type_key() const final { return "VMCompiler"; }

  /*!
   * \brief Set the parameters
   *
   * \param name name of parameter
   * \param data_in input DLTensor
   */
  void SetParam(const std::string& name, runtime::NDArray data_in);

  /*!
   * \brief Lower the functions in a Module.
   *
   * ----------------------------------------------------------------------------------
   * | This is the main entry point for the VM compilation flow.                      |
   * |  - Preceded by \p SetParam for the global params.                              |
   * |  - Followed by \p Codegen() to finalize the executable.                        |
   * |  - Then the result runtime::Module can be constructed by GetExecutable.        |
   * ----------------------------------------------------------------------------------
   *
   * \param mod Relay Module
   * \param raw_targets List of available targets for running kernels. Any host target should
   * be conveyed by the 'host' target field.
   */
  void Lower(IRModule mod, const Array<Target>& raw_targets);

  /*!
   * \brief Perform a series of optimizations on the input IR module. Can be used instead
   * of Lower if wish to stop and observe optimized IRModule. Otherwise not needed on
   * regular compilation flow.
   *
   * \param mod The input IRModule.
   * \param raw_targets List of available target for running kernels.
   *
   * \return The optimized IRModule.
   */
  IRModule OptimizeModule(IRModule mod, const Array<Target>& raw_targets);

  /*! \brief Generate the machine code for lowered functions. */
  void Codegen();

  /*! \brief Returns the runtime::Module containing the compiled VM code. */
  runtime::Module GetExecutable() const;

 protected:
  /*! \brief Builds the executor and compilation config to match \p raw_targets. */
  void Setup(const Array<Target>& raw_targets);

  /*! \brief Internal implementation of \p Lower. */
  void LowerImpl(IRModule mod);

  /*! \brief Internal implementation of \p OptimizeModule. */
  IRModule OptimizeModuleImpl(IRModule mod);

  /*! \brief Returns the passes which layout memory. */
  transform::Sequential MemoryOpt(const CompilationConfig& config);

  /*! \brief Returns the passes which fuse then lower Relay primitive operators. */
  transform::Sequential FuseAndLowerOperators(const CompilationConfig& config);

  /*!
   * \brief Populate the global function names in a map where the value is used
   * as the index by the VMFunctions. Returns the number of functions.
   */
  size_t PopulateGlobalMap();

 protected:
  /*! \brief Targets and scopes needed for compilation. */
  CompilationConfig config_;
  /*! \brief Global shared meta data */
  VMCompilerContext context_;
  /*! \brief Compiled executable. */
  ObjectPtr<Executable> exec_;
  /*! \brief parameters (name -> bound NDArray, populated via \p SetParam) */
  std::unordered_map<std::string, runtime::NDArray> params_;
};

}  // namespace vm
}  // namespace relay
}  // namespace tvm

#endif  // TVM_RELAY_BACKEND_VM_COMPILER_H_
https://github.com/zk-ml/tachikoma
src/relay/collage/candidate_function_cache.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file src/relay/collage/candidate_function_cache.h
 * \brief A cache of the unique global symbol name and cost for partitioned functions.
 */

#ifndef TVM_RELAY_COLLAGE_CANDIDATE_FUNCTION_CACHE_H_
#define TVM_RELAY_COLLAGE_CANDIDATE_FUNCTION_CACHE_H_

#include <tvm/relay/function.h>

#include <memory>
#include <string>
#include <unordered_map>
#include <utility>

#include "../transforms/compiler_function_utils.h"
#include "./cost.h"
#include "./name_supply.h"

namespace tvm {
namespace relay {
namespace collage {

/*!
 * \brief A cache of the unique global symbol and cost for functions extracted to represent
 * partitions. If two functions are structurally equal (which includes equality of their "Compiler"
 * attributes) then they will share the same global symbol and estimated cost. We rely on the
 * function's attributes to distinguish partitions which are structurally the same graph but
 * intended for different targets.
 */
class CandidateFunctionCache : public transform::GlobalSymbolCache {
 public:
  explicit CandidateFunctionCache(std::shared_ptr<NameSupply> name_supply)
      : name_supply_(std::move(name_supply)) {}

  /*! \brief The cached global symbol and (lazily filled-in) cost for one function. */
  struct Entry {
    GlobalVar global_symbol;
    Cost cost = Cost::Unknown();  // Filled in when have estimated cost.

    explicit Entry(GlobalVar global_symbol) : global_symbol(std::move(global_symbol)) {}
  };

  /*!
   * \brief Returns the unique entry for \p function. If no such entry already exists, create it
   * and assign it a unique global symbol name.
   */
  Entry& GetEntry(const std::string& label, const Function& function);

  /*! \brief Returns the unique global symbol for \p function (see \p GlobalSymbolCache). */
  GlobalVar GetGlobalSymbol(const Function& function) final;

 private:
  /*! \brief Supplies fresh, unique global symbol names for new entries. */
  std::shared_ptr<NameSupply> name_supply_;
  /*! \brief Maps structurally-equal functions to their shared entry. */
  std::unordered_map<Function, Entry, StructuralHash, StructuralEqual> cache_;
};

}  // namespace collage
}  // namespace relay
}  // namespace tvm

#endif  // TVM_RELAY_COLLAGE_CANDIDATE_FUNCTION_CACHE_H_
https://github.com/zk-ml/tachikoma
src/relay/collage/candidate_partition.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file src/relay/collage/candidate_partition.h
 * \brief A potential partition in the Collage search.
 */

#ifndef TVM_RELAY_COLLAGE_CANDIDATE_PARTITION_H_
#define TVM_RELAY_COLLAGE_CANDIDATE_PARTITION_H_

#include <tvm/runtime/container/string.h>
#include <tvm/target/compilation_config.h>

#include <memory>
#include <string>
#include <vector>

#include "./candidate_function_cache.h"
#include "./cost.h"
#include "./cost_estimator.h"
#include "./name_supply.h"
#include "./sub_graph.h"

namespace tvm {
namespace relay {
namespace collage {

class PartitionSpec;

/*!
 * \brief A candidate partition w.r.t. the overall Relay model.
 *
 * We represent the partition as a sub-graph. This means not only can we represent the scope
 * of Relay sub-expressions intended for a particular partition (or kernel), but we can also
 * represent various conventions for encoding how the operators within the partition should be
 * tagged for downstream processing.
 */
class CandidatePartitionNode : public Object {
 public:
  CandidatePartitionNode() = default;

  /*!
   * \brief Combination of all the partition rule names which produced this candidate.
   * For debugging and explainability.
   */
  String rule_name_;

  /*!
   * \brief The sub-graph of the overall expression matched by the partition rule.
   */
  SubGraph sub_graph_;

  /*!
   * \brief The partition specification which produced this candidate.
   */
  ObjectRef /* actually PartitionSpec */ spec_;

  /*!
   * \brief The (cached) cost of the partition.
   *
   * Initially Cost::Unknown, calculated and cached by EstimateCost.
   */
  mutable Cost cost_ = Cost::Unknown();

  void VisitAttrs(AttrVisitor* v);

  /*!
   * \brief Returns the partition specification which produced this candidate.
   */
  PartitionSpec partition_spec() const;

  /*!
   * \brief Returns the name of the partition specification which produced this candidate.
   */
  std::string partition_spec_name() const;

  /*!
   * \brief Returns the target of the partition specification which produced this candidate.
   */
  Target target() const;

  /*!
   * \brief Return the estimated cost of the candidate partition, using \p cost_estimator and
   * \p cache.
   */
  Cost EstimatedCost(const DataflowGraph& dataflow_graph, const CostEstimator& cost_estimator,
                     const std::shared_ptr<CandidateFunctionCache>& cache) const;

  /*!
   * \brief Returns a brief description of candidate suitable for debugging output.
   */
  std::string ToSummary(const DataflowGraph& dataflow_graph) const;

  std::string ToString() const;

  static constexpr const char* _type_key = "relay.collage.CandidatePartition";
  TVM_DECLARE_FINAL_OBJECT_INFO(CandidatePartitionNode, Object);
};

/*! \brief Reference class for \p CandidatePartitionNode. */
class CandidatePartition : public ObjectRef {
 public:
  CandidatePartition(String rule_name, SubGraph sub_graph,
                     ObjectRef /* actually PartitionSpec */ spec, Cost cost = Cost::Unknown());

  bool operator<(const CandidatePartition& that) const;

  /*!
   * \brief Returns true if this and \p that candidate are disjoint, have the same (or no) target,
   * and touch. This does not imply the \p DisjointUnion of this and that will be valid. For
   * example, the result may be too deep or have too many outputs.
   */
  bool AreTouching(const DataflowGraph& dataflow_graph, const CandidatePartition& that) const;

  /*!
   * \brief Returns the disjoint union of this and \p that.
   */
  CandidatePartition DisjointUnion(const DataflowGraph& dataflow_graph,
                                   const CandidatePartition& that) const;

  /*!
   * \brief Returns the disjoint union of all \p candidates.
   */
  static CandidatePartition DisjointUnion(const DataflowGraph& dataflow_graph,
                                          std::vector<CandidatePartition> candidates);

  /*!
   * \brief Returns the root expression of \p dataflow_graph rewritten to apply all the partitions
   * implied by \p candidates. The candidates can be in any order but must be disjoint.
   */
  static Expr ParallelRewrite(const DataflowGraph& dataflow_graph,
                              const std::vector<CandidatePartition>& candidates);

  /*!
   * Eagerly merge all touching candidates for the same target. The candidates must be disjoint
   * and have their Targets filled in. This is typically called on the optimal list of candidate
   * partitions found by the Collage search in order to remove unnecessary partition boundaries.
   * Ideally the search would never produce such candidates however to keep the search space
   * manageable Collage may only consider candidate partitions up to a particular depth.
   */
  static std::vector<CandidatePartition> MaxCoalesce(const DataflowGraph& dataflow_graph,
                                                     std::vector<CandidatePartition> candidates);

  TVM_DEFINE_OBJECT_REF_METHODS(CandidatePartition, ObjectRef, CandidatePartitionNode);
  TVM_DEFINE_OBJECT_REF_COW_METHOD(CandidatePartitionNode);
};

CandidatePartition WithRuleName(CandidatePartition candidate, String rule_name);
CandidatePartition WithTarget(CandidatePartition candidate, Target target);
CandidatePartition WithSubGraph(CandidatePartition candidate, SubGraph sub_graph);

/*! \brief Hashes a candidate by its sub-graph only (matching \p CandidatePartitionEquals). */
struct CandidatePartitionHash {
  size_t operator()(const CandidatePartition& candidate) const {
    return candidate->sub_graph_->hash();
  }
};

/*! \brief Compares candidates by their sub-graphs only. */
struct CandidatePartitionEquals {
  bool operator()(const CandidatePartition& left, const CandidatePartition& right) const {
    return *left->sub_graph_.get() == *right->sub_graph_.get();
  }
};

/*! \brief Orders candidates by their sub-graphs only. */
struct CandidatePartitionCompare {
  bool operator()(const CandidatePartition& left, const CandidatePartition& right) const {
    return *left->sub_graph_.get() < *right->sub_graph_.get();
  }
};

}  // namespace collage
}  // namespace relay
}  // namespace tvm

#endif  // TVM_RELAY_COLLAGE_CANDIDATE_PARTITION_H_
https://github.com/zk-ml/tachikoma
src/relay/collage/candidate_partition_index.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file src/relay/collage/candidate_partition_index.h
 * \brief Index for finding relevant candidate partitions for a particular search state.
 */

#ifndef TVM_RELAY_COLLAGE_CANDIDATE_PARTITION_INDEX_H_
#define TVM_RELAY_COLLAGE_CANDIDATE_PARTITION_INDEX_H_

#include <tvm/relay/expr.h>

#include <memory>
#include <string>
#include <unordered_map>
#include <vector>

#include "./partition_spec.h"

namespace tvm {
namespace relay {
namespace collage {

/*!
 * \brief Collects and indexes all the candidate partitions for the overall expression. This index
 * is used during partitioning search to find the next valid candidate partition to explore from the
 * current search state. We do not yet attempt to estimate the cost of each candidate partition, and
 * when we do so during the search we may discover it to be infeasible.
 */
class CandidatePartitionIndex {
 public:
  CandidatePartitionIndex(const std::unordered_map<const ExprNode*, VirtualDevice>* virtual_devices,
                          DataflowGraph* dataflow_graph);

  /*! \brief Constructs the index. */
  void Index(const Array<PartitionSpec>& partition_specs);

  /*! \brief Returns all the candidates which may begin at \p index. */
  const std::vector<CandidatePartition>& candidates_at(PostDfsIndex index) const {
    ICHECK_LT(index, dataflow_graph_->size());
    return first_inside_index_to_candidates_[index];
  }

  /*! \brief Estimates the costs of all candidates in the index. Each candidate caches its cost. */
  void EstimateAllCosts(const CostEstimator cost_estimator,
                        const std::shared_ptr<CandidateFunctionCache>& cache);

  size_t size() const { return size_; }

  std::string ToSummary() const;

 private:
  /*!
   * \brief Returns true if \p candidate's desired target is compatible with any existing target
   * constraints on the candidate's sub-expressions.
   */
  bool IsCompatibleWithVirtualDevice(const CandidatePartition& candidate);

  /*! \brief Returns all valid candidates found from \p partition_specs. */
  std::vector<CandidatePartition> Collect(const Array<PartitionSpec>& partition_specs);

  /*!
   * \brief The \p VirtualDevice for every sub-expression in the overall expression. Needed to
   * ensure candidates do not contradict the target/device placement already determined by
   * device planning.
   */
  const std::unordered_map<const ExprNode*, VirtualDevice>* virtual_devices_;

  /*! \brief Dataflow graph for overall expression. */
  DataflowGraph* dataflow_graph_;

  /*!
   * \brief Maps post-dfs indexes to the all the candidates which have that as their first inside
   * index, and which should be considered in the Collage search.
   */
  std::vector<std::vector<CandidatePartition>> first_inside_index_to_candidates_;

  /*! \brief Number of entries in above. */
  size_t size_ = 0;
};

}  // namespace collage
}  // namespace relay
}  // namespace tvm

#endif  // TVM_RELAY_COLLAGE_CANDIDATE_PARTITION_INDEX_H_
https://github.com/zk-ml/tachikoma
src/relay/collage/candidate_set.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file src/relay/collage/candidate_set.h
 * \brief Collects a set of candidate partitions.
 */

#ifndef TVM_RELAY_COLLAGE_CANDIDATE_SET_H_
#define TVM_RELAY_COLLAGE_CANDIDATE_SET_H_

#include <algorithm>
#include <unordered_set>
#include <utility>
#include <vector>

#include "./candidate_partition.h"
#include "./dataflow_graph.h"

namespace tvm {
namespace relay {
namespace collage {

/*!
 * \brief Holds a vector of current candidates and the additions/removals to apply to them.
 */
struct CandidateSet {
  CandidateSet() = default;

  explicit CandidateSet(std::vector<CandidatePartition> candidates_to_add);

  /*!
   * \brief Schedule \p new_candidate for addition before the next round (unless it is not valid).
   */
  void Add(const DataflowGraph& dataflow_graph, const CandidatePartition& new_candidate);

  /*! \brief Schedule \p old_candidate for removal before the next round. */
  void Remove(const CandidatePartition& old_candidate);

  /*!
   * \brief Update \p current_candidates and \p first_new_index. Return false if no
   * new candidates were added, in which case we have reached a fixed point.
   */
  bool PrepareForNextRound();

  size_t size() const { return current_candidates_.size(); }

  CandidatePartition operator[](size_t i) const {
    ICHECK_LT(i, current_candidates_.size());
    return current_candidates_[i];
  }
  CandidatePartition at(size_t i) const { return (*this)[i]; }

  size_t first_new_index() const { return first_new_index_; }

  void sort() { std::sort(current_candidates_.begin(), current_candidates_.end()); }

  // Destructively moves the current candidates out of the set.
  std::vector<CandidatePartition> MovedCurrentCandidates() {
    return std::move(current_candidates_);
  }

 private:
  /*!
   * \brief Index of first candidate in current_candidates added in last round. This can be used to
   * avoid considering candidates or candidate combinations which have already been considered in an
   * earlier round.
   */
  size_t first_new_index_ = 0;
  /*! \brief Candidates gathered in previous rounds. */
  std::vector<CandidatePartition> current_candidates_;
  /*! \brief New candidates gathered in the current round. */
  std::vector<CandidatePartition> candidates_to_add_;
  /*! \brief Existing candidates to remove before starting the next round. */
  std::vector<CandidatePartition> candidates_to_remove_;
  /*! \brief Which candidates have been seen so far and should not be added again. */
  std::unordered_set<CandidatePartition, CandidatePartitionHash, CandidatePartitionEquals> seen_;
};

}  // namespace collage
}  // namespace relay
}  // namespace tvm

#endif  // TVM_RELAY_COLLAGE_CANDIDATE_SET_H_
https://github.com/zk-ml/tachikoma
src/relay/collage/collage_partitioner.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file src/relay/collage/collage_partitioner.h
 * \brief Search for an optimal partitioning of a Relay model.
 *
 * See:
 *   Collage: Automated Integration of Deep Learning Backends
 *   Byungsoo Jeon, Sunghyun Park, Peiyuan Liao, Sheng Xu, Tianqi Chen, Zhihao Jia
 *   https://arxiv.org/pdf/2111.00655.pdf
 */

#ifndef TVM_RELAY_COLLAGE_COLLAGE_PARTITIONER_H_
#define TVM_RELAY_COLLAGE_COLLAGE_PARTITIONER_H_

#include <tvm/relay/transform.h>

#include "./cost_estimator.h"

namespace tvm {
namespace relay {
namespace collage {

/*!
 * \brief Explores the space of all possible (sub-graph, target) pairs which cover the
 * model, and applies the globally optimal choice (assuming partition costs are additive).
 */
transform::Pass CollagePartition(CompilationConfig config, CostEstimator cost_estimator);

}  // namespace collage
}  // namespace relay
}  // namespace tvm

#endif  // TVM_RELAY_COLLAGE_COLLAGE_PARTITIONER_H_
https://github.com/zk-ml/tachikoma
src/relay/collage/combiner_rule.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file src/relay/collage/combiner_rule.h
 * \brief Helpers for the \p CombinePartitionRule
 */

#ifndef TVM_RELAY_COLLAGE_COMBINER_RULE_H_
#define TVM_RELAY_COLLAGE_COMBINER_RULE_H_

#include <tvm/relay/dataflow_pattern.h>
#include <tvm/relay/expr.h>

#include <string>

#include "./candidate_partition.h"
#include "./candidate_set.h"
#include "./sub_graph.h"

namespace tvm {
namespace relay {
namespace collage {

/*!
 * \brief Base class for all 'simple' combiner rules.
 *
 * Given \p upstream and \p downstream candidates which touch, a simple combiner rule returns
 * true if their union should also be considered a candidate.
 */
class SimpleCombinerRuleNode : public Object {
 public:
  String rule_name_;

  void VisitAttrs(AttrVisitor* v);

  virtual bool Fires(const DataflowGraph& dataflow_graph, const CandidatePartition& upstream,
                     const CandidatePartition& downstream) const;

  virtual std::string ToString() const;

  static constexpr const char* _type_key = "relay.collage.SimpleCombinerRule";
  static constexpr const uint32_t _type_child_slots = 1;
  TVM_DECLARE_BASE_OBJECT_INFO(SimpleCombinerRuleNode, Object);
};

class SimpleCombinerRule : public ObjectRef {
 public:
  explicit SimpleCombinerRule(String rule_name);

  TVM_DEFINE_OBJECT_REF_METHODS(SimpleCombinerRule, ObjectRef, SimpleCombinerRuleNode);
};

/*!
 * \brief A simple combiner rule which fires if the \p upstream and \p downstream candidates have
 * the given \p upstream_kind and \p downstream_kind (or less) respectively.
 */
class ByKindSimpleCombinerRuleNode : public SimpleCombinerRuleNode {
 public:
  OpPatternKind upstream_kind_;
  OpPatternKind downstream_kind_;

  void VisitAttrs(AttrVisitor* v);

  bool Fires(const DataflowGraph& dataflow_graph, const CandidatePartition& upstream,
             const CandidatePartition& downstream) const override;

  std::string ToString() const override;

  static constexpr const char* _type_key = "relay.collage.ByKindSimpleCombinerRule";
  TVM_DECLARE_FINAL_OBJECT_INFO(ByKindSimpleCombinerRuleNode, SimpleCombinerRuleNode);
};

class ByKindSimpleCombinerRule : public SimpleCombinerRule {
 public:
  ByKindSimpleCombinerRule(OpPatternKind upstream_kind, OpPatternKind downstream_kind);

  TVM_DEFINE_OBJECT_REF_METHODS(ByKindSimpleCombinerRule, SimpleCombinerRule,
                                ByKindSimpleCombinerRuleNode);
};

/*! \brief Context required by CombinerRuleNode::AppendAllResults. */
struct AppendAllResultsContext {
  AppendAllResultsContext(const DataflowGraph* dataflow_graph, size_t max_depth,
                          CandidateSet* candidate_set)
      : dataflow_graph(dataflow_graph), max_depth(max_depth), candidate_set(candidate_set) {}

  const DataflowGraph* dataflow_graph;
  size_t max_depth;
  CandidateSet* candidate_set;
};

/*!
 * \brief Base class for all 'combiner' rules.
 *
 * Given the current candidate set, a combiner rule looks for opportunities to form larger
 * candidates, optionally removing existing candidates in the process.
 */
class CombinerRuleNode : public Object {
 public:
  String rule_name_;

  void VisitAttrs(AttrVisitor* v);

  virtual void AppendAllResults(AppendAllResultsContext* ctxt) const;

  virtual std::string ToString() const;

  static constexpr const char* _type_key = "relay.collage.CombinerRule";
  static constexpr const uint32_t _type_child_slots = 4;
  TVM_DECLARE_BASE_OBJECT_INFO(CombinerRuleNode, Object);
};

class CombinerRule : public ObjectRef {
 public:
  explicit CombinerRule(String rule_name);

  TVM_DEFINE_OBJECT_REF_METHODS(CombinerRule, ObjectRef, CombinerRuleNode);
};

/*!
 * \brief A combiner rule which runs one or more simple combiner rules over the current
 * touching candidates.
 */
class AllSimpleCombinerRuleNode : public CombinerRuleNode {
 public:
  Array<SimpleCombinerRule> simple_rules_;

  void VisitAttrs(AttrVisitor* v);

  void AppendAllResults(AppendAllResultsContext* ctxt) const override;

  std::string ToString() const override;

  static constexpr const char* _type_key = "relay.collage.AllSimpleCombinerRule";
  TVM_DECLARE_FINAL_OBJECT_INFO(AllSimpleCombinerRuleNode, CombinerRuleNode);
};

class AllSimpleCombinerRule : public CombinerRule {
 public:
  AllSimpleCombinerRule(String rule_name, Array<SimpleCombinerRule> simple_rules);

  TVM_DEFINE_OBJECT_REF_METHODS(AllSimpleCombinerRule, CombinerRule, AllSimpleCombinerRuleNode);
};

/*!
 * \brief A combiner rule which combines injective sub-groups which appear inside tuples which are
 * themselves inputs to injective sub-groups.
 */
class TupleArgCombinerRuleNode : public CombinerRuleNode {
 public:
  void VisitAttrs(AttrVisitor* v);

  void AppendAllResults(AppendAllResultsContext* ctxt) const override;

  std::string ToString() const override;

  static constexpr const char* _type_key = "relay.collage.TupleArgCombinerRule";
  TVM_DECLARE_FINAL_OBJECT_INFO(TupleArgCombinerRuleNode, CombinerRuleNode);
};

class TupleArgCombinerRule : public CombinerRule {
 public:
  explicit TupleArgCombinerRule(String rule_name);

  TVM_DEFINE_OBJECT_REF_METHODS(TupleArgCombinerRule, CombinerRule, TupleArgCombinerRuleNode);
};

/*!
 * \brief A combiner rule which combines tuple projection if it's an output of an injective
 * group.
 */
class TupleProjCombinerRuleNode : public CombinerRuleNode {
 public:
  void VisitAttrs(AttrVisitor* v);

  void AppendAllResults(AppendAllResultsContext* ctxt) const override;

  std::string ToString() const override;

  static constexpr const char* _type_key = "relay.collage.TupleProjCombinerRule";
  TVM_DECLARE_FINAL_OBJECT_INFO(TupleProjCombinerRuleNode, CombinerRuleNode);
};

class TupleProjCombinerRule : public CombinerRule {
 public:
  explicit TupleProjCombinerRule(String rule_name);

  TVM_DEFINE_OBJECT_REF_METHODS(TupleProjCombinerRule, CombinerRule, TupleProjCombinerRuleNode);
};

/*!
 * \brief A combiner rule which combines constants in argument positions to existing candidates.
 * Note that scalars are always inlined, so this rule only combines tensor constant arguments.
 */
class ConstantCombinerRuleNode : public CombinerRuleNode {
 public:
  void VisitAttrs(AttrVisitor* v);

  void AppendAllResults(AppendAllResultsContext* ctxt) const override;

  std::string ToString() const override;

  static constexpr const char* _type_key = "relay.collage.ConstantCombinerRule";
  TVM_DECLARE_FINAL_OBJECT_INFO(ConstantCombinerRuleNode, CombinerRuleNode);
};

class ConstantCombinerRule : public CombinerRule {
 public:
  explicit ConstantCombinerRule(String rule_name);

  TVM_DEFINE_OBJECT_REF_METHODS(ConstantCombinerRule, CombinerRule, ConstantCombinerRuleNode);
};

}  // namespace collage
}  // namespace relay
}  // namespace tvm

#endif  // TVM_RELAY_COLLAGE_COMBINER_RULE_H_
https://github.com/zk-ml/tachikoma
src/relay/collage/cost.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file src/relay/collage/cost.h * \brief Represents the estimated cost of a candidate partition. */ #ifndef TVM_RELAY_COLLAGE_COST_H_ #define TVM_RELAY_COLLAGE_COST_H_ #include <tvm/runtime/logging.h> #include <cmath> #include <limits> #include <string> namespace tvm { namespace relay { namespace collage { /*! * \brief The assumed cost for a candidate partition. Generally average execution time in seconds. * However other cost functions are possible, for example to introduce a penalty for high memory * use, etc. */ class Cost { public: Cost() = delete; static Cost Zero() { return Cost(0.0); } /*! * \brief Returns the distinguished 'invalid' cost signaling a candidate partition is not * supported by the intended target, for example because the sub-graph has an unsupported operator * or the intermediate memory required exceeds some system limit. */ static Cost Invalid() { return Cost(std::numeric_limits<double>::infinity()); } bool is_invalid() const { return std::isinf(value_) && value_ > 0.0; } /*! * \brief Returns the distinguished 'unknown' cost, signaling fixed priorities should be used to * choose the best partitions. 
This can be used to disable tuning and fallback to fixed rules, * much as TVM will use an un-tuned kernel if no tuning records are available. */ static Cost Unknown() { return Cost(std::numeric_limits<double>::quiet_NaN()); } bool is_unknown() const { return std::isnan(value_); } /*! \brief Returns cost with given finite, non-negative value. */ static Cost Value(double value) { ICHECK(!std::isnan(value) && !std::isinf(value) && value >= 0.0); return Cost(value); } bool is_value() const { return !std::isnan(value_) && !std::isinf(value_); } double value() const { ICHECK(is_value()); return value_; } /*! \brief Return true if the less-than relation is defined for this and that. */ bool are_comparable(Cost that) const { return !std::isnan(value_) && !std::isnan(that.value_); } /*! \brief Returns sum of this and that. */ Cost operator+(Cost that) const { return Cost(value_ + that.value_); } /*! \brief Returns difference of this and that. */ Cost operator-(Cost that) const { return Cost(value_ - that.value_); } /*! \brief Returns true if this is cheaper than that, assuming they are comparable. */ bool operator<(Cost that) const { return value_ < that.value_; } std::string ToString() const; private: explicit Cost(double value) : value_(value) {} /*! * \brief Non-negative value or: * - +inf if candidate partition is not feasible. * - NaN if candidate partition has an unknown cost (priority may be used to break ties). */ double value_ = 0.0; }; } // namespace collage } // namespace relay } // namespace tvm #endif // TVM_RELAY_COLLAGE_COST_H_
https://github.com/zk-ml/tachikoma
src/relay/collage/cost_estimator.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file src/relay/collage/cost_estimator.h
 * \brief Interface for measuring candidate partition cost.
 */

#ifndef TVM_RELAY_COLLAGE_COST_ESTIMATOR_H_
#define TVM_RELAY_COLLAGE_COST_ESTIMATOR_H_

#include <tvm/relay/function.h>

#include "./cost.h"

namespace tvm {
namespace relay {
namespace collage {

/*!
 * \brief An (abstract) estimator for the cost of executing "main" in an \p IRModule representing
 * a candidate partition, using the given target for lowering and codegen.
 *
 * Generally the implementation will compile to a \p runtime::Module (possibly on a target-specific
 * worker if cross-compilation is not available), repeatedly invoke "main" with random data until
 * measure variance is acceptable (on a target-specific worker), and return the summarized costs.
 *
 * If using a TVM native \p Target, it is possible compilation will itself invoke TVM tuning.
 *
 * TODO(mbs): Actually, currently not abstract so can get some local measurements.
 */
class CostEstimatorNode : public Object {
 public:
  /*!
   * \brief Returns the estimated cost (possibly after many many minutes of training time) of
   * running "main" in \p mod using \p target, which represents a possible partitioning of
   * some overall Relay expression.
   */
  virtual Cost Estimate(const IRModule& mod, const Target& target) const;

  static constexpr const char* _type_key = "relay.collage.CostEstimator";
  TVM_DECLARE_BASE_OBJECT_INFO(CostEstimatorNode, Object);
};

/*! \brief Non-nullable reference class for \p CostEstimatorNode. */
class CostEstimator : public ObjectRef {
 public:
  CostEstimator();

  TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(CostEstimator, ObjectRef, CostEstimatorNode);
};

}  // namespace collage
}  // namespace relay
}  // namespace tvm

#endif  // TVM_RELAY_COLLAGE_COST_ESTIMATOR_H_
https://github.com/zk-ml/tachikoma
src/relay/collage/dataflow_graph.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file src/relay/collage/dataflow_graph.h
 * \brief A representation of the dataflow for an overall Relay expression.
 */

#ifndef TVM_RELAY_COLLAGE_DATAFLOW_GRAPH_H_
#define TVM_RELAY_COLLAGE_DATAFLOW_GRAPH_H_

#include <tvm/relay/expr.h>

#include <memory>
#include <vector>

#include "../ir/indexed_graph.h"
#include "./index_set.h"

namespace tvm {
namespace relay {
namespace collage {

/*!
 * \brief Represents the dataflow of an overall Relay expression.
 *
 * Wraps an \p IndexedGraph<Expr> (post-dfs indexed view of the expression) together with a
 * precomputed per-node set of downstream node indexes.
 */
class DataflowGraph {
 public:
  using Node = IndexedGraph<Expr>::Node;

  explicit DataflowGraph(Expr expr);

  /*! \brief Returns the number of nodes in the underlying indexed graph. */
  size_t size() const { return indexed_graph_->size(); }
  /*! \brief Returns the node with the given post-dfs \p index. */
  const Node* index_to_node(PostDfsIndex index) const {
    return indexed_graph_->index_to_node(index);
  }
  /*! \brief Returns the node for \p expr, looked up by reference. */
  const Node* item_to_node(const Expr& expr) const { return indexed_graph_->item_to_node(expr); }
  /*! \brief Returns the node for \p expr_node, looked up by raw pointer. */
  const Node* item_to_node(const ExprNode* expr_node) const {
    return indexed_graph_->item_to_node(expr_node);
  }
  /*! \brief Returns the overall expression this graph was built from. */
  const Expr& expr() const { return expr_; }
  /*! \brief Returns the underlying indexed graph. */
  const IndexedGraph<Expr>& indexed_graph() const { return *indexed_graph_; }

  /*! \brief Returns the set of node indexes downstream of the node at \p index. */
  const IndexSet& downstream_of(PostDfsIndex index) const {
    ICHECK_LT(index, indexed_graph_->size());
    return downstream_map_[index];
  }

 private:
  /*! \brief The overall expression. */
  Expr expr_;
  /*! \brief The indexed graph which captures the main dataflow. */
  std::unique_ptr<IndexedGraph<Expr>> indexed_graph_;
  /*! \brief Map from a node's PostDfsIndex to the set of its downstream dataflow node indexes. */
  std::vector<IndexSet> downstream_map_;
};

}  // namespace collage
}  // namespace relay
}  // namespace tvm

#endif  // TVM_RELAY_COLLAGE_DATAFLOW_GRAPH_H_
https://github.com/zk-ml/tachikoma
src/relay/collage/gather_partition_specs.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file src/relay/collage/gather_partition_specs.h
 * \brief Gather the relevant \p PartitionSpecs from the available \p Targets.
 */

#ifndef TVM_RELAY_COLLAGE_GATHER_PARTITION_SPECS_H_
#define TVM_RELAY_COLLAGE_GATHER_PARTITION_SPECS_H_

#include <tvm/target/compilation_config.h>

#include "./partition_spec.h"

namespace tvm {
namespace relay {
namespace collage {

/*!
 * \brief The 'styles' of BYOC integrations. Used to influence how their corresponding
 * partition rule is constructed.
 */
enum BYOCStyle {
  /*!
   * \brief The BYOC patterns pick out 'ideal' candidates directly, either because:
   *  - the BYOC toolchain does not perform any fusion so each matched sub-expression maps 1:1 to a
   *    BYOC-provided operator, or
   *  - the BYOC toolchain does perform fusion, however the patterns have been written to pick out
   *    fusable sub-graphs.
   */
  kNoFusionBYOCStyle,

  /*!
   * \brief The BYOC patterns pick out supported operators, but the BYOC backend may perform
   * fusion over those operators in much the same way TVM does.
   */
  kTVMFusionBYOCStyle,

  /*!
   * \brief The BYOC patterns pick out supported operators, but the BYOC backend may perform
   * arbitrary fusion over those operators.
   */
  kArbitraryFusionBYOCStyle,
};

/*!
 * \brief Returns all the partition specifications gathered from the \p Targets in \p config.
 */
Array<PartitionSpec> GatherPartitionSpecs(const CompilationConfig& config);

}  // namespace collage
}  // namespace relay
}  // namespace tvm

#endif  // TVM_RELAY_COLLAGE_GATHER_PARTITION_SPECS_H_
https://github.com/zk-ml/tachikoma
src/relay/collage/index_set.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file src/relay/collage/index_set.h * \brief Efficient representation of a set of post-dfs indexes. */ #ifndef TVM_RELAY_COLLAGE_INDEX_SET_H_ #define TVM_RELAY_COLLAGE_INDEX_SET_H_ #include <string> #include <unordered_map> #include <utility> #include <vector> #include "../ir/dataflow_matcher_impl.h" #include "../ir/indexed_graph.h" namespace tvm { namespace relay { namespace collage { using IndexSubst = std::unordered_map<size_t, size_t>; class IndexSet { public: IndexSet() = default; explicit IndexSet(size_t size) : bitvec_(size, false) {} IndexSet(size_t size, const std::vector<size_t>& indexes); IndexSet operator&(const IndexSet& that) const; IndexSet operator|(const IndexSet& that) const; IndexSet operator-(const IndexSet& that) const; bool AreDisjoint(const IndexSet& that) const; bool IsSubset(const IndexSet& that) const; bool Intersects(const IndexSet& that) const; bool operator[](size_t index) const { ICHECK_LT(index, bitvec_.size()); return bitvec_[index]; } IndexSet& Add(size_t index) { ICHECK_LT(index, bitvec_.size()); bitvec_[index] = true; return *this; } IndexSet Subst(size_t new_size, const IndexSubst& subst) const; size_t end_index() const { return bitvec_.size(); } size_t 
PopCount() const; bool IsZero() const; size_t FirstInsideIndex() const; size_t LastInsideIndex() const; size_t NextIndex(size_t index) const; size_t FirstOutsideIndex() const; bool operator==(const IndexSet& that) const; bool operator!=(const IndexSet& that) const; bool operator<(const IndexSet& that) const; size_t hash() const; std::string ToString() const; struct IndexSetIterator { const IndexSet* set; size_t i; size_t operator*() const { ICHECK_LT(i, set->end_index()); return i; } const IndexSetIterator& operator++() { ICHECK_LT(i, set->end_index()); i = set->NextIndex(i); return *this; } bool operator==(const IndexSetIterator& that) const { ICHECK(set == that.set); return i == that.i; } bool operator!=(const IndexSetIterator& that) const { ICHECK(set == that.set); return i != that.i; } }; IndexSetIterator begin() const { return IndexSetIterator{this, FirstInsideIndex()}; } IndexSetIterator end() const { return IndexSetIterator{this, end_index()}; } private: explicit IndexSet(std::vector<bool> bitvec) : bitvec_(std::move(bitvec)) {} std::vector<bool> bitvec_; }; struct IndexSetEqual { bool operator()(const IndexSet& left, const IndexSet& right) const { return left == right; } }; struct IndexSetHash { size_t operator()(const IndexSet& set) const { return set.hash(); } }; } // namespace collage } // namespace relay } // namespace tvm #endif // TVM_RELAY_COLLAGE_INDEX_SET_H_
https://github.com/zk-ml/tachikoma
src/relay/collage/mock_cost_estimator.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file src/relay/collage/mock_cost_estimator.h
 * \brief A mock CostEstimator to support unit tests.
 */

#ifndef TVM_RELAY_COLLAGE_MOCK_COST_ESTIMATOR_H_
#define TVM_RELAY_COLLAGE_MOCK_COST_ESTIMATOR_H_

#include <tvm/relay/function.h>

#include "./cost.h"
#include "./cost_estimator.h"

namespace tvm {
namespace relay {
namespace collage {

// Clang (15.0.3, at least) validly complains about `@main`, but it invalidly
// complains even about `\c @main`.
#if __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdocumentation-unknown-command"
#endif

/*!
 * \brief A mock cost estimator which can determine the cost of a candidate based on both
 * the candidate's target and the number of operator calls inside it.
 *
 * To help unit tests, the estimator also ICHECK-fails if:
 *  - the module has inlined "Compiler" functions
 *  - @main has non-tensor arguments (eg a tuple)
 *  - more than the given number of candidate modules are measured
 *
 * To support unit testing only.
 */
class MockCostEstimatorNode : public CostEstimatorNode {
 public:
  Cost Estimate(const IRModule& mod, const Target& target) const override;

  static constexpr const char* _type_key = "relay.collage.MockCostEstimator";
  TVM_DECLARE_FINAL_OBJECT_INFO(MockCostEstimatorNode, CostEstimatorNode);

 protected:
  /*!
   * \brief Map from target kind name to assumed baseline cost (in integer seconds) for all
   * operator calls.
   */
  Map<String, Integer> target_costs_;

  /*!
   * \brief If non-zero, the maximum number of distinct modules which may be estimated.
   */
  Integer max_estimates_;

  /*! \brief Number of calls to Estimate. */
  // mutable: Estimate is const but must count invocations to enforce max_estimates_.
  mutable size_t num_estimates_ = 0;

  friend class MockCostEstimator;
};

#if __clang__
#pragma clang diagnostic pop
#endif

/*! \brief Reference class for \p MockCostEstimatorNode. */
class MockCostEstimator : public CostEstimator {
 public:
  explicit MockCostEstimator(Map<String, Integer> target_costs, Integer max_estimates = 0);

  TVM_DEFINE_OBJECT_REF_METHODS(MockCostEstimator, CostEstimator, MockCostEstimatorNode);
};

}  // namespace collage
}  // namespace relay
}  // namespace tvm

#endif  // TVM_RELAY_COLLAGE_MOCK_COST_ESTIMATOR_H_
https://github.com/zk-ml/tachikoma
src/relay/collage/name_supply.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file src/relay/collage/name_supply.h
 * \brief A source of fresh variable names.
 */

#ifndef TVM_RELAY_COLLAGE_NAME_SUPPLY_H_
#define TVM_RELAY_COLLAGE_NAME_SUPPLY_H_

#include <initializer_list>
#include <string>
#include <unordered_map>
#include <utility>

namespace tvm {
namespace relay {
namespace collage {

/*! \brief A supply of fresh names. */
class NameSupply {
 public:
  /*! \brief Constructs a supply whose generated names all carry \p prefix (may be empty). */
  explicit NameSupply(std::string prefix) : prefix_(std::move(prefix)) {}

  /*! \brief Returns a child supply derived from this one (implemented in the .cc). */
  NameSupply MakeSubNameSupply();

  /*!
   * \brief Marks \p existing as already in use so Fresh will not return it.
   * A no-op if the name was already reserved (emplace does not overwrite).
   */
  void Reserve(const std::string& existing) { next_free_index_.emplace(existing, 1); }

  /*!
   * \brief Returns a name not yet returned or reserved, built from the first usable hint in
   * \p hints (implemented in the .cc).
   */
  std::string Fresh(const std::initializer_list<std::string>& hints);

 private:
  /*! \brief Prefix for all names. May be empty. */
  std::string prefix_;
  /*! \brief Next unused index for variables with given basename. */
  std::unordered_map<std::string, int> next_free_index_;
};

}  // namespace collage
}  // namespace relay
}  // namespace tvm

#endif  // TVM_RELAY_COLLAGE_NAME_SUPPLY_H_
https://github.com/zk-ml/tachikoma
src/relay/collage/partition_rule.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file src/relay/collage/partition_rule.h
 * \brief Compositional partitioning rules.
 */

#ifndef TVM_RELAY_COLLAGE_PARTITION_RULE_H_
#define TVM_RELAY_COLLAGE_PARTITION_RULE_H_

#include <tvm/relay/dataflow_pattern.h>
#include <tvm/relay/expr.h>

#include <string>
#include <vector>

#include "../../printer/doc.h"
#include "./candidate_partition.h"
#include "./combiner_rule.h"
#include "./sub_graph.h"

namespace tvm {
namespace relay {
namespace collage {

/*!
 * \brief Type of function to check if a matched sub-expression should be accepted by a rule. This
 * can be used to, eg, reject operators of unsupported shape or dtype, or otherwise implement rules
 * which are difficult to express in the dataflow pattern language directly.
 */
using TPatternPredicate = TypedPackedFunc<bool(const Expr& matched_sub_expr)>;

/*!
 * \brief The default pattern predicate. Always returns true.
 */
bool DefaultPatternPredicate(const Expr& matched_sub_expr);

/*!
 * \brief Base class of all partition rules.
 *
 * A \p PartitionRule describes how to find a set of \p CandidatePartitions for a \p DataflowGraph.
 * The candidates are allowed to overlap, and ultimately it is the job of the Collage searcher to
 * find a selection of candidates which covers the whole Relay expression without overlap. Partition
 * rules are paired with their \p Target and other 'top level' configuration in a \p PartitionSpec.
 *
 * We provide a set of 'base' partition rules which produce candidates from the dataflow graph
 * directly. We also provide a set of 'combinator' partition rules which can produce new candidates
 * from the results of an arbitrary sub-rule or sub-rules. By mixing these base and combinator
 * rules we can express a wide variety of partition strategies and encoding conventions.
 *
 * There may be many thousands of candidates in flight during the Collage search. We take care to
 * defer constructing or rewriting Relay expressions until absolutely necessary. We only pay for
 * extracting a function to represent a candidate when we need to measure its cost. And we only
 * pay for rewriting the overall Relay expression to commit to a partitioning when the Collage
 * search has completed.
 *
 * The base rules implemented so far:
 *  - \p DFPatternPartitionRule: Given a \p DFPattern and expression predicate, produces a candidate
 *    for every sub-graph matched by the pattern and predicate. Unlike the \p PatternRewriter,
 *    candidates are free to overlap. Used to bring BYOC patterns into the Collage framework.
 *  - \p OpCallByKindPartitionRule: Uses the "TOpPattern" attribute provided for every Relay
 *    operator to produce a candidate for every call to a 'fusable Relay operator'. Used to
 *    look ahead to how TVM will fuse sub-graphs.
 *
 * The combinator rules implemented so far:
 *  - \p CompositePartitionRule: Indicates all candidates matched by the sub-rule should be wrapped
 *    by a "Composite" function. The "Composite" name is taken from the rule name. Used to indicate
 *    Relay operators (or groups of Relay operators) should be mapped to target-specific operators,
 *    both for BYOC and TVM external library integrations.
 *  - \p PrimitivePartitionRule: Indicates all candidates matched by the sub-rule should be wrapped
 *    by a "Primitive" function, possibly with an additional "Compiler" attribute. Used to
 *    delineate a partition (or kernel).
 *  - \p UnionPartitionRule: Simply unions all the candidates from all sub-rules together. Used to
 *    combine individual \p DFPatternPartitionRules.
 *  - \p CombinePartitionRule: Given a sub-rule and a list of 'combiner' rules, finds
 *    all possible ways of combining the sub-rule's candidates to yield even larger candidates.
 *    Note that the sub-rule's candidates may also be directly included in the results. The
 *    'combiner' rules allow combining by \p OpPatternKinds, combining the arguments to tuples
 *    which themselves are arguments to Relay operator calls, and so on. This rule is intended to
 *    mimic the existing TVM \p FuseOps pass, though:
 *    i) all candidates are found rather than just the largest, ii) the starting set of candidates
 *    can be provided by any other rule, and iii) we rely on \p SubGraph validity checking to weed
 *    out infeasible candidates.
 *  - \p OnlyValidPartitionRule: Given a \p SubGraphConfig, ignores candidates with 'invalid'
 *    sub-graphs. Used to limit the maximum candidate depth, the number of independent outputs,
 *    and whether intermediate 'taps' are allowed.
 *  - \p HostPartitionRule: Produces candidates for all Relay expressions which could be
 *    'left behind' for execution by the host (eg on the VM). This rule lets us simplify the
 *    overall Collage search algorithm.
 *
 * (Though not yet implemented, we'd like to allow a combinator rule which will union candidates
 * based on their 'anchor' operators. This can be used to implement 'vertical' and 'horizontal'
 * partition on more primitive candidates. Note that the \p SubGraph machinery supports
 * multiple-input and -output sub-graphs and their validation, so horizontal partition is easy
 * to implement.)
 *
 * Here are some typical ways to combine \p PartitionRules for different partition/fusion
 * strategies:
 *
 *  - Classic pattern-based BYOC with \p MergeComposite/AnnotateTarget/PartitionGraph passes:
 *    \code
 *    PrimitivePartitionRule
 *      OnlyValidPartitionRule
 *        CombinePartitionRule (with join-anything combiner rule)
 *          UnionPartitionRule
 *            CompositePartitionRule(label1)
 *              DFPatternPartitionRule(pattern1)
 *                       :
 *            CompositePartitionRule(labeln)
 *              DFPatternPartitionRule(patternn)
 *    \endcode
 *
 *  - "Consider this library implementation for these sub-expressions", using \p DFPatterns to
 *    pick out which Relay operators are supported:
 *    \code
 *    OnlyValidPartitionRule
 *      CombinePartitionRule (with default TVM combiner rules)
 *        UnionPartitionRule
 *          OpCallByKindPartitionRule
 *          CompositePartitionRule(label1)
 *            DFPatternPartitionRule(pattern1)
 *                     :
 *          CompositePartitionRule(labeln)
 *            DFPatternPartitionRule(patternn)
 *    \endcode
 *
 *  - Classic TVM \p FuseOps
 *    \code
 *    PrimitivePartitionRule
 *      OnlyValidPartitionRule
 *        CombinePartitionRule (with default TVM combiner rules)
 *          OpCallByKindPartitionRule
 *    \endcode
 *
 *  - "Just fuse what I tell you to fuse", using \p DFPatterns to directly select candidates:
 *    \code
 *    PrimitivePartitionRule
 *      OnlyValidPartitionRule
 *        UnionPartitionRule
 *          DFPatternPartitionRule(pattern1)
 *                   :
 *          DFPatternPartitionRule(patternn)
 *    \endcode
 */
class PartitionRuleNode : public Object {
 public:
  /*!
   * \brief A unique (over all rules for the same target) name for the rule. Rule names are
   * combined and captured with \p PartitionCandidate rule names for debuggability and
   * explainability. Some rules will copy the rule name into function attributes.
   */
  String rule_name_;

  void VisitAttrs(AttrVisitor* v);

  /*!
   * \brief Returns all the possible candidate partitions according to this rule for the overall
   * expression corresponding to \p dataflow_graph. The candidates will generally have unknown
   * target and cost: the target will be filled in by the \p PartitionSpec, while the cost will
   * be filled in lazily.
   */
  virtual std::vector<CandidatePartition> AllCandidates(const DataflowGraph& dataflow_graph,
                                                        const PartitionSpec& spec) const;

  std::string ToString() const;
  Doc ToDoc() const;

 protected:
  /*! \brief Appends the pretty-printed body of this rule to \p body_items (used by ToDoc). */
  virtual void AppendBodyItems(std::vector<Doc>* body_items) const;

 public:
  static constexpr const char* _type_key = "relay.collage.PartitionRule";
  static constexpr const uint32_t _type_child_slots = 10;
  TVM_DECLARE_BASE_OBJECT_INFO(PartitionRuleNode, Object);
};

class PartitionRule : public ObjectRef {
 public:
  explicit PartitionRule(String rule_name);

  TVM_DEFINE_OBJECT_REF_METHODS(PartitionRule, ObjectRef, PartitionRuleNode);
};

/*!
 * \brief Partition rule which fires on all sub-expressions matching a dataflow-pattern and pattern
 * predicate. It is valid for matching candidates to overlap.
 */
class DFPatternPartitionRuleNode : public PartitionRuleNode {
 public:
  /*!
   * \brief Relay pattern.
   */
  DFPattern pattern_;

  /*!
   * \brief Predicate on matched sub-expression to decide if partition rule should fire.
   */
  TPatternPredicate predicate_;

  void VisitAttrs(AttrVisitor* v);

  std::vector<CandidatePartition> AllCandidates(const DataflowGraph& dataflow_graph,
                                                const PartitionSpec& spec) const override;

  void AppendBodyItems(std::vector<Doc>* body_items) const override;

  static constexpr const char* _type_key = "relay.collage.DFPatternPartitionRule";
  TVM_DECLARE_FINAL_OBJECT_INFO(DFPatternPartitionRuleNode, PartitionRuleNode);
};

class DFPatternPartitionRule : public PartitionRule {
 public:
  DFPatternPartitionRule(String rule_name, DFPattern pattern,
                         TPatternPredicate predicate = DefaultPatternPredicate);

  TVM_DEFINE_OBJECT_REF_METHODS(DFPatternPartitionRule, PartitionRule, DFPatternPartitionRuleNode);
};

/*!
 * \brief Partition rule which wraps candidates within a function with the "Composite" attribute
 * bound to the given rule name.
 *
 * This is the standard way by which operators or operator groups are tagged as being supported
 * by a particular externally provided function. It is up to the BYOC lowering function to
 * recognize the "Composite" name and emit the appropriate code or call.
 */
class CompositePartitionRuleNode : public PartitionRuleNode {
 public:
  /*! \brief The sub-partition rule. */
  PartitionRule sub_rule_;

  void VisitAttrs(AttrVisitor* v);

  std::vector<CandidatePartition> AllCandidates(const DataflowGraph& dataflow_graph,
                                                const PartitionSpec& spec) const override;

  void AppendBodyItems(std::vector<Doc>* body_items) const override;

  static constexpr const char* _type_key = "relay.collage.CompositePartitionRule";
  TVM_DECLARE_FINAL_OBJECT_INFO(CompositePartitionRuleNode, PartitionRuleNode);
};

class CompositePartitionRule : public PartitionRule {
 public:
  CompositePartitionRule(String rule_name, PartitionRule sub_rule);

  TVM_DEFINE_OBJECT_REF_METHODS(CompositePartitionRule, PartitionRule, CompositePartitionRuleNode);
};

/*!
 * \brief Partition rule which wraps candidates within a function with the "Primitive" attribute
 * bound to 1. If the partition spec target(s) have the "compiler" attribute then that name is
 * also added to the function as a "Compiler" attribute.
 *
 * This is the standard way by which sub-graphs are marked as being in a 'partition' whose
 * compilation will be managed by an external BYOC toolchain. It can also be used to mark
 * sub-graphs for lowering to a single kernel by the built-in TVM lowering machinery.
 */
class PrimitivePartitionRuleNode : public PartitionRuleNode {
 public:
  /*! \brief The sub-partition rule. */
  PartitionRule sub_rule_;

  void VisitAttrs(AttrVisitor* v);

  std::vector<CandidatePartition> AllCandidates(const DataflowGraph& dataflow_graph,
                                                const PartitionSpec& spec) const override;

  void AppendBodyItems(std::vector<Doc>* body_items) const override;

  static constexpr const char* _type_key = "relay.collage.PrimitivePartitionRule";
  TVM_DECLARE_FINAL_OBJECT_INFO(PrimitivePartitionRuleNode, PartitionRuleNode);
};

class PrimitivePartitionRule : public PartitionRule {
 public:
  PrimitivePartitionRule(String rule_name, PartitionRule sub_rule);

  TVM_DEFINE_OBJECT_REF_METHODS(PrimitivePartitionRule, PartitionRule, PrimitivePartitionRuleNode);
};

/*!
 * \brief Partition rule which simply unions all matches from all sub-partition rules.
 *
 * This can be used to combine the results of a set of, eg, DFPatternPartitionRules.
 */
class UnionPartitionRuleNode : public PartitionRuleNode {
 public:
  Array<PartitionRule> sub_rules_;

  void VisitAttrs(AttrVisitor* v);

  std::vector<CandidatePartition> AllCandidates(const DataflowGraph& dataflow_graph,
                                                const PartitionSpec& spec) const override;

  void AppendBodyItems(std::vector<Doc>* body_items) const override;

  static constexpr const char* _type_key = "relay.collage.UnionPartitionRule";
  TVM_DECLARE_FINAL_OBJECT_INFO(UnionPartitionRuleNode, PartitionRuleNode);
};

class UnionPartitionRule : public PartitionRule {
 public:
  UnionPartitionRule(String rule_name, Array<PartitionRule> sub_rules);

  TVM_DEFINE_OBJECT_REF_METHODS(UnionPartitionRule, PartitionRule, UnionPartitionRuleNode)
};

/*!
 * \brief Partition rule which places calls to Relay operators with a "TOpPattern" attribute of
 * \p kOutEWiseFusable or less in their own singleton sub-graph. No other Relay sub-expressions
 * (such as tuples or tuple projection) are selected, and it is up to outer partition rules to
 * account for them.
 */
class OpCallByKindPartitionRuleNode : public PartitionRuleNode {
 public:
  void VisitAttrs(AttrVisitor* v);

  std::vector<CandidatePartition> AllCandidates(const DataflowGraph& dataflow_graph,
                                                const PartitionSpec& spec) const override;

  void AppendBodyItems(std::vector<Doc>* body_items) const override;

  static constexpr const char* _type_key = "relay.collage.OpCallByKindPartitionRule";
  TVM_DECLARE_FINAL_OBJECT_INFO(OpCallByKindPartitionRuleNode, PartitionRuleNode);
};

class OpCallByKindPartitionRule : public PartitionRule {
 public:
  explicit OpCallByKindPartitionRule(String rule_name);

  TVM_DEFINE_OBJECT_REF_METHODS(OpCallByKindPartitionRule, PartitionRule,
                                OpCallByKindPartitionRuleNode);
};

/*!
 * \brief Partition rule which combines sub-graphs to exploit optimizations commonly available in
 * backends (including the TVM lowering backend). Those optimization rules are in turn described by
 * one or more primitive \p CombinerRules.
 *
 * For TVM these primitive combiner rules are guided by the \p OpPatternKind associated with every
 * sub-graph. That in turn is the maximum of the kind of each expression node in the sub-graph,
 * using the rules:
 *  - Constants are \p kElemwise.
 *  - A call to a Relay operator has the kind of its callee.
 *  - Tuple construction and projection are injective provided all tuple fields are of tensor type.
 *  - All other sub-expressions are opaque.
 *
 * The available \p OpPatternKinds (and our abbreviations for them) are:
 *  - E: kElemWise, eg nn.relu
 *  - B: kBroadcast, eg add
 *  - I: kInjective, eg concatenate
 *  - R: kCommReduce, eg sum
 *  - A: kOutEWiseFusable, eg nn.conv2d (often called 'anchor nodes', hence the A abbreviation)
 *  - O: kOpaque, everything else
 * (The kTuple kind is not used by this machinery.)
 *
 * Kinds are ordered as above from least- to most-constraining w.r.t. possible partition
 * opportunities. When we write a kind abbreviation below we intend it to mean that kind *or less*.
 * And when we write 'kl -> kr' we mean it to match a sub-expression of kind kr or less whose
 * dataflow inputs are all of kind kl or less.
 *
 * We can then mimic the classic \p FuseOps TVM Pass with the following more primitive combiner
 * rules:
 *  - Sub-groups cannot have taps. In the classic \p FuseOps pass taps are avoided by construction
 *    by always considering all node->dominator paths. Here we naively allow taps on all candidates,
 *    but reject them using SubGraph::IsValid with a SubGraphConfig with allow_taps = false.
 *  - Combine A -> B
 *  - Combine B -> R
 *  - Combine I -> I
 *  - Combine I -> tuple -> I. That is, if an I sub-graph has a tuple as input, and at least one
 *    tuple field can be provided by an I sub-graph exit, then both the tuple and all such fields
 *    may be joined.
 *
 * Note that \p FuseOps only considers the largest possible sub-graphs. However this partition rule
 * considers all possibilities so as to 'make room' for other targets supplying other
 * overlapping candidates.
 *
 * See combiner_rule.h for the more primitive combiner rules which implement the above.
 */
class CombinePartitionRuleNode : public PartitionRuleNode {
 public:
  /*! \brief The sub-rule supplying the initial set of candidates. */
  PartitionRule sub_rule_;
  /*! \brief The more primitive rules to use to combine the candidates found by the above rule. */
  Array<CombinerRule> combiner_rules_;
  /*! \brief Maximum max_depth for candidates. */
  size_t max_depth_;

  void VisitAttrs(AttrVisitor* v);

  std::vector<CandidatePartition> AllCandidates(const DataflowGraph& dataflow_graph,
                                                const PartitionSpec& spec) const override;

  void AppendBodyItems(std::vector<Doc>* body_items) const override;

 public:
  static constexpr const char* _type_key = "relay.collage.CombinePartitionRule";
  TVM_DECLARE_FINAL_OBJECT_INFO(CombinePartitionRuleNode, PartitionRuleNode);
};

class CombinePartitionRule : public PartitionRule {
 public:
  CombinePartitionRule(String rule_name, PartitionRule sub_rule, Array<CombinerRule> combiner_rules,
                       size_t max_depth_);

  TVM_DEFINE_OBJECT_REF_METHODS(CombinePartitionRule, PartitionRule, CombinePartitionRuleNode);
};

/*!
 * \brief Partition rules which keeps only candidates from the sub-rule whose sub-groups are valid
 * w.r.t. the given \p SubGraphConfig.
 */
class OnlyValidPartitionRuleNode : public PartitionRuleNode {
 public:
  PartitionRule sub_rule_;
  SubGraphConfig config_;

  void VisitAttrs(AttrVisitor* v);

  std::vector<CandidatePartition> AllCandidates(const DataflowGraph& dataflow_graph,
                                                const PartitionSpec& spec) const override;

  void AppendBodyItems(std::vector<Doc>* body_items) const override;

 public:
  static constexpr const char* _type_key = "relay.collage.OnlyValidPartitionRule";
  TVM_DECLARE_FINAL_OBJECT_INFO(OnlyValidPartitionRuleNode, PartitionRuleNode);
};

class OnlyValidPartitionRule : public PartitionRule {
 public:
  OnlyValidPartitionRule(String rule_name, PartitionRule sub_rule, const SubGraphConfig& config);

  TVM_DEFINE_OBJECT_REF_METHODS(OnlyValidPartitionRule, PartitionRule, OnlyValidPartitionRuleNode);
};

/*!
 * \brief Partition rule which selects nodes which can be 'left behind' to be executed by the host
 * (eg on the VM). This includes most of the 'interstitial' Relay constructs, such a let bindings,
 * operators on references, calls to non-operator functions, and so on. It can also include the
 * construction of and projection from tuples which may not be supported within a partition.
 */
class HostPartitionRuleNode : public PartitionRuleNode {
 public:
  void VisitAttrs(AttrVisitor* v);

  std::vector<CandidatePartition> AllCandidates(const DataflowGraph& dataflow_graph,
                                                const PartitionSpec& spec) const override;

  void AppendBodyItems(std::vector<Doc>* body_items) const override;

 public:
  static constexpr const char* _type_key = "relay.collage.HostPartitionRule";
  TVM_DECLARE_FINAL_OBJECT_INFO(HostPartitionRuleNode, PartitionRuleNode);
};

class HostPartitionRule : public PartitionRule {
 public:
  explicit HostPartitionRule(String rule_name);

  TVM_DEFINE_OBJECT_REF_METHODS(HostPartitionRule, PartitionRule, HostPartitionRuleNode);
};

}  // namespace collage
}  // namespace relay
}  // namespace tvm

#endif  // TVM_RELAY_COLLAGE_PARTITION_RULE_H_
https://github.com/zk-ml/tachikoma
src/relay/collage/partition_spec.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file src/relay/collage/partition_spec.h
 * \brief Combine a \p PartitionRule with a \p Target.
 */

#ifndef TVM_RELAY_COLLAGE_PARTITION_SPEC_H_
#define TVM_RELAY_COLLAGE_PARTITION_SPEC_H_

#include <tvm/relay/function.h>
#include <tvm/runtime/container/string.h>
#include <tvm/target/target.h>

#include <string>
#include <vector>

#include "./partition_rule.h"
#include "./sub_graph.h"

namespace tvm {
namespace relay {
namespace collage {

/*!
 * \brief Type of functions for checking the validity of partitions before they proceed to lowering
 * and codegen. The argument is the function extracted from the overall expression to represent
 * the partition. The result is a non-empty error message string if the candidate should be
 * rejected.
 */
using TValidateSubGraphFunc = TypedPackedFunc<String(const Function& function)>;

/*!
 * \brief The default validation function. Always returns the empty string, ie no error.
 */
String DefaultValidateSubGraphFunc(const Function& function);

/*!
 * \brief Pairs a \p PartitionRule with one or more \p Targets it can be used for.
 */
class PartitionSpecNode : public Object {
 public:
  /*!
   * \brief Specification name to distinguish this spec from all others. Typically the BYOC
   * 'compiler' name, "tvm", or "host".
   */
  String spec_name_;

  /*!
   * \brief The target all candidate partitions should be compiled for.
   *
   * It's tempting to support multiple targets here since. Eg the partitioning rules for
   * TVM are the same irrespective of whether the target is "cuda" or "llvm", so it would make
   * sense to build the candidate partitions first without committing to any target, then 'stamp'
   * them for each target as the final step.
   *
   * However, we want to make sure any predicate in \p DFPatternPartitionRuleNode instances
   * can have access to the current target instance. Eg the predicate may need to consult
   * build-time configuration to decide what operators, shapes etc are actually supported.
   * That implies the specific target is known when the candidate partitions are being constructed.
   *
   * So for now we'll just force each spec to have exactly one target.
   */
  Target target_;

  /*!
   * \brief The partition rule to use to gather candidates.
   */
  PartitionRule rule_;

  /*!
   * \brief The validation function to apply to each candidate's the extracted function before
   * proceeding to lowering/codegen.
   */
  TValidateSubGraphFunc validate_sub_graph_func_ = DefaultValidateSubGraphFunc;

  void VisitAttrs(AttrVisitor* v);

  /*!
   * \brief Returns all the candidate partitions found by this specification. The candidates
   * will be for a specific target, but will not yet have an extracted function or cost.
   */
  std::vector<CandidatePartition> AllCandidates(const DataflowGraph& dataflow_graph) const;

  // Human-readable rendering of the spec, for logging and debugging.
  std::string ToString() const;

  static constexpr const char* _type_key = "relay.collage.PartitionSpec";
  TVM_DECLARE_FINAL_OBJECT_INFO(PartitionSpecNode, Object);
};

class PartitionSpec : public ObjectRef {
 public:
  // Constructs a spec; validate_sub_graph_func defaults to accepting every candidate.
  PartitionSpec(String spec_name, Target target, PartitionRule rule,
                TValidateSubGraphFunc validate_sub_graph_func = DefaultValidateSubGraphFunc);

  TVM_DEFINE_OBJECT_REF_METHODS(PartitionSpec, ObjectRef, PartitionSpecNode);
};

}  // namespace collage
}  // namespace relay
}  // namespace tvm

#endif  // TVM_RELAY_COLLAGE_PARTITION_SPEC_H_
https://github.com/zk-ml/tachikoma
src/relay/collage/priority_queue.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file src/relay/collage/priority_queue.h
 * \brief An updatable priority queue.
 */

#ifndef TVM_RELAY_COLLAGE_PRIORITY_QUEUE_H_
#define TVM_RELAY_COLLAGE_PRIORITY_QUEUE_H_

// <algorithm> is required for std::find_if below; previously it was only picked up
// transitively, which is brittle (include-what-you-use).
#include <algorithm>
#include <set>

namespace tvm {
namespace relay {
namespace collage {

/*! \brief Priority queue of search states, ordered by increasing cost. */
template <typename T, typename CmpTPtr, typename EqTPtr>
class PriorityQueue {
 public:
  PriorityQueue() = default;

  /*! \brief Pushes \p item onto the queue. */
  void Push(T* item) { set_.emplace(item); }

  /*! \brief Pops the item with the least cost off the queue. */
  T* Pop() {
    ICHECK(!set_.empty());
    T* item = *set_.begin();
    set_.erase(set_.begin());
    return item;
  }

  /*!
   * \brief Updates the queue to account for \p item's best cost being lowered.
   *
   * Note: the lookup must be a linear scan with \p EqTPtr rather than set_.find(), since
   * \p item's cost has already changed and so its position w.r.t. \p CmpTPtr is stale.
   */
  void Update(T* item) {
    auto itr = std::find_if(set_.begin(), set_.end(),
                            [item](const T* that) { return EqTPtr()(that, item); });
    ICHECK(itr != set_.end());
    set_.erase(itr);
    set_.emplace(item);
  }

  bool empty() const { return set_.empty(); }
  size_t size() const { return set_.size(); }

 private:
  // TODO(mbs): Actually use a pri-queue datastructure!
  std::set<T*, CmpTPtr> set_;
};

}  // namespace collage
}  // namespace relay
}  // namespace tvm

#endif  // TVM_RELAY_COLLAGE_PRIORITY_QUEUE_H_
https://github.com/zk-ml/tachikoma
src/relay/collage/prune_candidates.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file src/relay/collage/prune_candidates.h
 * \brief Try to remove candidates which will never contribute to an optimal partitioning.
 */

#ifndef TVM_RELAY_COLLAGE_PRUNE_CANDIDATES_H_
#define TVM_RELAY_COLLAGE_PRUNE_CANDIDATES_H_

#include <vector>

#include "./candidate_partition.h"
#include "./dataflow_graph.h"

namespace tvm {
namespace relay {
namespace collage {

/*!
 * \brief Returns \p initial_candidates with all unnecessary candidates pruned.
 *
 * We prune according to the following two heuristics:
 *  1. Given partitions (A, target) and (B, target) then
 *     cost(A union B, target) < cost(A, target) + cost(B, target).
 *     That is, there's no use estimating the cost of small partitions when a larger partition
 *     containing them is also available. More precisely, call a partition 'maximal' if it is
 *     not contained by any other partition for the same target. Then we want to prefer maximal
 *     candidates when searching.
 *  2. Given maximal partitions (A union B, target) and (A union B, target') where
 *     target != target', then min(cost(A union B, target), cost(A union B, target')) <
 *     min(cost(A, target) + cost(B, target'), cost(A, target') + cost(B, target)).
 *     That is, there's no use estimating cross-combinations of partitions which are not maximal.
 *
 * However, we can't prune a non-maximal candidate if it will make some other maximal candidate
 * unreachable during the Collage search. We achieve this by iterating until fixed point:
 *  - Find maximal candidates of current set of candidates.
 *  - Add those maximal candidates to the output 'pruned' set.
 *  - If any two candidates in the 'pruned' set intersect without being equal, remove those from
 *    the current set of candidates and go around again. That will force more candidates to
 *    be considered 'maximal'.
 * That over-approximates the true necessary candidates but is at least simple.
 *
 * CAUTION: This is pretty experimental. The above heuristics won't always be safe, and I don't
 * have a proof the pruned candidate set won't lead to 'No candidate was found covering
 * sub-expression...' errors in Partitioner::Partition().
 *
 * \param dataflow_graph The dataflow graph the candidates were built against.
 * \param initial_candidates The unpruned candidate set; not mutated — a pruned copy is returned.
 */
std::vector<CandidatePartition> PruneCandidates(
    const DataflowGraph& dataflow_graph,
    const std::vector<CandidatePartition>& initial_candidates);

}  // namespace collage
}  // namespace relay
}  // namespace tvm

#endif  // TVM_RELAY_COLLAGE_PRUNE_CANDIDATES_H_
https://github.com/zk-ml/tachikoma
src/relay/collage/sub_graph.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file src/relay/collage/sub_graph.h
 * \brief Represents a sub-graph of an overall Relay expression.
 */

#ifndef TVM_RELAY_COLLAGE_SUB_GRAPH_H_
#define TVM_RELAY_COLLAGE_SUB_GRAPH_H_

#include <tvm/ir/transform.h>
#include <tvm/relay/op_attr_types.h>

#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

#include "../ir/dataflow_matcher_impl.h"
#include "../ir/indexed_graph.h"
#include "./dataflow_graph.h"
#include "./index_set.h"

namespace tvm {
namespace relay {
namespace collage {

/*! \brief Returns operator pattern kind as single-letter string. */
std::string KindToString(OpPatternKind kind);

/*!
 * \brief Returns a kind and label for the single \p sub_expr, ignoring its nested sub expressions.
 */
std::pair<OpPatternKind, std::string> SubExprKindAndLabel(const Expr& sub_expr);

/*!
 * \brief Returns a kind and label for all the nodes in \p inside.
 */
std::pair<OpPatternKind, std::string> SubGraphKindAndLabel(const DataflowGraph& dataflow_graph,
                                                           const IndexSet& inside);

/*!
 * \brief Returns the index set representing all the sub-expression matched by \p matcher.
 */
IndexSet MatcherToIndexSet(const DFPatternMatcher& matcher);

/*!
 * \brief Configuration controlling which sub-graphs are considered valid.
 */
struct SubGraphConfig {
  /*! \brief Maximum number of exit nodes in the sub-graph, or zero if no limit. */
  size_t max_exits = 0;
  /*!
   * \brief Whether a node inside the sub-graph may flow to nodes both inside and outside
   * the sub-graph (which we call a 'tap'). Note that it is still possible to have multiple outputs
   * even with this flag false.
   */
  bool allow_taps = false;
  /*!
   * \brief Maximum allowed sub-graph depth, or zero if no-limit.
   */
  size_t max_depth = 0;

  // Human-readable rendering of this configuration, for debugging.
  std::string ToString() const;
};

class SubGraph;
using FunctionAttrsMap = Map<String, ObjectRef>;

/*!
 * \brief A nested sub-graph is a sub-graph which is to be nested inside a function as part of some
 * enclosing sub-graph.
 *
 * Extraction yields a function with input nodes replaced by parameters and exit nodes in the
 * function result. Rewriting replaces the sub-graph with a call to that function, and all
 * outputs with (projections from) the call result.
 *
 * (Note that it's tempting to move attrs_ into \p SubGraphNode and thus avoid this class.
 * However we found the implementation was easier to understand in this form since it makes
 * the result of \p Extract unambiguous.)
 */
class NestedSubGraphNode : public Object {
 public:
  /*! \brief The nested sub-graph. */
  ObjectRef /* actually SubGraph */ sub_graph_obj_;
  /*! \brief Attributes (possibly empty) to attach to the extracted function. */
  FunctionAttrsMap attrs_;

  void VisitAttrs(AttrVisitor* v);

  // Downcasting accessor for sub_graph_obj_.
  SubGraph sub_graph() const;

  bool operator==(const NestedSubGraphNode& that) const;
  bool operator!=(const NestedSubGraphNode& that) const { return !(*this == that); }
  bool operator<(const NestedSubGraphNode& that) const;
  size_t hash() const;

  std::string ToString() const;

  /*!
   * \brief Returns the function representing this nested sub-graph within the overall expression
   * represented by \p dataflow_graph:
   *  - All sub-graph inputs become parameters.
   *  - All sub-graph outputs become function results (either directly or as a field in a tuple).
   *  - The function has attrs_ for attributes (which may be empty).
   *  - The function body accounts for any rewrites implied by the nested sub-graph.
   */
  Function Extract(const DataflowGraph& dataflow_graph) const;

  /*!
   * \brief Returns \p expr rewritten to encode the partitioning implied by this nested sub-graph.
   *
   * It is valid for \p expr to not be the same as \p dataflow_graph.expr(), however all nodes
   * inside this nested sub-graph must correspond to nodes shared between \p dataflow_graph.expr()
   * and \p expr. See \p SubGraph::ParallelRewrite below.
   */
  Expr Rewrite(const DataflowGraph& dataflow_graph, const Expr& expr) const;

  static constexpr const char* _type_key = "relay.collage.NestedSubGraph";
  TVM_DECLARE_FINAL_OBJECT_INFO(NestedSubGraphNode, Object);
};

class NestedSubGraph : public ObjectRef {
 public:
  NestedSubGraph(SubGraph sub_graph, FunctionAttrsMap attrs);

  /*!
   * \brief Returns copy of this nested sub-graph with all indexes substituted according to
   * \p subst, whose range is w.r.t. \p new_dataflow_graph.
   */
  NestedSubGraph Subst(const DataflowGraph& new_dataflow_graph,
                       const std::unordered_map<PostDfsIndex, PostDfsIndex>& subst) const;

  /*!
   * \brief Returns true if this can be safely unioned.
   */
  bool TriviallyUnionable(const NestedSubGraph& that) const;

  /*!
   * \brief Returns the disjoint union of this and \p that nested sub-graphs, which must agree on
   * their attributes.
   */
  NestedSubGraph DisjointUnion(const DataflowGraph& dataflow_graph,
                               const NestedSubGraph& that) const;

  /*!
   * \brief Returns \p expr rewritten according to all the given nested sub-graphs. The
   * nested sub-graphs can be given in any order, but must be disjoint.
   *
   * It is valid for \p expr to not be the same as \p dataflow_graph.expr(), however all nodes
   * inside the nested sub-graphs must correspond to nodes shared between \p dataflow_graph.expr()
   * and \p expr. See \p SubGraph::ParallelRewrite below.
   */
  static Expr ParallelRewrite(const DataflowGraph& dataflow_graph, const Expr& expr,
                              std::vector<NestedSubGraph> nested_sub_graphs);

  TVM_DEFINE_OBJECT_REF_METHODS(NestedSubGraph, ObjectRef, NestedSubGraphNode);
};

using NestedSubGraphs = Array<NestedSubGraph>;

/*!
 * \brief A compact representation of a sub-graph within an (implied) overall Relay expression.
 *
 * Sub-graphs can be used to represent partitions/kernels/composite functions without having to
 * pay the cost of constructing or rewriting any expressions. We also allow 'extracting' a
 * function to use for measuring a partition/kernel's latency independently from 'rewriting'
 * the overall Relay expression since only a tiny subset of candidate partitions will end up being
 * needed after Collage has completed its search.
 *
 * We expect O(thousands) of sub-graphs to be in flight while processing a given model, so we are
 * mindful of space overhead.
 *
 * A sub-graph classifies every dataflow node of the overall expression as either 'inside' or
 * 'outside' the sub-graph. Obviously not all such divisions make sense, for example it is not
 * valid for an inside node to feed into another inside node via outside nodes. We provide the
 * \p IsValid method to check for validity, and \p SubGraphConfig to control which validity rules
 * apply (such as maximum depth).
 *
 * We generally work with the \p DataflowGraph representation of the overall Relay expression
 * rather than the expression itself. We use the post-dfs visit index to uniquely refer to
 * expression nodes.
 *
 * As well as 'inside' and 'outside' we have four other flavors of dataflow nodes, all uniquely
 * determined from the 'inside' nodes:
 *  - 'entry' nodes are those inside with at least one dataflow input outside.
 *  - 'exit' nodes are those inside with at least one dataflow output outside, or which
 *    are considered 'external' in the underlying dataflow graph (eg because they represent
 *    the result of the overall function).
 *  - 'input' nodes are those outside with at least one dataflow output inside.
 *  - 'output' nodes are those outside with at least one dataflow input inside.
 * Index sets for these are cached with the sub-graph for performance.
 *
 * It is valid to have multiple entry nodes (we can bind a parameter for each). It may be valid to
 * have multiple exit nodes (we can build a tuple of all such). It may be valid to have exit nodes
 * which also contribute to other inside nodes (ie represent a 'tap' on an intermediate result).
 *
 * Sub-graphs are closed under:
 *  - Disjoint union.
 *  - Wrapping by a function with given attributes (see \p NestedSubGraph above). This can be used
 *    to encode "Composite" functions, or to represent a candidate kernel within a "Primitive"
 *    function. (By combining 'wrapping' with 'union' we can encode, eg, 'this sub-graph should
 *    be placed inside a primitive function which itself may have calls to composite functions).
 *  - Substitution, which allows a sub-graph w.r.t. one dataflow graph to be transformed to
 *    match some other (typically smaller) dataflow graph.
 *
 * See the subclasses of \p PartitionRule for how sub-graphs are built and combined during Collage
 * search.
 *
 * To support some of the \p OpPatternKind-based fusion rule processing we give sub-graphs
 * a kind, which is generally the maximum of the kinds of all the operator calls appearing
 * inside it. We also given sub-graphs a (not necessarily unique) label to help debugging
 * and guide the selection of global symbol names.
 */
class SubGraphNode : public Object {
 public:
  /*!
   * \brief Which sub-expressions are inside the sub-graph (using their post-dfs indexes w.r.t.
   * the implied DataflowGraph).
   */
  IndexSet inside_;

  /*!
   * \brief Index of first and last inside nodes.
   *
   * Cached for performance, uniquely determined by inside_.
   */
  PostDfsIndex first_inside_index_ = 0;
  PostDfsIndex last_inside_index_ = 0;

  /*!
   * \brief Which sub-expressions are entry/exit/input/output for this sub-graph.
   *
   * Cached for performance, uniquely determined by inside_.
   */
  IndexSet entry_;
  IndexSet exit_;
  IndexSet input_;
  IndexSet output_;

  /*!
   * \brief Maximum depth of any dataflow path from an entry to an output sub-expression.
   *
   * Cached for performance, uniquely determined by inside_.
   */
  size_t depth_ = 0;

  /*!
   * \brief The \p OpPatternKind summarizing the input/output behavior of the sub-graph.
   *
   * A sub-graph consisting of a single Relay expression node is given kind:
   *  - For Call to a Relay operator, the "TOpPattern" attribute of that operator (provided the
   *    call does not involve data-dependent dynamic shapes).
   *  - For Call to Relay Function, the "TOpPattern" attribute of the function (provided it has
   *    that attribute)
   *  - For Constants, \p kElemWise.
   *  - For Tuple and tuple projections, \p kInjective (provided all tuple fields are of tensor
   *    type)
   *  - All other nodes \p kOpaque.
   * Sub-graphs with more than one node have the maximum of the kind of each node.
   *
   * Cached for performance, uniquely determined by inside_.
   */
  OpPatternKind kind_ = kOpaque;

  /*!
   * \brief A label for the sub-graph. Not guaranteed to be unique, but is a human-readable summary
   * of the sub-graph which can help with debugging and guide the selection of global symbol names.
   */
  String label_;

  /*!
   * \brief Nested sub-graphs of this sub-graph which must be represented by functions. These must
   * be disjoint, but it's ok for this sub-graph to have nodes not inside any nested sub-graph.
   */
  NestedSubGraphs nested_sub_graphs_;

  void VisitAttrs(AttrVisitor* v);

  // TODO(mbs): 'Anchor nodes' and rules for unioning them.
  // In FuseOps it's just the unique kEWiseFusable node, if any.
  // I'd like to allow writing vertical fusion rules, eg if two candidates are directly
  // connected and have nn.conv2d anchors allow their join.
  // I'd also like to allow horizontal fusion rules, eg if two candidates are not directly
  // connected but could be joined without producing invalid (eg cyclic) and have nn.conv2d anchors
  // then do so. Come back to this.

  /*! \brief Number of nodes in overall dataflow graph. */
  size_t overall_size() const { return inside_.end_index(); }

  // True when no nodes are inside the sub-graph.
  bool IsEmpty() const { return inside_.IsZero(); }

  /*! \brief Number of nodes in sub-graph. */
  size_t Size() const { return inside_.PopCount(); }

  /*!
   * \brief Returns the dataflow nodes downstream of all exit nodes.
   */
  IndexSet Downstream(const DataflowGraph& dataflow_graph) const;

  /*!
   * \brief Returns true if this sub-graph is valid. Ie:
   *  - no output of the sub-graph can flow to any input of the sub-graph (otherwise we'd end up
   *    with a dataflow cycle when we partition).
   *  - all inputs and outputs of the sub-graph are in the same scope, ie not separated by
   *    control flow (otherwise there'd be no consistent program point at which to eval the
   *    partitioned function).
   *  - no more than config.max_outputs outputs are required.
   *  - if config.allow_taps is false, no inside node has outputs to nodes both inside and
   *    outside the sub-graph.
   */
  bool IsValid(const DataflowGraph& dataflow_graph, const SubGraphConfig& config) const;

  /*!
   * \brief Returns this sub-graph extracted as a stand-alone function. The function will have
   * no attributes, and is suitable for building and profiling by the \p CostEstimator.
   */
  Function ExtractAsFunction(const DataflowGraph& dataflow_graph) const;

  /*!
   * \brief Returns \p expr rewritten to encode the partitioning implied by this sub-graph.
   *
   * It is valid for \p expr to not be the same as \p dataflow_graph.expr(), however all nodes
   * inside this sub-graph must correspond to nodes shared between \p dataflow_graph.expr() and
   * \p expr. See \p SubGraph::ParallelRewrite below.
   */
  Expr Rewrite(const DataflowGraph& dataflow_graph, const Expr& expr) const;

  std::string ToString() const;

  bool operator==(const SubGraphNode& that) const;
  bool operator!=(const SubGraphNode& that) const { return !(*this == that); }
  bool operator<(const SubGraphNode& that) const;
  size_t hash() const;

 private:
  /*! \brief Initialize the entry/exit/input/output sets given the inside and \p dataflow_graph. */
  void Init(const DataflowGraph& dataflow_graph);

  /*! \brief Calculates and returns the maximum path depth. */
  size_t Depth(const DataflowGraph& dataflow_graph) const;

  /*! \brief Returns true if any (input/output) of node is (outside/inside) the sub-graph. */
  bool AnyInputOutside(const DataflowGraph::Node* node) const;
  bool AnyInputInside(const DataflowGraph::Node* node) const;
  bool AnyOutputOutside(const DataflowGraph::Node* node) const;
  bool AnyOutputInside(const DataflowGraph::Node* node) const;

 public:
  static constexpr const char* _type_key = "relay.collage.SubGraph";
  TVM_DECLARE_FINAL_OBJECT_INFO(SubGraphNode, Object);

  friend class SubGraph;
};

class SubGraph : public ObjectRef {
 public:
  /*! \brief Primitive constructor. The following constructors are generally more convenient. */
  SubGraph(const DataflowGraph& dataflow_graph, IndexSet inside, OpPatternKind kind = kOpaque,
           String label = {}, std::vector<NestedSubGraph> nested_sub_graphs = {});

  /*! \brief Constructs the empty sub-graph for \p dataflow_graph. */
  explicit SubGraph(const DataflowGraph& dataflow_graph);

  /*! \brief Returns true if this and that are disjoint. */
  bool AreDisjoint(const SubGraph& that) const;

  /*!
   * \brief Returns true if:
   *  - \p this and \p that are disjoint, and
   *  - an output node of \p this coincides with an entry node of \p that, and
   *  - \p this and \p that are not obviously invalid after \p DisjointUnion
   *    (eg because such a sub-graph would produce a cycle).
   * Note however that the \p DisjointUnion may not necessarily be valid even with the above
   * checks.
   */
  bool AreTouching(const DataflowGraph& dataflow_graph, const SubGraph& that) const;

  /*!
   * \brief Returns true if:
   *  - all the outputs of \p this are entries for \p that, and
   *  - all the inputs of \p that are exits for \p this.
   */
  bool AreSelfContained(const SubGraph& that) const;

  /*!
   * \brief Returns disjoint union of this and \p that sub-graphs. The result may not be valid.
   */
  SubGraph DisjointUnion(const DataflowGraph& dataflow_graph, const SubGraph& that) const;

  /*!
   * \brief Returns copy of this sub-graph with all nodes placed inside a nested sub-graph with
   * given attributes.
   */
  SubGraph WithAttrs(const DataflowGraph& dataflow_graph, FunctionAttrsMap attrs) const;

  /*!
   * \brief Returns copy of this sub-graph with all indexes substituted according to \p subst,
   * whose range is w.r.t. \p new_dataflow_graph.
   */
  SubGraph Subst(const DataflowGraph& new_dataflow_graph,
                 const std::unordered_map<PostDfsIndex, PostDfsIndex>& subst) const;

  /*!
   * \brief Returns the root expression of \p dataflow_graph rewritten according to all the
   * given sub-graphs. The sub-graphs can be given in any order, but must be disjoint.
   */
  static Expr ParallelRewrite(const DataflowGraph& dataflow_graph,
                              std::vector<SubGraph> sub_graphs);

  TVM_DEFINE_OBJECT_REF_METHODS(SubGraph, ObjectRef, SubGraphNode);
};

// Equality functor for use in hash containers keyed by SubGraph (structural equality).
struct SubGraphEqual {
  bool operator()(const SubGraph& left, const SubGraph& right) const {
    return *left.get() == *right.get();
  }
};

// Hash functor companion to SubGraphEqual.
struct SubGraphHash {
  size_t operator()(const SubGraph& sub_graph) const { return sub_graph->hash(); }
};

/*!
 * \brief Pass to partition every global function according to the post-dfs indexes
 * given in an array. Visible for testing from Python only, would never make sense to use
 * as a generic pass!
 */
tvm::transform::Pass PartitionOnIndexesForTesting(Array<Integer> indexes);

}  // namespace collage
}  // namespace relay
}  // namespace tvm

#endif  // TVM_RELAY_COLLAGE_SUB_GRAPH_H_
https://github.com/zk-ml/tachikoma
src/relay/collage/utils.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file src/relay/collage/utils.h
 * \brief Misc helpers.
 */

#ifndef TVM_RELAY_COLLAGE_UTILS_H_
#define TVM_RELAY_COLLAGE_UTILS_H_

#include <tvm/relay/expr.h>
#include <tvm/relay/function.h>
#include <tvm/relay/op_attr_types.h>
#include <tvm/runtime/container/string.h>

#include <string>

namespace tvm {
namespace relay {
namespace collage {

/*!
 * \brief Distinguished partition spec names.
 */
// Prefix used by GetSpecName below when forming spec names for TVM-native targets.
constexpr const char* kTVMSpecNamePrefix = "tvm_";
// Spec name for partitions left behind to run on the host (see HostPartitionRule).
constexpr const char* kHostSpecName = "host";

/*!
 * \brief Returns the partition spec name to use for \p target. For external codegen targets the
 * spec name is just the target kind name. For TVM native targets the spec name is of the form
 * "tvm_<kind_name>".
 */
String GetSpecName(const Target& target);

/*! \brief Returns \p "<left>+<right>". */
String UnionLabels(String left, String right);

/*! \brief Returns \p "<outer>.<inner>". */
String NestLabels(String outer, String inner);

/*! \brief Returns abbreviation for \p kind. */
std::string KindToString(OpPatternKind kind);

/*! \brief Returns maximum of \p left and \p right. */
OpPatternKind CombineKinds(OpPatternKind left, OpPatternKind right);

/*!
 * \brief Returns true if \p expr can be safely inlined in body of function extracted
 * from sub-graph, even if \p expr was not technically matched by the pattern which produced
 * the sub-graph.
 */
bool CanInline(const Expr& expr);

/*!
 * \brief Returns true if \p op_node can be directly handled by the VM.
 */
bool IsSpecialOp(const OpNode* op_node);

/*!
 * \brief Return true if the Relay expression node given by \p expr cannot be evaluated by
 * the VM and must end up in a kernel.
 */
bool MustBeLowered(const Expr& expr);

}  // namespace collage
}  // namespace relay
}  // namespace tvm

#endif  // TVM_RELAY_COLLAGE_UTILS_H_
https://github.com/zk-ml/tachikoma
src/relay/ir/dataflow_matcher_impl.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file src/tvm/relay/dataflow_matcher_impl.h
 * \brief The auxiliary data structure for dataflow matcher.
 */
#ifndef TVM_RELAY_IR_DATAFLOW_MATCHER_IMPL_H_
#define TVM_RELAY_IR_DATAFLOW_MATCHER_IMPL_H_

#include <tvm/relay/dataflow_matcher.h>
#include <tvm/relay/dataflow_pattern.h>
#include <tvm/relay/dataflow_pattern_functor.h>
#include <tvm/relay/expr_functor.h>

#include <memory>
#include <string>
#include <unordered_map>
#include <vector>

#include "indexed_graph.h"

namespace tvm {
namespace relay {

/*!
 * \brief Matches a dataflow pattern against (sub-)expressions.
 *
 * Implemented as a DFPatternFunctor: each VisitDFPattern_ overload below decides whether one
 * kind of pattern node matches the given expression. Successful sub-matches are accumulated
 * in \p memo_ (pattern -> matched sub-expressions) so they can be memoized during the match
 * and inspected by callers afterwards.
 */
class DFPatternMatcher : public DFPatternFunctor<bool(const DFPattern&, const Expr&)> {
 public:
  /*!
   * \brief Construct a matcher over \p expr_graph.
   * \param expr_graph Non-owning pointer; must outlive this matcher.
   */
  explicit DFPatternMatcher(const IndexedGraph<Expr>* expr_graph) : expr_graph_(expr_graph) {}

  /*! \brief Returns true if \p pattern matches \p expr, accumulating bindings into the memo. */
  bool Match(const DFPattern& pattern, const Expr& expr);

  /*! \brief Returns the pattern -> matched sub-expressions bindings of the last match. */
  Map<DFPattern, Array<Expr>> GetMemo() { return Map<DFPattern, Array<Expr>>(memo_); }

  /*! \brief Returns the indexed-graph node for \p expr (check-fails if unknown). */
  const IndexedGraph<Expr>::Node* expr_to_node(const Expr& expr) const {
    return expr_graph_->item_to_node(expr);
  }
  /*! \brief Returns the indexed-graph node at post-dfs \p index. */
  const IndexedGraph<Expr>::Node* index_to_node(size_t index) const {
    return expr_graph_->index_to_node(index);
  }
  /*! \brief Number of nodes in the underlying indexed graph. */
  size_t size() const { return expr_graph_->size(); }
  /*! \brief Read-only access to the raw memo map. */
  const std::unordered_map<DFPattern, Array<Expr>, ObjectPtrHash, ObjectPtrEqual>& memo() const {
    return memo_;
  }
  const IndexedGraph<Expr>& expr_graph() const { return *expr_graph_; }

 protected:
  // Dispatch entry point; consults/updates memo_ when memoize_ is set (see .cc).
  bool VisitDFPattern(const DFPattern& pattern, const Expr& expr) override;
  // One overload per pattern-node kind; each returns true iff that pattern matches expr.
  bool VisitDFPattern_(const AltPatternNode* op, const Expr& expr) override;
  bool VisitDFPattern_(const AttrPatternNode* op, const Expr& expr) override;
  bool VisitDFPattern_(const CallPatternNode* op, const Expr& expr) override;
  bool VisitDFPattern_(const ConstantPatternNode* op, const Expr& expr) override;
  bool VisitDFPattern_(const DataTypePatternNode* op, const Expr& expr) override;
  bool VisitDFPattern_(const DominatorPatternNode* op, const Expr& expr) override;
  bool VisitDFPattern_(const ExprPatternNode* op, const Expr& expr) override;
  bool VisitDFPattern_(const FunctionPatternNode* op, const Expr& expr) override;
  bool VisitDFPattern_(const IfPatternNode* op, const Expr& expr) override;
  bool VisitDFPattern_(const LetPatternNode* op, const Expr& expr) override;
  bool VisitDFPattern_(const ShapePatternNode* op, const Expr& expr) override;
  bool VisitDFPattern_(const TupleGetItemPatternNode* op, const Expr& expr) override;
  bool VisitDFPattern_(const TuplePatternNode* op, const Expr& expr) override;
  bool VisitDFPattern_(const TypePatternNode* op, const Expr& expr) override;
  bool VisitDFPattern_(const VarPatternNode* op, const Expr& expr) override;
  bool VisitDFPattern_(const WildcardPatternNode* op, const Expr& expr) override;

  // NOTE(review): appears to unwind match state recorded in matched_nodes_/memo_ beyond
  // watermark (backtracking on a failed alternative) -- confirm against the .cc.
  void ClearMap(size_t watermark);
  // Helpers for dominator-pattern matching (path and parent checks); semantics in the .cc.
  bool MatchesPath(const DominatorPatternNode* op, const Expr& expr);
  bool DominatesParent(const DominatorPatternNode* op, const Expr& expr);

  /*! \brief Non-owning dataflow graph of the expression being matched. */
  const IndexedGraph<Expr>* expr_graph_;
  /*! \brief Pattern -> matched sub-expressions discovered so far. */
  std::unordered_map<DFPattern, Array<Expr>, ObjectPtrHash, ObjectPtrEqual> memo_;
  /*! \brief Patterns matched so far, in match order (supports rollback via ClearMap). */
  std::vector<DFPattern> matched_nodes_;
  /*! \brief Whether VisitDFPattern may reuse memoized results. */
  bool memoize_ = true;
};

/*!
 * \brief PatternGrouper does pre-rewriting pattern matching and analysis
 *
 * This class creates a number of groups of matched expressions, ensures they don't overlap, and
 * returns them to the caller for post-analysis rewriting.
 *
 * This is primarily needed to support the post-dominator analysis required for dominator pattern
 * matching.
 */
class PatternGrouper {
 public:
  /*! \brief Internal Group class for storing analysis */
  struct Group {
    Expr root_node;
    int gid;
    Map<DFPattern, Array<Expr>> matched_nodes;
    std::string name;
    Function function;
    Array<Expr> args;
  };

  /*! \brief Return the group assignments of expressions */
  inline const std::unordered_map<Expr, int, ObjectPtrHash, ObjectPtrEqual>& GetGIDAssignments() {
    return gid_assignments_;
  }
  /*! \brief Group expressions that match the pattern */
  const std::unordered_map<int, Group>& GroupMatches(const DFPattern& pattern, const Expr& pre);

 protected:
  /*! \brief Iteratively traverse the Expression in pre-order to find subgraphs
   *
   * If we traverse the graph in post-order, we can run into situtations where a small subgraph will
   * match the pattern. Due to options like AltPattern, a larger subgraph with more nodes later in
   * the graph may also match the pattern. With post-order traversal, we mark the smaller subgraph
   * as matched and fail to catch the larger subgraph. This problem is fixed by using pre-order
   * traversal.
   */
  void VisitExprs();

  /*! \brief Create a group based on a matched expression */
  void CreateGroup(const Expr& expr);

  /*! \brief EmbedConst implements rules for embedding constants into partitioned functions or
   * lifting them into the function arguments.
   *
   * The rules depend on what pattern the ConstantNode matched.
   *
   * The basic rules are:
   * If the constant matches ExprPattern(relay.const(*)) or a ConstantPattern(), embed the constant
   * in the partitioned function. If the constant matched an AltPattern, recursively check the
   * matched side of the pattern. For any other matching pattern (i.e, wildcard, VarPattern, etc),
   * lift the constant into the arguments of the partitioned function.
   */
  bool EmbedConst(const Expr& expr, const DFPattern pattern);

  // Internal State
  DFPattern pattern_;
  std::unordered_map<int, Group> groups_;
  std::unordered_map<Expr, int, ObjectPtrHash, ObjectPtrEqual> gid_assignments_;
  DFPatternMatcher* matcher_ = nullptr;
  std::unique_ptr<IndexedGraph<DFPattern>> pattern_graph_;
  int gid_ = 0;           // next group id to hand out
  int graph_number_ = 0;  // counter used when naming partitioned functions (see .cc)
};

/*!
 * \brief PatternRewriter rewrites the expression by finding matches and allowing user callback
 * function to rewrite those matches
 *
 * The class uses PatternGrouper to support the dominator pattern.
 */
class PatternRewriter : protected MixedModeMutator {
 public:
  explicit PatternRewriter(IRModule mod) : mod_(mod) {}
  /*! \brief Rewrite can take a number of callbacks and will repeatedly rewrite the graph with the
   * callbacks until it stops changing */
  virtual Expr Rewrite(const Array<DFPatternCallback>& callbacks, const Expr& pre);

 protected:
  virtual Expr DispatchVisitExpr(const Expr& pre);

  IRModule mod_;
  /*! \brief Callback currently being applied by the rewrite loop. */
  DFPatternCallback callback_;
  /*! \brief Groups found by PatternGrouper for the current callback. */
  std::unordered_map<int, PatternGrouper::Group> groups_;
  /*! \brief Expression -> group id, from the current grouping pass. */
  std::unordered_map<Expr, int, ObjectPtrHash, ObjectPtrEqual> gid_assignments_;
};

}  // namespace relay
}  // namespace tvm

#endif  // TVM_RELAY_IR_DATAFLOW_MATCHER_IMPL_H_
https://github.com/zk-ml/tachikoma
src/relay/ir/indexed_graph.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file src/relay/ir/indexed_graph.h * \brief A graph representation of the dataflow in a Relay expression or Relay (dataflow) * pattern. Each 'indexed graph' node is 1:1 with an expression/pattern 'node', hence the * term 'IndexedGraph'. Dataflow is captured in a generic representation which is convenient * for analysis, particularly pattern matching and partitioning. * * TODO(mbs): Copied from fuse_ops.cc, consider refactoring to share implementation. */ #ifndef TVM_RELAY_IR_INDEXED_GRAPH_H_ #define TVM_RELAY_IR_INDEXED_GRAPH_H_ #include <tvm/relay/dataflow_pattern.h> #include <memory> #include <stack> #include <string> #include <unordered_map> #include <unordered_set> #include <utility> #include <vector> namespace tvm { namespace relay { /*! \brief The index of a node in the post-dfs traversal of overall expression. */ using PostDfsIndex = size_t; /*! * \brief Returns a brief summary of the 'reference' expression or pattern. Only used by * IndexedGraph::ToString() for debugging. */ std::string RefToSummary(const Expr& expr); std::string RefToSummary(const DFPattern& pattern); /*! 
* \brief Represents the implied dataflow of an expression or (dataflow) pattern as a DAG who's * nodes are 1:1 with those in the underlying expression/pattern. * * Each indexed graph node captures: * - Dataflow inputs. * - Dataflow outputs (or a flag indicating the node is an implied output). * - Dominator parent (ie closest node at which all outputs of the current node re-combine). * - Dominator children (inverse of above). * - Basic block (ie node representing the body of a function, arm of an if, etc). * * This class is templated so we can analyze both DFPatterns and Exprs with the same infrastructure. * * IndexedGraph should be instantiated through the CreateIndexedGraph utilities below. */ template <typename T> class IndexedGraph { public: using TNode = typename T::ContainerType; /*! \brief A Node in the graph. */ struct Node { /*! \brief Node Constructor * \param ref The expression or dataflow pattern node this indexed graph node is augmenting. * \param index The index of this node in the topological order */ Node(const TNode* ref, PostDfsIndex index) : node_ref_(ref), index_(index) {} /*! \brief The underlying expression or pattern node. */ const TNode* node_ref_; T ref() const { ICHECK(node_ref_ != nullptr); return GetRef<T>(node_ref_); } /*! * \brief The index of this node in post-dfs order. If left.index_ > right.index_ then * left does not flow into right. If left.index_ = right.index_ then left and right are * the same node. */ const PostDfsIndex index_; /*! \brief If true this node has implicit outputs, for example as the result of a function. */ bool is_external_ = false; /*! \brief Immediate dataflow inputs to this node. */ std::vector<Node*> inputs_; /*! \brief Immediate dataflow outputs of this node -- may be empty if is_external_ is true. */ std::vector<Node*> outputs_; /*! * \brief The node representing the 'basic block' containing this node: * - Function bodies start a new basic block for their bodies. 
* - The true and false branches of an if start their own blocks. * - The arms of a match each have their own blocks. */ Node* basic_block_ = nullptr; /*! \brief The depth of this node in the dominator tree */ size_t depth_ = 0; /*! * \brief The dominator parent of this node. This is the node N with least index such that * all possible dataflows from this node pass through N. */ Node* dominator_parent_ = nullptr; /*! \brief The nodes this node dominates. */ std::vector<Node*> dominator_children_; /*! * Add to \p nodes all the nodes which are strictly downstream of \p this, ie can be * reached by following output paths. */ void AccumulateDownstreamNodes(std::unordered_set<const Node*>* nodes) const { std::stack<const Node*> stack; stack.push(this); while (!stack.empty()) { const Node* current = stack.top(); stack.pop(); for (auto node : current->outputs_) { if (nodes->count(node) == 0) { stack.push(node); nodes->insert(node); } } } } /*! * \brief Returns true if \p this is a dominator of \p other. Ie all dataflow paths from \p * other pass through \p this. 
*/ bool Dominates(const Node* other) const { std::stack<const Node*> stack; std::unordered_set<const Node*> visited; stack.push(this); while (!stack.empty()) { const Node* current = stack.top(); stack.pop(); for (auto node : current->dominator_children_) { if (visited.count(node) == 0) { if (other == node) { return true; } else { stack.push(node); } visited.insert(node); } } } return false; } }; PostDfsIndex size() const { return topological_order_.size(); } Node* item_to_node(const T& item) { return item_to_node(item.get()); } const Node* item_to_node(const T& item) const { return item_to_node(item.get()); } Node* item_to_node(const TNode* item) { auto itr = node_map_.find(item); ICHECK(itr != node_map_.end()) << PrettyPrint(GetRef<T>(item)); return itr->second; } const Node* item_to_node(const TNode* item) const { auto itr = node_map_.find(item); ICHECK(itr != node_map_.end()) << PrettyPrint(GetRef<T>(item)); return itr->second; } Node* index_to_node(PostDfsIndex index) { ICHECK_LT(index, topological_order_.size()) << index; return topological_order_[index].get(); } const Node* index_to_node(PostDfsIndex index) const { ICHECK_LT(index, topological_order_.size()) << index; return topological_order_[index].get(); } /*! * \brief (For debugging only) Returns description of indexed graph with hints as to the * sub-expressions or sub-patterns corresponding to each indexed graph node. 
*/ std::string ToString() const { std::ostringstream os; os << "IndexedGraph(size = " << topological_order_.size() << ") {" << std::endl; for (PostDfsIndex index = 0; index < topological_order_.size(); ++index) { const Node* node = topological_order_[index].get(); ICHECK_EQ(index, node->index_); os << " " << index << " (" << RefToSummary(node->ref()) << "): inputs=["; for (const auto* sub_node : node->inputs_) { os << sub_node->index_ << ","; } os << "], outputs=["; for (const auto* sub_node : node->outputs_) { os << sub_node->index_ << ","; } os << "]"; if (node->is_external_) { os << ", external"; } if (node->basic_block_) { os << ", basic_block=" << node->basic_block_->index_; } if (node->depth_ > 0) { os << ", depth=" << node->depth_; } if (node->dominator_parent_) { os << ", dom_parent=" << node->dominator_parent_->index_; } os << ", dom_children=["; for (const auto* sub_node : node->dominator_children_) { os << sub_node->index_ << ","; } os << "]" << std::endl; } os << "}"; return os.str(); } /*! * Check-fails if the graph is ill-formed. For debugging only. */ void CheckValid() const { ICHECK_GT(topological_order_.size(), 0); for (PostDfsIndex index = 0; index < topological_order_.size(); ++index) { const Node* node = topological_order_[index].get(); // We have a node. ICHECK(node); // Bijections with post-dfs indexes and expressions/patterns are correct. ICHECK_EQ(node->index_, index); ICHECK(node->node_ref_); auto itr = node_map_.find(node->node_ref_); ICHECK(itr != node_map_.end()); ICHECK_EQ(itr->second, node) << "at index " << index << " in:" << std::endl << ToString(); // Inputs come before. for (size_t i = 0; i < node->inputs_.size(); ++i) { const Node* input = node->inputs_[i]; ICHECK(input); ICHECK_LT(input->index_, index); ICHECK(std::find(input->outputs_.begin(), input->outputs_.end(), node) != input->outputs_.end()); } // Outputs come after. 
for (size_t i = 0; i < node->outputs_.size(); ++i) { const Node* output = node->outputs_[i]; ICHECK(output); ICHECK_GT(output->index_, index); ICHECK(std::find(output->inputs_.begin(), output->inputs_.end(), node) != output->inputs_.end()); } ICHECK_GT(node->depth_, 0); // Dominator children come before. for (size_t i = 0; i < node->dominator_children_.size(); ++i) { const Node* child = node->dominator_children_[i]; ICHECK(child); ICHECK_LT(child->index_, index); } if (node->dominator_parent_) { // Dominator comes after. ICHECK_GT(node->dominator_parent_->index_, index); } } } private: /*! \brief Construct the domination tree inside IndexedGraph */ void PostDom() { for (PostDfsIndex i = topological_order_.size(); i != 0; --i) { PostDfsIndex index = i - 1; auto* current = topological_order_[index].get(); if (current->is_external_) { current->depth_ = 1; current->dominator_parent_ = nullptr; } else { auto parent = LeastCommonAncestor(current->outputs_); current->depth_ = parent ? parent->depth_ + 1 : 1; current->dominator_parent_ = parent; if (parent) { parent->dominator_children_.push_back(current); } } } } /*! \brief Find the least common ancestor of all outputs of a node */ Node* LeastCommonAncestor(const std::vector<Node*>& outputs) { if (outputs.size() == 0) { return nullptr; } auto parent = outputs.at(0); for (size_t i = 1; i < outputs.size(); ++i) { parent = LeastCommonAncestor(parent, outputs.at(i)); } return parent; } /*! 
\brief Find the least common ancestor of two nodes */ Node* LeastCommonAncestor(Node* lhs, Node* rhs) { if (lhs == nullptr || rhs == nullptr) { return nullptr; } PostDfsIndex lhs_index = lhs->index_; PostDfsIndex rhs_index = rhs->index_; while (lhs != rhs) { ICHECK(lhs && rhs) << "LCA(" << lhs_index << ", " << rhs_index << ") on graph:" << std::endl << ToString(); if (lhs->depth_ < rhs->depth_) { rhs = rhs->dominator_parent_; } else if (lhs->depth_ > rhs->depth_) { lhs = lhs->dominator_parent_; } else { rhs = rhs->dominator_parent_; lhs = lhs->dominator_parent_; } } return lhs; } /*! * \brief Appends a node corresponding to \p ref, and maintains the sub-expression/sub-pattern to * node bijection. The insertion index will be the node's PostDfsIndex. All other node properties * are accumulated in-place. */ void AddNode(const T& ref) { PostDfsIndex index = topological_order_.size(); auto node = std::make_unique<Node>(ref.get(), index); node_map_[ref.get()] = node.get(); topological_order_.emplace_back(std::move(node)); } /*! * \brief Map from underlying sub-expression or sub-pattern nodes to their indexed graph nodes. */ std::unordered_map<const TNode*, Node*> node_map_; /*! \brief All nodes in increasing post-dfs index order. This vector owns all the nodes. */ std::vector<std::unique_ptr<Node>> topological_order_; friend std::unique_ptr<IndexedGraph<Expr>> CreateIndexedGraph(const Expr& expr); friend std::unique_ptr<IndexedGraph<DFPattern>> CreateIndexedGraph(const DFPattern& pattern); }; /*! \brief Returns an Indexed Graph for \p expr, which much outlive the result. */ std::unique_ptr<IndexedGraph<Expr>> CreateIndexedGraph(const Expr& expr); /*! * \brief Returns an Indexed Graph for \p pattern, which must outlive the result. * The dataflow for a pattern mimics the dataflow for the expression which would match * that pattern. 
*/ std::unique_ptr<IndexedGraph<DFPattern>> CreateIndexedGraph(const DFPattern& pattern); } // namespace relay } // namespace tvm #endif // TVM_RELAY_IR_INDEXED_GRAPH_H_
https://github.com/zk-ml/tachikoma
src/relay/op/annotation/annotation.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file relay/op/annotation/annotation.h * \brief Helpers for working with various 'annotation' attributes. */ #ifndef TVM_RELAY_OP_ANNOTATION_ANNOTATION_H_ #define TVM_RELAY_OP_ANNOTATION_ANNOTATION_H_ #include <tvm/relay/attrs/annotation.h> #include <tvm/relay/expr.h> #include <tvm/relay/function.h> #include <tvm/runtime/ndarray.h> #include <vector> namespace tvm { namespace relay { /*! \brief Wraps \p data in a "stop_fusion" annotation. */ Expr StopFusion(Expr data); /*! \brief Wraps \p data in a "cast_hint" annotation for \p dtype. */ Expr CastHint(Expr data, DataType dtype); } // namespace relay } // namespace tvm #endif // TVM_RELAY_OP_ANNOTATION_ANNOTATION_H_
https://github.com/zk-ml/tachikoma
src/relay/op/call/call.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file src/relay/op/call/call.h
 * \brief Operators for calling lowered functions.
 */
#ifndef TVM_RELAY_OP_CALL_CALL_H_
#define TVM_RELAY_OP_CALL_CALL_H_

#include <tvm/relay/attrs/call.h>
#include <tvm/relay/expr.h>

#include <utility>

namespace tvm {
namespace relay {

/*!
 * \brief Returns the Relay call_lowered op. Use this helper to avoid extraneous calls to
 * Registry::Get.
 */
const Op& CallLoweredOp();

/*!
 * \brief Helper to construct a Relay call with the "call_lowered" op.
 *
 * The callee must:
 *  - Be a global bound to a PrimFunc or an externally defined functions.
 *  - Accept only tensor arguments and return tensor results.
 *  - Arguments and results correspond to the flattened form (see FlattenTupleType) of the
 *    Relay Function type.
 *  - Return results by output pointer, ie use DPS.
 * The arguments remain in Relay form (ie not flattened).
 * The result remains in Relay form (ie returned from the call and not flattened).
 *
 * \param lowered_func Lowered function to call with call_lowered.
 * \param args Arguments to be passed to the function.
 * \param call_lowered_attrs Function attributes.
 * \param span TVM span for propagating debugging info.
 * \return A Call to the call_lowered operator invoking \p lowered_func with \p args.
 */
Call CallLowered(GlobalVar lowered_func, Array<Expr> args, CallLoweredAttrs call_lowered_attrs,
                 Span span);

/*!
 * \brief Lowered function and the arguments to call it with.
 */
struct CallLoweredProps {
  /*! \brief Global variable pointing to the lowered function. */
  GlobalVar lowered_func;
  /*! \brief Array of the arguments to call lowered_func with. */
  Array<Expr> arguments;
  /*! \brief Attributes from the call_lowered op. */
  CallLoweredAttrs attrs;
};

/*!
 * \brief Helper to extract the lowered function and its arguments from a Call("call_lowered", ...).
 * Returns the null/empty \p CallLoweredProps if \p call_node is not in that form.
 */
CallLoweredProps GetCallLoweredProps(const CallNode* call_node);

/*!
 * \brief Returns \p call_node in 'standard' Relay form. Ie if \p call_node is a call_lowered
 * then returns it in un-lowered form, otherwise returns \p call_node directly.
 *
 * Useful for passes which can act uniformly on calls irrespective of their form.
 */
Call GetAnyCall(const CallNode* call_node);

/*!
 * \brief Returns true if lowered call described by \p props is to a reshape primitive.
 */
bool IsReshapeOnly(const CallLoweredProps& props);

}  // namespace relay
}  // namespace tvm

#endif  // TVM_RELAY_OP_CALL_CALL_H_
https://github.com/zk-ml/tachikoma
src/relay/op/contrib/ethosu/common.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file src/relay/op/contrib/ethosu/common.h
 * \brief Functions for all Arm(R) Ethos(TM)-U NPU operators to use.
 */

#ifndef TVM_RELAY_OP_CONTRIB_ETHOSU_COMMON_H_
#define TVM_RELAY_OP_CONTRIB_ETHOSU_COMMON_H_

#include <tvm/relay/expr.h>

namespace tvm {
namespace relay {
namespace op {
namespace contrib {
namespace ethosu {

/*! \brief Infer the output tensor shape for binary elementwise operators.
 * \param ifm_shape The shape of Input Feature Map.
 * \param ifm_layout The layout of the IFM (NHWC or NHCWB16).
 * \param ofm_layout The layout of the OFM (NHWC or NHCWB16).
 * \param ofm_channels The number of Output Feature Map channels.
 * \return The shape of the output tensor.
 */
Array<IndexExpr> EthosuInferElementwiseOutputShape(Array<IndexExpr> ifm_shape, String ifm_layout,
                                                   String ofm_layout, IndexExpr ofm_channels);

/*! \brief Infer the output tensor shape for convolution and pooling operators.
 * \param ifm_shape The shape of Input Feature Map.
 * \param ifm_layout The layout of the IFM (NHWC or NHCWB16).
 * \param ofm_layout The layout of the OFM (NHWC or NHCWB16).
 * \param kernel_shape Kernel shape in format (height, width).
 * \param ofm_channels The number of Output Feature Map channels.
 * \param dilation The 2-dimensional dilation as (dilation_height, dilation_width).
 * \param strides The 2 dimensional strides as (stride_height, stride_width).
 * \param padding The 4 dimensional padding as (pad_top, pad_left, pad_bottom, pad_right).
 * \return The shape of the output tensor.
 */
Array<IndexExpr> EthosuInferKernelOutput(Array<IndexExpr> ifm_shape, String ifm_layout,
                                         String ofm_layout, Array<IndexExpr> kernel_shape,
                                         IndexExpr ofm_channels, Array<IndexExpr> dilation,
                                         Array<IndexExpr> strides, Array<IndexExpr> padding);

/*! \brief Infer the Output Feature Map shape for operations that use upscaling.
 * \param ifm_shape The shape of the Input Feature Map.
 * \param ifm_layout The layout of the Input Feature Map.
 * \return The shape of the upscaled Input Feature Map.
 */
Array<IndexExpr> EthosuInferUpscaledInput(Array<IndexExpr> ifm_shape, String ifm_layout);

/*! \brief Get data type from string representation.
 * \param dtype Data type in lower case format followed by number of bits e.g. "int8".
 */
DataType DataTypeFromString(const String& dtype);

/*! \brief Check the data type for a given input matches one given in allowed_data_types. Raise a
 * type inference error if not.
 * \param reporter The infer type reporter.
 * \param data_type The data type to check.
 * \param allowed_data_types An initializer list of allowed data types.
 * \param operator_name The name of the operator to report.
 * \param tensor_name The name of the tensor to report e.g. "ifm", "ofm".
 * \param operator_type The type of the operator to report e.g. "ADD" for binary_elementwise.
 */
void CheckDataType(const TypeReporter& reporter, const DataType& data_type,
                   const std::initializer_list<DataType>& allowed_data_types,
                   const String& operator_name, const String& tensor_name,
                   const String& operator_type = "");

/*! \brief Check the upscale method matches one given in allowed_upscale_methods. Raise a type
 * inference error if not.
 * \param reporter The infer type reporter.
 * \param upscale_method The upscale method string to check.
 * \param allowed_upscale_methods An initializer list of allowed upscale methods.
 * \param operator_name The name of the operator to report.
 * \param operator_type The type of the operator to report e.g. "ADD" for binary_elementwise.
 */
void CheckUpscaleMethod(const TypeReporter& reporter, const String& upscale_method,
                        const std::initializer_list<String>& allowed_upscale_methods,
                        const String& operator_name, const String& operator_type = "");

/*! \brief Check the data type matches that of the second data type provided. Raise a type inference
 * error if not.
 * \param reporter The infer type reporter.
 * \param data_type The data type to check.
 * \param data_type2 The second data type to check.
 * \param operator_name The name of the operator to report.
 * \param tensor_name The name of the tensor to report e.g. "ifm", "ofm".
 * \param tensor_name2 The name of the second tensor to report e.g. "ifm2".
 * \param operator_type The type of the operator to report e.g. "ADD" for binary_elementwise.
 */
void CheckDataTypeMatch(const TypeReporter& reporter, const DataType& data_type,
                        const DataType& data_type2, const String& operator_name,
                        const String& tensor_name, const String& tensor_name2,
                        const String& operator_type = "");

}  // namespace ethosu
}  // namespace contrib
}  // namespace op
}  // namespace relay
}  // namespace tvm

#endif  // TVM_RELAY_OP_CONTRIB_ETHOSU_COMMON_H_
https://github.com/zk-ml/tachikoma
src/relay/op/contrib/ethosu/op_attrs.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file src/relay/op/contrib/ethosu/op_attrs.h * \brief Attributes for the Arm(R) Ethos(TM)-U NPU operators. */ #ifndef TVM_RELAY_OP_CONTRIB_ETHOSU_OP_ATTRS_H_ #define TVM_RELAY_OP_CONTRIB_ETHOSU_OP_ATTRS_H_ #include <tvm/relay/op.h> namespace tvm { namespace relay { namespace op { namespace contrib { namespace ethosu { /*! \brief Attributes used by the Ethos(TM)-U NPU binary elementwise operators */ struct EthosuBinaryElementwiseAttrs : public tvm::AttrsNode<EthosuBinaryElementwiseAttrs> { String operator_type; double ifm_scale; int ifm_zero_point; double ifm2_scale; int ifm2_zero_point; double ofm_scale; int ofm_zero_point; IndexExpr ifm_channels; IndexExpr ifm2_channels; bool reversed_operands; String activation; int clip_min; int clip_max; String rounding_mode; String ifm_layout; String ifm2_layout; String ofm_layout; String ofm_dtype; TVM_DECLARE_ATTRS(EthosuBinaryElementwiseAttrs, "relay.attrs.EthosuBinaryElementwiseAttrs") { TVM_ATTR_FIELD(operator_type) .describe( "The type of the binary elementwise operator." 
"'ADD'" "'SUB'" "'MUL'" "'MIN'" "'MAX'" "'SHR'" "'SHL'"); TVM_ATTR_FIELD(ifm_scale).describe("The quantization scale for the Input Feature Map tensor."); TVM_ATTR_FIELD(ifm_zero_point) .describe("The quantization zero point for the Input Feature Map tensor."); TVM_ATTR_FIELD(ifm2_scale) .describe("The quantization scale for the Input Feature Map tensor 2."); TVM_ATTR_FIELD(ifm2_zero_point) .describe("The quantization zero point for the Input Feature Map tensor 2."); TVM_ATTR_FIELD(ofm_scale).describe("The quantization scale for the Output Feature Map tensor."); TVM_ATTR_FIELD(ofm_zero_point) .describe("The quantization zero point for the Output Feature Map tensor."); TVM_ATTR_FIELD(ifm_channels).describe("The number of the Input Feature Map channels."); TVM_ATTR_FIELD(ifm2_channels).describe("The number of the Input Feature Map 2 channels."); TVM_ATTR_FIELD(reversed_operands) .describe("True if IFM2 is the first operand and IFM is the second operand.") .set_default(false); TVM_ATTR_FIELD(activation) .describe( "The activation function to use. " "'NONE' - no activation function. " "'CLIP' - clip the output between clip_min and clip_max. " "'TANH' - tanh activation function. " "'SIGMOID' - sigmoid activation function. " "'LUT' - use a look-up table to perform the activation function." "Available activations for activation type:" "{int8, uint8}: 'NONE', 'CLIP', 'TANH', 'SIGMOID', 'LUT'" "{int32}: 'NONE'") .set_default("NONE"); TVM_ATTR_FIELD(clip_min) .describe("The minimum clipping value if activation = 'CLIP'.") .set_default(0); TVM_ATTR_FIELD(clip_max) .describe("The maximum clipping value if activation = 'CLIP'.") .set_default(0); TVM_ATTR_FIELD(rounding_mode) .describe( "The rounding mode to apply to the Output Feature Map tensor. " "'TFL' - Tensorflow Lite rounding scheme. " "'TRUNCATE' - Truncate towards zero." 
"'NATURAL' - Round to nearest value, with x.5 rounded up towards +infinity.") .set_default("TFL"); TVM_ATTR_FIELD(ifm_layout) .describe("The layout of the Input Feature Map tensor. Can be 'NHWC' or 'NHCWB16'.") .set_default("NHWC"); TVM_ATTR_FIELD(ifm2_layout) .describe("The layout of the Input Feature Map tensor 2. Can be 'NHWC' or 'NHCWB16'.") .set_default("NHWC"); TVM_ATTR_FIELD(ofm_layout) .describe("The layout of the Output Feature Map tensor. Can be 'NHWC' or 'NHCWB16'.") .set_default("NHWC"); TVM_ATTR_FIELD(ofm_dtype).describe( "The Output Feature Map tensor type." "MUL, ADD, SUB {IFM}->{OFM}:" " {uint8, int8 int32} -> {uint8, int8, int32}, any pairing" "MAX, MIN:" " IFM and OFM must be of the same type, one of:" " {int8, uint8}" "SHR {IFM}->{OFM}:" " {int32}->{int8, uint8, int32}, any pairing" "SHL:" " {int32}->{int32} only"); } }; TVM_REGISTER_NODE_TYPE(EthosuBinaryElementwiseAttrs); /*! \brief Attributes used by the Ethos(TM)-U NPU convolution operator */ struct EthosuConv2DAttrs : public tvm::AttrsNode<EthosuConv2DAttrs> { double ifm_scale; int ifm_zero_point; int weight_zero_point; double ofm_scale; int ofm_zero_point; Array<IndexExpr> kernel_shape; IndexExpr ofm_channels; Array<IndexExpr> strides; Array<IndexExpr> padding; Array<IndexExpr> dilation; String activation; int clip_min; int clip_max; String rounding_mode; String upscale; String ifm_layout; String ofm_layout; TVM_DECLARE_ATTRS(EthosuConv2DAttrs, "relay.attrs.EthosuConv2DAttrs") { TVM_ATTR_FIELD(ifm_scale).describe("The quantization scale for the Input Feature Map tensor."); TVM_ATTR_FIELD(ifm_zero_point) .describe("The quantization zero point for the Input Feature Map tensor."); TVM_ATTR_FIELD(weight_zero_point) .describe("The quantization zero point for the weight tensor."); TVM_ATTR_FIELD(ofm_scale).describe("The quantization scale for the Output Feature Map tensor."); TVM_ATTR_FIELD(ofm_zero_point) .describe("The quantization zero point for the Output Feature Map tensor."); 
TVM_ATTR_FIELD(kernel_shape) .describe("The 2 dimensional kernel shape as (kernel_height, kernel_width).") .set_default(NullValue<Array<IndexExpr>>()); TVM_ATTR_FIELD(ofm_channels) .describe("The number of the Output Feature Map channels.") .set_default(NullValue<IndexExpr>()); TVM_ATTR_FIELD(strides) .set_default(Array<IndexExpr>({1, 1})) .describe("The 2 dimensional strides as (stride_height, stride_width)."); TVM_ATTR_FIELD(padding) .set_default(Array<IndexExpr>({0, 0, 0, 0})) .describe("The 4 dimensional padding as (pad_top, pad_left, pad_bottom, pad_right)."); TVM_ATTR_FIELD(dilation) .set_default(Array<IndexExpr>({1, 1})) .describe("The 2 dimensional dilation as (dilation_height, dilation_width)."); TVM_ATTR_FIELD(activation) .describe( "The activation function to use. " "'NONE' - no activation function. " "'CLIP' - clip the output between clip_min and clip_max. " "'TANH' - tanh activation function. " "'SIGMOID' - sigmoid activation function. " "'LUT' - use a look-up table to perform the activation function.") .set_default("NONE"); TVM_ATTR_FIELD(clip_min) .describe("The minimum clipping value if activation = 'CLIP'.") .set_default(0); TVM_ATTR_FIELD(clip_max) .describe("The maximum clipping value if activation = 'CLIP'.") .set_default(0); TVM_ATTR_FIELD(rounding_mode) .describe( "The rounding mode to apply to the Output Feature Map tensor. " "'TFL' - Tensorflow Lite rounding scheme. " "'TRUNCATE' - Truncate towards zero." "'NATURAL' - Round to nearest value, with x.5 rounded up towards +infinity.") .set_default("TFL"); TVM_ATTR_FIELD(upscale) .describe( "The 2x2 upscaling mode to apply to the Input Feature Map tensor. " "'NONE' - no upscaling. " "'NEAREST' - upscale using nearest neighbour. " "'ZEROS' - upscale using zeros.") .set_default("NONE"); TVM_ATTR_FIELD(ifm_layout) .set_default("NHWC") .describe("The layout of the Input Feature Map tensor. 
Can be 'NHWC' or 'NHCWB16'."); TVM_ATTR_FIELD(ofm_layout) .set_default("NHWC") .describe("The layout of the Output Feature Map tensor. Can be 'NHWC' or 'NHCWB16'."); } }; TVM_REGISTER_NODE_TYPE(EthosuConv2DAttrs); /*! \brief Attributes used by the Ethos(TM)-U NPU depthwise operator */ struct EthosuDepthwiseConv2DAttrs : public tvm::AttrsNode<EthosuDepthwiseConv2DAttrs> { double ifm_scale; int ifm_zero_point; int weight_zero_point; double ofm_scale; int ofm_zero_point; Array<IndexExpr> kernel_shape; IndexExpr ofm_channels; Array<IndexExpr> strides; Array<IndexExpr> padding; Array<IndexExpr> dilation; String activation; int clip_min; int clip_max; String rounding_mode; String upscale; String ifm_layout; String ofm_layout; String ofm_dtype; TVM_DECLARE_ATTRS(EthosuDepthwiseConv2DAttrs, "relay.attrs.EthosuDepthwiseConv2DAttrs") { TVM_ATTR_FIELD(ifm_scale).describe("The quantization scale for the Input Feature Map tensor."); TVM_ATTR_FIELD(ifm_zero_point) .describe("The quantization zero point for the Output Feature Map tensor."); TVM_ATTR_FIELD(weight_zero_point) .describe("The quantization zero point for the weight tensor."); TVM_ATTR_FIELD(ofm_scale).describe("The quantization scale for the Output Feature Map tensor."); TVM_ATTR_FIELD(ofm_zero_point) .describe("The quantization zero point for the Output Feature Map tensor."); TVM_ATTR_FIELD(kernel_shape) .describe("The 2 dimensional kernel shape as (kernel_height, kernel_width).") .set_default(NullValue<Array<IndexExpr>>()); TVM_ATTR_FIELD(ofm_channels) .describe("The number of OFM channels.") .set_default(NullValue<IndexExpr>()); TVM_ATTR_FIELD(strides) .describe("The 2 dimensional strides as (stride_height, stride_width).") .set_default(Array<IndexExpr>({1, 1})); TVM_ATTR_FIELD(padding) .describe("The 4 dimensional padding as (pad_top, pad_left, pad_bottom, pad_right)") .set_default(Array<IndexExpr>({0, 0, 0, 0})); TVM_ATTR_FIELD(dilation) .describe("The 2 dimensional dilation as (dilation_height, 
dilation_width).") .set_default(Array<IndexExpr>({1, 1})); TVM_ATTR_FIELD(activation) .describe( "Description: The activation function to use." "'NONE' - no activation function." "'CLIP' - clip the output between clip_min and clip_max." "'TANH - tanh activation function." "'SIGMOID' - sigmoid activation function." "'LUT' - use a look-up table to perform the activation function.") .set_default("NONE"); TVM_ATTR_FIELD(clip_min) .describe("The minimum clipping value if activation = CLIP.") .set_default(0); TVM_ATTR_FIELD(clip_max) .describe("The maximum clipping value if activation = CLIP.") .set_default(0); TVM_ATTR_FIELD(rounding_mode) .describe( "The rounding mode to apply to the Output Feature Map tensor. " "'TFL' - Tensorflow Lite rounding scheme. " "'TRUNCATE' - Truncate towards zero." "'NATURAL' - Round to nearest value, with x.5 rounded up towards +infinity.") .set_default("TFL"); TVM_ATTR_FIELD(upscale) .describe( "The 2x2 upscaling mode to apply to the Input Feature Map tensor. " "'NONE' - no upscaling. " "'NEAREST' - upscale using nearest neighbour. " "'ZEROS' - upscale using zeros.") .set_default("NONE"); TVM_ATTR_FIELD(ifm_layout) .set_default("NHWC") .describe("The layout of the Input Feature Map tensor. Can be 'NHWC' or 'NHCWB16'."); TVM_ATTR_FIELD(ofm_layout) .set_default("NHWC") .describe("The layout of the Output Feature Map tensor. Can be 'NHWC' or 'NHCWB16'."); TVM_ATTR_FIELD(ofm_dtype) .describe("The Output Feature Map tensor data type. Can be 'int8', 'uint8' or 'int16'.") .set_default("int8"); } }; TVM_REGISTER_NODE_TYPE(EthosuDepthwiseConv2DAttrs); /*! 
 \brief Attributes used by the NPU identity operator */
struct EthosuIdentityAttrs : public tvm::AttrsNode<EthosuIdentityAttrs> {
  // Requantization parameters: the identity op copies the tensor, optionally
  // rescaling and applying an activation (per the describe() strings below).
  double ifm_scale;
  int ifm_zero_point;
  double ofm_scale;
  int ofm_zero_point;
  String activation;

  TVM_DECLARE_ATTRS(EthosuIdentityAttrs, "relay.attrs.EthosuIdentityAttrs") {
    TVM_ATTR_FIELD(ifm_scale).describe("The quantization scale for the Input Feature Map tensor.");
    TVM_ATTR_FIELD(ifm_zero_point)
        .describe("The quantization zero point for the Input Feature Map tensor.");
    TVM_ATTR_FIELD(ofm_scale).describe("The quantization scale for the Output Feature Map tensor.");
    TVM_ATTR_FIELD(ofm_zero_point)
        .describe("The quantization zero point for the Output Feature Map tensor.");
    // Note: unlike the other Ethos-U ops, identity has no 'CLIP' option here.
    TVM_ATTR_FIELD(activation)
        .describe(
            "The activation function to use. "
            "'NONE' - no activation function. "
            "'TANH' - tanh activation function. "
            "'SIGMOID' - sigmoid activation function. "
            "'LUT' - use a look-up table to perform the activation function.")
        .set_default("NONE");
  }
};

TVM_REGISTER_NODE_TYPE(EthosuIdentityAttrs);

/*! \brief Attributes used by the Ethos(TM)-U NPU pooling operator */
struct EthosuPoolingAttrs : public tvm::AttrsNode<EthosuPoolingAttrs> {
  // 'AVG' or 'MAX' (see the describe() string below).
  String pooling_type;
  // Quantization parameters for the IFM and OFM.
  double ifm_scale;
  int ifm_zero_point;
  double ofm_scale;
  int ofm_zero_point;
  // Pooling geometry.
  Array<IndexExpr> pool_shape;
  IndexExpr ofm_channels;
  Array<IndexExpr> strides;
  Array<IndexExpr> padding;
  // Optional fused activation and its clipping range.
  String activation;
  int clip_min;
  int clip_max;
  String rounding_mode;
  String upscale;
  // Tensor layouts ('NHWC' or 'NHCWB16').
  String ifm_layout;
  String ofm_layout;

  TVM_DECLARE_ATTRS(EthosuPoolingAttrs, "relay.attrs.EthosuPoolingAttrs") {
    TVM_ATTR_FIELD(pooling_type)
        .describe("The type of the pooling. 'AVG' - average pool, 'MAX' - max pool.");
    TVM_ATTR_FIELD(ifm_scale).describe("The quantization scale for the Input Feature Map tensor.");
    TVM_ATTR_FIELD(ifm_zero_point)
        .describe("The quantization zero point for the Input Feature Map tensor.");
    TVM_ATTR_FIELD(ofm_scale).describe("The quantization scale for the Output Feature Map tensor.");
    TVM_ATTR_FIELD(ofm_zero_point)
        .describe("The quantization zero point for the Output Feature Map tensor.");
    TVM_ATTR_FIELD(pool_shape)
        .describe("The 2 dimensional pool shape as (pool_shape_height, pool_shape_width).")
        .set_default(NullValue<Array<IndexExpr>>());
    TVM_ATTR_FIELD(ofm_channels)
        .describe(" The number of the Output Feature Map channels.")
        .set_default(NullValue<IndexExpr>());
    TVM_ATTR_FIELD(strides)
        .set_default(Array<IndexExpr>({1, 1}))
        .describe("The 2 dimensional strides as (stride_height, stride_width).");
    TVM_ATTR_FIELD(padding)
        .describe("The 4 dimensional padding as (pad_top, pad_left, pad_bottom, pad_right).")
        .set_default(Array<IndexExpr>({0, 0, 0, 0}));
    TVM_ATTR_FIELD(activation)
        .describe(
            "The activation function to use. "
            "'NONE' - no activation function. "
            "'CLIP' - clip the output between clip_min and clip_max. "
            "'TANH' - tanh activation function. "
            "'SIGMOID' - sigmoid activation function. "
            "'LUT' - use a look-up table to perform the activation function.")
        .set_default("NONE");
    // clip_min/clip_max are only meaningful when activation == 'CLIP'.
    TVM_ATTR_FIELD(clip_min)
        .describe("The minimum clipping value if activation = 'CLIP'.")
        .set_default(0);
    TVM_ATTR_FIELD(clip_max)
        .describe("The maximum clipping value if activation = 'CLIP'.")
        .set_default(0);
    TVM_ATTR_FIELD(rounding_mode)
        .describe(
            "The rounding mode to apply to the Output Feature Map tensor. "
            "'TFL' - Tensorflow Lite rounding scheme. "
            "'TRUNCATE' - Truncate towards zero."
            "'NATURAL' - Round to nearest value, with x.5 rounded up towards +infinity.")
        .set_default("TFL");
    TVM_ATTR_FIELD(upscale)
        .describe(
            "The 2x2 upscaling mode to apply to the Input Feature Map tensor. "
            "'NONE' - no upscaling. 
"
            "'NEAREST' - upscale using nearest neighbour. "
            "'ZEROS' - upscale using zeros.")
        .set_default("NONE");
    TVM_ATTR_FIELD(ifm_layout)
        .describe("The layout of the Input Feature Map tensor. Can be 'NHWC' or 'NHCWB16'.")
        .set_default("NHWC");
    TVM_ATTR_FIELD(ofm_layout)
        .describe("The layout of the Output Feature Map tensor. Can be 'NHWC' or 'NHCWB16'.")
        .set_default("NHWC");
  }
};

TVM_REGISTER_NODE_TYPE(EthosuPoolingAttrs);

/*! \brief Attributes used by the NPU unary elementwise operator */
struct EthosuUnaryElementwiseAttrs : public tvm::AttrsNode<EthosuUnaryElementwiseAttrs> {
  // 'ABS' or 'CLZ' (see the describe() string below).
  String operator_type;
  // Quantization parameters for the IFM and OFM.
  double ifm_scale;
  int ifm_zero_point;
  double ofm_scale;
  int ofm_zero_point;
  IndexExpr ofm_channels;
  // Optional fused activation and its clipping range.
  String activation;
  int clip_min;
  int clip_max;
  String rounding_mode;
  // Tensor layouts ('NHWC' or 'NHCWB16').
  String ifm_layout;
  String ofm_layout;

  TVM_DECLARE_ATTRS(EthosuUnaryElementwiseAttrs, "relay.attrs.EthosuUnaryElementwiseAttrs") {
    TVM_ATTR_FIELD(operator_type)
        .describe(
            "The type of the unary elementwise operator."
            "'ABS'"
            "'CLZ'");
    TVM_ATTR_FIELD(ifm_scale).describe("The quantization scale for the Input Feature Map tensor.");
    TVM_ATTR_FIELD(ifm_zero_point)
        .describe("The quantization zero point for the Input Feature Map tensor.");
    TVM_ATTR_FIELD(ofm_scale).describe("The quantization scale for the Output Feature Map tensor.");
    TVM_ATTR_FIELD(ofm_zero_point)
        .describe("The quantization zero point for the Output Feature Map tensor.");
    TVM_ATTR_FIELD(ofm_channels).describe("The number of OFM channels.");
    TVM_ATTR_FIELD(activation)
        .describe(
            "The activation function to use. "
            "'NONE' - no activation function. "
            "'CLIP' - clip the output between clip_min and clip_max. "
            "'TANH' - tanh activation function. "
            "'SIGMOID' - sigmoid activation function. "
            "'LUT' - use a look-up table to perform the activation function.")
        .set_default("NONE");
    // clip_min/clip_max are only meaningful when activation == 'CLIP'.
    TVM_ATTR_FIELD(clip_min)
        .describe("The minimum clipping value if activation = 'CLIP'.")
        .set_default(0);
    TVM_ATTR_FIELD(clip_max)
        .describe("The maximum clipping value if activation = 'CLIP'.")
        .set_default(0);
    TVM_ATTR_FIELD(rounding_mode)
        .describe(
            "The rounding mode to apply to the Output Feature Map tensor. "
            "'TFL' - Tensorflow Lite rounding scheme. "
            "'TRUNCATE' - Truncate towards zero."
            "'NATURAL' - Round to nearest value, with x.5 rounded up towards +infinity.")
        .set_default("TFL");
    // Note: unlike the binary elementwise/conv ops, there is no upscale
    // attribute on the unary elementwise operator.
    TVM_ATTR_FIELD(ifm_layout)
        .describe("The layout of the Input Feature Map tensor. Can be 'NHWC' or 'NHCWB16'.")
        .set_default("NHWC");
    TVM_ATTR_FIELD(ofm_layout)
        .describe("The layout of the Output Feature Map tensor. Can be 'NHWC' or 'NHCWB16'.")
        .set_default("NHWC");
  }
};

TVM_REGISTER_NODE_TYPE(EthosuUnaryElementwiseAttrs);

}  // namespace ethosu
}  // namespace contrib
}  // namespace op
}  // namespace relay
}  // namespace tvm

#endif  // TVM_RELAY_OP_CONTRIB_ETHOSU_OP_ATTRS_H_
https://github.com/zk-ml/tachikoma
src/relay/op/dyn/nn/upsampling.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 *
 * \file src/relay/op/dyn/nn/upsampling.h
 * \brief implementation of the InferCorrectLayout pass for dynamic upsampling
 */

#ifndef TVM_RELAY_OP_DYN_NN_UPSAMPLING_H_
#define TVM_RELAY_OP_DYN_NN_UPSAMPLING_H_

#include <tvm/relay/attrs/nn.h>
#include <tvm/tir/data_layout.h>

#include "../../op_common.h"

namespace tvm {
namespace relay {
namespace dyn {

/*!
 * \brief Infer the correct layouts for a dynamic upsampling op.
 *
 * \tparam T the attrs node type carrying a `layout` field
 *           (presumably UpSamplingAttrs / UpSampling3DAttrs — confirm at call sites).
 * \param attrs          the operator attributes; must be convertible to T.
 * \param new_in_layouts layouts proposed by the layout planner (may be undefined).
 * \param old_in_layouts layouts previously assigned (unused here).
 * \param old_in_types   input types (unused here).
 * \return an InferCorrectLayoutOutput with the (possibly updated) data layout
 *         plus fixed layouts for the remaining inputs, and updated attrs.
 */
template <typename T>
InferCorrectLayoutOutput UpsamplingInferCorrectLayout(const Attrs& attrs,
                                                      const Array<Layout>& new_in_layouts,
                                                      const Array<Layout>& old_in_layouts,
                                                      const Array<tvm::relay::Type>& old_in_types) {
  const auto* attrs_ptr = attrs.as<T>();
  ICHECK(attrs_ptr);
  // Copy the attrs so the stored layout can be mutated without touching the input.
  ObjectPtr<T> params = make_object<T>(*attrs_ptr);

  if (new_in_layouts.defined()) {
    ICHECK_GT(new_in_layouts.size(), 0);

    Layout raw_layout(params->layout);
    Layout input = new_in_layouts[0];
    // Adopt the proposed layout only when it is compatible with the stored one:
    // H and W must sit at the same axis positions and must not be split into
    // sub-axes ('h'/'w'); if a depth axis 'D' exists, it must likewise match
    // position and be unsplit ('d').
    if (input.IndexOf(LayoutAxis::Get('W')) == raw_layout.IndexOf(LayoutAxis::Get('W')) &&
        input.IndexOf(LayoutAxis::Get('H')) == raw_layout.IndexOf(LayoutAxis::Get('H')) &&
        !input.Contains(LayoutAxis::Get('w')) && !input.Contains(LayoutAxis::Get('h')) &&
        (input.IndexOf(LayoutAxis::Get('D')) == -1 ||
         (input.IndexOf(LayoutAxis::Get('D')) == raw_layout.IndexOf(LayoutAxis::Get('D')) &&
          !input.Contains(LayoutAxis::Get('d'))))) {
      params->layout = input.name();  // modify self to follow the input layout
    }
  }

  Layout inferred_layout(params->layout);
  // Fixed "NCHW" layouts are returned for the two non-data inputs
  // (presumably the dynamic scale inputs — confirm against the op registration).
  Layout param_layout("NCHW");
  return InferCorrectLayoutOutput({inferred_layout, param_layout, param_layout}, {inferred_layout},
                                  Attrs(params));
}

}  // namespace dyn
}  // namespace relay
}  // namespace tvm
#endif  // TVM_RELAY_OP_DYN_NN_UPSAMPLING_H_
https://github.com/zk-ml/tachikoma