include/tvm/runtime/vm/vm.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file tvm/runtime/vm/vm.h
 * \brief The Relay virtual machine runtime.
 */
#ifndef TVM_RUNTIME_VM_VM_H_
#define TVM_RUNTIME_VM_VM_H_

#include <tvm/runtime/container/closure.h>
#include <tvm/runtime/module.h>
#include <tvm/runtime/object.h>
#include <tvm/runtime/packed_func.h>
#include <tvm/runtime/registry.h>
#include <tvm/runtime/vm/bytecode.h>
#include <tvm/runtime/vm/executable.h>
#include <tvm/runtime/vm/memory_manager.h>

#include <memory>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

namespace tvm {
namespace runtime {
namespace vm {

/*!
 * \brief An object representing a vm closure.
 */
class VMClosureObj : public ClosureObj {
 public:
  /*!
   * \brief The index into the function list. The function could be any
   * function object that is compatible with the VM runtime.
   */
  size_t func_index;
  /*! \brief The free variables of the closure. */
  std::vector<ObjectRef> free_vars;

  static constexpr const uint32_t _type_index = TypeIndex::kDynamic;
  static constexpr const char* _type_key = "vm.Closure";
  TVM_DECLARE_FINAL_OBJECT_INFO(VMClosureObj, ClosureObj);
};

/*! \brief Reference to a closure. */
class VMClosure : public Closure {
 public:
  VMClosure(size_t func_index, std::vector<ObjectRef> free_vars);
  TVM_DEFINE_OBJECT_REF_METHODS(VMClosure, Closure, VMClosureObj);
};

/*!
 * \brief A representation of a Relay function in the VM.
 *
 * Contains metadata about the compiled function, as
 * well as the compiled VM instructions.
 */
struct VMFunction {
  /*! \brief The function's name. */
  std::string name;
  /*! \brief The function parameter names. */
  std::vector<std::string> params;
  /*! \brief The instructions representing the function. */
  std::vector<Instruction> instructions;
  /*! \brief The size of the frame for this function. */
  Index register_file_size = 0;
  /*! \brief The indexes for the device holding each function parameter. */
  std::vector<Index> param_device_indexes;

  VMFunction(std::string name, std::vector<std::string> params,
             std::vector<Instruction> instructions, Index register_file_size,
             std::vector<Index> param_device_indexes)
      : name(std::move(name)),
        params(std::move(params)),
        instructions(std::move(instructions)),
        register_file_size(register_file_size),
        param_device_indexes(std::move(param_device_indexes)) {
    ICHECK_EQ(this->params.size(), this->param_device_indexes.size());
  }

  VMFunction() = default;

  friend std::ostream& operator<<(std::ostream& os, const VMFunction&);
};

/*!
 * \brief A representation of a stack frame.
 *
 * A stack frame is a record containing the information needed
 * to restore the caller's virtual machine state after returning
 * from a function call.
 */
struct VMFrame {
  /*! \brief The return program counter. */
  Index pc;
  /*! \brief The index into the function table; points to the caller. */
  Index func_index;
  /*! \brief The number of arguments. */
  Index args;
  /*! \brief A pointer into the caller function's instructions. */
  const Instruction* code;
  /*! \brief Statically allocated space for objects. */
  std::vector<ObjectRef> register_file;
  /*! \brief The register in the caller's frame to put the return value in. */
  RegName caller_return_register;

  VMFrame(Index pc, Index func_index, Index args, const Instruction* code,
          Index register_file_size)
      : pc(pc),
        func_index(func_index),
        args(args),
        code(code),
        register_file(register_file_size),
        caller_return_register(0) {}
};

/*!
 * \brief The virtual machine.
 *
 * The virtual machine contains all the current execution state,
 * as well as the executable.
 *
 * The goal is to have a single self-contained object,
 * enabling one to easily pass around VMs, execute them on
 * multiple threads, or serialize them to disk or over the
 * wire.
 */
class TVM_DLL VirtualMachine : public runtime::ModuleNode {
 public:
  /*!
   * \brief Get a PackedFunc from the module.
   *
   * The PackedFunc may not be fully initialized; there might still be
   * first-time running overhead when executing the function on certain
   * devices. For benchmarking, use prepare to eliminate this overhead.
   *
   * \param name The name of the function.
   * \param sptr_to_self The shared_ptr that points to this module node.
   *
   * \return PackedFunc(nullptr) when it is not available.
   *
   * \note The function will always remain valid.
   *   If the function needs resources from the module (e.g. late linking),
   *   it should capture sptr_to_self.
   */
  virtual PackedFunc GetFunction(const std::string& name, const ObjectPtr<Object>& sptr_to_self);

  virtual ~VirtualMachine() {}

  const char* type_key() const final { return "VirtualMachine"; }

  VirtualMachine() : frames_(), func_index_(0), code_(nullptr), pc_(0), exec_(nullptr) {}

  /*!
   * \brief Load the executable for the virtual machine.
   * \param exec The executable.
   */
  virtual void LoadExecutable(const ObjectPtr<Executable>& exec);

 protected:
  /*! \brief Push a call frame onto the call stack. */
  void PushFrame(Index arg_count, Index ret_pc, const VMFunction& vm_func);

  /*!
   * \brief Pop a frame off the call stack.
   * \return The number of frames left.
   */
  Index PopFrame();

  /*!
   * \brief Write to a VM register.
   * \param reg The register to write to.
   * \param obj The object to write.
   */
  inline void WriteRegister(RegName reg, const ObjectRef& obj);

  /*!
   * \brief Read a VM register.
   * \param reg The register to read from.
   * \return The read object.
   */
  ObjectRef ReadRegister(RegName reg) const;

  /*!
   * \brief Read a VM register and cast it to int64_t.
   * \param reg The register to read from.
   * \return The read scalar.
   */
  int64_t LoadScalarInt(RegName reg) const;

  /*!
   * \brief Invoke a VM function.
   * \param func The function.
   * \param args The arguments to the function.
   * \return The object representing the result.
   */
  ObjectRef Invoke(const VMFunction& func, const std::vector<ObjectRef>& args);

  // TODO(@jroesch): I really would like this to be a global variable.
  /*!
   * \brief Invoke a VM function by name.
   * \param name The function's name.
   * \param args The arguments to the function.
   * \return The object representing the result.
   */
  ObjectRef Invoke(const std::string& name, const std::vector<ObjectRef>& args);

  /*!
   * \brief Invoke a VM function.
   * \param func The function.
   * \param input_args The input arguments to the function.
   * \param output_args The pre-allocated output arguments of the function.
   * \return The object(s) representing the result.
   */
  ObjectRef Invoke(const VMFunction& func, const std::vector<ObjectRef>& input_args,
                   const std::vector<ObjectRef>& output_args);

  /*!
   * \brief Invoke a PackedFunc.
   *
   * \param packed_index The offset of the PackedFunc in all functions.
   * \param func The PackedFunc to be invoked.
   * \param arg_count The number of arguments to the PackedFunc.
   * \param output_size The number of outputs of the PackedFunc.
   * \param args Arguments to the PackedFunc.
   *
   * \note The return value will be stored in the last output_size slots of args.
   */
  virtual void InvokePacked(Index packed_index, const PackedFunc& func, Index arg_count,
                            Index output_size, const std::vector<ObjectRef>& args);

  /*!
   * \brief Initialize the virtual machine for a set of (physical) devices.
   * \param physical_devices The set of TVM devices.
   * \param alloc_types The allocator types for each device.
   */
  void Init(const std::vector<Device>& physical_devices,
            const std::vector<AllocatorType>& alloc_types);

  /*! \brief Run the VM dispatch loop. */
  void RunLoop(const std::vector<Index>& output_tensor_reg_indices = {});

  /*! \brief Get a device from the device list based on a given device index. */
  Device GetDevice(Index device_index) const;

  Allocator* GetAllocator(Index device_index) const;

  /*!
   * \brief Invoke a global, setting up the VM state to execute.
   *
   * This does not begin execution of the VM.
   */
  void InvokeGlobal(const VMFunction& func, const std::vector<ObjectRef>& args);

  /*!
   * \brief Set inputs to a function.
   * \param name The function name.
   * \param args args[offset:] are arguments to the function. If the arguments are not of the
   * correct device for the function, they will be copied to the device.
   * \param offset Starting offset of the arguments in `args`.
   */
  void SetInput(std::string name, TVMArgs args, int offset);

  /*!
   * \brief Set one input tensor with an index or name to a function.
   * \param name The function name.
   * \param tag The index or name of the input tensor.
   * \param tensor The input tensor. If the tensor is not of the correct device for the function,
   * it will be copied to the device.
   */
  void SetOneInput(std::string name, const TVMArgValue& tag, const TVMArgValue& tensor);

  /*!
   * \brief Set pre-allocated output tensors to a function.
   * It is the native implementation of the 'set_outputs' Python method.
   * It is used in the scenario where output tensors are allocated outside of each invocation.
   * Note: it sets set_outputs_enabled_[name] to true and fills outputs_[name],
   * but after invocation the former is switched off and the latter is cleared.
   * \param name The function name.
   * \param args The outputs to the function.
   */
  void SetOutputs(std::string name, TVMArgs args);

  /*!
   * \brief Preparation part of the Invoke method before RunLoop.
   * \param func The function.
   * \param args The input args.
   */
  void PrintInfoAndSetInputArgs(const VMFunction& func, const std::vector<ObjectRef>& args);

  /*!
   * \brief Set pre-allocated outputs to registers for the specified function.
   * \param func_name The function's name.
   * \param outputs The set of output tensors.
   */
  void SetOutputTensorsToRegister(const std::string& func_name,
                                  const std::vector<ObjectRef>& outputs);

  /*!
   * \brief Internal hook for profiling the start of an op.
   *
   * This hook is only called on certain ops that are likely to take a
   * significant amount of runtime (normally because they alloc or transfer to
   * device).
   *
   * \param instr Instruction that will be executed after this hook fires.
   */
  virtual void OpStartHook(Instruction instr);

  /*!
   * \brief Internal hook for profiling the end of an op.
   */
  virtual void OpStopHook();

 private:
  /*!
   * \brief Get the index of an input tensor from its name.
   * \param func_name The function's name.
   * \param input_name The input tensor name.
   * \return The input tensor index.
   */
  int64_t GetInputIndexFromVMFunction(const std::string& func_name,
                                      const std::string& input_name) const;

  /*!
   * \brief Get the index of an input tensor from its name.
   * \param params The parameter names.
   * \param input_name The input tensor name.
   * \return The input tensor index.
   */
  int64_t GetInputIndexFromName(const std::vector<std::string>& params,
                                const std::string& input_name) const;

  /*!
   * \brief Check that the executable exists and get the VM function from it.
   * \param func_name The function's name.
   * \return The VM function.
   */
  const VMFunction& CheckAndGetVMFunction(const std::string& func_name) const;

  /*!
   * \brief Create the inputs_ field or, if it already exists, check its size.
   * \param func_name The function's name.
   * \param size The inputs_ field size.
   */
  void CreateInputsOrCheckSize(const std::string& func_name, size_t size);

  /*!
   * \brief Set one input tensor with a given index to the set of input tensors, copying it to the
   * given device if needed.
   * \param tensors The set of input tensors (destination).
   * \param tensor Some tensor (not necessarily a DLTensor).
   * \param index The input tensor index.
   * \param dev The device to copy to if needed.
   */
  void SetInputTensorWithIndex(std::vector<ObjectRef>& tensors,  // NOLINT(*)
                               const TVMArgValue& tensor, int index, Device dev);

  /*!
   * \brief Convert a tensor from TVMArgValue to ObjectRef.
   * DLTensor and NDArray types are supported.
   * \param tensor The given arg value containing a tensor.
   * \return The tensor in ObjectRef format.
   */
  ObjectRef TensorFromTVMArgValueToObjectRef(const TVMArgValue& tensor) const;

  /*!
   * \brief Get the index of the outputs in the register_file from the function code.
   * \return The result register index.
   */
  Index GetResultRegisterIndex() const;

  /*!
   * \brief Calculate the index of the operation whose destination is the result.
   * \param res_index The index of the op returning the result.
   */
  void CalculatePreResultOpIndex(Index res_index);

  /*!
   * \brief Get indices from the register_file for output tensors.
   * It helps to replace output tensors allocated in RunLoop by
   * tensors pre-allocated outside. The scenario is when `set_output` is used.
   * \return The indices from the register_file for output tensors.
   */
  std::vector<Index> GetOutputTensorRegIndices();

  /*!
   * \brief Write a newly allocated tensor to the register_file of a frame.
   * \param instr The current instruction containing shape and storage info.
   */
  void WriteAllocatedTensor(const Instruction& instr);

  /*!
   * \brief 'set_outputs_enabled' is assumed true for using this method.
   * It is expected that the result register already contains a tensor from outside, so new
   * memory is not allocated or written, but the expected shape and data type are checked.
   * For other registers the WriteAllocatedTensor method is used.
   * \param instr The current instruction containing shape and storage info.
   */
  void WriteAllocatedTensorFromOutside(const Instruction& instr);

  bool FindIndex(const std::vector<Index>& indices, Index val) const;

 protected:
  /*! \brief The virtual machine's packed function table. */
  std::vector<PackedFunc> packed_funcs_;
  /*! \brief The current stack of call frames. */
  std::vector<VMFrame> frames_;
  /*! \brief The function table index of the current function. */
  Index func_index_;
  /*! \brief The current pointer to the code section. */
  const Instruction* code_;
  /*! \brief The virtual machine PC. */
  Index pc_;
  /*! \brief The special return register. */
  ObjectRef return_register_;
  /*! \brief The executable the VM will operate on. */
  ObjectPtr<Executable> exec_;
  /*! \brief The function name to inputs mapping. */
  std::unordered_map<std::string, std::vector<ObjectRef>> inputs_;
  /*! \brief The function name to a flag enabling the scenario with set outputs. */
  std::unordered_map<std::string, bool> set_outputs_enabled_;
  /*! \brief The index of the operation whose destination is the result. */
  Index preresult_op_index_ = -1;
  /*! \brief The function name to indices of output tensors in the register file. */
  std::unordered_map<std::string, std::vector<Index>> output_tensor_reg_indices_;
  /*! \brief The function name to pre-allocated outputs mapping. */
  std::unordered_map<std::string, std::vector<ObjectRef>> outputs_;
  /*!
   * \brief The "physical" devices the VM can execute primitives on. All "device indexes"
   * are w.r.t. this vector. Each entry in this vector must match the corresponding entry
   * in the executable's "virtual" devices vector.
   */
  std::vector<Device> devices_;
  /*! \brief The cached memory allocators, one per device. */
  std::vector<Allocator*> allocators_;
  /*!
   * \brief The constant pool for runtime. It caches the device-dependent
   * objects to avoid reallocation of constants during inference.
   */
  std::vector<ObjectRef> const_pool_;
};

}  // namespace vm
}  // namespace runtime
}  // namespace tvm

#endif  // TVM_RUNTIME_VM_VM_H_
https://github.com/zk-ml/tachikoma
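Usage note (editor's sketch): the header above only declares the VM's C++ surface; in practice the VM is driven through the packed functions it registers via `GetFunction`. The names "init", "set_input", and "invoke", and the (device_type, device_id, allocator_type) triple accepted by "init", follow the stock vm.cc implementation and should be treated as assumptions, not as part of this header.

#include <tvm/runtime/vm/vm.h>

using namespace tvm::runtime;
using namespace tvm::runtime::vm;

// A minimal sketch: run "main" on CPU with a pooled allocator.
NDArray RunMain(const ObjectPtr<Executable>& exec, NDArray input) {
  auto vm = make_object<VirtualMachine>();
  vm->LoadExecutable(exec);
  Module mod(vm);
  // Assumed "init" signature: one (device_type, device_id, alloc_type) triple per device.
  mod.GetFunction("init")(static_cast<int>(kDLCPU), 0,
                          static_cast<int>(AllocatorType::kPooled));
  mod.GetFunction("set_input")("main", input);
  // "invoke" runs the dispatch loop and returns the result register's contents.
  return mod.GetFunction("invoke")("main");
}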
include/tvm/script/ir_builder/base.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
#ifndef TVM_SCRIPT_IR_BUILDER_BASE_H_
#define TVM_SCRIPT_IR_BUILDER_BASE_H_

#include <tvm/ir/expr.h>
#include <tvm/ir/function.h>
#include <tvm/node/node.h>

#include <vector>

namespace tvm {
namespace script {
namespace ir_builder {

////////////////////////////// IRBuilderFrame //////////////////////////////

/*!
 * \brief A stack frame of the IRBuilder used to keep track of the current scope.
 * Furthermore, the information stored in each stack frame can be useful for context-dependent
 * IR construction.
 *
 * \example
 *
 * The `T::MatchBuffer` below adds an element to `PrimFuncNode::buffer_map`:
 *
 * \code {.cpp}
 *
 * namespace T = tvm::script::ir_builder::tir;
 * With<PrimFuncFrame> _(...);
 * Buffer buffer = T::MatchBuffer(...);
 *
 * \endcode
 *
 * The `T::MatchBuffer` below instead generates `MatchBufferRegion` in a TIR block:
 *
 * \code {.cpp}
 *
 * namespace T = tvm::script::ir_builder::tir;
 * With<PrimFuncFrame> _(...);
 * {
 *   With<BlockFrame> _2(...);
 *   Buffer buffer = T::MatchBuffer(...);
 * }
 *
 * \endcode
 */
class IRBuilderFrameNode : public runtime::Object {
 public:
  /*! \brief A list of callbacks used when exiting the frame. */
  std::vector<runtime::TypedPackedFunc<void()>> callbacks;

  void VisitAttrs(tvm::AttrVisitor* v) {
    // `callbacks` is not visited.
  }

  static constexpr const char* _type_key = "script.ir_builder.IRBuilderFrame";
  TVM_DECLARE_BASE_OBJECT_INFO(IRBuilderFrameNode, runtime::Object);

 public:
  /*! \brief Default destructor. */
  virtual ~IRBuilderFrameNode() = default;
  /*!
   * \brief The method called when entering RAII scope.
   * \sa tvm::support::With
   */
  virtual void EnterWithScope();
  /*!
   * \brief The method called when exiting RAII scope.
   * \sa tvm::support::With
   */
  virtual void ExitWithScope();
  /*!
   * \brief Add a callback method invoked when exiting the RAII scope.
   * \param callback The callback to be added.
   */
  void AddCallback(runtime::TypedPackedFunc<void()> callback);
};

/*!
 * \brief Managed reference to an IRBuilderFrameNode.
 * \sa IRBuilderFrameNode
 */
class IRBuilderFrame : public runtime::ObjectRef {
 public:
  /*! \brief Default destructor. */
  virtual ~IRBuilderFrame() = default;
  TVM_DEFINE_MUTABLE_NOTNULLABLE_OBJECT_REF_METHODS(IRBuilderFrame, ObjectRef, IRBuilderFrameNode);

 protected:
  /*! \brief Disallow direct construction of this object. */
  IRBuilderFrame() = default;

 public:
  /*!
   * \brief Redirected to `IRBuilderFrameNode::EnterWithScope`.
   * \sa IRBuilderFrameNode::EnterWithScope
   */
  inline void EnterWithScope() {
    ICHECK(data_ != nullptr);
    static_cast<IRBuilderFrameNode*>(data_.get())->EnterWithScope();
  }
  /*!
   * \brief Redirected to `IRBuilderFrameNode::ExitWithScope`.
   * \sa IRBuilderFrameNode::ExitWithScope
   */
  inline void ExitWithScope() {
    ICHECK(data_ != nullptr);
    static_cast<IRBuilderFrameNode*>(data_.get())->ExitWithScope();
    data_.reset();
  }
};

////////////////////////////// IRBuilder //////////////////////////////

/*!
 * \brief A dialect-agnostic IRBuilder that constructs any IR of TVM.
 * An idiomatic use of this class is to put it inside an RAII with-scope and call
 * dialect-specific methods accordingly. Upon exiting the scope, the constructed
 * IR can be retrieved with the `Get` method.
 *
 * \code
 *
 * PrimFunc ConstructPrimFunc() {
 *   using tvm::script::ir_builder::IRBuilder;
 *   namespace T = tvm::script::ir_builder::tir;
 *   IRBuilder builder;
 *   // Step 1. Place IRBuilder inside the with-scope.
 *   {
 *     With<IRBuilder> _(builder);
 *     // Step 2. Call dialect-specific methods.
 *     With<T::PrimFuncFrame> _2(...);
 *     T::MatchBuffer(...);
 *   }
 *   // Step 3. Return the constructed PrimFunc.
 *   return builder->Get<PrimFunc>();
 * }
 *
 * \endcode
 */
class IRBuilderNode : public runtime::Object {
 public:
  /*! \brief A stack of context frames in the IRBuilder. */
  runtime::Array<IRBuilderFrame> frames;
  /*! \brief The outcome of IR construction. */
  Optional<ObjectRef> result;

  void VisitAttrs(tvm::AttrVisitor* v) {
    v->Visit("frames", &frames);
    v->Visit("result", &result);
  }

  static constexpr const char* _type_key = "script.ir_builder.IRBuilder";
  TVM_DECLARE_FINAL_OBJECT_INFO(IRBuilderNode, runtime::Object);

 public:
  /*!
   * \brief Find a frame of the given type in the stack `this->frames` from top to bottom.
   * \tparam T The type of the frame to find.
   * \return The frame if found, otherwise NullOpt.
   */
  template <typename TFrame>
  inline Optional<TFrame> FindFrame() const;
  /*!
   * \brief Get the frame on top of the stack `this->frames` if its type is `TFrame`.
   * \tparam TFrame The assumed type of the last frame on the stack.
   * \return The frame if the stack is non-empty and the top of the stack is of type `TFrame`.
   * Otherwise NullOpt.
   */
  template <typename TFrame>
  inline Optional<TFrame> GetLastFrame() const;
  /*!
   * \brief Get the IR being constructed.
   * \tparam TObjectRef The type of the IR being constructed.
   * \return The resulting IR. Throws an exception if the IR is not constructed yet.
   */
  template <typename TObjectRef>
  inline TObjectRef Get() const;
};

/*!
 * \brief Managed reference to an IRBuilderNode.
 * \sa IRBuilderNode
 */
class IRBuilder : public runtime::ObjectRef {
 public:
  /*! \brief Creates an IRBuilder. */
  IRBuilder();
  TVM_DEFINE_MUTABLE_NOTNULLABLE_OBJECT_REF_METHODS(IRBuilder, ObjectRef, IRBuilderNode);

 public:
  /*!
   * \brief Puts the current IRBuilder into a thread-local scope, which can be retrieved using
   * `IRBuilder::Current()`.
   *
   * \code {.cpp}
   * IRBuilder builder;
   * {
   *   With<IRBuilder> _(builder);
   *   // IRBuilder::Current() == builder
   * }
   * // IRBuilder::Current() == nullptr
   * \endcode
   *
   * \sa IRBuilder::Current
   * \sa IRBuilder::ExitWithScope
   * \sa tvm::support::With
   */
  void EnterWithScope();
  /*!
   * \brief Exit the RAII scope.
   * \sa IRBuilder::EnterWithScope
   * \sa IRBuilder::Current
   * \sa tvm::support::With
   */
  void ExitWithScope();
  /*!
   * \brief Get the current IRBuilder in the current thread-local scope.
   * \return The current IRBuilder.
   * \sa IRBuilder::EnterWithScope
   * \sa IRBuilder::ExitWithScope
   * \sa tvm::support::With
   */
  static IRBuilder Current();
  /*!
   * \brief Give a string name to the `obj`.
   * \tparam TObjectRef The type of the object to name.
   * \param name The name to give to the object.
   * \param obj The object to name.
   */
  template <class TObjectRef>
  inline static TObjectRef Name(String name, TObjectRef obj);
};

////////////////////////////// Details //////////////////////////////

namespace details {

class Namer {
 public:
  using FType = NodeFunctor<void(const ObjectRef&, String)>;
  static FType& vtable();
  static void Name(ObjectRef node, String name);
};

}  // namespace details

template <class TObjectRef>
inline TObjectRef IRBuilder::Name(String name, TObjectRef obj) {
  details::Namer::Name(obj, name);
  return Downcast<TObjectRef>(obj);
}

template <typename TFrame>
inline Optional<TFrame> IRBuilderNode::FindFrame() const {
  using TFrameNode = typename TFrame::ContainerType;
  for (auto it = frames.rbegin(); it != frames.rend(); ++it) {
    if (const TFrameNode* p = (*it).template as<TFrameNode>()) {
      return GetRef<TFrame>(p);
    }
  }
  return NullOpt;
}

template <typename TFrame>
inline Optional<TFrame> IRBuilderNode::GetLastFrame() const {
  using TFrameNode = typename TFrame::ContainerType;
  if (!frames.empty() && frames.back()->IsInstance<TFrameNode>()) {
    return Downcast<TFrame>(frames.back());
  }
  return NullOpt;
}

template <typename TObjectRef>
inline TObjectRef IRBuilderNode::Get() const {
  using TObject = typename TObjectRef::ContainerType;
  CHECK(result.defined()) << "IndexError: No result exists in IRBuilder yet";
  const auto* n = result.as<TObject>();
  CHECK(n != nullptr) << "TypeError: IRBuilder result is not of type: " << TObject::_type_key;
  return GetRef<TObjectRef>(n);
}

}  // namespace ir_builder
}  // namespace script
}  // namespace tvm

#endif  // TVM_SCRIPT_IR_BUILDER_BASE_H_
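To make the frame-stack mechanism concrete: a minimal sketch of how a dialect-specific builder routine can consult the stack via `FindFrame`. `CountOpenTIRStmts` is a hypothetical helper for illustration, not part of the TVM API; it assumes the TIR frame types declared in tir/frame.h below.

#include <tvm/script/ir_builder/base.h>
#include <tvm/script/ir_builder/tir/frame.h>

int CountOpenTIRStmts() {
  namespace ib = tvm::script::ir_builder;
  ib::IRBuilder builder = ib::IRBuilder::Current();
  // FindFrame scans `frames` from the top of the stack down and returns the
  // innermost frame of the requested type, or NullOpt if none is open.
  tvm::runtime::Optional<ib::tir::TIRFrame> frame = builder->FindFrame<ib::tir::TIRFrame>();
  if (frame.defined()) {
    return static_cast<int>(frame.value()->stmts.size());
  }
  return 0;  // No TIR frame is currently open.
}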
include/tvm/script/ir_builder/ir/frame.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
#ifndef TVM_SCRIPT_IR_BUILDER_IR_FRAME_H_
#define TVM_SCRIPT_IR_BUILDER_IR_FRAME_H_

#include <tvm/ir/expr.h>
#include <tvm/ir/function.h>
#include <tvm/node/node.h>
#include <tvm/script/ir_builder/base.h>

#include <vector>

namespace tvm {
namespace script {
namespace ir_builder {
namespace ir {

/*!
 * \brief A frame that represents the IRModule frame with functions and global variables.
 *
 * \sa IRModuleFrame
 */
class IRModuleFrameNode : public IRBuilderFrameNode {
 public:
  Array<GlobalVar> global_vars;
  Array<BaseFunc> functions;

  void VisitAttrs(tvm::AttrVisitor* v) {
    IRBuilderFrameNode::VisitAttrs(v);
    v->Visit("global_vars", &global_vars);
    v->Visit("functions", &functions);
  }

  static constexpr const char* _type_key = "script.ir_builder.IRModuleFrame";
  TVM_DECLARE_FINAL_OBJECT_INFO(IRModuleFrameNode, IRBuilderFrameNode);

 public:
  void ExitWithScope() final;
};

/*!
 * \brief Managed reference to IRModuleFrameNode.
 *
 * \sa IRModuleFrameNode
 */
class IRModuleFrame : public IRBuilderFrame {
 public:
  TVM_DEFINE_MUTABLE_NOTNULLABLE_OBJECT_REF_METHODS(IRModuleFrame, IRBuilderFrame,
                                                    IRModuleFrameNode);
};

}  // namespace ir
}  // namespace ir_builder
}  // namespace script
}  // namespace tvm

#endif  // TVM_SCRIPT_IR_BUILDER_IR_FRAME_H_
include/tvm/script/ir_builder/ir/ir.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
#ifndef TVM_SCRIPT_IR_BUILDER_IR_IR_H_
#define TVM_SCRIPT_IR_BUILDER_IR_IR_H_

#include <tvm/ir/expr.h>
#include <tvm/ir/function.h>
#include <tvm/node/node.h>
#include <tvm/script/ir_builder/ir/frame.h>

#include <vector>

namespace tvm {
namespace script {
namespace ir_builder {
namespace ir {

/*!
 * \brief The IRModule declaration statement.
 * \return The IRModuleFrame.
 */
TVM_DLL IRModuleFrame IRModule();

}  // namespace ir
}  // namespace ir_builder
}  // namespace script
}  // namespace tvm

#endif  // TVM_SCRIPT_IR_BUILDER_IR_IR_H_
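A minimal sketch of how `IRModule()` combines with the base IRBuilder: it assumes, as in the usual frame.cc implementation, that `IRModuleFrame::ExitWithScope` assembles the collected functions into a `tvm::IRModule` and stores it in the builder's `result`.

#include <tvm/ir/module.h>
#include <tvm/script/ir_builder/base.h>
#include <tvm/script/ir_builder/ir/ir.h>
#include <tvm/support/with.h>

tvm::IRModule BuildEmptyModule() {
  using tvm::script::ir_builder::IRBuilder;
  namespace I = tvm::script::ir_builder::ir;
  IRBuilder builder;
  {
    tvm::With<IRBuilder> _(builder);
    tvm::With<I::IRModuleFrame> _mod(I::IRModule());
    // Global functions declared here are collected into the module on scope exit.
  }
  return builder->Get<tvm::IRModule>();
}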
include/tvm/script/ir_builder/tir/frame.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
#ifndef TVM_SCRIPT_IR_BUILDER_TIR_FRAME_H_
#define TVM_SCRIPT_IR_BUILDER_TIR_FRAME_H_

#include <tvm/script/ir_builder/base.h>
#include <tvm/script/ir_builder/ir/frame.h>
#include <tvm/tir/stmt.h>

namespace tvm {
namespace script {
namespace ir_builder {
namespace tir {

/*!
 * \brief A base frame that represents the TIR frame with a body of statements.
 *
 * \sa TIRFrame
 */
class TIRFrameNode : public IRBuilderFrameNode {
 public:
  /*! \brief The statements within this frame. */
  Array<tvm::tir::Stmt> stmts;

  void VisitAttrs(tvm::AttrVisitor* v) {
    IRBuilderFrameNode::VisitAttrs(v);
    v->Visit("stmts", &stmts);
  }

  static constexpr const char* _type_key = "script.ir_builder.tir.TIRFrame";
  TVM_DECLARE_BASE_OBJECT_INFO(TIRFrameNode, IRBuilderFrameNode);
};

/*!
 * \brief Managed reference to TIRFrameNode.
 *
 * \sa TIRFrameNode
 */
class TIRFrame : public IRBuilderFrame {
 public:
  TVM_DEFINE_MUTABLE_NOTNULLABLE_OBJECT_REF_METHODS(TIRFrame, IRBuilderFrame, TIRFrameNode);

 protected:
  TIRFrame() = default;
};

/*!
 * \brief A frame that represents the PrimFunc containing TIR statements.
 *
 * \sa PrimFuncFrame
 */
class PrimFuncFrameNode : public TIRFrameNode {
 public:
  /*! \brief The name of the PrimFunc. */
  Optional<String> name;
  /*! \brief Function parameters. */
  Array<tvm::tir::Var> args;
  /*! \brief The return type of the function. */
  Optional<Type> ret_type;
  /*! \brief Maps some parameters to specific Buffer data structures. */
  Map<tvm::tir::Var, tvm::tir::Buffer> buffer_map;
  /*! \brief The buffer map prior to flattening. */
  Map<tvm::tir::Var, tvm::tir::Buffer> preflattened_buffer_map;
  /*! \brief Additional attributes storing the meta-data. */
  Optional<Map<String, ObjectRef>> attrs;
  /*! \brief The variable map bound to thread env. */
  Map<tvm::tir::Var, tvm::tir::IterVar> env_threads;
  /*! \brief The buffers allocated in the root block. */
  Array<tvm::tir::Buffer> root_alloc_buffers;

  void VisitAttrs(tvm::AttrVisitor* v) {
    TIRFrameNode::VisitAttrs(v);
    v->Visit("name", &name);
    v->Visit("args", &args);
    v->Visit("ret_type", &ret_type);
    v->Visit("buffer_map", &buffer_map);
    v->Visit("preflattened_buffer_map", &preflattened_buffer_map);
    v->Visit("attrs", &attrs);
    v->Visit("env_threads", &env_threads);
    v->Visit("root_alloc_buffers", &root_alloc_buffers);
  }

  static constexpr const char* _type_key = "script.ir_builder.tir.PrimFuncFrame";
  TVM_DECLARE_FINAL_OBJECT_INFO(PrimFuncFrameNode, TIRFrameNode);

 public:
  /*!
   * \brief The method called when exiting RAII scope.
   * \sa tvm::support::With
   */
  void ExitWithScope() final;
};

/*!
 * \brief Managed reference to PrimFuncFrameNode.
 *
 * \sa PrimFuncFrameNode
 */
class PrimFuncFrame : public TIRFrame {
 public:
  TVM_DEFINE_MUTABLE_NOTNULLABLE_OBJECT_REF_METHODS(PrimFuncFrame, TIRFrame, PrimFuncFrameNode);
};

/*!
 * \brief A frame that represents the block.
 *
 * \sa BlockFrame
 */
class BlockFrameNode : public TIRFrameNode {
 public:
  /*! \brief The name of the block. */
  String name;
  /*! \brief The variables of the block. */
  Array<tvm::tir::IterVar> iter_vars;
  /*! \brief The read buffer regions of the block. */
  Optional<Array<tvm::tir::BufferRegion>> reads;
  /*! \brief The write buffer regions of the block. */
  Optional<Array<tvm::tir::BufferRegion>> writes;
  /*! \brief The init statement of the block. */
  Optional<tvm::tir::Stmt> init;
  /*! \brief The buffers allocated in the block. */
  Array<tvm::tir::Buffer> alloc_buffers;
  /*! \brief The match buffer regions. */
  Array<tvm::tir::MatchBufferRegion> match_buffers;
  /*! \brief The annotations of the block. */
  Optional<Map<String, ObjectRef>> annotations;
  /*! \brief The corresponding values of the iter vars. */
  Array<PrimExpr> iter_values;
  /*!
   * \brief The predicate of the block realization; the block will only be executed when the
   * predicate is true.
   */
  Optional<PrimExpr> predicate;
  /*! \brief The flag whether to construct BlockRealize or Block. */
  bool no_realize;

  void VisitAttrs(tvm::AttrVisitor* v) {
    TIRFrameNode::VisitAttrs(v);
    v->Visit("name", &name);
    v->Visit("iter_vars", &iter_vars);
    v->Visit("reads", &reads);
    v->Visit("writes", &writes);
    v->Visit("init", &init);
    v->Visit("alloc_buffers", &alloc_buffers);
    v->Visit("match_buffers", &match_buffers);
    v->Visit("annotations", &annotations);
    v->Visit("iter_values", &iter_values);
    v->Visit("predicate", &predicate);
    v->Visit("no_realize", &no_realize);
  }

  static constexpr const char* _type_key = "script.ir_builder.tir.BlockFrame";
  TVM_DECLARE_FINAL_OBJECT_INFO(BlockFrameNode, TIRFrameNode);

 public:
  /*!
   * \brief The method called when exiting RAII scope.
   * \sa tvm::support::With
   */
  void ExitWithScope() final;
};

/*!
 * \brief Managed reference to BlockFrameNode.
 *
 * \sa BlockFrameNode
 */
class BlockFrame : public TIRFrame {
 public:
  TVM_DEFINE_MUTABLE_NOTNULLABLE_OBJECT_REF_METHODS(BlockFrame, TIRFrame, BlockFrameNode);
};

/*!
 * \brief A frame that represents the block initialization statement.
 *
 * \sa BlockInitFrame
 */
class BlockInitFrameNode : public TIRFrameNode {
 public:
  void VisitAttrs(tvm::AttrVisitor* v) { TIRFrameNode::VisitAttrs(v); }

  static constexpr const char* _type_key = "script.ir_builder.tir.BlockInitFrame";
  TVM_DECLARE_FINAL_OBJECT_INFO(BlockInitFrameNode, TIRFrameNode);

 public:
  /*!
   * \brief The method called when entering RAII scope.
   * \sa tvm::support::With
   */
  void EnterWithScope() final;
  /*!
   * \brief The method called when exiting RAII scope.
   * \sa tvm::support::With
   */
  void ExitWithScope() final;
};

/*!
 * \brief Managed reference to BlockInitFrameNode.
 *
 * \sa BlockInitFrameNode
 */
class BlockInitFrame : public TIRFrame {
 public:
  TVM_DEFINE_MUTABLE_NOTNULLABLE_OBJECT_REF_METHODS(BlockInitFrame, TIRFrame, BlockInitFrameNode);
};

/*!
 * \brief A frame that represents the for loop.
 *
 * \sa ForFrame
 */
class ForFrameNode : public TIRFrameNode {
 public:
  /*!
   * \brief Functions that generate loop nests.
   * \param loop_vars The loop variables, from outer to inner
   * \param loop_extents The loop extents that correspond to loop variables
   * \param loop_body The loop body
   * \return A stmt, the loop nest
   */
  using FMakeForLoop = runtime::TypedPackedFunc<tvm::tir::Stmt(
      Array<tvm::tir::Var> loop_vars, Array<Range> loop_extents, tvm::tir::Stmt loop_body)>;
  /*! \brief The loop variables. */
  Array<tvm::tir::Var> vars;
  /*! \brief The domains of iteration. */
  Array<Range> doms;
  /*! \brief The for loop generating function. */
  FMakeForLoop f_make_for_loop;

  void VisitAttrs(tvm::AttrVisitor* v) {
    TIRFrameNode::VisitAttrs(v);
    v->Visit("vars", &vars);
    v->Visit("doms", &doms);
    // `f_make_for_loop` is not visited.
  }

  static constexpr const char* _type_key = "script.ir_builder.tir.ForFrame";
  TVM_DECLARE_FINAL_OBJECT_INFO(ForFrameNode, TIRFrameNode);

 public:
  /*!
   * \brief The method called when exiting RAII scope.
   * \sa tvm::support::With
   */
  void ExitWithScope() final;
};

/*!
 * \brief Managed reference to ForFrameNode.
 *
 * \sa ForFrameNode
 */
class ForFrame : public TIRFrame {
 public:
  TVM_DEFINE_MUTABLE_NOTNULLABLE_OBJECT_REF_METHODS(ForFrame, TIRFrame, ForFrameNode);
};

/*!
 * \brief A frame that represents the assert statement. Proceeds if the condition is true,
 * otherwise aborts with the message.
 *
 * \sa AssertFrame
 */
class AssertFrameNode : public TIRFrameNode {
 public:
  /*! \brief The PrimExpr to test. */
  PrimExpr condition;
  /*! \brief The output error message when the assertion fails. */
  PrimExpr message;

  void VisitAttrs(tvm::AttrVisitor* v) {
    TIRFrameNode::VisitAttrs(v);
    v->Visit("condition", &condition);
    v->Visit("message", &message);
  }

  static constexpr const char* _type_key = "script.ir_builder.tir.AssertFrame";
  TVM_DECLARE_FINAL_OBJECT_INFO(AssertFrameNode, TIRFrameNode);

 public:
  /*!
   * \brief The method called when exiting RAII scope.
   * \sa tvm::support::With
   */
  void ExitWithScope() final;
};

/*!
 * \brief Managed reference to AssertFrameNode.
 *
 * \sa AssertFrameNode
 */
class AssertFrame : public TIRFrame {
 public:
  TVM_DEFINE_MUTABLE_NOTNULLABLE_OBJECT_REF_METHODS(AssertFrame, TIRFrame, AssertFrameNode);
};

/*!
 * \brief A frame that represents the let binding expression, which binds a var.
 *
 * \sa LetFrameNode
 */
class LetFrameNode : public TIRFrameNode {
 public:
  /*! \brief The variable we bind to. */
  tvm::tir::Var var;
  /*! \brief The value we bind var to. */
  PrimExpr value;

  void VisitAttrs(tvm::AttrVisitor* v) {
    TIRFrameNode::VisitAttrs(v);
    v->Visit("var", &var);
    v->Visit("value", &value);
  }

  static constexpr const char* _type_key = "script.ir_builder.tir.LetFrame";
  TVM_DECLARE_FINAL_OBJECT_INFO(LetFrameNode, TIRFrameNode);

 public:
  /*!
   * \brief The method called when exiting RAII scope.
   * \sa tvm::support::With
   */
  void ExitWithScope() final;
};

/*!
 * \brief Managed reference to LetFrameNode.
 *
 * \sa LetFrameNode
 */
class LetFrame : public TIRFrame {
 public:
  TVM_DEFINE_MUTABLE_NOTNULLABLE_OBJECT_REF_METHODS(LetFrame, TIRFrame, LetFrameNode);
};

/*!
 * \brief The LaunchThreadFrameNode.
 * \note It is used only inside a PrimFunc.
 */
class LaunchThreadFrameNode : public TIRFrameNode {
 public:
  /*! \brief The extent of the environment thread. */
  PrimExpr extent;
  /*! \brief The attribute key, either virtual_thread or thread_extent. */
  String attr_key;
  /*! \brief The iteration variable. */
  tvm::tir::IterVar iter_var;

  void VisitAttrs(tvm::AttrVisitor* v) {
    TIRFrameNode::VisitAttrs(v);
    v->Visit("extent", &extent);
    v->Visit("attr_key", &attr_key);
    v->Visit("iter_var", &iter_var);
  }

  static constexpr const char* _type_key = "script.ir_builder.tir.LaunchThreadFrame";
  TVM_DECLARE_FINAL_OBJECT_INFO(LaunchThreadFrameNode, TIRFrameNode);

 public:
  /*!
   * \brief The method called when exiting RAII scope.
   * \sa tvm::support::With
   */
  void ExitWithScope() final;
};

/*!
 * \brief Managed reference to LaunchThreadFrameNode.
 *
 * \sa LaunchThreadFrameNode
 */
class LaunchThreadFrame : public TIRFrame {
 public:
  TVM_DEFINE_MUTABLE_NOTNULLABLE_OBJECT_REF_METHODS(LaunchThreadFrame, TIRFrame,
                                                    LaunchThreadFrameNode);
};

/*!
 * \brief A frame that represents realization.
 *
 * \sa RealizeFrame
 */
class RealizeFrameNode : public TIRFrameNode {
 public:
  /*! \brief The region of buffer access. */
  tvm::tir::BufferRegion buffer_slice;
  /*! \brief The storage scope associated with this realization. */
  String storage_scope;
  /*! \brief The condition expression. */
  PrimExpr condition;

  void VisitAttrs(tvm::AttrVisitor* v) {
    TIRFrameNode::VisitAttrs(v);
    v->Visit("buffer_slice", &buffer_slice);
    v->Visit("storage_scope", &storage_scope);
    v->Visit("condition", &condition);
  }

  static constexpr const char* _type_key = "script.ir_builder.tir.RealizeFrame";
  TVM_DECLARE_FINAL_OBJECT_INFO(RealizeFrameNode, TIRFrameNode);

 public:
  /*!
   * \brief The method called when exiting RAII scope.
   * \sa tvm::support::With
   */
  void ExitWithScope() final;
};

/*!
 * \brief Managed reference to RealizeFrameNode.
 *
 * \sa RealizeFrameNode
 */
class RealizeFrame : public TIRFrame {
 public:
  TVM_DEFINE_MUTABLE_NOTNULLABLE_OBJECT_REF_METHODS(RealizeFrame, TIRFrame, RealizeFrameNode);
};

/*!
 * \brief A frame that represents the allocate statement.
 *
 * \sa AllocateFrame
 */
class AllocateFrameNode : public TIRFrameNode {
 public:
  /*! \brief The extents of the allocate. */
  Array<PrimExpr> extents;
  /*! \brief The data type of the buffer. */
  DataType dtype;
  /*! \brief The storage scope. */
  String storage_scope;
  /*! \brief The condition. */
  PrimExpr condition;
  /*! \brief Additional annotation hints. */
  Map<String, ObjectRef> annotations;
  /*! \brief The buffer var. */
  tvm::tir::Var buffer_var;

  void VisitAttrs(tvm::AttrVisitor* v) {
    TIRFrameNode::VisitAttrs(v);
    v->Visit("extents", &extents);
    v->Visit("dtype", &dtype);
    v->Visit("storage_scope", &storage_scope);
    v->Visit("condition", &condition);
    v->Visit("annotations", &annotations);
    v->Visit("buffer_var", &buffer_var);
  }

  static constexpr const char* _type_key = "script.ir_builder.tir.AllocateFrame";
  TVM_DECLARE_FINAL_OBJECT_INFO(AllocateFrameNode, TIRFrameNode);

 public:
  /*!
   * \brief The method called when exiting RAII scope.
   * \sa tvm::support::With
   */
  void ExitWithScope() final;
};

/*!
 * \brief Managed reference to AllocateFrameNode.
 *
 * \sa AllocateFrameNode
 */
class AllocateFrame : public TIRFrame {
 public:
  TVM_DEFINE_MUTABLE_NOTNULLABLE_OBJECT_REF_METHODS(AllocateFrame, TIRFrame, AllocateFrameNode);
};

/*!
 * \brief A frame that represents the allocate constant statement.
 *
 * \sa AllocateConstFrame
 */
class AllocateConstFrameNode : public TIRFrameNode {
 public:
  /*! \brief The data type of the buffer. */
  DataType dtype;
  /*! \brief The extents of the allocate. */
  Array<PrimExpr> extents;
  /*! \brief The data associated with the constant. */
  tvm::runtime::NDArray data;
  /*! \brief The buffer var. */
  tvm::tir::Var buffer_var;
  /*! \brief Additional annotations about the allocation. */
  Map<String, ObjectRef> annotations;

  void VisitAttrs(tvm::AttrVisitor* v) {
    TIRFrameNode::VisitAttrs(v);
    v->Visit("dtype", &dtype);
    v->Visit("extents", &extents);
    v->Visit("data", &data);
    v->Visit("buffer_var", &buffer_var);
    v->Visit("annotations", &annotations);
  }

  static constexpr const char* _type_key = "script.ir_builder.tir.AllocateConstFrame";
  TVM_DECLARE_FINAL_OBJECT_INFO(AllocateConstFrameNode, TIRFrameNode);

 public:
  /*!
   * \brief The method called when exiting RAII scope.
   * \sa tvm::support::With
   */
  void ExitWithScope() final;
};

/*!
 * \brief Managed reference to AllocateConstFrameNode.
 *
 * \sa AllocateConstFrameNode
 */
class AllocateConstFrame : public TIRFrame {
 public:
  TVM_DEFINE_MUTABLE_NOTNULLABLE_OBJECT_REF_METHODS(AllocateConstFrame, TIRFrame,
                                                    AllocateConstFrameNode);
};

/*!
 * \brief A frame that represents an attribute node.
 *
 * \sa AttrFrame
 */
class AttrFrameNode : public TIRFrameNode {
 public:
  /*! \brief The node to annotate the attribute. */
  ObjectRef node;
  /*! \brief Attribute type key. */
  String attr_key;
  /*! \brief The value of the attribute. */
  PrimExpr value;

  void VisitAttrs(tvm::AttrVisitor* v) {
    TIRFrameNode::VisitAttrs(v);
    v->Visit("node", &node);
    v->Visit("attr_key", &attr_key);
    v->Visit("value", &value);
  }

  static constexpr const char* _type_key = "script.ir_builder.tir.AttrFrame";
  TVM_DECLARE_FINAL_OBJECT_INFO(AttrFrameNode, TIRFrameNode);

 public:
  /*!
   * \brief The method called when exiting RAII scope.
   * \sa tvm::support::With
   */
  void ExitWithScope() final;
};

/*!
 * \brief Managed reference to AttrFrameNode.
 *
 * \sa AttrFrameNode
 */
class AttrFrame : public TIRFrame {
 public:
  TVM_DEFINE_MUTABLE_NOTNULLABLE_OBJECT_REF_METHODS(AttrFrame, TIRFrame, AttrFrameNode);
};

/*!
 * \brief A frame that represents a while loop.
 *
 * \sa WhileFrame
 */
class WhileFrameNode : public TIRFrameNode {
 public:
  /*! \brief The termination condition of the while loop. */
  PrimExpr condition;

  void VisitAttrs(tvm::AttrVisitor* v) {
    TIRFrameNode::VisitAttrs(v);
    v->Visit("condition", &condition);
  }

  static constexpr const char* _type_key = "script.ir_builder.tir.WhileFrame";
  TVM_DECLARE_FINAL_OBJECT_INFO(WhileFrameNode, TIRFrameNode);

 public:
  /*!
   * \brief The method called when exiting RAII scope.
   * \sa tvm::support::With
   */
  void ExitWithScope() final;
};

/*!
 * \brief Managed reference to WhileFrameNode.
 *
 * \sa WhileFrameNode
 */
class WhileFrame : public TIRFrame {
 public:
  TVM_DEFINE_MUTABLE_NOTNULLABLE_OBJECT_REF_METHODS(WhileFrame, TIRFrame, WhileFrameNode);
};

/*!
 * \brief A frame that represents an if statement.
 *
 * \sa IfFrame
 */
class IfFrameNode : public TIRFrameNode {
 public:
  /*! \brief The condition of the if statement. */
  PrimExpr condition;
  /*! \brief The statements in the true branch. */
  Optional<Array<tvm::tir::Stmt>> then_stmts;
  /*! \brief The statements in the false branch. */
  Optional<Array<tvm::tir::Stmt>> else_stmts;

  void VisitAttrs(tvm::AttrVisitor* v) {
    TIRFrameNode::VisitAttrs(v);
    v->Visit("condition", &condition);
    v->Visit("then_stmts", &then_stmts);
    v->Visit("else_stmts", &else_stmts);
  }

  static constexpr const char* _type_key = "script.ir_builder.tir.IfFrame";
  TVM_DECLARE_FINAL_OBJECT_INFO(IfFrameNode, TIRFrameNode);

 public:
  /*!
   * \brief The method called when exiting RAII scope.
   * \sa tvm::support::With
   */
  void ExitWithScope() final;
};

/*!
 * \brief Managed reference to IfFrameNode.
 *
 * \sa IfFrameNode
 */
class IfFrame : public TIRFrame {
 public:
  TVM_DEFINE_MUTABLE_NOTNULLABLE_OBJECT_REF_METHODS(IfFrame, TIRFrame, IfFrameNode);
};

/*!
 * \brief A frame that represents the then branch.
 *
 * \sa ThenFrame
 */
class ThenFrameNode : public TIRFrameNode {
 public:
  static constexpr const char* _type_key = "script.ir_builder.tir.ThenFrame";
  TVM_DECLARE_FINAL_OBJECT_INFO(ThenFrameNode, TIRFrameNode);

 public:
  /*!
   * \brief The method called when entering RAII scope.
   * \sa tvm::support::With
   */
  void EnterWithScope() final;
  /*!
   * \brief The method called when exiting RAII scope.
   * \sa tvm::support::With
   */
  void ExitWithScope() final;
};

/*!
 * \brief Managed reference to ThenFrameNode.
 *
 * \sa ThenFrameNode
 */
class ThenFrame : public TIRFrame {
 public:
  TVM_DEFINE_MUTABLE_NOTNULLABLE_OBJECT_REF_METHODS(ThenFrame, TIRFrame, ThenFrameNode);
};

/*!
 * \brief A frame that represents the else branch.
 *
 * \sa ElseFrame
 */
class ElseFrameNode : public TIRFrameNode {
 public:
  static constexpr const char* _type_key = "script.ir_builder.tir.ElseFrame";
  TVM_DECLARE_FINAL_OBJECT_INFO(ElseFrameNode, TIRFrameNode);

 public:
  /*!
   * \brief The method called when entering RAII scope.
   * \sa tvm::support::With
   */
  void EnterWithScope() final;
  /*!
   * \brief The method called when exiting RAII scope.
   * \sa tvm::support::With
   */
  void ExitWithScope() final;
};

/*!
 * \brief Managed reference to ElseFrameNode.
 *
 * \sa ElseFrameNode
 */
class ElseFrame : public TIRFrame {
 public:
  TVM_DEFINE_MUTABLE_NOTNULLABLE_OBJECT_REF_METHODS(ElseFrame, TIRFrame, ElseFrameNode);
};

class DeclBufferFrameNode : public TIRFrameNode {
 public:
  /*! \brief The declared buffer. */
  tvm::tir::Buffer buffer;
  /*! \brief Whether the buffer is allocated. */
  bool allocated;

  void VisitAttrs(tvm::AttrVisitor* v) {
    TIRFrameNode::VisitAttrs(v);
    v->Visit("buffer", &buffer);
    v->Visit("allocated", &allocated);
  }

  static constexpr const char* _type_key = "script.ir_builder.tir.DeclBufferFrame";
  TVM_DECLARE_FINAL_OBJECT_INFO(DeclBufferFrameNode, TIRFrameNode);

 public:
  void ExitWithScope() final;
};

class DeclBufferFrame : public TIRFrame {
 public:
  TVM_DEFINE_MUTABLE_NOTNULLABLE_OBJECT_REF_METHODS(DeclBufferFrame, TIRFrame, DeclBufferFrameNode);
};

}  // namespace tir
}  // namespace ir_builder
}  // namespace script
}  // namespace tvm

#endif  // TVM_SCRIPT_IR_BUILDER_TIR_FRAME_H_
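A minimal sketch of how the If/Then/Else frames above nest, using the builder functions declared in include/tvm/script/ir_builder/tir/ir.h (the next file). It assumes an enclosing IRBuilder and PrimFunc frame are already open; `EmitSign` is a hypothetical helper for illustration only.

#include <tvm/script/ir_builder/tir/ir.h>
#include <tvm/support/with.h>

void EmitSign(tvm::tir::Var x) {
  namespace T = tvm::script::ir_builder::tir;
  tvm::With<T::IfFrame> _if(T::If(x > 0));
  {
    tvm::With<T::ThenFrame> _then(T::Then());
    T::Evaluate(1);  // Statements emitted here are collected into then_stmts.
  }
  {
    tvm::With<T::ElseFrame> _else(T::Else());
    T::Evaluate(-1);  // Statements emitted here are collected into else_stmts.
  }
  // On exit, the IfFrame combines both branches into a single IfThenElse
  // statement and pushes it into the enclosing TIR frame.
}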
include/tvm/script/ir_builder/tir/ir.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ #ifndef TVM_SCRIPT_IR_BUILDER_TIR_IR_H_ #define TVM_SCRIPT_IR_BUILDER_TIR_IR_H_ #include <tvm/script/ir_builder/base.h> #include <tvm/script/ir_builder/tir/frame.h> #include <tvm/tir/op.h> namespace tvm { namespace script { namespace ir_builder { namespace tir { using tvm::runtime::NDArray; using tvm::tir::Buffer; using tvm::tir::Var; /*! * \brief The buffer declaration function. * \param shape The type of the buffer prior to flattening. * \param dtype The data type in the content of the buffer. * \param buffer_name The name of the buffer. * \param data The pointer to the head of the data. * \param strides The strides of each dimension. * \param elem_offset The offset in terms of number of dtype elements (including lanes). * \param storage_scope The optional storage scope of buffer data pointer. * \param align The alignment requirement of data pointer in bytes. * \param offset_factor The factor of elem_offset field. * \param buffer_type The buffer type. * \param axis_separators The separators between input axes when generating flattened output axes. * \return The declared buffer. */ Buffer BufferDecl(Array<PrimExpr> shape, DataType dtype, String buffer_name, Optional<Var> data, Optional<Array<PrimExpr>> strides, Optional<PrimExpr> elem_offset, String storage_scope, int align, int offset_factor, String buffer_type, Optional<Array<IntImm>> axis_separators); /*! * \brief The primitive function statement. * \return The PrimFuncFrame. */ PrimFuncFrame PrimFunc(); /*! * \brief The PrimFunc variable arguments adding function. * \param name The name of the variable. * \param var The variable argument. * \return The variable. */ Var Arg(String name, Var var); /*! * \brief The PrimFunc buffer arguments adding function. * \param name The name of the buffer. * \param buffer The buffer argument. * \return The buffer. */ Buffer Arg(String name, Buffer buffer); /*! * \brief The PrimFunc naming statement. * \param name The name of the PrimFunc. */ void FuncName(String name); /*! * \brief The PrimFunc annotation statement. * \param attrs The annotations of the PrimFunc. */ void FuncAttrs(Map<String, ObjectRef> attrs); /*! * \brief The PrimFunc return type statement. * \param ret_type The return type of the PrimFunc. * \return The return type. */ Type FuncRet(Type ret_type); /*! * \brief The buffer match statement. * \param param The parameter of the PrimFunc to match. * \param shape The type of the buffer prior to flattening. * \param dtype The data type in the content of the buffer. * \param data The pointer to the head of the data. * \param strides The strides of each dimension. * \param elem_offset The offset in terms of number of dtype elements (including lanes). 
* \param storage_scope The optional storage scope of buffer data pointer. * \param align The alignment requirement of data pointer in bytes. * \param offset_factor The factor of elem_offset field. * \param buffer_type The buffer type. * \param axis_separators The separators between input axes when generating flattened output axes. * \return The matched buffer. */ Buffer MatchBuffer(ObjectRef param, Array<PrimExpr> shape, DataType dtype = DataType::Float(32), Optional<Var> data = NullOpt, Array<PrimExpr> strides = {}, PrimExpr elem_offset = PrimExpr(), String storage_scope = "global", int align = -1, int offset_factor = 0, String buffer_type = "default", Array<IntImm> axis_separators = {}); /*! * \brief The pre-flattened buffer statement. * \param postflattened_buffer The original buffer to be flattened. * \param shape The type of the buffer prior to flattening. * \param dtype The data type in the content of the buffer. * \param data The pointer to the head of the data. * \param strides The strides of each dimension. * \param elem_offset The offset in terms of number of dtype elements (including lanes). * \param storage_scope The optional storage scope of buffer data pointer. * \param align The alignment requirement of data pointer in bytes. * \param offset_factor The factor of elem_offset field. * \param buffer_type The buffer type. * \param axis_separators The separators between input axes when generating flattened output axes. */ void PreflattenedBuffer(Buffer postflattened_buffer, Array<PrimExpr> shape, DataType dtype = DataType::Float(32), Optional<Var> data = NullOpt, Array<PrimExpr> strides = {}, PrimExpr elem_offset = PrimExpr(), String storage_scope = "global", int align = -1, int offset_factor = 0, String buffer_type = "default", Array<IntImm> axis_separators = {}); /*! * \brief The block declaration statement. * \param name The name of the block. * \param no_realize The flag whether to construct BlockRealize or Block. * \return The BlockFrame. */ BlockFrame Block(String name, bool no_realize = false); /*! * \brief The block initialization statement. * \return The BlockInitFrame. */ BlockInitFrame Init(); /*! * \brief The block predicate statement. * \param predicate The predicate condition. */ void Where(PrimExpr predicate); /*! * \brief The block buffer region reading statement. * \param buffer_slices The array of buffer regions to read. */ void Reads(Array<ObjectRef> buffer_slices); /*! * \brief The block buffer region writing statement. * \param buffer_slices The array of buffer regions to write. */ void Writes(Array<ObjectRef> buffer_slices); /*! * \brief The block annotation statement. * \param attrs The annotation of the block. */ void BlockAttrs(Map<String, ObjectRef> attrs); /*! * \brief The buffer allocation function. * \param shape The type of the buffer prior to flattening. * \param dtype The data type in the content of the buffer. * \param data The pointer to the head of the data. * \param strides The strides of each dimension. * \param elem_offset The offset in terms of number of dtype elements (including lanes). * \param storage_scope The optional storage scope of buffer data pointer. * \param align The alignment requirement of data pointer in bytes. * \param offset_factor The factor of elem_offset field. * \param buffer_type The buffer type. * \param axis_separators The separators between input axes when generating flattened output axes. * \return The allocated buffer. 
*/ Buffer AllocBuffer(Array<PrimExpr> shape, DataType dtype = DataType::Float(32), Optional<Var> data = NullOpt, Array<PrimExpr> strides = {}, PrimExpr elem_offset = PrimExpr(), String storage_scope = "", int align = -1, int offset_factor = 0, String buffer_type = "default", Array<IntImm> axis_separators = {}); namespace axis { /*! * \brief The spatial block axis defining function. * \param dom The domain of the iteration variable. * \param binding The binding value of the iteration variable. * \param dtype The data type of the iteration variable. * \return The iteration variable. */ Var Spatial(Range dom, PrimExpr binding, DataType dtype = DataType::Int(32)); /*! * \brief The reduced block axis defining function. * \param dom The domain of the iteration variable. * \param binding The binding value of the iteration variable. * \param dtype The data type of the iteration variable. * \return The iteration variable. */ Var Reduce(Range dom, PrimExpr binding, DataType dtype = DataType::Int(32)); /*! * \brief The scanning block axis defining function. * \param dom The domain of the iteration variable. * \param binding The binding value of the iteration variable. * \param dtype The data type of the iteration variable. * \return The iteration variable. */ Var Scan(Range dom, PrimExpr binding, DataType dtype = DataType::Int(32)); /*! * \brief The opaque block axis defining function. * \param dom The domain of the iteration variable. * \param binding The binding value of the iteration variable. * \param dtype The data type of the iteration variable. * \return The iteration variable. */ Var Opaque(Range dom, PrimExpr binding, DataType dtype = DataType::Int(32)); /*! * \brief The block axis remapping function. * \param kinds The types of the iteration variables. * \param bindings The binding values of the iteration variables. * \param dtype The data types of the iteration variables. * \return The iteration variables. */ Array<Var> Remap(String kinds, Array<PrimExpr> bindings, DataType dtype = DataType::Int(32)); } // namespace axis /*! * \brief The serial For statement. * \param start The minimum value of iteration. * \param stop The maximum value of iteration. * \param annotations The optional annotations of the For statement. * \return The ForFrame. */ ForFrame Serial(PrimExpr start, PrimExpr stop, Optional<Map<String, ObjectRef>> annotations = NullOpt); /*! * \brief The parallel For statement. * \param start The minimum value of iteration. * \param stop The maximum value of iteration. * \param annotations The optional annotations of the For statement. * \return The ForFrame. */ ForFrame Parallel(PrimExpr start, PrimExpr stop, Optional<Map<String, ObjectRef>> annotations = NullOpt); /*! * \brief The vectorized For statement. * \param start The minimum value of iteration. * \param stop The maximum value of iteration. * \param annotations The optional annotations of the For statement. * \return The ForFrame. */ ForFrame Vectorized(PrimExpr start, PrimExpr stop, Optional<Map<String, ObjectRef>> annotations = NullOpt); /*! * \brief The unrolled For statement. * \param start The minimum value of iteration. * \param stop The maximum value of iteration. * \param annotations The optional annotations of the For statement. * \return The ForFrame. */ ForFrame Unroll(PrimExpr start, PrimExpr stop, Optional<Map<String, ObjectRef>> annotations = NullOpt); /*! * \brief The thread-binding For statement. * \param start The minimum value of iteration. * \param stop The maximum value of iteration. 
* \param thread The thread axis to bind the loop variable to. * \param annotations The optional annotations of the For statement. * \return The ForFrame. */ ForFrame ThreadBinding(PrimExpr start, PrimExpr stop, String thread, Optional<Map<String, ObjectRef>> annotations = NullOpt); /*! * \brief The grid For statement. * \param extents The extents of the iteration. * \return The ForFrame. */ ForFrame Grid(Array<PrimExpr> extents); /*! * \brief The assertion statement. * \param condition The assertion condition. * \param message The error message when the assertion fails. * \return The AssertFrame. */ AssertFrame Assert(PrimExpr condition, String message); /*! * \brief The let binding. * \param var The variable to bind. * \param value The value to be bound. * \return The created LetFrame. */ LetFrame Let(Var var, PrimExpr value); /*! * \brief The realization. * \param buffer_slice The region of buffer access. * \param storage_scope The storage scope associated with this realization. * \param condition The condition expression. * \return The result RealizeFrame. */ RealizeFrame Realize(tvm::tir::BufferRegion buffer_slice, String storage_scope, PrimExpr condition); /*! * \brief The allocate node. * \param extents The extents of the allocate. * \param dtype The data type of the buffer. * \param storage_scope The storage scope. * \param condition The condition. * \param annotations Additional annotation hints. * \return The created AllocateFrame. */ AllocateFrame Allocate(Array<PrimExpr> extents, DataType dtype, String storage_scope = "", Optional<PrimExpr> condition = NullOpt, Optional<Map<String, ObjectRef>> annotations = NullOpt); /*! * \brief The allocate constant node. * \param data The data associated with the constant. * \param dtype The data type of the buffer. * \param extents The extents of the allocate. * \param annotations Additional annotation hints. * \return The created AllocateConstFrame. */ AllocateConstFrame AllocateConst(NDArray data, DataType dtype, Array<PrimExpr> extents, Optional<Map<String, ObjectRef>> annotations = NullOpt); /*! * \brief Create an attribute. * \param node The node to attach the attribute to. * \param attr_key Attribute type key. * \param value The value of the attribute. * \return The result AttrFrame. */ AttrFrame Attr(ObjectRef node, String attr_key, PrimExpr value); /*! * \brief Create a while loop. * \param condition The termination condition of the loop. * \return The result WhileFrame. */ WhileFrame While(PrimExpr condition); /*! * \brief Create an if statement. * \param condition The condition of the if statement. * \return The result IfFrame. */ IfFrame If(PrimExpr condition); /*! * \brief Create a then branch. * \return The result ThenFrame. */ ThenFrame Then(); /*! * \brief Create an else branch. * \return The result ElseFrame. */ ElseFrame Else(); /*! * \brief The buffer declaration frame. * \param shape The shape of the buffer prior to flattening. * \param dtype The data type in the content of the buffer. * \param buffer_name The name of the buffer. * \param data The pointer to the head of the data. * \param strides The strides of each dimension. * \param elem_offset The offset in terms of number of dtype elements (including lanes). * \param storage_scope The optional storage scope of buffer data pointer. * \param align The alignment requirement of data pointer in bytes. * \param offset_factor The factor of elem_offset field. * \param buffer_type The buffer type. * \param axis_separators The separators between input axes when generating flattened output axes.
* \return The declared buffer. */ DeclBufferFrame DeclBuffer(Array<PrimExpr> shape, DataType dtype, String buffer_name, Optional<Var> data, Optional<Array<PrimExpr>> strides, Optional<PrimExpr> elem_offset, String storage_scope, int align, int offset_factor, String buffer_type, Optional<Array<IntImm>> axis_separators); /*! * \brief Launch a thread. * \param var The iteration variable. * \param extent The extent of environment thread. * \return The result LaunchThreadFrame. */ LaunchThreadFrame LaunchThread(Var var, PrimExpr extent); /*! * \brief Bind a var to thread env. * \param thread_tag The thread type tag. * \return The result variable which gets bound to the thread env. */ Var EnvThread(String thread_tag); /*! * \brief Store data in a buffer. * \param buffer The buffer. * \param value The value to be stored. * \param indices The indices location to be stored. */ void BufferStore(Buffer buffer, PrimExpr value, Array<PrimExpr> indices); /*! * \brief The prefetch hint for a buffer * \param buffer The buffer to be prefetched. * \param bounds The bounds to be prefetched. */ void Prefetch(Buffer buffer, Array<Range> bounds); /*! * \brief Evaluate the input expression. * \param value The input expression to evaluate. */ void Evaluate(PrimExpr value); /*! * \brief The pointer declaration function. * \param dtype The data type of the pointer. * \param storage_scope The storage scope of the pointer. * \return The pointer. */ PrimExpr Ptr(runtime::DataType dtype, String storage_scope = "global"); #define TVM_TIR_IR_BUILDER_DEF_DTYPE_CAST(FuncName, DType) \ inline PrimExpr FuncName(Optional<PrimExpr> expr = NullOpt) { \ DataType dtype = DType; \ return expr.defined() ? tvm::cast(dtype, expr.value()) : tvm::tir::Var("", dtype); \ } #define TVM_TIR_IR_BUILDER_DEF_DTYPE_CAST_SIZES(DType, FDType) \ TVM_TIR_IR_BUILDER_DEF_DTYPE_CAST(DType##8, FDType(8)); \ TVM_TIR_IR_BUILDER_DEF_DTYPE_CAST(DType##16, FDType(16)); \ TVM_TIR_IR_BUILDER_DEF_DTYPE_CAST(DType##32, FDType(32)); \ TVM_TIR_IR_BUILDER_DEF_DTYPE_CAST(DType##64, FDType(64)); TVM_TIR_IR_BUILDER_DEF_DTYPE_CAST_SIZES(Float, DataType::Float); TVM_TIR_IR_BUILDER_DEF_DTYPE_CAST_SIZES(UInt, DataType::UInt); TVM_TIR_IR_BUILDER_DEF_DTYPE_CAST_SIZES(Int, DataType::Int); #define TVM_TIR_IR_BUILDER_DEF_DTYPE_CAST_LANES(FuncName, FDType, Size) \ TVM_TIR_IR_BUILDER_DEF_DTYPE_CAST(FuncName##x4, FDType(Size, 4)); \ TVM_TIR_IR_BUILDER_DEF_DTYPE_CAST(FuncName##x8, FDType(Size, 8)); \ TVM_TIR_IR_BUILDER_DEF_DTYPE_CAST(FuncName##x16, FDType(Size, 16)); \ TVM_TIR_IR_BUILDER_DEF_DTYPE_CAST(FuncName##x32, FDType(Size, 32)); \ TVM_TIR_IR_BUILDER_DEF_DTYPE_CAST(FuncName##x64, FDType(Size, 64)); #define TVM_TIR_IR_BUILDER_DEF_DTYPE_CAST_SIZES_LANES(DType, FDType) \ TVM_TIR_IR_BUILDER_DEF_DTYPE_CAST_LANES(DType##8, FDType, 8); \ TVM_TIR_IR_BUILDER_DEF_DTYPE_CAST_LANES(DType##16, FDType, 16); \ TVM_TIR_IR_BUILDER_DEF_DTYPE_CAST_LANES(DType##32, FDType, 32); \ TVM_TIR_IR_BUILDER_DEF_DTYPE_CAST_LANES(DType##64, FDType, 64); TVM_TIR_IR_BUILDER_DEF_DTYPE_CAST_SIZES_LANES(Float, DataType::Float); TVM_TIR_IR_BUILDER_DEF_DTYPE_CAST_SIZES_LANES(UInt, DataType::UInt); TVM_TIR_IR_BUILDER_DEF_DTYPE_CAST_SIZES_LANES(Int, DataType::Int); TVM_TIR_IR_BUILDER_DEF_DTYPE_CAST(Boolean, DataType::Bool()); TVM_TIR_IR_BUILDER_DEF_DTYPE_CAST(Handle, DataType::Handle()); TVM_TIR_IR_BUILDER_DEF_DTYPE_CAST(Void, DataType::Void()); #undef TVM_TIR_IR_BUILDER_DEF_DTYPE_CAST } // namespace tir } // namespace ir_builder } // namespace script } // namespace tvm #endif // TVM_SCRIPT_IR_BUILDER_TIR_IR_H_
https://github.com/zk-ml/tachikoma
include/tvm/script/printer.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements.  See the NOTICE file * distributed with this work for additional information * regarding copyright ownership.  The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License.  You may obtain a copy of the License at * *   http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied.  See the License for the * specific language governing permissions and limitations * under the License. */ #ifndef TVM_SCRIPT_PRINTER_H_ #define TVM_SCRIPT_PRINTER_H_ #include <tvm/node/node.h> #include <tvm/node/object_path.h> namespace tvm { namespace script { namespace printer { /*! * \brief Print an IR graph as TVMScript code. * * \param root_node The root node to print. * \param ir_name The dispatch token of the target IR, e.g., "tir", "relax". * \param ir_prefix The symbol prefix for TVMScript IR namespaces. For example, {"tir": "T"}. * \param indent_spaces Number of spaces used for indentation. * \param print_line_numbers Whether to print line numbers. * \param num_context_lines Number of context lines to print around the underlined text. * \param path_to_underline Object path to be underlined. * * \return The TVMScript code as a string. */ String Script( //     const ObjectRef& root_node,   //     String ir_name,               //     Map<String, String> ir_prefix,  //     int indent_spaces = 4,        //     bool print_line_numbers = false,  //     int num_context_lines = -1,   //     Optional<ObjectPath> path_to_underline = NullOpt  // ); } // namespace printer } // namespace script } // namespace tvm #endif  // TVM_SCRIPT_PRINTER_H_
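// ---------------------------------------------------------------------------
// Usage sketch (illustrative addition, not part of the upstream header),
// assuming "f" is an IR node (e.g. a PrimFunc) whose printer is registered
// under the dispatch token "tir":
//
//   String code = Script(f, "tir", {{"tir", "T"}});
//   LOG(INFO) << code;
//
// The defaulted parameters affect presentation only: indentation width,
// printed line numbers, and the optional underlining of one ObjectPath,
// which is how diagnostics point at a specific sub-expression.
// ---------------------------------------------------------------------------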
https://github.com/zk-ml/tachikoma
include/tvm/script/printer/doc.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements.  See the NOTICE file * distributed with this work for additional information * regarding copyright ownership.  The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License.  You may obtain a copy of the License at * *   http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied.  See the License for the * specific language governing permissions and limitations * under the License. */ #ifndef TVM_SCRIPT_PRINTER_DOC_H_ #define TVM_SCRIPT_PRINTER_DOC_H_ #include <tvm/ir/expr.h> #include <tvm/node/node.h> #include <tvm/runtime/data_type.h> #include <tvm/script/printer/traced_object.h> namespace tvm { namespace script { namespace printer { /*! * \brief The base class of all Doc. * * Doc is an intermediate representation between the IR from TVM * and the TVMScript code. * During printing, the IR graph is first translated into a Doc tree, * and the Doc tree is then translated to the target language in * text format. * * \sa Doc */ class DocNode : public Object { public: /*! * \brief The list of object paths of the source IR node. * * This is used to trace back to the IR node position where * this Doc is generated, in order to position the diagnostic * message. */ mutable Array<ObjectPath> source_paths; void VisitAttrs(AttrVisitor* v) { v->Visit("source_paths", &source_paths); } static constexpr const char* _type_key = "script.printer.Doc"; TVM_DECLARE_BASE_OBJECT_INFO(DocNode, Object); public: virtual ~DocNode() = default; }; /*! * \brief Reference type of DocNode. * * \sa DocNode */ class Doc : public ObjectRef { protected: Doc() = default; public: virtual ~Doc() = default; TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(Doc, ObjectRef, DocNode); }; class ExprDoc; /*! * \brief The base class of expression doc. * * \sa ExprDoc */ class ExprDocNode : public DocNode { public: /*! * \brief Create a doc representing attribute access on the current ExprDoc * \param attr The attribute to access. */ ExprDoc Attr(String attr) const; /*! * \brief Create a doc representing attribute access on the current ExprDoc * \param attr The attribute to access. * * The ObjectPath of attr will be pushed to the source_path of the returned * doc. */ ExprDoc Attr(TracedObject<String> attr) const; /*! * \brief Create a doc representing index access on the current ExprDoc * \param indices The indices to access. */ ExprDoc operator[](Array<Doc> indices) const; /*! * \brief Create a doc representing calling the current ExprDoc * \param args The positional arguments of the function call. */ ExprDoc Call(Array<ExprDoc, void> args) const; /*! * \brief Create a doc representing calling the current ExprDoc with keyword arguments * \param args The positional arguments of the function call. * \param kwargs_keys Keys of keyword arguments of the function call. * \param kwargs_values Values of keyword arguments of the function call. */ ExprDoc Call(Array<ExprDoc, void> args,  //              Array<String> kwargs_keys,  //              Array<ExprDoc, void> kwargs_values) const; void VisitAttrs(AttrVisitor* v) { DocNode::VisitAttrs(v); } static constexpr const char* _type_key = "script.printer.ExprDoc"; TVM_DECLARE_BASE_OBJECT_INFO(ExprDocNode, DocNode); }; /*! * \brief Reference type of ExprDocNode.
* * \sa ExprDocNode */ class ExprDoc : public Doc { protected: ExprDoc() = default; public: /*! * \brief Create a doc representing index access on the current ExprDoc * \param indices The indices to access. */ ExprDoc operator[](Array<Doc> indices) const; TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(ExprDoc, Doc, ExprDocNode); }; /*! * \brief The base class of statement doc. * * \sa StmtDoc */ class StmtDocNode : public DocNode { public: /*! * \brief The comment of this doc. * * The actual position of the comment depends on the type of Doc * and also the DocPrinter implementation. It could be on the same * line as the statement, or the line above, or inside the statement * if it spans over multiple lines. * */ mutable Optional<String> comment{NullOpt}; void VisitAttrs(AttrVisitor* v) { DocNode::VisitAttrs(v); v->Visit("comment", &comment); } static constexpr const char* _type_key = "script.printer.StmtDoc"; TVM_DECLARE_BASE_OBJECT_INFO(StmtDocNode, DocNode); }; /*! * \brief Reference type of StmtDocNode. * * \sa StmtDocNode */ class StmtDoc : public Doc { protected: StmtDoc() = default; public: TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(StmtDoc, Doc, StmtDocNode); }; /*! * \brief The container doc that holds a list of StmtDoc. * \note `StmtBlockDoc` is never used in the IR, but a temporary container that allows holding a * list of StmtDoc. * \sa StmtBlockDoc */ class StmtBlockDocNode : public DocNode { public: /*! \brief The list of statements. */ Array<StmtDoc> stmts; void VisitAttrs(AttrVisitor* v) { DocNode::VisitAttrs(v); v->Visit("stmts", &stmts); } static constexpr const char* _type_key = "script.printer.StmtBlockDoc"; TVM_DECLARE_FINAL_OBJECT_INFO(StmtBlockDocNode, DocNode); }; /*! * \brief Reference type of StmtBlockDocNode. * \sa StmtBlockDocNode */ class StmtBlockDoc : public Doc { public: /*! * \brief Constructor of StmtBlockDoc. * \param stmts The list of statements. */ explicit StmtBlockDoc(Array<StmtDoc> stmts); TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(StmtBlockDoc, Doc, StmtBlockDocNode); }; /*! * \brief Doc that represents literal value. * * \sa LiteralDoc */ class LiteralDocNode : public ExprDocNode { public: /*! * \brief the internal representation of the literal value. * * Possible actual types: * - IntImm (integer or boolean) * - FloatImm * - String * - null */ ObjectRef value; void VisitAttrs(AttrVisitor* v) { ExprDocNode::VisitAttrs(v); v->Visit("value", &value); } static constexpr const char* _type_key = "script.printer.LiteralDoc"; TVM_DECLARE_FINAL_OBJECT_INFO(LiteralDocNode, ExprDocNode); }; /*! * \brief Reference type of LiteralDocNode. * * \sa LiteralDocNode */ class LiteralDoc : public ExprDoc { protected: explicit LiteralDoc(ObjectRef value); LiteralDoc(ObjectRef value, ObjectPath object_path); public: /*! * \brief Create a LiteralDoc to represent None/null/empty value. */ static LiteralDoc None() { return LiteralDoc(ObjectRef(nullptr)); } /*! * \brief Create a LiteralDoc to represent None/null/empty value. * \param object_path The source path of the returned Doc. */ static LiteralDoc None(ObjectPath object_path) { return LiteralDoc(ObjectRef(nullptr), object_path); } /*! * \brief Create a LiteralDoc to represent integer. * \param v The integer value. */ static LiteralDoc Int(int v) { return LiteralDoc(IntImm(DataType::Int(64), v)); } /*! * \brief Create a LiteralDoc to represent integer. * \param v The integer value. * * The ObjectPath of v will be pushed to the source_path of the returned doc. 
*/ static LiteralDoc Int(const TracedObject<IntImm>& v) { return LiteralDoc(v.Get(), v.GetPath()); } /*! * \brief Create a LiteralDoc to represent integer. * \param v The integer value. * * The ObjectPath of v will be pushed to the source_path of the returned doc. */ static LiteralDoc Int(const TracedBasicValue<int>& v) { return LiteralDoc(IntImm(DataType::Int(64), v.Get()), v.GetPath()); } /*! * \brief Create a LiteralDoc to represent boolean. * \param v The boolean value. */ static LiteralDoc Boolean(bool v) { return LiteralDoc(IntImm(DataType::Bool(), v)); } /*! * \brief Create a LiteralDoc to represent boolean. * \param v The boolean value. * * The ObjectPath of v will be pushed to the source_path of the returned doc. */ static LiteralDoc Boolean(const TracedBasicValue<bool>& v) { return LiteralDoc(IntImm(DataType::Bool(), v.Get()), v.GetPath()); } /*! * \brief Create a LiteralDoc to represent float. * \param v The float value. */ static LiteralDoc Float(double v) { return LiteralDoc(FloatImm(DataType::Float(64), v)); } /*! * \brief Create a LiteralDoc to represent float. * \param v The float value. * * The ObjectPath of v will be pushed to the source_path of the returned doc. */ static LiteralDoc Float(const TracedObject<FloatImm>& v) { return LiteralDoc(v.Get(), v.GetPath()); } /*! * \brief Create a LiteralDoc to represent string. * \param v The string value. */ static LiteralDoc Str(const String& v) { return LiteralDoc(v); } /*! * \brief Create a LiteralDoc to represent string. * \param v The string value. * * The ObjectPath of v will be pushed to the source_path of the returned doc. */ static LiteralDoc Str(const TracedObject<String>& v) { return LiteralDoc(v.Get(), v.GetPath()); } TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(LiteralDoc, ExprDoc, LiteralDocNode); }; /*! * \brief Doc that represents identifier. * * \sa IdDoc */ class IdDocNode : public ExprDocNode { public: /*! \brief The name of the identifier */ String name; void VisitAttrs(AttrVisitor* v) { ExprDocNode::VisitAttrs(v); v->Visit("name", &name); } static constexpr const char* _type_key = "script.printer.IdDoc"; TVM_DECLARE_FINAL_OBJECT_INFO(IdDocNode, ExprDocNode); }; /*! * \brief Reference type of IdDocNode. * * \sa IdDocNode */ class IdDoc : public ExprDoc { public: /*! * \brief Constructor of IdDoc. * \param name The name of identifier. */ explicit IdDoc(String name); explicit IdDoc(std::nullptr_t) : ExprDoc(nullptr) {} TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(IdDoc, ExprDoc, IdDocNode); }; /*! * \brief Doc that represents attribute access on another expression. * * \sa AttrAccessDoc */ class AttrAccessDocNode : public ExprDocNode { public: /*! \brief The target expression to be accessed */ ExprDoc value{nullptr}; /*! \brief The attribute to be accessed */ String name; void VisitAttrs(AttrVisitor* v) { ExprDocNode::VisitAttrs(v); v->Visit("value", &value); v->Visit("name", &name); } static constexpr const char* _type_key = "script.printer.AttrAccessDoc"; TVM_DECLARE_FINAL_OBJECT_INFO(AttrAccessDocNode, ExprDocNode); }; /*! * \brief Reference type of AttrAccessDocNode. * * \sa AttrAccessDocNode */ class AttrAccessDoc : public ExprDoc { public: /*! * \brief Constructor of AttrAccessDoc * \param value The target expression of attribute access. * \param name The name of attribute to access. */ explicit AttrAccessDoc(ExprDoc value, String name); TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(AttrAccessDoc, ExprDoc, AttrAccessDocNode); }; /*! * \brief Doc that represents index access on another expression. 
* * \sa IndexDoc */ class IndexDocNode : public ExprDocNode { public: /*! \brief The container value to be accessed */ ExprDoc value{nullptr}; /*! * \brief The indices to access * * Possible actual types: * - ExprDoc (single point access like a[1, 2]) * - SliceDoc (slice access like a[1:5, 2]) */ Array<Doc> indices;  // Each element is union of: Slice / ExprDoc void VisitAttrs(AttrVisitor* v) { ExprDocNode::VisitAttrs(v); v->Visit("value", &value); v->Visit("indices", &indices); } static constexpr const char* _type_key = "script.printer.IndexDoc"; TVM_DECLARE_FINAL_OBJECT_INFO(IndexDocNode, ExprDocNode); }; /*! * \brief Reference type of IndexDocNode. * * \sa IndexDocNode */ class IndexDoc : public ExprDoc { public: /*! * \brief Constructor of IndexDoc * \param value The target expression of index access. * \param indices The indices to access. */ explicit IndexDoc(ExprDoc value, Array<Doc> indices); TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(IndexDoc, ExprDoc, IndexDocNode); }; /*! * \brief Doc that represents function call. * * \sa CallDoc */ class CallDocNode : public ExprDocNode { public: /*! \brief The callee of this function call */ ExprDoc callee{nullptr}; /*! \brief The positional arguments */ Array<ExprDoc> args; /*! \brief The keys of keyword arguments */ Array<String> kwargs_keys; /*! * \brief The values of keyword arguments. * * The i-th element is the value of the i-th key in `kwargs_keys`. * It must have the same length as `kwargs_keys`. */ Array<ExprDoc> kwargs_values; void VisitAttrs(AttrVisitor* v) { ExprDocNode::VisitAttrs(v); v->Visit("callee", &callee); v->Visit("args", &args); v->Visit("kwargs_keys", &kwargs_keys); v->Visit("kwargs_values", &kwargs_values); } static constexpr const char* _type_key = "script.printer.CallDoc"; TVM_DECLARE_FINAL_OBJECT_INFO(CallDocNode, ExprDocNode); }; /*! * \brief Reference type of CallDocNode. * * \sa CallDocNode */ class CallDoc : public ExprDoc { public: /*! * \brief Constructor of CallDoc * \param callee The callee of this function call. * \param args The positional arguments. * \param kwargs_keys Keys of keyword arguments. * \param kwargs_values Values of keyword arguments, must have the same length as `kwargs_keys`. */ CallDoc(ExprDoc callee, Array<ExprDoc> args, Array<String> kwargs_keys, Array<ExprDoc> kwargs_values); TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(CallDoc, ExprDoc, CallDocNode); }; /*! * \brief Doc that represents operation. * * It can be a unary, binary, or other special operator (for example, * the if-then-else expression). * * \sa OperationDoc */ class OperationDocNode : public ExprDocNode { public: enum class Kind : int32_t { // Unary operators kUnaryStart = 0, kUSub = 1,    // -x kInvert = 2,  // ~x kNot = 3,     // not x kUnaryEnd = 4, // Binary operators kBinaryStart = 5, kAdd = 6,       // + kSub = 7,       // - kMult = 8,      // * kDiv = 9,       // / kFloorDiv = 10, // // in Python kMod = 11,      // % in Python kPow = 12,      // ** in Python kLShift = 13,   // << kRShift = 14,   // >> kBitAnd = 15,   // & kBitOr = 16,    // | kBitXor = 17,   // ^ kLt = 18,       // < kLtE = 19,      // <= kEq = 20,       // == kNotEq = 21,    // != kGt = 22,       // > kGtE = 23,      // >= kAnd = 24,      // and kOr = 25,       // or kBinaryEnd = 26, // Special kSpecialStart = 27, kIfThenElse = 28,  // <operands[1]> if <operands[0]> else <operands[2]> kSpecialEnd = 29 }; /*! \brief The kind of operation (operator) */ Kind kind; /*!
\brief Operands of this expression */ Array<ExprDoc> operands; void VisitAttrs(AttrVisitor* v) { ExprDocNode::VisitAttrs(v); v->Visit("kind", &kind); v->Visit("operands", &operands); } static constexpr const char* _type_key = "script.printer.OperationDoc"; TVM_DECLARE_FINAL_OBJECT_INFO(OperationDocNode, ExprDocNode); }; /*! * \brief Reference type of OperationDocNode. * * \sa OperationDocNode */ class OperationDoc : public ExprDoc { public: /*! * \brief Constructor of OperationDoc * \param kind The kind of operation. * \param operands Operands of this expression. */ explicit OperationDoc(OperationDocNode::Kind kind, Array<ExprDoc> operands); TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(OperationDoc, ExprDoc, OperationDocNode); }; /*! * \brief Doc that represents anonymous function. * * LambdaDoc can only have positional arguments without type annotation, * and a single expression as body. * * \sa LambdaDoc */ class LambdaDocNode : public ExprDocNode { public: /*! \brief The arguments of this anonymous function */ Array<IdDoc> args; /*! \brief The body of this anonymous function */ ExprDoc body{nullptr}; void VisitAttrs(AttrVisitor* v) { ExprDocNode::VisitAttrs(v); v->Visit("args", &args); v->Visit("body", &body); } static constexpr const char* _type_key = "script.printer.LambdaDoc"; TVM_DECLARE_FINAL_OBJECT_INFO(LambdaDocNode, ExprDocNode); }; /*! * \brief Reference type of LambdaDocNode. * * \sa LambdaDocNode */ class LambdaDoc : public ExprDoc { public: /*! * \brief Constructor of LambdaDoc * \param args Arguments of this function. * \param body Body expression of this function. */ explicit LambdaDoc(Array<IdDoc> args, ExprDoc body); TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(LambdaDoc, ExprDoc, LambdaDocNode); }; /*! * \brief Doc that represents tuple literal. * * \sa TupleDoc */ class TupleDocNode : public ExprDocNode { public: /*! \brief Elements of tuple */ Array<ExprDoc> elements; void VisitAttrs(AttrVisitor* v) { ExprDocNode::VisitAttrs(v); v->Visit("elements", &elements); } static constexpr const char* _type_key = "script.printer.TupleDoc"; TVM_DECLARE_FINAL_OBJECT_INFO(TupleDocNode, ExprDocNode); }; /*! * \brief Reference type of TupleDocNode. * * \sa TupleDocNode */ class TupleDoc : public ExprDoc { public: /*! * \brief Create an empty TupleDoc */ TupleDoc() : TupleDoc(runtime::make_object<TupleDocNode>()) {} /*! * \brief Constructor of TupleDoc * \param elements Elements of tuple. */ explicit TupleDoc(Array<ExprDoc> elements); TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(TupleDoc, ExprDoc, TupleDocNode); }; /*! * \brief Doc that represents list literal. * * \sa ListDoc */ class ListDocNode : public ExprDocNode { public: /*! \brief Elements of list */ Array<ExprDoc> elements; void VisitAttrs(AttrVisitor* v) { ExprDocNode::VisitAttrs(v); v->Visit("elements", &elements); } static constexpr const char* _type_key = "script.printer.ListDoc"; TVM_DECLARE_FINAL_OBJECT_INFO(ListDocNode, ExprDocNode); }; /*! * \brief Reference type of ListDocNode. * * \sa ListDocNode */ class ListDoc : public ExprDoc { public: /*! * \brief Create an empty ListDoc */ ListDoc() : ListDoc(runtime::make_object<ListDocNode>()) {} /*! * \brief Constructor of ListDoc * \param elements Elements of list. */ explicit ListDoc(Array<ExprDoc> elements); TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(ListDoc, ExprDoc, ListDocNode); }; /*! * \brief Doc that represents dictionary literal. * * \sa DictDoc */ class DictDocNode : public ExprDocNode { public: /*! \brief Keys of dictionary */ Array<ExprDoc> keys; /*!
* \brief Values of dictionary * * The i-th element is the value of the i-th element of `keys`. * It must have the same length as `keys`. */ Array<ExprDoc> values; void VisitAttrs(AttrVisitor* v) { ExprDocNode::VisitAttrs(v); v->Visit("keys", &keys); v->Visit("values", &values); } static constexpr const char* _type_key = "script.printer.DictDoc"; TVM_DECLARE_FINAL_OBJECT_INFO(DictDocNode, ExprDocNode); }; /*! * \brief Reference type of DictDocNode. * * \sa DictDocNode */ class DictDoc : public ExprDoc { public: /*! * \brief Create an empty dictionary */ DictDoc() : DictDoc(runtime::make_object<DictDocNode>()) {} /*! * \brief Constructor of DictDoc * \param keys Keys of dictionary. * \param values Values of dictionary, must have same length as `keys`. */ explicit DictDoc(Array<ExprDoc> keys, Array<ExprDoc> values); TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(DictDoc, ExprDoc, DictDocNode); }; /*! * \brief Doc that represents slice in Index expression. * * This doc can only appear in IndexDoc::indices. * * \sa SliceDoc */ class SliceDocNode : public DocNode { public: /*! \brief The start of slice */ Optional<ExprDoc> start; /*! \brief The exclusive end of slice */ Optional<ExprDoc> stop; /*! \brief The step of slice */ Optional<ExprDoc> step; void VisitAttrs(AttrVisitor* v) { DocNode::VisitAttrs(v); v->Visit("start", &start); v->Visit("stop", &stop); v->Visit("step", &step); } static constexpr const char* _type_key = "script.printer.SliceDoc"; TVM_DECLARE_FINAL_OBJECT_INFO(SliceDocNode, DocNode); }; /*! * \brief Reference type of SliceDocNode. * * \sa SliceDocNode */ class SliceDoc : public Doc { public: /*! * \brief Constructor of SliceDoc * \param start The start of slice. * \param stop The exclusive end of slice. * \param step The step of slice. */ explicit SliceDoc(Optional<ExprDoc> start, Optional<ExprDoc> stop, Optional<ExprDoc> step); TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(SliceDoc, Doc, SliceDocNode); }; /*! * \brief Doc that represents assign statement. * * \sa AssignDoc */ class AssignDocNode : public StmtDocNode { public: /*! \brief The left hand side of the assignment */ ExprDoc lhs{nullptr}; /*! * \brief The right hand side of the assignment. * * If null, this doc represents declaration, e.g. `A: T.Buffer[(1,2)]` * */ Optional<ExprDoc> rhs; /*! \brief The type annotation of this assignment. */ Optional<ExprDoc> annotation; void VisitAttrs(AttrVisitor* v) { StmtDocNode::VisitAttrs(v); v->Visit("lhs", &lhs); v->Visit("rhs", &rhs); v->Visit("annotation", &annotation); } static constexpr const char* _type_key = "script.printer.AssignDoc"; TVM_DECLARE_FINAL_OBJECT_INFO(AssignDocNode, StmtDocNode); }; /*! * \brief Reference type of AssignDocNode. * * \sa AssignDocNode */ class AssignDoc : public StmtDoc { public: /*! * \brief Constructor of AssignDoc. * \param lhs The left hand side of the assignment. * \param rhs The right hand side of the assignment. * \param annotation The type annotation of this assignment. */ explicit AssignDoc(ExprDoc lhs, Optional<ExprDoc> rhs, Optional<ExprDoc> annotation); TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(AssignDoc, StmtDoc, AssignDocNode); }; /*! * \brief Doc that represents if-then-else statement. * * \sa IfDoc */ class IfDocNode : public StmtDocNode { public: /*! \brief The predicate of the if-then-else statement. */ ExprDoc predicate{nullptr}; /*! \brief The then branch of the if-then-else statement. */ Array<StmtDoc> then_branch; /*! \brief The else branch of the if-then-else statement.
*/ Array<StmtDoc> else_branch; void VisitAttrs(AttrVisitor* v) { StmtDocNode::VisitAttrs(v); v->Visit("predicate", &predicate); v->Visit("then_branch", &then_branch); v->Visit("else_branch", &else_branch); } static constexpr const char* _type_key = "script.printer.IfDoc"; TVM_DECLARE_FINAL_OBJECT_INFO(IfDocNode, StmtDocNode); }; /*! * \brief Reference type of IfDocNode. * * \sa IfDocNode */ class IfDoc : public StmtDoc { public: /*! * \brief Constructor of IfDoc. * \param predicate The predicate of the if-then-else statement. * \param then_branch The then branch of the if-then-else statement. * \param else_branch The else branch of the if-then-else statement. */ explicit IfDoc(ExprDoc predicate, Array<StmtDoc> then_branch, Array<StmtDoc> else_branch); TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(IfDoc, StmtDoc, IfDocNode); }; /*! * \brief Doc that represents while statement. * * \sa WhileDoc */ class WhileDocNode : public StmtDocNode { public: /*! \brief The predicate of the while statement. */ ExprDoc predicate{nullptr}; /*! \brief The body of the while statement. */ Array<StmtDoc> body; void VisitAttrs(AttrVisitor* v) { StmtDocNode::VisitAttrs(v); v->Visit("predicate", &predicate); v->Visit("body", &body); } static constexpr const char* _type_key = "script.printer.WhileDoc"; TVM_DECLARE_FINAL_OBJECT_INFO(WhileDocNode, StmtDocNode); }; /*! * \brief Reference type of WhileDocNode. * * \sa WhileDocNode */ class WhileDoc : public StmtDoc { public: /*! * \brief Constructor of WhileDoc. * \param predicate The predicate of the while statement. * \param body The body of the while statement. */ explicit WhileDoc(ExprDoc predicate, Array<StmtDoc> body); TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(WhileDoc, StmtDoc, WhileDocNode); }; /*! * \brief Doc that represents for statement. * * Example: * for 'lhs' in 'rhs': * 'body...' * * \sa ForDoc */ class ForDocNode : public StmtDocNode { public: /*! \brief The left hand side of the assignment of iterating variable. */ ExprDoc lhs{nullptr}; /*! \brief The right hand side of the assignment of iterating variable. */ ExprDoc rhs{nullptr}; /*! \brief The body of the for statement. */ Array<StmtDoc> body; void VisitAttrs(AttrVisitor* v) { StmtDocNode::VisitAttrs(v); v->Visit("lhs", &lhs); v->Visit("rhs", &rhs); v->Visit("body", &body); } static constexpr const char* _type_key = "script.printer.ForDoc"; TVM_DECLARE_FINAL_OBJECT_INFO(ForDocNode, StmtDocNode); }; /*! * \brief Reference type of ForDocNode. * * \sa ForDocNode */ class ForDoc : public StmtDoc { public: /*! * \brief Constructor of ForDoc. * \param lhs The left hand side of the assignment of iterating variable. * \param rhs The right hand side of the assignment of iterating variable. * \param body The body of the for statement. */ explicit ForDoc(ExprDoc lhs, ExprDoc rhs, Array<StmtDoc> body); TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(ForDoc, StmtDoc, ForDocNode); }; /*! * \brief Doc that represents special scopes. * * Specifically, this means the with statement in Python: * * with 'rhs' as 'lhs': * 'body...' * * \sa ScopeDoc */ class ScopeDocNode : public StmtDocNode { public: /*! \brief The name of the scoped variable. */ Optional<ExprDoc> lhs{NullOpt}; /*! \brief The value of the scoped variable. */ ExprDoc rhs{nullptr}; /*! \brief The body of the scope doc. 
*/ Array<StmtDoc> body; void VisitAttrs(AttrVisitor* v) { StmtDocNode::VisitAttrs(v); v->Visit("lhs", &lhs); v->Visit("rhs", &rhs); v->Visit("body", &body); } static constexpr const char* _type_key = "script.printer.ScopeDoc"; TVM_DECLARE_FINAL_OBJECT_INFO(ScopeDocNode, StmtDocNode); }; /*! * \brief Reference type of ScopeDocNode. * * \sa ScopeDocNode */ class ScopeDoc : public StmtDoc { public: /*! * \brief Constructor of ScopeDoc. * \param lhs The name of the scoped variable. * \param rhs The value of the scoped variable. * \param body The body of the scope doc. */ explicit ScopeDoc(Optional<ExprDoc> lhs, ExprDoc rhs, Array<StmtDoc> body); /*! * \brief Constructor of ScopeDoc. * \param rhs The value of the scoped variable. * \param body The body of the scope doc. */ explicit ScopeDoc(ExprDoc rhs, Array<StmtDoc> body); TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(ScopeDoc, StmtDoc, ScopeDocNode); }; /*! * \brief Doc that represents an expression as a statement. * * \sa ExprStmtDoc */ class ExprStmtDocNode : public StmtDocNode { public: /*! \brief The expression represented by this doc. */ ExprDoc expr{nullptr}; void VisitAttrs(AttrVisitor* v) { StmtDocNode::VisitAttrs(v); v->Visit("expr", &expr); } static constexpr const char* _type_key = "script.printer.ExprStmtDoc"; TVM_DECLARE_FINAL_OBJECT_INFO(ExprStmtDocNode, StmtDocNode); }; /*! * \brief Reference type of ExprStmtDocNode. * * \sa ExprStmtDocNode */ class ExprStmtDoc : public StmtDoc { public: /*! * \brief Constructor of ExprStmtDoc. * \param expr The expression represented by this doc. */ explicit ExprStmtDoc(ExprDoc expr); TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(ExprStmtDoc, StmtDoc, ExprStmtDocNode); }; /*! * \brief Doc that represents assert statement. * * \sa AssertDoc */ class AssertDocNode : public StmtDocNode { public: /*! \brief The expression to test. */ ExprDoc test{nullptr}; /*! \brief The optional error message when the assertion fails. */ Optional<ExprDoc> msg{NullOpt}; void VisitAttrs(AttrVisitor* v) { StmtDocNode::VisitAttrs(v); v->Visit("test", &test); v->Visit("msg", &msg); } static constexpr const char* _type_key = "script.printer.AssertDoc"; TVM_DECLARE_FINAL_OBJECT_INFO(AssertDocNode, StmtDocNode); }; /*! * \brief Reference type of AssertDocNode. * * \sa AssertDocNode */ class AssertDoc : public StmtDoc { public: /*! * \brief Constructor of AssertDoc. * \param test The expression to test. * \param msg The optional error message when the assertion fails. */ explicit AssertDoc(ExprDoc test, Optional<ExprDoc> msg = NullOpt); TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(AssertDoc, StmtDoc, AssertDocNode); }; /*! * \brief Doc that represents return statement. * * \sa ReturnDoc */ class ReturnDocNode : public StmtDocNode { public: /*! \brief The value to return. */ ExprDoc value{nullptr}; void VisitAttrs(AttrVisitor* v) { StmtDocNode::VisitAttrs(v); v->Visit("value", &value); } static constexpr const char* _type_key = "script.printer.ReturnDoc"; TVM_DECLARE_FINAL_OBJECT_INFO(ReturnDocNode, StmtDocNode); }; /*! * \brief Reference type of ReturnDocNode. * * \sa ReturnDocNode */ class ReturnDoc : public StmtDoc { public: /*! * \brief Constructor of ReturnDoc. * \param value The value to return. */ explicit ReturnDoc(ExprDoc value); TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(ReturnDoc, StmtDoc, ReturnDocNode); }; /*! * \brief Doc that represents function definition. * * \sa FunctionDoc */ class FunctionDocNode : public StmtDocNode { public: /*! \brief The name of function. */ IdDoc name{nullptr}; /*! * \brief The arguments of function.
* * The `lhs` means argument name, * `annotation` means argument type, * and `rhs` means default value. */ Array<AssignDoc> args; /*! \brief Decorators of function. */ Array<ExprDoc> decorators; /*! \brief The return type of function. */ Optional<ExprDoc> return_type{NullOpt}; /*! \brief The body of function. */ Array<StmtDoc> body; void VisitAttrs(AttrVisitor* v) { StmtDocNode::VisitAttrs(v); v->Visit("name", &name); v->Visit("args", &args); v->Visit("decorators", &decorators); v->Visit("return_type", &return_type); v->Visit("body", &body); } static constexpr const char* _type_key = "script.printer.FunctionDoc"; TVM_DECLARE_FINAL_OBJECT_INFO(FunctionDocNode, StmtDocNode); }; /*! * \brief Reference type of FunctionDocNode. * * \sa FunctionDocNode */ class FunctionDoc : public StmtDoc { public: /*! * \brief Constructor of FunctionDoc. * \param name The name of function. * \param args The arguments of function. * \param decorators The decorators of function. * \param return_type The return type of function. * \param body The body of function. */ explicit FunctionDoc(IdDoc name, Array<AssignDoc> args, Array<ExprDoc> decorators, Optional<ExprDoc> return_type, Array<StmtDoc> body); TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(FunctionDoc, StmtDoc, FunctionDocNode); }; /*! * \brief Doc that represents class definition. * * \sa ClassDoc */ class ClassDocNode : public StmtDocNode { public: /*! \brief The name of class. */ IdDoc name{nullptr}; /*! \brief Decorators of class. */ Array<ExprDoc> decorators; /*! \brief The body of class. */ Array<StmtDoc> body; void VisitAttrs(AttrVisitor* v) { StmtDocNode::VisitAttrs(v); v->Visit("name", &name); v->Visit("decorators", &decorators); v->Visit("body", &body); } static constexpr const char* _type_key = "script.printer.ClassDoc"; TVM_DECLARE_FINAL_OBJECT_INFO(ClassDocNode, StmtDocNode); }; /*! * \brief Reference type of ClassDocNode. * * \sa ClassDocNode */ class ClassDoc : public StmtDoc { public: /*! * \brief Constructor of ClassDoc. * \param name The name of class. * \param decorators The decorators of class. * \param body The body of class. */ explicit ClassDoc(IdDoc name, Array<ExprDoc> decorators, Array<StmtDoc> body); TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(ClassDoc, StmtDoc, ClassDocNode); }; } // namespace printer } // namespace script } // namespace tvm #endif  // TVM_SCRIPT_PRINTER_DOC_H_
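// ---------------------------------------------------------------------------
// Construction sketch (illustrative addition, not part of the upstream
// header). The Doc classes above form an AST of the output language; for
// example, the Python statement `x = foo(1, k=2.0)` can be modeled as:
//
//   IdDoc x("x");
//   IdDoc foo("foo");
//   ExprDoc call = foo->Call({LiteralDoc::Int(1)},      // positional args
//                            {"k"},                     // kwargs keys
//                            {LiteralDoc::Float(2.0)}); // kwargs values
//   AssignDoc stmt(x, call, NullOpt);                   // no type annotation
//
// A doc printer then renders such a tree to text; see doc_printer.h.
// ---------------------------------------------------------------------------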
https://github.com/zk-ml/tachikoma
include/tvm/script/printer/doc_printer.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ #ifndef TVM_SCRIPT_PRINTER_DOC_PRINTER_H_ #define TVM_SCRIPT_PRINTER_DOC_PRINTER_H_ #include <tvm/script/printer/doc.h> namespace tvm { namespace script { namespace printer { /*! * \brief Convert Doc into Python script. * * This function unpacks the DocPrinterOptions into function arguments * to be FFI friendly. * * \param doc Doc to be converted * \param indent_spaces Number of spaces used for indentation * \param print_line_numbers Whether to print line numbers * \param num_context_lines Number of context lines to print around the underlined text * \param path_to_underline Object path to be underlined */ String DocToPythonScript(Doc doc, int indent_spaces = 4, bool print_line_numbers = false, int num_context_lines = -1, Optional<ObjectPath> path_to_underline = NullOpt); } // namespace printer } // namespace script } // namespace tvm #endif // TVM_SCRIPT_PRINTER_DOC_PRINTER_H_
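// ---------------------------------------------------------------------------
// Usage sketch (illustrative addition, not part of the upstream header).
// Rendering a tiny Doc tree with the default cosmetics:
//
//   StmtBlockDoc doc({ExprStmtDoc(IdDoc("x"))});
//   String script = DocToPythonScript(doc);  // 4-space indent, no line numbers
//
// Passing a path_to_underline makes the printer emit an underline beneath
// the text generated from that ObjectPath, which is how diagnostics point
// at a specific part of the printed script.
// ---------------------------------------------------------------------------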
https://github.com/zk-ml/tachikoma
include/tvm/script/printer/frame.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements.  See the NOTICE file * distributed with this work for additional information * regarding copyright ownership.  The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License.  You may obtain a copy of the License at * *   http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied.  See the License for the * specific language governing permissions and limitations * under the License. */ #ifndef TVM_SCRIPT_PRINTER_FRAME_H_ #define TVM_SCRIPT_PRINTER_FRAME_H_ #include <tvm/node/node.h> #include <tvm/script/printer/doc.h> #include <functional> #include <utility> #include <vector> namespace tvm { namespace script { namespace printer { /*! * \brief Frame is the core data structure that carries semantic information * when printing an IR graph into TVMScript code. */ class FrameNode : public Object { public: void VisitAttrs(tvm::AttrVisitor* v) {} virtual ~FrameNode() = default; /*! * \brief Add a callback function to be called when this frame exits. * \param cb The callback function. It should have signature void(). */ template <typename TCallback> void AddExitCallback(TCallback&& cb) { callbacks_.emplace_back(std::forward<TCallback>(cb)); } /*! * \brief Method that's called when Frame enters the scope. */ virtual void EnterWithScope() {} /*! * \brief Method that's called when Frame exits the scope. */ virtual void ExitWithScope() { for (const std::function<void()>& callback : callbacks_) { callback(); } callbacks_.clear(); } static constexpr const char* _type_key = "script.printer.Frame"; TVM_DECLARE_BASE_OBJECT_INFO(FrameNode, Object); private: std::vector<std::function<void()>> callbacks_; }; /*! * \brief Reference type of FrameNode */ class Frame : public ObjectRef { protected: Frame() = default; public: virtual ~Frame() = default; TVM_DEFINE_MUTABLE_NOTNULLABLE_OBJECT_REF_METHODS(Frame, ObjectRef, FrameNode); }; /*! * \brief MetadataFrame contains information like the constant parameter array. */ class MetadataFrameNode : public FrameNode { public: Array<ObjectRef> metadata; void VisitAttrs(tvm::AttrVisitor* v) { FrameNode::VisitAttrs(v); v->Visit("metadata", &metadata); } static constexpr const char* _type_key = "script.printer.MetadataFrame"; TVM_DECLARE_FINAL_OBJECT_INFO(MetadataFrameNode, FrameNode); }; /*! * \brief Reference type of MetadataFrameNode */ class MetadataFrame : public Frame { public: MetadataFrame(); TVM_DEFINE_MUTABLE_NOTNULLABLE_OBJECT_REF_METHODS(MetadataFrame, Frame, MetadataFrameNode); }; /*! * \brief VarDefFrame contains information about the free variables that need to be defined * at the beginning of the printed snippet. */ class VarDefFrameNode : public FrameNode { public: Array<StmtDoc> stmts; void VisitAttrs(tvm::AttrVisitor* v) { FrameNode::VisitAttrs(v); v->Visit("stmts", &stmts); } static constexpr const char* _type_key = "script.printer.VarDefFrame"; TVM_DECLARE_FINAL_OBJECT_INFO(VarDefFrameNode, FrameNode); }; /*! * \brief Reference type of VarDefFrameNode */ class VarDefFrame : public Frame { public: VarDefFrame(); TVM_DEFINE_MUTABLE_NOTNULLABLE_OBJECT_REF_METHODS(VarDefFrame, Frame, VarDefFrameNode); }; } // namespace printer } // namespace script } // namespace tvm #endif  // TVM_SCRIPT_PRINTER_FRAME_H_
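// ---------------------------------------------------------------------------
// Usage sketch (illustrative addition, not part of the upstream header).
// A frame's exit callbacks let the printer undo per-scope bookkeeping (for
// example, variable definitions) automatically when the scope closes:
//
//   VarDefFrame frame;
//   frame->AddExitCallback([]() { LOG(INFO) << "leaving scope"; });
//   frame->EnterWithScope();
//   // ... emit the statements of the scope ...
//   frame->ExitWithScope();  // runs, then clears, the registered callbacks
//
// In practice frames are pushed and popped through IRDocsifier::WithFrame
// rather than managed by hand; see ir_docsifier.h.
// ---------------------------------------------------------------------------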
https://github.com/zk-ml/tachikoma
include/tvm/script/printer/ir_docsifier.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements.  See the NOTICE file * distributed with this work for additional information * regarding copyright ownership.  The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License.  You may obtain a copy of the License at * *   http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied.  See the License for the * specific language governing permissions and limitations * under the License. */ #ifndef TVM_SCRIPT_PRINTER_IR_DOCSIFIER_H_ #define TVM_SCRIPT_PRINTER_IR_DOCSIFIER_H_ #include <tvm/node/node.h> #include <tvm/runtime/logging.h> #include <tvm/script/printer/doc.h> #include <tvm/script/printer/frame.h> #include <tvm/script/printer/traced_object.h> #include <tvm/script/printer/traced_object_functor.h> #include <tvm/script/printer/var_table.h> #include <tvm/support/with.h> namespace tvm { namespace script { namespace printer { using WithCtx = With<ContextManager>; /*! * \brief IRDocsifier is the top-level interface in the IR->Doc process. * * It provides methods to convert an IR node object to a Doc, operate on Frame * objects, and change dispatch tokens. * * Example usage: * \code * TVM_STATIC_IR_FUNCTOR(IRDocsifier, vtable) * .set_dispatch([](TracedObject<tir::Var> obj, IRDocsifier p) { return IdDoc("x"); }); * * TracedObject<tir::Var> var = ...; * IRDocsifier p; * p->AsDoc<Doc>(var); // returns an IdDoc("x") * \endcode * */ class IRDocsifierNode : public Object { public: /*! * \brief The var table to use during the printing process. * \sa VarTableNode */ VarTable vars; /*! * \brief The stack of frames. * \sa FrameNode */ Array<Frame> frames; /*! * \brief The stack of dispatch tokens. * * The dispatch token on the top decides which dispatch function to use * when converting an IR node object to a Doc. */ Array<String> dispatch_tokens; /*! * \brief This map connects an IR dispatch token to the name of its identifier prefix. */ Map<String, String> ir_prefix; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("vars", &vars); v->Visit("frames", &frames); v->Visit("dispatch_tokens", &dispatch_tokens); v->Visit("ir_prefix", &ir_prefix); } static constexpr const char* _type_key = "script.printer.IRDocsifier"; TVM_DECLARE_FINAL_OBJECT_INFO(IRDocsifierNode, Object); public: /*! * \brief Transform the input object into TDoc. * \param obj The object to be transformed. * * \return The Doc object. */ template <class TDoc> TDoc AsDoc(const TracedObject<ObjectRef>& obj) const { auto result = Downcast<TDoc>(AsDocImpl(obj)); result->source_paths.push_back(obj.GetPath()); return result; } /*! * \brief Helper method to transform an object into ExprDoc. * \param obj The object to be transformed. * * \return The ExprDoc object. */ ExprDoc AsExprDoc(const TracedObject<ObjectRef>& obj) { return AsDoc<ExprDoc>(obj); } /*! * \brief Push a new dispatch token onto the stack * \details The top dispatch token decides which dispatch table to use * when printing Object. This method returns a RAII guard which * pops the token when going out of the scope. * * \param token The dispatch token to push. * * \return A RAII guard to pop the dispatch token when going out of scope.
*/ WithCtx WithDispatchToken(const String& token) { this->dispatch_tokens.push_back(token); return WithCtx(nullptr, [this]() { this->dispatch_tokens.pop_back(); }); } /*! * \brief Push a new frame onto the stack * \details A Frame contains the contextual information needed during printing, * for example, the variables in scope. This method returns a RAII guard which * pops the frame and calls the frame's cleanup method when going out of scope. * * \param frame The frame to push. * * \return A RAII guard to pop the frame and call its exit method * when going out of scope */ WithCtx WithFrame(const Frame& frame) { frame->EnterWithScope(); this->frames.push_back(frame); return WithCtx(nullptr, [this, pushed_frame = frame]() { Frame last_frame = this->frames.back(); ICHECK_EQ(last_frame, pushed_frame); this->frames.pop_back(); last_frame->ExitWithScope(); }); } /*! * \brief Get the top frame with type FrameType * \tparam FrameType The type of frame to get. */ template <typename FrameType> Optional<FrameType> GetFrame() const { for (auto it = frames.rbegin(); it != frames.rend(); ++it) { if (const auto* f = (*it).as<typename FrameType::ContainerType>()) { return GetRef<FrameType>(f); } } return NullOpt; } private: Doc AsDocImpl(const TracedObject<ObjectRef>& obj) const; }; /*! * \brief Reference type of IRDocsifierNode. */ class IRDocsifier : public ObjectRef { public: /*! * \brief Create an IRDocsifier. * \param ir_prefix The ir_prefix to use for this IRDocsifier. */ explicit IRDocsifier(Map<String, String> ir_prefix); using FType = TracedObjectFunctor<printer::Doc, IRDocsifier>; /*! * \brief The registration table for IRDocsifier. */ TVM_DLL static FType& vtable(); TVM_DEFINE_MUTABLE_NOTNULLABLE_OBJECT_REF_METHODS(IRDocsifier, ObjectRef, IRDocsifierNode); }; /*! * \brief A wrapper object to provide an injection point for the printer of each IR. * * For any IR node to be transformed by IRDocsifier, it will be wrapped by RootNodeContainer * and be dispatched to the corresponding function first. This provides an injection point for * each IR's printer implementation to add specialized logic, for example, pushing a special * Frame to the IRDocsifier before doing any IR->Doc transformation. * * \code * TVM_STATIC_IR_FUNCTOR(IRDocsifier, vtable) * .set_dispatch("relax", [](TracedObject<RootNodeContainer> obj, IRDocsifier p) { * const ObjectRef& root_node = obj.Get()->root_node; * // For example, the relax printer can create a Frame specialized to Relax here * RelaxGeneralFrame frame; * auto ctx = p->WithFrame(frame); * // More specialized logic for your IR. * return p->AsDoc<Doc>(MakeTraced(root_node)); * }); * \endcode */ class RootNodeContainerNode : public Object { public: /*! \brief The root node to print. */ ObjectRef root_node; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("root_node", &root_node); } static constexpr const char* _type_key = "script.printer.RootNodeContainer"; TVM_DECLARE_FINAL_OBJECT_INFO(RootNodeContainerNode, Object); }; class RootNodeContainer : public ObjectRef { public: /*! * \brief Constructor of RootNodeContainer. * \param root_node The root node to print. * */ explicit RootNodeContainer(ObjectRef root_node); TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(RootNodeContainer, ObjectRef, RootNodeContainerNode); }; } // namespace printer } // namespace script } // namespace tvm #endif  // TVM_SCRIPT_PRINTER_IR_DOCSIFIER_H_
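// ---------------------------------------------------------------------------
// Usage sketch (illustrative addition, not part of the upstream header).
// A dispatch function can combine the pieces above: look up enclosing frames
// with GetFrame, and rely on the RAII guards from WithFrame/WithDispatchToken
// to keep the stacks balanced. The tir::Var handler below is hypothetical,
// and the exact set_dispatch signature comes from traced_object_functor.h:
//
//   TVM_STATIC_IR_FUNCTOR(IRDocsifier, vtable)
//       .set_dispatch([](TracedObject<tir::Var> var, IRDocsifier p) -> Doc {
//         if (Optional<VarDefFrame> f = p->GetFrame<VarDefFrame>()) {
//           // e.g. record a definition statement in the innermost VarDefFrame
//         }
//         return IdDoc("v");
//       });
// ---------------------------------------------------------------------------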
https://github.com/zk-ml/tachikoma
include/tvm/script/printer/traced_object.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements.  See the NOTICE file * distributed with this work for additional information * regarding copyright ownership.  The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License.  You may obtain a copy of the License at * *   http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied.  See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/script/printer/traced_object.h * \brief Wrappers around TVM objects that also store an ObjectPath from some "root" object * to the wrapped object. */ #ifndef TVM_SCRIPT_PRINTER_TRACED_OBJECT_H_ #define TVM_SCRIPT_PRINTER_TRACED_OBJECT_H_ #include <tvm/node/object_path.h> #include <tvm/node/reflection.h> #include <tvm/runtime/object.h> #include <string> #include <utility> namespace tvm { template <typename RefT> class TracedObject; template <typename K, typename V> class TracedMap; template <typename T> class TracedArray; template <typename T> class TracedOptional; template <typename T> class TracedBasicValue; namespace detail { template <typename T, bool IsObject = std::is_base_of<ObjectRef, T>::value> struct TracedObjectWrapperSelector; template <typename T> struct TracedObjectWrapperSelector<T, false> { using Type = TracedBasicValue<T>; }; template <typename T> struct TracedObjectWrapperSelector<T, true> { using Type = TracedObject<T>; }; template <typename K, typename V> struct TracedObjectWrapperSelector<Map<K, V>, true> { using Type = TracedMap<K, V>; }; template <typename T> struct TracedObjectWrapperSelector<Array<T>, true> { using Type = TracedArray<T>; }; template <typename T> struct TracedObjectWrapperSelector<Optional<T>, true> { using Type = TracedOptional<T>; }; } // namespace detail /*! * \brief Traced wrapper for regular (non-container) TVM objects. */ template <typename RefT> class TracedObject { using ObjectType = typename RefT::ContainerType; public: using ObjectRefType = RefT; // Don't use this directly. For convenience, call MakeTraced() instead. explicit TracedObject(const RefT& object_ref, ObjectPath path) : ref_(object_ref), path_(std::move(path)) {} // Implicit conversion from a derived reference class template <typename DerivedRef> TracedObject(const TracedObject<DerivedRef>& derived) : ref_(derived.Get()), path_(derived.GetPath()) {} /*! * \brief Get a traced wrapper for an attribute of the wrapped object. */ template <typename T, typename BaseType> typename detail::TracedObjectWrapperSelector<T>::Type GetAttr(T BaseType::*member_ptr) const { using WrapperType = typename detail::TracedObjectWrapperSelector<T>::Type; const ObjectType* node = static_cast<const ObjectType*>(ref_.get()); const T& attr = node->*member_ptr; Optional<String> attr_key = ICHECK_NOTNULL(GetAttrKeyByAddress(node, &attr)); return WrapperType(attr, path_->Attr(attr_key)); } /*! * \brief Access the wrapped object. */ const RefT& Get() const { return ref_; } /*! * \brief Check if the reference to the wrapped object can be converted to `RefU`. */ template <typename RefU> bool IsInstance() const { return ref_->template IsInstance<typename RefU::ContainerType>(); } /*! * \brief Same as Get().defined().
*/ bool defined() const { return ref_.defined(); } /*! * \brief Convert the wrapped reference type to a subtype. * * Throws an exception if IsInstance<RefU>() is false. */ template <typename RefU> TracedObject<RefU> Downcast() const { return TracedObject<RefU>(tvm::runtime::Downcast<RefU>(ref_), path_); } /*! * \brief Convert the wrapped reference type to a subtype. * * Returns an empty optional if IsInstance<RefU>() is false. */ template <typename RefU> TracedOptional<RefU> TryDowncast() const { if (ref_->template IsInstance<typename RefU::ContainerType>()) { return Downcast<RefU>(); } else { return TracedOptional<RefU>(NullOpt, path_); } } /*! * \brief Get the path of the wrapped object. */ const ObjectPath& GetPath() const { return path_; } private: RefT ref_; ObjectPath path_; }; /*! * \brief Iterator class for TracedMap<K, V> */ template <typename K, typename V> class TracedMapIterator { public: using WrappedV = typename detail::TracedObjectWrapperSelector<V>::Type; using MapIter = typename Map<K, V>::iterator; using iterator_category = std::bidirectional_iterator_tag; using difference_type = ptrdiff_t; using value_type = const std::pair<K, WrappedV>; using pointer = value_type*; using reference = value_type; explicit TracedMapIterator(MapIter iter, ObjectPath map_path) : iter_(iter), map_path_(std::move(map_path)) {} bool operator==(const TracedMapIterator& other) const { return iter_ == other.iter_; } bool operator!=(const TracedMapIterator& other) const { return iter_ != other.iter_; } pointer operator->() const = delete; reference operator*() const { auto kv = *iter_; return std::make_pair(kv.first, WrappedV(kv.second, map_path_->MapValue(kv.first))); } TracedMapIterator& operator++() { ++iter_; return *this; } TracedMapIterator operator++(int) { TracedMapIterator copy = *this; ++(*this); return copy; } private: MapIter iter_; ObjectPath map_path_; }; /*! * \brief Traced wrapper for Map objects. */ template <typename K, typename V> class TracedMap { public: using WrappedV = typename detail::TracedObjectWrapperSelector<V>::Type; using iterator = TracedMapIterator<K, V>; // Don't use this directly. For convenience, call MakeTraced() instead. explicit TracedMap(Map<K, V> map, ObjectPath path) : map_(std::move(map)), path_(std::move(path)) {} /*! * \brief Get a value by its key, wrapped in a traced wrapper. */ WrappedV at(const K& key) const { auto it = map_.find(key); ICHECK(it != map_.end()) << "No such key in Map"; auto kv = *it; return WrappedV(kv.second, path_->MapValue(kv.first)); } /*! * \brief Access the wrapped map object. */ const Map<K, V>& Get() const { return map_; } /*! * \brief Get the path of the wrapped object. */ const ObjectPath& GetPath() const { return path_; } /*! * \brief Get an iterator to the first item of the map. */ iterator begin() const { return iterator(map_.begin(), path_); } /*! * \brief Get an iterator to the end of the map. */ iterator end() const { return iterator(map_.end(), path_); } /*! * \brief Returns true iff the wrapped map is empty. */ bool empty() const { return map_.empty(); } private: Map<K, V> map_; ObjectPath path_; }; /*!
* \brief Iterator class for TracedArray<T> */ template <typename T> class TracedArrayIterator { public: using WrappedT = typename detail::TracedObjectWrapperSelector<T>::Type; using difference_type = ptrdiff_t; using value_type = WrappedT; using pointer = WrappedT*; using reference = WrappedT&; using iterator_category = std::random_access_iterator_tag; explicit TracedArrayIterator(Array<T> array, size_t index, ObjectPath array_path) : array_(array), index_(index), array_path_(array_path) {} TracedArrayIterator& operator++() { ++index_; return *this; } TracedArrayIterator& operator--() { --index_; return *this; } TracedArrayIterator operator++(int) { TracedArrayIterator copy = *this; ++index_; return copy; } TracedArrayIterator operator--(int) { TracedArrayIterator copy = *this; --index_; return copy; } TracedArrayIterator operator+(difference_type offset) const { return TracedArrayIterator(array_, index_ + offset, array_path_); } TracedArrayIterator operator-(difference_type offset) const { return TracedArrayIterator(array_, index_ - offset, array_path_); } difference_type operator-(const TracedArrayIterator& rhs) const { return index_ - rhs.index_; } bool operator==(TracedArrayIterator other) const { return array_.get() == other.array_.get() && index_ == other.index_; } bool operator!=(TracedArrayIterator other) const { return !(*this == other); } value_type operator*() const { return WrappedT(array_[index_], array_path_->ArrayIndex(index_)); } private: Array<T> array_; size_t index_; ObjectPath array_path_; }; /*! * \brief Traced wrapper for Array objects. */ template <typename T> class TracedArray { public: using WrappedT = typename detail::TracedObjectWrapperSelector<T>::Type; using iterator = TracedArrayIterator<T>; // Don't use this directly. For convenience, call MakeTraced() instead. explicit TracedArray(Array<T> array, ObjectPath path) : array_(std::move(array)), path_(std::move(path)) {} /*! * \brief Access the wrapped array object. */ const Array<T>& Get() const { return array_; } /*! * \brief Get the path of the wrapped array object. */ const ObjectPath& GetPath() const { return path_; } /*! * \brief Get an element by index, wrapped in a traced wrapper. */ WrappedT operator[](size_t index) const { return WrappedT(array_[index], path_->ArrayIndex(index)); } /*! * \brief Get an iterator to the first array element. * * The iterator's dereference operator will automatically wrap each element in a traced wrapper. */ iterator begin() const { return iterator(array_, 0, path_); } /*! * \brief Get an iterator to the end of the array. * * The iterator's dereference operator will automatically wrap each element in a traced wrapper. */ iterator end() const { return iterator(array_, array_.size(), path_); } /*! * \brief Returns true iff the wrapped array is empty. */ bool empty() const { return array_.empty(); } /*! * \brief Get the size of the wrapped array. */ size_t size() const { return array_.size(); } private: Array<T> array_; ObjectPath path_; }; /*! * \brief Traced wrapper for Optional objects. */ template <typename T> class TracedOptional { public: using WrappedT = typename detail::TracedObjectWrapperSelector<T>::Type; /*! * \brief Implicit conversion from the corresponding non-optional traced wrapper. */ TracedOptional(const WrappedT& value) // NOLINT(runtime/explicit) : optional_(value.Get().defined() ? value.Get() : Optional<T>(NullOpt)), path_(value.GetPath()) {} // Don't use this directly. For convenience, call MakeTraced() instead.
explicit TracedOptional(Optional<T> optional, ObjectPath path) : optional_(std::move(optional)), path_(std::move(path)) {} /*! * \brief Access the wrapped optional object. */ const Optional<T>& Get() const { return optional_; } /*! * \brief Get the path of the wrapped optional object. */ const ObjectPath& GetPath() const { return path_; } /*! * \brief Returns true iff the object is present. */ bool defined() const { return optional_.defined(); } /*! * \brief Returns a non-optional traced wrapper, throws if defined() is false. */ WrappedT value() const { return WrappedT(optional_.value(), path_); } /*! * \brief Same as defined(). */ explicit operator bool() const { return optional_.defined(); } private: Optional<T> optional_; ObjectPath path_; }; /*! * \brief Traced wrapper for basic values (i.e. non-TVM objects) */ template <typename T> class TracedBasicValue { public: explicit TracedBasicValue(const T& value, ObjectPath path) : value_(value), path_(std::move(path)) {} /*! * \brief Access the wrapped value. */ const T& Get() const { return value_; } /*! * \brief Get the path of the wrapped value. */ const ObjectPath& GetPath() const { return path_; } /*! * \brief Transform the wrapped value without changing its path. */ template <typename F> typename detail::TracedObjectWrapperSelector<typename std::invoke_result<F, const T&>::type>::Type ApplyFunc(F&& f) const { return MakeTraced(f(value_), path_); } private: T value_; ObjectPath path_; }; /*! * \brief Wrap the given root object in an appropriate traced wrapper class. */ template <typename RefT> typename detail::TracedObjectWrapperSelector<RefT>::Type MakeTraced(const RefT& object) { using WrappedT = typename detail::TracedObjectWrapperSelector<RefT>::Type; return WrappedT(object, ObjectPath::Root()); } /*! * \brief Wrap the given object with the given path in an appropriate traced wrapper class. */ template <typename RefT> typename detail::TracedObjectWrapperSelector<RefT>::Type MakeTraced(const RefT& object, ObjectPath path) { using WrappedT = typename detail::TracedObjectWrapperSelector<RefT>::Type; return WrappedT(object, std::move(path)); } } // namespace tvm #endif // TVM_SCRIPT_PRINTER_TRACED_OBJECT_H_
https://github.com/zk-ml/tachikoma
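A minimal usage sketch for the wrappers above (illustrative, not part of the header): wrapping a container with MakeTraced() roots the ObjectPath, and element access extends it. The function name is hypothetical.

// Sketch: trace an Array<String> so every element remembers how it was
// reached from the root object.
#include <tvm/runtime/container/array.h>
#include <tvm/runtime/container/string.h>
#include <tvm/script/printer/traced_object.h>

void WalkNames(const tvm::runtime::Array<tvm::runtime::String>& names) {
  // MakeTraced() without an explicit path roots the trace at ObjectPath::Root().
  tvm::TracedArray<tvm::runtime::String> traced = tvm::MakeTraced(names);
  for (size_t i = 0; i < traced.size(); ++i) {
    tvm::TracedObject<tvm::runtime::String> item = traced[i];
    // item.GetPath() is Root()->ArrayIndex(i); item.Get() is the element itself.
    (void)item;
  }
}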
include/tvm/script/printer/traced_object_functor.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ #ifndef TVM_SCRIPT_PRINTER_TRACED_OBJECT_FUNCTOR_H_ #define TVM_SCRIPT_PRINTER_TRACED_OBJECT_FUNCTOR_H_ #include <tvm/node/node.h> #include <tvm/runtime/logging.h> #include <tvm/runtime/packed_func.h> #include <tvm/script/printer/traced_object.h> #include <string> #include <type_traits> #include <unordered_map> #include <utility> #include <vector> namespace tvm { namespace script { namespace printer { /* * This type alias and the following free functions are created to reduce the binary bloat * from templates and also hide implementation details from this header */ using DispatchTable = std::unordered_map<std::string, std::vector<runtime::PackedFunc>>; /*! * \brief Get function from dispatch table. * \param dispatch_table The dispatch table. * \param token The dispatch token. * \param type_index The type index of the Object type to be dispatched. * * \return The dispatch function. */ const runtime::PackedFunc& GetDispatchFunction(const DispatchTable& dispatch_table, const String& token, uint32_t type_index); /*! * \brief Set function in dispatch table. * \param dispatch_table The dispatch table. * \param token The dispatch token. * \param type_index The type index of the Object type to be dispatched. * \param f The dispatch function. */ void SetDispatchFunction(DispatchTable* dispatch_table, const String& token, uint32_t type_index, runtime::PackedFunc f); /*! * \brief Remove function from dispatch table. * \param dispatch_table The dispatch table. * \param token The dispatch token. * \param type_index The TVM object type index for the dispatch function to be removed. */ void RemoveDispatchFunction(DispatchTable* dispatch_table, const String& token, uint32_t type_index); constexpr const char* kDefaultDispatchToken = ""; /*! * \brief Dynamic dispatch functor based on TracedObject. * * This functor dispatches based on the type of object ref inside the input TracedObject, * and the input dispatch token. */ template <typename R, typename... Args> class TracedObjectFunctor { private: using TSelf = TracedObjectFunctor<R, Args...>; template <class TObjectRef, class TCallable> using IsDispatchFunction = typename std::is_convertible<TCallable, std::function<R(TracedObject<TObjectRef>, Args...)>>; public: /*! * \brief Call the dispatch function. * \param token The dispatch token. * \param traced_object The traced object. * \param args Other args. * * \return The return value of the dispatch function * * If the TObjectRef isn't registered with the token, it will try to find * a dispatch function for TObjectRef with kDefaultDispatchToken. */ template <class TObjectRef> R operator()(const String& token, TracedObject<TObjectRef> traced_object, Args...
args) const { const runtime::PackedFunc& dispatch_function = GetDispatchFunction(dispatch_table_, token, traced_object.Get()->type_index()); return dispatch_function(traced_object.Get(), traced_object.GetPath(), args...); } /*! * \brief Set the dispatch function * \param token The dispatch token. * \param type_index The TVM object type index for this dispatch function. * \param f The dispatch function. * * This takes a type-erased packed function as input. It should be used * through the FFI boundary, for example, to register a dispatch function from Python. */ TSelf& set_dispatch(String token, uint32_t type_index, runtime::PackedFunc f) { SetDispatchFunction(&dispatch_table_, token, type_index, std::move(f)); return *this; } /*! * \brief Set the dispatch function * \param token The dispatch token. * \param f The dispatch function. * * The dispatch function should have signature `R(TracedObject<TObjectRef>, Args...)`. */ template <typename TObjectRef, typename TCallable, typename = std::enable_if_t<IsDispatchFunction<TObjectRef, TCallable>::value>> TSelf& set_dispatch(String token, TCallable f) { return set_dispatch( token, // TObjectRef::ContainerType::RuntimeTypeIndex(), // runtime::TypedPackedFunc<R(TObjectRef, ObjectPath, Args...)>( [f = std::move(f)](TObjectRef object, ObjectPath path, Args... args) -> R { return f(MakeTraced(object, path), args...); })); } /*! * \brief Set the default dispatch function * \param f The dispatch function. * * The default dispatch function will be used if there is no function registered * with the requested dispatch token. * * The default dispatch function has the empty string as its dispatch token. */ template <typename TObjectRef, typename TCallable, typename = std::enable_if_t<IsDispatchFunction<TObjectRef, TCallable>::value>> TSelf& set_dispatch(TCallable&& f) { return set_dispatch<TObjectRef>(kDefaultDispatchToken, std::forward<TCallable>(f)); } /*! * \brief Remove dispatch function * \param token The dispatch token. * \param type_index The TVM object type index for the dispatch function to be removed. * * This is useful when a dispatch function comes from another language's runtime, and * those functions should be removed before that language's runtime shuts down. */ void remove_dispatch(String token, uint32_t type_index) { RemoveDispatchFunction(&dispatch_table_, token, type_index); } private: DispatchTable dispatch_table_; }; } // namespace printer } // namespace script } // namespace tvm #endif // TVM_SCRIPT_PRINTER_TRACED_OBJECT_FUNCTOR_H_
https://github.com/zk-ml/tachikoma
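As a hedged sketch of how this functor is typically wired up (the functor instance, the registered behavior, and the token below are illustrative, not an actual TVM registration):

// Sketch: register a default dispatch function for tir::Var and call it.
#include <tvm/script/printer/traced_object_functor.h>
#include <tvm/tir/var.h>

tvm::runtime::String NameOf(const tvm::tir::Var& var) {
  using tvm::script::printer::TracedObjectFunctor;
  static TracedObjectFunctor<tvm::runtime::String> functor = [] {
    TracedObjectFunctor<tvm::runtime::String> f;
    // No token given: registered under kDefaultDispatchToken ("").
    f.set_dispatch<tvm::tir::Var>(
        [](tvm::TracedObject<tvm::tir::Var> v) { return v.Get()->name_hint; });
    return f;
  }();
  // A token with no matching registration falls back to the default function.
  return functor("tir", tvm::MakeTraced(var));
}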
include/tvm/script/printer/var_table.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ #ifndef TVM_SCRIPT_PRINTER_VAR_TABLE_H_ #define TVM_SCRIPT_PRINTER_VAR_TABLE_H_ #include <tvm/node/node.h> #include <tvm/node/object_path.h> #include <tvm/script/printer/doc.h> #include <tvm/script/printer/frame.h> #include <tvm/script/printer/traced_object.h> #include <unordered_map> #include <unordered_set> namespace tvm { namespace script { namespace printer { /*! * \brief Variable Table manages the mapping from variable objects to ExprDoc during * the process of printing TVMScript. * * The value type of this map is ExprDoc rather than IdDoc or String. It's * because variables can be implicitly defined. For example in TIR buffer (tir::Buffer), * `buf->data` is a variable, while its representation in TVMScript should be an * expression `x.data`, where `x` is the variable for the buffer itself. */ class VarTableNode : public Object { public: void VisitAttrs(AttrVisitor*) {} /*! * \brief Define variable by name. * \param obj The variable object. * \param name_hint The hint for variable name. * \param object_path The object_path for the returned ExprDoc. * \param frame The frame that this variable is defined in. * * \return The id doc for this variable. * * This function will rename the variable to avoid name conflicts with other variables * in the table. */ IdDoc Define(const ObjectRef& obj, const String& name_hint, const ObjectPath& object_path, const Frame& frame); /*! * \brief Define variable by name. * \param obj The variable object. * \param name_hint The hint for variable name. * \param frame The frame that this variable is defined in. * * \return The id doc for this variable. * * This is a shortcut version of `Define` which accepts a traced string. */ IdDoc Define(const ObjectRef& obj, const TracedObject<String>& name_hint, const Frame& frame) { return Define(obj, name_hint.Get(), name_hint.GetPath(), frame); } using DocFactory = std::function<ExprDoc()>; /*! * \brief Define variable by doc factory. * \param obj The variable object. * \param doc_factory The function to return an ExprDoc object for this variable. * \param frame The frame that this variable is defined in. * * This function is a special form of `Define`. The variable is mapped to ExprDoc rather * than IdDoc. It's useful when a variable is implicitly defined without a name, like * `buf->data` in TIR, which should be mapped to `AttrDoc(IdDoc("<buffer_name>"), "data")`. * * This function takes a DocFactory instead of Doc. It's because GetVarDoc needs to * return a new Doc object every time it's called, as the returned doc will have * a different `source_path`. Currently there isn't a good way to deep copy a TVM object, * so VarTable needs to call a factory function to get a freshly-constructed Doc object * every time GetVarDoc is called.
*/ void DefineByDoc(const ObjectRef& obj, DocFactory doc_factory, const Frame& frame); /*! * \brief Get the doc for a variable. * \param obj The variable object. * \param object_path The object path for the variable. * * \return The doc for the variable, if it exists in the table. Otherwise it returns NullOpt. */ Optional<ExprDoc> GetVarDoc(const ObjectRef& obj, const ObjectPath& object_path) const; /*! * \brief Get the doc for a variable. * \param obj The traced variable object. * * \return The doc for the variable, if it exists in the table. Otherwise it returns NullOpt. */ template <typename TObjectRef> Optional<ExprDoc> GetVarDoc(const TracedObject<TObjectRef> obj) const { return GetVarDoc(obj.Get(), obj.GetPath()); } /*! * \brief Check if a variable exists in the table. * \param obj The variable object. * * \return A boolean indicating whether the variable exists. */ bool IsVarDefined(const ObjectRef& obj) const; static constexpr const char* _type_key = "script.printer.VarTable"; TVM_DECLARE_FINAL_OBJECT_INFO(VarTableNode, Object); private: void RemoveVar(const ObjectRef& obj); struct VariableInfo { DocFactory doc_factory; Optional<String> name; }; std::unordered_map<ObjectRef, VariableInfo, ObjectPtrHash, ObjectPtrEqual> obj2info; std::unordered_set<String> defined_names; }; /*! * \brief Reference type of VarTableNode. */ class VarTable : public ObjectRef { public: /*! * \brief Create an empty VarTable. */ VarTable(); TVM_DEFINE_MUTABLE_NOTNULLABLE_OBJECT_REF_METHODS(VarTable, ObjectRef, VarTableNode); }; } // namespace printer } // namespace script } // namespace tvm #endif // TVM_SCRIPT_PRINTER_VAR_TABLE_H_
https://github.com/zk-ml/tachikoma
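A hedged sketch of the intended call pattern (the variable object, the frame, and the name "x" are assumed to come from the surrounding printer and are illustrative):

// Sketch: define a variable in the table, then resolve it to a doc.
#include <tvm/script/printer/var_table.h>

tvm::script::printer::ExprDoc PrintVarUse(const tvm::ObjectRef& var,
                                          const tvm::ObjectPath& use_path,
                                          const tvm::script::printer::Frame& frame) {
  using namespace tvm::script::printer;
  VarTable vt;
  // Define() may rename "x" to avoid collisions with earlier definitions.
  IdDoc id = vt->Define(var, "x", tvm::ObjectPath::Root(), frame);
  (void)id;
  // Each lookup builds a fresh doc carrying the requested source path.
  return vt->GetVarDoc(var, use_path).value();
}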
include/tvm/support/parallel_for.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file parallel_for.h * \brief An implementation to run loops in parallel. */ #ifndef TVM_SUPPORT_PARALLEL_FOR_H_ #define TVM_SUPPORT_PARALLEL_FOR_H_ #include <tvm/runtime/c_runtime_api.h> #include <functional> #include <vector> namespace tvm { namespace support { using PartitionerFuncType = std::function<std::vector<std::vector<int>>(int, int, int, int)>; /*! * \brief A partitioner to split the task across threads in a round-robin manner. * \param begin The start index of this parallel loop (inclusive). * \param end The end index of this parallel loop (exclusive). * \param step The traversal step to the index. * \param num_threads The number of threads (the number of tasks to be partitioned to). * \return A list with `num_threads` elements, and each is a list of integers indicating the loop * indexes for the corresponding thread to process. */ TVM_DLL std::vector<std::vector<int>> rr_partitioner(int begin, int end, int step, int num_threads); /*! * \brief A runtime API provided to run the task function in parallel. * e.g. A for loop: * for (int i = 0; i < 10; i++) { * a[i] = i; * } * should work the same as: * parallel_for(0, 10, [&a](int index) { * a[index] = index; * }); * \param begin The start index of this parallel loop (inclusive). * \param end The end index of this parallel loop (exclusive). * \param f The task function to be executed. It is expected to take an int index as input and * produce no output. * \param step The traversal step to the index. * \param partitioner A partition function to split tasks to different threads. The round-robin * partitioner is used by default. * \note 1. Nested parallel_for is currently not supported. 2. The order of execution in each thread * is not guaranteed; the loop body should be thread-independent and thread-safe. */ TVM_DLL void parallel_for(int begin, int end, const std::function<void(int)>& f, int step = 1, const PartitionerFuncType partitioner = rr_partitioner); /*! * \brief An API to launch a fixed number of threads to run the specified functor in parallel. * Different from `parallel_for`, the partition is determined dynamically on the fly, * i.e. any time when a thread is idle, it fetches the next task to run. * The behavior is similar to dynamic scheduling in OpenMP: * * \#pragma omp parallel for schedule(dynamic) num_threads(num_threads) * for (int i = 0; i < 10; i++) { * a[i] = i; * } * * \param begin The start index of this parallel loop (inclusive). * \param end The end index of this parallel loop (exclusive). * \param num_threads The number of threads to be used. * \param f The task function to be executed. Takes the thread index and the task index as * input with no output. * \note `step` support is left for future work.
*/ TVM_DLL void parallel_for_dynamic(int begin, int end, int num_threads, const std::function<void(int thread_id, int task_id)>& f); } // namespace support } // namespace tvm #endif // TVM_SUPPORT_PARALLEL_FOR_H_
https://github.com/zk-ml/tachikoma
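For concreteness, a small usage sketch of both entry points declared above (the function name and thread count are illustrative):

// Sketch: fill a vector in parallel with static and dynamic scheduling.
#include <tvm/support/parallel_for.h>
#include <vector>

void FillSquares(std::vector<int>* out) {
  std::vector<int>& a = *out;
  int n = static_cast<int>(a.size());
  // Static round-robin partitioning over the loop indices.
  tvm::support::parallel_for(0, n, [&a](int i) { a[i] = i * i; });
  // Dynamic scheduling: each idle thread grabs the next task index.
  tvm::support::parallel_for_dynamic(0, n, /*num_threads=*/4,
                                     [&a](int thread_id, int task_id) {
                                       (void)thread_id;  // which worker ran the task
                                       a[task_id] += 1;
                                     });
}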
include/tvm/support/random_engine.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file random_engine.h * \brief Random number generator. It provides a generic interface consistent with * `std::uniform_random_bit_generator` */ #ifndef TVM_SUPPORT_RANDOM_ENGINE_H_ #define TVM_SUPPORT_RANDOM_ENGINE_H_ #include <tvm/runtime/logging.h> #include <cstdint> #include <random> namespace tvm { namespace support { /*! * \brief This linear congruential engine is a drop-in replacement for std::minstd_rand. It strictly * corresponds to std::minstd_rand and is designed to be platform-independent. * \note Our linear congruential engine is a complete implementation of * std::uniform_random_bit_generator so it can be used as a generator for any STL random number * distribution. However, parts of std::linear_congruential_engine's member functions are not * included for simplification. For full member functions of std::minstd_rand, please check out the * following link: https://en.cppreference.com/w/cpp/numeric/random/linear_congruential_engine */ class LinearCongruentialEngine { public: using TRandState = int64_t; /*! \brief The result type. */ using result_type = uint64_t; /*! \brief The multiplier */ static constexpr TRandState multiplier = 48271; /*! \brief The increment */ static constexpr TRandState increment = 0; /*! \brief The modulus */ static constexpr TRandState modulus = 2147483647; /*! \brief The minimum possible value of random state here. */ static constexpr result_type min() { return 0; } /*! \brief The maximum possible value of random state here. */ static constexpr result_type max() { return modulus - 1; } /*! * \brief Get a device random state * \return The random state */ static TRandState DeviceRandom() { return (std::random_device()()) % modulus; } /*! * \brief Operator to advance the random state and return the new random state. According * to the definition of a linear congruential engine, the new random state value is computed as * new_random_state = (current_random_state * multiplier + increment) % modulus. * \return The next random state value, in the type of result_type. * \note For better efficiency, the implementation here has a few assumptions: * 1. The multiplication and addition won't overflow. * 2. The given random state pointer `rand_state_ptr` is not nullptr. * 3. The given random state `*(rand_state_ptr)` is in the range of [0, modulus - 1]. */ result_type operator()() { (*rand_state_ptr_) = ((*rand_state_ptr_) * multiplier + increment) % modulus; return *rand_state_ptr_; } /*! * \brief Normalize the random seed to the range of [1, modulus - 1]. * \param rand_state The random seed. * \return The normalized random seed.
*/ static TRandState NormalizeSeed(TRandState rand_state) { if (rand_state == -1) { rand_state = DeviceRandom(); } else { rand_state %= modulus; } if (rand_state == 0) { rand_state = 1; } if (rand_state < 0) { LOG(FATAL) << "ValueError: Random seed must be non-negative"; } return rand_state; } /*! * \brief Re-seed the RNG with a new random state value. * \param rand_state The random state, given as TRandState. */ void Seed(TRandState rand_state) { ICHECK(rand_state_ptr_ != nullptr); *rand_state_ptr_ = NormalizeSeed(rand_state); } /*! * \brief Fork a new seed for another RNG from the current random state. * \return The forked seed. */ TRandState ForkSeed() { // For reproducibility, we compute the new seed using RNG's random state and a // different set of parameters. Note that both 32767 and 1999999973 are prime numbers. return ((*this)() * 32767) % 1999999973; } /*! * \brief Construct a random number generator with a random state pointer. * \param rand_state_ptr The random state pointer, given as TRandState*. * \note The random state is not checked for whether it's nullptr and whether it's in the range of * [0, modulus-1]. We assume the given random state is valid or the Seed function would be * called right after the constructor before any usage. */ explicit LinearCongruentialEngine(TRandState* rand_state_ptr) { rand_state_ptr_ = rand_state_ptr; } private: TRandState* rand_state_ptr_; }; } // namespace support } // namespace tvm #endif // TVM_SUPPORT_RANDOM_ENGINE_H_
https://github.com/zk-ml/tachikoma
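A short usage sketch: since the engine models std::uniform_random_bit_generator, STL distributions accept it directly (the seed value and function name are arbitrary):

// Sketch: seed the engine and draw from an STL distribution.
#include <tvm/support/random_engine.h>
#include <random>

int32_t SampleDigit() {
  using tvm::support::LinearCongruentialEngine;
  // The state lives outside the engine and must outlive it.
  LinearCongruentialEngine::TRandState state =
      LinearCongruentialEngine::NormalizeSeed(/*rand_state=*/42);
  LinearCongruentialEngine engine(&state);
  std::uniform_int_distribution<int32_t> dist(0, 9);
  return dist(engine);
}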
include/tvm/support/span.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * * \file tvm/support/span.h * \brief Reimplementation of part of the C++20-style span. */ #ifndef TVM_SUPPORT_SPAN_H_ #define TVM_SUPPORT_SPAN_H_ #include <cstddef> #include <iterator> #include <type_traits> #include <vector> namespace tvm { namespace support { /*! * \brief A partial implementation of the C++20 std::span. * * At the time of writing, TVM must compile against C++17. */ template <class T, class W> class Span { public: using value_type = W; using const_W = typename std::add_const<W>::type; template <class W1> class iterator_base { public: using iterator_category = std::input_iterator_tag; using value_type = W; using difference_type = std::ptrdiff_t; using pointer = const W*; using reference = const W&; inline iterator_base(T* ptr, T* end) : ptr_{ptr}, end_{end} { CHECK_GE(end, ptr); } inline W1 operator*() { return W1(*ptr_); } inline iterator_base<W1>& operator++() { if (ptr_ != end_) ptr_++; return *this; } inline bool operator==(iterator_base<W1> other) { return ptr_ == other.ptr_ && end_ == other.end_; } inline bool operator!=(iterator_base<W1> other) { return !(*this == other); } template <class X = W1, typename = std::enable_if_t<!std::is_const<X>::value>> inline operator iterator_base<const_W>() const { return iterator_base<const_W>(ptr_, end_); } private: T* ptr_; T* end_; }; using iterator = iterator_base<W>; using const_iterator = iterator_base<const_W>; inline Span(T* begin, int num_elements) : begin_{begin}, end_{begin + num_elements} {} inline Span(T* begin, T* end) : begin_{begin}, end_{end} {} inline iterator begin() const { return iterator(begin_, end_); } inline iterator end() const { return iterator(end_, end_); } size_t size() const { return end_ - begin_; } inline W operator[](int i) { T* to_return = begin_ + i; ICHECK_LT(to_return, end_) << "Span access out of bounds: " << i; return W(*to_return); } inline operator std::vector<W>() { return std::vector<W>(begin(), end()); } protected: T* begin_; T* end_; }; } // namespace support } // namespace tvm #endif // TVM_SUPPORT_SPAN_H_
https://github.com/zk-ml/tachikoma
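A brief usage sketch (T and W are both int here, so the per-element wrapper conversion is a no-op; the function name is illustrative):

// Sketch: a non-owning, bounds-checked view over a raw buffer.
#include <tvm/support/span.h>
#include <vector>

int SumAll(const std::vector<int>& v) {
  tvm::support::Span<const int, int> span(v.data(), static_cast<int>(v.size()));
  int total = 0;
  for (int x : span) total += x;  // iteration converts each element to W
  return total;
}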
include/tvm/support/with.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/support/with.h * \brief RAII wrapper function to enter and exit a context object * similar to Python's with syntax. */ #ifndef TVM_SUPPORT_WITH_H_ #define TVM_SUPPORT_WITH_H_ #include <dmlc/common.h> #include <functional> #include <utility> namespace tvm { /*! * \brief RAII wrapper function to enter and exit a context object * similar to Python's with syntax. * * \code * // context class * class MyContext { * private: * friend class With<MyContext>; MyContext(arguments); * void EnterWithScope(); * void ExitWithScope(); * }; * * { * With<MyContext> scope(arguments); * // effects take place. * } * \endcode * * \tparam ContextType Type of the context object. */ template <typename ContextType> class With { public: /*! * \brief constructor. * Enter the scope of the context. */ template <typename... Args> explicit With(Args&&... args) : ctx_(std::forward<Args>(args)...) { ctx_.EnterWithScope(); } /*! \brief destructor, leaves the scope of the context. */ ~With() DMLC_THROW_EXCEPTION { ctx_.ExitWithScope(); } // Disable copy and move construction. `With` is intended only for // use in nested contexts that are exited in the reverse order of // entry. Allowing context to be copied or moved would break this // expectation. With(const With& other) = delete; With& operator=(const With& other) = delete; With(With&& other) = delete; With& operator=(With&& other) = delete; ContextType* get() { return &ctx_; } const ContextType* get() const { return &ctx_; } ContextType* operator->() { return get(); } const ContextType* operator->() const { return get(); } ContextType& operator*() { return *get(); } const ContextType& operator*() const { return *get(); } ContextType operator()() { return ctx_; } private: /*! \brief internal context type. */ ContextType ctx_; }; /*! * \brief A context type that delegates EnterWithScope and ExitWithScope * to user-provided functions. */ class ContextManager { public: /*! * \brief Constructor of ContextManager. * \param f_enter The function to call when entering scope. If it's nullptr, do nothing when * entering. * \param f_exit The function to call when exiting scope. If it's nullptr, do nothing * when exiting. */ template <class FEnter, class FExit> explicit ContextManager(FEnter f_enter, FExit f_exit) : f_enter_(f_enter), f_exit_(f_exit) {} private: void EnterWithScope() { if (f_enter_) f_enter_(); } void ExitWithScope() { if (f_exit_) f_exit_(); } std::function<void()> f_enter_; std::function<void()> f_exit_; template <typename> friend class With; }; } // namespace tvm #endif // TVM_SUPPORT_WITH_H_
https://github.com/zk-ml/tachikoma
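A usage sketch pairing With<> with ContextManager for an ad-hoc scope (the printed strings are illustrative):

// Sketch: enter/exit callbacks run in RAII order around the scope body.
#include <tvm/support/with.h>
#include <iostream>

void Demo() {
  tvm::With<tvm::ContextManager> scope([] { std::cout << "enter\n"; },
                                       [] { std::cout << "exit\n"; });
  std::cout << "inside\n";
}  // prints: enter, inside, exit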
include/tvm/target/codegen.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/target/codegen.h * \brief Translates IRModule to runtime::Module. */ #ifndef TVM_TARGET_CODEGEN_H_ #define TVM_TARGET_CODEGEN_H_ #include <tvm/ir/module.h> #include <tvm/runtime/packed_func.h> #include <tvm/target/target.h> #include <tvm/tir/expr.h> #include <string> namespace tvm { /*! \brief namespace for target translation and codegen. */ namespace codegen { // use packed function from runtime. using runtime::PackedFunc; using runtime::TVMArgs; using runtime::TVMRetValue; /*! * \brief Build a module from an IRModule of lowered functions. * \param mod The IRModule to be built. * \param target The target to be built. * \return The result runtime::Module. */ runtime::Module Build(IRModule mod, Target target); /*! * \brief Pack the imported device library into a C file. * Compiling the C file and linking it with the host library * will allow the DSO loader to automatically discover and import * the dependency from the shared library. * * \param m The host module with the imports. * \param system_lib Whether to expose as a system library. * \return The C string representation of the file. */ std::string PackImportsToC(const runtime::Module& m, bool system_lib); /*! * \brief Pack the imported device library into an LLVM module. * Compiling the LLVM module and linking it with the host library * will allow the DSO loader to automatically discover and import * the dependency from the shared library. * * \param m The host module with the imports. * \param system_lib Whether to expose as a system library. * \param target_triple LLVM target triple * \return The generated LLVM module. */ runtime::Module PackImportsToLLVM(const runtime::Module& m, bool system_lib, const std::string& target_triple); } // namespace codegen } // namespace tvm #endif // TVM_TARGET_CODEGEN_H_
https://github.com/zk-ml/tachikoma
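A hedged sketch of the main entry point (assumes `mod` already holds lowered TIR PrimFuncs; the target string is one common choice and the function name is illustrative):

// Sketch: translate a lowered IRModule into a runtime module for LLVM.
#include <tvm/target/codegen.h>

tvm::runtime::Module BuildForLLVM(tvm::IRModule mod) {
  tvm::Target target("llvm");
  return tvm::codegen::Build(mod, target);
}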
include/tvm/target/compilation_config.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/target/compilation_config.h * \brief A helper class to collect all the targets in canonical form necessary for compilation. */ #ifndef TVM_TARGET_COMPILATION_CONFIG_H_ #define TVM_TARGET_COMPILATION_CONFIG_H_ #include <tvm/target/virtual_device.h> #include <string> namespace tvm { /*! * \brief Gathers the \p Targets and distinguished \p VirtualDevices in canonical form needed to * compile a Relay module for execution over possibly heterogeneous devices. Centralizes the * validation and canonicalization logic needed to transition from targets supplied by the Python * APIs to a single internal representation. Also holds a cache of canonical \p VirtualDevices * so that structurally equal virtual devices have pointer-equal canonical virtual devices. * * The construction of \p CompilationConfig is idempotent, in that given the same \p PassContext * \p ctx and an arbitrary \p Array<Target> \p raw_targets: * * \code * CompilationConfig(ctx, raw_targets) * is structurally equal to * CompilationConfig(ctx, CompilationConfig(ctx, raw_targets)->primitive_targets) * \endcode * * TODO(mbs): This is subject to change as we rework compilation options in general. This class * is probably better called a 'CompositeTarget', and may be better made a sub-class of Target or * some other common-target-root class. */ class CompilationConfigNode : public Object { public: /*! * \brief The host target. Used for 'scalar' data and code (such as shapes and shape * functions) and residual Relay expressions and data (such as conditionals and ADTs). * Each \p primitive_target below will have this exact target object as its 'host'. * * Note that it is possible for a \p Target used for primitive operations to be structurally * equal to the host \p Target (up to the \p host field). However, the \p Target objects will * be distinct, and can be used as keys within a \p Map without collision. */ Target host_target; /*! * \brief Vector of all available \p Targets for partitioning or compiling primitive tensor * operators (kernels). May contain a \p Target for the same device type as for the * \p host_target, however the \p host_target should be used for all host computations and data. * Each \p Target will have \p host_target as its 'host'. * * Primitive targets must be unique by their kind name. In this way the * \p FindPrimitiveTargetForKind method will find the unique target for the given kind name. * This method is used when transitioning from an external codegen "Compiler" attribute value * to the external codegen target representing that compiler. * * It is possible to have multiple primitive targets for the same device type.
However, given * primitive targets left and right where: * - left appears before right in the array * - left->GetTargetDeviceType() == right->GetTargetDeviceType() * then: * - right.IsExternalCodegenFor(left) must be true * In this way the \p FindPrimitiveTargetForDeviceOrFail method will find the 'most general' * target for the requested device type. This method is used when transitioning from a device * constraint to the target needed to compile for that device. * * In the homogeneous case, primitive_targets will have just one entry, which will be pointer-equal * to optional_homogeneous_target. * * In the homogeneous case where the 'host' is the same device as used for compiling kernels, it * is *not* the case that optional_homogeneous_target == host_target. This is because all * primitive targets always have their host field set to the host_target. I.e., it is valid to have: * \code * host_target=Target("llvm") * optional_homogeneous_target=Target("llvm", host=host_target) * \endcode */ Array<Target> primitive_targets; /*! * \brief \p VirtualDevice for primitive operators which are not otherwise constrained to a * particular device. Used by the PlanDevices pass to determine a virtual device for every * sub-expression. */ VirtualDevice default_primitive_virtual_device = VirtualDevice::FullyUnconstrained(); /*! \brief VirtualDevice for the host. */ VirtualDevice host_virtual_device = VirtualDevice::FullyUnconstrained(); /*! * \brief If defined then compile and/or run in 'homogeneous execution mode'. In this mode all * primitives are compiled for this target only. * * This is to support legacy passes which have not been adapted to heterogeneous execution and * rely on an implicit global \p Target to be in scope. * * TODO(mbs): Remove once all passes are 'heterogeneous aware'. */ Target optional_homogeneous_target; void VisitAttrs(AttrVisitor* v); /*! * \brief Returns the unique \p Target to use for \p device_type. Fail if no such target exists. * * This will be the first primitive target with matching device type. */ Target FindPrimitiveTargetForDeviceOrFail(DLDeviceType device_type) const; /*! * \brief Returns the unique \p Target to use for \p kind_name. Returns null if no such target * exists. */ Optional<Target> FindPrimitiveTargetForKind(const std::string& kind_name) const; /*! * \brief Returns a \p Target structurally equal to \p target, however, prefer a structurally equal * known host or primitive target if the configuration has one. */ Target CanonicalTarget(const Target& target) const; /*! * \brief Returns a \p VirtualDevice which is structurally equal to \p virtual_device on all its * constrained fields, however: * - If \p virtual_device has a device type but not a target, fill in a target using * \p FindPrimitiveTargetForDeviceOrFail. This is the one place we allow targets to be defaulted * from device types alone. * - If \p virtual_device has a target, also canonicalize it using \p CanonicalTarget. * The returned object will be unique for the adjusted virtual device w.r.t. all other * \p VirtualDevices returned by this method. * * We call the result the 'canonical' \p VirtualDevice. Two canonical \p VirtualDevices are * structurally equal if and only if they are pointer equal. In this way we can build maps * from virtual devices using just pointer equality. */ VirtualDevice CanonicalVirtualDevice(const VirtualDevice& virtual_device) const; static constexpr const char* _type_key = "CompilationConfig"; TVM_DECLARE_FINAL_OBJECT_INFO(CompilationConfigNode, Object) private: /*!
* \brief Sets the primitive targets, the host target, the default primitive virtual device, and * the host virtual device given: * - the vector of 'raw' targets (in any order) supplied by one of the TVM entry points. * - any "relay.fallback_device_type" attribute on \p pass_ctx. * - whether the LLVM backend is available. * Will look for a suitable host target in the given primitive targets, but if none found may * reuse a raw target or create a default CPU target. */ void Init(const transform::PassContext& pass_ctx, const Array<Target>& raw_targets); /*! * \brief Returns a freshly constructed CPU \p Target. */ static Target MakeDefaultCPUTarget(); /*! * \brief A cache of constructed virtual devices. */ mutable VirtualDeviceCache virtual_device_cache_; friend class CompilationConfig; }; /*! * \brief Managed reference class to \p CompilationConfig * * \sa CompilationConfig */ class CompilationConfig : public ObjectRef { public: /*! * \brief Constructs the compilation config given the settings in \p pass_ctx and supplied * \p raw_targets. See \p CompilationConfigNode::Init for details. */ TVM_DLL CompilationConfig(const transform::PassContext& pass_ctx, const Array<Target>& raw_targets); TVM_DEFINE_OBJECT_REF_METHODS(CompilationConfig, ObjectRef, CompilationConfigNode); }; } // namespace tvm #endif // TVM_TARGET_COMPILATION_CONFIG_H_
https://github.com/zk-ml/tachikoma
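A small sketch of construction from raw targets (the target strings are illustrative; the host is inferred or defaulted as described in Init):

// Sketch: canonicalize a heterogeneous set of raw targets.
#include <tvm/ir/transform.h>
#include <tvm/target/compilation_config.h>

tvm::CompilationConfig MakeConfig() {
  tvm::transform::PassContext pass_ctx = tvm::transform::PassContext::Current();
  tvm::Array<tvm::Target> raw_targets = {tvm::Target("cuda"), tvm::Target("llvm")};
  tvm::CompilationConfig config(pass_ctx, raw_targets);
  // All primitive targets now share config->host_target as their host.
  return config;
}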
include/tvm/target/generic_func.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/target/generic_func.h * \brief Generic function that can be specialized on a per-target basis. */ #ifndef TVM_TARGET_GENERIC_FUNC_H_ #define TVM_TARGET_GENERIC_FUNC_H_ #include <tvm/runtime/packed_func.h> #include <tvm/support/with.h> #include <tvm/target/target.h> #include <string> #include <unordered_map> #include <utility> #include <vector> namespace tvm { class GenericFuncNode; /*! * \brief Generic function that can be specialized on a per-target basis. */ class GenericFunc : public ObjectRef { public: GenericFunc() {} explicit GenericFunc(ObjectPtr<Object> n) : ObjectRef(n) {} /*! * \brief Set the default function implementation. * \param value The default function * \param allow_override If true, this call may override a previously registered function. If * false, an error will be logged if the call would override a previously registered function. * \return reference to self. */ TVM_DLL GenericFunc& set_default(const runtime::PackedFunc value, bool allow_override = false); /*! * \brief Register a specialized function * \param tags The tags for this specialization * \param value The specialized function * \param allow_override If true, this call may override previously registered tags. If false, * an error will be logged if the call would override previously registered tags. * \return reference to self. */ TVM_DLL GenericFunc& register_func(const std::vector<std::string>& tags, const runtime::PackedFunc value, bool allow_override = false); /*! * \brief Call generic function by directly passing in unpacked format. * \param args Arguments to be passed. * \tparam Args arguments to be passed. * * \code * // Example code on how to call generic function * void CallGeneric(GenericFunc f) { * // call like normal functions by passing in arguments * // return value is automatically converted back * int rvalue = f(1, 2.0); * } * \endcode */ template <typename... Args> inline runtime::TVMRetValue operator()(Args&&... args) const; /*! * \brief Invoke the relevant function for the current target context, set by set_target_context. * Arguments are passed in packed format. * \param args The arguments to pass to the function. * \param ret The return value */ TVM_DLL void CallPacked(runtime::TVMArgs args, runtime::TVMRetValue* ret) const; /*! * \brief Get the packed function specified for the current target context. */ TVM_DLL PackedFunc GetPacked() const; /*! * \brief Find or register the GenericFunc instance corresponding to the given name * \param name The name of the registered GenericFunc * \return The GenericFunc instance */ TVM_DLL static GenericFunc Get(const std::string& name); /*!
* \brief Add a GenericFunc instance to the registry * \param func The GenericFunc instance * \param name The name of the registered GenericFunc */ TVM_DLL static void RegisterGenericFunc(GenericFunc func, const std::string& name); /*! * \brief access the internal node container * \return the pointer to the internal node container */ inline GenericFuncNode* operator->(); // declare container type using ContainerType = GenericFuncNode; // Internal class. struct Manager; private: friend struct Manager; }; template <typename... Args> inline runtime::TVMRetValue GenericFunc::operator()(Args&&... args) const { const int kNumArgs = sizeof...(Args); const int kArraySize = kNumArgs > 0 ? kNumArgs : 1; TVMValue values[kArraySize]; int type_codes[kArraySize]; runtime::detail::for_each(runtime::TVMArgsSetter(values, type_codes), std::forward<Args>(args)...); runtime::TVMRetValue rv; CallPacked(runtime::TVMArgs(values, type_codes, kNumArgs), &rv); return rv; } /*! * \brief Represents a generic function that can be specialized on a per-target basis. */ class GenericFuncNode : public Object { public: /*! \brief name of the function */ std::string name_; /* \brief the generic builder */ runtime::PackedFunc generic_func_; /* \brief map from keys to registered functions */ std::unordered_map<std::string, runtime::PackedFunc> dispatch_dict_; void VisitAttrs(AttrVisitor* v) {} static constexpr const char* _type_key = "GenericFunc"; TVM_DECLARE_FINAL_OBJECT_INFO(GenericFuncNode, Object); }; inline GenericFuncNode* GenericFunc::operator->() { return static_cast<GenericFuncNode*>(get_mutable()); } #define TVM_GENERIC_FUNC_REG_VAR_DEF static TVM_ATTRIBUTE_UNUSED ::tvm::GenericFunc& __mk_##TVM /*! * \def TVM_REGISTER_GENERIC_FUNC * \brief Register a new generic function, or set a device-specific variant * of the corresponding function. * * \param name The name of the function */ #define TVM_REGISTER_GENERIC_FUNC(name) \ TVM_STR_CONCAT(TVM_GENERIC_FUNC_REG_VAR_DEF, __COUNTER__) = ::tvm::GenericFunc::Get(#name) } // namespace tvm #endif // TVM_TARGET_GENERIC_FUNC_H_
https://github.com/zk-ml/tachikoma
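A registration sketch following the macro above (the name demo_my_func and the returned values are hypothetical, not a real TVM registration):

// Sketch: a generic function with a default and a CUDA specialization.
#include <tvm/target/generic_func.h>

TVM_REGISTER_GENERIC_FUNC(demo_my_func)
    .set_default(tvm::runtime::PackedFunc(
        [](tvm::runtime::TVMArgs args, tvm::runtime::TVMRetValue* rv) { *rv = 1; }))
    .register_func({"cuda", "gpu"},
                   tvm::runtime::PackedFunc([](tvm::runtime::TVMArgs args,
                                               tvm::runtime::TVMRetValue* rv) { *rv = 32; }));

int QueryDemo() {
  tvm::GenericFunc f = tvm::GenericFunc::Get("demo_my_func");
  // The With<Target> scope decides which variant the call resolves to.
  tvm::With<tvm::Target> scope(tvm::Target("cuda"));
  return f();  // 32 under the cuda target, 1 otherwise
}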
include/tvm/target/tag.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/target/tag.h * \brief Target tag registry */ #ifndef TVM_TARGET_TAG_H_ #define TVM_TARGET_TAG_H_ #include <tvm/node/attr_registry_map.h> #include <tvm/node/node.h> #include <tvm/target/target.h> #include <utility> namespace tvm { /*! \brief A target tag */ class TargetTagNode : public Object { public: /*! \brief Name of the target */ String name; /*! \brief Config map to generate the target */ Map<String, ObjectRef> config; void VisitAttrs(AttrVisitor* v) { v->Visit("name", &name); v->Visit("config", &config); } static constexpr const char* _type_key = "TargetTag"; TVM_DECLARE_FINAL_OBJECT_INFO(TargetTagNode, Object); private: /*! \brief Return the index stored in attr registry */ uint32_t AttrRegistryIndex() const { return index_; } /*! \brief Return the name stored in attr registry */ String AttrRegistryName() const { return name; } /*! \brief Index used for internal lookup of attribute registry */ uint32_t index_; template <typename, typename> friend class AttrRegistry; template <typename> friend class AttrRegistryMapContainerMap; friend class TargetTagRegEntry; }; /*! * \brief Managed reference class to TargetTagNode * \sa TargetTagNode */ class TargetTag : public ObjectRef { public: /*! * \brief Retrieve the Target given the name of a target tag * \param target_tag_name Name of the target tag * \return The Target requested */ TVM_DLL static Optional<Target> Get(const String& target_tag_name); /*! * \brief List all names of the existing target tags * \return A dictionary that maps tag names to the concrete targets they correspond to */ TVM_DLL static Map<String, Target> ListTags(); /*! * \brief Add a tag into the registry * \param name Name of the tag * \param config The target config corresponding to the tag * \param override Allow overriding existing tags * \return Target created with the tag */ TVM_DLL static Target AddTag(String name, Map<String, ObjectRef> config, bool override); TVM_DEFINE_OBJECT_REF_METHODS(TargetTag, ObjectRef, TargetTagNode); private: /*! \brief Mutable access to the container class */ TargetTagNode* operator->() { return static_cast<TargetTagNode*>(data_.get()); } friend class TargetTagRegEntry; }; class TargetTagRegEntry { public: /*! * \brief Set the config dict corresponding to the target tag * \param config The config dict for target creation */ inline TargetTagRegEntry& set_config(Map<String, ObjectRef> config); /*! \brief Set the name of the TargetTag to be the same as the registry name if it is empty */ inline TargetTagRegEntry& set_name(); /*! * \brief Register or get a new entry. * \param target_tag_name The name of the TargetTag. * \return the corresponding entry.
*/ TVM_DLL static TargetTagRegEntry& RegisterOrGet(const String& target_tag_name); private: TargetTag tag_; String name; /*! \brief private constructor */ explicit TargetTagRegEntry(uint32_t reg_index) : tag_(make_object<TargetTagNode>()) { tag_->index_ = reg_index; } template <typename, typename> friend class AttrRegistry; friend class TargetTag; }; inline TargetTagRegEntry& TargetTagRegEntry::set_config(Map<String, ObjectRef> config) { tag_->config = std::move(config); return *this; } inline TargetTagRegEntry& TargetTagRegEntry::set_name() { if (tag_->name.empty()) { tag_->name = name; } return *this; } #define TVM_TARGET_TAG_REGISTER_VAR_DEF \ static DMLC_ATTRIBUTE_UNUSED ::tvm::TargetTagRegEntry& __make_##TargetTag /*! * \def TVM_REGISTER_TARGET_TAG * \brief Register a new target tag, or set attribute of the corresponding target tag. * \param TargetTagName The name of target tag */ #define TVM_REGISTER_TARGET_TAG(TargetTagName) \ TVM_STR_CONCAT(TVM_TARGET_TAG_REGISTER_VAR_DEF, __COUNTER__) = \ ::tvm::TargetTagRegEntry::RegisterOrGet(TargetTagName).set_name() } // namespace tvm #endif // TVM_TARGET_TAG_H_
https://github.com/zk-ml/tachikoma
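A registration sketch (the tag name and config entries are illustrative; real tags such as GPU model tags are registered the same way):

// Sketch: register a tag, then resolve it to a concrete Target.
#include <tvm/target/tag.h>

TVM_REGISTER_TARGET_TAG("demo/example-gpu")
    .set_config({{"kind", tvm::String("cuda")}, {"arch", tvm::String("sm_80")}});

tvm::Optional<tvm::Target> LookupDemoTag() {
  // Returns NullOpt if the tag was never registered.
  return tvm::TargetTag::Get("demo/example-gpu");
}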
include/tvm/target/target.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/target/target.h * \brief Compilation target object. */ #ifndef TVM_TARGET_TARGET_H_ #define TVM_TARGET_TARGET_H_ #include <tvm/ir/expr.h> #include <tvm/ir/module.h> #include <tvm/node/node.h> #include <tvm/support/with.h> #include <tvm/target/target_kind.h> #include <string> #include <unordered_set> #include <vector> namespace tvm { class TargetInternal; class Target; /*! * \brief Compilation target. * \sa Target */ class TargetNode : public Object { public: /*! \brief The kind of the target device */ TargetKind kind; /*! \brief Target host information, must be Target type */ Optional<ObjectRef> host; /*! \brief Tag of the target, can be empty */ String tag; /*! \brief Keys for this target */ Array<String> keys; /*! \brief Collection of attributes */ Map<String, ObjectRef> attrs; /*! \brief Target features */ Map<String, ObjectRef> features; /*! * \brief The raw string representation of the target * \return the full device string to pass to codegen::Build * \note It will be deprecated after the Target RFC is fully landed. */ TVM_DLL const std::string& str() const; /*! \return Export target to JSON-like configuration */ TVM_DLL Map<String, ObjectRef> Export() const; /*! \return The Optional<Target> typed target host of the TargetNode */ TVM_DLL Optional<Target> GetHost() const; /*! \return The device type for this target */ TVM_DLL int GetTargetDeviceType() const; /*! * \brief Returns a human readable representation of \p Target which includes all fields, * especially the host. Useful for diagnostic messages and debugging. * * TODO(mbs): The ReprPrinter version should perhaps switch to this form, however currently * code depends on str() and << being the same. */ String ToDebugString() const; void VisitAttrs(AttrVisitor* v) { v->Visit("kind", &kind); v->Visit("tag", &tag); v->Visit("keys", &keys); v->Visit("attrs", &attrs); v->Visit("features", &features); v->Visit("host", &host); } /*! * \brief Get an entry from attrs of the target * \tparam TObjectRef Type of the attribute * \param attr_key The name of the attribute key * \param default_value The value returned if the key is not present * \return An optional, NullOpt if not found, otherwise the value found */ template <typename TObjectRef> Optional<TObjectRef> GetAttr( const std::string& attr_key, Optional<TObjectRef> default_value = Optional<TObjectRef>(nullptr)) const { static_assert(std::is_base_of<ObjectRef, TObjectRef>::value, "Can only call GetAttr with ObjectRef types."); auto it = attrs.find(attr_key); if (it != attrs.end()) { return Downcast<Optional<TObjectRef>>((*it).second); } else { return default_value; } } /*! 
* \brief Get an entry from attrs of the target * \tparam TObjectRef Type of the attribute * \param attr_key The name of the attribute key * \param default_value The value returned if the key is not present * \return An optional, NullOpt if not found, otherwise the value found */ template <typename TObjectRef> Optional<TObjectRef> GetAttr(const std::string& attr_key, TObjectRef default_value) const { return GetAttr<TObjectRef>(attr_key, Optional<TObjectRef>(default_value)); } /*! * \brief Get a Target feature * * \param feature_key The feature key. * \param default_value The default value if the key does not exist, defaults to nullptr. * * \return The result * * \tparam TObjectRef the expected object type. * \throw Error if the key exists but the value does not match TObjectRef * * \code * * void GetTargetFeature(const Target& target) { * Bool has_feature = target->GetFeature<Bool>("has_feature", false).value(); * } * * \endcode */ template <typename TObjectRef> Optional<TObjectRef> GetFeature( const std::string& feature_key, Optional<TObjectRef> default_value = Optional<TObjectRef>(nullptr)) const { Optional<TObjectRef> feature = Downcast<Optional<TObjectRef>>(features.Get(feature_key)); if (!feature) { return default_value; } return feature; } // variant that uses TObjectRef to enable implicit conversion to default value. template <typename TObjectRef> Optional<TObjectRef> GetFeature(const std::string& attr_key, TObjectRef default_value) const { return GetFeature<TObjectRef>(attr_key, Optional<TObjectRef>(default_value)); } /*! \brief Get the keys for this target as a vector of strings */ TVM_DLL std::vector<std::string> GetKeys() const; /*! \brief Get the libs for this target as an unordered_set of strings */ TVM_DLL std::unordered_set<std::string> GetLibs() const; bool SEqualReduce(const TargetNode* other, SEqualReducer equal) const; void SHashReduce(SHashReducer hash_reduce) const; static constexpr const char* _type_key = "Target"; static constexpr const bool _type_has_method_sequal_reduce = true; static constexpr const bool _type_has_method_shash_reduce = true; TVM_DECLARE_FINAL_OBJECT_INFO(TargetNode, Object); private: /*! \brief Internal string repr. */ mutable std::string str_repr_; friend class TargetInternal; }; /*! * \brief Managed reference class to TargetNode. * \sa TargetNode */ class Target : public ObjectRef { public: /*! \brief Construct a null Target */ TVM_DLL explicit Target(std::nullptr_t) { data_ = nullptr; } /*! * \brief Construct a Target given a string * \param tag_or_config_or_target_str the string to parse for target */ TVM_DLL explicit Target(const String& tag_or_config_or_target_str); /*! * \brief Construct a Target using a JSON-like configuration * \param config The JSON-like configuration for target */ TVM_DLL explicit Target(const Map<String, ObjectRef>& config); /*! * \brief Get the current target context from thread local storage. * \param allow_not_defined If the context stack is empty and this is set to true, an * undefined Target will be returned. Otherwise, an empty context stack will cause a * runtime error. * \return The target that is the current context. The target may not be defined if * allow_not_defined is true. */ TVM_DLL static tvm::Target Current(bool allow_not_defined = true); /*!
* \brief Construct a Target given target and host * \param target The Target typed object with host field undefined for target * \param host The Target typed object for target host * \return The Target with given target and host context information */ TVM_DLL explicit Target(Target target, Target host); TVM_DEFINE_OBJECT_REF_METHODS(Target, ObjectRef, TargetNode); /*! * \brief Create a new Target object with given target (w.o host) and target host. * \param target The current Target typed object target, with or without host field. * \param host The given Target typed object target host * \return The new Target object with the given target and host field of given host. */ static Target WithHost(const Target& target, const Target& host); /*! * \brief Returns true if \p this target represents an external codegen. If so, * \p this->kind->name can be used as the "Compiler" attribute on partitioned functions, * and can be used to retrieve a partitioning pattern table using * \p get_pattern_table. */ bool IsExternalCodegen() const; /*! * \brief Returns true if \p this target represents an external codegen which is compatible * with \p that target. In particular: * - \p this has a true ::tvm::attr::kIsExternalCodegen attribute * - \p that does not have a true ::tvm::attr::kIsExternalCodegen attribute * - \p this and \p that have the same GetTargetDeviceType() * * After partitioning, the external codegen compilation path may use \p that to guide its * compilation to a \p runtime::Module. Given \p this, an appropriate \p that can be * found using \p CompilationConfig::FindPrimitiveTargetOrFail(this->GetTargetDeviceType()). * * The \p CollagePartition pass uses this method to guide its search over candidate partitions * using external codegen. */ bool IsExternalCodegenFor(const Target& that) const; private: Target(TargetKind kind, Optional<ObjectRef> host, String tag, Array<String> keys, Map<String, ObjectRef> attrs); // enable with syntax. friend class TargetInternal; friend class With<Target>; /*! * \brief Push a new target context onto the thread local stack. * The Target on top of the stack is used to determine which * specialization to use when invoking a GenericFunc. */ TVM_DLL void EnterWithScope(); /*! * \brief Pop a target off the thread local context stack, * restoring the previous target as the current context. */ TVM_DLL void ExitWithScope(); }; /*! * \brief Check and update host field of the given legacy target and target host pair. * Note that this function is for legacy target API compatibility only, and is not * recommended for other use. * \param target The pointer to a Target typed object with host field to be updated * \param host The pointer to a Target typed object for target host to be updated */ void CheckAndUpdateHostConsistency(Target* target, Target* host); /*! * \brief Check and update host field of the given legacy heterogeneous targets and * target host. Note that this function is for legacy target API compatibility only, * and is not recommended for other use. * \param ir_modules The pointer to a Map object with keys being Target objects * \param host The Target typed object for target host to be updated */ void CheckAndUpdateHostConsistency(Map<Target, IRModule>* ir_modules, Target* host); } // namespace tvm #endif // TVM_TARGET_TARGET_H_
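For reference, a minimal usage sketch of the API declared above, assuming an LLVM-enabled build of TVM; the attribute key "mtriple" is just an illustrative example of a registered option:

#include <tvm/target/target.h>

void TargetAttrDemo() {
  // Parse a target from its canonical string form.
  tvm::Target target("llvm -mtriple=x86_64-linux-gnu");

  // GetAttr returns the supplied default (here NullOpt) when the key is absent.
  tvm::Optional<tvm::String> mtriple = target->GetAttr<tvm::String>("mtriple");

  // Establish `target` as the current thread-local context; Target::Current()
  // returns it until `scope` is destroyed.
  tvm::With<tvm::Target> scope(target);
  tvm::Target current = tvm::Target::Current();
}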
https://github.com/zk-ml/tachikoma
include/tvm/target/target_info.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/target/target_info.h * \brief Various information about the target. */ #ifndef TVM_TARGET_TARGET_INFO_H_ #define TVM_TARGET_TARGET_INFO_H_ #include <tvm/ir/expr.h> #include <string> namespace tvm { /*! * \brief Memory information of a special memory region. * Use MemoryInfo as its container type. */ class MemoryInfoNode : public Object { public: /*! \brief The addressable unit */ int64_t unit_bits; /*! \brief Maximum number of bits supported in the memory */ int64_t max_num_bits; /*! \brief Maximum number of bits to be used in a simd op */ int64_t max_simd_bits; /*! * \brief Head address of the buffer, if visible to the CPU. * This address can be None. */ PrimExpr head_address; void VisitAttrs(AttrVisitor* v) { v->Visit("unit_bits", &unit_bits); v->Visit("max_num_bits", &max_num_bits); v->Visit("max_simd_bits", &max_simd_bits); v->Visit("head_address", &head_address); } static constexpr const char* _type_key = "MemoryInfo"; TVM_DECLARE_FINAL_OBJECT_INFO(MemoryInfoNode, Object); }; /*! \brief Defines memory info */ class MemoryInfo : public ObjectRef { public: TVM_DEFINE_OBJECT_REF_METHODS(MemoryInfo, ObjectRef, MemoryInfoNode); }; /*! * \brief Get memory info for a given scope. * \param scope The scope name. * \return The memory info. */ TVM_DLL MemoryInfo GetMemoryInfo(const std::string& scope); } // namespace tvm #endif // TVM_TARGET_TARGET_INFO_H_
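As a quick illustration, a hedged sketch of looking up memory info for a scope; whether a given scope string (here "global") actually has registered info depends on the build and registered targets, so the result is checked for definedness:

#include <tvm/runtime/logging.h>
#include <tvm/target/target_info.h>

void MemoryInfoDemo() {
  // Query memory info for an illustrative scope name.
  tvm::MemoryInfo info = tvm::GetMemoryInfo("global");
  if (info.defined()) {
    LOG(INFO) << "unit_bits=" << info->unit_bits
              << " max_simd_bits=" << info->max_simd_bits;
  }
}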
https://github.com/zk-ml/tachikoma
include/tvm/target/target_kind.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/target/target_kind.h * \brief Target kind registry */ #ifndef TVM_TARGET_TARGET_KIND_H_ #define TVM_TARGET_TARGET_KIND_H_ #include <tvm/ir/transform.h> #include <tvm/node/attr_registry_map.h> #include <tvm/node/node.h> #include <memory> #include <unordered_map> #include <utility> #include <vector> namespace tvm { class Target; /*! * \brief Map containing parsed features of a specific Target */ using TargetFeatures = Map<String, ObjectRef>; /*! * \brief TargetParser to apply on instantiation of a given TargetKind * * \param target_json Target in JSON format to be transformed during parsing. * * \return The transformed Target JSON object. */ using TargetJSON = Map<String, ObjectRef>; using FTVMTargetParser = TypedPackedFunc<TargetJSON(TargetJSON)>; /*! * \brief RelayToTIR tvm::transform::Pass specific to a TargetKind * * Called before the default lowering passes. * * \param mod The module that an optimization pass runs on. * \param pass_ctx The pass context that can provide information for the optimization. * * \return The transformed module. */ using FTVMRelayToTIR = transform::Pass; /*! * \brief TIRToRuntime conversion specific to a TargetKind * * This function is responsible for scanning an IRModule for appropriate Target-specific functions and generating a Runtime module representing the compiled output * * \param ir_module Unified IRModule * \param target Target to filter on or retrieve arguments from * \return Runtime Module containing compiled functions */ using FTVMTIRToRuntime = runtime::TypedPackedFunc<runtime::Module(IRModule, Target)>; namespace detail { template <typename, typename, typename> struct ValueTypeInfoMaker; } class TargetInternal; template <typename> class TargetKindAttrMap; /*! \brief Target kind, specifies the kind of the target */ class TargetKindNode : public Object { public: /*! \brief Name of the target kind */ String name; /*! \brief Device type of target kind */ int default_device_type; /*! \brief Default keys of the target */ Array<String> default_keys; /*! \brief Function used to preprocess on target creation */ PackedFunc preprocessor; /*! \brief Function used to parse a JSON target during creation */ FTVMTargetParser target_parser; void VisitAttrs(AttrVisitor* v) { v->Visit("name", &name); v->Visit("default_device_type", &default_device_type); v->Visit("default_keys", &default_keys); } static constexpr const char* _type_key = "TargetKind"; TVM_DECLARE_FINAL_OBJECT_INFO(TargetKindNode, Object); private: /*! \brief Return the index stored in attr registry */ uint32_t AttrRegistryIndex() const { return index_; } /*! \brief Return the name stored in attr registry */ String AttrRegistryName() const { return name; } /*! 
\brief Stores the required type_key and type_index of a specific attr of a target */ struct ValueTypeInfo { String type_key; uint32_t type_index; std::unique_ptr<ValueTypeInfo> key; std::unique_ptr<ValueTypeInfo> val; }; /*! \brief A hash table that stores the type information of each attr of the target key */ std::unordered_map<String, ValueTypeInfo> key2vtype_; /*! \brief A hash table that stores the default value of each attr of the target key */ std::unordered_map<String, ObjectRef> key2default_; /*! \brief Index used for internal lookup of attribute registry */ uint32_t index_; template <typename, typename, typename> friend struct detail::ValueTypeInfoMaker; template <typename, typename> friend class AttrRegistry; template <typename> friend class AttrRegistryMapContainerMap; friend class TargetKindRegEntry; friend class TargetInternal; }; /*! * \brief Managed reference class to TargetKindNode * \sa TargetKindNode */ class TargetKind : public ObjectRef { public: TargetKind() = default; /*! \brief Get the attribute map given the attribute name */ template <typename ValueType> static inline TargetKindAttrMap<ValueType> GetAttrMap(const String& attr_name); /*! * \brief Retrieve the TargetKind given its name * \param target_kind_name Name of the target kind * \return The TargetKind requested */ TVM_DLL static Optional<TargetKind> Get(const String& target_kind_name); TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(TargetKind, ObjectRef, TargetKindNode); private: /*! \brief Mutable access to the container class */ TargetKindNode* operator->() { return static_cast<TargetKindNode*>(data_.get()); } TVM_DLL static const AttrRegistryMapContainerMap<TargetKind>& GetAttrMapContainer( const String& attr_name); friend class TargetKindRegEntry; friend class TargetInternal; }; /*! * \brief Map<TargetKind, ValueType> used to store meta-information about TargetKind * \tparam ValueType The type of the value stored in map */ template <typename ValueType> class TargetKindAttrMap : public AttrRegistryMap<TargetKind, ValueType> { public: using TParent = AttrRegistryMap<TargetKind, ValueType>; using TParent::count; using TParent::get; using TParent::operator[]; explicit TargetKindAttrMap(const AttrRegistryMapContainerMap<TargetKind>& map) : TParent(map) {} }; /*! \brief Value used with --runtime in target specs to indicate the C++ runtime. */ static constexpr const char* kTvmRuntimeCpp = "c++"; /*! \brief Value used with --runtime in target specs to indicate the C runtime. */ static constexpr const char* kTvmRuntimeCrt = "c"; /*! * \brief Helper structure to register TargetKind * \sa TVM_REGISTER_TARGET_KIND */ class TargetKindRegEntry { public: /*! * \brief Register additional attributes to target_kind. * \param attr_name The name of the attribute. * \param value The value to be set. * \param plevel The priority level of this attribute; * a higher priority level attribute * will replace a lower priority level attribute. * Must be bigger than 0. * * The same plevel cannot be set twice in the code. * * \tparam ValueType The type of the value to be set. */ template <typename ValueType> inline TargetKindRegEntry& set_attr(const String& attr_name, const ValueType& value, int plevel = 10); /*! * \brief Set DLPack's device_type of the target * \param device_type Device type */ inline TargetKindRegEntry& set_default_device_type(int device_type); /*! * \brief Set the default keys of the target * \param keys The default keys */ inline TargetKindRegEntry& set_default_keys(std::vector<String> keys); /*!
* \brief Set the pre-processing function applied upon target creation * \tparam FLambda Type of the function * \param f The pre-processing function */ template <typename FLambda> inline TargetKindRegEntry& set_attrs_preprocessor(FLambda f); /*! * \brief Set the parsing function applied upon target creation * \param parser The Target parsing function */ inline TargetKindRegEntry& set_target_parser(FTVMTargetParser parser); /*! * \brief Register a valid configuration option and its ValueType for validation * \param key The configuration key * \tparam ValueType The value type to be registered */ template <typename ValueType> inline TargetKindRegEntry& add_attr_option(const String& key); /*! * \brief Register a valid configuration option and its ValueType for validation * \param key The configuration key * \param default_value The default value of the key * \tparam ValueType The value type to be registered */ template <typename ValueType> inline TargetKindRegEntry& add_attr_option(const String& key, ObjectRef default_value); /*! \brief Set name of the TargetKind to be the same as registry if it is empty */ inline TargetKindRegEntry& set_name(); /*! * \brief List all the entry names in the registry. * \return The entry names. */ TVM_DLL static Array<String> ListTargetKinds(); /*! * \brief Get all supported option names and types for a given Target kind. * \return Map of option name to type */ TVM_DLL static Map<String, String> ListTargetKindOptions(const TargetKind& kind); /*! * \brief Register or get a new entry. * \param target_kind_name The name of the TargetKind. * \return the corresponding entry. */ TVM_DLL static TargetKindRegEntry& RegisterOrGet(const String& target_kind_name); private: TargetKind kind_; String name; /*! \brief private constructor */ explicit TargetKindRegEntry(uint32_t reg_index) : kind_(make_object<TargetKindNode>()) { kind_->index_ = reg_index; } /*! * \brief update the attribute TargetKindAttrMap * \param key The name of the attribute * \param value The value to be set * \param plevel The priority level */ TVM_DLL void UpdateAttr(const String& key, TVMRetValue value, int plevel); template <typename, typename> friend class AttrRegistry; friend class TargetKind; }; namespace detail { template <typename Type, template <typename...> class Container> struct is_specialized : std::false_type { using type = std::false_type; }; template <template <typename...> class Container, typename... 
Args> struct is_specialized<Container<Args...>, Container> : std::true_type { using type = std::true_type; }; template <typename ValueType, typename IsArray = typename is_specialized<ValueType, Array>::type, typename IsMap = typename is_specialized<ValueType, Map>::type> struct ValueTypeInfoMaker {}; template <typename ValueType> struct ValueTypeInfoMaker<ValueType, std::false_type, std::false_type> { using ValueTypeInfo = TargetKindNode::ValueTypeInfo; ValueTypeInfo operator()() const { uint32_t tindex = ValueType::ContainerType::_GetOrAllocRuntimeTypeIndex(); ValueTypeInfo info; info.type_index = tindex; info.type_key = runtime::Object::TypeIndex2Key(tindex); info.key = nullptr; info.val = nullptr; return info; } }; template <typename ValueType> struct ValueTypeInfoMaker<ValueType, std::true_type, std::false_type> { using ValueTypeInfo = TargetKindNode::ValueTypeInfo; ValueTypeInfo operator()() const { using key_type = ValueTypeInfoMaker<typename ValueType::value_type>; uint32_t tindex = ValueType::ContainerType::_GetOrAllocRuntimeTypeIndex(); ValueTypeInfo info; info.type_index = tindex; info.type_key = runtime::Object::TypeIndex2Key(tindex); info.key = std::make_unique<ValueTypeInfo>(key_type()()); info.val = nullptr; return info; } }; template <typename ValueType> struct ValueTypeInfoMaker<ValueType, std::false_type, std::true_type> { using ValueTypeInfo = TargetKindNode::ValueTypeInfo; ValueTypeInfo operator()() const { using key_type = ValueTypeInfoMaker<typename ValueType::key_type>; using val_type = ValueTypeInfoMaker<typename ValueType::mapped_type>; uint32_t tindex = ValueType::ContainerType::_GetOrAllocRuntimeTypeIndex(); ValueTypeInfo info; info.type_index = tindex; info.type_key = runtime::Object::TypeIndex2Key(tindex); info.key = std::make_unique<ValueTypeInfo>(key_type()()); info.val = std::make_unique<ValueTypeInfo>(val_type()()); return info; } }; } // namespace detail template <typename ValueType> inline TargetKindAttrMap<ValueType> TargetKind::GetAttrMap(const String& attr_name) { return TargetKindAttrMap<ValueType>(GetAttrMapContainer(attr_name)); } template <typename ValueType> inline TargetKindRegEntry& TargetKindRegEntry::set_attr(const String& attr_name, const ValueType& value, int plevel) { ICHECK_GT(plevel, 0) << "plevel in set_attr must be greater than 0"; runtime::TVMRetValue rv; rv = value; UpdateAttr(attr_name, rv, plevel); return *this; } inline TargetKindRegEntry& TargetKindRegEntry::set_default_device_type(int device_type) { kind_->default_device_type = device_type; return *this; } inline TargetKindRegEntry& TargetKindRegEntry::set_default_keys(std::vector<String> keys) { kind_->default_keys = keys; return *this; } template <typename FLambda> inline TargetKindRegEntry& TargetKindRegEntry::set_attrs_preprocessor(FLambda f) { LOG(WARNING) << "set_attrs_preprocessor is deprecated please use set_target_parser instead"; using FType = typename tvm::runtime::detail::function_signature<FLambda>::FType; kind_->preprocessor = tvm::runtime::TypedPackedFunc<FType>(std::move(f)).packed(); return *this; } inline TargetKindRegEntry& TargetKindRegEntry::set_target_parser(FTVMTargetParser parser) { kind_->target_parser = parser; return *this; } template <typename ValueType> inline TargetKindRegEntry& TargetKindRegEntry::add_attr_option(const String& key) { ICHECK(!kind_->key2vtype_.count(key)) << "AttributeError: add_attr_option failed because '" << key << "' has been set once"; kind_->key2vtype_[key] = detail::ValueTypeInfoMaker<ValueType>()(); return *this; } template 
<typename ValueType> inline TargetKindRegEntry& TargetKindRegEntry::add_attr_option(const String& key, ObjectRef default_value) { add_attr_option<ValueType>(key); kind_->key2default_[key] = default_value; return *this; } inline TargetKindRegEntry& TargetKindRegEntry::set_name() { if (kind_->name.empty()) { kind_->name = name; } return *this; } #define TVM_TARGET_KIND_REGISTER_VAR_DEF \ static DMLC_ATTRIBUTE_UNUSED ::tvm::TargetKindRegEntry& __make_##TargetKind namespace attr { // // Distinguished TargetKind attribute names. // /*! * \brief A \p TargetKind attribute of type \p Bool. If true, then the target kind name also * corresponds to an external codegen 'compiler' name. That name may be used: * - To retrieve partitioning rules using \p get_partition_table. * - To attach to Relay Functions under the \p attr::kCompiler attribute to indicate * the function is to be compiled by the external codegen path. * * The \p CollagePartition pass uses this attribute to guide its search over candidate partitions * using external codegen. * * See also \p Target::IsExternalCodegenFor */ constexpr const char* kIsExternalCodegen = "is_external_codegen"; /*! * \brief A \p TargetKind attribute of type \p FTVMRelayToTIR. If set, then the target kind name * also corresponds to an external codegen 'compiler' name, and the bound value is a \p Pass * to apply before the TVM lowering. * * See also \p Target::IsExternalCodegenFor */ constexpr const char* kRelayToTIR = "RelayToTIR"; } // namespace attr /*! * \def TVM_REGISTER_TARGET_KIND * \brief Register a new target kind, or set attribute of the corresponding target kind. * * \param TargetKindName The name of target kind * \param DeviceType The DLDeviceType of the target kind * * \code * * TVM_REGISTER_TARGET_KIND("llvm") * .set_attr<TPreCodegenPass>("TPreCodegenPass", a-pre-codegen-pass) * .add_attr_option<Bool>("system_lib") * .add_attr_option<String>("mtriple") * .add_attr_option<String>("mattr"); * * \endcode */ #define TVM_REGISTER_TARGET_KIND(TargetKindName, DeviceType) \ TVM_STR_CONCAT(TVM_TARGET_KIND_REGISTER_VAR_DEF, __COUNTER__) = \ ::tvm::TargetKindRegEntry::RegisterOrGet(TargetKindName) \ .set_name() \ .set_default_device_type(DeviceType) \ .add_attr_option<Array<String>>("keys") \ .add_attr_option<String>("tag") \ .add_attr_option<String>("device") \ .add_attr_option<String>("model") \ .add_attr_option<Array<String>>("libs") \ .add_attr_option<Target>("host") \ .add_attr_option<Integer>("from_device") \ .add_attr_option<Integer>("target_device_type") } // namespace tvm #endif // TVM_TARGET_TARGET_KIND_H_
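To make the registration flow concrete, a hedged sketch of registering a hypothetical kind and reading a kind-level attribute back through TargetKindAttrMap; the kind name "example_cpu" and the attribute "TExampleFlag" are invented for illustration:

#include <tvm/target/target_kind.h>

// Registration normally lives at namespace scope in a .cc file.
TVM_REGISTER_TARGET_KIND("example_cpu", kDLCPU)
    .add_attr_option<tvm::String>("march")
    .set_attr<tvm::Bool>("TExampleFlag", tvm::Bool(true));

void ReadKindAttr() {
  // Look up the per-kind attribute map for the attribute name.
  auto attr_map = tvm::TargetKind::GetAttrMap<tvm::Bool>("TExampleFlag");
  tvm::Optional<tvm::TargetKind> kind = tvm::TargetKind::Get("example_cpu");
  if (kind && attr_map.count(kind.value())) {
    tvm::Bool flag = attr_map[kind.value()];  // true per the registration above
    (void)flag;
  }
}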
https://github.com/zk-ml/tachikoma
include/tvm/target/virtual_device.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/target/virtual_device.h * \brief A compile time representation for where data is to be stored at runtime, and how to * compile code to compute it. */ #ifndef TVM_TARGET_VIRTUAL_DEVICE_H_ #define TVM_TARGET_VIRTUAL_DEVICE_H_ #include <tvm/ir/transform.h> #include <tvm/target/target.h> #include <string> #include <unordered_set> #include <utility> namespace tvm { /*! * Abstract label for an area of memory. * * Currently uninterpreted and arbitrary. Likely to be replaced by a structured representation * of a memory pool in the future. Please try to use this alias instead of String to aid future * code migration. */ using MemoryScope = String; /*! * \brief Describes at compile time the constraints on where data is to be stored at runtime * down to the (virtual) device and memory scope level, and how to compile code to compute that * data. Used by the \p PlanDevices pass to collect and solve (virtual) device constraints for * the whole Relay program. * * Is a quadruple of: * - A \p device_type (\p DLDeviceType). May be \p kInvalidDeviceType if unconstrained. * - A \p virtual_device_id (\p int). This allows us to distinguish distinct devices * with the same \p Target, for example in a multi-GPU system. May be -1 if unconstrained. * See "Virtual Devices" below. * - A \p target (\p Target) describing how to compile code for the intended device. May be null * if unconstrained. * - A \p memory_scope (\p MemoryScope, which is currently just \p String) describing which memory * area is to be used to hold data. May be "" if unconstrained. See "Memory Scopes and Devices" * below. * * Some or all of these fields may be unconstrained, signaling that device planning is free to * choose a value consistent with the whole program. However if a \p target is given then the \p * device_type must equal \p target->GetTargetDeviceType(). * * Note that currently we assume if a function returns its result on a particular (virtual) device * then the function body is also executed on that device. See the overview comment in * src/relay/transforms/device_planner.cc for more details. * * By 'data' we include both tensors and additional supporting data structures such as shapes, * Relay ADT items (including tuples), Relay references, and Relay closures. Typically non-tensor * data must reside on a 'CPU'-like host device with good support for scalars. * * By 'execution' we include both (fused) primitive operators, and all the Relay expressions * surrounding them which coordinate data and control flow. Again, typically non-primitive * operators must be executed on a 'CPU'-like device with good support for control flow.
* * Since TVM targets such a wide range of systems it is not possible for \p VirtualDevice to impose * much semantics on these fields, particularly for \p virtual_device_id and \p memory_scope. * Instead we assume downstream passes and codegen will interpret and validate these fields * appropriately. * * Targets vs Devices * ------------------ * Generally \p Targets (a compile-time only datastructure) describe compiler options for a specific * microarchitecture and toolchain, while \p Devices (a runtime datastructure also available at * compile time) describe a physical device on the target system. Obviously the target must agree * with the device's microarchitecture, but we otherwise don't impose any constraints between them: * - It's ok to use different \p Targets for the same \p Device, eg to squeeze some extra perf * out of a particular primitive using particular compiler flags. * - It's ok to use the same \p Target for multiple \p Devices, eg if we have multiple CPUs. * * Traditionally TVM assumes at most one \p Target per \p DLDeviceType. We are moving away from that * assumption. * * Virtual vs Physical Devices * --------------------------- * The \p virtual_device_id may be used by downstream passes or the runtime to help decide which * \p device_id to use for a particular physical runtime \p Device. For example: * - Some runtimes may support passing in an array of actual `device` specifications, and the * \p virtual_device_id can be used at runtime as an index into that array. * - Some runtimes may support dynamically allocating computations to physical devices. On these * systems a large space of \p virtual_device_ids could be used at compile time, even though * at runtime only a few physical devices will be present. * * The \p virtual_device_id may also be left unconstrained if not needed. * * Memory Scopes and Devices * ------------------------- * Multi-device systems can have complex memory hierarchies. For example * \code * (kDLCPU, 0, "llvm", "global") * \endcode * and * \code * (kDLCPU, 1, "llvm", "global") * \endcode * could denote: * - The same memory area accessible from two separate CPUs without any CPU affinity; * - Distinct memory areas in a NUMA architecture for which cross-device access is handled * by the memory system; * - Outright distinct memory areas, where one device cannot directly address the memory of * another. * * Similarly: * \code * (kDLCPU, 0, "llvm", "global") * \endcode * and * \code * (kDLCUDA, 0, "cuda", "host") * \endcode * could denote the same memory area, but with very different access costs. * * Furthermore, not all memory scopes are accessible to all devices, and it is possible for * a memory scope to only be accessible to a device when code is compiled with particular * \p Target options. * * \p VirtualDevices themselves have no system-level understanding. Currently the \p PlanDevices * pass will simply insert "device_copy" operators wherever \p VirtualDevices are not exactly * pointwise equal. We may revisit this in the future as the work on memory pools matures. * * Joining and Defaulting * ---------------------- * It is possible to 'join' two \p VirtualDevices to yield the most constrained \p VirtualDevice * which agrees with both join arguments.
Eg: * \code * Join((kDLCPU, -1, "llvm", ""), (kInvalidDeviceType, 3, null, "global")) * => (kDLCPU, 3, "llvm", "global") * Join((kDLCPU, -1, "llvm", ""), (kInvalidDeviceType, 3, null, "local")) * => null (no join possible) * \endcode * * Related to 'join' is 'default', which only takes constrained fields from the rhs when the * lhs is unconstrained: * \code * Default((kDLCPU, -1, "llvm", "local"), (kDLCPU, 3, null, "global")) * => (kDLCPU, 3, "llvm", "local") * \endcode * * These operations are needed during device planning. */ class VirtualDeviceNode : public AttrsNode<VirtualDeviceNode> { private: /*! * \brief The \p DLDeviceType (represented as an int) of the virtual device. If \p target is * known then this will be equal to \p target->GetTargetDeviceType(). If \p target is null then * the target is to be determined later. * * This is needed to support the legacy "on_device" and "device_copy" calls which only allow * a \p DLDeviceType (as an integer) to be given. * * kInvalidDeviceType denotes unconstrained. An int since the DLDeviceType enum representation * is not fixed. Private to discourage further int vs DLDeviceType confusion. */ int /* actually DLDeviceType */ device_type_int; public: DLDeviceType device_type() const { return static_cast<DLDeviceType>(device_type_int); } /*! * \brief The device identifier for the virtual device. This must be resolved to a physical * device identifier either during compilation or at runtime. * * -1 denotes unconstrained. */ int virtual_device_id; /*! * \brief The \p Target describing how to compile for the virtual device. * * Null denotes unconstrained. Note that if a target later becomes known for this \p VirtualDevice * then it must be consistent with the \p device_type if already known. This is enforced by the * Join and Default methods. */ Target target; /*! * \brief The scope of memory w.r.t. the virtual device which holds data. * * Empty denotes unconstrained. */ MemoryScope memory_scope; /*! * \brief Returns true if virtual device is 'fully unconstrained', ie no target/device type, * device id or memory scope is specified. */ bool IsFullyUnconstrained() const { return !target.defined() && device_type() == kInvalidDeviceType && virtual_device_id == -1 && memory_scope.empty(); } /*! * \brief Returns true if virtual device is 'fully constrained', ie target, device id and memory * scope are all specified. */ bool IsFullyConstrained() const { return target.defined() && virtual_device_id != -1 && !memory_scope.empty(); } /*! * \brief Returns the (virtual) \p Device implied by this \p VirtualDevice. Both the \p * device_type and \p virtual_device_id must be constrained. The returned \p Device may not * correspond to any physical device available at compile time or even runtime: see "Virtual vs * Physical Devices" above. */ Device ToDevice() const { ICHECK(device_type() != kInvalidDeviceType); ICHECK(virtual_device_id != -1); Device device; device.device_type = device_type(); device.device_id = virtual_device_id; return device; } TVM_DECLARE_ATTRS(VirtualDeviceNode, "VirtualDevice") { TVM_ATTR_FIELD(device_type_int) .describe("The type of the virtual device.") .set_default(kInvalidDeviceType); TVM_ATTR_FIELD(virtual_device_id) .describe("The device id of the virtual device.") .set_default(-1); TVM_ATTR_FIELD(target) .describe("The target describing how to compile for the virtual device.") .set_default(Target()); TVM_ATTR_FIELD(memory_scope) .describe("The area of memory w.r.t. 
the virtual device where data is stored.") .set_default(""); } friend class VirtualDevice; }; /*! * \brief Managed reference class to \p VirtualDeviceNode. */ class VirtualDevice : public ObjectRef { public: /*! * \brief Construct a virtual device. * \param device_type The device type for the virtual device, or \p kInvalidDeviceType if * unconstrained. If \p target is defined then must match its \p target->GetTargetDeviceType(). * \param virtual_device_id The device id for the virtual device, or -1 if unconstrained. * \param target The target describing how to compile for the virtual device, or null if * unconstrained. * \param memory_scope The memory scope w.r.t. the virtual device which holds data, or "" if * unconstrained. * \return The virtual device. */ explicit VirtualDevice(DLDeviceType device_type = kInvalidDeviceType, int virtual_device_id = -1, Target target = {}, MemoryScope memory_scope = {}); /*! \brief Returns the unique fully unconstrained \p VirtualDevice. */ static VirtualDevice FullyUnconstrained(); /*! * \brief Returns the \p VirtualDevice for \p device_type and (if not -1) \p virtual_device_id. * The target and memory scope will be unconstrained. */ static VirtualDevice ForDeviceType(DLDeviceType device_type, int virtual_device_id = -1) { ICHECK_GT(device_type, 0); return VirtualDevice(device_type, virtual_device_id); } static VirtualDevice ForDeviceType(int device_type, int virtual_device_id = -1) { return ForDeviceType(static_cast<DLDeviceType>(device_type), virtual_device_id); } static VirtualDevice ForDeviceType(const Integer& device_type, int virtual_device_id = -1) { return ForDeviceType(static_cast<int>(device_type->value), virtual_device_id); } /*! \brief Returns the \p VirtualDevice for \p device. */ static VirtualDevice ForDevice(const Device& device) { return ForDeviceType(device.device_type, device.device_id); } /*! \brief Returns the \p VirtualDevice for \p device and \p target. */ static VirtualDevice ForDeviceAndTarget(const Device& device, Target target) { return VirtualDevice(device.device_type, device.device_id, std::move(target)); } /*! \brief Returns the \p VirtualDevice for \p target. */ static VirtualDevice ForTarget(Target target) { DLDeviceType device_type = static_cast<DLDeviceType>(target->GetTargetDeviceType()); return VirtualDevice(device_type, /*virtual_device_id=*/0, std::move(target)); } /*! \brief Returns the \p VirtualDevice for \p memory_scope alone. */ static VirtualDevice ForMemoryScope(MemoryScope memory_scope) { return VirtualDevice(kInvalidDeviceType, -1, {}, std::move(memory_scope)); } /*! \brief Returns the \p VirtualDevice for \p device, \p target and \p memory_scope. */ TVM_DLL static VirtualDevice ForDeviceTargetAndMemoryScope(const Device& device, Target target, MemoryScope memory_scope) { return VirtualDevice(device.device_type, device.device_id, std::move(target), std::move(memory_scope)); } /*! * \brief Returns the 'join' of \p lhs and \p rhs. The result will agree pointwise with * \p lhs and \p rhs on all their constrained fields. Returns the null optional if no such * join exists, ie there's disagreement on at least one constrained field. */ static Optional<VirtualDevice> Join(const VirtualDevice& lhs, const VirtualDevice& rhs); /*! * \brief Returns the 'default' of \p lhs and \p rhs. The result will be \p lhs, except any * unconstrained fields in \p lhs will take their value from \p rhs. Always well-defined. 
*/ static VirtualDevice Default(const VirtualDevice& lhs, const VirtualDevice& rhs); TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(VirtualDevice, ObjectRef, VirtualDeviceNode); friend class VirtualDeviceCache; // Private implementation helper. }; /*! * \brief A cache of \p VirtualDevices. This can be used: * - To avoid ending up with lots of identical instances, since the space of VirtualDevices for any * one compilation is very small but the number of points they need to be constructed can * be very large (eg during device planning). * - So we can assume \p VirtualDevices are pointer equal if and only if they are structurally * equal. This simplifies the unification of 'device domains' which are built on \p VirtualDevices. */ class VirtualDeviceCache { public: /*! \brief Returns the unique \p VirtualDevice representing given fields. */ VirtualDevice Make(DLDeviceType device_type = kInvalidDeviceType, int virtual_device_id = -1, Target target = {}, MemoryScope memory_scope = {}); /*! * \brief Returns the unique \p VirtualDevice structurally equal to the given \p virtual_device. */ VirtualDevice Unique(const VirtualDevice& virtual_device); private: /*! \brief Already constructed VirtualDevices. */ std::unordered_set<VirtualDevice, StructuralHash, StructuralEqual> cache_; }; /*! \brief The attribute key for the virtual device. This key will be promoted to first class on * functions. For use in the parser and printer only. * * Type: VirtualDevice */ constexpr const char* kVirtualDevice = "virtual_device"; } // namespace tvm #endif // TVM_TARGET_VIRTUAL_DEVICE_H_
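A small sketch of the join/default semantics described above, using only constructors and helpers declared in this header; the scope string "global" and target "llvm" are illustrative:

#include <tvm/target/virtual_device.h>

void VirtualDeviceDemo() {
  // Device type and target constrained; id and memory scope left open.
  tvm::VirtualDevice lhs(kDLCPU, /*virtual_device_id=*/-1, tvm::Target("llvm"));
  // Only the memory scope constrained.
  tvm::VirtualDevice rhs = tvm::VirtualDevice::ForMemoryScope("global");

  // The constrained fields do not conflict, so a join exists.
  tvm::Optional<tvm::VirtualDevice> joined = tvm::VirtualDevice::Join(lhs, rhs);
  ICHECK(joined.defined());

  // Default keeps lhs as-is and fills its unconstrained fields from rhs.
  tvm::VirtualDevice defaulted = tvm::VirtualDevice::Default(lhs, rhs);
  ICHECK_EQ(defaulted->memory_scope, "global");
}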
https://github.com/zk-ml/tachikoma
include/tvm/te/autodiff.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/te/autodiff.h * \brief Automatic differentiation of tensor expressions. */ #ifndef TVM_TE_AUTODIFF_H_ #define TVM_TE_AUTODIFF_H_ #include <tvm/runtime/object.h> #include <tvm/tir/expr.h> #include "tensor.h" namespace tvm { /*! \brief Tensor expression language DSL. */ namespace te { /*! * \brief Take the derivative of the expression with respect to the given variable. * \param expr The expression to differentiate. * \param var The variable to differentiate with respect to. * \return The expression for the derivative. */ PrimExpr Derivative(const PrimExpr& expr, const Var& var); /*! * \brief Get the tensor representing the Jacobian of the output with respect to the input. * * Note that if \p output depends on \p input indirectly (by using some other tensor * depending on \p input), this dependency won't contribute to the resulting Jacobian. * For such cases use the function ::Gradient. * * \param output The tensor to differentiate. * \param input The input tensor, which \p output should directly use. * \return The tensor representing the Jacobian of shape `output.shape + input.shape`. */ Tensor Jacobian(const Tensor& output, const Tensor& input); /*! * \brief The building block for reverse-mode AD. * * Differentiate \p output wrt \p input and multiply the result by \p head on the left using tensor * dot product. \p input must be an immediate dependency of \p output (must be called from within * the body of \p output). That is, the function will compute one summand of the adjoint for \p * input given the adjoint for \p output (which is called \p head here). * * \param output The tensor to differentiate. * \param input The input tensor, which \p output should directly use. * \param head The adjoint of \p output. Must be of shape `prefix + output.shape` * \return The tensor of shape `prefix + input.shape` * representing the partial adjoint of \p input wrt one of its consumers (output) */ Tensor VectorJacobianProduct(const Tensor& output, const Tensor& input, const Tensor& head); /*! * \brief Perform reverse mode automatic differentiation. * * Each item of the `result` field of the result is an adjoint for the corresponding item of * \p inputs, i.e. \p head multiplied by the Jacobian of \p output with respect to the * corresponding item of \p inputs. * * \param output The tensor to differentiate. * \param inputs The array of input tensors. When the array is empty, will perform differentiation * wrt all tensors the output depends on. * \param head The adjoint of the output, in other words, some tensor, by which the Jacobians * will be multiplied (using tensordot axes=`output.shape`). * Its shape must be of the form `prefix + output.shape`. 
If the null pointer is * provided, the identity tensor of shape `output.shape + output.shape` will be used. \return An * array of adjoints corresponding to \p inputs. */ TVM_DLL Array<Tensor> Gradient(const Tensor& output, const Array<Tensor>& inputs, const Tensor& head = Tensor()); } // namespace te } // namespace tvm #endif // TVM_TE_AUTODIFF_H_
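To ground the declarations above, a hedged sketch of reverse-mode AD on a tiny element-wise expression; placeholder and compute come from tvm/te/operation.h, and all shapes and names are illustrative:

#include <tvm/te/autodiff.h>
#include <tvm/te/operation.h>

void GradientDemo() {
  using namespace tvm;
  using namespace tvm::te;
  Array<PrimExpr> shape = {PrimExpr(4), PrimExpr(4)};
  Tensor x = placeholder(shape, DataType::Float(32), "x");
  // y(i, j) = x(i, j) * x(i, j)
  Tensor y = compute(
      shape, [&](Var i, Var j) { return x(i, j) * x(i, j); }, "y");
  // head omitted: the identity tensor of shape y.shape + y.shape is used,
  // so grads[0] is the adjoint of x, of shape y.shape + x.shape.
  Array<Tensor> grads = Gradient(y, {x});
}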
https://github.com/zk-ml/tachikoma
include/tvm/te/operation.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/te/operation.h * \brief Operation node can generate one or multiple Tensors */ #ifndef TVM_TE_OPERATION_H_ #define TVM_TE_OPERATION_H_ #include <tvm/arith/analyzer.h> #include <tvm/te/schedule.h> #include <tvm/te/tensor.h> #include <tvm/tir/buffer.h> #include <tvm/tir/expr.h> #include <tvm/tir/op.h> #include <string> #include <unordered_map> #include <vector> namespace tvm { /*! \brief Tensor expression language DSL. */ namespace te { /*! * \brief Temporary data structure to store union * of bounds of each axis of Tensor. */ struct TensorDom { // constructor explicit TensorDom(int ndim) : data(ndim) {} /*! \brief The domain data */ std::vector<std::vector<IntSet>> data; }; /*! * \brief Base class of all operation nodes */ class TVM_DLL OperationNode : public Object { public: /*! \brief optional name of the operation */ std::string name; /*! \brief optional tag of the operation */ std::string tag; /*! \brief additional attributes of the operation*/ Map<String, ObjectRef> attrs; // virtual destructor. virtual ~OperationNode() {} /*! \return number of outputs */ virtual int num_outputs() const = 0; /*! * \return The list of iteration variables at root * \note root_iter_vars decides the shape of the outputs. */ virtual Array<IterVar> root_iter_vars() const = 0; /*! * \brief Get the data type of the i-th output tensor. * \param i The output index. * \return type of i-th output. */ virtual DataType output_dtype(size_t i) const = 0; /*! * \brief Get shape of i-th output tensor. * \param i The output index. * \return shape of i-th output. */ virtual Array<PrimExpr> output_shape(size_t i) const = 0; /*! * \brief List all the input Tensors. * \return List of input tensors. */ virtual Array<Tensor> InputTensors() const = 0; /*! * \brief Replace the input of the operation by pattern specified by rmap. * * \param self The reference to self. * \param rmap The replacement map. * \return self if nothing is replaced, otherwise return replaced op. */ virtual Operation ReplaceInputs(const Operation& self, const std::unordered_map<Tensor, Tensor>& rmap) const = 0; /*! * \brief Propagate the bounds to inputs * \param self The reference to self. * \param analyzer The analyzer to be used in the function. * \param dom_map the domain map of Variables (corresponds to root_iter_vars) * \param out_dom_map The output domain. * The function is only asked to fill the bounds for Tensors that * are already in the out_dom_map */ virtual void PropBoundToInputs(const Operation& self, arith::Analyzer* analyzer, const std::unordered_map<const VarNode*, IntSet>& dom_map, std::unordered_map<Tensor, TensorDom>* out_dom_map) const = 0; /*! * \brief Gather the bound from output tensor. 
* Set the range of each root_iter_vars in the op to out_dom_map * * \param self The reference to self. * \param tensor_dom Domain map of Tensor->access set of each dimension. * \param out_dom_map The output domain map of each IterVar to be set. */ virtual void GatherBound(const Operation& self, const std::unordered_map<Tensor, TensorDom>& tensor_dom, std::unordered_map<IterVar, Range>* out_dom_map) const = 0; /*! * \brief Build the Realize statement that realizes * the op's output tensors. * \param stage the op's stage. * \param realize_map The realization domain map of the operators. * \param body The body that is going to get wrapped. * \param storage_scope The storage scope associated with this realization * \return A realization statement that wraps body. */ virtual Stmt BuildRealize(const Stage& stage, const std::unordered_map<IterVar, Range>& realize_map, const Stmt& body, String storage_scope = "") const = 0; /*! * \brief Build the statement that provides the output tensors. * \param stage The schedule stage of the op. * \param dom_map The domain map of all iteration domains. * \param debug_keep_trivial_loop Whether to keep trivial loops with extent of 1 * \return A statement that adds production and wraps the consumer. */ virtual Stmt BuildProvide(const Stage& stage, const std::unordered_map<IterVar, Range>& dom_map, bool debug_keep_trivial_loop) const = 0; static constexpr const char* _type_key = "Operation"; TVM_DECLARE_BASE_OBJECT_INFO(OperationNode, Object); }; /*! * \brief A placeholder op represents an input placeholder. */ class PlaceholderOpNode : public OperationNode { public: /*! \brief The shape of the input */ Array<PrimExpr> shape; /*! \brief The data type of the input. */ DataType dtype; // override behavior. int num_outputs() const final; Array<IterVar> root_iter_vars() const final; DataType output_dtype(size_t i) const final; Array<PrimExpr> output_shape(size_t i) const final; Array<Tensor> InputTensors() const final; Operation ReplaceInputs(const Operation& self, const std::unordered_map<Tensor, Tensor>& rmap) const final; void PropBoundToInputs(const Operation& self, arith::Analyzer* analyzer, const std::unordered_map<const VarNode*, IntSet>& dom_map, std::unordered_map<Tensor, TensorDom>* out_dom_map) const final; void GatherBound(const Operation& self, const std::unordered_map<Tensor, TensorDom>& tensor_dom, std::unordered_map<IterVar, Range>* out_dom_map) const final; Stmt BuildRealize(const Stage& stage, const std::unordered_map<IterVar, Range>& realize_map, const Stmt& body, String storage_scope = "") const final; Stmt BuildProvide(const Stage& stage, const std::unordered_map<IterVar, Range>& dom_map, bool debug_keep_trivial_loop) const final; void VisitAttrs(AttrVisitor* v) { v->Visit("name", &name); v->Visit("tag", &tag); v->Visit("attrs", &attrs); v->Visit("shape", &shape); v->Visit("dtype", &dtype); } static constexpr const char* _type_key = "PlaceholderOp"; TVM_DECLARE_FINAL_OBJECT_INFO(PlaceholderOpNode, OperationNode); }; /*! * \brief Managed reference to PlaceholderOpNode * \sa PlaceholderOpNode */ class PlaceholderOp : public Operation { public: TVM_DLL PlaceholderOp(std::string name, Array<PrimExpr> shape, DataType dtype); TVM_DEFINE_OBJECT_REF_METHODS(PlaceholderOp, Operation, PlaceholderOpNode); }; /*! * \brief A Compute op that computes a tensor on a certain domain. 
* This is the base class for ComputeOp (operating on a scalar at a time) and * TensorComputeOp (operating on a TensorSlice at a time) */ class TVM_DLL BaseComputeOpNode : public OperationNode { public: /*! \brief IterVar on each axis */ Array<IterVar> axis; /*! \brief IterVar on each reduction axis, if the body is a Reduce */ Array<IterVar> reduce_axis; // override functions Array<IterVar> root_iter_vars() const final; Array<PrimExpr> output_shape(size_t idx) const final; void GatherBound(const Operation& self, const std::unordered_map<Tensor, TensorDom>& tensor_dom, std::unordered_map<IterVar, Range>* out_dom_map) const final; Stmt BuildRealize(const Stage& stage, const std::unordered_map<IterVar, Range>& realize_map, const Stmt& body, String storage_scope = "") const final; virtual size_t num_schedulable_dims() const = 0; static constexpr const char* _type_key = "BaseComputeOp"; TVM_DECLARE_BASE_OBJECT_INFO(BaseComputeOpNode, OperationNode); }; /*! * \brief A Compute op that computes a tensor on a certain domain. */ class TVM_DLL ComputeOpNode : public BaseComputeOpNode { public: /*! \brief the compute expression */ Array<PrimExpr> body; /*! \brief constructor */ ComputeOpNode() {} // override functions int num_outputs() const final; DataType output_dtype(size_t i) const final; Array<Tensor> InputTensors() const final; Operation ReplaceInputs(const Operation& self, const std::unordered_map<Tensor, Tensor>& rmap) const final; void PropBoundToInputs(const Operation& self, arith::Analyzer* analyzer, const std::unordered_map<const VarNode*, IntSet>& dom_map, std::unordered_map<Tensor, TensorDom>* out_dom_map) const final; Stmt BuildProvide(const Stage& stage, const std::unordered_map<IterVar, Range>& dom_map, bool debug_keep_trivial_loop) const final; size_t num_schedulable_dims() const final; void VisitAttrs(AttrVisitor* v) { v->Visit("name", &name); v->Visit("tag", &tag); v->Visit("attrs", &attrs); v->Visit("axis", &axis); v->Visit("reduce_axis", &reduce_axis); v->Visit("body", &body); } static constexpr const char* _type_key = "ComputeOp"; TVM_DECLARE_FINAL_OBJECT_INFO(ComputeOpNode, BaseComputeOpNode); }; /*! * \brief Managed reference to ComputeOpNode * \sa ComputeOpNode */ class ComputeOp : public Operation { public: TVM_DLL ComputeOp(std::string name, std::string tag, Map<String, ObjectRef> attrs, Array<IterVar> axis, Array<PrimExpr> body); TVM_DEFINE_OBJECT_REF_METHODS(ComputeOp, Operation, ComputeOpNode); TVM_DEFINE_OBJECT_REF_COW_METHOD(ComputeOpNode); }; /*! * \brief A TensorCompute op that computes a tensor with a tensor intrinsic. */ class TensorComputeOpNode : public BaseComputeOpNode { public: /*! \brief number of axes that can be scheduled */ int schedulable_ndim; /*! \brief TensorIntrin used to compute */ TensorIntrin intrin; /*! \brief input tensors of intrin */ Array<Tensor> inputs; /*! \brief region of input tensors */ Array<Region> input_regions; /*! \brief scalar expression inputs */ Array<PrimExpr> scalar_inputs; /*! 
\brief constructor */ TensorComputeOpNode() {} // override functions int num_outputs() const final; DataType output_dtype(size_t i) const final; Array<Tensor> InputTensors() const final; Operation ReplaceInputs(const Operation& self, const std::unordered_map<Tensor, Tensor>& rmap) const final; void PropBoundToInputs(const Operation& self, arith::Analyzer* analyzer, const std::unordered_map<const VarNode*, IntSet>& dom_map, std::unordered_map<Tensor, TensorDom>* out_dom_map) const final; Stmt BuildProvide(const Stage& stage, const std::unordered_map<IterVar, Range>& dom_map, bool debug_keep_trivial_loop) const final; size_t num_schedulable_dims() const final; void VisitAttrs(AttrVisitor* v) { v->Visit("name", &name); v->Visit("tag", &tag); v->Visit("axis", &axis); v->Visit("reduce_axis", &reduce_axis); v->Visit("schedulable_ndim", &schedulable_ndim); v->Visit("intrin", &intrin); v->Visit("inputs", &inputs); v->Visit("input_regions", &input_regions); v->Visit("scalar_inputs", &scalar_inputs); } static constexpr const char* _type_key = "TensorComputeOp"; TVM_DECLARE_FINAL_OBJECT_INFO(TensorComputeOpNode, BaseComputeOpNode); }; /*! * \brief Managed reference to TensorComputeOpNode * \sa TensorComputeOpNode */ class TensorComputeOp : public Operation { public: TVM_DLL TensorComputeOp(std::string name, std::string tag, Array<IterVar> axis, Array<IterVar> reduce_axis, int schedulable_ndim, TensorIntrin intrin, Array<Tensor> tensors, Array<Region> regions, Array<PrimExpr> scalar_inputs); TVM_DEFINE_OBJECT_REF_METHODS(TensorComputeOp, Operation, TensorComputeOpNode); }; /*! * \brief Symbolic scan. */ class ScanOpNode : public OperationNode { public: /*! \brief IterVar to scan over */ IterVar scan_axis; /*! \brief the initialization tensors */ Array<Tensor> init; /*! \brief the update function represented by tensor */ Array<Tensor> update; /*! \brief The placeholder to refer as states in update. */ Array<Tensor> state_placeholder; /*! * \brief the inputs to the scan; these are optionally provided, * but they can be helpful as hints to speed up construction of the scan body. */ Array<Tensor> inputs; /*! * \brief Spatial axis to indicate spatial dimension of each output. * They correspond to the flattened spatial axes of the outputs. * * [output[0].axis[1], output[0].axis[2]... output[k].axis[j]...] * These are auxiliary data structures for storing the result of bound inference. * They do not correspond to splittable iterations, thus the name comes * with an underscore. */ Array<IterVar> spatial_axis_; /*! \brief constructor */ ScanOpNode() {} // override behavior. 
int num_outputs() const final; Array<IterVar> root_iter_vars() const final; DataType output_dtype(size_t i) const final; Array<PrimExpr> output_shape(size_t i) const final; Array<Tensor> InputTensors() const final; Operation ReplaceInputs(const Operation& self, const std::unordered_map<Tensor, Tensor>& rmap) const final; void PropBoundToInputs(const Operation& self, arith::Analyzer* analyzer, const std::unordered_map<const VarNode*, IntSet>& dom_map, std::unordered_map<Tensor, TensorDom>* out_dom_map) const final; void GatherBound(const Operation& self, const std::unordered_map<Tensor, TensorDom>& tensor_dom, std::unordered_map<IterVar, Range>* out_dom_map) const final; Stmt BuildRealize(const Stage& stage, const std::unordered_map<IterVar, Range>& realize_map, const Stmt& body, String storage_scope = "") const final; Stmt BuildProvide(const Stage& stage, const std::unordered_map<IterVar, Range>& dom_map, bool debug_keep_trivial_loop) const final; void VisitAttrs(AttrVisitor* v) { v->Visit("name", &name); v->Visit("tag", &tag); v->Visit("attrs", &attrs); v->Visit("scan_axis", &scan_axis); v->Visit("init", &init); v->Visit("update", &update); v->Visit("state_placeholder", &state_placeholder); v->Visit("inputs", &inputs); v->Visit("spatial_axis_", &spatial_axis_); } static constexpr const char* _type_key = "ScanOp"; TVM_DECLARE_FINAL_OBJECT_INFO(ScanOpNode, OperationNode); }; /*! * \brief Managed reference to ScanOpNode * \sa ScanOpNode */ class ScanOp : public Operation { public: TVM_DLL ScanOp(std::string name, std::string tag, Map<String, ObjectRef> attrs, IterVar axis, Array<Tensor> init, Array<Tensor> update, Array<Tensor> state_placeholder, Array<Tensor> input); TVM_DEFINE_OBJECT_REF_METHODS(ScanOp, Operation, ScanOpNode); }; /*! * \brief External computation that cannot be split. */ class ExternOpNode : public OperationNode { public: /*! \brief The input tensors */ Array<Tensor> inputs; /*! \brief Symbolic placeholder representation of inputs */ Array<Buffer> input_placeholders; /*! \brief Symbolic placeholder representation of outputs */ Array<Buffer> output_placeholders; /*! \brief the statement that generates the computation. */ Stmt body; /*! 
\brief constructor */ ExternOpNode() {} // override functions int num_outputs() const final; Array<IterVar> root_iter_vars() const final; DataType output_dtype(size_t i) const final; Array<PrimExpr> output_shape(size_t i) const final; Array<Tensor> InputTensors() const final; Operation ReplaceInputs(const Operation& self, const std::unordered_map<Tensor, Tensor>& rmap) const final; void PropBoundToInputs(const Operation& self, arith::Analyzer* analyzer, const std::unordered_map<const VarNode*, IntSet>& dom_map, std::unordered_map<Tensor, TensorDom>* out_dom_map) const final; void GatherBound(const Operation& self, const std::unordered_map<Tensor, TensorDom>& tensor_dom, std::unordered_map<IterVar, Range>* out_dom_map) const final; Stmt BuildRealize(const Stage& stage, const std::unordered_map<IterVar, Range>& realize_map, const Stmt& body, String storage_scope = "") const final; Stmt BuildProvide(const Stage& stage, const std::unordered_map<IterVar, Range>& dom_map, bool debug_keep_trivial_loop) const final; void VisitAttrs(AttrVisitor* v) { v->Visit("name", &name); v->Visit("tag", &tag); v->Visit("attrs", &attrs); v->Visit("inputs", &inputs); v->Visit("input_placeholders", &input_placeholders); v->Visit("output_placeholders", &output_placeholders); v->Visit("body", &body); } static constexpr const char* _type_key = "ExternOp"; TVM_DECLARE_FINAL_OBJECT_INFO(ExternOpNode, OperationNode); }; /*! * \brief Managed reference to ExternOpNode * \sa ExternOpNode */ class ExternOp : public Operation { public: TVM_DLL ExternOp(std::string name, std::string tag, Map<String, ObjectRef> attrs, Array<Tensor> inputs, Array<Buffer> input_placeholders, Array<Buffer> output_placeholders, Stmt body); TVM_DEFINE_OBJECT_REF_METHODS(ExternOp, Operation, ExternOpNode); }; /*! * \brief A computation operator that is generated by hybrid script. */ class HybridOpNode : public OperationNode { public: /*! \brief The input tensors */ Array<Tensor> inputs; /*! \brief Symbolic placeholder representation of outputs */ Array<Tensor> outputs; /*! \brief The axis of iterations */ Array<IterVar> axis; /*! \brief the statement that generates the computation. This is * slightly different from the body in ExternOpNode. All the output * tensors keep their own names specified by users in the script. * However, during compilation, these tensors will be replaced by the * actual output tensors. */ Stmt body; /*! 
\brief constructor */ HybridOpNode() {} // override functions int num_outputs() const final; Array<IterVar> root_iter_vars() const final; DataType output_dtype(size_t i) const final; Array<PrimExpr> output_shape(size_t i) const final; Array<Tensor> InputTensors() const final; Operation ReplaceInputs(const Operation& self, const std::unordered_map<Tensor, Tensor>& rmap) const final; void PropBoundToInputs(const Operation& self, arith::Analyzer* analyzer, const std::unordered_map<const VarNode*, IntSet>& dom_map, std::unordered_map<Tensor, TensorDom>* out_dom_map) const final; void GatherBound(const Operation& self, const std::unordered_map<Tensor, TensorDom>& tensor_dom, std::unordered_map<IterVar, Range>* out_dom_map) const final; Stmt BuildRealize(const Stage& stage, const std::unordered_map<IterVar, Range>& realize_map, const Stmt& body, String storage_scope = "") const final; Stmt BuildProvide(const Stage& stage, const std::unordered_map<IterVar, Range>& dom_map, bool debug_keep_trivial_loop) const final; void VisitAttrs(AttrVisitor* v) { v->Visit("name", &name); v->Visit("tag", &tag); v->Visit("attrs", &attrs); v->Visit("inputs", &inputs); v->Visit("outputs", &outputs); v->Visit("axis", &axis); v->Visit("body", &body); } static constexpr const char* _type_key = "HybridOp"; TVM_DECLARE_FINAL_OBJECT_INFO(HybridOpNode, OperationNode); }; /*! * \brief Managed reference to HybridOpNode * \sa HybridOpNode */ class HybridOp : public Operation { public: TVM_DLL HybridOp(std::string name, std::string tag, Map<String, ObjectRef> attrs, Array<Tensor> inputs, Array<Tensor> outputs, Stmt body); TVM_DEFINE_OBJECT_REF_METHODS(HybridOp, Operation, HybridOpNode); }; /*! * \brief Construct a new Var expression * \param name_hint The name hint for the expression * \param t The type of the expression */ TVM_DLL Var var(std::string name_hint, DataType t = DataType::Int(32)); /*! * \brief Create a new IterVar that represents a thread axis. * * \param dom Optional, domain of the thread axis. * \param tag The thread tag of the axis. */ TVM_DLL IterVar thread_axis(Range dom, std::string tag); /*! * \brief Create a new IterVar for reduction operations. * * \param dom The domain of the reduction axis. * \param name The name of the reduction axis. */ TVM_DLL IterVar reduce_axis(Range dom, std::string name = "rv"); /*! \brief The compute function to specify the input source of a Tensor */ using FCompute = std::function<PrimExpr(const Array<Var>& i)>; /*! \brief The compute function to specify the input sources of Tensors */ using FBatchCompute = std::function<Array<PrimExpr>(const Array<Var>& i)>; /*! * \brief create a placeholder tensor. * \param shape The shape of the tensor. * \param dtype the data type of the tensor. * \param name The name of the Tensor. */ TVM_DLL Tensor placeholder(Array<PrimExpr> shape, DataType dtype = DataType::Float(32), std::string name = "placeholder"); /*! * \brief Construct a new tensor by computing over shape, * using the computation rule: result_tensor[axis] = fcompute(axis) * \param shape Shape of the tensor. * \param fcompute The compute function to create the tensor. * \param name The optional name of the tensor. * \param tag The optional tag of the tensor. * \param attrs Optional additional attributes of the compute. */ TVM_DLL Tensor compute(Array<PrimExpr> shape, FCompute fcompute, std::string name = "tensor", std::string tag = "", Map<String, ObjectRef> attrs = {}); /*! 
* \brief Construct a new tensor by computing over shape, * using the computation rule: result_tensor[axis] = fcompute(axis) * \param shape Shape of the tensor. * \param fcompute The compute function to create the tensors. * \param name The optional name of the tensor. * \param tag The optional tag of the tensor. * \param attrs Optional additional attributes of the compute. */ TVM_DLL Array<Tensor> compute(Array<PrimExpr> shape, FBatchCompute fcompute, std::string name = "tensor", std::string tag = "", Map<String, ObjectRef> attrs = {}); /*! * \brief Construct new tensors by scan. * * \param init The initial state tensors of the first K steps. * \param update The update tensors indicating the updated result after each time step. * \param state_placeholder The placeholder for the states. * \param inputs The inputs to the scan body, this is optional, * but recommended to provide concrete information about the scan body. * \param name The optional name of the tensor. * \param tag The optional tag of the tensor. * \param attrs Optional additional attributes of the compute. */ TVM_DLL Array<Tensor> scan(Array<Tensor> init, Array<Tensor> update, Array<Tensor> state_placeholder, Array<Tensor> inputs = Array<Tensor>(), std::string name = "scan", std::string tag = "", Map<String, ObjectRef> attrs = {}); // same as compute, specialized for different fcompute function inline Tensor compute(Array<PrimExpr> shape, std::function<PrimExpr(Var)> f, std::string name = "tensor", std::string tag = "", Map<String, ObjectRef> attrs = {}) { FCompute fc = [f](const Array<Var>& i) { return f(i[0]); }; return compute(shape, fc, name, tag, attrs); } inline Tensor compute(Array<PrimExpr> shape, std::function<PrimExpr(Var, Var)> f, std::string name = "tensor", std::string tag = "", Map<String, ObjectRef> attrs = {}) { FCompute fc = [f](const Array<Var>& i) { return f(i[0], i[1]); }; return compute(shape, fc, name, tag, attrs); } inline Tensor compute(Array<PrimExpr> shape, std::function<PrimExpr(Var, Var, Var)> f, std::string name = "tensor", std::string tag = "", Map<String, ObjectRef> attrs = {}) { FCompute fc = [f](const Array<Var>& i) { return f(i[0], i[1], i[2]); }; return compute(shape, fc, name, tag, attrs); } inline Tensor compute(Array<PrimExpr> shape, std::function<PrimExpr(Var, Var, Var, Var)> f, std::string name = "tensor", std::string tag = "", Map<String, ObjectRef> attrs = {}) { FCompute fc = [f](const Array<Var>& i) { return f(i[0], i[1], i[2], i[3]); }; return compute(shape, fc, name, tag, attrs); } // inline function. inline const OperationNode* Operation::operator->() const { return static_cast<const OperationNode*>(get()); } } // namespace te } // namespace tvm #endif  // TVM_TE_OPERATION_H_
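A minimal usage sketch of the placeholder/compute API declared above; the tensor and variable names are illustrative, not part of the header:

// Sketch: build a 2-D elementwise computation with te::compute.
#include <tvm/te/operation.h>

using namespace tvm;
using namespace tvm::te;

void ExampleCompute() {
  Var m("m"), n("n");
  // Symbolic input tensor of shape (m, n).
  Tensor A = placeholder({m, n}, DataType::Float(32), "A");
  // B[i, j] = A[i, j] + 1, via the two-argument FCompute overload above.
  Tensor B = compute(
      {m, n}, [&](Var i, Var j) { return A(i, j) + 1.0f; }, "B");
}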
include/tvm/te/schedule.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements.  See the NOTICE file * distributed with this work for additional information * regarding copyright ownership.  The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License.  You may obtain a copy of the License at * *   http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied.  See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/te/schedule.h * \brief Define a schedule. */ // Acknowledgement: Many schedule primitives originate from Halide and Loopy. #ifndef TVM_TE_SCHEDULE_H_ #define TVM_TE_SCHEDULE_H_ #include <tvm/support/with.h> #include <tvm/te/tensor.h> #include <tvm/te/tensor_intrin.h> #include <tvm/tir/expr.h> #include <tvm/tir/index_map.h> #include <string> #include <unordered_map> namespace tvm { namespace te { // Node container for Stage class StageNode; // Node container for Schedule class ScheduleNode; // Node container for IterVarRelation class IterVarRelationNode; // Attribute of itervar. class IterVarAttrNode; /*! \brief the attachment type */ enum AttachType : int { kGroupRoot = 1, kInline = 2, kInlinedAlready = 3, kScope = 4, kScanUpdate = 5 }; /*! \brief Stage, contains scheduling for a stage of computation. */ class Stage : public ObjectRef { public: Stage() {} explicit Stage(ObjectPtr<Object> n) : ObjectRef(n) {} /*! * \brief create a new schedule for op. * \param op The operator in the schedule */ explicit Stage(Operation op); /*! * \brief access the internal node container * \return the pointer to the internal node container */ inline const StageNode* operator->() const; /*! * \brief access the internal node container * \return the pointer to the internal node container */ inline StageNode* operator->(); /*! * \brief set the memory scope of the stage * \param scope The memory scope. */ TVM_DLL Stage& set_scope(std::string scope);  // NOLINT(*) /*! * \brief specify the schedule to be computed at the parent schedule's scope. * \param parent The parent schedule. * \param scope The iteration point to carry the schedule. * \return reference to self. */ TVM_DLL Stage& compute_at(Stage parent, IterVar scope);  // NOLINT(*) /*! * \brief Compute the function inline. * \return reference to self. */ TVM_DLL Stage& compute_inline();  // NOLINT(*) /*! * \brief Compute the function at group root. * \return reference to self. */ TVM_DLL Stage& compute_root();  // NOLINT(*) /*! * \brief Bind the IterVar to thread index. * * \param ivar The IterVar to be bound. * \param thread_ivar The thread axis to be bound. * \return reference to self. */ TVM_DLL Stage& bind(IterVar ivar, IterVar thread_ivar); /*! * \brief Set the predicate to determine whether a store to the array should be performed. * Use this when there are multiple threads performing the same store and we only * need one of them to do the store. * * \note This is a dangerous scheduling primitive that can change the behavior of the program. * Only do this when we are certain that there are duplicated stores. * \param predicate The condition to be checked. * \return reference to self. */ TVM_DLL Stage& set_store_predicate(PrimExpr predicate); /*! 
* \brief Specify environment threads that are launched around the group's scope. * This can only be used in a group stage. * \param threads The threads to be launched around the scope. * \note Each thread can only appear in one env_threads. * This is a beta feature. * \return reference to self. */ TVM_DLL Stage& env_threads(Array<IterVar> threads); /*! * \brief Split the parent domain by factor, generating an outer and an inner domain. * \param parent The parent iteration domain. * \param factor The split factor of the loop. * \param p_outer The result outer domain. * \param p_inner The result inner domain. * \return reference to self. */ TVM_DLL Stage& split(IterVar parent, PrimExpr factor, IterVar* p_outer, IterVar* p_inner);  // NOLINT(*) /*! * \brief Split the iteration with given number of parts. * * \param parent The parent domain. * \param nparts The number of parts in the outer domain. * \param p_outer The result outer domain. * \param p_inner The result inner domain. * \return reference to self. */ TVM_DLL Stage& split_by_nparts(IterVar parent, PrimExpr nparts, IterVar* p_outer, IterVar* p_inner);  // NOLINT(*) /*! * \brief Fuse the outer and inner domains into the target domain. * \param outer The outer domain to be fused. * \param inner The inner domain to be fused. * \param p_target The result target domain. * \return reference to self. */ TVM_DLL Stage& fuse(IterVar outer, IterVar inner, IterVar* p_target);  // NOLINT(*) /*! * \brief Fuse all the axes together into a single axis. * * \param axes All the axes to be fused. * \param p_target The result target domain. * * \note axes can be an empty array, * in that case, a singleton IterVar is created and * inserted to the outermost loop. * The fuse of empty array is used to support zero-dimension tensors. * * \return reference to self. */ TVM_DLL Stage& fuse(const Array<IterVar>& axes, IterVar* p_target);  // NOLINT(*) /*! * \brief Reorder the iteration * \param order The order of iteration variable. * \return reference to self. */ TVM_DLL Stage& reorder(const Array<IterVar>& order);  // NOLINT(*) /*! * \brief Perform tiling on two dimensions * The final loop order from outermost to innermost is * [x_outer, y_outer, x_inner, y_inner] * * \param x_parent The original x dimension * \param y_parent The original y dimension * \param x_factor The stride factor on x axis * \param y_factor The stride factor on y axis * \param p_x_outer Outer axis of x dimension * \param p_y_outer Outer axis of y dimension * \param p_x_inner Inner axis of x dimension * \param p_y_inner Inner axis of y dimension * \return reference to self. */ TVM_DLL Stage& tile(IterVar x_parent, IterVar y_parent,  // NOLINT(*) PrimExpr x_factor, PrimExpr y_factor, IterVar* p_x_outer, IterVar* p_y_outer, IterVar* p_x_inner, IterVar* p_y_inner); /*! * \brief Vectorize iteration. * \param var The axis to be vectorized. * \return reference to self. */ TVM_DLL Stage& vectorize(IterVar var);  // NOLINT(*) /*! * \brief Replace computation of the current stage by tensor intrinsic f. * \param var The axis that marks the beginning of tensorization. * Every operation inside the axis (including the axis itself) is tensorized. * \param f The tensor compute intrinsic. * \return reference to self. */ TVM_DLL Stage& tensorize(IterVar var, TensorIntrin f);  // NOLINT(*) /*! * \brief Unroll iteration. * \param var The axis to be unrolled. * \return reference to self. */ TVM_DLL Stage& unroll(IterVar var);  // NOLINT(*) /*! * \brief Parallelize iteration. * \param var The axis to be parallelized. * \return reference to self. 
*/ TVM_DLL Stage& parallel(IterVar var);  // NOLINT(*) /*! * \brief Annotate the iteration with pragma * * \param var The axis to be annotated. * \param pragma_type The pragma type. * \param pragma_value The pragma value * * \return reference to self. */ TVM_DLL Stage& pragma(IterVar var, const std::string& pragma_type, const PrimExpr& pragma_value = PrimExpr());  // NOLINT(*) /*! * \brief Fetch data in advance. * \param domain the tensor to be prefetched * \param var the iteration point at which to apply prefetching * \param offset the number of iterations to be fetched in advance * \return reference to self */ TVM_DLL Stage& prefetch(const Tensor& domain, IterVar var, PrimExpr offset);  // NOLINT(*) /*! * \brief Set alignment requirement for specific dimension. * * Such that stride[axis] == k * factor + offset for some k. * * \param axis The dimension to be specified for alignment. * \param factor The factor multiple of alignment * \param offset The required offset factor. * \return reference to self */ TVM_DLL Stage& storage_align(IterVar axis, int factor, int offset);  // NOLINT(*) /*! * \brief Compute current stage with double buffering. * \return reference to self. */ TVM_DLL Stage& double_buffer();  // NOLINT(*) /*! * \brief Compute current stage with rolling buffering. * \return reference to self. */ TVM_DLL Stage& rolling_buffer();  // NOLINT(*) /*! * \brief Defines a layout transformation to be applied to the buffer. * * The map from initial_index to final_index must be an * invertible affine transformation. * * \param initial_indices An array of variables to represent a * value's location in the tensor, using the pre-transformation * layout.  These variables are used as binding occurrences to * represent the initial indices when applying the initial->final * mapping, and should not occur elsewhere in the * Schedule. (i.e. Pass in newly constructed variables, not the * initial IterVar::var) * * \param final_indices An array of expressions, giving the * value's location in the tensor, using the post-transformation layout. * Expressions should be in terms of the variables given in * initial_indices. * * \param out_iter_vars An optional output location for the updated * loop iteration variables. * * \return reference to self */ TVM_DLL Stage& transform_layout(const Array<Var>& initial_indices, const Array<PrimExpr>& final_indices, Array<IterVar>* out_iter_vars = nullptr); /*! \brief Defines separators between groups of axes. * * Used to define `BufferNode::axis_separators`, which has * additional details. * * \param axis_separators A list of axis separators. */ TVM_DLL Stage& set_axis_separators(const Array<IntImm>& axis_separators); /*! * \brief whether the stage has been scheduled. * \return whether the stage has been scheduled. */ bool is_scheduled() const; /*! * \brief Get attachment spec of current stage. * If the stage is computed at the group root, this function * will traverse the group to get the * final spec from the group. * \return A stage representing the attach spec of the group. */ Stage GetAttachSpec() const; // declare container type using ContainerType = StageNode; }; /*! * \brief Global schedule container * For operations and all the operations they depend on. * The schedule per Operation is called a stage. */ class Schedule : public ObjectRef { public: Schedule() {} explicit Schedule(ObjectPtr<Object> n) : ObjectRef(n) {} /*! * \brief Create a schedule for an array of ops (and their dependencies). * \param ops The ops to be scheduled. * \return sch The created Schedule. 
*/ TVM_DLL explicit Schedule(Array<Operation> ops); /*! * \brief Get a copy of current schedule. * \return The copied schedule. */ Schedule copy() const; /*! * \brief Get the stage corresponding to the op. * \param op The operation. */ TVM_DLL Stage operator[](const Operation& op); /*! * \brief Short hand for getting the stage of tensor's operation. * \param tensor The tensor * \return The stage corresponding to the tensor's op */ TVM_DLL Stage operator[](const Tensor& tensor) { return this->operator[](tensor->op); } /*! * \brief Create a new stage group for all intermediate * operations between inputs and outputs. * * \param outputs The output boundary of the group. * \param inputs The input boundary of the group. * \param include_inputs Whether to include inputs if they are reachable from outputs. * \return The new grouped stage. */ TVM_DLL Stage create_group(const Array<Tensor>& outputs, const Array<Tensor>& inputs, bool include_inputs = false); /*! * \brief create a cache read of the original tensor for readers. * This will mutate the body of the readers. * A new stage will be created for the tensor. * \param tensor The tensor to be cached. * \param scope The scope of the cache. * \param readers The readers to redirect to the tensor. * \return The created tensor. */ TVM_DLL Tensor cache_read(const Tensor& tensor, const std::string& scope, const Array<Operation>& readers); /*! * \brief Create a cache write tensor for producing the tensor. * The cache tensor will take over the body of the original tensor op. * * This function can be used to do data layout transformation. * If there is a split/fuse/reorder on the data-parallel axis of the tensor * before cache_write is called, the intermediate cache stores * the data in the layout given by the iteration order of the leaf axes. * The data will be transformed back to the original layout in the original tensor. * Users can further call compute_inline to inline the original layout and keep * the data stored in the transformed layout. * * \param tensor The tensors to be produced. * \param scope The scope of the storage. * \return The created tensor. */ TVM_DLL Array<Tensor> cache_write(const Array<Tensor>& tensor, const std::string& scope); /*! * \brief Create a cache write tensor for producing the tensor. * The cache tensor will take over the body of the original tensor op. * * This function can be used to do data layout transformation. * If there is a split/fuse/reorder on the data-parallel axis of the tensor * before cache_write is called, the intermediate cache stores * the data in the layout given by the iteration order of the leaf axes. * The data will be transformed back to the original layout in the original tensor. * Users can further call compute_inline to inline the original layout and keep * the data stored in the transformed layout. * * \param tensor The tensor to be produced. * \param scope The scope of the storage. * \return The created tensor. */ TVM_DLL Tensor cache_write(const Tensor& tensor, const std::string& scope); /*! * \brief Factor a reduction axis in tensor's schedule to be an explicit axis. * This will create a new stage that generates the new tensor with axis * as the first dimension. The tensor's body will be rewritten as a reduction * over the factored tensor. * * P. Suriana, A. Adams and S. Kamil. Parallel associative reductions in Halide. CGO'17 * * \param tensor The tensor to be factored. * \param axis The reduction axis in tensor's schedule to be factored. * \param factor_axis The position where the new axis is placed. * \return The created factored tensors. 
*/ TVM_DLL Array<Tensor> rfactor(const Tensor& tensor, const IterVar& axis, int factor_axis = 0); /*! * \brief Normalize the schedule. * This is needed before bound inference. * Insert necessary RebaseNode to make sure all leaf_iter_vars * are in the form [0, extent). * * \return A normalized schedule, which may be the same as the current one. */ Schedule normalize(); /*! * \brief Normalize the schedule for feature extraction in auto-scheduler. * This is similar to `Schedule::normalize`, but we do aggressive simplification * to the TE compute with const_matrix=True for faster compilation and feature extraction. * The resulting schedule may be wrong, but it is good enough for feature extraction * purposes. * * \return A normalized schedule, which may be the same as the current one. */ Schedule normalize_for_feature_extraction(); /*! * \brief access the internal node container * \return the pointer to the internal node container */ inline const ScheduleNode* operator->() const; /*! * \brief access the internal node container * \return the pointer to the internal node container */ inline ScheduleNode* operator->(); // declare container type using ContainerType = ScheduleNode; }; /*! * \brief The schedule relation between IterVars; * can be Split or Fuse. */ class IterVarRelation : public ObjectRef { public: IterVarRelation() {} explicit IterVarRelation(ObjectPtr<Object> n) : ObjectRef(n) {} /*! * \brief access the internal node container * \return the pointer to the internal node container */ inline const IterVarRelationNode* operator->() const; }; /*! * \brief Additional schedulable attributes about IterVar. */ class IterVarAttr : public ObjectRef { public: IterVarAttr() {} explicit IterVarAttr(ObjectPtr<Object> n) : ObjectRef(n) {} /*! * \brief access the internal node container * \return the pointer to the internal node container */ inline const IterVarAttrNode* operator->() const; }; /*! * \brief represents a stage. * * relations form a directed acyclic hypergraph in a bipartite manner. * Each node is represented by an IterVar, * and each hyper-edge is represented by an IterVarRelation. * The relations connect the IterVars in the graph. * * Besides the typical stages that correspond to operations, * there are also group stages, which group stages together. * Each stage's group (given by the group field) represents a constraint: * the stage can only be attached to stages within the group. * * The group stage node can be attached to IterVars as in a normal stage. */ class StageNode : public Object { public: /*! * \brief The operation of stage, can be different from original op. * If it is null, then this stage is a group stage. */ Operation op; /*! * \brief The original operator. * The op field can change during schedule to alternate the dataflow, * while origin_op remains fixed. */ Operation origin_op; /*! \brief All the nodes in the iter var * * Each element of all_iter_vars represents an iteration variable * that may appear within this stage's computation.  Any element * of `all_iter_vars` that is in `leaf_iter_vars` represents a * variable that is directly defined and usable within the stage's * computation.  All other elements of `all_iter_vars` represent * variables whose value must be computed from the variables in * `leaf_iter_vars`.  (e.g. suppose index k has been split by * ``ko, ki = s.split(k, factor=4)``: ko and ki will appear in * `leaf_iter_vars`, while k will not, and must be computed as * `4*ko + ki`.) */ Array<IterVar> all_iter_vars; /*! \brief The current active leaf iter vars in the stage. 
* * Each element of leaf_iter_vars will either be replaced with the * bound index (e.g. threadIdx.x), or will be expanded into a loop * over the variable's extent. `leaf_iter_vars` is a subset of * `all_iter_vars`. */ Array<IterVar> leaf_iter_vars; /*! * \brief Specify threads to be launched at the stage. * This is only valid for composite ops such as Scan. * \note Experimental primitive: used for thread persistence. */ Array<IterVar> env_threads; /*! * \brief The predicate under which store can happen. * Use this when there can be duplicated threads doing the same store. * \note Experimental primitive: used by cross thread-reduction. */ PrimExpr store_predicate; /*! \brief The relations between IterVars */ Array<IterVarRelation> relations; /*! \brief additional attributes about iter var. */ Map<IterVar, IterVarAttr> iter_var_attrs; /*! \brief The attachment type of the schedule */ AttachType attach_type{kGroupRoot}; /*! \brief The attach point of this schedule. */ IterVar attach_ivar; /*! \brief The stage this node attaches to */ Stage attach_stage; /*! \brief The thread storage scope level of the stage */ std::string scope; /*! \brief Whether this is an output stage */ bool is_output{false}; /*! \brief Whether apply double buffer optimization to this stage */ bool double_buffer{false}; /*! \brief Whether apply rolling buffer optimization to this stage */ bool rolling_buffer{false}; /*! \brief Layout transformations to be applied onto the stage's tensors. */ Array<IndexMap> layout_transforms; /*! \brief List of axes after which to divide physical axes. * * Used to populate `BufferNode::axis_separators`, which has * additional details. */ Array<IntImm> axis_separators; /*! * \brief The parent group of the current stage. * The stage cannot be assigned to stages outside the group. */ Stage group; /*! \brief Number of direct child stages, only used for group stage.*/ int num_child_stages{0}; void VisitAttrs(AttrVisitor* v) { v->Visit("op", &op); v->Visit("origin_op", &origin_op); v->Visit("all_iter_vars", &all_iter_vars); v->Visit("leaf_iter_vars", &leaf_iter_vars); v->Visit("env_threads", &env_threads); v->Visit("relations", &relations); v->Visit("iter_var_attrs", &iter_var_attrs); v->Visit("attach_type", &attach_type); v->Visit("attach_ivar", &attach_ivar); v->Visit("attach_stage", &attach_stage); v->Visit("scope", &scope); v->Visit("is_output", &is_output); v->Visit("double_buffer", &double_buffer); v->Visit("layout_transforms", &layout_transforms); v->Visit("axis_separators", &axis_separators); v->Visit("group", &group); v->Visit("num_child_stages", &num_child_stages); } static constexpr const char* _type_key = "Stage"; TVM_DECLARE_FINAL_OBJECT_INFO(StageNode, Object); }; /*! \brief node container for schedule */ class ScheduleNode : public Object { public: /*! \brief The output operations in original data flow graph */ Array<Operation> outputs; /*! * \brief list of all stages for ops. * The stages are sorted in dependency order. */ Array<Stage> stages; /*! * \brief List of all stage groups. */ Array<Stage> groups; /*! \brief map of original operation to the stages */ Map<Operation, Stage> stage_map; /*! * \brief Internal stage map to map internal ops to stages. * This is created on demand and can be invalidated. */ std::unordered_map<const Object*, Stage> op2stage_cache_; void VisitAttrs(AttrVisitor* v) { v->Visit("outputs", &outputs); v->Visit("stages", &stages); v->Visit("groups", &groups); v->Visit("stage_map", &stage_map); } /*! \brief Initialize temp cache. */ void InitCache(); /*! 
\brief Invalidate temp cache. */ void InvalidateCache(); /*! * \brief Check if the schedule contains an Operation. * \param op The candidate Operation. * \return true if the schedule has the Operation. Otherwise, false. */ TVM_DLL bool Contain(const Operation& op) const; /*! * \brief Check if the schedule contains a Tensor. * \param tensor The candidate tensor. * \return true if the schedule has the tensor. Otherwise, false. */ TVM_DLL bool Contain(const Tensor& tensor) const { return Contain(tensor->op); } static constexpr const char* _type_key = "Schedule"; TVM_DECLARE_FINAL_OBJECT_INFO(ScheduleNode, Object); }; /*! * \brief Create a schedule for an array of ops (and their dependencies). * \param ops The ops to be scheduled. * \return sch The created Schedule. */ inline Schedule create_schedule(Array<Operation> ops) { return Schedule(ops); } /*! \brief node container for IterVar attr */ class IterVarAttrNode : public Object { public: /*! \brief The iteration type. */ IterVarType iter_type{kDataPar}; /*! \brief The thread this iter Var binds, can be null */ IterVar bind_thread; /*! \brief List of tensors to be prefetched in this loop */ Array<Tensor> prefetch_data; /*! \brief The offset used in each prefetch */ Array<PrimExpr> prefetch_offset; /*! * \brief Tensor intrinsic used in tensorization, * when the axis is marked as Tensorized */ TensorIntrin tensor_intrin; /*! \brief Alignment factor of buffer dimension */ int dim_align_factor{0}; /*! \brief Alignment offset of buffer dimension */ int dim_align_offset{0}; /*! * \brief Additional pragma keys, array of StringImm */ Array<PrimExpr> pragma_keys; /*! * \brief Additional values of pragma, if any */ Array<PrimExpr> pragma_values; void VisitAttrs(AttrVisitor* v) { v->Visit("iter_type", &iter_type); v->Visit("bind_thread", &bind_thread); v->Visit("prefetch_data", &prefetch_data); v->Visit("prefetch_offset", &prefetch_offset); v->Visit("tensor_intrin", &tensor_intrin); v->Visit("dim_align_factor", &dim_align_factor); v->Visit("dim_align_offset", &dim_align_offset); v->Visit("pragma_keys", &pragma_keys); v->Visit("pragma_values", &pragma_values); } static constexpr const char* _type_key = "IterVarAttr"; TVM_DECLARE_FINAL_OBJECT_INFO(IterVarAttrNode, Object); }; /*! \brief base node of iteration var relation */ class IterVarRelationNode : public Object { public: static constexpr const char* _type_key = "IterVarRelation"; TVM_DECLARE_BASE_OBJECT_INFO(IterVarRelationNode, Object); }; /*! * \brief Split the parent domain into the product of * outer and inner. */ class SplitNode : public IterVarRelationNode { public: /*! \brief The parent domain */ IterVar parent; /*! \brief The outer domain */ IterVar outer; /*! \brief The inner domain */ IterVar inner; /*! \brief The split factor */ PrimExpr factor; /*! \brief Number of parts, only factor or nparts can be given */ PrimExpr nparts; void VisitAttrs(AttrVisitor* v) { v->Visit("parent", &parent); v->Visit("outer", &outer); v->Visit("inner", &inner); v->Visit("factor", &factor); v->Visit("nparts", &nparts); } static constexpr const char* _type_key = "Split"; TVM_DECLARE_FINAL_OBJECT_INFO(SplitNode, IterVarRelationNode); }; /*! * \brief Managed reference to SplitNode * \sa SplitNode */ class Split : public IterVarRelation { public: TVM_DLL Split(IterVar parent, IterVar outer, IterVar inner, PrimExpr factor, PrimExpr nparts); TVM_DEFINE_OBJECT_REF_METHODS(Split, IterVarRelation, SplitNode); }; /*! * \brief Fuse two domains into one domain. */ class FuseNode : public IterVarRelationNode { public: /*! 
\brief The outer domain */ IterVar outer; /*! \brief The inner domain */ IterVar inner; /*! \brief The target domain */ IterVar fused; void VisitAttrs(AttrVisitor* v) { v->Visit("outer", &outer); v->Visit("inner", &inner); v->Visit("fused", &fused); } static constexpr const char* _type_key = "Fuse"; TVM_DECLARE_FINAL_OBJECT_INFO(FuseNode, IterVarRelationNode); }; /*! * \brief Managed reference to FuseNode * \sa FuseNode */ class Fuse : public IterVarRelation { public: TVM_DLL Fuse(IterVar outer, IterVar inner, IterVar fused); TVM_DEFINE_OBJECT_REF_METHODS(Fuse, IterVarRelation, FuseNode); }; /*! * \brief Rebase the iteration to make the min 0. * This is useful to normalize the Schedule * so that every leaf variable's min is 0. */ class RebaseNode : public IterVarRelationNode { public: /*! \brief The parent domain */ IterVar parent; /*! \brief The inner domain */ IterVar rebased; void VisitAttrs(AttrVisitor* v) { v->Visit("parent", &parent); v->Visit("rebased", &rebased); } static constexpr const char* _type_key = "Rebase"; TVM_DECLARE_FINAL_OBJECT_INFO(RebaseNode, IterVarRelationNode); }; /*! * \brief Managed reference to RebaseNode * \sa RebaseNode */ class Rebase : public IterVarRelation { public: TVM_DLL Rebase(IterVar parent, IterVar rebased); TVM_DEFINE_OBJECT_REF_METHODS(Rebase, IterVarRelation, RebaseNode); }; /*! * \brief Singleton iterator [0, 1) */ class SingletonNode : public IterVarRelationNode { public: /*! \brief The singleton iterator */ IterVar iter; void VisitAttrs(AttrVisitor* v) { v->Visit("iter", &iter); } static constexpr const char* _type_key = "Singleton"; TVM_DECLARE_FINAL_OBJECT_INFO(SingletonNode, IterVarRelationNode); }; /*! * \brief Managed reference to SingletonNode * \sa SingletonNode */ class Singleton : public IterVarRelation { public: TVM_DLL explicit Singleton(IterVar iter); TVM_DEFINE_OBJECT_REF_METHODS(Singleton, IterVarRelation, SingletonNode); }; /*! * \brief Transform iterator according to some arbitrary expression. */ class TransformNode : public IterVarRelationNode { public: /*! \brief The loop variables that were replaced by the transformation. * * Prior to applying a layout transformation, these represent the * loops to iterate over a tensor as it is being computed, following * a row-major traversal of the tensor's original shape in the * compute definition. */ Array<IterVar> original_variables; /*! \brief The variables generated by the transformation. * * After applying a layout transformation, these represent the * loops to iterate over a tensor as it is being computed, following * a row-major traversal of the transformed shape of the tensor. */ Array<IterVar> transformed_variables; /*! \brief Map from the original variables to the transformed variables. * * Used to determine iterator ranges over the transformed variables. */ IndexMap forward_transformation; /*! \brief Map from transformed variables to the original variables * * Used to rewrite expressions containing the original loop iterators * in terms of the transformed loop iterators. 
*/ IndexMap inverse_transformation; void VisitAttrs(AttrVisitor* v) { v->Visit("original_variables", &original_variables); v->Visit("transformed_variables", &transformed_variables); v->Visit("forward_transformation", &forward_transformation); v->Visit("inverse_transformation", &inverse_transformation); } static constexpr const char* _type_key = "Transform"; TVM_DECLARE_FINAL_OBJECT_INFO(TransformNode, IterVarRelationNode); }; class Transform : public IterVarRelation { public: TVM_DLL explicit Transform(Array<IterVar> original_variables, Array<IterVar> transformed_variables, IndexMap forward_transformation, IndexMap inverse_transformation); TVM_DEFINE_OBJECT_REF_METHODS(Transform, IterVarRelation, TransformNode); }; /*! \brief Container for specialization conditions. */ class SpecializedConditionNode : public Object { public: /*! * \brief List of conditions in conjunctive normal form (CNF). * Each condition should be a simple expression, e.g., n > 16, m % 8 == 0, etc., * where n, m are tvm::Vars that represent dimensions in the tensor shape. */ Array<PrimExpr> clauses; void VisitAttrs(AttrVisitor* v) { v->Visit("clauses", &clauses); } static constexpr const char* _type_key = "SpecializedCondition"; TVM_DECLARE_FINAL_OBJECT_INFO(SpecializedConditionNode, Object); }; /*! * \brief Specialized condition to enable op specialization */ class SpecializedCondition : public ObjectRef { public: /*! * \brief construct from conditions * \param conditions The clauses in the specialized condition. */ TVM_DLL SpecializedCondition(Array<PrimExpr> conditions);  // NOLINT(*) /*! * \brief Get the current specialized condition. * \return the current specialized condition. */ TVM_DLL static SpecializedCondition Current(); TVM_DEFINE_OBJECT_REF_METHODS(SpecializedCondition, ObjectRef, SpecializedConditionNode); class Internal; private: // enable with syntax. friend class Internal; friend class With<SpecializedCondition>; /*! \brief Push a new specialized condition onto the thread local stack. */ TVM_DLL void EnterWithScope(); /*! \brief Pop a specialized condition off the thread local context stack. */ TVM_DLL void ExitWithScope(); }; // implementations inline const StageNode* Stage::operator->() const { return static_cast<const StageNode*>(get()); } inline StageNode* Stage::operator->() { return static_cast<StageNode*>(get_mutable()); } inline const ScheduleNode* Schedule::operator->() const { return static_cast<const ScheduleNode*>(get()); } inline ScheduleNode* Schedule::operator->() { return static_cast<ScheduleNode*>(get_mutable()); } inline const IterVarRelationNode* IterVarRelation::operator->() const { return static_cast<const IterVarRelationNode*>(get()); } inline const IterVarAttrNode* IterVarAttr::operator->() const { return static_cast<const IterVarAttrNode*>(get()); } } // namespace te } // namespace tvm #endif  // TVM_TE_SCHEDULE_H_
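To make the Stage primitives concrete, here is a hedged sketch (illustrative names, GPU mapping assumed) that creates a schedule, splits the compute axis, and binds the resulting loops to thread indices:

// Sketch: split a 1-D compute axis and bind the pieces to GPU threads.
#include <tvm/te/operation.h>
#include <tvm/te/schedule.h>

using namespace tvm;
using namespace tvm::te;

void ExampleSchedule() {
  Var n("n");
  Tensor A = placeholder({n}, DataType::Float(32), "A");
  Tensor B = compute({n}, [&](Var i) { return A(i) * 2.0f; }, "B");

  Schedule s = create_schedule({B->op});
  // Root axis of the compute op, then split it with an inner extent of 64.
  IterVar axis = B->op.as<ComputeOpNode>()->axis[0];
  IterVar outer, inner;
  s[B].split(axis, 64, &outer, &inner);
  // Replace the loops with GPU block/thread indices.
  s[B].bind(outer, thread_axis(Range(), "blockIdx.x"));
  s[B].bind(inner, thread_axis(Range(), "threadIdx.x"));
}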
include/tvm/te/schedule_pass.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements.  See the NOTICE file * distributed with this work for additional information * regarding copyright ownership.  The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License.  You may obtain a copy of the License at * *   http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied.  See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/te/schedule_pass.h * \brief Collection of Schedule pass functions. * * These passes work on the schedule hyper-graph * and infer information such as bounds, check conditions, * and read/write dependencies between the IterVars */ #ifndef TVM_TE_SCHEDULE_PASS_H_ #define TVM_TE_SCHEDULE_PASS_H_ #include <tvm/te/schedule.h> #include <tvm/tir/function.h> namespace tvm { namespace te { /*! * \brief To automatically inline the element-wise operations. * * \param sch The schedule to be inlined. */ void AutoInlineElemWise(Schedule sch); /*! * \brief To automatically inline the broadcast operations. * * \param sch The schedule to be inlined. */ void AutoInlineBroarcast(Schedule sch); /*! * \brief To automatically inline operations with injective writes * (i.e. writes without reduction or sequential loops). Note * that in this case, guarantees about contiguity, transpose, stride, * alignment and memory footprint in general do not hold. * * \param sch The schedule to be inlined. */ TVM_DLL void AutoInlineInjective(Schedule sch); /*! * \brief Infer the bound of all iteration variables related to the schedule. * * \param sch The root schedule to infer all the bounds. * \return the result bounds of the iteration variables */ Map<IterVar, Range> InferBound(const Schedule& sch); /*! * \brief Verify if there is any argument bound to a compact buffer. * * \param stmt The stmt to be verified. * \return true if there is any buffer_bind_scope attribute found, * otherwise, false. */ bool VerifyCompactBuffer(const Stmt& stmt); /*! * \brief Schedule s's dependent operations. * * \param s The schedule to be realized * \param dom_map The domain of each iter var. * \param debug_keep_trivial_loop Whether to keep trivial loops with extent 1 during lowering. * This is a debug feature for dataflow/axis analysis. * Note: If this is true, the lowered IR may be incorrect, * because we will also delete the init part of reductions * \return the result Stmt */ Stmt ScheduleOps(Schedule s, Map<IterVar, Range> dom_map, bool debug_keep_trivial_loop); /*! * \brief Postprocessing the Stmt generated by ScheduleOps to create * a PrimFunc that can then be used for further TIR optimizations. * * Perform this translation before running any TIR optimizations. * * List of actions taken by the function: * - Remove occurrences of te::Tensor, te::Operation in the IR * and replace them by corresponding IR nodes via tir::Buffer. * - Add annotation of extern buffers using the buffer_map field * in the PrimFunc type. * * \param arg_list Array of Tensor/Var/Buffer arguments to the function. * \param body The body of the function. * \param bindings Potential Tensor-to-Buffer bindings for the Tensors in the body. 
*/ PrimFunc SchedulePostProcToPrimFunc(Array<ObjectRef> arg_list, Stmt body, Optional<Map<Tensor, Buffer>> bindings); } // namespace te } // namespace tvm #endif // TVM_TE_SCHEDULE_PASS_H_
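A hedged sketch of how these passes are typically chained when lowering a schedule by hand (the function name is illustrative):

// Sketch: normalize, infer bounds, then emit the loop nest as a Stmt.
#include <tvm/te/schedule.h>
#include <tvm/te/schedule_pass.h>

using namespace tvm;
using namespace tvm::te;

Stmt LowerToStmt(Schedule s) {
  // Rebase leaf iter vars to [0, extent) before bound inference.
  s = s.normalize();
  Map<IterVar, Range> bounds = InferBound(s);
  // debug_keep_trivial_loop = false: elide loops of extent 1.
  return ScheduleOps(s, bounds, false);
}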
include/tvm/te/tensor.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements.  See the NOTICE file * distributed with this work for additional information * regarding copyright ownership.  The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License.  You may obtain a copy of the License at * *   http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied.  See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/te/tensor.h * \brief Dataflow tensor object */ #ifndef TVM_TE_TENSOR_H_ #define TVM_TE_TENSOR_H_ #include <tvm/arith/bound.h> #include <tvm/tir/expr.h> #include <tvm/tir/op.h> #include <string> #include <type_traits> #include <utility> #include <vector> namespace tvm { namespace te { using arith::IntSet; using namespace tvm::tir; // internal node container for Operation class OperationNode; class Tensor; /*! \brief Operation that produces tensors */ class Operation : public ObjectRef { public: /*! \brief default constructor  */ Operation() {} explicit Operation(ObjectPtr<Object> n) : ObjectRef(n) {} /*! * \brief access the internal node container * \return the pointer to the internal node container */ inline const OperationNode* operator->() const; /*! * \brief get the i-th output of the operation. * \param i the output index. * \return The i-th output. */ TVM_DLL Tensor output(size_t i) const; /*! \brief specify container node */ using ContainerType = OperationNode; }; /*! \brief Node to represent a tensor */ class TensorNode : public DataProducerNode { public: /*! \brief The shape of the tensor */ Array<PrimExpr> shape; /*! \brief data type in the content of the tensor */ DataType dtype; /*! \brief the source operation, can be None */ Operation op; /*! \brief the output index from source operation */ int value_index{0}; /*! \brief constructor */ TensorNode() {} void VisitAttrs(AttrVisitor* v) { v->Visit("shape", &shape); v->Visit("dtype", &dtype); v->Visit("op", &op); v->Visit("value_index", &value_index); } Array<PrimExpr> GetShape() const final { return shape; } DataType GetDataType() const final { return dtype; } TVM_DLL String GetNameHint() const final; static constexpr const char* _type_key = "Tensor"; TVM_DECLARE_FINAL_OBJECT_INFO(TensorNode, DataProducerNode); }; /*! * \brief Tensor structure representing a possible input, * or intermediate computation result. */ class Tensor : public DataProducer { private: /*! * \brief Helper for indexing operations into tensors * \param indices The indices * \param support_negative_indices Whether to normalize indices in the case of negative indices. * \return the result expression representing tensor read. */ inline PrimExpr IndexTensor(Array<PrimExpr> indices, bool support_negative_indices) const; public: TVM_DLL Tensor(Array<PrimExpr> shape, DataType dtype, Operation op, int value_index); /*! * \brief check if two tensors are equal to each other. * \param other tensor to be checked. * \return whether the two tensors are equal to each other. */ inline bool operator==(const Tensor& other) const; /*! * \brief check if two tensors are different. * \param other tensor to be checked. * \return whether the two tensors are different. 
*/ inline bool operator!=(const Tensor& other) const; /*! \return The dimension of the tensor */ inline size_t ndim() const; /*! * \brief Take elements from the tensor * \param args The indices * \return the result expression representing tensor read. */ template <typename... Args> inline PrimExpr operator()(Args&&... args) const { Array<PrimExpr> indices{std::forward<Args>(args)...}; return operator()(indices); } /*! * \brief Take elements from the tensor * \param indices the indices. * \return the result expression representing tensor read. */ TVM_DLL PrimExpr operator()(Array<PrimExpr> indices) const; /*! * \brief Take elements from the tensor * \param indices the indices. * \return the result expression representing tensor read. */ TVM_DLL PrimExpr operator()(Array<Var> indices) const; /*! * \brief Take elements from the tensor with support for negative indices. * \param args The indices * \return the result expression representing tensor read. */ template <typename... Args> TVM_DLL PrimExpr IndexWithNegativeIndices(Args&&... args) const { Array<PrimExpr> indices{std::forward<Args>(args)...}; return IndexWithNegativeIndices(indices); } /*! * \brief Take elements from the tensor with support for negative indices. * \param indices the indices. * \return the result expression representing tensor read. */ TVM_DLL PrimExpr IndexWithNegativeIndices(Array<PrimExpr> indices) const; /*! * \brief Take elements from the tensor with support for negative indices. * \param indices the indices. * \return the result expression representing tensor read. */ TVM_DLL PrimExpr IndexWithNegativeIndices(Array<Var> indices) const; /*! * \brief data structure to represent a slice that fixes first k coordinates. * This is used to enable syntax sugar of Tensor[x][y][z] to get the element. */ class Slice { public: // construct via tensor and indices Slice(const Tensor& tensor, std::vector<PrimExpr> indices) : tensor_(tensor), indices_(indices) {} /*! * \brief get i-th slice from the current slice. * \param i the index of the coordinate * \return the subsequent slice. */ inline Slice operator[](PrimExpr i) { std::vector<PrimExpr> other = indices_; other.emplace_back(i); return Slice(tensor_, other); } /*! * \brief Convert slice to expression. * This is only valid when all the coordinates are fully specified. * \return the corresponding expression of this slice. */ inline operator PrimExpr() const { return tensor_(indices_); } private: const Tensor& tensor_; std::vector<PrimExpr> indices_; }; /*! * \brief get i-th slice from the current Tensor. * \param i the index of the coordinate * \return the subsequent slice. 
*/ inline Slice operator[](PrimExpr i) const { return Slice(*this, {i}); } TVM_DEFINE_OBJECT_REF_METHODS(Tensor, DataProducer, TensorNode); }; // Implementations of inline functions inline size_t Tensor::ndim() const { return (*this)->shape.size(); } inline bool Tensor::operator==(const Tensor& other) const { if (get() == other.get()) return true; if (get() == nullptr || other.get() == nullptr) return false; if ((*this)->op.defined() || other->op.defined()) { return (*this)->op == other->op && (*this)->value_index == other->value_index; } else { return false; } } inline bool Tensor::operator!=(const Tensor& other) const { return !(*this == other); } // macro to turn every operation of slice to expression #define DEFINE_OVERLOAD_SLICE_UNARY_OP(Op) \ inline PrimExpr operator Op(const Tensor::Slice& a) { return Op a.operator PrimExpr(); } #define DEFINE_OVERLOAD_SLICE_BINARY_OP(Op) \ template <typename T> \ inline PrimExpr operator Op(const Tensor::Slice& a, const T& b) { \ return a.operator PrimExpr() Op b; \ } \ template <typename T> \ inline PrimExpr operator Op(const T& a, const Tensor::Slice& b) { \ return a Op b.operator PrimExpr(); \ } \ inline PrimExpr operator Op(const Tensor::Slice& a, const Tensor::Slice& b) { \ return a.operator PrimExpr() Op b.operator PrimExpr(); \ } DEFINE_OVERLOAD_SLICE_UNARY_OP(!); DEFINE_OVERLOAD_SLICE_UNARY_OP(-); DEFINE_OVERLOAD_SLICE_BINARY_OP(+); DEFINE_OVERLOAD_SLICE_BINARY_OP(-); DEFINE_OVERLOAD_SLICE_BINARY_OP(*); DEFINE_OVERLOAD_SLICE_BINARY_OP(==); DEFINE_OVERLOAD_SLICE_BINARY_OP(<=); DEFINE_OVERLOAD_SLICE_BINARY_OP(>=); DEFINE_OVERLOAD_SLICE_BINARY_OP(!=); DEFINE_OVERLOAD_SLICE_BINARY_OP(&&); DEFINE_OVERLOAD_SLICE_BINARY_OP(||); DEFINE_OVERLOAD_SLICE_BINARY_OP(>>); DEFINE_OVERLOAD_SLICE_BINARY_OP(<<); DEFINE_OVERLOAD_SLICE_BINARY_OP(>); // NOLINT(*) DEFINE_OVERLOAD_SLICE_BINARY_OP(<); // NOLINT(*) } // namespace te } // namespace tvm namespace std { template <> struct hash<::tvm::te::Operation> : public ::tvm::ObjectPtrHash {}; template <> struct hash<::tvm::te::Tensor> { std::size_t operator()(const ::tvm::te::Tensor& k) const { ::tvm::ObjectPtrHash hasher; if (k.defined() && k->op.defined()) { return hasher(k->op); } else { return hasher(k); } } }; } // namespace std #endif // TVM_TE_TENSOR_H_
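A short sketch of the indexing sugar defined above (illustrative names): operator() reads an element with explicit indices, while operator[] builds a Tensor::Slice one coordinate at a time and converts to PrimExpr on use.

// Sketch: two equivalent ways to read an element of a te::Tensor.
#include <tvm/te/operation.h>

using namespace tvm;
using namespace tvm::te;

void ExampleIndexing() {
  Var i("i"), j("j");
  Tensor A = placeholder({16, 16}, DataType::Float(32), "A");
  PrimExpr direct = A(i, j);  // variadic operator()
  PrimExpr sliced = A[i][j];  // Tensor::Slice chain
  // Slices also take part in arithmetic through the overloads above.
  PrimExpr sum = A[i][j] + A[j][i];
}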
include/tvm/te/tensor_intrin.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements.  See the NOTICE file * distributed with this work for additional information * regarding copyright ownership.  The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License.  You may obtain a copy of the License at * *   http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied.  See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/te/tensor_intrin.h * \brief Tensor intrinsic operations. */ #ifndef TVM_TE_TENSOR_INTRIN_H_ #define TVM_TE_TENSOR_INTRIN_H_ #include <tvm/te/tensor.h> #include <tvm/tir/buffer.h> #include <string> namespace tvm { namespace te { /*! \brief Node to represent a Tensor intrinsic operator */ class TensorIntrinNode : public Object { public: /*! \brief The name of the intrinsic */ std::string name; /*! \brief The operation this intrinsic is carrying out */ Operation op; /*! \brief List of inputs of the operator, placeholders in post-DFS order */ Array<Tensor> inputs; /*! * \brief Symbolic buffers of each output/input tensor * buffers[0:len(inputs)] are buffers of the inputs. * buffers[len(inputs):] are buffers of each output. * * \note When a field in a Buffer is a Var, it means we can be flexible * with respect to that field, and the Var can occur in the body. * When it is a constant, it means we can only take data in that shape. */ Array<Buffer> buffers; /*! \brief List of scalar variables, used in body. These placeholders * will be bound to expressions passed in when the TensorIntrin is called * from a TensorComputeOp. */ Array<Var> scalar_params; /*! \brief The normal statement to execute the intrinsic */ Stmt body; /*! * \brief Special statement for reduction op, can be None * resets the value of the output buffer to the identity value. */ Stmt reduce_init; /*! * \brief Special statement for reduction op, can be None * Reduce: do a reduction of the current output buffer with the result. */ Stmt reduce_update; /*! \brief constructor */ TensorIntrinNode() {} void VisitAttrs(AttrVisitor* v) { v->Visit("name", &name); v->Visit("op", &op); v->Visit("inputs", &inputs); v->Visit("buffers", &buffers); v->Visit("scalar_params", &scalar_params); v->Visit("body", &body); v->Visit("reduce_init", &reduce_init); v->Visit("reduce_update", &reduce_update); } static constexpr const char* _type_key = "TensorIntrin"; TVM_DECLARE_FINAL_OBJECT_INFO(TensorIntrinNode, Object); }; /*! * \brief Managed reference to TensorIntrinNode * \sa TensorIntrinNode */ class TensorIntrin : public ObjectRef { public: TVM_DLL TensorIntrin(std::string name, Operation op, Array<Tensor> inputs, Array<Buffer> buffers, Array<Var> scalar_params, Stmt body, Stmt reduce_init, Stmt reduce_update); TVM_DEFINE_OBJECT_REF_METHODS(TensorIntrin, ObjectRef, TensorIntrinNode); }; class TensorIntrinCallNode : public Object { public: /*! \brief the tensor intrinsic */ TensorIntrin intrin; /*! \brief input tensors of the intrinsic */ Array<Tensor> tensors; /*! \brief regions of input tensors */ Array<Region> regions; /*! * \brief IterVar on each reduction axis, if the * intrin will use the reduce axis */ Array<IterVar> reduce_axis; /*! 
\brief scalar expression inputs */ Array<PrimExpr> scalar_inputs; void VisitAttrs(AttrVisitor* v) { v->Visit("intrin", &intrin); v->Visit("tensors", &tensors); v->Visit("regions", &regions); v->Visit("reduce_axis", &reduce_axis); v->Visit("scalar_inputs", &scalar_inputs); } static constexpr const char* _type_key = "TensorIntrinCall"; TVM_DECLARE_FINAL_OBJECT_INFO(TensorIntrinCallNode, Object); }; /*! * \brief Managed reference to TensorIntrinCallNode * \sa TensorIntrinCallNode */ class TensorIntrinCall : public ObjectRef { public: TVM_DLL TensorIntrinCall(TensorIntrin intrin, Array<Tensor> tensors, Array<Region> regions, Array<IterVar> reduce_axis, Array<PrimExpr> scalar_inputs); TVM_DEFINE_OBJECT_REF_METHODS(TensorIntrinCall, ObjectRef, TensorIntrinCallNode); }; } // namespace te } // namespace tvm #endif // TVM_TE_TENSOR_INTRIN_H_
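A hedged construction sketch for TensorIntrin; the body here is a stand-in Evaluate node (a real intrinsic would emit, e.g., an extern call that writes the output buffer), and all names are illustrative:

// Sketch: declare a 16-element "copy" intrinsic with placeholder lowering.
#include <tvm/te/operation.h>
#include <tvm/te/tensor_intrin.h>
#include <tvm/tir/buffer.h>
#include <tvm/tir/stmt.h>

using namespace tvm;
using namespace tvm::te;

TensorIntrin MakeCopyIntrin() {
  Tensor x = placeholder({16}, DataType::Float(32), "x");
  Tensor y = compute({16}, [&](Var i) { return x(i); }, "y");
  // buffers[0:len(inputs)] describe the inputs, the rest the outputs.
  Buffer xb = tir::decl_buffer({16}, DataType::Float(32), "xb");
  Buffer yb = tir::decl_buffer({16}, DataType::Float(32), "yb");
  Stmt body = tir::Evaluate(0);  // stand-in for the real lowering
  return TensorIntrin("copy16", y->op, {x}, {xb, yb},
                      /*scalar_params=*/{}, body,
                      /*reduce_init=*/Stmt(), /*reduce_update=*/Stmt());
}

A schedule could then apply it with s[C].tensorize(axis, MakeCopyIntrin()), per the Stage::tensorize primitive declared in schedule.h (s, C, and axis being hypothetical schedule objects).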
include/tvm/tir/analysis.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements.  See the NOTICE file * distributed with this work for additional information * regarding copyright ownership.  The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License.  You may obtain a copy of the License at * *   http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied.  See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/tir/analysis.h * \brief Analysis utilities and passes for TIR. */ #ifndef TVM_TIR_ANALYSIS_H_ #define TVM_TIR_ANALYSIS_H_ #include <tvm/ir/module.h> #include <tvm/ir/transform.h> #include <tvm/tir/expr.h> #include <tvm/tir/function.h> #include <tvm/tir/op_attr_types.h> #include <tvm/tir/stmt.h> #include <string> namespace tvm { namespace tir { /*! * \brief Compare two expressions recursively and check if they are equal * to each other without var remapping. * * This function does not remap variable bindings, it will not * return true for (let x = 1 in x + 1) vs (let y = 1 in y + 1), unless x.same_as(y). * * Use StructuralEqual for such cases. * * Due to the restriction of not remapping variables, this function can run * faster than StructuralEqual and can be used as a utility function during arithmetic * simplifications. * * \sa StructuralEqual */ struct ExprDeepEqual { public: TVM_DLL bool operator()(const PrimExpr& lhs, const PrimExpr& rhs) const; }; /*! * \brief Visit the PrimFuncs in the IRModule * \tparam FLambda The type of the PrimFunc visitor * \param mod The IRModule to be visited * \param fvisit The visitor to the PrimFuncs in the IRModule */ template <class FLambda> inline void VisitPrimFuncs(const IRModule& mod, FLambda fvisit) { for (const auto& kv : mod->functions) { const BaseFunc& base_func = kv.second; if (const auto* prim_func = base_func.as<PrimFuncNode>()) { fvisit(prim_func); } } } /*! * \brief Estimate the FLOPs of a TIR fragment. * \param stmt The TIR fragment to be estimated. * \return The estimated FLOPs. */ TVM_DLL double EstimateTIRFlops(const Stmt& stmt); /*! * \brief Estimate the FLOPs of TIRs in an IRModule. * \param mod The IRModule to be estimated. * \return The estimated FLOPs. */ TVM_DLL double EstimateTIRFlops(const IRModule& mod); /*! * \brief Find undefined vars in the statement. * \param stmt The statement to be checked. * \param defs The vars that are defined. * \return Array of undefined vars. */ TVM_DLL Array<Var> UndefinedVars(const Stmt& stmt, const Array<Var>& defs); /*! * \brief Find undefined vars in the expression. * \param expr The expression to be checked. * \return Array of undefined vars. */ TVM_DLL Array<Var> UndefinedVars(const PrimExpr& expr); /*! * \brief Analyze the side effect * \param expr The expression to be checked. * * \return CallEffectKind, can be kPure, kReadState or kUpdateState */ TVM_DLL CallEffectKind SideEffect(const PrimExpr& expr); /*! * \brief Whether the given Stmt uses any var in the given variable set. * \param stmt The Stmt to be checked. * \param vset_contains The check function to see if a var is in the variable set. * \return Whether `stmt` uses any var in the given variable set. 
*/ TVM_DLL bool UsesVar(const Stmt& stmt, std::function<bool(const VarNode*)> vset_contains); /*! * \brief Whether the given PrimExpr uses any var in the given variable set. * \param expr The PrimExpr to be checked. * \param vset_contains The check function to see if var is in the variable set. * \return Whether `expr` uses any var in the given variable set. */ TVM_DLL bool UsesVar(const PrimExpr& expr, std::function<bool(const VarNode*)> vset_contains); /*! * \brief Verifies whether the IR stmt or Expr is in SSA form. * That is: each Var is defined and assigned once(in Let/For) * * \param func The function to be verified. * \return Whether IR is in SSA form. * * \note All passes in TIR consume and produce SSA form. */ TVM_DLL bool VerifySSA(const PrimFunc& func); /*! * \brief Verify if memory accesses are legal for a specific target device type. * * In the case that tgt is cuda, if not all workload is bound with * threads, CPU code is generated that tries to access GPU memory, * which is illegal. This pass performs verification for this case. * * \param func The function to be verified. * \return Success of memory verification. */ TVM_DLL bool VerifyMemory(const PrimFunc& func); /*! * \brief Verify the correctness of a GPU code * It will check the whether the amount of memory usage or the number of threads * in a block exceeds the limit * \param func The function to be checked * \param constraints The dict to specify constraints to check. * Possible keys are * * "max_local_memory_per_block": Total amount of local memory per block (in bytes). * "max_shared_memory_per_block": Total amount of shared memory per block (in bytes). * "max_threads_per_block": Maximum number of threads per block. * "max_thread_x": Maximum length of threadIdx.x. * "max_thread_y": Maximum length of threadIdx.y. * "max_thread_z": Maximum length of threadIdx.z. * * If one key is missing in this argument, the pass won't check for that item. * \return valid Whether it is a valid GPU code * */ TVM_DLL bool VerifyGPUCode(const PrimFunc& func, Map<String, PrimExpr> constraints); /*! * \brief Auto detect the block access region according to its body stmt * It will detect the access region as an array in order of appearance in AST * \param block The block to be detected * \param buffer_var_map The outside buffers which may be accessed the block. * It is a map from buffer var to the buffer. * \return Array of access regions. * There are three arrays of BufferRegion: * - first: read regions * - second: write regions * - third: opaque regions */ TVM_DLL Array<Array<BufferRegion>> GetBlockAccessRegion(const Block& block, const Map<Var, Buffer>& buffer_var_map); /*! * \brief Auto detect the block read/write region according to its body stmt. An opaque access will * be counted as both a read and a write access * \param block The block to be detected * \param buffer_var_map The outside buffers which may be accessed the block. * It is a map from buffer var to the buffer * \return An array only consisting of the read regions and write regions of the input block */ TVM_DLL Array<Array<BufferRegion>> GetBlockReadWriteRegion(const Block& block, const Map<Var, Buffer>& buffer_var_map); /*! * \brief Calculate the expresion complexity based on number of symbols it contains. * \param expr The expr to be calculated. */ TVM_DLL size_t CalculateExprComplexity(const PrimExpr& expr); /*! 
* \brief Calculate the constants size in bytes needed by the TIR allocates inside the TIR PrimFunc * \param func The TIR PrimFunc for which the constants size to be calculated * \param constant_byte_alignment The byte alignment required for each constant allocated */ TVM_DLL size_t CalculateConstantBytes(const PrimFunc& func, const Integer& constant_byte_alignment); /*! * \brief Calculate the workspace size in bytes needed by the TIR allocates inside the TIR PrimFunc * \param func The TIR PrimFunc for which the workspace size to be calculated * \param workspace_byte_alignment The byte alignment required for each tensor allocated in this * workspace */ TVM_DLL size_t CalculateWorkspaceBytes(const PrimFunc& func, const Integer& workspace_byte_alignment); /*! * \brief Detect the lowest common ancestor(LCA) of buffer access, including both high-level * access(BufferLoad, BufferStore) and low-level access(Load, Store and opaque access). * The LCA may be a For loop or a Block. * \param func The PrimFunc to be detected. * \return The Map from buffer to the LCA of all access to it. The lca is function root if the * return stmt is NullOpt. */ TVM_DLL Map<Buffer, Optional<Stmt>> DetectBufferAccessLCA(const PrimFunc& func); /*! * \brief Verify if the given TIR is well-formed. The verification includes: * - Check if expressions not contain vars that is defined outside the block. * \param func The PrimFunc to be verified. * \param assert_mode The indicator if it raises an error when the function is not well-formed. * \return Whether it is a well-formed TIR function. */ TVM_DLL bool VerifyWellFormed(const PrimFunc& func, bool assert_mode = true); /*! * \brief Find the entry function of the given IRModule, i.e, functions marked by * `tir::attr::kIsEntryFunc`, whose name is `main` or being the only PrimeFunc. * \param mod The IRModule to find the entry function. * \param result_g_var The result GlobalVar of the entry function. * \return The entry function. */ const PrimFuncNode* FindEntryFunc(const IRModule& mod, GlobalVar* result_g_var); /*! * \brief Find the "anchor block" of the given module. * We define the anchor block to be the block with (1) an init statement and (2) having * the biggest flops count. The latter condition is only used when there are multiple blocks * with an init statement. * For example, if the input module is conv2d + fused spatial blocks, conv2d is the anchor block. * The input module may not contain more than one such block. For example, a module having * two conv2d is not allowed as an input. * However, a module created from winograd convolution has multiple blocks with an init statement * (input transform, batched GEMM, and output transform). We use the second condition, the flops * count, to determine that the batched GEMM block is the anchor block. * \param mod The input TIR module. * \return The anchor block if found, nullptr otherwise. */ const tir::BlockNode* FindAnchorBlock(const IRModule& mod); // Pass variants of verification analysis // directly throws RuntimeError when verification fails. namespace transform { using tvm::transform::Pass; using tvm::transform::PassContext; /*! * \brief Pass variant of VerifySSA. * * \returns The pass. * \sa tvm::tir::VerifySSA */ TVM_DLL Pass VerifySSA(); /*! * \brief Pass variant of VerifyMemory. * * \returns The pass. * \sa tvm::tir::VerifyMemory */ TVM_DLL Pass VerifyMemory(); /*! * \brief Pass variant of VerifyGPUCode. * * \param constraints The dict to specify constraints to check. * * \returns The pass. 
* \sa tvm::tir::VerifyGPUCode */ TVM_DLL Pass VerifyGPUCode(Map<String, PrimExpr> constraints); /*! * \brief Statically check TIR code for out of bounds array access. * * This analysis is conservative: it will only raise errors if it can prove * that out of bounds access occurs. Cases that are uncertain do not raise * errors. * * \returns The pass. */ TVM_DLL Pass OOBChecker(); } // namespace transform } // namespace tir } // namespace tvm #endif // TVM_TIR_ANALYSIS_H_
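A minimal usage sketch for the analysis utilities declared above, assuming a pre-built IRModule; `ReportFlops` and `SameExpr` are hypothetical helper names, not part of the header:

#include <tvm/ir/module.h>
#include <tvm/runtime/logging.h>
#include <tvm/tir/analysis.h>

// Walk every PrimFunc in a module and log its estimated FLOPs.
void ReportFlops(const tvm::IRModule& mod) {
  tvm::tir::VisitPrimFuncs(mod, [](const tvm::tir::PrimFuncNode* f) {
    // EstimateTIRFlops accepts any TIR fragment, here the function body.
    LOG(INFO) << "estimated FLOPs: " << tvm::tir::EstimateTIRFlops(f->body);
  });
}

// ExprDeepEqual: fast structural equality without variable remapping.
bool SameExpr(const tvm::PrimExpr& a, const tvm::PrimExpr& b) {
  return tvm::tir::ExprDeepEqual()(a, b);
}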
https://github.com/zk-ml/tachikoma
include/tvm/tir/buffer.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/tir/buffer.h * \brief Symbolic n-dimensional array, to represent a memory buffer. */ #ifndef TVM_TIR_BUFFER_H_ #define TVM_TIR_BUFFER_H_ #include <tvm/ir/expr.h> #include <tvm/runtime/container/array.h> #include <tvm/runtime/container/string.h> #include <tvm/tir/var.h> #include <string> namespace tvm { namespace tir { // forward declare Stmt class Stmt; /*! \brief buffer type */ enum BufferType : int { kDefault = 1, // Maps buffer[i][j][k] -> buffer[i][0][k] if dimension i's shape equals 1. kAutoBroadcast = 2, }; /*! \brief Node to represent a buffer */ class BufferNode : public Object { public: // Data fields. /*! * \brief The pointer to the head of the data * \sa data_alignment The alignment of data in bytes. */ Var data; /*! \brief data type of the content of the tensor */ DataType dtype; /*! \brief The shape of the buffer prior to flattening * * This contains the shape as it is accessed by * BufferLoad/BufferStore nodes, and used by the low-level code * generators. */ Array<PrimExpr> shape; /*! * \brief Separators between input axes when generating flattened output axes * * For buffers representing flat 1-d memory (e.g. any buffer in * RAM), this should be an empty array. For buffers representing * non-flat memory, each entry in axis_separators should be the * first input axis that is part of a new flattened axis. */ Array<IntImm> axis_separators; /*! * \brief The strides of each dimension * This can be an empty array, indicating the array is contiguous */ Array<PrimExpr> strides; /*! \brief The offset in terms of number of dtype elements (including lanes) */ PrimExpr elem_offset; // Meta data /*! \brief optional name of the buffer */ String name; /*! \brief Alignment requirement of data pointer in bytes. */ int data_alignment; /*! * \brief Factor of elem_offset field, * elem_offset is guaranteed to be a multiple of offset_factor. */ int offset_factor; /*! \brief buffer type */ BufferType buffer_type; /*! * \brief Span that points to the original source code. * Reserved debug information. */ mutable Span span; /*! \brief constructor */ BufferNode() {} void VisitAttrs(AttrVisitor* v) { v->Visit("data", &data); v->Visit("dtype", &dtype); v->Visit("shape", &shape); v->Visit("strides", &strides); v->Visit("axis_separators", &axis_separators); v->Visit("elem_offset", &elem_offset); v->Visit("name", &name); v->Visit("data_alignment", &data_alignment); v->Visit("offset_factor", &offset_factor); v->Visit("buffer_type", &buffer_type); v->Visit("span", &span); } bool SEqualReduce(const BufferNode* other, SEqualReducer equal) const { // Use DefEqual as buffer can define variables in its semantics, // skip name as name is not important.
return equal.DefEqual(data, other->data) && equal(dtype, other->dtype) && equal.DefEqual(shape, other->shape) && equal.DefEqual(strides, other->strides) && equal.DefEqual(axis_separators, other->axis_separators) && equal.DefEqual(elem_offset, other->elem_offset) && equal(data_alignment, other->data_alignment) && equal(buffer_type, other->buffer_type); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce.DefHash(data); hash_reduce(dtype); hash_reduce.DefHash(shape); hash_reduce.DefHash(strides); hash_reduce.DefHash(elem_offset); hash_reduce.DefHash(axis_separators); hash_reduce(data_alignment); hash_reduce(buffer_type); } /*! \return preferred index type for this buffer node */ DataType DefaultIndexType() const { return shape.size() != 0 ? shape[0].dtype() : DataType::Int(32); } /*! \brief Determine the offset in the buffer of the given index. * * Returns the buffer offset, in number of elements of type dtype, * without adjusting for number of lanes. (e.g. The number of * float16x4 elements in a buffer of type float16x4.) */ Array<PrimExpr> ElemOffset(Array<PrimExpr> index) const; static constexpr const char* _type_key = "tir.Buffer"; static constexpr const bool _type_has_method_sequal_reduce = true; static constexpr const bool _type_has_method_shash_reduce = true; TVM_DECLARE_FINAL_OBJECT_INFO(BufferNode, Object); }; /*! * \brief Buffer is a symbolic n-dimensional array structure. * It is a composition of primitive symbolic types, * used to specify the memory layout of the Tensor used in program input. */ class Buffer : public ObjectRef { public: // User can specify data_alignment and offset_factor to be 0 // A default value will be picked. TVM_DLL Buffer(Var data, DataType dtype, Array<PrimExpr> shape, Array<PrimExpr> strides, PrimExpr elem_offset, String name, int data_alignment, int offset_factor, BufferType buffer_type, Array<IntImm> axis_separators = {}, Span span = Span()); /*! * \brief Return a new buffer that is equivalent to the current one * but always has the stride field defined. * \return The strided version of the buffer. */ TVM_DLL Buffer MakeStrideView() const; /*! * \brief Make a new symbolic buffer representing a slice of the buffer. * \param begins The beginning position of each dimension. * \param extents The extent of each dimension. * \note This function will make the target buffer as compact as possible. * If stride is not needed in the slice, it won't be present * \return the result buffer. */ TVM_DLL Buffer MakeSlice(Array<PrimExpr> begins, Array<PrimExpr> extents) const; /*! * \brief Get access ptr to the entire buffer. * \param access_mask The access mask * \param ptr_type The type of the pointer. * \param content_lanes The number of lanes for the (data) type. * \param offset The offset of ptr. * \param input_extent The extent of ptr. */ TVM_DLL PrimExpr access_ptr(int access_mask, DataType ptr_type = DataType::Handle(), int content_lanes = 1, PrimExpr offset = IntImm(DataType::Int(32), 0), Optional<PrimExpr> input_extent = NullOpt) const; /*! * \brief Create an Expr that does a vector load at begin index. * \param begin The beginning index * \param dtype The data type to be loaded. */ TVM_DLL PrimExpr vload(Array<PrimExpr> begin, DataType dtype) const; /*! * \brief Create a Stmt that does a vector store at begin index. * \param begin The beginning index * \param value The value to be stored. */ TVM_DLL Stmt vstore(Array<PrimExpr> begin, PrimExpr value) const; /*! * \brief Get a flattened version of the buffer */ Buffer GetFlattenedBuffer() const; /*!
\brief Determine the offset in the buffer of the given index. * * Returns the buffer offset, in number of elements of type dtype, * without adjusting for number of lanes. (e.g. The number of * float16x4 elements in a buffer of type float16x4.) */ Array<PrimExpr> OffsetOf(Array<PrimExpr> index) const; /*! * \brief Return the storage scope associated with this buffer. */ TVM_DLL String scope() const; TVM_DEFINE_OBJECT_REF_METHODS(Buffer, ObjectRef, BufferNode); TVM_DEFINE_OBJECT_REF_COW_METHOD(BufferNode); }; /*! * \brief Construct a new buffer given shape and dtype. * \param shape The shape of the buffer. * \param dtype The content data type. * \param name The name of the buffer * \param storage_scope The storage scope associated with this buffer * \param axis_separators Divisions defining the groups of axes that will be flattened together. * \param span The location of this object in the source code. * \return The created buffer. * \sa Buffer for complete constructor. */ TVM_DLL Buffer decl_buffer(Array<PrimExpr> shape, DataType dtype = DataType::Float(32), String name = "buffer", String storage_scope = "", Array<IntImm> axis_separators = {}, Span span = Span()); /*! * \brief Base node for data producers. * * A DataProducer stores necessary information (e.g. a tensor expression) to produce * a multi-dimensional array. The stored information is opaque to the TIR. * DataProducer can appear in high-level DSLs that are built on top of the TIR. * * A valid TIR PrimFunc should not contain any DataProducer, high level DSLs should lower * all DataProducers to Buffers before TIR transformations. * * \sa tvm::te::Tensor */ class DataProducerNode : public Object { public: /*! \brief destructor. */ virtual ~DataProducerNode() {} /*! * \brief Get the shape of the result. * \return The shape. */ virtual Array<PrimExpr> GetShape() const = 0; /*! * \brief Get the data type of the result. * \return The data type. */ virtual DataType GetDataType() const = 0; /*! * \brief Get the name hint of the data producer. * \return The name hint. */ virtual String GetNameHint() const = 0; bool SEqualReduce(const DataProducerNode* other, SEqualReducer equal) const { // because buffer producer is opaque, we just do pointer equality. return this == other; } void SHashReduce(SHashReducer hash_reduce) const {} static constexpr const char* _type_key = "tir.DataProducer"; static constexpr const bool _type_has_method_sequal_reduce = true; static constexpr const bool _type_has_method_shash_reduce = true; TVM_DECLARE_BASE_OBJECT_INFO(DataProducerNode, Object); }; /*! * \brief Managed reference to DataProducerNode. * \sa DataProducerNode */ class DataProducer : public ObjectRef { public: TVM_DEFINE_OBJECT_REF_METHODS(DataProducer, ObjectRef, DataProducerNode); }; /*! * \brief Creates a TIR Buffer for the provided parameters * \param shape shape of the buffer * \param dtype data type * \param name buffer name * \param data_alignment alignment requirement of data pointer in bytes * \param offset_factor Factor of elem_offset field, elem_offset is guaranteed to be a * multiple of offset_factor. User can specify data_alignment and offset_factor to be 0; * a default value will be picked. * \param compact If the statement has already bound to a compact buffer.
* \param memory_scope memory scope of the buffer */ TVM_DLL tir::Buffer BufferWithOffsetAlignment(Array<PrimExpr> shape, DataType dtype, std::string name, int data_alignment, int offset_factor, bool compact, std::string memory_scope = ""); } // namespace tir } // namespace tvm #endif // TVM_TIR_BUFFER_H_
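A short illustrative sketch of the Buffer API above; `CopyOneElement` and the buffer names are hypothetical, and the shapes are arbitrary:

#include <tvm/tir/buffer.h>
#include <tvm/tir/stmt.h>

// Declare two 16x64 float32 buffers, load an element from one and
// store it into the other at the same coordinates.
tvm::tir::Stmt CopyOneElement() {
  tvm::tir::Buffer a = tvm::tir::decl_buffer({16, 64}, tvm::DataType::Float(32), "A");
  tvm::tir::Buffer b = tvm::tir::decl_buffer({16, 64}, tvm::DataType::Float(32), "B");
  tvm::PrimExpr v = a.vload({0, 8}, tvm::DataType::Float(32));
  return b.vstore({0, 8}, v);
}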
https://github.com/zk-ml/tachikoma
include/tvm/tir/builtin.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/tir/builtin.h * \brief TIR builtin intrinsics. * * TIR builtin intrinsics are stored as tvm::Op. * They are processed in the same way as we process Ops. * * It is not necessary to create a function for every Op, * as we can obtain them through Op::Get. * * This file contains the most commonly used intrinsics or * those that have special semantics and need compiler support. */ #ifndef TVM_TIR_BUILTIN_H_ #define TVM_TIR_BUILTIN_H_ #include <tvm/ir/op.h> #include <tvm/tir/expr.h> namespace tvm { namespace tir { /*! \brief Collection of builtin intrinsics as ops */ namespace builtin { /*! * \brief Return value. */ TVM_DLL const Op& ret(); /*! * \brief Reinterpret the value using the target type. */ TVM_DLL const Op& reinterpret(); /*! * \brief Marks a condition as likely going to happen. */ TVM_DLL const Op& likely(); /*! * \brief Bitwise and operator. */ TVM_DLL const Op& bitwise_and(); /*! * \brief Bitwise or operator. */ TVM_DLL const Op& bitwise_or(); /*! * \brief Bitwise xor operator. */ TVM_DLL const Op& bitwise_xor(); /*! * \brief Bitwise not operator. */ TVM_DLL const Op& bitwise_not(); /*! * \brief Left shift */ TVM_DLL const Op& shift_left(); /*! * \brief Right shift */ TVM_DLL const Op& shift_right(); /*! * \brief See pseudo code * * Construct a big uint that may not be representable by int64 * * Expr large_uint_imm(uint32_t v0, uint32_t v1) { * return (v1 << 32) | v0; * } */ TVM_DLL const Op& large_uint_imm(); /*! * \brief Execute a multiplication between two Q-numbers x and y * followed by a right shift s * The default rounding rule is to the nearest value, rounding half up * (i.e., round(x.1) = x and round(x.5) = x+1) */ TVM_DLL const Op& q_multiply_shift(); /*! * \brief Returns the address of an element in the buffer (see pseudocode below). * * The number of indices should match the dimensionality of the buffer * being accessed. If this operation occurs after buffer flattening, * the number of indices must be supported by the target (i.e. N>1 * only on targets that support non-flat memory buffers). * * Handle address_of(BufferLoad *op) { * return &op->buffer_var[op->indices[0], op->indices[1], ..., op->indices[N-1]]; * } */ TVM_DLL const Op& address_of(); /*! * \brief Same as select, used for unsafe memory access. * * Type tvm_if_then_else(cond, a, b) { * return cond ? a : b; * } */ TVM_DLL const Op& if_then_else(); /*! * \brief See pseudo code * * bool isnullptr(void* handle) { * return handle == nullptr * } */ TVM_DLL const Op& isnullptr(); /*! * \brief Check if value is nan */ TVM_DLL const Op& isnan(); /*! * \brief Popcount */ TVM_DLL const Op& popcount(); /*! * \brief Fused multiply add * * Type fma(a, b, c) { * return a * b + c; * } */ TVM_DLL const Op& fma(); /*!
* \brief Call an extern C function with given name * and signature from the types of args in the runtime environment. * * Type call_extern(name, args...) { * return dlsym(name)(args...); * } * * \note This intrinsic does not provide any type checking, * and is mainly used for backward compatibility reasons. * Always consider using pre-registered and typed tvm::Op first. */ TVM_DLL const Op& call_extern(); /*! * \brief Call a pure extern C function with given name * and signature from the types of args in the runtime environment. * * Type call_pure_extern(name, args...) { * return dlsym(name)(args...); * } * * \note This intrinsic does not provide any type checking, * and is mainly used for backward compatibility reasons. * Always consider using pre-registered and typed tvm::Op first. */ TVM_DLL const Op& call_pure_extern(); /*! * \brief Call an LLVM intrinsic with a given intrinsic id * and signature from the types of args in the runtime environment. * * Type call_llvm_intrin(intrin_id, args...) { * return dlsym(name)(args...); * } * * \note This op does not provide any type checking. */ TVM_DLL const Op& call_llvm_intrin(); /*! * \brief Call an LLVM pure intrinsic with a given intrinsic id * and signature from the types of args in the runtime environment. * * Type call_llvm_pure_intrin(intrin_id, args...) { * return dlsym(name)(args...); * } * * \note This op does not provide any type checking. */ TVM_DLL const Op& call_llvm_pure_intrin(); /*! * \brief Call an SPIRV pure GLSL450 intrinsic. * * Type call_spirv_pure_glsl450(intrin_id, args...) { * return dlsym(name)(args...); * } * * \note This op does not provide any type checking. */ TVM_DLL const Op& call_spirv_pure_glsl450(); // TODO(tvm-team) revisit the builtins below // some of them can simply become ops with special codegen attr. /*! * \brief Prefetch a cacheline */ TVM_DLL const Op& prefetch(); /*! * \brief Get head access address with memory access pattern info. * * This operator also marks the range of the memory access * The offset and extent are in units of the DType (including the vectorization factor). * rw_mask is a bit_mask setting whether the access is a read (1) or write (2). * The access is assumed to happen in the current expression. * * PtrType tvm_access_ptr(Expr dtype, DType* data, * int offset, int extent, * int rw_mask) { * // DType == dtype.type(); * return &data[offset]; * } */ TVM_DLL const Op& tvm_access_ptr(); /*! * \brief Create a function local static handle that initializes to nullptr. * It can be used to cache function local static resources. */ TVM_DLL const Op& tvm_static_handle(); /*! * \brief Return a unique context id, used as a hint for workspace separation. * Different context id guarantees not having overlapping workspace. */ TVM_DLL const Op& tvm_context_id(); /*! * \brief tvm_tuple is not an actual function and cannot be code generated. * It is used to represent tuple structure in value field of AttrStmt, * for the sake of giving a hint to optimization. * * Handle tvm_tuple(value0, value1, ..., value_n); */ TVM_DLL const Op& tvm_tuple(); /*! * \brief See pseudo code * * Type tvm_struct_get(StructType* arr, int index, int field_id) { * return arr[index]->field; * } * \sa TVMStructFieldKind */ TVM_DLL const Op& tvm_struct_get(); /*! * \brief See pseudo code * * Handle tvm_struct_set(StructType* arr, int index, int field_id, value) { * arr[index]->field = value; * } * \sa TVMStructFieldKind */ TVM_DLL const Op& tvm_struct_set(); /*!
* \brief See pseudo code * Type lookup_param(String param_name) { * return __tvm_param__param_name; * } */ TVM_DLL const Op& lookup_param(); /*! * \brief See pseudo code * * void tvm_throw_last_error() { * throw TVMGetLastError(); * } */ TVM_DLL const Op& tvm_throw_last_error(); /*! * \brief See pseudo code * * dtype in {shape, array, arg_value, arg_tcode} * * Handle tvm_stack_alloca(string dtype, int num) { * return new on stack dtype[num]; * } */ TVM_DLL const Op& tvm_stack_alloca(); /*! * \brief Allocate a shape tuple on stack, return the handle. * * Handle tvm_stack_make_shape(list args) { * ret = alloca stack int64_t[len(args)]; * for i in range(len(args)): * ret[i] = args[i] * return &ret[0]; * } */ TVM_DLL const Op& tvm_stack_make_shape(); /*! * \brief Allocate a NDArray(DLTensor) on stack, return the handle. * * Type tvm_stack_make_array(Expr data, * Expr shape, * Expr strides, * Expr ndim, * Expr dtype, * Expr elem_offset) { * ret = alloca stack DLTensor(); * ret->data = data; * ret->shape = shape; * ret->strides = strides != 0 ? strides : nullptr; * ret->ndim = ndim; * ret->dtype = dtype.type(); * ret->byte_offset = elem_offset * sizeof(dtype); * return ret; * } */ TVM_DLL const Op& tvm_stack_make_array(); /*! * \brief See pseudo code * * return_type tvm_call_packed(name, TVMValue* args) { * TVMValue ret_value; * int ret_code; * ModuleNode* env = GetCurrentEnv(); * const PackedFunc* f = env->GetFuncFromEnv(name); * (*f)(args, type_code_of(args), len(args), &ret_value, &ret_code); * // return type can be int, float, handle. * return cast(return_type, ret_value.v_return_type); * } */ TVM_DLL const Op& tvm_call_packed(); /*! * \brief See pseudo code * * return_type tvm_call_cpacked(fname, TVMValue* args) { * int ret_code; * TVMValue ret_value; * (*fname)(args, type_code_of(args), len(args), &ret_value, &ret_code); * return cast(return_type, ret_value.v_return_type); * } */ TVM_DLL const Op& tvm_call_cpacked(); /*! * \brief See pseudo code * * return_type tvm_call_trace_packed(name, TVMValue* args) { * ModuleNode* env = GetCurrentEnv(); * const PackedFunc* f = env->GetFuncFromEnv(name); * (*f)(args, type_code_of(args), len(args)); * // return type can be int, float, handle. * return cast(return_type, ret_value.v_return_type); * } */ TVM_DLL const Op& tvm_call_trace_packed(); /*! * \brief Checks that the return value of another call is correct or returns a given value. * * \note This is meant to serve a specific case for AOT code generator whilst this * cannot be fully represented in TIR. * * Type tvm_check_return(expected, return_unexpected, nested_call) { * if (nested_call() != expected) { * return return_unexpected; * } * } */ TVM_DLL const Op& tvm_check_return(); /*! * \brief See pseudo code * Mark the content as a thread local context; it can get optimized * by only calling the call once at thread start. * * Do not allow nesting (getting a thread context from another). * * Handle tvm_thread_context(Expr call) { * return call; * } */ TVM_DLL const Op& tvm_thread_context(); /*! * \brief Lowered version of call packed, the space of value and * type codes are explicitly allocated. * * return_type tvm_call_packed_lowered(name, * TVMValue* value_stack, * int* tcode_stack, * int begin, * int end) { * ModuleNode* env = GetCurrentEnv(); * const PackedFunc* f = env->GetFuncFromEnv(name); * f->CallPacked(TVMArgs(value_stack[begin:end], * tcode_stack[begin:end]), * TVMRetValue(value_stack + end, tcode_stack + end)); * // return type can be int, float, handle.
* return cast(return_type, load_return_from(tcode_stack + end)) * } */ TVM_DLL const Op& tvm_call_packed_lowered(); /*! * \brief Lowered version of call c-packed, the space of value and * type codes are explicitly allocated. * * int tvm_call_cpacked_lowered(fname, * TVMValue* value_stack, * int* tcode_stack, * int begin, * int end) { * fname(TVMArgs(value_stack[begin:end], tcode_stack[begin:end]), * TVMRetValue(value_stack + end, tcode_stack + end)); * } */ TVM_DLL const Op& tvm_call_cpacked_lowered(); /*! * \brief Lowered version of trace intrinsic, the space of value and * type codes are explicitly allocated. The return value is the * (end - 1) value on the stack. * * return_type tvm_call_trace_packed_lowered(name, * TVMValue* value_stack, * int* tcode_stack, * int begin, * int end) { * ModuleNode* env = GetCurrentEnv(); * const PackedFunc* f = env->GetFuncFromEnv(name); * f->CallPacked(TVMArgs(value_stack[begin:end], * tcode_stack[begin:end]), * TVMRetValue(value_stack + end, tcode_stack + end)); * // return type can be int, float, handle. * return cast(return_type, load_return_from(tcode_stack + end)) * } */ TVM_DLL const Op& tvm_call_trace_packed_lowered(); /*! * \brief See pseudo code * * int tvm_storage_sync(std::string storage_scope) { * __sync(storage_scope); * return 0; * } */ TVM_DLL const Op& tvm_storage_sync(); /*! * \brief See pseudo code * * Type tvm_warp_shuffle(mask, Type value, warp_id, width, warp_size) { * return (value passed in by warp indicated by this_warp_id); * } * * Type tvm_warp_shuffle_up(mask, Type value, offset, width, warp_size) { * return (value passed in by warp indicated by this_warp_id - offset); * } * * Type tvm_warp_shuffle_down(mask, Type value, offset, width, warp_size) { * return (value passed in by warp indicated by this_warp_id + offset); * } * * unsigned tvm_warp_activemask() { * return (32-bit mask of currently active threads in the calling warp); * } * * Parameter warp_id indicates the source thread ID in a warp. * * Parameter offset indicates the relative distance to this_warp_id. * * Parameter width indicates the number of threads involved in one * shuffle. See CUDA document for __shfl_sync, __shfl_up_sync, * __shfl_down_sync and __activemask. * * Parameter warp_size is the size of a warp, which helps a backend * to determine whether the width parameter is legal. * */ TVM_DLL const Op& tvm_warp_shuffle(); TVM_DLL const Op& tvm_warp_shuffle_up(); TVM_DLL const Op& tvm_warp_shuffle_down(); TVM_DLL const Op& tvm_warp_activemask(); /*! * \brief Initialize the global barrier. * Call this at the beginning of kernels that need the global barrier. */ TVM_DLL const Op& tvm_global_barrier_kinit(); /*! * \brief See pseudo code * * void tvm_thread_allreduce(UIntImm size, Expr source0, ..., Expr cond, * Var reduce_temp0, .., Var thread_idx1, ...) { * // constraint by the other thread_idx remain the same. * // reduce_temp is used to save intermediate result. * reduce_temp0, ... = reduce(combiner, source0, ..., cond * over [thread_idx1, thread_idx2] passed by any caller) * } */ TVM_DLL const Op& tvm_thread_allreduce(); // TODO(tvm-team) TensorCore specific intrinsics should be directly registered under // cuda. namespace and used through op. /*! * \brief tvm intrinsic for tensor core load operators. * * void tvm_load_matrix_sync(Var fragment, UIntImm m, UIntImm n, UIntImm k, * Expr index, Expr buffer_ptr, Expr stride, * StringImm layout) { * // m, n, k are the shape of wmma fragment. * // Determine fragment layout (column-major or row-major) by layout.
* // fragments must be in 'wmma.matrix_a' or 'wmma.matrix_b' scope. * nvcuda::wmma::load_matrix_sync(fragment[index], buffer_ptr, stride); * } */ TVM_DLL const Op& tvm_load_matrix_sync(); /*! * \brief tvm intrinsic for tensor core mma_sync operators. * * void tvm_mma_sync(Var fragment_d, Expr index_d, * Var fragment_a, Expr index_a, * Var fragment_b, Expr index_b, * Var fragment_c, Expr index_c) { * nvcuda::wmma::mma_sync(fragment_d[index_d], fragment_a[index_a], * fragment_b[index_b], fragment_c[index_c]); * } */ TVM_DLL const Op& tvm_mma_sync(); /*! * \brief tvm intrinsic for tensor core bmma_sync operators. * * void tvm_bmma_sync(Var fragment_d, Expr index_d, * Var fragment_a, Expr index_a, * Var fragment_b, Expr index_b, * Var fragment_c, Expr index_c) { * nvcuda::wmma::bmma_sync(fragment_d[index_d], fragment_a[index_a], * fragment_b[index_b], fragment_c[index_c]); * } */ TVM_DLL const Op& tvm_bmma_sync(); /*! * \brief tvm intrinsic for tensor core fill_fragment operators. * * void tvm_fill_fragment(Var fragment, UIntImm m, UIntImm n, UIntImm k, * Expr index, Expr value) { * // m, n, k are the shape of wmma fragment * // fragments must be in 'wmma.accumulator' scope. * nvcuda::wmma::fill_fragment(fragment[index], value); * } */ TVM_DLL const Op& tvm_fill_fragment(); /*! * \brief tvm intrinsic for tensor core store operators. * * void tvm_store_matrix_sync(Var fragment, UIntImm m, UIntImm n, UIntImm k, * Expr index, Expr buffer_ptr, Expr stride, * StringImm layout) { * // m, n, k are the shape of wmma fragment * // fragments must be in 'wmma.accumulator' scope. * nvcuda::wmma::store_matrix_sync(fragment[index], buffer_ptr, stride, layout); * } */ TVM_DLL const Op& tvm_store_matrix_sync(); /*! * \brief tvm intrinsic for ptx tensor core mma instructions. * * void ptx_mma(StringImm shape, StringImm A_layout, StringImm B_layout, * StringImm A_dtype, StringImm B_dtype, StringImm C_dtype, * Var multiplicand_a, Expr a_index, * Var multiplicand_b, Expr b_index, * Var accumulator, Expr c_index, bool saturate); */ TVM_DLL const Op& ptx_mma(); /*! * \brief tvm intrinsic for sparse tensor core ptx instructions. * * void ptx_mma_sp(StringImm shape, StringImm A_layout, StringImm B_layout, * StringImm A_dtype, StringImm B_dtype, StringImm C_dtype, * Var multiplicand_a, Expr a_index, * Var multiplicand_b, Expr b_index, * Var accumulator, Expr c_index, * Var metadata, Expr meta_index, * Var sparse_selector, bool saturate); */ TVM_DLL const Op& ptx_mma_sp(); /*! * \brief tvm intrinsic for ptx load matrix from shared memory. * * void ptx_ldmatrix(Bool trans, IntImm num, StringImm type, * Var local_ptr, Expr local_offset, * Var smem_ptr, Expr smem_offset); */ TVM_DLL const Op& ptx_ldmatrix(); /*! * \brief tvm intrinsics for ptx async copy from global to shared memory * * void ptx_cp_async(Var shared_ptr, Expr shared_offset, Var global_ptr, Expr global_offset, size_t * bytes); * */ TVM_DLL const Op& ptx_cp_async(); /*! * \brief tvm intrinsics for ptx async copy commit and wait. * * void ptx_commit_group(); * void ptx_wait_group(int num); * */ TVM_DLL const Op& ptx_commit_group(); TVM_DLL const Op& ptx_wait_group(); /*! * \brief tvm intrinsic for storing the result of PTX MMA into a destination pointer. * For example, if each thread in a warp of size 32 has 4 elements from the result of * m16xn8xk16 MMA in its registers, this intrinsic can be used to store the result in a * 16x8 region in shared or global memory.
* * There is no real PTX instruction that does that, but we want to hide details of * complex index manipulation behind this intrinsic to simplify TIR lowering passes (e.g. * LowerWarpMemory). * * void mma_store(IntImm m, IntImm n, Var dst_ptr, Var src_ptr, Expr src_offset, Var dst_stride); */ TVM_DLL const Op& mma_store(); /*! * \brief tvm intrinsic for zero-initializing an MMA accumulation register. * For example, if each thread in a warp of size 32 has 8 elements from the A matrix in * m16xn8xk16 MMA in its registers, this intrinsic can be used to zero-initialize its * 4 accumulation registers. * * There is no real PTX instruction that does that, but we introduce this intrinsic for the * same reason as mma_store above. * * void mma_fill(IntImm local_size, Var local_ptr, Expr offset); */ TVM_DLL const Op& mma_fill(); // TODO(tvm-team) replace the usage of the vector operations by Shuffle. /*! * \brief Get the high half of the vector */ TVM_DLL const Op& vectorhigh(); /*! * \brief Get the low half of the vector */ TVM_DLL const Op& vectorlow(); /*! * \brief Concat two vectors. */ TVM_DLL const Op& vectorcombine(); /*! * \brief atomic add instruction, corresponding e.g. to atomicAdd in CUDA */ TVM_DLL const Op& atomic_add(); /*! * \brief Create an Nd memory allocation with storage scope */ TVM_DLL const Op& nd_mem_alloc_with_scope(); /*! * \brief Store to texture 2d memory */ TVM_DLL const Op& texture2d_store(); /*! * \brief Load from texture 2d memory */ TVM_DLL const Op& texture2d_load(); /*! * \brief Copy 1d memory from source to destination * Same semantics as memcpy(destination, source, size) * Allows for device specific implementations e.g. direct memory access (DMA) */ TVM_DLL const Op& mem_copy(); /*! * \brief Initiate a non-blocking DMA copy from source to destination */ TVM_DLL const Op& dma_copy(); /*! * \brief Wait until the number of DMAs in flight is less than or equal to some maximum */ TVM_DLL const Op& dma_wait(); /*! * \brief Provide a true statement that can be used for simplifications * * Compile-time representation of known constraints about function * inputs. This assumption is removed when lowering, and does not * occur in codegen. */ TVM_DLL const Op& assume(); /*! * \brief Returns an initialized but arbitrary value * * Compile-time representation of memory locations whose values may be * altered as a result of optimizations. */ TVM_DLL const Op& undef(); /*! * \brief Profiling intrinsic */ TVM_DLL const Op& start_profile_intrinsic(); /*! * \brief Profiling intrinsic */ TVM_DLL const Op& end_profile_intrinsic(); /*! \brief The kind of structure field info used in intrinsic */ enum TVMStructFieldKind : int { // array head address kArrAddr, kArrData, kArrShape, kArrStrides, kArrNDim, kArrTypeCode, kArrTypeBits, kArrTypeLanes, kArrByteOffset, kArrDeviceId, kArrDeviceType, kArrKindBound_, // TVMValue field kTVMValueContent, kTVMValueKindBound_ }; } // namespace builtin } // namespace tir } // namespace tvm #endif  // TVM_TIR_BUILTIN_H_
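These builtins are ordinary tvm::Op handles, so they are invoked by constructing a tir::Call. A hedged sketch follows; `SafeDivide` is a made-up helper, and make_zero/div are assumed to come from tvm/tir/op.h (not shown in this dump):

#include <tvm/tir/builtin.h>
#include <tvm/tir/expr.h>
#include <tvm/tir/op.h>

// Guarded division: b != 0 ? a / b : 0, built on the if_then_else
// builtin, which (unlike Select) does not evaluate the untaken branch.
tvm::PrimExpr SafeDivide(tvm::PrimExpr a, tvm::PrimExpr b) {
  tvm::PrimExpr cond = tvm::tir::NE(b, tvm::make_zero(b.dtype()));
  return tvm::tir::Call(a.dtype(), tvm::tir::builtin::if_then_else(),
                        {cond, tvm::div(a, b), tvm::make_zero(a.dtype())});
}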
https://github.com/zk-ml/tachikoma
include/tvm/tir/data_layout.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/tir/data_layout.h * \brief Layout expression to describe the data organization of a tensor, * and BijectiveLayout to map between two data layouts. */ #ifndef TVM_TIR_DATA_LAYOUT_H_ #define TVM_TIR_DATA_LAYOUT_H_ #include <tvm/tir/expr.h> #include <tvm/tir/op.h> #include <algorithm> #include <sstream> #include <string> #include <utility> #include <vector> namespace tvm { namespace tir { class Layout; class LayoutAxis { public: static const LayoutAxis& Get(const char name); // Get the singleton LayoutAxis using itvar->var->name_hint static const LayoutAxis& Get(const tir::IterVar& itvar); // Get the singleton LayoutAxis using name[0] (size of name must be 1). static const LayoutAxis& Get(const std::string& name); inline bool IsPrimal() const { return name_ >= 'A' && name_ <= 'Z'; } inline std::string name() const { return std::string(1, name_); } // if current axis is primal, switch the axis to its subordinate one, // else switch to the primal. inline const LayoutAxis& ToDual() const { if (name_ >= 'A' && name_ <= 'Z') { return LayoutAxis::Get(name_ - 'A' + 'a'); } else { return LayoutAxis::Get(name_ - 'a' + 'A'); } } // return the primal axis. If it is already primal, return itself. const LayoutAxis& ToPrimal() const { return IsPrimal() ? *this : ToDual(); } // return the subordinate axis. If it is already subordinate, return itself. const LayoutAxis& ToSubordinate() const { return IsPrimal() ? ToDual() : *this; } inline bool operator==(const LayoutAxis& rhs) const { return name_ == rhs.name_; } friend std::ostream& operator<<(std::ostream& os, const LayoutAxis& l) { os << l.name(); return os; } private: static const LayoutAxis UPPER_CASE[]; static const LayoutAxis LOWER_CASE[]; LayoutAxis(const LayoutAxis&); LayoutAxis& operator=(const LayoutAxis&); explicit LayoutAxis(const char name) : name_(name) {} const char name_; }; /*! * \brief Layout describes how data is organized within an N-dimensional tensor. * It is composed of upper case letters, lower case letters and numbers, * where an upper case letter indicates a primal axis and * the corresponding lower case letter with a factor size indicates the subordinate axis. * For example, NCHW16c can describe a 5-D tensor of * [batch_size, channel, height, width, channel_block]. * Here the subordinate axis channel_block=16 is the factor size of the primal axis C (channel). * The layout for a scalar is defined, with both its name and axes having size 0. */ class LayoutNode : public Object { public: /*! \brief string representation of layout, "" for scalar. */ String name; /*! \brief specify each axis of the layout, * in which the variable name is the name of the axis.
* The IterVar's extent indicates the size of the axis, * it is a variable for a primal axis, but a constant for a subordinate axis. * Empty for scalar's layout. */ Array<tir::IterVar> axes; void VisitAttrs(AttrVisitor* v) { v->Visit("name", &name); v->Visit("axes", &axes); } static constexpr const char* _type_key = "tir.Layout"; TVM_DECLARE_FINAL_OBJECT_INFO(LayoutNode, Object); }; /*! * \brief Managed reference to LayoutNode * \sa LayoutNode */ class Layout : public ObjectRef { public: explicit Layout(const Array<tir::IterVar>& axes); /*! \brief construct from a string */ Layout(const tvm::String& name) : Layout(name.operator std::string()) {} // NOLINT(*) /*! \brief construct from a string */ Layout(const char* name) : Layout(std::string(name)) {} // NOLINT(*) /*! * \brief construct from a string. * \param name input in layout convention: * upper case indicates a dimension and * the corresponding lower case with factor size * indicates the split dimension. * return undefined layout if "__undef__" is passed. */ TVM_DLL Layout(const std::string& name); // NOLINT(*) /*! * \brief access the internal node container * \return the pointer to the internal node container */ LayoutNode* operator->() { return static_cast<LayoutNode*>(get_mutable()); } /*! * \brief Return an undefined layout. * \return a (global) undefined layout. */ static const Layout& Undef() { static Layout undef; return undef; } /*! * \brief Returns a sub-layout which is the portion of the object * that starts at dimension \p pos and spans \p len dimensions * (or until the end of the layout, whichever comes first). * \param pos The start position. * \param len The length of the sub-layout. if 0, return layout of scalar * \return A newly constructed Layout object. */ Layout SubLayout(size_t pos, size_t len) const; /*! * \brief Split \p axis by \p size and put the sub-axis to position \p target_pos. * \param axis The source axis to be split. It must be a primal-axis; * \param target_pos The target position of the newly split subordinate-axis. * \param factor size of the sub-dimension. * \return A newly constructed Layout object. */ Layout Split(const LayoutAxis& axis, size_t target_pos, int32_t factor) const; /*! \return number of dimensions */ inline size_t ndim() const { if (!defined()) return 0; return operator->()->axes.size(); } /*! \return number of super dimensions */ inline size_t ndim_primal() const { if (!defined()) return 0; size_t ct = 0; for (auto x : operator->()->axes) { if (LayoutAxis::Get(x).IsPrimal()) { ct++; } } return ct; } /*! * \brief Returns a new layout where the dims have been expanded to match the primal dimensions. * \param dst_layout The dst layout to which current layout has to be expanded. * \return The expanded Layout. */ inline Layout ExpandPrimal(const Layout& dst_layout) { Layout new_src_layout; // 1) Find the axis which are missing in the current layout. Make them the prefix. std::string new_src_layout_str = ""; for (auto dst_axis : dst_layout->axes) { if (LayoutAxis::Get(dst_axis).IsPrimal()) { if (!this->Contains(LayoutAxis::Get(dst_axis))) { new_src_layout_str += dst_axis->var->name_hint; } } } // 2) Now, add the primal axis of the current layout. new_src_layout_str += this->name(); new_src_layout = Layout(new_src_layout_str); return new_src_layout; } /*! * \brief return the index of the input axis. * If it is not found in the layout or the layout is undefined, * return -1. * \param axis the input axis. * \return the index or -1 if not found. 
*/ inline int32_t IndexOf(const LayoutAxis& axis) const { if (!this->defined()) return -1; const auto axes = operator->()->axes; for (size_t i = 0; i < axes.size(); ++i) { if (axes[i]->var->name_hint == axis.name()) return static_cast<int32_t>(i); } return -1; } /*! * \brief Get the factor size of the subordinate axis. * \param axis the input primal-axis or subordinate-axis. * \return the size of the subordinate-axis of \p axis (if \p axis is a primal-axis), * or the size of \p axis itself (if \p axis is a subordinate-axis). * Return -1 if \p axis is not in the layout or the layout is undefined. */ int32_t FactorOf(const LayoutAxis& axis) const; /*! * \brief Whether the layout contains an axis. * \param axis axis to be checked. * \return Whether the layout contains the axis. */ bool Contains(const LayoutAxis& axis) const { if (!defined()) return false; for (const tir::IterVar var : operator->()->axes) { if (var->var->name_hint == axis.name()) { return true; } } return false; } const LayoutAxis& operator[](int32_t i) const { ICHECK(defined()) << "Try to access axis from an undefined layout."; int32_t index = i < 0 ? static_cast<int32_t>(ndim() + i) : i; ICHECK(index >= 0 && static_cast<size_t>(index) < ndim()) << "Invalid index " << i; const tir::IterVar axis = operator->()->axes[index]; return LayoutAxis::Get(axis); } /*! \return the string description of the layout */ inline std::string name() const { if (!defined()) return "__undef__"; return operator->()->name; } /*! * \brief Whether the two layouts are equal. * \param rhs Another layout. * \return whether the two layouts are equal. */ inline bool Equals(const Layout& rhs) const { return name() == rhs.name(); } /*! * \brief allow output string of layout to ostream * \param os the output stream * \param l the layout * \return the ostream */ friend std::ostream& operator<<(std::ostream& os, const Layout& l) { os << l.name(); return os; } TVM_DEFINE_OBJECT_REF_METHODS(Layout, ObjectRef, LayoutNode); }; // Internal node container BijectiveLayout class BijectiveLayoutNode : public Object { public: /*! \brief Describes how source axes can be mapped to the destination axes, * e.g., [i0 / 16, i1, i0 % 16] can describe NC -> NC16n */ Array<PrimExpr> index_forward_rule; /*! \brief Describes how destination axes can be mapped to the source axes */ Array<PrimExpr> index_backward_rule; /*! \brief Describes how source shapes can be mapped to the destination shapes */ Array<PrimExpr> shape_forward_rule; /*! \brief Describes how destination shapes can be mapped to the source shapes */ Array<PrimExpr> shape_backward_rule; /*! \brief The source layout */ Layout src_layout; /*! \brief The destination layout */ Layout dst_layout; void VisitAttrs(AttrVisitor* v) { v->Visit("src_layout", &src_layout); v->Visit("dst_layout", &dst_layout); v->Visit("index_forward_rule", &index_forward_rule); v->Visit("index_backward_rule", &index_backward_rule); v->Visit("shape_forward_rule", &shape_forward_rule); v->Visit("shape_backward_rule", &shape_backward_rule); } static constexpr const char* _type_key = "tir.BijectiveLayout"; TVM_DECLARE_FINAL_OBJECT_INFO(BijectiveLayoutNode, Object); }; /*! * \brief Bijective function mapping for data layout transformation. * Given two Layouts, BijectiveLayout builds and stores the mapping rules, * and provides an API to transform an N-dimensional tensor from the source indices (i0, i1, .., im) * to the destination indices (j0, j1, .., jm). */ class BijectiveLayout : public ObjectRef { public: /*!
* \brief The constructor * \param src_layout The source layout * \param dst_layout The destination layout */ TVM_DLL BijectiveLayout(Layout src_layout, Layout dst_layout); // Given the source shape, infer the destination shape. TVM_DLL Array<PrimExpr> ForwardShape(const Array<PrimExpr>& shape) const; // Given the destination shape, recover the source shape. TVM_DLL Array<PrimExpr> BackwardShape(const Array<PrimExpr>& dst_shape) const; // Given the source indices, infer the destination indices. TVM_DLL Array<PrimExpr> ForwardIndex(const Array<PrimExpr>& index) const; // Given the destination indices, recover the source indices. TVM_DLL Array<PrimExpr> BackwardIndex(const Array<PrimExpr>& dst_index) const; TVM_DEFINE_OBJECT_REF_METHODS(BijectiveLayout, ObjectRef, BijectiveLayoutNode); }; } // namespace tir } // namespace tvm #endif  // TVM_TIR_DATA_LAYOUT_H_
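A brief sketch of the layout machinery above: splitting the channel axis of an NCHW shape into NCHW16c. The concrete shape is arbitrary and `ToNCHW16c` is a hypothetical helper:

#include <tvm/tir/data_layout.h>

// Map a [1, 64, 32, 32] NCHW shape to NCHW16c; the result is
// [1, 4, 32, 32, 16], since C = 64 is split by a factor of 16.
tvm::Array<tvm::PrimExpr> ToNCHW16c() {
  tvm::tir::BijectiveLayout bij(tvm::tir::Layout("NCHW"), tvm::tir::Layout("NCHW16c"));
  return bij.ForwardShape({1, 64, 32, 32});
}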
https://github.com/zk-ml/tachikoma
include/tvm/tir/expr.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/tir/expr.h * \brief TIR expressions. */ // Acknowledgement: Many low-level IR nodes originate from Halide. #ifndef TVM_TIR_EXPR_H_ #define TVM_TIR_EXPR_H_ #include <tvm/ir/expr.h> #include <tvm/node/functor.h> #include <tvm/node/node.h> #include <tvm/runtime/c_runtime_api.h> #include <tvm/runtime/container/array.h> #include <tvm/runtime/container/map.h> #include <tvm/runtime/container/string.h> #include <tvm/runtime/data_type.h> #include <tvm/tir/buffer.h> #include <tvm/tir/var.h> #include <algorithm> #include <iostream> #include <limits> #include <string> #include <unordered_map> #include <utility> namespace tvm { namespace tir { using IntImmNode = tvm::IntImmNode; using FloatImmNode = tvm::FloatImmNode; /*! \brief String constants, only used in asserts. */ class StringImmNode : public PrimExprNode { public: /*! \brief The constant value content. */ String value; void VisitAttrs(AttrVisitor* v) { v->Visit("dtype", &dtype); v->Visit("value", &value); v->Visit("span", &span); } bool SEqualReduce(const StringImmNode* other, SEqualReducer equal) const { return equal(value, other->value); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce(value); } static constexpr const char* _type_key = "tir.StringImm"; TVM_DECLARE_FINAL_OBJECT_INFO(StringImmNode, PrimExprNode); }; /*! * \brief Managed reference to StringImmNode. * \sa StringImmNode */ class StringImm : public PrimExpr { public: TVM_DLL StringImm(String value, Span span = Span()); TVM_DEFINE_OBJECT_REF_METHODS(StringImm, PrimExpr, StringImmNode); }; /*! * \brief Cast value from one data type to another. * \note The lanes of value should remain fixed. */ class CastNode : public PrimExprNode { public: /*! \brief The value to be cast. */ PrimExpr value; void VisitAttrs(AttrVisitor* v) { v->Visit("dtype", &dtype); v->Visit("value", &value); v->Visit("span", &span); } bool SEqualReduce(const CastNode* other, SEqualReducer equal) const { return equal(dtype, other->dtype) && equal(value, other->value); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce(dtype); hash_reduce(value); } static constexpr const char* _type_key = "tir.Cast"; TVM_DECLARE_FINAL_OBJECT_INFO(CastNode, PrimExprNode); }; /*! * \brief Managed reference to CastNode * \sa CastNode */ class Cast : public PrimExpr { public: TVM_DLL Cast(DataType dtype, PrimExpr value, Span span = Span()); TVM_DEFINE_OBJECT_REF_METHODS(Cast, PrimExpr, CastNode); }; /*! * \brief Base template to implement binary ops. * \tparam T The type of the child class. */ template <typename T> class BinaryOpNode : public PrimExprNode { public: /*! \brief The left operand. */ PrimExpr a; /*! \brief The right operand.
*/ PrimExpr b; void VisitAttrs(AttrVisitor* v) { v->Visit("dtype", &(this->dtype)); v->Visit("a", &a); v->Visit("b", &b); v->Visit("span", &span); } bool SEqualReduce(const T* other, SEqualReducer equal) const { return equal(dtype, other->dtype) && equal(a, other->a) && equal(b, other->b); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce(dtype); hash_reduce(a); hash_reduce(b); } TVM_DECLARE_FINAL_OBJECT_INFO(T, PrimExprNode); }; /*! \brief a + b */ class AddNode : public BinaryOpNode<AddNode> { public: static constexpr const char* _type_key = "tir.Add"; }; /*! * \brief Managed reference to AddNode * \sa AddNode */ class Add : public PrimExpr { public: TVM_DLL Add(PrimExpr a, PrimExpr b, Span span = Span()); TVM_DEFINE_OBJECT_REF_METHODS(Add, PrimExpr, AddNode); }; /*! \brief a - b */ class SubNode : public BinaryOpNode<SubNode> { public: static constexpr const char* _type_key = "tir.Sub"; }; /*! * \brief Managed reference to SubNode * \sa SubNode */ class Sub : public PrimExpr { public: TVM_DLL Sub(PrimExpr a, PrimExpr b, Span span = Span()); TVM_DEFINE_OBJECT_REF_METHODS(Sub, PrimExpr, SubNode); }; /*! \brief a * b */ class MulNode : public BinaryOpNode<MulNode> { public: static constexpr const char* _type_key = "tir.Mul"; }; /*! * \brief Managed reference to MulNode * \sa MulNode */ class Mul : public PrimExpr { public: TVM_DLL Mul(PrimExpr a, PrimExpr b, Span span = Span()); TVM_DEFINE_OBJECT_REF_METHODS(Mul, PrimExpr, MulNode); }; /*! * \brief a / b in the C semantics. * \note For integer division, C standard uses trunc div. */ class DivNode : public BinaryOpNode<DivNode> { public: static constexpr const char* _type_key = "tir.Div"; }; /*! * \brief Managed reference to DivNode * \sa DivNode */ class Div : public PrimExpr { public: TVM_DLL Div(PrimExpr a, PrimExpr b, Span span = Span()); TVM_DEFINE_OBJECT_REF_METHODS(Div, PrimExpr, DivNode); }; /*! * \brief a % b in the C semantics. * \note For integer mod, C standard uses trunc div. */ class ModNode : public BinaryOpNode<ModNode> { public: static constexpr const char* _type_key = "tir.Mod"; }; /*! * \brief Managed reference to ModNode * \sa ModNode */ class Mod : public PrimExpr { public: TVM_DLL Mod(PrimExpr a, PrimExpr b, Span span = Span()); TVM_DEFINE_OBJECT_REF_METHODS(Mod, PrimExpr, ModNode); }; /*! \brief Floor division, floor(a/b) */ class FloorDivNode : public BinaryOpNode<FloorDivNode> { public: static constexpr const char* _type_key = "tir.FloorDiv"; }; /*! * \brief Managed reference to FloorDivNode * \sa FloorDivNode */ class FloorDiv : public PrimExpr { public: TVM_DLL FloorDiv(PrimExpr a, PrimExpr b, Span span = Span()); TVM_DEFINE_OBJECT_REF_METHODS(FloorDiv, PrimExpr, FloorDivNode); }; /*! \brief The remainder of the floordiv */ class FloorModNode : public BinaryOpNode<FloorModNode> { public: static constexpr const char* _type_key = "tir.FloorMod"; }; /*! * \brief Managed reference to FloorModNode * \sa FloorModNode */ class FloorMod : public PrimExpr { public: TVM_DLL FloorMod(PrimExpr a, PrimExpr b, Span span = Span()); TVM_DEFINE_OBJECT_REF_METHODS(FloorMod, PrimExpr, FloorModNode); }; /*! \brief min(a, b) */ class MinNode : public BinaryOpNode<MinNode> { public: static constexpr const char* _type_key = "tir.Min"; }; /*! * \brief Managed reference to MinNode * \sa MinNode */ class Min : public PrimExpr { public: TVM_DLL Min(PrimExpr a, PrimExpr b, Span span = Span()); TVM_DEFINE_OBJECT_REF_METHODS(Min, PrimExpr, MinNode); }; /*!
\brief max(a, b) */ class MaxNode : public BinaryOpNode<MaxNode> { public: static constexpr const char* _type_key = "tir.Max"; }; /*! * \brief Managed reference to MaxNode * \sa MaxNode */ class Max : public PrimExpr { public: TVM_DLL Max(PrimExpr a, PrimExpr b, Span span = Span()); TVM_DEFINE_OBJECT_REF_METHODS(Max, PrimExpr, MaxNode); }; /*! * \brief Base template to implement comparison ops. * \tparam T The type of the child class. */ template <typename T> class CmpOpNode : public PrimExprNode { public: /*! \brief The left operand. */ PrimExpr a; /*! \brief The right operand. */ PrimExpr b; void VisitAttrs(AttrVisitor* v) { v->Visit("dtype", &(this->dtype)); v->Visit("a", &a); v->Visit("b", &b); v->Visit("span", &span); } bool SEqualReduce(const T* other, SEqualReducer equal) const { return equal(dtype, other->dtype) && equal(a, other->a) && equal(b, other->b); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce(dtype); hash_reduce(a); hash_reduce(b); } TVM_DECLARE_FINAL_OBJECT_INFO(T, PrimExprNode); }; /*! \brief a == b */ class EQNode : public CmpOpNode<EQNode> { public: static constexpr const char* _type_key = "tir.EQ"; }; /*! * \brief Managed reference to EQNode * \sa EQNode */ class EQ : public PrimExpr { public: TVM_DLL EQ(PrimExpr a, PrimExpr b, Span span = Span()); TVM_DEFINE_OBJECT_REF_METHODS(EQ, PrimExpr, EQNode); }; /*! \brief a != b */ class NENode : public CmpOpNode<NENode> { public: static constexpr const char* _type_key = "tir.NE"; }; /*! * \brief Managed reference to NENode * \sa NENode */ class NE : public PrimExpr { public: TVM_DLL NE(PrimExpr a, PrimExpr b, Span span = Span()); TVM_DEFINE_OBJECT_REF_METHODS(NE, PrimExpr, NENode); }; /*! \brief a < b */ class LTNode : public CmpOpNode<LTNode> { public: static constexpr const char* _type_key = "tir.LT"; }; /*! * \brief Managed reference to LTNode * \sa LTNode */ class LT : public PrimExpr { public: TVM_DLL LT(PrimExpr a, PrimExpr b, Span span = Span()); TVM_DEFINE_OBJECT_REF_METHODS(LT, PrimExpr, LTNode); }; /*! \brief a <= b */ class LENode : public CmpOpNode<LENode> { public: static constexpr const char* _type_key = "tir.LE"; }; /*! * \brief Managed reference to LENode * \sa LENode */ class LE : public PrimExpr { public: TVM_DLL LE(PrimExpr a, PrimExpr b, Span span = Span()); TVM_DEFINE_OBJECT_REF_METHODS(LE, PrimExpr, LENode); }; /*! \brief a > b */ class GTNode : public CmpOpNode<GTNode> { public: static constexpr const char* _type_key = "tir.GT"; }; /*! * \brief Managed reference to GTNode * \sa GTNode */ class GT : public PrimExpr { public: TVM_DLL GT(PrimExpr a, PrimExpr b, Span span = Span()); TVM_DEFINE_OBJECT_REF_METHODS(GT, PrimExpr, GTNode); }; /*! \brief a >= b */ class GENode : public CmpOpNode<GENode> { public: static constexpr const char* _type_key = "tir.GE"; }; /*! * \brief Managed reference to GENode * \sa GENode */ class GE : public PrimExpr { public: TVM_DLL GE(PrimExpr a, PrimExpr b, Span span = Span()); TVM_DEFINE_OBJECT_REF_METHODS(GE, PrimExpr, GENode); }; /*! \brief a && b */ class AndNode : public PrimExprNode { public: /*! \brief The left operand. */ PrimExpr a; /*! \brief The right operand.
*/ PrimExpr b; void VisitAttrs(AttrVisitor* v) { v->Visit("dtype", &(this->dtype)); v->Visit("a", &a); v->Visit("b", &b); v->Visit("span", &span); } bool SEqualReduce(const AndNode* other, SEqualReducer equal) const { return equal(dtype, other->dtype) && equal(a, other->a) && equal(b, other->b); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce(dtype); hash_reduce(a); hash_reduce(b); } static constexpr const char* _type_key = "tir.And"; TVM_DECLARE_FINAL_OBJECT_INFO(AndNode, PrimExprNode); }; /*! * \brief Managed reference to AndNode * \sa AndNode */ class And : public PrimExpr { public: TVM_DLL And(PrimExpr a, PrimExpr b, Span span = Span()); TVM_DEFINE_OBJECT_REF_METHODS(And, PrimExpr, AndNode); }; /*! \brief a || b */ class OrNode : public PrimExprNode { public: /*! \brief The left operand. */ PrimExpr a; /*! \brief The right operand. */ PrimExpr b; void VisitAttrs(AttrVisitor* v) { v->Visit("dtype", &dtype); v->Visit("a", &a); v->Visit("b", &b); v->Visit("span", &span); } bool SEqualReduce(const OrNode* other, SEqualReducer equal) const { return equal(dtype, other->dtype) && equal(a, other->a) && equal(b, other->b); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce(dtype); hash_reduce(a); hash_reduce(b); } static constexpr const char* _type_key = "tir.Or"; TVM_DECLARE_FINAL_OBJECT_INFO(OrNode, PrimExprNode); }; /*! * \brief Managed reference to OrNode * \sa OrNode */ class Or : public PrimExpr { public: TVM_DLL Or(PrimExpr a, PrimExpr b, Span span = Span()); TVM_DEFINE_OBJECT_REF_METHODS(Or, PrimExpr, OrNode); }; /*! \brief !a */ class NotNode : public PrimExprNode { public: /*! \brief The input operand. */ PrimExpr a; void VisitAttrs(AttrVisitor* v) { v->Visit("dtype", &dtype); v->Visit("a", &a); v->Visit("span", &span); } bool SEqualReduce(const NotNode* other, SEqualReducer equal) const { return equal(dtype, other->dtype) && equal(a, other->a); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce(dtype); hash_reduce(a); } static constexpr const char* _type_key = "tir.Not"; TVM_DECLARE_FINAL_OBJECT_INFO(NotNode, PrimExprNode); }; /*! * \brief Managed reference to NotNode * \sa NotNode */ class Not : public PrimExpr { public: TVM_DLL Not(PrimExpr a, Span span = Span()); TVM_DEFINE_OBJECT_REF_METHODS(Not, PrimExpr, NotNode); }; /*! * \brief Return true_value if the condition is true, otherwise return false_value. * \note Both true_value and false_value could be evaluated * regardless of the condition value. * Do not use it to guard against out-of-bound access; * use if_then_else instead. */ class SelectNode : public PrimExprNode { public: /*! \brief The condition */ PrimExpr condition; /*! \brief Value to be returned when the condition is true. */ PrimExpr true_value; /*! \brief Value to be returned when the condition is false. */ PrimExpr false_value; void VisitAttrs(AttrVisitor* v) { v->Visit("dtype", &dtype); v->Visit("condition", &condition); v->Visit("true_value", &true_value); v->Visit("false_value", &false_value); v->Visit("span", &span); } bool SEqualReduce(const SelectNode* other, SEqualReducer equal) const { return equal(dtype, other->dtype) && equal(condition, other->condition) && equal(true_value, other->true_value) && equal(false_value, other->false_value); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce(dtype); hash_reduce(condition); hash_reduce(true_value); hash_reduce(false_value); } static constexpr const char* _type_key = "tir.Select"; TVM_DECLARE_FINAL_OBJECT_INFO(SelectNode, PrimExprNode); }; /*!
* \brief Managed reference to SelectNode * \sa SelectNode */ class Select : public PrimExpr { public: TVM_DLL Select(PrimExpr condition, PrimExpr true_value, PrimExpr false_value, Span span = Span()); TVM_DEFINE_OBJECT_REF_METHODS(Select, PrimExpr, SelectNode); }; /*! * \brief Load a value from a high-dimensional buffer. * * \code * * value = buffer[i, j]; * * \endcode * \sa BufferStore */ class BufferLoadNode : public PrimExprNode { public: /*! \brief The buffer to be loaded. */ Buffer buffer; /*! \brief The index locations to be loaded. */ Array<PrimExpr> indices; void VisitAttrs(AttrVisitor* v) { v->Visit("dtype", &(this->dtype)); v->Visit("buffer", &buffer); v->Visit("indices", &indices); v->Visit("span", &span); } bool SEqualReduce(const BufferLoadNode* other, SEqualReducer equal) const { return equal(dtype, other->dtype) && equal(buffer, other->buffer) && equal(indices, other->indices); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce(dtype); hash_reduce(buffer); hash_reduce(indices); } static constexpr const char* _type_key = "tir.BufferLoad"; TVM_DECLARE_FINAL_OBJECT_INFO(BufferLoadNode, PrimExprNode); private: /*! \brief Set the dtype based on the buffer/indices * * Usually, the BufferLoad's dtype will be the same dtype as the * buffer. This may have a different number of lanes than the * buffer's dtype if index values have more than 1 lane. * * This function should only be called during construction and after * CopyOnWrite. Friend class used here to restrict usage. */ void LegalizeDType(); friend class BufferLoad; friend class CustomDatatypesLowerer; friend class VectorTypeRewriter; friend class Vectorizer; }; /*! * \brief Managed reference to BufferLoadNode. * \sa BufferLoadNode */ class BufferLoad : public PrimExpr { public: TVM_DLL explicit BufferLoad(Buffer buffer, Array<PrimExpr> indices, Span span = Span()); TVM_DEFINE_OBJECT_REF_METHODS(BufferLoad, PrimExpr, BufferLoadNode); TVM_DEFINE_OBJECT_REF_COW_METHOD(BufferLoadNode); }; /*! * \brief Load a value from the result produced by the producer. * * \note This node only appears in high-level DSLs that are built on top of the TIR. * It should not appear in a valid TIR PrimFunc. A high-level DSL needs to lower * this node before TIR transformations. * * \sa ProducerLoad, DataProducerNode */ class ProducerLoadNode : public PrimExprNode { public: /*! \brief The buffer producer. */ DataProducer producer; /*! \brief The location arguments. */ Array<PrimExpr> indices; void VisitAttrs(AttrVisitor* v) { v->Visit("dtype", &(this->dtype)); v->Visit("producer", &producer); v->Visit("indices", &indices); v->Visit("span", &span); } bool SEqualReduce(const ProducerLoadNode* other, SEqualReducer equal) const { return equal(dtype, other->dtype) && equal(producer, other->producer) && equal(indices, other->indices); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce(dtype); hash_reduce(producer); hash_reduce(indices); } static constexpr const char* _type_key = "tir.ProducerLoad"; TVM_DECLARE_FINAL_OBJECT_INFO(ProducerLoadNode, PrimExprNode); }; /*! * \brief Managed reference to ProducerLoadNode. * \sa ProducerLoadNode */ class ProducerLoad : public PrimExpr { public: TVM_DLL explicit ProducerLoad(DataProducer producer, Array<PrimExpr> indices, Span span = Span()); TVM_DEFINE_OBJECT_REF_METHODS(ProducerLoad, PrimExpr, ProducerLoadNode); }; /*! * \brief Load the value from buffer_var. * * Equivalent to ((DType*)buffer_var)[index] * where DType is the type specified by type().element_of().
* * For example, if type = float32x3, then the load corresponds to * * \code * * auto buffer = static_cast<float*>(buffer_var); * auto loaded_val = float32x3(buffer[index.v0], buffer[index.v1], buffer[index.v2]); * * \endcode */ class LoadNode : public PrimExprNode { public: /*! \brief The buffer variable. */ Var buffer_var; /*! \brief The index locations to be loaded. */ PrimExpr index; /*! \brief The predicate to mask which lanes would be loaded. */ PrimExpr predicate; void VisitAttrs(AttrVisitor* v) { v->Visit("dtype", &dtype); v->Visit("buffer_var", &buffer_var); v->Visit("index", &index); v->Visit("predicate", &predicate); v->Visit("span", &span); } bool SEqualReduce(const LoadNode* other, SEqualReducer equal) const { return equal(dtype, other->dtype) && equal(buffer_var, other->buffer_var) && equal(index, other->index) && equal(predicate, other->predicate); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce(dtype); hash_reduce(buffer_var); hash_reduce(index); hash_reduce(predicate); } static constexpr const char* _type_key = "tir.Load"; TVM_DECLARE_FINAL_OBJECT_INFO(LoadNode, PrimExprNode); }; /*! * \brief Managed reference to LoadNode * \sa LoadNode */ class Load : public PrimExpr { public: TVM_DLL Load(DataType dtype, Var buffer_var, PrimExpr index, PrimExpr predicate, Span span = Span()); TVM_DEFINE_OBJECT_REF_METHODS(Load, PrimExpr, LoadNode); }; /*! * \brief Construct a vector with lanes elements * where its i-th element equals base + i * stride. * This is useful for constructing an index for a contiguous vector load. * * Examples: * - ramp(0, 1, 3) = [0, 1, 2] * - ramp(1, 2, 4) = [1, 3, 5, 7] */ class RampNode : public PrimExprNode { public: /*! \brief The base value. */ PrimExpr base; /*! \brief The stride of each step. */ PrimExpr stride; /*! \brief Total number of lanes. */ int lanes; void VisitAttrs(AttrVisitor* v) { v->Visit("dtype", &dtype); v->Visit("base", &base); v->Visit("stride", &stride); v->Visit("lanes", &lanes); v->Visit("span", &span); } bool SEqualReduce(const RampNode* other, SEqualReducer equal) const { return equal(dtype, other->dtype) && equal(base, other->base) && equal(stride, other->stride) && equal(lanes, other->lanes); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce(dtype); hash_reduce(base); hash_reduce(stride); hash_reduce(lanes); } static constexpr const char* _type_key = "tir.Ramp"; TVM_DECLARE_FINAL_OBJECT_INFO(RampNode, PrimExprNode); }; /*! * \brief Managed reference to RampNode * \sa RampNode */ class Ramp : public PrimExpr { public: TVM_DLL Ramp(PrimExpr base, PrimExpr stride, int lanes, Span span = Span()); TVM_DEFINE_OBJECT_REF_METHODS(Ramp, PrimExpr, RampNode); }; /*! \brief Create a vector where all the elements are value. */ class BroadcastNode : public PrimExprNode { public: /*! \brief The base value. */ PrimExpr value; /*! \brief The number of lanes. */ int lanes; void VisitAttrs(AttrVisitor* v) { v->Visit("dtype", &dtype); v->Visit("value", &value); v->Visit("lanes", &lanes); v->Visit("span", &span); } bool SEqualReduce(const BroadcastNode* other, SEqualReducer equal) const { return equal(dtype, other->dtype) && equal(value, other->value) && equal(lanes, other->lanes); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce(dtype); hash_reduce(value); hash_reduce(lanes); } static constexpr const char* _type_key = "tir.Broadcast"; TVM_DECLARE_FINAL_OBJECT_INFO(BroadcastNode, PrimExprNode); }; /*!
* \brief Managed reference to BroadcastNode * \sa BroadcastNode */ class Broadcast : public PrimExpr { public: TVM_DLL Broadcast(PrimExpr value, int lanes, Span span = Span()); TVM_DEFINE_OBJECT_REF_METHODS(Broadcast, PrimExpr, BroadcastNode); }; /*! * \brief Let binding. Bind var to value then evaluate body. */ class LetNode : public PrimExprNode { public: /*! \brief The variable. */ Var var; /*! \brief The value to be bound. */ PrimExpr value; /*! \brief The result expression. */ PrimExpr body; void VisitAttrs(AttrVisitor* v) { v->Visit("dtype", &dtype); v->Visit("var", &var); v->Visit("value", &value); v->Visit("body", &body); v->Visit("span", &span); } bool SEqualReduce(const LetNode* other, SEqualReducer equal) const { return equal(dtype, other->dtype) && equal.DefEqual(var, other->var) && equal(value, other->value) && equal(body, other->body); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce(dtype); hash_reduce.DefHash(var); hash_reduce(value); hash_reduce(body); } static constexpr const char* _type_key = "tir.Let"; TVM_DECLARE_FINAL_OBJECT_INFO(LetNode, PrimExprNode); }; /*! * \brief Managed reference to LetNode * \sa LetNode */ class Let : public PrimExpr { public: TVM_DLL Let(Var var, PrimExpr value, PrimExpr body, Span span = Span()); TVM_DEFINE_OBJECT_REF_METHODS(Let, PrimExpr, LetNode); }; /*! * \brief Call node. */ class CallNode : public PrimExprNode { public: /*! * \brief The operator (function) being invoked * * - It can be tvm::Op which corresponds to the primitive operators (intrinsics). * - It can also be another function in the IRModule (GlobalVar). */ RelayExpr op; /*! \brief The arguments. */ Array<PrimExpr> args; void VisitAttrs(AttrVisitor* v) { v->Visit("dtype", &dtype); v->Visit("op", &op); v->Visit("args", &args); v->Visit("span", &span); } bool SEqualReduce(const CallNode* other, SEqualReducer equal) const { return equal(dtype, other->dtype) && equal(op, other->op) && equal(args, other->args); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce(dtype); hash_reduce(op); hash_reduce(args); } static constexpr const char* _type_key = "tir.Call"; TVM_DECLARE_FINAL_OBJECT_INFO(CallNode, PrimExprNode); }; /*! * \brief Managed reference to CallNode * \sa CallNode */ class Call : public PrimExpr { public: TVM_DLL Call(DataType dtype, RelayExpr op, Array<PrimExpr> args, Span span = Span()); TVM_DEFINE_OBJECT_REF_METHODS(Call, PrimExpr, CallNode); }; /*! * \brief Shuffle instruction. * vec = concat(vectors) * result = (vec[indices[0]], vec[indices[1]] ...) */ class ShuffleNode : public PrimExprNode { public: /*! \brief The input vectors. */ Array<PrimExpr> vectors; /*! \brief The indices of each element. */ Array<PrimExpr> indices; void VisitAttrs(AttrVisitor* v) { v->Visit("dtype", &dtype); v->Visit("vectors", &vectors); v->Visit("indices", &indices); v->Visit("span", &span); } bool SEqualReduce(const ShuffleNode* other, SEqualReducer equal) const { return equal(dtype, other->dtype) && equal(vectors, other->vectors) && equal(indices, other->indices); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce(dtype); hash_reduce(vectors); hash_reduce(indices); } static constexpr const char* _type_key = "tir.Shuffle"; TVM_DECLARE_FINAL_OBJECT_INFO(ShuffleNode, PrimExprNode); }; /*!
* \brief Managed reference to ShuffleNode * \sa ShuffleNode */ class Shuffle : public PrimExpr { public: TVM_DLL Shuffle(Array<PrimExpr> vectors, Array<PrimExpr> indices, Span span = Span()); TVM_DLL static PrimExpr Concat(Array<PrimExpr> vectors, Span span = Span()); TVM_DLL static PrimExpr ExtractElement(PrimExpr vector, int index, Span span = Span()); TVM_DEFINE_OBJECT_REF_METHODS(Shuffle, PrimExpr, ShuffleNode); }; // Reduce operator /*! * \brief A commutative reducer node to represent a commutative * binary operator with identity element */ class CommReducerNode : public Object { public: /*! \brief The left argument of reducer */ Array<Var> lhs; /*! \brief The right argument of reducer */ Array<Var> rhs; /*! \brief The result of reducer */ Array<PrimExpr> result; /*! * \brief The identity element of reducer, which leaves other * elements unchanged when combined with it, with respect to * the binary operation this reducer uses. */ Array<PrimExpr> identity_element; /*! \brief Function call operator to combine a and b */ Array<PrimExpr> operator()(Array<PrimExpr> a, Array<PrimExpr> b) const; /*! * \brief Span that points to the original source code. * Reserved debug information. */ mutable Span span; void VisitAttrs(AttrVisitor* v) { v->Visit("lhs", &lhs); v->Visit("rhs", &rhs); v->Visit("result", &result); v->Visit("identity_element", &identity_element); v->Visit("span", &span); } bool SEqualReduce(const CommReducerNode* other, SEqualReducer equal) const { return equal.DefEqual(lhs, other->lhs) && equal.DefEqual(rhs, other->rhs) && equal(result, other->result) && equal(identity_element, other->identity_element); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce.DefHash(lhs); hash_reduce.DefHash(rhs); hash_reduce(result); hash_reduce(identity_element); } static constexpr const char* _type_key = "tir.CommReducer"; static constexpr const bool _type_has_method_sequal_reduce = true; static constexpr const bool _type_has_method_shash_reduce = true; TVM_DECLARE_FINAL_OBJECT_INFO(CommReducerNode, Object); }; /*! * \brief Managed reference to CommReducerNode * \sa CommReducerNode */ class CommReducer : public ObjectRef { public: TVM_DLL CommReducer(Array<Var> lhs, Array<Var> rhs, Array<PrimExpr> result, Array<PrimExpr> identity_element, Span span = Span()); TVM_DEFINE_OBJECT_REF_METHODS(CommReducer, ObjectRef, CommReducerNode); }; /*! \brief Reduction operator. */ class ReduceNode : public PrimExprNode { public: /*! \brief The commutative combiner */ CommReducer combiner; /*! \brief The source operand */ Array<PrimExpr> source; /*! \brief The init operand */ Array<PrimExpr> init; /*! \brief The reduction axis */ Array<IterVar> axis; /*! * \brief Predicate on the reduction. * Only add the body to the reduction if the condition is true. */ PrimExpr condition; /*! \brief The index of this reduce node */ int value_index; void VisitAttrs(AttrVisitor* v) { v->Visit("dtype", &dtype); v->Visit("combiner", &combiner); v->Visit("source", &source); v->Visit("init", &init); v->Visit("axis", &axis); v->Visit("condition", &condition); v->Visit("value_index", &value_index); v->Visit("span", &span); } bool SEqualReduce(const ReduceNode* other, SEqualReducer equal) const { // check axis first so IterVars can define the necessary variables.
return equal(dtype, other->dtype) && equal(axis, other->axis) && equal(combiner, other->combiner) && equal(source, other->source) && equal(init, other->init) && equal(condition, other->condition) && equal(value_index, other->value_index); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce(dtype); hash_reduce(axis); hash_reduce(combiner); hash_reduce(source); hash_reduce(init); hash_reduce(condition); hash_reduce(value_index); } static constexpr const char* _type_key = "tir.Reduce"; TVM_DECLARE_FINAL_OBJECT_INFO(ReduceNode, PrimExprNode); }; /*! * \brief Managed reference to ReduceNode * \sa ReduceNode */ class Reduce : public PrimExpr { public: TVM_DLL Reduce(CommReducer combiner, Array<PrimExpr> src, Array<IterVar> rdom, PrimExpr condition, int value_index, Array<PrimExpr> init, Span span = Span()); TVM_DEFINE_OBJECT_REF_METHODS(Reduce, PrimExpr, ReduceNode); }; /*! \brief Any shape. */ class AnyNode : public PrimExprNode { public: void VisitAttrs(AttrVisitor* v) { v->Visit("dtype", &dtype); v->Visit("span", &span); } bool SEqualReduce(const AnyNode* other, SEqualReducer equal) const { return equal(dtype, other->dtype); } void SHashReduce(SHashReducer hash_reduce) const {} /*! \brief Convert to var. */ Var ToVar() const { return Var("any_dim", DataType::Int(32)); } /*! \brief Convert to SizeVar. */ SizeVar ToSizeVar() const { return SizeVar("any_dim", DataType::Int(32)); } static constexpr const char* _type_key = "tir.Any"; TVM_DECLARE_FINAL_OBJECT_INFO(AnyNode, PrimExprNode); }; /*! * \brief Managed reference to AnyNode * \sa AnyNode */ class Any : public PrimExpr { public: TVM_DLL Any(Span span = Span()); TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(Any, PrimExpr, AnyNode); }; /*! * \brief Template function to convert Map to unordered_map. * Sometimes useful for API gluing when the internal implementation uses unordered_map. * \param dmap The container map * \return The corresponding unordered_map. * \tparam K the key of the Map. * \tparam V the value of the Map. */ template <typename K, typename V> inline std::unordered_map<K, V> as_unordered_map(const Map<K, V>& dmap) { std::unordered_map<K, V> ret; for (auto kv : dmap) { ret[kv.first] = kv.second; } return ret; } } // namespace tir } // namespace tvm namespace std { template <> struct hash<::tvm::tir::IterVar> : public ::tvm::ObjectPtrHash {}; } // namespace std #endif // TVM_TIR_EXPR_H_
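/*
 * A minimal usage sketch (illustrative, not from the upstream header; it
 * assumes the standard TVM C++ API such as tvm::tir::Var, tvm::IntImm, and
 * tvm::runtime::DataType, and the variable names are hypothetical): building
 * a small expression tree from the node references declared above.
 *
 * \code
 * using namespace tvm;
 * using namespace tvm::tir;
 *
 * Var x("x", DataType::Int(32));
 * Var y("y", DataType::Int(32));
 *
 * // (x + y) * 2, built explicitly from the node constructors.
 * PrimExpr sum = Add(x, y);
 * PrimExpr prod = Mul(sum, IntImm(DataType::Int(32), 2));
 *
 * // Select evaluates both branches eagerly; per SelectNode's note, use
 * // if_then_else instead when guarding out-of-bound accesses.
 * PrimExpr sel = Select(LT(x, y), sum, prod);
 *
 * // Vectorized building blocks: ramp(0, 1, 4) = [0, 1, 2, 3], and a
 * // broadcast of x to 4 lanes.
 * PrimExpr idx = Ramp(IntImm(DataType::Int(32), 0), IntImm(DataType::Int(32), 1), 4);
 * PrimExpr vec = Broadcast(x, 4);
 * \endcode
 */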
https://github.com/zk-ml/tachikoma
include/tvm/tir/expr_functor.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/tir/expr_functor.h * * \brief Functors for tir expressions. */ #ifndef TVM_TIR_EXPR_FUNCTOR_H_ #define TVM_TIR_EXPR_FUNCTOR_H_ #include <tvm/node/functor.h> #include <tvm/tir/expr.h> #include <utility> namespace tvm { namespace tir { /*! * \brief A dynamic functor that dispatches on the first Expr argument. * You can use this as a more powerful Visitor, since it allows you to * define the function signatures of visit functions. * * This helps you avoid bookkeeping the return value of the Visitor via state, * which can easily cause bugs when the state is incorrectly maintained. * * \code * // A functor that sets variables to b and calculates the result. * class MyExprFunctor * : public tir::ExprFunctor<int(const Expr&, int)> { * public: * int VisitExpr_(const Variable* op, int b) final { * return b; * } * int VisitExpr_(const IntImm* op, int b) final { * return op->value; * } * int VisitExpr_(const Add* op, int b) final { * return Visit(op->a, b) + Visit(op->b, b); * } * }; * MyExprFunctor f; * Var x("x"); * ICHECK_EQ(f(x + 1, 2), 3); * \endcode * * \note Why do we need this more powerful Functor: * * We often need to implement transformer tasks. * Say we want to take an Expr and transform it into some analysis result. * This can easily be done incorrectly using a plain Visitor. See IRVisitor's * documentation for possible error cases. * * \tparam FType function signature * This type is only defined for FType with function signature R(const Expr&, Args...) */ template <typename FType> class ExprFunctor; // functions to be overridden. #define EXPR_FUNCTOR_DEFAULT \ { return VisitExprDefault_(op, std::forward<Args>(args)...); } #define IR_EXPR_FUNCTOR_DISPATCH(OP) \ vtable.template set_dispatch<OP>([](const ObjectRef& n, TSelf* self, Args... args) { \ return self->VisitExpr_(static_cast<const OP*>(n.get()), std::forward<Args>(args)...); \ }); template <typename R, typename... Args> class ExprFunctor<R(const PrimExpr& n, Args...)> { private: using TSelf = ExprFunctor<R(const PrimExpr& n, Args...)>; using FType = NodeFunctor<R(const ObjectRef& n, TSelf* self, Args...)>; public: /*! \brief the result type of this functor */ using result_type = R; /*! \brief virtual destructor */ virtual ~ExprFunctor() {} /*! * \brief Same as call. * \param n The expression node. * \param args Additional arguments. * \return The result of the call */ R operator()(const PrimExpr& n, Args... args) { return VisitExpr(n, std::forward<Args>(args)...); } /*! * \brief The functor call. * \param n The expression node. * \param args Additional arguments. * \return The result of the call */ virtual R VisitExpr(const PrimExpr& n, Args...
args) { static FType vtable = InitVTable(); return vtable(n, this, std::forward<Args>(args)...); } // Functions that can be overridden by subclasses virtual R VisitExpr_(const VarNode* op, Args... args) EXPR_FUNCTOR_DEFAULT; virtual R VisitExpr_(const SizeVarNode* op, Args... args) { return VisitExpr_(static_cast<const VarNode*>(op), std::forward<Args>(args)...); } virtual R VisitExpr_(const BufferLoadNode* op, Args... args) EXPR_FUNCTOR_DEFAULT; virtual R VisitExpr_(const ProducerLoadNode* op, Args... args) EXPR_FUNCTOR_DEFAULT; virtual R VisitExpr_(const LoadNode* op, Args... args) EXPR_FUNCTOR_DEFAULT; virtual R VisitExpr_(const LetNode* op, Args... args) EXPR_FUNCTOR_DEFAULT; virtual R VisitExpr_(const CallNode* op, Args... args) EXPR_FUNCTOR_DEFAULT; virtual R VisitExpr_(const AddNode* op, Args... args) EXPR_FUNCTOR_DEFAULT; virtual R VisitExpr_(const SubNode* op, Args... args) EXPR_FUNCTOR_DEFAULT; virtual R VisitExpr_(const MulNode* op, Args... args) EXPR_FUNCTOR_DEFAULT; virtual R VisitExpr_(const DivNode* op, Args... args) EXPR_FUNCTOR_DEFAULT; virtual R VisitExpr_(const ModNode* op, Args... args) EXPR_FUNCTOR_DEFAULT; virtual R VisitExpr_(const FloorDivNode* op, Args... args) EXPR_FUNCTOR_DEFAULT; virtual R VisitExpr_(const FloorModNode* op, Args... args) EXPR_FUNCTOR_DEFAULT; virtual R VisitExpr_(const MinNode* op, Args... args) EXPR_FUNCTOR_DEFAULT; virtual R VisitExpr_(const MaxNode* op, Args... args) EXPR_FUNCTOR_DEFAULT; virtual R VisitExpr_(const EQNode* op, Args... args) EXPR_FUNCTOR_DEFAULT; virtual R VisitExpr_(const NENode* op, Args... args) EXPR_FUNCTOR_DEFAULT; virtual R VisitExpr_(const LTNode* op, Args... args) EXPR_FUNCTOR_DEFAULT; virtual R VisitExpr_(const LENode* op, Args... args) EXPR_FUNCTOR_DEFAULT; virtual R VisitExpr_(const GTNode* op, Args... args) EXPR_FUNCTOR_DEFAULT; virtual R VisitExpr_(const GENode* op, Args... args) EXPR_FUNCTOR_DEFAULT; virtual R VisitExpr_(const AndNode* op, Args... args) EXPR_FUNCTOR_DEFAULT; virtual R VisitExpr_(const OrNode* op, Args... args) EXPR_FUNCTOR_DEFAULT; virtual R VisitExpr_(const ReduceNode* op, Args... args) EXPR_FUNCTOR_DEFAULT; virtual R VisitExpr_(const CastNode* op, Args... args) EXPR_FUNCTOR_DEFAULT; virtual R VisitExpr_(const NotNode* op, Args... args) EXPR_FUNCTOR_DEFAULT; virtual R VisitExpr_(const SelectNode* op, Args... args) EXPR_FUNCTOR_DEFAULT; virtual R VisitExpr_(const RampNode* op, Args... args) EXPR_FUNCTOR_DEFAULT; virtual R VisitExpr_(const BroadcastNode* op, Args... args) EXPR_FUNCTOR_DEFAULT; virtual R VisitExpr_(const ShuffleNode* op, Args... args) EXPR_FUNCTOR_DEFAULT; virtual R VisitExpr_(const IntImmNode* op, Args... args) EXPR_FUNCTOR_DEFAULT; virtual R VisitExpr_(const FloatImmNode* op, Args... args) EXPR_FUNCTOR_DEFAULT; virtual R VisitExpr_(const StringImmNode* op, Args... args) EXPR_FUNCTOR_DEFAULT; virtual R VisitExpr_(const AnyNode* op, Args... args) EXPR_FUNCTOR_DEFAULT; virtual R VisitExprDefault_(const Object* op, Args...) { LOG(FATAL) << "Do not have a default for " << op->GetTypeKey(); return R(); } private: // initialize the vtable.
static FType InitVTable() { FType vtable; // Set dispatch IR_EXPR_FUNCTOR_DISPATCH(VarNode); IR_EXPR_FUNCTOR_DISPATCH(SizeVarNode); IR_EXPR_FUNCTOR_DISPATCH(LoadNode); IR_EXPR_FUNCTOR_DISPATCH(BufferLoadNode); IR_EXPR_FUNCTOR_DISPATCH(ProducerLoadNode); IR_EXPR_FUNCTOR_DISPATCH(LetNode); IR_EXPR_FUNCTOR_DISPATCH(CallNode); IR_EXPR_FUNCTOR_DISPATCH(AddNode); IR_EXPR_FUNCTOR_DISPATCH(SubNode); IR_EXPR_FUNCTOR_DISPATCH(MulNode); IR_EXPR_FUNCTOR_DISPATCH(DivNode); IR_EXPR_FUNCTOR_DISPATCH(ModNode); IR_EXPR_FUNCTOR_DISPATCH(FloorDivNode); IR_EXPR_FUNCTOR_DISPATCH(FloorModNode); IR_EXPR_FUNCTOR_DISPATCH(MinNode); IR_EXPR_FUNCTOR_DISPATCH(MaxNode); IR_EXPR_FUNCTOR_DISPATCH(EQNode); IR_EXPR_FUNCTOR_DISPATCH(NENode); IR_EXPR_FUNCTOR_DISPATCH(LTNode); IR_EXPR_FUNCTOR_DISPATCH(LENode); IR_EXPR_FUNCTOR_DISPATCH(GTNode); IR_EXPR_FUNCTOR_DISPATCH(GENode); IR_EXPR_FUNCTOR_DISPATCH(AndNode); IR_EXPR_FUNCTOR_DISPATCH(OrNode); IR_EXPR_FUNCTOR_DISPATCH(ReduceNode); IR_EXPR_FUNCTOR_DISPATCH(CastNode); IR_EXPR_FUNCTOR_DISPATCH(NotNode); IR_EXPR_FUNCTOR_DISPATCH(SelectNode); IR_EXPR_FUNCTOR_DISPATCH(RampNode); IR_EXPR_FUNCTOR_DISPATCH(ShuffleNode); IR_EXPR_FUNCTOR_DISPATCH(BroadcastNode); IR_EXPR_FUNCTOR_DISPATCH(IntImmNode); IR_EXPR_FUNCTOR_DISPATCH(FloatImmNode); IR_EXPR_FUNCTOR_DISPATCH(StringImmNode); IR_EXPR_FUNCTOR_DISPATCH(AnyNode); return vtable; } }; #undef IR_EXPR_FUNCTOR_DISPATCH #undef EXPR_FUNCTOR_DEFAULT /*! * \brief ExprVisitor */ class TVM_DLL ExprVisitor : public ExprFunctor<void(const PrimExpr&)> { public: using ExprFunctor::operator(); protected: using ExprFunctor::VisitExpr; // list of functions to override. void VisitExpr_(const VarNode* op) override; void VisitExpr_(const SizeVarNode* op) override; void VisitExpr_(const LoadNode* op) override; void VisitExpr_(const BufferLoadNode* op) override; void VisitExpr_(const ProducerLoadNode* op) override; void VisitExpr_(const LetNode* op) override; void VisitExpr_(const CallNode* op) override; void VisitExpr_(const AddNode* op) override; void VisitExpr_(const SubNode* op) override; void VisitExpr_(const MulNode* op) override; void VisitExpr_(const DivNode* op) override; void VisitExpr_(const ModNode* op) override; void VisitExpr_(const FloorDivNode* op) override; void VisitExpr_(const FloorModNode* op) override; void VisitExpr_(const MinNode* op) override; void VisitExpr_(const MaxNode* op) override; void VisitExpr_(const EQNode* op) override; void VisitExpr_(const NENode* op) override; void VisitExpr_(const LTNode* op) override; void VisitExpr_(const LENode* op) override; void VisitExpr_(const GTNode* op) override; void VisitExpr_(const GENode* op) override; void VisitExpr_(const AndNode* op) override; void VisitExpr_(const OrNode* op) override; void VisitExpr_(const ReduceNode* op) override; void VisitExpr_(const CastNode* op) override; void VisitExpr_(const NotNode* op) override; void VisitExpr_(const SelectNode* op) override; void VisitExpr_(const RampNode* op) override; void VisitExpr_(const BroadcastNode* op) override; void VisitExpr_(const ShuffleNode* op) override; void VisitExpr_(const IntImmNode* op) override; void VisitExpr_(const FloatImmNode* op) override; void VisitExpr_(const StringImmNode* op) override; void VisitExpr_(const AnyNode* op) override; }; /*! * \brief ExprMutator that mutates expressions. */ class TVM_DLL ExprMutator : protected ExprFunctor<PrimExpr(const PrimExpr&)> { public: using ExprFunctor::operator(); protected: using ExprFunctor::VisitExpr; // list of functions to override. 
PrimExpr VisitExpr_(const VarNode* op) override; PrimExpr VisitExpr_(const SizeVarNode* op) override; PrimExpr VisitExpr_(const LoadNode* op) override; PrimExpr VisitExpr_(const BufferLoadNode* op) override; PrimExpr VisitExpr_(const ProducerLoadNode* op) override; PrimExpr VisitExpr_(const LetNode* op) override; PrimExpr VisitExpr_(const CallNode* op) override; PrimExpr VisitExpr_(const AddNode* op) override; PrimExpr VisitExpr_(const SubNode* op) override; PrimExpr VisitExpr_(const MulNode* op) override; PrimExpr VisitExpr_(const DivNode* op) override; PrimExpr VisitExpr_(const ModNode* op) override; PrimExpr VisitExpr_(const FloorDivNode* op) override; PrimExpr VisitExpr_(const FloorModNode* op) override; PrimExpr VisitExpr_(const MinNode* op) override; PrimExpr VisitExpr_(const MaxNode* op) override; PrimExpr VisitExpr_(const EQNode* op) override; PrimExpr VisitExpr_(const NENode* op) override; PrimExpr VisitExpr_(const LTNode* op) override; PrimExpr VisitExpr_(const LENode* op) override; PrimExpr VisitExpr_(const GTNode* op) override; PrimExpr VisitExpr_(const GENode* op) override; PrimExpr VisitExpr_(const AndNode* op) override; PrimExpr VisitExpr_(const OrNode* op) override; PrimExpr VisitExpr_(const ReduceNode* op) override; PrimExpr VisitExpr_(const CastNode* op) override; PrimExpr VisitExpr_(const NotNode* op) override; PrimExpr VisitExpr_(const SelectNode* op) override; PrimExpr VisitExpr_(const RampNode* op) override; PrimExpr VisitExpr_(const BroadcastNode* op) override; PrimExpr VisitExpr_(const ShuffleNode* op) override; PrimExpr VisitExpr_(const IntImmNode* op) override; PrimExpr VisitExpr_(const FloatImmNode* op) override; PrimExpr VisitExpr_(const StringImmNode* op) override; PrimExpr VisitExpr_(const AnyNode* op) override; }; } // namespace tir } // namespace tvm #endif // TVM_TIR_EXPR_FUNCTOR_H_
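/*
 * A minimal usage sketch (illustrative, not from the upstream header;
 * VarCounter is a hypothetical name): counting variable references with the
 * ExprVisitor declared above, by overriding a single VisitExpr_ hook and
 * delegating back to the base class to keep the default traversal.
 *
 * \code
 * class VarCounter : public tvm::tir::ExprVisitor {
 *  public:
 *   int count = 0;
 *
 *  protected:
 *   void VisitExpr_(const tvm::tir::VarNode* op) override {
 *     ++count;
 *     ExprVisitor::VisitExpr_(op);  // preserve the default traversal behavior
 *   }
 * };
 *
 * tvm::tir::Var x("x"), y("y");
 * VarCounter counter;
 * counter(x + y * x);  // walks the whole tree; counter.count == 3
 * \endcode
 */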
https://github.com/zk-ml/tachikoma
include/tvm/tir/function.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/tir/function.h * \brief TIR Function. */ #ifndef TVM_TIR_FUNCTION_H_ #define TVM_TIR_FUNCTION_H_ #include <tvm/ir/function.h> #include <tvm/runtime/ndarray.h> #include <tvm/tir/buffer.h> #include <tvm/tir/expr.h> #include <tvm/tir/stmt.h> #include <string> namespace tvm { namespace tir { /*! * \brief Primitive functions that contain TIR statements. * * The PrimFunc provides a low-level code representation whose function * type is derived directly from its parameters rather than inferred. * * \sa PrimFunc */ class PrimFuncNode : public BaseFuncNode { public: /*! \brief Function parameters */ Array<tir::Var> params; /*! \brief The body of the function */ tir::Stmt body; /*! \brief The return type of the function. */ Type ret_type; /*! * \brief Maps some parameters to specific Buffer data structures. * * buffer_map provides a way to express data structure's field and shape * constraints. The provided information is used in the program analysis * and the code generation. * * - It defines the vars in the Buffer (m, n) in the cases below when * they appear in the buffer_map for the first time. * - When a var appears multiple times, it translates into a runtime * assertion that checks the field constraint. * * \code * * # The corresponding fields of f are as follows * # * # - f.params = [a, b] * # - f.buffer_map = {a: A, b: B} * # - A = decl_buffer(shape=[m, n]) * # - B = decl_buffer(shape=[m, n]) * * def f(a, b): * m, n = var(), var() * A = bind_buffer(a, shape=[m, n]) * B = bind_buffer(b, shape=[m, n]) * # body * * \endcode * * buffer_map is sugar to express: * - Parameter unpacking: e.g. we can load a.shape[0] to get the value of m * - Constraint checking: a.shape[0] must equal b.shape[0] because they * both correspond to m. * While we could express parameter unpacking and constraints using * normal statements, making buffer_map a first-class citizen of PrimFunc * makes program analysis much easier. */ Map<tir::Var, Buffer> buffer_map; /*! \brief The buffer map prior to flattening. * * This contains the buffers as they exist prior to flattening, and * is used for validating an input tensor passed into the packed * API. Any buffer that is present in `buffer_map` but not present * in `preflattened_buffer_map` is assumed to be the same before * and after flattening (e.g. a 1-d tensor that is backed by 1-d * flat memory). * * TODO(Lunderberg): Remove preflattened_buffer_map, and instead * declare each flattened buffer as aliasing the original tensor * shape. This should include improving the StmtExprMutator to * provide easier interactions with Buffer objects, so that the * bookkeeping of relationships between buffers doesn't need to be * repeated across several transforms.
*/ Map<tir::Var, Buffer> preflattened_buffer_map; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("params", &params); v->Visit("body", &body); v->Visit("ret_type", &ret_type); v->Visit("buffer_map", &buffer_map); v->Visit("preflattened_buffer_map", &preflattened_buffer_map); v->Visit("attrs", &attrs); v->Visit("span", &span); v->Visit("_checked_type_", &checked_type_); } bool SEqualReduce(const PrimFuncNode* other, SEqualReducer equal) const { // visit params and buffer_map first as they contain defs. return equal.DefEqual(params, other->params) && equal(buffer_map, other->buffer_map) && equal(preflattened_buffer_map, other->preflattened_buffer_map) && equal(ret_type, other->ret_type) && equal(body, other->body) && equal(attrs, other->attrs); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce.DefHash(params); hash_reduce(buffer_map); hash_reduce(preflattened_buffer_map); hash_reduce(ret_type); hash_reduce(body); hash_reduce(attrs); } /*! * \brief Return the derived function annotation of this function. * * \return The function type annotation. * \note The function type annotation of PrimFunc is * directly derived from the Vars without the need of type inference. */ TVM_DLL FuncType func_type_annotation() const; static constexpr const char* _type_key = "tir.PrimFunc"; TVM_DECLARE_FINAL_OBJECT_INFO(PrimFuncNode, BaseFuncNode); }; /*! * \brief Managed reference to PrimFuncNode. * \sa PrimFuncNode */ class PrimFunc : public BaseFunc { public: /*! * \brief Constructor * * \param params The parameters of the function. * * \param body The body of the function. * * \param ret_type The return type of the function. * * \param buffer_map The buffer map for parameter buffer unpacking. * This contains buffer objects as they appear in the body of the * PrimFunc. (e.g. a buffer of shape ``[1024]`` originally * generated as a tensor of shape ``[32, 32]``) * * \param preflattened_buffer_map The buffer map for * parameter buffer unpacking. This contains buffer * objects as they are expected to be passed in by the * caller. (e.g. a buffer of shape ``[32, 32]`` originally * generated as a tensor of shape ``[32, 32]``) * * \param attrs Additional function attributes. * * \param span The location of this object in the source code. */ TVM_DLL PrimFunc( Array<tir::Var> params, Stmt body, Type ret_type = VoidType(), Map<tir::Var, Buffer> buffer_map = Map<tir::Var, Buffer>(), Optional<Map<tir::Var, Buffer>> preflattened_buffer_map = Optional<Map<tir::Var, Buffer>>(), DictAttrs attrs = NullValue<DictAttrs>(), Span span = Span()); TVM_DEFINE_OBJECT_REF_METHODS(PrimFunc, BaseFunc, PrimFuncNode); TVM_DEFINE_OBJECT_REF_COW_METHOD(PrimFuncNode); }; /*! * \brief Tensor intrinsics for tensorization */ class TensorIntrinNode : public Object { public: /*! \brief The function to describe the computation. */ PrimFunc desc; /*! \brief The function of the implementation for the execution. */ PrimFunc impl; void VisitAttrs(AttrVisitor* v) { v->Visit("desc", &desc); v->Visit("impl", &impl); } static constexpr const char* _type_key = "tir.TensorIntrin"; TVM_DECLARE_FINAL_OBJECT_INFO(TensorIntrinNode, Object); }; /*! * \brief Managed reference to TensorIntrinNode. */ class TensorIntrin : public ObjectRef { public: /*! * \brief Constructor * \param desc The function to describe the computation. * \param impl The function of the implementation for the execution. */ TVM_DLL explicit TensorIntrin(PrimFunc desc, PrimFunc impl); /*! * \brief Create and register a TensorIntrin.
After registration, the TensorIntrin can be looked * up with its name. * \param name The name of the TensorIntrin to register * \param intrin The TensorIntrin to register. * \param override Whether to override an existing intrinsic. * \throws This method throws an exception if a TensorIntrin with the specified name already * exists and override is false. */ TVM_DLL static void Register(String name, TensorIntrin intrin, bool override = false); /*! * \brief Look up TensorIntrin by name. Raises an exception if not found. * \param name The name of the TensorIntrin. * \param allow_missing Whether to allow missing tensor intrin. If false, an exception is raised * if the tensor intrin is not found. * \return The TensorIntrin with the specified name. * \throws This method throws an exception if the TensorIntrin does not exist and allow_missing is * false. */ TVM_DLL static Optional<TensorIntrin> Get(String name, bool allow_missing = false); TVM_DEFINE_OBJECT_REF_METHODS(TensorIntrin, ObjectRef, TensorIntrinNode) }; /*! * \brief Specialize parameters of PrimFunc. * \param func The PrimFunc to be specialized. * \param param_map The mapping from function params to the instance. * \return The new function with parameters specialized. * \note We can define a meta TIR function with symbolic shape: * * \code * @T.prim_func * def mem_copy(a: T.handle, b: T.handle, m: T.int32, n: T.int32) -> None: * A = T.match_buffer(a, (m, n), "float32") * B = T.match_buffer(b, (m, n), "float32") * for i, j in T.grid(m, n): * with T.block(): * vi, vj = T.axis.remap("SS", [i, j]) * B[vi, vj] = A[vi, vj] * \endcode * * Then we can make it specialized with given shapes or buffers. * * \code * a, _, m, n = mem_copy.params * func = mem_copy.specialize({a: tir.decl_buffer((16, 16))}) * # or * func = mem_copy.specialize({n: 16, m: 16}) * \endcode * * The specialized function is: * * \code * @T.prim_func * def mem_copy_16_16(a: T.handle, b: T.handle) -> None: * A = T.match_buffer(a, (16, 16), "float32") * B = T.match_buffer(b, (16, 16), "float32") * for i, j in T.grid(16, 16): * with T.block(): * vi, vj = T.axis.remap("SS", [i, j]) * B[vi, vj] = A[vi, vj] * \endcode */ PrimFunc Specialize(PrimFunc func, const Map<Var, ObjectRef>& param_map); /*! * \brief PrimFunc specific attribute names. * * \sa tvm::attr */ namespace attr { /*! * \brief List of thread IterVar that a DeviceLaunch function corresponds to. * * Type: Array<tir::IterVar> * * We call a device kernel launch function f using the following convention: * * Call(f, * [arg1, arg2, ..., arg_n, * work_size_1, work_size_2, ... work_size_m, dyn_shmem_size]) * * Here n = len(args), m = len(work_size) = len(device_thread_axis). * * When kDeviceUseDynSharedMemory is not set, the dyn_shmem_size argument is omitted. * * The list of device_thread_axis indicates how the work_size arguments * can be bound to the corresponding threads. * * \sa tvm::CallingConv::kDeviceKernelLaunch */ constexpr const char* kDeviceThreadAxis = "tir.device_thread_axis"; /*! * \brief Whether or not to use dynamic shared memory. * * Type: Integer */ constexpr const char* kDeviceUseDynSharedMemory = "tir.device_use_dyn_shared_memory"; /*! * \brief Whether to set noalias rule on the function arguments. * * Type: Integer */ constexpr const char* kNoAlias = "tir.noalias"; /*! * \brief Mark the function as the entry function of * the final generated runtime module. * * Type: Integer * * \note There can only be one entry function per module. */ constexpr const char* kIsEntryFunc = "tir.is_entry_func"; /*!
* \brief Mark the function as the global function called from the host. * * Type: Integer */ constexpr const char* kIsGlobalFunc = "tir.is_global_func"; } // namespace attr } // namespace tir } // namespace tvm #endif // TVM_TIR_FUNCTION_H_
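/*
 * A minimal construction sketch (illustrative, not from the upstream header;
 * it assumes the standard TVM C++ API, and the no-op Evaluate body and
 * variable names are placeholders): creating a PrimFunc and tagging it with
 * one of the tir::attr attribute keys defined above.
 *
 * \code
 * using namespace tvm;
 * using namespace tvm::tir;
 *
 * Var a("a", DataType::Handle());
 * Stmt body = Evaluate(0);  // placeholder body that evaluates a constant
 * PrimFunc func({a}, body);
 *
 * // Attach the noalias attribute declared above.
 * func = WithAttr(std::move(func), attr::kNoAlias, Integer(1));
 * \endcode
 */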
https://github.com/zk-ml/tachikoma
include/tvm/tir/index_map.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/tir/index_map.h * \brief Defines a remapping of buffer indices * * For use with tvm::tir::Buffer. */ #ifndef TVM_TIR_INDEX_MAP_H_ #define TVM_TIR_INDEX_MAP_H_ #include <tvm/ir/expr.h> #include <tvm/runtime/container/array.h> #include <tvm/runtime/object.h> #include <tvm/tir/var.h> #include <utility> namespace tvm { namespace arith { class Analyzer; } } // namespace tvm namespace tvm { namespace tir { /*! * \brief Defines a mapping between two representations of indices * into a buffer. * * This is primarily used for layout transformations of Buffer * objects. */ class IndexMapNode : public Object { public: /*! \brief Variables representing the indices prior to remapping. * * If initial_indices is empty, then final_indices should also be * empty, and no mapping is applied. */ Array<Var> initial_indices; /*! * \brief Expressions defining the indices after remapping. * * These expressions should only be in terms of the initial_indices, * and must be expressible as an IterSumExpr. The mapping from * initial_indices to final_indices must be injective. * * If final_indices is empty, then initial_indices should also be * empty, and the map is an identity function. */ Array<PrimExpr> final_indices; /*! * \brief The inverse index map. * * When this is defined, IndexMap::Inverse will return the * pre-defined inverse index map. Otherwise, the inverse index map * will be computed on the fly. It is the user's responsibility to * ensure the correctness of the pre-defined inverse index map. * * \note ObjectRef is used here instead of IndexMap to avoid circular reference. */ Optional<ObjectRef> inverse_index_map; /*! * \brief Default constructor * * Defines the mapping as an identity function, with initial_indices * equal to the final indices. */ IndexMapNode() {} /*! * \brief Map indices to the output space * * \param indices The indices in the input space. Should contain * one value for each variable in `initial_indices`. * * \param analyzer An optional analyzer to be used to simplify the * resulting expressions. If null, will use a fresh analyzer. * * \returns The indices in the output space. Contains one value for * each expression in `final_indices`. */ Array<PrimExpr> MapIndices(const Array<PrimExpr>& indices, arith::Analyzer* analyzer = nullptr) const; /*! \brief Map a memory range to the output space * * If contiguous memory locations in the input space are not * necessarily contiguous in the output space (e.g. `lambda i: * [8*(i%8) + (i//8)]`), then this will return the smallest range * such that all valid indices are contained within the given range. * * \param ranges The ranges in the input space. Should contain one * value for each variable in `initial_indices`. 
* * \param analyzer An optional analyzer to be used to simplify the * resulting expressions. If null, will use a fresh analyzer. * * \returns The ranges in the output space. Contains one value for * each expression in `final_indices`. */ Array<Range> MapRanges(const Array<Range>& ranges, arith::Analyzer* analyzer = nullptr) const; /*! \brief Map a buffer shape to the output space * * \param shape The buffer shape in the input space. Should contain * one value for each variable in `initial_indices`. * * \param analyzer An optional analyzer to be used to simplify the * resulting expressions. If null, will use a fresh analyzer. * * \returns The buffer shape in the output space. Contains one * value for each expression in `final_indices`. */ Array<PrimExpr> MapShape(const Array<PrimExpr>& shape, arith::Analyzer* analyzer = nullptr) const; /*! \brief Map an NDArray according to this index map * * \param arr_src The NDArray whose layout is transformed by this index map. * * \returns The transformed NDArray. */ runtime::NDArray MapNDArray(runtime::NDArray arr_src) const; /*! * \brief Convert to string representation in Python. * \return The stringified lambda expression in Python. */ String ToPythonString() const; void VisitAttrs(AttrVisitor* v) { v->Visit("initial_indices", &initial_indices); v->Visit("final_indices", &final_indices); v->Visit("inverse_index_map", &inverse_index_map); } bool SEqualReduce(const IndexMapNode* other, SEqualReducer equal) const { return equal.DefEqual(initial_indices, other->initial_indices) && equal(final_indices, other->final_indices); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce.DefHash(initial_indices); hash_reduce(final_indices); } static constexpr const char* _type_key = "tir.IndexMap"; static constexpr const bool _type_has_method_sequal_reduce = true; static constexpr const bool _type_has_method_shash_reduce = true; TVM_DECLARE_FINAL_OBJECT_INFO(IndexMapNode, Object); }; class IndexMap : public ObjectRef { public: /*! * \brief The constructor * \param initial_indices Variables representing the indices prior to remapping * \param final_indices Expressions defining the indices after remapping. * \param inverse_index_map The optional pre-defined inverse index map */ IndexMap(Array<Var> initial_indices, Array<PrimExpr> final_indices, Optional<IndexMap> inverse_index_map = NullOpt); /*! * \brief Create an index map from a packed function * \param ndim The number of dimensions * \param func The function to be applied * \param inverse_index_map The optional pre-defined inverse index map * \return The created index map */ static IndexMap FromFunc(int ndim, runtime::TypedPackedFunc<Array<PrimExpr>(Array<Var>)> func, Optional<IndexMap> inverse_index_map = NullOpt); /*! \brief Generate the inverse mapping. * * The range of the input indices is required in order to ensure * that the transformation is bijective over the input domain. * * If the user has supplied an `inverse_index_map`, that map is * assumed to be correct and bijective, and is returned. */ IndexMap Inverse(Array<Range> initial_ranges) const; /*! \brief Generate the inverse mapping. * * Determine the inverse, where the output range may contain * addresses that do not correspond to an address in the input * range. * * \return The inverted index map, along with the predicate for * which the inverse maps to a valid range.
*/ std::pair<IndexMap, PrimExpr> NonSurjectiveInverse(Array<Range> initial_ranges) const; TVM_DEFINE_OBJECT_REF_METHODS(IndexMap, ObjectRef, IndexMapNode); }; } // namespace tir } // namespace tvm #endif // TVM_TIR_INDEX_MAP_H_
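/*
 * A minimal usage sketch (illustrative, not from the upstream header; it
 * assumes the standard TVM C++ API, including the floordiv/floormod helpers
 * from tvm/tir/op.h): a 2-d row-major to 4x4-blocked layout map,
 * (i, j) -> (i / 4, j / 4, i % 4, j % 4), applied to a concrete index.
 *
 * \code
 * using namespace tvm;
 * using namespace tvm::tir;
 *
 * Var i("i"), j("j");
 * IndexMap map({i, j}, {floordiv(i, 4), floordiv(j, 4), floormod(i, 4), floormod(j, 4)});
 *
 * // The logical index (5, 7) maps to the blocked index (1, 1, 1, 3).
 * Array<PrimExpr> mapped = map->MapIndices({IntImm(DataType::Int(32), 5),
 *                                           IntImm(DataType::Int(32), 7)});
 * \endcode
 */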
https://github.com/zk-ml/tachikoma
include/tvm/tir/op.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/tir/op.h * \brief Common operators defined for Expr. * * \note Most of the operators defined here perform simple constant folding * when the type is int32 or int64, simplifying the index expressions. */ // Acknowledgement: Most operator APIs originate from Halide. #ifndef TVM_TIR_OP_H_ #define TVM_TIR_OP_H_ #include <tvm/ir/expr.h> #include <tvm/ir/op.h> #include <tvm/ir/type.h> #include <tvm/tir/expr.h> #include <tvm/tir/stmt.h> #include <algorithm> #include <limits> #include <type_traits> namespace tvm { // Most common operators can be overloaded by argument type (PrimExpr). // So we put them under the root namespace. // // We put more developer-oriented APIs -- make_const and is_const under tir // as they are more specific to the tir namespace. /*! * \brief Get the type of the expression under the unified type system. * * This function could return a more refined type than * the runtime type provided by expr->dtype * * \param expr The input parameter. * \return The result type. * * \sa tvm/ir/type.h for discussion about the relation between Type and runtime::DataType. */ TVM_DLL Type GetType(const PrimExpr& expr); /*! * \brief Get the type corresponding to DataType * \param dtype The data type * \return The result type * * \sa tvm/ir/type.h for discussion about the relation between Type and runtime::DataType. */ TVM_DLL Type GetTypeFromRuntimeDataType(const DataType& dtype); /*! * \brief Get the implied DataType for storing values with type during runtime. * * \param type The input type. * \return The result runtime::DataType. * * \sa tvm/ir/type.h for discussion about the relation between Type and runtime::DataType. */ TVM_DLL runtime::DataType GetRuntimeDataType(const Type& type); /*! * \brief Return the value. * * \param value The returned value. * \param span The location of this operation in the source. * \return The return expression. */ TVM_DLL PrimExpr ret(PrimExpr value, Span span = Span()); /*! * Query the maximum possible value of dtype. * \param dtype The data type. * \param span The location of this operation in the source. * \return the maximum possible value in this format. */ TVM_DLL PrimExpr max_value(const DataType& dtype, Span span = Span()); /*! * Query the minimum possible value of dtype. * \param dtype The data type. * \param span The location of this operation in the source. * \return the minimum possible value in this format. */ TVM_DLL PrimExpr min_value(const DataType& dtype, Span span = Span()); /*! * Get the value of infinity. * \param dtype The data type. * \param span The location of this operation in the source. * \return the infinity value in this format. */ TVM_DLL PrimExpr infinity(const DataType& dtype, Span span = Span()); /*! * \brief cast value to type.
* * \param t the target type. * \param value The value * \param span The location of this operation in the source. * \return The result expression. * \note This function may return the original value if the type is the same. */ TVM_DLL PrimExpr cast(const DataType& t, PrimExpr value, Span span = Span()); /*! * \brief perform a reinterpret cast of value to type. * * \param t the target type. * \param value The value * \param span The location of this operation in the source. * \return The result expression. * \note This function may return the original value if the type is the same. */ TVM_DLL PrimExpr reinterpret(const DataType& t, PrimExpr value, Span span = Span()); /*! * \brief add operator * * \param a left operand * \param b right operand * \param span The location of this operation in the source. * \return The result expression. * \note this function does eager constant folding for * index types(int32, int64) when possible. */ TVM_DLL PrimExpr add(PrimExpr a, PrimExpr b, Span span = Span()); /*! * \brief subtraction operator * * \param a left operand * \param b right operand * \param span The location of this operation in the source. * \return The result expression. * \note this function does eager constant folding for * index types(int32, int64) when possible. */ TVM_DLL PrimExpr sub(PrimExpr a, PrimExpr b, Span span = Span()); /*! * \brief negation. * * \param a input. * \param span The location of this operation in the source. * \return The result expression. * \note this function does eager constant folding for * index types(int32, int64) when possible. */ TVM_DLL PrimExpr neg(PrimExpr a, Span span = Span()); /*! * \brief multiplication operator * * \param a left operand * \param b right operand * \param span The location of this operation in the source. * \return The result expression. * \note this function does eager constant folding for * index types(int32, int64) when possible. */ TVM_DLL PrimExpr mul(PrimExpr a, PrimExpr b, Span span = Span()); /*! * \brief left shift operator * * \param a left operand * \param b right operand * \param span The location of this operation in the source. * \return The result expression. * \note this function does eager constant folding for * index types(int32, int64) when possible. */ TVM_DLL PrimExpr left_shift(PrimExpr a, PrimExpr b, Span span = Span()); /*! * \brief right shift operator * * \param a left operand * \param b right operand * \param span The location of this operation in the source. * \return The result expression. * \note this function does eager constant folding for * index types(int32, int64) when possible. */ TVM_DLL PrimExpr right_shift(PrimExpr a, PrimExpr b, Span span = Span()); /*! * \brief greater * * \param a left operand * \param b right operand * \param span The location of this operation in the source. * \return The result expression. * \note this function does eager constant folding for * index types(int32, int64) when possible. */ TVM_DLL PrimExpr greater(PrimExpr a, PrimExpr b, Span span = Span()); /*! * \brief greater_equal * * \param a left operand * \param b right operand * \param span The location of this operation in the source. * \return The result expression. * \note this function does eager constant folding for * index types(int32, int64) when possible. */ TVM_DLL PrimExpr greater_equal(PrimExpr a, PrimExpr b, Span span = Span()); /*! * \brief less * * \param a left operand * \param b right operand * \param span The location of this operation in the source. * \return The result expression.
* \note this function does eager constant folding for * index types(int32, int64) when possible. */ TVM_DLL PrimExpr less(PrimExpr a, PrimExpr b, Span span = Span()); /*! * \brief less_equal * * \param a left operand * \param b right operand * \param span The location of this operation in the source. * \return The result expression. * \note this function does eager constant folding for * index types(int32, int64) when possible. */ TVM_DLL PrimExpr less_equal(PrimExpr a, PrimExpr b, Span span = Span()); /*! * \brief equal * * \param a left operand * \param b right operand * \param span The location of this operation in the source. * \return The result expression. * \note this function does eager constant folding for * index types(int32, int64) when possible. */ TVM_DLL PrimExpr equal(PrimExpr a, PrimExpr b, Span span = Span()); /*! * \brief not_equal * * \param a left operand * \param b right operand * \param span The location of this operation in the source. * \return The result expression. * \note this function does eager constant folding for * index types(int32, int64) when possible. */ TVM_DLL PrimExpr not_equal(PrimExpr a, PrimExpr b, Span span = Span()); /*! * \brief and * * \param a left operand * \param b right operand * \param span The location of this operation in the source. * \return The result expression. * \note This operator does eager constant folding. */ TVM_DLL PrimExpr logical_and(PrimExpr a, PrimExpr b, Span span = Span()); /*! * \brief or * * \param a left operand * \param b right operand * \param span The location of this operation in the source. * \return The result expression. * \note This operator does eager constant folding. */ TVM_DLL PrimExpr logical_or(PrimExpr a, PrimExpr b, Span span = Span()); /*! * \brief not * * \param a left operand * \param span The location of this operation in the source. * \return The result expression. * \note This operator does eager constant folding. */ TVM_DLL PrimExpr logical_not(PrimExpr a, Span span = Span()); /*! * \brief compute division in C semantics. * * a / b as in C/C++. * * When operands are integers, it directly corresponds to truncdiv. * * \param a left operand * \param b right operand * \param span The location of this operation in the source. * \return The result expression. * \note this function does eager constant folding for * index types(int32, int64) when possible. */ TVM_DLL PrimExpr div(PrimExpr a, PrimExpr b, Span span = Span()); /*! * \brief compute trunc(a / b) * * This is the default integer division behavior in C. * * \param a left operand * \param b right operand * \param span The location of this operation in the source. * \return The result expression. * \note this function does eager constant folding for * index types(int32, int64) when possible. */ TVM_DLL PrimExpr truncdiv(PrimExpr a, PrimExpr b, Span span = Span()); /*! * \brief compute the remainder of truncdiv * * This is the default integer division behavior in C. * * \param a left operand * \param b right operand * \param span The location of this operation in the source. * \return The result expression. * \note this function does eager constant folding for * index types(int32, int64) when possible. */ TVM_DLL PrimExpr truncmod(PrimExpr a, PrimExpr b, Span span = Span()); /*! * \brief compute floor(a / b) where a and b are non-negative. * * Use this function for index split calculation. * * This function might take advantage of the fact * that a and b are non-negative. 
* * \param a left operand * \param b right operand * \param span The location of this operation in the source. * \return The result expression. * \note this function does eager constant folding for * index types(int32, int64) when possible. */ TVM_DLL PrimExpr indexdiv(PrimExpr a, PrimExpr b, Span span = Span()); /*! * \brief compute ceil(a / b) where a and b are non-negative. * * Use this function for shape split calculation. * * This function might take advantage of the fact * that a and b are non-negative. * * \param a left operand * \param b right operand * \param span The location of this operation in the source. * \return The result expression. * \note this function does eager constant folding for * shape types(int32, int64) when possible. */ TVM_DLL PrimExpr shapediv(PrimExpr a, PrimExpr b, Span span = Span()); /*! * \brief compute the remainder of floor(a / b) where a and b are non-negative. * * Use this function for index split calculation. * This function might take advantage of the fact * that a and b are non-negative. * * \param a left operand * \param b right operand * \param span The location of this operation in the source. * \return The result expression. * \note this function does eager constant folding for * index types(int32, int64) when possible. */ TVM_DLL PrimExpr indexmod(PrimExpr a, PrimExpr b, Span span = Span()); /*! * \brief compute floor(a / b) * * \param a left operand * \param b right operand * \param span The location of this operation in the source. * \return The result expression. * \note this function does eager constant folding for * index types(int32, int64) when possible. */ TVM_DLL PrimExpr floordiv(PrimExpr a, PrimExpr b, Span span = Span()); /*! * \brief compute ceil(a / b) * * \param a left operand * \param b right operand * \param span The location of this operation in the source. * \return The result expression. * \note this function does eager constant folding for * index types(int32, int64) when possible. */ TVM_DLL PrimExpr ceildiv(PrimExpr a, PrimExpr b, Span span = Span()); /*! * \brief compute the remainder of floordiv * * \param a left operand * \param b right operand * \param span The location of this operation in the source. * \return The result expression. * \note this function does eager constant folding for * index types(int32, int64) when possible. */ TVM_DLL PrimExpr floormod(PrimExpr a, PrimExpr b, Span span = Span()); /*! * \brief take maximum of two values * * \param a left operand * \param b right operand * \param span The location of this operation in the source. * \return The result expression. * \note this function does eager constant folding for * index types(int32, int64) when possible. */ TVM_DLL PrimExpr max(PrimExpr a, PrimExpr b, Span span = Span()); /*! * \brief take minimum of two values * * \param a left operand * \param b right operand * \param span The location of this operation in the source. * \return The result expression. * \note this function does eager constant folding for * index types(int32, int64) when possible. */ TVM_DLL PrimExpr min(PrimExpr a, PrimExpr b, Span span = Span()); /*! * \brief take bitwise and of two values * * \param a left operand * \param b right operand * \param span The location of this operation in the source. * \return The result expression. * \note this function does eager constant folding for * index types(int32, int64) when possible. */ TVM_DLL PrimExpr bitwise_and(PrimExpr a, PrimExpr b, Span span = Span()); /*!
* \brief take bitwise or of two values * * \param a left operand * \param b right operand * \param span The location of this operation in the source. * \return The result expression. * \note this function does eager constant folding for * index types(int32, int64) when possible. */ TVM_DLL PrimExpr bitwise_or(PrimExpr a, PrimExpr b, Span span = Span()); /*! * \brief take bitwise xor of two values * * \param a left operand * \param b right operand * \param span The location of this operation in the source. * \return The result expression. * \note this function does eager constant folding for * index types(int32, int64) when possible. */ TVM_DLL PrimExpr bitwise_xor(PrimExpr a, PrimExpr b, Span span = Span()); /*! * \brief take bitwise negation of a value * * \param a the input expression. * \param span The location of this operation in the source. * \return The result expression. * \note this function does eager constant folding for * index types(int32, int64) when possible. */ TVM_DLL PrimExpr bitwise_neg(PrimExpr a, Span span = Span()); /*! * \brief Conditional expression. * * \param cond The condition * \param true_value The value when results are true. * \param false_value The value when results are false. * \param span The location of this operation in the source. * \return The result expression. * \note this function does eager constant folding for * index types(int32, int64) when possible. */ TVM_DLL PrimExpr if_then_else(PrimExpr cond, PrimExpr true_value, PrimExpr false_value, Span span = Span()); /*! * \brief Mark condition as likely. * \param cond The condition * \param span The location of this operation in the source. * \return The marked expression. */ TVM_DLL PrimExpr likely(PrimExpr cond, Span span = Span()); /*! * \brief Calculate power(x, y) * \param x The left operand. * \param y The right operand. * \param span The location of this operation in the source. */ TVM_DLL PrimExpr pow(PrimExpr x, PrimExpr y, Span span = Span()); /*! * \brief Calculate absolute value of x. * \param x The input data * \param span The location of this operation in the source. * * \return The absolute value of input data x */ TVM_DLL PrimExpr abs(PrimExpr x, Span span = Span()); /*! * \brief Check if x is NaN. * \param x The input data * \param span The location of this operation in the source. * \return The result expression. */ TVM_DLL PrimExpr isnan(PrimExpr x, Span span = Span()); /*! * \brief Check if x is finite. * \param x The input data * \param span The location of this operation in the source. * \return The result expression. */ TVM_DLL PrimExpr isfinite(PrimExpr x, Span span = Span()); /*! * \brief Check if x is infinite. * \param x The input data * \param span The location of this operation in the source. * \return The result expression. */ TVM_DLL PrimExpr isinf(PrimExpr x, Span span = Span()); /*! * \brief sum of source expression over axis * \param source The source expression. * \param axis List of iteration variables that will be used for reduction. * \param init The value with which to initialize the output. * \param span The location of this operation in the source. * \return The result. */ TVM_DLL PrimExpr sum(PrimExpr source, Array<tir::IterVar> axis, Array<PrimExpr> init = {}, Span span = Span()); /*! * \brief logical And of source expression over axis * \param source The source expression. * \param axis List of iteration variables that will be used for reduction. * \param init The value with which to initialize the output.
* \param span The location of this operation in the source. * \return The result. */ TVM_DLL PrimExpr all(PrimExpr source, Array<tir::IterVar> axis, Array<PrimExpr> init = {}, Span span = Span()); /*! * \brief logical Or of source expression over axis * \param source The source expression. * \param axis List of iteration variables that will be used for reduction. * \param init The value with which to initialize the output. * \param span The location of this operation in the source. * \return The result. */ TVM_DLL PrimExpr any(PrimExpr source, Array<tir::IterVar> axis, Array<PrimExpr> init = {}, Span span = Span()); /*! * \brief max of source expression over axis * \param source The source expression. * \param axis List of iteration variables that will be used for reduction. * \param init The value with which to initialize the output. * \param span The location of this operation in the source. * \return The result. */ TVM_DLL PrimExpr max(PrimExpr source, Array<tir::IterVar> axis, Array<PrimExpr> init = {}, Span span = Span()); /*! * \brief min of source expression over axis * \param source The source expression. * \param axis List of iteration variables that will be used for reduction. * \param init The value with which to initialize the output. * \param span The location of this operation in the source. * \return The result. */ TVM_DLL PrimExpr min(PrimExpr source, Array<tir::IterVar> axis, Array<PrimExpr> init = {}, Span span = Span()); /*! * \brief product of source expression over axis * \param source The source expression. * \param axis List of iteration variables that will be used for reduction. * \param init The value with which to initialize the output. * \param span The location of this operation in the source. * \return The result. */ TVM_DLL PrimExpr prod(PrimExpr source, Array<tir::IterVar> axis, Array<PrimExpr> init = {}, Span span = Span()); /*! * \brief Calculate floor(x) * \param x The input expression. * \param span The location of this operation in the source. * \return The result expression. */ TVM_DLL PrimExpr floor(PrimExpr x, Span span = Span()); /*! * \brief Calculate ceil(x) * \param x The input expression. * \param span The location of this operation in the source. * \return The result expression. */ TVM_DLL PrimExpr ceil(PrimExpr x, Span span = Span()); /*! * \brief Calculate round(x) * \param x The input expression. * \param span The location of this operation in the source. * \return The result expression. */ TVM_DLL PrimExpr round(PrimExpr x, Span span = Span()); /*! * \brief Calculates std::nearbyint(x) * \param x The input expression. * \param span The location of this operation in the source. * \return The result expression. * This is a faster alternative to round. */ TVM_DLL PrimExpr nearbyint(PrimExpr x, Span span = Span()); /*! * \brief Calculate trunc(x) * \param x The input expression. * \param span The location of this operation in the source. * \return The result expression. */ TVM_DLL PrimExpr trunc(PrimExpr x, Span span = Span()); /*! * \brief Construct a large uint constant by its low 32 bits and high 32 bits. * \param dtype The final data type. * \param low The lower 32 bits. * \param high The higher 32 bits. * \param span The location of this operation in the source. * \return The constructed expression. */ TVM_DLL PrimExpr LargeUIntImm(DataType dtype, int64_t low, int64_t high, Span span = Span()); /*! * \brief Execute a multiplication between two Q-numbers x and y * followed by a right shift s.
The mathematical expression is: * * out = round(x*y*2^-s) * * Please note that the two Q-numbers x and y are supposed to have * the same number of fractional bits q. * * More about Q-numbers here: https://en.wikipedia.org/wiki/Q_(number_format) * * The rounding rule is to the nearest value, rounding half up * (i.e., round(x.1) = x and round (x.5) = x+1) * \param x first Q-number * \param y second Q-number * \param q number of fractional bits in x and y. Needs to be > 0 * \param s integer right shift * \param span The location of this operation in the source. * \return The constructed expression. */ TVM_DLL PrimExpr q_multiply_shift(PrimExpr x, PrimExpr y, PrimExpr q, PrimExpr s, Span span = Span()); // Intrinsic operators #define TVM_DECLARE_INTRIN_UNARY(OpName) \ inline PrimExpr OpName(PrimExpr x, Span span = Span()) { \ static const Op& op = Op::Get("tir." #OpName); \ if (x.dtype().is_bfloat16()) { \ DataType bf16_dtype = x.dtype(); \ DataType fp32_dtype(kDLFloat, 32, bf16_dtype.lanes()); \ PrimExpr x_fp32 = tir::Cast(fp32_dtype, {x}, span); \ PrimExpr result_fp32 = tir::Call(fp32_dtype, op, {x_fp32}, span); \ return tir::Cast(bf16_dtype, {result_fp32}, span); \ } else { \ return tir::Call(x.dtype(), op, {x}, span); \ } \ } TVM_DECLARE_INTRIN_UNARY(exp); TVM_DECLARE_INTRIN_UNARY(exp2); TVM_DECLARE_INTRIN_UNARY(exp10); TVM_DECLARE_INTRIN_UNARY(erf); TVM_DECLARE_INTRIN_UNARY(tanh); TVM_DECLARE_INTRIN_UNARY(sigmoid); TVM_DECLARE_INTRIN_UNARY(sqrt); TVM_DECLARE_INTRIN_UNARY(rsqrt); TVM_DECLARE_INTRIN_UNARY(log); TVM_DECLARE_INTRIN_UNARY(log2); TVM_DECLARE_INTRIN_UNARY(log10); TVM_DECLARE_INTRIN_UNARY(log1p); TVM_DECLARE_INTRIN_UNARY(popcount); TVM_DECLARE_INTRIN_UNARY(tan); TVM_DECLARE_INTRIN_UNARY(cos); TVM_DECLARE_INTRIN_UNARY(cosh); TVM_DECLARE_INTRIN_UNARY(sin); TVM_DECLARE_INTRIN_UNARY(sinh); TVM_DECLARE_INTRIN_UNARY(asin); TVM_DECLARE_INTRIN_UNARY(acos); TVM_DECLARE_INTRIN_UNARY(atan); TVM_DECLARE_INTRIN_UNARY(acosh); TVM_DECLARE_INTRIN_UNARY(asinh); TVM_DECLARE_INTRIN_UNARY(atanh); TVM_DECLARE_INTRIN_UNARY(clz); #define TVM_DECLARE_INTRIN_BINARY(OpName) \ inline PrimExpr OpName(PrimExpr x, PrimExpr y, Span span = Span()) { \ static const Op& op = Op::Get("tir." #OpName); \ return tir::Call(x.dtype(), op, {x, y}, span); \ } TVM_DECLARE_INTRIN_BINARY(atan2); TVM_DECLARE_INTRIN_BINARY(nextafter); TVM_DECLARE_INTRIN_BINARY(copysign); TVM_DECLARE_INTRIN_BINARY(hypot); TVM_DECLARE_INTRIN_BINARY(ldexp); namespace tir { /*! * \brief Check if type is a pointer to a runtime element type. * \param type The type to be checked. * \param element_type The corresponding element type. * \return The check results */ inline bool IsPointerType(const Type& type, const DataType& element_type) { if (!type.defined()) return false; if (const auto* ptr_type = type.as<PointerTypeNode>()) { if (const auto* prim_type = ptr_type->element_type.as<PrimTypeNode>()) { return prim_type->dtype == element_type; } } return false; } /*! * \brief Make a const value with certain data type. * \param t The target type. * \param value The input value * \return the result expression. * \tparam ValueType The constant value type * \param span The location of this operation in the source. */ template <typename ValueType, typename = typename std::enable_if<std::is_pod<ValueType>::value>::type> inline PrimExpr make_const(DataType t, ValueType value, Span span = Span()); /*! * \brief Make a const zero expr. * \param t The target type. * \param span The location of this operation in the source. * \return the result expression. 
*/ inline PrimExpr make_zero(DataType t, Span span = Span()); /*! * \brief Make a constant true expression. * \param lanes The number of lanes in the bool * \param span The location of this operation in the source. * \return The result expression. */ inline PrimExpr const_true(int lanes = 1, Span span = Span()) { return make_const(DataType::UInt(1, lanes), 1); } /*! * \brief Make a constant false expression. * \param lanes The number of lanes in the bool * \param span The location of this operation in the source. * \return The result expression. */ inline PrimExpr const_false(int lanes = 1, Span span = Span()) { return make_const(DataType::UInt(1, lanes), 0); } /*! * \brief Get x as constant int expression. * \param x The expression * \return the address of the constant int value, * or nullptr if x is not IntImm. */ inline const int64_t* as_const_int(const PrimExpr& x) { if (!x.defined()) return nullptr; if (const tir::IntImmNode* op = x.as<tir::IntImmNode>()) { return &(op->value); } return nullptr; } /*! * \brief Check whether x is a constant integer expression. * \param x The input argument * \param value the value to be compared against. * \return whether x is constant expression. */ inline bool is_const_int(const PrimExpr& x, int64_t value); /*! * \brief Check whether stmt is nop. * \param stmt The input statement * \return whether stmt is nop */ inline bool is_no_op(const tir::Stmt& stmt); /*! * \brief Check whether x is a constant integer 1 * \param x The input argument. * \note This only returns true for integer types. * \return whether x is constant 1 */ inline bool is_one(const PrimExpr& x) { return is_const_int(x, 1); } /*! * \brief Check whether x is a constant integer 0 * \param x The input argument * \return whether x is constant 0 * \note This only returns true for integer types. */ inline bool is_zero(const PrimExpr& x) { return is_const_int(x, 0); } /*! * \brief Check whether x is an integer constant. * \note This only returns true for integer types. * \return whether x is constant */ inline bool is_const_int(const PrimExpr& x); /*! * \brief Check whether x is an integer/float constant. * \note This returns true for integer and float constants, including broadcasts of them. * \return whether x is constant */ inline bool is_const_number(const PrimExpr& x); /*! * \brief Left fold. * \param freduce The reduction function. * \param init_value The initial value. * \param values The values to be folded. * \param span The location of the fold in the source. * \return The result. * \tparam FReduce The type of the reduction. */ template <typename FReduce> inline PrimExpr foldl(FReduce freduce, PrimExpr init_value, const Array<PrimExpr>& values, Span span = Span()); /*! * \brief Check whether x is a constant power of two * If x is power of two, write the power to the shift. * * \param x The input expression. * \param shift The output shift if x is power of two.
* \return whether x is constant power of two */ TVM_DLL bool is_const_power_of_two_integer(const PrimExpr& x, int* shift); // Implementation details after this inline bool is_const_int(const PrimExpr& x) { return as_const_int(x); } inline bool is_const_number(const PrimExpr& x) { if (x.as<tir::IntImmNode>()) { return true; } else if (x.as<tir::FloatImmNode>()) { return true; } else if (const auto* op = x.as<tir::BroadcastNode>()) { return (op->value->IsInstance<tir::IntImmNode>() || op->value->IsInstance<tir::FloatImmNode>()); } return false; } inline bool is_positive_const(const PrimExpr& a) { const int64_t* as_int = as_const_int(a); return as_int && (*as_int > 0); } inline bool is_negative_const(const PrimExpr& a) { const int64_t* as_int = as_const_int(a); return as_int && (*as_int < 0); } inline bool is_const_int(const PrimExpr& x, int64_t value) { const int64_t* as_int = as_const_int(x); return as_int && (*as_int == value); } inline bool is_no_op(const tir::Stmt& stmt) { if (!stmt.defined()) return true; if (const auto* op = stmt.as<tir::EvaluateNode>()) { return is_const_int(op->value); } if (const auto* op = stmt.as<tir::SeqStmtNode>()) { return op->seq.size() == 0; } return false; } template <typename ValueType> inline PrimExpr MakeConstScalar(DataType t, ValueType value, Span span = Span()) { if (t.is_int()) return IntImm(t, static_cast<int64_t>(value), span); if (t.is_uint()) { // Use IntImm if it is a small integer uint64_t uval = static_cast<uint64_t>(value); if (value < static_cast<ValueType>(0)) { LOG(FATAL) << "cannot make uint from negative value " << value; } else if (uval <= static_cast<uint64_t>(std::numeric_limits<int64_t>::max())) { return IntImm(t, static_cast<int64_t>(value), span); } else { uint64_t mask = (static_cast<uint64_t>(1) << 32U) - 1U; uint64_t low = uval & mask; uint64_t high = uval >> 32U; return LargeUIntImm(t, static_cast<int64_t>(low), static_cast<int64_t>(high), span); } } if (t.is_float() || t.is_bfloat16()) return FloatImm(t, static_cast<double>(value), span); // For now, we store const scalar values of custom datatypes within doubles; later, during the // datatypes lowering pass, we will lower the value to its true representation in the format // specified by the datatype. // TODO(gus) when do we need to start worrying about doubles not being precise enough? 
if (static_cast<uint8_t>(t.code()) >= static_cast<uint8_t>(DataType::kCustomBegin)) { return FloatImm(t, static_cast<double>(value), span); } LOG(FATAL) << "cannot make const for type " << t; return PrimExpr(); } template <> inline PrimExpr MakeConstScalar(DataType t, bool value, Span span) { return MakeConstScalar(t, static_cast<int>(value), span); } template <typename ValueType, typename> inline PrimExpr make_const(DataType t, ValueType value, Span span) { if (t.lanes() == 1) { return MakeConstScalar(t, value, span); } else { return tir::Broadcast(MakeConstScalar(t.element_of(), value, span), t.lanes(), span); } } inline PrimExpr make_zero(DataType t, Span span) { if (t.is_handle()) { return reinterpret(t, make_const(DataType::UInt(64), 0, span)); } return make_const(t, 0, span); } template <typename FReduce> inline PrimExpr foldl(FReduce freduce, PrimExpr init_value, const Array<PrimExpr>& values, Span span) { for (PrimExpr val : values) { init_value = freduce(init_value, val, span); } return init_value; } } // namespace tir // additional const expression overloading #define TVM_DEFINE_ASSIGN_OP_OVERLOAD(Name, OpFunc) \ inline PrimExpr Name(PrimExpr& a, PrimExpr b) { \ a = OpFunc(a, b); \ return a; \ } #define TVM_DEFINE_BINOP_CONST_VAL_OVERLOAD(Name) \ inline PrimExpr Name(const PrimExpr& a, float b) { return Name(a, PrimExpr(b)); } \ inline PrimExpr Name(float a, const PrimExpr& b) { return Name(PrimExpr(a), b); } \ inline PrimExpr Name(int a, const PrimExpr& b) { \ return Name(tir::make_const(b.dtype(), a), b); \ } \ inline PrimExpr Name(const PrimExpr& a, int b) { \ return Name(a, tir::make_const(a.dtype(), b)); \ } \ inline PrimExpr Name(const PrimExpr& a, double b) { \ return Name(a, tir::make_const(DataType::Float(64), b)); \ } #define TVM_DEFINE_BINOP_CONST_VAL_OVERLOAD_SPANNED(Name) \ inline PrimExpr Name(const PrimExpr& a, float b, Span span = Span()) { \ return Name(a, PrimExpr(b), span); \ } \ inline PrimExpr Name(float a, const PrimExpr& b, Span span = Span()) { \ return Name(PrimExpr(a), b, span); \ } \ inline PrimExpr Name(int a, const PrimExpr& b, Span span = Span()) { \ return Name(tir::make_const(b.dtype(), a), b, span); \ } \ inline PrimExpr Name(const PrimExpr& a, int b, Span span = Span()) { \ return Name(a, tir::make_const(a.dtype(), b), span); \ } \ inline PrimExpr Name(const PrimExpr& a, double b, Span span = Span()) { \ return Name(a, tir::make_const(DataType::Float(64), b), span); \ } #define TVM_DEFINE_LOGICAL_OP_CONST_VAL_OVERLOAD(Name) \ inline PrimExpr Name(const PrimExpr& a, bool b) { return Name(a, PrimExpr(b)); } \ inline PrimExpr Name(bool a, const PrimExpr& b) { return Name(PrimExpr(a), b); } #define TVM_DEFINE_LOGICAL_OP_CONST_VAL_OVERLOAD_SPANNED(Name) \ inline PrimExpr Name(const PrimExpr& a, bool b, Span span = Span()) { \ return Name(a, PrimExpr(b), span); \ } \ inline PrimExpr Name(bool a, const PrimExpr& b, Span span = Span()) { \ return Name(PrimExpr(a), b, span); \ } #define TVM_DEFINE_INT_OP_CONST_VAL_OVERLOAD(Name) \ inline PrimExpr Name(const PrimExpr& a, int b) { \ return Name(a, tir::make_const(a.dtype(), b)); \ } \ inline PrimExpr Name(int a, const PrimExpr& b) { return Name(tir::make_const(b.dtype(), a), b); } #define TVM_DEFINE_INT_OP_CONST_VAL_OVERLOAD_SPANNED(Name) \ inline PrimExpr Name(const PrimExpr& a, int b, Span span = Span()) { \ return Name(a, tir::make_const(a.dtype(), b), span); \ } \ inline PrimExpr Name(int a, const PrimExpr& b, Span span = Span()) { \ return Name(tir::make_const(b.dtype(), a), b, span); \ } 
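/*!
 * \brief A minimal usage sketch of tir::foldl as defined above; the variables
 * `x` and `y` are assumed int32 PrimExprs, purely for illustration. The reducer
 * takes (accumulator, value, span) and is applied left to right.
 *
 * \code
 *
 * Array<PrimExpr> vals = {x, y, tir::make_const(DataType::Int(32), 1)};
 * PrimExpr total = tir::foldl(
 *     [](PrimExpr a, PrimExpr b, Span span) { return add(a, b, span); },
 *     tir::make_zero(DataType::Int(32)), vals);
 *
 * \endcode
 */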
TVM_DEFINE_ASSIGN_OP_OVERLOAD(operator+=, operator+); TVM_DEFINE_ASSIGN_OP_OVERLOAD(operator-=, operator-); TVM_DEFINE_ASSIGN_OP_OVERLOAD(operator*=, operator*); TVM_DEFINE_BINOP_CONST_VAL_OVERLOAD(operator+); TVM_DEFINE_BINOP_CONST_VAL_OVERLOAD(operator-); TVM_DEFINE_BINOP_CONST_VAL_OVERLOAD(operator*); TVM_DEFINE_BINOP_CONST_VAL_OVERLOAD(operator>);  // NOLINT(*) TVM_DEFINE_BINOP_CONST_VAL_OVERLOAD(operator>=); TVM_DEFINE_BINOP_CONST_VAL_OVERLOAD(operator<);  // NOLINT(*) TVM_DEFINE_BINOP_CONST_VAL_OVERLOAD(operator<=); TVM_DEFINE_BINOP_CONST_VAL_OVERLOAD_SPANNED(max); TVM_DEFINE_BINOP_CONST_VAL_OVERLOAD_SPANNED(min); TVM_DEFINE_BINOP_CONST_VAL_OVERLOAD_SPANNED(div); TVM_DEFINE_BINOP_CONST_VAL_OVERLOAD_SPANNED(add); TVM_DEFINE_BINOP_CONST_VAL_OVERLOAD_SPANNED(sub); TVM_DEFINE_BINOP_CONST_VAL_OVERLOAD_SPANNED(mul); TVM_DEFINE_BINOP_CONST_VAL_OVERLOAD_SPANNED(greater); TVM_DEFINE_BINOP_CONST_VAL_OVERLOAD_SPANNED(greater_equal); TVM_DEFINE_BINOP_CONST_VAL_OVERLOAD_SPANNED(less); TVM_DEFINE_BINOP_CONST_VAL_OVERLOAD_SPANNED(less_equal); // integer related ops TVM_DEFINE_INT_OP_CONST_VAL_OVERLOAD_SPANNED(indexdiv); TVM_DEFINE_INT_OP_CONST_VAL_OVERLOAD_SPANNED(indexmod); TVM_DEFINE_INT_OP_CONST_VAL_OVERLOAD_SPANNED(truncdiv); TVM_DEFINE_INT_OP_CONST_VAL_OVERLOAD_SPANNED(truncmod); TVM_DEFINE_INT_OP_CONST_VAL_OVERLOAD_SPANNED(floordiv); TVM_DEFINE_INT_OP_CONST_VAL_OVERLOAD_SPANNED(floormod); TVM_DEFINE_INT_OP_CONST_VAL_OVERLOAD_SPANNED(right_shift);  // NOLINT(*) TVM_DEFINE_INT_OP_CONST_VAL_OVERLOAD_SPANNED(left_shift);  // NOLINT(*) TVM_DEFINE_INT_OP_CONST_VAL_OVERLOAD_SPANNED(bitwise_and); TVM_DEFINE_INT_OP_CONST_VAL_OVERLOAD_SPANNED(bitwise_or); TVM_DEFINE_INT_OP_CONST_VAL_OVERLOAD_SPANNED(bitwise_xor); TVM_DEFINE_INT_OP_CONST_VAL_OVERLOAD(operator>>);  // NOLINT(*) TVM_DEFINE_INT_OP_CONST_VAL_OVERLOAD(operator<<);  // NOLINT(*) TVM_DEFINE_INT_OP_CONST_VAL_OVERLOAD(operator&); TVM_DEFINE_INT_OP_CONST_VAL_OVERLOAD(operator|); TVM_DEFINE_INT_OP_CONST_VAL_OVERLOAD(operator^); // logical ops TVM_DEFINE_LOGICAL_OP_CONST_VAL_OVERLOAD(operator&&); TVM_DEFINE_LOGICAL_OP_CONST_VAL_OVERLOAD(operator||); TVM_DEFINE_LOGICAL_OP_CONST_VAL_OVERLOAD_SPANNED(logical_and); TVM_DEFINE_LOGICAL_OP_CONST_VAL_OVERLOAD_SPANNED(logical_or); /*! * \brief Helper function to raise a compiler error about division ambiguity. * \note The call to this function will always result in a compiler error. * \tparam TA Any class type. */ template <typename TA> inline void DivAmbiguityError(const TA& a) { constexpr bool div_ambiguity = !std::is_class<TA>::value; static_assert(div_ambiguity, "TVM supports multiple types of integer divisions, " "please call div, indexdiv/indexmod, " "floordiv/floormod or truncdiv/truncmod directly " "to avoid ambiguity in the code. " "Check out these functions in tir/op.h."); } // The following code is not intended to be used in the codebase. // Instead, it generates clear compiler errors that ask developers // to use the specific division function. // The second template argument is necessary to make sure the // code is compiled lazily by the compiler upon invocation. template <typename TB> inline PrimExpr operator/(const PrimExpr& a, const TB& b) { DivAmbiguityError(a); return a; } template <typename TB> inline PrimExpr operator/=(const PrimExpr& a, const TB& b) { DivAmbiguityError(a); return a; } template <typename TB> inline PrimExpr operator%(const PrimExpr& a, const TB& b) { DivAmbiguityError(a); return a; } } // namespace tvm #endif  // TVM_TIR_OP_H_
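/*!
 * \brief A minimal sketch of the division ambiguity guard above; the constant
 * values are illustrative only. Plain `a / b` or `a % b` on a PrimExpr is
 * rejected at compile time, so callers must pick a division semantics
 * explicitly.
 *
 * \code
 *
 * PrimExpr a = tir::make_const(DataType::Int(32), 7);
 * PrimExpr b = tir::make_const(DataType::Int(32), 2);
 * // PrimExpr bad = a / b;       // static_assert fires: ambiguous division
 * PrimExpr t = truncdiv(a, b);   // C semantics, rounds toward zero
 * PrimExpr f = floordiv(a, b);   // rounds toward negative infinity
 * PrimExpr m = floormod(a, b);   // remainder matching floordiv
 *
 * \endcode
 */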
include/tvm/tir/op_attr_types.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/tir/op_attr_types.h * \brief Attribute types in the Op registry for TIR ops. * * These attributes can be set via OpRegEntry::set_attr * * \sa tvm/ir/op.h */ #ifndef TVM_TIR_OP_ATTR_TYPES_H_ #define TVM_TIR_OP_ATTR_TYPES_H_ #include <tvm/ir/expr.h> #include <tvm/runtime/container/string.h> #include <tvm/runtime/packed_func.h> namespace tvm { namespace tir { /*! * \brief Global symbol of the op after lowering. */ using TGlobalSymbol = String; /*! * \brief Whether the op is overloaded for vector form. */ using TVectorizable = bool; /*! * \brief The intrinsic lowering function for given op. */ using FLowerIntrinsic = runtime::TypedPackedFunc<PrimExpr(PrimExpr)>; /*! * \brief The legalization function for given tir op. */ using FLegalize = runtime::TypedPackedFunc<PrimExpr(PrimExpr)>; /*! * \brief The effect type of the call. */ enum class CallEffectKind : int { /*! \brief Function corresponds to an annotation (e.g. likely) and can translate to identity. */ kExprAnnotation = 0, /*! * \brief Pure function that does not interact * with any external state. */ kPure = 1, /*! * \brief Functions that may read from states (e.g. RAM) */ kReadState = 2, /*! * \brief Functions that may read/write from states (e.g. RAM). */ kUpdateState = 3, /*! * \brief Opaque function, cannot make any assumption */ kOpaque = kUpdateState, /*! * \brief Special intrinsic to annotate call arguments info, * only valid as a direct argument to a call. */ kSpecialCallArg = 4, /*! * \brief Embed opaque information in the Expr, cannot be codegen. */ kEmbedInfo = 5, /*! * \brief Function that changes control flow */ kControlJump = 6, }; /*! \brief Use integer to record the kind. */ using TCallEffectKind = Integer; } // namespace tir } // namespace tvm #endif  // TVM_TIR_OP_ATTR_TYPES_H_
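/*!
 * \brief A sketch of how these attribute types are typically consumed, using
 * the TVM_REGISTER_OP registry from tvm/ir/op.h; the op name "tir.my_intrin"
 * is hypothetical and used only for illustration.
 *
 * \code
 *
 * TVM_REGISTER_OP("tir.my_intrin")
 *     .set_num_inputs(1)
 *     .set_attr<TGlobalSymbol>("TGlobalSymbol", "my_intrin")
 *     .set_attr<TCallEffectKind>("TCallEffectKind", Integer(CallEffectKind::kPure));
 *
 * \endcode
 */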
include/tvm/tir/schedule/block_scope.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/tir/schedule/block_scope.h * \brief Definition of two pillar data structures for TensorIR scheduling: StmtSRef and BlockScope. * \sa StmtSRefNode * \sa BlockScopeNode */ #ifndef TVM_TIR_SCHEDULE_BLOCK_SCOPE_H_ #define TVM_TIR_SCHEDULE_BLOCK_SCOPE_H_ #include <tvm/tir/stmt.h> #include <unordered_map> namespace tvm { namespace tir { /*! * \brief An object that refers to schedulable elements (block/for-loop) in TensorIR, aka "sref". * * Glossary * - Block sref: A StmtSRef that points to a TensorIR block. * - Loop sref: A StmtSRef that points to a TensorIR for loop. * - Parent sref: The parent sref of an sref points to the closest schedulable * statement among its ancestors on the TensorIR AST. * - Root sref: Sref to the root block. Every sref has exactly one parent sref except for root sref. * - Sref tree: The parent-children-relationship of srefs that forms a tree, uniquely determined by * the TensorIR AST. */ class StmtSRefNode : public Object { public: /*! * \brief The block or `for` stmt the object refers to * \note Non-owned reference (raw pointer) is used here, so that we can perform copy-on-write * optimization on statements when possible. The strong reference is held in the ScheduleState. */ const StmtNode* stmt; /*! \brief The parent sref. */ StmtSRefNode* parent; /*! * \brief If the statement the sref points to is an element of a SeqStmt in the AST, * then `seq_index` is set to its index; otherwise `seq_index` is -1 */ int64_t seq_index; void VisitAttrs(AttrVisitor* v) { // `stmt` is not visited // `parent` is not visited v->Visit("seq_index", &seq_index); } static constexpr const char* _type_key = "tir.StmtSRef"; TVM_DECLARE_FINAL_OBJECT_INFO(StmtSRefNode, Object); /*! \brief Reset the object inplace to the invalid state */ void Reset() { this->stmt = nullptr; this->parent = nullptr; this->seq_index = -1; } /*! * \brief Get the referenced statement with proper type checking. * It serves the same purpose as `ObjectRef::as`, but does not acquire a strong reference to `stmt` * \tparam StmtType The type that `this->stmt` is to be downcasted to. Presumably * tvm::tir::BlockNode or tvm::tir::ForNode * \return nullptr if type check fails, otherwise the casted result for `this->stmt` */ template <typename StmtType> const StmtType* StmtAs() const { if (stmt != nullptr && stmt->IsInstance<StmtType>()) { return static_cast<const StmtType*>(stmt); } else { return nullptr; } } }; /*! * \brief Managed reference to StmtSRefNode * \sa StmtSRefNode */ class StmtSRef : public ObjectRef { public: /*!
* \brief The constructor * \param stmt The corresponding stmt node, can be either block or for loop. * \param parent The parent sref. * \param seq_index The location in an array if the parent of the stmt contains multiple children. * -1 if the parent does not contain multiple children. */ TVM_DLL explicit StmtSRef(const StmtNode* stmt, StmtSRefNode* parent, int64_t seq_index); /*! \return The mutable pointer to the StmtSRefNode */ StmtSRefNode* get() const { return static_cast<StmtSRefNode*>(data_.get()); } TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(StmtSRef, ObjectRef, StmtSRefNode); public: /*! * \return A special StmtSRef, which doesn't point to any stmt in the AST, * only serving as a "mark" to hint compute-at to do the work of compute-inline * \note This is only used as a faked loop sref for compute-at and reverse-compute-at, * i.e. * * compute-at(block, loop_sref): * compute-inline(block) if loop_sref.same_as(InlineMark()) * no-op if loop_sref.same_as(RootMark()) * compute-at-impl(block, loop_sref) otherwise */ TVM_DLL static StmtSRef InlineMark(); /*! * \return A special StmtSRef, which doesn't point to any stmt in the AST, * only serving as a "mark" to hint compute-at to do nothing * \note This is only used as a faked loop sref for compute-at and reverse-compute-at, * i.e. * * compute-at(block, loop_sref): * compute-inline(block) if loop_sref.same_as(InlineMark()) * no-op if loop_sref.same_as(RootMark()) * compute-at-impl(block, loop_sref) otherwise */ TVM_DLL static StmtSRef RootMark(); }; /*! * \brief Type of dependency. Right now we have 4 types of dependencies * 1) Read-after-write (kRAW) * 2) Write-after-write (kWAW) * 3) Write-after-read (kWAR) * 4) Opaque dependency (kOpaque) */ enum class DepKind : int32_t { kRAW = 0, kWAW = 1, kWAR = 2, kOpaque = 3, }; /*! * \brief A tuple (src, dst, kind) representing certain types of dependency. * For example, (A, B, kRAW) means block B depends on block A, and the dependency kind is * read-after-write, which means block B reads the result written by block A. */ class DependencyNode : public Object { public: /*! \brief The source of the dependency relation */ StmtSRef src; /*! \brief The destination of the dependency relation */ StmtSRef dst; /*! \brief The dependency kind */ DepKind kind; void VisitAttrs(AttrVisitor* v) { v->Visit("src", &src); v->Visit("dst", &dst); v->Visit("kind", &kind); } static constexpr const char* _type_key = "tir.Dependency"; TVM_DECLARE_FINAL_OBJECT_INFO(DependencyNode, Object); }; /*! * \brief Managed reference to DependencyNode * \sa DependencyNode */ class Dependency : public ObjectRef { public: /*! \brief Constructor */ TVM_DLL explicit Dependency(StmtSRef src, StmtSRef dst, DepKind kind); TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(Dependency, ObjectRef, DependencyNode); }; /*! * \brief An object with 1-to-1 correspondence with each block reference in the sref tree. * This data structure is used to track the producer-consumer dependencies between blocks. * For example, even leaf nodes have a scope node, even though they have no dependencies. * * Glossary: * - Block scope: A contiguous subtree of the sref tree, rooted at each block sref, * whose components are: * - scope root: a block sref * - internal srefs: loop srefs * - scope leaves: block srefs * - Child block: The scope leaf blocks under the scope root or a specific internal sref */ class BlockScopeNode : public Object { public: /*!
* \brief Lookup table for the `src` of dependencies * \note We intentionally didn't use tvm::Map as the data structure, because we need the values * inside to be mutable so that they could be further maintained properly during transformations. */ std::unordered_map<StmtSRef, Array<Dependency>, ObjectPtrHash, ObjectPtrEqual> src2deps; /*! \brief Lookup table for the `dst` of dependencies */ std::unordered_map<StmtSRef, Array<Dependency>, ObjectPtrHash, ObjectPtrEqual> dst2deps; /*! \brief The mapping from the buffer to the blocks that write it */ std::unordered_map<Buffer, Array<StmtSRef>, ObjectPtrHash, ObjectPtrEqual> buffer_writers; /*! * \brief This property indicates that the block scope (rooted at its corresponding block) is * equivalent to a stage pipeline, under the following conditions: * * 1) The region cover property holds for every one of its child blocks * 2) No write-after-read dependency or opaque dependency, only read-after-write and * write-after-write are allowed * 3) All the statements in the scope are schedulable statements, i.e. Block and For */ bool stage_pipeline{false}; void VisitAttrs(AttrVisitor* v) {} static constexpr const char* _type_key = "tir.BlockScope"; TVM_DECLARE_FINAL_OBJECT_INFO(BlockScopeNode, Object); public: /******** Dependency ********/ /*! * \brief Get all dependencies whose `src` equals `src` * \param src The queried block * \return The dependencies */ TVM_DLL Array<Dependency> GetDepsBySrc(const StmtSRef& src) const; /*! * \brief Get all dependencies whose `dst` equals `dst` * \param dst The queried block * \return The dependencies */ TVM_DLL Array<Dependency> GetDepsByDst(const StmtSRef& dst) const; }; /*! * \brief Managed reference to BlockScopeNode * \sa BlockScopeNode */ class BlockScope : public ObjectRef { public: /*! \brief The constructor creating an empty block scope with no dependency information */ TVM_DLL BlockScope(); /*! * \brief Create the object with the specific leaf blocks, and compute the dependency information * between the leaf blocks. * \param child_block_srefs The srefs to the leaf blocks * \note We assume the leaf blocks are given in pre-DFS order */ TVM_DLL explicit BlockScope(const Array<StmtSRef>& child_block_srefs); TVM_DEFINE_MUTABLE_NOTNULLABLE_OBJECT_REF_METHODS(BlockScope, ObjectRef, BlockScopeNode); }; } // namespace tir } // namespace tvm #endif  // TVM_TIR_SCHEDULE_BLOCK_SCOPE_H_
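/*!
 * \brief A usage sketch for BlockScope; `block_srefs` (leaf block srefs in
 * pre-DFS order) and `producer` are assumed to exist. It builds the scope and
 * walks the read-after-write dependencies originating from one block.
 *
 * \code
 *
 * BlockScope scope(block_srefs);
 * for (const Dependency& dep : scope->GetDepsBySrc(producer)) {
 *   if (dep->kind == DepKind::kRAW) {
 *     const StmtSRef& consumer = dep->dst;  // reads what `producer` wrote
 *   }
 * }
 *
 * \endcode
 */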
include/tvm/tir/schedule/instruction.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ #ifndef TVM_TIR_SCHEDULE_INSTRUCTION_H_ #define TVM_TIR_SCHEDULE_INSTRUCTION_H_ #include <tvm/node/reflection.h> #include <utility> namespace tvm { // Forward declaration template <typename, typename> class AttrRegistry; namespace tir { // Forward declaration class Schedule; /*! * \brief Type of the functor that applies the instruction to a TensorIR schedule * \param sch The schedule to be applied on * \param inputs The input random variables * \param attrs Instruction attributes * \param decision Decisions made on the instruction * \return The functor returns an array of output random variables */ using FInstructionApply = runtime::TypedPackedFunc<Array<ObjectRef>( Schedule sch, const Array<ObjectRef>& inputs, const Array<ObjectRef>& attrs, const Optional<ObjectRef>& decision)>; /*! * \brief Type of the functor that converts the instruction to a statement in python syntax * \param inputs Names of the input random variables * \param attrs Instruction attributes * \param decision Decisions made on the instruction * \param outputs Names of the output random variables * \return A string representing the python api call */ using FInstructionAsPython = runtime::TypedPackedFunc<String( const Array<ObjectRef>& inputs, const Array<ObjectRef>& attrs, const Optional<ObjectRef>& decision, const Array<String>& outputs)>; /*! * \brief Type of the functor that serializes its attributes to JSON * \param attrs The attributes to be serialized * \return An array, serialized attributes * \note This functor is nullable */ using FInstructionAttrsAsJSON = runtime::TypedPackedFunc<ObjectRef(Array<ObjectRef> attrs)>; /*! * \brief Type of the functor that deserializes its attributes from JSON * \param json_attrs The JSON attributes to be deserialized * \return An array, deserialized attributes * \note This functor is nullable */ using FInstructionAttrsFromJSON = runtime::TypedPackedFunc<Array<ObjectRef>(ObjectRef json_attrs)>; /*! * \brief Kind of an instruction, e.g. Split, Reorder, etc. * Besides the name, every kind of instruction has its own properties, including: * 1) A boolean indicating if the instruction is pure, i.e. it changes nothing in the schedule state * 2) A functor that applies the instruction to a TensorIR schedule * 3) A functor that converts the instruction to a statement in python syntax * 4) A functor that serializes its attributes to JSON * 5) A functor that deserializes its attributes from JSON * * Unlike `tvm::OpNode`, `InstructionKindNode` doesn't support unstructured properties, * mainly because there is no such use case yet to add any other property. */ class InstructionKindNode : public runtime::Object { public: /*! \brief The name of a kind of instructions */ String name; /*! * \brief Indicates if the instruction is pure, i.e.
removing it alone doesn't mutate the schedule * state. For example, the instruction `GetBlock` is pure because it changes * nothing, while `ComputeInline` is not because removing it leads to a different resulting * schedule. */ bool is_pure{false}; /*! \brief A functor that applies the instruction to a TensorIR schedule */ FInstructionApply f_apply_to_schedule{nullptr}; /*! \brief A functor that converts the instruction to a statement in python syntax */ FInstructionAsPython f_as_python{nullptr}; /*! * \brief A functor that serializes its attributes to JSON * \note If the functor is null, it means no conversion is needed */ FInstructionAttrsAsJSON f_attrs_as_json{nullptr}; /*! * \brief A functor that deserializes its attributes from JSON * \note If the functor is null, it means no conversion is needed */ FInstructionAttrsFromJSON f_attrs_from_json{nullptr}; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("name", &name); v->Visit("_is_pure", &is_pure); // not visited: f_apply_to_schedule // not visited: f_as_python // not visited: f_attrs_as_json // not visited: f_attrs_from_json } /*! \brief Checks if the instruction kind is EnterPostproc */ bool IsPostproc() const; static constexpr const char* _type_key = "tir.InstructionKind"; TVM_DECLARE_FINAL_OBJECT_INFO(InstructionKindNode, runtime::Object); }; /*! * \brief Managed reference to InstructionKindNode * \sa InstructionKindNode */ class InstructionKind : public runtime::ObjectRef { public: /*! * \brief Retrieve an InstructionKind using its name * \param name The registered name of the InstructionKind * \return The InstructionKind retrieved */ static InstructionKind Get(const String& name); TVM_DEFINE_OBJECT_REF_METHODS(InstructionKind, runtime::ObjectRef, InstructionKindNode); }; /*! \brief Schedule instructions, each of which corresponds to a schedule primitive */ class InstructionNode : public runtime::Object { public: /*! \brief The kind of the instruction */ InstructionKind kind; /*! * \brief The input random variables of the instruction, and the type of each element can be one * of the following: * - BlockRV * - LoopRV * - ExprRV * - FloatImm * - IntImm * - String * - null pointer */ Array<ObjectRef> inputs; /*! * \brief The attributes of the instruction. Similar to attributes of an operator, * attributes of an instruction are arbitrary constant metadata required by the instructions. * For example, the name of the block to be retrieved in `GetBlock`. */ Array<ObjectRef> attrs; /*! \brief The output random variables of the instruction, and the type of each element can be one * of the following: * - BlockRV * - LoopRV * - ExprRV, atomic variables only, won't be constants or composite PrimExpr */ Array<ObjectRef> outputs; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("kind", &kind); v->Visit("inputs", &inputs); v->Visit("attrs", &attrs); v->Visit("outputs", &outputs); } static constexpr const char* _type_key = "tir.Instruction"; TVM_DECLARE_FINAL_OBJECT_INFO(InstructionNode, runtime::Object); }; /*! * \brief Managed reference to InstructionNode * \sa InstructionNode */ class Instruction : public runtime::ObjectRef { public: /*!
* \brief Constructor * \param kind The kind of the instruction * \param inputs The input random variables of the instruction * \param attrs The attributes of the instruction * \param outputs The output random variables of the instruction */ explicit Instruction(InstructionKind kind, Array<ObjectRef> inputs, Array<ObjectRef> attrs, Array<ObjectRef> outputs); TVM_DEFINE_OBJECT_REF_METHODS(Instruction, runtime::ObjectRef, InstructionNode); }; /*! * \brief A helper macro to register InstructionKind, only used in `TVM_REGISTER_INST_KIND` * \note This macro is not user-facing. * \sa TVM_REGISTER_INST_KIND */ #define TVM_INST_KIND_REGISTER_VAR_DEF \ static DMLC_ATTRIBUTE_UNUSED ::tvm::tir::InstructionKindRegEntry& __make_##InstructionKind /*! * \brief Register an InstructionKind * \param InstructionKindName The name of the InstructionKind * * Example: * * \code * * TVM_REGISTER_INST_KIND("ComputeInline") * .set_is_pure(false) * .set_apply_to_schedule(ApplyToSchedule) * .set_attrs_as_json(AttrsAsJSON) * .set_attrs_from_json(AttrsFromJSON) * .set_as_python(AsPython); * * \endcode */ #define TVM_REGISTER_INST_KIND(InstructionKindName) \ TVM_STR_CONCAT(TVM_INST_KIND_REGISTER_VAR_DEF, __COUNTER__) = \ ::tvm::tir::InstructionKindRegEntry::RegisterOrGet(InstructionKindName).set_name() /*! \brief An entry in the registry of InstructionKind */ class InstructionKindRegEntry { public: static InstructionKindRegEntry& RegisterOrGet(const String& name); InstructionKindRegEntry& set_name() { get_mutable()->name = this->name; return *this; } InstructionKindRegEntry& set_is_pure(bool is_pure) { get_mutable()->is_pure = is_pure; return *this; } InstructionKindRegEntry& set_apply_to_schedule(FInstructionApply f_apply_to_schedule) { get_mutable()->f_apply_to_schedule = std::move(f_apply_to_schedule); return *this; } InstructionKindRegEntry& set_as_python(FInstructionAsPython f_as_python) { get_mutable()->f_as_python = std::move(f_as_python); return *this; } InstructionKindRegEntry& set_attrs_as_json(FInstructionAttrsAsJSON f_attrs_as_json) { get_mutable()->f_attrs_as_json = std::move(f_attrs_as_json); return *this; } InstructionKindRegEntry& set_attrs_from_json(FInstructionAttrsFromJSON f_attrs_from_json) { get_mutable()->f_attrs_from_json = std::move(f_attrs_from_json); return *this; } private: /*! \brief Private constructor, used only by AttrRegistry */ explicit InstructionKindRegEntry(uint32_t reg_index); /*! \brief Get the mutable reference to the internal InstructionKind */ InstructionKindNode* get_mutable() const { return const_cast<InstructionKindNode*>(inst_kind_.get()); } /*! \brief The name of the registry entry */ String name; /*! \brief The instruction kind */ InstructionKind inst_kind_; template <typename, typename> friend class ::tvm::AttrRegistry; friend class InstructionKind; }; } // namespace tir } // namespace tvm #endif // TVM_TIR_SCHEDULE_INSTRUCTION_H_
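/*!
 * \brief A sketch of constructing an Instruction by hand; the `block_rv`
 * random variable is assumed to exist, and the kind is looked up by its
 * registered name as shown in the registration example above.
 *
 * \code
 *
 * InstructionKind kind = InstructionKind::Get("ComputeInline");
 * Array<ObjectRef> inputs = {block_rv};
 * Instruction inst(kind, inputs, {}, {});  // no attrs, no outputs
 *
 * \endcode
 */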
include/tvm/tir/schedule/schedule.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ #ifndef TVM_TIR_SCHEDULE_SCHEDULE_H_ #define TVM_TIR_SCHEDULE_SCHEDULE_H_ #include <tvm/support/random_engine.h> #include <tvm/tir/index_map.h> #include <tvm/tir/schedule/state.h> #include <tvm/tir/schedule/trace.h> namespace tvm { namespace tir { /*! \brief The level of detailed error message rendering */ enum class ScheduleErrorRenderLevel : int32_t { /*! \brief Render a detailed error message */ kDetail = 0, /*! \brief Render the error in fast mode */ kFast = 1, /*! \brief No error message at all */ kNone = 2, }; /*! \brief Type of buffer index */ enum class BufferIndexType : int32_t { /*! \brief Index of a read buffer */ kRead = 0, /*! \brief Index of a written buffer */ kWrite = 1, }; /**************** Random variable: BlockRV ****************/ /*! \brief A random variable that evaluates to a TensorIR block */ class BlockRVNode : public runtime::Object { public: void VisitAttrs(tvm::AttrVisitor* v) {} static constexpr const char* _type_key = "tir.BlockRV"; TVM_DECLARE_FINAL_OBJECT_INFO(BlockRVNode, runtime::Object); }; /*! * \brief Managed reference to BlockRVNode * \sa BlockRVNode */ class BlockRV : public runtime::ObjectRef { public: /*! \brief Constructor */ TVM_DLL BlockRV(); TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(BlockRV, runtime::ObjectRef, BlockRVNode); }; /**************** Random variable: LoopRV ****************/ /*! \brief A random variable that evaluates to a TensorIR for loop */ class LoopRVNode : public runtime::Object { public: void VisitAttrs(tvm::AttrVisitor* v) {} static constexpr const char* _type_key = "tir.LoopRV"; TVM_DECLARE_FINAL_OBJECT_INFO(LoopRVNode, runtime::Object); }; /*! * \brief Managed reference to LoopRVNode * \sa LoopRVNode */ class LoopRV : public runtime::ObjectRef { public: /*! \brief Constructor */ TVM_DLL LoopRV(); TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(LoopRV, runtime::ObjectRef, LoopRVNode); }; /**************** Random variable: ExprRV ****************/ /*! \brief An expr random variable */ using ExprRV = PrimExpr; using ExprRVNode = PrimExprNode; /**************** The Schedule class ****************/ class Schedule; /*! \brief The user-facing schedule class */ class ScheduleNode : public runtime::Object { friend class Schedule; public: virtual ~ScheduleNode() = default; static constexpr const char* _type_key = "tir.Schedule"; TVM_DECLARE_FINAL_OBJECT_INFO(ScheduleNode, runtime::Object); public: /*! \brief Get the IRModule associated with this schedule. */ virtual IRModule mod() const { return state()->mod; } /*! \return The internal state of scheduling */ virtual ScheduleState state() const = 0; /*! \return The internally maintained trace of scheduling program execution */ virtual Optional<Trace> trace() const = 0; /*! 
* \brief Instruct the schedule to work on a function in the IRModule. * * By default, the schedule works on the function with the name "main", or the only function in * the IRModule if there is only one. If there are multiple functions in the IRModule, and none of * their names are "main", users will have to call this method to explicitly specify which * function to work on. * * This sugar function will guide the `GetBlock` method if its `func_name` is not specified. * * \param func_name The name of the function to be worked on * * \sa GetBlock */ virtual void WorkOn(const String& func_name) = 0; /*! * \brief Returns a copy of the schedule, including both its state and its symbol table, * guaranteeing that * 1) SRef tree is completely reconstructed; * 2) The IRModule being scheduled is not modified; * 3) All the random variables are valid in the copy, pointing to the corresponding sref * reconstructed */ virtual Schedule Copy() = 0; /*! * \brief Seed the randomness * \param seed The new random seed, -1 to use device random, otherwise non-negative */ virtual void Seed(support::LinearCongruentialEngine::TRandState seed) = 0; /*! \brief Fork the random state */ virtual support::LinearCongruentialEngine::TRandState ForkSeed() = 0; public: /******** Lookup/Remove random variables ********/ /*! * \brief Get the block corresponding to the specific BlockRV * \param block_rv The BlockRV to be looked up * \return The corresponding block */ virtual Block Get(const BlockRV& block_rv) const = 0; /*! * \brief Get the for loop corresponding to the specific LoopRV * \param loop_rv The LoopRV to be looked up * \return The corresponding for loop */ virtual For Get(const LoopRV& loop_rv) const = 0; /*! * \brief Get the expr corresponding to the specific random variable * \param expr_rv The random variable to be looked up * \return The corresponding expr */ virtual PrimExpr Get(const ExprRV& expr_rv) const = 0; /*! * \brief Get the block sref corresponding to the specific BlockRV * \param block_rv The BlockRV to be looked up * \return The corresponding block sref */ virtual StmtSRef GetSRef(const BlockRV& block_rv) const = 0; /*! * \brief Get the loop sref corresponding to the specific LoopRV * \param loop_rv The LoopRV to be looked up * \return The corresponding loop sref */ virtual StmtSRef GetSRef(const LoopRV& loop_rv) const = 0; /*! * \brief Check the existence of a specific BlockRV * \param block_rv The BlockRV to be looked up * \return Whether the corresponding block exists */ virtual bool HasBlock(const BlockRV& block_rv) const = 0; /*! * \brief Get the block/loop sref corresponding to the specific statement * \param stmt The statement to be looked up * \return The corresponding block/loop sref */ virtual StmtSRef GetSRef(const StmtNode* stmt) const; /*! * \brief Get the block/loop sref corresponding to the specific statement * \param stmt The statement to be looked up * \return The corresponding block/loop sref */ StmtSRef GetSRef(const Stmt& stmt) const { return this->GetSRef(stmt.get()); } /*! * \brief Remove a block random variable from the symbol table * \param block_rv The random variable to be removed */ virtual void RemoveRV(const BlockRV& block_rv) = 0; /*! * \brief Remove a loop random variable from the symbol table * \param loop_rv The random variable to be removed */ virtual void RemoveRV(const LoopRV& loop_rv) = 0; /*!
   * \brief Remove an integer random variable from the symbol table
   * \param expr_rv The random variable to be removed
   */
  virtual void RemoveRV(const ExprRV& expr_rv) = 0;

 public:
  /******** Schedule: Sampling ********/
  /*!
   * \brief Sample an integer given the probability distribution
   * \param candidates The candidates
   * \param probs The probability distribution of the candidates
   * \param decision The sampling decision
   * \return The random variable sampled from candidates
   */
  virtual ExprRV SampleCategorical(const Array<Integer>& candidates, const Array<FloatImm>& probs,
                                   Optional<Integer> decision = NullOpt) = 0;
  /*!
   * \brief Sample the factors to perfectly tile a specific loop
   * \param loop_rv The loop to be tiled
   * \param n The number of tiles to be sampled
   * \param max_innermost_factor The maximum tile size allowed to be sampled in the innermost loop
   * \param decision The sampling decision
   * \return A list of length `n`, the random perfect tile sizes sampled
   */
  virtual Array<ExprRV> SamplePerfectTile(const LoopRV& loop_rv, int n, int max_innermost_factor,
                                          Optional<Array<Integer>> decision = NullOpt) = 0;
  /*!
   * \brief Sample a compute-at location of the given block
   * \param block_rv The block whose compute-at location is to be sampled
   * \param decision The sampling decision
   * \return The sampled loop where the input block is to be computed at
   */
  virtual LoopRV SampleComputeLocation(const BlockRV& block_rv,
                                       Optional<Integer> decision = NullOpt) = 0;

  /******** Schedule: Get blocks & loops ********/
  /*!
   * \brief Retrieve a block in a specific function by its name
   *
   * By default, if `func_name` is not specified, the schedule will search for the block in the
   * function that is currently being "worked on". To switch the function to be worked on, use
   * `WorkOn` before calling this method.
   *
   * \param name The name of the block to be retrieved
   * \param func_name The name of the function
   * \return The block retrieved
   * \note An indexing error is raised if zero or multiple blocks exist with the specific name
   *
   * \sa WorkOn
   */
  virtual BlockRV GetBlock(const String& name, const Optional<String>& func_name = NullOpt) = 0;
  /*!
   * \brief Get the parent loops of the block in its scope, from outer to inner
   * \param block_rv The query block
   * \return A list of loops above the given block in its scope, from outer to inner
   */
  virtual Array<LoopRV> GetLoops(const BlockRV& block_rv) = 0;
  /*!
   * \brief Get the leaf blocks of a specific scope
   * \param block_rv The block where the scope is rooted
   * \return A list of child blocks
   */
  virtual Array<BlockRV> GetChildBlocks(const BlockRV& block_rv) = 0;
  /*!
   * \brief Get the leaf blocks under a specific loop
   * \param loop_rv The loop under which collecting is conducted
   * \return A list of child blocks
   */
  virtual Array<BlockRV> GetChildBlocks(const LoopRV& loop_rv) = 0;
  /*!
   * \brief Get the producers of a specific block, under the same block scope
   * \param block_rv The block to be queried
   * \return A list of blocks, the producers of the given block under the same scope of the given
   * block
   */
  virtual Array<BlockRV> GetProducers(const BlockRV& block_rv) = 0;
  /*!
   * \brief Get the consumers of a specific block, under the same block scope
   * \param block_rv The block to be queried
   * \return A list of blocks, the consumers of the given block under the same scope of the given
   * block
   */
  virtual Array<BlockRV> GetConsumers(const BlockRV& block_rv) = 0;

  /******** Schedule: Transform loops ********/
  /*!
   * \brief Fuse a list of consecutive loops into one.
   * It requires:
   * 1) The loops can't have annotations or thread bindings.
   * 2) The (i+1)-th loop must be the only child of the i-th loop.
   * 3) All loops must start with 0.
   * 4) The domain of a loop to be fused cannot depend on another loop to be fused.
   * \param loop_rvs The loops to be fused
   * \param preserve_unit_iters Whether or not to preserve unit iterators in block bindings
   * \return The new loop after fusion
   */
  virtual LoopRV Fuse(const Array<LoopRV>& loop_rvs, bool preserve_unit_iters = true) = 0;
  /*!
   * \brief Split a loop into a list of consecutive loops. It requires:
   * 1) The loop can't have annotations or thread bindings.
   * 2) The loop must start with 0.
   * \param loop_rv The loop to be split
   * \param factors The positive tiling factors; at most one of them may be `NullOpt`, which means
   * that factor is inferred.
   * \param preserve_unit_iters Whether or not to preserve unit iterators in block bindings
   * \return The new loops after the split
   */
  virtual Array<LoopRV> Split(const LoopRV& loop_rv, const Array<Optional<ExprRV>>& factors,
                              bool preserve_unit_iters = true) = 0;
  /*!
   * \brief Reorder a list of loops. It doesn't require the loops to be consecutive.
   * It requires:
   * 1) The loops are in the same chain. That means: the loops can be ordered as [l_1, l_2, ... ,
   * l_n] where l_i is an ancestor of l_{i+1}, and there are only single-branch loops between
   * l_1 and l_n (which also indicates they are under the same scope).
   * 2) After reordering, the domain of an outer loop cannot depend on any of the inner loops.
   * 3) For every block under the loop nests, its block binding must be affine, and the block
   * variables must be either data parallel or reduction.
   * 4) No duplicated loops are allowed in the arguments.
   * \param ordered_loop_rvs The loops in the new order
   */
  virtual void Reorder(const Array<LoopRV>& ordered_loop_rvs) = 0;
  /*!
   * \brief Create a new unit loop on top of the specific block.
   * \param block_rv The block above which the new loop is created
   * \return The new loop created
   */
  virtual LoopRV AddUnitLoop(const BlockRV& block_rv) = 0;
  /*!
   * \brief Create a new unit loop on top of the specific loop.
   * \param loop_rv The loop above which the new loop is created
   * \return The new loop created
   */
  virtual LoopRV AddUnitLoop(const LoopRV& loop_rv) = 0;

  /******** Schedule: Manipulate ForKind ********/
  /*!
   * \brief Parallelize the input loop. It requires:
   * 1) The scope block that the loop is in should have the stage-pipeline property
   * 2) All the blocks under the loop are complete blocks or reduction blocks, and have affine
   * bindings
   * 3) For each block under the loop, the loop can only be contained in data-parallel block
   * iters' bindings
   * \param loop_rv The loop to be parallelized
   */
  virtual void Parallel(const LoopRV& loop_rv) = 0;
  /*!
   * \brief Vectorize the input loop. It requires:
   * 1) The scope block that the loop is in should have the stage-pipeline property
   * 2) All the blocks under the loop are complete blocks or reduction blocks, and have affine
   * bindings
   * 3) For each block under the loop, the loop can only be contained in data-parallel block
   * iters' bindings
   * \param loop_rv The loop to be vectorized
   */
  virtual void Vectorize(const LoopRV& loop_rv) = 0;
  /*!
   * \brief Bind the input loop to the given thread axis.
   * It requires:
   * 1) The scope block that the loop is in should have the stage-pipeline property
   * 2) All the blocks under the loop are complete blocks or reduction blocks, and have affine
   * bindings
   * 3) For each block under the loop, if the thread axis starts with "threadIdx", the loop can
   * only be contained in data-parallel block iters' and reduction block iters' bindings.
   * Otherwise the loop can only be contained in data-parallel block iters' bindings
   * \param loop_rv The loop to be bound to the thread axis
   * \param thread_axis The thread axis to be bound to the loop
   */
  virtual void Bind(const LoopRV& loop_rv, const String& thread_axis) = 0;
  /*!
   * \brief Unroll the input loop. It requires nothing
   * \param loop_rv The loop to be unrolled
   */
  virtual void Unroll(const LoopRV& loop_rv) = 0;

  /******** Schedule: Insert cache stages ********/
  /*!
   * \brief Create a block that reads a buffer region into a read cache. It requires:
   * 1) There is at most one block that writes the buffer in the scope.
   * 2) The scope block has the stage-pipeline property.
   * \param block_rv The consumer block of the target buffer.
   * \param read_buffer_index The index of the buffer in the block's read region.
   * \param storage_scope The target storage scope.
   * \param consumer_blocks An optional list of consumers of the cache to rewrite.
   * \return The cache stage block.
   */
  virtual BlockRV CacheRead(const BlockRV& block_rv, int read_buffer_index,
                            const String& storage_scope,
                            const Array<BlockRV> consumer_blocks = {}) = 0;
  /*!
   * \brief Create a block that writes a buffer region into a write cache. It requires:
   * 1) There is only one block that writes the target buffer.
   * 2) The scope block has the stage-pipeline property.
   * \param block_rv The producer of the buffer
   * \param write_buffer_index The index of the buffer in the block's write region
   * \param storage_scope The target storage scope
   * \return The cache stage block.
   */
  virtual BlockRV CacheWrite(const BlockRV& block_rv, int write_buffer_index,
                             const String& storage_scope) = 0;
  /*!
   * \brief Create 2 blocks that read & write a buffer region into a read/write cache.
   * It requires that the target block both reads & writes the target buffer.
   * \param block_rv The target block that operates on the target buffer.
   * \param read_buffer_index The index of the buffer in the block's read region.
   * \param storage_scope The target storage scope
   * \return The cache stage blocks, the cache read block together with the cache write block.
   */
  virtual Array<BlockRV> CacheInplace(const BlockRV& block_rv, int read_buffer_index,
                                      const String& storage_scope) = 0;
  /*!
   * \brief Create a block to cache precomputed indices for later use.
   * If there is no index computation, the block is kept unchanged.
   * \param block_rv The target block
   * \param buffer_index The index of the target buffer in the block's read region
   * \return The cache stage blocks.
   */
  virtual Array<BlockRV> CacheIndex(const BlockRV& block_rv, int buffer_index) = 0;
  /*!
   * \brief Create a block that reads/writes a buffer region into a read/write cache with
   * reindexing. The layout of the cache will be the same as given by the iterators of the block
   * that reads/writes the buffer. It requires:
   * 1) There is only one block that reads/writes the target buffer
   * 2) There is only one buffer load/store of this buffer in the block
   * \param block_rv The block that operates on the target buffer.
   * \param buffer_index The index of the buffer in the block's read or write region.
   * \param buffer_index_type The type of the buffer index, kRead or kWrite.
   * \return The reindex stage block.
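   *
   * A hedged usage sketch (the block and the buffer index are hypothetical):
   * \code
   * BlockRV reindexed = sch->ReIndex(block, 0, BufferIndexType::kRead);
   * \endcode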
   */
  virtual BlockRV ReIndex(const BlockRV& block_rv, int buffer_index,
                          BufferIndexType buffer_index_type) = 0;

  /******** Schedule: Compute location ********/
  /*!
   * \brief Move a producer block under the specific loop, and regenerate the
   * loops induced by the block so that the buffer region produced by the producer block could
   * cover those regions consumed by its consumer blocks under the given loop. It requires:
   * 1) `block` and `loop` are under the same scope, and `loop` is not an ancestor of `block`
   * 2) The scope block has the stage-pipeline property
   * 3) The subtree of the scope block, where the given block is in, satisfies the compact
   * dataflow condition, i.e. all the blocks in the scope block's subtree must be either complete
   * blocks or reduction blocks
   * 4) The block is not an output block with regard to the scope block, i.e. the buffers written
   * by the block are allocated under the scope block
   * 5) All the consumers of the block are under the given loop
   * \param block_rv The block to be moved
   * \param loop_rv The loop under which the block is to be moved
   * \param preserve_unit_loops Whether to keep the trivial loops whose extents are 1
   * \param index The block index of the loop body subtree blocks:
   * - `index = -1` means inserted into the last possible insertion point;
   * - `index = -2` means inserted into the first possible insertion point;
   * - Otherwise, `index` is a nonnegative number that indicates the insertion point
   */
  virtual void ComputeAt(const BlockRV& block_rv, const LoopRV& loop_rv, bool preserve_unit_loops,
                         int index = -1) = 0;
  /*!
   * \brief Move a consumer block under the specific loop, and regenerate the
   * loops induced by the block so that the buffer region consumed by the consumer block could
   * cover those regions produced by its producer blocks under the given loop. It requires:
   * 1) `block` and `loop` are under the same scope, and `loop` is not an ancestor of `block`
   * 2) The scope block has the stage-pipeline property
   * 3) The subtree of the scope block, where the given block is in, satisfies the compact
   * dataflow condition, i.e. all the blocks in the scope block's subtree must be either complete
   * blocks or reduction blocks
   * 4) All the producers of the block are under the given loop
   *
   * \param block_rv The block to be moved
   * \param loop_rv The loop under which the block is to be moved
   * \param preserve_unit_loops Whether to keep the trivial loops whose extents are 1
   * \param index The block index of the loop body subtree blocks:
   * - `index = -1` means inserted into the last possible insertion point;
   * - `index = -2` means inserted into the first possible insertion point;
   * - Otherwise, `index` is a nonnegative number that indicates the insertion point
   */
  virtual void ReverseComputeAt(const BlockRV& block_rv, const LoopRV& loop_rv,
                                bool preserve_unit_loops, int index = -1) = 0;
  /*!
   * \brief Inline a block into its consumer(s). It requires:
   * 1) The block is a complete non-root block that only produces one buffer
   * 2) The block must not be the only leaf in the scope.
   * 3) The body of the block must be a BufferStore statement in the form of,
   * A[i, j, k, ...] = ...
   * where the indices of the LHS are all distinct atomic variables,
   * and no variables other than those indexing variables are allowed in the statement.
   * \param block The block to be inlined into its consumer(s)
   */
  virtual void ComputeInline(const BlockRV& block) = 0;
  /*!
   * \brief Inline a block into its only producer.
   * It requires:
   * 1) The block is a complete non-root block that only produces and consumes one buffer
   * 2) The block must not be the only leaf in the scope.
   * 3) The only producer of the block is a read-after-write producer and a complete non-root
   * block
   * 4) The body of the block must be a BufferStore statement in the form of,
   * B[f(i, j, k, ...)] = g(i, j, k, A[i, j, k, ...] ...)
   * where the indices of each `BufferLoad` on the RHS are all distinct atomic variables,
   * and no variables other than those indexing variables are allowed in the statement.
   * \param block The block to be inlined into its producer
   */
  virtual void ReverseComputeInline(const BlockRV& block) = 0;

  /******** Schedule: Reduction ********/
  /*!
   * \brief Decompose a reduction block into two separate blocks.
   * a) The init block, which is translated from the init statement of the reduction block;
   * b) The update block, which is the original block without the init statement.
   *
   * The init block is inserted right before the given loop.
   *
   * The schedule primitive requires:
   * 1) The input block is a reduction block.
   * 2) The input loop is an ancestor of the block.
   * 3) The input loop is not lower than all the loops related to the reduction block vars.
   * \param block_rv The reduction block to be decomposed
   * \param loop_rv The loop above which the init block is inserted
   * \return The init block
   */
  virtual BlockRV DecomposeReduction(const BlockRV& block_rv, const LoopRV& loop_rv) = 0;
  /*!
   * \brief Factorize an associative reduction block by the specified loop.
   * \details An associative reduction cannot be parallelized directly,
   * because it leads to potential race conditions during accumulation.
   * Alternatively, the reduction could be factorized on a loop with the following steps:
   * - Step 1: evenly slice the reduction into `n` separate chunks, where `n` is the loop extent;
   * - Step 2: compute the chunks separately and write the result into `n` intermediate buffers;
   * - Step 3: accumulate the `n` separate buffers into the result buffer.
   * Note that Step 2 above introduces opportunities for parallelization.
   * RFactor is a schedule primitive that implements the transformation described above.
   * \param loop_rv The loop outside the block on which we want to perform rfactor
   * \param factor_axis The position where the new dimension is placed in the newly introduced
   * rfactor buffer. Suppose the original reduction block writes to buffer `B` with
   * ndim(B) dimensions, then `factor_axis` should be in range `[-ndim(B) - 1,
   * ndim(B)]`, and the negative index will be normalized to a non-negative one
   * \return The rfactor block
   */
  virtual BlockRV RFactor(const LoopRV& loop_rv, int factor_axis) = 0;

  /******** Schedule: Block annotation ********/
  /*!
   * \brief Set an alignment requirement for a specific dimension such that
   * stride[axis] == k * factor + offset for some k. This is useful to set the memory layout for
   * a more friendly memory access pattern. For example, we can set alignment to be factor=2,
   * offset=1 to avoid bank conflicts for thread access on a higher dimension in GPU shared
   * memory.
   * \param block_rv The producer block of the buffer
   * \param buffer_index The index of the buffer in the block's write region
   * \param axis The dimension to be specified for alignment
   * \param factor The factor multiple of alignment
   * \param offset The required offset factor
   */
  virtual void StorageAlign(const BlockRV& block_rv, int buffer_index, int axis, int factor,
                            int offset) = 0;
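  /*
   * A hedged sketch of the bank-conflict example above (the block and axis choices
   * are hypothetical): require stride[1] == 2 * k + 1 for some k on the block's
   * first write buffer.
   * \code
   * sch->StorageAlign(block, 0, 1, 2, 1);
   * \endcode
   */
  /*!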
   * \brief Set the storage scope of a buffer, where the buffer is specified by a block and a
   * write index
   * \param block_rv The producer block of the buffer
   * \param buffer_index The index of the buffer in the block's write region
   * \param storage_scope The storage scope to be set
   */
  virtual void SetScope(const BlockRV& block_rv, int buffer_index,
                        const String& storage_scope) = 0;

  /******** Schedule: Blockize & Tensorize ********/
  /*!
   * \brief Convert the subtree rooted at a specific loop into a block.
   * \param loop_rv The root of the subtree
   * \return The new block
   */
  virtual BlockRV Blockize(const LoopRV& loop_rv) = 0;
  /*!
   * \brief Tensorize the computation enclosed by the loop with the tensor intrinsic.
   * \param loop_rv The loop to be tensorized
   * \param intrin Name of the tensor intrinsic
   */
  virtual void Tensorize(const LoopRV& loop_rv, const String& intrin) = 0;
  /*!
   * \brief Tensorize the computation enclosed by the block with the tensor intrinsic.
   * \param block_rv The block to be tensorized
   * \param intrin Name of the tensor intrinsic
   */
  virtual void Tensorize(const BlockRV& block_rv, const String& intrin) = 0;

  /******** Schedule: Annotation ********/
  /*!
   * \brief Annotate a loop with a key-value pair
   * \param loop_rv The loop to be annotated
   * \param ann_key The annotation key
   * \param ann_val The annotation value, a string or an ExprRV
   */
  virtual void Annotate(const LoopRV& loop_rv, const String& ann_key,
                        const ObjectRef& ann_val) = 0;
  /*!
   * \brief Annotate a block with a key-value pair
   * \param block_rv The block to be annotated
   * \param ann_key The annotation key
   * \param ann_val The annotation value, a string or an ExprRV
   */
  virtual void Annotate(const BlockRV& block_rv, const String& ann_key,
                        const ObjectRef& ann_val) = 0;
  /*!
   * \brief Unannotate a loop's annotation with key ann_key
   * \param loop_rv The loop to be unannotated
   * \param ann_key The annotation key
   */
  virtual void Unannotate(const LoopRV& loop_rv, const String& ann_key) = 0;
  /*!
   * \brief Unannotate a block's annotation with key ann_key
   * \param block_rv The block to be unannotated
   * \param ann_key The annotation key
   */
  virtual void Unannotate(const BlockRV& block_rv, const String& ann_key) = 0;

  /******** Schedule: Layout transformation ********/
  /*!
   * \brief Apply a transformation represented by an IndexMap to a buffer
   * \details The indices and the access region of the target buffer are transformed by the given
   * index_map. The index_map is used to infer the new shape of the buffer. The buffer must be
   * either a function parameter, or allocated in a block (it cannot be a buffer subregion
   * created via 'match_buffer').
   * \param block_rv The block that accesses the target buffer.
   * \param buffer_index The index of the buffer in the block's read or write region.
   * \param buffer_index_type The type of the buffer index, kRead or kWrite.
   * \param index_map The transformation to apply.
   *
   * \param pad_value The value to write into padding introduced by
   * the transformation. If the schedule contains a producer block
   * for the specified buffer, the pad value will be written as
   * part of the producer block if possible, or after the producer
   * block otherwise. Otherwise, if the buffer is an input, an
   * annotation block will be inserted to state that the padding
   * contains the known value.
   *
   * Note: If applied to an input buffer, the calling scope is
   * responsible for ensuring that the pad_value is present.
   * Algebraic simplifications, branch elimination, and other
   * optimizations may assume that this precondition is met, and
   * may result in incorrect results being returned.
   */
  virtual void TransformLayout(const BlockRV& block_rv, int buffer_index,
                               BufferIndexType buffer_index_type, const IndexMap& index_map,
                               const Optional<IndexMap>& pad_value = NullOpt) = 0;
  /*!
   * \brief Apply a transformation represented by an IndexMap to a block
   * \details The block iters and the block body are transformed by the given index_map.
   * Outer loops corresponding to each new block iter are regenerated.
   * The index_map is required to be a bijective affine map since we need its inverse mapping.
   * \param block_rv The block to be transformed
   * \param index_map The transformation to apply.
   */
  virtual void TransformBlockLayout(const BlockRV& block_rv, const IndexMap& index_map) = 0;
  /*!
   * \brief Set the axis separators of a buffer, where the buffer is specified by a block and a
   * read or write index
   * \param block_rv The block that accesses the target buffer.
   * \param buffer_index The index of the buffer in the block's read or write region.
   * \param buffer_index_type The type of the buffer index, kRead or kWrite.
   * \param axis_separators The axis separators of the buffer
   */
  virtual void SetAxisSeparator(const BlockRV& block_rv, int buffer_index,
                                BufferIndexType buffer_index_type,
                                const Array<IntImm>& axis_separators) = 0;

  /******** Schedule: Padding ********/
  /*!
   * \brief Decompose a padding block into a block filling const pad values and a block
   * writing in-bound values.
   * \param block_rv The block that matches the padding pattern.
   * \param loop_rv The loop above which the const filling block is inserted.
   * \return The const pad value filling block.
   */
  virtual BlockRV DecomposePadding(const BlockRV& block_rv, const LoopRV& loop_rv) = 0;
  /*!
   * \brief Pad the computation of Einsum.
   * \param block_rv The block that matches the Einsum pattern.
   * \param padding The padding for each block iter.
   * \details This schedule primitive identifies the Einsum pattern in the block body and finds
   * its producer blocks. It then pads the computation of the Einsum pattern and its producer
   * blocks. The output buffer and the producer buffers are resized according to the padding
   * size. It requires the output buffer and the producer buffers to be allocated inside the
   * PrimFunc.
   *
   * The padding is a list of non-negative integers, each element corresponding to the padding of
   * the respective block iter, in the order of the block iters. The block and its producer
   * blocks should have trivial bindings, i.e. each block iter is bound to a single loop
   * variable. After padding, the block iter extent and the corresponding outer loop are extended
   * by the padding size.
   *
   * The sizes of the producer buffers are inferred from the padding size of the Einsum
   * computation. The producer buffers are padded by the initial value of the corresponding
   * reduction.
   */
  virtual void PadEinsum(const BlockRV& block_rv, const Array<Integer>& padding) = 0;

  /******** Schedule: Buffer transformation ********/
  /*!
   * \brief Compute the target buffer via rolling buffering.
   * \details This primitive selects the outermost rollable axis with a positive bound overlap
   * that appears in the block's ancestor loops as the `rolling axis`, folds and circularizes the
   * buffer along the rolling dimension, and appends a block predicate to avoid recomputing
   * overlapping elements. It requires:
   * 1) The buffer to be an intermediate buffer defined via `alloc_buffer`.
   * 2) The LCA of the producer and consumer of the buffer is a for loop; typically,
   * the producer and consumer of the buffer are cascaded through compute_at.
   * 3) The access region of the buffer has at least one dimension that contains
   * a positive bound overlap.
   * \param block_rv The producer block of the buffer.
   * \param write_buffer_index The index of the buffer in the block's write region.
   */
  virtual void RollingBuffer(const BlockRV& block_rv, int write_buffer_index) = 0;

  /******** Schedule: Misc ********/
  /*! \brief A no-op that marks the start of the postprocessing phase of scheduling */
  virtual void EnterPostproc() = 0;
};

/*!
 * \brief Managed reference to ScheduleNode
 *
 * A schedule is a set of transformations that change the order of computation but
 * preserve the semantics of computation. Some examples of schedules:
 * 1) Split a loop into two;
 * 2) Reorder two loops;
 * 3) Inline the computation of a specific buffer into its consumer
 *
 * The schedule class stores auxiliary information to schedule correctly and efficiently.
 *
 * Link to tutorial: https://tvm.apache.org/docs/tutorials/language/schedule_primitives.html
 *
 * \sa ScheduleNode
 */
class Schedule : public runtime::ObjectRef {
 public:
  /*!
   * \brief Construct a concrete TensorIR schedule from an IRModule
   * \param mod The IRModule to be scheduled
   * \param seed The seed value for the schedule's random state
   * \param debug_mask Do extra correctness checking after the class creation
   * and each time after calling the Replace method.
   * \param error_render_level The level of error rendering
   * \return The concrete schedule created
   * \sa ScheduleDebugMask
   * \note The checks performed include:
   * 1) VerifySRefTree
   * 2) VerifyCachedFlags
   */
  TVM_DLL static Schedule Concrete(IRModule mod,
                                   support::LinearCongruentialEngine::TRandState seed,
                                   int debug_mask, ScheduleErrorRenderLevel error_render_level);
  /*!
   * \brief Construct a traced concrete TensorIR schedule from an IRModule
   * \param mod The IRModule to be scheduled
   * \param seed The seed value for the schedule's random state
   * \param debug_mask Do extra correctness checking after the class creation
   * and each time after calling the Replace method.
   * \param error_render_level The level of error rendering
   * \return The traced schedule created
   * \sa ScheduleDebugMask
   * \note The checks performed include:
   * 1) VerifySRefTree
   * 2) VerifyCachedFlags
   */
  TVM_DLL static Schedule Traced(IRModule mod, support::LinearCongruentialEngine::TRandState seed,
                                 int debug_mask, ScheduleErrorRenderLevel error_render_level);

  TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(Schedule, runtime::ObjectRef, ScheduleNode);
};

}  // namespace tir
}  // namespace tvm

#endif  // TVM_TIR_SCHEDULE_SCHEDULE_H_
https://github.com/zk-ml/tachikoma
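The schedule API above is typically driven in a create / query / transform pattern. A minimal end-to-end sketch (hedged: `mod` is a user-provided IRModule assumed to contain a block named "C"; the tiling choices are arbitrary):

  Schedule sch = Schedule::Traced(mod, -1, 0, ScheduleErrorRenderLevel::kDetail);
  BlockRV block = sch->GetBlock("C");
  Array<LoopRV> loops = sch->GetLoops(block);
  // Sample a 2-way perfect tiling of the outer loop, then split and bind it
  Array<ExprRV> tiles = sch->SamplePerfectTile(loops[0], 2, 64);
  Array<LoopRV> split = sch->Split(loops[0], {tiles[0], tiles[1]});
  sch->Bind(split[0], "blockIdx.x");
  sch->Bind(split[1], "threadIdx.x");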
include/tvm/tir/schedule/state.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
/*!
 * \file tvm/tir/schedule/state.h
 * \brief This file defines ScheduleState, the core data structure of TensorIR scheduling.
 */
#ifndef TVM_TIR_SCHEDULE_STATE_H_
#define TVM_TIR_SCHEDULE_STATE_H_

#include <tvm/ir/module.h>
#include <tvm/tir/function.h>
#include <tvm/tir/schedule/block_scope.h>

#include <unordered_map>
#include <utility>

namespace tvm {
namespace tir {

/*!
 * \brief The information about a TensorIR block. It contains two categories of information:
 * 1) Info on the block scope rooted at a specific block, including dependency tracking,
 * flags indicating if the scope is a stage pipeline, etc.
 * 2) Info on the block itself, including if the block has a quasi-affine binding, if the regions
 * it reads are completely covered by their producers, etc.
 */
struct BlockInfo {
  /*! \brief Property of a block scope rooted at the block, storing dependencies in the scope */
  BlockScope scope{nullptr};
  // The properties below are information about the current block realization under its parent
  // scope
  /*! \brief Property of a block, indicating the block realization binding is quasi-affine */
  bool affine_binding{false};
  /*!
   * \brief Property of a block, indicating each of the block's read regions is fully
   * produced by its producers
   */
  bool region_cover{false};

  BlockInfo() = default;

  explicit BlockInfo(BlockScope scope, bool affine_binding = false, bool region_cover = false)
      : scope(std::move(scope)),
        affine_binding(affine_binding),
        region_cover(region_cover) {}
};

/*!
 * \brief The bitmask of the debug flag in the ScheduleStateNode.
 * \sa ScheduleStateNode
 */
enum ScheduleDebugMask : uint32_t {
  /*! \brief Verify the correctness of the sref tree */
  kVerifySRefTree = 1,
  /*! \brief Verify the correctness of affine_binding, region_cover and stage_pipeline */
  kVerifyCachedFlags = 2,
};

/*!
 * \brief The state of scheduling, which exposes a `Replace` method as
 * the primary interface for all the scheduling primitives to manipulate the TensorIR.
 *
 * The data structure contains the following information:
 * 1) The AST being scheduled (mod)
 * 2) The sref tree of schedulable statements (indicated by the srefs)
 * 3) The dependency information of each block scope (block_info)
 * 4) A reverse mapping from the AST nodes to the srefs in the sref tree (stmt2ref)
 * 5) A debug flag; if set, extra checking is enabled (debug_mask)
 */
class ScheduleStateNode : public Object {
 public:
  /*! \brief The AST of the module being scheduled */
  IRModule mod;
  /*!
   * \brief Mapping from a block sref to its corresponding BlockInfo,
   * tracking the dependency inside the block scope,
   * and storing necessary information flags for scheduling
   */
  std::unordered_map<StmtSRef, BlockInfo, ObjectPtrHash, ObjectPtrEqual> block_info;
  /*!
   * \brief The reverse mapping from block/for-loop to their corresponding srefs
   */
  std::unordered_map<const StmtNode*, StmtSRef> stmt2ref;
  /*!
   * \brief Do extra correctness checking after the class creation
   * and each time after calling the Replace method.
   * \sa ScheduleDebugMask
   */
  int debug_mask;

  void VisitAttrs(AttrVisitor* v) {
    v->Visit("mod", &mod);
    // `block_info` is not visited
    // `stmt2ref` is not visited
    v->Visit("debug_mask", &debug_mask);
  }

  /*!
   * \brief Replace the part of the AST, as being pointed to by `src_sref`,
   * with a specific statement `tgt_stmt`, and maintain the sref tree accordingly.
   * Replace will try to perform copy-on-write as much as possible when the ScheduleState holds
   * the only copy of the IRModule and IR nodes.
   *
   * Only 3 types of replacements are allowed: from `src_sref->stmt` to `tgt_stmt`.
   * 1) Block -> Block
   * 2) Loop -> Loop
   * 3) Loop -> BlockRealize
   *
   * \param src_sref The sref to the statement to be replaced
   * \param tgt_stmt The statement to be replaced in
   * \param block_sref_reuse Maps an old block (to be replaced in the subtree under
   * `src_sref->stmt`) to a new block (replaced to, in the subtree under `tgt_stmt`), and
   * enforces reuse of srefs between them (rather than creating new srefs), i.e. after being
   * replaced, the sref that points to the old block will point to the new one
   * \note The reuse of loop srefs is detected automatically according to the reuse of loop vars.
   */
  TVM_DLL void Replace(const tir::StmtSRef& src_sref, const Stmt& tgt_stmt,
                       const Map<Block, Block>& block_sref_reuse);
  /*!
   * \brief Trigger the verification according to the `debug_mask` bitmask.
   * 1) If the bitmask `kVerifySRefTree` is on, verify the correctness of the sref tree.
   * 2) If the bitmask `kVerifyCachedFlags` is on, verify the correctness of `affine_binding`,
   * `region_cover` and `stage_pipeline`
   */
  TVM_DLL void DebugVerify() const;

  static constexpr const char* _type_key = "tir.ScheduleState";
  TVM_DECLARE_FINAL_OBJECT_INFO(ScheduleStateNode, Object);

  /******** Property of blocks ********/
  /*! \brief Returns the BlockInfo corresponding to the block sref */
  TVM_DLL BlockInfo GetBlockInfo(const StmtSRef& block_sref) const;
  /*!
   * \brief Recalculate the BlockInfo recursively under stmt.
   * If stmt is a Block itself, we will not reset its affine binding flag unless it doesn't
   * have block vars, since the affine flag depends on the outer scope of stmt.
   */
  TVM_DLL void UpdateScopeBlockInfo(const Stmt& stmt);
  /*!
   * \brief Get the BlockScope corresponding to the sref of the scope root block
   * \param scope_root The block sref to be retrieved
   * \return The corresponding BlockScope
   */
  BlockScope GetBlockScope(const StmtSRef& scope_root) const {
    return GetBlockInfo(scope_root).scope;
  }
  /*!
   * \brief Check a cached flag indicating if the specific block has quasi-affine bindings
   * \param block_sref The block sref to be checked
   * \return A boolean flag indicating if the block has quasi-affine bindings
   */
  bool IsAffineBlockBinding(const StmtSRef& block_sref) const {
    return GetBlockInfo(block_sref).affine_binding;
  }
  /*!
   * \brief Check a cached flag indicating if each of the specific consumer block's read regions
   * is fully produced by its producers
   * \param consumer_block_sref The specific consumer block
   * \return A boolean flag indicating if each of the consumer block's read regions is fully
   * produced by its producers
   */
  bool IsRegionCoveredConsumer(const StmtSRef& consumer_block_sref) const {
    return GetBlockInfo(consumer_block_sref).region_cover;
  }
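  /*
   * A hedged sketch of querying the cached flags (assumes `mod` is a user-provided
   * IRModule and `block_sref` is a valid block sref inside it):
   * \code
   * ScheduleState state(mod, kVerifySRefTree | kVerifyCachedFlags);
   * bool affine = state->IsAffineBlockBinding(block_sref);
   * bool covered = state->IsRegionCoveredConsumer(block_sref);
   * \endcode
   */
  /*!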
   * \brief Check a cached flag indicating if a block scope is equivalent to a stage pipeline
   * \param scope_root The sref of the scope root block to be checked
   * \return A boolean flag indicating if the block scope is a stage pipeline
   */
  bool IsStagePipeline(const StmtSRef& scope_root) const {
    return GetBlockScope(scope_root)->stage_pipeline;
  }
};

/*!
 * \brief Managed reference to ScheduleStateNode
 * \sa ScheduleStateNode
 */
class ScheduleState : public ObjectRef {
 public:
  /*!
   * \brief Construct a schedule state from an IRModule
   * \param mod The IRModule to be scheduled
   * \param debug_mask Do extra correctness checking after the class creation
   * and each time after calling the Replace method.
   */
  TVM_DLL explicit ScheduleState(IRModule mod, int debug_mask = 0);

  /*! \return The mutable pointer to the ScheduleStateNode */
  ScheduleStateNode* get() const { return static_cast<ScheduleStateNode*>(data_.get()); }

  TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(ScheduleState, ObjectRef, ScheduleStateNode);
};

}  // namespace tir
}  // namespace tvm

#endif  // TVM_TIR_SCHEDULE_STATE_H_
https://github.com/zk-ml/tachikoma
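All scheduling primitives funnel their IR mutations through `ScheduleState::Replace`. A hedged sketch of the calling pattern (the statements involved are hypothetical; only the Block->Block, Loop->Loop and Loop->BlockRealize replacement kinds are accepted):

  ScheduleState state(mod, kVerifySRefTree | kVerifyCachedFlags);
  StmtSRef src_sref = state->stmt2ref.at(old_loop_stmt);  // sref of the loop to rewrite
  state->Replace(src_sref, new_loop_stmt, {});            // no block sref reuse requested
  state->DebugVerify();                                   // runs the checks in debug_mask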
include/tvm/tir/schedule/trace.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ #ifndef TVM_TIR_SCHEDULE_TRACE_H_ #define TVM_TIR_SCHEDULE_TRACE_H_ #include <tvm/tir/schedule/instruction.h> namespace tvm { namespace tir { // Forward declaration class Trace; /*! * \brief A callback that allows users to mutate decisions on the fly * when applying instructions. The signature of the callback is: * \param inst The instruction * \param inputs The input random variables * \param attrs The attributes * \param decision The original decision * \return A new decision */ using FTraceDecisionProvider = runtime::TypedPackedFunc<ObjectRef( const Instruction& inst, const Array<ObjectRef>& inputs, const Array<ObjectRef>& attrs, const Optional<ObjectRef>& decision)>; /*! * \brief An execution trace of a scheduling program * * A trace has two parts: * 1) The instructions invoked so far in the program execution * 2) The random decisions made upon those instructions, if any * * A trace can be serialized to: * 1) Roundtrippable JSON format: can be saved to file and loaded back * 2) Python syntax: allows users to copy-paste the trace to reproduce the scheduling process * * A trace can be applied to a TensorIR schedule by re-applying all its instructions possibly with * their decisions accordingly. Re-sampling is invoked if a sampling instruction doesn't have its * corresponding decision; Otherwise the existing decision will be reused accordingly. */ class TraceNode : public runtime::Object { public: /*! \brief The instructions invoked so far in the program execution */ Array<Instruction> insts; /*! \brief The random decisions made upon those instructions */ Map<Instruction, ObjectRef> decisions; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("insts", &insts); v->Visit("decisions", &decisions); } static constexpr const char* _type_key = "tir.Trace"; TVM_DECLARE_FINAL_OBJECT_INFO(TraceNode, runtime::Object); public: /*! * \brief Retrieve the decision made on a specific instruction * \param inst The instruction whose decision is to be retrieved * \return The corresponding decision; NullOpt if there is no decision made on the instruction */ Optional<ObjectRef> GetDecision(const Instruction& inst) const; /*! * \brief Append a new instruction to the trace * \param inst The new instruction to be appended */ void Append(Instruction inst); /*! * \brief Append a new instruction with a random decision to the trace * \param inst The new instruction to be appended * \param decision The random decision made on this instruction * The type of `decision` depends on the instruction, e.g. * the decision of `SamplePerfectTile` has type `Array<IntImm>` */ void Append(Instruction inst, ObjectRef decision); /*! 
* \brief Remove the last instruction, along with the decision made on that instruction, if any * \return The instruction removed; NullOpt if the trace is empty */ Optional<Instruction> Pop(); /*! * \brief Apply the trace to a TensorIR schedule * \param sch The schedule to be applied onto * \param remove_postproc If postprocessing instructions are removed * \param decision_provider A callback that allows users to mutate decisions on the fly * when applying instructions. * \sa FTraceDecisionProvider */ void ApplyToSchedule(Schedule sch, bool remove_postproc, FTraceDecisionProvider decision_provider = nullptr) const; /*! * \brief Serialize the trace as a JSON-style object * \param remove_postproc If postprocessing instructions are removed * \return The JSON-style object */ ObjectRef AsJSON(bool remove_postproc) const; /*! * \brief Serialize the trace as a sequence of python statements * \param remove_postproc If postprocessing instructions are removed * \return A sequence of python statements */ Array<String> AsPython(bool remove_postproc) const; /*! * \brief Create a new trace with an instruction whose decision is changed, * assuming this instruction exists in the resulting trace * \param inst The instruction whose decision is to be changed * \param decision The decision to be changed to * \param remove_postproc If postprocessing instructions are removed * \return The new trace with the decision changed */ Trace WithDecision(Instruction inst, ObjectRef decision, bool remove_postproc) const; /*! * \brief Simplify the trace with dead-code elimination * \param remove_postproc If postprocessing instructions are removed * \return A simplified trace */ Trace Simplified(bool remove_postproc) const; }; /*! * \brief Managed reference to TraceNode * \sa TraceNode */ class Trace : public runtime::ObjectRef { public: /*! \brief Default constructor. Creating an empty trace. */ Trace(); /*! * \brief Constructor. Creating a trace from existing instructions and their decisions * \param insts The instructions used * \param decisions The decisions made in sampling */ explicit Trace(Array<Instruction> insts, Map<Instruction, ObjectRef> decisions); /*! * \brief Apply a JSON-serialized trace to a TensorIR schedule * \param json The JSON-serialized trace * \param sch The TensorIR schedule */ static void ApplyJSONToSchedule(ObjectRef json, Schedule sch); TVM_DEFINE_MUTABLE_NOTNULLABLE_OBJECT_REF_METHODS(Trace, runtime::ObjectRef, TraceNode); }; } // namespace tir } // namespace tvm #endif // TVM_TIR_SCHEDULE_TRACE_H_
https://github.com/zk-ml/tachikoma
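A trace recorded by a traced schedule can be serialized, inspected, and re-applied. A hedged round-trip sketch (assumes `sch` is a schedule created via Schedule::Traced and `mod` is the IRModule it was created from):

  Trace trace = sch->trace().value();
  Array<String> repro = trace->AsPython(false);  // printable Python statements
  ObjectRef json = trace->AsJSON(false);         // roundtrippable JSON-style object
  Schedule replay = Schedule::Concrete(mod, -1, 0, ScheduleErrorRenderLevel::kDetail);
  trace->ApplyToSchedule(replay, false);         // re-run the instructions, reusing decisions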
include/tvm/tir/stmt.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
/*!
 * \file tvm/tir/stmt.h
 * \brief TIR statements.
 */
// Acknowledgement: Many low-level stmts originate from Halide.
#ifndef TVM_TIR_STMT_H_
#define TVM_TIR_STMT_H_

#include <tvm/tir/expr.h>

#include <string>
#include <type_traits>
#include <utility>
#include <vector>

namespace tvm {
namespace tir {

/*! \brief Base node of all statements. */
class StmtNode : public Object {
 public:
  /*!
   * \brief Span that points to the original source code.
   * Reserved debug information.
   */
  mutable Span span;

  StmtNode() = default;
  explicit StmtNode(Span span) : span(span) {}

  static constexpr const char* _type_key = "tir.Stmt";
  static constexpr const bool _type_has_method_sequal_reduce = true;
  static constexpr const bool _type_has_method_shash_reduce = true;
  static constexpr const uint32_t _type_child_slots = 15;
  TVM_DECLARE_BASE_OBJECT_INFO(StmtNode, Object);
};

/*! \brief Container of all statements */
class Stmt : public ObjectRef {
 public:
  TVM_DEFINE_OBJECT_REF_METHODS(Stmt, ObjectRef, StmtNode);
};

/*!
 * \brief Let binding: bind var to value, then run body.
 */
class LetStmtNode : public StmtNode {
 public:
  /*! \brief The variable. */
  Var var;
  /*! \brief The value to be bound. */
  PrimExpr value;
  /*! \brief The body block. */
  Stmt body;

  void VisitAttrs(AttrVisitor* v) {
    v->Visit("var", &var);
    v->Visit("value", &value);
    v->Visit("body", &body);
    v->Visit("span", &span);
  }

  bool SEqualReduce(const LetStmtNode* other, SEqualReducer equal) const {
    return equal.DefEqual(var, other->var) && equal(value, other->value) &&
           equal(body, other->body);
  }

  void SHashReduce(SHashReducer hash_reduce) const {
    hash_reduce.DefHash(var);
    hash_reduce(value);
    hash_reduce(body);
  }

  static constexpr const char* _type_key = "tir.LetStmt";
  TVM_DECLARE_FINAL_OBJECT_INFO(LetStmtNode, StmtNode);
};

/*!
 * \brief Managed reference to LetStmtNode.
 * \sa LetStmtNode
 */
class LetStmt : public Stmt {
 public:
  TVM_DLL LetStmt(Var var, PrimExpr value, Stmt body, Span span = Span());

  TVM_DEFINE_OBJECT_REF_METHODS(LetStmt, Stmt, LetStmtNode);
};

/*!
 * \brief Define a certain auxiliary attribute for the body to be a symbolic value.
 * This provides auxiliary information for IR passes that transform the body.
 *
 * In terms of effect, this is equivalent to Block(Evaluate(value), body).
 *
 * Examples of possible usage:
 * - Bound of functions and variables.
 * - Hint which block corresponds to a parallel region.
 */
class AttrStmtNode : public StmtNode {
 public:
  /*! \brief The node that the attribute is about */
  ObjectRef node;
  /*! \brief The type key of the attribute */
  String attr_key;
  /*! \brief The attribute value; the value is well defined at the current scope. */
  PrimExpr value;
  /*!
   * \brief The body statement to be executed
   */
  Stmt body;

  void VisitAttrs(AttrVisitor* v) {
    v->Visit("node", &node);
    v->Visit("attr_key", &attr_key);
    v->Visit("value", &value);
    v->Visit("body", &body);
    v->Visit("span", &span);
  }

  bool SEqualReduce(const AttrStmtNode* other, SEqualReducer equal) const {
    return equal(node, other->node) && equal(attr_key, other->attr_key) &&
           equal(value, other->value) && equal(body, other->body);
  }

  void SHashReduce(SHashReducer hash_reduce) const {
    hash_reduce(node);
    hash_reduce(attr_key);
    hash_reduce(value);
    hash_reduce(body);
  }

  static constexpr const char* _type_key = "tir.AttrStmt";
  TVM_DECLARE_FINAL_OBJECT_INFO(AttrStmtNode, StmtNode);
};

/*!
 * \brief Managed reference to AttrStmtNode.
 * \sa AttrStmtNode
 */
class AttrStmt : public Stmt {
 public:
  TVM_DLL AttrStmt(ObjectRef node, String attr_key, PrimExpr value, Stmt body,
                   Span span = Span());

  TVM_DEFINE_OBJECT_REF_METHODS(AttrStmt, Stmt, AttrStmtNode);
};

/*!
 * \brief Assert a condition; if an error occurs, return the error message.
 */
class AssertStmtNode : public StmtNode {
 public:
  /*! \brief Condition to be checked. */
  PrimExpr condition;
  /*! \brief Error message when the assertion fails. */
  PrimExpr message;
  /*!
   * \brief Body for which this assertion holds true.
   * Will be executed after the assertion.
   */
  Stmt body;

  void VisitAttrs(AttrVisitor* v) {
    v->Visit("condition", &condition);
    v->Visit("message", &message);
    v->Visit("body", &body);
    v->Visit("span", &span);
  }

  bool SEqualReduce(const AssertStmtNode* other, SEqualReducer equal) const {
    return equal(condition, other->condition) && equal(message, other->message) &&
           equal(body, other->body);
  }

  void SHashReduce(SHashReducer hash_reduce) const {
    hash_reduce(condition);
    hash_reduce(message);
    hash_reduce(body);
  }

  static constexpr const char* _type_key = "tir.AssertStmt";
  TVM_DECLARE_FINAL_OBJECT_INFO(AssertStmtNode, StmtNode);
};

/*!
 * \brief Managed reference to AssertStmtNode.
 * \sa AssertStmtNode
 */
class AssertStmt : public Stmt {
 public:
  TVM_DLL AssertStmt(PrimExpr condition, PrimExpr message, Stmt body, Span span = Span());

  TVM_DEFINE_OBJECT_REF_METHODS(AssertStmt, Stmt, AssertStmtNode);
};

/*!
 * \brief Store value to the buffer.
 *
 * Equivalent to ((DType*)buffer_var)[index] = value,
 * where DType is the type specified by type().element_of().
 *
 * For example, if type = float32x3, then the store will correspond to
 *
 * \code
 *
 * auto buffer = static_cast<float*>(buffer_var);
 * buffer[index.v0] = value.v0;
 * buffer[index.v1] = value.v1;
 * buffer[index.v2] = value.v2;
 *
 * \endcode
 * \sa LoadNode
 */
class StoreNode : public StmtNode {
 public:
  /*! \brief The buffer variable. */
  Var buffer_var;
  /*! \brief The value to be stored. */
  PrimExpr value;
  /*! \brief The index locations to be stored. */
  PrimExpr index;
  /*! \brief The predicate to mask which lanes would be stored. */
  PrimExpr predicate;

  void VisitAttrs(AttrVisitor* v) {
    v->Visit("buffer_var", &buffer_var);
    v->Visit("value", &value);
    v->Visit("index", &index);
    v->Visit("predicate", &predicate);
    v->Visit("span", &span);
  }

  bool SEqualReduce(const StoreNode* other, SEqualReducer equal) const {
    return equal(buffer_var, other->buffer_var) && equal(value, other->value) &&
           equal(index, other->index) && equal(predicate, other->predicate);
  }

  void SHashReduce(SHashReducer hash_reduce) const {
    hash_reduce(buffer_var);
    hash_reduce(value);
    hash_reduce(index);
    hash_reduce(predicate);
  }

  static constexpr const char* _type_key = "tir.Store";
  TVM_DECLARE_FINAL_OBJECT_INFO(StoreNode, StmtNode);
};

/*!
 * \brief Managed reference to StoreNode.
 * \sa StoreNode
 */
class Store : public Stmt {
 public:
  TVM_DLL Store(Var buffer_var, PrimExpr value, PrimExpr index, PrimExpr predicate,
                Span span = Span());

  TVM_DEFINE_OBJECT_REF_METHODS(Store, Stmt, StoreNode);
};

/*!
 * \brief Store value to the high-dimensional buffer.
 *
 * \code
 *
 * buffer[i, j] = value;
 *
 * \endcode
 * \sa BufferLoad
 */
class BufferStoreNode : public StmtNode {
 public:
  /*! \brief The buffer variable. */
  Buffer buffer;
  /*! \brief The value to be stored. */
  PrimExpr value;
  /*! \brief The index locations to be stored. */
  Array<PrimExpr> indices;

  void VisitAttrs(AttrVisitor* v) {
    v->Visit("buffer", &buffer);
    v->Visit("value", &value);
    v->Visit("indices", &indices);
    v->Visit("span", &span);
  }

  bool SEqualReduce(const BufferStoreNode* other, SEqualReducer equal) const {
    return equal(buffer, other->buffer) && equal(value, other->value) &&
           equal(indices, other->indices);
  }

  void SHashReduce(SHashReducer hash_reduce) const {
    hash_reduce(buffer);
    hash_reduce(value);
    hash_reduce(indices);
  }

  static constexpr const char* _type_key = "tir.BufferStore";
  TVM_DECLARE_FINAL_OBJECT_INFO(BufferStoreNode, StmtNode);
};

/*!
 * \brief Managed reference to BufferStoreNode.
 * \sa BufferStoreNode
 */
class BufferStore : public Stmt {
 public:
  TVM_DLL explicit BufferStore(Buffer buffer, PrimExpr value, Array<PrimExpr> indices,
                               Span span = Span());

  TVM_DEFINE_OBJECT_REF_METHODS(BufferStore, Stmt, BufferStoreNode);
  TVM_DEFINE_OBJECT_REF_COW_METHOD(BufferStoreNode);
};

/*!
 * \brief Annotate the region where the buffer needs to
 * be read and written in the body.
 * We only need to allocate the space for the corresponding region.
 *
 * \note There should be at most one BufferRealize for each buffer.
 * BufferRealize is not necessary for external buffers,
 * since they are assumed to be fully allocated.
 *
 * \sa BufferLoad, BufferStore
 */
class BufferRealizeNode : public StmtNode {
 public:
  /*! \brief The buffer variable. */
  Buffer buffer;
  /*! \brief Bounds to be realized */
  Array<Range> bounds;
  /*! \brief Only realize if condition holds. */
  PrimExpr condition;
  /*! \brief The body of realization. */
  Stmt body;

  void VisitAttrs(AttrVisitor* v) {
    v->Visit("buffer", &buffer);
    v->Visit("bounds", &bounds);
    v->Visit("condition", &condition);
    v->Visit("body", &body);
    v->Visit("span", &span);
  }

  bool SEqualReduce(const BufferRealizeNode* other, SEqualReducer equal) const {
    return equal(buffer, other->buffer) && equal(bounds, other->bounds) &&
           equal(condition, other->condition) && equal(body, other->body);
  }

  void SHashReduce(SHashReducer hash_reduce) const {
    hash_reduce(buffer);
    hash_reduce(bounds);
    hash_reduce(condition);
    hash_reduce(body);
  }

  BufferRealizeNode() = default;
  BufferRealizeNode(Buffer buffer, Array<Range> bounds, PrimExpr condition, Stmt body,
                    Span span = Span())
      : StmtNode(span), buffer(buffer), bounds(bounds), condition(condition), body(body) {}

  static constexpr const char* _type_key = "tir.BufferRealize";
  TVM_DECLARE_FINAL_OBJECT_INFO(BufferRealizeNode, StmtNode);
};

/*!
 * \brief Managed reference to BufferRealizeNode.
 * \sa BufferRealizeNode
 */
class BufferRealize : public Stmt {
 public:
  TVM_DLL explicit BufferRealize(Buffer buffer, Array<Range> bounds, PrimExpr condition,
                                 Stmt body, Span span = Span());

  TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(BufferRealize, Stmt, BufferRealizeNode);
  TVM_DEFINE_OBJECT_REF_COW_METHOD(BufferRealizeNode);
};

/*!
 * \brief Store value into a multi-dimensional array that will be read by the consumer
 * of the producer.
* * \note This node only appears in high-level DSLs that are built on top of the TIR. * It should not appear in a valid TIR PrimFunc. A high-level DSL needs to lower * this node before TIR transformations. * * \sa DataProducer */ class ProducerStoreNode : public StmtNode { public: /*! \brief The producer to store the results into. */ DataProducer producer; /*! \brief The value to be stored. */ PrimExpr value; /*! \brief The index arguments of the function. */ Array<PrimExpr> indices; void VisitAttrs(AttrVisitor* v) { v->Visit("producer", &producer); v->Visit("value", &value); v->Visit("indices", &indices); v->Visit("span", &span); } bool SEqualReduce(const ProducerStoreNode* other, SEqualReducer equal) const { return equal(producer, other->producer) && equal(value, other->value) && equal(indices, other->indices); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce(producer); hash_reduce(value); hash_reduce(indices); } static constexpr const char* _type_key = "tir.ProducerStore"; TVM_DECLARE_FINAL_OBJECT_INFO(ProducerStoreNode, StmtNode); }; /*! * \brief Managed reference to ProducerStoreNode. * \sa ProducerStoreNode */ class ProducerStore : public Stmt { public: TVM_DLL ProducerStore(DataProducer producer, PrimExpr value, Array<PrimExpr> indices, Span span = Span()); TVM_DEFINE_OBJECT_REF_METHODS(ProducerStore, Stmt, ProducerStoreNode); }; /*! * \brief Annotate the bounds where the data produced by the producer * need to be written and read in body. * We will need to allocate space for the corresponding regions. * * \note This node only appears in high-level DSLs that are built on top of the TIR. * It should not appear in a valid TIR PrimFunc. A high-level DSL needs to lower * this node before TIR transformations. * * \sa DataProducer */ class ProducerRealizeNode : public StmtNode { public: /*! \brief The producer that produces the data. */ DataProducer producer; /*! \brief Bounds to be realized. */ Region bounds; /*! \brief Only realize if condition holds. */ PrimExpr condition; /*! \brief The body of realization. */ Stmt body; /*! \brief The storage scope associated with this realization. */ String storage_scope; void VisitAttrs(AttrVisitor* v) { v->Visit("producer", &producer); v->Visit("bounds", &bounds); v->Visit("condition", &condition); v->Visit("body", &body); v->Visit("storage_scope", &storage_scope); v->Visit("span", &span); } bool SEqualReduce(const ProducerRealizeNode* other, SEqualReducer equal) const { return equal(producer, other->producer) && equal(bounds, other->bounds) && equal(condition, other->condition) && equal(body, other->body) && equal(storage_scope, other->storage_scope); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce(producer); hash_reduce(bounds); hash_reduce(condition); hash_reduce(body); hash_reduce(storage_scope); } static constexpr const char* _type_key = "tir.ProducerRealize"; TVM_DECLARE_FINAL_OBJECT_INFO(ProducerRealizeNode, StmtNode); }; /*! * \brief Managed reference to ProducerRealizeNode. * \sa ProducerRealizeNode */ class ProducerRealize : public Stmt { public: TVM_DLL ProducerRealize(DataProducer producer, Region bounds, PrimExpr condition, Stmt body, String storage_scope = "", Span span = Span()); TVM_DEFINE_OBJECT_REF_METHODS(ProducerRealize, Stmt, ProducerRealizeNode); }; /*! * \brief Allocate a buffer that can be used in body. */ class AllocateNode : public StmtNode { public: /*! \brief The buffer variable. */ Var buffer_var; /*! \brief The type of the buffer. */ DataType dtype; /*! 
\brief The extents of the buffer. */ Array<PrimExpr> extents; /*! \brief Only allocate buffer when condition is satisfied. */ PrimExpr condition; /*! \brief The body to be executed. */ Stmt body; /*! * \brief Additional annotations about the allocation. * * These annotations can be used as auxiliary hint * to future transformations. */ Map<String, ObjectRef> annotations; void VisitAttrs(AttrVisitor* v) { v->Visit("buffer_var", &buffer_var); v->Visit("dtype", &dtype); v->Visit("extents", &extents); v->Visit("condition", &condition); v->Visit("body", &body); v->Visit("annotations", &annotations); v->Visit("span", &span); } bool SEqualReduce(const AllocateNode* other, SEqualReducer equal) const { return equal.DefEqual(buffer_var, other->buffer_var) && equal(dtype, other->dtype) && equal(extents, other->extents) && equal(condition, other->condition) && equal(body, other->body) && equal(annotations, other->annotations); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce.DefHash(buffer_var); hash_reduce(dtype); hash_reduce(extents); hash_reduce(condition); hash_reduce(body); hash_reduce(annotations); } /*! * \brief If the buffer size is constant, return the size. * Otherwise return 0. * \return The result. */ int64_t ConstantAllocationSize() const { return ConstantAllocationSize(extents); } /*! * \brief If the buffer size is constant, return the size. * Otherwise return 0. * \param extents The extents of the buffer. * \return The result. */ TVM_DLL static int64_t ConstantAllocationSize(const Array<PrimExpr>& extents); static constexpr const char* _type_key = "tir.Allocate"; static constexpr const bool _type_has_method_sequal_reduce = true; static constexpr const bool _type_has_method_shash_reduce = true; TVM_DECLARE_FINAL_OBJECT_INFO(AllocateNode, StmtNode); }; /*! * \brief Managed reference to AllocateNode. * \sa AllocateNode */ class Allocate : public Stmt { public: TVM_DLL Allocate(Var buffer_var, DataType dtype, Array<PrimExpr> extents, PrimExpr condition, Stmt body, Map<String, ObjectRef> annotations = Map<String, ObjectRef>(), Span span = Span()); TVM_DEFINE_OBJECT_REF_METHODS(Allocate, Stmt, AllocateNode); TVM_DEFINE_OBJECT_REF_COW_METHOD(AllocateNode); }; /*! * \brief Allocate a buffer that can be used in body. */ class AllocateConstNode : public StmtNode { public: /*! \brief The buffer variable. */ Var buffer_var; /*! \brief The optional data associated to the constant. */ Optional<runtime::NDArray> data; /*! * \brief If the PrimFunc containing the Stmt is added to IRModule, this is an optional index * to indicate the index within "constants" attribute, that is a Array<NDArray> of IRModule. */ Optional<Integer> irmod_storage_idx; /*! \brief The type of the buffer. */ DataType dtype; /*! \brief The extents of the buffer. */ Array<PrimExpr> extents; /*! \brief The body to be executed. */ Stmt body; /*! * \brief Additional annotations about the allocation. * * These annotations can be used as auxiliary hint * to future transformations. 
/*!
 * \brief Allocate a constant buffer that can be used in body.
 */
class AllocateConstNode : public StmtNode {
 public:
  /*! \brief The buffer variable. */
  Var buffer_var;
  /*! \brief The optional data associated to the constant. */
  Optional<runtime::NDArray> data;
  /*!
   * \brief If the PrimFunc containing the Stmt is added to IRModule, this is an optional index
   *  to indicate the index within the "constants" attribute, which is an Array<NDArray> of the
   *  IRModule.
   */
  Optional<Integer> irmod_storage_idx;
  /*! \brief The type of the buffer. */
  DataType dtype;
  /*! \brief The extents of the buffer. */
  Array<PrimExpr> extents;
  /*! \brief The body to be executed. */
  Stmt body;
  /*!
   * \brief Additional annotations about the allocation.
   *
   *  These annotations can be used as auxiliary hint
   *  to future transformations.
   */
  Map<String, ObjectRef> annotations;

  void VisitAttrs(AttrVisitor* v) {
    v->Visit("buffer_var", &buffer_var);
    v->Visit("data", &data);
    v->Visit("irmod_storage_idx", &irmod_storage_idx);
    v->Visit("dtype", &dtype);
    v->Visit("extents", &extents);
    v->Visit("body", &body);
    v->Visit("annotations", &annotations);
    v->Visit("span", &span);
  }

  bool SEqualReduce(const AllocateConstNode* other, SEqualReducer equal) const {
    return equal.DefEqual(buffer_var, other->buffer_var) && equal(dtype, other->dtype) &&
           equal(extents, other->extents) && equal(data, other->data) && equal(body, other->body) &&
           equal(annotations, other->annotations);
  }

  void SHashReduce(SHashReducer hash_reduce) const {
    hash_reduce.DefHash(buffer_var);
    hash_reduce(dtype);
    hash_reduce(extents);
    hash_reduce(body);
    hash_reduce(annotations);
    hash_reduce(data);
  }

  /*!
   * \brief If the buffer size is constant, return the size.
   *        Otherwise return 0.
   * \return The result.
   */
  int64_t ConstantAllocationSize() const { return ConstantAllocationSize(extents); }
  /*!
   * \brief If the buffer size is constant, return the size.
   *        Otherwise return 0.
   * \param extents The extents of the buffer.
   * \return The result.
   */
  TVM_DLL static int64_t ConstantAllocationSize(const Array<PrimExpr>& extents);

  static constexpr const char* _type_key = "tir.AllocateConst";
  static constexpr const bool _type_has_method_sequal_reduce = true;
  static constexpr const bool _type_has_method_shash_reduce = true;
  TVM_DECLARE_FINAL_OBJECT_INFO(AllocateConstNode, StmtNode);
};

/*!
 * \brief Managed reference to AllocateConstNode.
 * \sa AllocateConstNode
 */
class AllocateConst : public Stmt {
 public:
  /*!
   * \brief The constructor to create an IR node with constant data.
   *  Depending on the type of ObjectRef, it will either create an
   *  AllocateConstNode with irmod_storage_idx or with data.
   */
  TVM_DLL AllocateConst(Var buffer_var, DataType dtype, Array<PrimExpr> extents,
                        ObjectRef data_or_idx, Stmt body,
                        Map<String, ObjectRef> annotations = Map<String, ObjectRef>(),
                        Span span = Span());
  TVM_DEFINE_OBJECT_REF_METHODS(AllocateConst, Stmt, AllocateConstNode);
};

/*! \brief Declare a buffer that can be used in the body */
class DeclBufferNode : public StmtNode {
 public:
  /*! \brief The buffer being declared */
  Buffer buffer;
  /*! \brief The body to be executed */
  Stmt body;

  void VisitAttrs(AttrVisitor* v) {
    v->Visit("buffer", &buffer);
    v->Visit("body", &body);
    v->Visit("span", &span);
  }

  bool SEqualReduce(const DeclBufferNode* other, SEqualReducer equal) const {
    return equal(buffer, other->buffer) && equal(body, other->body);
  }

  void SHashReduce(SHashReducer hash_reduce) const {
    hash_reduce(buffer);
    hash_reduce(body);
  }

  static constexpr const char* _type_key = "tir.DeclBuffer";
  TVM_DECLARE_FINAL_OBJECT_INFO(DeclBufferNode, StmtNode);
};

/*! \brief Managed reference to DeclBufferNode */
class DeclBuffer : public Stmt {
 public:
  TVM_DLL DeclBuffer(Buffer buffer, Stmt body, Span span = Span());
  TVM_DEFINE_OBJECT_REF_METHODS(DeclBuffer, Stmt, DeclBufferNode);
  TVM_DEFINE_OBJECT_REF_COW_METHOD(DeclBufferNode);
};

/*!
 * \brief The container of seq statement.
 *        Represent a sequence of statements.
 */
class SeqStmtNode : public StmtNode {
 public:
  /*! \brief internal sequence content. */
  Array<Stmt> seq;

  /*! \return get the size of the sequence */
  size_t size() const { return seq.size(); }
  /*!
   * \brief Get the index-th element in the sequence.
   */
  Stmt operator[](size_t index) const { return seq[index]; }

  void VisitAttrs(AttrVisitor* v) {
    v->Visit("seq", &seq);
    v->Visit("span", &span);
  }

  bool SEqualReduce(const SeqStmtNode* other, SEqualReducer equal) const {
    return equal(seq, other->seq);
  }

  void SHashReduce(SHashReducer hash_reduce) const { hash_reduce(seq); }

  static constexpr const char* _type_key = "tir.SeqStmt";
  TVM_DECLARE_FINAL_OBJECT_INFO(SeqStmtNode, StmtNode);
};

/*! \brief Sequence statement. */
class SeqStmt : public Stmt {
 public:
  /*!
   * \brief Construct SeqStmt.
   * \param seq The sequence.
   * \param span The location of this object in the source code.
   */
  TVM_DLL explicit SeqStmt(Array<Stmt> seq, Span span = Span());

  /*! \return get the size of the sequence */
  size_t size() const { return operator->()->size(); }
  /*!
   * \brief Get the index-th element in the sequence.
   */
  Stmt operator[](size_t index) const { return (*(operator->()))[index]; }
  /*!
   * \brief Construct a sequence statement by flattening
   *        all the arrays and sequences in the arguments
   *        recursively.
   *
   * - When an argument is nullptr, it will be ignored.
   * - When an argument is an array or a SeqStmt, it will be flattened recursively.
   * - A normal Stmt will be appended to the end of the sequence.
   *
   * \note This function can directly return an element
   *       if it is the only element in the sequence.
   *
   * \param seq_args The list of arguments to be flattened.
   * \tparam Args arguments
   * \return The constructed statement
   */
  template <typename... Args>
  static Stmt Flatten(Args&&... seq_args) {
    Array<Stmt> seq;
    runtime::detail::for_each(Flattener(&seq), std::forward<Args>(seq_args)...);
    if (seq.size() == 1) return seq[0];
    return SeqStmt(seq);
  }
  /*! \brief Helper class to flatten sequence of arguments into Array. */
  class Flattener {
   public:
    explicit Flattener(Array<Stmt>* seq) : seq_(seq) {}

    void operator()(size_t i, const Stmt& stmt) const {
      if (!stmt.defined()) return;
      if (auto* op = stmt.as<SeqStmtNode>()) {
        operator()(0, op->seq);
      } else {
        seq_->push_back(stmt);
      }
    }

    template <typename T>
    void operator()(size_t i, const T& seq) const {
      for (auto v : seq) {
        this->operator()(0, v);
      }
    }

   private:
    Array<Stmt>* seq_;
  };

  TVM_DEFINE_OBJECT_REF_METHODS(SeqStmt, Stmt, SeqStmtNode);
};

/*!
 * \brief IfThenElse statement.
 */
class IfThenElseNode : public StmtNode {
 public:
  /*! \brief The condition. */
  PrimExpr condition;
  /*! \brief The branch to be executed when condition is true. */
  Stmt then_case;
  /*! \brief The branch to be executed when condition is false, can be null. */
  Optional<Stmt> else_case;

  void VisitAttrs(AttrVisitor* v) {
    v->Visit("condition", &condition);
    v->Visit("then_case", &then_case);
    v->Visit("else_case", &else_case);
    v->Visit("span", &span);
  }

  bool SEqualReduce(const IfThenElseNode* other, SEqualReducer equal) const {
    return equal(condition, other->condition) && equal(then_case, other->then_case) &&
           equal(else_case, other->else_case);
  }

  void SHashReduce(SHashReducer hash_reduce) const {
    hash_reduce(condition);
    hash_reduce(then_case);
    hash_reduce(else_case);
  }

  static constexpr const char* _type_key = "tir.IfThenElse";
  TVM_DECLARE_FINAL_OBJECT_INFO(IfThenElseNode, StmtNode);
};

/*!
 * \brief Managed reference to IfThenElseNode.
 * \sa IfThenElseNode
 */
class IfThenElse : public Stmt {
 public:
  TVM_DLL IfThenElse(PrimExpr condition, Stmt then_case, Optional<Stmt> else_case = NullOpt,
                     Span span = Span());

  TVM_DEFINE_OBJECT_REF_METHODS(IfThenElse, Stmt, IfThenElseNode);
};
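/*!
 * \brief Editor's sketch (not part of the original header): composing the
 *  nodes above. `s1` and `s2` are assumed Stmts; `cond` is an assumed PrimExpr.
 *
 * \code
 * Stmt seq = SeqStmt::Flatten(s1, s2);  // nested SeqStmts/arrays are flattened
 * Stmt branch = IfThenElse(cond, seq);  // else_case defaults to NullOpt
 * \endcode
 */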
/*!
 * \brief Evaluates an expression.
 *  This is mostly used for putting a Call node into Stmt.
 *
 *  If the value does not have side effects, this node can be safely removed.
 */
class EvaluateNode : public StmtNode {
 public:
  /*! \brief The expression to be evaluated. */
  PrimExpr value;

  void VisitAttrs(AttrVisitor* v) {
    v->Visit("value", &value);
    v->Visit("span", &span);
  }

  bool SEqualReduce(const EvaluateNode* other, SEqualReducer equal) const {
    return equal(value, other->value);
  }

  void SHashReduce(SHashReducer hash_reduce) const { hash_reduce(value); }

  static constexpr const char* _type_key = "tir.Evaluate";
  TVM_DECLARE_FINAL_OBJECT_INFO(EvaluateNode, StmtNode);
};

/*!
 * \brief Managed reference to EvaluateNode.
 * \sa EvaluateNode
 */
class Evaluate : public Stmt {
 public:
  TVM_DLL explicit Evaluate(PrimExpr value, Span span = Span());

  explicit Evaluate(int value, Span span = Span()) : Evaluate(PrimExpr(value), span) {}

  TVM_DEFINE_OBJECT_REF_METHODS(Evaluate, Stmt, EvaluateNode);
};

/*!
 * \brief The kind of the loop.
 *
 *  ForKind can change the control flow semantics
 *  of the loop. So the kind field needs to be considered
 *  in all TIR passes.
 */
enum class ForKind : int {
  /*! \brief default semantics -- serial execution. */
  kSerial = 0,
  /*! \brief Parallel execution on CPU. */
  kParallel = 1,
  /*!
   * \brief Vector SIMD loop.
   *  The loop body will be vectorized.
   */
  kVectorized = 2,
  /*! \brief The loop body must be unrolled. */
  kUnrolled = 3,
  /*!
   * \brief The loop variable is bound to a thread in
   *  an environment. In the final stage of lowering,
   *  the loop is simply removed and the loop variable is
   *  mapped to the corresponding context thread.
   */
  kThreadBinding = 4
};

/*!
 * \brief A for loop, with possible type annotations.
 *
 * \code
 *
 *  for (loop_var = min; loop_var < min + extent; ++loop_var) {
 *    // body
 *  }
 * \endcode
 */
class ForNode : public StmtNode {
 public:
  /*! \brief The loop variable. */
  Var loop_var;
  /*! \brief The minimum value of iteration. */
  PrimExpr min;
  /*! \brief The extent of the iteration. */
  PrimExpr extent;
  /*! \brief The kind of the for loop. */
  ForKind kind;
  /*! \brief The body of the for loop. */
  Stmt body;
  /*!
   * \brief Only valid when kind == ForKind::kThreadBinding
   *  The context thread that this loop variable binds to.
   */
  Optional<IterVar> thread_binding;
  /*!
   * \brief Additional annotations about the loop.
   *
   *  These annotations can be used as auxiliary hint
   *  to future transformations. An annotation should
   *  not change the control flow semantics of the loop
   *  and can be ignored in most passes.
   */
  Map<String, ObjectRef> annotations;

  void VisitAttrs(AttrVisitor* v) {
    v->Visit("loop_var", &loop_var);
    v->Visit("min", &min);
    v->Visit("extent", &extent);
    v->Visit("kind", &kind);
    v->Visit("body", &body);
    v->Visit("thread_binding", &thread_binding);
    v->Visit("annotations", &annotations);
    v->Visit("span", &span);
  }

  bool SEqualReduce(const ForNode* other, SEqualReducer equal) const {
    return equal.DefEqual(loop_var, other->loop_var) && equal(min, other->min) &&
           equal(extent, other->extent) && equal(kind, other->kind) && equal(body, other->body) &&
           equal(thread_binding, other->thread_binding) && equal(annotations, other->annotations);
  }

  void SHashReduce(SHashReducer hash_reduce) const {
    hash_reduce.DefHash(loop_var);
    hash_reduce(min);
    hash_reduce(extent);
    hash_reduce(kind);
    hash_reduce(body);
    hash_reduce(thread_binding);
    hash_reduce(annotations);
  }

  static constexpr const char* _type_key = "tir.For";
  TVM_DECLARE_FINAL_OBJECT_INFO(ForNode, StmtNode);
};

/*!
 * \brief Managed reference to ForNode.
 * \sa ForNode
 */
class For : public Stmt {
 public:
  TVM_DLL For(Var loop_var, PrimExpr min, PrimExpr extent, ForKind kind, Stmt body,
              Optional<IterVar> thread_binding = NullOpt,
              Map<String, ObjectRef> annotations = Map<String, ObjectRef>(), Span span = Span());

  TVM_DEFINE_OBJECT_REF_METHODS(For, Stmt, ForNode);
  TVM_DEFINE_OBJECT_REF_COW_METHOD(ForNode);
};
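/*!
 * \brief Editor's sketch (not part of the original header): a serial loop
 *  `for (i = 0; i < 16; ++i) body`, where `body` is an assumed Stmt.
 *
 * \code
 * Var i("i", DataType::Int(32));
 * Stmt loop = For(i, 0, 16, ForKind::kSerial, body);
 * \endcode
 */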
/*!
 * \brief A While loop
 *
 * \code
 *
 *  while (condition)
 *    body
 *
 * \endcode
 */
class WhileNode : public StmtNode {
 public:
  /*! \brief The termination condition. */
  PrimExpr condition;
  /*! \brief The body of the while loop. */
  Stmt body;

  void VisitAttrs(AttrVisitor* v) {
    v->Visit("condition", &condition);
    v->Visit("body", &body);
    v->Visit("span", &span);
  }

  bool SEqualReduce(const WhileNode* other, SEqualReducer equal) const {
    return equal(condition, other->condition) && equal(body, other->body);
  }

  void SHashReduce(SHashReducer hash_reduce) const {
    hash_reduce(condition);
    hash_reduce(body);
  }

  static constexpr const char* _type_key = "tir.While";
  TVM_DECLARE_FINAL_OBJECT_INFO(WhileNode, StmtNode);
};

/*!
 * \brief Managed reference to WhileNode.
 * \sa WhileNode
 */
class While : public Stmt {
 public:
  TVM_DLL While(PrimExpr condition, Stmt body, Span span = Span());

  TVM_DEFINE_OBJECT_REF_METHODS(While, Stmt, WhileNode);
};

/*!
 * \brief A prefetch hint for a buffer
 */
class PrefetchNode : public StmtNode {
 public:
  /*! \brief The buffer to be prefetched. */
  Buffer buffer;
  /*! \brief Bounds to be prefetched. */
  Array<Range> bounds;

  void VisitAttrs(AttrVisitor* v) {
    v->Visit("buffer", &buffer);
    v->Visit("bounds", &bounds);
    v->Visit("span", &span);
  }

  bool SEqualReduce(const PrefetchNode* other, SEqualReducer equal) const {
    return equal(buffer, other->buffer) && equal(bounds, other->bounds);
  }

  void SHashReduce(SHashReducer hash_reduce) const {
    hash_reduce(buffer);
    hash_reduce(bounds);
  }

  PrefetchNode() = default;
  PrefetchNode(Buffer buffer, Array<Range> bounds, Span span = Span())
      : StmtNode(span), buffer(buffer), bounds(bounds) {}

  static constexpr const char* _type_key = "tir.Prefetch";
  TVM_DECLARE_FINAL_OBJECT_INFO(PrefetchNode, StmtNode);
};

/*!
 * \brief Managed reference to PrefetchNode.
 * \sa PrefetchNode
 */
class Prefetch : public Stmt {
 public:
  TVM_DLL explicit Prefetch(Buffer buffer, Array<Range> bounds, Span span = Span());

  TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(Prefetch, Stmt, PrefetchNode);
};

/*!
 * \brief Representing the region of multi-dimensional buffer access.
 */
class BufferRegionNode : public Object {
 public:
  /*! \brief The buffer of the buffer region. */
  Buffer buffer;
  /*! \brief The region array of the buffer region. */
  Array<Range> region;

  void VisitAttrs(AttrVisitor* v) {
    v->Visit("buffer", &buffer);
    v->Visit("region", &region);
  }

  bool SEqualReduce(const BufferRegionNode* other, SEqualReducer equal) const {
    return equal(buffer, other->buffer) && equal(region, other->region);
  }

  void SHashReduce(SHashReducer hash_reduce) const {
    hash_reduce(buffer);
    hash_reduce(region);
  }

  static constexpr const char* _type_key = "tir.BufferRegion";
  static constexpr const bool _type_has_method_sequal_reduce = true;
  static constexpr const bool _type_has_method_shash_reduce = true;
  TVM_DECLARE_FINAL_OBJECT_INFO(BufferRegionNode, Object);
};

/*!
 * \brief Managed reference to BufferRegionNode.
 * \sa BufferRegionNode
 */
class BufferRegion : public ObjectRef {
 public:
  TVM_DLL explicit BufferRegion(Buffer buffer, Array<Range> region);
  /*!
   * \brief Create a BufferRegion which is the full region of the given buffer.
   * \param buffer The buffer to generate full BufferRegion.
   * \return The BufferRegion which covers all regions of the given buffer.
   */
  TVM_DLL static BufferRegion FullRegion(Buffer buffer);

  /*!
   * \brief Create a BufferRegion which is a single point of the given buffer.
   * \param buffer The buffer to generate single point BufferRegion.
   * \param indices The access point indices of the buffer.
   * \return The BufferRegion which is the single point of the given buffer.
   */
  TVM_DLL static BufferRegion FromPoint(Buffer buffer, Array<PrimExpr> indices);

  TVM_DEFINE_OBJECT_REF_METHODS(BufferRegion, ObjectRef, BufferRegionNode);
  TVM_DEFINE_OBJECT_REF_COW_METHOD(BufferRegionNode);
};

/*!
 * \brief Match introduces a constraint that the source buffer region can be remapped to the data
 * layout specified by the buffer field. The constraint can be checked in a later part of lowering
 * (or optionally during runtime).
 *
 * MatchBufferRegion provides a mechanism to represent data layout and compactness constraints in
 * low-level hardware primitives in the IR and defer the check until after the sequence of
 * transformations.
 */
class MatchBufferRegionNode : public Object {
 public:
  /*! \brief The target buffer. */
  Buffer buffer;
  /*! \brief The source buffer region. */
  BufferRegion source;

  void VisitAttrs(AttrVisitor* v) {
    v->Visit("buffer", &buffer);
    v->Visit("source", &source);
  }

  bool SEqualReduce(const MatchBufferRegionNode* other, SEqualReducer equal) const {
    return equal(buffer, other->buffer) && equal(source, other->source);
  }

  void SHashReduce(SHashReducer hash_reduce) const {
    hash_reduce(buffer);
    hash_reduce(source);
  }

  static constexpr const char* _type_key = "tir.MatchBufferRegion";
  static constexpr const bool _type_has_method_sequal_reduce = true;
  static constexpr const bool _type_has_method_shash_reduce = true;
  TVM_DECLARE_FINAL_OBJECT_INFO(MatchBufferRegionNode, Object);
};

/*!
 * \brief Managed reference to MatchBufferRegionNode.
 * \sa MatchBufferRegionNode
 */
class MatchBufferRegion : public ObjectRef {
 public:
  TVM_DLL explicit MatchBufferRegion(Buffer buffer, BufferRegion source);

  TVM_DEFINE_OBJECT_REF_METHODS(MatchBufferRegion, ObjectRef, MatchBufferRegionNode);
};
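/*!
 * \brief Editor's sketch (not part of the original header): building regions
 *  of an assumed 2-D Buffer `buf` with assumed index expressions `i`, `j`.
 *
 * \code
 * BufferRegion whole = BufferRegion::FullRegion(buf);        // buf[0:n, 0:m]
 * BufferRegion cell = BufferRegion::FromPoint(buf, {i, j});  // buf[i, j]
 * \endcode
 */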
/*!
 * \brief A block is a basic schedule unit in TIR.
 * \note Block's body is parameterized by iter vars.
 * \code
 *
 *  with T.block(name):
 *      v0 = T.axis.S(domain, value0)
 *      v1 = T.axis.R(domain, value1)
 *      ...
 *      T.reads([buffer0[start:end, ...], ...])
 *      T.writes([buffer1[start:end, ...], ...])
 *      T.where(predicate)
 *      buffer2 = T.alloc_buffer(shape, dtype)
 *      buffer3 = T.match_buffer(source_buffer[start:end, ...])
 *      T.attr({attr_key: attr_value, ...})
 *      with T.init():
 *          // init body
 *      // body
 *
 * \endcode
 */
class BlockNode : public StmtNode {
 public:
  /*! \brief The variables of the block. */
  Array<IterVar> iter_vars;
  /*! \brief The read buffer regions of the block. */
  Array<BufferRegion> reads;
  /*! \brief The write buffer regions of the block. */
  Array<BufferRegion> writes;
  /*! \brief The name_hint of the block. */
  String name_hint;
  /*! \brief The body of the block. */
  Stmt body;
  /*!
   * \brief The init statement is executed during the first iteration of reduction loops in a
   *  reduction block. The optional init field allows us to represent initialization and
   *  reduction update in a single block and transform them collectively.
   *  We also provide primitives to decompose the init into a separate block during scheduling.
   *  The init field is `NullOpt` if there are no reduction iter_vars.
   */
  Optional<Stmt> init;
  /*! \brief The buffers allocated in the block. */
  Array<Buffer> alloc_buffers;
  /*! \brief The match buffer regions. */
  Array<MatchBufferRegion> match_buffers;
  /*! \brief The annotation of the block. */
  Map<String, ObjectRef> annotations;

  void VisitAttrs(AttrVisitor* v) {
    v->Visit("iter_vars", &iter_vars);
    v->Visit("reads", &reads);
    v->Visit("writes", &writes);
    v->Visit("name_hint", &name_hint);
    v->Visit("body", &body);
    v->Visit("init", &init);
    v->Visit("alloc_buffers", &alloc_buffers);
    v->Visit("match_buffers", &match_buffers);
    v->Visit("annotations", &annotations);
  }

  bool SEqualReduce(const BlockNode* other, SEqualReducer equal) const {
    // Need to first reduce iter_vars, alloc_buffers and match_buffers to define new vars
    return equal.DefEqual(iter_vars, other->iter_vars) &&
           equal(alloc_buffers, other->alloc_buffers) &&
           equal(match_buffers, other->match_buffers) && equal(reads, other->reads) &&
           equal(writes, other->writes) && equal(body, other->body) && equal(init, other->init) &&
           equal(annotations, other->annotations);
  }

  void SHashReduce(SHashReducer hash_reduce) const {
    hash_reduce.DefHash(iter_vars);
    hash_reduce(alloc_buffers);
    hash_reduce(match_buffers);
    hash_reduce(reads);
    hash_reduce(writes);
    hash_reduce(body);
    hash_reduce(init);
    hash_reduce(annotations);
  }

  static constexpr const char* _type_key = "tir.Block";
  TVM_DECLARE_FINAL_OBJECT_INFO(BlockNode, StmtNode);
};

/*!
 * \brief Managed reference to BlockNode.
 * \sa BlockNode
 */
class Block : public Stmt {
 public:
  TVM_DLL explicit Block(Array<IterVar> iter_vars, Array<BufferRegion> reads,
                         Array<BufferRegion> writes, String name_hint, Stmt body,
                         Optional<Stmt> init = NullOpt,
                         Array<Buffer> alloc_buffers = Array<Buffer>(),
                         Array<MatchBufferRegion> match_buffers = Array<MatchBufferRegion>(),
                         Map<String, ObjectRef> annotations = Map<String, ObjectRef>(),
                         Span span = Span());

  TVM_DEFINE_OBJECT_REF_METHODS(Block, Stmt, BlockNode);
  TVM_DEFINE_OBJECT_REF_COW_METHOD(BlockNode);
};

/*!
 * \brief A block realization node represents execution of the block at the binding values.
 */
class BlockRealizeNode : public StmtNode {
 public:
  /*! \brief The corresponding values of the iter vars. */
  Array<PrimExpr> iter_values;
  /*!
   * \brief The predicate of the block realization; the block will only be executed when the
   *  predicate is true.
   */
  PrimExpr predicate;
  /*! \brief The block to be realized. */
  Block block;

  void VisitAttrs(AttrVisitor* v) {
    v->Visit("iter_values", &iter_values);
    v->Visit("predicate", &predicate);
    v->Visit("block", &block);
  }

  bool SEqualReduce(const BlockRealizeNode* other, SEqualReducer equal) const {
    return equal(iter_values, other->iter_values) && equal(predicate, other->predicate) &&
           equal(block, other->block);
  }

  void SHashReduce(SHashReducer hash_reduce) const {
    hash_reduce(iter_values);
    hash_reduce(predicate);
    hash_reduce(block);
  }

  static constexpr const char* _type_key = "tir.BlockRealize";
  TVM_DECLARE_FINAL_OBJECT_INFO(BlockRealizeNode, StmtNode);
};

/*!
 * \brief Managed reference to BlockRealizeNode
 * \sa BlockRealizeNode
 */
class BlockRealize : public Stmt {
 public:
  TVM_DLL explicit BlockRealize(Array<PrimExpr> iter_values, PrimExpr predicate, Block block,
                                Span span = Span());

  TVM_DEFINE_OBJECT_REF_METHODS(BlockRealize, Stmt, BlockRealizeNode);
  TVM_DEFINE_OBJECT_REF_COW_METHOD(BlockRealizeNode);
};

/*! \brief namespace of possible attributes in AttrStmt.attr_key */
namespace attr {
/*! \brief Mark launching extent of thread, used by device API. */
constexpr const char* thread_extent = "thread_extent";
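/*!
 * \brief Editor's sketch (not part of the original header): a typical
 *  launch-extent annotation. `tx` is an assumed IterVar for "threadIdx.x" and
 *  `body` an assumed Stmt; AttrStmt is declared earlier in this header.
 *
 * \code
 * Stmt s = AttrStmt(tx, attr::thread_extent, 128, body);
 * \endcode
 */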
/*! \brief Mark launching of a virtual thread. */
constexpr const char* virtual_thread = "virtual_thread";
/*! \brief Mark that a region is processed by a co-processor. */
constexpr const char* coproc_scope = "coproc_scope";
/*!
 * \brief Mark that a region creates coprocessor micro ops;
 *  it can be reused if the corresponding variable is independent.
 */
constexpr const char* coproc_uop_scope = "coproc_uop_scope";
/*! \brief Mark the scope as volatile access for certain handle. */
constexpr const char* volatile_scope = "volatile_scope";
/*!
 * \brief Mark the scope as generated by an extern primitive.
 *  Such a scope can contain an arbitrary IR program, so we need to be careful
 *  when making assumptions about the structure of the program.
 */
constexpr const char* extern_scope = "extern_scope";
/*!
 * \brief Mark the scope where computation starts to happen.
 *  This can hint some code generators to create a new function for compute.
 */
constexpr const char* compute_scope = "compute_scope";
/*! \brief Mark storage alignment requirement of buffers */
constexpr const char* storage_alignment = "storage_alignment";
/*! \brief Mark storage scope of realization */
constexpr const char* realize_scope = "realize_scope";
/*! \brief The allocation device for global malloc in host. */
constexpr const char* device_id = "device_id";
/*! \brief The device type. */
constexpr const char* device_type = "device_type";
/*! \brief Mark of loop scope */
constexpr const char* loop_scope = "loop_scope";
/*! \brief Mark of reduce scope */
constexpr const char* reduce_scope = "reduce_scope";
/*! \brief Pragma: auto-unroll, max_step */
constexpr const char* pragma_auto_unroll_max_step = "pragma_auto_unroll_max_step";
/*! \brief Pragma: unroll explicit */
constexpr const char* pragma_unroll_explicit = "pragma_unroll_explicit";
/*! \brief Mark that a region is guarded by the pragma extension */
constexpr const char* pragma_scope_prefix = "pragma_";
/*! \brief Import C source or file into the final code gen module */
constexpr const char* pragma_import_c = "pragma_import_c";
/*! \brief Import llvm source or file into the final code gen module */
constexpr const char* pragma_import_llvm = "pragma_import_llvm";
/*! \brief Try to modify the AST to support Tensor Core */
constexpr const char* pragma_tensor_core = "pragma_tensor_core";
/*!
 * \brief Mark of prefetch scope, value=offset;
 *  run prefetch of Tensor on the current loop scope
 */
constexpr const char* prefetch_scope = "prefetch_scope";
/*!
 * \brief Marks the layout transforms to be used for a tensor.
 *
 *  Only applies to a DataProducer, as it should be made part of the
 *  PrimFunc attributes for TIR.
 */
constexpr const char* layout_transforms = "layout_transforms";
/*!
 * \brief Marks the physical axis separators
 *
 *  Only applies to a DataProducer, as it should be made part of the
 *  Buffer definition in a PrimFunc. See `BufferNode::axis_separators`
 *  for more details.
 */
constexpr const char* axis_separators = "axis_separators";
/*!
 * \brief Marks production of double buffer data
 */
constexpr const char* double_buffer_scope = "double_buffer_scope";
/*!
 * \brief Marks region used by double buffer write
 */
constexpr const char* double_buffer_write = "double_buffer_write";
/*! \brief Mark realization for rolling buffer optimization */
constexpr const char* rolling_buffer_scope = "rolling_buffer_scope";
/*! \brief Mark of scan update scope */
constexpr const char* scan_update_scope = "scan_update_scope";
/*! \brief Mark of scan init scope */
constexpr const char* scan_init_scope = "scan_init_scope";
/*!
 * \brief Mark alignment of buffer dimension
 *  stmt.node is Tensor
 *  stmt.value is tvm_tuple(dim, align, offset)
 *  This gives a hint to require the stride of dim to be k * align + offset.
 */
constexpr const char* buffer_dim_align = "buffer_dim_align";
/*! \brief Mark stores/loads with their bounds. */
constexpr const char* buffer_bound = "buffer_bound";
/*!
 * \brief Bind the buffer specification to the region of the op
 *  When this scope occurs, the stmt.node is an Array<NodeRef> = [buffer, tensor]
 *  stmt.value is a tvm_tuple(min0, extent0, min1, extent1, ...).
 *  The scope represents that we need to bind the storage region of tensor to buffer.
 *  This will affect replacement of some variables inside the scope that
 *  correspond to fields of buffer to be the actual expressions of tensor during
 *  the storage flattening phase.
 */
constexpr const char* buffer_bind_scope = "buffer_bind_scope";
// Pipeline related attributes
/*! \brief channel read scope */
constexpr const char* channel_read_scope = "channel_read_scope";
/*! \brief Advance step of channel after end of scope */
constexpr const char* channel_read_advance = "channel_read_advance";
/*! \brief channel write scope */
constexpr const char* channel_write_scope = "channel_write_scope";
/*! \brief Advance step of channel after end of scope */
constexpr const char* channel_write_advance = "channel_write_advance";
/*! \brief pipeline stage scope, implies always execution */
constexpr const char* pipeline_stage_scope = "pipeline_stage_scope";
/*! \brief pipeline execution scope, implies the scope can be pipelined. */
constexpr const char* pipeline_exec_scope = "pipeline_exec_scope";
/*!
 * \brief Mark that it is in the device scope.
 */
constexpr const char* device_scope = "device_scope";
/*!
 * \brief Mark that the attached statement runs asynchronously.
 */
constexpr const char* async_scope = "async_scope";
/*!
 * \brief Annotations for invoking and synchronizing asynchronous operations.
 *  Synchronization is done in terms of a "queue": it is an abstract entity associated
 *  with each asynchronous unit, and it tracks invocations and completions of asynchronous
 *  operations in FIFO order.
 *
 *  Similarly to the PTX instructions commit_group and wait_group, these annotations express
 *  synchronization by "counting":
 *
 *  async_commit_queue(i): Group one or more invocations of async operations in the given scope,
 *  and "commit" (or push) them to the queue i. A group of operations committed together is
 *  awaited as one chunk. Groups committed to the same queue complete in FIFO order.
 *
 *  async_wait_queue(i, N): Block until only the N most recent committed groups are still in-flight
 *  at the queue i. N does not have to be a constant, but some backends may require a constant
 *  count.
 */
constexpr const char* async_commit_queue_scope = "async_commit_queue_scope";
constexpr const char* async_wait_queue_scope = "async_wait_queue_scope";
constexpr const char* async_wait_inflight_count = "async_wait_inflight_count";
/*!
 * \brief Mark the shape of the TensorCore fragment
 */
constexpr const char* fragment_shape = "fragment_shape";
/*!
 * \brief Mark the layout of the TensorCore fragment
 */
constexpr const char* fragment_layout = "fragment_layout";
/*!
 * \brief Mark that the kernel is hand threaded and doesn't need syncs inserted
 */
constexpr const char* hand_threaded = "hand_threaded";
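/*!
 * \brief Editor's sketch (not part of the original header) of the async queue
 *  annotations documented above, written with nested AttrStmts. The exact
 *  nesting is normally produced by the software-pipelining pass; `producer`
 *  and `consumer` are assumed Stmts.
 *
 * \code
 * PrimExpr zero = 0;
 * // Commit the async operations in `producer` to queue 0.
 * Stmt commit = AttrStmt(zero, attr::async_commit_queue_scope, 0, producer);
 * // Before `consumer`, wait until at most 2 committed groups are in flight.
 * Stmt wait = AttrStmt(zero, attr::async_wait_queue_scope, 0,
 *                      AttrStmt(zero, attr::async_wait_inflight_count, 2, consumer));
 * \endcode
 */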
/*!
 * \brief Mark whether the script-completer needs to fill in missing access regions
 *  during script parsing.
 * \note The result should be an integer mask with range [0, 4):
 *       if (mask & 1) the read region should be detected,
 *       if (mask & 2) the write region should be detected.
 */
constexpr const char* script_parsing_detect_access = "tir.script_parsing_detect_access";
/*!
 * \brief Mark that the loop should be partitioned.
 */
constexpr const char* pragma_loop_partition_hint = "pragma_loop_partition_hint";
/*! \brief Mark the stage of a statement in the software pipeline */
constexpr const char* software_pipeline_stage = "software_pipeline_stage";
/*! \brief Mark the order of a statement in the software pipeline */
constexpr const char* software_pipeline_order = "software_pipeline_order";
/*! \brief List stages in the software pipeline that should run asynchronously
 * \note All statements in the provided stages are assumed to have asynchronous
 *       semantics (e.g. CUDA async global to shared memory copy).
 */
constexpr const char* software_pipeline_async_stages = "software_pipeline_async_stages";
/*! \brief Mark the buffers that are accessed as constants and whose layout can be transformed. */
constexpr const char* layout_free_buffers = "layout_free_buffers";
/*! \brief Mark that a local stage for shared memory access should be added. */
constexpr const char* manifest_shared_memory_local_stage =
    "tir.manifest_shared_memory_local_stage";
/*! \brief Mark the tiling structure of blocks that are applied by rule Multi-Level-Tiling */
constexpr const char* meta_schedule_tiling_structure = "meta_schedule.tiling_structure";
/*!
 * \brief Mark that the loop should be further split and bound to environment threads to enable
 *  cooperative fetching.
 */
constexpr const char* meta_schedule_cooperative_fetch = "meta_schedule.cooperative_fetch";
/*! \brief The allowed range of thread extent in thread bindings */
constexpr const char* meta_schedule_thread_extent_low_inclusive =
    "meta_schedule.thread_extent_low_inclusive";
/*! \brief The allowed range of thread extent in thread bindings */
constexpr const char* meta_schedule_thread_extent_high_inclusive =
    "meta_schedule.thread_extent_high_inclusive";
/*! \brief Mark the block whose producer needs to be applied by rule Random-Compute-Location */
constexpr const char* meta_schedule_random_compute_producer =
    "meta_schedule.random_compute_producer";
/*! \brief Mark auto-parallel setting on the block. */
constexpr const char* meta_schedule_parallel = "meta_schedule.parallel";
/*! \brief Mark auto-vectorize setting on the block. */
constexpr const char* meta_schedule_vectorize = "meta_schedule.vectorize";
/*! \brief Mark auto-unroll setting on the block. */
constexpr const char* meta_schedule_unroll_explicit = "meta_schedule.unroll_explicit";
/*! \brief Mark auto-unroll setting on the block. */
constexpr const char* meta_schedule_unroll_implicit = "meta_schedule.unroll_implicit";
/*! \brief Mark that a block should be further rewritten using tensorization. */
constexpr const char* meta_schedule_auto_tensorize = "meta_schedule.auto_tensorize";
/*! \brief Mark that a block is a preprocessor block for layout rewrite. */
constexpr const char* meta_schedule_layout_rewrite_preproc =
    "meta_schedule.layout_rewrite_preproc";
/*!
 * \brief Mark that the init statement of a block should be further rewritten using tensorization.
 */
constexpr const char* meta_schedule_auto_tensorize_init = "meta_schedule.auto_tensorize_init";
/*!
 * \brief Mark that a block is executed by a warp. This implies the extent of threadIdx.x is
 *  warp size.
 */
constexpr const char* warp_execution = "warp_execution";
/*!
 * \brief Check if attr_key is a pragma key extension
 * \param attr_key The attr key to be compared
 * \return true if it is a pragma key
 */
inline bool IsPragmaKey(const std::string& attr_key) {
  return attr_key.compare(0, 7, "pragma_") == 0;
}

}  // namespace attr

/*!
 * \brief Create a type annotation expression
 * \param dtype The data type
 * \param span The location of this object in the source code.
 * \return An expression with the given dtype.
 */
TVM_DLL PrimExpr TypeAnnotation(DataType dtype, Span span = Span());

// Overload printing of ForKind.
TVM_DLL std::ostream& operator<<(std::ostream& os, ForKind kind);

// inline implementations
inline const char* ForKind2String(ForKind t) {
  switch (t) {
    case ForKind::kSerial:
      return "serial";
    case ForKind::kParallel:
      return "parallel";
    case ForKind::kVectorized:
      return "vectorized";
    case ForKind::kUnrolled:
      return "unroll";
    case ForKind::kThreadBinding:
      return "thread_binding";
  }
  LOG(FATAL) << "Unknown ForKind: " << t;
  return "Unknown";
}

}  // namespace tir
}  // namespace tvm
#endif  // TVM_TIR_STMT_H_
https://github.com/zk-ml/tachikoma
include/tvm/tir/stmt_functor.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
/*!
 * \file tvm/tir/stmt_functor.h
 *
 * \brief Functors for TIR stmts and
 *  utility functions to call common functors.
 */
#ifndef TVM_TIR_STMT_FUNCTOR_H_
#define TVM_TIR_STMT_FUNCTOR_H_

#include <tvm/node/functor.h>
#include <tvm/tir/expr.h>
#include <tvm/tir/expr_functor.h>
#include <tvm/tir/function.h>
#include <tvm/tir/stmt.h>

#include <unordered_map>
#include <utility>

namespace tvm {
namespace tir {
/*!
 * \brief Same as ExprFunctor except it is applied on statements
 * \tparam FType The function signature.
 * \sa ExprFunctor
 */
template <typename FType>
class StmtFunctor;

#define STMT_FUNCTOR_DEFAULT \
  { return VisitStmtDefault_(op, std::forward<Args>(args)...); }

#define IR_STMT_FUNCTOR_DISPATCH(OP)                                                       \
  vtable.template set_dispatch<OP>([](const ObjectRef& n, TSelf* self, Args... args) {     \
    return self->VisitStmt_(static_cast<const OP*>(n.get()), std::forward<Args>(args)...); \
  });

template <typename R, typename... Args>
class StmtFunctor<R(const Stmt& n, Args... args)> {
 private:
  using TSelf = StmtFunctor<R(const Stmt& n, Args... args)>;
  using FType = NodeFunctor<R(const ObjectRef& n, TSelf* self, Args... args)>;

 public:
  /*! \brief the result type of this functor */
  using result_type = R;
  /*! \brief virtual destructor */
  virtual ~StmtFunctor() {}
  /*!
   * \brief Same as call.
   * \param n The stmt node.
   * \param args Additional arguments.
   * \return The result of the call
   */
  R operator()(const Stmt& n, Args... args) { return VisitStmt(n, std::forward<Args>(args)...); }
  /*!
   * \brief The functor call.
   * \param n The stmt node.
   * \param args Additional arguments.
   * \return The result of the call
   */
  virtual R VisitStmt(const Stmt& n, Args... args) {
    static FType vtable = InitVTable();
    return vtable(n, this, std::forward<Args>(args)...);
  }
  // Functions that can be overridden by subclasses
  virtual R VisitStmt_(const LetStmtNode* op, Args... args) STMT_FUNCTOR_DEFAULT;
  virtual R VisitStmt_(const AttrStmtNode* op, Args... args) STMT_FUNCTOR_DEFAULT;
  virtual R VisitStmt_(const IfThenElseNode* op, Args... args) STMT_FUNCTOR_DEFAULT;
  virtual R VisitStmt_(const ForNode* op, Args... args) STMT_FUNCTOR_DEFAULT;
  virtual R VisitStmt_(const WhileNode* op, Args... args) STMT_FUNCTOR_DEFAULT;
  virtual R VisitStmt_(const AllocateNode* op, Args... args) STMT_FUNCTOR_DEFAULT;
  virtual R VisitStmt_(const AllocateConstNode* op, Args... args) STMT_FUNCTOR_DEFAULT;
  virtual R VisitStmt_(const DeclBufferNode* op, Args... args) STMT_FUNCTOR_DEFAULT;
  virtual R VisitStmt_(const StoreNode* op, Args... args) STMT_FUNCTOR_DEFAULT;
  virtual R VisitStmt_(const BufferStoreNode* op, Args... args) STMT_FUNCTOR_DEFAULT;
  virtual R VisitStmt_(const BufferRealizeNode* op, Args... args) STMT_FUNCTOR_DEFAULT;
  virtual R VisitStmt_(const AssertStmtNode* op, Args... args) STMT_FUNCTOR_DEFAULT;
  virtual R VisitStmt_(const ProducerStoreNode* op, Args... args) STMT_FUNCTOR_DEFAULT;
  virtual R VisitStmt_(const ProducerRealizeNode* op, Args... args) STMT_FUNCTOR_DEFAULT;
  virtual R VisitStmt_(const PrefetchNode* op, Args... args) STMT_FUNCTOR_DEFAULT;
  virtual R VisitStmt_(const SeqStmtNode* op, Args... args) STMT_FUNCTOR_DEFAULT;
  virtual R VisitStmt_(const EvaluateNode* op, Args... args) STMT_FUNCTOR_DEFAULT;
  virtual R VisitStmt_(const BlockNode* op, Args... args) STMT_FUNCTOR_DEFAULT;
  virtual R VisitStmt_(const BlockRealizeNode* op, Args... args) STMT_FUNCTOR_DEFAULT;
  virtual R VisitStmtDefault_(const Object* op, Args...) {
    LOG(FATAL) << "Do not have a default for " << op->GetTypeKey();
    return R();
  }

 private:
  // initialize the vtable.
  static FType InitVTable() {
    FType vtable;
    IR_STMT_FUNCTOR_DISPATCH(LetStmtNode);
    IR_STMT_FUNCTOR_DISPATCH(AttrStmtNode);
    IR_STMT_FUNCTOR_DISPATCH(IfThenElseNode);
    IR_STMT_FUNCTOR_DISPATCH(ForNode);
    IR_STMT_FUNCTOR_DISPATCH(WhileNode);
    IR_STMT_FUNCTOR_DISPATCH(AllocateNode);
    IR_STMT_FUNCTOR_DISPATCH(AllocateConstNode);
    IR_STMT_FUNCTOR_DISPATCH(DeclBufferNode);
    IR_STMT_FUNCTOR_DISPATCH(StoreNode);
    IR_STMT_FUNCTOR_DISPATCH(AssertStmtNode);
    IR_STMT_FUNCTOR_DISPATCH(ProducerStoreNode);
    IR_STMT_FUNCTOR_DISPATCH(ProducerRealizeNode);
    IR_STMT_FUNCTOR_DISPATCH(PrefetchNode);
    IR_STMT_FUNCTOR_DISPATCH(SeqStmtNode);
    IR_STMT_FUNCTOR_DISPATCH(EvaluateNode);
    IR_STMT_FUNCTOR_DISPATCH(BufferStoreNode);
    IR_STMT_FUNCTOR_DISPATCH(BufferRealizeNode);
    IR_STMT_FUNCTOR_DISPATCH(BlockNode);
    IR_STMT_FUNCTOR_DISPATCH(BlockRealizeNode);
    return vtable;
  }
};

#undef IR_STMT_FUNCTOR_DISPATCH
#undef STMT_FUNCTOR_DEFAULT

/*!
 * \brief StmtVisitor.
 */
class TVM_DLL StmtVisitor : protected StmtFunctor<void(const Stmt&)> {
 public:
  using StmtFunctor::operator();

 protected:
  using StmtFunctor::VisitStmt;
  /*!
   * \brief Visitor to Exprs, can be overridden
   *        to do recursive changes to Exprs.
   * \note A common pattern is to call ExprVisitor here,
   *       or have a class sub-class both StmtVisitor and ExprVisitor
   *       and redirect Visit to ExprVisitor::VisitExpr(Expr)
   */
  virtual void VisitExpr(const PrimExpr& e) {}
  // statement visitor
  void VisitStmt_(const AttrStmtNode* op) override;
  void VisitStmt_(const IfThenElseNode* op) override;
  void VisitStmt_(const LetStmtNode* op) override;
  void VisitStmt_(const ForNode* op) override;
  void VisitStmt_(const WhileNode* op) override;
  void VisitStmt_(const AllocateNode* op) override;
  void VisitStmt_(const AllocateConstNode* op) override;
  void VisitStmt_(const DeclBufferNode* op) override;
  void VisitStmt_(const StoreNode* op) override;
  void VisitStmt_(const BufferStoreNode* op) override;
  void VisitStmt_(const BufferRealizeNode* op) override;
  void VisitStmt_(const AssertStmtNode* op) override;
  void VisitStmt_(const ProducerStoreNode* op) override;
  void VisitStmt_(const ProducerRealizeNode* op) override;
  void VisitStmt_(const PrefetchNode* op) override;
  void VisitStmt_(const SeqStmtNode* op) override;
  void VisitStmt_(const EvaluateNode* op) override;
  void VisitStmt_(const BlockNode* op) override;
  void VisitStmt_(const BlockRealizeNode* op) override;
};
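/*!
 * \brief Editor's sketch (not part of the original header): a visitor that
 *  counts For nodes in a statement, built on StmtVisitor above.
 *
 * \code
 * class ForCounter : public StmtVisitor {
 *  public:
 *   int count = 0;
 *
 *  private:
 *   void VisitStmt_(const ForNode* op) final {
 *     ++count;
 *     StmtVisitor::VisitStmt_(op);  // keep recursing into the loop body
 *   }
 * };
 *
 * ForCounter counter;
 * counter(stmt);  // `stmt` is an assumed Stmt
 * \endcode
 */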
/*!
 * \brief StmtMutator that mutates the statements.
 */
class TVM_DLL StmtMutator : protected StmtFunctor<Stmt(const Stmt&)> {
 public:
  /*!
   * \brief Mutate stmt.
   * \param stmt The input statement to be mutated.
   * \return The result of the call
   * \note It is important that stmt is passed by value
   *       so that copy-on-write can be triggered correctly:
   *       call mutator(std::move(stmt)), or rely on copy elision.
   */
  Stmt operator()(Stmt stmt) {
    allow_copy_on_write_ = true;
    return VisitStmt(stmt);
  }

 protected:
  // We perform copy-on-write optimizations on the StmtMutator
  // so that a unique copy of the parent can be mutated in place
  // when some of its children changed.
  // We only do such optimization for Stmt nests (instead of Exprs) for now,
  // as a Stmt's parent state is more likely to remain unchanged when one of
  // its child blocks changes.
  /*!
   * \brief Internal state to indicate whether copy on write is enabled.
   *        COW is enabled iff all the parents of the node are unique.
   */
  bool allow_copy_on_write_{false};
  /*!
   * \brief Perform copy on write on node.
   *
   *  If CopyOnWrite is allowed, directly return
   *  a strong reference to the node container.
   *  Otherwise, return a copy of the node.
   *
   * \return The result object pointer.
   */
  template <typename TNode>
  ObjectPtr<TNode> CopyOnWrite(const TNode* node) {
    static_assert(std::is_base_of<StmtNode, TNode>::value,
                  "StmtMutator:: CopyOnWrite requires us to track uniqueness of all parent "
                  "nodes during the recursion. Because the child classes do not necessarily "
                  "check the Array, Expr and other structures during the visit, it is only safe to "
                  "call this function with StmtNodes for now. "
                  "Please create a new node directly in other cases.");
    if (allow_copy_on_write_) {
      // return the old node.
      return runtime::GetObjectPtr<TNode>(const_cast<TNode*>(node));
    } else {
      // Make a new copy of the node.
      // need to rely on the default copy constructor
      return runtime::make_object<TNode>(*node);
    }
  }
  /*!
   * \brief Internal mutator that everyone calls.
   * \note To override mutate's behavior, override VisitExpr instead.
   * \param stmt The input stmt.
   * \return The mutated results.
   */
  Stmt VisitStmt(const Stmt& stmt) override {
    if (allow_copy_on_write_ && !stmt.unique()) {
      allow_copy_on_write_ = false;
      Stmt ret = StmtFunctor::VisitStmt(stmt);
      allow_copy_on_write_ = true;
      return ret;
    } else {
      return StmtFunctor::VisitStmt(stmt);
    }
  }
  /*!
   * \brief Visitor to Exprs, can be overridden
   *        to do recursive changes to Exprs.
   * \note A common pattern is to call ExprMutator here,
   *       or have a class sub-class both StmtMutator and ExprMutator
   *       and redirect Mutate to ExprMutator::Mutate(Expr)
   */
  virtual PrimExpr VisitExpr(const PrimExpr& e) { return e; }
  // statement visitor
  Stmt VisitStmt_(const AttrStmtNode* op) override;
  Stmt VisitStmt_(const IfThenElseNode* op) override;
  Stmt VisitStmt_(const LetStmtNode* op) override;
  Stmt VisitStmt_(const ForNode* op) override;
  Stmt VisitStmt_(const WhileNode* op) override;
  Stmt VisitStmt_(const AllocateNode* op) override;
  Stmt VisitStmt_(const AllocateConstNode* op) override;
  Stmt VisitStmt_(const DeclBufferNode* op) override;
  Stmt VisitStmt_(const StoreNode* op) override;
  Stmt VisitStmt_(const BufferStoreNode* op) override;
  Stmt VisitStmt_(const BufferRealizeNode* op) override;
  Stmt VisitStmt_(const AssertStmtNode* op) override;
  Stmt VisitStmt_(const ProducerStoreNode* op) override;
  Stmt VisitStmt_(const ProducerRealizeNode* op) override;
  Stmt VisitStmt_(const PrefetchNode* op) override;
  Stmt VisitStmt_(const SeqStmtNode* op) override;
  Stmt VisitStmt_(const EvaluateNode* op) override;
  Stmt VisitStmt_(const BlockNode* op) override;
  Stmt VisitStmt_(const BlockRealizeNode* op) override;
  /*!
   * \brief Alternative advance method for SeqStmtNode.
   *
   *  This function can be called when a child class overrides
   *  VisitStmt_(const SeqStmtNode*) to introduce
   *  special behavior when visiting.
   *
   * \param op The sequence.
   * \param flatten_before_visit Whether to flatten the sequence before visit.
   * \param fmutate The mutate function, can be nullptr, which defaults to Visit.
   * \return The mutated result.
   */
  Stmt VisitSeqStmt_(const SeqStmtNode* op, bool flatten_before_visit,
                     std::function<Stmt(const Stmt&)> fmutate = nullptr);
  // internal helper.
  class Internal;
};

/*!
 * \brief Visitor that recursively visits stmts and exprs on them.
 */
class StmtExprVisitor : public StmtVisitor, public ExprVisitor {
 public:
  using StmtVisitor::operator();
  using ExprVisitor::operator();

 protected:
  using ExprVisitor::VisitExpr;
  using StmtVisitor::VisitStmt;

  void VisitExpr(const PrimExpr& e) override { return ExprVisitor::VisitExpr(e); }
};

/*!
 * \brief Mutator that recursively mutates stmts and exprs on them.
 */
class StmtExprMutator : public StmtMutator, public ExprMutator {
 public:
  using StmtMutator::operator();
  using ExprMutator::operator();

 protected:
  using ExprMutator::VisitExpr;
  using StmtMutator::VisitExpr;

  PrimExpr VisitExpr(const PrimExpr& e) override { return ExprMutator::VisitExpr(e); }
};

/*!
 * \brief Recursively visit the IR nodes in post-DFS order, and transform them.
 *
 * \param stmt The IR to be transformed.
 * \param preorder The function called before recursive mutation.
 *        If preorder returns None, then the transform will proceed to the recursive call.
 *        If preorder returns a non-None Stmt/Expr, the transformer will simply return it and
 *        won't do further recursion.
 * \param postorder The function called after recursive mutation.
 *        The recursive mutation result is passed to postorder for further mutation.
 * \param only_enable List of runtime::String.
 *        If it is null, all IRNodes will call preorder/postorder.
 *        If it is not null, preorder/postorder will only be called
 *        when the IRNode's type key is in the list.
 */
TVM_DLL Stmt IRTransform(Stmt stmt, const runtime::PackedFunc& preorder,
                         const runtime::PackedFunc& postorder,
                         Optional<Array<String>> only_enable = NullOpt);

/*!
 * \brief Recursively visit the IR nodes in post-DFS order, applying fvisit.
 *        Each node is guaranteed to be visited only once.
 * \param node The IR to be visited.
 * \param fvisit The visitor function to be applied.
 */
TVM_DLL void PostOrderVisit(const ObjectRef& node, std::function<void(const ObjectRef&)> fvisit);

/*!
 * \brief Substitute the var specified by vmap.
 * \param stmt The source statement to be substituted
 * \param vmap returns a new value if re-mapping is needed, otherwise returns nullptr.
 * \return The converted form.
 */
TVM_DLL Stmt Substitute(Stmt stmt, std::function<Optional<PrimExpr>(const Var& var)> vmap);

/*!
 * \brief Substitute the var specified by vmap.
 * \param expr The source expression to be substituted
 * \param vmap returns a new value if re-mapping is needed, otherwise returns nullptr.
 * \return The result.
 */
TVM_DLL PrimExpr Substitute(PrimExpr expr, std::function<Optional<PrimExpr>(const Var& var)> vmap);

/*!
 * \brief Substitute the var specified by vmap.
 * \param region The object whose vars are to be substituted
 * \param vmap The map of new values.
 * \return The result.
 */
TVM_DLL Array<Range> Substitute(const Array<Range>& region, const Map<Var, PrimExpr>& vmap);
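/*!
 * \brief Editor's sketch (not part of the original header): replacing a Var
 *  through the map-based sugar declared below. `x` is an assumed Var occurring
 *  in `stmt`.
 *
 * \code
 * Map<Var, PrimExpr> vmap;
 * vmap.Set(x, x + 1);
 * Stmt rewritten = Substitute(stmt, vmap);
 * \endcode
 */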
/*!
 * \brief Sugar for substitute via a given map.
 * \param input The input to be updated.
 * \param value_map The map of new values.
 * \return The result.
 * \tparam T the input type, can be PrimExpr or Stmt.
 */
template <typename T>
inline auto Substitute(T input, const Map<Var, PrimExpr>& value_map) {
  auto vmap = [&](const Var& var) -> Optional<PrimExpr> {
    auto it = value_map.find(var);
    if (it != value_map.end()) return (*it).second;
    return Optional<PrimExpr>(nullptr);
  };
  return Substitute(std::move(input), vmap);
}

/*!
 * \brief Sugar for substitute via a given map.
 * \param input The input to be updated.
 * \param value_map The map of new values.
 * \return The result.
 * \tparam T the input type, can be PrimExpr or Stmt.
 */
template <typename T>
inline T Substitute(T input, const std::unordered_map<const VarNode*, PrimExpr>& value_map) {
  auto vmap = [&](const Var& var) -> Optional<PrimExpr> {
    auto it = value_map.find(var.get());
    if (it != value_map.end()) return (*it).second;
    return Optional<PrimExpr>(nullptr);
  };
  return Substitute(std::move(input), vmap);
}

/*!
 * \brief Substitute the var specified by vmap and legalize data types after substitution.
 * \param stmt The source statement to be substituted
 * \param vmap returns a new value if re-mapping is needed, otherwise returns nullptr.
 *
 * Unlike `Substitute`, this allows the substitution to change the data type of the expression.
 *
 * \sa Substitute
 * \return The result.
 */
TVM_DLL Stmt SubstituteWithDataTypeLegalization(Stmt stmt,
                                                std::function<Optional<PrimExpr>(const Var&)> vmap);

/*!
 * \brief Substitute the var specified by vmap and legalize data types after substitution.
 * \param expr The source expression to be substituted
 * \param vmap returns a new value if re-mapping is needed, otherwise returns nullptr.
 *
 * Unlike `Substitute`, this allows the substitution to change the data type of the expression.
 *
 * \sa Substitute
 * \return The result.
 */
TVM_DLL PrimExpr SubstituteWithDataTypeLegalization(
    PrimExpr expr, std::function<Optional<PrimExpr>(const Var&)> vmap);

/*!
 * \brief Recursively visit the IR nodes in pre-DFS order, applying fvisit.
 *        If fvisit returns false, it won't visit the children of the node.
 * \param stmt_or_expr The IR to be visited.
 * \param fvisit The visitor function to be applied. If fvisit returns false, it won't visit the
 *        children of the node
 */
TVM_DLL void PreOrderVisit(const ObjectRef& stmt_or_expr,
                           const std::function<bool(const ObjectRef&)>& fvisit);

/*!
 * \brief Renew the definition nodes for a TIR, including Var, Buffer and IterVar.
 *        This pass works as a simple DeepCopy to duplicate a function with different Vars and
 *        Buffers but the same behavior.
 * \param func The input PrimFunc.
 * \return The renewed func.
 */
TVM_DLL PrimFunc RenewDefs(const PrimFunc& func);

/*!
 * \brief Check if the statement contains the specified node type.
 *
 * This utility potentially walks the entire statement, and should
 * therefore not be used if it could otherwise be merged with another
 * pass.
 *
 * \param stmt The statement to be searched
 * \return Whether stmt contains Node
 */
template <typename Node, typename = std::enable_if_t<std::is_base_of_v<StmtNode, Node>>>
bool ContainsNode(const Stmt& stmt) {
  struct Visitor : StmtVisitor {
    // Early bail-out, if we already found the node.
    void VisitStmt(const Stmt& stmt) final {
      if (contains_node) {
        return;
      }
      StmtVisitor::VisitStmt(stmt);
    }

    void VisitStmt_(const Node* block) override { contains_node = true; }

    bool contains_node{false};
  };

  Visitor visitor;
  visitor(stmt);
  return visitor.contains_node;
}
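/*!
 * \brief Editor's sketch (not part of the original header): guarding a pass
 *  with ContainsNode. `stmt` is an assumed Stmt.
 *
 * \code
 * if (ContainsNode<WhileNode>(stmt)) {
 *   LOG(FATAL) << "While loops are not supported by this transformation";
 * }
 * \endcode
 */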
/*!
 * \brief Legalize the data types of expressions to make sure they are consistent with other
 * parts of the program.
 *
 * It enforces the following rules:
 * - The data type of the index variable in a loop must be consistent with the data type of the loop
 *   bounds.
 * - The data type of the binary and ternary expressions must be consistent with the data types of
 *   each of their operands.
 * - The data type of the bounds and binding values of block iter vars must be consistent with the
 *   data type of the block iter vars.
 *
 * Usually we enforce the consistency of data types when constructing the IR nodes. However, such
 * inconsistency may happen as a result of IR mutation in some passes. This class can be used as
 * the base class of such passes to ensure the consistency of data types.
 */
class DataTypeLegalizer : public StmtExprMutator {
 protected:
  Stmt VisitStmt_(const ForNode* op) override;
  Stmt VisitStmt_(const AttrStmtNode* op) override;
  Stmt VisitStmt_(const BlockRealizeNode* op) override;
  Stmt VisitStmt_(const BlockNode* op) override;
  PrimExpr VisitExpr_(const SelectNode* op) override;
  PrimExpr VisitExpr_(const RampNode* op) override;
  PrimExpr VisitExpr_(const AddNode* op) override;
  PrimExpr VisitExpr_(const SubNode* op) override;
  PrimExpr VisitExpr_(const MulNode* op) override;
  PrimExpr VisitExpr_(const DivNode* op) override;
  PrimExpr VisitExpr_(const ModNode* op) override;
  PrimExpr VisitExpr_(const FloorDivNode* op) override;
  PrimExpr VisitExpr_(const FloorModNode* op) override;
  PrimExpr VisitExpr_(const MinNode* op) override;
  PrimExpr VisitExpr_(const MaxNode* op) override;
  PrimExpr VisitExpr_(const EQNode* op) override;
  PrimExpr VisitExpr_(const NENode* op) override;
  PrimExpr VisitExpr_(const LTNode* op) override;
  PrimExpr VisitExpr_(const LENode* op) override;
  PrimExpr VisitExpr_(const GTNode* op) override;
  PrimExpr VisitExpr_(const GENode* op) override;
  PrimExpr VisitExpr_(const CallNode* op) override;
  using StmtExprMutator::VisitExpr_;
  using StmtExprMutator::VisitStmt_;

  // a map from IterVar before rewrite to that after rewrite,
  // ensures one old IterVar maps to exactly one new IterVar
  std::unordered_map<const IterVarNode*, IterVar> ivmap_;
};

}  // namespace tir
}  // namespace tvm
#endif  // TVM_TIR_STMT_FUNCTOR_H_
https://github.com/zk-ml/tachikoma
include/tvm/tir/transform.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
/*!
 * \file tvm/tir/transform.h
 * \brief TIR specific transformation passes.
 */
#ifndef TVM_TIR_TRANSFORM_H_
#define TVM_TIR_TRANSFORM_H_

#include <tvm/ir/transform.h>
#include <tvm/target/target.h>
#include <tvm/tir/expr.h>
#include <tvm/tir/function.h>

#include <string>
#include <vector>

namespace tvm {
namespace tir {
namespace transform {

using tvm::transform::Pass;
using tvm::transform::PassContext;
using tvm::transform::PassContextNode;
using tvm::transform::PassInfo;
using tvm::transform::PassInfoNode;
using tvm::transform::PassNode;
using tvm::transform::Sequential;

/*!
 * \brief Create a function pass that optimizes PrimFuncs.
 *
 * \param pass_func The packed function that contains the optimization.
 * \param opt_level The optimization level of the function pass.
 * \param name The name of the function pass.
 * \param required The list of the passes that the function pass is dependent on.
 *
 * \return The created function pass.
 */
TVM_DLL Pass CreatePrimFuncPass(
    const runtime::TypedPackedFunc<PrimFunc(PrimFunc, IRModule, PassContext)>& pass_func,
    int opt_level, String name, tvm::Array<String> required);

/*!
 * \brief Inject prefetch instructions into stmt.
 *
 * \return The pass.
 */
TVM_DLL Pass InjectPrefetch();

// TODO(tvm-team): consolidate configs to the PassContext
/*!
 * \brief Flatten the multi-dimensional read/write
 *        to single dimensional Load/Store
 *
 * \param cache_line_size The size of CPU cache line.
 * \param create_bound_attribute Whether to create bound attributes.
 *
 * \return The Pass
 */
TVM_DLL Pass StorageFlatten(int cache_line_size, bool create_bound_attribute = false);

/*!
 * \brief Inject copy intrinsics with optional pad.
 *
 * \param pragma_key The pragma key for hint of copy.
 * \param fintrin The function with signature
 *
 *   Stmt fintrin(Buffer src,
 *                Buffer dst,
 *                Array<Expr> pad_before,
 *                Array<Expr> pad_after,
 *                Expr pad_value)
 * \return The pass.
 */
TVM_DLL Pass InjectCopyIntrin(String pragma_key, runtime::PackedFunc fintrin);

/*!
 * \brief Detect and insert sync points to co-processor.
 *
 * \return The pass.
 */
TVM_DLL Pass CoProcSync();

/*!
 * \brief Lift common attrs with attr_key to outer scope.
 *
 * \param attr_key The attribute key to be checked.
 * \return The pass.
 */
TVM_DLL Pass LiftAttrScope(String attr_key);

/*!
 * \brief Partition loops in the stmt.
 *
 * \return The pass.
 */
TVM_DLL Pass LoopPartition();

/*!
 * \brief Lower vectorization loops.
 *
 * \param enable_vectorize Whether vectorization is enabled.
 *
 * \return The pass.
 */
TVM_DLL Pass VectorizeLoop(bool enable_vectorize = true);

/*!
 * \brief Inject virtual thread loops.
 *
 * \return The pass.
 */
TVM_DLL Pass InjectVirtualThread();

/*!
 * \brief Inject double buffer statements.
 *
 * \return The pass.
 */
TVM_DLL Pass InjectDoubleBuffer();
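/*!
 * \brief Editor's sketch (not part of the original header): composing the
 *  passes above with Sequential and applying them to an assumed IRModule `mod`.
 *
 * \code
 * Sequential seq({LoopPartition(), VectorizeLoop(), UnrollLoop()});
 * mod = seq(std::move(mod));
 * \endcode
 */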
/*!
 * \brief Rewrite storage allocation pattern.
 *        Moves the allocation to the outermost possible scope.
 *        It tries to share space between allocations to make
 *        a static allocation plan when possible.
 *
 * \return The pass.
 */
TVM_DLL Pass StorageRewrite();

/*!
 * \brief Unroll constant loops marked for unrolling.
 *        This pass also automatically attaches a pragma unroll tag to loops that meet the
 *        criteria.
 *
 * \return The pass.
 */
TVM_DLL Pass UnrollLoop();

/*!
 * \brief Remove No Op from the Stmt.
 *
 * \return The pass.
 */
TVM_DLL Pass RemoveNoOp();

/*!
 * \brief Detect and rewrite unsafe select that contains memory access.
 *
 * \return The pass.
 */
TVM_DLL Pass RewriteUnsafeSelect();

/*!
 * \brief Run arithmetic simplifications on the statements and expressions.
 *
 * \return The pass.
 */
TVM_DLL Pass Simplify();

/*!
 * \brief Instruments bound checkers.
 *
 * \return The pass.
 */
TVM_DLL Pass InstrumentBoundCheckers();

/*!
 * \brief Transform the high-level PrimFunc to a low-level version
 *        that can be used as an API function.
 *
 *  The main task of this function is to create code to:
 *  - Map the values in the api_args to Var that is required by body.
 *  - Insert assertions to check type/value of the passed arguments.
 *
 * \note
 *  The function signature has two cases
 *
 *  let num_packed_args = len(api_args);
 *
 *  if num_packed_args is zero:
 *     f()
 *
 *  if num_packed_args is not zero:
 *       f(TVMArg* packed_args, int* packed_arg_type_ids, int num_packed_args,
 *         api_arg_k, api_arg_k+1, ... api_arg_n,
 *         TVMValue* out_ret_val, int* out_ret_tcode)
 *
 *       where n == len(api_args), k == num_packed_args
 *
 * \return The pass.
 */
TVM_DLL Pass MakePackedAPI();

/*!
 * \brief Transform the high-level PrimFunc to a C signature that can be used
 *        to call the operator directly.
 *
 *  The main task of this function is to create code that maps the values in the
 *  api_args to Var that is required by body
 *
 * \return The pass.
 */
TVM_DLL Pass MakeUnpackedAPI();

/*!
 * \brief Remap the thread axis
 *
 *  This can be used to get an equivalent program which uses
 *  threadIdx.y in place of threadIdx.x by passing
 *  {"threadIdx.x": thread_axis("threadIdx.y")}
 *
 * \return The pass.
 */
TVM_DLL Pass RemapThreadAxis(Map<String, IterVar> axis_map);

/*!
 * \brief Lower custom datatypes.
 *
 * See tvm::datatypes::Registry for more information on adding custom datatypes.
 *
 * \return The pass.
 */
TVM_DLL Pass LowerCustomDatatypes();

/*!
 * \brief Decorate all the function's body as device function.
 *
 * \return The pass.
 */
TVM_DLL Pass DecorateDeviceScope();

/*!
 * \brief Split the function into a host function and device functions.
 *
 * \return The pass.
 */
TVM_DLL Pass SplitHostDevice();

/*!
 * \brief Skip assert stmt.
 *
 * \return The pass.
 */
TVM_DLL Pass SkipAssert();

/*!
 * \brief Insert sync between parallel read/write of shared buffers.
 *
 * \param storage_scope The storage scope considered.
 * \return The pass.
 */
TVM_DLL Pass ThreadSync(String storage_scope);

/*!
 * \brief Lower cross-thread allreduce.
 *
 * \return The pass.
 */
TVM_DLL Pass LowerThreadAllreduce();

/*!
 * \brief Infer the TensorCore fragment information using tensor intrinsics.
 *
 * \return The pass.
 */
TVM_DLL Pass InferFragment();

/*!
 * \brief This annotation is for nodes to be disabled for builtin lowering
 */
static constexpr const char* kDisableLowerTVMBuiltin = "disable_lower_builtin";

/*!
 * \brief Lower builtin intrinsics.
 * \return The pass.
 */
TVM_DLL Pass LowerTVMBuiltin();

/*!
 * \brief Lower the target-specific function intrinsics in each of the functions.
 *
 * \return The pass.
 */
TVM_DLL Pass LowerIntrin();
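/*!
 * \brief Editor's sketch (not part of the original header): running one of the
 *  passes above under an explicit PassContext, on an assumed IRModule `mod`.
 *
 * \code
 * PassContext ctx = PassContext::Create();
 * With<PassContext> scope(ctx);
 * mod = ThreadSync("shared")(std::move(mod));
 * \endcode
 */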
* * \return The pass. */ TVM_DLL Pass LowerIntrin(); /*! * \brief Lower warp memory access to low-level device related function calls. * \return The pass. */ TVM_DLL Pass LowerWarpMemory(); /*! * \brief Lower attached storage access information on device. * * \note Run this pass after all storage access analysis finish. * * \return The pass. */ TVM_DLL Pass LowerDeviceStorageAccessInfo(); /*! * \brief Combine context calls in the host function. * * \return The pass. */ TVM_DLL Pass CombineContextCall(); /*! * \brief Narrow down PrimExpr datatype in stmt to target_bits. * * \param target_bits The target bits * * \note Run this pass after storage flatten. * \return The pass. */ TVM_DLL Pass NarrowDataType(int target_bits); /*! * \brief Legalize bf16 typed Ops. Add a cast to fp32 * before Ops, then add a cast back to bf16. * \return The pass. */ TVM_DLL Pass BF16Legalize(); /*! * \brief Rewrite the pointer content type of arguments, * as well as Alloc internal to the function to use * the most frequently accessed type for load/store * to avoid pointer casting in backend when possible. * * \return The pass. */ TVM_DLL Pass PointerValueTypeRewrite(); /*! * \brief Hoist loop-invariant IfThenElse nodes to * outside the eligible loops. * * \return The pass. */ TVM_DLL Pass HoistIfThenElse(); /*! * \brief Hoist loop-invariant expression nodes to * outside the eligible loops. * * Can hoist conditionals used in IfThenElse statements and * expressions, bindings of variables in Let statements and * expressions, or boolean expressions, configurable to enable/disable * each hoistable type. * * \return The pass. */ TVM_DLL Pass HoistExpression(); /*! * \brief Lower cross-thread reduction from thread * bindings to intrinsic function calls. * \return The pass. */ TVM_DLL Pass LowerCrossThreadReduction(); /*! * \brief Lower block init stmt into IfThenElse stmts * \return The pass. */ TVM_DLL Pass LowerInitBlock(); /*! * \brief Locate the buffer allocation to the exact position (usually the LCA of the * buffer accesses). This pass will inject opaque block * with alloc_buffers at the allocation site. * \return The pass. */ TVM_DLL Pass PlanAndUpdateBufferAllocationLocation(); /*! * \brief Substitute all the block vars with the PrimExprs they are bound to, indicated by the * corresponding iter_values in BlockRealize, for opaque blocks by removing all * the iter_values in BlockRealize and iter_vars in Block. * \return The pass. */ TVM_DLL Pass ConvertBlocksToOpaque(); /*! * \brief Compact the buffer access region by removing the buffer regions that are not accessed, * i.e. narrowing the buffer shape and adjusting the access region if necessary. * * Before narrowing, `B` is a `[16, 16]` buffer, but only a skinny vector `B[i, 0:16]` is accessed. * * \code * * for i in range(0, 16): * with T.block(): * B = T.alloc_buffer(16, 16) * for j in range(0, 16): * B[i, j] = A[i, j] + 1 * for j in range(0, 16): * C[i, j] = B[i, j] + 1 * * \endcode * * This pass narrows the buffer shape and adjusts its accessed region accordingly. * In this particular case, because only a `1 * 16` vector of `B` is accessed, * the pass narrows `B` to shape `[1, 16]`, and changes the access to `B[i, j]` to `B[0, j]`. * * \code * * for i in range(0, 16): * with T.block(): * B = T.alloc_buffer(1, 16) * for j in range(0, 16): * B[0, j] = A[i, j] + 1 * for j in range(0, 16): * C[i, j] = B[0, j] + 1 * * \endcode * * \return The pass. */ TVM_DLL Pass CompactBufferAllocation(); /*!
* This pass legalizes packed calls by wrapping their arguments into TVMValues */ TVM_DLL Pass LegalizePackedCalls(); /*! * \brief Remove match buffers inside the block. Also, it will validate the binding. * \return The pass. */ TVM_DLL Pass LowerMatchBuffer(); /*! * \brief Remove the block to ensure that the TIR cannot be scheduled again. * \return The pass. */ TVM_DLL Pass LowerOpaqueBlock(); /*! * \brief Flatten the multi-dimensional BufferLoad and BufferStore to single dimensional * BufferLoad/BufferStore for TIR that does not contain opaque blocks. * \return The pass. */ TVM_DLL Pass FlattenBuffer(); /* * \brief Flatten the multi-dimensional read/write * to two dimensional texture Load/Store and realize * texture buffer allocations. * * \return The Pass */ TVM_DLL Pass TextureFlatten(); /* * \brief Lower VTCM allocations * * \return The Pass */ TVM_DLL Pass LowerVtcmAlloc(); /*! * \brief Lower Async TIR primitives to DMA copy and wait builtins */ TVM_DLL Pass LowerAsyncDMA(); /*! * \brief Implements a Common Subexpression Elimination (CSE) for TIR * which introduces let-in bindings for duplicated sub-expressions. * \param enable_cse_tir Whether common subexpression elimination is enabled. * \param identify_equiv_terms Whether equivalent terms should be identified. * \return The pass. */ TVM_DLL Pass CommonSubexprElimTIR(bool enable_cse_tir = true, bool identify_equiv_terms = false); /*! * \brief Unify all the thread bindings for "blockIdx.x/y/z", "threadIdx.x/y/z", and * "vthread.x/y/z". Before the unification, two vars that are bound to a thread axis (e.g., * "threadIdx.x") use different IterVars and variables in their AttrStmts. After the * unification, we use a consolidated IterVar and a variable for them. * \return The pass. * \note `vthread` is a legacy behavior that will be deprecated, though thread bindings of `vthread` * are still unified in this pass. Please use `vthread.x`, `vthread.y` and `vthread.z` * instead. */ TVM_DLL Pass UnifyThreadBinding(); /*! * A pass to merge multiple TIR-level dynamic shared memory allocations into one */ TVM_DLL Pass MergeDynamicSharedMemoryAllocations(); /*! * \brief This is a post-scheduling pass that converts all * Parallel For loops to Serial ones. It is run to reduce memory usage * and/or when the executor/backend * does not support parallel launch of For loops. * \return The pass. */ TVM_DLL Pass ConvertForLoopsToSerial(); /*! * \brief This is the unified static memory planner pass that will * plan for memory intra- and inter- PrimFuncs together. The pass * requires all the functions to be PrimFuncs, including the main one. * \return The pass. */ TVM_DLL Pass UnifiedStaticMemoryPlanner(); /*! * \brief This pass transforms annotated loops into pipelined ones where producers and consumers * are overlapped with the information provided in loop annotations, which enables optimization * techniques like prefetching and pipeline parallelism. * * The pipeline scope consists of the direct children of the annotated loop (ignoring BlockRealize, * Block, SeqStmt), and the number of children is denoted by `n` in the documentation. * * The following annotations are used to guide the loop transformation: * * 1) Loop annotation `software_pipeline_stage` defines the pipeline stage. * An array of `n` integers, and each element should be in range [0, max_stage], * where max_stage is the maximum (inclusive) stage. * 2) Loop annotation `software_pipeline_order` defines the pipeline order.
* An array of `n` integers, a permutation of [0, 1, ..., n - 1]; * 3) Block annotation `double_buffer_scope` controls certain buffer sizes to allow decoupling of * read/write dependency. It's an integer index of the write regions of the block. * * Every annotated loop is transformed into a loop with three blocks as its direct children: * * 1) Prologue block, where components whose stage is less than `max_stage` are executed; * * 2) Body block, where all the components are executed; * * 3) Epilogue block, where only components whose stage is greater than 0 will be executed. * The execution order is controlled by the annotation `software_pipeline_order`, * and thus could be different from the original order. * * Note: For nested software pipelines, the inner software pipeline will be generated first, * which may affect the number of the direct children of the outer loop. * In this case, the annotations for the outer software * pipeline should include the result of the inner software pipeline, * which is the three blocks as discussed above. * Example: * * Before this pass, the TIR is: * * \code{.py} * @T.prim_func * def before_transform(A: T.Buffer[(16, 16), "float32"], C: T.Buffer[(16, 16), "float32"]) -> None: * for tx in T.thread_binding(0, 16, thread="threadIdx.x"): * for i in T.serial(0, 16, * annotations={"software_pipeline_stage": [0, 1], * "software_pipeline_order": [0, 1]} * ): * with T.block(): * T.reads(A[tx, i]) * T.writes(C[tx, i]) * B = T.alloc_buffer((16, 1), dtype="float32", scope="shared") * with T.block("B"): * T.reads(A[tx, i]) * T.writes(B[tx, 0]) * B[tx, 0] = A[tx, i] * T.float32(2) * with T.block("C"): * T.reads(B[tx, 0]) * T.writes(C[tx, i]) * C[tx, i] = B[tx, 0] + T.float32(1) * \endcode * * The TIR above annotates the loop as a two-stage pipeline with no reordering. * After applying this pass, the TIR is transformed into: * * \code{.py} * @T.prim_func * def after_transform(A: T.Buffer[(16, 16), "float32"], C: T.Buffer[(16, 16), "float32"]) -> None: * for tx in T.thread_binding(0, 16, thread="threadIdx.x"): * with T.block(): * T.reads([A[tx, 0:16]]) * T.writes([C[tx, 0:16]]) * B = T.alloc_buffer([2, 16, 1], dtype="float32", scope="shared") * with T.block("prologue"): * T.reads([A[tx, 0]]) * T.writes([B[0, tx, 0]]) * B[0, tx, 0] = A[tx, 0] * T.float32(2) * with T.block("body"): * T.reads([A[tx, 1:16], B[0:2, tx, 0]]) * T.writes([B[0:2, tx, 0], C[tx, 0:15]]) * for i in T.serial(0, 15): * with T.block("B"): * T.reads([A[tx, i + 1]]) * T.writes([B[(i + 1) % 2, tx, 0]]) * B[(i + 1) % 2, tx, 0] = A[tx, i + 1] * T.float32(2) * with T.block("C"): * T.reads([B[i % 2, tx, 0]]) * T.writes([C[tx, i]]) * C[tx, i] = B[i % 2, tx, 0] + T.float32(1) * with T.block("epilogue"): * T.reads([B[1, tx, 0]]) * T.writes([C[tx, 15]]) * C[tx, 15] = B[1, tx, 0] + T.float32(1) * \endcode * * The original loop has two blocks, B and C, as its direct children. The loop annotations indicate * that block B has stage == 0, order == 0, block C has stage == 1, order == 1. Therefore, block B * should be executed in advance of block C by one iteration. The orders 0 and 1 specify the order * of blocks B and C inside the body block of the resulting TIR. * * \return The IR transform pass. */ TVM_DLL Pass InjectSoftwarePipeline(); TVM_DLL Pass BindParams(const Array<runtime::NDArray>& constants); /*! * \brief Pass to collect tir non-scalar constants into module's 'Constants' attribute. * * \return The pass. */ TVM_DLL Pass ExtractPrimFuncConstants(); /*!
* \brief Renormalize the split pattern from floordiv(floormod()) to floormod(floordiv()) * \return The pass. */ TVM_DLL Pass RenormalizeSplitPattern(); /*! * \brief Annotate a PrimFunc with a given target. * \return The pass. */ TVM_DLL Pass BindTarget(Target target); /*! * \brief Set a PrimFunc as the entry point if it is the only function in the IRModule. * \return The pass. */ TVM_DLL Pass AnnotateEntryFunc(); /*! * \brief Filter PrimFuncs with a given condition. * \return The pass. */ TVM_DLL Pass Filter(runtime::TypedPackedFunc<bool(PrimFunc)> fcond); /*! * \brief Pass to rewrite global to shared memory copy on CUDA with asynchronous copy. * \return The pass. */ TVM_DLL Pass InjectPTXAsyncCopy(); /*! * \brief Remove the weight layout rewrite block * \param skip_ndarray_rewrite If True, exact rewrite of NDArray, according to the given index map, * will be skipped. Only the shape of the NDArray is transformed correctly, and the content of * the destination array will be filled with random values. * * When this pass is called many times during MetaSchedule tuning, the raw data of NDArray, * before and after rewrite, does not matter. Since NDArray layout rewrite, using IndexMap's * MapNDArray, is currently slow, skipping the exact rewrite is sometimes necessary. * * \return The pass. */ TVM_DLL Pass RemoveWeightLayoutRewriteBlock(bool skip_ndarray_rewrite = false); /*! * \brief Add the explicit local stage for the shared memory access on GPU. * \return The pass. */ TVM_DLL Pass ManifestSharedMemoryLocalStage(); /*! * \brief Insert intrinsic calls to instrument function and loop level profiling. * \return The pass. */ TVM_DLL Pass InstrumentProfileIntrinsics(); } // namespace transform } // namespace tir } // namespace tvm #endif // TVM_TIR_TRANSFORM_H_
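To illustrate how the passes declared in this header compose, here is a minimal sketch assuming only the declarations above; the pass selection, the ordering, and the name LowerSketch are illustrative, not a prescribed lowering pipeline:

#include <tvm/ir/module.h>
#include <tvm/tir/transform.h>

// Compose a few TIR passes into one pipeline; the ordering is illustrative.
tvm::IRModule LowerSketch(tvm::IRModule mod) {
  using namespace tvm::tir::transform;
  tvm::transform::Pass seq =
      Sequential({Simplify(), RemoveNoOp(), UnrollLoop(), StorageRewrite()}, "sketch.lowering");
  // Applying a Pass to an IRModule returns the transformed module; options
  // such as opt_level are taken from the ambient PassContext.
  return seq(mod);
}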
https://github.com/zk-ml/tachikoma
include/tvm/tir/usmp/algo/greedy.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file include/tvm/tir/usmp/algo/greedy.h * \brief This header file contains helper methods used in greedy algorithms * for planning memory for USMP */ #pragma once #include <tvm/arith/analyzer.h> #include <tvm/runtime/device_api.h> #include <tvm/tir/builtin.h> #include <tvm/tir/function.h> #include <tvm/tir/stmt_functor.h> #include <tvm/tir/usmp/utils.h> #include <unordered_map> #include <vector> namespace tvm { namespace tir { namespace usmp { namespace algo { /*! * \brief This is the base class for greedy algorithms where the sorting * is specialized in the extended classes based on the greedy criteria. */ class GreedyBase { public: GreedyBase() {} /*! * \brief This function should be implemented by the extended classes to sort the BufferInfo * objects based on a criterion and then call PostSortAllocation. */ virtual Map<BufferInfo, PoolAllocation> PlanMemory(const Array<BufferInfo>& buffer_info_arr) = 0; protected: /*! * \brief Rounds up the offset to satisfy the alignment requirement */ size_t round_up_to_byte_alignment(const size_t& non_aligned_byte_offset, const int& byte_alignment); /*! * \brief A helper function to check whether an offset is valid given the constraints */ bool IsValidPlacement(const PoolInfo& candidate_pool, const size_t& next_offset, const size_t& size_bytes); /*! * \brief Selects a pool for placement in the given set of ordered pool candidates */ PoolInfo SelectPlacementPool( const BufferInfo& buf_info, const std::unordered_map<PoolInfo, size_t, ObjectPtrHash, ObjectPtrEqual>& pool_offsets); /*! * \brief This is the base allocation function that works on sorted BufferInfo objects based * on the greedy heuristic. The sorting algorithm has to be called before calling this. */ Map<BufferInfo, PoolAllocation> PostSortAllocation( const std::vector<BufferInfo>& buffer_info_vec); }; } // namespace algo } // namespace usmp } // namespace tir } // namespace tvm
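As a concrete illustration of this extension point, below is a minimal sketch of a greedy strategy built on GreedyBase; the class name GreedyBySizeSketch is hypothetical, and sorting by decreasing size is just one possible criterion:

#include <algorithm>
#include <vector>

#include <tvm/tir/usmp/algo/greedy.h>

namespace tvm {
namespace tir {
namespace usmp {
namespace algo {

// Hypothetical subclass: sort buffers by decreasing size, then delegate
// offset assignment to the shared PostSortAllocation logic.
class GreedyBySizeSketch : public GreedyBase {
 public:
  Map<BufferInfo, PoolAllocation> PlanMemory(const Array<BufferInfo>& buffer_info_arr) final {
    std::vector<BufferInfo> sorted(buffer_info_arr.begin(), buffer_info_arr.end());
    std::sort(sorted.begin(), sorted.end(), [](const BufferInfo& a, const BufferInfo& b) {
      return a->size_bytes->value > b->size_bytes->value;
    });
    return PostSortAllocation(sorted);
  }
};

}  // namespace algo
}  // namespace usmp
}  // namespace tir
}  // namespace tvm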
https://github.com/zk-ml/tachikoma
include/tvm/tir/usmp/algorithms.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tir/usmp/algorithms.h * \brief The memory planning algorithms for USMP */ #ifndef TVM_TIR_USMP_ALGORITHMS_H_ #define TVM_TIR_USMP_ALGORITHMS_H_ #include <tvm/tir/usmp/utils.h> namespace tvm { namespace tir { namespace usmp { namespace algo { /*! * \brief The Greedy-by-Size algorithm to plan memory * * This will perform a greedy algorithm in deciding the offsets * within provided Pools, using the size of the buffer. * * \return A Map of BufferInfo objects and their associated PoolAllocation */ Map<BufferInfo, PoolAllocation> GreedyBySize(const Array<BufferInfo>& buffer_info_arr, const Integer& memory_pressure); /*! * \brief The Greedy-by-Conflicts algorithm to plan memory * * This will perform a greedy algorithm in deciding the offsets * within provided Pools, using the number of liveness conflicts of the buffer. * * \return A Map of BufferInfo objects and their associated PoolAllocation */ Map<BufferInfo, PoolAllocation> GreedyByConflicts(const Array<BufferInfo>& buffer_info_arr, const Integer& memory_pressure); /*! * \brief The Hill-Climb algorithm to plan memory * * This will perform a hill-climbing algorithm in deciding the offsets * within provided Pools, utilizing a probabilistic approach to memory allocation. * Typically better than the greedy family, but quite slow due to the large * number of iterations. * * \return A Map of BufferInfo objects and their associated PoolAllocation */ Map<BufferInfo, PoolAllocation> HillClimb(const Array<BufferInfo>& buffer_info_arr, const Integer& memory_pressure); } // namespace algo } // namespace usmp } // namespace tir } // namespace tvm #endif // TVM_TIR_USMP_ALGORITHMS_H_
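A short sketch of how one of these planners might be invoked, assuming a BufferInfoAnalysis produced by ExtractBufferInfo (see analysis.h) and the ConvertToArrayOfBufferInfo helper from utils.h; the wrapper name PlanGreedy is illustrative:

#include <tvm/tir/usmp/algorithms.h>
#include <tvm/tir/usmp/utils.h>

// Flatten the analysis result and hand it to the Greedy-by-Size planner.
tvm::Map<tvm::tir::usmp::BufferInfo, tvm::tir::usmp::PoolAllocation> PlanGreedy(
    const tvm::tir::usmp::BufferInfoAnalysis& analysis) {
  using namespace tvm;
  using namespace tvm::tir::usmp;
  Array<BufferInfo> infos = ConvertToArrayOfBufferInfo(analysis->buffer_info_stmts);
  return algo::GreedyBySize(infos, analysis->memory_pressure);
}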
https://github.com/zk-ml/tachikoma
include/tvm/tir/usmp/analysis.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tir/usmp/analysis.h * \brief The analysis passes for TIR-based Unified Static Memory Planner */ #ifndef TVM_TIR_USMP_ANALYSIS_H_ #define TVM_TIR_USMP_ANALYSIS_H_ #include <tvm/tir/function.h> #include <tvm/tir/usmp/utils.h> namespace tvm { namespace tir { namespace usmp { /*! * \brief Extract BufferInfo objects from a TIR IRModule * * This pass extracts the buffer information of allocate nodes, * including liveness conflicts with other buffer info objects. * * \return A Map of BufferInfo objects and their associated Stmts */ BufferInfoAnalysis ExtractBufferInfo(const PrimFunc& main_func, const IRModule& mod); } // namespace usmp } // namespace tir } // namespace tvm #endif // TVM_TIR_USMP_ANALYSIS_H_
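A sketch of driving this analysis; looking up the entry point under the "__tvm_main__" symbol is an assumption of this sketch, not something this header prescribes:

#include <tvm/ir/module.h>
#include <tvm/tir/usmp/analysis.h>

// Extract buffer liveness and conflict info for the module's main PrimFunc.
tvm::tir::usmp::BufferInfoAnalysis AnalyzeMain(const tvm::IRModule& mod) {
  // The entry-point name "__tvm_main__" is assumed here.
  auto main_func = tvm::runtime::Downcast<tvm::tir::PrimFunc>(mod->Lookup("__tvm_main__"));
  return tvm::tir::usmp::ExtractBufferInfo(main_func, mod);
}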
https://github.com/zk-ml/tachikoma
include/tvm/tir/usmp/transform.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tir/usmp/transform.h * \brief The transform passes for TIR-based Unified Static Memory Planner */ #ifndef TVM_TIR_USMP_TRANSFORM_H_ #define TVM_TIR_USMP_TRANSFORM_H_ #include <tvm/tir/usmp/utils.h> namespace tvm { namespace tir { namespace usmp { namespace transform { using Pass = tvm::transform::Pass; /*! * \brief Convert the analyzed PoolAllocation to offsets from pool variables * * This pass converts the main function to accept pool variables as inputs * that get passed on to the operator PrimFuncs. Furthermore, the static allocations * will be converted to offsets within the pool variable. * * \return the pass */ TVM_DLL Pass ConvertPoolAllocationsToOffsets(const Map<tir::Stmt, PoolAllocation>& pool_allocations, Bool emit_tvmscript_printable = Bool(false)); /*! * \brief Assign PoolInfo objects to tir.allocate nodes depending on the PrimFunc's target * * This pass assigns default PoolInfo objects to allocate nodes that are not otherwise * annotated, depending on pool info supplied for each target. * * \return the pass */ TVM_DLL Pass AssignPoolInfo(); /*! * \brief This pass creates Allocate nodes for I/O tensors * * If the user wants to place the I/O tensors in the workspace, this pass is required to be * run. In doing so, it will create Allocate nodes for the I/O tensors to be planned, and remove * them from the function arguments. * * \return the pass */ TVM_DLL Pass CreateAllocatesForIO(); } // namespace transform } // namespace usmp } // namespace tir } // namespace tvm #endif // TVM_TIR_USMP_TRANSFORM_H_
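A sketch of wiring these passes together; running AssignPoolInfo before the offset conversion follows the pass descriptions above, and pool_allocations is assumed to come from AssignStmtPoolAllocations (see utils.h):

#include <tvm/tir/usmp/transform.h>

// Assign default pools, then rewrite planned allocations into pool offsets.
tvm::IRModule ApplyUsmpTransforms(
    tvm::IRModule mod,
    const tvm::Map<tvm::tir::Stmt, tvm::tir::usmp::PoolAllocation>& pool_allocations) {
  namespace usmp_transform = tvm::tir::usmp::transform;
  mod = usmp_transform::AssignPoolInfo()(mod);
  return usmp_transform::ConvertPoolAllocationsToOffsets(pool_allocations)(mod);
}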
https://github.com/zk-ml/tachikoma
include/tvm/tir/usmp/utils.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tir/usmp/utils.h * \brief Utilities for Unified Static Memory Planner */ #ifndef TVM_TIR_USMP_UTILS_H_ #define TVM_TIR_USMP_UTILS_H_ #include <tvm/ir/expr.h> #include <tvm/ir/memory_pools.h> #include <tvm/runtime/device_api.h> #include <tvm/target/target.h> #include <tvm/tir/stmt.h> namespace tvm { /*! * \brief PassContext option to enable the USMP */ constexpr const char* kUSMPEnableOption = "tir.usmp.enable"; /*! * \brief PassContext option to select the memory planning algorithm in USMP */ constexpr const char* kUSMPAlgorithmOption = "tir.usmp.algorithm"; /*! * \brief PassContext option to enable placing I/O tensors in the workspace */ constexpr const char* kUSMPUseWorkspaceIO = "tir.usmp.use_workspace_io"; /*! * \brief PassContext option to specify a custom memory planning algorithm in USMP. * The algorithm should be provided as a registered PackedFunc with the name tir.usmp.algorithm.NAME */ constexpr const char* kUSMPCustomAlgorithmOption = "tir.usmp.custom_algorithm"; namespace tir { namespace usmp { /*! * \brief A special kind to distinguish between I/O tensors to the model * and intermediate tensors of the model */ enum class BufferInfoKind { kIntermediate = 0, kInput = 1, kOutput = 2 }; /*! * \brief Describes an abstract memory buffer that will get allocated inside a pool. * The actual memory buffer is represented by PoolAllocationNode after static memory planning. * * See also for relay-level counterparts: * relay::StorageToken (graph_plan_memory.cc) * relay::backend::StorageInfoNode (relay/backend/utils.h) * Region (python/tvm/relay/transform/memory_plan.py) */ struct BufferInfoNode : public Object { /*! \brief The name of the buffer var */ String name_hint; /*! \brief The size in terms of bytes */ Integer size_bytes; /*! \brief The pool candidates that this buffer can get pooled to*/ Array<PoolInfo> pool_candidates; /*! \brief The byte alignment required for buffers that will be placed within the pool */ Integer alignment; /*! \brief The other buffer info objects that conflict in liveness */ Array<ObjectRef> conflicts; /*!
\brief Whether this BufferInfo object describes an I/O tensor or an intermediary */ BufferInfoKind kind; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("name_hint", &name_hint); v->Visit("size_bytes", &size_bytes); v->Visit("pool_candidates", &pool_candidates); v->Visit("alignment", &alignment); v->Visit("conflicts", &conflicts); v->Visit("kind", &kind); } bool SEqualReduce(const BufferInfoNode* other, SEqualReducer equal) const { return equal(name_hint, other->name_hint) && equal(size_bytes, other->size_bytes) && equal(pool_candidates, other->pool_candidates) && equal(alignment, other->alignment) && equal(conflicts, other->conflicts) && equal(kind, other->kind); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce(name_hint); hash_reduce(size_bytes); hash_reduce(alignment); hash_reduce(conflicts); hash_reduce(pool_candidates); hash_reduce(kind); } /*! * \brief Set the liveness conflicts of this BufferInfo * * \param conflicting_buffer_info_objs An array of BufferInfo that conflicts in liveness */ TVM_DLL void SetConflicts(Array<ObjectRef> conflicting_buffer_info_objs); static constexpr const char* _type_key = "tir.usmp.BufferInfo"; TVM_DECLARE_FINAL_OBJECT_INFO(BufferInfoNode, Object); }; class BufferInfo : public ObjectRef { public: TVM_DLL BufferInfo(String name_hint, Integer size_bytes, Array<PoolInfo> pool_candidates, Integer alignment = runtime::kDefaultWorkspaceAlignment, BufferInfoKind kind = BufferInfoKind::kIntermediate); TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(BufferInfo, ObjectRef, BufferInfoNode); }; /*! * \brief This is a composite node produced by the extract_buffer_info * analysis pass that contains global information useful * for memory planning algorithms. */ struct BufferInfoAnalysisNode : public Object { /*! \brief The BufferInfo object and its associated TIR statement */ Map<BufferInfo, tir::Stmt> buffer_info_stmts; /*! \brief This represents the maximum amount of memory used at * any point in time during inference. This value is largely the * best allocation an algorithm could achieve. Due to * the complexities of conflict graphs, it is usually not feasible * to achieve this value in practice. However, it can be useful * for iterative algorithms as a termination * criterion.*/ Integer memory_pressure; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("buffer_info_stmts", &buffer_info_stmts); v->Visit("memory_pressure", &memory_pressure); } bool SEqualReduce(const BufferInfoAnalysisNode* other, SEqualReducer equal) const { return equal(buffer_info_stmts, other->buffer_info_stmts) && equal(memory_pressure, other->memory_pressure); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce(buffer_info_stmts); hash_reduce(memory_pressure); } }; class BufferInfoAnalysis : public ObjectRef { public: TVM_DLL BufferInfoAnalysis(Map<BufferInfo, tir::Stmt> buffer_info_stmts, Integer memory_pressure); TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(BufferInfoAnalysis, ObjectRef, BufferInfoAnalysisNode); }; /*! * \brief The pool allocation produced after the USMP algorithm */ struct PoolAllocationNode : public Object { /*! \brief The assigned WorkspacePoolInfo or ConstantPoolInfo object */ PoolInfo pool_info; /*!
\brief The byte offset within the pool*/ Integer byte_offset; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("pool_info", &pool_info); v->Visit("byte_offset", &byte_offset); } bool SEqualReduce(const PoolAllocationNode* other, SEqualReducer equal) const { return equal(pool_info, other->pool_info) && equal(byte_offset, other->byte_offset); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce(pool_info); hash_reduce(byte_offset); } static constexpr const char* _type_key = "tir.usmp.PoolAllocation"; TVM_DECLARE_FINAL_OBJECT_INFO(PoolAllocationNode, Object); }; class PoolAllocation : public ObjectRef { public: TVM_DLL PoolAllocation(PoolInfo pool_info, Integer byte_offset); TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(PoolAllocation, ObjectRef, PoolAllocationNode); }; /*! * \brief This object contains information post-allocation for PoolInfo objects */ struct AllocatedPoolInfoNode : public Object { /*! \brief The assigned PoolInfo object */ PoolInfo pool_info; /*! \brief The allocated size into this pool */ Integer allocated_size; /*! \brief An optional index of the associated pool Var among the PrimFunc params*/ Optional<Integer> pool_var_idx; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("pool_info", &pool_info); v->Visit("allocated_size", &allocated_size); v->Visit("pool_var_idx", &pool_var_idx); } bool SEqualReduce(const AllocatedPoolInfoNode* other, SEqualReducer equal) const { return equal(pool_info, other->pool_info) && equal(allocated_size, other->allocated_size) && equal(pool_var_idx, other->pool_var_idx); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce(pool_info); hash_reduce(allocated_size); hash_reduce(pool_var_idx); } static constexpr const char* _type_key = "tir.usmp.AllocatedPoolInfo"; TVM_DECLARE_FINAL_OBJECT_INFO(AllocatedPoolInfoNode, Object); }; class AllocatedPoolInfo : public ObjectRef { public: TVM_DLL AllocatedPoolInfo(PoolInfo pool_info, Integer allocated_size, Integer pool_var_idx = Integer()); TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(AllocatedPoolInfo, ObjectRef, AllocatedPoolInfoNode); }; /*! * \brief Convert the IR-bound BufferInfo map to an array of BufferInfo * * \param buffer_info_map IR-bound BufferInfo map */ Array<BufferInfo> ConvertToArrayOfBufferInfo(const Map<BufferInfo, Stmt>& buffer_info_map); /*! * \brief Calculate workspace required to execute an IRModule with main expressed in TIR * * \param mod the IRModule with TIR-based main function */ Integer CalculateModuleWorkspaceSize(const IRModule& mod); /*! * \brief The allocate node attribute to indicate candidate memory pools. * This needs to be kept in sync with CANDIDATE_MEMORY_POOL_ATTR in * python/tvm/tir/usmp/utils.py. */ static constexpr const char* kPoolCandidatesAllocateAttr = "candidate_memory_pools"; /*! * \brief The allocate node attribute to indicate it is being used to hold * an input tensor that needs to be initialized. */ static constexpr const char* kInputTensorAllocate = "input_tensor"; /*! * \brief The allocate node attribute to indicate it is being used to hold * an output tensor. */ static constexpr const char* kOutputTensorAllocate = "output_tensor"; /*! * \brief Calculate the size of the extents in bytes * * \param op the allocate node */ Integer CalculateExtentsSize(const AllocateNode* op); /*! * \brief Calculate the size of the extents in bytes * * \param op the allocate const node */ Integer CalculateExtentsSize(const AllocateConstNode* op); /*!
* \brief Joins the Stmt nodes with PoolAllocation objects * * \param buffer_info_to_stmt the map of BufferInfo objects to Stmt nodes * \param buffer_info_to_pool_allocation the map of BufferInfo objects to PoolAllocation objects */ Map<Stmt, PoolAllocation> AssignStmtPoolAllocations( const Map<BufferInfo, Stmt>& buffer_info_to_stmt, const Map<BufferInfo, PoolAllocation>& buffer_info_to_pool_allocation); /*! * \brief Maps I/O tensor names to their PoolAllocation objects * * \param buffer_info_to_pool_allocation the map of BufferInfo objects to PoolAllocation objects * * This function will obtain pool allocations for I/O tensors if they have been planned */ Map<String, PoolAllocation> GetIOPoolAllocations( const Map<BufferInfo, PoolAllocation>& buffer_info_to_pool_allocation); } // namespace usmp } // namespace tir namespace attr { /*! * \brief This is a BaseFunc attribute to indicate which input vars represent * PoolInfo objects in the form of a Map<Var, PoolInfo>. */ static constexpr const char* kPoolArgs = "pool_args"; /*! * \brief This is an IRModule attribute that maps I/O tensor names to pool * allocations. */ static constexpr const char* kIOTensorPoolAllocations = "io_tensor_pool_allocations"; } // namespace attr } // namespace tvm #endif // TVM_TIR_USMP_UTILS_H_
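To make the data model concrete, here is a small sketch that builds two conflicting BufferInfo objects over a single candidate pool; pool is assumed to be a valid PoolInfo constructed elsewhere:

#include <tvm/tir/usmp/utils.h>

// Two intermediate buffers that are live simultaneously: any planner must
// assign them non-overlapping offsets within a shared pool.
void BuildConflictGraph(const tvm::PoolInfo& pool) {
  using namespace tvm::tir::usmp;
  BufferInfo a("a", 1024, {pool});
  BufferInfo b("b", 2048, {pool});
  a->SetConflicts({b});
  b->SetConflicts({a});
}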
https://github.com/zk-ml/tachikoma
include/tvm/tir/var.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/tir/var.h * \brief Variables in the TIR. */ #ifndef TVM_TIR_VAR_H_ #define TVM_TIR_VAR_H_ #include <tvm/ir/expr.h> #include <tvm/node/node.h> #include <tvm/runtime/data_type.h> #include <string> namespace tvm { namespace tir { /*! * \brief A variable node in the IR. * * A variable is uniquely identified by its address. * * Each variable is only bound once in the following nodes: * - Allocate * - For * - Let * - LetStmt */ class VarNode : public PrimExprNode { public: /*! * \brief The hint to the variable name. * \note Each variable is uniquely identified by its address. */ String name_hint; /*! * \brief type annotation of the variable. * * It is an optional field that provides a more refined type of the variable than dtype. * * \sa tvm/ir/type.h for discussion of relations between runtime::DataType and Type. */ Type type_annotation; void VisitAttrs(AttrVisitor* v) { v->Visit("dtype", &dtype); v->Visit("name", &name_hint); v->Visit("type_annotation", &type_annotation); v->Visit("span", &span); } bool SEqualReduce(const VarNode* other, SEqualReducer equal) const { if (!equal(dtype, other->dtype)) return false; if (!equal(type_annotation, other->type_annotation)) return false; return equal.FreeVarEqualImpl(this, other); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce(dtype); hash_reduce(type_annotation); hash_reduce.FreeVarHashImpl(this); } static constexpr const char* _type_key = "tir.Var"; static constexpr const uint32_t _type_child_slots = 1; TVM_DECLARE_BASE_OBJECT_INFO(VarNode, PrimExprNode); }; /*! \brief A named variable in TIR */ class Var : public PrimExpr { public: explicit Var(ObjectPtr<Object> n) : PrimExpr(n) {} /*! * \brief Constructor * \param name_hint variable name * \param dtype data type * \param span The location of this object in the source code. */ TVM_DLL explicit Var(String name_hint = "v", DataType dtype = DataType::Int(32), Span span = Span()); /*! * \brief Constructor which provides a more detailed type annotation. * \param name_hint variable name. * \param type_annotation The type annotation. * \param span The location of this object in the source code. */ TVM_DLL explicit Var(String name_hint, Type type_annotation, Span span = Span()); /*! * \brief Make a new copy of the var with the same type, appending a suffix * \param suffix The suffix to be appended. * \return the new Var copy */ TVM_DLL Var copy_with_suffix(const String& suffix) const; /*! * \brief Make a new copy of the variable with specified dtype * \param dtype The specified dtype * \return The new variable */ TVM_DLL Var copy_with_dtype(DataType dtype) const; /*! * \brief Get pointer to the internal value. * \return the corresponding Variable. */ const VarNode* operator->() const { return get(); } /*!
* \brief Get pointer to the internal value. * \return the corresponding Variable. */ const VarNode* get() const { return static_cast<const VarNode*>(data_.get()); } /*! \brief type indicating the container type */ using ContainerType = VarNode; }; /*! * \brief A variable node representing a tensor index size, * whose value must be non-negative. */ class SizeVarNode : public VarNode { public: static constexpr const char* _type_key = "tir.SizeVar"; TVM_DECLARE_FINAL_OBJECT_INFO(SizeVarNode, VarNode); }; /*! \brief A named variable representing a tensor index size */ class SizeVar : public Var { public: explicit SizeVar(ObjectPtr<Object> n) : Var(n) {} /*! * \brief constructor * \param name_hint variable name * \param t data type * \param span The location of this object in the source code. */ TVM_DLL explicit SizeVar(String name_hint = "s", DataType t = DataType::Int(32), Span span = Span()); /*! * \brief Get pointer to the internal value. * \return the corresponding Variable. */ const SizeVarNode* operator->() const { return get(); } /*! * \brief Get pointer to the internal value. * \return the corresponding Variable. */ const SizeVarNode* get() const { return static_cast<const SizeVarNode*>(data_.get()); } /*! \brief type indicating the container type */ using ContainerType = SizeVarNode; }; using Region = Array<Range>; /*! * \brief Type of iteration variable. * Each IterVar has a specific type. * * The type of iter var can be overridden via * stage.iter_var_attrs given they are compatible. */ enum IterVarType : int { /*! * \brief Data parallel iteration. * This normally corresponds to axis of Tensor. * Allow all IterVar manipulations. * * \note This does not mean the loop * has to be executed in parallel fashion. */ kDataPar = 0, /*! * \brief The IterVar itself is a thread-index * of a fixed thread launching group. * Note that this is already assumed to be parallelized. * * Disallow: split/fuse/vectorize/parallel */ kThreadIndex = 1, /*! * \brief Commutative reduction. * Cannot be directly parallelized. * * Disallow: parallel/vectorize */ kCommReduce = 2, /*! * \brief Serial loops with loop-carried dependency, * the iteration must execute in order. * Cannot be re-ordered. * * Disallow: reorder/parallel/vectorize */ kOrdered = 3, /*! * \brief IterVar is opaque, * * May not correspond to any generated loop * Disallow all IterVar manipulations and compute_at * * \note This is usually used to implement composite op * or external op. */ kOpaque = 4, // The following are possible additional // types that are provided during schedule /*! * \brief The execution is unrolled. */ kUnrolled = 5, /*! * \brief The loop is vectorized. */ kVectorized = 6, /*! * \brief The loop is parallelized. */ kParallelized = 7, /*! * \brief Marks boundary of tensorization intrinsic. */ kTensorized = 8 }; /*! * \brief An iteration variable representing an iteration * over a one dimensional interval. * * The dtype of the extent of the `dom` of the IterVar must match the dtype of the internal Var. */ class IterVarNode : public Object { public: /*! * \brief the domain of iteration, if known, can be None * For the intermediate schedule node, before schedule. */ Range dom; /*! \brief The looping variable */ Var var; /*! \brief The type of the IterVar */ IterVarType iter_type; /*! * \brief additional tag on the iteration variable, * set this if it is already bound to a known thread tag. */ String thread_tag; /*! * \brief Span that points to the original source code. * Reserved debug information.
*/ mutable Span span; void VisitAttrs(AttrVisitor* v) { v->Visit("dom", &dom); v->Visit("var", &var); v->Visit("iter_type", &iter_type); v->Visit("thread_tag", &thread_tag); v->Visit("span", &span); } bool SEqualReduce(const IterVarNode* other, SEqualReducer equal) const { return equal(dom, other->dom) && equal.DefEqual(var, other->var) && equal(iter_type, other->iter_type) && equal(thread_tag, other->thread_tag); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce(dom); hash_reduce.DefHash(var); hash_reduce(iter_type); hash_reduce(thread_tag); } static constexpr const char* _type_key = "tir.IterVar"; static constexpr const bool _type_has_method_sequal_reduce = true; static constexpr const bool _type_has_method_shash_reduce = true; TVM_DECLARE_FINAL_OBJECT_INFO(IterVarNode, Object); }; /*! * \brief Iteration Variable, * represents an iteration over an integer interval. * * The dtype of the extent of the `dom` of the IterVar must match the dtype of the internal Var. */ class IterVar : public ObjectRef { public: TVM_DLL IterVar(Range dom, Var var, IterVarType iter_type, String thread_tag = "", Span span = Span()); /*! * \return the corresponding var in the IterVar. */ inline operator PrimExpr() const; TVM_DEFINE_OBJECT_REF_METHODS(IterVar, ObjectRef, IterVarNode); TVM_DEFINE_OBJECT_REF_COW_METHOD(IterVarNode); }; // inline implementations inline IterVar::operator PrimExpr() const { return (*this)->var; } inline const char* IterVarType2String(IterVarType t) { switch (t) { case kDataPar: return "DataPar"; case kThreadIndex: return "ThreadIndex"; case kCommReduce: return "CommReduce"; case kOrdered: return "Ordered"; case kOpaque: return "Opaque"; case kUnrolled: return "Unrolled"; case kVectorized: return "Vectorized"; case kParallelized: return "Parallelized"; case kTensorized: return "Tensorized"; } return "Unknown"; } } // namespace tir } // namespace tvm #endif // TVM_TIR_VAR_H_
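A brief sketch of constructing these objects with the constructors declared above:

#include <tvm/tir/var.h>

// Declare a loop variable and two kinds of IterVar over it.
void IterVarSketch() {
  using namespace tvm;
  using namespace tvm::tir;
  Var i("i", DataType::Int(32));
  // A data-parallel iteration over [0, 16).
  IterVar axis(Range::FromMinExtent(0, 16), i, kDataPar);
  // A thread-index IterVar carrying the "threadIdx.x" tag.
  IterVar tx(Range::FromMinExtent(0, 128), Var("tx"), kThreadIndex, "threadIdx.x");
  (void)axis;
  (void)tx;
}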
https://github.com/zk-ml/tachikoma
include/tvm/topi/broadcast.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \brief Broadcast op constructions * \file topi/broadcast.h */ #ifndef TVM_TOPI_BROADCAST_H_ #define TVM_TOPI_BROADCAST_H_ #include <tvm/topi/detail/broadcast.h> #include <tvm/topi/detail/constant_utils.h> #include <tvm/topi/tags.h> #include <algorithm> #include <string> namespace tvm { namespace topi { /*! * \brief Creates an operation that broadcasts a tensor into a compatible * shape according to numpy's rules * * \param t The input tensor * \param output_shape The target output shape, must be compatible * \param name The name of the operation * \param tag The tag to mark the operation * * \return A Tensor whose op member is a broadcast operation */ inline tvm::te::Tensor broadcast_to(const tvm::te::Tensor& t, const tvm::Array<tvm::PrimExpr>& output_shape, std::string name = "T_broadcast_to", std::string tag = kBroadcast) { ICHECK_GE(output_shape.size(), t->shape.size()) << "Not a broadcast, output dimensionality smaller than input.\noutput: " << output_shape << "\nvs\ninput: " << t; auto bh = detail::BroadcastShape(output_shape, t->shape); ICHECK_EQ(output_shape.size(), bh.common_shape.size()); Array<PrimExpr> oshape; for (size_t i = 0; i < output_shape.size(); ++i) { if (output_shape[i].as<tir::IntImmNode>() == nullptr) { oshape.push_back(output_shape[i]); } else { ICHECK(topi::detail::EqualCheck(output_shape[i], bh.common_shape[i])); oshape.push_back(bh.common_shape[i]); } } auto l = [&](tvm::Array<tvm::tir::Var> ovars) { return t(detail::InputIndexFromBroadcast(ovars, t, bh.vars2, bh.all_vars)); }; return tvm::te::compute(oshape, l, name, tag); } #define TOPI_DEFINE_BCAST_OP(Name, ComputeRule) \ inline tvm::PrimExpr Name(const tvm::PrimExpr& a, const tvm::PrimExpr& b) { ComputeRule; } \ inline tvm::te::Tensor Name(const tvm::te::Tensor& A, const tvm::te::Tensor& B, \ std::string name = "T_" #Name, std::string tag = kBroadcast) { \ auto l = [](tvm::PrimExpr a, tvm::PrimExpr b) { ComputeRule; }; \ return detail::WithBroadcast(l, A, B, name, tag); \ } \ inline tvm::te::Tensor Name(const tvm::te::Tensor& A, const tvm::PrimExpr& B, \ std::string name = "T_" #Name, std::string tag = kElementWise) { \ auto l = [](tvm::PrimExpr a, tvm::PrimExpr b) { ComputeRule; }; \ return tvm::te::compute( \ A->shape, [&](const ::tvm::Array<::tvm::tir::Var>& i) { return l(A(i), B); }, name, tag); \ } \ inline tvm::te::Tensor Name(const tvm::PrimExpr& A, const tvm::te::Tensor& B, \ std::string name = "T_" #Name, std::string tag = kElementWise) { \ auto l = [&](tvm::PrimExpr a, tvm::PrimExpr b) { ComputeRule; }; \ return tvm::te::compute( \ B->shape, [&](const ::tvm::Array<::tvm::tir::Var>& i) { return l(A, B(i)); }, name, tag); \ } #define TOPI_DEFINE_OP_OVERLOAD(Name, OpName) \ inline tvm::te::Tensor Name(const tvm::te::Tensor& A, 
const tvm::te::Tensor& B) { \ return topi::OpName(A, B); \ } \ inline tvm::te::Tensor Name(const tvm::PrimExpr& A, const tvm::te::Tensor& B) { \ return topi::OpName(A, B); \ } \ inline tvm::te::Tensor Name(const tvm::te::Tensor& A, const tvm::PrimExpr& B) { \ return topi::OpName(A, B); \ } /*! * \fn logical_and * \brief Compute A && B with auto-broadcasting. * * \param A The first tensor, or Expr * \param B The second tensor, or Expr * \param name The name of the operation * \param tag The tag to mark the operation * * \return The result. */ TOPI_DEFINE_BCAST_OP(logical_and, { return a && b; }); TOPI_DEFINE_OP_OVERLOAD(operator&&, logical_and); /*! * \fn logical_or * \brief Compute A || B with auto-broadcasting. * * \param A The first tensor, or Expr * \param B The second tensor, or Expr * \param name The name of the operation * \param tag The tag to mark the operation * * \return The result. */ TOPI_DEFINE_BCAST_OP(logical_or, { return a || b; }); TOPI_DEFINE_OP_OVERLOAD(operator||, logical_or); /*! * \fn logical_xor * \brief Compute A ^ B with auto-broadcasting. * * \param A The first tensor, or Expr * \param B The second tensor, or Expr * \param name The name of the operation * \param tag The tag to mark the operation * * \return The result. */ TOPI_DEFINE_BCAST_OP(logical_xor, { return a ^ b; }); /*! * \fn bitwise_and * \brief Compute A & B with auto-broadcasting. * * \param A The first tensor, or Expr * \param B The second tensor, or Expr * \param name The name of the operation * \param tag The tag to mark the operation * * \return The result. */ TOPI_DEFINE_BCAST_OP(bitwise_and, { return a & b; }); TOPI_DEFINE_OP_OVERLOAD(operator&, bitwise_and); /*! * \fn bitwise_or * \brief Compute A | B with auto-broadcasting. * * \param A The first tensor, or Expr * \param B The second tensor, or Expr * \param name The name of the operation * \param tag The tag to mark the operation * * \return The result. */ TOPI_DEFINE_BCAST_OP(bitwise_or, { return a | b; }); TOPI_DEFINE_OP_OVERLOAD(operator|, bitwise_or); /*! * \fn bitwise_xor * \brief Compute A ^ B with auto-broadcasting. * * \param A The first tensor, or Expr * \param B The second tensor, or Expr * \param name The name of the operation * \param tag The tag to mark the operation * * \return The result. */ TOPI_DEFINE_BCAST_OP(bitwise_xor, { return a ^ b; }); TOPI_DEFINE_OP_OVERLOAD(operator^, bitwise_xor); /*! * \fn add * \brief Compute A + B with auto-broadcasting. * * \param A The first tensor, or Expr * \param B The second tensor, or Expr * \param name The name of the operation * \param tag The tag to mark the operation * * \return The result. */ TOPI_DEFINE_BCAST_OP(add, { return a + b; }); TOPI_DEFINE_OP_OVERLOAD(operator+, add); /*! * \fn subtract * \brief Compute A - B with auto-broadcasting. * * \param A The first tensor, or Expr * \param B The second tensor, or Expr * \param name The name of the operation * \param tag The tag to mark the operation * * \return The result. */ TOPI_DEFINE_BCAST_OP(subtract, { return a - b; }); TOPI_DEFINE_OP_OVERLOAD(operator-, subtract); /*! * \fn multiply * \brief Compute A * B with auto-broadcasting. * * \param A The first tensor, or Expr * \param B The second tensor, or Expr * \param name The name of the operation * \param tag The tag to mark the operation * * \return The result. */ TOPI_DEFINE_BCAST_OP(multiply, { return a * b; }); TOPI_DEFINE_OP_OVERLOAD(operator*, multiply); /*! * \fn divide * \brief Compute A / B with auto-broadcasting. 
* * \param A The first tensor, or Expr * \param B The second tensor, or Expr * \param name The name of the operation * \param tag The tag to mark the operation * * \return The result. */ TOPI_DEFINE_BCAST_OP(divide, { return div(a, b); }); /*! * \fn floor_divide * \brief Compute floor(A / B) with auto-broadcasting. * * \param A The first tensor, or Expr * \param B The second tensor, or Expr * \param name The name of the operation * \param tag The tag to mark the operation * * \return The result. */ TOPI_DEFINE_BCAST_OP(floor_divide, { if (a.dtype().is_int() || a.dtype().is_uint()) { return floordiv(a, b); } else { return floor(div(a, b)); } }); /*! * \fn trunc_divide * \brief Compute trunc(A / B) with auto-broadcasting. * * \param A The first tensor, or Expr * \param B The second tensor, or Expr * \param name The name of the operation * \param tag The tag to mark the operation * * \return The result. */ TOPI_DEFINE_BCAST_OP(trunc_divide, { if (a.dtype().is_int() || a.dtype().is_uint()) { return truncdiv(a, b); } else { return trunc(div(a, b)); } }); /*! * \fn mod * \brief Compute A % B with auto-broadcasting. * * \param A The first tensor, or Expr * \param B The second tensor, or Expr * \param name The name of the operation * \param tag The tag to mark the operation * * \return The result. */ TOPI_DEFINE_BCAST_OP(mod, { return truncmod(a, b); }); /*! * \fn floor_mod * \brief Compute A - floor_div(A, B) * B with auto-broadcasting. * * \param A The first tensor, or Expr * \param B The second tensor, or Expr * \param name The name of the operation * \param tag The tag to mark the operation * * \return The result. */ TOPI_DEFINE_BCAST_OP(floor_mod, { if (a.dtype().is_int() || a.dtype().is_uint()) { return floormod(a, b); } else { return a - floor_divide(a, b) * b; } }); /*! * \fn trunc_mod * \brief Compute A - trunc_div(A, B) * B with auto-broadcasting. * * \param A The first tensor, or Expr * \param B The second tensor, or Expr * \param name The name of the operation * \param tag The tag to mark the operation * * \return The result. */ TOPI_DEFINE_BCAST_OP(trunc_mod, { if (a.dtype().is_int() || a.dtype().is_uint()) { return truncmod(a, b); } else { return a - trunc_divide(a, b) * b; } }); /*! * \fn maximum * \brief Compute maximum(A, B) with auto-broadcasting. * * \param A The first tensor, or Expr * \param B The second tensor, or Expr * \param name The name of the operation * \param tag The tag to mark the operation * * \return The result. */ TOPI_DEFINE_BCAST_OP(maximum, { return tvm::max(a, b); }); /*! * \fn minimum * \brief Compute minimum(A, B) with auto-broadcasting. * * \param A The first tensor, or Expr * \param B The second tensor, or Expr * \param name The name of the operation * \param tag The tag to mark the operation * * \return The result. */ TOPI_DEFINE_BCAST_OP(minimum, { return tvm::min(a, b); }); /*! * \fn power * \brief Compute power(A, B) with auto-broadcasting. * * \param A The first tensor, or Expr * \param B The second tensor, or Expr * \param name The name of the operation * \param tag The tag to mark the operation * * \return The result. */ TOPI_DEFINE_BCAST_OP(power, { return tvm::pow(a, b); }); /*! * \fn left_shift * \brief Compute A << B with auto-broadcasting. * * \param A The first tensor, or Expr * \param B The second tensor, or Expr * \param name The name of the operation * \param tag The tag to mark the operation * * \return The result. */ TOPI_DEFINE_BCAST_OP(left_shift, { return a << b; }); TOPI_DEFINE_OP_OVERLOAD(operator<<, left_shift); /*!
* \fn right_shift * \brief Compute A >> B with auto-broadcasting. * * \param A The first tensor, or Expr * \param B The second tensor, or Expr * \param name The name of the operation * \param tag The tag to mark the operation * * \return The result. */ TOPI_DEFINE_BCAST_OP(right_shift, { return a >> b; }); TOPI_DEFINE_OP_OVERLOAD(operator>>, right_shift); /*! * \fn greater * \brief Compute (A > B) with auto-broadcasting. * * \param A The first tensor, or Expr * \param B The second tensor, or Expr * \param name The name of the operation * \param tag The tag to mark the operation * * \return The result. */ TOPI_DEFINE_BCAST_OP(greater, { return (a > b); }); /*! * \fn less * \brief Compute (A < B) with auto-broadcasting. * * \param A The first tensor, or Expr * \param B The second tensor, or Expr * \param name The name of the operation * \param tag The tag to mark the operation * * \return The result. */ TOPI_DEFINE_BCAST_OP(less, { return (a < b); }); /*! * \fn equal * \brief Compute (A == B) with auto-broadcasting. * * \param A The first tensor, or Expr * \param B The second tensor, or Expr * \param name The name of the operation * \param tag The tag to mark the operation * * \return The result. */ TOPI_DEFINE_BCAST_OP(equal, { return (a == b); }); /*! * \fn not_equal * \brief Compute (A != B) with auto-broadcasting. * * \param A The first tensor, or Expr * \param B The second tensor, or Expr * \param name The name of the operation * \param tag The tag to mark the operation * * \return The result. */ TOPI_DEFINE_BCAST_OP(not_equal, { return (a != b); }); /*! * \fn greater_equal * \brief Compute (A >= B) with auto-broadcasting. * * \param A The first tensor, or Expr * \param B The second tensor, or Expr * \param name The name of the operation * \param tag The tag to mark the operation * * \return The result. */ TOPI_DEFINE_BCAST_OP(greater_equal, { return (a >= b); }); /*! * \fn less_equal * \brief Compute (A <= B) with auto-broadcasting. * * \param A The first tensor, or Expr * \param B The second tensor, or Expr * \param name The name of the operation * \param tag The tag to mark the operation * * \return The result. */ TOPI_DEFINE_BCAST_OP(less_equal, { return (a <= b); }); } // namespace topi } // namespace tvm #endif // TVM_TOPI_BROADCAST_H_
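A small usage sketch for the operators above; shapes follow numpy broadcasting rules, so the 1-D tensor is broadcast across the rows of the 2-D one:

#include <tvm/te/operation.h>
#include <tvm/topi/broadcast.h>

// C[i, j] = A[i, j] + B[j]; operator+ resolves to topi::add when the topi
// overloads are in scope, so topi::add is called directly here.
tvm::te::Tensor BroadcastAddSketch() {
  using namespace tvm;
  using namespace tvm::te;
  Tensor A = placeholder({16, 16}, DataType::Float(32), "A");
  Tensor B = placeholder({16}, DataType::Float(32), "B");
  return topi::add(A, B);
}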
https://github.com/zk-ml/tachikoma
include/tvm/topi/contrib/cublas.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \brief External function interface to cuBLAS libraries * \file cublas.h */ #ifndef TVM_TOPI_CONTRIB_CUBLAS_H_ #define TVM_TOPI_CONTRIB_CUBLAS_H_ #include <tvm/te/operation.h> #include <tvm/topi/detail/extern.h> namespace tvm { namespace topi { namespace contrib { using namespace tvm::te; using namespace topi::detail; /*! * \brief Create an op that multiplies lhs and rhs with cuBLAS * * \param lhs The left matrix operand * \param rhs The right matrix operand * \param transa Whether to transpose lhs * \param transb Whether to transpose rhs * * \return The output tensor */ inline Tensor cublas_matmul(const Tensor& lhs, const Tensor& rhs, bool transa, bool transb) { auto n = transa ? lhs->shape[1] : lhs->shape[0]; auto m = transb ? rhs->shape[0] : rhs->shape[1]; return make_extern( {{n, m}}, {lhs->dtype}, {lhs, rhs}, [&](Array<Buffer> ins, Array<Buffer> outs) { return call_packed({StringImm("tvm.contrib.cublas.matmul"), pack_buffer(ins[0]), pack_buffer(ins[1]), pack_buffer(outs[0]), transa, transb}); }, "C", "", {})[0]; } /*! * \brief Create an op that multiplies batch matrices * lhs and rhs with cuBLAS * * \param lhs The left matrix operand * \param rhs The right matrix operand * \param transa Whether to transpose lhs * \param transb Whether to transpose rhs * * \return The output tensor */ inline Tensor cublas_batch_matmul(const Tensor& lhs, const Tensor& rhs, bool transa, bool transb) { auto b = lhs->shape[0]; auto n = transa ? lhs->shape[2] : lhs->shape[1]; auto m = transb ? rhs->shape[1] : rhs->shape[2]; return make_extern( {{b, n, m}}, {lhs->dtype}, {lhs, rhs}, [&](Array<Buffer> ins, Array<Buffer> outs) { return call_packed({StringImm("tvm.contrib.cublas.batch_matmul"), pack_buffer(ins[0]), pack_buffer(ins[1]), pack_buffer(outs[0]), transa, transb}); }, "C", "", {})[0]; } } // namespace contrib } // namespace topi } // namespace tvm #endif // TVM_TOPI_CONTRIB_CUBLAS_H_
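A short sketch of a dense layer offloaded through this interface; the wrapper name and shape comments are illustrative:

#include <tvm/topi/contrib/cublas.h>

// C[batch, out_dim] = data * weight^T, evaluated at runtime by the packed
// function "tvm.contrib.cublas.matmul".
tvm::te::Tensor CublasDenseSketch(const tvm::te::Tensor& data /* [batch, in_dim] */,
                                  const tvm::te::Tensor& weight /* [out_dim, in_dim] */) {
  return tvm::topi::contrib::cublas_matmul(data, weight, /*transa=*/false, /*transb=*/true);
}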
https://github.com/zk-ml/tachikoma
include/tvm/topi/contrib/rocblas.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \brief External function interface to rocBLAS libraries * \file rocblas.h */ #ifndef TVM_TOPI_CONTRIB_ROCBLAS_H_ #define TVM_TOPI_CONTRIB_ROCBLAS_H_ #include <tvm/te/operation.h> #include <tvm/topi/detail/extern.h> namespace tvm { namespace topi { namespace contrib { using namespace tvm::te; /*! * \brief Create an op that multiplies lhs and rhs with rocBLAS * * \param lhs The left matrix operand * \param rhs The right matrix operand * \param transa Whether to transpose lhs * \param transb Whether to transpose rhs * * \return The output tensor */ inline Tensor rocblas_matmul(const Tensor& lhs, const Tensor& rhs, bool transa, bool transb) { auto n = transa ? lhs->shape[1] : lhs->shape[0]; auto m = transb ? rhs->shape[0] : rhs->shape[1]; return make_extern( {{n, m}}, {lhs->dtype}, {lhs, rhs}, [&](Array<Buffer> ins, Array<Buffer> outs) { return call_packed({StringImm("tvm.contrib.rocblas.matmul"), pack_buffer(ins[0]), pack_buffer(ins[1]), pack_buffer(outs[0]), transa, transb}); }, "C", "", {})[0]; } /*! * \brief Create an op that batch multiplies lhs and rhs with rocBLAS * * \param lhs The left matrix operand e.g. (batch_size, M, K) * \param rhs The right matrix operand e.g. (batch_size, K, N) * \param transa Whether to transpose lhs * \param transb Whether to transpose rhs * * \return The output tensor */ inline Tensor rocblas_batch_matmul(const Tensor& lhs, const Tensor& rhs, bool transa, bool transb) { auto batch_size = lhs->shape[0]; auto n = transa ? lhs->shape[2] : lhs->shape[1]; auto m = transb ? rhs->shape[1] : rhs->shape[2]; return make_extern( {{batch_size, n, m}}, {lhs->dtype}, {lhs, rhs}, [&](Array<Buffer> ins, Array<Buffer> outs) { return call_packed({StringImm("tvm.contrib.rocblas.batch_matmul"), pack_buffer(ins[0]), pack_buffer(ins[1]), pack_buffer(outs[0]), transa, transb}); }, "C", "", {})[0]; } } // namespace contrib } // namespace topi } // namespace tvm #endif // TVM_TOPI_CONTRIB_ROCBLAS_H_
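Usage mirrors the cuBLAS wrapper above; a sketch of the batched form (shapes illustrative, rocBLAS runtime support assumed).

#include <tvm/topi/contrib/rocblas.h>

// lhs (batch, n, k) x rhs (batch, m, k)^T -> (batch, n, m) when transb is true.
inline tvm::te::Tensor BatchMatmulViaRocblas(const tvm::te::Tensor& lhs, const tvm::te::Tensor& rhs) {
  return tvm::topi::contrib::rocblas_batch_matmul(lhs, rhs, /*transa=*/false, /*transb=*/true);
}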
https://github.com/zk-ml/tachikoma
include/tvm/topi/cuda/dense.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file cuda/dense.h * \brief CUDA schedule for dense operation */ #ifndef TVM_TOPI_CUDA_DENSE_H_ #define TVM_TOPI_CUDA_DENSE_H_ #include <tvm/target/generic_func.h> #include <tvm/te/operation.h> #include <tvm/te/schedule_pass.h> #include <tvm/topi/contrib/cublas.h> #include <tvm/topi/detail/array_utils.h> #include <tvm/topi/generic/extern.h> #include <tvm/topi/nn/dense.h> #include <tvm/topi/tags.h> namespace tvm { namespace topi { using namespace tvm::te; namespace cuda { /*! * \brief Implementation of dense for CUDA backend * * \param target The target device * \param data Tensor with shape [batch, in_dim] * \param weight Tensor with shape [out_dim, in_dim] * \param bias Tensor with shape [out_dim]. Optional; to omit bias, pass Tensor() * \param out_dtype Output data type. Used for mixed precision. * * \return Tensor with shape [batch, out_dim] */ inline tvm::te::Tensor dense_cuda(const Target& target, const tvm::te::Tensor& data, const tvm::te::Tensor& weight, const tvm::te::Tensor& bias, const DataType& out_dtype) { ICHECK_EQ(data->shape.size(), 2) << "dense requires 2-D data"; ICHECK_EQ(weight->shape.size(), 2) << "dense requires 2-D weight"; if (bias.defined()) { ICHECK_EQ(bias->shape.size(), 1) << "dense requires 1-D bias"; } auto batch = data->shape[0]; auto in_dim = data->shape[1]; auto out_dim = weight->shape[0]; if (target->GetLibs().count("cublas")) { ICHECK_EQ(data->dtype, out_dtype) << "Mixed precision not supported."; auto mm = topi::contrib::cublas_matmul(data, weight, false, true); if (bias.defined()) { mm = tvm::te::compute( {batch, out_dim}, [&](Var i, Var j) { return mm(i, j) + bias(j); }, "tensor", kBroadcast); } return mm; } else { return topi::nn::dense(data, weight, bias, out_dtype); } } /*! * \brief Create a CUDA schedule for dense * * \param target The target to generate a schedule for. * \param outs The output tensors. * * \return A schedule for the given ops. 
*/ inline Schedule schedule_dense(const Target& target, const Array<Tensor>& outs) { if (target->kind->name == "cuda" && target->GetLibs().count("cublas")) { return topi::generic::schedule_extern(target, outs); } Array<Operation> out_ops; for (auto t : outs) { out_ops.push_back(t->op); } auto s = create_schedule(out_ops); auto _schedule = [&](const Tensor& dense) { auto num_thread = 64; auto k = dense->op.as<ComputeOpNode>()->reduce_axis[0]; IterVar ko, kf; s[dense].split(k, num_thread, &ko, &kf); auto dense_f = s.rfactor(dense, kf)[0]; Tensor out; if (detail::contains(s->outputs, dense->op)) { out = dense; } else { out = outs[0]->op.output(0); s[dense].compute_at(s[out], s[out]->op.as<ComputeOpNode>()->axis[1]); } s[out].bind(s[out]->op.as<ComputeOpNode>()->axis[0], tvm::te::thread_axis(Range(), "blockIdx.y")); s[out].bind(s[out]->op.as<ComputeOpNode>()->axis[1], tvm::te::thread_axis(Range(), "blockIdx.x")); auto tx = s[dense]->op.as<ComputeOpNode>()->reduce_axis[0]; auto thread_x = tvm::te::thread_axis(Range(), "threadIdx.x"); s[dense].bind(tx, thread_x); s[dense_f].compute_at(s[dense], tx); s[dense].set_store_predicate(static_cast<PrimExpr>(thread_x) == 0); s[out].set_store_predicate(static_cast<PrimExpr>(thread_x) == 0); }; std::function<void(Operation)> traverse; traverse = [&](const Operation& op) { // Inline all one-to-one-mapping operators except the last stage (output) if (is_broadcast(op->tag)) { if (!detail::contains(s->outputs, op)) { s[op].compute_inline(); } for (auto tensor : op->InputTensors()) { if (tensor->op->InputTensors().size() > 0) { traverse(tensor->op); } } } else if (op->tag == "dense") { // If the tag is dense, schedule the dense op auto dense = op.output(0); _schedule(dense); } else { LOG(ERROR) << "Unsupported operator " << op->tag; } }; traverse(outs[0]->op); return s; } } // namespace cuda } // namespace topi } // namespace tvm #endif // TVM_TOPI_CUDA_DENSE_H_
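A minimal sketch tying the two entry points together (shapes illustrative; with cublas in the target's -libs the schedule falls back to schedule_extern).

#include <tvm/target/target.h>
#include <tvm/topi/cuda/dense.h>

inline tvm::te::Schedule DenseExample() {
  tvm::Target target("cuda");
  auto data = tvm::te::placeholder({64, 128}, tvm::DataType::Float(32), "data");
  auto weight = tvm::te::placeholder({256, 128}, tvm::DataType::Float(32), "weight");
  auto out = tvm::topi::cuda::dense_cuda(target, data, weight,
                                         tvm::te::Tensor(),  // no bias
                                         tvm::DataType::Float(32));
  return tvm::topi::cuda::schedule_dense(target, {out});
}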
https://github.com/zk-ml/tachikoma
include/tvm/topi/cuda/injective.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file cuda/injective.h * \brief CUDA schedule for injective operations */ #ifndef TVM_TOPI_CUDA_INJECTIVE_H_ #define TVM_TOPI_CUDA_INJECTIVE_H_ #include <tvm/target/generic_func.h> #include <tvm/te/operation.h> #include <tvm/te/schedule_pass.h> #include <tvm/topi/detail/fuse.h> #include <tvm/topi/tags.h> namespace tvm { namespace topi { using namespace tvm::te; namespace cuda { /*! * \brief Updates an existing schedule for the given injective ops. * * \param sch The schedule to update. * \param out The tensor representing the injective op. * * \return The updated schedule. */ inline Schedule schedule_injective_from_existing(Schedule sch, const Tensor& out) { auto fused = detail::Fuse(sch[out], sch[out]->op.as<ComputeOpNode>()->axis); auto target = Target::Current(false); int num_thread = target->GetAttr<Integer>("max_num_threads").value().IntValue(); IterVar bx, tx; sch[out].split(fused, num_thread, &bx, &tx); sch[out].bind(bx, thread_axis(Range(), "blockIdx.x")); sch[out].bind(tx, thread_axis(Range(), "threadIdx.x")); return sch; } /*! * \brief Create a CUDA schedule for the given output tensors. * * \param target The target to generate a schedule for. * \param outs The output tensors. * * \return A schedule for the given ops. */ inline Schedule schedule_injective(const Target& target, const Array<Tensor>& outs) { Array<Operation> out_ops; for (auto t : outs) { out_ops.push_back(t->op); } auto s = create_schedule(out_ops); tvm::te::AutoInlineInjective(s); for (auto out : outs) { schedule_injective_from_existing(s, out); } return s; } } // namespace cuda } // namespace topi } // namespace tvm #endif // TVM_TOPI_CUDA_INJECTIVE_H_
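Sketch of scheduling a fused elementwise expression. Note that schedule_injective_from_existing reads Target::Current() for max_num_threads, so the call is wrapped in a target scope here (an assumption about typical call sites).

#include <tvm/support/with.h>
#include <tvm/target/target.h>
#include <tvm/topi/broadcast.h>
#include <tvm/topi/cuda/injective.h>

inline tvm::te::Schedule InjectiveExample() {
  tvm::Target target("cuda");
  tvm::With<tvm::Target> scope(target);  // make the target current for the attribute lookup
  auto x = tvm::te::placeholder({1 << 20}, tvm::DataType::Float(32), "x");
  auto y = tvm::topi::add(x, x);  // broadcast/elementwise ops are injective
  return tvm::topi::cuda::schedule_injective(target, {y});
}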
https://github.com/zk-ml/tachikoma
include/tvm/topi/cuda/pooling.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file cuda/pooling.h * \brief CUDA schedule for pooling operations */ #ifndef TVM_TOPI_CUDA_POOLING_H_ #define TVM_TOPI_CUDA_POOLING_H_ #include <tvm/target/generic_func.h> #include <tvm/te/operation.h> #include <tvm/te/schedule_pass.h> #include <tvm/topi/detail/array_utils.h> #include <tvm/topi/detail/fuse.h> #include <tvm/topi/tags.h> namespace tvm { namespace topi { using namespace tvm::te; namespace cuda { /*! * \brief Create a CUDA schedule for pool * * \param target The target to generate a schedule for. * \param outs The output tensors. * * \return A schedule for the given ops. */ inline Schedule schedule_pool(const Target& target, const Array<Tensor>& outs) { Array<Operation> out_ops; for (auto t : outs) { out_ops.push_back(t->op); } auto s = create_schedule(out_ops); auto _schedule = [&](const Tensor& padded_input, const Tensor& pool) { if (padded_input->op->IsInstance<ComputeOpNode>()) { s[padded_input].compute_inline(); } int num_thread = target->GetAttr<Integer>("max_num_threads").value().IntValue(); Tensor out; Tensor OL; if (detail::contains(s->outputs, pool->op)) { out = pool; OL = s.cache_write(pool, "local"); } else { out = outs[0]->op.output(0); s[pool].set_scope("local"); } auto fused = detail::Fuse(s[out], s[out]->op.as<ComputeOpNode>()->axis); IterVar bx, tx; s[out].split(fused, num_thread, &bx, &tx); s[out].bind(bx, tvm::te::thread_axis(Range(), "blockIdx.x")); s[out].bind(tx, tvm::te::thread_axis(Range(), "threadIdx.x")); if (detail::contains(s->outputs, pool->op)) { s[OL].compute_at(s[out], tx); } else { s[pool].compute_at(s[out], tx); } }; std::function<void(Operation)> traverse; traverse = [&](const Operation& op) { // Inline all one-to-one-mapping operators except the last stage (output) if (is_broadcast(op->tag)) { if (!detail::contains(s->outputs, op)) { s[op].compute_inline(); } for (auto tensor : op->InputTensors()) { if (tensor->op->InputTensors().size() > 0) { traverse(tensor->op); } } } else if (op->tag.rfind("pool", 0) == 0) { // If tag starts with pool auto padded_input = op->InputTensors()[0]; auto pool = op.output(0); _schedule(padded_input, pool); } else { LOG(ERROR) << "Unsupported operator " << op->tag; } }; traverse(outs[0]->op); return s; } /*! * \brief Create a CUDA schedule for global_pool * * \param target The target to generate a schedule for. * \param outs The output tensors. * * \return A schedule for the given ops. 
*/ inline Schedule schedule_global_pool(const Target& target, const Array<Tensor>& outs) { Array<Operation> out_ops; for (auto t : outs) { out_ops.push_back(t->op); } auto s = create_schedule(out_ops); auto _schedule = [&](const Tensor& pool) { auto num_thread = 8; auto block_x = tvm::te::thread_axis(Range(), "blockIdx.x"); auto block_y = tvm::te::thread_axis(Range(), "blockIdx.y"); auto thread_x = tvm::te::thread_axis(Range(0, num_thread), "threadIdx.x"); auto thread_y = tvm::te::thread_axis(Range(0, num_thread), "threadIdx.y"); Tensor out; Tensor OL; if (detail::contains(s->outputs, pool->op)) { out = pool; OL = s.cache_write(pool, "local"); } else { out = outs[0]->op.output(0); s[pool].set_scope("local"); } auto i = s[out]->op.as<ComputeOpNode>()->axis[0]; auto c = s[out]->op.as<ComputeOpNode>()->axis[1]; IterVar by, ty; s[out].split(i, num_thread, &by, &ty); IterVar bx, tx; s[out].split(c, num_thread, &bx, &tx); s[out].reorder({by, bx, ty, tx}); s[out].bind(ty, thread_y); s[out].bind(tx, thread_x); s[out].bind(by, block_y); s[out].bind(bx, block_x); if (detail::contains(s->outputs, pool->op)) { s[OL].compute_at(s[out], tx); } else { s[pool].compute_at(s[out], tx); } }; std::function<void(Operation)> traverse; traverse = [&](const Operation& op) { // Inline all one-to-one-mapping operators except the last stage (output) if (is_broadcast(op->tag)) { if (!detail::contains(s->outputs, op)) { s[op].compute_inline(); } for (auto tensor : op->InputTensors()) { if (tensor->op->InputTensors().size() > 0) { traverse(tensor->op); } } } else if (op->tag.rfind("global_pool", 0) == 0) { // If tag starts with global_pool auto pool = op.output(0); _schedule(pool); } else { LOG(ERROR) << "Unsupported operator " << op->tag; } }; traverse(outs[0]->op); return s; } } // namespace cuda } // namespace topi } // namespace tvm #endif // TVM_TOPI_CUDA_POOLING_H_
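Both schedules require a pooling op (tag starting with "pool" or "global_pool") to be reachable from outs[0]; a thin wrapper makes that assumption explicit.

#include <tvm/target/target.h>
#include <tvm/topi/cuda/pooling.h>

// `pool` is assumed to be the output of a TOPI pooling op, i.e. its tag starts with "pool".
inline tvm::te::Schedule SchedulePoolOutput(const tvm::Target& target, const tvm::te::Tensor& pool) {
  return tvm::topi::cuda::schedule_pool(target, {pool});
}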
https://github.com/zk-ml/tachikoma
include/tvm/topi/cuda/reduction.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file cuda/reduction.h * \brief CUDA schedule for reduction operations */ #ifndef TVM_TOPI_CUDA_REDUCTION_H_ #define TVM_TOPI_CUDA_REDUCTION_H_ #include <tvm/target/generic_func.h> #include <tvm/te/operation.h> #include <tvm/te/schedule_pass.h> #include <tvm/topi/detail/fuse.h> #include <tvm/topi/tags.h> namespace tvm { namespace topi { using namespace tvm::te; namespace cuda { /*! * \brief Schedule a given reduce operation. * * \param target The target to generate a schedule for. * \param op The operation representing the injective operation. * \param sch The schedule to apply this scheduling to * \param is_idx_reduce Pass true to schedule a reduce op that returns * an index, such as argmax or argmin. * * \return The schedule given by sch */ Schedule ScheduleReduce(const Target& target, Operation op, Schedule sch, bool is_idx_reduce = false) { Tensor data_out; Tensor data_in; if (!is_idx_reduce) { data_in = op->InputTensors()[0]; data_out = op.output(0); } else { data_out = op->InputTensors()[0]; } auto out_stage = sch[data_out]; ICHECK_GT(out_stage->op.as<ComputeOpNode>()->reduce_axis.size(), 0) << "reduce_axis must be greater than zero"; bool all_reduce; int num_thread; IterVar block_x, thread_x, thread_y; if (out_stage->op.as<ComputeOpNode>()->axis.size() > 0) { all_reduce = false; num_thread = 32; if (target->kind->name == "opencl" || target->kind->name == "metal") { // Without this, CL_INVALID_WORK_GROUP_SIZE occurs with python tests. // Don't know why. 
num_thread = 16; } block_x = tvm::te::thread_axis(Range(), "blockIdx.x"); thread_x = tvm::te::thread_axis(Range(0, num_thread), "threadIdx.x"); thread_y = tvm::te::thread_axis(Range(0, num_thread), "threadIdx.y"); } else { all_reduce = true; num_thread = target->GetAttr<Integer>("max_num_threads").value().IntValue(); thread_x = tvm::te::thread_axis(Range(0, num_thread), "threadIdx.x"); } auto fused_reduce = detail::Fuse(out_stage, out_stage->op.as<ComputeOpNode>()->reduce_axis); IterVar ko, ki; out_stage.split(fused_reduce, num_thread, &ko, &ki); auto data_out_rf = sch.rfactor(data_out, ki)[0]; auto tx = out_stage->op.as<ComputeOpNode>()->reduce_axis[0]; out_stage.bind(tx, thread_x); sch[data_out_rf].compute_at(out_stage, tx); Tensor real_output; Tensor temp_idx_input, temp_val_input; if (is_idx_reduce) { real_output = op.output(0); temp_idx_input = data_out->op.output(0); temp_val_input = data_out->op.output(1); } else { real_output = data_out; } auto stage_real = sch[real_output]; if (!all_reduce) { // Fuse and split the axis auto fused_outer = detail::Fuse(stage_real, stage_real->op.as<ComputeOpNode>()->axis); IterVar bx, outer_in; stage_real.split(fused_outer, num_thread, &bx, &outer_in); // Bind the axes to threads and blocks stage_real.bind(outer_in, thread_y); stage_real.bind(bx, block_x); if (is_idx_reduce) { sch[temp_idx_input].compute_at(stage_real, outer_in); sch[temp_val_input].compute_at(stage_real, outer_in); } } else { if (is_idx_reduce) { sch[temp_idx_input].compute_at(stage_real, stage_real->op.as<ComputeOpNode>()->axis[0]); sch[temp_val_input].compute_at(stage_real, stage_real->op.as<ComputeOpNode>()->axis[0]); } } stage_real.set_store_predicate(static_cast<PrimExpr>(thread_x) == 0); return sch; } /*! * \brief Recursively traverse operator inputs, setting injective inputs * to be computed inline. * * \param s The schedule we are building * \param op The current op in the traversal */ void TraverseBeforeReduce(Schedule s, Operation op) { if (op->IsInstance<PlaceholderOpNode>()) { return; } else if (is_injective(op->tag)) { s[op].compute_inline(); for (auto tensor : op->InputTensors()) { TraverseBeforeReduce(s, tensor->op); } } else { LOG(ERROR) << "Unsupported operator " << op->tag; } } /*! * \brief Schedule a reduce op, then invoke TraverseBeforeReduce on each * of the op's inputs. * * \param target The target to generate a schedule for. * \param s The schedule we are building * \param op The reduce op */ void TraverseAfterReduce(const Target& target, Schedule s, Operation op) { if (is_broadcast(op->tag)) { LOG(ERROR) << "Elementwise op after reduce is not yet supported"; } else if (op->tag == kCommReduce) { ScheduleReduce(target, op, s, false); for (auto tensor : op->InputTensors()) { TraverseBeforeReduce(s, tensor->op); } } else if (op->tag == kCommReduceIdx) { ScheduleReduce(target, op, s, true); for (auto tensor : op->InputTensors()[0]->op->InputTensors()) { TraverseBeforeReduce(s, tensor->op); } } else { LOG(ERROR) << "Unsupported operator " << op->tag; } } /*! * \brief Create a CUDA schedule for a reduce operation. * * \param target The target to generate a schedule for. * \param outs The output tensors. * * \return A schedule for the given ops. 
*/ Schedule schedule_reduce(const Target& target, Array<Tensor> outs) { ICHECK_EQ(outs.size(), 1) << "outs must have size 1"; Array<Operation> out_ops; for (auto t : outs) { out_ops.push_back(t->op); } auto s = create_schedule(out_ops); TraverseAfterReduce(target, s, outs[0]->op); return s; } } // namespace cuda } // namespace topi } // namespace tvm #endif // TVM_TOPI_CUDA_REDUCTION_H_
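A minimal sketch of the reduce entry point (shapes illustrative).

#include <tvm/target/target.h>
#include <tvm/topi/cuda/reduction.h>
#include <tvm/topi/reduction.h>

inline tvm::te::Schedule ReduceExample() {
  tvm::Target target("cuda");
  auto x = tvm::te::placeholder({32, 1024}, tvm::DataType::Float(32), "x");
  auto r = tvm::topi::sum(x, {1});  // row sums, tagged kCommReduce
  return tvm::topi::cuda::schedule_reduce(target, {r});
}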
https://github.com/zk-ml/tachikoma
include/tvm/topi/cuda/softmax.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file cuda/softmax.h * \brief CUDA schedule for softmax operations */ #ifndef TVM_TOPI_CUDA_SOFTMAX_H_ #define TVM_TOPI_CUDA_SOFTMAX_H_ #include <tvm/target/generic_func.h> #include <tvm/te/operation.h> #include <tvm/te/schedule_pass.h> #include <tvm/topi/detail/fuse.h> #include <tvm/topi/tags.h> namespace tvm { namespace topi { using namespace tvm::te; namespace cuda { /*! * \brief Create a CUDA schedule for the given softmax output tensors. * * \param target The target to generate a schedule for. * \param outs The output tensors. * * \return A schedule for the given ops. */ inline Schedule schedule_softmax(const Target& target, const Array<Tensor>& outs) { Array<Operation> out_ops; for (auto t : outs) { out_ops.push_back(t->op); } auto s = create_schedule(out_ops); auto softmax = outs[0]; tvm::te::Tensor max_elem; tvm::te::Tensor expsum; tvm::te::Tensor exp; bool has_exp = false; auto tag = softmax->op.as<ComputeOpNode>()->tag; if (tag == "softmax_output") { expsum = softmax->op->InputTensors()[1]; exp = softmax->op->InputTensors()[0]; max_elem = s[exp]->op->InputTensors()[1]; has_exp = true; } else if (tag == "log_softmax_output") { max_elem = softmax->op->InputTensors()[1]; expsum = softmax->op->InputTensors()[2]; } else { LOG(ERROR) << "Tag is expected to be softmax_output or log_softmax_output. Got " << tag; } int num_thread = 64; auto block_x = tvm::te::thread_axis(Range(), "blockIdx.x"); auto thread_x = tvm::te::thread_axis(Range(0, num_thread), "threadIdx.x"); if (has_exp) { s[exp].bind(exp->op.as<ComputeOpNode>()->axis[0], block_x); } s[max_elem].bind(max_elem->op.as<ComputeOpNode>()->axis[0], block_x); auto k = expsum->op.as<ComputeOpNode>()->reduce_axis[0]; IterVar ko, ki; s[expsum].split(k, num_thread, &ko, &ki); auto EF = s.rfactor(expsum, ki)[0]; s[expsum].bind(s[expsum]->op.as<ComputeOpNode>()->axis[0], block_x); s[expsum].bind(s[expsum]->op.as<ComputeOpNode>()->reduce_axis[0], thread_x); s[EF].compute_at(s[expsum], s[expsum]->op.as<ComputeOpNode>()->reduce_axis[0]); s[expsum].set_store_predicate(thread_x->var == 0); IterVar tx, xi; s[softmax].split_by_nparts(softmax->op.as<ComputeOpNode>()->axis[1], num_thread, &tx, &xi); s[softmax].bind(tx, thread_x); return s; } } // namespace cuda } // namespace topi } // namespace tvm #endif // TVM_TOPI_CUDA_SOFTMAX_H_
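A minimal sketch (shapes illustrative); the schedule keys off the "softmax_output" tag that topi::nn::softmax attaches.

#include <tvm/target/target.h>
#include <tvm/topi/cuda/softmax.h>
#include <tvm/topi/nn/softmax.h>

inline tvm::te::Schedule SoftmaxExample() {
  tvm::Target target("cuda");
  auto x = tvm::te::placeholder({16, 1000}, tvm::DataType::Float(32), "x");
  auto y = tvm::topi::nn::softmax(x);  // tagged "softmax_output"
  return tvm::topi::cuda::schedule_softmax(target, {y});
}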
https://github.com/zk-ml/tachikoma
include/tvm/topi/detail/array_utils.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file array_utils.h * \brief Utility functions for handling arrays */ #ifndef TVM_TOPI_DETAIL_ARRAY_UTILS_H_ #define TVM_TOPI_DETAIL_ARRAY_UTILS_H_ #include <tvm/te/operation.h> namespace tvm { namespace topi { namespace detail { using namespace tvm::te; /*! * \brief Search an array for a specific item * * \param array The array to search * \param item The item to search for * * \return True iff the given array contains the given item. */ template <typename T> inline bool contains(Array<T> array, T item) { for (auto& i : array) { if (i == item) { return true; } } return false; } } // namespace detail } // namespace topi } // namespace tvm #endif // TVM_TOPI_DETAIL_ARRAY_UTILS_H_
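contains() compares with ObjectRef equality (pointer identity for TVM objects), which is exactly how the schedules above use it; a sketch:

#include <tvm/te/schedule.h>
#include <tvm/topi/detail/array_utils.h>

// True iff `op` is one of the schedule's output operations.
inline bool IsScheduleOutput(const tvm::te::Schedule& s, const tvm::te::Operation& op) {
  return tvm::topi::detail::contains(s->outputs, op);
}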
https://github.com/zk-ml/tachikoma
include/tvm/topi/detail/broadcast.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \brief Detail broadcast. * \file topi/detail/broadcast.h */ #ifndef TVM_TOPI_DETAIL_BROADCAST_H_ #define TVM_TOPI_DETAIL_BROADCAST_H_ #include <tvm/te/operation.h> #include <tvm/topi/detail/constant_utils.h> #include <algorithm> #include <deque> #include <string> namespace tvm { namespace topi { namespace detail { struct BroadcastHelper { std::deque<tvm::PrimExpr> common_shape; std::deque<tvm::tir::Var> all_vars; std::deque<tvm::tir::Var> vars1; std::deque<tvm::tir::Var> vars2; }; static inline DataType CommonType(DataType type1, DataType type2) { ICHECK(type1.is_scalar() && type2.is_scalar()); ICHECK(type1.code() == type2.code()); return DataType(type1.code(), std::max(type1.bits(), type2.bits()), /*lanes=*/1); } inline BroadcastHelper BroadcastShape(const tvm::Array<tvm::PrimExpr>& shape1, const tvm::Array<tvm::PrimExpr>& shape2) { BroadcastHelper bh; int s1_size = shape1.size(); int s2_size = shape2.size(); tvm::PrimExpr one(1); int i; auto cast_if_needed = [](DataType to_type, PrimExpr expr) { return to_type != expr.dtype() ? 
cast(to_type, expr) : expr; }; for (i = 1; i <= std::min(s1_size, s2_size); ++i) { // TODO(@icemelon9): Need to revisit this part const IntImmNode* static_size1 = shape1[s1_size - i].as<IntImmNode>(); const IntImmNode* static_size2 = shape2[s2_size - i].as<IntImmNode>(); DataType common_type = CommonType(shape1[s1_size - i].dtype(), shape2[s2_size - i].dtype()); bh.all_vars.push_front(tvm::tir::Var("dim", common_type)); if (topi::detail::EqualCheck(shape1[s1_size - i], shape2[s2_size - i])) { bh.common_shape.push_front(cast_if_needed(common_type, shape1[s1_size - i])); bh.vars1.push_front(bh.all_vars[0]); bh.vars2.push_front(bh.all_vars[0]); } else if (topi::detail::EqualCheck(one, shape1[s1_size - i])) { ICHECK(!topi::detail::EqualCheck(one, shape2[s2_size - i])); bh.common_shape.push_front(cast_if_needed(common_type, shape2[s2_size - i])); bh.vars2.push_front(bh.all_vars[0]); } else if (topi::detail::EqualCheck(one, shape2[s2_size - i])) { bh.common_shape.push_front(cast_if_needed(common_type, shape1[s1_size - i])); bh.vars1.push_front(bh.all_vars[0]); } else if (!static_size1 && !static_size2) { bh.common_shape.push_front( cast_if_needed(common_type, max(shape1[s1_size - i], shape2[s2_size - i]))); bh.vars1.push_front(bh.all_vars[0]); bh.vars2.push_front(bh.all_vars[0]); } else if (!static_size1) { bh.common_shape.push_front(cast_if_needed(common_type, shape2[s2_size - i])); bh.vars2.push_front(bh.all_vars[0]); bh.vars1.push_front(bh.all_vars[0]); } else if (!static_size2) { bh.common_shape.push_front(cast_if_needed(common_type, shape1[s1_size - i])); bh.vars1.push_front(bh.all_vars[0]); bh.vars2.push_front(bh.all_vars[0]); } else { ICHECK(false) << "Incompatible broadcast dims: " << shape1[s1_size - i] << " and " << shape2[s2_size - i] << " in: " << tvm::Array<tvm::PrimExpr>(shape1.begin(), shape1.end()) << " and " << tvm::Array<tvm::PrimExpr>(shape2.begin(), shape2.end()); } } // Remaining dimensions whether on shape1 or shape2 can always be completed auto max_size = std::max(s1_size, s2_size); auto& shape = (s1_size > s2_size) ? shape1 : shape2; auto& vars = (s1_size > s2_size) ? bh.vars1 : bh.vars2; for (; i <= max_size; ++i) { bh.all_vars.push_front(tvm::tir::Var("v", shape[max_size - 1].dtype())); bh.common_shape.push_front(shape[max_size - i]); vars.push_front(bh.all_vars[0]); } return bh; } inline tvm::Array<tvm::PrimExpr> InputIndexFromBroadcast( const tvm::Array<tvm::tir::Var>& ovars, const tvm::te::Tensor& T, const std::deque<tvm::tir::Var>& my_vars, const std::deque<tvm::tir::Var>& all_vars) { tvm::Array<tvm::PrimExpr> ivars; ICHECK_EQ(ovars.size(), all_vars.size()); // N^2, could use a map but NBD. size_t expected_dims = T->shape.size(); for (size_t i = 0; i < ovars.size(); ++i) { bool found = false; for (size_t j = 0; j < my_vars.size(); ++j) { if (all_vars[i].same_as(my_vars[j])) { ivars.push_back(ovars[i]); found = true; break; } } // Only inject 0 here if we have not yet reached the dimension of I // (i.e. 
this must be a 1) if (!found && (ovars.size() - i) <= expected_dims) { ivars.push_back(tvm::tir::make_zero(ovars[i].dtype())); } } ICHECK(expected_dims == ivars.size()); return ivars; } template <typename FBinaryExpr> inline tvm::te::Tensor WithBroadcast(FBinaryExpr op, const tvm::te::Tensor& A, const tvm::te::Tensor& B, const std::string& name = "tensor", const std::string& tag = "") { auto bh = BroadcastShape(A->shape, B->shape); auto l = [&](tvm::Array<tvm::tir::Var> ovars) { return op(A(InputIndexFromBroadcast(ovars, A, bh.vars1, bh.all_vars)), B(InputIndexFromBroadcast(ovars, B, bh.vars2, bh.all_vars))); }; return tvm::te::compute(tvm::Array<tvm::PrimExpr>(bh.common_shape.begin(), bh.common_shape.end()), l, name, tag); } } // namespace detail } // namespace topi } // namespace tvm #endif // TVM_TOPI_DETAIL_BROADCAST_H_
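WithBroadcast is the engine behind the TOPI_DEFINE_BCAST_OP macro in topi/broadcast.h; a hypothetical custom op built directly on it (absdiff is not part of TOPI):

#include <tvm/tir/op.h>
#include <tvm/topi/detail/broadcast.h>
#include <tvm/topi/tags.h>

// Elementwise |a - b| with NumPy-style broadcasting between A and B.
inline tvm::te::Tensor absdiff(const tvm::te::Tensor& A, const tvm::te::Tensor& B) {
  auto op = [](const tvm::PrimExpr& a, const tvm::PrimExpr& b) { return tvm::abs(a - b); };
  return tvm::topi::detail::WithBroadcast(op, A, B, "T_absdiff", tvm::topi::kBroadcast);
}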
https://github.com/zk-ml/tachikoma
include/tvm/topi/detail/constant_utils.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file constant_utils.h * \brief Utility functions for handling constants in TVM expressions */ #ifndef TVM_TOPI_DETAIL_CONSTANT_UTILS_H_ #define TVM_TOPI_DETAIL_CONSTANT_UTILS_H_ #include <tvm/arith/analyzer.h> #include <tvm/te/operation.h> #include <tvm/tir/analysis.h> #include <tvm/tir/expr.h> #include <string> #include <vector> namespace tvm { namespace topi { namespace detail { using namespace tvm::te; /*! * \brief Test whether the given Expr is a constant integer * * \param expr the Expr to query * * \return true if the given expr is a constant int or uint, false otherwise. */ inline bool IsConstInt(PrimExpr expr) { return expr->IsInstance<tvm::tir::IntImmNode>(); } /*! * \brief Test whether the given Array has every element as constant integer. * Undefined elements are also treated as constants. * * \param array the array to query * * \return true if every element in array is constant int or uint, false otherwise. */ inline bool IsConstIntArray(Array<PrimExpr> array) { bool is_const_int = true; for (auto const& elem : array) { is_const_int &= !elem.defined() || elem->IsInstance<tvm::tir::IntImmNode>(); } return is_const_int; } /*! * \brief Get the value of the given constant integer expression. An error * is logged if the given expression is not a constant integer. * * \param expr The expression to get the value of * * \return The integer value. */ inline int64_t GetConstInt(PrimExpr expr) { if (expr->IsInstance<tvm::IntImmNode>()) { return expr.as<tvm::IntImmNode>()->value; } LOG(ERROR) << "expr must be a constant integer"; return -1; } /*! * \brief Get the value of all the constant integer expressions in the given array * * \param exprs The array of expressions to get the values of * \param var_name The name to be used when logging an error in the event that any * of the expressions are not constant integers. * * \return A vector of the integer values */ inline std::vector<int> GetConstIntValues(Array<PrimExpr> exprs, const std::string& var_name) { std::vector<int> result; if (!exprs.defined()) return result; for (auto expr : exprs) { ICHECK(IsConstInt(expr)) << "All elements of " << var_name << " must be constant integers"; result.push_back(GetConstInt(expr)); } return result; } /*! * \brief Get the value of all the constant integer expressions in the given array * * \param exprs The array of expressions to get the values of * \param var_name The name to be used when logging an error in the event that any * of the expressions are not constant integers.
* * \return A vector of the int64_t values */ inline std::vector<int64_t> GetConstInt64Values(Array<PrimExpr> exprs, const std::string& var_name) { std::vector<int64_t> result; if (!exprs.defined()) return result; for (auto expr : exprs) { ICHECK(IsConstInt(expr)) << "All elements of " << var_name << " must be constant integers"; result.push_back(GetConstInt(expr)); } return result; } /*! * \brief Check whether the two expressions are equal or not, if not simplify the expressions and * check again * \note This is stronger equality check than tvm::tir::Equal * \param lhs First expression * \param rhs Second expression * \return result True if both expressions are equal, else false */ inline bool EqualCheck(PrimExpr lhs, PrimExpr rhs) { tvm::tir::ExprDeepEqual expr_equal; bool result = expr_equal(lhs, rhs); if (!result) { PrimExpr t = tvm::arith::Analyzer().Simplify(lhs - rhs); if (const IntImmNode* i = t.as<IntImmNode>()) { result = i->value == 0; } } return result; } } // namespace detail } // namespace topi } // namespace tvm #endif // TVM_TOPI_DETAIL_CONSTANT_UTILS_H_
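A worked example of the const-int helpers (names illustrative): a fully static shape converts to plain ints, while any dynamic dimension trips the ICHECK.

#include <tvm/topi/detail/constant_utils.h>

#include <vector>

inline std::vector<int> StaticDims() {
  tvm::Array<tvm::PrimExpr> shape = {2, 3, 4};  // all IntImm
  return tvm::topi::detail::GetConstIntValues(shape, "shape");  // {2, 3, 4}
}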
https://github.com/zk-ml/tachikoma
include/tvm/topi/detail/extern.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file detail/extern.h * \brief Helpers for using external functions */ #ifndef TVM_TOPI_DETAIL_EXTERN_H_ #define TVM_TOPI_DETAIL_EXTERN_H_ #include <tvm/te/operation.h> #include <tvm/tir/builtin.h> #include <string> #include <vector> namespace tvm { namespace topi { namespace detail { using namespace tvm::te; /*! * \brief Construct a buffer to pass to an external function * * \param shape The shape of the buffer * \param dtype The type of the buffer elements * \param name The name of the buffer * * \return The Buffer object */ inline Buffer DeclExternBuffer(Array<PrimExpr> shape, DataType dtype, std::string name) { auto data = var(name, DataType::Handle()); auto elem_offset = PrimExpr(); return Buffer(data, dtype, shape, Array<PrimExpr>(), elem_offset, name, -1, 0, kDefault); } /*! * \brief A function which constructs an Expr representing the invocation of an external * function. The function expects two arguments: an array of Buffers holding the input * tensor values, and a pre-allocated array of Buffers to be filled with the outputs. */ using FExtern = std::function<PrimExpr(Array<Buffer>, Array<Buffer>)>; /*! * \brief Create tensors representing the result of invoking an external function. * This function will create the necessary buffers to hold input and output tensor values. * * \param out_shapes An array where each element is the shape of the corresponding output tensor. * \param out_types An array where each element is the dtype of the corresponding output tensor. * \param inputs An array of input Tensors * \param fextern A function that constructs an Expr representing the invocation of * the external function given the input and output buffers. * \param name The name of the operation * \param tag The tag to mark the operation * \param attrs The additional auxiliary attributes of the operation. * * \return An array of Tensors representing the outputs of the function invocation. There will * be one output Tensor for each element of out_shapes, with dtype equal to the corresponding * element of out_types. 
*/ inline Array<Tensor> make_extern(const Array<Array<PrimExpr>>& out_shapes, const std::vector<DataType>& out_types, const Array<Tensor>& inputs, FExtern fextern, std::string name, std::string tag, ::tvm::Map<String, ObjectRef> attrs) { ICHECK_EQ(out_shapes.size(), out_types.size()) << "make_extern: out_shapes and out_types must have equal size"; Array<Buffer> input_placeholders; for (auto t : inputs) { input_placeholders.push_back(DeclExternBuffer(t->shape, t->dtype, t->op->name)); } Array<Buffer> output_placeholders; for (size_t i = 0; i < out_shapes.size(); ++i) { output_placeholders.push_back(DeclExternBuffer(out_shapes[i], out_types[i], name)); } auto body = fextern(input_placeholders, output_placeholders); auto body_stmt = tvm::tir::Evaluate(body); auto op = ExternOp(name, tag, attrs, inputs, input_placeholders, output_placeholders, body_stmt); Array<Tensor> outputs; for (size_t i = 0; i < output_placeholders.size(); ++i) { outputs.push_back(op.output(i)); } return outputs; } /*! * \brief This function is used to create a DLTensor structure on the stack to * be able to pass a symbolic buffer as arguments to TVM PackedFunc * * \param buf The buffer to pack * * \return An expression representing the pack operation */ inline PrimExpr pack_buffer(Buffer buf) { ICHECK_GT(buf->shape.size(), 0) << "buf shape must have at least one element"; auto shape = tvm::tir::Call(DataType::Handle(), tvm::tir::builtin::tvm_stack_make_shape(), buf->shape); PrimExpr strides; if (buf->strides.size() > 0) { strides = tvm::tir::Call(DataType::Handle(), tvm::tir::builtin::tvm_stack_make_shape(), buf->strides); } else { strides = 0; } Array<PrimExpr> pack_args{buf->data, shape, strides, make_const(DataType::Int(32), static_cast<int64_t>(buf->shape.size())), make_const(buf->dtype, 0), buf->elem_offset}; return tvm::tir::Call(DataType::Handle(), tvm::tir::builtin::tvm_stack_make_array(), pack_args); } /*! * \brief Construct an Expr representing the invocation of a PackedFunc * * \param args An array containing the registered name of the PackedFunc followed * by the arguments to pass to the PackedFunc when called. The first element of the * array must be a constant string expression. * * \return An expression representing the invocation */ inline PrimExpr call_packed(Array<PrimExpr> args) { return tvm::tir::Call(DataType::Int(32), tvm::tir::builtin::tvm_call_packed(), args); } } // namespace detail } // namespace topi } // namespace tvm #endif // TVM_TOPI_DETAIL_EXTERN_H_
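A sketch of wrapping a hypothetical packed function "my.ext.add" (not a real TVM registration) with make_extern; the output borrows the first input's shape and dtype.

#include <tvm/topi/detail/extern.h>

inline tvm::te::Tensor extern_add(const tvm::te::Tensor& a, const tvm::te::Tensor& b) {
  using namespace tvm::topi::detail;
  return make_extern(
      {a->shape}, {a->dtype}, {a, b},
      [&](tvm::Array<tvm::tir::Buffer> ins, tvm::Array<tvm::tir::Buffer> outs) {
        // Calls the (hypothetical) registered PackedFunc "my.ext.add".
        return call_packed({tvm::tir::StringImm("my.ext.add"), pack_buffer(ins[0]),
                            pack_buffer(ins[1]), pack_buffer(outs[0])});
      },
      "extern_add", "", {})[0];
}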
https://github.com/zk-ml/tachikoma
include/tvm/topi/detail/fuse.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file fuse.h * \brief Fuse operation */ #ifndef TVM_TOPI_DETAIL_FUSE_H_ #define TVM_TOPI_DETAIL_FUSE_H_ #include <tvm/te/operation.h> namespace tvm { namespace topi { namespace detail { using namespace tvm::te; /*! * \brief Fuse all of the given args * * \param stage The stage in which to apply the fuse * \param args The iteration variables to be fused * * \return The fused iteration variable */ inline IterVar Fuse(Stage stage, const Array<IterVar>& args) { IterVar res; stage.fuse(args, &res); return res; } } // namespace detail } // namespace topi } // namespace tvm #endif // TVM_TOPI_DETAIL_FUSE_H_
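Typical use, matching the CUDA schedules earlier in this tree: fuse every output axis into one IterVar before splitting it across blocks and threads.

#include <tvm/te/schedule.h>
#include <tvm/topi/detail/fuse.h>

// Fuse all axes of `out`'s stage into a single iteration variable.
inline tvm::te::IterVar FuseAllAxes(tvm::te::Schedule s, const tvm::te::Tensor& out) {
  return tvm::topi::detail::Fuse(s[out], s[out]->op.as<tvm::te::ComputeOpNode>()->axis);
}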
https://github.com/zk-ml/tachikoma
include/tvm/topi/detail/pad_utils.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file pad_utils.h * \brief Padding helpers */ #ifndef TVM_TOPI_DETAIL_PAD_UTILS_H_ #define TVM_TOPI_DETAIL_PAD_UTILS_H_ #include <tvm/te/operation.h> #include <tvm/tir/expr.h> #include <tvm/tir/op.h> #include <vector> namespace tvm { namespace topi { namespace detail { using namespace tvm::te; /*! * \brief Get padding size for each side given padding height and width * * \param pad_h The amount to pad each of the top and bottom sides * \param pad_w The amount to pad each of the left and right sides * * \return An array of 4 elements, representing padding sizes for * each individual side. The array is in the order { top, left, bottom, right } */ inline Array<PrimExpr> GetPadTuple(PrimExpr pad_h, PrimExpr pad_w) { pad_h *= 2; pad_w *= 2; auto pad_top = indexdiv(pad_h + 1, 2); auto pad_left = indexdiv(pad_w + 1, 2); return {pad_top, pad_left, pad_h - pad_top, pad_w - pad_left}; } } // namespace detail } // namespace topi } // namespace tvm #endif // TVM_TOPI_DETAIL_PAD_UTILS_H_
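Worked example: because pad_h and pad_w are doubled before being split, integer inputs always pad symmetrically.

#include <tvm/topi/detail/pad_utils.h>

inline tvm::Array<tvm::PrimExpr> PadExample() {
  // pad_h = 1, pad_w = 2 -> {top, left, bottom, right} = {1, 2, 1, 2}.
  return tvm::topi::detail::GetPadTuple(1, 2);
}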
https://github.com/zk-ml/tachikoma
include/tvm/topi/detail/ravel_unravel.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file ravel_unravel.h * \brief Index ravel and unravel operations */ #ifndef TVM_TOPI_DETAIL_RAVEL_UNRAVEL_H_ #define TVM_TOPI_DETAIL_RAVEL_UNRAVEL_H_ #include <tvm/te/operation.h> #include <algorithm> #include <vector> namespace tvm { namespace topi { namespace detail { using namespace tvm::te; /*! * \brief Flatten the indices to 1D * * \param indices The input coordinates * \param shape Shape of the tensor * * \return The index after flattening */ inline PrimExpr RavelIndex(Array<PrimExpr> indices, Array<PrimExpr> shape) { ICHECK_EQ(indices.size(), shape.size()) << "indices and shape must have equal size"; if (indices.size() == 0U) { return 0; } PrimExpr idx; for (size_t i = 0; i < indices.size(); ++i) { if (i == 0) { idx = indices[i]; } else { idx = idx * shape[i] + indices[i]; } } return idx; } /*! * \brief Convert flattened index to coordinate array * * \param idx The 1D index * \param shape Shape of the tensor * * \return The coordinate corresponding to the 1D index */ inline Array<PrimExpr> UnravelIndex(PrimExpr idx, Array<PrimExpr> shape) { std::vector<PrimExpr> indices; for (int i = static_cast<int>(shape.size()) - 1; i >= 0; --i) { indices.push_back(indexmod(idx, shape[i])); idx = indexdiv(idx, shape[i]); } std::reverse(indices.begin(), indices.end()); return indices; } } // namespace detail } // namespace topi } // namespace tvm #endif // TVM_TOPI_DETAIL_RAVEL_UNRAVEL_H_
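Worked example for shape (2, 3, 4): raveling builds the expression (1*3 + 2)*4 + 3 = 23, and unraveling inverts it.

#include <tvm/topi/detail/ravel_unravel.h>

inline void RavelExample() {
  tvm::Array<tvm::PrimExpr> shape = {2, 3, 4};
  tvm::PrimExpr flat = tvm::topi::detail::RavelIndex({1, 2, 3}, shape);  // evaluates to 23
  tvm::Array<tvm::PrimExpr> coord = tvm::topi::detail::UnravelIndex(flat, shape);  // {1, 2, 3}
  (void)coord;
}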
https://github.com/zk-ml/tachikoma
include/tvm/topi/detail/strided_slice.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file strided_slice.h * \brief Utility functions for strided_slice op */ #ifndef TVM_TOPI_DETAIL_STRIDED_SLICE_H_ #define TVM_TOPI_DETAIL_STRIDED_SLICE_H_ #include <tvm/tir/expr.h> #include <algorithm> #include <limits> #include <string> #include <tuple> #include <vector> #include "constant_utils.h" namespace tvm { namespace topi { namespace detail { using namespace tvm::te; inline int64_t CanonicalizeIndex(int64_t index, int64_t extent, int64_t stride) { int64_t begin_range = stride < 0 ? -1 : 0; int64_t end_range = stride < 0 ? extent - 1 : extent; if (index < 0) { index += extent; } return std::min(std::max(index, begin_range), end_range); } inline std::tuple<std::vector<int64_t>, std::vector<int64_t>, std::vector<int64_t>> ConvertToVec( const Array<Integer>& begin, const Array<Integer>& end, const Array<Integer>& strides, std::string slice_mode) { std::vector<int64_t> stride_vec(strides.size(), 1); if (slice_mode == "end") { for (size_t i = 0; i < strides.size(); ++i) { ICHECK(strides[i].defined()); stride_vec[i] = GetConstInt(strides[i]); } } const int64_t max_range = std::numeric_limits<int64_t>::max(); std::vector<int64_t> begin_vec; for (size_t i = 0; i < begin.size(); ++i) { if (!begin[i].defined()) { // value=None begin_vec.push_back(stride_vec[i] > 0 ? 0 : max_range); } else { begin_vec.push_back(GetConstInt(begin[i])); } } std::vector<int64_t> end_vec; for (size_t i = 0; i < end.size(); ++i) { // allow end to be None if (!end[i].defined()) { end_vec.push_back(stride_vec[i] < 0 ? 0 : max_range); } else if (slice_mode == "size") { int64_t end_val = GetConstInt(end[i]); if (end_val < 0) { end_vec.push_back(stride_vec[i] < 0 ? 0 : max_range); } else { end_vec.push_back(begin_vec[i] + end_val); } } else { end_vec.push_back(GetConstInt(end[i])); } } return std::make_tuple(begin_vec, end_vec, stride_vec); } inline Array<PrimExpr> StridedSliceCanonicalizeBegin(const Array<PrimExpr>& ishape, const std::vector<int64_t>& begin, const std::vector<int64_t>& strides, const Array<Integer>& axes, DataType dtype, std::string slice_mode = "end") { Array<PrimExpr> begin_expr; for (size_t i = 0; i < axes.size(); ++i) { if (ishape[axes[i].IntValue()]->IsInstance<tvm::IntImmNode>()) { int64_t dim_i = GetConstInt(ishape[axes[i].IntValue()]); int64_t begin_i = CanonicalizeIndex(begin[i], dim_i, strides[i]); begin_expr.push_back(make_const(dtype, begin_i)); } else { auto idim = ishape[axes[i].IntValue()]; auto b_expr = make_const(dtype, begin[i]); PrimExpr b = begin[i] < 0 ? 
b_expr + idim : b_expr; auto s = strides[i]; if (s < 0) { b = tvm::min(b, idim - 1); } else { b = tvm::if_then_else(b < 0, 0, b); } begin_expr.push_back(b); } } return begin_expr; } inline Array<PrimExpr> StridedSliceOutputShape(const Array<PrimExpr>& ishape, const std::vector<int64_t>& begin, const std::vector<int64_t>& end, const std::vector<int64_t>& strides, const Array<Integer>& axes, std::string slice_mode, const Array<PrimExpr>& begin_canonicalized, bool use_any = false) { const size_t src_tensor_dim = ishape.size(); Array<PrimExpr> out_shape; for (size_t i = 0; i < src_tensor_dim; ++i) { out_shape.push_back(ishape[i]); } for (size_t i = 0; i < axes.size(); ++i) { if (ishape[axes[i].IntValue()]->IsInstance<tvm::IntImmNode>()) { const int64_t dim_i = GetConstInt(ishape[axes[i].IntValue()]); ICHECK(begin_canonicalized[i]->IsInstance<tvm::IntImmNode>()); int64_t begin_i = GetConstInt(begin_canonicalized[i]); int64_t end_i = CanonicalizeIndex(end[i], dim_i, strides[i]); int interval = std::abs(end_i - begin_i); int slice_size = static_cast<int>((interval + std::abs(strides[i]) - 1) / std::abs(strides[i])); ICHECK(strides[i] < 0 ? (end_i <= begin_i) : (begin_i <= end_i)) << ": Input [Begin=" << begin[i] << ", End=" << end[i] << "] is invalid for axis=" << i; out_shape.Set(axes[i].IntValue(), cast(out_shape[i].dtype(), PrimExpr(slice_size))); } else if (use_any) { out_shape.Set(axes[i].IntValue(), tvm::tir::Any()); } else { out_shape.Set(axes[i].IntValue(), tvm::tir::Var("dim", out_shape[i]->dtype)); } } return out_shape; } } // namespace detail } // namespace topi } // namespace tvm #endif // TVM_TOPI_DETAIL_STRIDED_SLICE_H_
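Worked examples of the index canonicalization above (extent 10):

#include <tvm/topi/detail/strided_slice.h>

inline void CanonicalizeExamples() {
  using tvm::topi::detail::CanonicalizeIndex;
  int64_t a = CanonicalizeIndex(-2, 10, 1);    // 8: -2 + 10
  int64_t b = CanonicalizeIndex(42, 10, 1);    // 10: clamped to the exclusive end
  int64_t c = CanonicalizeIndex(-12, 10, -1);  // -1: -12 + 10 = -2, then clamped to begin_range
  (void)a; (void)b; (void)c;
}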
https://github.com/zk-ml/tachikoma
include/tvm/topi/detail/tensor_utils.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tensor_utils.h * \brief Utility functions for handling tensor */ #ifndef TVM_TOPI_DETAIL_TENSOR_UTILS_H_ #define TVM_TOPI_DETAIL_TENSOR_UTILS_H_ #include <tvm/te/operation.h> #include <vector> namespace tvm { namespace topi { namespace detail { using namespace tvm::te; /*! * \brief Check whether input shape has dimension of size 0; * * \param x Input shape * * \return True if the input shape is empty. */ inline bool is_empty_shape(const Array<PrimExpr>& x) { bool is_empty = false; for (const auto& dim : x) { if (auto int_dim = dim.as<IntImmNode>()) { if (int_dim->value == 0) { is_empty = true; break; } } } return is_empty; } /*! * \brief Sample a point in a tensor using bilinear interpolation. * * \param input The input tensor. * \param indices The index of the target point, which can be fractional * \param max_y The maximum of y dimension * \param max_x The maximum of x dimension * * \return The interpolated value in the given index. */ inline PrimExpr bilinear_sample_nchw(const Tensor& input, const Array<PrimExpr>& indices, const PrimExpr max_y, const PrimExpr max_x) { auto batch_id = indices[0]; auto channel_id = indices[1]; auto in_y = indices[2]; auto in_x = indices[3]; auto y_low = tvm::cast(DataType::Int(32), tvm::floor(in_y)); auto y_high = y_low + 1; auto x_low = tvm::cast(DataType::Int(32), tvm::floor(in_x)); auto x_high = x_low + 1; auto wy_h = in_y - y_low; auto wx_h = in_x - x_low; auto wy_l = 1 - wy_h; auto wx_l = 1 - wx_h; PrimExpr val = 0; std::vector<std::vector<PrimExpr>> wx_xp{{wx_l, x_low}, {wx_h, x_high}}; std::vector<std::vector<PrimExpr>> wy_yp{{wy_l, y_low}, {wy_h, y_high}}; for (auto wx_xp_ele : wx_xp) { for (auto wy_yp_ele : wy_yp) { auto wx = wx_xp_ele[0]; auto xp = wx_xp_ele[1]; auto wy = wy_yp_ele[0]; auto yp = wy_yp_ele[1]; val += tvm::if_then_else(0 <= yp && yp <= max_y && 0 <= xp && xp <= max_x, wx * wy * input(batch_id, channel_id, yp, xp), 0); } } return val; } /*! * \brief Sample a point in a tensor using bilinear interpolation. * * \param input The input tensor. * \param indices The index of the target point, which can be fractional * \param max_y The maximum of y dimension * \param max_x The maximum of x dimension * * \return The interpolated value in the given index. 
*/ inline PrimExpr bilinear_sample_nhwc(const Tensor& input, const Array<PrimExpr>& indices, const PrimExpr max_y, const PrimExpr max_x) { auto batch_id = indices[0]; auto channel_id = indices[3]; auto in_y = indices[1]; auto in_x = indices[2]; auto y_low = tvm::cast(DataType::Int(32), tvm::floor(in_y)); auto y_high = y_low + 1; auto x_low = tvm::cast(DataType::Int(32), tvm::floor(in_x)); auto x_high = x_low + 1; auto wy_h = in_y - y_low; auto wx_h = in_x - x_low; auto wy_l = 1 - wy_h; auto wx_l = 1 - wx_h; PrimExpr val = 0; std::vector<std::vector<PrimExpr>> wx_xp{{wx_l, x_low}, {wx_h, x_high}}; std::vector<std::vector<PrimExpr>> wy_yp{{wy_l, y_low}, {wy_h, y_high}}; for (auto wx_xp_ele : wx_xp) { for (auto wy_yp_ele : wy_yp) { auto wx = wx_xp_ele[0]; auto xp = wx_xp_ele[1]; auto wy = wy_yp_ele[0]; auto yp = wy_yp_ele[1]; val += tvm::if_then_else(0 <= yp && yp <= max_y && 0 <= xp && xp <= max_x, wx * wy * input(batch_id, yp, xp, channel_id), 0); } } return val; } } // namespace detail } // namespace topi } // namespace tvm #endif // TVM_TOPI_DETAIL_TENSOR_UTILS_H_
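A sketch (not from the source) of a 2x bilinear upsample built on bilinear_sample_nchw; the coordinate mapping and shapes are illustrative.

#include <tvm/te/operation.h>
#include <tvm/topi/detail/tensor_utils.h>

inline tvm::te::Tensor Upsample2xNCHW(const tvm::te::Tensor& data) {
  auto h = data->shape[2];
  auto w = data->shape[3];
  return tvm::te::compute(
      {data->shape[0], data->shape[1], h * 2, w * 2},
      [&](tvm::tir::Var n, tvm::tir::Var c, tvm::tir::Var y, tvm::tir::Var x) {
        // Map each output pixel back to a fractional input coordinate.
        tvm::Array<tvm::PrimExpr> idx{n, c, y * 0.5f, x * 0.5f};
        return tvm::topi::detail::bilinear_sample_nchw(data, idx, h - 1, w - 1);
      },
      "upsample2x");
}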
https://github.com/zk-ml/tachikoma
include/tvm/topi/einsum.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file topi/einsum.h * \brief Einstein summation op */ #ifndef TVM_TOPI_EINSUM_H_ #define TVM_TOPI_EINSUM_H_ #define LABELRANGE 128 #define NPY_MAXDIMS 16 #define NPY_MAXARGS 16 #include <tvm/te/operation.h> #include <tvm/tir/data_layout.h> #include <tvm/topi/detail/constant_utils.h> #include <tvm/topi/detail/ravel_unravel.h> #include <tvm/topi/detail/tensor_utils.h> #include <tvm/topi/tags.h> #include <algorithm> #include <bitset> #include <iterator> #include <string> #include <tuple> #include <unordered_set> #include <vector> namespace tvm { namespace topi { using namespace tvm::te; using namespace topi::detail; /*! * \brief Compute the shape of the output. * \param subscripts input subscripts. * \param operands operand tensors. * * \return the shape of the output. */ Array<PrimExpr> InferEinsumShape(const std::string& subscripts, const std::vector<Array<PrimExpr>>& operands); /*! * \brief Evaluates the Einstein summation convention on the operands. * * \param subscripts_str Specifies the subscripts for summation as comma separated list of * subscript labels. * \param inputs Arrays for the operation. * \param name The name of the operation. * \param tag The tag to mark the operation. * * \return The calculation based on the Einstein summation convention. */ Tensor einsum(const std::string& subscripts_str, const Array<Tensor> inputs, std::string name = "T_einsum", std::string tag = kEinsum); struct EinsumEquation { /*! * \brief Create EinsumEquation from a string. * The result will be converted to the explicit mode of Einsum if it is in implicit mode. * \return The created EinsumEquation. */ static EinsumEquation FromString(const std::string& equation); using Label = char; using Subscript = std::vector<Label>; // Special label value for ellipsis. The value is chosen to be less than any other letter to make // sorting easier. static constexpr Label kEllipsis = '\0'; // The input subscripts for each operand of the Einsum operator. std::vector<Subscript> inputs; // The output subscript of the Einsum equation. Subscript output; }; } // namespace topi } // namespace tvm #endif // TVM_TOPI_EINSUM_H_
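Sketch: batched matrix multiplication expressed through the einsum op (shapes illustrative).

#include <tvm/te/operation.h>
#include <tvm/topi/einsum.h>

inline tvm::te::Tensor BatchMatmulEinsum() {
  auto A = tvm::te::placeholder({4, 32, 64}, tvm::DataType::Float(32), "A");
  auto B = tvm::te::placeholder({4, 64, 16}, tvm::DataType::Float(32), "B");
  return tvm::topi::einsum("bik,bkj->bij", {A, B});  // result shape (4, 32, 16)
}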
https://github.com/zk-ml/tachikoma
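A usage sketch for the einsum interface declared above, expressing a plain matrix multiply; the shapes and the wrapper name einsum_matmul_example are illustrative assumptions.

#include <tvm/te/operation.h>
#include <tvm/topi/einsum.h>

// Matrix multiply written via the Einstein-summation entry point (sketch).
inline tvm::te::Tensor einsum_matmul_example() {
  auto A = tvm::te::placeholder({16, 32}, tvm::DataType::Float(32), "A");
  auto B = tvm::te::placeholder({32, 8}, tvm::DataType::Float(32), "B");
  // "ij,jk->ik" contracts the shared label j, yielding a (16, 8) tensor.
  return tvm::topi::einsum("ij,jk->ik", {A, B});
}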
include/tvm/topi/elemwise.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file elemwise.h * \brief Elementwise op constructions */ #ifndef TVM_TOPI_ELEMWISE_H_ #define TVM_TOPI_ELEMWISE_H_ #include <tvm/tir/builtin.h> #include <tvm/tir/expr.h> #include <tvm/topi/tags.h> #include <algorithm> #include <string> #include "broadcast.h" namespace tvm { namespace topi { using namespace tvm::te; // Unary intrinsic operators #define TOPI_DECLARE_UNARY_OP(OpName) \ inline Tensor OpName(const Tensor& x, std::string name = "T_" #OpName, \ std::string tag = kElementWise) { \ return compute( \ x->shape, [&](const Array<Var>& i) { return ::tvm::OpName(x(i)); }, name, tag); \ } TOPI_DECLARE_UNARY_OP(exp); TOPI_DECLARE_UNARY_OP(erf); TOPI_DECLARE_UNARY_OP(sigmoid); TOPI_DECLARE_UNARY_OP(sqrt); TOPI_DECLARE_UNARY_OP(log); TOPI_DECLARE_UNARY_OP(log2); TOPI_DECLARE_UNARY_OP(log10); TOPI_DECLARE_UNARY_OP(floor); TOPI_DECLARE_UNARY_OP(ceil); TOPI_DECLARE_UNARY_OP(round); TOPI_DECLARE_UNARY_OP(trunc); TOPI_DECLARE_UNARY_OP(abs); TOPI_DECLARE_UNARY_OP(cos); TOPI_DECLARE_UNARY_OP(cosh); TOPI_DECLARE_UNARY_OP(tan); TOPI_DECLARE_UNARY_OP(sin); TOPI_DECLARE_UNARY_OP(sinh); TOPI_DECLARE_UNARY_OP(acos); TOPI_DECLARE_UNARY_OP(acosh); TOPI_DECLARE_UNARY_OP(asin); TOPI_DECLARE_UNARY_OP(asinh); TOPI_DECLARE_UNARY_OP(atan); TOPI_DECLARE_UNARY_OP(atanh); TOPI_DECLARE_UNARY_OP(isnan); TOPI_DECLARE_UNARY_OP(tanh); TOPI_DECLARE_UNARY_OP(isfinite); TOPI_DECLARE_UNARY_OP(isinf); /*! * \brief Fast_tanh_float implementation from Eigen * https://github.com/eigenteam/eigen-git-mirror/blob/master/Eigen/src/Core/MathFunctionsImpl.h#L26 */ inline Tensor fast_tanh_float(const Tensor& in, std::string name, std::string tag) { // Clamp the inputs to the range [-9, 9] since anything outside // this range is +/-1.0f in single-precision. auto x = maximum(make_const(in->dtype, -9.0), minimum(make_const(in->dtype, 9.0), in)); // The monomial coefficients of the numerator polynomial (odd). auto alpha_1 = make_const(in->dtype, 4.89352455891786e-03); auto alpha_3 = make_const(in->dtype, 6.37261928875436e-04); auto alpha_5 = make_const(in->dtype, 1.48572235717979e-05); auto alpha_7 = make_const(in->dtype, 5.12229709037114e-08); auto alpha_9 = make_const(in->dtype, -8.60467152213735e-11); auto alpha_11 = make_const(in->dtype, 2.00018790482477e-13); auto alpha_13 = make_const(in->dtype, -2.76076847742355e-16); // The monomial coefficients of the denominator polynomial (even). 
auto beta_0 = make_const(in->dtype, 4.89352518554385e-03); auto beta_2 = make_const(in->dtype, 2.26843463243900e-03); auto beta_4 = make_const(in->dtype, 1.18534705686654e-04); auto beta_6 = make_const(in->dtype, 1.19825839466702e-06); return compute( x->shape, [&](const Array<Var>& i) { auto x2 = x(i) * x(i); auto p = x2 * alpha_13 + alpha_11; p = x2 * p + alpha_9; p = x2 * p + alpha_7; p = x2 * p + alpha_5; p = x2 * p + alpha_3; p = x2 * p + alpha_1; p = x(i) * p; auto q = x2 * beta_6 + beta_4; q = x2 * q + beta_2; q = x2 * q + beta_0; return p / q; }, name, tag); } /*! * \brief Creates an operation that returns hyperbolic tanh of a given tensor * * \param x The input tensor * \param name The name of the operation * \param tag The tag to mark the operation * * \return A Tensor whose op member is tanh */ inline Tensor fast_tanh(const Tensor& x, std::string name = "T_fast_tanh", std::string tag = kElementWise) { if (x->dtype == DataType::Float(32)) { // invoke fast_tanh_float implementation return fast_tanh_float(x, name, tag); } else { // fallback to default implementation return compute( x->shape, [&](const Array<Var>& i) { return ::tvm::tanh(x(i)); }, name, tag); } } /*! * \brief Creates an operation that returns identity of a given tensor * * \param x The input tensor * \param name The name of the operation * \param tag The tag to mark the operation * * \return A Tensor whose op member is the identity operation */ inline Tensor identity(const Tensor& x, std::string name = "T_identity", std::string tag = kElementWise) { return compute( x->shape, [&](const Array<Var>& i) { return x(i); }, name, tag); } /*! * \brief Creates an operation that returns the negation of a given tensor * * \param x The input tensor * \param name The name of the operation * \param tag The tag to mark the operation * * \return A Tensor whose op member is the negation operation */ inline Tensor negative(const Tensor& x, std::string name = "T_negative", std::string tag = kElementWise) { return compute( x->shape, [&](const Array<Var>& i) { return -x(i); }, name, tag); } /*! * \brief Creates an operation that returns the logical NOT of a given tensor * * \param x The input tensor * \param name The name of the operation * \param tag The tag to mark the operation * * \return A Tensor whose op member is the logical NOT operation */ inline Tensor logical_not(const Tensor& x, std::string name = "T_logical_not", std::string tag = kElementWise) { return compute( x->shape, [&](const Array<Var>& i) { return !x(i); }, name, tag); } /*! * \brief Creates an operation that returns the bitwise NOT of a given tensor * * \param x The input tensor * \param name The name of the operation * \param tag The tag to mark the operation * * \return A Tensor whose op member is the bitwise NOT operation */ inline Tensor bitwise_not(const Tensor& x, std::string name = "T_bitwise_not", std::string tag = kElementWise) { return compute( x->shape, [&](const Array<Var>& i) { return ~x(i); }, name, tag); } /*! 
* \brief Returns the sign of the tensor * * \param x The input tensor * \param name The name of the operation * \param tag The tag to mark the operation * * \return A Tensor whose op member is the sign */ inline Tensor sign(const Tensor& x, std::string name = "T_sign", std::string tag = kElementWise) { return compute( x->shape, [&](const Array<Var>& i) { PrimExpr zero = make_zero(x->dtype); PrimExpr one = make_const(x->dtype, 1); PrimExpr minus_one = make_const(x->dtype, -1); auto s1 = tvm::tir::Select((x(i) < zero), minus_one, zero); auto s2 = tvm::tir::Select((x(i) > zero), one, s1); return s2; }, name, tag); } /*! * \brief Creates an operation that returns rsqrt of a given tensor * * \param x The input tensor * \param name The name of the operation * \param tag The tag to mark the operation * * \return A Tensor whose op member is the rsqrt operation */ inline Tensor rsqrt(const Tensor& x, std::string name = "tensor", std::string tag = kElementWise) { return compute( x->shape, [&](const Array<Var>& i) { PrimExpr one = make_const(x->dtype, 1); return one / tvm::sqrt(x(i)); }, name, tag); } /*! * \brief Creates an operation that clips each element of a tensor to * the interval [a_min, a_max] * * \param x The input tensor * \param a_min The inclusive lower bound of the interval * \param a_max The inclusive upper bound of the interval * \param name The name of the operation * \param tag The tag to mark the operation * * \return A Tensor whose op member is the clip operation */ inline Tensor clip(const Tensor& x, const PrimExpr& a_min, const PrimExpr& a_max, std::string name = "T_clip", std::string tag = kElementWise) { return compute( x->shape, [&](const Array<Var>& i) { auto min_val = tvm::cast(x->dtype, a_min); auto max_val = tvm::cast(x->dtype, a_max); return tvm::max(tvm::min(x(i), max_val), min_val); // NOLINT(*) }, name, tag); } /*! * \brief Cast each element of x to the given type. If expr is * scalar and type is a corresponding vector type, a * Broadcast is generated, otherwise a Cast is generated. * * \param x The input tensor * \param type The type to cast to * \param name The name of the operation * \param tag The tag to mark the operation * * \return A Tensor whose op member is the cast operation */ inline Tensor cast(const Tensor& x, DataType type, std::string name = "T_cast", std::string tag = kElementWise) { return compute( x->shape, [&](const Array<Var>& i) -> PrimExpr { auto expr = x(i); if (expr.dtype().code() == type.code() && expr.dtype().bits() == type.bits()) { if (expr.dtype().lanes() == type.lanes()) { return expr; } else if (expr.dtype().lanes() == 1 && type.lanes() > 1) { return tvm::tir::Broadcast(expr, type.lanes()); } } return tvm::cast(type, x(i)); }, name, tag); } /*! * \brief Reinterpret each element of x to the given type. * \param x The input tensor * \param type The type to cast to * \param name The name of the operation * \param tag The tag to mark the operation * * \return A Tensor whose op member is the reinterpret operation */ inline Tensor reinterpret(const Tensor& x, DataType type, std::string name = "tensor", std::string tag = kElementWise) { return compute( x->shape, [&](const Array<Var>& i) { return tvm::tir::Call(type, tvm::tir::builtin::reinterpret(), {x(i)}); }, name, tag); } /*! 
* \brief Creates an operation that computes the elementwise sum of an array of tensors * * \param xs The input tensor array * \param name The name of the operation * \param tag The tag to mark the operation * * \return A Tensor whose op member is the sum operation */ inline Tensor elemwise_sum(const Array<Tensor>& xs, std::string name = "T_elemwise_sum", std::string tag = kElementWise) { ICHECK_GT(xs.size(), 0) << "elemwise sum must have at least one input tensor."; return compute( xs[0]->shape, [&](const Array<Var>& i) { auto sum_expr = xs[0](i); for (size_t j = 1; j < xs.size(); j++) { sum_expr = sum_expr + xs[j](i); } return sum_expr; }, name, tag); } /*! * \brief Creates an operation that fills a tensor with fill_value * * \param shape The shape of the tensor * \param dtype The type of fill_value * \param fill_value The value to be filled * \param name The name of the operation * \param tag The tag to mark the operation * * \return A Tensor whose op member is the full operation */ inline Tensor full(const Array<PrimExpr>& shape, DataType dtype, const PrimExpr fill_value, std::string name = "T_full", std::string tag = kElementWise) { PrimExpr ev = cast(dtype, fill_value); if (!ev.defined()) { LOG(ERROR) << "Can't cast fill_value to " << dtype; } return compute( shape, [&](const Array<Var>& i) { return ev; }, name, tag); } /*! * \brief Creates an operation that constructs a tensor with the same shape as the input * tensor and fills it with fill_value * * \param x The input tensor * \param fill_value The value to be filled * \param name The name of the operation * \param tag The tag to mark the operation * * \return A Tensor whose op member is the full_like operation */ inline Tensor full_like(const Tensor& x, const PrimExpr fill_value, std::string name = "T_full_like", std::string tag = kElementWise) { PrimExpr ev = cast(x->dtype, fill_value); return compute( x->shape, [&](const Array<Var>& i) { return ev; }, name, tag); } /*!
* \brief Fast exponential function implementation * * \param _x The input tensor * \param name The name of the operation * \param tag The tag to mark the operation * * \return A Tensor whose op member is the exponent operation * * \note Function computes: * log2(e^x) = x * log2(e) => * e^x = 2^(x*log2(e)) * Splitting the exponent x*log2(e) into integer and fractional parts: * n = floor(x*log2(e) + 1/2) * f = x - n * ln(2) * exp(x) = 2^n * exp(f) * Approximation for the fractional part: * exp(f) ~= 1 + f + f^2 * P(f), with P a degree-5 polynomial */ inline Tensor fast_exp_float32(const Tensor& _x, std::string name, std::string tag) { auto x_hi = make_const(DataType::Float(32), 88.3762626647950f); auto x_lo = make_const(DataType::Float(32), -88.3762626647949f); auto log2e = make_const(DataType::Float(32), 1.44269504088896341f); auto ln2 = make_const(DataType::Float(32), 0.6931471805599453f); PrimExpr p[6] = {make_const(DataType::Float(32), 1.9875691500E-4f), make_const(DataType::Float(32), 1.3981999507E-3f), make_const(DataType::Float(32), 8.3334519073E-3f), make_const(DataType::Float(32), 4.1665795894E-2f), make_const(DataType::Float(32), 1.6666665459E-1f), make_const(DataType::Float(32), 5.0000001201E-1f)}; auto one = make_const(DataType::Float(32), 1.0f); auto one_half = make_const(DataType::Float(32), 0.5f); auto b = make_const(DataType::Float(32), 127.0f); return compute( _x->shape, [&](const Array<Var>& i) { // clamp x auto x = ::tvm::max(::tvm::min(_x(i), x_hi), x_lo); // integer part auto n = ::tvm::floor(x * log2e + one_half); // fractional part auto f = x - n * ln2; auto y = (((((p[0] * f + p[1]) * f + p[2]) * f + p[3]) * f + p[4]) * f + p[5]) * f * f + f + one; // Return 2^n * exp(f). auto ef = tvm::reinterpret(DataType::Float(32), ::tvm::cast(DataType::Int(32), n + b) << 23); return ::tvm::max(ef * y, _x(i)); // NOLINT(*) }, name, tag); } /*! * \brief Fast exponential function implementation * * \param x The input tensor * \param name The name of the operation * \param tag The tag to mark the operation * * \return A Tensor whose op member is the exponent operation * */ inline Tensor fast_exp(const Tensor& x, std::string name = "T_fast_exp", std::string tag = kElementWise) { if (x->dtype == DataType::Float(32)) { auto ret = fast_exp_float32(x, name, tag); return ret; } else { return compute( x->shape, [&](const Array<Var>& i) { return ::tvm::exp(x(i)); }, name, tag); } } /*! * \brief Fast_erf_float expression from Eigen * https://github.com/eigenteam/eigen-git-mirror/blob/master/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsImpl.h#L290 * \param arg The input expression. * \param bits The number of bits in the type. */ inline PrimExpr fast_erf_float_expr(PrimExpr arg, int bits) { auto plus_4 = make_const(DataType::Float(bits), 4.f); auto minus_4 = make_const(DataType::Float(bits), -4.f); // The monomial coefficients of the numerator polynomial (odd). auto alpha_1 = make_const(DataType::Float(bits), -1.60960333262415e-02f); auto alpha_3 = make_const(DataType::Float(bits), -2.95459980854025e-03f); auto alpha_5 = make_const(DataType::Float(bits), -7.34990630326855e-04f); auto alpha_7 = make_const(DataType::Float(bits), -5.69250639462346e-05f); auto alpha_9 = make_const(DataType::Float(bits), -2.10102402082508e-06f); auto alpha_11 = make_const(DataType::Float(bits), 2.77068142495902e-08f); auto alpha_13 = make_const(DataType::Float(bits), -2.72614225801306e-10f); // The monomial coefficients of the denominator polynomial (even).
auto beta_0 = make_const(DataType::Float(bits), -1.42647390514189e-02f); auto beta_2 = make_const(DataType::Float(bits), -7.37332916720468e-03f); auto beta_4 = make_const(DataType::Float(bits), -1.68282697438203e-03f); auto beta_6 = make_const(DataType::Float(bits), -2.13374055278905e-04f); auto beta_8 = make_const(DataType::Float(bits), -1.45660718464996e-05f); // clamp x auto x = tvm::max(tvm::min(arg, plus_4), minus_4); auto x2 = x * x; // Evaluate the numerator polynomial p. auto p = x2 * alpha_13 + alpha_11; p = x2 * p + alpha_9; p = x2 * p + alpha_7; p = x2 * p + alpha_5; p = x2 * p + alpha_3; p = x2 * p + alpha_1; p = x * p; // Evaluate the denominator polynomial q. auto q = x2 * beta_8 + beta_6; q = x2 * q + beta_4; q = x2 * q + beta_2; q = x2 * q + beta_0; return p / q; } /*! * \brief Fast_erf_float expression from Eigen for float32. */ inline Tensor fast_erf_float32(const Tensor& data, std::string name, std::string tag) { return compute( data->shape, [&](const Array<Var>& i) { return fast_erf_float_expr(data(i), 32); }, name, tag); } /*! * \brief Fast_erf_float expression from Eigen for float16. */ inline Tensor fast_erf_float16(const Tensor& data, std::string name, std::string tag) { return compute( data->shape, [&](const Array<Var>& i) { return fast_erf_float_expr(data(i), 16); }, name, tag); } /*! * \brief Fast erf implementation * * \param x The input tensor * \param name The name of the operation * \param tag The tag to mark the operation * * \return A Tensor whose op member is the erf operation */ inline Tensor fast_erf(const Tensor& x, std::string name = "T_fast_erf", std::string tag = kElementWise) { if (x->dtype == DataType::Float(32)) { auto ret = fast_erf_float32(x, name, tag); return ret; } else if (x->dtype == DataType::Float(16)) { auto ret = fast_erf_float16(x, name, tag); return ret; } else { return topi::erf(x); } } } // namespace topi } // namespace tvm #endif  // TVM_TOPI_ELEMWISE_H_
https://github.com/zk-ml/tachikoma
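A short sketch combining several of the elementwise builders declared above (clip, fast_tanh, cast); the tensor shape and the wrapper name are illustrative assumptions.

#include <tvm/te/operation.h>
#include <tvm/topi/elemwise.h>

// Chain a few elementwise ops: clamp, fast tanh (float32 path), down-cast.
inline tvm::te::Tensor elemwise_chain_example() {
  auto x = tvm::te::placeholder({1024}, tvm::DataType::Float(32), "x");
  auto clipped = tvm::topi::clip(x, tvm::PrimExpr(-1.0f), tvm::PrimExpr(1.0f));
  // A float32 input takes the polynomial fast_tanh_float path above.
  auto activated = tvm::topi::fast_tanh(clipped);
  // cast() emits a plain Cast here; Broadcast is only used for scalar->vector types.
  return tvm::topi::cast(activated, tvm::DataType::Float(16));
}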
include/tvm/topi/generic/default.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file generic/default.h * \brief Generic default schedule */ #ifndef TVM_TOPI_GENERIC_DEFAULT_H_ #define TVM_TOPI_GENERIC_DEFAULT_H_ #include <tvm/target/generic_func.h> #include <tvm/te/operation.h> #include <tvm/te/schedule_pass.h> #include <tvm/topi/detail/fuse.h> #include <tvm/topi/tags.h> namespace tvm { namespace topi { using namespace tvm::te; namespace generic { /*! * \brief Create a generic default schedule for the given output tensors. * * \param target The target to generate a schedule for. * \param outs The output tensors. * * \return A schedule for the given ops. */ inline Schedule default_schedule(const Target& target, const Array<Tensor>& outs) { Array<Operation> out_ops; for (auto t : outs) { out_ops.push_back(t->op); } auto s = create_schedule(out_ops); return s; } /*! * \brief Create a generic default schedule for the given output tensors, and apply * auto inline * * \param target The target to generate a schedule for. * \param outs The output tensors. * * \return A schedule for the given ops. */ inline Schedule default_schedule_auto_inline(const Target& target, const Array<Tensor>& outs) { Array<Operation> out_ops; for (auto t : outs) { out_ops.push_back(t->op); } auto s = create_schedule(out_ops); auto x = outs[0]; tvm::te::AutoInlineInjective(s); auto axis = s[x]->op.as<ComputeOpNode>()->axis; if (axis.size() > 0) { detail::Fuse(s[x], axis); } return s; } } // namespace generic } // namespace topi } // namespace tvm #endif // TVM_TOPI_GENERIC_DEFAULT_H_
https://github.com/zk-ml/tachikoma
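Usage sketch for the generic default schedule above; the op, shapes, and target string are illustrative assumptions.

#include <tvm/target/target.h>
#include <tvm/te/operation.h>
#include <tvm/topi/broadcast.h>
#include <tvm/topi/generic/default.h>

// Build an elementwise add and schedule it with the generic default schedule.
inline tvm::te::Schedule default_schedule_example() {
  auto A = tvm::te::placeholder({64}, tvm::DataType::Float(32), "A");
  auto B = tvm::te::placeholder({64}, tvm::DataType::Float(32), "B");
  auto C = tvm::topi::add(A, B);
  tvm::Target target("llvm");  // any target works; the schedule is generic
  return tvm::topi::generic::default_schedule(target, {C});
}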
include/tvm/topi/generic/extern.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file generic/extern.h * \brief Schedule for extern followed by injective ops */ #ifndef TVM_TOPI_GENERIC_EXTERN_H_ #define TVM_TOPI_GENERIC_EXTERN_H_ #include <tvm/target/generic_func.h> #include <tvm/te/operation.h> #include <tvm/te/schedule_pass.h> #include <tvm/topi/detail/fuse.h> #include <tvm/topi/generic/injective.h> #include <tvm/topi/tags.h> namespace tvm { namespace topi { using namespace tvm::te; namespace generic { /*! * \brief Schedule an extern op followed by injective operations * * \param target The target to generate a schedule for. * \param outs The output tensors. * * \return A schedule for the op. */ inline Schedule schedule_extern(const Target& target, const Array<Tensor>& outs) { Array<Operation> out_ops; for (auto t : outs) { out_ops.push_back(t->op); } auto s = create_schedule(out_ops); tvm::te::AutoInlineInjective(s); for (auto out : outs) { if (out->op->IsInstance<ExternOpNode>()) { continue; } tvm::GenericFunc::Get("schedule_injective_from_existing")(s, out); } return s; } } // namespace generic } // namespace topi } // namespace tvm #endif // TVM_TOPI_GENERIC_EXTERN_H_
https://github.com/zk-ml/tachikoma
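Usage sketch for schedule_extern above. Note the assumption: schedule_extern dispatches non-extern outputs through the "schedule_injective_from_existing" GenericFunc, which must already be registered (it is in a normal TOPI build); the op and shapes are illustrative.

#include <tvm/target/target.h>
#include <tvm/te/operation.h>
#include <tvm/topi/broadcast.h>
#include <tvm/topi/generic/extern.h>

// schedule_extern skips ExternOp outputs and routes the remaining outputs
// through the registered injective schedule.
inline tvm::te::Schedule schedule_extern_example() {
  auto A = tvm::te::placeholder({128}, tvm::DataType::Float(32), "A");
  auto B = tvm::topi::add(A, A);
  tvm::Target target("llvm");
  return tvm::topi::generic::schedule_extern(target, {B});
}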
include/tvm/topi/generic/injective.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file generic/injective.h * \brief Generic schedule for injective operations */ #ifndef TVM_TOPI_GENERIC_INJECTIVE_H_ #define TVM_TOPI_GENERIC_INJECTIVE_H_ #include <tvm/target/generic_func.h> #include <tvm/te/operation.h> #include <tvm/te/schedule_pass.h> #include <tvm/topi/detail/fuse.h> #include <tvm/topi/tags.h> namespace tvm { namespace topi { using namespace tvm::te; namespace generic { /*! * \brief Updates an existing schedule for the given injective ops. * * \param sch The schedule to update. * \param out The tensor representing the injective op. * * \return The updated schedule. */ inline Schedule schedule_injective_from_existing(Schedule sch, const Tensor& out) { detail::Fuse(sch[out], sch[out]->op.as<ComputeOpNode>()->axis); return sch; } /*! * \brief Create a generic schedule for the given injective ops. * * \param target The target to generate a schedule for. * \param outs The output tensors. * * \return A schedule for the given ops. */ inline Schedule schedule_injective(const Target& target, const Array<Tensor>& outs) { Array<Operation> out_ops; for (auto t : outs) { out_ops.push_back(t->op); } auto s = create_schedule(out_ops); tvm::te::AutoInlineInjective(s); auto x = outs[0]; schedule_injective_from_existing(s, x); return s; } } // namespace generic } // namespace topi } // namespace tvm #endif // TVM_TOPI_GENERIC_INJECTIVE_H_
https://github.com/zk-ml/tachikoma
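Usage sketch for the generic injective schedule above; op, shapes, and target are illustrative assumptions.

#include <tvm/target/target.h>
#include <tvm/te/operation.h>
#include <tvm/topi/elemwise.h>
#include <tvm/topi/generic/injective.h>

// schedule_injective auto-inlines all injective stages, then fuses the output
// axes of the first output tensor into one iteration axis.
inline tvm::te::Schedule schedule_injective_example() {
  auto X = tvm::te::placeholder({8, 32}, tvm::DataType::Float(32), "X");
  auto Y = tvm::topi::negative(X);
  tvm::Target target("llvm");
  return tvm::topi::generic::schedule_injective(target, {Y});
}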
include/tvm/topi/nn.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \brief NN op constructions * \file topi/nn.h */ #ifndef TVM_TOPI_NN_H_ #define TVM_TOPI_NN_H_ #include <tvm/arith/analyzer.h> #include <tvm/te/operation.h> #include <tvm/tir/expr.h> #include <tvm/tir/op.h> #include <tvm/topi/detail/constant_utils.h> #include <tvm/topi/reduction.h> #include <tvm/topi/tags.h> #include <tvm/topi/transform.h> #include <algorithm> #include <string> namespace tvm { namespace topi { using namespace tvm::te; /*! * \brief Creates an operation that performs a rectified linear unit * * \param t The input tensor * \param threshold The relu threshold (default 0) * \param name The name of the operation * \param tag The tag to mark the operation * * \return A Tensor whose op member is the relu operation */ template <typename T> inline tvm::te::Tensor relu(const tvm::te::Tensor& t, T threshold = static_cast<T>(0), std::string name = "T_relu", std::string tag = kElementWise) { return tvm::te::compute( t->shape, [&](const tvm::Array<tvm::tir::Var>& i) { auto threshold_const = tvm::tir::make_const(t->dtype, threshold); return tvm::max(t(i), threshold_const); }, name, tag); } /*! * \brief Creates an operation that performs a leaky rectified linear unit * * \param t The input tensor * \param alpha The slope for the small gradient when t < 0 * \param name The name of the operation * \param tag The tag to mark the operation * * \return A Tensor whose op member is the leaky relu operation */ inline tvm::te::Tensor leaky_relu(const tvm::te::Tensor& t, double alpha = 0.1, std::string name = "T_leaky_relu", std::string tag = kElementWise) { return tvm::te::compute( t->shape, [&](const tvm::Array<tvm::tir::Var>& i) { auto value = t(i); auto calpha = tvm::tir::make_const(value.dtype(), alpha); return tvm::tir::Select(value > 0, value, value * calpha); }, name, tag); } /*! * \brief Creates an operation that performs a parametric rectified linear unit * * \param x The input data tensor * \param slope The channel-wise slope tensor * \param axis The axis where the channel data needs to be applied * \param name The name of the operation * \param tag The tag to mark the operation * * \return A Tensor whose op member is the parametric relu operation */ inline tvm::te::Tensor prelu(const tvm::te::Tensor& x, const tvm::te::Tensor& slope, const int axis = 1, std::string name = "T_prelu", std::string tag = kBroadcast) { ICHECK((size_t)axis < x->shape.size()) << "Wrong axis (" << axis << ")value. "; ICHECK(topi::detail::GetConstInt(slope->shape[0]) == topi::detail::GetConstInt(x->shape[axis])) << "Wrong slope shape received."; return tvm::te::compute( x->shape, [&](const tvm::Array<tvm::tir::Var>& indices) { auto xval = x(indices); return tvm::tir::Select(xval > 0, xval, xval * slope(indices[axis])); }, name, tag); } /*! 
* \brief Creates an operation that performs padding * * \param t The input tensor * \param pad_before An Array of Expr describing the padding before the * respective iterator * \param pad_after An Array of Expr describing the padding after the * respective iterator * \param pad_value The value to fill padding elements with * \param pad_mode Padding type to use. * "constant" pads with pad_value; * "edge" pads using the edge values of the input array; * "reflect" pads by reflecting values with respect to the edges. * \param dyn_output_shape Output shape of the pad op, default nullptr. * You only need to pass this in if the shape was evaluated dynamically. * \param name The name of the operation * \param tag The tag to mark the operation * * \return A Tensor whose op member is the padding operation * * \note * The pad_after Array must not be longer than pad_before; * missing trailing entries take the same values as the corresponding * pad_before entries (symmetric padding) * The pad Array applies from the leading dimensions and skips missing * trailing dimensions: * * pad(t(i, j, k), {1}, {0}) returns the equivalent operation for * the following pseudocode: * for i in [0, t.shape[0] + 1): * for j in [0, t.shape[1]): * for k in [0, t.shape[2]): * name(i,j,k) = * (1 <= i) ? * t(i-1, j, k) : 0; * * */ inline tvm::te::Tensor pad(const tvm::te::Tensor& t, const tvm::Array<tvm::PrimExpr>& pad_before, tvm::Array<tvm::PrimExpr> pad_after = tvm::Array<tvm::PrimExpr>(), PrimExpr pad_value = PrimExpr(), std::string name = "T_pad", std::string tag = kElementWise, std::string pad_mode = "constant", const Array<PrimExpr>* dyn_output_shape = nullptr) { if (pad_after.size() < pad_before.size()) { for (size_t i = pad_after.size(); i < pad_before.size(); ++i) { pad_after.push_back(pad_before[i]); } } arith::Analyzer analyzer; ICHECK_GE(pad_before.size(), 1); ICHECK_EQ(pad_before.size(), pad_after.size()); tvm::Array<tvm::PrimExpr> pad_before_int32; tvm::Array<tvm::PrimExpr> pad_after_int32; for (const auto& ele : pad_before) { pad_before_int32.push_back(tvm::cast(tvm::DataType::Int(32), ele)); } for (const auto& ele : pad_after) { pad_after_int32.push_back(tvm::cast(tvm::DataType::Int(32), ele)); } tvm::Array<tvm::PrimExpr> output_shape; if (dyn_output_shape == nullptr) { for (size_t i = 0; i < t->shape.size(); ++i) { if (i >= pad_before.size()) { output_shape.push_back(t->shape[i]); } else { output_shape.push_back( analyzer.Simplify(t->shape[i] + pad_before_int32[i] + pad_after_int32[i])); } } } else { for (size_t i = 0; i < dyn_output_shape->size(); i++) { output_shape.push_back((*dyn_output_shape)[i]); } } if (!pad_value.defined()) { pad_value = tvm::tir::make_const(t->dtype, 0); } auto l = [&](tvm::Array<tvm::tir::Var> ovars) { tvm::Array<tvm::PrimExpr> indices; tvm::Array<tvm::PrimExpr> sel; tvm::Array<tvm::PrimExpr> pad_idx; for (size_t i = 0; i < t->shape.size(); ++i) { if (i >= pad_before_int32.size()) { indices.push_back(ovars[i]); continue; } if (!topi::detail::EqualCheck(pad_before_int32[i], 0)) { sel.push_back(ovars[i] >= pad_before_int32[i]); indices.push_back(ovars[i] - pad_before_int32[i]); } else { indices.push_back(ovars[i]); } if (!topi::detail::EqualCheck(pad_after_int32[i], 0)) { sel.push_back(analyzer.Simplify(ovars[i] < pad_before_int32[i] + t->shape[i])); } if (pad_mode == "edge") { pad_idx.push_back( tvm::if_then_else(ovars[i] < pad_before[i], 0, tvm::if_then_else(ovars[i] >= pad_before[i] + t->shape[i], t->shape[i] - 1, ovars[i] - pad_before[i]))); }
else if (pad_mode == "reflect") { pad_idx.push_back( tvm::if_then_else(ovars[i] < pad_before[i], pad_before[i] - ovars[i], tvm::if_then_else(ovars[i] >= pad_before[i] + t->shape[i], t->shape[i] * 2 - ovars[i] + pad_before[i] - 2, ovars[i] - pad_before[i]))); } } if (sel.size() != 0) { if (pad_mode == "constant") { return tvm::if_then_else( foldl([](PrimExpr a, PrimExpr b, Span span) { return tvm::logical_and(a, b, span); }, const_true(1), sel), t(indices), pad_value); } else if (pad_mode == "edge" || pad_mode == "reflect") { return tvm::if_then_else( foldl([](PrimExpr a, PrimExpr b, Span span) { return tvm::logical_and(a, b, span); }, const_true(1), sel), t(indices), t(pad_idx)); } } return t(indices); }; return tvm::te::compute(output_shape, l, name, tag); } /*! * \brief Creates an operation that performs a 2-D convolution with an * NCHW-layout * * \param I The 4-D input tensor * \param W The 4-D weight tensor * \param pad_h A static constant padding amount applied to the height of the * image, before and after (symmetric padding) * \param pad_w A static constant padding amount applied to the width of the * image, before and after (symmetric padding) * \param stride_h A static constant striding amount applied along the height of * the image * \param stride_w A static constant striding amount applied along the width of * the image * \param name The name of the operation * \param tag The tag to mark the operation * * \return A Tensor whose op member is the 2-D convolution operation (NCHW * layout) */ inline tvm::te::Tensor conv2d_nchw(const tvm::te::Tensor& I, const tvm::te::Tensor& W, int pad_h = 0, int pad_w = 0, int stride_h = 1, int stride_w = 1, std::string name = "T_conv2d_nchw", std::string tag = kConv2dNCHW) { ICHECK_EQ(4, I->shape.size()); ICHECK_EQ(4, W->shape.size()); auto pH = I->shape[2]; auto pW = I->shape[3]; tvm::Array<tvm::PrimExpr> output_shape{ I->shape[0], // B W->shape[0], // O indexdiv(I->shape[2] - W->shape[2] + 2 * pad_h, stride_h) + 1, // H indexdiv(I->shape[3] - W->shape[3] + 2 * pad_w, stride_w) + 1 // W }; auto i = tvm::te::reduce_axis(tvm::Range{0, I->shape[1]}, "i"); auto kh = tvm::te::reduce_axis(tvm::Range{0, W->shape[2]}, "kh"); auto kw = tvm::te::reduce_axis(tvm::Range{0, W->shape[3]}, "kw"); auto T = (pad_h == 0 && pad_w == 0) ? I : pad(I, {tvm::PrimExpr(0), tvm::PrimExpr(0), pad_h, pad_w}); auto l = [&](tvm::tir::Var b, tvm::tir::Var o, tvm::tir::Var h, tvm::tir::Var w) { return tvm::sum(T(b, i, stride_h * h + kh, stride_w * w + kw) * W(o, i, kh, kw), {i, kh, kw}); }; return tvm::te::compute(output_shape, l, name, tag); } /*!
* \brief Creates an operation for a 2-D convolution layer with an HWCN-layout * * \param I The 4-D input tensor * \param W The 4-D weight tensor * \param pad_h A static constant padding amount applied to the height of the * image, before and after (symmetric padding) * \param pad_w A static constant padding amount applied to the width of the * image, before and after (symmetric padding) * \param stride_h A static constant striding amount applied along the height of * the image * \param stride_w A static constant striding amount applied along the width of * the image * \param name The name of the operation * \param tag The tag to mark the operation * * \return A Tensor whose op member is the 2-D convolution operation * (HWCN layout) */ inline tvm::te::Tensor conv2d_hwcn(const tvm::te::Tensor& I, const tvm::te::Tensor& W, int pad_h = 0, int pad_w = 0, int stride_h = 1, int stride_w = 1, std::string name = "T_conv2d_hwcn", std::string tag = kConv2dHWCN) { ICHECK_EQ(4, I->shape.size()); ICHECK_EQ(4, W->shape.size()); auto pH = I->shape[0]; auto pW = I->shape[1]; tvm::Array<tvm::PrimExpr> output_shape{ indexdiv(I->shape[0] - W->shape[0] + 2 * pad_h, stride_h) + 1, // H indexdiv(I->shape[1] - W->shape[1] + 2 * pad_w, stride_w) + 1, // W I->shape[3], // B W->shape[3] // O }; auto i = tvm::te::reduce_axis(tvm::Range{0, I->shape[2]}, "i"); auto kh = tvm::te::reduce_axis(tvm::Range{0, W->shape[0]}, "kh"); auto kw = tvm::te::reduce_axis(tvm::Range{0, W->shape[1]}, "kw"); auto T = (pad_h == 0 && pad_w == 0) ? I : pad(I, {pad_h, pad_w}); auto l = [&](tvm::tir::Var h, tvm::tir::Var w, tvm::tir::Var b, tvm::tir::Var o) { return tvm::sum(T(stride_h * h + kh, stride_w * w + kw, i, b) * W(kh, kw, i, o), {i, kh, kw}); }; return tvm::te::compute(output_shape, l, name, tag); } /*! * \brief Creates an operation that performs a 2-D depthwise convolution with * an NCHW-layout * * \param I The 4-D input tensor * \param W The 4-D weight tensor * \param pad_h A static constant padding amount applied to the height of the * image, before and after (symmetric padding) * \param pad_w A static constant padding amount applied to the width of the * image, before and after (symmetric padding) * \param stride_h A static constant striding amount applied along the height of * the image * \param stride_w A static constant striding amount applied along the width of * the image * \param name The name of the operation * \param tag The tag to mark the operation * * \return A Tensor whose op member is the 2-D depthwise convolution operation * (NCHW layout) */ inline tvm::te::Tensor depthwise_conv2d_nchw(const tvm::te::Tensor& I, const tvm::te::Tensor& W, int pad_h = 0, int pad_w = 0, int stride_h = 1, int stride_w = 1, std::string name = "T_depthwise_conv2d_nchw", std::string tag = kDepthwiseConv2dNCHW) { ICHECK_EQ(4, I->shape.size()); ICHECK_EQ(4, W->shape.size()); auto pH = I->shape[2]; auto pW = I->shape[3]; auto pCM = W->shape[1]; // channel_multiplier tvm::Array<tvm::PrimExpr> output_shape{ I->shape[0], // B W->shape[1], // O indexdiv(I->shape[2] - W->shape[2] + 2 * pad_h, stride_h) + 1, // H indexdiv(I->shape[3] - W->shape[3] + 2 * pad_w, stride_w) + 1 // W }; auto i = tvm::te::reduce_axis(tvm::Range{0, I->shape[1]}, "i"); auto kh = tvm::te::reduce_axis(tvm::Range{0, W->shape[2]}, "kh"); auto kw = tvm::te::reduce_axis(tvm::Range{0, W->shape[3]}, "kw"); auto T = (pad_h == 0 && pad_w == 0) ?
I : pad(I, {tvm::PrimExpr(0), tvm::PrimExpr(0), pad_h, pad_w}); auto l = [&](tvm::tir::Var b, tvm::tir::Var o, tvm::tir::Var h, tvm::tir::Var w) { return tvm::sum(T(b, indexdiv(i, pCM), stride_h * h + kh, stride_w * w + kw) * W(indexdiv(i, pCM), indexmod(o, pCM), kh, kw), {i, kh, kw}); }; return tvm::te::compute(output_shape, l, name, tag); } inline tvm::te::Tensor depthwise_conv2d_nhwc(const tvm::te::Tensor& I, const tvm::te::Tensor& W, int pad_h = 0, int pad_w = 0, int stride_h = 1, int stride_w = 1, std::string name = "T_depthwise_conv2d_nhwc", std::string tag = kDepthwiseConv2dNHWC) { ICHECK_EQ(4, I->shape.size()); ICHECK_EQ(4, W->shape.size()); auto pH = I->shape[1]; auto pW = I->shape[2]; auto pCM = W->shape[1]; // channel_multiplier tvm::Array<tvm::PrimExpr> output_shape{ I->shape[0], // B indexdiv(I->shape[1] - W->shape[1] + 2 * pad_h, stride_h) + 1, // H indexdiv(I->shape[2] - W->shape[2] + 2 * pad_w, stride_w) + 1, // W W->shape[3], // O }; auto i = tvm::te::reduce_axis(tvm::Range{0, I->shape[3]}, "i"); auto kh = tvm::te::reduce_axis(tvm::Range{0, W->shape[0]}, "kh"); auto kw = tvm::te::reduce_axis(tvm::Range{0, W->shape[1]}, "kw"); auto T = (pad_h == 0 && pad_w == 0) ? I : pad(I, {tvm::PrimExpr(0), pad_h, pad_w, tvm::PrimExpr(0)}); auto l = [&](tvm::tir::Var b, tvm::tir::Var h, tvm::tir::Var w, tvm::tir::Var o) { return tvm::sum(T(b, stride_h * h + kh, stride_w * w + kw, indexdiv(i, pCM)) * W(kh, kw, indexdiv(i, pCM), indexmod(o, pCM)), {kh, kw, i}); }; return tvm::te::compute(output_shape, l, name, tag); } /*! * \brief Creates an operation that performs a 2-D group convolution with * an NGCHW-layout * * \param I The 5-D input tensor * \param W The 5-D weight tensor * \param pad_h A static constant padding amount applied to the height of the * image, before and after (symmetric padding) * \param pad_w A static constant padding amount applied to the width of the * image, before and after (symmetric padding) * \param stride_h A static constant striding amount applied along the height of * the image * \param stride_w A static constant striding amount applied along the width of * the image * \param name The name of the operation * \param tag The tag to mark the operation * * \return A Tensor whose op member is the 2-D group convolution operation * (NGCHW layout) */ inline tvm::te::Tensor group_conv2d_ngchw(const tvm::te::Tensor& I, const tvm::te::Tensor& W, int pad_h = 0, int pad_w = 0, int stride_h = 1, int stride_w = 1, std::string name = "T_group_conv2d_ngchw", std::string tag = kGroupConv2d) { ICHECK_EQ(5, I->shape.size()); ICHECK_EQ(5, W->shape.size()); auto pH = I->shape[2]; auto pW = I->shape[3]; tvm::Array<tvm::PrimExpr> output_shape{ I->shape[0], // B I->shape[1], // G W->shape[2], // O indexdiv(I->shape[3] - W->shape[3] + 2 * pad_h, stride_h) + 1, // H indexdiv(I->shape[4] - W->shape[4] + 2 * pad_w, stride_w) + 1 // W }; auto i = tvm::te::reduce_axis(tvm::Range{0, I->shape[2]}, "i"); auto kh = tvm::te::reduce_axis(tvm::Range{0, W->shape[3]}, "kh"); auto kw = tvm::te::reduce_axis(tvm::Range{0, W->shape[4]}, "kw"); auto T = (pad_h == 0 && pad_w == 0) ?
I : pad(I, {tvm::PrimExpr(0), tvm::PrimExpr(0), tvm::PrimExpr(0), pad_h, pad_w}); auto l = [&](tvm::Array<tvm::tir::Var> args) { tvm::tir::Var b = args[0]; tvm::tir::Var g = args[1]; tvm::tir::Var o = args[2]; tvm::tir::Var h = args[3]; tvm::tir::Var w = args[4]; return tvm::sum(T(b, g, i, stride_h * h + kh, stride_w * w + kw) * W(g, i, o, kh, kw), {i, kh, kw}); }; return tvm::te::compute(output_shape, l, name, tag); } /*! * \brief Divide spatial dimensions of the input into a grid of blocks. * * \param data The input tensor. * \param block_shape The size of the spatial block. * \param pad_before The zero-padding size before each spatial dimension. * \param pad_after The zero-padding size after each spatial dimension. * \param pad_value The value used for padding. * \param name The name of the operation. * \param tag The tag to mark the operation. * * \return A Tensor whose op member is the space_to_batch_nd operation */ inline tvm::te::Tensor space_to_batch_nd(const tvm::te::Tensor& data, const tvm::Array<Integer>& block_shape, const tvm::Array<tvm::PrimExpr>& pad_before, const tvm::Array<tvm::PrimExpr>& pad_after, PrimExpr pad_value = PrimExpr(), std::string name = "space_to_batch_nd", std::string tag = kInjective) { tvm::te::Tensor padded_t; CHECK_EQ(pad_before.size(), pad_after.size()); CHECK_EQ(block_shape.size(), pad_before.size()) << "Paddings must be provided for each spatial dimension"; tvm::Array<tvm::PrimExpr> pad_before_int32; tvm::Array<tvm::PrimExpr> pad_after_int32; // pad size for batch dimension is 0 pad_before_int32.push_back(tvm::cast(tvm::DataType::Int(32), 0)); pad_after_int32.push_back(tvm::cast(tvm::DataType::Int(32), 0)); // insert pad sizes given for spatial dimensions for (const auto& ele : pad_before) { pad_before_int32.push_back(tvm::cast(tvm::DataType::Int(32), ele)); } for (const auto& ele : pad_after) { pad_after_int32.push_back(tvm::cast(tvm::DataType::Int(32), ele)); } // pad the input with paddings provided if (!pad_value.defined()) { pad_value = tvm::tir::make_const(data->dtype, 0); } padded_t = pad(data, pad_before_int32, pad_after_int32, pad_value); auto input_shape = data->shape; auto padded_shape = padded_t->shape; // infer shapes tvm::Array<PrimExpr> r_shape; tvm::Array<Integer> axis; tvm::Array<PrimExpr> o_shape; size_t num_block_dims = block_shape.size(); int batch = static_cast<int>(GetConstInt(input_shape[0])); tvm::PrimExpr block_shape_prod(1); r_shape.push_back(batch); for (size_t i = 1; i <= num_block_dims; i++) { int padded_input = static_cast<int>(GetConstInt(padded_shape[i])); int block_size = static_cast<int>(GetConstInt(block_shape[i - 1])); CHECK_EQ((padded_input % block_size), 0) << "(" << i << ")th input dimension after padding (" << padded_input << ")" << " must be divisible by its block size (" << block_size << ")"; r_shape.push_back(div(padded_shape[i], block_shape[i - 1])); r_shape.push_back(block_shape[i - 1]); block_shape_prod *= block_shape[i - 1]; axis.push_back(Integer(r_shape.size() - 1)); // index of block_shape[i - 1] } size_t n = axis.size(); axis.push_back(0); // batch is at index 0 // index of (padded_shape[i] / block_shape[i - 1]) in r_shape for (size_t i = 0; i < n; i++) { axis.push_back(static_cast<int>(GetConstInt(axis[i] - 1))); } o_shape.push_back(tvm::PrimExpr(batch) * block_shape_prod); for (size_t i = 1; i <= num_block_dims; i++) { o_shape.push_back(div(padded_shape[i], block_shape[i - 1])); } // append remaining shape for (size_t i = num_block_dims + 1; i < input_shape.size(); i++) {
r_shape.push_back(input_shape[i]); axis.push_back(Integer(r_shape.size() - 1)); // index of remaining shape in r_shape o_shape.push_back(input_shape[i]); } tvm::te::Tensor output = reshape(padded_t, r_shape); output = transpose(output, axis); output = reshape(output, o_shape); return output; } /*! * \brief Reshape the batch dimension into spatial dimensions. * * \param data The input tensor. * \param block_shape The size of the spatial block. * \param crop_begin_list The begin crop size for each spatial dimension. * \param crop_end_list The end crop size for each spatial dimension. * \param name The name of the operation. * \param tag The tag to mark the operation. * * \return A Tensor whose op member is the batch_to_space_nd operation */ inline tvm::te::Tensor batch_to_space_nd(const tvm::te::Tensor& data, const tvm::Array<Integer>& block_shape, const tvm::Array<tvm::PrimExpr>& crop_begin_list, const tvm::Array<tvm::PrimExpr>& crop_end_list, std::string name = "batch_to_space_nd", std::string tag = kInjective) { // Construct shapes for reshape and transpose operation Array<PrimExpr> in_shape = data->shape; Array<PrimExpr> r_shape; Array<Integer> axis; size_t num_block_dims = block_shape.size(); size_t num_input_dims = in_shape.size(); tvm::PrimExpr block_shape_prod(1); int batch = static_cast<int>(GetConstInt(in_shape[0])); for (size_t i = 0; i < num_block_dims; i++) { r_shape.push_back(block_shape[i]); block_shape_prod *= block_shape[i]; } axis.push_back(Integer(r_shape.size())); // axis of (batch / block_shape_prod) r_shape.push_back(batch / block_shape_prod); for (size_t i = 1; i < num_input_dims; i++) { axis.push_back(Integer(r_shape.size())); // axis of in_shape[i] if (axis.size() < (num_block_dims + num_input_dims)) { axis.push_back(Integer(r_shape.size() - (num_block_dims + 1))); // axis of block_shape[i] } r_shape.push_back(in_shape[i]); } Array<PrimExpr> r_p_shape; r_p_shape.push_back(batch / block_shape_prod); for (size_t i = 1; i <= num_block_dims; i++) { r_p_shape.push_back(in_shape[i] * block_shape[i - 1]); } for (size_t i = num_block_dims + 1; i < num_input_dims; i++) { r_p_shape.push_back(in_shape[i]); } tvm::te::Tensor out; out = reshape(data, r_shape); out = transpose(out, axis); out = reshape(out, r_p_shape); // Crop the start and end of dimensions of out Array<Integer> begin_idx, end_idx, strides; for (size_t i = 0; i < r_p_shape.size(); ++i) { strides.push_back(Integer(1)); if (i > 0 && i <= num_block_dims) { // prepare begin and end index for spatial dimensions int begin_i = static_cast<int>(GetConstInt(crop_begin_list[i - 1])); int end_i = static_cast<int>(GetConstInt(crop_end_list[i - 1])); int out_i = static_cast<int>(GetConstInt(r_p_shape[i])); CHECK_GT(out_i, (begin_i + end_i)) << "Incorrect crop sizes for (" << i << ")th dim, can not crop more than" << " output size" << out_i << " vs " << (begin_i + end_i); begin_idx.push_back(begin_i); end_idx.push_back(out_i - end_i); } else { // ignore the batch and remaining dimension begin_idx.push_back(Integer(0)); end_idx.push_back(static_cast<int>(GetConstInt(r_p_shape[i]))); } } out = strided_slice(out, begin_idx, end_idx, strides); return out; } /*! * \brief Negative log likelihood loss. * * \param predictions The prediction tensor. * \param targets The target tensor. * \param weights A manual rescaling weight given to each class. * \param reduction The reduction method to apply to the output. * \param ignore_index The target value to ignore. * \param name The name of the operation. 
* \param tag The tag to mark the operation. * * \return The negative log likelihood loss of the predictions and targets. */ inline Tensor nll_loss(const Tensor& predictions, const Tensor& targets, const Tensor& weights, std::string reduction = "mean", int ignore_index = -100, const std::string name = "nll_loss", const std::string tag = kBroadcast) { auto T = tvm::te::compute( targets->shape, [&](const tvm::Array<tvm::tir::Var>& target_indices) { auto c = targets(target_indices); tvm::Array<tvm::PrimExpr> pred_indices; pred_indices.push_back(target_indices[0]); // batch index pred_indices.push_back(c); // class index for (size_t i = 1; i < target_indices.size(); i++) { pred_indices.push_back(target_indices[i]); // indices for multidimensional loss } return tvm::tir::Select(c != ignore_index, -predictions(pred_indices) * weights(c), tvm::tir::make_const(predictions->dtype, 0)); }, name, tag); if (reduction == "mean") { auto W = tvm::te::compute( targets->shape, [&](const tvm::Array<tvm::tir::Var>& target_indices) { auto c = targets(target_indices); return tvm::tir::Select(c != ignore_index, weights(c), tvm::tir::make_const(predictions->dtype, 0)); }, name, tag); return topi::divide(topi::sum(T, {}), topi::sum(W, {})); } else if (reduction == "sum") { return topi::sum(T, {}); } else { // reduction == "none" return T; } } } // namespace topi } // namespace tvm #endif // TVM_TOPI_NN_H_
https://github.com/zk-ml/tachikoma
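Usage sketch built from the declarations in include/tvm/topi/nn.h above; the shapes and the wrapper name conv_relu_example are illustrative assumptions.

#include <tvm/te/operation.h>
#include <tvm/topi/nn.h>

// NCHW conv + relu: symmetric padding of 1 with a 3x3 kernel preserves the
// 32x32 spatial extent.
inline tvm::te::Tensor conv_relu_example() {
  auto data = tvm::te::placeholder({1, 3, 32, 32}, tvm::DataType::Float(32), "data");
  auto kernel = tvm::te::placeholder({8, 3, 3, 3}, tvm::DataType::Float(32), "kernel");
  auto conv = tvm::topi::conv2d_nchw(data, kernel, /*pad_h=*/1, /*pad_w=*/1);
  // relu is a template over the threshold type, so the type is spelled explicitly.
  return tvm::topi::relu<float>(conv);
}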
include/tvm/topi/nn/bias_add.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \brief bias_add op constructions * \file nn/bias_add.h */ #ifndef TVM_TOPI_NN_BIAS_ADD_H_ #define TVM_TOPI_NN_BIAS_ADD_H_ #include <tvm/te/operation.h> #include <tvm/topi/broadcast.h> #include <tvm/topi/tags.h> #include <tvm/topi/transform.h> #include <string> namespace tvm { namespace topi { namespace nn { /*! * \brief Creates an operation that calculates data + bias * * \param data Tensor with shape [batch, in_dim] * \param bias 1-D tensor whose length matches the size of data along the given axis * \param axis The axis to add the bias to. * \return Tensor with shape [batch, in_dim] */ inline tvm::te::Tensor bias_add(const tvm::te::Tensor& data, const tvm::te::Tensor& bias, int axis) { int data_ndim = data->shape.size(); if (axis < 0) { axis += data_ndim; } int num_newaxis = data_ndim - axis - 1; return add(data, (num_newaxis ? expand_dims(bias, 1, num_newaxis) : bias)); } } // namespace nn } // namespace topi } // namespace tvm #endif  // TVM_TOPI_NN_BIAS_ADD_H_
https://github.com/zk-ml/tachikoma
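Usage sketch for bias_add above; shapes are illustrative assumptions.

#include <tvm/te/operation.h>
#include <tvm/topi/nn/bias_add.h>

// Bias over the trailing axis: axis = -1 resolves to data_ndim - 1, so
// num_newaxis is 0 and the bias broadcasts directly against (32, 128).
inline tvm::te::Tensor bias_add_example() {
  auto data = tvm::te::placeholder({32, 128}, tvm::DataType::Float(32), "data");
  auto bias = tvm::te::placeholder({128}, tvm::DataType::Float(32), "bias");
  return tvm::topi::nn::bias_add(data, bias, -1);
}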
include/tvm/topi/nn/bnn.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \brief Binary op constructions * \file nn/bnn.h */ #ifndef TVM_TOPI_NN_BNN_H_ #define TVM_TOPI_NN_BNN_H_ #include <tvm/arith/analyzer.h> #include <tvm/te/operation.h> #include <tvm/topi/detail/constant_utils.h> #include <tvm/topi/tags.h> #include <string> namespace tvm { namespace topi { namespace nn { using namespace tvm::te; /*! * \brief Binarization and bit-packing along a certain axis. * * \param data N-D tensor, can be any layout * \param axis The axis along which to do binarization and bit-packing. This axis * must have a size equal to an integer multiple of 32. * \param name The name of the operation * \param tag The tag to mark the operation * * \return Output tensor with dtype uint32 */ inline tvm::te::Tensor binarize_pack(const tvm::te::Tensor& data, int axis, std::string name = "PackedInput", std::string tag = "binarize_pack") { auto ishape = data->shape; ICHECK_EQ(GetConstInt(ishape[axis]) % 32, 0) << "binarize_pack: axis size must be a multiple of 32"; arith::Analyzer analyzer; auto n = ishape.size(); Array<PrimExpr> oshape; for (size_t i = 0; i < n; ++i) { oshape.push_back(i == static_cast<size_t>(axis) ? analyzer.Simplify(indexdiv(ishape[i], 32)) : ishape[i]); } return tvm::te::compute( oshape, [&](const Array<Var>& indices) { Array<PrimExpr> start_idx; for (size_t i = 0; i < n; ++i) { start_idx.push_back(i == static_cast<size_t>(axis) ? indices[i] * 32 : static_cast<PrimExpr>(indices[i])); } auto packed = make_const(DataType::UInt(32), 0); for (size_t j = 0; j < 32; ++j) { Array<PrimExpr> idx; for (size_t i = 0; i < n; ++i) { idx.push_back(i == static_cast<size_t>(axis) ? start_idx[i] + static_cast<int>(j) : start_idx[i]); } auto sign = tvm::cast(DataType::UInt(32), data(idx) >= 0); packed = (packed | sign); if (j == 31) { return packed; } packed = packed << 1; } return packed; // never reached, but suppress compiler warning }, name, tag); } /*! 
* \brief Binary matrix multiplication using xor and bit-count * * \param data Tensor with shape [batch, in_dim], dtype is uint32 * \param weight Tensor with shape [out_dim, in_dim], dtype is uint32 * * \return Tensor with shape [batch, out_dim], dtype is float32 */ inline tvm::te::Tensor binary_dense(const tvm::te::Tensor& data, const tvm::te::Tensor& weight) { ICHECK_EQ(data->shape.size(), 2) << "binary_dense requires 2-D data"; ICHECK_EQ(weight->shape.size(), 2) << "binary_dense requires 2-D weight"; ICHECK_EQ(data->dtype, DataType::UInt(32)) << "binary_dense requires uint32 data"; ICHECK_EQ(weight->dtype, DataType::UInt(32)) << "binary_dense requires uint32 weight"; auto batch = data->shape[0]; auto in_dim = data->shape[1]; auto out_dim = weight->shape[0]; auto k = tvm::te::reduce_axis(Range(0, in_dim), "k"); auto matmul = tvm::te::compute( {batch, out_dim}, [&](Var i, Var j) { return tvm::sum(popcount(data(i, k) ^ weight(j, k)), {k}); }, "tensor", "binary_dense"); return tvm::te::compute( {batch, out_dim}, [&](Var i, Var j) { return 32 * in_dim - 2.0f * matmul(i, j); }, "tensor", kElementWise); } } // namespace nn } // namespace topi } // namespace tvm #endif // TVM_TOPI_NN_BNN_H_
https://github.com/zk-ml/tachikoma
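Usage sketch chaining the two ops in include/tvm/topi/nn/bnn.h above; shapes are illustrative assumptions (the packed axis length must be a multiple of 32).

#include <tvm/te/operation.h>
#include <tvm/topi/nn/bnn.h>

// Binarize float operands along the inner axis (256 = 8 x 32 bits), then run
// the xor/popcount matmul on the packed uint32 words.
inline tvm::te::Tensor binary_dense_example() {
  auto data = tvm::te::placeholder({8, 256}, tvm::DataType::Float(32), "data");
  auto weight = tvm::te::placeholder({16, 256}, tvm::DataType::Float(32), "weight");
  auto qdata = tvm::topi::nn::binarize_pack(data, 1);      // (8, 256) -> (8, 8) uint32
  auto qweight = tvm::topi::nn::binarize_pack(weight, 1);  // (16, 256) -> (16, 8) uint32
  return tvm::topi::nn::binary_dense(qdata, qweight);      // (8, 16) float32
}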
include/tvm/topi/nn/dense.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \brief Dense op constructions * \file nn/dense.h */ #ifndef TVM_TOPI_NN_DENSE_H_ #define TVM_TOPI_NN_DENSE_H_ #include <tvm/te/operation.h> #include <tvm/topi/tags.h> #include <string> namespace tvm { namespace topi { namespace nn { using namespace tvm::te; /*! * \brief Creates an operation that calculates data * weight^T + bias * * \param data Tensor with shape [batch, in_dim] * \param weight Tensor with shape [out_dim, in_dim] * \param bias Tensor with shape [out_dim]. Optional; to omit bias, pass Tensor() * \param out_dtype Output data type. Used for mixed precision. * * \return Tensor with shape [batch, out_dim] */ inline tvm::te::Tensor dense(const tvm::te::Tensor& data, const tvm::te::Tensor& weight, const tvm::te::Tensor& bias, const DataType& out_dtype) { ICHECK_EQ(data->shape.size(), 2) << "dense requires 2-D data"; ICHECK_EQ(weight->shape.size(), 2) << "dense requires 2-D weight"; if (bias.defined()) { ICHECK_EQ(bias->shape.size(), 1) << "dense requires 1-D bias"; } auto batch = data->shape[0]; auto in_dim = data->shape[1]; auto out_dim = weight->shape[0]; auto k = tvm::te::reduce_axis(Range(0, in_dim), "k"); auto matmul = tvm::te::compute( {batch, out_dim}, [&](Var i, Var j) { return tvm::sum(tvm::cast(out_dtype, data(i, k)) * tvm::cast(out_dtype, weight(j, k)), {k}); }, "tensor", "dense"); if (bias.defined()) { matmul = tvm::te::compute( {batch, out_dim}, [&](Var i, Var j) { return matmul(i, j) + tvm::cast(out_dtype, bias(j)); }, "tensor", kBroadcast); } return matmul; } } // namespace nn } // namespace topi } // namespace tvm #endif // TVM_TOPI_NN_DENSE_H_
https://github.com/zk-ml/tachikoma
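Usage sketch for dense above; shapes are illustrative assumptions.

#include <tvm/te/operation.h>
#include <tvm/topi/nn/dense.h>

// Fully connected layer: (4, 64) x (10, 64)^T + (10,) -> (4, 10).
inline tvm::te::Tensor dense_example() {
  auto data = tvm::te::placeholder({4, 64}, tvm::DataType::Float(32), "data");
  auto weight = tvm::te::placeholder({10, 64}, tvm::DataType::Float(32), "weight");
  auto bias = tvm::te::placeholder({10}, tvm::DataType::Float(32), "bias");
  // Pass tvm::te::Tensor() for bias to omit the bias term.
  return tvm::topi::nn::dense(data, weight, bias, tvm::DataType::Float(32));
}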
include/tvm/topi/nn/dilate.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \brief Dilate op constructions * \file nn/dilate.h */ #ifndef TVM_TOPI_NN_DILATE_H_ #define TVM_TOPI_NN_DILATE_H_ #include <tvm/arith/analyzer.h> #include <tvm/te/operation.h> #include <tvm/topi/tags.h> #include <string> namespace tvm { namespace topi { namespace nn { using namespace tvm::te; /*! * \brief Create a new expression of the logical and of all * conditions in the arguments. * * \param args The arguments to find the logical conjunction of * * \return The logical conjunction expression */ inline PrimExpr all(Array<PrimExpr> args) { ICHECK_GT(args.size(), 0) << "all requires at least one argument"; PrimExpr ret = args[0]; for (size_t i = 1; i < args.size(); ++i) { ret = ret && args[i]; } return ret; } /*! * \brief Dilate data with the given dilation value (0 by default). * * \param x The input tensor, this can have any number of * dimensions and any layout. * \param strides Dilation stride for each dimension. Stride 1 * means no dilation. * \param dilation_value Value used to fill the dilated positions. * \param name The name of the operation * \param tag The tag to mark the operation * * \return The output tensor. */ inline Tensor dilate(const Tensor& x, Array<PrimExpr> strides, double dilation_value, std::string name = "tensor", std::string tag = kInjective) { auto n = x->shape.size(); ICHECK_EQ(n, strides.size()) << "strides size (" << strides.size() << ") must match dimension of x (" << n << ")"; Array<PrimExpr> out_shape; arith::Analyzer analyzer; for (size_t i = 0; i < n; ++i) { out_shape.push_back( analyzer.Simplify((x->shape[i] - 1) * cast(DataType::Int(32), strides[i]) + 1)); } return tvm::te::compute( out_shape, [&](const Array<Var>& indices) { Array<PrimExpr> not_zero; Array<PrimExpr> index_tuple; for (size_t i = 0; i < n; ++i) { if (IsConstInt(strides[i]) && GetConstInt(strides[i]) == 1) { index_tuple.push_back(indices[i]); } else { index_tuple.push_back(indexdiv(indices[i], strides[i])); not_zero.push_back((indexmod(indices[i], strides[i])) == 0); } } if (not_zero.size() > 0) { auto all_not_zero = all(not_zero); return tvm::if_then_else(all_not_zero, x(index_tuple), make_const(x->dtype, dilation_value)); } return x(index_tuple); }, name, tag); } } // namespace nn } // namespace topi } // namespace tvm #endif  // TVM_TOPI_NN_DILATE_H_
https://github.com/zk-ml/tachikoma
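A short usage sketch for dilate. Each dilated extent is (d - 1) * stride + 1, which is what the (fixed) out_shape computation above produces; shapes and the harness are illustrative assumptions:

```cpp
#include <tvm/te/operation.h>
#include <tvm/te/schedule.h>
#include <tvm/topi/nn/dilate.h>

using tvm::DataType;
using namespace tvm::te;

int main() {
  // Dilate H and W of an NCHW tensor by stride 2; holes are filled with 0.
  // [1, 3, 8, 8] -> [1, 3, (8-1)*2+1, (8-1)*2+1] = [1, 3, 15, 15].
  Tensor x = placeholder({1, 3, 8, 8}, DataType::Float(32), "x");
  Tensor y = tvm::topi::nn::dilate(x, {1, 1, 2, 2}, /*dilation_value=*/0.0);
  Schedule s = create_schedule({y->op});
  return 0;
}
```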
include/tvm/topi/nn/flatten.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \brief Flatten op constructions * \file nn/flatten.h */ #ifndef TVM_TOPI_NN_FLATTEN_H_ #define TVM_TOPI_NN_FLATTEN_H_ #include <tvm/te/operation.h> #include <tvm/topi/detail/constant_utils.h> #include <tvm/topi/tags.h> #include <algorithm> #include <string> #include <vector> namespace tvm { namespace topi { namespace nn { using namespace tvm::te; /*! * \brief Flattens the input tensor into a 2-D tensor by collapsing higher dimensions. * This requires the input tensor to have constant sized dimensions. * * \param x The input tensor. * \param name The name of the operation * \param tag The tag to mark the operation * * \return A 2-D tensor. */ inline Tensor flatten(const Tensor& x, std::string name = "tensor", std::string tag = kInjective) { auto ishape = x->shape; PrimExpr dim = 1; for (size_t i = 1; i < ishape.size(); ++i) { dim = dim * ishape[i]; } Array<PrimExpr> oshape({ishape[0], dim}); std::vector<PrimExpr> extra_shape; for (size_t i = 1; i < ishape.size(); ++i) { extra_shape.push_back(ishape[i]); } std::reverse(extra_shape.begin(), extra_shape.end()); return tvm::te::compute( oshape, [&](Var i, Var j) { PrimExpr idx = j; std::vector<PrimExpr> index; for (auto s : extra_shape) { index.push_back(indexmod(idx, s)); idx = indexdiv(idx, s); } index.push_back(i); std::reverse(index.begin(), index.end()); return x(index); }, name, tag); } } // namespace nn } // namespace topi } // namespace tvm #endif  // TVM_TOPI_NN_FLATTEN_H_
https://github.com/zk-ml/tachikoma
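A minimal sketch of flatten in use; the shape and harness are illustrative assumptions:

```cpp
#include <tvm/te/operation.h>
#include <tvm/te/schedule.h>
#include <tvm/topi/nn/flatten.h>

using tvm::DataType;
using namespace tvm::te;

int main() {
  // Keep dim 0, collapse all trailing dimensions: [2, 3, 4, 5] -> [2, 60].
  Tensor x = placeholder({2, 3, 4, 5}, DataType::Float(32), "x");
  Tensor y = tvm::topi::nn::flatten(x);
  Schedule s = create_schedule({y->op});
  return 0;
}
```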
include/tvm/topi/nn/layer_norm.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \brief layer normalization op constructions * \file nn/layer_norm.h */ #ifndef TVM_TOPI_NN_LAYER_NORM_H_ #define TVM_TOPI_NN_LAYER_NORM_H_ #include <tvm/te/operation.h> #include <tvm/topi/tags.h> #include <string> namespace tvm { namespace topi { namespace nn { using namespace tvm::te; /*! * \brief Layer normalization. * \param data N-D tensor with shape [d_0, d_1, ..., d_{N-1}] * \param gamma K-D tensor with shape [r_0, r_1, ..., r_{K-1}] where K == len(axis) and * d_{axis_k} == r_k * \param beta Optional, K-D tensor with shape [r_0, r_1, ..., r_{K-1}] where * d_{axis_k} == r_k * \param axis The axis to normalize over. * \param epsilon The epsilon value to avoid division by zero. * \param name The name of the operation. * \param tag The tag to mark the operation. * \return The normalized tensor, with the same shape as data. */ inline Tensor layer_norm(const Tensor& data, const Tensor& gamma, const Tensor& beta, const Array<Integer>& axis, double epsilon, std::string name = "T_layer_norm", std::string tag = kInjective) { // sum x and x^2 auto ndim = data->shape.size(); ICHECK_NE(ndim, 0) << "Cannot reduce a 0 dim Tensor"; auto real_axis = GetRealAxis(static_cast<int>(ndim), axis); auto reduce_axes = MakeReduceAxes(real_axis, data); auto target_shape = MakeReduceTargetShape(real_axis, data, /*keepdims=*/false, /*atleast1d=*/true); auto func = MakeTupleSumReducer(); auto compute = [ndim, &real_axis, &reduce_axes, &func, &data](const Array<Var>& indices) { Array<PrimExpr> eval_range; int arg_counter = 0; int red_counter = 0; for (size_t i = 0; i < ndim; ++i) { if (std::find(real_axis.begin(), real_axis.end(), i) != real_axis.end()) { // real_axis contains i eval_range.push_back(reduce_axes[red_counter]); red_counter++; } else { eval_range.push_back(indices[arg_counter]); arg_counter++; } } auto square = [](const PrimExpr& x) { return x * x; }; return func({data(eval_range), square(data(eval_range))}, reduce_axes, nullptr); }; auto temp_x_x2 = tvm::te::compute(target_shape, compute, data->op->name + "_red_temp", kCommReduce); auto temp_x = temp_x_x2[0]; auto temp_x2 = temp_x_x2[1]; auto reduce_extent = make_const(data->dtype, 1); for (int i : real_axis) { reduce_extent *= data->shape[i]; } auto layer_norm_func = [&](const Array<Var>& indices) { Array<Var> reduce_indices, non_reduce_indices; for (int i = 0, n = static_cast<int>(indices.size()); i < n; ++i) { if (std::find(real_axis.begin(), real_axis.end(), i) != real_axis.end()) { reduce_indices.push_back(indices[i]); } else { non_reduce_indices.push_back(indices[i]); } } auto mean = temp_x(non_reduce_indices) / reduce_extent; auto var = temp_x2(non_reduce_indices) / reduce_extent - mean * mean; auto layer_norm = (data(indices) - mean) * tvm::rsqrt(var + 
make_const(var->dtype, epsilon)); layer_norm = topi::multiply(layer_norm, gamma(reduce_indices)); if (beta.defined()) { layer_norm = topi::add(layer_norm, beta(reduce_indices)); } return layer_norm; }; return tvm::te::compute(data->shape, layer_norm_func, name, tag); } } // namespace nn } // namespace topi } // namespace tvm #endif // TVM_TOPI_NN_LAYER_NORM_H_
https://github.com/zk-ml/tachikoma
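A usage sketch for layer_norm. The header above leans on reducer helpers (GetRealAxis, MakeReduceAxes, MakeTupleSumReducer) that live in <tvm/topi/reduction.h>, so this sketch includes that header first; the shapes and harness are illustrative assumptions:

```cpp
#include <tvm/te/operation.h>
#include <tvm/te/schedule.h>
#include <tvm/topi/reduction.h>        // reducer helpers used by layer_norm
#include <tvm/topi/nn/layer_norm.h>

using tvm::DataType;
using namespace tvm::te;

int main() {
  // Normalize over the last axis; gamma/beta carry the reduced shape [32].
  Tensor data = placeholder({2, 8, 32}, DataType::Float(32), "data");
  Tensor gamma = placeholder({32}, DataType::Float(32), "gamma");
  Tensor beta = placeholder({32}, DataType::Float(32), "beta");
  Tensor out = tvm::topi::nn::layer_norm(data, gamma, beta, {-1}, /*epsilon=*/1e-5);
  Schedule s = create_schedule({out->op});
  return 0;
}
```

Negative axes are resolved by GetRealAxis, so {-1} here is equivalent to {2} for a 3-D input.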
include/tvm/topi/nn/local_response_norm.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \brief local response normalization op constructions * \file nn/local_response_norm.h */ #ifndef TVM_TOPI_NN_LOCAL_RESPONSE_NORM_H_ #define TVM_TOPI_NN_LOCAL_RESPONSE_NORM_H_ #include <tvm/te/operation.h> #include <tvm/topi/tags.h> #include <string> namespace tvm { namespace topi { namespace nn { using namespace tvm::te; /*! * \brief Local response normalization inference operator * * \param data The input tensor. 4-D shape NCHW or NHWC * \param size Integer to define normalisation window size * \param axis Input data layout channel axis * \param alpha Float scaling factor * \param beta Exponent value * \param bias Offset to avoid dividing by zero * \param name The name of the operation * \param tag The tag to mark the operation * * \return A Tensor whose op member is the Local response normalization operation */ inline Tensor lrn(const Tensor& data, int size, int axis = 1, float alpha = 0.0001, float beta = 0.75, float bias = 2, std::string name = "tensor", std::string tag = kBroadcast) { ICHECK_EQ(data->shape.size(), 4) << "LRN requires 4-D input"; ICHECK_EQ(size % 2, 1) << "size should be odd number"; ICHECK(axis == 1 || axis == 3) << "axis should be 1 or 3 for NCHW and NHWC"; ICHECK(data->dtype.is_float()) << "datatype should be float"; auto input_shape = data->shape; Array<PrimExpr> pad_before{0, 0, 0, 0}; Array<PrimExpr> pad_after{0, 0, 0, 0}; pad_before.Set(axis, static_cast<PrimExpr>(size / 2)); pad_after.Set(axis, static_cast<PrimExpr>(size / 2)); auto pad_data = pad(data, pad_before, pad_after, 0, "pad_data"); auto rxs = tvm::te::reduce_axis(Range(0, size), "rxs"); Tensor sqr_sum; if (axis == 1) { sqr_sum = tvm::te::compute( input_shape, [&](Var i, Var l, Var j, Var k) { return tvm::sum(pad_data(i, l + rxs, j, k) * pad_data(i, l + rxs, j, k), {rxs}); }, "tensor", "sqr_sum"); } else if (axis == 3) { sqr_sum = tvm::te::compute( input_shape, [&](Var i, Var l, Var j, Var k) { return tvm::sum(pad_data(i, l, j, k + rxs) * pad_data(i, l, j, k + rxs), {rxs}); }, "tensor", "sqr_sum"); } PrimExpr alpha_imm = tvm::te::make_const(data->dtype, alpha); PrimExpr beta_imm = tvm::te::make_const(data->dtype, beta); PrimExpr bias_imm = tvm::te::make_const(data->dtype, bias); auto sqrt_sum_up = tvm::te::compute( input_shape, [&](Var i, Var j, Var k, Var l) { return tvm::pow(bias_imm + (div(alpha_imm * sqr_sum(i, j, k, l), size)), beta_imm); }, "tensor", kElementWise); return topi::divide(data, sqrt_sum_up); } } // namespace nn } // namespace topi } // namespace tvm #endif // TVM_TOPI_NN_LOCAL_RESPONSE_NORM_H_
https://github.com/zk-ml/tachikoma
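A usage sketch for lrn. The implementation calls pad and topi::divide, which are declared in <tvm/topi/nn.h> and <tvm/topi/broadcast.h>, so the sketch pulls those in first; shapes and the harness are illustrative assumptions:

```cpp
#include <tvm/te/operation.h>
#include <tvm/te/schedule.h>
#include <tvm/topi/broadcast.h>                 // topi::divide
#include <tvm/topi/nn.h>                        // pad
#include <tvm/topi/nn/local_response_norm.h>

using tvm::DataType;
using namespace tvm::te;

int main() {
  // Cross-channel LRN over a window of 5 channels, NCHW layout (axis = 1).
  Tensor data = placeholder({1, 16, 14, 14}, DataType::Float(32), "data");
  Tensor out = tvm::topi::nn::lrn(data, /*size=*/5, /*axis=*/1);
  Schedule s = create_schedule({out->op});
  return 0;
}
```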
include/tvm/topi/nn/mapping.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \brief Mapping op constructions * \file nn/mapping.h */ #ifndef TVM_TOPI_NN_MAPPING_H_ #define TVM_TOPI_NN_MAPPING_H_ #include <tvm/te/operation.h> #include <tvm/topi/tags.h> #include <string> namespace tvm { namespace topi { namespace nn { using namespace tvm::te; /*! * \brief Scale and shift with NCHW order * * \param x The input tensor. * \param scale Scale tensor, 1-D of size channel * \param shift Shift tensor, 1-D of size channel * \param name The name of the operation * \param tag The tag to mark the operation * * \return A Tensor whose op member is the scale shift operation */ inline Tensor scale_shift_nchw(const Tensor& x, const Tensor& scale, const Tensor& shift, std::string name = "ScaleShift", std::string tag = kBroadcast) { return tvm::te::compute( x->shape, [&](Var b, Var c, Var h, Var w) { return x(b, c, h, w) * scale(c) + shift(c); }, name, tag); } /*! * \brief Scale and shift with NHWC order * * \param x The input tensor. * \param scale Scale tensor, 1-D of size channel * \param shift Shift tensor, 1-D of size channel * \param name The name of the operation * \param tag The tag to mark the operation * * \return A Tensor whose op member is the scale shift operation */ inline Tensor scale_shift_nhwc(const Tensor& x, const Tensor& scale, const Tensor& shift, std::string name = "ScaleShift", std::string tag = kBroadcast) { return tvm::te::compute( x->shape, [&](Var b, Var h, Var w, Var c) { return x(b, h, w, c) * scale(c) + shift(c); }, name, tag); } } // namespace nn } // namespace topi } // namespace tvm #endif // TVM_TOPI_NN_MAPPING_H_
https://github.com/zk-ml/tachikoma
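A sketch of the NCHW scale/shift mapping, which computes y[b, c, h, w] = x[b, c, h, w] * scale[c] + shift[c]; shapes and the harness are illustrative assumptions:

```cpp
#include <tvm/te/operation.h>
#include <tvm/te/schedule.h>
#include <tvm/topi/nn/mapping.h>

using tvm::DataType;
using namespace tvm::te;

int main() {
  // Per-channel affine transform: scale and shift are 1-D of size C = 16.
  Tensor x = placeholder({1, 16, 14, 14}, DataType::Float(32), "x");
  Tensor scale = placeholder({16}, DataType::Float(32), "scale");
  Tensor shift = placeholder({16}, DataType::Float(32), "shift");
  Tensor y = tvm::topi::nn::scale_shift_nchw(x, scale, shift);
  Schedule s = create_schedule({y->op});
  return 0;
}
```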
include/tvm/topi/nn/pooling.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \brief Pooling op constructions * \file nn/pooling.h */ #ifndef TVM_TOPI_NN_POOLING_H_ #define TVM_TOPI_NN_POOLING_H_ #include <tvm/arith/analyzer.h> #include <tvm/topi/detail/pad_utils.h> #include <tvm/topi/nn.h> #include <tvm/topi/reduction.h> #include <tvm/topi/tags.h> #include <algorithm> #include <string> #include <vector> namespace tvm { namespace topi { namespace nn { using namespace tvm::te; /*! \brief Pooling type */ enum PoolType : int { kAvgPool, kMaxPool, }; inline Tensor pool_grad_impl(const Tensor& out_grad, const Tensor& x, const Array<PrimExpr>& kernel_size, const Array<PrimExpr>& stride_size, const Array<PrimExpr>& padding_size, PoolType pool_type, bool ceil_mode, const size_t height_axis, const size_t width_axis, bool count_include_pad) { ICHECK(out_grad->shape.size() >= 2) << "Pooling grad output must >= 2-D (H, W)"; ICHECK(x->shape.size() >= 2) << "Pooling input must >= 2-D (H, W)"; ICHECK_EQ(kernel_size.size(), 2) << "Pooling kernel_size must have 2 elements"; ICHECK_EQ(stride_size.size(), 2) << "Pooling stride_size must have 2 elements"; ICHECK_EQ(padding_size.size(), 4) << "Pooling padding_size must have 4 elements"; auto kernel_height = cast(DataType::DataType::Int(32), kernel_size[0]); auto kernel_width = cast(DataType::DataType::Int(32), kernel_size[1]); auto stride_height = cast(DataType::DataType::Int(32), stride_size[0]); auto stride_width = cast(DataType::DataType::Int(32), stride_size[1]); auto height = cast(DataType::DataType::Int(32), x->shape[height_axis]); auto width = cast(DataType::DataType::Int(32), x->shape[width_axis]); auto pad_top = cast(DataType::DataType::Int(32), padding_size[0]); auto pad_left = cast(DataType::DataType::Int(32), padding_size[1]); auto pad_bottom = cast(DataType::DataType::Int(32), padding_size[2]); auto pad_right = cast(DataType::DataType::Int(32), padding_size[3]); if (ceil_mode) { // Additional padding to ensure we do ceil instead of floor when // dividing by stride. 
pad_bottom += stride_height - 1; pad_right += stride_width - 1; } Array<PrimExpr> pad_before(std::vector<PrimExpr>(x->shape.size(), 0)); pad_before.Set(height_axis, pad_top); pad_before.Set(width_axis, pad_left); Array<PrimExpr> pad_after(std::vector<PrimExpr>(x->shape.size(), 0)); pad_after.Set(height_axis, pad_bottom); pad_after.Set(width_axis, pad_right); arith::Analyzer analyzer; auto out_height = analyzer.Simplify((height - kernel_height + pad_top + pad_bottom) / stride_height + 1); auto out_width = analyzer.Simplify((width - kernel_width + pad_left + pad_right) / stride_width + 1); auto dheight = tvm::te::reduce_axis(Range(0, kernel_height), "dh"); auto dwidth = tvm::te::reduce_axis(Range(0, kernel_width), "dw"); Array<PrimExpr> data_shape = x->shape; for (size_t i = 0; i < data_shape.size(); ++i) { data_shape.Set(i, cast(DataType::DataType::Int(32), data_shape[i])); } Array<PrimExpr> out_shape = data_shape; out_shape.Set(height_axis, out_height); out_shape.Set(width_axis, out_width); const int64_t* padding_h0 = as_const_int(pad_top); const int64_t* padding_w0 = as_const_int(pad_left); const int64_t* padding_h1 = as_const_int(pad_bottom); const int64_t* padding_w1 = as_const_int(pad_right); const bool do_pad = ((padding_h0 && *padding_h0) || (padding_w0 && *padding_w0)) || ((padding_h1 && *padding_h1) || (padding_w1 && *padding_w1)); if (pool_type == kMaxPool) { Array<PrimExpr> ravel_shape{data_shape.begin(), data_shape.end()}; ravel_shape.Set(height_axis, ravel_shape[height_axis] + pad_top + pad_bottom); ravel_shape.Set(width_axis, ravel_shape[width_axis] + pad_left + pad_right); auto windowh = tvm::te::reduce_axis(Range(0, (kernel_height + stride_height - 1) / stride_height), "wh"); auto windoww = tvm::te::reduce_axis(Range(0, (kernel_width + stride_width - 1) / stride_width), "ww"); auto argmax = MakeArgmaxReducer(); auto pad_x = do_pad ? 
pad(x, pad_before, pad_after, tvm::min_value(x->dtype), "pad_temp") : x; auto mp_argmax = tvm::te::compute( out_shape, [&](const Array<Var>& inds) { Array<PrimExpr> window_inds{inds.begin(), inds.end()}; window_inds.Set(height_axis, inds[height_axis] * stride_height + dheight); window_inds.Set(width_axis, inds[width_axis] * stride_width + dwidth); auto idx = detail::RavelIndex(window_inds, ravel_shape); return argmax({idx, pad_x(window_inds)}, {dheight, dwidth}, nullptr); }, "maxpool_grad_argmax", kCommReduceIdx); auto mp_inds = mp_argmax[0]; return tvm::te::compute( data_shape, [&](const Array<Var>& inds) { Array<PrimExpr> pad_inds{inds.begin(), inds.end()}; pad_inds.Set(height_axis, pad_inds[height_axis] + pad_top); pad_inds.Set(width_axis, pad_inds[width_axis] + pad_left); auto idx = detail::RavelIndex(pad_inds, ravel_shape); Array<PrimExpr> out_idx{inds.begin(), inds.end()}; out_idx.Set(height_axis, (inds[height_axis] + pad_top) / stride_height - windowh); out_idx.Set(width_axis, (inds[width_axis] + pad_left) / stride_width - windoww); PrimExpr out_idx_lower_h = tir::Select( pad_inds[height_axis] < kernel_height, make_const(DataType::DataType::Int(32), 0), (pad_inds[height_axis] - kernel_height) / stride_height + 1); PrimExpr out_idx_lower_w = tir::Select( pad_inds[width_axis] < kernel_width, make_const(DataType::DataType::Int(32), 0), (pad_inds[width_axis] - kernel_width) / stride_width + 1); return tvm::sum( tvm::if_then_else(tir::And(tir::And(out_idx[height_axis] >= out_idx_lower_h, out_idx[width_axis] >= out_idx_lower_w), mp_inds(out_idx) == idx), out_grad(out_idx), make_const(x->dtype, 0)), {windowh, windoww}); }, "T_pool_grad", "pool_grad_max"); } else if (pool_type == kAvgPool) { auto windowh = tvm::te::reduce_axis(Range(0, (kernel_height + stride_height - 1) / stride_height), "wh"); auto windoww = tvm::te::reduce_axis(Range(0, (kernel_width + stride_width - 1) / stride_width), "ww"); return tvm::te::compute( data_shape, [&](const Array<Var>& inds) { PrimExpr pad_h_idx = inds[height_axis] + pad_top; PrimExpr pad_w_idx = inds[width_axis] + pad_left; // output indices whose pooling windows cover current input element (can be out-of-bound) Array<PrimExpr> out_idx{inds.begin(), inds.end()}; out_idx.Set(height_axis, (pad_h_idx / stride_height - windowh)); out_idx.Set(width_axis, (pad_w_idx / stride_width - windoww)); PrimExpr out_idx_lower_h = tir::Select(pad_h_idx < kernel_height, make_const(DataType::Int(32), 0), (pad_h_idx - kernel_height) / stride_height + 1); PrimExpr out_idx_lower_w = tir::Select(pad_w_idx < kernel_width, make_const(DataType::Int(32), 0), (pad_w_idx - kernel_width) / stride_width + 1); PrimExpr divide_factor; // number of pooled elements if (count_include_pad) { divide_factor = kernel_height * kernel_width; } else { PrimExpr h_start = out_idx[height_axis] * stride_height - pad_top; PrimExpr w_start = out_idx[width_axis] * stride_width - pad_left; PrimExpr h_end = min(h_start + kernel_height, height); PrimExpr w_end = min(w_start + kernel_width, width); h_start = max(h_start, make_const(DataType::Int(32), 0)); w_start = max(w_start, make_const(DataType::Int(32), 0)); divide_factor = max((h_end - h_start) * (w_end - w_start), make_const(DataType::Int(32), 1)); } return tvm::sum( tvm::if_then_else(tir::And(tir::And(out_idx[height_axis] >= out_idx_lower_h, out_idx[height_axis] < out_height), tir::And(out_idx[width_axis] >= out_idx_lower_w, out_idx[width_axis] < out_width)), out_grad(out_idx) / divide_factor, make_const(out_grad->dtype, 0)), {windowh, windoww}); }, 
"T_pool_grad", "pool_grad_avg"); } else { LOG(ERROR) << "Unrecognized pool_type: " << pool_type; return Tensor(); } } inline bool find_depth_height_width(const std::string& layout, int* depth_axis, int* height_axis, int* width_axis) { *depth_axis = -1; *height_axis = -1; *width_axis = -1; int curr_idx = 0; for (size_t i = 0; i < layout.size(); ++i) { if ((layout[i] >= 'A' && layout[i] <= 'Z') || (layout[i] >= 'a' && layout[i] <= 'z')) { if (layout[i] == 'D') { if (*depth_axis != -1) return false; *depth_axis = curr_idx; } else if (layout[i] == 'H') { if (*height_axis != -1) return false; *height_axis = curr_idx; } else if (layout[i] == 'W') { if (*width_axis != -1) return false; *width_axis = curr_idx; } else if (layout[i] == 'd' || layout[i] == 'h' || layout[i] == 'w') { // do not support split on height or width, e.g., NCHW16w return false; } ++curr_idx; } } if (*depth_axis == -1 || *height_axis == -1 || *width_axis == -1) return false; return true; } inline bool find_height_width(const std::string& layout, int* height_axis, int* width_axis) { int dummy; ICHECK_EQ(find_depth_height_width(layout, &dummy, height_axis, width_axis), false); if (*height_axis != -1 && *width_axis != -1) { return true; } return false; } inline bool find_width(const std::string& layout, int* width_axis) { int dummy; ICHECK_EQ(find_depth_height_width(layout, &dummy, &dummy, width_axis), false); if (*width_axis != -1) { return true; } return false; } /*! * \brief Calculate gradient of pooling on height and width dimension of data. * It decides the height and width dimension according to the layout string, * in which 'W' and 'H' means width and height respectively. * Width and height dimension cannot be split. * For example, NCHW, NCHW16c, etc. are valid for pool, * while NCHW16w, NCHW16h are not. * See \a layout for more information of the layout string convention. * \param out_grad The output gradient tensor. * \param x The input tensor. * \param kernel_size Vector of two ints: {kernel_height, kernel_width} * \param stride_size Vector of two ints: {stride_height, stride_width} * \param padding_size Vector of two ints: {padding_height, padding_width} * \param pool_type The type of pooling operator * \param ceil_mode Whether to use ceil when calculating the output size * \param layout The input layout. Pooling supports any layout as long as 'H' and 'W' appear. * The layout is supposed to be composed of upper cases, lower cases and (optional) numbers, * where upper case indicates a dimension and * the corresponding lower case (with factor size) indicates the split dimension. * For example, NCHW16c can describe a 5-D tensor of * [batch_size, channel, height, width, channel_block]. * (in which factor size `16` will not be used in pooling but for other operators, * it can be used to decide the output shape). * Since pooling does not care about the factor size of dimensions * other than `H` and `W`, one can pass `NCHWc` as well. 
* \param count_include_pad Whether include padding in the calculation when pool_type is 'avg' * * * \return The output tensor in the same layout */ inline Tensor pool_grad(const Tensor& out_grad, const Tensor& x, const Array<PrimExpr>& kernel_size, const Array<PrimExpr>& stride_size, const Array<PrimExpr>& padding_size, PoolType pool_type, bool ceil_mode, const std::string& layout = "NCHW", bool count_include_pad = true) { int height_axis = -1, width_axis = -1; ICHECK(find_height_width(layout, &height_axis, &width_axis)) << "Unsupported layout " << layout; return pool_grad_impl(out_grad, x, kernel_size, stride_size, padding_size, pool_type, ceil_mode, height_axis, width_axis, count_include_pad); } inline PrimExpr start_index(const Var& out_index, const PrimExpr& odim, const PrimExpr& idim) { return indexdiv(out_index * idim, odim); } inline PrimExpr end_index(const Var& out_index, const PrimExpr& odim, const PrimExpr& idim) { PrimExpr tmp = indexdiv((out_index + 1) * idim, odim); return tvm::tir::Select(indexmod((out_index + 1) * idim, odim) == 0, tmp, tmp + 1); } /*! * \brief Perform adaptive pooling on N dimensional data * * \param x The input tensor * \param output_size int vector of size in each dimension * \param pool_type The type of pooling operator * \param axes indices of each dimension * * \return The output tensor in same layout order */ inline Tensor adaptive_pool_impl(const Tensor& x, const Array<PrimExpr>& output_size, PoolType pool_type, const std::vector<int>& axes) { const auto n_dim = output_size.size(); ICHECK_EQ(axes.size(), n_dim) << "The number of axes not equal to the in/out dimension"; Array<PrimExpr> data_shape = x->shape; for (size_t i = 0; i < data_shape.size(); ++i) { data_shape.Set(i, cast(DataType::DataType::Int(32), data_shape[i])); } Array<PrimExpr> out_shape = data_shape; Array<PrimExpr> in_size, out_size; for (size_t i = 0; i < n_dim; ++i) { in_size.push_back(data_shape[axes[i]]); out_size.push_back(cast(DataType::Int(32), output_size[i])); out_shape.Set(axes[i], out_size[i]); } auto get_iter_vars = [=](const Array<Var>& output, bool reduce_indices) { Array<PrimExpr> indices; for (size_t i = 0; i < output.size(); ++i) indices.push_back(output[i]); Array<tir::IterVar> reduce_axes; for (size_t i = 0; i < n_dim; ++i) { auto i_start = start_index(output[axes[i]], out_size[i], in_size[i]); auto i_end = end_index(output[axes[i]], out_size[i], in_size[i]); auto rv_name = "rv" + std::to_string(i); auto rv_axis = tvm::te::reduce_axis(Range(0, i_end - i_start), rv_name); reduce_axes.push_back(rv_axis); if (reduce_indices) { indices.Set(axes[i], i_start + rv_axis); } } return std::make_tuple(indices, reduce_axes); }; if (pool_type == kMaxPool) { return tvm::te::compute( out_shape, [&](const Array<Var>& output) { Array<PrimExpr> indices; Array<tir::IterVar> reduce_axes; std::tie(indices, reduce_axes) = get_iter_vars(output, true); return tvm::max(x(indices), reduce_axes); // NOLINT(*) }, "tensor", "adaptive_pool_max"); } else if (pool_type == kAvgPool) { auto pool_sum = tvm::te::compute( out_shape, [&](const Array<Var>& output) { Array<PrimExpr> indices; Array<tir::IterVar> reduce_axes; std::tie(indices, reduce_axes) = get_iter_vars(output, true); return tvm::sum(x(indices), reduce_axes); }, "tensor", "adaptive_pool_sum"); return tvm::te::compute( out_shape, [&](const Array<Var>& output) { Array<PrimExpr> indices; Array<tir::IterVar> reduce_axes; std::tie(indices, reduce_axes) = get_iter_vars(output, false); PrimExpr divide_factor = tvm::cast(x->dtype, 1); for (size_t 
i = 0; i < n_dim; ++i) { divide_factor *= tvm::cast(x->dtype, reduce_axes[i]->dom->extent); } return div(pool_sum(indices), divide_factor); }, "tensor", kElementWise); } else { LOG(ERROR) << "Unrecognized pool_type: " << pool_type; return x; } } /*! * \brief Adaptively perform pooling on height and width dimension of data. * The pooling kernel and stride sizes are automatically chosen for desired output sizes. * It decides the height and width dimension according to the layout string, * in which 'W' and 'H' means width and height respectively. * Width and height dimension cannot be split. * For example, NCHW, NCHW16c, etc. are valid for pool, * while NCHW16w, NCHW16h are not. * See \a layout for more information of the layout string convention. * * \param x The input tensor * \param output_size Vector of two ints: {output_height, output_width} * \param pool_type The type of pooling operator * \param layout The input layout. Pooling supports any layout as long as 'H' and 'W' appear. * The layout is supposed to be composed of upper cases, lower cases and (optional) numbers, * where upper case indicates a dimension and * the corresponding lower case (with factor size) indicates the split dimension. * For example, NCHW16c can describe a 5-D tensor of * [batch_size, channel, height, width, channel_block]. * (in which factor size `16` will not be used in pooling but for other operators, * it can be used to decide the output shape). * Since pooling does not care about the factor size of dimensions * other than `H` and `W`, one can pass `NCHWc` as well. * * \return The output tensor in same layout order */ inline Tensor adaptive_pool(const Tensor& x, const Array<PrimExpr>& output_size, PoolType pool_type, const std::string& layout = "NCHW") { int height_axis = -1, width_axis = -1; ICHECK(find_height_width(layout, &height_axis, &width_axis)) << "Unsupported layout " << layout; return adaptive_pool_impl(x, output_size, pool_type, {height_axis, width_axis}); } /*! * \brief Adaptively perform pooling on three dimensional data. * See the two dimensional version above for details. * \param x The input tensor * \param output_size Vector of three ints: {output_depth, output_height, output_width} * \param pool_type The type of pooling operator * \param layout The input layout. The default is "NCDHW". */ inline Tensor adaptive_pool3d(const Tensor& x, const Array<PrimExpr>& output_size, PoolType pool_type, const std::string& layout = "NCDHW") { int depth_axis = -1, height_axis = -1, width_axis = -1; ICHECK(find_depth_height_width(layout, &depth_axis, &height_axis, &width_axis)) << "Unsupported layout " << layout; return adaptive_pool_impl(x, output_size, pool_type, {depth_axis, height_axis, width_axis}); } /*! * \brief Adaptively perform pooling on one dimensional data. * See the two dimensional version above for details. * \param x The input tensor * \param output_size Vector of one int: {output_width} * \param pool_type The type of pooling operator * \param layout The input layout. The default is "NCW". */ inline Tensor adaptive_pool1d(const Tensor& x, const Array<PrimExpr>& output_size, PoolType pool_type, const std::string& layout = "NCW") { int width_axis = -1; ICHECK(find_width(layout, &width_axis)) << "Unsupported layout " << layout; return adaptive_pool_impl(x, output_size, pool_type, {width_axis}); } /*! * \brief Perform global pooling on height and width dimension of data. 
* It decides the height and width dimension according to the layout string, * in which 'W' and 'H' means width and height respectively. * Width and height dimension cannot be split. * For example, NCHW, NCHW16c, ... are valid for global_pool, * while NCHW16w, NCHW16h are not. * See \a layout for more information of the layout string convention. * * \param x The input tensor represent as layout * \param pool_type The type of pooling operator * \param layout The input layout. global-pooling supports any layout as long as 'H' and 'W' appear. * The layout is supposed to be composed of upper cases, lower cases and (optional) numbers, * where upper case indicates a dimension and * the corresponding lower case (with factor size) indicates the sub-dimension. * For example, `NCHW16c` can describe a 5-D tensor of * [batch_size, channel, height, width, channel_block]. * (in which factor size `16` will not be used in pooling but for other operators, * it can be used to decide the output shape). * Since pooling does not care about the factor size of * dimensions other than `H` and `W`, one can pass `NCHWc` as well. * * \return The output tensor in same layout with height and width dimension size of 1. * e.g., for NCHW, the output shape will be [batch, channel, 1, 1] */ inline Tensor global_pool(const Tensor& x, PoolType pool_type, const std::string& layout = "NCHW") { return adaptive_pool(x, Array<PrimExpr>{1, 1}, pool_type, layout); } /*! * \brief Perform pooling on N-dimension of data. * * \param x The input tensor * \param kernel_size Vector of N ints * \param stride_size Vector of N ints * \param dilation_size Vector of N ints * \param padding_size Vector of N*2 ints [head_pad_d1, head_pad_d2, ..., * head_pad_dN, tail_pad_d1, tail_pad_d2, ..., tail_pad_dN] * \param pool_type The type of pooling operator * \param ceil_mode Whether to use ceil when calculating the output size * \param axis Vector of indices for the N dimensions * \param count_include_pad Whether include padding in the calculation * * \return The output tensor in same layout order */ inline Tensor pool_impl_nd(const Tensor& x, const Array<PrimExpr>& kernel_size, const Array<PrimExpr>& stride_size, const Array<PrimExpr>& dilation_size, const Array<PrimExpr>& padding_size, PoolType pool_type, bool ceil_mode, const std::vector<int>& axis, bool count_include_pad) { int k_size = kernel_size.size(); int x_size = x->shape.size(); ICHECK_EQ(stride_size.size(), k_size) << "Pooling stride_size must have the same number of elements as kernel"; ICHECK_EQ(padding_size.size(), k_size * 2) << "Pooling padding_size must have twice as many elements as kernel"; ICHECK_EQ(axis.size(), k_size) << "axis must have the same number of elements as kernel"; Array<IterVar> daxis; std::vector<PrimExpr> kernel(k_size); std::vector<PrimExpr> stride(k_size); std::vector<PrimExpr> dilation(k_size); std::vector<PrimExpr> pad_head(k_size); std::vector<PrimExpr> pad_tail(k_size); std::vector<PrimExpr> offset(k_size, 0); Array<PrimExpr> pad_before(std::vector<PrimExpr>(x_size, 0)); Array<PrimExpr> pad_after(std::vector<PrimExpr>(x_size, 0)); Array<PrimExpr> data_shape = x->shape; for (size_t i = 0; i < data_shape.size(); ++i) { data_shape.Set(i, cast(DataType::DataType::Int(32), data_shape[i])); } Array<PrimExpr> out_shape = data_shape; bool do_pad = false; for (int i = 0; i < k_size; i++) { int ii = axis[i]; kernel[i] = cast(DataType::Int(32), kernel_size[i]); stride[i] = cast(DataType::Int(32), stride_size[i]); dilation[i] = cast(DataType::Int(32), dilation_size[i]); pad_head[i] =
cast(DataType::Int(32), padding_size[i]); pad_tail[i] = cast(DataType::Int(32), padding_size[i + k_size]); if (ceil_mode) { // The offset[i] is an additional padding to ensure we do ceil instead of floor when // dividing by stride. // In the case of ceil_mode=True and count_include_pad=True, // in order to obtain the correct boundary, // we also need to use the offset[i] to eliminate this extra padding. offset[i] = stride[i] - 1; pad_tail[i] += offset[i]; } const int64_t* padding0 = as_const_int(pad_head[i]); const int64_t* padding1 = as_const_int(pad_tail[i]); do_pad = do_pad || (padding0 && *padding0) || (padding1 && *padding1); daxis.push_back(tvm::te::reduce_axis(Range(0, kernel[i]), "rv" + std::to_string(i))); pad_before.Set(ii, pad_head[i]); pad_after.Set(ii, pad_tail[i]); arith::Analyzer analyzer; PrimExpr numerator = data_shape[ii] - (kernel[i] - 1) * dilation[i] - 1 + pad_head[i] + pad_tail[i]; auto out_dim = analyzer.Simplify(indexdiv(numerator, stride[i]) + 1); out_shape.Set(ii, out_dim); } if (pool_type == kMaxPool) { auto temp = do_pad ? pad(x, pad_before, pad_after, tvm::min_value(x->dtype), "pad_temp") : x; return tvm::te::compute( out_shape, [&](const Array<Var>& output) { Array<PrimExpr> indices; for (const Var& var : output) indices.push_back(var); for (int i = 0; i < k_size; i++) { int ii = axis[i]; indices.Set(ii, output[ii] * stride[i] + daxis[i] * dilation[i]); } return tvm::max(temp(indices), daxis); }, "tensor", "pool_max"); } else if (pool_type == kAvgPool) { // Pad the inputs auto temp = do_pad ? pad(x, pad_before, pad_after, 0, "pad_temp") : x; // TVM compute for summing the pooling window. auto pool_sum = tvm::te::compute( out_shape, [&](const Array<Var>& output) { Array<PrimExpr> indices; for (const Var& var : output) indices.push_back(var); for (int i = 0; i < k_size; i++) { int ii = axis[i]; indices.Set(ii, output[ii] * stride[i] + daxis[i] * dilation[i]); } return tvm::sum(temp(indices), daxis); }, "tensor", "pool_sum"); // TVM compute for dividing the reduced window sum by kernel size. return tvm::te::compute( out_shape, [&](const Array<Var>& output) { Array<PrimExpr> indices; for (const Var& var : output) indices.push_back(var); if (count_include_pad) { std::vector<PrimExpr> start(k_size); std::vector<PrimExpr> end(k_size); auto num_el = make_const(DataType::Int(32), 1); for (int i = 0; i < k_size; i++) { int ii = axis[i]; start[i] = output[ii] * stride[i] - pad_head[i]; // When computing the output shape in ceil_mode, // we have added the extra padding of offset[i], // so now in order to calculate the correct boundary, // we need to subtract the offset[i]. end[i] = start[i] + (kernel[i] - 1) * dilation[i]; end[i] = min(end[i], data_shape[ii] + pad_tail[i] - 1 - offset[i]); num_el *= (end[i] - start[i]) / dilation[i] + 1; } return div(pool_sum(indices), num_el); } else { std::vector<PrimExpr> start(k_size); std::vector<PrimExpr> end(k_size); auto num_el = make_const(DataType::Int(32), 1); for (int i = 0; i < k_size; i++) { int ii = axis[i]; // Let start and end contain the first and last index of our Tensor // along the relevant dimension we use in our calculation. // Assume indices -1, -2 represent the padding before (head) and // len(arr), len(arr) + 1 represent the padding after (tail). start[i] = output[ii] * stride[i] - pad_head[i]; end[i] = start[i] + (kernel[i] - 1) * dilation[i]; // if start[i] < 0, e.g.
we start on a tail padded number this will be a positive // number that represents the number of steps along the dilated kernel to reach a // non-padded value. Otherwise this should be 0. PrimExpr jumps_to_non_pad = (dilation[i] - 1 - start[i]) / dilation[i]; jumps_to_non_pad = max(jumps_to_non_pad, make_const(DataType::Int(32), 0)); end[i] = min(end[i], data_shape[ii] - 1); num_el *= (end[i] - (start[i] + dilation[i] * jumps_to_non_pad)) / dilation[i] + 1; } PrimExpr divide_factor = max(num_el, make_const(DataType::Int(32), 1)); return div(pool_sum(indices), divide_factor); } }, "tensor", kElementWise); } else { LOG(ERROR) << "Unrecognized pool_type: " << pool_type; return x; } } /*! * \brief Perform pooling on the width dimension of data. * Width axis is determined by the layout string * in which 'W' means width. * Width dimension cannot be split. * For example, NCW, NCW16c, etc. are valid for pool, * while NCW16w is not. * See \a layout for more information of the layout string convention. * \param x The input tensor. * \param kernel_size Vector of one int: {kernel_width} * \param stride_size Vector of one int: {stride_width} * \param dilation_size Vector of one int: {dilation_width} * \param padding_size Vector of two ints: {head_pad_width, tail_pad_width} * \param pool_type The type of pooling operator * \param ceil_mode Whether to use ceil when calculating the output size * \param layout The input layout. Pooling supports any layout as long as 'W' appears. * The layout is supposed to be composed of upper cases, lower cases and (optional) numbers, * where upper case indicates a dimension and * the corresponding lower case (with factor size) indicates the split dimension. * For example, NCW16c can describe a 4-D tensor of * [batch_size, channel, width, channel_block]. * (in which factor size `16` will not be used in pooling but for other operators, * it can be used to decide the output shape). * Since pooling does not care about the factor size of dimensions * other than `W`, one can pass `NCWc` as well. * \param count_include_pad Whether include padding in the calculation when pool_type is 'avg' * * * \return The output tensor in the same layout */ inline Tensor pool1d(const Tensor& x, const Array<PrimExpr>& kernel_size, const Array<PrimExpr>& stride_size, const Array<PrimExpr>& dilation_size, const Array<PrimExpr>& padding_size, PoolType pool_type, bool ceil_mode, const std::string& layout = "NCW", bool count_include_pad = true) { int width_axis = -1; ICHECK(find_width(layout, &width_axis)) << "Unsupported layout " << layout; std::vector<int> axis = {width_axis}; return pool_impl_nd(x, kernel_size, stride_size, dilation_size, padding_size, pool_type, ceil_mode, axis, count_include_pad); } /*! * \brief Perform pooling on height and width dimension of data. * It decides the height and width dimension according to the layout string, * in which 'W' and 'H' means width and height respectively. * Width and height dimension cannot be split. * For example, NCHW, NCHW16c, etc. are valid for pool, * while NCHW16w, NCHW16h are not. * See \a layout for more information of the layout string convention. * \param x The input tensor. 
* \param kernel_size Vector of two ints: {kernel_height, kernel_width} * \param stride_size Vector of two ints: {stride_height, stride_width} * \param dilation_size Vector of two ints: {dilation_height, dilation_width} * \param padding_size Vector of four ints: {head_pad_height, head_pad_width, tail_pad_height, tail_pad_width} * \param pool_type The type of pooling operator * \param ceil_mode Whether to use ceil when calculating the output size * \param layout The input layout. Pooling supports any layout as long as 'H' and 'W' appear. * The layout is supposed to be composed of upper cases, lower cases and (optional) numbers, * where upper case indicates a dimension and * the corresponding lower case (with factor size) indicates the split dimension. * For example, NCHW16c can describe a 5-D tensor of * [batch_size, channel, height, width, channel_block]. * (in which factor size `16` will not be used in pooling but for other operators, * it can be used to decide the output shape). * Since pooling does not care about the factor size of dimensions * other than `H` and `W`, one can pass `NCHWc` as well. * \param count_include_pad Whether include padding in the calculation when pool_type is 'avg' * * * \return The output tensor in the same layout */ inline Tensor pool2d(const Tensor& x, const Array<PrimExpr>& kernel_size, const Array<PrimExpr>& stride_size, const Array<PrimExpr>& dilation_size, const Array<PrimExpr>& padding_size, PoolType pool_type, bool ceil_mode, const std::string& layout = "NCHW", bool count_include_pad = true) { int height_axis = -1, width_axis = -1; ICHECK(find_height_width(layout, &height_axis, &width_axis)) << "Unsupported layout " << layout; std::vector<int> axis = {height_axis, width_axis}; return pool_impl_nd(x, kernel_size, stride_size, dilation_size, padding_size, pool_type, ceil_mode, axis, count_include_pad); } /*! * \brief Perform pooling on depth, height and width dimension of data. * It decides the depth, height and width dimension according to the layout string, * in which 'D', 'W' and 'H' means depth, width and height respectively. * Depth, Width and height dimension cannot be split. * For example, NCDHW, NCDHW16c, etc. are valid for pool, * while NCDHW16d, NCDHW16w or NCDHW16h are not. * See \a layout for more information of the layout string convention. * \param x The input tensor. * \param kernel_size Vector of three ints: {kernel_depth, kernel_height, kernel_width} * \param stride_size Vector of three ints: {stride_depth, stride_height, stride_width} * \param dilation_size Vector of three ints: {dilation_depth, dilation_height, dilation_width} * \param padding_size Vector of six ints: {head_pad_depth, head_pad_height, head_pad_width, * tail_pad_depth, tail_pad_height, tail_pad_width} * \param pool_type The type of pooling operator * \param ceil_mode Whether to use ceil when calculating the output size * \param layout The input layout. Pooling supports any layout as long as 'D', 'H' and 'W' appear. * The layout is supposed to be composed of upper cases, lower cases and (optional) numbers, * where upper case indicates a dimension and * the corresponding lower case (with factor size) indicates the split dimension. * For example, NCDHW16c can describe a 6-D tensor of * [batch_size, channel, depth, height, width, channel_block]. * (in which factor size `16` will not be used in pooling but for other operators, * it can be used to decide the output shape). * Since pooling does not care about the factor size of dimensions * other than `D`, `H` and `W`, one can pass `NCDHWc` as well.
* \param count_include_pad Whether include padding in the calculation when pool_type is 'avg' * * * \return The output tensor in the same layout */ inline Tensor pool3d(const Tensor& x, const Array<PrimExpr>& kernel_size, const Array<PrimExpr>& stride_size, const Array<PrimExpr>& dilation_size, const Array<PrimExpr>& padding_size, PoolType pool_type, bool ceil_mode, const std::string& layout = "NCDHW", bool count_include_pad = true) { int depth_axis = -1, height_axis = -1, width_axis = -1; ICHECK(find_depth_height_width(layout, &depth_axis, &height_axis, &width_axis)) << "Unsupported layout " << layout; std::vector<int> axis = {depth_axis, height_axis, width_axis}; return pool_impl_nd(x, kernel_size, stride_size, dilation_size, padding_size, pool_type, ceil_mode, axis, count_include_pad); } } // namespace nn } // namespace topi } // namespace tvm #endif // TVM_TOPI_NN_POOLING_H_
https://github.com/zk-ml/tachikoma
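A usage sketch for the pooling entry points. pool2d takes four padding values ({head_h, head_w, tail_h, tail_w}, matching pool_impl_nd's N*2 convention), and global_pool is adaptive pooling to a 1x1 spatial output; shapes and the harness are illustrative assumptions:

```cpp
#include <tvm/te/operation.h>
#include <tvm/te/schedule.h>
#include <tvm/topi/nn/pooling.h>

using tvm::DataType;
using namespace tvm::te;
using namespace tvm::topi::nn;

int main() {
  Tensor x = placeholder({1, 16, 32, 32}, DataType::Float(32), "x");
  // 2x2 max pooling, stride 2, no dilation or padding: [1, 16, 32, 32] -> [1, 16, 16, 16].
  Tensor mp = pool2d(x, /*kernel_size=*/{2, 2}, /*stride_size=*/{2, 2},
                     /*dilation_size=*/{1, 1}, /*padding_size=*/{0, 0, 0, 0},
                     kMaxPool, /*ceil_mode=*/false, "NCHW", /*count_include_pad=*/true);
  // Global average pooling collapses H and W to 1: [1, 16, 1, 1].
  Tensor gp = global_pool(x, kAvgPool, "NCHW");
  Schedule s = create_schedule({mp->op, gp->op});
  return 0;
}
```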
include/tvm/topi/nn/softmax.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \brief Softmax op constructions * \file nn/softmax.h */ #ifndef TVM_TOPI_NN_SOFTMAX_H_ #define TVM_TOPI_NN_SOFTMAX_H_ #include <tvm/te/operation.h> #include <tvm/topi/reduction.h> #include <tvm/topi/tags.h> #include <algorithm> #include <string> namespace tvm { namespace topi { namespace nn { using namespace tvm::te; /*! * \brief Softmax activation * * \param x The input tensor. Can be any dimension * \param axis The channel axis along which softmax is performed * \param name The name of the operation * \param tag The tag to mark the operation * * \return A Tensor whose op member is the softmax operation */ inline Tensor softmax(const Tensor& x, int axis = -1, std::string name = "tensor", std::string tag = "softmax_output") { auto input_shape = x->shape; auto ndim = input_shape.size(); if (axis < 0) { axis = ndim + axis; } ICHECK_LT(axis, ndim) << "axis parameter should be less than input dim"; auto k1 = tvm::te::reduce_axis(Range(0, input_shape[axis]), "k1"); auto k2 = tvm::te::reduce_axis(Range(0, input_shape[axis]), "k2"); auto reduced_shape = MakeReduceTargetShape({axis}, x, false, false); tvm::Map<String, ObjectRef> attrs; attrs.Set("axis", Integer(axis)); auto insert_reduce_index = [axis, ndim](const Array<Var>& indices, const IterVar& reduce_index) { Array<PrimExpr> eval_range; int arg_counter = 0; for (size_t i = 0; i < ndim; ++i) { if (static_cast<int>(i) == axis) { eval_range.push_back(reduce_index); } else { eval_range.push_back(indices[arg_counter++]); } } return eval_range; }; auto get_non_reduce_indices = [axis, ndim](const Array<Var>& indices) { Array<PrimExpr> non_reduce_indices; for (size_t i = 0; i < ndim; ++i) { if (static_cast<int>(i) != axis) non_reduce_indices.push_back(indices[i]); } return non_reduce_indices; }; auto _compute_max = [&](const Array<Var>& indices) { auto eval_range = insert_reduce_index(indices, k1); return topi::MaxOp(x(eval_range), {k1}); }; auto _compute_exp = [&](const Tensor& max_elem, const Array<Var>& indices) { auto non_reduce_indices = get_non_reduce_indices(indices); return tvm::exp(x(indices) - max_elem(non_reduce_indices)); }; auto _compute_expsum = [&](const Tensor& exp, const Array<Var>& indices) { auto eval_range = insert_reduce_index(indices, k2); return tvm::sum(exp(eval_range), {k2}); }; auto _normalize = [&](const Tensor& exp, const Tensor& expsum, const Array<Var>& indices) { auto non_reduce_indices = get_non_reduce_indices(indices); return exp(indices) / expsum(non_reduce_indices); }; auto max_elem = tvm::te::compute(reduced_shape, _compute_max); auto exp = tvm::te::compute( input_shape, [&](const Array<Var>& indices) { return _compute_exp(max_elem, indices); }); auto expsum = tvm::te::compute( reduced_shape, [&](const Array<Var>& indices) { return 
_compute_expsum(exp, indices); }); return tvm::te::compute( input_shape, [&](const Array<Var>& indices) { return _normalize(exp, expsum, indices); }, name, tag, attrs); } /*! * \brief Log softmax activation * * \param x The input tensor. 2-D where log softmax is performed along the second dimension * \param name The name of the operation * \param tag The tag to mark the operation * * \return A Tensor whose op member is the log softmax operation */ inline Tensor log_softmax(const Tensor& x, std::string name = "tensor", std::string tag = "log_softmax_output") { ICHECK_EQ(x->shape.size(), 2) << "Log softmax requires 2-D input"; PrimExpr m = x->shape[0]; PrimExpr n = x->shape[1]; auto k = tvm::te::reduce_axis(Range(0, n), "k"); auto max_elem = tvm::te::compute({m}, [&](Var i) { return tvm::max(x(i, k), Array<IterVar>{k}); }); k = tvm::te::reduce_axis(Range(0, n), "k"); auto expsum = tvm::te::compute({m}, [&](Var i) { return tvm::sum(tvm::exp(x(i, k) - max_elem(i)), {k}); }); return tvm::te::compute( x->shape, [&](Var i, Var j) { return x(i, j) - max_elem(i) - tvm::log(expsum(i)); }, name, tag); } } // namespace nn } // namespace topi } // namespace tvm #endif // TVM_TOPI_NN_SOFTMAX_H_
https://github.com/zk-ml/tachikoma
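A usage sketch for the two softmax variants above; shapes and the harness are illustrative assumptions:

```cpp
#include <tvm/te/operation.h>
#include <tvm/te/schedule.h>
#include <tvm/topi/nn/softmax.h>

using tvm::DataType;
using namespace tvm::te;

int main() {
  // Row-wise softmax over the last axis of a [4, 10] tensor.
  Tensor x = placeholder({4, 10}, DataType::Float(32), "x");
  Tensor sm = tvm::topi::nn::softmax(x, /*axis=*/-1);
  Tensor lsm = tvm::topi::nn::log_softmax(x);  // 2-D only; normalizes dim 1
  Schedule s = create_schedule({sm->op, lsm->op});
  return 0;
}
```

Both computations subtract the per-row maximum before exponentiating, the standard trick for numerical stability.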
include/tvm/topi/reduction.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file topi/reduction.h * \brief Reduction op constructors */ #ifndef TVM_TOPI_REDUCTION_H_ #define TVM_TOPI_REDUCTION_H_ #include <tvm/te/operation.h> #include <tvm/topi/broadcast.h> #include <tvm/topi/detail/constant_utils.h> #include <tvm/topi/detail/ravel_unravel.h> #include <tvm/topi/elemwise.h> #include <tvm/topi/tags.h> #include <tvm/topi/transform.h> #include <algorithm> #include <iterator> #include <string> #include <vector> namespace tvm { namespace topi { using namespace tvm::te; /*! \brief The operation to use for CommReduce */ using FReduce = std::function<PrimExpr(PrimExpr source, const Array<IterVar>& axis, Array<PrimExpr> init, Span span)>; /*! \brief The operation to use for CommReduceIdx */ using FCommReduce = std::function<Array<PrimExpr>(Array<PrimExpr> exprs, const Array<IterVar>& axis, PrimExpr* condition)>; /*! * \brief Convert a reduction axis which could be empty or have negative * elements into a real axis with valid dimension indices. * * \param ndim Number of dimensions in the target. * \param axis The axis parameter. * * \return A non-empty sorted array of valid dimension indices, with no duplicates. * If the input axis is empty, the result will be an axis including all dimensions. * If any input element is negative, it will be treated as an offset from the * last dimension (same as python indexing rules). */ inline std::vector<int> GetRealAxis(int ndim, const Array<Integer>& axis) { std::vector<int> real_axis; if (!axis.defined() || axis.size() == 0) { for (int i = 0; i < ndim; ++i) { real_axis.push_back(i); } } else { // Sort the dims and remove duplicates for (auto elem : axis) { int64_t val = elem->value; if (val < 0) { val += ndim; } ICHECK_LT(val, ndim) << " exceeds the maximum dimension " << ndim; ICHECK_GE(val, 0); real_axis.push_back(static_cast<int>(val)); } std::sort(real_axis.begin(), real_axis.end()); real_axis.resize(std::unique(real_axis.begin(), real_axis.end()) - real_axis.begin()); } return real_axis; } /*! \brief Enumerate the axes for a reduce op */ inline Array<IterVar> MakeReduceAxes(const std::vector<int>& real_axis, const Tensor& data) { Array<IterVar> reduce_axes; for (auto i : real_axis) { std::string name = "k" + std::to_string(i); reduce_axes.push_back(tvm::te::reduce_axis(Range(0, data->shape[i]), name)); } return reduce_axes; } /*!
\brief Calculate the target shape for a reduce op */ inline Array<PrimExpr> MakeReduceTargetShape(const std::vector<int>& real_axis, const Tensor& data, bool keepdims, bool atleast1d) { auto ndim = data->shape.size(); Array<PrimExpr> target_shape; if (keepdims) { for (size_t i = 0; i < ndim; ++i) { if (std::find(real_axis.begin(), real_axis.end(), i) != real_axis.end()) { // real_axis contains i target_shape.push_back(1); } else { target_shape.push_back(data->shape[i]); } } } else { for (size_t i = 0; i < ndim; ++i) { if (std::find(real_axis.begin(), real_axis.end(), i) == real_axis.end()) { // real_axis does not contain i target_shape.push_back(data->shape[i]); } } } if (target_shape.size() == 0 && atleast1d) { target_shape.push_back(1); } return target_shape; } /*! * \brief Create a reduction operation. * * \param data The input tensor. * \param func The reduction function eg. tvm::sum * \param target_shape The output Tensor shape. * \param reduce_axes The real axes along which the reduction is performed. * \param squeeze_axes The real axes to squeeze. Unsqueezed, reduced axes will * have shape 1 in the output tensor. * \param span The location of this reducer in the source. * * \return The result tensor. */ inline Tensor DoCommReduce(const Tensor& data, FReduce func, const Array<PrimExpr>& target_shape, const std::vector<int>& reduce_axes, const std::vector<int>& squeeze_axes, Span span = Span()) { auto r_axes = MakeReduceAxes(reduce_axes, data); auto compute = [&](const Array<Var>& indices) { Array<PrimExpr> eval_range; Array<Var> eval_indices; int arg_counter = 0; int red_counter = 0; for (size_t i = 0; i < data->shape.size(); ++i) { bool squeeze_i = std::find(squeeze_axes.begin(), squeeze_axes.end(), i) != squeeze_axes.end(); if (std::find(reduce_axes.begin(), reduce_axes.end(), i) != reduce_axes.end()) { // real_axis contains i eval_range.push_back(r_axes[red_counter]); eval_indices.push_back(r_axes[red_counter]->var); red_counter++; arg_counter += !squeeze_i; continue; } eval_range.push_back(indices[arg_counter]); arg_counter++; } return func(data(eval_range), r_axes, {}, span); }; return tvm::te::compute(target_shape, compute, data->op->name + "_red", kCommReduce); } /*! * \brief Create a reduction operation. * * \param data The input tensor. * \param axis The axes along which the reduction is performed. * \param func The reduction function eg. tvm::sum * \param keepdims If this is set to true, the axes which are reduced are * left in the result as dimensions with size one. This enables the result * to broadcast correctly against the input array. * \param atleast1d Whether the output need to be atleast1d. * * \return The result tensor. */ inline Tensor CommReduce(const Tensor& data, const Array<Integer>& axis, FReduce func, bool keepdims, bool atleast1d) { auto ndim = data->shape.size(); ICHECK_NE(ndim, 0) << "Cannot reduce a 0 dim Tensor"; auto real_axis = GetRealAxis(static_cast<int>(ndim), axis); auto target_shape = MakeReduceTargetShape(real_axis, data, keepdims, atleast1d); return DoCommReduce(data, func, target_shape, real_axis, keepdims ? std::vector<int>() : real_axis); } /*! * \brief Create an index reduction operation. * * \param data The input tensor. * \param axis The axes along which the reduction is performed. * \param func The reduction function * \param keepdims If this is set to true, the axes which are reduced are * left in the result as dimensions with size one. This enables the result * to broadcast correctly against the input array. 
* \param atleast1d Whether the output need to be atleast1d. * * \return The result tensor. */ inline Tensor CommReduceIdx(const Tensor& data, const Array<Integer>& axis, FCommReduce func, bool keepdims, bool atleast1d) { auto ndim = data->shape.size(); ICHECK_NE(ndim, 0) << "Cannot reduce a 0 dim Tensor"; auto real_axis = GetRealAxis(static_cast<int>(ndim), axis); auto reduce_axes = MakeReduceAxes(real_axis, data); auto target_shape = MakeReduceTargetShape(real_axis, data, keepdims, atleast1d); auto compute = [ndim, keepdims, &real_axis, &reduce_axes, &func, &data](const Array<Var>& indices) { Array<PrimExpr> eval_range; Array<PrimExpr> eval_indices; int arg_counter = 0; int red_counter = 0; for (size_t i = 0; i < ndim; ++i) { if (std::find(real_axis.begin(), real_axis.end(), i) != real_axis.end()) { // real_axis contains i eval_range.push_back(reduce_axes[red_counter]); eval_indices.push_back(reduce_axes[red_counter]->var); red_counter++; } else { if (!keepdims) { eval_range.push_back(indices[arg_counter]); arg_counter++; } else { eval_range.push_back(indices[i]); } } } Array<PrimExpr> ravel_shape; for (auto i : real_axis) { ravel_shape.push_back(data->shape[i]); } auto idx = detail::RavelIndex(eval_indices, ravel_shape); return func({idx, data(eval_range)}, reduce_axes, nullptr); }; auto temp_idx_val = tvm::te::compute(target_shape, compute, data->op->name + "_red_temp", kCommReduceIdx); auto temp_idx = temp_idx_val[0]; auto temp_val = temp_idx_val[1]; return tvm::te::compute( target_shape, [&temp_idx](const Array<Var>& indices) { return temp_idx(indices); }, data->op->name + "_red", kCommReduceIdx); } /*! \brief A combiner function for a reduction */ using FCombine = std::function<Array<PrimExpr>(Array<Var> lhs, Array<Var> rhs)>; /*! \brief An initializer function for a reduction */ using FIdentity = std::function<Array<PrimExpr>(std::vector<DataType> types)>; /*! * \brief Create a commutative reducer for a reduction * * \param fcombine A function to combine exprs * \param fidentity A function to initialize elements * \param name The name of the operation * * \return A reducer function which creates a reduce expression over an axis. */ inline FCommReduce MakeCommReducer(FCombine fcombine, FIdentity fidentity, std::string name = "reduce") { return [fcombine, fidentity, name](Array<PrimExpr> exprs, const Array<IterVar>& axis, PrimExpr* condition) { Array<Var> lhs, rhs; std::vector<DataType> dtypes; for (size_t i = 0; i < exprs.size(); ++i) { auto dtype = exprs[i].dtype(); dtypes.push_back(dtype); lhs.push_back(var(name + "_lhs_" + std::to_string(i), dtype)); rhs.push_back(var(name + "_rhs_" + std::to_string(i), dtype)); } auto result = fcombine(lhs, rhs); auto id_elem = fidentity(dtypes); auto cond = condition != nullptr ? *condition : tir::const_true(); auto combiner = tvm::tir::CommReducer(lhs, rhs, result, id_elem); Array<PrimExpr> outputs; for (size_t i = 0; i < exprs.size(); ++i) { outputs.push_back(tvm::tir::Reduce(combiner, exprs, axis, cond, static_cast<int>(i), {})); } return outputs; }; } /*! \brief Wrap tvm::min to ensure we get the correct overload */ inline PrimExpr MinOp(PrimExpr source, Array<IterVar> axis, Array<PrimExpr> init = {}, Span span = Span()) { return tvm::min(source, axis, init, span); } /*! \brief Wrap tvm::max to ensure we get the correct overload */ inline PrimExpr MaxOp(PrimExpr source, Array<IterVar> axis, Array<PrimExpr> init = {}, Span span = Span()) { return tvm::max(source, axis, init, span); // NOLINT(*) } /*! 
\brief Wrap tvm::prod to ensure we get the correct overload */ inline PrimExpr ProdOp(PrimExpr source, Array<IterVar> axis, Array<PrimExpr> init = {}, Span span = Span()) { return tvm::prod(source, axis, init, span); // NOLINT(*) } /*! * \brief Creates an operation that sums array elements over a given axis * * \param data The input tensor * \param axis The axis to sum over. If axis is empty, the operation will * sum over all elements of the array. * \param keepdims If this is set to true, the axes which are reduced are * left in the result as dimensions with size one. This enables the result * to broadcast correctly against the input array. * \param atleast1d Whether the output need to be atleast1d. * * \return A Tensor whose op member is the sum operation */ inline Tensor sum(const Tensor& data, const Array<Integer>& axis, bool keepdims = false, bool atleast1d = false) { return CommReduce(data, axis, tvm::sum, keepdims, atleast1d); } inline Tensor collapse_sum(const Tensor& data, Array<PrimExpr> target_shape) { ICHECK_GE(data->shape.size(), target_shape.size()); auto ishape = detail::GetConstIntValues(data->shape, "ishape"); auto oshape = detail::GetConstIntValues(target_shape, "oshape"); std::vector<int> reduce_axes; std::vector<int> squeeze_axes; for (int i_ax = ishape.size() - 1, o_ax = oshape.size() - 1; i_ax >= 0; --i_ax) { if (o_ax >= 0 && ishape[i_ax] == oshape[o_ax]) { --o_ax; continue; } reduce_axes.push_back(i_ax); if (o_ax < 0) { // squeeze o_ax if was added during expansion squeeze_axes.push_back(i_ax); } else if (oshape[o_ax] == 1) { --o_ax; } } if (reduce_axes.size() == 0) return topi::identity(data, "tensor", kCommReduce); std::reverse(reduce_axes.begin(), reduce_axes.end()); std::reverse(squeeze_axes.begin(), squeeze_axes.end()); return DoCommReduce(data, tvm::sum, target_shape, reduce_axes, squeeze_axes); } /*! * \brief Creates an operation that computes the logical AND of elements * over a given axis * * \param data The input boolean tensor * \param axis The axes to reduce. If axis is empty, the operation will * perform logical AND over all elements of the array. * \param keepdims If this is set to true, the axes which are reduced are * left in the result as dimensions with size one. This enables the result * to broadcast correctly against the input array. * \param atleast1d Whether the output need to be atleast1d. * * \return A Tensor whose op member is the all operation */ inline Tensor all(const Tensor& data, const Array<Integer>& axis, bool keepdims = false, bool atleast1d = false) { return CommReduce(data, axis, tvm::all, keepdims, atleast1d); } /*! * \brief Creates an operation that computes the logical OR of elements * over a given axis * * \param data The input boolean tensor * \param axis The axes to reduce. If axis is empty, the operation will * perform logical OR over all elements of the array. * \param keepdims If this is set to true, the axes which are reduced are * left in the result as dimensions with size one. This enables the result * to broadcast correctly against the input array. * \param atleast1d Whether the output need to be atleast1d. * * \return A Tensor whose op member is the all operation */ inline Tensor any(const Tensor& data, const Array<Integer>& axis, bool keepdims = false, bool atleast1d = false) { return CommReduce(data, axis, tvm::any, keepdims, atleast1d); } /*! * \brief Creates an operation that finds the minimum of elements over * a given axis. * * \param data The input tensor * \param axis The axis to find the minimum over. 
If axis is empty, the * operation will find the minimum over all elements of the array. * \param keepdims If this is set to true, the axes which are reduced are * left in the result as dimensions with size one. This enables the result * to broadcast correctly against the input array. * \param atleast1d Whether the output need to be atleast1d. * * \return A Tensor whose op member is the min operation */ inline Tensor min(const Tensor& data, const Array<Integer>& axis, bool keepdims = false, bool atleast1d = false) { return CommReduce(data, axis, MinOp, keepdims, atleast1d); } /*! * \brief Creates an operation that finds the maximum of elements over * a given axis. * * \param data The input tensor * \param axis The axis to find the maximum over. If axis is empty, the * operation will find the maximum over all elements of the array. * \param keepdims If this is set to true, the axes which are reduced are * left in the result as dimensions with size one. This enables the result * to broadcast correctly against the input array. * \param atleast1d Whether the output need to be atleast1d. * * \return A Tensor whose op member is the max operation */ inline Tensor max(const Tensor& data, const Array<Integer>& axis, bool keepdims = false, bool atleast1d = false) { return CommReduce(data, axis, MaxOp, keepdims, atleast1d); } inline FCommReduce MakeArgminReducer(bool select_last_index = false) { // Create a Commutative Reducer with a comparison operation, and method to get the initial value. auto fcombine = [=](Array<Var> lhs, Array<Var> rhs) { Array<PrimExpr> result; // Casting to avoid operator ambiguity PrimExpr lhs_idx = static_cast<PrimExpr>(lhs[0]); PrimExpr rhs_idx = static_cast<PrimExpr>(rhs[0]); PrimExpr lhs_val = static_cast<PrimExpr>(lhs[1]); PrimExpr rhs_val = static_cast<PrimExpr>(rhs[1]); // These variables compare the actual values of the array auto is_smaller = lhs_val < rhs_val; auto is_same = lhs_val == rhs_val; // This checks if the indices are correct for the reduction. E.g. for select_last_index // it gives precedence for later indices of the same element and precedence for sooner // indices if not select_last_index; PrimExpr proper_index; if (select_last_index) { proper_index = lhs_idx > rhs_idx; } else { proper_index = lhs_idx < rhs_idx; } PrimExpr update_index = is_smaller || (is_same && proper_index); result.push_back(tvm::tir::Select(update_index, lhs[0], rhs[0])); // idx result.push_back(tvm::tir::Select(is_smaller, lhs[1], rhs[1])); // val return result; }; auto fidentity = [&](std::vector<DataType> types) { Array<PrimExpr> result; result.push_back(tvm::tir::make_const(types[0], -1)); // idx result.push_back(tvm::max_value(types[1])); // val return result; }; return MakeCommReducer(fcombine, fidentity, "argmin"); } /*! * \brief Creates an operation that finds the indices of the minimum * values over a given axis. * * \param data The input tensor * \param axis The axis along which the argmin is performed. If axis is empty, * the operation will find the minimum index over all elements of the array. * \param keepdims If this is set to true, the axes which are reduced are * left in the result as dimensions with size one. This enables the result * to broadcast correctly against the input array. * \param atleast1d Whether the output need to be atleast1d. * \param select_last_index Whether to select the last index if the minimum element * appears multiple times, else select the first index. 
* * \return A Tensor whose op member is the argmin operation */ inline Tensor argmin(const Tensor& data, const Array<Integer>& axis, bool keepdims = false, bool atleast1d = false, bool select_last_index = false) { auto reducer = MakeArgminReducer(select_last_index); return CommReduceIdx(data, axis, reducer, keepdims, atleast1d); } inline FCommReduce MakeArgmaxReducer(bool select_last_index = false) { // Create a Commutative Reducer with a comparison operation, and method to get the initial value. auto fcombine = [=](Array<Var> lhs, Array<Var> rhs) { Array<PrimExpr> result; // Casting to avoid operator ambiguity PrimExpr lhs_idx = static_cast<PrimExpr>(lhs[0]); PrimExpr rhs_idx = static_cast<PrimExpr>(rhs[0]); PrimExpr lhs_val = static_cast<PrimExpr>(lhs[1]); PrimExpr rhs_val = static_cast<PrimExpr>(rhs[1]); // These variables compare the actual values of the array auto is_bigger = lhs_val > rhs_val; auto is_same = lhs_val == rhs_val; // This checks if the indices are correct for the reduction. E.g. for select_last_index // it gives precedence for later indices of the same element and precedence for sooner // indices if not select_last_index; PrimExpr proper_index; if (select_last_index) { proper_index = lhs_idx > rhs_idx; } else { proper_index = lhs_idx < rhs_idx; } PrimExpr update_index = is_bigger || (is_same && proper_index); result.push_back(tvm::tir::Select(update_index, lhs[0], rhs[0])); // idx result.push_back(tvm::tir::Select(is_bigger, lhs[1], rhs[1])); // val return result; }; auto fidentity = [&](std::vector<DataType> types) { Array<PrimExpr> result; result.push_back(tvm::tir::make_const(types[0], -1)); // idx result.push_back(tvm::min_value(types[1])); // val return result; }; return MakeCommReducer(fcombine, fidentity, "argmax"); } /*! * \brief Creates an operation that finds the indices of the maximum * values over a given axis. * * \param data The input tensor * \param axis The axis along which the argmax is performed. If axis is empty, * the operation will find the maximum index over all elements of the array. * \param keepdims If this is set to true, the axes which are reduced are * left in the result as dimensions with size one. This enables the result * to broadcast correctly against the input array. * \param atleast1d Whether the output need to be atleast1d. * \param select_last_index Whether to select the last index if the maximum element * appears multiple times, else select the first index. * \return A Tensor whose op member is the argmax operation */ inline Tensor argmax(const Tensor& data, const Array<Integer>& axis, bool keepdims = false, bool atleast1d = false, bool select_last_index = false) { auto reducer = MakeArgmaxReducer(select_last_index); return CommReduceIdx(data, axis, reducer, keepdims, atleast1d); } /*! * \brief Creates product operation over given axis. * * \param data The input tensor * \param axis The axis to do product over. If axis is empty, the * operation will do the product over all elements of the array. * \param keepdims If this is set to true, the axes which are reduced are * left in the result as dimensions with size one. This enables the result * to broadcast correctly against the input array. * \param atleast1d Whether the output need to be atleast1d. * * \return A Tensor whose op member is the prod operation */ inline Tensor prod(const Tensor& data, const Array<Integer>& axis, bool keepdims = false, bool atleast1d = false) { return CommReduce(data, axis, ProdOp, keepdims, atleast1d); } /*! 
* \brief Create commutative reducer summing over tuples */ inline FCommReduce MakeTupleSumReducer() { auto fcombine = [](Array<Var> lhs, Array<Var> rhs) { Array<PrimExpr> result; ICHECK_EQ(lhs.size(), rhs.size()); result.reserve(lhs.size()); for (size_t i = 0; i < lhs.size(); ++i) { result.push_back(lhs[i] + rhs[i]); } return result; }; auto fidentity = [](std::vector<DataType> types) { Array<PrimExpr> result; for (size_t i = 0; i < types.size(); ++i) { result.push_back(tvm::tir::make_const(types[i], 0)); } return result; }; return MakeCommReducer(fcombine, fidentity, "tuple_sum"); } } // namespace topi } // namespace tvm #endif // TVM_TOPI_REDUCTION_H_
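The helpers above compose: CommReduce drives plain value reductions such as sum/min/max, while CommReduceIdx pairs each value with its flattened index so that argmin/argmax can select positions. A minimal usage sketch (not part of the header; the tensor name and shapes are illustrative):

#include <tvm/te/operation.h>
#include <tvm/topi/reduction.h>

void ReductionExample() {
  using namespace tvm;
  using tvm::te::Tensor;
  // A 2-D input to reduce over.
  Tensor A = te::placeholder({128, 64}, DataType::Float(32), "A");
  // Sum over axis 1 with keepdims: result shape is (128, 1).
  Tensor row_sum = topi::sum(A, {1}, /*keepdims=*/true);
  // Row-wise argmax; with select_last_index=false the first occurrence wins.
  Tensor row_argmax = topi::argmax(A, {1}, /*keepdims=*/false,
                                   /*atleast1d=*/false, /*select_last_index=*/false);
  (void)row_sum;
  (void)row_argmax;
}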
https://github.com/zk-ml/tachikoma
include/tvm/topi/rocm/dense.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file rocm/dense.h * \brief rocm schedule for dense operation */ #ifndef TVM_TOPI_ROCM_DENSE_H_ #define TVM_TOPI_ROCM_DENSE_H_ #include <tvm/target/generic_func.h> #include <tvm/te/operation.h> #include <tvm/topi/contrib/rocblas.h> #include <tvm/topi/cuda/dense.h> #include <tvm/topi/detail/array_utils.h> #include <tvm/topi/generic/extern.h> #include <tvm/topi/nn/dense.h> #include <tvm/topi/tags.h> namespace tvm { namespace topi { using namespace tvm::te; namespace rocm { /*! * \brief Implementation of dense for rocm backend * * \param target The target device * \param data Tensor with shape [batch, in_dim] * \param weight Tensor with shape [out_dim, in_dim] * \param bias Tensor with shape [out_dim]. Optional; to omit bias, pass Tensor() * \param out_dtype Output data type. Used for mixed precision. * * \return Tensor with shape [batch, out_dim] */ inline tvm::te::Tensor dense_rocm(const Target& target, const tvm::te::Tensor& data, const tvm::te::Tensor& weight, const tvm::te::Tensor& bias, const DataType& out_dtype) { ICHECK_EQ(data->shape.size(), 2) << "dense requires 2-D data"; ICHECK_EQ(weight->shape.size(), 2) << "dense requires 2-D weight"; if (bias.defined()) { ICHECK_EQ(bias->shape.size(), 1) << "dense requires 1-D bias"; } auto batch = data->shape[0]; auto in_dim = data->shape[1]; auto out_dim = weight->shape[0]; if (target->GetLibs().count("rocblas")) { ICHECK_EQ(data->dtype, out_dtype) << "Mixed precision not supported."; auto mm = topi::contrib::rocblas_matmul(data, weight, false, true); if (bias.defined()) { mm = tvm::te::compute( {batch, out_dim}, [&](Var i, Var j) { return mm(i, j) + bias(j); }, "tensor", kBroadcast); } return mm; } else { return topi::nn::dense(data, weight, bias, out_dtype); } } /*! * \brief Create a rocm schedule for dense * * \param target The target to generate a schedule for. * \param outs The output tensors. * * \return A schedule for the given ops. */ inline Schedule schedule_dense(const Target& target, const Array<Tensor>& outs) { if (target->kind->name == "rocm" && target->GetLibs().count("rocblas")) { return topi::generic::schedule_extern(target, outs); } return topi::cuda::schedule_dense(target, outs); } } // namespace rocm } // namespace topi } // namespace tvm #endif // TVM_TOPI_ROCM_DENSE_H_
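A hedged sketch of how dense_rocm might be invoked; the target string and placeholder shapes are assumptions for illustration. When the target carries -libs=rocblas the rocBLAS path is taken, otherwise it falls back to the generic topi::nn::dense:

#include <tvm/target/target.h>
#include <tvm/te/operation.h>
#include <tvm/topi/rocm/dense.h>

tvm::te::Tensor DenseRocmExample() {
  using namespace tvm;
  using tvm::te::Tensor;
  Target target = Target("rocm -libs=rocblas");  // enables the rocblas branch
  Tensor data = te::placeholder({16, 256}, DataType::Float(32), "data");
  Tensor weight = te::placeholder({64, 256}, DataType::Float(32), "weight");
  // Pass an undefined Tensor() to omit the bias term.
  return topi::rocm::dense_rocm(target, data, weight, Tensor(), DataType::Float(32));
}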
https://github.com/zk-ml/tachikoma
include/tvm/topi/rocm/injective.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file rocm/injective.h * \brief rocm schedule for injective operations */ #ifndef TVM_TOPI_ROCM_INJECTIVE_H_ #define TVM_TOPI_ROCM_INJECTIVE_H_ #include <tvm/target/generic_func.h> #include <tvm/te/operation.h> #include <tvm/topi/cuda/injective.h> #include <tvm/topi/detail/fuse.h> #include <tvm/topi/tags.h> namespace tvm { namespace topi { using namespace tvm::te; namespace rocm { /*! * \brief Updates an existing schedule for the given injective ops. * * \param sch The schedule to update. * \param out The tensor representing the injective op. * * \return The updated schedule. */ inline Schedule schedule_injective_from_existing(Schedule sch, const Tensor& out) { return topi::cuda::schedule_injective_from_existing(sch, out); } /*! * \brief Create a rocm schedule for the given output tensors. * * \param target The target to generate a schedule for. * \param outs The output tensors. * * \return A schedule for the given ops. */ inline Schedule schedule_injective(const Target& target, const Array<Tensor>& outs) { return topi::cuda::schedule_injective(target, outs); } } // namespace rocm } // namespace topi } // namespace tvm #endif // TVM_TOPI_ROCM_INJECTIVE_H_
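These rocm schedules are thin forwards to their CUDA counterparts, since the two GPU backends share the same thread/block scheduling structure. A brief illustrative sketch (tensor and target names are assumptions):

#include <tvm/target/target.h>
#include <tvm/topi/broadcast.h>
#include <tvm/topi/rocm/injective.h>

tvm::te::Schedule InjectiveScheduleExample() {
  using namespace tvm;
  using tvm::te::Tensor;
  Tensor a = te::placeholder({1024}, DataType::Float(32), "a");
  Tensor b = topi::add(a, a);  // elementwise ops are injective
  return topi::rocm::schedule_injective(Target("rocm"), {b});
}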
https://github.com/zk-ml/tachikoma
include/tvm/topi/rocm/pooling.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file rocm/pooling.h * \brief rocm schedule for pooling operations */ #ifndef TVM_TOPI_ROCM_POOLING_H_ #define TVM_TOPI_ROCM_POOLING_H_ #include <tvm/target/generic_func.h> #include <tvm/te/operation.h> #include <tvm/topi/cuda/pooling.h> #include <tvm/topi/detail/array_utils.h> #include <tvm/topi/detail/fuse.h> #include <tvm/topi/tags.h> namespace tvm { namespace topi { using namespace tvm::te; namespace rocm { /*! * \brief Create a rocm schedule for pool * * \param target The target to generate a schedule for. * \param outs The output tensors. * * \return A schedule for the given ops. */ inline Schedule schedule_pool(const Target& target, const Array<Tensor>& outs) { return topi::cuda::schedule_pool(target, outs); } /*! * \brief Create a rocm schedule for global_pool * * \param target The target to generate a schedule for. * \param outs The output tensors. * * \return A schedule for the given ops. */ inline Schedule schedule_global_pool(const Target& target, const Array<Tensor>& outs) { return topi::cuda::schedule_global_pool(target, outs); } } // namespace rocm } // namespace topi } // namespace tvm #endif // TVM_TOPI_ROCM_POOLING_H_
https://github.com/zk-ml/tachikoma
include/tvm/topi/rocm/reduction.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file rocm/reduction.h * \brief rocm schedule for reduction operations */ #ifndef TVM_TOPI_ROCM_REDUCTION_H_ #define TVM_TOPI_ROCM_REDUCTION_H_ #include <tvm/target/generic_func.h> #include <tvm/te/operation.h> #include <tvm/topi/cuda/reduction.h> #include <tvm/topi/detail/fuse.h> #include <tvm/topi/tags.h> namespace tvm { namespace topi { using namespace tvm::te; namespace rocm { /*! * \brief Create a rocm schedule for a reduce operation. * * \param target The target to generate a schedule for. * \param outs The output tensors. * * \return A schedule for the given ops. */ inline Schedule schedule_reduce(const Target& target, Array<Tensor> outs) { return topi::cuda::schedule_reduce(target, outs); } } // namespace rocm } // namespace topi } // namespace tvm #endif // TVM_TOPI_ROCM_REDUCTION_H_
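As with the other wrappers, a reduction compute built from reduction.h can be paired with this schedule. A small sketch under the same illustrative assumptions as above:

#include <tvm/target/target.h>
#include <tvm/topi/reduction.h>
#include <tvm/topi/rocm/reduction.h>

tvm::te::Schedule ReduceScheduleExample() {
  using namespace tvm;
  using tvm::te::Tensor;
  Tensor a = te::placeholder({256, 256}, DataType::Float(32), "a");
  Tensor r = topi::sum(a, {1});  // tagged kCommReduce, recognized by the schedule
  return topi::rocm::schedule_reduce(Target("rocm"), {r});
}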
https://github.com/zk-ml/tachikoma
include/tvm/topi/rocm/softmax.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file rocm/softmax.h * \brief rocm schedule for softmax operations */ #ifndef TVM_TOPI_ROCM_SOFTMAX_H_ #define TVM_TOPI_ROCM_SOFTMAX_H_ #include <tvm/target/generic_func.h> #include <tvm/te/operation.h> #include <tvm/topi/cuda/softmax.h> #include <tvm/topi/detail/fuse.h> #include <tvm/topi/tags.h> namespace tvm { namespace topi { using namespace tvm::te; namespace rocm { /*! * \brief Create a rocm schedule for the given softmax output tensors. * * \param target The target to generate a schedule for. * \param outs The output tensors. * * \return A schedule for the given ops. */ inline Schedule schedule_softmax(const Target& target, const Array<Tensor>& outs) { return topi::cuda::schedule_softmax(target, outs); } } // namespace rocm } // namespace topi } // namespace tvm #endif // TVM_TOPI_ROCM_SOFTMAX_H_
https://github.com/zk-ml/tachikoma
include/tvm/topi/tags.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \brief Tag definitions * \file tags.h */ #ifndef TVM_TOPI_TAGS_H_ #define TVM_TOPI_TAGS_H_ #include <string> namespace tvm { namespace topi { constexpr auto kElementWise = "elemwise"; constexpr auto kInjective = "injective"; constexpr auto kCommReduce = "comm_reduce"; constexpr auto kCommReduceIdx = "comm_reduce_idx"; constexpr auto kBroadcast = "broadcast"; constexpr auto kMatMul = "matmul"; constexpr auto kConv2dNCHW = "conv2d_nchw"; constexpr auto kConv2dHWCN = "conv2d_hwcn"; constexpr auto kDepthwiseConv2dNCHW = "depthwise_conv2d_nchw"; constexpr auto kDepthwiseConv2dNHWC = "depthwise_conv2d_nhwc"; constexpr auto kDepthwiseConv2dBackInputNHWC = "depthwise_conv2d_back_input_nhwc"; constexpr auto kDepthwiseConv2dBackWeightNHWC = "depthwise_conv2d_back_weight_nhwc"; constexpr auto kEinsum = "einsum"; constexpr auto kGroupConv2d = "group_conv2d"; inline bool is_broadcast(std::string tag) { return tag.rfind(kElementWise, 0) == 0 || tag.rfind(kBroadcast, 0) == 0; } inline bool is_injective(std::string tag) { return tag.rfind(kElementWise, 0) == 0 || tag.rfind(kBroadcast, 0) == 0 || tag.rfind(kInjective, 0) == 0; } } // namespace topi } // namespace tvm #endif // TVM_TOPI_TAGS_H_
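The classification helpers above match by prefix (rfind(tag, 0) == 0), so schedules still recognize tags that carry suffixes appended by operator implementations. A small self-contained sketch:

#include <cassert>
#include <string>
#include <tvm/topi/tags.h>

void TagsExample() {
  using namespace tvm::topi;
  // Prefix matching: "elemwise.exp" still counts as elementwise/broadcast.
  assert(is_broadcast(std::string(kElementWise) + ".exp"));
  assert(is_injective(kInjective));
  // Reductions are neither broadcast nor injective.
  assert(!is_injective(kCommReduce));
}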
https://github.com/zk-ml/tachikoma
include/tvm/topi/transform.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file topi/transform.h * \brief Transform op constructors */ #ifndef TVM_TOPI_TRANSFORM_H_ #define TVM_TOPI_TRANSFORM_H_ #include <tvm/te/operation.h> #include <tvm/tir/data_layout.h> #include <tvm/tir/index_map.h> #include <tvm/topi/broadcast.h> #include <tvm/topi/detail/broadcast.h> #include <tvm/topi/detail/constant_utils.h> #include <tvm/topi/detail/ravel_unravel.h> #include <tvm/topi/detail/strided_slice.h> #include <tvm/topi/detail/tensor_utils.h> #include <tvm/topi/tags.h> #include <algorithm> #include <iterator> #include <limits> #include <string> #include <unordered_set> #include <vector> namespace tvm { namespace topi { using namespace tvm::te; using namespace topi::detail; /*! * \brief Creates an operation to slide a window over the input x. * * \param x The input tensor. * \param axis What axis the window begins sliding over. Window will be slid * over this axis and all following axes. The axis value determines the window * shape (and thus, the number of strides): window shape and strides must both * be of length `data.ndim-axis`. * \param window_shape The window shape to form over the input. Window shape * must be of length `data.ndim-axis`. * \param strides How to stride the window along each dimension. Strides must be * of length `data.ndim-axis`. * \param name The name of the operation * \param tag The tag to mark the operation * * \return A Tensor whose op member is the sliding_window operation */ inline Tensor sliding_window(const Tensor& x, int axis, Array<Integer> window_shape, Array<Integer> strides, std::string name = "T_sliding_window", std::string tag = "") { CHECK_GE(axis, 0); auto _axis = size_t(axis); CHECK_LT(_axis, x->shape.size()) << "axis must be a valid dimension index of x."; CHECK_EQ(x->shape.size() - _axis, window_shape.size()) << "There must be a window shape for every dimension of x " << "over which we are sliding the window."; CHECK_EQ(strides.size(), window_shape.size()) << "Windows and strides should be the same length."; // Compute the new shape. Array<PrimExpr> new_shape; // Dimensions up until `axis` remain the same. for (size_t i = 0; i < _axis; ++i) { new_shape.push_back(x->shape[i]); } // New dimensions which result from sliding the window in each dimension. One new dimension per // window dimension. for (size_t i = 0; i < window_shape.size(); ++i) { // Length of the shape along this dimension. auto dim_len = x->shape[_axis + i]; // Length of the window along this dimension. auto window_len = window_shape[i]; // Strides along this dimension. auto stride = strides[i]; new_shape.push_back(floordiv(dim_len - (window_len - 1) + stride - 1, stride)); } // Dimensions comprising the window. 
for (size_t i = 0; i < window_shape.size(); ++i) { new_shape.push_back(window_shape[i]); } ICHECK(new_shape.size() == _axis + 2 * window_shape.size()); return compute( new_shape, [&](const Array<Var>& indices) { // The index at which to index the old tensor x. Array<PrimExpr> idx; // Dimensions up until `axis` remain the same. for (size_t i = 0; i < _axis; ++i) { idx.push_back(indices[i]); } for (size_t i = 0; i < window_shape.size(); ++i) { // Which window in this dimension we are indexing. auto window_idx = indices[_axis + i]; // Which index within the window we are indexing. auto idx_within_window = indices[_axis + window_shape.size() + i]; // Stride value for this dimension. auto stride = strides[i]; idx.push_back(window_idx * stride + idx_within_window); } ICHECK(idx.size() == x->shape.size()); return x(idx); }, name, tag); } /*! * \brief Creates an operation to insert new dimensions of length 1 * * \param x The input tensor * \param axis The index of the first new dimension (allows negative * indices as offsets from the last dimension) * \param num_newaxis The number of new dimensions to insert * \param name The name of the operation * \param tag The tag to mark the operation * * \return A Tensor whose op member is the dim expansion operation */ inline Tensor expand_dims(const Tensor& x, int axis, int num_newaxis = 1, std::string name = "T_expand_dims", std::string tag = kBroadcast) { int ndim = static_cast<int>(x->shape.size()); ICHECK(-ndim - 1 <= axis && axis <= ndim) << "expand_dims only accepts `axis` in [-data.ndim - 1, data.ndim]" << ", but got axis = " << axis << ", and data.ndim = " << ndim; ICHECK(num_newaxis >= 0) << "expand_dims only accepts `num_newaxis >= 0`" << ", but got num_newaxis = " << num_newaxis; if (axis < 0) { // Calculate offset from last dimension axis = ndim + axis + 1; } Array<PrimExpr> new_shape; for (size_t i = 0; i < static_cast<size_t>(axis); ++i) { new_shape.push_back(x->shape[i]); } for (size_t i = 0; i < static_cast<size_t>(num_newaxis); ++i) { new_shape.push_back(1); } for (size_t i = axis; i < x->shape.size(); ++i) { new_shape.push_back(x->shape[i]); } return compute( new_shape, [&](const Array<Var>& indices) { Array<PrimExpr> idx; for (size_t i = 0; i < static_cast<size_t>(axis); ++i) { idx.push_back(indices[i]); } for (size_t i = axis + num_newaxis; i < indices.size(); ++i) { idx.push_back(indices[i]); } return x(idx); }, name, tag); } /*! * \brief Permute the dimensions of an array * * \param x The input tensor * \param axes The indices of the permutation. If this is empty, * the dimensions will be reversed. 
* \param name The name of the operation * \param tag The tag to mark the operation * * \return A Tensor whose op member is the transpose operation */ inline Tensor transpose(const Tensor& x, Array<Integer> axes, std::string name = "T_transpose", std::string tag = kInjective) { if (!axes.defined() || axes.size() == 0) { axes = Array<Integer>(); for (int i = static_cast<int>(x->shape.size()) - 1; i >= 0; --i) { axes.push_back(i); } } Array<PrimExpr> new_shape; for (size_t i = 0; i < axes.size(); ++i) { int axis = static_cast<int>(axes[i]->value); int new_axis = axis; if (axis < 0) { new_axis = static_cast<int>(x->shape.size()) + axis; axes.Set(i, new_axis); } ICHECK((new_axis >= 0) && (new_axis < static_cast<int>(x->shape.size()))) << "axis=" << axis << " is invalid for the " << static_cast<int>(x->shape.size()) << "-dimensional input tensor"; for (size_t j = 0; j < axes.size(); ++j) { if (i != j) { ICHECK(new_axis != static_cast<int>(axes[j]->value)) << "repeated axis in transpose"; } } new_shape.push_back(x->shape[new_axis]); } return compute( new_shape, [&](const Array<Var>& indices) { std::vector<PrimExpr> idx; for (size_t i = 0; i < axes.size(); ++i) { idx.push_back(1); } for (size_t i = 0; i < axes.size(); ++i) { int axis = static_cast<int>(axes[i]->value); idx[axis] = indices[i]; } return x(idx); }, name, tag); } /*! * \brief Reverse the tensor for variable length slices. * Input is first sliced along batch axis and then elements are reversed along seq axis. * * \param x The input tensor * \param seq_lengths A 1D Tensor with length x.dims[batch_axis]. Optional Tensor() can be passed. * If not defined batch axis is ignored and tensor is reversed along seq_axis. * \param seq_axis The axis along which the elements will be reveresed * \param batch_axis The axis along which the tensor will be sliced * \param name The name of the operation * \param tag The tag to mark the operation * * \return A Tensor whose op member is the reverse_sequence operation */ inline Tensor reverse_sequence(const Tensor& x, const Tensor& seq_lengths, int seq_axis = 1, int batch_axis = 0, std::string name = "T_reverse_sequence", std::string tag = kInjective) { size_t src_tensor_dim = x->shape.size(); int seq_axis_inp = seq_axis; if (seq_lengths.defined()) { size_t seq_lengths_dim = seq_lengths->shape.size(); int batch_axis_inp = batch_axis; if (batch_axis < 0) { batch_axis = static_cast<int>(x->shape.size()) + batch_axis; } ICHECK(seq_lengths_dim == 1) << "seq_lengths should be 1D vector"; ICHECK(GetConstInt(seq_lengths->shape[0]) == GetConstInt(x->shape[batch_axis])) << "For reverse_sequnece seq_lengths size should match with dimension of batch axis" << ", but got dimension of batch_axis = " << GetConstInt(x->shape[batch_axis]) << ", and seq_length size = " << GetConstInt(seq_lengths->shape[0]); ICHECK((0 <= batch_axis) && (batch_axis < static_cast<int>(x->shape.size()))) << "batch_axis=" << batch_axis_inp << " is invalid for the " << static_cast<int>(x->shape.size()) << "-dimensional input tensor"; } if (seq_axis < 0) { seq_axis = static_cast<int>(x->shape.size()) + seq_axis; } ICHECK((0 <= seq_axis) && (seq_axis < static_cast<int>(x->shape.size()))) << "seq_axis=" << seq_axis_inp << " is invalid for the " << static_cast<int>(x->shape.size()) << "-dimensional input tensor"; auto func = [&](const Array<Var>& indices) { Array<PrimExpr> real_indices; for (size_t i = 0; i < src_tensor_dim; ++i) { if (i == static_cast<size_t>(seq_axis)) { if (seq_lengths.defined()) { auto len = seq_lengths(indices[batch_axis]); auto 
idx = if_then_else( len <= 1 || len <= indices[i], indices[i], if_then_else(len > x->shape[i], x->shape[i] - 1 - indices[i], len - 1 - indices[i])); real_indices.push_back(idx); } else { real_indices.push_back(x->shape[i] - 1 - indices[i]); } } else { real_indices.push_back(indices[i]); } } return x(real_indices); }; return compute(x->shape, func, name, tag); } /*! * \brief Reshape a tensor * * \param x The input tensor * \param newshape The new shape * \param name The name of the operation * \param tag The tag to mark the operation * * \return A Tensor whose op member is the reshape operation */ inline Tensor reshape(const Tensor& x, Array<PrimExpr> newshape, std::string name = "T_reshape", std::string tag = kInjective) { auto x_shape = x->shape; Array<PrimExpr> target_shape; for (const auto& ele : newshape) { if (ele.as<IntImmNode>()) { target_shape.push_back(cast(DataType::Int(32), ele)); } else { target_shape.push_back(ele); } } // If either the input shape or the target shape contains a zero, return an empty tensor. if (is_empty_shape(target_shape) || is_empty_shape(x->shape)) { return compute( target_shape, [&](const Array<Var>& indices) { return tvm::cast(x->dtype, 0); }, name, tag); } else { return compute( target_shape, [&](const Array<Var>& indices) { return x(UnravelIndex( RavelIndex(Array<PrimExpr>{indices.begin(), indices.end()}, target_shape), x_shape)); }, name, tag); } } /*! * \brief Converts a flat index or array of flat indices into a tuple of coordinate arrays * * \param x The input tensor having indices. * \param shape The shape tensor * \param name The name of the operation * \param tag The tag to mark the operation * * \return A Tensor of coordinate arrays. */ inline Tensor unravel_index(const Tensor& x, const Tensor& shape, std::string name = "T_unravel", std::string tag = kInjective) { auto x_shape = x->shape; auto shape_shape = shape->shape; Array<PrimExpr> oshape; oshape.push_back(shape_shape[0]); if (x_shape.size() != 0) { oshape.push_back(x_shape[0]); } auto func = [&](const Array<Var>& indices) { auto i = indices[0]; std::vector<PrimExpr> indices_divs; PrimExpr ret = 0; PrimExpr cur_val = 0; PrimExpr index_val = 0; if (x_shape.size() != 0) { index_val = x[indices[1]]; } else { index_val = x(); } indices_divs.push_back(index_val); for (int v = GetConstInt(shape_shape[0]) - 1; v >= 0; --v) { ret = tvm::if_then_else(i == v, indexmod(indices_divs.back(), shape[v]), ret); cur_val = indexdiv(indices_divs.back(), shape[v]); indices_divs.push_back(cur_val); } return ret; }; return compute(oshape, func, name, tag); } /*! * \brief Remove size 1 dimensions from the shape of a tensor. * The removed dimensions must have a constant size of 1. * * \param x The input tensor * \param axis Indices of the dimensions to remove. If this is None, * all entries with a constant size of 1 will be removed. * \param atleast1d Whether the output need to be atleast1d. 
* \param name The name of the operation * \param tag The tag to mark the operation * * \return A Tensor whose op member is the squeeze operation */ inline Tensor squeeze(const Tensor& x, Array<Integer> axis, bool atleast1d = false, std::string name = "T_squeeze", std::string tag = kInjective) { auto ndim = x->shape.size(); std::vector<int> axis_val; if (!axis.defined()) { for (size_t i = 0; i < ndim; ++i) { if (IsConstInt(x->shape[i]) && GetConstInt(x->shape[i]) == 1) { axis_val.push_back(static_cast<int>(i)); } } } else { for (size_t i = 0; i < axis.size(); ++i) { int64_t val = axis[i]->value; if (val < 0) { val += static_cast<int>(x->shape.size()); } if (IsConstInt(x->shape[val])) { ICHECK_EQ(GetConstInt(x->shape[val]), 1) << "Dimension " << val << " must have size 1"; } axis_val.push_back(val); } } std::unordered_set<int> axis_set(axis_val.begin(), axis_val.end()); Array<PrimExpr> out_shape; for (size_t i = 0; i < ndim; ++i) { if (axis_set.count(static_cast<int>(i)) == 0) { out_shape.push_back(x->shape[i]); } } if (out_shape.size() == 0 && atleast1d) { out_shape.push_back(1); } return compute( out_shape, [&](const Array<Var>& indices) { Array<PrimExpr> real_indices; int flag = 0; for (size_t i = 0; i < ndim; ++i) { if (axis_set.count(static_cast<int>(i)) == 0) { real_indices.push_back(indices[i - flag]); } else { real_indices.push_back(0); flag += 1; } } return x(real_indices); }, name, tag); } /*! * \brief Join a sequence of tensors along an existing axis * * \param inputs The input tensors * \param axis The axis along which the tensors will be joined * \param name The name of the operation * \param tag The tag to mark the operation * * \return A Tensor whose op member is the concatenate operation */ inline Tensor concatenate(const Array<Tensor>& inputs, int axis = 0, std::string name = "T_concat", std::string tag = kInjective) { int ndim = static_cast<int>(inputs[0]->shape.size()); ICHECK(-ndim <= axis && axis < ndim) << "concatenate only accepts `axis` in [-ndim, ndim)" << ", but got axis = " << axis << ", and ndim = " << ndim; if (axis < 0) { axis += ndim; } ICHECK_LT(axis, inputs[0]->shape.size()) << "axis out of bounds"; Array<PrimExpr> axis_sizes; for (auto t : inputs) { axis_sizes.push_back(t->shape[axis]); } arith::Analyzer analyzer; PrimExpr join_size = axis_sizes[0]; for (size_t i = 1; i < axis_sizes.size(); ++i) { join_size += axis_sizes[i]; } join_size = analyzer.Simplify(join_size); Array<PrimExpr> out_shape; for (size_t i = 0; i < inputs[0]->shape.size(); ++i) { out_shape.push_back(i == static_cast<size_t>(axis) ? join_size : inputs[0]->shape[i]); } return compute( out_shape, [&](const Array<Var>& indices) { auto ret = inputs[0](indices); auto ind = indices[axis]; for (size_t i = 0; i < inputs.size() - 1; ++i) { ind -= axis_sizes[i]; Array<PrimExpr> idx; for (size_t i = 0; i < static_cast<size_t>(axis); ++i) { idx.push_back(indices[i]); } idx.push_back(ind); for (size_t i = axis + 1; i < indices.size(); ++i) { idx.push_back(indices[i]); } ret = tvm::if_then_else(ind >= 0, inputs[i + 1](idx), ret); } return ret; }, name, tag); } /*! * \brief Join a sequence of tensors along a new axis. 
* * \param inputs The input tensors * \param axis The axis along which the tensors will be stacked * \param name The name of the operation * \param tag The tag to mark the operation * * \return A Tensor whose op member is the stack operation */ inline Tensor stack(const Array<Tensor>& inputs, int axis = 0, std::string name = "T_stack", std::string tag = kInjective) { int ndim = static_cast<int>(inputs[0]->shape.size()); ICHECK(-ndim - 1 <= axis && axis <= ndim) << "stack only accepts `axis` in [-ndim, ndim)" << ", but got axis = " << axis << ", and ndim = " << ndim; if (axis < 0) { axis += ndim + 1; } ICHECK_LT(axis, inputs[0]->shape.size() + 1) << "axis out of bounds"; const int stack_size = static_cast<int>(inputs.size()); Array<PrimExpr> out_shape; for (size_t i = 0; i < static_cast<size_t>(axis); ++i) out_shape.push_back(inputs[0]->shape[i]); out_shape.push_back(stack_size); for (size_t i = static_cast<size_t>(axis); i < static_cast<size_t>(ndim); ++i) out_shape.push_back(inputs[0]->shape[i]); return compute( out_shape, [&](const Array<Var>& indices) { Array<PrimExpr> idx; for (size_t i = 0; i < indices.size(); ++i) if (i != static_cast<size_t>(axis)) idx.push_back(indices[i]); auto ind = indices[axis]; auto ret = inputs[0](idx); for (int i = 0; i < static_cast<int>(inputs.size() - 1); ++i) { ret = tvm::if_then_else(ind == i + 1, inputs[i + 1](idx), ret); } return ret; }, name, tag); } /*! * \brief Split a tensor into multiple sub-tensors * * \param x The input tensor * \param split_indices The indices to split the input at. This must be in ascending * order. * \param axis The axis to split along. * \param name The name of the operation * \param tag The tag to mark the operation * * \return A Tensor whose op member is the split operation */ inline Array<Tensor> split(const Tensor& x, Array<PrimExpr> split_indices, int axis, std::string name = "T_split", std::string tag = kInjective) { if (axis < 0) { axis += static_cast<int>(x->shape.size()); } ICHECK_LT(axis, x->shape.size()) << "axis out of bounds"; auto src_axis_size = x->shape[axis]; std::vector<PrimExpr> begin_ids; begin_ids.push_back(0); for (auto idx : split_indices) { auto idx_node = idx.as<IntImmNode>(); auto back_node = begin_ids.back().as<IntImmNode>(); if (idx_node && back_node) { ICHECK_GT(idx_node->value, back_node->value) << "split_indices must be sorted"; } begin_ids.push_back(idx); } Array<Array<PrimExpr>> out_shapes; for (size_t i = 0; i < begin_ids.size(); ++i) { PrimExpr out_axis_size; if (i == begin_ids.size() - 1) { out_axis_size = src_axis_size - begin_ids[i]; } else { out_axis_size = begin_ids[i + 1] - begin_ids[i]; } Array<PrimExpr> shape; for (size_t i = 0; i < static_cast<size_t>(axis); ++i) { shape.push_back(x->shape[i]); } shape.push_back(out_axis_size); for (size_t i = axis + 1; i < x->shape.size(); ++i) { shape.push_back(x->shape[i]); } out_shapes.push_back(shape); } Array<Tensor> result; for (size_t i = 0; i < begin_ids.size(); ++i) { result.push_back(compute( out_shapes[i], [&](const Array<Var>& indices) { auto begin = begin_ids[i]; Array<PrimExpr> real_indices; for (size_t j = 0; j < static_cast<size_t>(axis); ++j) { real_indices.push_back(indices[j]); } real_indices.push_back(indices[axis] + begin); for (size_t j = axis + 1; j < indices.size(); ++j) { real_indices.push_back(indices[j]); } return x(real_indices); }, name, tag)); } return result; } /*! 
* \brief strided_slice of a tensor where begin/end/stride can be mixed static and dynamic * * \param x The input tensor * \param begin The indices to begin with in the slicing * \param end Indices indicating end of the slice * \param strides Specifies the stride values, it can be negative * in that case, the input tensor will be reversed in that particular axis * \param name The name of the operation * \param tag The tag to mark the operation * * \return A Tensor whose op member is the dynamic_strided_slice operation */ inline Tensor dynamic_strided_slice(const Tensor& x, const Array<PrimExpr>& begin, const Array<PrimExpr>& end, const Array<PrimExpr>& strides, std::string name = "T_dynamic_strided_slice", std::string tag = kInjective) { const size_t src_tensor_dim = x->shape.size(); ICHECK_LE(begin.size(), src_tensor_dim); ICHECK_LE(end.size(), src_tensor_dim); ICHECK_LE(strides.size(), src_tensor_dim); ICHECK_EQ(begin.size(), end.size()); ICHECK_EQ(begin.size(), strides.size()); const size_t num_slice_axes = begin.size(); Array<PrimExpr> out_shape; for (size_t i = 0; i < num_slice_axes; ++i) { auto d = indexdiv(end[i] - begin[i], strides[i]); if (d->IsInstance<tvm::IntImmNode>()) { // Preserve static dimension if possible out_shape.push_back(d); } else { out_shape.push_back(tvm::tir::Var("dim")); } } for (size_t i = num_slice_axes; i < src_tensor_dim; ++i) { out_shape.push_back(x->shape[i]); } return te::compute( out_shape, [&](const Array<tvm::tir::Var>& indices) { Array<PrimExpr> real_indices; for (size_t i = 0; i < num_slice_axes; ++i) { real_indices.push_back(indices[i] * strides[i] + tvm::min(begin[i], x->shape[i] - 1)); } // keep input dim for (size_t i = num_slice_axes; i < src_tensor_dim; ++i) { real_indices.push_back(indices[i]); } return x(real_indices); }, name, tag); } /*! * \brief strided_slice of a tensor with dynamic begin/end/stride * * \param x The input tensor * \param begin The indices to begin with in the slicing * \param end Indices indicating end of the slice * \param strides Specifies the stride values, it can be negative * in that case, the input tensor will be reversed in that particular axis * \param name The name of the operation * \param tag The tag to mark the operation * * \return A Tensor whose op member is the dynamic_strided_slice operation */ inline te::Tensor dynamic_strided_slice(const te::Tensor& x, const te::Tensor& begin, const te::Tensor& end, const te::Tensor& strides, std::string name = "T_strided_slice_dynamic", std::string tag = topi::kInjective) { const int64_t num_dynamic_axes = begin->shape[0].as<IntImmNode>()->value; ICHECK_EQ(end->shape[0].as<IntImmNode>()->value, num_dynamic_axes); ICHECK_EQ(strides->shape[0].as<IntImmNode>()->value, num_dynamic_axes); Array<PrimExpr> begin_expr, end_expr, strides_expr; for (int64_t i = 0; i < num_dynamic_axes; ++i) { auto i64_ind = IntImm(DataType::Int(64), i); begin_expr.push_back(begin(i64_ind)); end_expr.push_back(end(i64_ind)); strides_expr.push_back(strides(i64_ind)); } return dynamic_strided_slice(x, begin_expr, end_expr, strides_expr, name, tag); } /*! * \brief Calcluate the output shape of strided_slice, the entry point for Relay type relation * * \param ishape The input tensor shape * \param begin The indices to begin with in the slicing * \param end Indices indicating end of the slice * \param strides Specifies the stride values, it can be negative * in that case, the input tensor will be reversed in that particular axis * \param axes Axes along which slicing is applied. 
When it is specified, the length of begin, end, * strides, and axes argument must be equal * \param slice_mode Specifies the slice mode * * \return The output shape of strided_slice using the arguments above */ inline Array<PrimExpr> StridedSliceOutputShape( const Array<PrimExpr>& ishape, const Array<Integer>& begin, const Array<Integer>& end, const Array<Integer>& strides, const Array<Integer>& axes, const std::string& slice_mode) { ICHECK(axes.size() == begin.size() && axes.size() == end.size() && axes.size() == strides.size()); std::vector<int64_t> begin_vec, end_vec, strides_vec; std::tie(begin_vec, end_vec, strides_vec) = ConvertToVec(begin, end, strides, slice_mode); auto begin_canonicalized = StridedSliceCanonicalizeBegin(ishape, begin_vec, strides_vec, axes, begin[0]->dtype, slice_mode); return StridedSliceOutputShape(ishape, begin_vec, end_vec, strides_vec, axes, slice_mode, begin_canonicalized, true); } /*! * \brief strided_slice of a tensor * * \param x The input tensor * \param begin The indices to begin with in the slicing * \param end Indices indicating end of the slice * \param strides Specifies the stride values, it can be negative * in that case, the input tensor will be reversed in that particular axis * \param axes Axes along which slicing is applied. When it is specified, the length of begin, end, * strides, and axes argument must be equal * \param slice_mode Specifies the slice mode * \param name The name of the operation * \param tag The tag to mark the operation * * \return A Tensor whose op member is the sstrided_slice operation */ inline Tensor strided_slice_with_axes(const Tensor& x, const Array<Integer>& begin, const Array<Integer>& end, const Array<Integer>& strides, const Array<Integer>& axes, std::string slice_mode = "end", std::string name = "T_strided_slice_with_axes", std::string tag = kInjective) { const size_t src_tensor_dim = x->shape.size(); ICHECK(axes.size() <= src_tensor_dim); ICHECK(axes.size() == begin.size() && axes.size() == end.size() && axes.size() == strides.size()); std::vector<int64_t> begin_vec, end_vec, strides_vec; std::tie(begin_vec, end_vec, strides_vec) = ConvertToVec(begin, end, strides, slice_mode); auto begin_expr = StridedSliceCanonicalizeBegin(x->shape, begin_vec, strides_vec, axes, begin[0]->dtype, slice_mode); auto out_shape = StridedSliceOutputShape(x->shape, begin_vec, end_vec, strides_vec, axes, slice_mode, begin_expr); return te::compute( out_shape, [&](const Array<tir::Var>& indices) { Array<PrimExpr> real_indices; for (size_t i = 0; i < out_shape.size(); ++i) real_indices.push_back(indices[i]); for (size_t i = 0; i < axes.size(); ++i) { auto stride = make_const(strides[i].dtype(), strides_vec[i]); PrimExpr ind = indices[axes[i].IntValue()] * stride + begin_expr[i]; real_indices.Set(axes[i].IntValue(), ind); } return x(real_indices); }, name, tag); } /*! 
* \brief strided_slice of a tensor * * \param x The input tensor * \param begin The indices to begin with in the slicing * \param end Indices indicating end of the slice * \param strides Specifies the stride values, it can be negative * in that case, the input tensor will be reversed in that particular axis * \param slice_mode Specifies the slice mode * \param name The name of the operation * \param tag The tag to mark the operation * * \return A Tensor whose op member is the strided_slice operation */ inline Tensor strided_slice(const Tensor& x, const Array<Integer>& begin, const Array<Integer>& end, const Array<Integer>& strides, std::string slice_mode = "end", std::string name = "T_strided_slice", std::string tag = kInjective) { size_t src_tensor_dim = static_cast<size_t>(x->shape.size()); Array<Integer> axes; for (size_t i = 0; i < src_tensor_dim; ++i) axes.push_back(i); Array<Integer> begin_full(begin); Array<Integer> end_full(end); Array<Integer> strides_full(strides); const IntImm one = IntImm(DataType::Int(64), 1); const IntImm zero = IntImm(DataType::Int(64), 0); const IntImm max_range = IntImm(DataType::Int(64), std::numeric_limits<int64_t>::max()); for (size_t i = strides.size(); i < src_tensor_dim; ++i) { strides_full.push_back(one); } for (size_t i = begin.size(); i < src_tensor_dim; ++i) { begin_full.push_back(GetConstInt(strides_full[i]) > 0 ? zero : max_range); } for (size_t i = end.size(); i < src_tensor_dim; ++i) { end_full.push_back(GetConstInt(strides_full[i]) < 0 ? zero : max_range); } return strided_slice_with_axes(x, begin_full, end_full, strides_full, axes, slice_mode, name, tag); } /*! * \brief Split a tensor into a number of sub-tensors * * \param x The input tensor * \param num_sections The number of sections to split the tensor into. * this must be an integer factor of the size of the axis being split. * \param axis The axis to split along. * \param name The name of the operation * \param tag The tag to mark the operation * * \return A Tensor whose op member is the split operation */ inline Array<Tensor> split_sections(const Tensor& x, int num_sections, int axis, std::string name = "T_split_sections", std::string tag = kInjective) { if (axis < 0) { axis += static_cast<int>(x->shape.size()); } ICHECK_LT(axis, x->shape.size()) << "axis out of bounds"; auto src_axis_size = x->shape[axis]; ICHECK_GT(num_sections, 0) << "Slice count must be > 0"; if (auto node = src_axis_size.as<IntImmNode>()) { ICHECK_EQ(node->value % num_sections, 0) << "num_sections must be an integer factor of the size of axis " << axis << " (" << node->value << ")"; } Array<PrimExpr> split_indices; auto seg_size = indexdiv(src_axis_size, num_sections); for (int i = 0; i < num_sections; ++i) { // region at index 0 is added by split() if (i != 0) { split_indices.push_back(seg_size * i); } } return split(x, split_indices, axis, name, tag); } /*! * \brief Take elements from an flattened input array when axis is None. * * \param a The source array. * \param indices The indices of the values to extract. * \param batch_dims The number of batch dimensions. * \param mode The mode of the operation. * \param name The name of the operation. * \param mode The mode of to handle out of bound indices. * \param tag The tag to mark the operation. 
* * \return A Tensor whose op member is the take operation */ inline Tensor take(const Tensor& a, const Tensor& indices, int batch_dims, std::string mode = "clip", std::string name = "T_take", std::string tag = kInjective) { Array<PrimExpr> a_shape = a->shape; Array<PrimExpr> out_shape = indices->shape; PrimExpr a_size = 1; for (size_t i = 0; i < a_shape.size(); ++i) { a_size = a_size * a_shape[i]; } if (mode == "clip") { return compute( out_shape, [&](const Array<Var>& out_index) { auto idx = tvm::min(tvm::max(0, indices(out_index)), a_size - 1); return a(UnravelIndex(idx, a_shape)); }, name, tag); } else if (mode == "fast") { LOG(WARNING) << "Fast mode segfaults when there are out-of-bounds indices. " "Make sure input indices are in bound"; return compute( out_shape, [&](const Array<Var>& out_index) { return a(UnravelIndex(indices(out_index), a_shape)); }, name, tag); } else { // mode == "wrap" return compute( out_shape, [&](const Array<Var>& out_index) { auto idx = truncmod(truncmod(indices(out_index), a_size) + a_size, a_size); return a(UnravelIndex(idx, a_shape)); }, name, tag); } } /*! * \brief Mask the out-of-boundary elements of each sequence. * * \param data The source array. * \param valid_length The real length of each sequence. * \param mask_value The masking value. * \param axis The axis of the temporal dimension of the sequence * \param name The name of the operation. * \param tag The tag to mark the operation. * * \return A Tensor whose op member is the sequence_mask operation */ inline Tensor sequence_mask(const Tensor& data, const Tensor& valid_length, double mask_value, int axis, std::string name = "T_sequence_mask", std::string tag = kInjective) { ICHECK(axis == 0 || axis == 1) << "axis must be either 0 or 1"; ICHECK_EQ(valid_length->shape.size(), 1) << "valid_length must have ndim=1, i.e., (batch_size,)."; auto length_dim = data->shape[axis]; auto batch_dim = data->shape[1 - axis]; Array<PrimExpr> out_shape = data->shape; Tensor out = compute( out_shape, [&](const Array<Var>& out_index) { Array<PrimExpr> len_index; auto tid = out_index[axis]; auto bid = out_index[1 - axis]; len_index.push_back(bid); PrimExpr ret = tvm::if_then_else(tvm::cast(valid_length->dtype, tid) >= valid_length(len_index), tvm::tir::make_const(data->dtype, mask_value), data(out_index)); return ret; }, name, tag); return out; } /*! * \brief Take elements from an array along an axis. * * \param a The source array. * \param indices The indices of the values to extract. * \param batch_dims The number of batch dimensions. By default is 0. * \param axis The axis over which to select values. By default, * the flattened input array is used. * \param mode The mode for handling out of bound indices. * \param name The name of the operation. * \param tag The tag to mark the operation. 
* * \return A Tensor whose op member is the take operation */ inline Tensor take(const Tensor& a, const Tensor& indices, int batch_dims, int axis, std::string mode = "clip", std::string name = "T_take", std::string tag = kInjective) { if (axis < 0) { axis += static_cast<int>(a->shape.size()); } ICHECK_GE(axis, 0) << "axis out of bounds"; ICHECK_LT(axis, a->shape.size()) << "axis out of bounds"; auto axis_dim = a->shape[axis]; int indices_len = static_cast<int>(indices->shape.size()); int batch_dims_ = batch_dims; if (batch_dims_ != 0) { ICHECK_GE(batch_dims_, -static_cast<int>(indices->shape.size())) << "batch_dims out of bounds"; ICHECK_LE(batch_dims_, indices->shape.size()) << "batch_dims out of bounds"; if (batch_dims_ < 0) { batch_dims_ = indices->shape.size() + batch_dims_; } ICHECK_LT(batch_dims_, a->shape.size()) << "batch_dims out of bounds"; ICHECK_LE(batch_dims_, axis) << "batch_dims must be less than or equal to axis"; for (int i = 0; i < batch_dims_; ++i) { auto addr1 = a->shape[i]; auto addr2 = indices->shape[i]; auto v1 = static_cast<IntImm*>(&addr1)->get()->value; auto v2 = static_cast<IntImm*>(&addr2)->get()->value; ICHECK_EQ(v1, v2) << "a.shape[" << i << "] should be equal to indices.shape[" << i << "]"; } } // The result shape is a.shape[:axis] + indices.shape[batch_dims:] + // a.shape[axis + 1:]. Array<PrimExpr> out_shape; for (int i = 0; i < batch_dims_; ++i) { out_shape.push_back(a->shape[i]); } for (int i = batch_dims_; i < axis; ++i) { out_shape.push_back(a->shape[i]); } for (size_t i = static_cast<size_t>(batch_dims_); i < indices->shape.size(); ++i) { out_shape.push_back(indices->shape[i]); } for (size_t i = axis + 1; i < a->shape.size(); ++i) { out_shape.push_back(a->shape[i]); } if (mode == "clip") { if (batch_dims_ == 0) { return compute( out_shape, [&](const Array<Var>& out_index) { Array<PrimExpr> indices_position; for (size_t j = axis; j < static_cast<size_t>(axis + indices_len); ++j) { indices_position.push_back(out_index[j]); } Array<PrimExpr> real_indices; for (size_t j = 0; j < static_cast<size_t>(axis); ++j) { real_indices.push_back(out_index[j]); } auto idx = tvm::min(tvm::max(0, indices(indices_position)), axis_dim - 1); real_indices.push_back(idx); for (size_t j = axis + indices_len; j < out_index.size(); ++j) { real_indices.push_back(out_index[j]); } return a(real_indices); }, name, tag); } else { return compute( out_shape, [&](const Array<Var>& out_index) { Array<PrimExpr> indices_position; for (size_t j = 0; j < static_cast<size_t>(batch_dims_); ++j) { indices_position.push_back(out_index[j]); } for (size_t j = axis; j < static_cast<size_t>(axis + indices_len - batch_dims_); ++j) { indices_position.push_back(out_index[j]); } Array<PrimExpr> real_indices; for (size_t j = 0; j < static_cast<size_t>(axis); ++j) { real_indices.push_back(out_index[j]); } auto idx = tvm::min(tvm::max(0, indices(indices_position)), axis_dim - 1); real_indices.push_back(idx); for (size_t j = axis + indices_len - batch_dims_; j < out_index.size(); ++j) { real_indices.push_back(out_index[j]); } return a(real_indices); }, name, tag); } } else if (mode == "fast") { LOG(WARNING) << "Fast mode segfaults when there are out-of-bounds indices. 
" "Make sure input indices are in bound"; return compute( out_shape, [&](const Array<Var>& out_index) { Array<PrimExpr> indices_position; for (size_t j = axis; j < static_cast<size_t>(axis + indices_len); ++j) { indices_position.push_back(out_index[j]); } Array<PrimExpr> real_indices; for (size_t j = 0; j < static_cast<size_t>(axis); ++j) { real_indices.push_back(out_index[j]); } real_indices.push_back(indices(indices_position)); for (size_t j = axis + indices_len; j < out_index.size(); ++j) { real_indices.push_back(out_index[j]); } return a(real_indices); }, name, tag); } else { // mode == "wrap" return compute( out_shape, [&](const Array<Var>& out_index) { Array<PrimExpr> indices_position; for (size_t j = axis; j < static_cast<size_t>(axis + indices_len); ++j) { indices_position.push_back(out_index[j]); } Array<PrimExpr> real_indices; for (size_t j = 0; j < static_cast<size_t>(axis); ++j) { real_indices.push_back(out_index[j]); } auto idx = truncmod(truncmod(indices(indices_position), axis_dim) + axis_dim, axis_dim); real_indices.push_back(idx); for (size_t j = axis + indices_len; j < out_index.size(); ++j) { real_indices.push_back(out_index[j]); } return a(real_indices); }, name, tag); } } /*! * \brief Return the elements, either from x or y, depending on the condition. * * \param condition The condition array. * \param x First array to be selected. * \param y Second array to be selected. * \param name The name of the operation. * \param tag The tag to mark the operation. * * \return A Tensor selected from x or y depending on condition. */ inline Tensor where(const Tensor& condition, const Tensor& x, const Tensor& y, std::string name = "T_where", std::string tag = kBroadcast) { ICHECK_EQ(x->dtype, y->dtype) << "x and y must have the same dtype: " << x->dtype << " vs " << y->dtype; auto get_out_shape = [&]() { auto bh1 = detail::BroadcastShape(x->shape, y->shape); Array<PrimExpr> common_shape1(bh1.common_shape.begin(), bh1.common_shape.end()); auto bh2 = detail::BroadcastShape(condition->shape, common_shape1); Array<PrimExpr> common_shape2(bh2.common_shape.begin(), bh2.common_shape.end()); return common_shape2; }; auto oshape = get_out_shape(); auto c_bh = detail::BroadcastShape(condition->shape, oshape); auto x_bh = detail::BroadcastShape(x->shape, oshape); auto y_bh = detail::BroadcastShape(y->shape, oshape); auto select = [&](tvm::Array<tvm::tir::Var> ovars) { auto c = condition(InputIndexFromBroadcast(ovars, condition, c_bh.vars1, c_bh.all_vars)); auto true_val = x(InputIndexFromBroadcast(ovars, x, x_bh.vars1, x_bh.all_vars)); auto false_val = y(InputIndexFromBroadcast(ovars, y, y_bh.vars1, y_bh.all_vars)); return tvm::tir::Select(c != 0, true_val, false_val); }; return compute(oshape, select, name, tag); } /*! 
* \brief Creates an operation to repeat elements of an array * * \param x The input tensor * \param repeats The number of repetitions for each element * \param axis The axis along which to repeat values (allows * negative indices as offsets from the last dimension) * \param name The name of the operation * \param tag The tag to mark the operation * * \return A Tensor whose op member is the repeat operation */ inline Tensor repeat(const Tensor& x, int repeats, int axis, std::string name = "T_repeat", std::string tag = kBroadcast) { int ndim = static_cast<int>(x->shape.size()); ICHECK(-ndim - 1 <= axis && axis <= ndim) << "repeat only accepts `axis` in [-data.ndim - 1, data.ndim]" << ", but got axis = " << axis << ", and data.ndim = " << ndim; ICHECK(repeats >= 1) << "repeat only accepts `repeats >= 1`" << ", but got repeats = " << repeats; if (axis < 0) { // Calculate offset from last dimension axis += ndim; } Array<PrimExpr> new_shape; for (size_t i = 0; i < static_cast<size_t>(axis); ++i) { new_shape.push_back(x->shape[i]); } new_shape.push_back(repeats * x->shape[axis]); for (size_t i = axis + 1; i < x->shape.size(); ++i) { new_shape.push_back(x->shape[i]); } return compute( new_shape, [&](const Array<Var>& indices) { Array<PrimExpr> idx; for (size_t i = 0; i < static_cast<size_t>(axis); ++i) { idx.push_back(indices[i]); } idx.push_back(indexdiv(indices[axis], repeats)); for (size_t i = axis + 1; i < indices.size(); ++i) { idx.push_back(indices[i]); } return x(idx); }, name, tag); } /*! * \brief Creates an operation to tile elements of an array * * \param x The input tensor * \param reps The number of times for repeating the tensor * \param name The name of the operation * \param tag The tag to mark the operation * * \return A Tensor whose op member is the tile operation */ inline Tensor tile(const Tensor& x, Array<Integer> reps, std::string name = "T_tile", std::string tag = kBroadcast) { size_t ndim = x->shape.size(); size_t rdim = reps.size(); size_t tdim = (ndim > rdim) ? ndim : rdim; Array<PrimExpr> data_shape; Array<PrimExpr> reps_shape; Array<PrimExpr> new_shape; if (ndim == rdim) { for (size_t i = 0; i < ndim; ++i) { data_shape.push_back(x->shape[i]); reps_shape.push_back(reps[i]); } } else if (ndim > rdim) { for (size_t i = 0; i < ndim; ++i) data_shape.push_back(x->shape[i]); for (size_t i = 0; i < (ndim - rdim); ++i) reps_shape.push_back(1); for (size_t i = 0; i < rdim; ++i) reps_shape.push_back(reps[i]); } else { for (size_t i = 0; i < (rdim - ndim); ++i) data_shape.push_back(1); for (size_t i = 0; i < ndim; ++i) data_shape.push_back(x->shape[i]); for (size_t i = 0; i < rdim; ++i) reps_shape.push_back(reps[i]); } for (size_t i = 0; i < tdim; ++i) new_shape.push_back(data_shape[i] * reps_shape[i]); if (is_empty_shape(new_shape)) { return compute( new_shape, [&](const Array<Var>& indices) { return tvm::cast(x->dtype, 0); }, name, tag); } else { return compute( new_shape, [&](const Array<Var>& indices) { Array<PrimExpr> idx; if (ndim >= rdim) { for (size_t i = 0; i < ndim; ++i) idx.push_back(indexmod(indices[i], x->shape[i])); } else { for (size_t i = 0; i < ndim; ++i) idx.push_back(indexmod(indices[rdim - ndim + i], x->shape[i])); } return x(idx); }, name, tag); } } /*! 
* \brief Creates an operation to tile elements of an array * * \param x The input tensor * \param new_shape The shape of the output after tiling * \param rdim The rank of the reps, provided by caller * \param name The name of the operation * \param tag The tag to mark the operation * * \return A Tensor whose op member is the tile operation */ inline Tensor dyn_tile(const Tensor& x, Array<PrimExpr> new_shape, size_t rdim, std::string name = "T_tile", std::string tag = kBroadcast) { size_t ndim = x->shape.size(); if (is_empty_shape(new_shape)) { return compute( new_shape, [&](const Array<Var>& indices) { return tvm::cast(x->dtype, 0); }, name, tag); } else { return compute( new_shape, [&](const Array<Var>& indices) { Array<PrimExpr> idx; if (ndim >= rdim) { for (size_t i = 0; i < ndim; ++i) { idx.push_back(indexmod(indices[i], x->shape[i])); } } else { for (size_t i = 0; i < ndim; ++i) { idx.push_back(indexmod(indices[rdim - ndim + i], x->shape[i])); } } return x(idx); }, name, tag); } } /*! * \brief Gather values along given axis from given indices. * * \param data The input data to the operator. * \param axis The axis along which to index. * \param indices The indices of values to gather. * \param name The name of the operation. * \param tag The tag to mark the operation. * * \return A Tensor whose op member is the gather operation */ inline Tensor gather(const Tensor& data, int axis, const Tensor& indices, std::string name = "T_gather", std::string tag = kInjective) { size_t ndim_d = data->shape.size(); size_t ndim_i = indices->shape.size(); ICHECK_GE(ndim_d, 1) << "Cannot gather from a scalar."; ICHECK_EQ(ndim_d, ndim_i); if (axis < 0) { axis += ndim_d; } ICHECK_GE(axis, 0); ICHECK_LT(axis, ndim_d); if (indices->shape[axis].as<IntImmNode>()) { size_t indices_dim_i = static_cast<size_t>(GetConstInt(indices->shape[axis])); ICHECK_GE(indices_dim_i, 1); } ICHECK(indices->dtype.is_int() || indices->dtype.is_uint()); Array<PrimExpr> out_shape; for (size_t i = 0; i < ndim_i; ++i) { out_shape.push_back(indices->shape[i]); } return compute( out_shape, [&](const Array<Var>& out_index) { Array<PrimExpr> indices_position; for (size_t i = 0; i < ndim_i; ++i) { indices_position.push_back(out_index[i]); } Array<PrimExpr> real_indices; for (size_t i = 0; i < ndim_i; ++i) { if (i == static_cast<size_t>(axis)) { real_indices.push_back(indices(indices_position)); } else { real_indices.push_back(indices_position[i]); } } return data(real_indices); }, name, tag); } /*! * \brief Gather elements from a n-dimension array. * * \param data The source array. * \param indices The indices of the values to extract. * \param batch_dims The number of batch dimensions. * \param name The name of the operation. * \param tag The tag to mark the operation. 
* * \return A Tensor whose op member is the gather_nd operation */ inline Tensor gather_nd(const Tensor& data, const Tensor& indices, int batch_dims = 0, std::string name = "T_gather_nd", std::string tag = kInjective) { size_t ndim_d = data->shape.size(); size_t ndim_i = indices->shape.size(); ICHECK_GE(ndim_i, 1) << "indices tensor must have at least 1 dimension"; size_t indices_dim0 = static_cast<size_t>(GetConstInt(indices->shape[0])); ICHECK_LE(indices_dim0, ndim_d) << "dim 0 of indices tensor must be no more " << "than the rank of the data tensor"; Array<PrimExpr> out_shape; for (size_t i = 1; i < ndim_i; ++i) { out_shape.push_back(indices->shape[i]); } for (size_t i = indices_dim0 + batch_dims; i < ndim_d; ++i) { out_shape.push_back(data->shape[i]); } return compute( out_shape, [&](const Array<Var>& out_index) { Array<PrimExpr> indices_position; indices_position.push_back(0); for (size_t i = 0; i < ndim_i - 1; ++i) { indices_position.push_back(out_index[i]); } Array<PrimExpr> real_indices; for (size_t i = 0; i < static_cast<size_t>(batch_dims); ++i) { real_indices.push_back(out_index[i]); } for (size_t i = 0; i < indices_dim0; ++i) { indices_position.Set(0, make_const(DataType::Int(32), i)); if (indices->dtype.is_int() || indices->dtype.is_uint()) { real_indices.push_back(indices(indices_position)); } else { real_indices.push_back(tvm::cast(tvm::DataType::Int(32), indices(indices_position))); } } if (real_indices.size() == ndim_d) { return data(real_indices); } for (size_t i = ndim_i - 1; i < out_index.size(); ++i) { real_indices.push_back(out_index[i]); } return data(real_indices); }, name, tag); } /*! * \brief Creates an operation that calculates a matrix multiplication * (row-major notation): * A(i, k) * B(k, j), if trans_a == trans_b * the usual transposed combinations, otherwise * * \param A The matrix A * \param B The matrix B * \param trans_a Is A's layout transposed? * \param trans_b Is B's layout transposed? * \param name The name of the operation * \param tag The tag to mark the operation * * \return A Tensor whose op member is the matmul operation */ inline tvm::te::Tensor matmul(const tvm::te::Tensor& A, const tvm::te::Tensor& B, bool trans_a = false, bool trans_b = false, std::string name = "T_matmul", std::string tag = kMatMul) { tvm::Array<tvm::PrimExpr> output_shape{A->shape[trans_a ? 1 : 0], B->shape[trans_b ? 0 : 1]}; auto k = tvm::te::reduce_axis(tvm::Range{0, A->shape[trans_a ? 0 : 1]}, "k"); auto l = [&](tvm::tir::Var i, tvm::tir::Var j) { return tvm::sum((trans_a ? A[k][i] : A[i][k]) * (trans_b ? B[j][k] : B[k][j]), {k}); }; return tvm::te::compute(output_shape, l, name, tag); } /*! * \brief A generalization of matrix multiplication to tensors. 
* * \param A The tensor A * \param B The tensor B * \param axes The number of the dimensions to reduce over * \param name The name of the operation * \param tag The tag to mark the operation * * \return A Tensor computing the result */ inline Tensor tensordot(const Tensor& A, const tvm::te::Tensor& B, int axes = 2, std::string name = "T_tensordot", std::string tag = kMatMul) { ICHECK_GE(A->shape.size(), axes); ICHECK_GE(B->shape.size(), axes); Array<PrimExpr> output_shape(A->shape.begin(), A->shape.end() + (-axes)); for (auto it = B->shape.begin() + axes; it != B->shape.end(); ++it) output_shape.push_back(*it); Array<IterVar> iter_vars; for (int i = 0; i < axes; ++i) iter_vars.push_back(reduce_axis(Range(0, B->shape[i]), "k" + std::to_string(i))); auto func = [&A, &B, &iter_vars, axes](const Array<Var>& input_indices) { Array<PrimExpr> A_indices(input_indices.begin(), input_indices.begin() + (A->shape.size() - axes)); for (auto& v : iter_vars) A_indices.push_back(v); Array<PrimExpr> B_indices; for (auto& v : iter_vars) B_indices.push_back(v); auto it = input_indices.begin() + (A->shape.size() - axes); for (; it != input_indices.end(); ++it) B_indices.push_back(*it); // Some passes don't like reductions with empty axis, so avoid it here if (iter_vars.empty()) { return A(A_indices) * B(B_indices); } else { return sum(A(A_indices) * B(B_indices), iter_vars); } }; return compute(output_shape, func, name, tag); } /*! * \brief A generalization of matrix multiplication to tensors. * * \param A The tensor A * \param B The tensor B * \param A_axes The indices of the dimensions of tensor A to reduce over * \param B_axes The indices of the dimensions of tensor B to reduce over * \param name The name of the operation * \param tag The tag to mark the operation * * \return A Tensor computing the result */ inline Tensor tensordot(const Tensor& A, const tvm::te::Tensor& B, Array<PrimExpr> A_axes, Array<PrimExpr> B_axes, std::string name = "T_tensordot", std::string tag = kMatMul) { ICHECK_EQ(A_axes.size(), B_axes.size()); auto A_axes_val = GetConstIntValues(A_axes, "A_axes"); auto B_axes_val = GetConstIntValues(B_axes, "B_axes"); Array<PrimExpr> output_shape; for (unsigned i = 0; i < A->shape.size(); ++i) if (std::find(A_axes_val.begin(), A_axes_val.end(), i) == A_axes_val.end()) output_shape.push_back(A->shape[i]); for (unsigned i = 0; i < B->shape.size(); ++i) if (std::find(B_axes_val.begin(), B_axes_val.end(), i) == B_axes_val.end()) output_shape.push_back(B->shape[i]); Array<IterVar> iter_vars; for (unsigned i = 0; i < B_axes_val.size(); ++i) iter_vars.push_back(reduce_axis(Range(0, B->shape[B_axes_val[i]]), "k" + std::to_string(i))); auto func = [&A, &B, &iter_vars, A_axes_val, B_axes_val](const Array<Var>& input_indices) { int idx_input = 0; Array<PrimExpr> A_indices; for (unsigned i = 0; i < A->shape.size(); ++i) { auto axes_pos = std::find(A_axes_val.begin(), A_axes_val.end(), i); if (axes_pos == A_axes_val.end()) { A_indices.push_back(input_indices[idx_input++]); } else { A_indices.push_back(iter_vars[axes_pos - A_axes_val.begin()]); } } Array<PrimExpr> B_indices; for (unsigned i = 0; i < B->shape.size(); ++i) { auto axes_pos = std::find(B_axes_val.begin(), B_axes_val.end(), i); if (axes_pos == B_axes_val.end()) { B_indices.push_back(input_indices[idx_input++]); } else { B_indices.push_back(iter_vars[axes_pos - B_axes_val.begin()]); } } return sum(A(A_indices) * B(B_indices), iter_vars); }; return compute(output_shape, func, name, tag); } inline Tensor arange(const PrimExpr& start, const 
PrimExpr& stop, const PrimExpr& step, DataType dtype, std::string name = "T_arange", std::string tag = kInjective) { PrimExpr num_elem = tvm::cast( tvm::DataType::Int(32), tvm::ceil(tvm::cast(tvm::DataType::Float(32), stop - start) / step)); Array<PrimExpr> shape; return compute( {num_elem}, [&](const Array<Var>& indices) { return tvm::cast(dtype, start + step * indices[0]); }, name, tag); } /*! * \brief Produce grids by expanding input over dimensions defined by other inputs * * \param inputs The input tensors * \param indexing The indexing mode, either "xy" or "ij" * \param name The name of the operation * \param tag The tag to mark the operation * * \return A Tensor whose op member is the meshgrid operation */ inline Array<Tensor> meshgrid(const Array<Tensor>& inputs, const std::string& indexing, std::string name = "T_meshgrid", std::string tag = kInjective) { const bool cartesian_indexing = indexing == "xy" && inputs.size() >= 2; Array<PrimExpr> out_shape; for (size_t i = 0; i < inputs.size(); ++i) { const int src_index = (cartesian_indexing && i < 2) ? 1 - i : i; out_shape.push_back(inputs[src_index]->shape.size() == 0 ? 1 : inputs[src_index]->shape[0]); } Array<Tensor> result; for (size_t i = 0; i < inputs.size(); ++i) { result.push_back(compute( out_shape, [&](const Array<Var>& indices) { const int src_index = (cartesian_indexing && i < 2) ? 1 - i : i; auto ndim = inputs[i]->GetShape().size(); Array<PrimExpr> real_indices = {}; if (ndim > 0) { real_indices = {indices[src_index]}; } return inputs[i](real_indices); }, name, tag)); } return result; } /*! * \brief Transform the layout according to \p src_layout and \p dst_layout * \param src the source input. * \param src_layout the source layout. * \param dst_layout the destination layout. * \param name output tensor name. * \param tag output tensor tag. * \return A tensor with shape in \p dst_layout */ inline Tensor layout_transform(const Tensor& src, const std::string& src_layout, const std::string& dst_layout, const std::string name = "T_layout_trans", const std::string tag = kInjective) { Layout src_layout_struct(src_layout); Layout dst_layout_struct(dst_layout); if (src_layout_struct.Equals(dst_layout_struct)) { return src; } ICHECK(src_layout_struct.defined() && dst_layout_struct.defined()) << "cannot convert from/to undefined layout"; auto layout_converter = tir::BijectiveLayout(src_layout_struct, dst_layout_struct); ICHECK(layout_converter.defined()) << "cannot convert from " << src_layout << " to " << dst_layout; Array<PrimExpr> dst_shape = layout_converter.ForwardShape(src->shape); return compute( dst_shape, [&](const Array<Var>& dst_indices) { Array<PrimExpr> dst_indices_expr(dst_indices.begin(), dst_indices.end()); Array<PrimExpr> src_indices = layout_converter.BackwardIndex(dst_indices_expr); PrimExpr in_range = PrimExpr(1) > PrimExpr(0); // init with dtype=bool and value=true for (size_t i = 0; i < src.ndim(); ++i) { in_range = in_range && (src_indices[i] < src->shape[i]); } return if_then_else(in_range, src(src_indices), tvm::cast(src->dtype, PrimExpr(0))); }, name, tag); } /*! 
\brief Utility function for auto_scheduler_layout_transform */ inline void parse_auto_scheduler_layout(const String& layout, Array<PrimExpr>* shape, std::vector<std::string>* axes) { int32_t factor = 0; std::string axis = ""; for (char c : std::string(layout)) { if (c >= 'A' && c <= 'z') { axis += c; if (factor != 0) { shape->push_back(factor); factor = 0; } } else if (c >= '0' && c <= '9') { factor = factor * 10 + c - '0'; if (!axis.empty()) { axes->push_back(axis); axis = ""; } } else { LOG(FATAL) << "Invalid layout " << layout; } } if (!axis.empty()) { axes->push_back(axis); } } /*! * \brief Transform the auto-scheduler generated layout according to * \p src_layout and \p dst_layout * \param src the source input. * \param src_layout the source layout. * \param dst_layout the destination layout. * \param name output tensor name. * \param tag output tensor tag. * \return A tensor with shape in \p dst_layout */ inline Tensor auto_scheduler_layout_transform(const Tensor& src, const String& src_layout, const String& dst_layout, const String name = "T_auto_scheduler_layout_trans", const String tag = kInjective) { Array<PrimExpr> src_shape; std::vector<std::string> src_axes; Array<PrimExpr> dst_shape; std::vector<std::string> dst_axes; parse_auto_scheduler_layout(src_layout, &src_shape, &src_axes); parse_auto_scheduler_layout(dst_layout, &dst_shape, &dst_axes); return compute( dst_shape, [&](const Array<Var>& dst_indices) { Array<PrimExpr> dst_indices_expr(dst_indices.begin(), dst_indices.end()); Array<PrimExpr> src_indices; for (const std::string& src_axis : src_axes) { PrimExpr src_index = 0; CHECK_EQ(dst_indices_expr.size(), dst_axes.size()); for (size_t i = 0; i < dst_axes.size(); ++i) { if (dst_axes[i] == src_axis) { src_index = src_index * dst_shape[i] + dst_indices_expr[i]; } } src_indices.push_back(src_index); } return src(src_indices); }, name, tag); } /*! * \brief Transform the meta-schedule generated layout according to TIR's IndexMap * \param src the source input. * \param index_map The TIR IndexMap * \param name output tensor name. * \param tag output tensor tag. * \return A tensor whose layout has been transformed by \p index_map. * \note Example: * * For the indexing pattern below: * * for i in range(32): * for j in range(64): * load A[ * i / 16 * 4 + j / 16, * i % 16 * 16 + j % 16, * ] * * The corresponding indexing pattern in TIR is: * * A[i, j] => A'[i / 4, j / 16, i % 4, j % 16] * * which converts the pattern to: * * for i in range(32): * for j in range(64): * load A'[ * i / 16 + j / 64, * i % 16, * j % 64 / 16, * j % 16, * ] * * In this case, the transformation pattern is: * A'[a, b, c, d] = A[a * 4 + c, b * 16 + d] */ inline Tensor meta_schedule_layout_transform(const Tensor& src, const tir::IndexMap& index_map, const String name = "T_meta_schedule_layout_trans", const String tag = kInjective) { Array<Range> iter_domain; iter_domain.reserve(src->shape.size()); for (const PrimExpr& e : src->shape) { iter_domain.push_back(Range::FromMinExtent(make_zero(e->dtype), e)); } Array<PrimExpr> post_transform_shape = index_map->MapShape(src->shape); return compute( post_transform_shape, [src, inv = index_map.Inverse(iter_domain)](const Array<Var>& indices) -> PrimExpr { return src(inv->MapIndices(Array<PrimExpr>{indices.begin(), indices.end()})); }, name, tag); } /*! * \brief Get the shape of input tensor. * \param src the input tensor. * \param dtype the type of the elements in the tensor. * \param name output tensor name. * \param tag output tensor tag. * \return Tensor of input shape. 
*/ inline Tensor shape(const Tensor& src, DataType dtype, const std::string name = "T_shape", const std::string tag = kInjective) { int ndim = static_cast<int>(src->shape.size()); Array<PrimExpr> out_shape{ndim}; return compute( out_shape, [&](const Array<Var>& indices) { auto idx = indices[0]; PrimExpr ret = 0; for (int i = 0; i < ndim; ++i) { ret = tvm::if_then_else(idx == i, src->shape[i], ret); } return tvm::cast(dtype, ret); }, name, tag); } /*! * \brief Get the size of input tensor. * \param src the input tensor. * \param dtype the type of the elements in the tensor. * \param name output tensor name. * \param tag output tensor tag. * \return 0-D tensor containing the total number of elements of src. */ inline Tensor ndarray_size(const Tensor& src, const DataType& dtype, const std::string& name = "ndarray_size", const std::string& tag = kInjective) { int ndim = static_cast<int>(src->shape.size()); Array<PrimExpr> out_ndarray_size = {}; return compute( out_ndarray_size, [&](const Array<Var>& indices) { PrimExpr ret = 1; for (int i = 0; i < ndim; ++i) { ret *= src->shape[i]; } return tvm::cast(dtype, ret); }, name, tag); } /*! * \brief Returns a one-hot tensor where the locations represented by indices take value on_value, while other locations take value off_value. * \param indices locations to set to on_value. * \param on_value value that locations represented by indices take on. * \param off_value value that other locations take on. * \param depth depth of the one-hot dimension. * \param axis axis to fill. * \param dtype data type of the output tensor. * \param oshape shape of the output tensor. * \param name output tensor name. * \param tag output tensor tag. * \return one-hot tensor. */ inline Tensor one_hot(const Tensor& indices, const PrimExpr on_value, const PrimExpr off_value, int depth, int axis, const DataType& dtype, Array<PrimExpr> oshape = Array<PrimExpr>(), const std::string name = "T_one_hot", const std::string tag = kInjective) { int true_axis = (axis == -1) ? indices->shape.size() : axis; if (oshape.size() == 0) { int ndim = indices->shape.size() + 1; int indices_index = 0; for (int i = 0; i < ndim; i++) { if (i == true_axis) { oshape.push_back(Integer(depth)); } else { oshape.push_back(indices->shape[indices_index++]); } } } PrimExpr on_value_cast = cast(dtype, on_value); PrimExpr off_value_cast = cast(dtype, off_value); return compute( oshape, [&](const Array<Var>& iter_vars) { Array<Var> indices_indices; for (size_t i = 0; i < iter_vars.size(); i++) { if (static_cast<int>(i) == true_axis) { continue; } indices_indices.push_back(iter_vars[i]); } auto idx = iter_vars[true_axis]; return tir::Select(indices(indices_indices) == idx, on_value_cast, off_value_cast); }, name, tag); } /*! * \brief Get a dense tensor. * \param sparse_indices sparse_indices[i] contains the complete index where sparse_values[i] will be placed. * \param output_shape is the shape of the dense output tensor. * \param sparse_values is a 0-D or 1-D tensor. Values for each row of sparse_indices. * \param default_value is a 0-D tensor. Defaults to zero. * \param name output tensor name. * \param tag output tensor tag. * \return Tensor of output_shape. 
*/ inline Tensor sparse_to_dense(const Tensor& sparse_indices, const Array<PrimExpr>& output_shape, const Tensor& sparse_values, const PrimExpr& default_value, const std::string name = "T_sparse_to_dense", const std::string tag = kInjective) { ICHECK(sparse_indices->dtype.is_int()) << "sparse_indices only accepts integer values"; ICHECK_LE(sparse_indices->shape.size(), 3) << "sparse_indices tensor should be 0D, 1D, or 2D only"; ICHECK_LE(sparse_values->shape.size(), 2) << "sparse_values tensor should be 0D or 1D only"; const auto rank_sparse_indices = static_cast<int>(sparse_indices->shape.size()); Array<PrimExpr> oshape; for (auto l : output_shape) { oshape.push_back(l); } return compute( oshape, [&](const Array<Var>& indices) { PrimExpr ret = default_value; if (0 == rank_sparse_indices) { ret = if_then_else(indices[0] == sparse_indices(), sparse_values(), ret); } else if (1 == rank_sparse_indices) { for (int j = 0; j < GetConstInt(sparse_indices->shape[0]); j++) { ret = if_then_else(indices[0] == sparse_indices[j], sparse_values[j], ret); } } else { for (int j = 0; j < GetConstInt(sparse_indices->shape[0]); j++) { PrimExpr aggregate_condition; for (int k = 0; k < GetConstInt(sparse_indices->shape[1]); k++) { PrimExpr comparison = indices[k] == sparse_indices[j][k]; aggregate_condition = 0 == k ? comparison : aggregate_condition && comparison; } ret = if_then_else(aggregate_condition, sparse_values[j], ret); } } return ret; }, name, tag); } /*! * \brief Returns a tensor with the diagonal of input tensor replaced with the provided diagonals. * \param input input tensor. * \param diagonal values to be filled in the diagonals. * \param k1 lower limit (included) of the range of diagonals. * \param k2 upper limit (included) of the range of diagonals. * \param super_diag_right_align bool, true iff super-diagonal is right aligned (left-padded). * \param sub_diag_right_align bool, true iff sub-diagonal is right aligned (left-padded). * \param name output tensor name. * \param tag output tensor tag. * \return new tensor with given diagonal values. */ inline Tensor matrix_set_diag(const Tensor& input, const Tensor& diagonal, int k1, int k2, bool super_diag_right_align, bool sub_diag_right_align, const std::string name = "T_matrix_set_diag", const std::string tag = kInjective) { size_t ndim = input->shape.size() - 1; bool only_one_diagonal = k1 == k2; return compute( input->shape, [&](const Array<Var>& iter_vars) { auto get_diag = [&]() { Array<PrimExpr> diagonal_indices; PrimExpr k, offset = 0; for (size_t i = 0; i < ndim - 1; i++) { diagonal_indices.push_back(iter_vars[i]); } if (only_one_diagonal) { k = k1; } else { // Determining which diagonal/sub-diagonal/super-diagonal it is k = iter_vars[ndim] - iter_vars[ndim - 1]; diagonal_indices.push_back(k2 - k); // Calculating the offset in diagonal tensor for this diagonal auto get_offset = [&](PrimExpr M, PrimExpr N) { // offset = max_diagonal_length - diagonal_length return diagonal->shape[diagonal->shape.size() - 1] - if_then_else(M < N, M, N); }; offset = if_then_else( k >= 0, super_diag_right_align ? get_offset(input->shape[ndim] - k, input->shape[ndim - 1]) : 0, sub_diag_right_align ? 
get_offset(input->shape[ndim], input->shape[ndim - 1] + k) : 0); } diagonal_indices.push_back(if_then_else(k >= 0, iter_vars[ndim - 1], iter_vars[ndim]) + offset); return diagonal(diagonal_indices); }; return if_then_else((PrimExpr)iter_vars[ndim] - iter_vars[ndim - 1] >= k1, if_then_else((PrimExpr)iter_vars[ndim] - iter_vars[ndim - 1] <= k2, get_diag(), input(iter_vars)), input(iter_vars)); }, name, tag); } /*! * \brief Numpy style advanced indexing with tensor. * \param data is input data. * \param indices is list of indexing tensors. * \param name output tensor name. * \param tag output tensor tag. * \return Output tensor. */ inline Tensor adv_index(const Tensor& data, const Array<Tensor>& indices, const std::string name = "advanced_index", const std::string tag = kInjective) { ICHECK_LE(indices.size(), data->shape.size()) << "too many indices for data!"; Array<PrimExpr> oshape; Array<PrimExpr> broadcast_shape; Array<Tensor> bindices; broadcast_shape = indices[0]->shape; for (size_t i = 1; i < indices.size(); ++i) { auto bh = detail::BroadcastShape(broadcast_shape, indices[i]->shape); broadcast_shape = Array<PrimExpr>(bh.common_shape.begin(), bh.common_shape.end()); } if (indices.size() == 1) { // quick path bindices = indices; } else { // Do broadcast for indices for (size_t i = 0; i < indices.size(); ++i) { bindices.push_back(broadcast_to(indices[i], broadcast_shape)); } } for (const auto& dim : broadcast_shape) { oshape.push_back(dim); } for (size_t i = indices.size(); i < data->shape.size(); ++i) { oshape.push_back(data->shape[i]); } return compute( oshape, [&](const Array<Var>& iter_var) { Array<PrimExpr> tensor_indices; for (size_t i = 0; i < broadcast_shape.size(); ++i) { tensor_indices.push_back(iter_var[i]); } Array<PrimExpr> real_indices; for (size_t i = 0; i < bindices.size(); ++i) { real_indices.push_back(bindices[i](tensor_indices)); } for (size_t i = broadcast_shape.size(); i < iter_var.size(); ++i) { real_indices.push_back(iter_var[i]); } return data(real_indices); }, name, tag); } } // namespace topi } // namespace tvm #endif // TVM_TOPI_TRANSFORM_H_
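A brief usage sketch for the ops above (illustrative only, not part of the upstream header; shapes, tensor names, and the demo namespace are assumptions chosen for the example):

// Sketch: instantiating topi::take and topi::where from C++.
#include <tvm/te/operation.h>
#include <tvm/topi/transform.h>

namespace demo {

inline tvm::te::Tensor take_rows() {
  using namespace tvm;
  // 8x16 input; gather 4 rows along axis 0, clipping out-of-bounds indices.
  te::Tensor data = te::placeholder({8, 16}, DataType::Float(32), "data");
  te::Tensor idx = te::placeholder({4}, DataType::Int(32), "idx");
  return topi::take(data, idx, /*batch_dims=*/0, /*axis=*/0, "clip");
}

inline tvm::te::Tensor masked_select() {
  using namespace tvm;
  // Elementwise select: x where cond != 0, else y.
  te::Tensor cond = te::placeholder({8, 16}, DataType::Int(32), "cond");
  te::Tensor x = te::placeholder({8, 16}, DataType::Float(32), "x");
  te::Tensor y = te::placeholder({8, 16}, DataType::Float(32), "y");
  return topi::where(cond, x, y);
}

}  // namespace demo

Both results lower like any other injective TOPI op, e.g. via te::create_schedule on the output operations.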
https://github.com/zk-ml/tachikoma
include/tvm/topi/utils.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \brief Topi utility function * \file topi/utils.h */ #ifndef TVM_TOPI_UTILS_H_ #define TVM_TOPI_UTILS_H_ #include <tvm/ir/expr.h> #include <tvm/runtime/packed_func.h> namespace tvm { namespace topi { using namespace tvm::runtime; /*! \brief Canonicalize an argument that may be Array<Expr> or int to Array<Expr> */ inline Array<Integer> ArrayOrInt(TVMArgValue arg) { if (arg.type_code() == kDLInt || arg.type_code() == kDLUInt) { Array<Integer> result; result.push_back(arg.operator int()); return result; } else { return arg; } } } // namespace topi } // namespace tvm #endif // TVM_TOPI_UTILS_H_
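A sketch of the intended call pattern for ArrayOrInt: a packed-function entry point whose argument may arrive from the FFI either as a bare integer or as an Array<Integer>. The global name "demo.tile" is hypothetical, not an existing TVM registration:

#include <tvm/runtime/registry.h>
#include <tvm/topi/transform.h>
#include <tvm/topi/utils.h>

// Hypothetical registration ("demo.tile" is an assumption for this sketch).
TVM_REGISTER_GLOBAL("demo.tile")
    .set_body([](tvm::runtime::TVMArgs args, tvm::runtime::TVMRetValue* rv) {
      tvm::te::Tensor x = args[0];
      // args[1] may be a plain int or an Array<Integer>; canonicalize it.
      tvm::Array<tvm::Integer> reps = tvm::topi::ArrayOrInt(args[1]);
      *rv = tvm::topi::tile(x, reps);
    });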
https://github.com/zk-ml/tachikoma
include/tvm/topi/vision/reorg.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \brief Reorg op constructions * \file vision/reorg.h */ #ifndef TVM_TOPI_VISION_REORG_H_ #define TVM_TOPI_VISION_REORG_H_ #include <tvm/te/operation.h> #include <tvm/topi/detail/constant_utils.h> #include <tvm/topi/reduction.h> #include <tvm/topi/tags.h> #include <tvm/topi/transform.h> #include <algorithm> #include <string> namespace tvm { namespace topi { namespace vision { using namespace tvm::te; /*! * \brief Reorg operation * * \param data The input tensor. Can be any dimension * \param stride The input integer used as stride in reorg operation * \param name The name of the operation * \param tag The tag to mark the operation * * \return A Tensor whose op member is the reorg operation */ inline Tensor reorg(const Tensor& data, int stride = 1, std::string name = "tensor", std::string tag = "reorg_output") { auto input_shape = data->shape; int batch = GetConstInt(input_shape[0]); int c_in = GetConstInt(input_shape[1]); int h_in = GetConstInt(input_shape[2]); int w_in = GetConstInt(input_shape[3]); int out_c = c_in / (stride * stride); auto out = tvm::te::compute( input_shape, [&](Var b, Var k, Var j, Var i) { return data(b * stride * stride, indexmod(k, out_c) * stride * stride, (j * stride + indexdiv(indexdiv(k, out_c), stride)) * stride, (i * stride + indexmod(indexdiv(k, out_c), stride))); }, name, tag); out_c = c_in * stride * stride; int out_h = h_in / stride; int out_w = w_in / stride; Array<PrimExpr> out_shape = {batch, out_c, out_h, out_w}; return reshape(out, out_shape); } } // namespace vision } // namespace topi } // namespace tvm #endif // TVM_TOPI_VISION_REORG_H_
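A minimal sketch of invoking the reorg op above (the shape is an assumption; note the op calls GetConstInt, so all dimensions must be static, and the channel count must be divisible by stride * stride):

#include <tvm/te/operation.h>
#include <tvm/topi/vision/reorg.h>

// NCHW input 1x16x8x8; stride 2 folds each 2x2 spatial block into the
// channel dimension, producing a 1x64x4x4 result.
inline tvm::te::Tensor demo_reorg() {
  using namespace tvm;
  te::Tensor data = te::placeholder({1, 16, 8, 8}, DataType::Float(32), "data");
  return topi::vision::reorg(data, /*stride=*/2);
}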
https://github.com/zk-ml/tachikoma
include/tvm/topi/x86/bnn.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file x86/bnn.h * \brief x86 schedule for binary operations */ #ifndef TVM_TOPI_X86_BNN_H_ #define TVM_TOPI_X86_BNN_H_ #include <tvm/target/generic_func.h> #include <tvm/te/operation.h> #include <tvm/topi/detail/fuse.h> #include <tvm/topi/tags.h> namespace tvm { namespace topi { using namespace tvm::te; namespace x86 { /*! * \brief Create a generic schedule for binarize_pack * * \param target The target to generate a schedule for. * \param outs The output tensors. * * \return A schedule for the given ops. */ inline Schedule schedule_binarize_pack(const Target& target, const Array<Tensor>& outs) { Array<Operation> out_ops; for (auto t : outs) { out_ops.push_back(t->op); } auto s = create_schedule(out_ops); auto _schedule = [&](const Tensor& out) { s[out].parallel(out->op.as<ComputeOpNode>()->axis[0]); }; std::function<void(Operation)> traverse; traverse = [&](const Operation& op) { if (op->tag == "binarize_pack") { _schedule(op.output(0)); } else { LOG(ERROR) << "Unsupported operator " << op->tag; } }; traverse(outs[0]->op); return s; } /*! * \brief Create a generic schedule for binary_dense * * \param target The target to generate a schedule for. * \param outs The output tensors. * * \return A schedule for the given ops. */ inline Schedule schedule_binary_dense(const Target& target, const Array<Tensor>& outs) { Array<Operation> out_ops; for (auto t : outs) { out_ops.push_back(t->op); } auto s = create_schedule(out_ops); auto _schedule = [&](const Tensor& A, const Tensor& B, const Tensor& C) { IterVar co, ci; s[C].split(s[C]->op.as<ComputeOpNode>()->reduce_axis[0], 8, &co, &ci); s[C].parallel(s[C]->op.as<ComputeOpNode>()->axis[0]); Tensor out; if (detail::contains(s->outputs, C->op)) { out = C; } else { out = outs[0]->op.output(0); } IterVar xo, xi; s[out].split(out->op.as<ComputeOpNode>()->axis[1], 8, &xo, &xi); s[out].vectorize(xi); }; std::function<void(Operation)> traverse; traverse = [&](const Operation& op) { // Inline all one-to-one-mapping operators except the last stage (output) if (is_broadcast(op->tag)) { if (!detail::contains(s->outputs, op)) { s[op].compute_inline(); } for (auto tensor : op->InputTensors()) { if (tensor->op->InputTensors().size() > 0) { traverse(tensor->op); } } } else if (op->tag == "binary_dense") { auto output = op.output(0); auto data = op->InputTensors()[0]; auto weight = op->InputTensors()[1]; _schedule(data, weight, output); } else { LOG(ERROR) << "Unsupported operator " << op->tag; } }; traverse(outs[0]->op); return s; } } // namespace x86 } // namespace topi } // namespace tvm #endif // TVM_TOPI_X86_BNN_H_
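A sketch pairing the x86 schedule above with topi::binarize_pack (declared in tvm/topi/nn/bnn.h in mainline TVM); the shapes and the demo function name are assumptions:

#include <tvm/target/target.h>
#include <tvm/te/operation.h>
#include <tvm/topi/nn/bnn.h>
#include <tvm/topi/x86/bnn.h>

// Packs 512 float32 channels into 512/32 = 16 uint32 words per row, then
// builds the x86 schedule for the packed result.
inline tvm::te::Schedule demo_binarize_pack(const tvm::Target& target) {
  using namespace tvm;
  te::Tensor x = te::placeholder({64, 512}, DataType::Float(32), "x");
  te::Tensor packed = topi::binarize_pack(x, /*axis=*/1);
  return topi::x86::schedule_binarize_pack(target, {packed});
}

For example, demo_binarize_pack(Target("llvm")) yields a schedule whose outer axis is parallelized, as set up by the traversal in schedule_binarize_pack.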
https://github.com/zk-ml/tachikoma