file_path
stringlengths
7
180
content
stringlengths
0
811k
repo
stringclasses
11 values
src/target/llvm/intrin_rule_llvm.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file intrin_rule_llvm.h
 * \brief Common utilities for llvm intrinsics.
 */
#ifndef TVM_TARGET_LLVM_INTRIN_RULE_LLVM_H_
#define TVM_TARGET_LLVM_INTRIN_RULE_LLVM_H_
#ifdef TVM_LLVM_VERSION

#include <tvm/runtime/registry.h>
#include <tvm/target/codegen.h>
#include <tvm/tir/builtin.h>
#include <tvm/tir/expr.h>

namespace tvm {
namespace codegen {

namespace detail {
/*!
 * \brief Shared implementation for DispatchLLVMPureIntrin/DispatchLLVMIntrin.
 *
 * Rewrites the call expression \p e into a call of \p op where the first two
 * arguments encode the LLVM intrinsic id and the number of arguments used to
 * query the intrinsic signature, followed by the original call arguments.
 *
 * \param e The call expression to rewrite; must be a tir::CallNode.
 * \param id The LLVM intrinsic id.
 * \param num_signature Number of arguments used to query the signature.
 * \param op The builtin op to dispatch to (call_llvm_intrin or
 *           call_llvm_pure_intrin).
 * \return The rewritten call expression.
 */
inline PrimExpr DispatchLLVMIntrinCall(const PrimExpr& e, unsigned id, int num_signature,
                                       const Op& op) {
  const tir::CallNode* call = e.as<tir::CallNode>();
  ICHECK(call != nullptr);
  Array<PrimExpr> cargs;
  // intrin id.
  cargs.push_back(IntImm(DataType::UInt(32), id));
  // number of arguments used to query the signature.
  cargs.push_back(IntImm(DataType::UInt(32), num_signature));
  for (PrimExpr arg : call->args) {
    cargs.push_back(arg);
  }
  return tir::Call(call->dtype, op, cargs);
}
}  // namespace detail

// num_signature means number of arguments used to query signature

/*! \brief Dispatch \p e to a side-effect-free LLVM intrinsic call. */
template <unsigned id, int num_signature>
inline PrimExpr DispatchLLVMPureIntrin(const PrimExpr& e) {
  return detail::DispatchLLVMIntrinCall(e, id, num_signature,
                                        tir::builtin::call_llvm_pure_intrin());
}

/*! \brief Dispatch \p e to an LLVM intrinsic call (may have side effects). */
template <unsigned id, int num_signature>
inline PrimExpr DispatchLLVMIntrin(const PrimExpr& e) {
  return detail::DispatchLLVMIntrinCall(e, id, num_signature, tir::builtin::call_llvm_intrin());
}

}  // namespace codegen
}  // namespace tvm

#endif  // TVM_LLVM_VERSION
#endif  // TVM_TARGET_LLVM_INTRIN_RULE_LLVM_H_
https://github.com/zk-ml/tachikoma
src/target/llvm/llvm_instance.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*! \file llvm_instance.h */
#ifndef TVM_TARGET_LLVM_LLVM_INSTANCE_H_
#define TVM_TARGET_LLVM_LLVM_INSTANCE_H_

#ifdef TVM_LLVM_VERSION

#include <llvm/ADT/ArrayRef.h>
#if TVM_LLVM_VERSION >= 150
#include <llvm/IR/FMF.h>
#else
#include <llvm/IR/Operator.h>
#endif
#include <llvm/Support/CodeGen.h>
#include <llvm/Target/TargetOptions.h>
#include <tvm/ir/expr.h>
#include <tvm/runtime/container/array.h>
#include <tvm/runtime/container/optional.h>
#include <tvm/runtime/container/string.h>
#include <tvm/target/target.h>

#include <algorithm>
#include <memory>
#include <string>
#include <utility>
#include <vector>

// Forward declarations so this header does not pull in heavy LLVM headers.
namespace llvm {
class LLVMContext;
class MemoryBuffer;
class Module;
class TargetMachine;
}  // namespace llvm

namespace tvm {
namespace codegen {

class LLVMTarget;

/*!
 * \class LLVMInstance
 * \brief LLVMInstance is a class that (conceptually) starts and stops LLVM.
 *        All uses of LLVM should take place within a lifetime of an object
 *        of this class.
 *
 * E.g.
 * ```{.cpp}
 * {
 *   LLVMInstance llvm_instance;
 *   ...
 *   someFunctionFromLLVM(...);
 *   ...
 * }
 * // no more calls to LLVM here
 * ```
 * In addition to that, LLVMInstance provides an LLVM context (llvm::LLVMContext).
 * The context is a structure in LLVM where common IR constructs are maintained,
 * (such as types, constants, etc.) so that they can be identified by their
 * address (i.e. pointer comparison). Because of that, it's important to use
 * the same context throughout compilation.
 *
 * At the moment the "starting" of LLVM performs initialization of LLVM, but
 * "stopping" doesn't do anything. In the future, if such a need arises, this
 * functionality may be extended to perform dlopen/dlclose of the LLVM-based
 * code in TVM.
 *
 * This class provides means to deserialize an LLVM module, either from text
 * (in a string), or from a file. In either case, the serialized module can
 * be LLVM IR assembly, or binary bitcode encoding.
 */
class LLVMInstance {
 public:
  /*!
   * \brief Constructs LLVMInstance
   */
  LLVMInstance();
  /*!
   * \brief Destroys LLVMInstance object
   */
  ~LLVMInstance();  // Must not be "= default" here in the header file.

  /*!
   * \brief Get the LLVM context for this scope.
   */
  std::shared_ptr<llvm::LLVMContext> GetContext() const { return ctx_; }

  /*!
   * \brief Create `llvm::Module` from a string.
   *
   * Parse the string in \param llvm_ir, and return the `llvm::Module`.
   * At the moment this function will abort if the parsing fails.
   * \param llvm_ir string with the LLVM IR assembly or bitcode
   * \return created `llvm::Module`
   */
  std::unique_ptr<llvm::Module> ParseIR(const std::string& llvm_ir) const;

  /*!
   * \brief Load `llvm::Module` from a given file
   *
   * Read the file \param file_name, and return the `llvm::Module`.
   * At the moment this function will abort if reading of the file or creation
   * of the module fails.
   * \param file_name file with the LLVM IR assembly or bitcode
   * \return created `llvm::Module`
   */
  std::unique_ptr<llvm::Module> LoadIR(const std::string& file_name) const;

 private:
  // Common deserialization path shared by ParseIR and LoadIR.
  std::unique_ptr<llvm::Module> ParseBuffer(const llvm::MemoryBuffer& buffer) const;

  std::shared_ptr<llvm::LLVMContext> ctx_;
};

/*!
 * \class LLVMTargetInfo
 * \brief Summary of information for this TVM target relevant to LLVM code
 *        generation.
 *
 * This class contains all information that LLVM needs for code generation for
 * a particular target. The purpose of this class is only to provide information
 * in an easily-accessible form (for example for querying the target properties).
 *
 * Note that objects of this class must be created within the lifetime of an
 * LLVMInstance object.
 */
class LLVMTargetInfo {
 public:
  /*!
   * \brief Constructs LLVMTargetInfo from `Target`
   * \param scope LLVMInstance object
   * \param target TVM Target object for target "llvm"
   */
  LLVMTargetInfo(LLVMInstance& scope, const Target& target);  // NOLINT(runtime/references)
  /*!
   * \brief Constructs LLVMTargetInfo from target string
   * \param scope LLVMInstance object
   * \param target TVM target string for target "llvm"
   */
  // NOLINTNEXTLINE(runtime/references)
  LLVMTargetInfo(LLVMInstance& scope, const std::string& target_str);
  /*!
   * \brief Destroys LLVMTargetInfo object
   */
  ~LLVMTargetInfo();

  /*!
   * \brief Returns string representation (as TVM target) of the LLVMTargetInfo
   * \return Target string
   *
   * Note: If the LLVMTargetInfo object was created from a string `s`, the string
   * returned here may not be exactly equal to `s`. For example, if the CPU
   * was "default", the returned string will have CPU set to the detected host
   * CPU.
   */
  std::string str() const;

  /*!
   * \brief Return LLVM's `TargetMachine`, or nullptr
   * \param allow_missing do not abort if the target machine cannot be created,
   *        return nullptr instead
   * \return Pointer to the `TargetMachine` object (or nullptr if it cannot be
   *         created, \see allow_missing)
   */
  llvm::TargetMachine* GetOrCreateTargetMachine(bool allow_missing = false);

  /*!
   * \brief Get the target triple
   * \return the target triple
   */
  const std::string& GetTargetTriple() const { return triple_; }

  /*!
   * \brief Get the CPU name
   * \return the CPU name: the detected host CPU if the original TVM target
   *         specified it as "default"
   */
  const std::string& GetCPU() const { return cpu_; }

  /*!
   * \brief Get the list of LLVM target features
   * \return array of individual feature strings
   */
  llvm::ArrayRef<std::string> GetTargetFeatures() const { return attrs_; }

  /*!
   * \brief Get the LLVM target feature string
   * \return comma-separated list of LLVM target features
   */
  std::string GetTargetFeatureString() const;

  /*!
   * \brief Get the LLVM target options
   * \return `llvm::TargetOptions` object for this target
   */
  const llvm::TargetOptions& GetTargetOptions() const { return target_options_; }

  /*!
   * \brief Get fast math flags
   * \return `llvm::FastMathFlags` for this target
   */
  llvm::FastMathFlags GetFastMathFlags() const { return fast_math_flags_; }

  /*!
   * \brief Get the LLVM optimization level
   * \return optimization level for this target
   */
  llvm::CodeGenOpt::Level GetOptLevel() const { return opt_level_; }

  /*!
   * \class Option
   * \brief Internal representation of command-line option
   */
  struct Option {
    enum class OptType {
      Invalid = 0,  //!< placeholder, indicates parsing error
      Bool,         //!< enum value corresponding to type string "bool"
      Int,          //!< enum value corresponding to type string "int"
      UInt,         //!< enum value corresponding to type string "uint"
      String,       //!< enum value corresponding to type string "string"
    };
    std::string name;  //!< option name
    OptType type;      //!< type of the option value
    struct {
      union {
        bool b;          //!< bool option value
        int i;           //!< int option value
        unsigned u = 0;  //!< unsigned option value
      };
      std::string s;  //!< string option value
    } value;          //!< option value specified in the option string
  };

  /*!
   * \brief Get LLVM command line options
   * \return the list of LLVM command line options specified for this target
   */
  const std::vector<Option>& GetCommandLineOptions() const { return llvm_options_; }

  /*!
   * \brief Parse a string from the `cl-opt` target attribute
   * \param str the option string
   * \return parsed `Option` object, if parsing failed the type member will be
   *         set to `Option::OptType::Invalid`
   */
  static Option ParseOptionString(const std::string& str);

  /*!
   * \brief Checks if the settings in this object that describe global state
   *        match the current global state
   * \return true or false correspondingly
   * \note The global state can be modified by command line options. This
   *       function checks if the specified options differ from their current
   *       values.
   */
  bool MatchesGlobalState() const;

 protected:
  /*!
   * \brief Get the current value of given LLVM option
   * \param opt Option with "type" and "name" set
   * Fills in the "value" field in the provided Option argument, or sets the
   * "type" to Invalid if the option value cannot be obtained.
   */
  void GetOptionValue(Option* opt) const;

 private:
  std::string triple_;
  std::string cpu_;
  std::vector<std::string> attrs_;
  std::vector<Option> llvm_options_;
  llvm::TargetOptions target_options_;
  llvm::FastMathFlags fast_math_flags_;
  llvm::CodeGenOpt::Level opt_level_;
  llvm::Reloc::Model reloc_model_ = llvm::Reloc::PIC_;
  llvm::CodeModel::Model code_model_ = llvm::CodeModel::Small;
  std::shared_ptr<llvm::TargetMachine> target_machine_;
};

/*!
 * \class LLVMTarget
 * \brief Information used by LLVM for code generation for particular target
 *
 * In addition to all information that LLVM needs for code generation for
 * a particular target, objects of this class handle saving and restoring
 * global LLVM state that may be affected by these flags. This way, code
 * generation for each LLVM-based target in TVM will start with the same LLVM
 * global state.
 *
 * Note that objects of this class must be created within the lifetime of an
 * LLVMInstance object.
 */
class LLVMTarget : public LLVMTargetInfo {
 public:
  /*!
   * \brief Constructs LLVMTarget from `Target`
   * \param scope LLVMInstance object
   * \param target_info Target info object for target "llvm"
   */
  LLVMTarget(LLVMInstance& scope, const LLVMTargetInfo& target_info);  // NOLINT(runtime/references)
  /*!
   * \brief Constructs LLVMTarget from `Target`
   * \param scope LLVMInstance object
   * \param target TVM Target object for target "llvm"
   */
  LLVMTarget(LLVMInstance& scope, const Target& target);  // NOLINT(runtime/references)
  /*!
   * \brief Constructs LLVMTarget from target string
   * \param scope LLVMInstance object
   * \param target TVM target string for target "llvm"
   */
  LLVMTarget(LLVMInstance& scope, const std::string& target_str);  // NOLINT(runtime/references)
  /*!
   * \brief Destroys LLVMTarget object
   */
  ~LLVMTarget();

  /*!
   * \brief Get the LLVMInstance object from which the LLVMTarget object was
   *        created
   * \return The enclosing LLVMInstance object
   */
  const LLVMInstance& GetInstance() const { return instance_; }

  /*!
   * \brief Get the current LLVM context
   * \return the current LLVM context
   */
  llvm::LLVMContext* GetContext() const;

  /*!
   * \brief Extract the target string from given `llvm::Module`
   * \param module LLVM module with the TVM target string embedded as metadata
   * \return the target string from module's metadata
   */
  static std::string GetTargetMetadata(const llvm::Module& module);
  /*!
   * \brief Embed target string as metadata in given `llvm::Module`
   * \param module the module to insert the target string into
   */
  void SetTargetMetadata(llvm::Module* module) const;

  // Stubs to enable use with `With`.
  void EnterWithScope() {}
  void ExitWithScope() {}

 private:
  std::vector<Option> saved_llvm_options_;

  /*!
   * \brief Apply or revert command-line LLVM options
   * \param apply_otherwise_revert if true, apply the options (saving previous
   *        values), if false, then restore the saved values
   * \param dry_run if true, do not make any changes (or save anything)
   * \return true if changes were made (or would have been made in a dry run),
   *         false otherwise
   */
  bool ApplyLLVMOptions(bool apply_otherwise_revert, bool dry_run = false);

  const LLVMInstance& instance_;
  std::weak_ptr<llvm::LLVMContext> ctx_;

  /*!
   * \brief Global singleton flag indicating whether LLVM's global state has
   *        been modified or not (via command-line flags). There can only be
   *        a single such modification in effect at any given time.
   */
  static bool modified_llvm_state_;
};

}  // namespace codegen
}  // namespace tvm

#endif  // TVM_LLVM_VERSION
#endif  // TVM_TARGET_LLVM_LLVM_INSTANCE_H_
https://github.com/zk-ml/tachikoma
src/target/llvm/llvm_module.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file llvm_module.h * \brief Declares top-level shared functions related to the LLVM codegen. */ #ifndef TVM_TARGET_LLVM_LLVM_MODULE_H_ #define TVM_TARGET_LLVM_LLVM_MODULE_H_ #ifdef TVM_LLVM_VERSION #include <tvm/relay/runtime.h> #include <tvm/runtime/container/array.h> #include <tvm/runtime/metadata.h> #include <tvm/runtime/module.h> #include <tvm/target/target.h> namespace tvm { namespace codegen { runtime::Module CreateLLVMCppMetadataModule(runtime::metadata::Metadata metadata, Target target, tvm::relay::Runtime runtime); runtime::Module CreateLLVMCrtMetadataModule(const Array<runtime::Module>& modules, Target target, tvm::relay::Runtime runtime); } // namespace codegen } // namespace tvm #endif // TVM_LLVM_VERSION #endif // TVM_TARGET_LLVM_LLVM_MODULE_H_
https://github.com/zk-ml/tachikoma
src/target/metadata.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/target/metadata.h * \brief Extends Metadata for use in the compiler. */ #ifndef TVM_TARGET_METADATA_H_ #define TVM_TARGET_METADATA_H_ #include <tvm/ir/memory_pools.h> #include <tvm/runtime/metadata.h> #include <memory> #include <string> #include <vector> namespace tvm { namespace target { namespace metadata { /*! * \brief Subclass of MetadataNode that implements the VisitAttrs reflection method. * * This implementation (and other such Visitable subclasses) is compiled into libtvm.so, but not * libtvm_runtime.so, because reflection is not supported in libtvm_runtime.so over code size * concerns. It is used during compilation by the generic metadata code-generators. 
*/ class VisitableMetadataNode : public ::tvm::runtime::metadata::MetadataNode { public: explicit VisitableMetadataNode(const struct ::TVMMetadata* data) : MetadataNode{data} {} VisitableMetadataNode() : MetadataNode{nullptr} {} void VisitAttrs(AttrVisitor* v) { int64_t version_cpp{version()}; v->Visit("version", &version_cpp); auto inputs_array = Array<ObjectRef>(); auto inputs_accessor = inputs(); inputs_array.reserve(num_inputs()); for (int64_t i = 0; i < num_inputs(); ++i) { inputs_array.push_back(::tvm::runtime::metadata::TensorInfo{inputs_accessor[i]}); } ::tvm::runtime::metadata::MetadataArray inputs_metadata_array{ inputs_array, ::tvm::runtime::metadata::MetadataKind::kMetadata, ::tvm::runtime::metadata::TensorInfoNode::_type_key}; v->Visit("inputs", &inputs_metadata_array); int64_t num_inputs_cpp = num_inputs(); v->Visit("num_inputs", &num_inputs_cpp); auto outputs_array = Array<ObjectRef>(); auto outputs_accessor = outputs(); outputs_array.reserve(num_outputs()); for (int64_t i = 0; i < num_outputs(); ++i) { outputs_array.push_back(::tvm::runtime::metadata::TensorInfo{outputs_accessor[i]}); } ::tvm::runtime::metadata::MetadataArray outputs_metadata_array{ outputs_array, ::tvm::runtime::metadata::MetadataKind::kMetadata, ::tvm::runtime::metadata::TensorInfoNode::_type_key}; v->Visit("outputs", &outputs_metadata_array); int64_t num_outputs_cpp = num_outputs(); v->Visit("num_outputs", &num_outputs_cpp); auto pools_array = Array<ObjectRef>(); auto pools_accessor = workspace_pools(); pools_array.reserve(num_workspace_pools()); for (int64_t i = 0; i < num_workspace_pools(); ++i) { pools_array.push_back(::tvm::runtime::metadata::TensorInfo{pools_accessor[i]}); } ::tvm::runtime::metadata::MetadataArray workspace_pools_metadata_array{ pools_array, ::tvm::runtime::metadata::MetadataKind::kMetadata, ::tvm::runtime::metadata::TensorInfoNode::_type_key}; v->Visit("workspace_pools", &workspace_pools_metadata_array); int64_t num_workspace_pools_cpp = 
num_workspace_pools(); v->Visit("num_workspace_pools", &num_workspace_pools_cpp); auto consts_array = Array<ObjectRef>(); auto consts_accessor = constant_pools(); consts_array.reserve(num_constant_pools()); for (int64_t i = 0; i < num_constant_pools(); ++i) { consts_array.push_back(::tvm::runtime::metadata::ConstantInfoMetadata{consts_accessor[i]}); } int64_t num_const_pools_cpp = num_constant_pools(); ::tvm::runtime::metadata::MetadataArray constant_pools_metadata_array{ consts_array, ::tvm::runtime::metadata::MetadataKind::kMetadata, ::tvm::runtime::metadata::ConstantInfoMetadataNode::_type_key}; v->Visit("constant_pools", &constant_pools_metadata_array); v->Visit("num_constant_pools", &num_const_pools_cpp); ::std::string mod_name_cpp{data()->mod_name}; v->Visit("mod_name", &mod_name_cpp); } }; /*! * \brief Subclass of MetadataNode which also owns the backing C structures. * * This class (and other InMemory subclasses) are used during compilation to instantiate Metadata * instances whose storage lives outside of .rodata. This class exists because the Module returned * from tvm.relay.build must also be ready to run inference. 
*/ class InMemoryMetadataNode : public ::tvm::target::metadata::VisitableMetadataNode { public: InMemoryMetadataNode() : InMemoryMetadataNode(0 /* version */, {} /* inputs */, {} /* outputs */, {} /* workspace_pools */, {} /* constant_pools */, "" /* mod_name */) { } InMemoryMetadataNode(int64_t version, const ::std::vector<::tvm::runtime::metadata::TensorInfo>& inputs, const ::std::vector<::tvm::runtime::metadata::TensorInfo>& outputs, const ::std::vector<::tvm::runtime::metadata::TensorInfo>& workspace_pools, const ::std::vector<::tvm::ConstantInfo>& constant_pools, const ::tvm::runtime::String mod_name) : VisitableMetadataNode{&storage_}, inputs_{new struct TVMTensorInfo[inputs.size()]}, inputs_objs_{inputs}, outputs_{new struct TVMTensorInfo[outputs.size()]}, outputs_objs_{outputs}, workspace_pools_{new struct TVMTensorInfo[workspace_pools.size()]}, workspace_pools_objs_{workspace_pools}, constant_pools_{new struct TVMConstantInfo[constant_pools.size()]}, constant_pools_objs_{constant_pools}, mod_name_{mod_name}, storage_{version, nullptr, 0ull, nullptr, 0ull, nullptr, 0ull, nullptr, 0ull, mod_name_.c_str()} { storage_.inputs = inputs_.get(); storage_.num_inputs = inputs.size(); for (unsigned int i = 0; i < inputs.size(); ++i) { inputs_.get()[i] = *inputs[i]->data(); } storage_.outputs = outputs_.get(); storage_.num_outputs = outputs.size(); for (unsigned int i = 0; i < outputs.size(); ++i) { outputs_.get()[i] = *outputs[i]->data(); } storage_.workspace_pools = workspace_pools_.get(); storage_.num_workspace_pools = workspace_pools.size(); for (unsigned int i = 0; i < workspace_pools.size(); ++i) { workspace_pools_.get()[i] = *workspace_pools[i]->data(); } storage_.constant_pools = constant_pools_.get(); storage_.num_constant_pools = constant_pools.size(); for (size_t i = 0; i < constant_pools.size(); ++i) { constant_pools_.get()[i].name_hint = constant_pools[i]->name_hint.c_str(); constant_pools_.get()[i].byte_offset = constant_pools[i]->byte_offset.IntValue(); 
std::string bytes; dmlc::MemoryStringStream stream(&bytes); auto data = constant_pools[i]->data; data.Save(&stream); // Allocated mem freed in destructor constant_pools_.get()[i].data_len = bytes.size(); char* a = reinterpret_cast<char*>(malloc(bytes.size())); constant_pools_.get()[i].data_bytes = a; memcpy(a, bytes.c_str(), bytes.size()); } } ~InMemoryMetadataNode() { // frees allocated mem for const_objs_ for (int i = 0; i < storage_.num_constant_pools; ++i) { free(const_cast<void*>(constant_pools_.get()[i].data_bytes)); } } private: ::std::unique_ptr<struct TVMTensorInfo[]> inputs_; std::vector<::tvm::runtime::metadata::TensorInfo> inputs_objs_; ::std::unique_ptr<struct TVMTensorInfo[]> outputs_; std::vector<::tvm::runtime::metadata::TensorInfo> outputs_objs_; ::std::unique_ptr<struct TVMTensorInfo[]> workspace_pools_; std::vector<::tvm::runtime::metadata::TensorInfo> workspace_pools_objs_; ::std::unique_ptr<struct TVMConstantInfo[]> constant_pools_; std::vector<::tvm::ConstantInfo> constant_pools_objs_; ::std::string mod_name_; struct ::TVMMetadata storage_; }; class VisitableTensorInfoNode : public ::tvm::runtime::metadata::TensorInfoNode { public: explicit VisitableTensorInfoNode(const struct ::TVMTensorInfo* data) : TensorInfoNode{data} {} VisitableTensorInfoNode() : TensorInfoNode{nullptr} {} void VisitAttrs(AttrVisitor* v) { ::std::string name_cpp{data()->name}; v->Visit("name", &name_cpp); auto shape_array = Array<ObjectRef>(); auto shape_accessor = shape(); shape_array.reserve(num_shape()); for (int64_t i = 0; i < num_shape(); ++i) { shape_array.push_back(::tvm::Integer{static_cast<int>(shape_accessor[i])}); } ::tvm::runtime::metadata::MetadataArray shape_metadata_array{ shape_array, ::tvm::runtime::metadata::MetadataKind::kInt64, nullptr}; v->Visit("shape", &shape_metadata_array); int64_t num_shape_cpp = num_shape(); v->Visit("num_shape", &num_shape_cpp); ::tvm::runtime::DataType dtype_cpp{dtype()}; v->Visit("dtype", &dtype_cpp); } }; class 
InMemoryTensorInfoNode : public ::tvm::target::metadata::VisitableTensorInfoNode { public: InMemoryTensorInfoNode() : InMemoryTensorInfoNode("", {}, ::tvm::runtime::DataType(0, 0, 0)) {} InMemoryTensorInfoNode(const ::tvm::runtime::String& name, const ::std::vector<int64_t>& shape, ::tvm::runtime::DataType dtype) : VisitableTensorInfoNode{&storage_}, name_{name}, shape_{new int64_t[shape.size()]()}, storage_{name_.c_str(), nullptr, 0, dtype} { storage_.shape = shape_.get(); storage_.num_shape = shape.size(); for (unsigned int i = 0; i < shape.size(); ++i) { shape_.get()[i] = shape[i]; } } private: ::std::string name_; ::std::unique_ptr<int64_t[]> shape_; struct ::TVMTensorInfo storage_; }; class VisitableConstantInfoMetadataNode : public ::tvm::runtime::metadata::ConstantInfoMetadataNode { public: explicit VisitableConstantInfoMetadataNode(const struct ::TVMConstantInfo* data) : ConstantInfoMetadataNode{data} {} VisitableConstantInfoMetadataNode() : ConstantInfoMetadataNode{nullptr} {} void VisitAttrs(AttrVisitor* v) { ::std::string name_cpp{name_hint()}; v->Visit("name_hint", &name_cpp); uint64_t byte_offset_cpp{byte_offset()}; v->Visit("byte_offset", &byte_offset_cpp); ::tvm::runtime::NDArray data_cpp = data(); v->Visit("data", &data_cpp); } }; } // namespace metadata } // namespace target } // namespace tvm #endif // TVM_TARGET_METADATA_H_
https://github.com/zk-ml/tachikoma
src/target/metadata_module.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file metadata_module.h
 * \brief Declares functions that build MetadataModules for C++ and C runtimes.
 */
#ifndef TVM_TARGET_METADATA_MODULE_H_
#define TVM_TARGET_METADATA_MODULE_H_

#include <tvm/relay/executor.h>
#include <tvm/relay/runtime.h>
#include <tvm/runtime/module.h>
#include <tvm/runtime/ndarray.h>
#include <tvm/target/target.h>

#include <string>
#include <unordered_map>

#include "../relay/backend/utils.h"

namespace tvm {
namespace codegen {

/*!
 * \brief Create a metadata module wrapper. The helper is used by different
 * codegens, such as graph executor codegen and the vm compiler.
 *
 * \param params The metadata for initialization of all modules.
 * \param target_module the internal module that is compiled by tvm.
 * \param ext_modules The external modules that needs to be imported inside the metadata
 * module(s).
 * \param target The target that all the modules are compiled for
 * \param runtime The runtime to codegen for
 * \param executor The executor to codegen for
 * \param metadata Module metadata
 * \return The created metadata module that manages initialization of metadata.
 */
runtime::Module CreateMetadataModule(
    const std::unordered_map<std::string, runtime::NDArray>& params, runtime::Module target_module,
    const Array<runtime::Module>& ext_modules, Target target, tvm::relay::Runtime runtime,
    tvm::relay::Executor executor, relay::backend::ExecutorCodegenMetadata metadata);

}  // namespace codegen
}  // namespace tvm

#endif  // TVM_TARGET_METADATA_MODULE_H_
https://github.com/zk-ml/tachikoma
src/target/metadata_utils.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file tvm/target/metadata_utils.h
 * \brief Declares utility functions and classes for emitting metadata.
 */
#ifndef TVM_TARGET_METADATA_UTILS_H_
#define TVM_TARGET_METADATA_UTILS_H_

#include <tvm/runtime/data_type.h>
#include <tvm/runtime/ndarray.h>
#include <tvm/runtime/object.h>

#include <string>
#include <tuple>
#include <unordered_map>
#include <vector>

#include "metadata.h"

namespace tvm {
namespace codegen {
namespace metadata {

/*!
 * \brief Construct a unique string "address" for a struct member from a vector of pieces.
 *
 * In codegen, it is frequently necessary to assemble a C-style identifier for an
 * otherwise-anonymous member of Metadata. For instance, suppose Metadata declares an array:
 *     struct TVMMetadata {
 *        int64_t* shape;
 *     };
 *
 * In order to properly initialize this struct, the array must be declared separately with a global
 * name. This function produces such a name, here termed "address."
 *
 * \param parts A vector of pieces, typically the struct member names which identify the path to
 * this member.
 * \return The joined pieces.
 */
std::string AddressFromParts(const std::vector<std::string>& parts);

/*!
 * \brief A prefix in metadata symbol names.
 * This prefix is typically given to AddressFromParts as the 0th item in parts.
 */
static constexpr const char* kMetadataGlobalSymbol = "kTvmgenMetadata";

/*!
 * \brief Post-order traverse metadata to discover arrays which need to be forward-defined.
 */
class DiscoverArraysVisitor : public AttrVisitor {
 public:
  /*! \brief Models a single array discovered in this visitor.
   * Contains two fields:
   *  0. An address which uniquely identifies the array in this Metadata instance.
   *  1. The discovered MetadataArray.
   */
  using DiscoveredArray = std::tuple<std::string, runtime::metadata::MetadataArray>;
  explicit DiscoverArraysVisitor(std::vector<DiscoveredArray>* queue);

  void Visit(const char* key, double* value) final;
  void Visit(const char* key, int64_t* value) final;
  void Visit(const char* key, uint64_t* value) final;
  void Visit(const char* key, int* value) final;
  void Visit(const char* key, bool* value) final;
  void Visit(const char* key, std::string* value) final;
  void Visit(const char* key, DataType* value) final;
  void Visit(const char* key, runtime::NDArray* value) final;
  void Visit(const char* key, void** value) final;

  void Visit(const char* key, ObjectRef* value) final;

 private:
  /*! \brief The queue to be filled with discovered arrays. */
  std::vector<DiscoveredArray>* queue_;

  /*! \brief Tracks the preceding address pieces. */
  std::vector<std::string> address_parts_;
};

/*!
 * \brief Post-order traverse Metadata to discover all complex types which need to be
 * forward-defined. This visitor finds one defined() MetadataBase instance for each unique subclass
 * present inside Metadata in the order in which the subclass was first discovered.
 */
class DiscoverComplexTypesVisitor : public AttrVisitor {
 public:
  /*! \brief Construct a new instance.
   * \param queue An ordered list which holds the discovered MetadataBase instances, one per
   *        unique subclass; may be pre-populated, in which case its entries seed the
   *        type-key-to-position index.
   */
  explicit DiscoverComplexTypesVisitor(std::vector<runtime::metadata::MetadataBase>* queue)
      : queue_{queue} {
    int i = 0;
    for (auto q : *queue) {
      type_key_to_position_[q->GetTypeKey()] = i++;
    }
  }

  void Visit(const char* key, double* value) final;
  void Visit(const char* key, int64_t* value) final;
  void Visit(const char* key, uint64_t* value) final;
  void Visit(const char* key, int* value) final;
  void Visit(const char* key, bool* value) final;
  void Visit(const char* key, std::string* value) final;
  void Visit(const char* key, DataType* value) final;
  void Visit(const char* key, runtime::NDArray* value) final;
  void Visit(const char* key, void** value) final;

  void Visit(const char* key, ObjectRef* value) final;

  void Discover(runtime::metadata::MetadataBase metadata);

 private:
  bool DiscoverType(std::string type_key);

  void DiscoverInstance(runtime::metadata::MetadataBase md);

  std::vector<runtime::metadata::MetadataBase>* queue_;

  /*! \brief map type_index to index in queue_. */
  std::unordered_map<std::string, int> type_key_to_position_;
};

}  // namespace metadata
}  // namespace codegen
}  // namespace tvm

#endif  // TVM_TARGET_METADATA_UTILS_H_
https://github.com/zk-ml/tachikoma
src/target/parsers/aprofile.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file tvm/target/parsers/aprofile.h
 * \brief Target Parser for Arm(R) Cortex(R) A-Profile CPUs
 */

#ifndef TVM_TARGET_PARSERS_APROFILE_H_
#define TVM_TARGET_PARSERS_APROFILE_H_

#include <tvm/target/target.h>

namespace tvm {
namespace target {
namespace parsers {
namespace aprofile {

/*!
 * \brief Check whether a target describes an Arm(R) A-Profile CPU.
 * \param target The JSON representation of the target to inspect.
 * \return true when the target is recognized as A-Profile — presumably decided from the
 * target's architecture attributes; confirm against the implementation in aprofile.cc.
 */
bool IsArch(TargetJSON target);

/*!
 * \brief Parse and normalize an A-Profile target.
 * \param target The JSON representation of the target to parse.
 * \return The parsed target, with A-Profile-specific attributes filled in by the parser.
 */
TargetJSON ParseTarget(TargetJSON target);

}  // namespace aprofile
}  // namespace parsers
}  // namespace target
}  // namespace tvm

#endif  // TVM_TARGET_PARSERS_APROFILE_H_
https://github.com/zk-ml/tachikoma
src/target/parsers/cpu.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file tvm/target/parsers/cpu.h
 * \brief Target Parser for CPU Target's
 */

#ifndef TVM_TARGET_PARSERS_CPU_H_
#define TVM_TARGET_PARSERS_CPU_H_

#include <tvm/target/target.h>

namespace tvm {
namespace target {
namespace parsers {
namespace cpu {

/*!
 * \brief Parse a CPU target, dispatching to a profile-specific parser when applicable.
 * \param target The JSON representation of the target to parse.
 * \return The parsed target — presumably delegating to the mprofile/aprofile parsers declared in
 * the sibling headers; confirm against cpu.cc.
 */
TargetJSON ParseTarget(TargetJSON target);

}  // namespace cpu
}  // namespace parsers
}  // namespace target
}  // namespace tvm

#endif  // TVM_TARGET_PARSERS_CPU_H_
https://github.com/zk-ml/tachikoma
src/target/parsers/mprofile.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file tvm/target/parsers/mprofile.h
 * \brief Target Parser for Arm(R) Cortex(R) M-Profile CPUs
 */

#ifndef TVM_TARGET_PARSERS_MPROFILE_H_
#define TVM_TARGET_PARSERS_MPROFILE_H_

#include <tvm/target/target.h>

namespace tvm {
namespace target {
namespace parsers {
namespace mprofile {

/*!
 * \brief Check whether a target describes an Arm(R) M-Profile CPU.
 * \param target The JSON representation of the target to inspect.
 * \return true when the target is recognized as M-Profile — presumably decided from the
 * target's architecture attributes; confirm against the implementation in mprofile.cc.
 */
bool IsArch(TargetJSON target);

/*!
 * \brief Parse and normalize an M-Profile target.
 * \param target The JSON representation of the target to parse.
 * \return The parsed target, with M-Profile-specific attributes filled in by the parser.
 */
TargetJSON ParseTarget(TargetJSON target);

}  // namespace mprofile
}  // namespace parsers
}  // namespace target
}  // namespace tvm

#endif  // TVM_TARGET_PARSERS_MPROFILE_H_
https://github.com/zk-ml/tachikoma
src/target/source/codegen_c.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file codegen_c.h
 * \brief Common utilities to generated C style code.
 */
#ifndef TVM_TARGET_SOURCE_CODEGEN_C_H_
#define TVM_TARGET_SOURCE_CODEGEN_C_H_

#include <tvm/ir/op.h>
#include <tvm/target/codegen.h>
#include <tvm/tir/analysis.h>
#include <tvm/tir/builtin.h>
#include <tvm/tir/expr.h>
#include <tvm/tir/function.h>
#include <tvm/tir/op_attr_types.h>
#include <tvm/tir/stmt.h>
#include <tvm/tir/stmt_functor.h>

#include <string>
#include <unordered_map>
#include <unordered_set>
#include <vector>

#include "../../tir/transforms/ir_utils.h"
#include "codegen_source_base.h"

namespace tvm {
namespace codegen {

using namespace tir;
/*!
 * \brief A base class to generate C code.
 *
 * CodeGenC have two modes: generate SSA formed C code or normal form.
 *
 * **NOTE** CodeGenC does not aim at generating C codes consumed by MSVC or GCC,
 * Rather, it's providing infrastructural abstraction for C variants like CUDA
 * and OpenCL-C. You might find some odd variant features, e.g., type `int3` for
 * a vector of 3 `int`s. For native C code generator, see `CodeGenLLVM`.
 */
class CodeGenC : public ExprFunctor<void(const PrimExpr&, std::ostream&)>,
                 public StmtFunctor<void(const Stmt&)>,
                 public CodeGenSourceBase {
 public:
  /*!
   * \brief Initialize the code generator.
   * \param output_ssa Whether output SSA.
   */
  void Init(bool output_ssa);
  /*!
   * \brief Add the function to the generated module.
   * \param f The function to be compiled.
   */
  void AddFunction(const PrimFunc& f);
  /*!
   * \brief Finalize the compilation and return the code.
   * \return The code.
   */
  std::string Finish();
  /*!
   * \brief Print the Stmt n to CodeGenC->stream
   * \param n The statement to be printed.
   */
  void PrintStmt(const Stmt& n) { VisitStmt(n); }
  /*!
   * \brief Print the expression n(or its ssa id if in ssa mode) into os
   * \param n The expression to be printed.
   * \param os The output stream
   */
  void PrintExpr(const PrimExpr& n, std::ostream& os);
  /*!
   * \brief Same as PrintExpr, but simply returns result string
   * \param n The expression to be printed.
   */
  std::string PrintExpr(const PrimExpr& n) {
    std::ostringstream os;
    PrintExpr(n, os);
    return os.str();
  }
  // The following parts are overloadable print operations.
  /*!
   * \brief Print the function header before the argument list
   *
   *  Example: stream << "void";
   */
  virtual void PrintFuncPrefix();  // NOLINT(*)
  /*!
   * \brief Print extra function attributes
   *
   *  Example: __launch_bounds__(256) for CUDA functions
   */
  virtual void PrintExtraAttrs(const PrimFunc& f);
  /*!
   * \brief Print the final return at the end the function.
   */
  virtual void PrintFinalReturn();  // NOLINT(*)
  /*!
   * \brief Insert statement before function body.
   * \param f The function to be compiled.
   */
  virtual void PreFunctionBody(const PrimFunc& f) {}
  /*!
   * \brief Initialize codegen state for generating f.
   * \param f The function to be compiled.
   */
  virtual void InitFuncState(const PrimFunc& f);
  // expression
  void VisitExpr_(const VarNode* op, std::ostream& os) override;         // NOLINT(*)
  void VisitExpr_(const LoadNode* op, std::ostream& os) override;        // NOLINT(*)
  void VisitExpr_(const BufferLoadNode* op, std::ostream& os) override;  // NOLINT(*)
  void VisitExpr_(const LetNode* op, std::ostream& os) override;         // NOLINT(*)
  void VisitExpr_(const CallNode* op, std::ostream& os) override;        // NOLINT(*)
  void VisitExpr_(const AddNode* op, std::ostream& os) override;         // NOLINT(*)
  void VisitExpr_(const SubNode* op, std::ostream& os) override;         // NOLINT(*)
  void VisitExpr_(const MulNode* op, std::ostream& os) override;         // NOLINT(*)
  void VisitExpr_(const DivNode* op, std::ostream& os) override;         // NOLINT(*)
  void VisitExpr_(const ModNode* op, std::ostream& os) override;         // NOLINT(*)
  void VisitExpr_(const MinNode* op, std::ostream& os) override;         // NOLINT(*)
  void VisitExpr_(const MaxNode* op, std::ostream& os) override;         // NOLINT(*)
  void VisitExpr_(const EQNode* op, std::ostream& os) override;          // NOLINT(*)
  void VisitExpr_(const NENode* op, std::ostream& os) override;          // NOLINT(*)
  void VisitExpr_(const LTNode* op, std::ostream& os) override;          // NOLINT(*)
  void VisitExpr_(const LENode* op, std::ostream& os) override;          // NOLINT(*)
  void VisitExpr_(const GTNode* op, std::ostream& os) override;          // NOLINT(*)
  void VisitExpr_(const GENode* op, std::ostream& os) override;          // NOLINT(*)
  void VisitExpr_(const AndNode* op, std::ostream& os) override;        // NOLINT(*)
  void VisitExpr_(const OrNode* op, std::ostream& os) override;         // NOLINT(*)
  void VisitExpr_(const CastNode* op, std::ostream& os) override;       // NOLINT(*)
  void VisitExpr_(const NotNode* op, std::ostream& os) override;        // NOLINT(*)
  void VisitExpr_(const SelectNode* op, std::ostream& os) override;     // NOLINT(*)
  void VisitExpr_(const RampNode* op, std::ostream& os) override;       // NOLINT(*)
  void VisitExpr_(const ShuffleNode* op, std::ostream& os) override;    // NOLINT(*)
  void VisitExpr_(const BroadcastNode* op, std::ostream& os) override;  // NOLINT(*)
  void VisitExpr_(const IntImmNode* op, std::ostream& os) override;     // NOLINT(*)
  void VisitExpr_(const FloatImmNode* op, std::ostream& os) override;   // NOLINT(*)
  void VisitExpr_(const StringImmNode* op, std::ostream& os) override;  // NOLINT(*)
  // statement
  void VisitStmt_(const LetStmtNode* op) override;
  void VisitStmt_(const StoreNode* op) override;
  void VisitStmt_(const BufferStoreNode* op) override;
  void VisitStmt_(const ForNode* op) override;
  void VisitStmt_(const WhileNode* op) override;
  void VisitStmt_(const IfThenElseNode* op) override;
  void VisitStmt_(const AllocateNode* op) override;
  void VisitStmt_(const AttrStmtNode* op) override;
  void VisitStmt_(const AssertStmtNode* op) override;
  void VisitStmt_(const EvaluateNode* op) override;
  void VisitStmt_(const SeqStmtNode* op) override;
  void VisitStmt_(const AllocateConstNode* op) override;
  void VisitStmt_(const DeclBufferNode* op) override;
  /*!
   * \brief Print expr representing the thread tag
   * \param iv The thread index to be bound.
   */
  virtual void BindThreadIndex(const IterVar& iv);                             // NOLINT(*)
  virtual void PrintStorageScope(const std::string& scope, std::ostream& os);  // NOLINT(*)
  virtual void PrintStorageSync(const CallNode* op);                           // NOLINT(*)
  // Binary vector op.
  virtual void PrintVecBinaryOp(const std::string& op, DataType op_type, PrimExpr lhs, PrimExpr rhs,
                                std::ostream& os);  // NOLINT(*)
  // print vector load
  virtual std::string GetVecLoad(DataType t, const BufferNode* buffer, PrimExpr base);
  // print vector store
  virtual void PrintVecStore(const BufferNode* buffer, DataType t, PrimExpr base,
                             const std::string& value);  // NOLINT(*)
  // print load of single element
  virtual void PrintVecElemLoad(const std::string& vec, DataType t, int i,
                                std::ostream& os);  // NOLINT(*)
  // print store of single element.
  virtual void PrintVecElemStore(const std::string& vec, DataType t, int i,
                                 const std::string& value);
  // Get a cast type from to
  virtual std::string CastFromTo(std::string value, DataType from, DataType target);
  // Get load of single element with expression
  virtual void PrintVecElemLoadExpr(DataType t, int i, const std::string& value, std::ostream& os);
  // Print restrict keyword for a given Var if applicable
  virtual void PrintRestrict(const Var& v, std::ostream& os);

  virtual void SetConstantsByteAlignment(Integer constants_byte_alignment) {
    constants_byte_alignment_ = constants_byte_alignment;
  }

 protected:
  // Print reference to struct location
  std::string GetStructRef(DataType t, const PrimExpr& buffer, const PrimExpr& index, int kind);
  // Print reference to a buffer as type t in index.
  virtual std::string GetBufferRef(DataType t, const BufferNode* buffer, PrimExpr index);

  /*!
   * \brief Handle volatile loads.
   *
   * This is to workaround a bug in CUDA cuda_fp16.h. Volatile accesses
   * to shared memory are required for reductions. However, __half class
   * does not implement volatile member functions. CUDA codegen will cast
   * away volatile qualifier from CUDA __half types.
   */
  virtual void HandleVolatileLoads(const std::string& value, const BufferLoadNode* op,
                                   std::ostream& os) {
    // By default, do nothing but print the loaded value.
    os << value;
  }

  /*!
   * \brief Check if scope is part of type in the target language.
   *
   * **NOTE** In OpenCL, __local is part of type, so "__local int *"
   * is legal. This is not the case for CUDA, where "__shared__"
   * or "__constant__" is not part of type but a storage class (like
   * C/C++ static).
   */
  virtual bool IsScopePartOfType() const { return true; }

  /*!
   * \brief Print external function call.
   * \param ret_type The return type.
   * \param global_symbol The symbol of the target function.
   * \param args The arguments to the function.
   * \param skip_first_arg Whether to skip the first arguments.
   * \param os The output stream.
   */
  virtual void PrintCallExtern(Type ret_type, String global_symbol, const Array<PrimExpr>& args,
                               bool skip_first_arg, std::ostream& os);  // NOLINT(*)
  /*!
   * \brief If buffer is allocated as type t.
   * \param buf_var The buffer variable.
   * \param t The type to be checked.
   */
  bool HandleTypeMatch(const VarNode* buf_var, DataType t) const;
  /*!
   * \brief Register the data type of buf_var
   * \param buf_var The buffer variable.
   * \param t The type to be checked.
   */
  void RegisterHandleType(const VarNode* buf_var, DataType t);
  // override
  void PrintSSAAssign(const std::string& target, const std::string& src, DataType t) final;
  /*! \brief reserves common C keywords */
  void ReserveKeywordsAsUnique();

  /*! \brief Check if buf_var is volatile or not. */
  bool IsVolatile(const VarNode* buf_var) const { return volatile_buf_.count(buf_var) != 0; }

  /*! \brief restrict keyword */
  std::string restrict_keyword_{""};
  /*! \brief the storage scope of allocation */
  std::unordered_map<const VarNode*, std::string> alloc_storage_scope_;
  /*! \brief the data type of allocated buffers */
  std::unordered_map<const VarNode*, DataType> handle_data_type_;
  /*! \brief Record of ops that have pre-defined global symbol. */
  OpAttrMap<TGlobalSymbol> op_attr_global_symbol_ = Op::GetAttrMap<TGlobalSymbol>("TGlobalSymbol");
  // cache commonly used ops
  const Op& builtin_call_extern_ = builtin::call_extern();
  const Op& builtin_call_pure_extern_ = builtin::call_pure_extern();
  Integer constants_byte_alignment_ = 16;

 private:
  /*! \brief whether to print in SSA form */
  bool print_ssa_form_{false};
  /*! \brief set of volatile buf access */
  std::unordered_set<const VarNode*> volatile_buf_;
  // deep comparison of PrimExpr
  ExprDeepEqual deep_equal_;
  // binding of let variables. Enables duplicate var defs that map to same value
  std::unordered_map<Var, const LetNode*, ObjectPtrHash, ObjectPtrEqual> let_binding_;
};

}  // namespace codegen
}  // namespace tvm
#endif  // TVM_TARGET_SOURCE_CODEGEN_C_H_
https://github.com/zk-ml/tachikoma
src/target/source/codegen_c_host.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file codegen_c_host.h * \brief Generate C host code. */ #ifndef TVM_TARGET_SOURCE_CODEGEN_C_HOST_H_ #define TVM_TARGET_SOURCE_CODEGEN_C_HOST_H_ #include <string> #include <unordered_map> #include <unordered_set> #include <utility> #include <vector> #include "codegen_c.h" #include "tvm/target/codegen.h" #include "tvm/tir/expr.h" namespace tvm { namespace codegen { class CodeGenCHost : public CodeGenC { public: CodeGenCHost(); void Init(bool output_ssa, bool emit_asserts, std::string target_str, const std::unordered_set<std::string>& devices); void InitGlobalContext(); void AddFunction(const PrimFunc& f); /*! * \brief Add functions from the (unordered) range to the current module in a deterministic * order. This helps with debugging. * * \param functions A vector of unordered range of current module. 
*/ void AddFunctionsOrdered(std::vector<std::pair<tvm::GlobalVar, tvm::BaseFunc>> functions); void DefineModuleName(); void PrintType(DataType t, std::ostream& os) final; // NOLINT(*) void PrintFuncPrefix() final; // NOLINT(*) void PrintFinalReturn() final; // NOLINT(*) // overload visitor functions void VisitExpr_(const BroadcastNode* op, std::ostream& os) final; // NOLINT(*) void VisitExpr_(const CallNode* op, std::ostream& os); // NOLINT(*) // overload min and max to use the ternary operator, so we don't rely on the // standard library implementations void VisitExpr_(const MinNode* op, std::ostream& os) final; // NOLINT(*) void VisitExpr_(const MaxNode* op, std::ostream& os) final; // NOLINT(*) void VisitStmt_(const AssertStmtNode* op) final; // NOLINT(*) Array<String> GetFunctionNames() { return function_names_; } private: /* \brief Internal structure to store information about function calls */ struct FunctionInfo { /* \brief function name */ std::string func_name; /* number of arguments required by the function */ int64_t num_args; /* \brief name of resource_handle to pass */ std::string resource_handle_name; }; std::string module_name_; /* \brief mapping global packed func to the unique name */ std::unordered_map<std::string, std::string> declared_globals_; /* \brief names of the functions declared in this module */ Array<String> function_names_; /*! \brief whether to emit asserts in the resulting C code */ bool emit_asserts_; FunctionInfo GetFunctionInfo(const CallNode* op, bool has_resource_handle); std::string GetPackedName(const CallNode* op); void PrintGetFuncFromBackend(const std::string& func_name, const std::string& packed_func_name); void PrintFuncCall(const std::string& packed_func_name, int num_args); void PrintFuncCallC(const std::string& packed_func_name, int num_args, const std::string& resource_handle_name); /*! * \brief Print ternary conditional operator implementing binary `op` * Forces the operands to be in SSA form. 
* \param op binary operator being expressed * \param compare string representation of comparison operator * \param os stream reference to print into */ template <typename T> inline void PrintTernaryCondExpr(const T* op, const char* compare, std::ostream& os); // NOLINT(*) }; } // namespace codegen } // namespace tvm #endif // TVM_TARGET_SOURCE_CODEGEN_C_HOST_H_
https://github.com/zk-ml/tachikoma
src/target/source/codegen_cuda.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file codegen_cuda.h
 * \brief Utility to generate cuda code
 */
#ifndef TVM_TARGET_SOURCE_CODEGEN_CUDA_H_
#define TVM_TARGET_SOURCE_CODEGEN_CUDA_H_

#include <tvm/target/codegen.h>
#include <tvm/tir/expr.h>
#include <tvm/tir/op.h>

#include <string>
#include <unordered_map>

#include "codegen_c.h"

namespace tvm {
namespace codegen {

/*! \brief Code generator emitting CUDA C device code from TIR. */
class CodeGenCUDA final : public CodeGenC {
 public:
  CodeGenCUDA();
  void Init(bool output_ssa);
  std::string Finish();
  /*! \return Whether the generated code requires extra CUDA include headers
   * (set when fp16/bf16/int8, math constants, or mma were encountered). */
  bool need_include_path() {
    return (enable_fp16_ || enable_bf16_ || enable_int8_ || need_math_constants_h_ || need_mma_h_);
  }
  // override behavior
  void PrintFuncPrefix() final;
  void PrintExtraAttrs(const PrimFunc& f) final;
  void VisitStmt_(const ForNode* op) final;
  void PrintStorageSync(const CallNode* op) final;
  void PrintStorageScope(const std::string& scope, std::ostream& os) final;  // NOLINT(*)
  void PrintVecBinaryOp(const std::string& op, DataType t, PrimExpr lhs, PrimExpr rhs,
                        std::ostream& os) final;  // NOLINT(*)
  void PrintType(DataType t, std::ostream& os) final;  // NOLINT(*)
  void PrintVecElemLoad(const std::string& vec, DataType t, int i,
                        std::ostream& os) final;  // NOLINT(*)
  void PrintVecElemStore(const std::string& vec, DataType t, int i,
                         const std::string& value) final;
  void BindThreadIndex(const IterVar& iv) final;  // NOLINT(*)
  void PrintVecElemLoadExpr(DataType t, int i, const std::string& value, std::ostream& os) final;
  // overload visitor
  void VisitExpr_(const RampNode* op, std::ostream& os) final;       // NOLINT(*)
  void VisitExpr_(const ShuffleNode* op, std::ostream& os) final;    // NOLINT(*)
  void VisitExpr_(const SelectNode* op, std::ostream& os) final;     // NOLINT(*)
  void VisitExpr_(const BroadcastNode* op, std::ostream& os) final;  // NOLINT(*)
  void VisitExpr_(const FloatImmNode* op, std::ostream& os) final;
  void VisitExpr_(const CallNode* op, std::ostream& os) final;
  void VisitExpr_(const CastNode* op, std::ostream& os) final;
  void VisitStmt_(const EvaluateNode* op) final;
  void VisitStmt_(const AllocateNode* op) final;
  void VisitStmt_(const AttrStmtNode* op) final;

 protected:
  void PrintCallExtern(Type ret_type, String global_symbol, const Array<PrimExpr>& args,
                       bool skip_first_arg, std::ostream& os) final;  // NOLINT(*)

 private:
  // Handle volatile loads
  void HandleVolatileLoads(const std::string& value, const BufferLoadNode* op,
                           std::ostream& os) final;

  // Whether scope such as "__shared__" or "__constant__" is part of type.
  bool IsScopePartOfType() const final { return false; }

  // Whether global barrier is needed.
  bool need_global_barrier_{false};
  // Global barrier state
  std::string vid_global_barrier_state_;
  // Global barrier expected node.
  std::string vid_global_barrier_expect_;
  // whether enable fp16
  bool enable_fp16_{false};
  // whether enable bf16
  bool enable_bf16_{false};
  // whether enable int8
  bool enable_int8_{false};
  // whether enable warp shuffle intrinsics
  bool enable_warp_shuffle_{false};
  // whether need math_constants.h
  bool need_math_constants_h_{false};
  // whether need mma.h
  bool need_mma_h_{false};
  // Op attribute map
  OpAttrMap<bool> op_need_warp_shuffle_ = Op::GetAttrMap<bool>("cuda.need_warp_shuffle");

  // The shape of each WMMA fragment buffer, keyed by its buffer variable.
  std::unordered_map<const VarNode*, std::string> fragment_shapes;
  // The layout of each WMMA fragment buffer, keyed by its buffer variable.
  std::unordered_map<const VarNode*, std::string> fragment_layouts;
  friend void PrintConst(const FloatImmNode* op, std::ostream& os, CodeGenCUDA* p);
  void PrintWmmaScope(const std::string& scope, DataType t, const VarNode* variable,
                      std::ostream& os);
  int32_t GetWmmaFragmentSize(const std::string& scope, const VarNode* variable, int32_t size);
};

}  // namespace codegen
}  // namespace tvm

#endif  // TVM_TARGET_SOURCE_CODEGEN_CUDA_H_
https://github.com/zk-ml/tachikoma
src/target/source/codegen_metal.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file codegen_metal.h
 * \brief Generate Metal device code.
 */
#ifndef TVM_TARGET_SOURCE_CODEGEN_METAL_H_
#define TVM_TARGET_SOURCE_CODEGEN_METAL_H_

#include <tvm/target/codegen.h>

#include <string>

#include "codegen_c.h"

namespace tvm {
namespace codegen {

/*! \brief Code generator emitting Metal Shading Language device code. */
class CodeGenMetal final : public CodeGenC {
 public:
  explicit CodeGenMetal(Target target);
  void PrintArgUnionDecl();
  void AddFunction(const PrimFunc& f);  // NOLINT(*)
  void InitFuncState(const PrimFunc& f) final;
  // override print thread tag.
  void PrintStorageScope(const std::string& scope, std::ostream& os) final;  // NOLINT(*)
  void PrintStorageSync(const CallNode* op) final;                           // NOLINT(*)
  void PrintType(DataType t, std::ostream& os) final;                        // NOLINT(*)
  void BindThreadIndex(const IterVar& iv) final;                             // NOLINT(*)
  // print load of single element
  void PrintVecElemLoad(const std::string& vec, DataType t, int i,
                        std::ostream& os) final;  // NOLINT(*)
  // print store of single element.
  void PrintVecElemStore(const std::string& vec, DataType t, int i,
                         const std::string& value) final;
  // overload visitor
  void VisitExpr_(const BroadcastNode* op, std::ostream& os) final;  // NOLINT(*)
  void VisitExpr_(const CallNode* op, std::ostream& os) final;       // NOLINT(*)
  void VisitExpr_(const FloatImmNode* op, std::ostream& os) final;
  // reuse parent's function.
  using CodeGenC::PrintType;

 private:
  // Bit width used when printing thread index variables.
  int thread_index_bits_{32};
  // The target this generator was constructed for.
  Target target_;
};

}  // namespace codegen
}  // namespace tvm

#endif  // TVM_TARGET_SOURCE_CODEGEN_METAL_H_
https://github.com/zk-ml/tachikoma
src/target/source/codegen_opencl.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file codegen_opencl.h
 * \brief Generate OpenCL device code.
 */
#ifndef TVM_TARGET_SOURCE_CODEGEN_OPENCL_H_
#define TVM_TARGET_SOURCE_CODEGEN_OPENCL_H_

#include <tvm/target/codegen.h>

#include <string>
#include <unordered_map>

#include "codegen_c.h"

namespace tvm {
namespace codegen {

/*!
 * \brief C-source code generator specialized for OpenCL kernels.
 *
 * Tracks which OpenCL extensions (fp16/fp64, atomics, texture reads)
 * the generated code requires so Finish() can emit the matching pragmas.
 */
class CodeGenOpenCL final : public CodeGenC {
 public:
  CodeGenOpenCL();
  // Finalize and return the generated kernel source, including extension pragmas.
  std::string Finish();

  // override print thread tag.
  void InitFuncState(const PrimFunc& f) final;
  void PrintFuncPrefix() final;                                              // NOLINT(*)
  void PreFunctionBody(const PrimFunc& f) final;                             // NOLINT(*)
  void BindThreadIndex(const IterVar& iv) final;                             // NOLINT(*)
  void PrintStorageScope(const std::string& scope, std::ostream& os) final;  // NOLINT(*)
  void PrintStorageSync(const CallNode* op) final;                           // NOLINT(*)
  void PrintType(DataType t, std::ostream& os) final;                        // NOLINT(*)
  void PrintType(const Type& type, std::ostream& os) final;                  // NOLINT(*)
  std::string GetVecLoad(DataType t, const BufferNode* buffer, PrimExpr base) final;
  void PrintVecStore(const BufferNode* buffer, DataType t, PrimExpr base,
                     const std::string& value) final;  // NOLINT(*)
  // the address of load/store
  void PrintVecAddr(const BufferNode* buffer, DataType t, PrimExpr base,
                    std::ostream& os);                                // NOLINT(*)
  void PrintRestrict(const Var& v, std::ostream& os) final;           // NOLINT(*)
  std::string CastFromTo(std::string value, DataType from, DataType target);  // NOLINT(*)
  std::string CastTo(std::string value, DataType target);                     // NOLINT(*)
  void SetTextureScope(const std::unordered_map<const VarNode*, std::string>&);  // NOLINT(*)

  // overload visitor
  void VisitStmt_(const AllocateNode* op) final;                     // NOLINT(*)
  void VisitExpr_(const BroadcastNode* op, std::ostream& os) final;  // NOLINT(*)
  void VisitExpr_(const CallNode* op, std::ostream& os) final;       // NOLINT(*)
  void VisitExpr_(const CastNode* op, std::ostream& os) final;       // NOLINT(*)
  void VisitExpr_(const FloatImmNode* op, std::ostream& os) final;   // NOLINT(*)
  void VisitStmt_(const StoreNode* op) final;                        // NOLINT(*)
  void VisitStmt_(const BufferStoreNode* op) final;                  // NOLINT(*)

  // overload min and max to avoid ambiguous call errors
  void VisitExpr_(const MinNode* op, std::ostream& os) final;
  void VisitExpr_(const MaxNode* op, std::ostream& os) final;
  void VisitExpr_(const AndNode* op, std::ostream& os) final;
  void VisitExpr_(const OrNode* op, std::ostream& os) final;
  void VisitExpr_(const SelectNode* op, std::ostream& os) final;

 private:
  // whether enable fp16 and fp64 extension
  bool enable_fp16_{false};
  bool enable_fp64_{false};
  // Whether to enable atomics extension.
  bool enable_atomics_{false};
  // Whether to enable sampler or sampler-less texture reads,
  // where the choice depends on the OpenCL version used.
  bool enable_compliant_texture_reads_{false};
  // Key to disable use of texture SSA in certain scenarios. For example,
  // when loaded value is stored directly to a user declared l-value buffer
  bool need_texture_ssa_{true};
  // Mapping from buffer to allocation size.
  // Useful to track when a scalar store of a vectorized texture load is required.
  std::unordered_map<const Object*, size_t> allocation_size_;
};

}  // namespace codegen
}  // namespace tvm

#endif  // TVM_TARGET_SOURCE_CODEGEN_OPENCL_H_
https://github.com/zk-ml/tachikoma
src/target/source/codegen_params.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file codegen_params.h
 * \brief Helpers for emitting NDArray constant data as C source.
 */
#ifndef TVM_TARGET_SOURCE_CODEGEN_PARAMS_H_
#define TVM_TARGET_SOURCE_CODEGEN_PARAMS_H_

#include <tvm/runtime/ndarray.h>

#include <iostream>
#include <string>

namespace tvm {
namespace codegen {

/*!
 * \brief Write a C representation of arr to os.
 *
 * This function generates a comma-separated, indented list of C integer literals suitable for use
 * in an initializer. The NDArray is flattened and then the list is produced element by element.
 * For the int16_t NDArray [-3, -2, -1, 0, 1, 2, 3, ...], and indent_chars = 4, the following output
 * is produced:
 *     -0x0003, -0x0002, -0x0001, +0x0000, +0x0001, +0x0002, +0x0003
 *
 * \param arr The array to generate
 * \param indent_chars Number of chars to indent
 * \param os Output stream where the array data should be written.
 * \param eol Line terminator appended after each generated line of output.
 */
void NDArrayDataToC(::tvm::runtime::NDArray arr, int indent_chars, std::ostream& os,
                    const std::string& eol = "\n");

}  // namespace codegen
}  // namespace tvm

#endif  // TVM_TARGET_SOURCE_CODEGEN_PARAMS_H_
https://github.com/zk-ml/tachikoma
src/target/source/codegen_source_base.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file codegen_source_base.h
 * \brief Common utilities to source code in text form.
 */
#ifndef TVM_TARGET_SOURCE_CODEGEN_SOURCE_BASE_H_
#define TVM_TARGET_SOURCE_CODEGEN_SOURCE_BASE_H_

#include <tvm/ir/name_supply.h>
#include <tvm/runtime/metadata.h>
#include <tvm/target/codegen.h>
#include <tvm/tir/expr.h>
#include <tvm/tir/op.h>

#include <functional>
#include <string>
#include <unordered_map>
#include <vector>

#include "../../runtime/meta_data.h"

namespace tvm {
namespace codegen {

/*!
 * \brief A base class to generate source code.
 * Contains helper utilities to generate nest and ssa form.
 */
class CodeGenSourceBase {
 public:
  virtual ~CodeGenSourceBase() = default;
  /*!
   * \brief Register constant value appearing in expression tree.
   * This avoids generating an ssa id for each appearance of the value.
   * \param value The constant value.
   */
  void MarkConst(std::string value);
  /*!
   * Print the type representation of type.
   * \param type The type representation.
   * \param os The stream to print the ctype into
   */
  virtual void PrintType(DataType type, std::ostream& os);  // NOLINT(*)
  /*!
   * Print the type representation of type.
   * \param type The type representation.
   * \param os The stream to print the ctype into
   */
  virtual void PrintType(const Type& type, std::ostream& os);  // NOLINT(*)

 protected:
  /*! \brief entry in ssa assign map */
  struct SSAEntry {
    /*! \brief The value id */
    std::string vid;
    /*! \brief The scope id, used to check if this entry is invalid. */
    int scope_id;
  };
  /*! \brief Clear the states that might relate to function generation */
  void ClearFuncState();
  /*! \brief print the current indented value */
  void PrintIndent();
  /*!
   * \brief Allocate a variable name for a newly defined var.
   * \param v The variable.
   * \return the variable name.
   */
  std::string AllocVarID(const tir::VarNode* v);
  /*!
   * \brief Get a variable name.
   * \param v The variable.
   * \return the variable name.
   */
  std::string GetVarID(const tir::VarNode* v) const;
  /*!
   * \brief Get the SSA ID corresponds to src
   *  If necessary, generate new assignment
   * \param src The source expression
   * \param t The type of the expression.
   */
  std::string SSAGetID(std::string src, DataType t);
  /*!
   * \brief mark the beginning of a new scope
   * \return The scope id.
   */
  int BeginScope();
  /*!
   * \brief mark the end of an old scope.
   * \param scope_id The scope id to be ended.
   */
  void EndScope(int scope_id);
  /*!
   * \brief Print assignment of src to the id in ssa entry.
   * \param target id of target variable.
   * \param src The source expression.
   * \param t The type of target.
   */
  virtual void PrintSSAAssign(const std::string& target, const std::string& src, DataType t) = 0;

  /*! \brief the declaration stream */
  std::ostringstream decl_stream;
  /*! \brief the stream to be printed */
  std::ostringstream stream;
  /*! \brief name of each variable */
  std::unordered_map<const tir::VarNode*, std::string> var_idmap_;
  /*! \brief NameSupply for allocation */
  NameSupply name_supply_ = NameSupply("");

 private:
  /*! \brief assignment map of ssa */
  std::unordered_map<std::string, SSAEntry> ssa_assign_map_;
  /*! \brief array to check whether we are inside certain scope */
  std::vector<bool> scope_mark_;
  /*! \brief The current indentation value */
  int indent_{0};
};

/*!
 * \brief Create a source module for viewing.
 * \param code The code to be viewed.
 * \param fmt The code format.
 */
runtime::Module SourceModuleCreate(std::string code, std::string fmt);

/*!
 * \brief Create a C source module for viewing and compiling GCC code.
 * \param code The code to be viewed.
 * \param fmt The code format.
 * \param func_names The name of functions inside the runtime module.
 * \param const_vars The constant variables that the c source module needs.
 * \return The created module.
 */
runtime::Module CSourceModuleCreate(const String& code, const String& fmt,
                                    const Array<String>& func_names,
                                    const Array<String>& const_vars = {});

/*!
 * \brief Wrap the submodules in a metadata module.
 * \param params The variable to constant mapping that is collected by the host
 *  module.
 * \param target_module The main TIR-lowered internal runtime module
 * \param ext_modules All the external modules that needs to be imported inside the metadata
 *  module(s).
 * \param target The target that all the modules are compiled for
 * \param metadata Metadata which should be exported to the runtime.
 * \return The wrapped module.
 */
runtime::Module CreateMetadataModule(
    const std::unordered_map<std::string, runtime::NDArray>& params, runtime::Module target_module,
    const Array<runtime::Module>& ext_modules, Target target, runtime::metadata::Metadata metadata);

/*!
 * \brief Create a source module for viewing and limited saving for device.
 * \param data The code data to be viewed.
 * \param fmt The code format.
 * \param fmap The map function information map of each function.
 * \param type_key The type_key of the runtime module of this source code
 * \param fget_source a closure to replace default get source behavior.
 */
runtime::Module DeviceSourceModuleCreate(
    std::string data, std::string fmt, std::unordered_map<std::string, runtime::FunctionInfo> fmap,
    std::string type_key, std::function<std::string(const std::string&)> fget_source = nullptr);

/*!
 * \brief Wrap the submodules that are to be wrapped in a c-source metadata module for C runtime.
 * \param modules The modules to be wrapped.
 * \param target the target the modules are compiled for.
 * \param metadata the metadata needed for code generation.
 * \return The wrapped module.
 */
runtime::Module CreateCSourceCrtMetadataModule(const Array<runtime::Module>& modules, Target target,
                                               runtime::metadata::Metadata metadata);

}  // namespace codegen
}  // namespace tvm

#endif  // TVM_TARGET_SOURCE_CODEGEN_SOURCE_BASE_H_
https://github.com/zk-ml/tachikoma
src/target/source/codegen_vhls.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file codegen_vhls.h
 * \brief Utility to generate vhls code
 */
#ifndef TVM_TARGET_SOURCE_CODEGEN_VHLS_H_
#define TVM_TARGET_SOURCE_CODEGEN_VHLS_H_

#include <tvm/target/codegen.h>
#include <tvm/target/target.h>
#include <tvm/tir/expr.h>

#include <string>

#include "codegen_c.h"

namespace tvm {
namespace codegen {

/*!
 * \brief C-source code generator targeting Vivado HLS (Xilinx FPGA toolchain).
 */
class CodeGenVivadoHLS final : public CodeGenC {
 public:
  void Init(bool output_ssa);
  void PrintType(DataType t, std::ostream& os);
  void PrintFuncPrefix() final;
  void PreFunctionBody(const PrimFunc& f) final;
  // Min/Max are overloaded to emit the HLS-friendly forms.
  void VisitExpr_(const MinNode* op, std::ostream& os) final;
  void VisitExpr_(const MaxNode* op, std::ostream& os) final;
};

}  // namespace codegen
}  // namespace tvm

#endif  // TVM_TARGET_SOURCE_CODEGEN_VHLS_H_
https://github.com/zk-ml/tachikoma
src/target/source/literal/cuda_half_t.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file cuda_half_t.h
 * \brief half_t (fp16) definition for cuda codegen.
 */
#ifndef TVM_TARGET_SOURCE_LITERAL_CUDA_HALF_T_H_
#define TVM_TARGET_SOURCE_LITERAL_CUDA_HALF_T_H_

// Software emulation of a 16-bit float ("half") that is spliced verbatim into
// generated CUDA source when the hardware half type cannot be used.
// The float<->half conversion uses the branch-light bit manipulation scheme
// (see float2half/half2float below); constants are documented inline.
static constexpr const char* _cuda_half_t_def = R"(
typedef unsigned short uint16_t;
typedef unsigned char uint8_t;
typedef signed char int8_t;
typedef int int32_t;
typedef unsigned long long uint64_t;
typedef unsigned int uint32_t;

#define TVM_FORCE_INLINE inline __attribute__((always_inline))
#define TVM_XINLINE TVM_FORCE_INLINE __device__ __host__
#define TVM_ALIGNED(x) __attribute__ ((aligned(x)))
#define TVM_HALF_OPERATOR(RTYPE, OP)                \
  TVM_XINLINE RTYPE operator OP (half a, half b) {  \
    return RTYPE(float(a) OP float(b));             \
  }                                                 \
  template<typename T>                              \
  TVM_XINLINE RTYPE operator OP (half a, T b) {     \
    return RTYPE(float(a) OP float(b));             \
  }                                                 \
  template<typename T>                              \
  TVM_XINLINE RTYPE operator OP (T a, half b) {     \
    return RTYPE(float(a) OP float(b));             \
  }

#define TVM_HALF_ASSIGNOP(AOP, OP)                                \
  template<typename T>                                            \
  TVM_XINLINE half operator AOP (const T& a) {                    \
    return *this = half(float(*this) OP float(a));                \
  }                                                               \
  template<typename T>                                            \
  TVM_XINLINE half operator AOP (const volatile T& a) volatile {  \
    return *this = half(float(*this) OP float(a));                \
  }

class TVM_ALIGNED(2) half {
 public:
  uint16_t half_;

  static TVM_XINLINE half Binary(uint16_t value) {
    half res;
    res.half_ = value;
    return res;
  }

  TVM_XINLINE half() {}

  TVM_XINLINE half(const float& value) { constructor(value); }
  TVM_XINLINE explicit half(const double& value) { constructor(value); }
  TVM_XINLINE explicit half(const int8_t& value) { constructor(value); }
  TVM_XINLINE explicit half(const uint8_t& value) { constructor(value); }
  TVM_XINLINE explicit half(const int32_t& value) { constructor(value); }
  TVM_XINLINE explicit half(const uint32_t& value) { constructor(value); }
  TVM_XINLINE explicit half(const long long& value) { constructor(value); }
  TVM_XINLINE explicit half(const uint64_t& value) { constructor(value); }

  TVM_XINLINE operator float() const {          \
    return float(half2float(half_));            \
  }                                             \
  TVM_XINLINE operator float() const volatile { \
    return float(half2float(half_));            \
  }

  TVM_HALF_ASSIGNOP(+=, +)
  TVM_HALF_ASSIGNOP(-=, -)
  TVM_HALF_ASSIGNOP(*=, *)
  TVM_HALF_ASSIGNOP(/=, /)

  TVM_XINLINE half operator+() {
    return *this;
  }

  TVM_XINLINE half operator-() {
    return half(-float(*this));
  }

  TVM_XINLINE half operator=(const half& a) {
    half_ = a.half_;
    return a;
  }

  template<typename T>
  TVM_XINLINE half operator=(const T& a) {
    return *this = half(a);
  }

  TVM_XINLINE half operator=(const half& a) volatile {
    half_ = a.half_;
    return a;
  }

  template<typename T>
  TVM_XINLINE half operator=(const T& a) volatile {
    return *this = half(a);
  }

 private:
  union Bits {
    float f;
    int32_t si;
    uint32_t ui;
  };

  static int const fp16FractionBits = 10;
  static int const fp32FractionBits = 23;
  static int32_t const fp32FractionMask = ~(~0u << fp32FractionBits);   // == 0x7fffff
  static int32_t const fp32HiddenBit = 1 << fp32FractionBits;   // == 0x800000
  static int const shift = fp32FractionBits - fp16FractionBits;   // == 13
  static int const shiftSign = 16;
  static int32_t const expAdjust = 127 - 15;   // exp32-127 = exp16-15, so exp16 = exp32 - (127-15)

  static int32_t const infN = 0x7F800000;   // flt32 infinity
  static int32_t const maxN = 0x477FFFFF;   // max flt32 that's a flt16 normal after >> by shift
  static int32_t const minN = 0x38800000;   // min flt16 normal as a flt32
  static int32_t const maxZ = 0x33000000;   // max fp32 number that's still rounded to zero in fp16
  static int32_t const signN = 0x80000000;  // flt32 sign bit

  static int32_t const infC = infN >> shift;
  static int32_t const nanN = (infC + 1) << shift;   // minimum flt16 nan as a flt32
  static int32_t const maxC = maxN >> shift;
  static int32_t const minC = minN >> shift;
  static int32_t const signC = signN >> shiftSign;  // flt16 sign bit

  static int32_t const mulN = 0x52000000;  // (1 << 23) / minN
  static int32_t const mulC = 0x33800000;  // minN / (1 << (23 - shift))

  static int32_t const subC = 0x003FF;  // max flt32 subnormal down shifted
  static int32_t const norC = 0x00400;  // min flt32 normal down shifted

  static int32_t const maxD = infC - maxC - 1;
  static int32_t const minD = minC - subC - 1;

  TVM_XINLINE uint16_t float2half(const float& value) const {
    Bits v;
    v.f = value;
    uint32_t sign = v.si & signN;    // grab sign bit
    v.si ^= sign;                    // clear sign bit from v
    sign >>= shiftSign;              // logical shift sign to fp16 position

    if (v.si <= maxZ) {
      // Handle eventual zeros here to ensure
      // vshift will not exceed 32 below.
      v.ui = 0;
    } else if (v.si < minN) {
      // Handle denorms
      uint32_t exp32 = v.ui >> fp32FractionBits;
      int32_t exp16 = exp32 - expAdjust;
      // If exp16 == 0 (just into the denorm range), then significant should be shifted right 1.
      // Smaller (so negative) exp16 values should result in greater right shifts.
      uint32_t vshift = 1 - exp16;
      uint32_t significand = fp32HiddenBit | (v.ui & fp32FractionMask);
      v.ui = significand >> vshift;
      v.ui += (v.ui & 0x3fff) != 0x1000 || (significand & 0x7ff) ? 0x1000 : 0;
    } else if (v.si <= maxN) {
      // Handle norms
      v.ui += (v.ui & 0x3fff) != 0x1000 ? 0x1000 : 0;
      v.ui -= expAdjust << fp32FractionBits;
    } else if (v.si <= infN) {
      v.si = infN;
    } else if (v.si < nanN) {
      v.si = nanN;
    }

    v.ui >>= shift;
    return sign | (v.ui & 0x7fff);
  }

  // Same as above routine, except for addition of volatile keyword
  TVM_XINLINE uint16_t float2half(
    const volatile float& value) const volatile {
    Bits v;
    v.f = value;
    uint32_t sign = v.si & signN;    // grab sign bit
    v.si ^= sign;                    // clear sign bit from v
    sign >>= shiftSign;              // logical shift sign to fp16 position

    if (v.si <= maxZ) {
      // Handle eventual zeros here to ensure
      // vshift will not exceed 32 below.
      v.ui = 0;
    } else if (v.si < minN) {
      // Handle denorms
      uint32_t exp32 = v.ui >> fp32FractionBits;
      int32_t exp16 = exp32 - expAdjust;
      // If exp16 == 0 (just into the denorm range), then significant should be shifted right 1.
      // Smaller (so negative) exp16 values should result in greater right shifts.
      uint32_t vshift = 1 - exp16;
      uint32_t significand = fp32HiddenBit | (v.ui & fp32FractionMask);
      v.ui = significand >> vshift;
      v.ui += (v.ui & 0x3fff) != 0x1000 || (significand & 0x7ff) ? 0x1000 : 0;
    } else if (v.si <= maxN) {
      // Handle norms
      v.ui += (v.ui & 0x3fff) != 0x1000 ? 0x1000 : 0;
      v.ui -= expAdjust << fp32FractionBits;
    } else if (v.si <= infN) {
      v.si = infN;
    } else if (v.si < nanN) {
      v.si = nanN;
    }

    v.ui >>= shift;
    return sign | (v.ui & 0x7fff);
  }

  TVM_XINLINE float half2float(const uint16_t& value) const {
    Bits v;
    v.ui = value;
    int32_t sign = v.si & signC;
    v.si ^= sign;
    sign <<= shiftSign;
    v.si ^= ((v.si + minD) ^ v.si) & -(v.si > subC);
    v.si ^= ((v.si + maxD) ^ v.si) & -(v.si > maxC);
    Bits s;
    s.si = mulC;
    s.f *= v.si;
    int32_t mask = -(norC > v.si);
    v.si <<= shift;
    v.si ^= (s.si ^ v.si) & mask;
    v.si |= sign;
    return v.f;
  }

  TVM_XINLINE float half2float(
    const volatile uint16_t& value) const volatile {
    Bits v;
    v.ui = value;
    int32_t sign = v.si & signC;
    v.si ^= sign;
    sign <<= shiftSign;
    v.si ^= ((v.si + minD) ^ v.si) & -(v.si > subC);
    v.si ^= ((v.si + maxD) ^ v.si) & -(v.si > maxC);
    Bits s;
    s.si = mulC;
    s.f *= v.si;
    int32_t mask = -(norC > v.si);
    v.si <<= shift;
    v.si ^= (s.si ^ v.si) & mask;
    v.si |= sign;
    return v.f;
  }

  template<typename T>
  TVM_XINLINE void constructor(const T& value) {
    half_ = float2half(float(value));
  }
};

TVM_HALF_OPERATOR(half, +)
TVM_HALF_OPERATOR(half, -)
TVM_HALF_OPERATOR(half, *)
TVM_HALF_OPERATOR(half, /)
TVM_HALF_OPERATOR(bool, >)
TVM_HALF_OPERATOR(bool, <)
TVM_HALF_OPERATOR(bool, >=)
TVM_HALF_OPERATOR(bool, <=)

TVM_XINLINE half __float2half_rn(const float a) {
  return half(a);
}
)";

// Helper routines emitted alongside the hardware half type (cuda_fp16.h):
// value packing plus fallbacks for math functions cuda_fp16.h lacks.
static constexpr const char* _cuda_half_util = R"(
// Pack two half values.
static inline __device__ __host__ unsigned
__pack_half2(const half x, const half y) {
  unsigned v0 = *((unsigned short *)&x);
  unsigned v1 = *((unsigned short *)&y);
  return (v1 << 16) | v0;
}

// Some fp16 math functions are not supported in cuda_fp16.h,
// so we define them here to make sure the generated CUDA code
// is valid.
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
#define CUDA_UNSUPPORTED_HALF_MATH_BINARY(HALF_MATH_NAME, FP32_MATH_NAME) \
static inline __device__ __host__ half HALF_MATH_NAME(half x, half y) {   \
  float tmp_x = __half2float(x);                                          \
  float tmp_y = __half2float(y);                                          \
  float result = FP32_MATH_NAME(tmp_x, tmp_y);                            \
  return __float2half(result);                                            \
}

#define CUDA_UNSUPPORTED_HALF_MATH_UNARY(HALF_MATH_NAME, FP32_MATH_NAME) \
static inline __device__ __host__ half HALF_MATH_NAME(half x) {          \
  float tmp_x = __half2float(x);                                         \
  float result = FP32_MATH_NAME(tmp_x);                                  \
  return __float2half(result);                                           \
}

CUDA_UNSUPPORTED_HALF_MATH_BINARY(hpow, powf)
CUDA_UNSUPPORTED_HALF_MATH_UNARY(htanh, tanhf)
CUDA_UNSUPPORTED_HALF_MATH_UNARY(htan, tanf)
CUDA_UNSUPPORTED_HALF_MATH_UNARY(hatan, atanf)
CUDA_UNSUPPORTED_HALF_MATH_UNARY(herf, erf)

#undef CUDA_UNSUPPORTED_HALF_MATH_BINARY
#undef CUDA_UNSUPPORTED_HALF_MATH_UNARY
#endif
)";

// Equivalent helpers for nv_bfloat16 (cuda_bf16.h).
static constexpr const char* _cuda_bfloat16_util = R"(
// Pack two bfloat16 values.
static inline __device__ __host__ unsigned
__pack_nv_bfloat162(const nv_bfloat16 x, const nv_bfloat16 y) {
  unsigned v0 = *((unsigned short *)&x);
  unsigned v1 = *((unsigned short *)&y);
  return (v1 << 16) | v0;
}

// Some bfp16 math functions are not supported in cuda_bfp16.h,
// so we define them here to make sure the generated CUDA code
// is valid.
#define CUDA_UNSUPPORTED_HALF_MATH_BINARY(HALF_MATH_NAME, FP32_MATH_NAME)                    \
static inline __device__ __host__ nv_bfloat16 HALF_MATH_NAME(nv_bfloat16 x, nv_bfloat16 y) { \
  float tmp_x = __bfloat162float(x);                                                         \
  float tmp_y = __bfloat162float(y);                                                         \
  float result = FP32_MATH_NAME(tmp_x, tmp_y);                                               \
  return __float2bfloat16(result);                                                           \
}

#define CUDA_UNSUPPORTED_HALF_MATH_UNARY(HALF_MATH_NAME, FP32_MATH_NAME)      \
static inline __device__ __host__ nv_bfloat16 HALF_MATH_NAME(nv_bfloat16 x) { \
  float tmp_x = __bfloat162float(x);                                          \
  float result = FP32_MATH_NAME(tmp_x);                                       \
  return __float2bfloat16(result);                                            \
}

CUDA_UNSUPPORTED_HALF_MATH_BINARY(hpow, powf)
CUDA_UNSUPPORTED_HALF_MATH_UNARY(htanh, tanhf)
CUDA_UNSUPPORTED_HALF_MATH_UNARY(htan, tanf)
CUDA_UNSUPPORTED_HALF_MATH_UNARY(hatan, atanf)
CUDA_UNSUPPORTED_HALF_MATH_UNARY(herf, erf)

#undef CUDA_UNSUPPORTED_HALF_MATH_BINARY
#undef CUDA_UNSUPPORTED_HALF_MATH_UNARY
)";

// Map the *_sync warp shuffle intrinsics back to the pre-Volta (< sm_70)
// unsynchronized forms, which ignore the mask argument.
static constexpr const char* _cuda_warp_intrinsic_util = R"(
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 700)
#define __shfl_sync(mask, var, lane, width) \
        __shfl((var), (lane), (width))

#define __shfl_down_sync(mask, var, offset, width) \
        __shfl_down((var), (offset), (width))

#define __shfl_up_sync(mask, var, offset, width) \
        __shfl_up((var), (offset), (width))
#endif
)";

#endif  // TVM_TARGET_SOURCE_LITERAL_CUDA_HALF_T_H_
https://github.com/zk-ml/tachikoma
src/target/source/ptx.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file ptx.h
 * \brief Code generation with inlined PTX code.
 */
#ifndef TVM_TARGET_SOURCE_PTX_H_
#define TVM_TARGET_SOURCE_PTX_H_

#include <tvm/runtime/logging.h>

#include <string>
#include <tuple>

namespace tvm {
namespace codegen {

/*!
 * \brief Print MMA assembly string given parameters.
 * \param shape The shape string mMnNkK
 * \param A_layout The layout of multiplicand A, can be either "row" or "col".
 * \param B_layout The layout of multiplicand B, can be either "row" or "col".
 * \param A_dtype The data type of multiplicand A.
 * \param B_dtype The data type of multiplicand B.
 * \param C_dtype The data type of multiplicand C.
 * \param a_ptr Pointer to buffer A.
 * \param a_offset The offset of element in A.
 * \param b_ptr Pointer to buffer B.
 * \param b_offset The offset of element in B.
 * \param c_ptr Pointer to buffer C.
 * \param c_offset The offset of element in C.
 * \param metadata Pointer to metadata buffer (only used for sparse mma).
 * \param metadata_offset The offset of element in metadata.
 * \param sparsity_selector The sparsity selector in sparse mma.
 * \param bit_op The bit operator used in 1-bit mma, can be either "xor" or "and".
 * \param sparse Whether it's sparse mma or not.
 * \param saturate Whether saturate output or not.
 */
std::string PrintMMAAssembly(const std::string& shape, const std::string& A_layout,
                             const std::string& B_layout, const std::string& A_dtype,
                             const std::string& B_dtype, const std::string& C_dtype,
                             const std::string& a_ptr, const std::string& a_offset,
                             const std::string& b_ptr, const std::string& b_offset,
                             const std::string& c_ptr, const std::string& c_offset,
                             const std::string& metadata, const std::string& metadata_offset,
                             const std::string& sparsity_selector, const std::string& bit_op,
                             bool sparse, bool saturate);

/*!
 * \brief Print ldmatrix assembly string given parameters.
 * \param trans: whether the matrix is loaded in column major format or not.
 * \param num: number of matrices to load.
 * \param type: The data type in the matrix, .b16 is the only accepted data type.
 * \param local_ptr: pointer to local buffer.
 * \param local_elem_offset: The offset of the element to store in the local buffer.
 * \param smem_ptr: pointer to the shared memory buffer to load.
 * \param smem_elem_offset: The offset of the start element of the row to load in shared memory.
 */
std::string PrintLoadMatrixAssembly(bool trans, int num, const std::string& type,
                                    const std::string& local_ptr,
                                    const std::string& local_elem_offset,
                                    const std::string& smem_ptr,
                                    const std::string& smem_elem_offset);

/*!
 * \brief Print ptx cp.async assembly string given parameters.
 * \param shared_ptr: The pointer to the destination shared memory.
 * \param shared_elem_offset: The offset into the shared memory.
 * \param global_ptr: The pointer to the global memory.
 * \param global_elem_offset: The offset into the global memory.
 * \param bytes: The number of bytes to copy, valid values are 4, 8, and 16.
 */
std::string PrintCpAsyncAssembly(const std::string& shared_ptr,
                                 const std::string& shared_elem_offset,
                                 const std::string& global_ptr,
                                 const std::string& global_elem_offset,
                                 const std::string& bytes);

}  // namespace codegen
}  // namespace tvm

#endif  // TVM_TARGET_SOURCE_PTX_H_
https://github.com/zk-ml/tachikoma
src/target/source/source_module.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file source_module.h
 * \brief Source code module
 */
#ifndef TVM_TARGET_SOURCE_SOURCE_MODULE_H_
#define TVM_TARGET_SOURCE_SOURCE_MODULE_H_

#include <tvm/relay/runtime.h>
#include <tvm/runtime/metadata.h>
#include <tvm/runtime/module.h>
#include <tvm/target/target.h>

#include "../../relay/backend/utils.h"
#include "../../runtime/meta_data.h"

namespace tvm {
namespace codegen {

/*!
 * \brief Wrap the submodules that are to be wrapped in a c-source metadata module for C runtime.
 * \param modules The modules to be wrapped.
 * \param target the target the modules are compiled for.
 * \param runtime the runtime to code generate against
 * \param metadata Compiler-generated metadata exported to runtime.
 * \param aot_metadata If supplied, metadata for the AOTExecutor module.
 * \return The wrapped module.
 */
runtime::Module CreateCSourceCrtMetadataModule(const Array<runtime::Module>& modules, Target target,
                                               relay::Runtime runtime,
                                               relay::backend::ExecutorCodegenMetadata metadata,
                                               runtime::metadata::Metadata aot_metadata);

/*!
 * \brief Create C++-runtime targeted metadata module for "c" backend.
 * \param metadata Compiler-generated metadata.
 */
runtime::Module CreateCSourceCppMetadataModule(runtime::metadata::Metadata metadata);

}  // namespace codegen
}  // namespace tvm

#endif  // TVM_TARGET_SOURCE_SOURCE_MODULE_H_
https://github.com/zk-ml/tachikoma
src/target/spirv/codegen_spirv.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file codegen_spirv.h
 * \brief SPIR-V code generator: lowers TIR PrimFuncs to Vulkan SPIR-V shaders.
 */
#ifndef TVM_TARGET_SPIRV_CODEGEN_SPIRV_H_
#define TVM_TARGET_SPIRV_CODEGEN_SPIRV_H_

#include <tvm/arith/analyzer.h>
#include <tvm/target/target.h>
#include <tvm/tir/analysis.h>
#include <tvm/tir/expr.h>
#include <tvm/tir/function.h>
#include <tvm/tir/stmt_functor.h>

#include <memory>
#include <string>
#include <unordered_map>
#include <vector>

#include "../../runtime/thread_storage_scope.h"
#include "../../runtime/vulkan/vulkan_shader.h"
#include "ir_builder.h"
#include "spirv_support.h"

namespace tvm {
namespace codegen {

using namespace tir;

/*!
 * \brief Code generator into SPIRV
 *
 * Walks a TIR PrimFunc with the Expr/Stmt functor interfaces and emits
 * SPIR-V through a spirv::IRBuilder, producing a runtime::VulkanShader.
 */
class CodeGenSPIRV : public ExprFunctor<spirv::Value(const PrimExpr&)>,
                     public StmtFunctor<void(const Stmt&)> {
 public:
  /*!
   * \brief Initialize the codegen based on a specific target.
   *
   * \param target The target for which code should be generated.  The
   * device_type for this target must be kDLVulkan.
   */
  CodeGenSPIRV(Target target);
  /*!
   * \brief Compile and add function f to the current module.
   * \param f The function to be added.
   * \param name The name of the target function.
   * \return The final spirv module.
   */
  virtual runtime::VulkanShader BuildFunction(const PrimFunc& f, const std::string& name);
  /*!
   * \brief Create Value for expression e
   * \param e The expression to be created value for.
   * \return created value.
   */
  spirv::Value MakeValue(const PrimExpr& e) { return VisitExpr(e); }
  // override codegen: one visitor per TIR expression node kind.
  spirv::Value VisitExpr_(const VarNode* op) override;
  spirv::Value VisitExpr_(const CastNode* op) override;
  spirv::Value VisitExpr_(const IntImmNode* op) override;
  spirv::Value VisitExpr_(const FloatImmNode* op) override;
  spirv::Value VisitExpr_(const StringImmNode* op) override;
  spirv::Value VisitExpr_(const AddNode* op) override;
  spirv::Value VisitExpr_(const SubNode* op) override;
  spirv::Value VisitExpr_(const MulNode* op) override;
  spirv::Value VisitExpr_(const DivNode* op) override;
  spirv::Value VisitExpr_(const ModNode* op) override;
  spirv::Value VisitExpr_(const MinNode* op) override;
  spirv::Value VisitExpr_(const MaxNode* op) override;
  spirv::Value VisitExpr_(const LTNode* op) override;
  spirv::Value VisitExpr_(const LENode* op) override;
  spirv::Value VisitExpr_(const GTNode* op) override;
  spirv::Value VisitExpr_(const GENode* op) override;
  spirv::Value VisitExpr_(const EQNode* op) override;
  spirv::Value VisitExpr_(const NENode* op) override;
  spirv::Value VisitExpr_(const AndNode* op) override;
  spirv::Value VisitExpr_(const OrNode* op) override;
  spirv::Value VisitExpr_(const NotNode* op) override;
  spirv::Value VisitExpr_(const SelectNode* op) override;
  spirv::Value VisitExpr_(const LetNode* op) override;
  spirv::Value VisitExpr_(const CallNode* op) override;
  spirv::Value VisitExpr_(const RampNode* op) override;
  spirv::Value VisitExpr_(const BroadcastNode* op) override;
  spirv::Value VisitExpr_(const BufferLoadNode* op) override;
  // stmt: one visitor per TIR statement node kind.
  void VisitStmt_(const BufferStoreNode* op) override;
  void VisitStmt_(const ForNode* op) override;
  void VisitStmt_(const WhileNode* op) override;
  void VisitStmt_(const IfThenElseNode* op) override;
  void VisitStmt_(const AllocateNode* op) override;
  void VisitStmt_(const AttrStmtNode* op) override;
  void VisitStmt_(const AssertStmtNode* op) override;
  void VisitStmt_(const LetStmtNode* op) override;
  void VisitStmt_(const SeqStmtNode* op) override;
  void VisitStmt_(const EvaluateNode* op) override;

 protected:
  /*! \brief Storage information for a buffer */
  struct StorageInfo {
    /*! \brief The name of the tir::Var for the buffer
     *
     * Used for error messages.
     */
    std::string name_hint;
    /*! \brief Whether it is volatile */
    bool is_volatile{false};
    /*! \brief Whether the element type of the buffer is known.
     *
     * This value is determined based on the type_annotation of the
     * buffer variable (AllocateNode) or of the parameter (shader
     * arguments).
     */
    bool element_type_known{false};
    /*! \brief The known element type of the buffer.
     *
     * This value is determined based on the type_annotation of the
     * buffer variable (AllocateNode) or of the parameter (shader
     * arguments).
     */
    DataType element_type{DataType()};

    /* \brief Check that the access type matches the known type
     *
     * Asserts that the type given is the same as the type previously
     * stored in this array.
     *
     * @param type The data type being stored/loaded in the buffer
     *
     * @param index_lanes The number of lanes of the index.  The
     * number of lanes in the value being stored/loaded should be the
     * product of the number of lanes of the buffer element type and
     * the number of lanes of the index.
     */
    void CheckContentType(DataType type, int index_lanes = 1) {
      ICHECK(element_type_known) << "Cannot check element type of buffer " << name_hint
                                 << " no previous element type defined";
      DataType expected_type = element_type.with_lanes(index_lanes * element_type.lanes());
      ICHECK_EQ(type, expected_type) << "Attempted to access buffer " << name_hint
                                     << " as element type " << type << " using an index of size "
                                     << index_lanes << " when the element type is " << element_type;
    }

    // Update content type if it hasn't been updated.
    // ICHECKs that no element type was previously recorded; the type may
    // only be set once per buffer.
    void SetContentType(DataType type, std::string name_hint) {
      ICHECK(!element_type_known) << "Cannot set element type of buffer " << name_hint
                                  << " a second time.";
      this->element_type = type;
      this->name_hint = name_hint;
      element_type_known = true;
    }
  };
  // Reset the state so it works for a new function.
  void InitFuncState();
  // Get the thread index
  spirv::Value GetThreadIndex(const IterVar& iv, const PrimExpr& extent);

  spirv::Value CreateStorageSync(const CallNode* op);
  // Apply f to each scalar lane of the vector expression e.
  void Scalarize(const PrimExpr& e, std::function<void(int i, spirv::Value v)> f);

  // SPIRV-related capabilities of the target
  SPIRVSupport spirv_support_;

  // The builder
  std::unique_ptr<spirv::IRBuilder> builder_;
  // Work group size of three
  uint32_t workgroup_size_[3];
  // Likely branch
  uint32_t weight_likely_branch_{128};

  /* The data type used for the backing array for booleans.
   *
   * Currently matched to the data type used in Buffer::vstore and
   * Buffer::vload.  In the future, this should be the smallest
   * integer type supported by the device, as not all Vulkan
   * implementations support int8.
   */
  DataType boolean_storage_type_{DataType::Int(8)};

  // the storage scope of allocation
  std::unordered_map<const VarNode*, StorageInfo> storage_info_;
  // The definition of local variable.
  std::unordered_map<const VarNode*, spirv::Value> var_map_;
  // The analyzer.
  std::unique_ptr<arith::Analyzer> analyzer_;
  // deep comparison of PrimExpr
  ExprDeepEqual deep_equal_;
  // binding of let variables. Enables duplicate var defs that map to same value
  std::unordered_map<Var, const LetNode*, ObjectPtrHash, ObjectPtrEqual> let_binding_;
  // Running total of the number of bytes of shared memory used.
  // Checked against the max_shared_memory_per_group
  size_t shared_memory_bytes_used_{0};
};

}  // namespace codegen
}  // namespace tvm

#endif  // TVM_TARGET_SPIRV_CODEGEN_SPIRV_H_
https://github.com/zk-ml/tachikoma
src/target/spirv/ir_builder.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file ir_builder.h
 * \brief Utility for building SPIRV code block
 */
#ifndef TVM_TARGET_SPIRV_IR_BUILDER_H_
#define TVM_TARGET_SPIRV_IR_BUILDER_H_

#include <tvm/runtime/packed_func.h>
#include <tvm/tir/expr.h>

// clang-format off
#include <algorithm>
#include <map>
#include <set>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include <tuple>
#include <spirv.hpp>
// clang-format on

#include "spirv_support.h"

namespace tvm {
namespace codegen {
namespace spirv {

/*! \brief Represent the SPIRV Type */
struct SType {
  /*! \brief The Id to represent type */
  uint32_t id{0};
  /*! \brief corresponding TVM type */
  tvm::DataType type;
  /*! \brief content type id if it is a pointer/struct-array class */
  uint32_t element_type_id{0};
  /*! \brief The storage class, if it is a pointer */
  spv::StorageClass storage_class{spv::StorageClassMax};
};

// Classification of how a Value was created; used by the builder to
// decide how the value may be accessed (e.g. pointer kinds).
enum ValueKind {
  kNormal,
  kConstant,
  kVectorPtr,
  kStructArrayPtr,
  kPushConstantPtr,
  kFunction,
  kExtInst,
  kUniformPtr
};

/*! \brief Represent the SPIRV Value */
struct Value {
  /*! \brief The Id to represent value */
  uint32_t id{0};
  /*! \brief The data type */
  SType stype;
  /*! \brief additional flags about the value */
  ValueKind flag{kNormal};
};

/*! \brief Represent the SPIRV Label */
struct Label {
  /*! \brief The Id to represent label */
  uint32_t id{0};
};

/*!
 * \brief A SPIRV instruction,
 *  can be used as handle to modify its content later
 */
class Instr {
 public:
  /*! \return the word count */
  uint32_t WordCount() const { return word_count_; }
  /*!
   * \brief Access idx-th word of instruction
   * \param idx The index
   * \return reference to idx-th word.
   */
  uint32_t& operator[](uint32_t idx) {
    ICHECK_LT(idx, word_count_);
    return (*data_)[begin_ + idx];
  }

 private:
  friend class InstrBuilder;
  /*!
   * \brief the data that backs this instruction
   *  Have to use vector reference because
   *  vector can change.
   */
  std::vector<uint32_t>* data_{nullptr};
  /*! \brief begin location of instruction */
  uint32_t begin_{0};
  /*! \brief word count */
  uint32_t word_count_{0};
};

/*! \brief Representation of phi value */
struct PhiValue : public Value {
  /*! \brief The corresponding instr */
  Instr instr;
  /*!
   * \brief Add incoming information of a PhiValue
   * \param index The location of Phi
   * \param value The value to come
   * \param parent The parent label.
   */
  void SetIncoming(uint32_t index, const Value& value, const Label& parent) {
    ICHECK_EQ(this->stype.id, value.stype.id);
    // OpPhi layout: words 0-2 are opcode/result-type/result-id, then
    // (value, parent-label) pairs follow.
    instr[3 + index * 2] = value.id;
    instr[3 + index * 2 + 1] = parent.id;
  }
};

/*!
 * \brief Helper class to build SPIRV instruction.
 *
 * \code
 *
 *   std::vector<uint32_t> func_seg_vec_;
 *   InstrBuilder ib;
 *
 *   // construct and append to the end of func_seg_vec_;
 *   ib.Begin(spv::OpIAdd)
 *     .Add(result).Add(v1).Add(v2)
 *     .Commit(&func_seg_vec_);
 *
 * \endcode
 */
class InstrBuilder {
 public:
  /*!
   * \brief Begin construction of instruction.
   * \param op The op code
   * \return reference to self.
   */
  InstrBuilder& Begin(spv::Op op) {  // NOLINT(*);
    // finish previous build
    ICHECK_EQ(data_.size(), 0U);
    op_ = op;
    // Word 0 is reserved for the (op | word_count) header, filled in by Commit.
    data_.push_back(0);
    return *this;
  }
  /*!
   * \brief Add v to end of instruction.
   * \param v The value to be appended to the instruction.
   * \return reference to self.
   */
  InstrBuilder& Add(const Value& v) {
    data_.push_back(v.id);
    return *this;
  }
  /*!
   * \brief Add v to end of instruction.
   * \param v The type to be appended to the instruction.
   * \return reference to self.
   */
  InstrBuilder& Add(const SType& v) {
    data_.push_back(v.id);
    return *this;
  }
  /*!
   * \brief Add v to end of instruction.
   * \param v The label to be appended to the instruction.
   * \return reference to self.
   */
  InstrBuilder& Add(const Label& v) {
    data_.push_back(v.id);
    return *this;
  }
  /*!
   * \brief Add a word to end of instruction.
   * \param v The value to be added.
   * \return reference to self.
   */
  InstrBuilder& Add(const uint32_t& v) {
    data_.push_back(v);
    return *this;
  }
  /*!
   * \brief Add string literal of end of instruction.
   * \param v The string literal to be appended.
   * \return reference to self.
   */
  InstrBuilder& Add(const std::string& v) {
    const uint32_t kWordSize = sizeof(uint32_t);
    // +kWordSize (rather than +kWordSize-1) guarantees at least one zero
    // byte after the string, as SPIR-V literal strings are NUL-terminated.
    uint32_t nwords = (static_cast<uint32_t>(v.length()) + kWordSize) / kWordSize;
    size_t begin = data_.size();
    data_.resize(begin + nwords, 0U);
    std::copy(v.begin(), v.end(), reinterpret_cast<char*>(&data_[begin]));
    return *this;
  }
  /*!
   * \brief add sequence of values to instruction
   * \param args The instruction sequence
   * \return reference to self.
   * \tparams Args The positional arguments
   */
  template <typename... Args>
  InstrBuilder& AddSeq(Args&&... args) {
    AddSeqHelper helper;
    helper.builder = this;
    runtime::detail::for_each(helper, std::forward<Args>(args)...);
    return *this;
  }
  /*!
   * \brief Finish build, commit the current
   *   instruction to the end of seg.
   *
   * \param seg The code segment to commit to
   * \return The result instruction.
   */
  Instr Commit(std::vector<uint32_t>* seg) {
    Instr ret;
    ret.data_ = seg;
    ret.begin_ = seg->size();
    ret.word_count_ = static_cast<uint32_t>(data_.size());
    // Patch word 0 with the standard SPIR-V header: opcode in the low
    // half-word, total word count in the high half-word.
    data_[0] = op_ | (ret.word_count_ << spv::WordCountShift);
    seg->insert(seg->end(), data_.begin(), data_.end());
    data_.clear();
    return ret;
  }

 private:
  // current op code.
  spv::Op op_;
  // The internal data to store code
  std::vector<uint32_t> data_;
  // helper class to support variadic arguments
  struct AddSeqHelper {
    // The reference to builder
    InstrBuilder* builder;
    // invoke function
    template <typename T>
    void operator()(size_t, const T& v) const {
      builder->Add(v);
    }
  };
};

/*!
 * \brief Builder to build up a single SPIR-V module
 *
 * This is a thin wrapper to build SPIRV binary.
 * SPIRV adopts structure control-flow.
 * We can build the code by always appending to the end of the
 * binary code block and revisit some
 *
 * This IRBuilder did not introduce concept of BasicBlock.
 * instead instructions are append to end of each segment.
 */
class IRBuilder {
 public:
  /*!
   * \brief Initialize the codegen based on a specific feature set.
   *
   * \param support The features in SPIRV that are supported by the
   * target device.
   */
  explicit IRBuilder(const SPIRVSupport& support);

  /*! \brief Initialize header */
  void InitHeader();
  /*! \brief Initialize the predefined contents */
  void InitPreDefs();
  /*!
   * \brief Import additional extension libraries.
   * \param name The name of the library.
   * \return The finalized binary instruction.
   */
  Value ExtInstImport(const std::string& name) {
    // Memoized: re-importing the same extended instruction set returns
    // the previously created value.
    auto it = ext_inst_tbl_.find(name);
    if (it != ext_inst_tbl_.end()) {
      return it->second;
    }
    Value val = NewValue(SType(), kExtInst);
    ib_.Begin(spv::OpExtInstImport).AddSeq(val, name).Commit(&extended_instruction_section_);
    ext_inst_tbl_[name] = val;
    return val;
  }
  /*!
   * \brief Get the final binary built from the builder
   * \return The finalized binary instruction.
   */
  std::vector<uint32_t> Finalize();
  /*!
   * \brief Create new label
   * \return The created new label
   */
  Label NewLabel() {
    Label label;
    label.id = id_counter_++;
    return label;
  }
  /*!
   * \brief Start a new block with given label
   * \param label The label we use.
   */
  void StartLabel(Label label) {
    MakeInst(spv::OpLabel, label);
    curr_label_ = label;
  }
  /*! \return The current label */
  Label CurrentLabel() const { return curr_label_; }
  /*!
   * \brief Add code to debug segment.
   * \param op The operator
   * \param args The instruction sequence
   * \tparams Args The positional arguments
   */
  template <typename... Args>
  void Debug(spv::Op op, Args&&... args) {
    ib_.Begin(op).AddSeq(std::forward<Args>(args)...).Commit(&debug_);
  }
  /*!
   * \brief Set the name of a value or label
   * \param obj The object to be named
   * \param name The name of the object
   * \tparams Obj The type of the object being named.  Typically a Label or Value.
   */
  template <typename Obj>
  void SetName(Obj&& obj, const std::string& name) {
    Debug(spv::OpName, std::forward<Obj>(obj), name);
  }
  /*!
   * \brief Add Execution mode to a function.
   * \param func The function value
   * \param args The instruction sequence
   * \tparams Args The positional arguments
   */
  template <typename... Args>
  void ExecutionMode(Value func, Args&&... args) {
    ib_.Begin(spv::OpExecutionMode).AddSeq(func, std::forward<Args>(args)...).Commit(&exec_mode_);
  }
  /*!
   * \brief Add code to decorate segment.
   * \param op The operator
   * \param args The instruction sequence
   * \tparams Args The positional arguments
   */
  template <typename... Args>
  void Decorate(spv::Op op, Args&&... args) {
    ib_.Begin(op).AddSeq(std::forward<Args>(args)...).Commit(&decorate_);
  }
  /*!
   * \brief Add code to global segment.
   * \param op The operator
   * \param args The instruction sequence
   * \tparams Args The positional arguments
   */
  template <typename... Args>
  void DeclareGlobal(spv::Op op, Args&&... args) {
    ib_.Begin(op).AddSeq(std::forward<Args>(args)...).Commit(&global_);
  }
  /*!
   * \brief Make a new instruction and append it to end of function segment.
   *
   * \param op The operator
   * \param args The instruction sequence
   * \return The result SSA value.
   * \tparams Args The positional arguments
   */
  template <typename... Args>
  Instr MakeInst(spv::Op op, Args&&... args) {
    return ib_.Begin(op).AddSeq(std::forward<Args>(args)...).Commit(&function_);
  }
  /*!
   * \brief Make a new SSA value,
   *
   * \param op The operator.
   * \param out_type The result type.
   * \param args The instruction sequence
   * \return The result SSA value.
   * \tparams Args The positional arguments
   */
  template <typename... Args>
  Value MakeValue(spv::Op op, const SType& out_type, Args&&... args) {
    Value val = NewValue(out_type, kNormal);
    MakeInst(op, out_type, val, std::forward<Args>(args)...);
    return val;
  }
  /*!
   * \brief Make a phi value.
   *
   * \param out_type The output data type.
   * \param num_incoming number of incoming blocks.
   * \return The result Phi value.
   */
  PhiValue MakePhi(const SType& out_type, uint32_t num_incoming);
  /*!
   * \brief Create a GLSL450 call
   *
   * \param ret_type The result type.
   * \param inst_id The instance id of the function.
   * \param args The arguments
   * \return The result value.
   */
  Value CallGLSL450(const SType& ret_type, uint32_t inst_id, const std::vector<Value>& args);
  /*!
   * \brief Create a SPIRV_KHR_integer_dot_product call
   *
   * \param ret_type The result type.
   * \param args The arguments
   * \return The result value.
   */
  Value CallKHRIntegerDotProduct(const SType& ret_type, const std::vector<Value>& args,
                                 const DataType& dtype);
  /*!
   * \brief Build vector by concatenating components
   *
   * \param vec The vector component
   * \tparams Args The positional arguments
   */
  Value Concat(const std::vector<Value>& vec);
  /*!
   * \brief Get the spirv type for a given tvm data type.
   * \param dtype The data type.
   * \return The corresponding spirv type.
   */
  SType GetSType(const tvm::DataType& dtype);
  /*!
   * \brief Get the pointer type that points to value_type
   * \param value_type.
   * \param storage_class The storage class
   * \return The corresponding spirv type.
   */
  SType GetPointerType(const SType& value_type, spv::StorageClass storage_class);
  /*!
   * \brief Get a struct{ value_type[num_elems] } type.
   * \param value_type the content value type.
   * \param num_elems number of elements in array
   *   num_elems = 0 means runtime array with BufferBlock Decoration
   * \param interface_block if this array type for interface blocks(input, output, uniform,
   *   storage buffer).
   *
   * \return The corresponding spirv type.
   */
  SType GetStructArrayType(const SType& value_type, uint32_t num_elems, bool interface_block);
  /*!
   * \brief Get a struct array access with a given index.
   * \param ptr_type The pointer type.
   * \param buffer The buffer ptr to struct array
   * \param index The array index.
   */
  Value StructArrayAccess(const SType& ptr_type, Value buffer, Value index);
  /*!
   * \brief Create a cast that cast value to dst_type
   * \param dst_type The target type.
   * \param value the source value.
   * \return The result value
   */
  Value Cast(const SType& dst_type, Value value);
  /*
   * \brief Create a const integer.
   * \param dtype The content data type.
   * \param value The data value.
   */
  Value IntImm(const SType& dtype, int64_t value);
  /*
   * \brief Create a const unsigned integer.
   * \param dtype The content data type.
   * \param value The data value.
   */
  Value UIntImm(const SType& dtype, uint64_t value);
  /*
   * \brief Create a const float.
   * \param dtype The content data type.
   * \param value The data value.
   */
  Value FloatImm(const SType& dtype, double value);
  /*
   * \brief Declare buffer argument of function
   *
   * \param arg_type The type of argument.
   * \param descriptor_set The descriptor set we want to use.
   * \param binding The binding location in descriptor set.
   * \param The argument type.
   */
  Value BufferArgument(const SType& value_type, uint32_t descriptor_set, uint32_t binding);
  /*!
   * \brief Declare POD arguments through push constants.
   *
   * \note Only call this function once!
   * \param value_types The values in the push constant
   * \return reference to self.
   */
  Value DeclarePushConstant(const std::vector<SType>& value_types);
  /*!
   * \brief Get i-th push constant
   * \param v_type The value type
   * \param index The push constant index
   * \return the value of push constant
   */
  Value GetPushConstant(Value ptr_push_const, const SType& v_type, uint32_t index);
  /*!
   * \brief Declare POD arguments through uniform buffer.
   *
   * \note Only call this function once!
   * \param value_types The values in the uniform buffer
   * \param descriptor_set The descriptor set we want to use
   * \param binding The binding location in descriptor set
   * \return reference to self.
   */
  Value DeclareUniformBuffer(const std::vector<SType>& value_types, uint32_t descriptor_set,
                             uint32_t binding);
  /*!
   * \brief Get i-th uniform constant
   * \param v_type The value type
   * \param index The uniform index
   * \return the value of uniform constant
   */
  Value GetUniform(Value ptr_ubo, const SType& v_type, uint32_t index);
  /*!
   * \brief Declare a new function
   * \return The created function ID.
   */
  Value NewFunction();
  /*!
   * \brief Declare the entry point for a kernel function.  This should be
   * invoked after building the function so the builder is aware of which
   * variables to declare as part of the function's interface.
   * \param func The previously declared function.
   * \param name Name of the entry point.
   */
  void CommitKernelFunction(const Value& func, const std::string& name);
  /*!
   * \brief Start function scope.
   * \param func function to be started.
   */
  void StartFunction(const Value& func);
  /*!
   * \brief Set the local size of the function
   * \param func function of interest
   * \param local_size The local workgroup_size
   */
  void SetLocalSize(const Value& func, uint32_t local_size[3]);
  /*
   * \brief Allocate space
   * \param value_type The content value type
   * \param num_elems Number of elements to allocate.
   * \param storage_class The storage class we want to store to.
   */
  Value Allocate(const SType& value_type, uint32_t num_elems, spv::StorageClass storage_class);
  /*
   * \brief Get the i-th workgroup id.
   * \return The value representing the workgroup id.
   */
  Value GetWorkgroupID(uint32_t dim_index);
  /*
   * \brief Get the i-th local id.
   * \return The value representing the local id.
   */
  Value GetLocalID(uint32_t dim_index);
  // Expressions
  Value Add(Value a, Value b);
  Value Sub(Value a, Value b);
  Value Mul(Value a, Value b);
  Value Div(Value a, Value b);
  Value Mod(Value a, Value b);
  Value EQ(Value a, Value b);
  Value NE(Value a, Value b);
  Value LT(Value a, Value b);
  Value LE(Value a, Value b);
  Value GT(Value a, Value b);
  Value GE(Value a, Value b);
  Value Select(Value cond, Value a, Value b);

 private:
  /*!
   * \brief Create new value
   * \return The created new label
   */
  Value NewValue(const SType& stype, ValueKind flag) {
    Value val;
    val.id = id_counter_++;
    val.stype = stype;
    val.flag = flag;
    return val;
  }

  /*! \brief Get a built-in value provided by SPIR-V
   *
   * \param built_in The SPIR-V built-in array to access.  For
   * example, spv::BuiltInLocalInvocationId to access the thread
   * id.
   *
   * \param index The index of the built-in array to access.
   *
   * \param name The name of the value being accessed.  For
   * example, "threadIdx.x".  This is for debug purposes, and is
   * used to tag the variable with OpName.
   */
  Value GetBuiltInValue(spv::BuiltIn built_in, uint32_t index, const std::string& name = "");

  /*!
   * \brief The common function to declare push constants or uniform buffer
   * \param value_types The values in the push constants or uniform buffer
   * \param storage_class An enum defined by SPIR-V indicating push constant or uniform
   * \param kind An enum indicating push constant or uniform
   * \return The created new label
   */
  Value DeclareStorageVariable(const std::vector<SType>& value_types,
                               spv::StorageClass storage_class, ValueKind kind);

  /*!
   * \brief The common function to decorate storage buffer or uniform buffer arguments.
   * \param val The Value to be decorated.
   * \param descriptor_set The index of the descriptor set containing the buffer's descriptor
   * \param binding The index of the buffer's descriptor within the descriptor set
   */
  void DecorateBufferArgument(Value val, uint32_t descriptor_set, uint32_t binding);

  // get constant given value encoded in uint64_t
  Value GetConst_(const SType& dtype, const uint64_t* pvalue);
  // declare type
  SType DeclareType(const DataType& dtype);
  // Declare the appropriate SPIR-V capabilities and extensions to use
  // this data type.
  void AddCapabilityFor(const DataType& dtype);

  /*! \brief SPIRV-related capabilities of the target
   *
   * This SPIRVSupport object is owned by the same CodeGenSPIRV
   * object that owns the IRBuilder.  Therefore, safe to use a
   * reference as the CodeGenSPIRV will live longer.
   */
  const SPIRVSupport& spirv_support_;

  /*! \brief internal instruction builder  */
  InstrBuilder ib_;
  /*! \brief Current label */
  Label curr_label_;
  /*! \brief The current maximum id */
  uint32_t id_counter_{1};
  /*! \brief glsl 450 extension */
  Value ext_glsl450_;
  /*! \brief Special cache int32, fp32, void*/
  SType t_bool_, t_int32_, t_uint32_, t_fp32_, t_void_, t_void_func_;
  /*! \brief quick cache for const one i32 */
  Value const_i32_zero_;

  /*! \brief The cached values for built-in arrays
   *
   * Maps from a tuple of spv::BuiltIn enum to the Value containing
   * that built-in array.  For example,
   * ``built_in_tbl_[spv::BuiltInLocalInvocationId]`` is the array
   * of invocation ids, equivalent to an array of ``threadIdx.x``,
   * ``threadIdx.y``, and ``threadIdx.z`` in CUDA.
   *
   * These are declared in the global section of the shader.
   */
  std::unordered_map<spv::BuiltIn, Value> built_in_tbl_;

  /*! \brief The cached values for built-in values
   *
   * Maps from a tuple of (spv::BuiltIn enum, index) to the value
   * stored at that index of the built-in array.  For example,
   * ``built_in_tbl_[{spv::BuiltInLocalInvocationId, 0}]`` is the
   * first index of the invocation id, equivalent to
   * ``threadIdx.x`` in CUDA.
   *
   * These are declared in the first block of the function, in the
   * ``function_scope_vars_`` section.
   */
  std::map<std::tuple<spv::BuiltIn, uint32_t>, Value> built_in_values_tbl_;

  /*! \brief whether push constant is defined */
  Value push_const_;
  /*! \brief map from type code to the type */
  std::unordered_map<uint32_t, SType> pod_type_tbl_;
  /*! \brief map from value to array type */
  std::map<std::tuple<uint32_t, uint32_t, bool>, SType> struct_array_type_tbl_;
  /*! \brief map from value to its pointer type */
  std::map<std::pair<uint32_t, spv::StorageClass>, SType> pointer_type_tbl_;
  /*! \brief map from constant int to its value */
  std::map<std::pair<uint32_t, uint64_t>, Value> const_tbl_;
  /*! \brief map from name of a ExtInstImport to its value */
  std::map<std::string, Value> ext_inst_tbl_;

  /*! \brief Header segment
   *
   * 5 words long, described in "First Words of Physical Layout"
   * section of SPIR-V documentation.
   */
  std::vector<uint32_t> header_;
  /*! \brief SPIR-V capabilities used by this module. */
  std::set<spv::Capability> capabilities_used_;
  /*! \brief SPIR-V extensions used by this module. */
  std::set<std::string> extensions_used_;
  /*! \brief extended instruction set imports (OpExtInstImport) segment */
  std::vector<uint32_t> extended_instruction_section_;
  /*! \brief entry point segment */
  std::vector<uint32_t> entry_;
  /*! \brief execution mode (OpExecutionMode) segment */
  std::vector<uint32_t> exec_mode_;
  /*! \brief Debug segment */
  std::vector<uint32_t> debug_;
  /*! \brief Annotation segment */
  std::vector<uint32_t> decorate_;
  /*! \brief Global segment: types, variables, types */
  std::vector<uint32_t> global_;
  /*! \brief Function header segment
   *
   * Contains the start of function (spv::OpFunction), first label
   * (spv::OpLabel), and all array allocations (spv::OpVariable).
   */
  std::vector<uint32_t> func_header_;
  /*! \brief Function-scope variable declarations
   *
   * Contains variable declarations that should be accessible
   * throughout the entire kernel (e.g. threadIdx.x).  This must be
   * separate from func_header_, because the function-level
   * spv::OpVariable declarations must come first in the first block
   * of a function.
   */
  std::vector<uint32_t> function_scope_vars_;
  /*! \brief Function segment */
  std::vector<uint32_t> function_;
};

}  // namespace spirv
}  // namespace codegen
}  // namespace tvm

#endif  // TVM_TARGET_SPIRV_IR_BUILDER_H_
https://github.com/zk-ml/tachikoma
src/target/spirv/spirv_support.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file spirv_support * * \brief Utility for determining which spirv capabilities a TVM * target supports. */ #ifndef TVM_TARGET_SPIRV_SPIRV_SUPPORT_H_ #define TVM_TARGET_SPIRV_SPIRV_SUPPORT_H_ #include <tvm/target/target.h> #include <vulkan/vulkan_core.h> namespace tvm { namespace codegen { /*! \brief Represents which support a Vulkan driver has that are relevant to codegen */ struct SPIRVSupport { /*! \brief Determine spirv capabilities from a vulkan target. */ explicit SPIRVSupport(Target target); /*! \brief The Vulkan API version supported by the device. * * Vulkan struct: VkPhysicalDeviceProperties * Device property: apiVersion * * If VK_KHR_driver_properties is present, will also check the * driver conformance version. If the version advertised does not * pass the Vulkan conformance test, vulkan_api_version will be the * latest Vulkan version that does pass the conformance test * instead. */ uint32_t vulkan_api_version{VK_MAKE_VERSION(1, 0, 0)}; /*! 
* \brief The supported subgroup operations * * Vulkan extension: VK_KHR_driver_properties * Minimum vulkan version: 1.1 * Vulkan struct: VkPhysicalDeviceSubgroupProperties * Device property: supportedOperations * * Requires vulkan 1.1 or higher to use. If the * VK_KHR_driver_properties extension is not present in order to * query this value, or if the driver does not support vulkan 1.0, * then this value will be set to 0. * */ uint32_t supported_subgroup_operations{0}; /*! * \brief The maximum size (bytes) of push constants * * Vulkan struct: VkPhysicalDeviceLimits * Device property: maxPushConstantsSize * * The maxPushConstantsSize from VkPhysicalDeviceLimits. * Default value is from Vulkan spec, "Required Limits" table. * Implementations may have a larger limit. */ uint32_t max_push_constants_size{128}; /*! * \brief The maximum size (bytes) of a uniform buffer. * * Vulkan struct: VkPhysicalDeviceLimits * Device property: maxUniformBufferRange * * Default value is from Vulkan spec, "Required Limits" table. * Implementations may have a larger limit. */ uint32_t max_uniform_buffer_range{16384}; /*! * \brief The maximum size (bytes) of a storage buffer. * * Vulkan struct: VkPhysicalDeviceLimits * Device property: maxStorageBufferRange * * Default value is from Vulkan spec, "Required Limits" table. * Implementations may have a larger limit. */ uint32_t max_storage_buffer_range{1 << 27}; /*! * \brief The maximum amount of shared memory usable by a shader * * Vulkan extension: N/A * Vulkan struct: VkPhysicalDeviceLimits * Device Property: maxComputeSharedMemorySize * SPV Extension name: N/A * SPV Capability: N/A * * The maximum amount of shared memory (Workgroup scope) that may be * allocated by a shader. Default value is from Vulkan spec, * "Required Limits" table. Implementations may have a larger * limit. */ uint32_t max_shared_memory_per_block{16384}; /*! * \brief The maximum number of storage buffers accessible by a single shader. 
* * Vulkan struct: VkPhysicalDeviceLimits * Device property: maxPerStageDescriptorStorageBuffers * * Default value is from Vulkan spec, "Required Limits" table. * Implementations may have a larger limit, frequently much larger. * (e.g. GTX 1080 has max of 2^20) */ uint32_t max_per_stage_descriptor_storage_buffers{4}; /*! * \brief Whether the driver supports StorageClassStorageBuffer * * Vulkan extension: VK_KHR_storage_buffer_storage_class * Device property: N/A * SPV Extension: SPV_KHR_storage_buffer_storage_class * SPV Capability: N/A * * If support is present, access push constants and UBO as * block-decorated StorageClassStorageBuffer. Otherwise, access as * buffer-block-decorated StorageClassUniform. SPIRV 1.3 deprecated * BufferBlock, so this should always be true drivers that support * SPIRV 1.3. * */ bool supports_storage_buffer_storage_class{false}; /*! * \brief Whether the driver supports reading/writing to 16-bit values * * Vulkan extension: VK_KHR_8bit_storage * Vulkan struct: VkPhysicalDevice8BitStorageFeaturesKHR * Device property: storageBuffer8BitAccess * SPV extension: SPV_KHR_8bit_storage * SPV Capability: StorageBuffer8BitAccess * * If support is present, can read/write 8-bit values, but doesn't * necessarily provide 8-bit operations. * * If support is present, will declare StorageBuffer8BitAccess as * needed. If support is not present, will throw error if a * PrimFunc calls for this functionality. Unlike * StorageUniform16BitAccess, no fallback to * "StorageUniformBufferBlock8" is needed, as VK_KHR_8bit_storage * requires VK_KHR_storage_buffer_storage_class to also be present. * */ bool supports_storage_buffer_8bit_access{false}; /*! 
* \brief Whether the driver supports reading/writing to 16-bit values * * Vulkan extension: VK_KHR_16bit_storage * Vulkan struct: VkPhysicalDevice16BitStorageFeaturesKHR * Device property: storageBuffer16BitAccess * SPV extension: SPV_KHR_16bit_storage * SPV Capability: StorageBuffer16BitAccess, StorageUniformBufferBlock16 * * If support is present, can read/write 16-bit values, but doesn't * necessarily provide 16-bit operations. * * If support is present, will declare either * StorageBuffer16BitAccess or StorageUniformBufferBlock16 as * needed, selecting based on the value of * supports_StorageBufferStorageClass. If support is not present, * will throw error if a PrimFunc calls for this functionality. */ bool supports_storage_buffer_16bit_access{false}; /*! * \brief Whether the driver supports operations involving 16-bit floats * * Vulkan extension: VK_KHR_shader_float16_int8 * Vulkan struct: VkPhysicalDeviceShaderFloat16Int8FeaturesKHR * Device Property: shaderFloat16 * SPV Extension name: N/A * SPV Capability: Float16, Float16Buffer * * If support is present, can perform 16-bit float operations. If * support is not present, codegen will throw exception on * attempting to create a 16-bit float. */ bool supports_float16{false}; /*! * \brief Whether the driver supports operations involving 16-bit floats * * Vulkan extension: N/A * Vulkan struct: VkPhysicalDeviceFeatures * Device Property: shaderFloat64 * SPV Extension name: N/A * SPV Capability: Float64 * * If support is present, can perform 64-bit float operations. If * support is not present, codegen will throw exception on * attempting to create a 64-bit float. */ bool supports_float64{false}; /*! * \brief Whether the driver supports operations involving 8-bit ints * * Vulkan extension: VK_KHR_shader_float16_int8 * Vulkan struct: VkPhysicalDeviceShaderFloat16Int8FeaturesKHR * Device Property: shaderInt8 * SPV Extension name: N/A * SPV Capability: Int8 * * If support is present, can perform 8-bit int operations. 
If * support is not present, codegen will throw exception on * attempting to create a 8-bit int. */ bool supports_int8{false}; /*! * \brief Whether the driver supports operations involving 8-bit ints * * Vulkan extension: N/A * Vulkan struct: VkPhysicalDeviceFeatures * Device Property: shaderInt16 * SPV Extension name: N/A * SPV Capability: Int16 * * If support is present, can perform 16-bit int operations. If * support is not present, codegen will throw exception on * attempting to create a 16-bit int. */ bool supports_int16{false}; /*! * \brief Whether the driver supports operations involving 64-bit ints * * Vulkan extension: N/A * Vulkan struct: VkPhysicalDeviceFeatures * Device Property: shaderInt64 * SPV Extension name: N/A * SPV Capability: Int64 * * If support is present, can perform 64-bit int operations. If * support is not present, codegen will throw exception on * attempting to create a 64-bit int. */ bool supports_int64{false}; /*! * \brief Whether the driver supports operations involving integer dot product. * * Vulkan extension: VK_KHR_shader_integer_dot_product * SPV Extension name: SPV_KHR_integer_dot_product * SPV Capability: spv::CapabilityDotProductKHR, * spv::CapabilityDotProductInput4x8BitPackedKHR); * * If support is present, can perform integer dot product operations. If * support is not present, codegen will throw exception on * attempting to perform integer dot product. */ bool supports_integer_dot_product{false}; }; } // namespace codegen } // namespace tvm #endif // TVM_TARGET_SPIRV_SPIRV_SUPPORT_H_
https://github.com/zk-ml/tachikoma
src/target/stackvm/codegen_stackvm.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file codegen_stack_vm.h * \brief Codegen into Simple Stack VM. */ #ifndef TVM_TARGET_STACKVM_CODEGEN_STACKVM_H_ #define TVM_TARGET_STACKVM_CODEGEN_STACKVM_H_ #include <tvm/target/codegen.h> #include <tvm/tir/expr.h> #include <tvm/tir/function.h> #include <tvm/tir/op.h> #include <tvm/tir/stmt_functor.h> #include <string> #include <unordered_map> #include <vector> #include "../../runtime/stackvm/stackvm.h" namespace tvm { namespace codegen { using namespace tir; using runtime::StackVM; /*! * \brief A base class to generate a stack VM. * This module is used to generate host wrapper * into device function when only device JIT is available. */ class CodeGenStackVM : public ExprFunctor<void(const PrimExpr&)>, public StmtFunctor<void(const Stmt&)> { public: /*! * \brief Generate a stack VM representing * \param f The function to be compiled * \param device_funcs The extern device functions to be linked. * \note Only call compile once, * create a new codegen object each time. */ StackVM Compile(const PrimFunc& f); /*! \brief Push stmt to generate new code */ void Push(const Stmt& n); /*! \brief Push expr to generate new code */ void Push(const PrimExpr& n) { VisitExpr(n); } /*! 
* \brief Push the opcode to the code. * \param opcode The code to be pushed. */ void PushOp(StackVM::OpCode opcode); /*! * \brief Push the opcode and operand to the code. * \param opcode The opcode. * \param operand The operand to be pushed. * \return operand_index, indicating location of operand */ int64_t PushOp(StackVM::OpCode opcode, int operand); /*! * \brief Set the relative jump offset to be offset. * \param operand_index The indexed returned by PushOp. * \param operand The operand to be set. */ void SetOperand(int64_t operand_index, int64_t operand); /*! \return The current program pointer */ int64_t GetPC() const { return static_cast<int64_t>(vm_.code.size()); } /*! * \brief Get string id in vm * \param key The string to get id. * \return the id of the string. */ int GetStrID(const std::string& key); /*! * \brief Allocate a variable name for a newly defined var. * \param v The variable. * \return the heap index of the var. */ int AllocVarID(const VarNode* v); /*! * \brief Get a variable name. * \param v The variable. * \return the heap index of the var. 
*/ int GetVarID(const VarNode* v) const; // Push binary operator void PushBinary(StackVM::OpCode op_int64, const PrimExpr& a, const PrimExpr& b); // push cast; void PushCast(DataType dst, DataType src); // overloadable functions // expression void VisitExpr_(const VarNode* op) final; void VisitExpr_(const LoadNode* op) final; void VisitExpr_(const BufferLoadNode* op) final; void VisitExpr_(const LetNode* op) final; void VisitExpr_(const CallNode* op) final; void VisitExpr_(const AddNode* op) final; void VisitExpr_(const SubNode* op) final; void VisitExpr_(const MulNode* op) final; void VisitExpr_(const DivNode* op) final; void VisitExpr_(const ModNode* op) final; void VisitExpr_(const MinNode* op) final; void VisitExpr_(const MaxNode* op) final; void VisitExpr_(const EQNode* op) final; void VisitExpr_(const NENode* op) final; void VisitExpr_(const LTNode* op) final; void VisitExpr_(const LENode* op) final; void VisitExpr_(const GTNode* op) final; void VisitExpr_(const GENode* op) final; void VisitExpr_(const AndNode* op) final; void VisitExpr_(const OrNode* op) final; void VisitExpr_(const CastNode* op) final; void VisitExpr_(const NotNode* op) final; void VisitExpr_(const SelectNode* op) final; void VisitExpr_(const RampNode* op) final; void VisitExpr_(const BroadcastNode* op) final; void VisitExpr_(const IntImmNode* op) final; void VisitExpr_(const FloatImmNode* op) final; void VisitExpr_(const StringImmNode* op) final; // statment void VisitStmt_(const LetStmtNode* op) final; void VisitStmt_(const StoreNode* op) final; void VisitStmt_(const BufferStoreNode* op) final; void VisitStmt_(const ForNode* op) final; void VisitStmt_(const IfThenElseNode* op) final; void VisitStmt_(const AllocateNode* op) final; void VisitStmt_(const AttrStmtNode* op) final; void VisitStmt_(const AssertStmtNode* op) final; void VisitStmt_(const EvaluateNode* op) final; void VisitStmt_(const SeqStmtNode* op) final; private: bool debug_{false}; /*! 
\brief The vm to be generated */ StackVM vm_; /*! \brief id of each variable */ std::unordered_map<const VarNode*, int> var_idmap_; /*! \brief id of each string */ std::unordered_map<std::string, int> str_idmap_; /*! \brief id of each global function */ std::unordered_map<std::string, int> extern_fun_idmap_; Op backend_alloc_workspace_op_ = Op::Get("tir.TVMBackendAllocWorkspace"); Op backend_free_workspace_op_ = Op::Get("tir.TVMBackendFreeWorkspace"); }; } // namespace codegen } // namespace tvm #endif // TVM_TARGET_STACKVM_CODEGEN_STACKVM_H_
https://github.com/zk-ml/tachikoma
src/te/autodiff/ad_utils.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file ad_utils.h * \brief Helper utilities to implement auto-differentiation. */ #ifndef TVM_TE_AUTODIFF_AD_UTILS_H_ #define TVM_TE_AUTODIFF_AD_UTILS_H_ #include <tvm/arith/int_solver.h> #include <tvm/te/operation.h> #include <tvm/tir/expr.h> #include <string> #include <unordered_map> #include <utility> #include <vector> namespace tvm { namespace te { /*! * \brief Clone iter vars and return both the new vars and the substitution from old to new. * * \param vars The original iter vars. * \return A pair containing the array of new iter vars and the map from old vars to new ones. */ std::pair<Array<IterVar>, Map<Var, PrimExpr>> CloneIterVars(const Array<IterVar>& vars); /*! * \brief Clone reduction by cloning the axis variables. * \param expr A reduction expr to clone. Non-reduction expressions are left intact. */ PrimExpr CloneReduction(const PrimExpr& expr); /*! * \brief Create a tensor from an expression. The expression may be a reduction, in which * case its body will be correctly duplicated if it is a multi-valued reduction. * * \param expr The expr which will be the tensor's body. * \param axis The input variables with ranges. * \param name The tensor's name. 
* \param tag The tensor's tag. * \param attrs The tensor's attrs. * \param clone_axis Whether to clone the given axis and perform substitution. * \return A tensor. */ Tensor TensorFromExpr(const PrimExpr& expr, const Array<IterVar>& axis, const std::string& name = "tensor", const std::string& tag = "", const Map<String, ObjectRef>& attrs = {}, bool clone_axis = true); Tensor TransformTensorBody( const Tensor& tensor, const std::function<PrimExpr(const PrimExpr&, const Array<IterVar>&)>& func); Tensor TransformTensorBody(const Tensor& tensor, const std::function<PrimExpr(const PrimExpr&)>& func); /*! * \brief Inline tensors access recursively. * * This function will inline tensors recursively until it reaches a tensor which is impossible to * inline (a reduction if \p inline_reductions is false, a non-compute tensor, a tensor which is * not from \p inlineable). It won't descend into non-inlinable tensors' bodies. * * \param tensor The tensor whose body to transform. * \param inlineable A list of tensors which are allowed to be inlined. If empty, try * to inline all tensors. * \param inline_reductions Whether to inline reductions (this may result in top-level reduction * nodes). * * \return An inlined tensor */ TVM_DLL Tensor InlineTensorAccess(const Tensor& tensor, const Array<Tensor>& inlineable = Array<Tensor>(), bool inline_reductions = false); /*! * \brief Inline tensors access at the tail. * \param tensor The tensor whose body to transform. * \return An inlined tensor */ TVM_DLL Tensor InlineTailTensorAccess(const Tensor& tensor); /*! * \brief Simplify an iteration domain. * * An iteration domain is basically an array of variables and a condition. The function will do the * following: * - Replace div and mod operations with new variables (optional). * - Extract (in)equalities from the condition. * - Perform Fourier-Motzkin elimination. * - Shear the domain of iteration (e.g. 
if `y <= x <= y + 2` then x will be replaced with `y + d` * where `d` is a new variable such that `0 <= d <= 2`). * - Remove redundant variables. * - Infer new variable ranges (hopefully more precise). * * \param iter_domains The original domain. * \param eliminate_div_mod Whether to eliminate div and mod by introducing new variables. */ TVM_DLL arith::IntConstraintsTransform SimplifyDomain(const arith::IntConstraints& iter_domains, bool eliminate_div_mod = true); /*! * \brief Perform lifting of conditions of being possible to be non-zero together with * applying some transformations like simplifying the reduction domain. Works only with * this particular tensor's body, i.e. doesn't perform inlining. * * \param tensor The original tensor; * \param vranges Optional map from free variables to their value ranges. * \return An optimized tensor. */ TVM_DLL Tensor RemoveJacobianAndLiftNonzeroCond(const Tensor& tensor, const Map<Var, Range>& vranges = Map<Var, Range>()); } // namespace te } // namespace tvm #endif // TVM_TE_AUTODIFF_AD_UTILS_H_
https://github.com/zk-ml/tachikoma
src/te/operation/compute_op.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \brief Helper utilities to implement compute_op. * \file compute_op.h */ #ifndef TVM_TE_OPERATION_COMPUTE_OP_H_ #define TVM_TE_OPERATION_COMPUTE_OP_H_ #include <tvm/te/operation.h> #include <tvm/tir/expr.h> #include <unordered_map> #include <vector> namespace tvm { namespace te { // loop nest structure for general compute // This the loop nest structured used in compute. // Does not include the loop body. struct ComputeLoopNest { // The common number of loops between init and main size_t num_common_loop; // predicates for the initialize loop std::vector<PrimExpr> init_predicates; // Initialization nest involved. std::vector<std::vector<Stmt>> init_nest; // Value map for the init code std::unordered_map<IterVar, PrimExpr> init_vmap; // Predicates for the main update loop std::vector<PrimExpr> main_predicates; // The general loop nest std::vector<std::vector<Stmt>> main_nest; // Value map for the IterVar. std::unordered_map<IterVar, PrimExpr> main_vmap; /*! * \brief constructor to build ComputeOpNest * \param self The pointer to compute op. * \param stage The scxhedule stage. * \param dom_map The domain map. 
* \param debug_keep_trivial_loop Whether keep trivial loops with extent of 1 * \return The constructed loop nest */ static ComputeLoopNest Create(const BaseComputeOpNode* self, const Stage& stage, const std::unordered_map<IterVar, Range>& dom_map, bool debug_keep_trivial_loop); }; /*! * \brief Build body of compute for cross thread reduction pattern. * \param self The pointer to ComputeOpNode * \param stage The schedule stage. * \param dom_map The domain map. * \param debug_keep_trivial_loop Whether keep trivial loops with extent of 1 * \return The created statement. */ Stmt MakeCrossThreadReduction(const ComputeOpNode* self, const Stage& stage, const std::unordered_map<IterVar, Range>& dom_map, bool debug_keep_trivial_loop); /*! * \brief Build body of compute for tensorization. * \param self The pointer to ComputeOpNode * \param stage The schedule stage. * \param dom_map The domain map. * \param debug_keep_trivial_loop Whether keep trivial loops with extent of 1 * \return The created statement. */ Stmt MakeTensorize(const ComputeOpNode* self, const Stage& stage, const std::unordered_map<IterVar, Range>& dom_map, bool debug_keep_trivial_loop); /*! * \brief Transform the update part when there is no init func in tensorizing * \param stage The stage for tensorizing. * \param dom_map The range of each iter var. * \param n The loop nest structured used in compute. * \param body The body func in tensorize intrin * \param update The update func in tensorize intrin * \return Transformed result. */ Stmt TransformUpdate(const Stage& stage, const std::unordered_map<IterVar, Range>& dom_map, const ComputeLoopNest& n, Stmt body, Stmt update); } // namespace te } // namespace tvm #endif // TVM_TE_OPERATION_COMPUTE_OP_H_
https://github.com/zk-ml/tachikoma
src/te/operation/create_primfunc.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ #ifndef TVM_TE_OPERATION_CREATE_PRIMFUNC_H_ #define TVM_TE_OPERATION_CREATE_PRIMFUNC_H_ #include <tvm/runtime/container/array.h> #include <tvm/te/tensor.h> #include <tvm/tir/function.h> namespace tvm { namespace tir { /*! \brief Use Tensor Expression to create a schedulable TensorIR func. */ PrimFunc CreatePrimFunc(const Array<te::Tensor>& arg_list); /*! \brief The same as above but create a PrimFunc with AllocateConstNode. If the size of the * constants array is N, the last N tensors in arg_list will be treated as constant tensors. * Constant tensors will not be part of the parameters of the created PrimFunc, instead constants * will be embedded in the body as AllocateConstNode. */ PrimFunc CreatePrimFuncWithConstants(const Array<te::Tensor>& arg_list, const Array<runtime::NDArray>& constants); } // namespace tir } // namespace tvm #endif // TVM_TE_OPERATION_CREATE_PRIMFUNC_H_
https://github.com/zk-ml/tachikoma
src/te/operation/hybrid_op.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \brief Helper utilities to implement hybrid_op. * \file hybrid_op.h */ #ifndef TVM_TE_OPERATION_HYBRID_OP_H_ #define TVM_TE_OPERATION_HYBRID_OP_H_ #include <tvm/te/schedule.h> #include <tvm/tir/expr.h> #include <unordered_map> #include <unordered_set> #include <vector> #include "../../tir/transforms/arg_binder.h" #include "../../tir/transforms/ir_utils.h" #include "../schedule/message_passing.h" namespace tvm { namespace te { /*! * \brief Find all the iteration variables in the given statement body. * \param stmt The body to be inspected. */ std::vector<IterVar> GatherLoopVars(Stmt stmt); /*! * \brief Replace the tensor reference (especially in Provide's) in stmt by the replace map. * \param stmt The statement to be processed. * \param replace The replacement rule. */ Stmt ReplaceProvideTensor(Stmt stmt, const std::unordered_map<Tensor, Tensor>& replace); /*! * \brief Apply the schedule manipulation on the function body. * \param stmt The statement to be processed. * \param dom_map The extents of the iterative variables may be used. * \param stage The schedule information to be applied. 
*/ Stmt ApplySchedule(const Stage& stage, const std::unordered_map<IterVar, Range>& dom_map, Stmt stmt); /*! * \brief Apply loop splits and fuses in the schedule on the function body. * \param stage The schedule information to be applied. * \param dom_map The extents of the iterative variables may be used. * \param stmt The statement to be processed. */ Stmt ApplyLoopShapes(const Stage& stage, const std::unordered_map<IterVar, Range>& dom_map, Stmt stmt); /*! * \brief Apply loop annotation in the schedule on the function body. * \param stage The schedule information to be applied. * \param rebased The map specifies the rebase, a.k.a rename, relationship of these variables. * \param stmt The statement to be processed. */ Stmt ApplyLoopAnnotations(const Stage& stage, const std::unordered_map<IterVar, IterVar>& rebased, Stmt stmt); /*! * \brief Apply loop order in the schedule on the function body. * \param stage The schedule information to be applied. * \param dom_map The extents of the iterative variables may be used. * \param rebased The map specifies the rebase, a.k.a rename, relationship of these variables. * \param stmt The statement to be processed. */ Stmt ApplyLoopOrder(const Stage& stage, const std::unordered_map<IterVar, Range>& dom_map, const std::unordered_map<IterVar, IterVar>& rebased, Stmt stmt); } // namespace te } // namespace tvm #endif // TVM_TE_OPERATION_HYBRID_OP_H_
https://github.com/zk-ml/tachikoma
src/te/operation/op_utils.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file op_utils.h * \brief Common utility used in operator construction. */ #ifndef TVM_TE_OPERATION_OP_UTILS_H_ #define TVM_TE_OPERATION_OP_UTILS_H_ #include <tvm/te/schedule.h> #include <tvm/tir/expr.h> #include <unordered_map> #include <unordered_set> #include <vector> #include "../../tir/transforms/arg_binder.h" #include "../../tir/transforms/ir_utils.h" #include "../schedule/message_passing.h" namespace tvm { namespace te { using tir::MergeNest; /*! * \brief Build loop nest for stage. * * \param stage The stage to create a loop nest. * \param dom_map The range of each iter var. * \param begin_iter_pos The beginning position of leaf_iter_vars to generate loop. * \param new_loop_var Whether create new loop variable. * \param skip_iter Whether skip certain iteration. * \param p_value_map The result value of each IterVar. * \param debug_keep_trivial_loop Whether keep trivial loops with extent of 1 */ std::vector<std::vector<Stmt>> MakeLoopNest(const Stage& stage, const std::unordered_map<IterVar, Range>& dom_map, size_t begin_iter_pos, bool new_loop_var, const std::unordered_set<IterVar>& skip_iter, std::unordered_map<IterVar, PrimExpr>* p_value_map, bool debug_keep_trivial_loop); /*! 
* \brief Create a nest of if checking the predicates. * * \param predicates The predicates to be checked. * \return List of If nest that checks the predicates. */ std::vector<Stmt> MakeIfNest(const std::vector<PrimExpr>& predicates); /*! * \brief Replace the tensor reference (especially in Call's) in stmt by the replace map. * \param stmt The statement to be processed. * \param replace The replacement rule. */ Stmt ReplaceTensor(Stmt stmt, const std::unordered_map<Tensor, Tensor>& replace); /*! * \brief Replace the tensor reference (especially in Call's) in primExpr by the replace map. * \param expr The expression to be processed. * \param replace The replacement rule. */ PrimExpr ReplaceTensor(PrimExpr expr, const std::unordered_map<Tensor, Tensor>& replace); /*! * \brief Substitute the variables of stmt by value map. * \param stmt the statment * \param value_map The value map. * \return Substituted result. */ Stmt Substitute(Stmt stmt, const std::unordered_map<IterVar, PrimExpr>& value_map); /*! * \brief Substitute the variables of primExpr by value map. * \param expr the expression to be processed. * \param value_map The value map. * \return Substituted result. */ PrimExpr Substitute(PrimExpr expr, const std::unordered_map<IterVar, PrimExpr>& value_map); /*! * \brief Converts Halide ForKind to its corresponding IterVarType * \param kind The ForKind to be converted */ IterVarType ForKindToIterVarType(tir::ForKind kind); /*! * \brief Converts IterVarType to its corresponding Halide ForKind * \param iter_type The IterVarType to be converted */ tir::ForKind IterVarTypeToForKind(IterVarType iter_type); } // namespace te } // namespace tvm #endif // TVM_TE_OPERATION_OP_UTILS_H_
https://github.com/zk-ml/tachikoma
src/te/schedule/graph.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file graph.h * \brief Utilities to get information about schedule graph. */ #ifndef TVM_TE_SCHEDULE_GRAPH_H_ #define TVM_TE_SCHEDULE_GRAPH_H_ #include <tvm/te/operation.h> #include <tvm/te/schedule.h> #include <tvm/tir/expr.h> #include <unordered_map> #include <unordered_set> #include <vector> namespace tvm { namespace te { /*! * \brief data structure of Operation->Tensors it reads */ using ReadGraph = Map<Operation, Array<Tensor>>; /*! * \brief AttachPath maps op-> a list of IterVar */ using AttachPath = Map<Operation, Array<IterVar>>; /*! * \brief The map between tensor and operation it feeds to. */ using FeedGraph = std::unordered_map<Tensor, std::vector<Operation>>; /*! * \brief Get read graph of each operation to all the * Tensors that it directly depends on. * * The result map contains Operations needed to finish root Operation. * \param roots The root operation. * \return The result map. */ ReadGraph CreateReadGraph(const Array<Operation>& roots); /*! * \brief Get minimum subgraph between outputs and inputs. * The operations contains node which input-reachable from any inputs * output reachable to any outputs. 
* * The inputs won't be included in the subgraph, the outputs will be included. * * \param outputs The outputs of the subgraph * \param inputs The inputs to the subgraph. * \param include_inputs Whether to include inputs * * \return The subgraph. */ Array<Operation> GetSubGraph(const Array<Tensor>& outputs, const Array<Tensor>& inputs, bool include_inputs); /*! * \brief Get a post DFS ordered of operations in the graph. * \param roots The root of the graph. * \param g The read graph. * \return vector order of Operations in PostDFS order. * * \note PostDFSOrder is a special case of Topoligical order, * and can be used when topoligical order is needed. */ Array<Operation> PostDFSOrder(const Array<Operation>& roots, const ReadGraph& g); /*! * \brief Create feedgraph for given Schedule * \param g The read graph. * \return The created feedgraph. */ FeedGraph CreateFeedGraph(const ReadGraph& g); /*! * \brief Create AttachPath that maps op-> a list of IterVar * That represents the loop nest op sits in from inner most to outermost * Also inserts attach_stage for scan updates when needed. * * \param sch The schedule. * \return The attach path. */ AttachPath CreateAttachPath(Schedule sch); /*! * \brief Get all operations inside the recursion of scan. * \param scan_op The scan node ops. * \return The body operations, in read dependency order. */ Array<Operation> ScanGetBody(const Operation& scan_op); /*! * \brief Analyze each spatial dimension of scan's result. * Give check on whether each dimension is fix point, * An axis is a fixed point if it only refers back to itself in recursion * and it is not used in axis of other recursion field. * * next_state[t, ..., axis, ...] = f(prev_state[t-1, ...,axis,...] * * \param scan The scan node. * \return Map of spatial_axis -> IntImm */ Map<IterVar, PrimExpr> ScanFixPointAnalysis(const Operation& scan); } // namespace te } // namespace tvm #endif // TVM_TE_SCHEDULE_GRAPH_H_
https://github.com/zk-ml/tachikoma
src/te/schedule/message_passing.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file message_passing.h
 * \brief Common utilities to do message passing
 *  on the schedule hyper graph.
 */
#ifndef TVM_TE_SCHEDULE_MESSAGE_PASSING_H_
#define TVM_TE_SCHEDULE_MESSAGE_PASSING_H_

#include <tvm/arith/analyzer.h>
#include <tvm/te/operation.h>
#include <tvm/te/schedule.h>
#include <tvm/tir/expr.h>

#include <unordered_map>
#include <unordered_set>
#include <vector>

namespace tvm {
namespace te {

/*!
 * \brief Downward inference of domain of each IterVar.
 *  Caller set the range of the root, then the function
 *  propagates it towards the leaves.
 *
 * \param stage The stage to operate on.
 * \param p_state The state of the message passing.
 * \param analyzer Analyzer context, storing information about bounds in p_state.
 * \param allow_missing Whether allow missing value.
 */
void PassDownDomain(const Stage& stage, std::unordered_map<IterVar, Range>* p_state,
                    arith::Analyzer* analyzer, bool allow_missing = false);

/*!
 * \brief Upward inference of index of each IterVar,
 *  given index assignment of the leaves.
 *
 * \param stage The stage to operate on.
 * \param dom_map The domain map of each iteration variable's domain.
 * \param p_state The index state of each IterVar.
 * \param allow_missing Whether allow missing value.
 */
void PassUpIndex(const Stage& stage, const Map<IterVar, Range>& dom_map,
                 std::unordered_map<IterVar, PrimExpr>* p_state, bool allow_missing = false);

/*!
 * \brief Downward inference of index of each IterVar,
 *  given index assignment of roots.
 *
 * \param stage The stage to operate on.
 * \param dom_map The domain map of each iteration variable's domain.
 * \param p_state The index state of each IterVar.
 * \param allow_missing Whether allow missing value.
 */
void PassDownIndex(const Stage& stage, const Map<IterVar, Range>& dom_map,
                   std::unordered_map<IterVar, PrimExpr>* p_state, bool allow_missing = false);

/*!
 * \brief Upward inference of domain set of each IterVar,
 *  given domain assignment of the leaves.
 *
 * \param stage The stage to operate on.
 * \param dom_map The domain map of each iteration variable's maximum domain.
 * \param p_state The index state of each IterVar.
 */
void PassUpDomain(const Stage& stage, const std::unordered_map<IterVar, Range>& dom_map,
                  std::unordered_map<IterVar, IntSet>* p_state);

/*!
 * \brief Upward message passing of bitmask with or relation.
 * \param stage The stage to operate on.
 * \param p_state The index state of each IterVar.
 * \param allow_missing Whether allow missing value.
 */
void PassUpBitMaskOr(const Stage& stage, std::unordered_map<IterVar, int>* p_state,
                     bool allow_missing = false);

/*!
 * \brief Downward message passing of bitmask with or relation.
 * \param stage The stage to operate on.
 * \param p_state The index state of each IterVar.
 * \param allow_missing Whether allow missing value.
 */
void PassDownBitMaskOr(const Stage& stage, std::unordered_map<IterVar, int>* p_state,
                       bool allow_missing = false);

/*!
 * \brief Create boundary check predicates given remapped value of root
 * \param stage The stage we operate on
 * \param dom_map The domain map of each value.
 * \param value_map The value map of the root iter var.
 * \param skip_ivar_domain Whether we skip check for IterVar's original domain.
 * \param skip_iter The set of variables to skip bound condition.
 * \return List of predicates that we need to check.
 */
std::vector<PrimExpr> MakeBoundCheck(const Stage& stage, const Map<IterVar, Range>& dom_map,
                                     const std::unordered_map<IterVar, PrimExpr>& value_map,
                                     bool skip_ivar_domain,
                                     const std::unordered_set<IterVar>& skip_iter);

}  // namespace te
}  // namespace tvm
#endif  // TVM_TE_SCHEDULE_MESSAGE_PASSING_H_
https://github.com/zk-ml/tachikoma
src/te/schedule/operation_inline.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file operation_inline.h
 * \brief Utility to inline an operation's computation into its consumers.
 */
#ifndef TVM_TE_SCHEDULE_OPERATION_INLINE_H_
#define TVM_TE_SCHEDULE_OPERATION_INLINE_H_

#include <tvm/te/operation.h>
#include <tvm/te/tensor.h>
#include <tvm/tir/expr.h>
#include <tvm/tir/stmt.h>

namespace tvm {
namespace te {

/*!
 * \brief inline all calls of f in stmt.
 *
 * \param stmt The statement to apply inline optimization.
 * \param op The op to be inlined.
 * \param args The arguments variable of the function.
 * \param body The definition body of the function.
 * \return The result stmt
 *
 * \note All the passes in this file use SSA form and output SSA form.
 */
Stmt Inline(Stmt stmt, Operation op, Array<Var> args, PrimExpr body);

}  // namespace te
}  // namespace tvm
#endif  // TVM_TE_SCHEDULE_OPERATION_INLINE_H_
https://github.com/zk-ml/tachikoma
src/tir/analysis/check_contains.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file check_contains.h * \brief Interface of the analysis that tells if an expression contains a node that satisfies a given predicate. */ #ifndef TVM_TIR_ANALYSIS_CHECK_CONTAINS_H_ #define TVM_TIR_ANALYSIS_CHECK_CONTAINS_H_ #include <tvm/tir/expr.h> #include <tvm/tir/stmt_functor.h> // For the class StmtExprVisitor namespace tvm { namespace tir { /*! * \brief Visitor which tells if a given expression or statement contains a subexpression that satisfies a given predicate */ class CheckContains : public StmtExprVisitor { public: // Toplevel (static) functions static bool ExprContains(const PrimExpr& expr, std::function<bool(const PrimExpr&)> predicate); static bool StmtContains(const Stmt& stmt, std::function<bool(const PrimExpr&)> predicate); protected: // Constructor explicit CheckContains(std::function<bool(const PrimExpr&)> predicate); void VisitExpr(const PrimExpr& expr) override; void VisitStmt(const Stmt& stmt) override; private: std::function<bool(const PrimExpr&)> predicate_; bool contains_it_ = false; }; } // namespace tir } // namespace tvm #endif // TVM_TIR_ANALYSIS_CHECK_CONTAINS_H_
https://github.com/zk-ml/tachikoma
src/tir/analysis/device_constraint_utils.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file tir/analysis/device_constraint_utils.h
 * \brief Utilities for extracting and applying device-related constraints to \p PrimFunc
 * parameters.
 *
 * These utilities are used by the \p PlanDevices pass to extract memory (aka 'storage') scope
 * information from \p PrimFuncs and convert them back into \p VirtualDevice form w.r.t. the
 * original Relay type of the \p PrimFunc (ie before flattening of tuple arguments/results and
 * conversion to destination-passing style aka DPS).
 *
 * A utility is also supplied to go the other way: impose memory scopes on \p PrimFunc parameters.
 * However that's still in EXPERIMENTAL form.
 *
 * We may extend these utilities to also gather/apply layout information should we add that to
 * \p VirtualDevice.
 */

#ifndef TVM_TIR_ANALYSIS_DEVICE_CONSTRAINT_UTILS_H_
#define TVM_TIR_ANALYSIS_DEVICE_CONSTRAINT_UTILS_H_

#include <tvm/target/virtual_device.h>
#include <tvm/tir/function.h>

namespace tvm {
namespace tir {

/*!
 * A Relay Function with type:
 * \code
 *   fn((Tensor[...], Tensor[...]), Tensor[...]) -> (Tensor[...], Tensor[...])
 *       ^            ^             ^                ^            ^
 *       a            b             c                d            e
 * \endcode
 * will be represented by a TIR PrimFunc in flattened and DPS form with at least 5 argument a..e.
 * \code
 *   primfn(a: handle, b: handle, c: handle, d: handle, e: handle) {
 *     buffers = { ... }
 *     buffer_map = { ... }
 *     ...
 *   }
 * \endcode
 *
 * Each such PrimFunc argument will be mapped to a \p Buffer whose underlying \p data \p Var
 * has a \p PointerType.
 *
 * The PrimFunc may have additional non-pointer arguments, eg for:
 *  - scalar inputs and tensor dimensions
 *  - device contexts
 * Those should be ignored here since they have no counterpart in the Relay Function.
 *
 * We'll need helpers to map on-the-fly between the Relay and TIR view of functions.
 */

/*!
 * \brief Returns the \p VirtualDevices capturing the memory (aka storage) scope constraints for all
 * the arguments and result of \p prim_func. However the result will be w.r.t. the \p prim_func's
 * representation as a Relay \p Function of \p relay_func_type before lowering and conversion to
 * DPS.
 */
Array<VirtualDevice> GetPrimFuncArgAndResultConstraints(const tir::PrimFunc& prim_func,
                                                        const FuncType& relay_func_type);

/*!
 * \brief Returns \p prim_func written to capture the memory (aka storage) scope constraints
 * for each of the \p prim_func's parameters given by \p arg_and_result_virtual_devices. However,
 * \p arg_and_result_virtual_devices should be w.r.t. the \p prim_func's representation as a Relay
 * \p Function of \p relay_func_type before lowering and conversion to DPS.
 *
 * CAUTION: This is experimental. The resulting \p PrimFunc may not have fully accounted for all
 * new memory scopes.
 */
PrimFunc ApplyPrimFuncArgAndResultConstraints(
    const PrimFunc& prim_func, const FuncType& relay_func_type,
    const Array<VirtualDevice>& arg_and_result_virtual_devices);

}  // namespace tir
}  // namespace tvm

#endif  // TVM_TIR_ANALYSIS_DEVICE_CONSTRAINT_UTILS_H_
https://github.com/zk-ml/tachikoma
src/tir/ir/buffer_common.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tir/ir/buffer_common.h * \brief Common utils for buffer access */ #ifndef TVM_TIR_IR_BUFFER_COMMON_H_ #define TVM_TIR_IR_BUFFER_COMMON_H_ #include <tvm/ir/type.h> #include <tvm/runtime/data_type.h> #include <optional> namespace tvm { namespace tir { /*! * \brief Returns the type of object pointed to. * * \param type The type to be checked. * * \return An std::optional<DataType> object. If the type is a pointer * to a primitive type, the object has a value which is the pointed-to * type. Otherwise the object is nullopt. */ inline std::optional<runtime::DataType> GetPointerType(const Type& type) { if (type.defined()) { if (auto* ptr_type = type.as<PointerTypeNode>()) { if (auto* prim_type = ptr_type->element_type.as<PrimTypeNode>()) { return prim_type->dtype; } } } return std::nullopt; } } // namespace tir } // namespace tvm #endif // TVM_TIR_IR_BUFFER_COMMON_H_
https://github.com/zk-ml/tachikoma
src/tir/ir/functor_common.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file tir/ir/functor_common.h
 * \brief Common utils for implementing functors
 */
#ifndef TVM_TIR_IR_FUNCTOR_COMMON_H_
#define TVM_TIR_IR_FUNCTOR_COMMON_H_

// Fix: this include previously appeared before the file doc comment and
// OUTSIDE the include guard; moved inside the guard per header convention.
#include <tvm/runtime/container/array.h>

namespace tvm {
namespace tir {

// Implementation of Visitors

/*!
 * \brief Apply \p fvisit to each element of \p arr, in order, for side effects only.
 * \param arr The array to traverse.
 * \param fvisit Callable invoked once per element; its return value is ignored.
 */
template <typename T, typename F>
inline void VisitArray(const Array<T>& arr, F fvisit) {
  for (size_t i = 0; i < arr.size(); i++) {
    fvisit(arr[i]);
  }
}

/*!
 * \brief Return a new array whose elements are \p fmutate applied to each element of \p arr.
 * \param arr The input array (taken by value; Array::Map may reuse it copy-on-write).
 * \param fmutate Callable mapping an element to its replacement.
 * \return The mutated array.
 */
template <typename T, typename F>
inline Array<T> MutateArray(Array<T> arr, F fmutate) {
  return arr.Map(fmutate);
}

}  // namespace tir
}  // namespace tvm
#endif  // TVM_TIR_IR_FUNCTOR_COMMON_H_
https://github.com/zk-ml/tachikoma
src/tir/ir/script/script_complete.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file tir/ir/script/script_complete.h
 * \brief Used by TVM Script parser to expand incomplete TIR input
 */
#ifndef TVM_TIR_IR_SCRIPT_SCRIPT_COMPLETE_H_
#define TVM_TIR_IR_SCRIPT_SCRIPT_COMPLETE_H_

#include <tvm/runtime/registry.h>
#include <tvm/tir/stmt.h>
#include <tvm/tir/stmt_functor.h>

namespace tvm {
namespace tir {

/*!
 * \brief Expand an incomplete PrimFunc produced by the TVM Script parser into complete TIR.
 * \param func The PrimFunc to complete.
 * \param root_allocates Presumably the buffers to be allocated at the root block of \p func —
 *        TODO(review): confirm against the parser call site.
 * \return The completed PrimFunc.
 */
PrimFunc ScriptComplete(PrimFunc func, const Array<Buffer>& root_allocates);

}  // namespace tir
}  // namespace tvm
#endif  // TVM_TIR_IR_SCRIPT_SCRIPT_COMPLETE_H_
https://github.com/zk-ml/tachikoma
src/tir/schedule/analysis.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ #ifndef TVM_TIR_SCHEDULE_ANALYSIS_H_ #define TVM_TIR_SCHEDULE_ANALYSIS_H_ #include <tvm/arith/analyzer.h> #include <tvm/ir/op.h> #include <tvm/tir/index_map.h> #include <tvm/tir/schedule/schedule.h> #include <tvm/tir/schedule/state.h> #include <tuple> #include <unordered_map> #include <unordered_set> #include <utility> #include <vector> #include "../../runtime/thread_storage_scope.h" namespace tvm { namespace tir { /******** Verification ********/ /*! * \brief Verifies the sref tree state is consistent with the IR * \param self The schedule state containing the sref to be verified * \throw An exception will be thrown if the sref tree is not valid */ void VerifySRefTree(const ScheduleState& self); /*! * \brief Verifies the cached flags in the schedule state, including: * - affine_binding * - region_cover * - stage_pipeline * \param self The schedule state to be verified * \throw An exception will be thrown if some srefs are not valid */ void VerifyCachedFlags(const ScheduleState& self); /******** IR Module ********/ /*! 
* \brief Get PrimFunc and GlobalVar that the root block belongs to * \param mod The IRModule * \param root_block The root block of the PrimFunc * \param result_g_var The result GlobalVar * \return The result PrimFunc where the root block belongs to * \note This function returns the pointer instead of ObjectRef to avoid later copy-on-write */ const PrimFuncNode* GetRootPrimFunc(const IRModule& mod, const StmtNode* root_block, GlobalVar* result_g_var); /*! * \brief Get the root node of the sref tree, which is the root block of the PrimFunc. * \param sref The given sref. * \return The root node of the sref tree which contains the given node. */ StmtSRef GetSRefTreeRoot(const StmtSRef& sref); /******** Scope ********/ /*! * \brief Checks if scope the specified sref is in is a stage-pipeline and return it * \param self The schedule state * \param sref The sref whose scope is to be checked * \param require_stage_pipeline A boolean indicating whether to check stage pipeline * \throw ScheduleError if * 1) the sref has been the root of the AST (so it has no scope root), or * 2) require_stage_pipeline = true, but its scope root is not a stage pipeline * \return The block sref to the scope root */ StmtSRef GetScopeRoot(const ScheduleState& self, const StmtSRef& sref, bool require_stage_pipeline); /*! * \brief The information of a block scope, including the leaf blocks, * as well as the loop types (spatial, reduction) for each loop in the scope. */ struct ScopeBlockLoopInfo { /*! \brief A list of the leaf blocks, from left to right */ std::vector<BlockRealize> realizes; /*! \brief The loop vars bound to spatial block iters */ std::unordered_set<const VarNode*> spatial_vars; /*! \brief The loop vars bound to non-spatial block iters */ std::unordered_set<const VarNode*> non_spatial_vars; }; /*! 
* \brief Inspect the scope of the given sref * \param scope_block The root block of the scope * \return The information of the scope */ ScopeBlockLoopInfo GetScopeBlockLoopInfo(const Block& scope_block); /*! * \brief Checks whether the block is a complete block under the scope * \param self The schedule state * \param block_sref The block to be checked * \param scope_root_sref The sref to the root block of the scope that `block_sref` is in * \return A boolean indicating if the block is a complete block * \note Definition of a complete block: * 1) All block vars are data parallel * 2) Dominant: the block is the only writer of its output, * dominating the reader of its output buffers * 3) No overlap between the buffers the block reads and writes */ bool IsCompleteBlock(const ScheduleState& self, const StmtSRef& block_sref, const StmtSRef& scope_root_sref); /*! * \brief Check if the block is a complete block under the scope * \param self The schedule state * \param block_sref The sref to the block whose completeness is to be checked * \param scope_root_sref The scope root of the block * \throw ScheduleError If the block is not a complete block */ void CheckCompleteBlock(const ScheduleState& self, const StmtSRef& block_sref, const StmtSRef& scope_root_sref); /*! 
* \brief Check whether the block is a reduction block under the scope * \param self The schedule state * \param block_sref The block to be checked * \param scope_root_sref The sref to the root block of the scope that `block_sref` is in * \return A boolean indicating if the block is a reduction block * \note Definition of a reduction block: * 1) The block has the `init` statement * 2) All the block bindings are quasi-affine expressions * 3) All block vars are either data parallel block vars or reduction block vars * 4) Dominant: the block is the only writer of its output, dominating the reader of its output * buffers * 5) The reduction block vars are not used to index the output buffers */ bool IsReductionBlock(const ScheduleState& self, const StmtSRef& block_sref, const StmtSRef& scope_root_sref); /*! * \brief Check if the block is a reduction block under the scope * \param self The schedule state * \param block_sref The sref of the block to be checked * \param scope_root_sref The scope root of the block * \throw ScheduleError If the block is not a reduction block */ void CheckReductionBlock(const ScheduleState& self, const StmtSRef& block_sref, const StmtSRef& scope_root_sref); /*! * \brief Check if the block is a complete block or a reduction block under the scope * \param self The schedule state * \param block_sref The sref of the block to be checked * \param scope_root_sref The scope root of the block * \throw ScheduleError If the block is neither a complete block nor a reduction block */ void CheckCompleteOrReductionBlock(const ScheduleState& self, const StmtSRef& block_sref, const StmtSRef& scope_root_sref); /*! * \brief Check the subtree compact dataflow property. The scope root may have one or more subtrees * rooted at its direct children, and this property requires all the blocks of the subtree * that the specified sref is in to be local complete block or local reduction block. 
* \param self The schedule state * \param subtree_root The sref of the subtree root to be checked */ void CheckSubtreeCompactDataflow(const ScheduleState& self, const StmtSRef& subtree_root); /*! * \brief Check if the block is an output block, i.e. the block writes to at least a buffer that is * not allocated under the current scope * \param self The schedule state * \param block_sref The block to be checked * \param scope_root_sref The scope root of the block * \return A boolean flag indicating if the block is an output block */ bool IsOutputBlock(const ScheduleState& self, const StmtSRef& block_sref, const StmtSRef& scope_root_sref); /*! * \brief Check if the block is not an output block, i.e. all the buffers the block writes to * are allocated under the current scope * \param self The schedule state * \param block_sref The block to be checked * \param scope_root_sref The scope root of the block * \throw ScheduleError if the block is an output block */ void CheckNotOutputBlock(const ScheduleState& self, const StmtSRef& block_sref, const StmtSRef& scope_root_sref); /*! * \brief Extracts the types of the block vars * \param block_sref The block to be checked * \return A vector of types of the block vars */ std::vector<IterVarType> GetBlockVarTypes(const StmtSRef& block_sref); /*! * \brief Checks if a block could be considered as a "write cache" * \param block_sref The block to be checked * \return A boolean flag indicating if the block is a write cache */ bool IsWriteCache(const StmtSRef& block_sref); /******** Binding ********/ /*! * \brief Verifies if the block binding in a specific BlockRealize is an affine binding. * The binding can be represented as an injective affine map from the loop iterators. 
* \param realize The BlockRealize to be analyzed * \param loop_var_ranges The ranges of the loop variables * \param analyzer The analyzer * \return A boolean flag indicating if the binding is affine */ bool IsAffineBinding(const BlockRealize& realize, const Map<Var, Range>& loop_var_ranges, arith::Analyzer* analyzer); /*! * \brief Check whether a block has an affine binding using the cached flag, and throw an exception * if the block does not have an affine binding. * \param self The schedule state * \param block The block to be checked * \throw ScheduleError If the input block does not have an affine binding */ void CheckAffineBinding(const ScheduleState& self, Block block); /*! * \brief Check whether a block has an affine binding under the high exclusive sref node, * throw an exception if the block does not have an affine binding. * \param self The schedule state * \param block The block to be checked * \param high_exclusive The highest sref node * \throw ScheduleError If the input block does not have an affine binding */ void CheckPartialAffineBinding(const ScheduleState& self, Block block, const Optional<StmtSRef>& high_exclusive); /*! * \brief Extracts the ranges of loop variables in a path of the sref tree * \param low_inclusive The lowest node in the path * \param high_exclusive The highest node in the path, defaults to the scope root if not specified * \param extra_relax_scope If the scope is not global, the method will look beyond the limit and * retrieve extra domains. For example, * - if the storage scope is warp, it will look upwards for threadIdx.x * - if the storage scope is shared, it will look for threadIdx.x/y/z * \return The loop domain */ Map<Var, Range> LoopDomainOfSRefTreePath(const StmtSRef& low_inclusive, const Optional<StmtSRef>& high_exclusive = NullOpt, const runtime::StorageScope& extra_relax_scope = // runtime::StorageScope{runtime::StorageRank::kGlobal, ""}); /*! 
* \brief Returns the block var binding * \param realize The BlockRealize to be analyzed * \return The block var binding */ Map<Var, PrimExpr> GetBindings(const BlockRealize& realize); /*! * \brief Get the vars involved in the bindings of data parallel block vars and reduction block * vars, respectively * \param block_realize The BlockRealize to be analyzed * \param data_par_vars The vars that appear in the binding of any data parallel block iter * \param reduce_vars The vars that appear in the binding of any reduction block iter * \return A boolean indicating whether the block has block iters that is neither a data parallel * block iter nor a reduction block iter */ bool GetVarsTouchedByBlockIters(const BlockRealize& block_realize, std::unordered_set<const VarNode*>* data_par_vars, std::unordered_set<const VarNode*>* reduce_vars); /******** Loop properties ********/ /*! * \brief Check the loop starts with zero. * \param self The schedule state * \param loop_sref The StmtSRef that points to the loop to be checked * \param analyzer The arithmetic analyzer * \throw ScheduleError If the loop doesn't starts with zero. */ void CheckLoopStartsWithZero(const ScheduleState& self, const StmtSRef& loop_sref, arith::Analyzer* analyzer); /*! * \brief Check whether a block has a trivial binding, i.e. each block var is bound to a outer loop, * from outer to inner. * \param self The schedule state * \param block_sref The block to be checked * \throw ScheduleError If the block does not have trivial bindings */ void CheckBlockHasTrivialBinding(const ScheduleState& self, const StmtSRef& block_sref); /******** Block-loop relation ********/ /*! * \brief Gets StmtSRefs of leaf blocks of a scope where a specific block/loop is in * \param self The schedule state * \param parent_sref The StmtSRef that points to the parent block/loop * \return A list of StmtSRefs of leaf block */ Array<StmtSRef> GetChildBlockSRefOnSRefTree(const ScheduleState& self, const StmtSRef& parent_sref); /*! 
* \brief Gets the BlockRealize of the leaf blocks of a scope where a specific block/loop is in * \param parent_sref The StmtSRef that points to the parent block/loop * \return A list of leaf BlockRealize */ Array<BlockRealize> GetChildBlockRealizeOnSRefTree(const StmtSRef& parent_sref); /*! * \brief Get the BlockRealize of the single child block of the block or loop specified by * `parent_sref` on SRef tree, or throw an exception if there is 0 or multiple child blocks * \param self The schedule state * \param parent_sref The StmtSRef that points to the parent block/loop * \return The BlockRealize of the single child block * \throw ScheduleError If there is 0 or multiple child blocks */ BlockRealize CheckGetSingleChildBlockRealizeOnSRefTree(const ScheduleState& self, const StmtSRef& parent_sref); /*! * \brief Get the BlockRealize of the input block * \param self The schedule state * \param block_sref The StmtSRef of the queried block * \return The BlockRealize of the input block */ BlockRealize GetBlockRealize(const ScheduleState& self, const StmtSRef& block_sref); /*! * \brief Get the IterVarType of the specific loop, according to the blocks it's bound to * \param loop_sref The loop to be checked * \return The IterVarType of the specific loop */ IterVarType GetLoopIterType(const StmtSRef& loop_sref); /*! * \brief Get the lowest common ancestor of an array of blocks or loops on the sref tree * \param srefs The block srefs or loop srefs whose lowest common ancestor is to be queried * \return The lowest common ancestor of the input block srefs or loop srefs * \note The input array is required to have at least one sref */ StmtSRef GetSRefLowestCommonAncestor(const Array<StmtSRef>& srefs); /*! * \brief Checks if the given block has been applied by multi-level tiling. We check this by * examine the block's annotation. * \param block_sref The block to be checked * \return A boolean indicating whether the block has been multi-level tiled. 
*/ bool HasBeenMultiLevelTiled(const StmtSRef& block_sref); /*! * \brief Collect all the feasible compute-at locations of the input block * \param self The schedule state * \param block_sref The block whose compute-at locations are to be collected * \return All the feasible compute-at locations of the input block, given as an array of loop srefs * and an array of their indices among the outer loops of the input block */ std::pair<Array<StmtSRef>, std::vector<int>> CollectComputeLocation(const ScheduleState& self, const StmtSRef& block_sref); /******** Producer-consumer relation ********/ /*! * \brief Get the producer blocks to the given block under the given scope * \param block_sref The block whose producers are to be retrieved * \param scope The block scope where the given block is in * \return The producer blocks of the specified block */ Array<StmtSRef> GetProducers(const StmtSRef& block_sref, const BlockScope& scope); /*! * \brief Get the consumer blocks to the given block under the given scope * \param block_sref The block whose consumers are to be retrieved * \param scope The block scope where the given block is in * \return The consumer blocks of the specified block */ Array<StmtSRef> GetConsumers(const StmtSRef& block_sref, const BlockScope& scope); /*! * \brief A solution to split a ordered list of subtrees into two parts, * where producers are on the LHS and consumers are on the RHS. * For example, subtree[0, 3) are on the LHS, and subtree[3, 6) are on the RHS. */ struct ProducerConsumerSplit { /*! \brief Indicates that all producers fall into `subtrees[0, last_producer_position]` */ int last_producer_position; /*! \brief Indicates that all consumers fall into `subtrees[first_consumer_position, ...)` */ int first_consumer_position; /*! \brief The number of given producers visited in `subtrees` */ int n_producers_visited; /*! \brief The number of given consumers visited in `subtrees` */ int n_consumers_visited; /*! 
* \brief Find a split among the given `subtree` * \param state The schedule state * \param subtrees The ordered list of subtrees to be split * \param producer_block_srefs The producers * \param consumer_block_srefs The consumers * \param block2realize If not null, the corresponding BlockRealize to each block in the scope * will be saved in this map * \return The valid split points are (last_producer_position, first_consumer_position] * \throw ScheduleError is not valid split is found */ static ProducerConsumerSplit Find( const ScheduleState& state, const Array<Stmt>& subtrees, const Array<StmtSRef>& producer_block_srefs, const Array<StmtSRef>& consumer_block_srefs, std::unordered_map<const BlockNode*, const BlockRealizeNode*>* block2realize); }; /******** Block-buffer relation ********/ /*! * \brief Get the n-th read or write buffer of the given block. * \param self The schedule state. * \param block The queried block. * \param n The index of the queried buffer. * \param index_type The type of the buffer index, kRead or kWrite. * \return The buffer of the n-th read/write region of the block. * \throw ScheduleError If the buffer index is out of bound. */ Buffer GetNthAccessBuffer(const ScheduleState& self, const Block& block, int n, BufferIndexType index_type); /*! * \brief Get the n-th read or write buffer of the given block. * \param self The schedule state. * \param block The queried block. * \param n The index of the queried buffer. * \param index_type The type of the buffer index, kRead or kWrite. * \return The n-th read/write region of the block. * \throw ScheduleError If the buffer index is out of bound. */ BufferRegion GetNthAccessBufferRegion(const ScheduleState& self, const Block& block, int n, BufferIndexType index_type); /*! 
 * \brief Find the defining site of the buffer in the given block and its ancestors
 * \param block_sref The block sref
 * \param buffer The buffer
 * \return The defining site of the buffer and whether the buffer is allocated (otherwise the
 * buffer is from match_buffer).
 */
std::pair<Optional<StmtSRef>, bool> GetBufferDefiningSite(const StmtSRef& block_sref,
                                                          const Buffer& buffer);

/******** Reduction Block Related ********/

/*!
 * \brief Get the init values and the BufferStore updates from the input reduction block
 * \param self The schedule state, used for error reporting
 * \param block The block from which the init values and BufferStore updates are extracted
 * \return The extracted init values and BufferStore updates
 * \throw ScheduleError If rfactor or cross-thread reduction cannot be applied to the block
 */
std::pair<Array<PrimExpr>, Array<BufferStore>> GetInitValuesAndUpdatesFromReductionBlock(
    const Optional<ScheduleState>& self, Block block);

/*!
 * \brief Check whether the input array of IterVars only contains data-parallel and reduction block
 * iters
 * \param iters The input array of IterVars to be checked
 * \return A boolean indicating whether the input array of IterVars only contains data-parallel and
 * reduction block iters
 */
bool ContainsOnlyDataParAndReductionBlockIter(const Array<IterVar>& iters);

/*!
 * \brief Check whether the block's reduction block iters are not used to index the block's output
 * buffers
 * \param block The block to be checked
 * \return A boolean indicating whether the block's reduction block iters are not used to index the
 * block's output buffer
 */
bool ReductionIterNotIndexOutputBuffer(const Block& block);

/*!
 * \brief Given a list of reduction identities and a list of reduction combiners, detect the
 * corresponding commutative reducer, and extract the combiner LHS values and combiner RHS values
 * \param self The schedule state
 * \param identities The reduction identities to be analyzed
 * \param combiners The reduction combiners to be analyzed
 * \return The corresponding CommReducer, combiner LHS values and combiner RHS values
 * \throw ScheduleError If no corresponding commutative reducer can be matched
 */
std::tuple<CommReducer, Array<PrimExpr>, Array<PrimExpr>> GetReducerAndCombinerLhsRhs(
    const Optional<ScheduleState>& self, const Array<PrimExpr>& identities,
    const Array<BufferStore>& combiners);

/******** Commutative Reducer ********/

/*!
 * \brief Get the list of the registered reducer-getter functions
 * \return The list of the registered reducer-getter functions
 * \sa ReducerRegistry
 */
std::vector<runtime::TypedPackedFunc<Optional<CommReducer>(Array<PrimExpr>)>> GetReducerGetters();

/*!
 * \brief Given the input identities and the combiner BufferStores of a reduction, extract the
 * corresponding commutative reducer, LHS values and RHS values, if possible.
 * \param identities The identities of the reduction
 * \param combiners The combiners of the reduction
 * \param result_reducer The extracted CommReducer
 * \param lhs The extracted LHS values of the reducer
 * \param rhs The extracted RHS values of the reducer
 * \return A boolean indicating whether a corresponding commutative reducer is found
 */
bool FromIdentityCombiner(const Array<PrimExpr>& identities, const Array<BufferStore>& combiners,
                          CommReducer* result_reducer, Array<PrimExpr>* lhs, Array<PrimExpr>* rhs);

/******** Misc ********/

/*!
 * \brief Check whether the input storage scope string is valid. Throw an error if not.
 * \param self The schedule state
 * \param storage_scope The storage scope string to be checked
 * \throw ScheduleError If the input storage scope is not valid
 */
void CheckStorageScope(const ScheduleState& self, String storage_scope);

/*!
 * \brief Checks if a block could be successfully computed inline into its consumer
 * \param self The schedule state
 * \param block_sref The block to be checked
 * \return A boolean indicating whether the block could be successfully computed inline
 */
bool CanComputeInline(const ScheduleState& self, const StmtSRef& block_sref);

/*!
 * \brief Checks if a block could be successfully computed inline into its producer
 * \param self The schedule state
 * \param block_sref The block to be checked
 * \return A boolean indicating whether the block could be successfully computed inline
 */
bool CanReverseComputeInline(const ScheduleState& self, const StmtSRef& block_sref);

/*!
 * \brief Checks if a producer block could be successfully computed at the specific loop.
 * \param self The schedule state
 * \param block_sref The block to be moved
 * \param loop_sref The loop where the block to be moved to
 * \param preserve_unit_loops Whether to keep the trivial loops whose extents are 1
 * \return A boolean indicating whether the block could be successfully compute at the specific loop
 */
bool CanComputeAt(const ScheduleState& self, const StmtSRef& block_sref, const StmtSRef& loop_sref,
                  bool preserve_unit_loops);

/*!
 * \brief Checks if a consumer block could be successfully computed at the specific loop.
 * \param self The schedule state
 * \param block_sref The block to be moved
 * \param loop_sref The loop where the block to be moved to
 * \param preserve_unit_loops Whether to keep the trivial loops whose extents are 1
 * \return A boolean indicating whether the block could be successfully reverse compute at the
 * specific loop
 */
bool CanReverseComputeAt(const ScheduleState& self, const StmtSRef& block_sref,
                         const StmtSRef& loop_sref, bool preserve_unit_loops);

/*!
 * \brief Provided the access pattern to a buffer, suggest one of the possible layout
 * transformation to minimize the locality of the access pattern.
 * \param buffer The buffer to be transformed
 * \param indices The access pattern to the buffer
 * \param loops The loops above the buffer
 * \param predicate The predicate of the access
 * \param analyzer Arithmetic analyzer
 * \return The suggested index map, or NullOpt if no layout transformation is suggested
 */
Optional<IndexMap> SuggestIndexMap(const Buffer& buffer, const Array<PrimExpr>& indices,
                                   const Array<For>& loops, const PrimExpr& predicate,
                                   arith::Analyzer* analyzer);

/*!
 * \brief Checks if the given AST contains the specific operators
 * \param stmt The AST statement to be checked
 * \param ops The list of operators to be checked
 * \return A boolean indicating whether the AST contains the specific operators
 */
bool HasOp(const Stmt& stmt, const Array<Op>& ops);

/*!
 * \brief Checks if the given AST statement contains if-then-else, including
 * 1) IfThenElse statement
 * 2) Select expression
 * 3) The operator `tir.if_then_else`
 * 4) non-constant-true Block predicates
 * \param stmt The AST statement to be checked
 * \return A boolean indicating whether the statement contains the if-then-else pattern
 */
bool HasIfThenElse(const Stmt& stmt);

/*!
 * \brief Given the read/write region, extract the pattern of their index correspondence
 * namely, the mapping from read index to the write index.
 * \param read_region The read region
 * \param write_region The write region
 * \return A tuple of booleans, the extracted pattern
 * 0) exists: if the pattern is found
 * 1) surjective: if the pattern is surjective, i.e. each write index is mapped at least once
 *    e.g. A[i, j] = B[i, i, j]
 * 2) injective: if the pattern is injective, i.e. each write index is mapped at most once.
 *    e.g. A[i, j] = B[i]
 * 3) ordered: if the mapping is ordered
 * 4) no_const_read: if there is no constant indexing in the read indices,
 *    e.g. A[i, j] = B[0, i, j]
 * 5) no_shift_read: if there is no constant shift in the read indices,
 *    e.g. A[i, j] = B[i + 1, j]
 */
std::tuple</*exists=*/bool, /*surjective=*/bool, /*injective=*/bool, /*ordered=*/bool,
           /*no_const_read=*/bool, /*no_shift_read=*/bool>
AnalyzeReadWritePattern(const BufferRegion& read_region, const BufferRegion& write_region);

/*!
 * \brief Check if the block is a data parallel block, i.e. all the block vars are data parallel
 * \param block_sref The block to be checked
 * \return A boolean flag indicating if the block is a data parallel block
 */
bool IsSpatial(const StmtSRef& block_sref);

/*!
 * \brief Check whether a block has a trivial binding, i.e. each block var is bound to a outer loop,
 * from outer to inner.
 * \param self The schedule state
 * \param block_sref The block to be checked
 * \return A boolean flag indicating if the block has a trivial binding
 */
bool IsTrivialBinding(const ScheduleState& self, const StmtSRef& block_sref);

/*!
 * \brief Checks if the given block has data reuse opportunity and thus multi-level tiling is
 * beneficial.
 * \param self The schedule state
 * \param block_sref The block to be checked
 * \return A boolean indicating whether the block has data reuse opportunity
 */
bool NeedsMultiLevelTiling(const ScheduleState& self, const StmtSRef& block_sref);

/*!
 * \brief Checks if all the blocks in the PrimFunc are spatial
 * \param func The PrimFunc to be checked
 * \return A boolean indicating whether all the blocks in the PrimFunc are spatial
 */
bool IsSpatialPrimFunc(const PrimFunc& func);

/*!
 * \brief Checks if the rfactor or cross thread reduction is beneficial to the given block.
 * \param self The schedule state.
 * \param block_sref The block to be checked.
 * \param max_parallel_extent The maximum parallel jobs on the target.
 * \param max_parallel_basic The maximum cores on the target.
 * \return A boolean indicating whether the operation is beneficial.
 */
bool NeedsRFactorOrCrossThreadReduction(const tir::ScheduleState& self,   //
                                        const tir::StmtSRef& block_sref,  //
                                        int64_t max_parallel_extent,      //
                                        int64_t max_parallel_basic);

/*!
 * \brief Analyze the buffer region under the sref tree path [dom_low_inclusive, dom_high_exclusive)
 * Relaxation of the region may be used in upper-bound analysis, i.e. some extra region may be added
 * to the result.
 * \param region The buffer region to be analyzed
 * \param predicate The predicate of the access
 * \param dom_low_inclusive The lowest node in the sref tree path
 * \param dom_high_exclusive The highest node in the sref tree path
 * \param analyzer The arithmetic analyzer
 * \return An n-dimensional integer set
 */
Array<arith::IntSet> AnalyzeRegionUpperBound(const BufferRegion& region, const PrimExpr& predicate,
                                             const StmtSRef& dom_low_inclusive,
                                             const StmtSRef& dom_high_exclusive,
                                             arith::Analyzer* analyzer);

/*!
 * \brief Analyze the buffer region under the sref tree path [dom_low_inclusive, dom_high_exclusive)
 * Some subregion may be discarded during the lower-bound analysis.
 * \param region The buffer region to be analyzed
 * \param predicate The predicate of the block realize that touches the buffer region
 * \param dom_low_inclusive The lowest node in the sref tree path
 * \param dom_high_exclusive The highest node in the sref tree path
 * \param analyzer The analyzer
 * \return An n-dimensional integer set
 */
Array<arith::IntSet> AnalyzeRegionLowerBound(const BufferRegion& region, const PrimExpr& predicate,
                                             const StmtSRef& dom_low_inclusive,
                                             const StmtSRef& dom_high_exclusive,
                                             arith::Analyzer* analyzer);

/*!
 * \brief Check if the buffer indices are all plain Vars, and extract them if so
 * \param buffer_access The BufferLoad or BufferStore
 * \return The indices if the indices are all Vars, otherwise NullOpt
 */
template <typename T>
Optional<Array<Var>> CheckTrivialBufferIndices(const T& buffer_access) {
  Array<Var> indices;
  for (const PrimExpr& index : buffer_access->indices) {
    const VarNode* var = index.as<VarNode>();
    if (var == nullptr) {
      // A non-Var index (e.g. an affine expression) makes the access non-trivial
      return NullOpt;
    }
    indices.push_back(GetRef<Var>(var));
  }
  return indices;
}

/*!
 * \brief Simplify non-trivial expressions
 * \param expr The expression to be simplified
 * \param analyzer The analyzer
 * \return The simplified expression
 *
 * During scheduling, we often need to preserve block iters in trivial expressions that can be
 * simplified to constant values for further scheduling and analysis because simplifying away the
 * block iters may result in loss of information for further analysis.
 */
PrimExpr SimplifyNonTrivialExpr(const PrimExpr& expr, arith::Analyzer* analyzer);

/*! \brief Necessary information used for tensorization */
class TensorizeInfoNode : public Object {
 public:
  /*! \brief Maps loops in a target block to the ones in an intrinsic description */
  Map<tir::StmtSRef, tir::For> loop_map;
  /*! \brief Maps loops in an intrinsic description to its index, outer to inner */
  Map<tir::For, Integer> desc_loop_indexer;
  /*!
   * \brief Optional padded extents of the block iters when padding is needed to match the
   * intrinsic description
   */
  Optional<Array<Integer>> block_iter_paddings;

  void VisitAttrs(AttrVisitor* v) {
    v->Visit("loop_map", &loop_map);
    v->Visit("desc_loop_indexer", &desc_loop_indexer);
    v->Visit("block_iter_paddings", &block_iter_paddings);
  }

  static constexpr const char* _type_key = "tir.schedule.TensorizeInfo";
  TVM_DECLARE_FINAL_OBJECT_INFO(TensorizeInfoNode, Object);
};

class TensorizeInfo : public ObjectRef {
 public:
  TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(TensorizeInfo, ObjectRef, TensorizeInfoNode);
};

/*!
 * \brief Establish a mapping between loops in a target block and an intrinsic description
 * \param self The schedule state to be tensorized
 * \param block_sref The target block to match against
 * \param desc_func The prim func describing the computation to be tensorized
 * \param allow_padding Whether to allow padding the block iters to match the intrinsic description
 * \return TensorizeInfo structure if a valid mapping is found, NullOpt otherwise
 */
Optional<TensorizeInfo> GetTensorizeLoopMapping(const tir::ScheduleState& self,
                                                const tir::StmtSRef& block_sref,
                                                const tir::PrimFunc& desc_func, bool allow_padding);

/*! \brief Necessary information used to perform transformations for tensorization */
class AutoTensorizeMappingInfoNode : public Object {
 public:
  /*! \brief Possible mappings to apply to block iters */
  Array<IndexMap> mappings;

  /* Additional information from AutoTensorizeComparator */

  /*! \brief Mapping from LHS buffer to RHS buffer */
  Map<Buffer, Buffer> lhs_buffer_map;
  /*! \brief Buffer indices on RHS */
  Map<Buffer, Array<PrimExpr>> rhs_buffer_indices;
  /*! \brief Block iters on LHS */
  Array<IterVar> lhs_iters;
  /*! \brief Block iters on RHS */
  Array<IterVar> rhs_iters;

  void VisitAttrs(AttrVisitor* v) {
    v->Visit("mappings", &mappings);
    v->Visit("lhs_buffer_map", &lhs_buffer_map);
    v->Visit("rhs_buffer_indices", &rhs_buffer_indices);
    v->Visit("lhs_iters", &lhs_iters);
    v->Visit("rhs_iters", &rhs_iters);
  }

  static constexpr const char* _type_key = "tir.schedule.AutoTensorizeMappingInfo";
  TVM_DECLARE_FINAL_OBJECT_INFO(AutoTensorizeMappingInfoNode, Object);
};

class AutoTensorizeMappingInfo : public ObjectRef {
 public:
  TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(AutoTensorizeMappingInfo, ObjectRef,
                                            AutoTensorizeMappingInfoNode);
};

/*!
 * \brief Get mapping info between a target block and an intrinsic description including layout
 * transformations to apply.
 * \param self The schedule state
 * \param block_sref The compute block for auto tensorization
 * \param desc_func The prim func describing the computation to be tensorized
 * \return AutoTensorizeMappingInfo structure if a potential mapping is found, NullOpt otherwise.
 * \note Returning a valid AutoTensorizeMappingInfo doesn't guarantee the block can be tensorized.
 * We will need to apply the suggested layout transformations and then match against the tensor
 * intrinsics.
 */
Optional<AutoTensorizeMappingInfo> GetAutoTensorizeMappingInfo(const ScheduleState& self,
                                                               const StmtSRef& block_sref,
                                                               const PrimFunc& desc_func);

/*!
 * \brief Perform basic checks for auto tensorization applicability, such as the structure of
 * arithmetic operations and data types.
 * \param sch The schedule to be tensorized
 * \param block_rv The compute block for auto tensorization
 * \param desc_func The prim func describing the computation to be tensorized
 * \return true if basic conditions are met.
 */
bool CheckAutoTensorizeApplicable(const tir::Schedule& sch, const tir::BlockRV& block_rv,
                                  const tir::PrimFunc& desc_func);

}  // namespace tir
}  // namespace tvm

#endif  // TVM_TIR_SCHEDULE_ANALYSIS_H_
https://github.com/zk-ml/tachikoma
src/tir/schedule/concrete_schedule.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
#ifndef TVM_TIR_SCHEDULE_CONCRETE_SCHEDULE_H_
#define TVM_TIR_SCHEDULE_CONCRETE_SCHEDULE_H_

#include <memory>
#include <utility>
#include <vector>

#include "./utils.h"

namespace tvm {
namespace tir {

class ConcreteScheduleNode : public ScheduleNode {
  friend class Schedule;
  friend class ScheduleCopier;

 public:
  using TSymbolTable = Map<ObjectRef, ObjectRef>;

 protected:
  /*! \brief The internal state of scheduling */
  ScheduleState state_;
  /*! \brief The function to be worked on. */
  Optional<GlobalVar> func_working_on_;
  /*! \brief The level of error rendering */
  ScheduleErrorRenderLevel error_render_level_;
  /*! \brief A symbol table that maps random variables to concrete StmtSRef/Integers */
  TSymbolTable symbol_table_;
  /*! \brief A persistent stateless arithmetic analyzer. */
  std::unique_ptr<arith::Analyzer> analyzer_;
  /*! \brief The value of random state for sampling.
   */
  support::LinearCongruentialEngine::TRandState rand_state_;

 public:
  void VisitAttrs(tvm::AttrVisitor* v) {
    // `state_` is not visited
    // `func_working_on_` is not visited
    // `error_render_level_` is not visited
    // `symbol_table_` is not visited
    // `analyzer_` is not visited
    // `rand_state_` is not visited
  }

  virtual ~ConcreteScheduleNode() = default;

 public:
  ScheduleState state() const final { return state_; }
  Optional<Trace> trace() const override { return NullOpt; }
  void WorkOn(const String& func_name) final;
  Schedule Copy() override;
  void Seed(support::LinearCongruentialEngine::TRandState seed) final;
  support::LinearCongruentialEngine::TRandState ForkSeed() final;

 public:
  /******** Lookup random variables ********/
  inline Block Get(const BlockRV& block_rv) const final;
  inline For Get(const LoopRV& loop_rv) const final;
  inline PrimExpr Get(const ExprRV& expr_rv) const final;
  inline StmtSRef GetSRef(const BlockRV& block_rv) const final;
  inline StmtSRef GetSRef(const LoopRV& loop_rv) const final;
  inline bool HasBlock(const BlockRV& block_rv) const final;
  inline Array<StmtSRef> GetSRefs(const Array<BlockRV>& rvs) const;
  inline Array<StmtSRef> GetSRefs(const Array<LoopRV>& rvs) const;
  void RemoveRV(const BlockRV& block_rv) final { RemoveFromSymbolTable(block_rv); }
  void RemoveRV(const LoopRV& loop_rv) final { RemoveFromSymbolTable(loop_rv); }
  void RemoveRV(const ExprRV& expr_rv) final { RemoveFromSymbolTable(expr_rv); }
  using ScheduleNode::GetSRef;

 public:
  /******** Schedule: Sampling ********/
  ExprRV SampleCategorical(const Array<Integer>& candidates, const Array<FloatImm>& probs,
                           Optional<Integer> decision = NullOpt) override;
  Array<ExprRV> SamplePerfectTile(const LoopRV& loop_rv, int n, int max_innermost_factor,
                                  Optional<Array<Integer>> decision = NullOpt) override;
  LoopRV SampleComputeLocation(const BlockRV& block_rv,
                               Optional<Integer> decision = NullOpt) override;
  /******** Schedule: Get blocks & loops ********/
  BlockRV GetBlock(const String& name, const
Optional<String>& func_name) override;
  Array<LoopRV> GetLoops(const BlockRV& block_rv) override;
  Array<BlockRV> GetChildBlocks(const BlockRV& block_rv) override;
  Array<BlockRV> GetChildBlocks(const LoopRV& loop_rv) override;
  Array<BlockRV> GetProducers(const BlockRV& block_rv) override;
  Array<BlockRV> GetConsumers(const BlockRV& block_rv) override;
  /******** Schedule: Transform loops ********/
  LoopRV Fuse(const Array<LoopRV>& loop_rvs, bool preserve_unit_iters) override;
  Array<LoopRV> Split(const LoopRV& loop_rv, const Array<Optional<ExprRV>>& factors,
                      bool preserve_unit_iters) override;
  void Reorder(const Array<LoopRV>& ordered_loop_rvs) override;
  LoopRV AddUnitLoop(const BlockRV& block_rv) override;
  LoopRV AddUnitLoop(const LoopRV& loop_rv) override;
  /******** Schedule: Manipulate ForKind ********/
  void Parallel(const LoopRV& loop_rv) override;
  void Vectorize(const LoopRV& loop_rv) override;
  void Bind(const LoopRV& loop_rv, const String& thread_axis) override;
  void Unroll(const LoopRV& loop_rv) override;
  /******** Schedule: Insert cache stages ********/
  BlockRV CacheRead(const BlockRV& block_rv, int read_buffer_index, const String& storage_scope,
                    const Array<BlockRV> consumer_blocks = {}) override;
  BlockRV CacheWrite(const BlockRV& block_rv, int write_buffer_index,
                     const String& storage_scope) override;
  Array<BlockRV> CacheInplace(const BlockRV& block_rv, int read_buffer_index,
                              const String& storage_scope) override;
  Array<BlockRV> CacheIndex(const BlockRV& block_rv, int write_buffer_index) override;
  BlockRV ReIndex(const BlockRV& block_rv, int buffer_index,
                  BufferIndexType buffer_index_type) override;
  /******** Schedule: Compute location ********/
  void ComputeAt(const BlockRV& block_rv, const LoopRV& loop_rv, bool preserve_unit_loops,
                 int index = -1) override;
  void ReverseComputeAt(const BlockRV& block_rv, const LoopRV& loop_rv, bool preserve_unit_loops,
                        int index = -1) override;
  void ComputeInline(const BlockRV& block) override;
  void ReverseComputeInline(const BlockRV& block) override;
  /******** Schedule: Reduction ********/
  BlockRV RFactor(const LoopRV& loop_rv, int factor_axis) override;
  BlockRV DecomposeReduction(const BlockRV& block_rv, const LoopRV& loop_rv) override;
  void PadEinsum(const BlockRV& block_rv, const Array<Integer>& padding) override;
  /******** Schedule: Block annotation ********/
  void StorageAlign(const BlockRV& block_rv, int buffer_index, int axis, int factor,
                    int offset) override;
  void SetScope(const BlockRV& block_rv, int buffer_index, const String& storage_scope) override;
  /******** Schedule: Blockize & Tensorize ********/
  BlockRV Blockize(const LoopRV& loop_rv) override;
  void Tensorize(const BlockRV& block_rv, const String& intrin) override;
  void Tensorize(const LoopRV& loop_rv, const String& intrin) override;
  /******** Schedule: Annotation ********/
  void Annotate(const LoopRV& loop_rv, const String& ann_key, const ObjectRef& ann_val) override;
  void Unannotate(const LoopRV& loop_rv, const String& ann_key) override;
  void Annotate(const BlockRV& block_rv, const String& ann_key, const ObjectRef& ann_val) override;
  void Unannotate(const BlockRV& block_rv, const String& ann_key) override;
  /******** Schedule: Layout transformation ********/
  void TransformLayout(const BlockRV& block_rv, int buffer_index,
                       BufferIndexType buffer_index_type, const IndexMap& index_map,
                       const Optional<IndexMap>& pad_value) override;
  void TransformBlockLayout(const BlockRV& block_rv, const IndexMap& index_map) override;
  void SetAxisSeparator(const BlockRV& block_rv, int buffer_index,
                        BufferIndexType buffer_index_type,
                        const Array<IntImm>& axis_separators) override;
  /******** Schedule: Padding decomposition ********/
  BlockRV DecomposePadding(const BlockRV& block_rv, const LoopRV& loop_rv) override;
  /******** Schedule: Buffer transformation ********/
  void RollingBuffer(const BlockRV& block_rv, int write_buffer_index) override;
  /******** Schedule: Misc ********/
  void EnterPostproc() override {}

 protected:
  /******** Utility functions ********/
  /*!
   * \brief Copy the schedule state, as well as the symbol table
   * \param new_state The ScheduleState copied
   * \param new_symbol_table The symbol table copied
   */
  void Copy(ScheduleState* new_state, TSymbolTable* new_symbol_table) const;
  /*!
   * \brief Add srefs as random variables into the symbol table
   * \tparam T The type of the random variables
   * \param srefs The srefs to be added to the symbol table
   * \return The new random variables created
   */
  template <class T>
  inline Array<T> CreateRV(const Array<StmtSRef>& srefs);
  /*!
   * \brief Add an sref as a random variable into the symbol table
   * \tparam T The type of the random variable
   * \param sref The sref to be added to the symbol table
   * \return The new random variable created
   */
  template <class T>
  inline T CreateRV(const StmtSRef& sref);
  /*!
   * \brief Add an integer as a random variable into the symbol table
   * \param value The integer to be added to the symbol table
   * \return The new random variable created
   */
  inline ExprRV CreateRV(int64_t value);
  /*!
   * \brief Add a list of integers as random variables into the symbol table
   * \param value The list of integers to be added to the symbol table
   * \return The new random variables created
   */
  inline Array<ExprRV> CreateRV(const std::vector<int64_t>& value);
  /*! \brief Remove a random variable from the symbol table */
  inline void RemoveFromSymbolTable(const ObjectRef& rv);
  /*!
   * \brief Check the annotation value is valid and look up the random variable. Raises an exception
   * if the type of the annotation value is not allowed.
   * \param ann_val The annotation value.
   * \return The annotation value with random variables substituted with their values.
 */
  ObjectRef CheckAndGetAnnotationValue(const ObjectRef& ann_val);
};

// implementations

/******** Lookup random variables ********/

inline Block ConcreteScheduleNode::Get(const BlockRV& block_rv) const {
  StmtSRef sref = this->GetSRef(block_rv);
  const BlockNode* block = TVM_SREF_TO_BLOCK(sref);
  return GetRef<Block>(block);
}

inline For ConcreteScheduleNode::Get(const LoopRV& loop_rv) const {
  StmtSRef sref = this->GetSRef(loop_rv);
  const ForNode* loop = TVM_SREF_TO_FOR(sref);
  return GetRef<For>(loop);
}

inline PrimExpr ConcreteScheduleNode::Get(const ExprRV& expr_rv) const {
  // Substitute every sampled variable with the concrete integer it maps to in the symbol table,
  // then simplify the resulting expression.
  PrimExpr transformed = Substitute(expr_rv, [this](const Var& var) -> Optional<PrimExpr> {
    auto it = this->symbol_table_.find(var);
    if (it == this->symbol_table_.end()) {
      LOG(FATAL) << "IndexError: Cannot find corresponding ExprRV: " << var;
    }
    const ObjectRef& obj = (*it).second;
    const auto* int_imm = TVM_TYPE_AS(obj, IntImmNode);
    return Integer(int_imm->value);
  });
  return this->analyzer_->Simplify(transformed);
}

inline bool ConcreteScheduleNode::HasBlock(const BlockRV& block_rv) const {
  auto it = this->symbol_table_.find(block_rv);
  if (it == this->symbol_table_.end()) {
    return false;
  }
  const ObjectRef& obj = (*it).second;
  const auto* sref = obj.as<StmtSRefNode>();
  // A null stmt means the block has been removed from the IRModule since the RV was created
  if (sref == nullptr || sref->stmt == nullptr) {
    return false;
  }
  return true;
}

inline StmtSRef ConcreteScheduleNode::GetSRef(const BlockRV& block_rv) const {
  auto it = this->symbol_table_.find(block_rv);
  if (it == this->symbol_table_.end()) {
    LOG(FATAL) << "IndexError: Cannot find corresponding BlockRV: " << block_rv;
  }
  const ObjectRef& obj = (*it).second;
  const auto* sref = obj.as<StmtSRefNode>();
  if (sref == nullptr) {
    LOG(FATAL) << "ValueError: BlockRV's corresponding type is invalid: "
               << (obj.defined() ? obj->GetTypeKey() : "None");
  }
  if (sref->stmt == nullptr) {
    LOG(FATAL) << "ValueError: The block no longer exists in the IRModule";
  }
  return GetRef<StmtSRef>(sref);
}

inline StmtSRef ConcreteScheduleNode::GetSRef(const LoopRV& loop_rv) const {
  // A LoopRV may map to the special inline/root marks instead of a real sref;
  // these are returned as-is without dereferencing.
  static StmtSRef inline_mark = StmtSRef::InlineMark();
  static StmtSRef root_mark = StmtSRef::RootMark();
  auto it = this->symbol_table_.find(loop_rv);
  if (it == this->symbol_table_.end()) {
    LOG(FATAL) << "IndexError: Cannot find corresponding LoopRV: " << loop_rv;
  }
  const ObjectRef& obj = (*it).second;
  if (obj.same_as(inline_mark)) {
    return inline_mark;
  }
  if (obj.same_as(root_mark)) {
    return root_mark;
  }
  const auto* sref = obj.as<StmtSRefNode>();
  if (sref == nullptr) {
    LOG(FATAL) << "ValueError: LoopRV's corresponding type is invalid: "
               << (obj.defined() ? obj->GetTypeKey() : "None");
  }
  if (sref->stmt == nullptr) {
    LOG(FATAL) << "ValueError: The loop no longer exists in the IRModule";
  }
  return GetRef<StmtSRef>(sref);
}

// Shared implementation for the two GetSRefs overloads below.
template <class T>
inline Array<StmtSRef> GetSRefsHelper(const ConcreteScheduleNode* sch, const Array<T>& rvs) {
  Array<StmtSRef> result;
  result.reserve(rvs.size());
  for (const T& rv : rvs) {
    result.push_back(sch->GetSRef(rv));
  }
  return result;
}

inline Array<StmtSRef> ConcreteScheduleNode::GetSRefs(const Array<BlockRV>& rvs) const {
  return GetSRefsHelper(this, rvs);
}

inline Array<StmtSRef> ConcreteScheduleNode::GetSRefs(const Array<LoopRV>& rvs) const {
  return GetSRefsHelper(this, rvs);
}

/******** Adding/Removing elements in the symbol table ********/

template <class T>
inline Array<T> ConcreteScheduleNode::CreateRV(const Array<StmtSRef>& srefs) {
  Array<T> result;
  result.reserve(srefs.size());
  for (const StmtSRef& sref : srefs) {
    T rv;
    this->symbol_table_.Set(rv, sref);
    result.push_back(rv);
  }
  return result;
}

template <class T>
inline T ConcreteScheduleNode::CreateRV(const StmtSRef& sref) {
  T rv;
  this->symbol_table_.Set(rv, sref);
  return std::move(rv);
}

inline ExprRV ConcreteScheduleNode::CreateRV(int64_t value) {
  // The variable name is derived from the current symbol table size, so names are
  // unique within one schedule ("v1", "v2", ...).
  Var rv("v" + std::to_string(this->symbol_table_.size() + 1), DataType::Int(32));
  // NOTE(review): the value is narrowed to int32 here — presumably sampled values always fit;
  // confirm 64-bit values are never expected.
  this->symbol_table_.Set(rv, Integer(static_cast<int32_t>(value)));
  return std::move(rv);
}

inline Array<ExprRV> ConcreteScheduleNode::CreateRV(const std::vector<int64_t>& value) {
  Array<ExprRV> results;
  results.reserve(value.size());
  for (int64_t v : value) {
    results.push_back(CreateRV(v));
  }
  return results;
}

inline void ConcreteScheduleNode::RemoveFromSymbolTable(const ObjectRef& obj) {
  auto it = this->symbol_table_.find(obj);
  if (it != this->symbol_table_.end()) {
    this->symbol_table_.erase(obj);
  } else {
    LOG(FATAL) << "IndexError: Cannot find the object in the symbol table: " << obj;
    // NOTE(review): LOG(FATAL) presumably does not return, so this `throw` looks unreachable —
    // it appears to be kept only to satisfy compiler control-flow analysis.
    throw;
  }
}

}  // namespace tir
}  // namespace tvm

#endif  // TVM_TIR_SCHEDULE_CONCRETE_SCHEDULE_H_
https://github.com/zk-ml/tachikoma
src/tir/schedule/error.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ #ifndef TVM_TIR_SCHEDULE_ERROR_H_ #define TVM_TIR_SCHEDULE_ERROR_H_ #include <tvm/tir/schedule/state.h> #include <string> #include <utility> namespace tvm { namespace tir { /*! \brief Error that happens during TensorIR scheduling */ class ScheduleError : public tvm::runtime::Error { public: /*! \brief Base constructor */ ScheduleError() : tvm::runtime::Error("") {} /*! \brief The error occurred in this IRModule */ virtual IRModule mod() const = 0; /*! \brief The locations of interest that we want to point out */ virtual Array<ObjectRef> LocationsOfInterest() const = 0; /*! * \brief Returns an error string template for rendering, corresponds to the "detail" mode. * \sa ScheduleErrorRenderLevel * \note The template is a string, e.g. * "Some error occurred on block {0} and loop {1} blah blah" * And renderer will replace {0} and {1} according to the list provided LocationsOfInterest. Right * now it only printed out all the locations in plain text, but in the future, we may want to mark * the IR with underscores and attach names to each location of interest, like what synr does. */ virtual String DetailRenderTemplate() const = 0; /*! 
* \brief Returns an error string without needing to render, corresponds to the "fast" mode * \sa ScheduleErrorRenderLevel */ virtual String FastErrorString() const = 0; /*! \brief Render the ScheduleError with the template provided by `DetailRenderTemplate` */ String RenderReport(const String& primitive) const; }; class LoopPositionError : public ScheduleError { public: explicit LoopPositionError(IRModule mod, For loop, Block block, const std::string& primitive) : mod_(std::move(mod)), loop_(std::move(loop)), block_(std::move(block)), primitive_(primitive) {} String FastErrorString() const final { return "ScheduleError: " + primitive_ + " expect the loop to be an ancestor of block"; } String DetailRenderTemplate() const final { std::ostringstream os; os << "ScheduleError: The input loop {0} of " << primitive_ << " is required to be be an ancestor of block {1}."; return os.str(); } IRModule mod() const final { return mod_; } Array<ObjectRef> LocationsOfInterest() const final { return {loop_, block_}; } IRModule mod_; For loop_; Block block_; std::string primitive_; }; } // namespace tir } // namespace tvm #endif // TVM_TIR_SCHEDULE_ERROR_H_
https://github.com/zk-ml/tachikoma
src/tir/schedule/instruction_traits.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
#ifndef TVM_TIR_SCHEDULE_INSTRUCTION_TRAITS_H_
#define TVM_TIR_SCHEDULE_INSTRUCTION_TRAITS_H_

#include <tvm/tir/schedule/instruction.h>
#include <tvm/tir/schedule/schedule.h>

#include <algorithm>
#include <sstream>
#include <string>
#include <utility>
#include <vector>

namespace tvm {
namespace tir {

/*!
 * \brief Register an InstructionKind using a trait class
 * \param InstructionKindTraits A traits class of an InstructionKind
 *
 * Example:
 *
 * \code
 *
 * struct SomeInstructionKindTraits {
 *   static constexpr const char* kName = "name-of-the-instruction";
 *   static constexpr bool kIsPure = false;
 *
 *   // Convertible to `InstructionKindNode::FInstructionApply`
 *   static Array<ObjectRef> ApplyToSchedule(
 *       const tir::Schedule& sch,
 *       const Array<ObjectRef>& inputs,
 *       const Array<ObjectRef>& attrs,
 *       const Optional<ObjectRef>& decision);
 *
 *   // Convertible to `InstructionKindNode::FInstructionAsPython`
 *   static String AsPython(
 *       const Array<String>& inputs,
 *       const Array<ObjectRef>& attrs,
 *       const Optional<ObjectRef>& decision,
 *       const Array<String>& outputs);
 *
 *   // Convertible to `InstructionKindNode::FInstructionAttrsAsJSON`
 *   static ObjectRef AttrsAsJSON(
 *       const Array<ObjectRef>& attrs);
 *
 *   // Convertible to `InstructionKindNode::FInstructionAttrsFromJSON`
 *   static Array<ObjectRef> AttrsFromJSON(
 *       const ObjectRef& attrs_record);
 * };
 *
 * TVM_REGISTER_INST_KIND_TRAITS(SomeInstructionKindTraits);
 *
 * \endcode
 */
#define TVM_REGISTER_INST_KIND_TRAITS(InstructionKindTraits)          \
  TVM_REGISTER_INST_KIND(InstructionKindTraits::kName)                \
      .set_is_pure(InstructionKindTraits::kIsPure)                    \
      .set_apply_to_schedule(InstructionKindTraits::ApplyToSchedule)  \
      .set_attrs_as_json(InstructionKindTraits::AttrsAsJSON)          \
      .set_attrs_from_json(InstructionKindTraits::AttrsFromJSON)      \
      .set_as_python(InstructionKindTraits::AsPython)

/*!
 * \brief A helper to conveniently register an InstructionKind. When inherited in curiously
 * recursive template pattern, the derived class `TTraits` only needs to define two functions on the
 * unpacked inputs, and the helper handles unpacking and downcasting. See the example for more
 * details.
 *
 * \tparam TTraits The derived class
 *
 * Example:
 *
 * \code
 *
 * struct SamplePerfectTileTraits : public UnpackedInstTraits<SamplePerfectTileTraits> {
 *   // The name of this kind of instruction
 *   static constexpr const char* kName = "SamplePerfectTile";
 *   // A boolean indicating if the instruction is pure, i.e. change nothing in the schedule state
 *   static constexpr bool kIsPure = true;
 *   // The number of inputs in this kind of instruction
 *   static constexpr size_t kNumInputs = 1;
 *   // The number of attributes in this kind of instruction
 *   static constexpr size_t kNumAttrs = 2;
 *   // The number of decisions in this kind of instruction (only 0 or 1 is allowed)
 *   static constexpr size_t kNumDecisions = 1;
 *
 *   // Calling convention:
 *   // - All the arguments must be ObjectRef
 *   // - The 1st argument is Schedule
 *   // - The next `kNumInputs` arguments are input random variables
 *   // - The next `kNumAttrs` arguments are attributes
 *   // - The next argument is decision, if `kNumDecisions == 1`
 *   static Array<Var> UnpackedApplyToSchedule(
 *       Schedule sch,
 *       LoopRV loop_rv,
 *       Integer n,
 *       Integer max_innermost_factor,
 *       Optional<Array<Integer>> decision) {
 *     return sch->SamplePerfectTile(loop_rv, n->value, max_innermost_factor->value, decision);
 *   }
 *
 *   // Calling convention:
 *   // - All the arguments must be ObjectRef
 *   // - The 1st argument is an array containing names of output random variables
 *   // - The next `kNumInputs` arguments are names of input random variables
 *   // - The next `kNumAttrs` arguments are attributes
 *   // - The next argument is decision, if `kNumDecisions == 1`
 *   static String UnpackedAsPython(
 *       Array<String> outputs,
 *       String loop_rv,
 *       Integer n,
 *       Integer max_innermost_factor,
 *       Optional<Array<Integer>> decision) {
 *     PythonAPICall py("sample_perfect_tile");
 *     py.Input("loop", loop_rv);
 *     py.Input("n", n->value);
 *     py.Input("max_innermost_factor", max_innermost_factor->value);
 *     py.Decision(decision);
 *     py.OutputList(outputs);
 *     return py.Str();
 *   }
 *
 *   template <typename>
 *   friend struct UnpackedInstTraits;
 * };
 *
 * TVM_REGISTER_INST_KIND(SamplePerfectTileTraits);
 * \endcode
 */
template <class TTraits>
struct UnpackedInstTraits {
  /*!
   * \brief Unpack the arguments in the calling convention, and feed them into
   * `TTraits::UnpackedApplyToSchedule`
   * \sa InstructionKindNode::f_apply_to_schedule
   */
  static Array<ObjectRef> ApplyToSchedule(const Schedule& sch, const Array<ObjectRef>& inputs,
                                          const Array<ObjectRef>& attrs,
                                          const Optional<ObjectRef>& decision);

  /*!
   * \brief Unpack the arguments in the calling convention, and feed them into
   * `TTraits::UnpackedAsPython`
   * \sa InstructionKindNode::f_as_python
   */
  static String AsPython(const Array<ObjectRef>& inputs, const Array<ObjectRef>& attrs,
                         const Optional<ObjectRef>& decision, const Array<String>& outputs);

  /*! \brief No customized serializer by default */
  static constexpr std::nullptr_t AttrsAsJSON = nullptr;

  /*! \brief No customized deserializer by default */
  static constexpr std::nullptr_t AttrsFromJSON = nullptr;

 protected:
  // Helpers that marshal inputs/attrs/decision into a TVM packed-call argument
  // list.  `index_offset` is the position of the first element to write; the
  // offsets must match the calling convention documented in the class example.
  template <size_t index_offset>
  static TVM_ALWAYS_INLINE void _SetInputs(const runtime::TVMArgsSetter& setter,
                                           const Array<ObjectRef>& inputs);
  template <size_t index_offset>
  static TVM_ALWAYS_INLINE void _SetAttrs(const runtime::TVMArgsSetter& setter,
                                          const Array<ObjectRef>& attrs);
  template <size_t index_offset>
  static TVM_ALWAYS_INLINE void _SetDecision(const runtime::TVMArgsSetter& setter,
                                             const Optional<ObjectRef>& decision);
  // Normalizes the return value of `UnpackedApplyToSchedule` (void / single
  // ObjectRef / Array) into an Array<ObjectRef>.
  static TVM_ALWAYS_INLINE Array<ObjectRef> _ConvertOutputs(const TVMRetValue& rv);
};

/*!
 * \brief A helper class that constructs schedule API call in python syntax,
 * which helps convert an Inst to a python statement.
 * \sa InstructionKindNode::f_as_python
 */
class PythonAPICall {
 public:
  /*!
   * \brief Constructor
   * \param method_name The name of the schedule API to be called
   */
  explicit PythonAPICall(String method_name) : method_name_(method_name), output_(NullOpt) {}
  /*! \brief Add an integer input */
  inline void Input(String arg_name, int arg);
  /*! \brief Add an integer input */
  inline void Input(String arg_name, int64_t arg);
  /*! \brief Add a bool input */
  inline void Input(String arg_name, bool arg);
  /*! \brief Add a double input */
  inline void Input(String arg_name, double arg);
  /*! \brief Add an input random variable */
  inline void Input(String arg_name, String arg);
  /*! \brief Add an input, dispatched to different implementations according to the object's type */
  inline void Input(String arg_name, ObjectRef arg);
  /*! \brief Add the decision */
  inline void Decision(ObjectRef decision);
  /*!
   * \brief Add a single output random variable
   * \param unit_array An array containing only one element
   */
  inline void SingleOutput(Array<String> unit_array);
  /*! \brief Add a list of output random variables */
  inline void OutputList(Array<String> outputs);
  /*! \returns The schedule API call in python syntax */
  inline String Str() const;

 private:
  /*! \brief Converts a TVM object to python string and print to the output stream */
  inline void AsPythonString(const ObjectRef& obj, std::ostream& os);

 private:
  /*! \brief The name of the API to call */
  String method_name_;
  /*! \brief The output of the instruction */
  Optional<String> output_;
  /*! \brief The names of input arguments */
  std::vector<String> arg_names_;
  /*! \brief The values of input arguments */
  std::vector<String> args_;
};

/********** implementation details **********/

// forward declaration
namespace details {

// Compile-time check that every type in a parameter pack derives from ObjectRef.
template <typename... Args>
struct _ArgsPacker;

template <>
struct _ArgsPacker<> {
  static constexpr bool checked = true;
};

template <typename TObjectRef, typename... Args>
struct _ArgsPacker<TObjectRef, Args...> {
  static constexpr bool checked =
      std::is_base_of<ObjectRef, TObjectRef>::value && _ArgsPacker<Args...>::checked;
};

// Decomposes a function type into its return type and checked argument pack.
template <typename T>
struct _MethodType {};

template <typename TReturn, typename... Args>
struct _MethodType<TReturn(Args...)> {
  using return_type = TReturn;
  using argument_type = _ArgsPacker<Args...>;
};

// Counts the number of parameters of a function type.
template <typename T>
struct _NumArgs {};

template <typename TReturn, typename... Args>
struct _NumArgs<TReturn(Args...)> {
  static constexpr size_t value = sizeof...(Args);
};

// Detects whether T is a runtime::Array specialization.
template <typename>
struct _IsTVMArray : std::false_type {};

template <typename T>
struct _IsTVMArray<runtime::Array<T>> : std::true_type {};

// True for an ObjectRef-derived type that is not an Array.
template <typename T>
struct _IsSingleObject
    : std::integral_constant<bool, std::is_base_of<ObjectRef, T>::value && !_IsTVMArray<T>::value> {
};

// Convenience aliases used by UnpackedInstTraits below.
template <class T>
using ReturnType = typename _MethodType<std::remove_cv_t<T>>::return_type;

template <class T>
static constexpr bool ArgumentAreAllObjects =
    _MethodType<std::remove_cv_t<T>>::argument_type::checked;

template <class T>
static constexpr size_t NumArgs = _NumArgs<std::remove_cv_t<T>>::value;

template <class T>
static constexpr int IsTVMArray = _IsTVMArray<std::remove_cv_t<T>>::value;

template <class T>
static constexpr int IsSingleObject = _IsSingleObject<std::remove_cv_t<T>>::value;

};  // namespace details

template <class TTraits>
Array<ObjectRef> UnpackedInstTraits<TTraits>::ApplyToSchedule(const Schedule& sch,
                                                              const Array<ObjectRef>& inputs,
                                                              const Array<ObjectRef>& attrs,
                                                              const Optional<ObjectRef>& decision) {
  using method_type = decltype(TTraits::UnpackedApplyToSchedule);
  using return_type = details::ReturnType<method_type>;
  static_assert(details::ArgumentAreAllObjects<method_type>,
                "All arguments to `UnpackedApplyToSchedule` must be subclasses of ObjectRef");
  constexpr size_t kNumArgs = details::NumArgs<method_type>;
  constexpr size_t kNumInputs = TTraits::kNumInputs;
  constexpr size_t kNumAttrs = TTraits::kNumAttrs;
  constexpr size_t kNumDecisions = TTraits::kNumDecisions;
  static_assert(kNumArgs == 1 + kNumInputs + kNumAttrs + kNumDecisions,
                "length of argument list mismatch");
  // Lay out the packed-call argument list: slot 0 is the schedule, followed by
  // inputs, attrs, and (optionally) the decision, in that exact order.
  TVMValue tvm_values[kNumArgs];
  int tvm_type_codes[kNumArgs];
  runtime::TVMArgsSetter setter(tvm_values, tvm_type_codes);
  setter(0, sch);
  TTraits::template _SetInputs<1>(setter, inputs);
  TTraits::template _SetAttrs<1 + kNumInputs>(setter, attrs);
  TTraits::template _SetDecision<1 + kNumInputs + kNumAttrs>(setter, decision);
  // Route through a PackedFunc so that `unpack_call` can downcast each slot to
  // the parameter type expected by `UnpackedApplyToSchedule`.
  PackedFunc pf([](const TVMArgs& args, TVMRetValue* rv) -> void {
    using runtime::detail::unpack_call;
    constexpr size_t kNumArgs = details::NumArgs<method_type>;
    ICHECK_EQ(args.size(), kNumArgs);
    unpack_call<return_type, kNumArgs>(nullptr, TTraits::UnpackedApplyToSchedule, args, rv);
  });
  TVMRetValue rv;
  pf.CallPacked(TVMArgs(tvm_values, tvm_type_codes, kNumArgs), &rv);
  return TTraits::_ConvertOutputs(rv);
}

template <class TTraits>
String UnpackedInstTraits<TTraits>::AsPython(const Array<ObjectRef>& inputs,
                                             const Array<ObjectRef>& attrs,
                                             const Optional<ObjectRef>& decision,
                                             const Array<String>& outputs) {
  using method_type = decltype(TTraits::UnpackedAsPython);
  using return_type = details::ReturnType<method_type>;
  static_assert(details::ArgumentAreAllObjects<method_type>,
                "All arguments to `UnpackedAsPython` must be subclasses of ObjectRef");
  constexpr size_t kNumArgs = details::NumArgs<method_type>;
  constexpr size_t kNumInputs = TTraits::kNumInputs;
  constexpr size_t kNumAttrs = TTraits::kNumAttrs;
  constexpr size_t kNumDecisions = TTraits::kNumDecisions;
  static_assert(kNumArgs == 1 + kNumInputs + kNumAttrs + kNumDecisions,
                "length of argument list mismatch");
  // Same layout as ApplyToSchedule, except slot 0 carries the output names.
  TVMValue tvm_values[kNumArgs];
  int tvm_type_codes[kNumArgs];
  runtime::TVMArgsSetter setter(tvm_values, tvm_type_codes);
  setter(0, outputs);
  TTraits::template _SetInputs<1>(setter, inputs);
  TTraits::template _SetAttrs<1 + kNumInputs>(setter, attrs);
  TTraits::template _SetDecision<1 + kNumInputs + kNumAttrs>(setter, decision);
  PackedFunc pf([](const TVMArgs& args, TVMRetValue* rv) -> void {
    using runtime::detail::unpack_call;
    constexpr size_t kNumArgs = details::NumArgs<method_type>;
    ICHECK_EQ(args.size(), kNumArgs);
    unpack_call<return_type, kNumArgs>(nullptr, TTraits::UnpackedAsPython, args, rv);
  });
  TVMRetValue rv;
  pf.CallPacked(TVMArgs(tvm_values, tvm_type_codes, kNumArgs), &rv);
  String result = rv;
  return result;
}

template <class TTraits>
template <size_t index_offset>
TVM_ALWAYS_INLINE void UnpackedInstTraits<TTraits>::_SetInputs(const runtime::TVMArgsSetter& setter,
                                                               const Array<ObjectRef>& inputs) {
  constexpr size_t kNumInputs = TTraits::kNumInputs;
  ICHECK_EQ(kNumInputs, inputs.size())
      << "ValueError: Incorrect kNumInputs for instruction: " << TTraits::kName;
  // Write elements directly from the underlying ArrayNode storage.
  const ObjectRef* ptr = inputs.template as<ArrayNode>()->begin();
  for (size_t i = 0; i < kNumInputs; ++i) {
    setter(i + index_offset, *(ptr + i));
  }
}

template <class TTraits>
template <size_t index_offset>
TVM_ALWAYS_INLINE void UnpackedInstTraits<TTraits>::_SetAttrs(const runtime::TVMArgsSetter& setter,
                                                              const Array<ObjectRef>& attrs) {
  constexpr size_t kNumAttrs = TTraits::kNumAttrs;
  ICHECK_EQ(kNumAttrs, attrs.size())
      << "ValueError: Incorrect kNumAttrs for instruction: " << TTraits::kName;
  const ObjectRef* ptr = attrs.as<ArrayNode>()->begin();
  for (size_t i = 0; i < kNumAttrs; ++i) {
    setter(i + index_offset, *(ptr + i));
  }
}

template <class TTraits>
template <size_t index_offset>
TVM_ALWAYS_INLINE void UnpackedInstTraits<TTraits>::_SetDecision(
    const runtime::TVMArgsSetter& setter, const Optional<ObjectRef>& decision) {
  constexpr size_t kNumDecisions = TTraits::kNumDecisions;
  static_assert(kNumDecisions <= 1, "an instruction is supposed to have at most 1 decision");
  if (kNumDecisions == 1) {
    setter(index_offset, decision);
  } else {
    // No decision slot: the caller must not have provided one.
    ICHECK(!decision.defined());
  }
}

template <class TTraits>
TVM_ALWAYS_INLINE Array<ObjectRef> UnpackedInstTraits<TTraits>::_ConvertOutputs(
    const TVMRetValue& rv) {
  using method_type = decltype(TTraits::UnpackedApplyToSchedule);
  using return_type = details::ReturnType<method_type>;
  constexpr int is_array = details::IsTVMArray<return_type>;
  constexpr int is_single_obj = details::IsSingleObject<return_type>;
  constexpr int is_void = std::is_void<return_type>::value;
  // Exactly one of the three categories must hold; enforced at compile time,
  // so the runtime if-chain below always takes a branch.
  static_assert(is_array || is_single_obj || is_void, "return type not supported");
  static_assert(is_array + is_single_obj + is_void == 1, "internal template error");
  if (is_void) {
    return {};
  } else if (is_single_obj) {
    ObjectRef obj = rv;
    return {obj};
  } else if (is_array) {
    ObjectRef obj = rv;
    const ArrayNode* array = obj.as<ArrayNode>();
    return GetRef<Array<ObjectRef>>(array);
  }
}

/********** PythonAPICall **********/

inline void PythonAPICall::AsPythonString(const ObjectRef& obj, std::ostream& os) {
  if (!obj.defined()) {
    os << "None";
  } else if (const auto* str = obj.as<runtime::StringObj>()) {
    os << str->data;
  } else if (const auto* int_imm = obj.as<IntImmNode>()) {
    os << int_imm->value;
  } else if (const auto* float_imm = obj.as<FloatImmNode>()) {
    // 17 significant digits round-trips any IEEE-754 double exactly.
    os.precision(17);
    os << float_imm->value;
  } else if (const auto* array = obj.as<ArrayNode>()) {
    os << '[';
    bool is_first = true;
    for (const ObjectRef& e : *array) {
      if (is_first) {
        is_first = false;
      } else {
        os << ", ";
      }
      AsPythonString(e, os);
    }
    os << ']';
  } else if (const auto* dict = obj.as<MapNode>()) {
    os << '{';
    bool is_first = true;
    // Render map entries sorted by key so the output is deterministic.
    std::vector<std::pair<std::string, std::string>> dict_items;
    for (auto it = dict->begin(); it != dict->end(); ++it) {
      std::ostringstream ks;
      AsPythonString(it->first, ks);
      std::ostringstream vs;
      AsPythonString(it->second, vs);
      dict_items.emplace_back(ks.str(), vs.str());
    }
    std::sort(dict_items.begin(), dict_items.end(),
              [](const auto& p1, const auto& p2) { return p1.first < p2.first; });
    for (const auto& kv : dict_items) {
      if (is_first) {
        is_first = false;
      } else {
        os << ", ";
      }
      os << '\"' << kv.first << "\": " << kv.second;
    }
    os << '}';
  } else {
    LOG(FATAL) << "ValueError: Cannot translate type '" << obj->GetTypeKey()
               << "' to python. Its value is: " << obj;
    throw;
  }
}

void PythonAPICall::Input(String arg_name, int arg) {
  arg_names_.emplace_back(std::move(arg_name));
  args_.push_back(std::to_string(arg));
}

void PythonAPICall::Input(String arg_name, int64_t arg) {
  arg_names_.emplace_back(std::move(arg_name));
  args_.push_back(std::to_string(arg));
}

void PythonAPICall::Input(String arg_name, bool arg) {
  static const char* true_str = "True";
  static const char* false_str = "False";
  arg_names_.emplace_back(std::move(arg_name));
  if (arg) {
    args_.push_back(true_str);
  } else {
    args_.push_back(false_str);
  }
}

void PythonAPICall::Input(String arg_name, double arg) {
  arg_names_.emplace_back(std::move(arg_name));
  std::ostringstream os;
  // 17 significant digits round-trips any IEEE-754 double exactly.
  os.precision(17);
  os << arg;
  args_.push_back(os.str());
}

void PythonAPICall::Input(String arg_name, String arg) {
  arg_names_.emplace_back(std::move(arg_name));
  args_.emplace_back(std::move(arg));
}

void PythonAPICall::Input(String arg_name, ObjectRef arg) {
  arg_names_.emplace_back(std::move(arg_name));
  std::ostringstream os;
  AsPythonString(arg, os);
  args_.push_back(os.str());
}

void PythonAPICall::Decision(ObjectRef decision) {
  // A missing decision is simply omitted from the rendered call.
  if (decision.defined()) {
    this->Input("decision", decision);
  }
}

void PythonAPICall::SingleOutput(Array<String> unit_array) {
  ICHECK_EQ(unit_array.size(), 1);
  this->output_ = unit_array[0];
}

void PythonAPICall::OutputList(Array<String> outputs) {
  if (outputs.empty()) {
    return;
  }
  if (outputs.size() == 1) {
    // Trailing comma makes the LHS a 1-tuple unpack in python: `v0, = sch.f(...)`.
    this->output_ = outputs[0] + ",";
    return;
  }
  std::ostringstream os;
  os << outputs[0];
  for (int i = 1, n = outputs.size(); i < n; ++i) {
    os << ", " << outputs[i];
  }
  this->output_ = os.str();
}

String PythonAPICall::Str() const {
  std::ostringstream os;
  if (output_.defined()) {
    os << output_.value() << " = ";
  }
  os << "sch." << method_name_ << '(';
  int n = args_.size();
  for (int i = 0; i < n; ++i) {
    if (i > 0) {
      os << ", ";
    }
    // Positional when the name is empty, keyword (`name=value`) otherwise.
    if (arg_names_[i].empty()) {
      os << args_[i];
    } else {
      os << arg_names_[i] << '=' << args_[i];
    }
  }
  os << ')';
  return os.str();
}

}  // namespace tir
}  // namespace tvm

#endif  // TVM_TIR_SCHEDULE_INSTRUCTION_TRAITS_H_
https://github.com/zk-ml/tachikoma
src/tir/schedule/ir_comparator.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
#ifndef TVM_TIR_SCHEDULE_IR_COMPARATOR_H_
#define TVM_TIR_SCHEDULE_IR_COMPARATOR_H_

#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

#include "./utils.h"

namespace tvm {
namespace tir {

// Two-operand visitor signatures: each Visit* takes the LHS node plus the RHS
// node it is being compared against, and returns whether they match.
using ExprComparator = ExprFunctor<bool(const PrimExpr& n, const PrimExpr& other)>;
using StmtComparator = StmtFunctor<bool(const Stmt& n, const Stmt& other)>;

/*! \brief Deep comparison to check if two IR ASTs are equivalent for tensorization*/
class TensorizeComparator : public ExprComparator, public StmtComparator {
 public:
  /*!
   * \brief Constructor of TensorizeComparator
   * \param lhs_mod The IRModule of the LHS. This is used for error reporting.
   * \param assert_mode Whether to raise an error if the two IR ASTs do not match.
   */
  explicit TensorizeComparator(IRModule lhs_mod, bool assert_mode = true)
      : lhs_mod_(std::move(lhs_mod)), assert_mode_(assert_mode) {}

  // Entry points: dispatch on the LHS node type and compare against `other`.
  bool VisitExpr(const PrimExpr& n, const PrimExpr& other) override;
  bool VisitStmt(const Stmt& n, const Stmt& other) override;

  // Statement comparison, one overload per visited node kind.
  bool VisitStmt_(const ForNode* op, const Stmt& other) override;
  bool VisitStmt_(const SeqStmtNode* op, const Stmt& other) override;
  bool VisitStmt_(const BufferStoreNode* op, const Stmt& other) override;
  bool VisitStmt_(const BlockRealizeNode* op, const Stmt& other) override;
  bool VisitStmt_(const BlockNode* op, const Stmt& other) override;

  // Expression comparison, one overload per visited node kind.
  bool VisitExpr_(const AddNode* op, const PrimExpr& other) override;
  bool VisitExpr_(const SubNode* op, const PrimExpr& other) override;
  bool VisitExpr_(const MulNode* op, const PrimExpr& other) override;
  bool VisitExpr_(const DivNode* op, const PrimExpr& other) override;
  bool VisitExpr_(const ModNode* op, const PrimExpr& other) override;
  bool VisitExpr_(const EQNode* op, const PrimExpr& other) override;
  bool VisitExpr_(const NENode* op, const PrimExpr& other) override;
  bool VisitExpr_(const LTNode* op, const PrimExpr& other) override;
  bool VisitExpr_(const LENode* op, const PrimExpr& other) override;
  bool VisitExpr_(const GTNode* op, const PrimExpr& other) override;
  bool VisitExpr_(const GENode* op, const PrimExpr& other) override;
  bool VisitExpr_(const AndNode* op, const PrimExpr& other) override;
  bool VisitExpr_(const OrNode* op, const PrimExpr& other) override;
  bool VisitExpr_(const MinNode* op, const PrimExpr& other) override;
  bool VisitExpr_(const MaxNode* op, const PrimExpr& other) override;
  bool VisitExpr_(const FloorDivNode* op, const PrimExpr& other) override;
  bool VisitExpr_(const FloorModNode* op, const PrimExpr& other) override;
  bool VisitExpr_(const IntImmNode* op, const PrimExpr& other) override;
  bool VisitExpr_(const FloatImmNode* op, const PrimExpr& other) override;
  bool VisitExpr_(const CastNode* op, const PrimExpr& other) override;
  bool VisitExpr_(const VarNode* op, const PrimExpr& other) override;
  bool VisitExpr_(const BufferLoadNode* op, const PrimExpr& other) override;
  bool VisitExpr_(const SelectNode* op, const PrimExpr& other) override;

  /*! \brief Map from RHS buffer to LHS buffer */
  std::unordered_map<Buffer, Buffer, ObjectHash, ObjectEqual> rhs_buffer_map_;
  /*! \brief Base indices of the LHS buffer. */
  std::unordered_map<Buffer, std::vector<PrimExpr>, ObjectPtrHash, ObjectPtrEqual> buffer_indices_;

 protected:
  // Records the lhs->rhs variable binding on first encounter; later encounters
  // must agree with the recorded binding.
  bool DefEqual(const Var& lhs, const Var& rhs);
  // Virtual so AutoTensorizeComparator can relax the buffer-matching rules.
  virtual bool CompareBuffer(const Buffer& lhs, const Buffer& rhs);
  bool CompareBufferRegion(const BufferRegion& lhs, const BufferRegion& rhs);
  bool CompareAnnotation(const std::pair<String, ObjectRef>& lhs,
                         const std::pair<String, ObjectRef>& rhs);
  bool CompareAnnotationMap(const Map<String, ObjectRef>& lhs, const Map<String, ObjectRef>& rhs);
  template <typename T>
  bool CompareBufferAccess(const T* lhs, const T* rhs);
  // Compares two arrays element-wise using the given member comparator `cmp`.
  template <typename T, typename Self, typename F>
  bool CompareArray(const Array<T>& lhs, const Array<T>& rhs, F Self::*cmp);
  bool CompareRange(const Range& lhs, const Range& rhs);
  bool CompareIterVar(const IterVar& lhs, const IterVar& rhs);
  // Records (assert mode) or accumulates the failure message.
  void EmitError(const std::string& error_message);

  /*! \brief IRModule of the LHS stmt. */
  IRModule lhs_mod_;
  /*! \brief Whether assertion mode is enabled. */
  bool assert_mode_;
  /*! \brief Whether it is visiting the scope block (the outermost block). */
  bool is_scope_block = true;
  /*! \brief The arithmetic analyzer. */
  arith::Analyzer analyzer_;
  /*! \brief Additional error messages. Only used when assert_mode is true. */
  std::vector<std::string> error_messages_;
  // variable remap if any
  std::unordered_map<ObjectRef, ObjectRef, ObjectPtrHash, ObjectPtrEqual> equal_map_;
};

/*!
 * \brief IR comparator for auto tensorization.
 * This comparator is used to extract correspondence between the IR of the workload (LHS) and the
 * tensor intrin (RHS). Unlike `TensorizeComparator`, this comparator has relaxed requirements
 * during comparison. It ignores the loop structure (number of loops and their extents) and buffer
 * indices. It only requires the LHS and the RHS to have the same arithmetic operations and the same
 * dtype. With such relaxed requirements, workloads that can only match the tensor intrin after
 * certain transformations (e.g. im2col for conv2d) are allowed for auto tensorization.
 */
class AutoTensorizeComparator : public TensorizeComparator {
 public:
  explicit AutoTensorizeComparator(const IRModule& lhs_mod)
      : TensorizeComparator(lhs_mod, /* assert_mode=*/false) {}

 private:
  bool VisitExprDefault_(const Object* op, const PrimExpr& other) override;
  bool VisitStmtDefault_(const Object* op, const Stmt& other) override;
  bool VisitStmt_(const BlockNode* op, const Stmt& other) override;
  bool VisitStmt_(const BufferStoreNode* op, const Stmt& other) override;
  bool VisitExpr_(const BufferLoadNode* op, const PrimExpr& other) override;
  bool CompareBuffer(const Buffer& lhs, const Buffer& rhs) override;
  // NOTE: non-virtual; shadows the base-class template of the same name.
  template <typename T>
  bool CompareBufferAccess(const T* lhs, const T* rhs);

 public:
  // Additional information extracted from LHS (the workload) and RHS (the tensor intrin).
  /*! \brief Block iters in the LHS stmt. */
  std::vector<IterVar> lhs_iters_;
  /*! \brief Block iters in the RHS stmt. */
  std::vector<IterVar> rhs_iters_;
  /*! \brief The buffer and its access indices in the LHS stmt. */
  std::unordered_map<Buffer, Array<PrimExpr>, ObjectPtrHash, ObjectPtrEqual>
      lhs_buffer_indices_map_;
  /*! \brief The buffer and its access indices in the RHS stmt. */
  std::unordered_map<Buffer, Array<PrimExpr>, ObjectPtrHash, ObjectPtrEqual>
      rhs_buffer_indices_map_;
  /*! \brief Map from LHS buffer to RHS buffer */
  std::unordered_map<Buffer, Buffer, ObjectHash, ObjectEqual> lhs_buffer_map_;

 private:
  /*! \brief The domain of the inner block iters. */
  Map<Var, arith::IntSet> inner_iter_dom_map_;
};

}  // namespace tir
}  // namespace tvm

#endif  // TVM_TIR_SCHEDULE_IR_COMPARATOR_H_
https://github.com/zk-ml/tachikoma
src/tir/schedule/primitive.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ #ifndef TVM_TIR_SCHEDULE_PRIMITIVE_H_ #define TVM_TIR_SCHEDULE_PRIMITIVE_H_ #include <tvm/support/random_engine.h> #include <tvm/tir/schedule/state.h> #include <vector> namespace tvm { namespace tir { /******** Schedule: Sampling ********/ /*! * \brief Sample a random integer from a given range. * \param rand_state The pointer to schedule's random state. * \param min_inclusive The minimum value of the range, inclusive. * \param max_exclusive The maximum value of the range, exclusive. * \return The random integer sampled in the given range. */ TVM_DLL int32_t SampleInt(support::LinearCongruentialEngine::TRandState* rand_state, int32_t min_inclusive, int32_t max_exclusive); /*! * \brief Sample k random integers from given range without replacement, i.e, no duplication. * \param rand_state The pointer to schedule's random state * \param n The range is defined as 0 to n-1. * \param k The total number of samples. * \return The randomly selected samples from the n candidates. */ std::vector<int32_t> SampleWithoutReplacement( support::LinearCongruentialEngine::TRandState* rand_state, int32_t n, int32_t k); /*! * \brief Sample once category from candidates according to the probability weights. 
* \param rand_state The pointer to schedule's random state * \param candidates The candidates * \param probs The probability distribution of the candidates * \param decision The sampling decision, if any * \return The random variable sampled from candidates */ TVM_DLL int64_t SampleCategorical(support::LinearCongruentialEngine::TRandState* rand_state, const Array<Integer>& candidates, const Array<FloatImm>& probs, Optional<Integer>* decision); /*! * \brief Create a sampling function that does multinomial sampling. * \param rand_state The random state. * \param weights The weights for multinomial sampling. * \return The multinomial sampling function. */ TVM_DLL std::function<int32_t()> MakeMultinomialSampler( support::LinearCongruentialEngine::TRandState* rand_state, const std::vector<double>& weights); /*! * \brief Sample the factors to perfect tile a specific loop * \param rand_state The random state * \param extent The loop extent to be tiled * \param n_split The number of tiles to be sampled * \return A list of length `n`, the random perfect tile sizes sampled */ TVM_DLL std::vector<int64_t> SamplePerfectTile( support::LinearCongruentialEngine::TRandState* rand_state, // int32_t extent, int32_t n_splits); /*! * \brief Sample the factors to perfect tile a specific loop * \param rand_state The random state * \param extent The loop extent to be tiled * \param n_split The number of tiles to be sampled * \param max_innermost_factor The maximum tile size allowed to be sampled in the innermost loop * \return A list of length `n`, the random perfect tile sizes sampled */ TVM_DLL std::vector<int64_t> SamplePerfectTile( support::LinearCongruentialEngine::TRandState* rand_state, // int32_t extent, int32_t n_split, int32_t max_innermost_factor); /*! 
* \brief Sample the factors to perfect tile a specific loop * \param rand_state The random state * \param loop_sref The loop to be tiled * \param n_split The number of tiles to be sampled * \param max_innermost_factor The maximum tile size allowed to be sampled in the innermost loop * \param decision The sampling decision * \return A list of length `n`, the random perfect tile sizes sampled */ TVM_DLL std::vector<int64_t> SamplePerfectTile( support::LinearCongruentialEngine::TRandState* rand_state, // const tir::StmtSRef& loop_sref, int32_t n_split, int32_t max_innermost_factor, Optional<Array<Integer>>* decision); /*! * \brief Sample a compute-at location of the given block * \param self The schedule state * \param rand_state The random state * \param block_sref The sref of the block whose compute-at location is to be sampled * \param decision The sampling decision * \return The sampled loop where the input block is to be computed at */ TVM_DLL tir::StmtSRef SampleComputeLocation( tir::ScheduleState self, support::LinearCongruentialEngine::TRandState* rand_state, const tir::StmtSRef& block_sref, Optional<Integer>* decision); /******** Schedule: Get blocks & loops ********/ /*! * \brief Retrieves blocks in a specific function with its name * \param self The schedule state * \param name The name of the blocks to be retrieved * \param gvar The function to be retrieved * \return A list of blocks with the specific name */ Array<StmtSRef> GetBlocks(const ScheduleState& self, const String& name, const GlobalVar& gv); /*! * \brief Gets the parent loops of the block in its scope, from outer to inner * \param self The schedule state * \param block_sref The query block * \return A list of loops above the given block in its scope, from outer to inner */ Array<StmtSRef> GetLoops(const StmtSRef& block_sref); /*! 
* \brief Get the leaf blocks of a specific block/loop * \param self The schedule state * \param parent_sref The query block/loop * \return A list of leaf blocks inside a specific block/loop */ Array<StmtSRef> GetChildBlocks(const ScheduleState& self, const StmtSRef& parent_sref); /*! * \brief Get the producers of a specific block * \param self The schedule state * \param block_sref The block in the query * \return A list of blocks, the producers of the given block */ Array<StmtSRef> GetProducers(const ScheduleState& self, const StmtSRef& block_sref); /*! * \brief Get the consumers of a specific block * \param self The schedule state * \param block_rv The block in the query * \return A list of blocks, the consumers of the given block */ Array<StmtSRef> GetConsumers(const ScheduleState& self, const StmtSRef& block_sref); /******** Schedule: Transform loops ********/ /*! * Split a loop into a list of consecutive loops. It requires: * 1) The loop can't have annotation or thread binding. * 2) The loop must start with 0. * \param self The state of the schedule * \param loop_sref The sref to the loop being split * \param factors The splitting factors * \param preserve_unit_iters Whether or not to preserve unit iterators in block bindings * \return An array of srefs to the loops after splitting */ TVM_DLL Array<StmtSRef> Split(ScheduleState self, const StmtSRef& loop_sref, const Array<PrimExpr>& factors, bool preserve_unit_iters); /*! * \brief Fuse a list of consecutive loops into one. It requires: * 1) The loops can't have annotations or thread bindings. * 2) The inner loop must be the only child of the outer loop. * 3) All loops must start with 0. * 4) The domain of a loop to be fused cannot depend on another loop to be fused. 
 * \param self The state of the schedule
 * \param loop_srefs An array of srefs to the loops to be fused
 * \param preserve_unit_loops Whether or not to preserve unit iterators in block bindings
 * \return The sref to the fused loop
 */
TVM_DLL StmtSRef Fuse(ScheduleState self, const Array<StmtSRef>& loop_srefs,
                      bool preserve_unit_loops);
/*!
 * \brief Reorder a list of loops. It doesn't require the loops to be consecutive.
 * It requires:
 * 1) The loops are in the same chain. That means: the loops can be ordered to [l_1, l_2, ... ,
 * l_n] where l_i is an ancestor of l_{i+1} and there are only single-branch loops between
 * l_1 and l_n (which also indicates they are under the same scope).
 * 2) After reordering, the domain of an outer loop cannot depend on any of the inner loops.
 * 3) For every block under the loop nests, its block binding must be affine, and the block
 * variables must be either data parallel or reduction.
 * 4) No duplicated loops are allowed in the arguments.
 * \param self The state of the schedule
 * \param ordered_loop_srefs An array of srefs which indicates the new order of loops
 */
TVM_DLL void Reorder(ScheduleState self, const Array<StmtSRef>& ordered_loop_srefs);
/*!
 * \brief Create a new unit loop on top of the specific block or loop.
 * \param self The state of the schedule
 * \param sref The block/loop above which the new unit loop is created
 * \return The new unit loop
 */
TVM_DLL StmtSRef AddUnitLoop(ScheduleState self, StmtSRef sref);

/******** Schedule: Manipulate ForKind ********/

/*!
 * \brief Parallelize the input loop.
It requires: * 1) The scope block that the loop is in should have stage-pipeline property * 2) All the blocks under the loop are complete blocks or reduction blocks, and have affine * bindings * 3) For each block under the loop, the loop can only be contained in data-parallel block iters' * bindings * \param self The state of the schedule * \param loop_sref The sref of the loop to be parallelized */ TVM_DLL void Parallel(ScheduleState self, const StmtSRef& loop_sref); /*! * \brief Vectorize the input loop. It requires: * 1) The scope block that the loop is in should have stage-pipeline property * 2) All the blocks under the loop are complete blocks or reduction blocks, and have affine * bindings * 3) For each block under the loop, the loop can only be contained in data-parallel block iters' * bindings * \param self The state of the schedule * \param loop_sref The sref of the loop to be vectorized */ TVM_DLL void Vectorize(ScheduleState self, const StmtSRef& loop_sref); /*! * \brief Bind the input loop to the given thread axis. It requires: * 1) The scope block that the loop is in should have stage-pipeline property * 2) All the blocks under the loop are complete blocks or reduction blocks, and have affine * bindings * 3) For each block under the loop, if the thread axis starts with "threadIdx`, the loop can only * be contained in data-parallel block iter and reduction block iters' bindings. Otherwise the * loop can only be contained in data-parallel block iters' bindings * \param self The state of the schedule * \param loop_sref The sref of the loop to be bound to the thread axis * \param thread_axis The thread axis to be bound to the loop */ TVM_DLL void Bind(ScheduleState self, const StmtSRef& loop_sref, const IterVar& thread_axis); /*! * \brief Unroll the input loop. 
 It requires nothing
 * \param self The state of the schedule
 * \param loop_sref The loop to be unrolled
 */
TVM_DLL void Unroll(ScheduleState self, const StmtSRef& loop_sref);

/******** Schedule: Insert cache stages ********/

/*!
 * \brief Create a block that reads a buffer region into a read cache. It requires:
 * 1) There is at most one block who writes the buffer in the scope.
 * 2) The scope block has stage-pipeline property.
 * \param self The state of the schedule
 * \param block_sref The consumer block of the target buffer.
 * \param read_buffer_index The index of the buffer in block's read region.
 * \param storage_scope The target storage scope.
 * \param consumer_blocks Array of blocks that consume the cache.
 * \return The cache stage block.
 */
TVM_DLL StmtSRef CacheRead(ScheduleState self, const StmtSRef& block_sref, int read_buffer_index,
                           const String& storage_scope,
                           const Array<StmtSRef> consumer_blocks = {});
/*!
 * \brief Create a block that writes a buffer region into a write cache. It requires:
 * 1) There is only one block that writes the target buffer.
 * 2) The scope block has stage-pipeline property.
 * \param self The state of the schedule
 * \param block_sref The producer of the buffer
 * \param write_buffer_index The index of the buffer in block's write region
 * \param storage_scope The target storage scope
 * \return The cache stage block.
 */
TVM_DLL StmtSRef CacheWrite(ScheduleState self, const StmtSRef& block_sref, int write_buffer_index,
                            const String& storage_scope);
/*!
 * \brief Create 2 blocks that read&write a buffer region into a read/write cache.
 * It requires that the target block both reads & writes the target buffer.
 * \param self The state of the schedule
 * \param block_sref The target block operates on the target buffer.
 * \param read_buffer_index The index of the buffer in block's read region.
 * \param storage_scope The target storage scope
 * \return The cache stage blocks, cache read block together with cache write block.
*/ TVM_DLL Array<StmtSRef> CacheInplace(ScheduleState self, const StmtSRef& block_sref, int read_buffer_index, const String& storage_scope); /*! * \brief Create a block to cache precomputed index for later use. * if there is no index computation, keep unchanged. * \param block_sref The target block * \param buffer_index The index of the target buffer in block's read region, * \return The cache stage block. */ TVM_DLL Array<StmtSRef> CacheIndex(ScheduleState self, const StmtSRef& block_sref, int buffer_index); /*! *! * \brief Create a block that read/write a buffer region into a read/write cache with reindexing. * The layout of the cache will be the same as by the iterators of the block that reads/writes the * buffer. It requires: * 1) There is only one block who reads/writes the target buffer * 2) There is only one buffer load/store of this buffer in the block * \param self The state of the schedule * \param block_sref The block operates on the target buffer. * \param buffer_index The index of the buffer in block's read or write region. * \param buffer_index_type The type of the buffer index, kRead or kWrite. * \return The reindex stage block. */ TVM_DLL StmtSRef ReIndex(ScheduleState self, const StmtSRef& block_sref, int buffer_index, BufferIndexType buffer_index_type); /******** Schedule: Compute location ********/ /*! * \brief Move a producer block under the specific loop, and regenerate the * loops induced by the block so that the buffer region produced by the producer block could * cover those regions consumed by its consumer blocks under the given loop. It requires: * 1) `block` and `loop` are under the same scope, `loop` is not the ancestor of `block` * 2) The scope block has stage-pipeline property * 3) The subtree of the scope block, where the given block is in, satisfies the compact dataflow * condition. i.e. 
all the blocks in the scope block's subtree must be either complete block or * reduction block * 4) The block is not an output block with regard to the scope block, i.e. the buffers written by * the block are allocated under the scope block * 5) All the consumers of the block are under the given loop * * \param self The schedule state * \param block_sref The block to be moved * \param loop_sref The loop where the block to be moved to * \param index The block index of the loop body subtree blocks: * - `index = -1` means inserted into the last possible insertion point; * - `index = -2` means inserted into the first possible insertion point; * - Otherwise, `index` is a nonnegative number that indicates the insertion point */ TVM_DLL void ComputeAt(ScheduleState self, const StmtSRef& block_sref, const StmtSRef& loop_sref, bool preserve_unit_loops, int index = -1); /*! * \brief Move a consumer block under the specific loop, and regenerate the * loops induced by the block so that the buffer region consumed by the consumer block could * cover those regions produced by its producer blocks under the given loop. It requires: * 1) `block` and `loop` are under the same scope, `loop` is not the ancestor of `block` * 2) The scope block has stage-pipeline property * 3) The subtree of the scope block, where the given block is in, satisfies the compact dataflow * condition. i.e. 
all the blocks in the scope block's subtree must be either complete block or * reduction block * 4) All the producers of the block are under the given loop * * \param self The schedule state * \param block_sref The block to be moved * \param loop_sref The loop where the block to be moved to * \param preserve_unit_loops Whether to keep the trivial loops whose extents are 1 * \param index The block index of the loop body subtree blocks: * - `index = -1` means inserted into the last possible insertion point; * - `index = -2` means inserted into the first possible insertion point; * - Otherwise, `index` is a nonnegative number that indicates the insertion point */ TVM_DLL void ReverseComputeAt(ScheduleState self, const StmtSRef& block_sref, const StmtSRef& loop_sref, bool preserve_unit_loops, int index = -1); /*! * \brief Inline a block into its consumer(s). It requires: * 1) The block is a complete non-root block, which only produces one buffer * 2) The block must not be the only leaf in the scope. * 3) The body of the block must be a BufferStore statement in the form of, * A[i, j, k, ...] = ... * where the indices of the LHS are all distinct atomic variables, * and no variables other than those indexing variables are allowed in the statement. * \param self The state of the schedule * \param block_sref The sref to the block to be inlined to its consumer(s) */ TVM_DLL void ComputeInline(ScheduleState self, const StmtSRef& block_sref); /*! * \brief Inline a block into its only producer. It requires: * 1) The block is a complete non-root block, which only produces and consumers one buffer * 2) The block must not be the only leaf in the scope. * 3) The only producer of the block is a read-after-write producer and a complete non-root block * 4) The body of the block must be a BufferStore statement in the form of, * B[f(i, j, k, ...)] = g(i, j, k, A[i, j, k, ...] ...) 
* where the indices of each `BufferLoad` on the RHS are all distinct atomic variables, * and no variables other than those indexing variables are allowed in the statement. * \param self The state of the schedule * \param block_sref The sref to the block to be inlined to its producer */ TVM_DLL void ReverseComputeInline(ScheduleState self, const StmtSRef& block_sref); /******** Schedule: Reduction ********/ /*! * \brief Decompose a reduction block into two separate blocks. * a) The init block, which is translated from the init statement of the reduction block; * b) The update block, which is the original block without init statement. * * The init block is inserted right before the given loop. * * The schedule primitive requires: * 1) The input block is a reduction block. * 2) The input loop is the ancestor of the block. * 3) The input loop is not lower than all the loops related to reduce block var. * \param block_rv The reduction block to be decomposed * \param loop_rv The loop above which the init block is inserted before. * \return The init block */ TVM_DLL StmtSRef DecomposeReduction(ScheduleState self, const StmtSRef& block_sref, const StmtSRef& loop_sref); /*! * \brief Factor a reduction block by the specified loop * \details See python/tvm/tir/schedule/schedule.py * \param self The state of the schedule * \param loop_sref The loop outside block for which we want to do rfactor * \param factor_axis The position where the new dimension is placed in the new introduced rfactor * buffer. Suppose the original reduction block writes to buffer `B` with * ndim(B) dimensions, then `factor_axis` should be in range `[-ndim(B) - 1, * ndim(B)]`, and the negative index will be normalized to a non-negative one * \return The sref of the rfactor block */ TVM_DLL StmtSRef RFactor(ScheduleState self, const StmtSRef& loop_sref, int factor_axis); /******** Schedule: Block annotation ********/ /*! 
\brief The quad used by StorageAlign for (buffer_idx, axis, factor, offset) */ using StorageAlignTuple = Array<Integer>; /*! \brief A list of StorageAlignTuple, used by StorageAlign */ using StorageAlignAnnotation = Array<StorageAlignTuple>; /*! * \brief Set alignment requirement for specific dimension such that * stride[axis] == k * factor + offset for some k. This is useful to set memory layout for * more friendly memory access pattern. For example, we can set alignment to be factor=2, * offset=1 to avoid bank conflict for thread access on higher dimension in GPU shared * memory. * \param self The state of the schedule * \param block_sref The producer block of the buffer * \param buffer_index The index of the buffer in block's write region * \param axis The dimension to be specified for alignment * \param factor The factor multiple of alignment * \param offset The required offset factor */ TVM_DLL void StorageAlign(ScheduleState self, const StmtSRef& block_sref, int buffer_index, int axis, int factor, int offset); /*! * \brief Set the storage scope of a buffer, where the buffer is specified by the a block and a * write-index * \param self The state of the schedule * \param block_sref The sref of the producer block of the buffer * \param buffer_index The index of the buffer in block's write region * \param storage_scope The storage scope to be set */ TVM_DLL void SetScope(ScheduleState self, const StmtSRef& block_sref, int buffer_index, const String& storage_scope); /*! * \brief Set the axis separator of a buffer, where the buffer is specified by a block and a read * or write index * \param block_rv The block that accesses the target buffer. * \param buffer_index The index of the buffer in block's read or write region. * \param buffer_index_type The type of the buffer index, kRead or kWrite. 
* \param axis_separators The axis separator of the buffer */ TVM_DLL void SetAxisSeparator(ScheduleState self, const StmtSRef& block_sref, int buffer_index, BufferIndexType buffer_index_type, const Array<IntImm>& axis_separators); /******** Schedule: Blockize & Tensorize ********/ /*! * \brief Convert the subtree rooted at a specific loop into a block. * \param self The state of the schedule * \param loop_sref The root of the subtree * \return The new block */ TVM_DLL StmtSRef Blockize(ScheduleState self, const StmtSRef& loop_sref); /*! * \brief Tensorize the computation enclosed by loop with the tensor intrinsic. * \param self The state of the schedule * \param block_or_loop_sref The block or loop to be tensorized. * \param intrin The tensor intrinsic. */ TVM_DLL void Tensorize(ScheduleState self, const StmtSRef& block_or_loop_sref, const TensorIntrin& intrin); /******** Schedule: Annotation ********/ /*! * \brief Annotate a block/loop with a key value pair * \param self The state of the schedule * \param sref The block/loop sref to be annotated * \param ann_key The annotation key * \param ann_val The annotation value */ TVM_DLL void Annotate(ScheduleState self, const StmtSRef& sref, const String& ann_key, const ObjectRef& ann_val); /*! * \brief Unannotate a block/loop's annotation with key ann_key * \param self The state of the schedule * \param sref The block/loop to be unannotated * \param ann_key The annotation key */ TVM_DLL void Unannotate(ScheduleState self, const StmtSRef& sref, const String& ann_key); /******** Schedule: Layout transformation ********/ /*! * \brief Apply a transformation represented by IndexMap to buffer * \details The indices and the access region to the target buffer is transformed by the given * index_map. The index_map is also used to infer the new shape of the buffer. Buffer must be * one of the parameter of the function, or allocated in some blocks (it cannot be a buffer * subregion created via match_buffer). 
* \param self The state of the schedule * \param block_sref The block sref that accesses the target buffer. * \param buffer_index The index of the buffer in block's read or write region. * \param buffer_index_type The type of the buffer index, kRead or kWrite. * \param index_map The transformation to apply. * \param pad_value The value to write into padding introduced by the transformation. */ TVM_DLL void TransformLayout(ScheduleState self, const StmtSRef& block_sref, int buffer_index, BufferIndexType buffer_index_type, const IndexMap& index_map, const Optional<IndexMap>& pad_value); /*! * \brief Apply a transformation represented by IndexMap to block * \details The block iters and the block body are transformed by the given index_map. * Outer loops corresponding to each new block iter are regenerated. * The index_map is required to be bijective affine since we need its inverse mapping. * \param self The state of the schedule * \param block_sref The block sref that refers to the block to be transformed * \param index_map The transformation to apply. */ TVM_DLL void TransformBlockLayout(ScheduleState self, const StmtSRef& block_sref, const IndexMap& index_map); /******** Schedule: Padding ********/ /*! * \brief Decompose a padding block into a block filling const pad values and a block * writing in-bound values. * \param block_sref The block sref that match the padding pattern. * \param loop_sref The loop above which the const filling block is inserted before. * \return The padding value filling block sref. */ TVM_DLL StmtSRef DecomposePadding(ScheduleState self, const StmtSRef& block_sref, const StmtSRef& loop_sref); /*! * \brief Pad the computation of Einsum. * \param self The state of the schedule * \param block_sref The block sref that matches the Einsum pattern. * \param padding The padding for each block iter. 
 */
TVM_DLL void PadEinsum(ScheduleState self, const StmtSRef& block_sref,
                       const Array<Integer>& padding);

/******** Schedule: Buffer transformation ********/

/*!
 * \brief Compute the target buffer via rolling buffering.
 * \details This primitive selects the outermost rollable axis with a positive bound overlap that
 * appears in the block's ancestor loops as `rolling axis`, fold and circularize the buffer along
 * the rolling dimension, append block predicate to avoid recomputing overlapping elements.
 * It requires:
 * 1) The buffer to be an intermediate buffer defined via `alloc_buffer`.
 * 2) The LCA of the producer and consumer of the buffer is a for loop, typically,
 * the producer and consumer of the buffer are cascaded through compute_at.
 * 3) The access region of the buffer has at least one dimension that contains
 * a positive bound overlap.
 * \param self The state of the schedule
 * \param block_sref The producer block of the buffer.
 * \param write_buffer_index The index of the buffer in block's write region.
 */
TVM_DLL void RollingBuffer(ScheduleState self, const StmtSRef& block_sref, int write_buffer_index);

/******** Schedule: Misc ********/

}  // namespace tir
}  // namespace tvm

#endif  // TVM_TIR_SCHEDULE_PRIMITIVE_H_
https://github.com/zk-ml/tachikoma
src/tir/schedule/traced_schedule.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ #ifndef TVM_TIR_SCHEDULE_TRACED_SCHEDULE_H_ #define TVM_TIR_SCHEDULE_TRACED_SCHEDULE_H_ #include "./concrete_schedule.h" namespace tvm { namespace tir { class TracedScheduleNode : public ConcreteScheduleNode { friend class Schedule; protected: Trace trace_; public: void VisitAttrs(tvm::AttrVisitor* v) { // `state_` is not visited // `error_render_level_` is not visited // `symbol_table_` is not visited // `analyzer_` is not visitied // `trace_` is not visited } ~TracedScheduleNode() = default; public: Optional<Trace> trace() const final { return trace_; } Schedule Copy() final; public: /******** Schedule: Sampling ********/ ExprRV SampleCategorical(const Array<Integer>& candidates, const Array<FloatImm>& probs, Optional<Integer> decision = NullOpt) final; Array<ExprRV> SamplePerfectTile(const LoopRV& loop_rv, int n, int max_innermost_factor, Optional<Array<Integer>> decision = NullOpt) final; LoopRV SampleComputeLocation(const BlockRV& block_rv, Optional<Integer> decision = NullOpt) final; /******** Schedule: Get blocks & loops ********/ BlockRV GetBlock(const String& name, const Optional<String>& func_name) final; Array<LoopRV> GetLoops(const BlockRV& block_rv) final; Array<BlockRV> 
GetChildBlocks(const BlockRV& block_rv) final; Array<BlockRV> GetChildBlocks(const LoopRV& loop_rv) final; Array<BlockRV> GetProducers(const BlockRV& block_rv) final; Array<BlockRV> GetConsumers(const BlockRV& block_rv) final; /******** Schedule: Transform loops ********/ LoopRV Fuse(const Array<LoopRV>& loop_rvs, bool preserve_unit_iters) final; Array<LoopRV> Split(const LoopRV& loop_rv, const Array<Optional<ExprRV>>& factor_rvs, bool preserve_unit_iters) final; void Reorder(const Array<LoopRV>& ordered_loop_rvs) final; LoopRV AddUnitLoop(const BlockRV& block_rv) final; LoopRV AddUnitLoop(const LoopRV& loop_rv) final; /******** Schedule: Manipulate ForKind ********/ void Parallel(const LoopRV& loop_rv) final; void Vectorize(const LoopRV& loop_rv) final; void Bind(const LoopRV& loop_rv, const String& thread_axis) final; void Unroll(const LoopRV& loop_rv) final; /******** Schedule: Insert cache stages ********/ BlockRV CacheRead(const BlockRV& block_rv, int read_buffer_index, const String& storage_scope, const Array<BlockRV> consumer_blocks = {}) final; BlockRV CacheWrite(const BlockRV& block_rv, int write_buffer_index, const String& storage_scope) final; Array<BlockRV> CacheInplace(const BlockRV& block_rv, int read_buffer_index, const String& storage_scope) final; BlockRV ReIndex(const BlockRV& block_rv, int buffer_index, BufferIndexType buffer_index_type) final; Array<BlockRV> CacheIndex(const BlockRV& block_rv, int buffer_index) final; /******** Schedule: Compute location ********/ void ComputeAt(const BlockRV& block_rv, const LoopRV& loop_rv, bool preserve_unit_loops, int index = -1) final; void ReverseComputeAt(const BlockRV& block_rv, const LoopRV& loop_rv, bool preserve_unit_loops, int index = -1) final; void ComputeInline(const BlockRV& block_rv) final; void ReverseComputeInline(const BlockRV& block_rv) final; /******** Schedule: Reduction ********/ BlockRV DecomposeReduction(const BlockRV& block_rv, const LoopRV& loop_rv) final; BlockRV RFactor(const 
LoopRV& loop_rv, int factor_axis) final; /******** Schedule: Block annotation ********/ void StorageAlign(const BlockRV& block_rv, int buffer_index, int axis, int factor, int offset) final; void SetScope(const BlockRV& block_rv, int buffer_index, const String& storage_scope) final; /******** Schedule: Blockize & Tensorize ********/ BlockRV Blockize(const LoopRV& loop_rv) final; void Tensorize(const BlockRV& block_rv, const String& intrin) final; void Tensorize(const LoopRV& loop_rv, const String& intrin) final; /******** Schedule: Annotation ********/ void Annotate(const LoopRV& loop_rv, const String& ann_key, const ObjectRef& ann_val) override; void Unannotate(const LoopRV& loop_rv, const String& ann_key) override; void Annotate(const BlockRV& block_rv, const String& ann_key, const ObjectRef& ann_val) override; void Unannotate(const BlockRV& block_rv, const String& ann_key) override; /******** Schedule: Layout transformation ********/ void TransformLayout(const BlockRV& block_rv, int buffer_index, BufferIndexType buffer_index_type, const IndexMap& index_map, const Optional<IndexMap>& pad_value) override; void TransformBlockLayout(const BlockRV& block_rv, const IndexMap& index_map) override; void SetAxisSeparator(const BlockRV& block_rv, int buffer_index, BufferIndexType buffer_index_type, const Array<IntImm>& axis_separators) final; /******** Schedule: Padding ********/ BlockRV DecomposePadding(const BlockRV& block_rv, const LoopRV& loop_rv) final; void PadEinsum(const BlockRV& block_rv, const Array<Integer>& padding) final; /******** Schedule: Buffer transformation ********/ void RollingBuffer(const BlockRV& block_rv, int write_buffer_index) final; /******** Schedule: Misc ********/ void EnterPostproc() final; }; } // namespace tir } // namespace tvm #endif // TVM_TIR_SCHEDULE_TRACED_SCHEDULE_H_
https://github.com/zk-ml/tachikoma
src/tir/schedule/transform.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ #ifndef TVM_TIR_SCHEDULE_TRANSFORM_H_ #define TVM_TIR_SCHEDULE_TRANSFORM_H_ #include <tvm/tir/schedule/schedule.h> #include <tvm/tir/schedule/state.h> #include <tvm/tir/stmt_functor.h> #include <unordered_map> #include <utility> #include "../../arith/ir_mutator_with_analyzer.h" #include "../ir/functor_common.h" namespace tvm { namespace tir { /******** Annotation ********/ /*! * \brief Create a new block with the given annotation added * \param block The block with original annotation * \param attr_key The annotation key to be added * \param attr_value The annotation value to be added * \return A new block with the given annotation as its last annotation */ Block WithAnnotation(const BlockNode* block, const String& attr_key, const ObjectRef& attr_value); /******** Buffer Related ********/ /*! * \brief Create a new buffer by changing the storage scope. * \param buffer The given buffer. * \param scope The target storage scope. * \return The new buffer with target storage scope. */ Buffer WithScope(const Buffer& buffer, const String& scope); /*! 
* \brief Replaces the buffer within the specific sequence of regions * \param regions The regions whose buffers are to be replaced * \param source The buffer to be replaced * \param target The buffer to be replaced to * \return The new sequence of regions after replacement */ Array<BufferRegion> ReplaceBuffer(Array<BufferRegion> regions, const Buffer& source, const Buffer& target); /*! * \brief Replaces the buffer within the specific sequence of match_buffers * \param match_buffers The match_buffers whose buffers are to be replaced * \param source The buffer to be replaced * \param target The buffer to be replaced to * \return The new sequence of match_buffers after replacement */ Array<MatchBufferRegion> ReplaceBuffer(Array<MatchBufferRegion> match_buffers, const Buffer& source, const Buffer& target); /*! * \brief Replaces the buffer region within the specific sequence of regions * \param regions The regions to be replaced * \param source_buffer The buffer to whose region is to be replaced * \param target The buffer region to be replaced to * \return The new sequence of regions after replacement */ Array<BufferRegion> ReplaceBufferRegion(Array<BufferRegion> regions, const Buffer& source_buffer, const BufferRegion& target); /*! * \brief Replaces the buffer region within the specific sequence of match_buffers * \param regions The match_buffers to be replaced * \param source_buffer The buffer to whose region is to be replaced * \param target The buffer region to be replaced to * \return The new sequence of match_buffers after replacement */ Array<MatchBufferRegion> ReplaceBufferRegion(Array<MatchBufferRegion> match_buffers, const Buffer& source_buffer, const BufferRegion& target); /*! * \brief A helper mutator which recursively replaces the old buffer with the new buffer and * collects the block sref reuse information for the following replacement. 
 *
 * If the buffer to be replaced is used as the source in `match_buffers`, depending on the
 * specific use cases, the target buffers in `match_buffers` may also need to be mutated. In this
 * case, this class should be subclassed to explicitly handle `match_buffers`.
 */
class ReplaceBufferMutator : public StmtExprMutator {
 public:
  /*!
   * \brief The constructor
   * \param old_buffer The old buffer
   * \param new_buffer The new buffer
   * \param block_sref_reuse Optional map to record mapping between old and new blocks that reuse
   *        sref.
   */
  ReplaceBufferMutator(const Buffer& old_buffer, Buffer new_buffer,
                       Map<Block, Block>* block_sref_reuse);

  /*!
   * \brief The constructor for replacing multiple buffers at once
   * \param buffer_map The mapping from old buffers to their new replacements
   * \param block_sref_reuse Optional map to record mapping between old and new blocks that reuse
   *        sref.
   */
  ReplaceBufferMutator(const Map<Buffer, Buffer>& buffer_map, Map<Block, Block>* block_sref_reuse);

 protected:
  using StmtExprMutator::VisitExpr_;
  using StmtExprMutator::VisitStmt_;

  PrimExpr VisitExpr_(const VarNode* var) final;

  // Shared helper for BufferLoad/BufferStore: swap in the replacement buffer
  // (looked up by the old buffer's data var) via copy-on-write.
  template <typename Node>
  Node VisitBufferAccess(Node node) {
    auto it = buffer_var_map_.find(node->buffer->data.get());
    if (it != buffer_var_map_.end()) {
      node.CopyOnWrite()->buffer = it->second;
    }
    return node;
  }

  Stmt VisitStmt_(const BufferStoreNode* op) final;

  PrimExpr VisitExpr_(const BufferLoadNode* op) final;

  // Virtual so subclasses can also mutate the target buffer of a match_buffer.
  virtual MatchBufferRegion VisitMatchBufferRegion(const MatchBufferRegion& match_buffer);

  Stmt VisitStmt_(const BlockNode* block) override;

  /*!
   * \brief A mapping which maps old buffer vars to new buffers, including the buffers defined in
   * MatchBufferRegion.
   */
  std::unordered_map<const VarNode*, Buffer> buffer_var_map_;
  /*! \brief The block sref reuse map for the following replacement */
  Map<Block, Block>* block_sref_reuse_;
};

/******** Block Removal ********/

/*!
 * \brief Construct a new AST, with a specific sref tree leaf removed.
 * The leaf's ancestors who have only a single child will be removed too.
 * \param self The schedule state
 * \param leaf_block_sref The block/loop sref to the sref tree leaf to be removed
 * \param src_stmt Output: the root of the subtree where the replacement begins
 * \param tgt_stmt Output: the root of the subtree after the replacement
 * \note Read before use:
 * 1) Removal is not conducted beyond scope-level.
 * 2) This method only works properly when the scope root is a stage pipeline.
 *
 * An example of the removal plan, say we are removing the leaf block "B" from the AST.
 *
 * \code
 * with block([], "scope_root"):
 *     ...
 *     with block([128, 128], "B") as [vi, vj]:
 *         B[vi, vj] = A[vi, vj] + 1.0
 *     with block([128, 128], "C") as [vi, vj]:
 *         C[vi, vj] = B[vi, vj] * 2.0
 * \endcode
 *
 * This method does not mutate the AST, instead it returns a `(src_stmt, tgt_stmt)` pair as a
 * plan to substitute certain pieces of the IR.
 *
 * In our example, it returns block "scope_root" as `src_stmt`, and the result `tgt_stmt` is:
 *
 * \code
 * with block([], "scope_root"):
 *     ...
 *     with block([128, 128], "C") as [vi, vj]:
 *         C[vi, vj] = B[vi, vj] * 2.0
 * \endcode
 */
void LeafBlockRemovalPlan(const ScheduleState& self, const StmtSRef& leaf_block_sref,
                          Stmt* src_stmt, Stmt* tgt_stmt);

/*!
 * \brief Tile a subset of loops in the block according to the given tensor intrinsic.
 * \param sch The schedule to which tiling is applied
 * \param block_rv The block whose subset of loops will be tiled
 * \param intrin_name The name of a tensor intrinsic, must be registered via
 * TensorIntrin.register(...) beforehand
 * \param allow_padding Whether to allow padding when tiling
 * \return LoopRV corresponding to the outermost loop of a
 * block tiled according to the given intrin, NullOpt if a valid loop mapping is not found
 */
Optional<tir::LoopRV> TileWithTensorIntrin(const tir::Schedule& sch, const tir::BlockRV& block_rv,
                                           const String& intrin_name, bool allow_padding = false);

/******** Block mutation ********/

/*!
* \brief Simplifier for indices of buffer access and block buffer access regions. */ class BlockBufferAccessSimplifier : public arith::IRMutatorWithAnalyzer { public: /*! * \brief Simplify indices of buffer access and block buffer access regions in the statement * \param stmt The statement to be simplified * \param analyzer The arithmetic analyzer * \return The simplified statement */ static Stmt Simplify(const Stmt& stmt, arith::Analyzer* analyzer) { BlockBufferAccessSimplifier simplifier(analyzer); return simplifier(stmt); } private: explicit BlockBufferAccessSimplifier(arith::Analyzer* analyzer) : IRMutatorWithAnalyzer(analyzer) {} using IRMutatorWithAnalyzer::VisitExpr_; using IRMutatorWithAnalyzer::VisitStmt_; void SimplifyAccessRegion(Array<BufferRegion>* old_access_regions); void SimplifyBufferIndices(Array<PrimExpr>* indices); Stmt VisitStmt_(const BlockNode* op) final; Stmt VisitStmt_(const BufferStoreNode* op) final; PrimExpr VisitExpr_(const BufferLoadNode* op) final; }; } // namespace tir } // namespace tvm #endif // TVM_TIR_SCHEDULE_TRANSFORM_H_
https://github.com/zk-ml/tachikoma
src/tir/schedule/utils.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ #ifndef TVM_TIR_SCHEDULE_UTILS_H_ #define TVM_TIR_SCHEDULE_UTILS_H_ #include <tvm/arith/analyzer.h> #include <tvm/arith/int_set.h> #include <tvm/arith/iter_affine_map.h> #include <tvm/tir/analysis.h> #include <tvm/tir/function.h> #include <tvm/tir/op.h> #include <tvm/tir/schedule/instruction.h> #include <tvm/tir/schedule/schedule.h> #include <tvm/tir/schedule/state.h> #include <tvm/tir/schedule/trace.h> #include <tvm/tir/stmt_functor.h> #include <string> #include <unordered_map> #include <unordered_set> #include <utility> #include "../../arith/pattern_match.h" #include "../../node/attr_registry.h" #include "../../printer/text_printer.h" #include "../../runtime/thread_storage_scope.h" #include "../../support/array.h" #include "../../support/nd_int_set.h" #include "./analysis.h" #include "./error.h" #include "./instruction_traits.h" #include "./primitive.h" #include "./transform.h" namespace tvm { namespace tir { /*! * \brief A helper macro to convert an sref to the statement it points to, * then check if the downcasting succeeded. 
* \param Result The result variable, used for checking * \param SRef The SRef to be cast * \param Type The type to be cast to, can be Block or For */ #define TVM_SREF_AS_OR_ERR(Result, SRef, Type) \ SRef->StmtAs<Type>(); \ ICHECK(Result) /*! * \brief A helper macro to convert an sref to the block it points to, * * Throws an internal error if downcasting fails. The variable name * in the parent scope is used for the error message. * * \param SRef The SRef to be cast */ #define TVM_SREF_TO_BLOCK(SRef) \ [&]() { \ auto result = TVM_SREF_AS_OR_ERR(result, (SRef), ::tvm::tir::BlockNode) \ << "TypeError: Expects StmtSRef `" << #SRef << "` points to `Block`, but gets: " \ << ((SRef)->stmt ? (SRef)->stmt->GetTypeKey() : "None"); \ return result; \ }() /*! * \brief A helper macro to convert an sref to the for-loop it points to * * Throws an internal error if downcasting fails. The variable name * in the parent scope is used for the error message. * * \param SRef The SRef to be cast */ #define TVM_SREF_TO_FOR(SRef) \ [&]() { \ auto result = TVM_SREF_AS_OR_ERR(result, (SRef), ::tvm::tir::ForNode) \ << "TypeError: Expects StmtSRef `" << #SRef << "` points to `Loop`, but gets: " \ << ((SRef)->stmt ? (SRef)->stmt->GetTypeKey() : "None"); \ return result; \ }() /*! * \brief Downcast a TVM ObjectRef to its corresponding container using `ObjectRef::as<Type>`, * then check if the downcasting succeeded. * \param Result The result variable, used for checking * \param From The ObjectRef to be downcast * \param Type The type to be downcast to */ #define TVM_TYPE_AS_OR_ERR(Result, From, Type) \ From.as<Type>(); \ ICHECK(Result) /*! * \brief Downcast a TVM ObjectRef to its corresponding container using `ObjectRef::as<Type>`, * throwing an internal error if downcast fails. 
* \param Result The result variable, used for checking * \param From The ObjectRef to be downcast * \param Type The type to be downcast to */ #define TVM_TYPE_AS(From, Type) \ [&]() { \ auto result = TVM_TYPE_AS_OR_ERR(result, (From), Type) \ << "TypeError: Expects `" << #From << "` to have type `" << Type::_type_key \ << "`, but gets: " << ((From).defined() ? (From)->GetTypeKey() : "None"); \ return result; \ }() /*! * \brief Convert an array of loop StmtSRefs to an array of loops * \param loop_srefs The loop StmtSRefs to be converted * \return The conversion result loops */ inline Array<For> LoopSRefs2Loops(const Array<StmtSRef>& loop_srefs) { Array<For> loops; loops.reserve(loop_srefs.size()); for (StmtSRef loop_sref : loop_srefs) { const ForNode* loop = TVM_SREF_TO_FOR(loop_sref); loops.push_back(GetRef<For>(loop)); } return loops; } /*! * \brief Convert an array of block rvs to an array of block StmtSRefs * \param sch The schedule used to evaluate the random variables * \param block_rvs The random variables to be converted * \return The conversion result srefs */ inline Array<StmtSRef> BlockRVs2StmtSRefs(const Schedule& sch, const Array<BlockRV>& block_rvs) { Array<StmtSRef> block_srefs; block_srefs.reserve(block_rvs.size()); for (const BlockRV& block_rv : block_rvs) { block_srefs.push_back(sch->GetSRef(block_rv)); } return block_srefs; } /******** Storage scope ********/ /*! 
* \brief Determine if iterators of a storage scope should be relaxed * under a specific thread scope * \param storage_scope The storage scope that the iterators are on * \param thread_scope The thread scope to be relaxed * \return A boolean indicating the result */ inline bool CanRelaxStorageUnderThread(const runtime::StorageScope& storage_scope, const runtime::ThreadScope& thread_scope) { if (storage_scope.rank == runtime::StorageRank::kWarp) { // for warp memory, we only relax threadIdx.x return thread_scope.rank == 1 && thread_scope.dim_index == 0; } return static_cast<int>(storage_scope.rank) <= static_cast<int>(thread_scope.rank); } /******** SeqStmt ********/ /*! * \brief Remove a specific Stmt from a SeqStmt. If a SeqStmt contains a BlockRealize, * whose block is the Stmt to be removed, then remove that BlockRealize too. * \param seq The SeqStmt to be removed from * \param to_remove The Stmt to be removed * \return The removal result */ inline Stmt RemoveFromSeqStmt(const SeqStmt& seq, const Stmt& to_remove) { ICHECK_GT(seq->size(), 1); Array<Stmt> new_stmts; new_stmts.reserve(seq->size()); for (const Stmt& stmt : seq->seq) { if (to_remove.same_as(stmt)) { continue; } if (const auto* realize = stmt.as<BlockRealizeNode>()) { if (to_remove.same_as(realize->block)) { continue; } } new_stmts.push_back(stmt); } return SeqStmt::Flatten(new_stmts); } /*! * \brief Convert a Stmt to an Array. * \param stmt The Stmt to be converted to * \return If the Stmt is SeqStmt, then returns the sequence; * Otherwise, returns a single-element Array with the Stmt inside. */ inline Array<Stmt> AsArray(const Stmt& stmt) { if (const auto* seq_stmt = stmt.as<SeqStmtNode>()) { return seq_stmt->seq; } return {stmt}; } /*! 
 * \brief Check whether a statement is a single statement, i.e. it is not
 * a SeqStmt that carries more than one statement
 * \param stmt The statement to be checked
 * \return A boolean indicating the result
 */
inline bool IsSingleStmt(const Stmt& stmt) {
  if (const auto* seq_stmt = stmt.as<SeqStmtNode>()) {
    return seq_stmt->seq.size() == 1;
  }
  return true;
}

/******** IterVar ********/

/*!
 * \brief Create a new IterVar for the input For loop, with specified name and type
 * \param loop The loop to be created from
 * \param name The name of the new IterVar
 * \param iter_var_type The type of the new IterVar
 * \return The newly created IterVar
 */
inline IterVar IterVarFromLoop(const For& loop, String name, IterVarType iter_var_type) {
  return IterVar(Range::FromMinExtent(loop->min, loop->extent),
                 Var(std::move(name), loop->loop_var.dtype()), iter_var_type);
}

/*!
 * \brief Get the thread scope bound to the specific loop
 * \param loop The loop to be inspected
 * \return The thread scope bound to the loop; an invalid scope {-1, -1} if the
 * loop is not bound to any thread
 */
inline runtime::ThreadScope GetThreadScope(const ForNode* loop) {
  if (loop->kind == ForKind::kThreadBinding) {
    return runtime::ThreadScope::Create(loop->thread_binding.value()->thread_tag);
  }
  return runtime::ThreadScope{-1, -1};
}

/*!
 * \brief Check if the thread scope is blockIdx
 * \param thread_scope The thread scope to be checked
 * \return True if the thread scope is blockIdx
 */
inline bool IsBlockIdx(const runtime::ThreadScope& thread_scope) {
  return thread_scope.rank == 0;  // The rank of blockIdx is 0
}

/*!
 * \brief Check if the thread scope is threadIdx
 * \param thread_scope The thread scope to be checked
 * \return True if the thread scope is threadIdx
 */
inline bool IsThreadIdx(const runtime::ThreadScope& thread_scope) {
  return thread_scope.rank == 1 && thread_scope.dim_index >= 0;
}

/**************** Loop extents ****************/

/*!
 * \brief Get the extent of a loop
 * \param loop The loop to be queried
 * \return The extent of the loop, nullptr if the extent is not constant
 */
inline const int64_t* GetLoopIntExtent(const ForNode* loop) { return as_const_int(loop->extent); }

/*!
 * \brief Get the extent of a loop
 * \param loop_sref The loop to be queried
 * \return The extent of the loop, nullptr if the extent is not constant
 */
inline const int64_t* GetLoopIntExtent(const StmtSRef& loop_sref) {
  const ForNode* loop = TVM_SREF_TO_FOR(loop_sref);
  return as_const_int(loop->extent);
}

/*!
 * \brief Check if an expression consists of a single variable,
 * or a variable plus/minus a constant integer shift
 * \param expr The expression to be checked
 * \param constant Output parameter: the constant shift (negated for "var - shift"),
 * or NullOpt when the expression is the bare variable
 * \return The single variable in the expression, or NullOpt if the expression is neither a variable
 * nor a constant shift from a variable
 */
inline Optional<Var> AnalyzeVarWithShift(const PrimExpr& expr, Optional<IntImm>* constant) {
  // Bare variable: no shift at all.
  if (const auto* var = expr.as<VarNode>()) {
    *constant = NullOpt;
    return GetRef<Var>(var);
  }
  arith::PVar<Var> var;
  arith::PVar<IntImm> shift;
  // match: "var + shift"
  if ((var + shift).Match(expr) || (shift + var).Match(expr)) {
    *constant = shift.Eval();
    return var.Eval();
  }
  // match: "var - shift"
  if ((var - shift).Match(expr)) {
    IntImm result = shift.Eval();
    // Normalize subtraction into a negative additive shift.
    *constant = IntImm(result->dtype, -result->value);
    return var.Eval();
  }
  return NullOpt;
}

/******** Annotation ********/

/*!
 * \brief Get the annotation on a Block/For
 * \tparam TObjectRef The type of the annotation value
 * \tparam TStmtNode The statement node type, which is BlockNode or ForNode
 * \param stmt The block or for-loop node whose annotations are inspected
 * \param ann_key The annotation key to be looked up
 * \return NullOpt if not found; otherwise the annotation value
 */
template <class TObjectRef, class TStmtNode>
inline Optional<TObjectRef> GetAnn(const TStmtNode* stmt, const String& ann_key) {
  const Map<String, ObjectRef>* annotations = &stmt->annotations;
  for (const auto& ann : *annotations) {
    if (ann.first == ann_key) {
      return Downcast<TObjectRef>(ann.second);
    }
  }
  return NullOpt;
}

/*!
 * \brief Get the annotation on a Block/For
 * \tparam TObjectRef The type of the annotation value
 * \param sref The sref to the block or the for loop
 * \param ann_key The annotation key to be looked up
 * \return NullOpt if not found; otherwise the annotation value
 */
template <class TObjectRef>
inline Optional<TObjectRef> GetAnn(const StmtSRef& sref, const String& ann_key) {
  // Dispatch on the concrete statement type behind the sref.
  if (const auto* loop = sref->StmtAs<ForNode>()) {
    return GetAnn<TObjectRef, ForNode>(loop, ann_key);
  } else if (const auto* block = sref->StmtAs<BlockNode>()) {
    return GetAnn<TObjectRef, BlockNode>(block, ann_key);
  } else {
    LOG(FATAL) << "TypeError: Unknown type of sref: " << sref->stmt->GetTypeKey();
    throw;
  }
}

/*!
 * \brief Check if a Block/For has a specific pair of annotation key and values
 * \param sref The sref to the block or the for loop
 * \param ann_key The annotation key to be checked
 * \param ann_val The annotation value to be checked
 * \return Whether a Block/For has a specific pair of annotation key and values
 */
inline bool HasAnn(const StmtSRef& sref, const String& ann_key, const String& ann_val) {
  Optional<String> result = GetAnn<String>(sref, ann_key);
  return result.defined() && result.value() == ann_val;
}

/*!
 * \brief Check if a Block/For has a specific pair of annotation key and values
 * \param sref The sref to the block or the for loop
 * \param ann_key The annotation key to be checked
 * \param ann_val The boolean annotation value to be checked
 * \return Whether a Block/For has a specific pair of annotation key and values
 */
inline bool HasAnn(const StmtSRef& sref, const String& ann_key, bool ann_val) {
  Optional<Bool> result = GetAnn<Bool>(sref, ann_key);
  return result.defined() && result.value() == ann_val;
}

/********** Helper Functions for RuleAddRFactor and RuleCrossThreadReduction **********/

/*!
 * \brief Reorder the reduction loops to innermost positions if needed.
 * \param sch The schedule
 * \param block_rv The block where to apply the reorder
 * \param fused_reduce_loop The fusion-generated loop to return.
 * \param num_spatial_loops The number of spatial loops to return.
 * \note Before invoking this helper function, make sure that the block has only spatial and
 * reduction loop axes.
 */
inline void ReorderAndFuseReductionLoops(const tir::Schedule& sch, const tir::BlockRV& block_rv,
                                         tir::LoopRV* fused_reduce_loop,
                                         size_t* num_spatial_loops) {
  Array<tir::LoopRV> loops = sch->GetLoops(block_rv);
  Array<tir::StmtSRef> loop_srefs;
  for (const tir::LoopRV& loop_rv : loops) {
    loop_srefs.push_back(sch->GetSRef(loop_rv));
  }

  Array<tir::LoopRV> new_order;
  // Step 1. Add spatial loops.
  *num_spatial_loops = 0;
  for (size_t i = 0; i < loops.size(); ++i) {
    if (GetLoopIterType(loop_srefs[i]) == tir::kDataPar) {
      new_order.push_back(loops[i]);
      (*num_spatial_loops)++;
    }
  }
  // Step 2. Add reduction loops.
  Array<tir::LoopRV> reduction_loops;
  for (size_t i = 0; i < loops.size(); ++i) {
    if (GetLoopIterType(loop_srefs[i]) == tir::kCommReduce) {
      new_order.push_back(loops[i]);
      reduction_loops.push_back(loops[i]);
    }
  }
  // Step 3. Apply reordering if new_order differs from the original order.
  ICHECK_EQ(new_order.size(), loops.size());
  for (size_t i = 0; i < loops.size(); ++i) {
    if (!new_order[i].same_as(loops[i])) {
      sch->Reorder(new_order);
      break;
    }
  }

  // Step 4. Fuse all the reduction loops if there are multiple reduction loops.
  CHECK(!reduction_loops.empty()) << "ValueError: There should be at least one reduction loop";
  if (reduction_loops.size() > 1) {
    *fused_reduce_loop = sch->Fuse(reduction_loops);
  } else {
    *fused_reduce_loop = reduction_loops[0];
  }
}

/******** Helper functions for enum conversion ********/

/*!
 * \brief Convert BufferIndexType to String
 * \param buffer_index_type The BufferIndexType value to convert
 * \return The string representation of BufferIndexType
 */
inline String BufferIndexType2Str(BufferIndexType buffer_index_type) {
  if (buffer_index_type == BufferIndexType::kRead) {
    return "read";
  } else {
    ICHECK(buffer_index_type == BufferIndexType::kWrite);
    return "write";
  }
}

/******** Utilities for retrieving information about blocks ********/

/*! \brief Returns the names of the blocks in the provided module. */
inline std::unordered_set<std::string> GetBlockNames(const IRModule& mod) {
  struct BlockNameCollector : public tir::StmtVisitor {
    void VisitStmt_(const tir::BlockNode* block) override {
      block_names.insert(block->name_hint);
      StmtVisitor::VisitStmt(block->body);
    }
    std::unordered_set<std::string> block_names;
  };

  auto prim_func = tir::FindEntryFunc(mod, nullptr);
  BlockNameCollector collector;
  collector(prim_func->body);
  return collector.block_names;
}

/*! \brief Query if the given block name exists in the module associated with the schedule */
inline bool HasBlock(const Schedule& sch, const std::string& block_name) {
  auto block_names = GetBlockNames(sch->mod());
  return block_names.count(block_name);
}

/******** Utilities for trace application ********/

/*!
 * \brief Translate the input objects using the provided substitution map.
 * \param inputs The input objects.
 * \param rv_map The substitution map for variables.
 * \return The transformed objects.
 */
Array<ObjectRef> TranslateInputRVs(const Array<ObjectRef>& inputs,
                                   const std::unordered_map<const Object*, const Object*>& rv_map);

/*!
 * \brief Update the variable substitution map according to the new outputs.
 * \param old_outputs The previous outputs of a schedule instruction.
 * \param new_outputs The new outputs of the same schedule instruction.
 * \param rv_map The substitution map for variables (passed by pointer so it can be updated).
 */
void TranslateAddOutputRVs(const Array<ObjectRef>& old_outputs, const Array<ObjectRef>& new_outputs,
                           std::unordered_map<const Object*, const Object*>* rv_map);

}  // namespace tir
}  // namespace tvm

#endif  // TVM_TIR_SCHEDULE_UTILS_H_
https://github.com/zk-ml/tachikoma
src/tir/transforms/arg_binder.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file arg_binder.h * \brief Helper utility to match and bind arguments. */ #ifndef TVM_TIR_TRANSFORMS_ARG_BINDER_H_ #define TVM_TIR_TRANSFORMS_ARG_BINDER_H_ #include <tvm/arith/analyzer.h> #include <tvm/tir/buffer.h> #include <tvm/tir/expr.h> #include <string> #include <unordered_map> #include <vector> namespace tvm { namespace tir { /*! * \brief Helper utility to generate match and bind of arguments. * * \note There is many places in TVM IR where we need argument bindings. * * Consider a function f(tA(shape=var(n)), tB(shape=3), tC(shape=(n+2)). * Here n is a undefined variable that is decided by the outside, tB imposes * a constraint such that it can only take tensor with shape 3, tC imposes * another constraint that it's shape must equals n + 2. * So if we call it with f(bufferA, bufferB, bufferC), we need to generate * the following binding sequence: * - define n = bufferA.shape[0] * - assert bufferB.shape[0] == 3 * - assert bufferB.shape[1] == n + 3 * * In general, this is a constraint solving problem. We have simplified assumption * over the binding declaration, such that we require the variable occurred in * constraint must be declared in argument list. 
So it is illegal to have signature * f(tA(shape=(n+3))) without any argument variable corresponds to n, even though * it is already enough to derive n from the input argument. */ class ArgBinder { public: /*! * \brief Constructor * \param def_map A definition map that contains definition of known variables. * ArgBinder will update this def_map when adding new definitions. */ explicit ArgBinder(std::unordered_map<const VarNode*, PrimExpr>* def_map) : def_map_(def_map) {} /*! * \brief Try to bind arg to value, generate constraint if necessary. * \param arg The argument to be binded. * \param value The target expression value * \param arg_name argument name. * \param with_let Whether add lets during bind */ void Bind(const PrimExpr& arg, const PrimExpr& value, const std::string& arg_name, bool with_let = false); /*! * \brief Bind array to array * \param arg The argument to be binded. * \param value The target expression value * \param arg_name argument name. */ void BindArray(const Array<PrimExpr>& arg, const Array<PrimExpr>& value, const std::string& arg_name); /*! * \brief Bind symbolic buffer to another symbolic buffer * \param arg The argument to be binded. * \param value The target expression value * \param arg_name argument name. * \param fuzzy_match If enabled, we allow value's dimension to be smaller than arg, as long as * arg's higher dimensions are of 1. */ void BindBuffer(const Buffer& arg, const Buffer& value, const std::string& arg_name, bool fuzzy_match); /*! * \brief Bind symbolic buffer to a DLTensor handle. * \param buffer The argument buffer to be binded. * \param device_type The device id to be binded. * \param device_id The device id to be binded. * \param handle The DLTensor handle. * \param arg_name argument name. */ void BindDLTensor(const Buffer& buffer, const PrimExpr& device_type, const PrimExpr& device_id, const Var& handle, const std::string& arg_name); /*! \return The defs generated in binding. 
   */
  const std::vector<Var>& defs() const { return defs_; }
  /*! \return The asserts generated in binding */
  const std::vector<Stmt>& asserts() const { return asserts_; }
  /*!
   * \brief Initialization nest generated
   * This is only non-empty when BindDLTensor is called.
   *
   * \note The binder may choose to generate a let statement
   *  and simply put def_map to map Variable to itself,
   *  or update def_map to directly map to new value and not generate let statement.
   *
   *  Let statement is usually generated when bind to DLTensor and memory load is involved.
   * \return The initialization nest generated during binding.
   */
  const std::vector<Stmt>& init_nest() const { return init_nest_; }
  /*! \return Handle data type of the data */
  const Map<Var, PrimExpr>& def_handle_dtype() const { return def_handle_dtype_; }

 private:
  // Internal bind function
  bool Bind_(const PrimExpr& arg, const PrimExpr& value, const std::string& arg_name,
             bool with_lets);
  /*! \brief The definition map, can be used for substitution */
  std::unordered_map<const VarNode*, PrimExpr>* def_map_;
  /*! \brief defs generated in the current binder */
  std::vector<Var> defs_;
  /*! \brief Initialize nest */
  std::vector<Stmt> init_nest_;
  /*! \brief handle data type in the definitions */
  Map<Var, PrimExpr> def_handle_dtype_;
  /*! \brief asserts generated */
  std::vector<Stmt> asserts_;
  /*! \brief internal analyzer. */
  arith::Analyzer analyzer_;
};
}  // namespace tir
}  // namespace tvm
#endif  // TVM_TIR_TRANSFORMS_ARG_BINDER_H_
https://github.com/zk-ml/tachikoma
src/tir/transforms/common_subexpr_elim.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file common_subexpr_elim.h * \brief Interface of the Common Subexpressions Elimination (CSE) pass which rewrites statements and expressions in order to eliminate redundant computations. In order to achieve that, common (sub-)expressions are introduced into variables with let-in bindings, and the places where the expression was used are replaced with the freshly introduced variable. */ #ifndef TVM_TIR_TRANSFORMS_COMMON_SUBEXPR_ELIM_H_ #define TVM_TIR_TRANSFORMS_COMMON_SUBEXPR_ELIM_H_ #include <tvm/tir/expr.h> #include <tvm/tir/expr_functor.h> #include <tvm/tir/stmt.h> #include <tvm/tir/stmt_functor.h> // For the class StmtExprMutator #include <tvm/tir/var.h> #include <utility> // For std::pair #include <vector> #include "common_subexpr_elim_tools.h" // For the class MaybeValue namespace tvm { namespace tir { /*! * \brief A context is a vector of pairs that associates Var to MaybeValue (which are either an expression or nothing) */ using Context = std::vector<std::pair<Var, MaybeValue>>; /*! * \brief Mutator that performs Common Subexpression Elimination (CSE) for the body of a PrimFunc, mutating both its expressions and statements. 
*/ class CommonSubexpressionEliminator : public StmtExprMutator { public: // Toplevel (static) function static Stmt PerformCSE(const Stmt& stmt, const Context& context_init, bool identify_equiv_terms); PrimExpr VisitExpr(const PrimExpr& expr) override; Stmt VisitStmt(const Stmt& stmt) override; int GetNbVarGenerated(); protected: // Constructor CommonSubexpressionEliminator(const Stmt& stmt, const Context& context_init, bool identify_equiv_terms); PrimExpr VisitExpr_(const LetNode* op) override; Stmt VisitStmt_(const LetStmtNode* op) override; Stmt VisitStmt_(const ForNode* op) override; private: Stmt initial_body_; // Kept for checking if names of new variables already exist Context context_; // Context associating variables to (maybe) definitions int num_last_try_ = 0; // Number of the last variable tried int nb_var_ = 0; // Number of variables introduced by the CSE pass bool identify_equiv_terms_ = false; static bool ForbiddenComputation(const PrimExpr& expr); static bool IsEligibleComputation(const PrimExpr& expr); static bool CanContainEligibleComputations(const PrimExpr& expr); static bool OrderOnExprAndFrequency(std::pair<PrimExpr, size_t> a, std::pair<PrimExpr, size_t> b); Var GenerateNewVar(DataType type_annotation); }; } // namespace tir } // namespace tvm #endif // TVM_TIR_TRANSFORMS_COMMON_SUBEXPR_ELIM_H_
https://github.com/zk-ml/tachikoma
src/tir/transforms/common_subexpr_elim_tools.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file common_subexpr_elim_tools.h * \brief Interface of analysis tools and utility functions used by the Common Subexpression Elimination (CSE) pass. */ #ifndef TVM_TIR_TRANSFORMS_COMMON_SUBEXPR_ELIM_TOOLS_H_ #define TVM_TIR_TRANSFORMS_COMMON_SUBEXPR_ELIM_TOOLS_H_ #include <tvm/runtime/container/string.h> #include <tvm/tir/analysis.h> // For the ExprDeepEqual analysis #include <tvm/tir/expr.h> #include <tvm/tir/expr_functor.h> #include <tvm/tir/stmt.h> #include <tvm/tir/stmt_functor.h> // For the class StmtExprVisitor #include <optional> #include <unordered_map> // For the hashtable datatype #include <utility> // For pairs datatype #include <vector> namespace tvm { namespace tir { /*! * \brief A computation table is a hashtable which associates to each expression being computed a number (which is the number of time that it is computed) It is important to note that the hash used is a StructuralHash (and not an ObjectPtrHash) as we need to hash similarly deeply equal terms. The comparison used is ExprDeepEqual, which is stricter than StructuralEqual (as it does not do variables remapping), so it is compatible with StructuralHash (intended to be used with StructuralEqual). 
*/ using ComputationTable = std::unordered_map<PrimExpr, size_t, StructuralHash, ExprDeepEqual>; /*! * \brief A cache of computations is made of a pair of two hashtables, which respectively associate to each statement or expression of the program its computation table. Its purpose is to avoid the CSE pass from recomputing repeatedly the same tables of computations. */ struct ComputationCache { // Part of the cache for statements // It maps each known statement to its computation table std::unordered_map<Stmt, ComputationTable, ObjectPtrHash, ObjectPtrEqual> cache_stmt_table_computations_; // Part of the cache for expressions // It maps each known expression to its computation table std::unordered_map<PrimExpr, ComputationTable, ObjectPtrHash, ObjectPtrEqual> cache_expr_table_computations_; }; /*! * \brief Visitor which returns in a hashtable the (syntatic) computations done by an expression or by a statement. * \note Computations here are considered syntactically, meaning that semantically equivalent computations that are not syntactically the same are not merged together. 
*/ class ComputationsDoneBy : public StmtExprVisitor { public: // Toplevel (static) methods static ComputationTable GetComputationsDoneBy( const PrimExpr& expr, std::function<bool(const PrimExpr&)> is_eligible_computation, std::function<bool(const PrimExpr&)> can_contain_computations); static ComputationTable GetComputationsDoneBy( const Stmt& stmt, std::function<bool(const PrimExpr&)> is_eligible_computation, std::function<bool(const PrimExpr&)> can_contain_computations); protected: // Constructor ComputationsDoneBy(std::function<bool(const PrimExpr&)> is_eligible_computation, std::function<bool(const PrimExpr&)> can_contain_computations); void VisitExpr(const PrimExpr& expr) override; void VisitStmt(const Stmt& stmt) override; void VisitStmt_(const IfThenElseNode* op) override; void VisitStmt_(const ForNode* op) override; void VisitStmt_(const WhileNode* op) override; private: static ComputationTable ComputationsDoneByChildrenOf( const PrimExpr& expr, std::function<bool(const PrimExpr&)> is_eligible_computation, std::function<bool(const PrimExpr&)> can_contain_computations); static ComputationTable ComputationsDoneByChildrenOf( const Stmt& stmt, std::function<bool(const PrimExpr&)> is_eligible_computation, std::function<bool(const PrimExpr&)> can_contain_computations); // The predicate used for knowing which computations are eligible std::function<bool(const PrimExpr&)> is_eligible_computation_; // The predicate used for knowing in which nodes we can search for eligible computations std::function<bool(const PrimExpr&)> can_contain_computations_; // The object being constructed and "returned" by the VisitExpr()/VisitStmt() methods ComputationTable table_of_computations_; // Cache for preventing to compute repeatedly the computations done by the same stmt or expr static ComputationCache cache_; }; /*! * \brief Visitor that computes the *direct* subexpressions of a given expression. 
* \note Returns only the direct subexpressions of the given expressions, not all the subexprs. So for instance, for (A+(B+C)) it will return A and (B+C) if they are eligible, but not B and C. */ class DirectSubexpr : public ExprVisitor { public: // Toplevel (static) function static std::vector<PrimExpr> GetDirectSubexpressions( const PrimExpr& expr, std::function<bool(const PrimExpr&)> is_eligible_computation, std::function<bool(const PrimExpr&)> can_contain_computations); protected: // Constructor DirectSubexpr(std::function<bool(const PrimExpr&)> is_eligible_computation, std::function<bool(const PrimExpr&)> can_contain_computations); void VisitExpr(const PrimExpr& expr) override; private: // The predicate used for knowing which computations are eligible std::function<bool(const PrimExpr&)> is_eligible_computation_; // The predicate used for knowing in which nodes we can search for eligible subexpressions std::function<bool(const PrimExpr&)> can_contain_computations_; // We haven't entered the VisitExpr() method yet bool entered_ = false; // The vector of direct subexpressions that we are building std::vector<PrimExpr> direct_subexpr_; }; /*! * \brief Visitor which tells if a given expression or statement uses a given variable name. This is used by the CSE pass to make sure that we do not reuse existing names, even though having the same name does not mean that it's the same variable, but it's clearer for dumps. */ class UsesVarName : public StmtExprVisitor { public: // Toplevel (static) methods static bool ExprUsesVarName(const PrimExpr& expr, String var_name); static bool StmtUsesVarName(const Stmt& stmt, String var_name); protected: // Constructor explicit UsesVarName(String var_name); void VisitExpr(const PrimExpr& expr) override; void VisitStmt(const Stmt& stmt) override; private: String var_name_; bool uses_var_name_ = false; }; /*! 
 * \brief Various utility functions for the CSE pass
 */

// Debugging helper: dumps a ComputationTable in a human-readable form.
void PrintComputationTable(const ComputationTable& table);

using MaybeValue = std::optional<PrimExpr>;

// Syntactical equality of two terms.
bool EqualTerms(const PrimExpr& a, const PrimExpr& b);
// Used for deciding the (decidable) equivalence relation
PrimExpr NormalizeTerm(const PrimExpr& expr, bool do_normalization);
// The equivalence relation, which is the syntactical equality when `identify_equiv_terms` is false
bool EquivalentTerms(const PrimExpr& a, const PrimExpr& b, bool identify_equiv_terms);
std::vector<std::pair<PrimExpr, size_t>> SyntacticToSemanticComputations(
    const ComputationTable& table, bool identify_equiv_terms);
// Predicate deciding whether a computation seen `nb_times_seen` times deserves a new variable.
bool PredicateIntroVarForComputation(const PrimExpr& computation, size_t nb_times_seen);

// Polymorphic (functional) map on a vector, which builds a new vector with the same number of
// elements, where each element is the application of a given function on the corresponding element
// in the input vector.
template <typename A, typename B>
std::vector<B> VectorMap(const std::vector<A>& input, std::function<B(const A&)> fun) {
  std::vector<B> result;
  size_t size = input.size();
  // For efficiency, allocate immediately the size needed as the result will have
  // the same size as the input
  result.reserve(size);
  for (size_t i = 0; i < size; i++) {
    result.push_back(fun(input[i]));
  }
  return result;
}
// Explicitly instantiate the template function for A=std::pair<Var,MaybeValue> and B=Var
template std::vector<Var> VectorMap(const std::vector<std::pair<Var, MaybeValue>>&,
                                    std::function<Var(const std::pair<Var, MaybeValue>&)>);

// Inserts a single element into a vector kept sorted, preserving the order.
void InsertElemToSortedSemanticComputations(std::vector<std::pair<PrimExpr, size_t>>* sorted_vec,
                                            const std::pair<PrimExpr, size_t>& pair);
// Inserts a batch of expressions into a vector kept sorted, preserving the order.
void InsertVectorToSortedSemanticComputations(std::vector<std::pair<PrimExpr, size_t>>* sorted_vec,
                                              const std::vector<PrimExpr>& vec_to_add,
                                              bool identify_equiv_terms);

}  // namespace tir
}  // namespace tvm

#endif  // TVM_TIR_TRANSFORMS_COMMON_SUBEXPR_ELIM_TOOLS_H_
https://github.com/zk-ml/tachikoma
src/tir/transforms/ir_utils.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file ir_utils.h
 * \brief Helper functions to construct and compose IR nodes.
 */
#ifndef TVM_TIR_TRANSFORMS_IR_UTILS_H_
#define TVM_TIR_TRANSFORMS_IR_UTILS_H_

#include <tvm/arith/int_set.h>
#include <tvm/runtime/device_api.h>
#include <tvm/support/with.h>
#include <tvm/tir/builtin.h>
#include <tvm/tir/expr.h>
#include <tvm/tir/function.h>
#include <tvm/tir/op.h>

#include <limits>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

namespace tvm {
namespace tir {
/*!
 * \brief combine the nest stmt, whose body is not defined.
 * \param nest A list of For and LetStmt, whose body is not defined.
 * \param body body
 * \return The combined Stmt
 */
Stmt MergeNest(const std::vector<Stmt>& nest, Stmt body);

/*!
 * \brief combine the nest stmt, whose body is not defined.
 * \param nest A list of lists of For and LetStmt, whose body is not defined;
 *             the inner lists are flattened in order.
 * \param body body
 * \return The combined Stmt
 */
Stmt MergeNest(const std::vector<std::vector<Stmt>>& nest, Stmt body);

/*!
 * \brief update array with a unary function
 * \param arr array
 * \param fupdate a unary function
 * \tparam T type of array element
 * \tparam F type of the unary function
 * \return if update happens, return the new array, else return the
 *  original array
 */
template <typename T, typename F>
inline Array<T> UpdateArray(Array<T> arr, F fupdate) {
  std::vector<T> new_arr(arr.size());
  bool changed = false;
  for (size_t i = 0; i < arr.size(); ++i) {
    T old_elem = arr[i];
    T new_elem = fupdate(old_elem);
    if (!new_elem.same_as(old_elem)) changed = true;
    new_arr[i] = new_elem;
  }
  // Return the original array unchanged when no element was replaced,
  // so reference equality (same_as) is preserved for callers.
  if (!changed) {
    return arr;
  } else {
    return Array<T>(new_arr);
  }
}

/*!
 * \brief Get construct from struct
 * \param dtype The data type.
 * \param handle the struct handle.
 * \param index the offset index.
 * \param kind The data kind.
 * \return the get expression.
 */
inline PrimExpr TVMStructGet(DataType dtype, Var handle, int index,
                             builtin::TVMStructFieldKind kind) {
  Array<PrimExpr> args = {handle, make_const(DataType::Int(32), index),
                          make_const(DataType::Int(32), static_cast<int>(kind))};
  return Call(dtype, builtin::tvm_struct_get(), args);
}

/*!
 * \brief Address of handle + offset
 * \param handle the array handle.
 * \param dtype The data type.
 * \param offset the offset index.
 */
inline PrimExpr AddressOffset(Var handle, DataType dtype, int offset) {
  // Offset is expressed in elements, scaled by the number of lanes.
  PrimExpr offset_expr = make_const(DataType::Int(32), offset * dtype.lanes());
  // Wrap the raw handle in a dummy Buffer so the address arithmetic can be
  // expressed as address_of(BufferLoad); the shape only needs to cover the load.
  Buffer dummy_buf(handle, dtype, {offset_expr + 1}, {}, 0, handle->name_hint, 0, 0, kDefault);
  BufferLoad buf_load(dummy_buf, {offset_expr});

  return Call(DataType::Handle(), builtin::address_of(), {buf_load});
}

/*!
 * \brief Address of handle + offset
 * \param handle the array handle.
 * \param dtype The data type.
 * \param offset the offset index.
 */
inline PrimExpr AddressOffset(Var handle, DataType dtype, PrimExpr offset) {
  if (dtype.lanes() != 1) {
    // For vector types, scale the offset by the lane count and build a Ramp
    // covering one element per lane.
    offset = offset * make_const(offset.dtype(), dtype.lanes());
    offset = Ramp(offset, make_const(offset.dtype(), 1), dtype.lanes());
  }

  // Wrap the raw handle in a dummy Buffer so the address arithmetic can be
  // expressed as address_of(BufferLoad).
  Buffer dummy_buf(handle, dtype.element_of(), {offset + 1}, {}, 0, handle->name_hint, 0, 0,
                   kDefault);
  BufferLoad buf_load(dummy_buf, {offset});

  return Call(DataType::Handle(), builtin::address_of(), {buf_load});
}

/*!
 * \brief Set value into struct.
 * \param handle the struct handle.
 * \param index the offset index.
 * \param kind The data kind.
 * \param value The value to be set.
 * \return the set stmt.
 */
inline Stmt TVMStructSet(Var handle, int index, builtin::TVMStructFieldKind kind, PrimExpr value) {
  Array<PrimExpr> args = {handle, make_const(DataType::Int(32), index),
                          make_const(DataType::Int(32), static_cast<int>(kind)), value};
  return Evaluate(Call(DataType::Int(32), builtin::tvm_struct_set(), args));
}

/*!
 * \brief Get the type that is passed around TVM PackedFunc API.
 * \param t The original type.
 * \return The corresponding API type.
 * \note Handles pass through unchanged; scalar ints/uints widen to int64 and
 *       floats to float64, matching the PackedFunc calling convention.
 */
inline DataType APIType(DataType t) {
  if (t.is_handle()) return t;
  ICHECK_EQ(t.lanes(), 1) << "Cannot pass vector type through packed API.";
  if (t.is_uint() || t.is_int()) return DataType::Int(64);
  ICHECK(t.is_float());
  return DataType::Float(64);
}

/*!
 * \brief Rule to get allocation alignment requirement for a given const array.
 * \param type The type of allocation.
 * \param const_size The constant size of the array.
 * \return the alignment
 */
inline int GetTempAllocaAlignment(DataType type, int32_t const_size) {
  int align = runtime::kTempAllocaAlignment;
  if (const_size > 0) {
    // Shrink the default alignment until it does not exceed the buffer's byte size.
    int64_t const_s = static_cast<int64_t>(const_size) * type.bits() * type.lanes() / 8;
    while (align > const_s) {
      align = align / 2;
    }
  }
  return align;
}

/*!
 * \brief Create an int32 constant
 * \param index the value of the constant
 * \return the PrimExpr that represents the constant
 */
inline PrimExpr ConstInt32(size_t index) {
  // Guard against silent truncation when narrowing size_t to int32.
  ICHECK_LE(index, std::numeric_limits<int>::max());
  return make_const(DataType::Int(32), static_cast<int>(index));
}

/*!
 * \brief Allocate TVMValues on the stack
 * \param type type of allocation
 * \param num number of TVMValues to allocate
 * \return PrimExpr representing the TVMValue
 */
inline PrimExpr StackAlloca(std::string type, size_t num) {
  Array<PrimExpr> args = {StringImm(type), ConstInt32(num)};
  return Call(DataType::Handle(), builtin::tvm_stack_alloca(), args);
}

/*!
 * \brief Convert a IR node to be SSA form.
 * \param stmt The source statement to be converted.
 * \return The converted form.
 */
Stmt ConvertSSA(Stmt stmt);

/*!
 * \brief Return the storage scope associated with a buffer variable.
 * \param buffer_var The input buffer variable.
 * \return A string representing the storage scope of this buffer variable.
 */
String GetPtrStorageScope(Var buffer_var);

/*!
 * \brief Convert match buffer target buffer access indices to original one.
 * \param indices The indices of the target buffer
 * \return The indices of source buffer.
 */
Array<PrimExpr> ConvertIndices(const MatchBufferRegion& match_buffer,
                               const Array<PrimExpr>& indices);

/*!
 * \brief Convert match buffer target buffer region to original one.
 * \param region The sub-region of the target buffer
 * \return The region of source buffer.
 */
Region ConvertRegion(const MatchBufferRegion& match_buffer, const Region& region);

/*!
 * \brief Check if a given PrimFunc originated from a TE schedule.
 *
 * Internally this checks for the `from_legacy_te_schedule` attr of the PrimFunc.
 *
 * \param f PrimFunc to check
 * \return Whether or not the PrimFunc was created from a te schedule
 */
Bool IsFromLegacyTESchedule(PrimFunc f);

/*!
 *\brief Context helper to update domain map within conditional scope.
 *
 * Assume the condition is `0 <= i && i < 9` and global domain of i is [0, 20], thus `bounds[i]` is
 * [0, 8]. Then `With<ConditionalBoundsContext> ctx(condition, &relax_map, &hint_map, true)` step
 * into scope where dom_map[i] is [0, 8] and `With<ConditionalBoundsContext> ctx(condition,
 * &relax_map, &hint_map, false)` step into scope where dom_map[i] is [9, 20]
 */
class ConditionalBoundsContext {
 private:
  friend class With<ConditionalBoundsContext>;
  /*!
   * \brief Construct a condition bounds context.
   * \param condition The condition holds on true branch.
   * \param relax_map The domain map for relaxed vars to update.
   * \param hint_map The domain map for free vars to update.
   * \param is_true_branch Whether step into the branch where condition bounds holds.
   */
  ConditionalBoundsContext(const PrimExpr& condition,
                           std::unordered_map<const VarNode*, arith::IntSet>* relax_map,
                           std::unordered_map<const VarNode*, arith::IntSet>* hint_map,
                           bool is_true_branch);
  // RAII hooks invoked by With<>: update the maps on entry, restore them on exit.
  void EnterWithScope();
  void ExitWithScope();

  /*! \brief Helper to solve related variable's bound within conditional scope.*/
  Map<Var, Range> GetVarBoundsFromCondition();

  /*! \brief the condition holds on true branch.
   * \note Stored as a reference: the referenced expression must outlive this context.
   */
  const PrimExpr& condition_;
  /*! \brief domain map for relaxed vars to update */
  std::unordered_map<const VarNode*, arith::IntSet>* relax_map_;
  /*! \brief domain map for free vars to update */
  std::unordered_map<const VarNode*, arith::IntSet>* hint_map_;
  /*! \brief whether is on true branch */
  bool is_true_branch_;
  /*! \brief used to record and restore original var bounds */
  std::unordered_map<const VarNode*, arith::IntSet> origin_map_;
};

// Information of tensor core fragment.
struct FragmentInfo {
  // fragment shape
  int m, n, k;
  // fragment layout (row-major or column-major)
  std::string layout;
  // scope of the fragment (wmma.matrix_a, wmma.matrix_b, or wmma.accumulator)
  std::string scope;
  FragmentInfo() = default;
  FragmentInfo(int _m, int _n, int _k, const std::string& _layout, const std::string& _scope)
      : m(_m), n(_n), k(_k), layout(_layout), scope(_scope) {}

  // Number of elements in the fragment, determined by its scope.
  int GetSize() const {
    if (scope == "wmma.matrix_a") {
      return m * k;
    } else if (scope == "wmma.matrix_b") {
      return n * k;
    } else if (scope == "wmma.accumulator") {
      return m * n;
    } else {
      // Unknown scope: ICHECK aborts; the `throw` only silences the
      // missing-return warning on this path.
      ICHECK(0);
      throw;
    }
  }
};

/*!
 * \brief Extract information of tensor core fragment from the IR.
 * \param stmt The stmt to visit.
 * \return Map from buffer variables to the fragment info.
 */
std::unordered_map<const VarNode*, FragmentInfo> GetTensorCoreFragmentInfo(const Stmt& stmt);

// Return the queue id and the in-flight count associated with the given
// attr::async_wait_queue_scope annotation.
std::pair<PrimExpr, PrimExpr> GetAsyncWaitAttributes(const AttrStmtNode* op);

/*!
 * \brief Bind a subset of parameter tensors to constants, replacing them by AllocateConst nodes.
 * \param f The function to bind constants to.
 * \param constants Raw constant data. If the size of this array is N, the last N parameter tensors
 * will be removed from the signature and instead AllocateConst nodes will be introduced in the
 * function body.
 * \return The updated function.
 */
PrimFunc BindParams(PrimFunc f, const Array<runtime::NDArray>& constants);

}  // namespace tir
}  // namespace tvm
#endif  // TVM_TIR_TRANSFORMS_IR_UTILS_H_
https://github.com/zk-ml/tachikoma
src/tir/transforms/replace_selected_expr.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file replace_selected_expr.h * \brief Interface of the pass that replaces in a statement or expression all the subexpressions that are selected with a predicate by another expression. */ #ifndef TVM_TIR_TRANSFORMS_REPLACE_SELECTED_EXPR_H_ #define TVM_TIR_TRANSFORMS_REPLACE_SELECTED_EXPR_H_ #include <tvm/tir/expr.h> #include <tvm/tir/expr_functor.h> #include <tvm/tir/stmt.h> #include <tvm/tir/stmt_functor.h> // For the class StmtExprMutator namespace tvm { namespace tir { /*! 
 * \brief Mutator for replacing the expressions selected by a predicate in a statement and/or
 *         in an expression, which only replaces inside of nodes in which it is allowed to
 *         perform replacements (given by a second predicate)
 */
class ReplaceSelectedExpr : public StmtExprMutator {
 public:
  // Toplevel (static) functions
  static PrimExpr ReplaceSelectedExprInExpr(
      const PrimExpr& expr, std::function<bool(const PrimExpr&)> predicate_selector,
      const PrimExpr& new_expr, std::function<bool(const PrimExpr&)> can_replace_inside);
  static Stmt ReplaceSelectedExprInStmt(const Stmt& stmt,
                                        std::function<bool(const PrimExpr&)> predicate_selector,
                                        const PrimExpr& new_expr,
                                        std::function<bool(const PrimExpr&)> can_replace_inside);

 protected:
  // Constructor
  ReplaceSelectedExpr(std::function<bool(const PrimExpr&)> predicate_selector,
                      const PrimExpr& new_expr,
                      std::function<bool(const PrimExpr&)> can_replace_inside);

  // Mutator override performing the actual replacement.
  PrimExpr VisitExpr(const PrimExpr& expr) override;

 private:
  // The predicate used for selecting what will be replaced
  std::function<bool(const PrimExpr&)> predicate_selector_;
  // The expression used for replacing
  // NOTE(review): stored as a reference member — the expression passed to the
  // constructor must outlive this mutator; verify at call sites.
  const PrimExpr& new_expr_;
  // The predicate used for knowing inside which nodes we can do rewriting
  // (i.e. in which nodes it can recurse)
  std::function<bool(const PrimExpr&)> can_replace_inside_;
};

}  // namespace tir
}  // namespace tvm

#endif  // TVM_TIR_TRANSFORMS_REPLACE_SELECTED_EXPR_H_
https://github.com/zk-ml/tachikoma
src/tir/transforms/storage_access.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file storage_access.h
 * \brief Common data structure for storage access analysis.
 */
#ifndef TVM_TIR_TRANSFORMS_STORAGE_ACCESS_H_
#define TVM_TIR_TRANSFORMS_STORAGE_ACCESS_H_

#include <tvm/arith/int_set.h>
#include <tvm/ir/attrs.h>
#include <tvm/tir/expr.h>
#include <tvm/tir/stmt_functor.h>

#include <unordered_map>
#include <vector>

#include "../../runtime/thread_storage_scope.h"

namespace tvm {
namespace tir {

using runtime::StorageRank;
using runtime::StorageScope;

/*!
 * \brief Base class of storage access analysis
 */
class StorageAccessVisitor : public StmtExprVisitor {
 public:
  /*! \brief Storage access type */
  enum AccessType {
    kRead,
    kWrite,
    kSync,
    kAlloc,
    // acquired version of read, only need to handle WAR dep.
    kReadAcquire
  };
  /*! \brief An access entry */
  struct AccessEntry {
    /*! \brief The thread index that access this entry */
    Array<IterVar> threads;
    /*! \brief The buffer variable, if any */
    Var buffer = NullValue<Var>();
    /*! \brief The access data type */
    DataType dtype;
    /*! \brief The touched access range
     *
     * Has one IntSet for each index in the buffer being accessed.
     */
    Array<arith::IntSet> touched;
    /*! \brief The type of access */
    AccessType type;
    /*! \brief The storage scope */
    StorageScope scope;
    /*! \brief Whether the access is double buffer write */
    bool double_buffer_write = false;
  };
  /*! \brief Access pattern about a single statement */
  struct StmtEntry {
    /*! \brief The statement */
    const Object* stmt;
    /*! \brief access patterns in the statement */
    std::vector<AccessEntry> access;
  };
  // override visitor pattern
  void VisitExpr_(const LoadNode* op) final;
  void VisitStmt_(const StoreNode* op) final;
  void VisitExpr_(const BufferLoadNode* op) final;
  void VisitStmt_(const BufferStoreNode* op) final;
  void VisitStmt_(const EvaluateNode* op) final;
  void VisitStmt_(const AttrStmtNode* op) final;
  void VisitStmt_(const ForNode* op) final;
  void VisitStmt_(const IfThenElseNode* op) final;
  void VisitStmt_(const WhileNode* op) final;
  void VisitExpr_(const CallNode* op) final;

 protected:
  // Start with one (outermost) scope on the stack.
  StorageAccessVisitor() { scope_.push_back(std::vector<StmtEntry>()); }
  /*! \return number of conditions in the current scope. */
  int condition_counter() const { return condition_counter_; }
  /*! \return whether we are in device environment. */
  bool in_device_env() const { return in_device_env_; }
  /*! \return environment threads */
  const Array<IterVar>& env_threads() const { return env_threads_; }
  /*!
   * \brief Whether we need to analyze the buffer in current scope.
   * \param buffer The buffer to be checked
   * \param scope The scope of the buffer.
   * \return Whether the analysis of buffer is enabled.
   */
  virtual bool Enabled(const VarNode* buffer, const StorageScope& scope) const { return true; }
  /*!
   * \brief Summarize the sequence of operations into parent.
   *
   *  Insert synchronization if necessary and remove un-necessary
   *  memory access which are already synced.
   *
   * \param seq The sequence of the access operations.
   * \param loop Pass loop node if it is a loop, otherwise nullptr.
   * \return The summarized sequence that represent access that
   *  the parent should taken care of to synchronize.
   */
  virtual std::vector<AccessEntry> Summarize(std::vector<StmtEntry> seq, const ForNode* loop) = 0;
  /*!
   * \brief Get the scope of the buffer array.
   * \return The scope of the final buffer array.
   */
  StorageScope GetScope(Var buffer_var) const;
  // access scope: one StmtEntry sequence per nested scope; back() is the current one.
  std::vector<std::vector<StmtEntry>> scope_;

 private:
  // whether access appending is enabled.
  bool allow_append_{false};
  // Whether we are in device environment
  bool in_device_env_{false};
  // Whether we are inside condition.
  int condition_counter_{0};
  // The current double buffer write scope.
  const VarNode* double_buffer_write_{nullptr};
  // the current free stmt entry.
  StmtEntry curr_stmt_;
  // The involving threads
  Array<IterVar> env_threads_;
};
}  // namespace tir
}  // namespace tvm
#endif  // TVM_TIR_TRANSFORMS_STORAGE_ACCESS_H_
https://github.com/zk-ml/tachikoma
src/tir/transforms/update_pointer_storage_scope.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file update_pointer_storage_scope.h * \brief A pass to update storage scopes for buffer variables. */ #ifndef TVM_TIR_TRANSFORMS_UPDATE_POINTER_STORAGE_SCOPE_H_ #define TVM_TIR_TRANSFORMS_UPDATE_POINTER_STORAGE_SCOPE_H_ #include <tvm/tir/expr.h> #include <tvm/tir/op.h> #include <tvm/tir/stmt_functor.h> #include <unordered_map> namespace tvm { namespace tir { class UpdatePointerStorageScope : public StmtExprMutator { public: explicit UpdatePointerStorageScope( const std::unordered_map<const VarNode*, String>& new_storage_scopes); virtual PrimExpr VisitExpr_(const VarNode*); virtual PrimExpr VisitExpr_(const LoadNode*); virtual PrimExpr VisitExpr_(const BufferLoadNode*); virtual Stmt VisitStmt_(const AllocateNode*); virtual Stmt VisitStmt_(const StoreNode*); virtual Stmt VisitStmt_(const BufferStoreNode*); private: template <typename Node> Node UpdateBufferAccess(Node node); Buffer GetUpdatedBuffer(Buffer buf); std::unordered_map<const VarNode*, Var> new_var_remap_; std::unordered_map<const BufferNode*, Buffer> new_buffer_remap_; }; } // namespace tir } // namespace tvm #endif // TVM_TIR_TRANSFORMS_UPDATE_POINTER_STORAGE_SCOPE_H_
https://github.com/zk-ml/tachikoma
test.py
"""Scratch/driver script exercising the MRT (model runtime tool) pipeline.

Loads ResNet18 via PyTorch, builds an MRT trace, runs calibration, then exits.
Everything after the first ``sys.exit(1)`` is dead code kept for experimentation.
"""
import tvm
from tvm import relay, ir
from tvm.relay import testing
from tvm.mrt.utils import *
from tvm.mrt import api, runtime, image, extool, data
from tvm.mrt import stats, dataset
from tvm.mrt import utils

import sys
import numpy as np

batch_size = 1

def load_model_from_mx() -> (ir.IRModule, ParametersT):
    # Alternative model source (unused): load a gluon ResNet18 from MXNet.
    import mxnet as mx
    spath, ppath = gluon.save_model("resnet18_v1", ctx=mx.cpu())
    print(spath, ppath)
    symbol, params = gluon.load_model(spath, ppath)
    return relay.frontend.from_mxnet(symbol, arg_params=params)

if False:
    # Small MLP workload path, disabled.
    num_class = 10
    image_shape = (1, 28, 28)
    mod, params = testing.mlp.get_workload(
        num_classes=num_class,
        image_shape=image_shape,
        batch_size=batch_size)
else:
    num_class = 1000
    image_shape = (3, 224, 224)
    out_shape = (batch_size, num_class)
    # mod, params = load_model_from_mx()
    # mod, params = testing.resnet.get_workload(
    #         batch_size=batch_size,
    #         num_classes=num_class,
    #         num_layers=18,
    #         image_shape=image_shape,)

data_shape = (batch_size,) + image_shape

def load_model_from_torch() -> (ir.IRModule, ParametersT):
    # Trace a torchvision ResNet18 and import it into Relay.
    import torch
    from torchvision import models
    weights = models.ResNet18_Weights.IMAGENET1K_V1
    model = models.resnet18(weights=weights)
    model = model.eval()
    input_data = torch.randn(data_shape)
    script_module = torch.jit.trace(model, [input_data]).eval()
    return relay.frontend.from_pytorch(
        script_module, [("input", data_shape)])

mod, params = load_model_from_torch()

mod: tvm.IRModule = mod
func: relay.function.Function = mod["main"]
expr: ir.RelayExpr = func.body

# expr.simple_raw_print(mod["main"].body, params)

# Bare attribute references below are no-ops, kept as an API cheat-sheet.
relay.Var
relay.var
relay.nn.conv2d
relay.nn.batch_flatten
relay.nn.batch_norm
relay.Tuple
relay.TupleGetItem
relay.expr.TupleWrapper
ir.tensor_type.TensorType
ir.type.TupleType

# mrt_model = model.from_mod(mod, params)
# mrt_model = mrt_model.set_input_shape((16,) + image_shape)
# mrt_model.print()
# mod = mrt_model.to_mod()
# mod: tvm.IRModule = relay.transform.InferType()(mod)
# print(mod.astext(show_meta_data=False))

# tr = api.Trace("init", expr, params).infer_type()
from tvm.mrt import trace
from tvm.mrt.symbol import *
tr = trace.Trace.from_expr(expr, params)

@filter_operators(TUPLE_GET_ITEM_NAME)
def fuse_batch_norm(expr: relay.expr.Call, params: ParametersT):
    # Replace batch_norm's TupleGetItem with the normalized data input.
    if extool.op_name(expr.tuple_value) == "nn.batch_norm":
        return expr.tuple_value.args[0]
    assert False
# tr = tr.transform(fuse_batch_norm)

from tvm.mrt.calibrate import Calibrator

# def calibrate(sym: Symbol, params: ParametersT):
#     # print("apply calibrate for {}".format(sym))
#     data = None
#     if is_input(sym, params):
#         data = np.random.randn(*sym.shape).astype(sym.dtype)
#         data = tvm.nd.array(data)
#     elif is_param(sym, params):
#         data = params[sym.name]
#     return sym.clone(Calibrator, init_data=data)

tvm.nd.NDArray
tr.print()
# calibrate_tr = tr.transform(calibrate)
calibrate_tr = tr.transform(Calibrator.apply())
print("\n\n\n")

def _cast(sym: Calibrator, params: ParametersT):
    # Debug hook: print the shape of each calibrated symbol's first output.
    print("cast: ", sym.output[0].shape)

calibrate_tr.transform(_cast)

sys.exit(1)
# --- everything below this exit is dead code ---

# ctx = tvm.runtime.cuda(1)

from tvm.mrt.fuse import FusionOp

def fuse(sym: Symbol, params: ParametersT):
    return sym.clone(FusionOp, params=params)

fuse_tr = tr.transform(fuse)
sys.exit(1)

# print("\n", expr.astext(show_meta_data=False))

from torch.utils.data import DataLoader
import torch
from torch.utils.data import Dataset
from torchvision import datasets
from torchvision.transforms import ToTensor
import PIL

def to_tensor(img: PIL.Image.Image):
    # Resize to the model input and convert HWC uint8 -> CHW float32.
    img = img.resize(image_shape[1:])
    img = np.array(img).astype("float32")
    img = np.transpose(img, (2, 1, 0))
    return img

# NOTE(review): `path` is presumably os.path re-exported by tvm.mrt.utils — confirm.
val_data = datasets.ImageFolder(
    path.join(utils.MRT_DATASET_ROOT, "imagenet/val"),
    transform=to_tensor)

data_loader = DataLoader(val_data, batch_size=1)

# class TorchImageNet(dataset.Dataset):
#     def __init__(self):
#         self.data_loader = data_loader
#         self._max = len(self.data_loader)
#         self.reset()

#     def reset(self):
#         self._iter = iter(self.data_loader)

#     def next(self):
#         try:
#             data, label = next(self._iter)
#             return data.numpy(), label.numpy()
#         except Exception as e:
#             return None

# data, label = next(iter(data_loader))
# data, label = data.numpy(), label.numpy()
# print(type(data), data.shape, type(label), label)
# sys.exit(1)

# tr.print()

# outs = tr.calibrate()
# print(outs.keys())

# tr_eval = tr.eval(ctx)
# runtime.multiple_validate(tr_eval, TorchImageNet(),
#         stats.ClassificationOutput,)

# test accuracy
# data = image.get_real_image(*image_shape[1:])
# NOTE(review): `ctx` and `data` are undefined here (their assignments are
# commented out above) — this line would raise NameError if ever reached.
res = tr.run(data, device=ctx)
# res = mrt_model.run(data)
# print(res.shape, res.dtype)

# input_data = data.random_inputs(new_expr, params)
# res = runtime.infer(new_expr, input_data)

out = stats.ClassificationOutput()
out.merge([res[0], [0,]])
out.dl_info()
print("labels: ", dataset.ImageNet().labels(out.dl_top5[0]))

# fuse pass: fold_constant, fuse_batch_norm, quantize
# compare accuracy
# to_cvm

# for k, v in params.items():
#     print(k, type(v))
#     continue

# set show_meta_data=True if you want to show meta data
# print(mod.astext(show_meta_data=False))

# @ir.transform.module_pass(opt_level=2)
# def transform(mod, ctx):
#     tp = relay.TensorType((10,), "float32")
#     x = relay.var("x", tp)
#     func = relay.Function([x], relay.abs(x))
#     gv = relay.GlobalVar("myabs")
#     # new_mod = tvm.IRModule({gv: func})
#     new_mod = tvm.IRModule()
#     new_mod["myabs"] = func
#     new_mod.update(mod)
#     return new_mod

# print(relay.analysis.all_vars(mod["main"]))

# module_pass = transform
# assert isinstance(module_pass, ir.transform.ModulePass)
# assert module_pass.info.opt_level == 2

x = relay.var("x", shape=(1, 3, 28, 28), dtype="float32")
y = relay.var("y", shape=(28,), dtype="float32")
out = x + y
out = relay.abs(out)

a = relay.Constant(tvm.nd.array(np.ones((28,), dtype="float32")))
b = relay.Constant(tvm.nd.array(np.ones((28,), dtype="float32")))
c = a + b
out = out + c

# NOTE(review): `_collect_ops` is never defined in this file — NameError if reached.
relay.analysis.post_order_visit(out, _collect_ops)

mod = tvm.IRModule()
mod["main"] = relay.Function([x, y], out)
mod = relay.transform.FoldConstant()(mod)
print(mod.astext(show_meta_data=False))
sys.exit(1)

# mod = tvm.IRModule()
# mod["main"] = relay.Function([x, y], out)
# print(str(mod))
# mod = module_pass(mod)
# print("2", str(mod))
# # out = mod["myabs"](out)
# # mod["main"] = relay.Function([x, y], out)
# # print("1", str(mod))

# # mod = create_relay_module_from_model() # Output: Figure 1
import pprint
from tvm.relay.op.contrib import register
from tvm.relay.op.contrib import cvm
pattern_table = register.get_pattern_table("cvm")
pprint.pprint([p[0] for p in pattern_table])
mod = relay.transform.MergeComposite(pattern_table)(mod)
# mod = relay.transform.AnnotateTarget(["dnnl"])(mod) # Output: Figure 2
# mod = relay.transform.MergeCompilerRegions()(mod) # Output: Figure 3
# mod = relay.transform.PartitionGraph()(mod) # Output: Figure 4
print("3", mod.astext(show_meta_data=False))
https://github.com/zk-ml/tachikoma
tests/crt/buffer_write_stream.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ #ifndef TESTS_CRT_BUFFER_WRITE_STREAM_H_ #define TESTS_CRT_BUFFER_WRITE_STREAM_H_ #include <inttypes.h> #include <tvm/runtime/crt/rpc_common/frame_buffer.h> #include <tvm/runtime/crt/rpc_common/write_stream.h> #include <string> using ::tvm::runtime::micro_rpc::FrameBuffer; using ::tvm::runtime::micro_rpc::WriteStream; template <unsigned int N> class BufferWriteStream : public WriteStream { public: ssize_t Write(const uint8_t* data, size_t data_size_bytes) override { return buffer_.Write(data, data_size_bytes); } void Reset() { buffer_.Clear(); packet_done_ = false; } inline bool packet_done() { return packet_done_; } inline bool is_valid() { return is_valid_; } void PacketDone(bool is_valid) override { EXPECT_FALSE(packet_done_); packet_done_ = true; is_valid_ = is_valid; } std::string BufferContents() { return std::string((const char*)buffer_data_, buffer_.Size()); } static constexpr unsigned int capacity() { return N; } private: bool packet_done_{false}; bool is_valid_{false}; uint8_t buffer_data_[N]; FrameBuffer buffer_{buffer_data_, N}; }; #endif // TESTS_CRT_BUFFER_WRITE_STREAM_H_
https://github.com/zk-ml/tachikoma
tests/crt/contrib/stm32/src/main.c
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ #include <inttypes.h> #include <math.h> #include <stdarg.h> #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include "ai_runtime_api.h" #include "network.h" #include "network_data.h" // // Network that we are testing // extern ai_model_info network_network; // // Dummy: for the runtime // uint32_t __models_section_start__ = (uint32_t)&network_network; uint32_t __models_section_end__ = (uint32_t)&network_network + sizeof(ai_model_info); static ai_model_info* _model_p = &network_network; // // Global handle to reference the instantiated NN // static ai_handle _network = AI_HANDLE_NULL; static uint8_t LoadInputImg(const char* filename, ai_tensor* input); static int32_t quantize_val(float val, ai_quantization_info* quant); static float dequantize_val(int32_t val, ai_quantization_info* quant); // ================================================================= // Convert_Fixed_To_Float // ================================================================= static float Convert_Fixed_To_Float(uint8_t data, int8_t fl) { uint8_t val = data; float x; if (fl >= 0) { x = ((float)val) / (float)(1 << fl); // NOLINT } else { x = ((float)val) / (1 / (float)(1 << fl)); // NOLINT } 
return x; } // ======================================================= // error // ======================================================= static void error(const char* fmt, ...) { va_list vp; char emsg[512]; int32_t loc = 0; // // Prepare main error message: // va_start(vp, fmt); loc += vsprintf(&emsg[loc], fmt, vp); va_end(vp); // fputs (emsg, stderr); // fflush (stderr); fprintf(stderr, " #### Error: %s.\n", emsg); exit(-1); } // ================================================== // aiLogErr // ================================================== static void aiLogErr(const char* fct, const char* msg) { if (fct) { printf("E: AI error: %s - %s\r\n", fct, msg); } else { printf("E: AI error - %s\r\n", msg); } } // ================================================== // aiPrintLayoutBuffer // ================================================== static void aiPrintLayoutBuffer(const char* msg, int idx, ai_tensor* tensor) { DLTensor* dltensor = get_dltensor(tensor); DLDataType dtype = dltensor->dtype; printf("%s[%d] ", msg, idx); printf(" (%u, %u, %u)", dtype.code, dtype.bits, dtype.lanes); // // Quantization info exists for input/output tensors // const ai_quantization_info* quant = ai_get_quantization(tensor); if (quant != NULL) { printf(" -- TODO: quantization info \n"); } int32_t size = get_tensor_size(tensor); printf(" %d bytes, shape=(", size); for (int i = 0; i < dltensor->ndim; ++i) { printf("%d,", (int32_t)dltensor->shape[i]); } printf("), address = 0x%08x\r\n", (unsigned int)dltensor->data); } // ================================================== // aiPrintNetworkInfo // ================================================== static void aiPrintNetworkInfo(ai_handle network) { const char* name = ai_get_name(network); const char* datetime = ai_get_datetime(network); const char* revision = ai_get_revision(network); const char* tool_version = ai_get_tool_version(network); const char* api_version = ai_get_api_version(network); uint32_t n_nodes = ai_get_node_size(network); 
uint32_t n_inputs = ai_get_input_size(network); uint32_t n_outputs = ai_get_output_size(network); uint32_t activations_size = ai_get_activations_size(network); uint32_t params_size = ai_get_params_size(network); printf("Network configuration...\r\n"); printf(" Model name : %s\r\n", name); printf(" Compile datetime : %s\r\n", datetime); printf(" Tool revision : %s (%s)\r\n", revision, tool_version); printf(" API version : %s\r\n", api_version); printf("Network info...\r\n"); printf(" nodes : %d\r\n", n_nodes); printf(" activation : %d bytes\r\n", activations_size); printf(" params : %d bytes\r\n", params_size); printf(" inputs/outputs : %u/%u\r\n", n_inputs, n_outputs); } // ====================================================== // aiInit // ====================================================== static int aiInit(void) { ai_status err = AI_STATUS_OK; const char* nn_name = AI_MODEL_name(_model_p); ai_ptr built_in_activations = AI_MODEL_activations(_model_p); // // Creating the network // printf("Creating the network \"%s\"..\r\n", nn_name); err = ai_create(_model_p, built_in_activations, &_network); if (err != AI_STATUS_OK) { const char* msg = ai_get_error(_network); aiLogErr("ai_create", msg); return -1; } // // Query the created network to get relevant info from it // aiPrintNetworkInfo(_network); uint32_t n_inputs = ai_get_input_size(_network); uint32_t n_outputs = ai_get_output_size(_network); uint32_t activations_size = ai_get_activations_size(_network); uint32_t params_size = ai_get_params_size(_network); const ai_ptr params = ai_get_params(_network); ai_ptr activations = ai_get_activations(_network); printf("Weights buffer : 0x%08x %d bytes)\r\n", (unsigned int)params, (unsigned int)params_size); printf("Activation buffer : 0x%08x (%d bytes) %s\r\n", (unsigned int)activations, (unsigned int)activations_size, ((uint32_t)activations & (uint32_t)0xFF000000) ? 
"internal" : "external"); printf("Inputs:\r\n"); for (int i = 0; i < n_inputs; i++) { ai_tensor* input = ai_get_input(_network, i); aiPrintLayoutBuffer(" I", i, input); } printf("Outputs:\r\n"); for (int i = 0; i < n_outputs; i++) { ai_tensor* output = ai_get_output(_network, i); aiPrintLayoutBuffer(" O", i, output); } return 0; } // ====================================================== // aiDeInit // ====================================================== static void aiDeInit(void) { ai_status err = AI_STATUS_OK; printf("Releasing the network(s)...\r\n"); if (ai_destroy(_network) != AI_STATUS_OK) { const char* err = ai_get_error(_network); aiLogErr("ai_destroy", err); } _network = AI_HANDLE_NULL; return; } // ================================================================= // argmax // // Description : return argument of table maximum value // Argument : Vector_db *vec: table // Return Value : int: index of max value // ================================================================= static uint8_t argmax(int8_t* vec, uint32_t num) { uint32_t i; uint8_t arg = 0; int8_t imax = vec[0]; for (i = 1; i < num; i++) { imax = (imax > vec[i]) ? 
imax : vec[i]; if (imax == vec[i]) { arg = i; } } return (arg); } // ====================================================== // aiRun // ====================================================== static int aiRun(void) { ai_status err = AI_STATUS_OK; // // Inputs // ai_tensor* input = ai_get_input(_network, 0); if (input == NULL) { const char* err = ai_get_error(_network); aiLogErr("ai_run", err); return -1; } // // Outputs // ai_tensor* output = ai_get_output(_network, 0); if (output == NULL) { const char* err = ai_get_error(_network); aiLogErr("ai_run", err); return -1; } DLDataType out_dtype = output->dltensor.dtype; if (out_dtype.lanes > 1) { printf("E: vector outputs are not supported ...\r\n"); return -1; } uint32_t elts = get_tensor_elts(output); char outfile_name[128]; sprintf(outfile_name, "%s/tvm_results.txt", BUILD_PATH); // NOLINT FILE* outfile = fopen(outfile_name, "w"); for (int i = 0; i <= 9; i++) { char image[128]; sprintf(image, "%s/0%d.raw", IMAGE_PATH, i); // NOLINT printf("Loading input image %s ... 
\n", image); if (LoadInputImg(image, input) != 0) { error("Loading image %s\n", image); } // // Run the inference // printf("Running the network\r\n"); if (ai_run(_network) != AI_STATUS_OK) { const char* err = ai_get_error(_network); aiLogErr("ai_run", err); return -1; } const ai_quantization_info* output_quant = ai_get_quantization(output); if (output_quant == NULL) { // // Floating point model // float* probabilities = (float*)output->dltensor.data; // NOLINT for (int i = 0; i < elts; i++) { float val = probabilities[i]; // printf (" -- probability[%d] = %g \n", i, val); fprintf(outfile, "%g ", val); } } else { // // Quantized model // if (out_dtype.code == kDLInt) { int8_t* probabilities = (int8_t*)output->dltensor.data; // NOLINT for (int i = 0; i < elts; i++) { int8_t qval = probabilities[i]; // printf (" -- probability[%d] = %d \n", i, qval); float val = dequantize_val(qval, output_quant); fprintf(outfile, "%g ", val); } } else { uint8_t* probabilities = (uint8_t*)output->dltensor.data; // NOLINT for (int i = 0; i < elts; i++) { uint8_t qval = probabilities[i]; // printf (" -- probability[%d] = %d \n", i, qval); float val = dequantize_val(qval, output_quant); fprintf(outfile, "%g ", val); } } } fprintf(outfile, "\n"); } fclose(outfile); return 0; } // ================================================================= // quantize_val // ================================================================= static int32_t quantize_val(float val, ai_quantization_info* quant) { float new_val; float input_scale = quant->scale[0]; int32_t input_zero_point = quant->zero_point[0]; new_val = val / input_scale + input_zero_point; return (int32_t)new_val; } // ================================================================= // dequantize_val // ================================================================= static float dequantize_val(int32_t val, ai_quantization_info* quant) { float new_val; float output_scale = quant->scale[0]; int32_t output_zero_point = 
quant->zero_point[0]; new_val = (val - output_zero_point) * output_scale; return new_val; } // ================================================================= // LoadInputImg // ================================================================= uint8_t LoadInputImg(const char* filename, ai_tensor* input) { DLDataType dtype = input->dltensor.dtype; const ai_quantization_info* input_quant = ai_get_quantization(input); if (dtype.lanes > 1) { printf("E: vector inputs are not supported ...\r\n"); return -1; } if (dtype.code == kDLBfloat) { printf("E: Double float inputs are not supported ...\r\n"); return -1; } FILE* file = fopen(filename, "r"); if (file == NULL) { printf("== File %s not found\n", filename); return (-1); } // // Find file size // fseek(file, 0L, SEEK_END); size_t img_size = ftell(file); (void)fseek(file, 0L, SEEK_SET); // printf ("== Image size = %d\n", img_size); uint8_t* image = (uint8_t*)malloc(img_size); // NOLINT size_t size = fread(image, 1, img_size, file); if (size != img_size) { perror("fread"); printf("== Problem reading %s\n", filename); return (-1); } fclose(file); uint32_t x; uint8_t* p = image; uint8_t* pg = (uint8_t*)input->dltensor.data; // NOLINT for (x = 0; x < img_size; x++) { uint8_t val = p[x]; // // Input image needs to be normalized into [0..1] interval // float nval = ((float)val) / 255.0; // NOLINT if (input_quant != NULL) { if (dtype.code == kDLInt) { int8_t qval = quantize_val(nval, input_quant); *pg = qval; pg += sizeof(int8_t); } else { uint8_t qval = quantize_val(nval, input_quant); *pg = qval; pg += sizeof(uint8_t); } } else { *(float*)pg = nval; // NOLINT pg += sizeof(float); } } free(image); return 0; } // ====================================================== // main // ====================================================== int main(int argc, char* argv[]) { int status; status = aiInit(); if (status != 0) { printf("Error initializing.\n"); } status = aiRun(); if (status != 0) { printf("Error running.\n"); } aiDeInit(); 
return (0); }
https://github.com/zk-ml/tachikoma
tests/lint/add_asf_header.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Helper tool to add ASF header to files that cannot be handled by Rat."""
import os
import sys

header_cstyle = """
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
""".strip()

header_mdstyle = """
<!--- Licensed to the Apache Software Foundation (ASF) under one -->
<!--- or more contributor license agreements. See the NOTICE file -->
<!--- distributed with this work for additional information -->
<!--- regarding copyright ownership. The ASF licenses this file -->
<!--- to you under the Apache License, Version 2.0 (the -->
<!--- "License"); you may not use this file except in compliance -->
<!--- with the License. You may obtain a copy of the License at -->
<!--- http://www.apache.org/licenses/LICENSE-2.0 -->
<!--- Unless required by applicable law or agreed to in writing, -->
<!--- software distributed under the License is distributed on an -->
<!--- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -->
<!--- KIND, either express or implied. See the License for the -->
<!--- specific language governing permissions and limitations -->
<!--- under the License. -->
""".strip()

header_pystyle = """
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""".strip()

header_rststyle = """
.. Licensed to the Apache Software Foundation (ASF) under one
   or more contributor license agreements. See the NOTICE file
   distributed with this work for additional information
   regarding copyright ownership. The ASF licenses this file
   to you under the Apache License, Version 2.0 (the
   "License"); you may not use this file except in compliance
   with the License. You may obtain a copy of the License at
..
   http://www.apache.org/licenses/LICENSE-2.0
..
   Unless required by applicable law or agreed to in writing,
   software distributed under the License is distributed on an
   "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
   KIND, either express or implied. See the License for the
   specific language governing permissions and limitations
   under the License.
""".strip()

header_groovystyle = """
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
""".strip()

header_cmdstyle = """
:: Licensed to the Apache Software Foundation (ASF) under one
:: or more contributor license agreements. See the NOTICE file
:: distributed with this work for additional information
:: regarding copyright ownership. The ASF licenses this file
:: to you under the Apache License, Version 2.0 (the
:: "License"); you may not use this file except in compliance
:: with the License. You may obtain a copy of the License at
::
:: http://www.apache.org/licenses/LICENSE-2.0
::
:: Unless required by applicable law or agreed to in writing,
:: software distributed under the License is distributed on an
:: "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
:: KIND, either express or implied. See the License for the
:: specific language governing permissions and limitations
:: under the License.
""".strip()

# File extension -> comment style of the header to insert.
FMT_MAP = {
    "sh": header_pystyle,
    "cc": header_cstyle,
    "c": header_cstyle,
    "mm": header_cstyle,
    "m": header_cstyle,
    "go": header_cstyle,
    "java": header_cstyle,
    "h": header_cstyle,
    "py": header_pystyle,
    "toml": header_pystyle,
    "yml": header_pystyle,
    "yaml": header_pystyle,
    "rs": header_cstyle,
    "md": header_mdstyle,
    "cmake": header_pystyle,
    "mk": header_pystyle,
    "rst": header_rststyle,
    "gradle": header_groovystyle,
    "tcl": header_pystyle,
    "xml": header_mdstyle,
    "storyboard": header_mdstyle,
    "pbxproj": header_cstyle,
    "plist": header_mdstyle,
    "xcworkspacedata": header_mdstyle,
    "html": header_mdstyle,
    "bat": header_cmdstyle,
}


def copyright_line(line):
    """Return True if *line* looks like a per-author copyright notice."""
    # Following two items are intentionally break apart
    # so that the copyright detector won't detect the file itself.
    if line.find("Copyright " + "(c)") != -1:
        return True
    # break pattern into two lines to avoid false-negative check
    spattern1 = "Copyright"
    if line.find(spattern1) != -1 and line.find("by") != -1:
        return True
    return False


def add_header(fname, header):
    """Add header to file, stripping any per-author copyright lines."""
    if not os.path.exists(fname):
        print("Cannot find %s ..." % fname)
        return

    with open(fname) as infile:  # was left unclosed
        lines = infile.readlines()

    has_asf_header = False
    has_copyright = False
    for i, l in enumerate(lines):
        if l.find("Licensed to the Apache Software Foundation") != -1:
            has_asf_header = True
        elif copyright_line(l):
            has_copyright = True
            lines[i] = ""  # drop the copyright line; contributors keep copyright implicitly

    if has_asf_header and not has_copyright:
        print("Skip file %s ..." % fname)
        return

    with open(fname, "w") as outfile:
        # Some first lines (shebang, XML prolog, ...) must stay first.
        skipline = False
        ext = os.path.splitext(fname)[1][1:]
        if not lines:
            skipline = False  # File is empty
        elif lines[0][:2] == "#!":
            skipline = True
        elif lines[0][:2] == "<?":
            skipline = True
        elif lines[0].startswith("<html>"):
            skipline = True
        elif lines[0].startswith("// !$"):
            skipline = True

        if skipline:
            outfile.write(lines[0])
            if not has_asf_header:
                outfile.write(header + "\n\n")
            outfile.write("".join(lines[1:]))
        else:
            if not has_asf_header:
                outfile.write(header + "\n\n")
            outfile.write("".join(lines))
    if not has_asf_header:
        print("Add header to %s" % fname)
    if has_copyright:
        print("Removed copyright line from %s" % fname)


def main(args):
    """Entry point: read a file list and add the matching header to each file."""
    if len(args) != 2:
        print("Usage: python add_asf_header.py <file_list>")
        sys.exit(-1)  # was missing: fell through and crashed on args[1]
    with open(args[1]) as file_list:  # was left unclosed
        for l in file_list:
            if l.startswith("---"):
                continue
            if l.find("File:") != -1:
                l = l.split(":")[-1]
            fname = l.strip()
            if len(fname) == 0:
                continue
            suffix = fname.split(".")[-1]
            if suffix in FMT_MAP:
                add_header(fname, FMT_MAP[suffix])
            elif os.path.basename(fname) == "gradle.properties":
                add_header(fname, FMT_MAP["h"])
            else:
                print("Cannot handle %s ..." % fname)


if __name__ == "__main__":
    main(sys.argv)
https://github.com/zk-ml/tachikoma
tests/lint/check_cmake_options.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import re
from pathlib import Path

REPO_ROOT = Path(__file__).resolve().parent.parent.parent
LIBINFO_CC = REPO_ROOT / "src" / "support" / "libinfo.cc"
LIBINFO_CMAKE = REPO_ROOT / "cmake" / "modules" / "LibInfo.cmake"
CMAKELISTS = REPO_ROOT / "CMakeLists.txt"

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Check that CMake options are mirrored to libinfo.cc"
    )
    # fix: parse_args() was never called, so --help and stray-argument
    # validation silently did nothing.
    parser.parse_args()

    with open(CMAKELISTS) as f:
        cmake = f.readlines()

    with open(LIBINFO_CC) as f:
        libinfo = f.read()

    with open(LIBINFO_CMAKE) as f:
        libinfo_cmake = f.read()

    # Read tvm_options from CMakeLists.txt (the option name is the first
    # space-delimited token after "tvm_option(").
    options = []
    for line in cmake:
        m = re.search(r"tvm_option\((.*?) ", line)
        if m is not None:
            options.append(m.groups()[0])

    # Check that each option is present in libinfo.cc
    missing_lines = []
    for option in options:
        expected_line = f'  {{"{option}", TVM_INFO_{option}}},'
        if expected_line not in libinfo:
            missing_lines.append(expected_line)

    error = False
    if len(missing_lines) > 0:
        missing_lines = "\n".join(missing_lines)
        print(
            f"Missing these lines from {LIBINFO_CC.relative_to(REPO_ROOT)}, please update it\n{missing_lines}"
        )
        error = True

    # Check that each option has a compile definition in LibInfo.cmake
    missing_cmake_lines = []
    for option in options:
        expected_line = f'    TVM_INFO_{option}="${{{option}}}"'
        if expected_line not in libinfo_cmake:
            missing_cmake_lines.append(expected_line)

    if len(missing_cmake_lines) > 0:
        missing_cmake_lines = "\n".join(missing_cmake_lines)
        print(
            f"Missing these lines from {LIBINFO_CMAKE.relative_to(REPO_ROOT)}, please update it\n{missing_cmake_lines}"
        )
        error = True

    if error:
        exit(1)
https://github.com/zk-ml/tachikoma
tests/lint/check_file_type.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Helper tool to check file types that are allowed to checkin."""
import os
import sys
import subprocess

# List of file types we allow
ALLOW_EXTENSION = {
    # source code
    "cc",
    "c",
    "h",
    "s",
    "rs",
    "m",
    "mm",
    "g4",
    "gradle",
    "js",
    "tcl",
    "scala",
    "java",
    "go",
    "ts",
    "sh",
    "py",
    "pyi",
    "pxi",
    "pyd",
    "pyx",
    "cu",
    "bat",
    # relay text format
    "rly",
    # configurations
    "mk",
    "in",
    "cmake",
    "xml",
    "toml",
    "yml",
    "yaml",
    "json",
    # docs
    "txt",
    "md",
    "rst",
    # sgx
    "edl",
    "lds",
    # ios
    "pbxproj",
    "plist",
    "xcworkspacedata",
    "storyboard",
    "xcscheme",
    # hw/chisel
    "sbt",
    "properties",
    "v",
    "sdc",
    # generated parser
    "interp",
    "tokens",
    # interface definition
    "idl",
    # opencl file
    "cl",
    # zephyr config file
    "conf",
    # arduino sketch file
    "ino",
    # linker scripts
    "ld",
    # Jinja2 templates
    "j2",
}

# List of file names allowed
ALLOW_FILE_NAME = {
    ".gitignore",
    ".eslintignore",
    ".gitattributes",
    "README",
    "Makefile",
    "Doxyfile",
    "pylintrc",
    "condarc",
    "rat-excludes",
    "log4j.properties",
    ".clang-format",
    ".gitmodules",
    "CODEOWNERSHIP",
    ".scalafmt.conf",
    "Cargo.lock",
    "with_the_same_user",
}

# List of specific files allowed in relpath to <proj_root>
ALLOW_SPECIFIC_FILE = {
    "LICENSE",
    "NOTICE",
    "KEYS",
    "DISCLAIMER",
    "Jenkinsfile",
    "mypy.ini",
    # cargo config
    "rust/runtime/tests/test_wasm32/.cargo/config",
    "rust/tvm-graph-rt/tests/test_wasm32/.cargo/config",
    "apps/sgx/.cargo/config",
    "apps/wasm-standalone/wasm-graph/.cargo/config",
    # html for demo purposes
    "web/apps/browser/rpc_server.html",
    # images are normally not allowed
    # discuss with committers before add more images
    "apps/android_rpc/app/src/main/res/mipmap-hdpi/ic_launcher.png",
    "apps/android_rpc/app/src/main/res/mipmap-mdpi/ic_launcher.png",
    # documentation related files
    "docs/_static/css/tvm_theme.css",
    "docs/_static/img/tvm-logo-small.png",
    "docs/_static/img/tvm-logo-square.png",
    # pytest config
    "pytest.ini",
    # microTVM tests
    "tests/micro/testdata/mnist/digit-2.jpg",
    "tests/micro/testdata/mnist/digit-9.jpg",
    "tests/micro/testdata/mnist/mnist-8.onnx",
    # microTVM Zephyr runtime
    "apps/microtvm/zephyr/template_project/CMakeLists.txt.template",
    "apps/microtvm/zephyr/template_project/qemu-hack/qemu-system-arm",
    "apps/microtvm/zephyr/template_project/qemu-hack/qemu-system-xilinx-aarch64",
    "apps/microtvm/zephyr/template_project/qemu-hack/qemu-system-i386",
    "apps/microtvm/zephyr/template_project/qemu-hack/qemu-system-riscv32",
    "apps/microtvm/zephyr/template_project/qemu-hack/qemu-system-riscv64",
    "apps/microtvm/zephyr/template_project/fvp-hack/FVP_Corstone_SSE-300_Ethos-U55",
    "apps/microtvm/zephyr/template_project/app-overlay/nucleo_l4r5zi.overlay",
    # microTVM Arduino runtime
    "apps/microtvm/arduino/template_project/Makefile.template",
    # microTVM CRT
    "src/runtime/crt/host/Makefile.template",
    # microTVM Virtual Machines
    "apps/microtvm/poetry.lock",
    "apps/microtvm/reference-vm/Vagrantfile",
    "apps/microtvm/reference-vm/base-box/Vagrantfile.packer-template",
    # Hexagon
    "src/runtime/hexagon/rpc/android_bash.sh.template",
    "src/runtime/hexagon/profiler/lwp_handler.S",
}


def filename_allowed(name):
    """Check if name is allowed by the current policy.

    Parameters
    ----------
    name : str
        Input name

    Returns
    -------
    allowed : bool
        Whether the filename is allowed.
    """
    arr = name.rsplit(".", 1)
    if arr[-1] in ALLOW_EXTENSION:
        return True

    if os.path.basename(name) in ALLOW_FILE_NAME:
        return True

    if os.path.basename(name).startswith("Dockerfile"):
        return True

    if name.startswith("3rdparty"):
        return True

    if name in ALLOW_SPECIFIC_FILE:
        return True

    return False


def copyright_line(line):
    """Return True if *line* looks like a per-author copyright notice."""
    # Following two items are intentionally break apart
    # so that the copyright detector won't detect the file itself.
    if line.find("Copyright " + "(c)") != -1:
        return True
    # break pattern into two lines to avoid false-negative check
    spattern1 = "Copyright"
    if line.find(spattern1) != -1 and line.find("by") != -1:
        return True
    return False


def check_asf_copyright(fname):
    """Return False only when a file carries BOTH an ASF header and a
    per-author copyright line (the combination we reject)."""
    if fname.endswith(".png"):
        return True
    if not os.path.isfile(fname):
        return True
    has_asf_header = False
    has_copyright = False
    try:
        with open(fname) as f:  # was opened without closing
            for line in f:
                if line.find("Licensed to the Apache Software Foundation") != -1:
                    has_asf_header = True
                if copyright_line(line):
                    has_copyright = True
                if has_asf_header and has_copyright:
                    return False
    except UnicodeDecodeError:
        # Binary-ish file; the extension allow-list is the gatekeeper here.
        pass
    return True


def main():
    cmd = ["git", "ls-files"]
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    (out, _) = proc.communicate()
    assert proc.returncode == 0, f'{" ".join(cmd)} errored: {out}'
    res = out.decode("utf-8")
    flist = res.split()
    error_list = []

    for fname in flist:
        if not filename_allowed(fname):
            error_list.append(fname)

    if error_list:
        report = "------File type check report----\n"
        report += "\n".join(error_list)
        report += "\nFound %d files that are not allowed\n" % len(error_list)
        report += (
            "We do not check in binary files into the repo.\n"
            "If necessary, please discuss with committers and"
            "modify tests/lint/check_file_type.py to enable the file you need.\n"
        )
        sys.stderr.write(report)
        sys.stderr.flush()
        sys.exit(-1)

    asf_copyright_list = []

    for fname in res.split():
        if not check_asf_copyright(fname):
            asf_copyright_list.append(fname)

    if asf_copyright_list:
        report = "------File type check report----\n"
        report += "\n".join(asf_copyright_list) + "\n"
        report += "------Found %d files that has ASF header with copyright message----\n" % len(
            asf_copyright_list
        )
        report += "--- Files with ASF header do not need Copyright lines.\n"
        report += "--- Contributors retain copyright to their contribution by default.\n"
        report += "--- If a file comes with a different license, consider put it under the 3rdparty folder instead.\n"
        report += "---\n"
        report += "--- You can use the following steps to remove the copyright lines\n"
        report += "--- Create file_list.txt in your text editor\n"
        report += "--- Copy paste the above content in file-list into file_list.txt\n"
        report += "--- python3 tests/lint/add_asf_header.py file_list.txt\n"
        sys.stderr.write(report)
        sys.stderr.flush()
        sys.exit(-1)

    print("check_file_type.py: all checks passed..")


if __name__ == "__main__":
    main()
https://github.com/zk-ml/tachikoma
tests/lint/check_request_hook.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import fnmatch
import re
from pathlib import Path
from typing import List, Optional

REPO_ROOT = Path(__file__).resolve().parent.parent.parent

# The exact snippet every gallery tutorial must contain, placed before its
# first code block.
EXPECTED = """
# sphinx_gallery_start_ignore
from tvm import testing

testing.utils.install_request_hook(depth=3)
# sphinx_gallery_end_ignore
""".rstrip()

IGNORE_PATTERNS = ["*/micro_tvmc.py", "*/micro_train.py"]
APACHE_HEADER_LINES = 16


def find_code_block_line(lines: List[str]) -> Optional[int]:
    """
    This returns the index in 'lines' of the first line of code in the
    tutorial or none if there are no code blocks.

    NOTE(review): the returned index is relative to the list AFTER the
    first APACHE_HEADER_LINES lines are dropped; callers compare it with
    absolute line counts — confirm that offset is intended.
    """
    in_multiline_string = False
    in_sphinx_directive = False

    i = 0
    lines = lines[APACHE_HEADER_LINES:]
    while i < len(lines):
        line = lines[i].strip()
        if '"""' in line:
            in_multiline_string = not in_multiline_string
        elif "# sphinx_gallery_" in line:
            in_sphinx_directive = not in_sphinx_directive
        elif line.startswith("#") or in_sphinx_directive or in_multiline_string or line == "":
            pass
        else:
            return i
        i += 1

    return None


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Check that all tutorials/docs override urllib.request.Request"
    )
    parser.add_argument(
        "--fix", action="store_true", help="Insert expected code into erroring files"
    )
    args = parser.parse_args()

    gallery_files = (REPO_ROOT / "gallery").glob("**/*.py")

    errors = []
    for file in gallery_files:
        skip = False
        for ignored_file in IGNORE_PATTERNS:
            if fnmatch.fnmatch(str(file), ignored_file):
                skip = True
                break
        if skip:
            continue

        with open(file) as f:
            content = f.read()

        if EXPECTED not in content:
            errors.append((file, None))
            continue

        index = content.index(EXPECTED)
        line = content.count("\n", 0, index) + EXPECTED.count("\n") + 2
        expected = find_code_block_line(content.split("\n"))
        if expected is not None and line < expected:
            errors.append((file, (line, expected)))

    if args.fix:
        for error, line_info in errors:
            with open(error) as f:
                content = f.read()

            # Note: There must be a little bit of care taken here since inserting
            # the block between a comment and multiline string will lead to an
            # empty code block in the HTML output
            if "from __future__" in content:
                # Place after the last __future__ import
                new_content = re.sub(
                    r"((?:from __future__.*?\n)+)", r"\1\n" + EXPECTED, content, flags=re.MULTILINE
                )
            else:
                # Place in the first codeblock
                lines = content.split("\n")
                position = find_code_block_line(lines)
                if position is None:
                    new_content = "\n".join(lines) + EXPECTED + "\n"
                else:
                    # (removed a stray debug print(position) here)
                    new_content = (
                        "\n".join(lines[:position]) + EXPECTED + "\n\n" + "\n".join(lines[position:])
                    )

            with open(error, "w") as f:
                f.write(new_content)
    else:
        # Don't fix, just check and print an error message
        if len(errors) > 0:
            print(
                f"These {len(errors)} file(s) did not contain the expected text to "
                "override urllib.request.Request, it was at the wrong position, or "
                "the whitespace is incorrect.\n"
                "You can run 'python3 tests/lint/check_request_hook.py --fix' to "
                "automatically fix these errors:\n"
                f"{EXPECTED}\n\nFiles:"
            )
            for file, line_info in errors:
                if line_info is None:
                    print(f"{file} (missing hook)")
                else:
                    actual, expected = line_info
                    print(f"{file} (misplaced hook at {actual}, expected at {expected})")
            exit(1)
        else:
            print("All files successfully override urllib.request.Request")
            exit(0)
https://github.com/zk-ml/tachikoma
tests/lint/filter_untracked.py
#!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
"""Filter a newline-separated list of paths read from stdin, writing to stdout
only those paths that are tracked by git (i.e. dropping untracked and ignored
files, plus the .git directory itself)."""
import os.path
import subprocess
import sys


def check_output(args, **kw):
    """Run *args* as a subprocess and return its stdout decoded as UTF-8.

    Exits the whole script with status 2 (writing a diagnostic to stderr)
    if the command returns a nonzero status.

    NOTE(review): the previous version carried a Python 2 fallback that
    referenced the undefined name ``unicode``; this script is Python 3 only
    (see shebang), so the dead branch has been removed.
    """
    proc = subprocess.Popen(args, **kw, stdout=subprocess.PIPE)
    out, _ = proc.communicate()
    if proc.returncode:
        sys.stderr.write("exited with code %d: %s\n" % (proc.returncode, " ".join(args)))
        sys.exit(2)

    return str(out, "utf-8")


def main():
    """Read paths from stdin and echo only git-tracked ones to stdout."""
    script_dir = os.path.dirname(__file__) or os.getcwd()
    toplevel_dir = check_output(["git", "rev-parse", "--show-toplevel"], cwd=script_dir).strip("\n")

    # NOTE: --ignore-submodules because this can drag in some problems related to mounting a git
    # worktree in the docker VM in a different location than it exists on the host. The problem
    # isn't quite clear, but anyhow it shouldn't be necessary to filter untracked files in
    # submodules here.
    # (The flag was missing from the command even though this NOTE documents it;
    # it is now actually passed.)
    git_status_output = check_output(
        ["git", "status", "-s", "--ignored", "--ignore-submodules"], cwd=toplevel_dir
    )
    # "?? " marks untracked entries, "!! " marks ignored entries; either way we
    # want to drop them from the stream. The path starts at column 3.
    untracked = [
        line[3:]
        for line in git_status_output.split("\n")
        if line.startswith("?? ") or line.startswith("!! ")
    ]

    # also add .git in case rat picks up files in .git or the .git file (if a worktree).
    toplevel_git_dentry = os.path.join(toplevel_dir, ".git")
    if os.path.isfile(toplevel_git_dentry):
        # .git is a file when the checkout is a worktree; match it exactly.
        untracked.append(".git")
    else:
        # Trailing "/" means prefix-match (everything under the directory).
        untracked.append(".git/")

    for line in sys.stdin:
        # Normalize "./foo" to "foo" so it compares against git status output.
        cleaned_line = line
        if line[:2] == "./":
            cleaned_line = line[2:]
        cleaned_line = cleaned_line.strip("\n")
        # Entries ending in "/" are directories: prefix match. Others: exact match.
        if any(
            (cleaned_line.startswith(u) if u[-1] == "/" else cleaned_line == u) for u in untracked
        ):
            continue

        # Echo the ORIGINAL line (including any "./" prefix) unchanged.
        sys.stdout.write(line)


if __name__ == "__main__":
    main()
https://github.com/zk-ml/tachikoma
tests/lint/trailing_newlines.py
#!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
"""Lint helper: read filenames from stdin (or argv via fileinput) and report
any file that does not end in exactly one trailing newline."""
import fileinput
import os


def has_one_trailing_newline(filename: str) -> bool:
    """
    Returns True if 'filename' has a single trailing newline
    """
    with open(filename, "rb") as f:
        start_bytes = len(f.read(2))
        if start_bytes == 0:
            # empty file
            return True
        elif start_bytes == 1:
            # 1 byte file: cannot end with "non-newline + newline"
            return False
        else:
            # skip to the end
            f.seek(-2, os.SEEK_END)
            end_bytes = f.read(2)

            # should be a non-newline followed by a newline (this also rejects
            # files ending in two or more newlines)
            return end_bytes[0] != ord("\n") and end_bytes[1] == ord("\n")


if __name__ == "__main__":
    # BUGFIX: the exit codes were inverted (started at 1, set to 0 on a bad
    # file), which made CI pass exactly when violations existed. Start clean
    # and flip to failure when an offending file is found.
    exit_code = 0
    for line in fileinput.input():
        filename = line.rstrip()
        if not has_one_trailing_newline(filename):
            exit_code = 1
            print(filename)
    exit(exit_code)
https://github.com/zk-ml/tachikoma
tests/micro/__init__.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License.
https://github.com/zk-ml/tachikoma
tests/micro/arduino/conftest.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
"""pytest configuration for the microTVM Arduino test suite."""
import pytest

# Pull in the shared microTVM pytest plugin (provides common fixtures such
# as board / microtvm_debug / workspace_dir).
pytest_plugins = [
    "tvm.micro.testing.pytest_plugin",
]


def pytest_addoption(parser):
    """Register the --arduino-cli-cmd command-line option."""
    parser.addoption(
        "--arduino-cli-cmd",
        default="arduino-cli",
        help="Path to `arduino-cli` command for flashing device.",
    )


def pytest_configure(config):
    """Declare the requires_hardware marker so pytest does not warn on it."""
    config.addinivalue_line(
        "markers", "requires_hardware: mark test to run only when an Arduino board is connected"
    )


@pytest.fixture(scope="session")
def arduino_cli_cmd(request):
    """Session-scoped fixture exposing the --arduino-cli-cmd option value."""
    return request.config.getoption("--arduino-cli-cmd")
https://github.com/zk-ml/tachikoma
tests/micro/arduino/test_arduino_error_detection.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import sys import pytest from tvm.micro.project_api.server import ServerError import test_utils import tvm.testing @pytest.fixture def project(board, arduino_cli_cmd, microtvm_debug, workspace_dir): return test_utils.make_kws_project(board, arduino_cli_cmd, microtvm_debug, workspace_dir) def test_blank_project_compiles(workspace_dir, project): project.build() # Add a bug (an extra curly brace) and make sure the project doesn't compile def test_bugged_project_compile_fails(workspace_dir, project): with open(workspace_dir / "project" / "project.ino", "a") as main_file: main_file.write("}\n") with pytest.raises(ServerError): project.build() if __name__ == "__main__": tvm.testing.main()
https://github.com/zk-ml/tachikoma
tests/micro/arduino/test_arduino_rpc_server.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

"""
This unit test simulates an autotuning workflow, where we:
1. Instantiate the Arduino RPC server project
2. Build and flash that project onto our target board
"""

import pathlib
import sys

import numpy as np
import onnx
import pytest
import tvm
import tvm.testing
from PIL import Image
from tvm import relay
from tvm.relay.testing import byoc
from tvm.relay.backend import Executor, Runtime

import test_utils


def _make_session(model, arduino_board, arduino_cli_cmd, workspace_dir, mod, build_config):
    """Generate, build and flash a host-driven project for `mod`, then open
    an RPC session to the board. Callers use the return value as a context
    manager so the session is closed deterministically."""
    project = tvm.micro.generate_project(
        str(test_utils.TEMPLATE_PROJECT_DIR),
        mod,
        workspace_dir / "project",
        {
            "board": arduino_board,
            "arduino_cli_cmd": arduino_cli_cmd,
            "project_type": "host_driven",
            "verbose": bool(build_config.get("debug")),
        },
    )
    project.build()
    project.flash()
    return tvm.micro.Session(project.transport())


def _make_sess_from_op(
    model, arduino_board, arduino_cli_cmd, workspace_dir, op_name, sched, arg_bufs, build_config
):
    """Lower a single TE operator (`sched`/`arg_bufs`) for the micro target
    and return an open session to a board running it."""
    target = tvm.target.target.micro(model)
    runtime = Runtime("crt", {"system-lib": True})
    with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
        mod = tvm.build(sched, arg_bufs, target=target, runtime=runtime, name=op_name)

    return _make_session(model, arduino_board, arduino_cli_cmd, workspace_dir, mod, build_config)


def _make_add_sess(model, arduino_board, arduino_cli_cmd, workspace_dir, build_config):
    """Session running C[i] = A[i] + B[0] over int8 (A: 2 elems, B: scalar)."""
    A = tvm.te.placeholder((2,), dtype="int8")
    B = tvm.te.placeholder((1,), dtype="int8")
    C = tvm.te.compute(A.shape, lambda i: A[i] + B[0], name="C")
    sched = tvm.te.create_schedule(C.op)
    return _make_sess_from_op(
        model, arduino_board, arduino_cli_cmd, workspace_dir, "add", sched, [A, B, C], build_config
    )


# The same test code can be executed on both the QEMU simulation and on real hardware.
@tvm.testing.requires_micro
@pytest.mark.requires_hardware
def test_compile_runtime(board, arduino_cli_cmd, microtvm_debug, workspace_dir):
    """Test compiling the on-device runtime."""
    model = test_utils.ARDUINO_BOARDS[board]
    build_config = {"debug": microtvm_debug}

    # NOTE: run test in a nested function so cPython will delete arrays before closing the session.
    def test_basic_add(sess):
        A_data = tvm.nd.array(np.array([2, 3], dtype="int8"), device=sess.device)
        assert (A_data.numpy() == np.array([2, 3])).all()
        B_data = tvm.nd.array(np.array([4], dtype="int8"), device=sess.device)
        assert (B_data.numpy() == np.array([4])).all()
        C_data = tvm.nd.array(np.array([0, 0], dtype="int8"), device=sess.device)
        assert (C_data.numpy() == np.array([0, 0])).all()

        # Call the "add" kernel on-device through the system library.
        system_lib = sess.get_system_lib()
        system_lib.get_function("add")(A_data, B_data, C_data)
        assert (C_data.numpy() == np.array([6, 7])).all()

    with _make_add_sess(model, board, arduino_cli_cmd, workspace_dir, build_config) as sess:
        test_basic_add(sess)


@tvm.testing.requires_micro
@pytest.mark.requires_hardware
def test_platform_timer(board, arduino_cli_cmd, microtvm_debug, workspace_dir):
    """Test that the on-device platform timer produces plausible timings."""
    model = test_utils.ARDUINO_BOARDS[board]
    build_config = {"debug": microtvm_debug}

    # NOTE: run test in a nested function so cPython will delete arrays before closing the session.
    def test_basic_add(sess):
        A_data = tvm.nd.array(np.array([2, 3], dtype="int8"), device=sess.device)
        assert (A_data.numpy() == np.array([2, 3])).all()
        B_data = tvm.nd.array(np.array([4], dtype="int8"), device=sess.device)
        assert (B_data.numpy() == np.array([4])).all()
        C_data = tvm.nd.array(np.array([0, 0], dtype="int8"), device=sess.device)
        assert (C_data.numpy() == np.array([0, 0])).all()

        # Time the "add" kernel using the device-side evaluator (exercises the
        # platform timer, unlike test_compile_runtime above).
        system_lib = sess.get_system_lib()
        time_eval_f = system_lib.time_evaluator(
            "add", sess.device, number=20, repeat=3, min_repeat_ms=40
        )
        result = time_eval_f(A_data, B_data, C_data)
        assert (C_data.numpy() == np.array([6, 7])).all()
        assert result.mean > 0
        assert len(result.results) == 3

    with _make_add_sess(model, board, arduino_cli_cmd, workspace_dir, build_config) as sess:
        test_basic_add(sess)


@tvm.testing.requires_micro
@pytest.mark.requires_hardware
def test_relay(board, arduino_cli_cmd, microtvm_debug, workspace_dir):
    """Testing a simple relay graph"""
    model = test_utils.ARDUINO_BOARDS[board]
    build_config = {"debug": microtvm_debug}
    shape = (10,)
    dtype = "int8"

    # Construct Relay program: z = x * x + 1 (elementwise over 10 int8s).
    x = relay.var("x", relay.TensorType(shape=shape, dtype=dtype))
    xx = relay.multiply(x, x)
    z = relay.add(xx, relay.const(np.ones(shape=shape, dtype=dtype)))
    func = relay.Function([x], z)

    target = tvm.target.target.micro(model)
    runtime = Runtime("crt", {"system-lib": True})
    with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
        mod = tvm.relay.build(func, target=target, runtime=runtime)

    with _make_session(model, board, arduino_cli_cmd, workspace_dir, mod, build_config) as session:
        graph_mod = tvm.micro.create_local_graph_executor(
            mod.get_graph_json(), session.get_system_lib(), session.device
        )
        graph_mod.set_input(**mod.get_params())
        x_in = np.random.randint(10, size=shape[0], dtype=dtype)
        graph_mod.run(x=x_in)
        result = graph_mod.get_output(0).numpy()
        # Input round-trips unchanged, and output matches x*x + 1.
        tvm.testing.assert_allclose(graph_mod.get_input(0).numpy(), x_in)
        tvm.testing.assert_allclose(result, x_in * x_in + 1)


@tvm.testing.requires_micro
@pytest.mark.requires_hardware
def test_onnx(board, arduino_cli_cmd, microtvm_debug, workspace_dir):
    """Testing a simple ONNX model."""
    model = test_utils.ARDUINO_BOARDS[board]
    build_config = {"debug": microtvm_debug}

    # Load test images.
    this_dir = pathlib.Path(__file__).parent
    mnist_testdata = this_dir.parent / "testdata" / "mnist"
    digit_2 = Image.open(mnist_testdata / "digit-2.jpg").resize((28, 28))
    digit_2 = np.asarray(digit_2).astype("float32")
    digit_2 = np.expand_dims(digit_2, axis=0)

    digit_9 = Image.open(mnist_testdata / "digit-9.jpg").resize((28, 28))
    digit_9 = np.asarray(digit_9).astype("float32")
    digit_9 = np.expand_dims(digit_9, axis=0)

    # Load ONNX model and convert to Relay.
    onnx_model = onnx.load(mnist_testdata / "mnist-8.onnx")
    shape = {"Input3": (1, 1, 28, 28)}
    relay_mod, params = relay.frontend.from_onnx(onnx_model, shape=shape, freeze_params=True)
    relay_mod = relay.transform.DynamicToStatic()(relay_mod)

    # link-params bakes the weights into the generated code, so no set_input
    # of params is needed at runtime.
    target = tvm.target.target.micro(model)
    runtime = Runtime("crt", {"system-lib": True})
    with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
        executor = Executor("graph", {"link-params": True})
        lowered = relay.build(relay_mod, target, params=params, executor=executor, runtime=runtime)
        graph = lowered.get_graph_json()

    with _make_session(
        model, board, arduino_cli_cmd, workspace_dir, lowered, build_config
    ) as session:
        graph_mod = tvm.micro.create_local_graph_executor(
            graph, session.get_system_lib(), session.device
        )

        # Send the digit-2 image and confirm that the correct result is returned.
        graph_mod.set_input("Input3", tvm.nd.array(digit_2))
        graph_mod.run()
        result = graph_mod.get_output(0).numpy()
        print(result)
        assert np.argmax(result) == 2

        # Send the digit-9 image and confirm that the correct result is returned.
        graph_mod.set_input("Input3", tvm.nd.array(digit_9))
        graph_mod.run()
        result = graph_mod.get_output(0).numpy()
        assert np.argmax(result) == 9


def check_result(
    relay_mod,
    model,
    arduino_board,
    arduino_cli_cmd,
    workspace_dir,
    map_inputs,
    out_shape,
    result,
    build_config,
):
    """Helper function to verify results"""
    TOL = 1e-5
    target = tvm.target.target.micro(model)
    runtime = Runtime("crt", {"system-lib": True})
    with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
        mod = tvm.relay.build(relay_mod, target=target, runtime=runtime)

    with _make_session(
        model, arduino_board, arduino_cli_cmd, workspace_dir, mod, build_config
    ) as session:
        rt_mod = tvm.micro.create_local_graph_executor(
            mod.get_graph_json(), session.get_system_lib(), session.device
        )
        rt_mod.set_input(**mod.get_params())
        for name, data in map_inputs.items():
            rt_mod.set_input(name, data)
        # NOTE(review): params are set a second time here; looks redundant with
        # the call above -- presumably harmless, but worth confirming.
        rt_mod.set_input(**mod.get_params())
        rt_mod.run()

        # out_shape / result may each be a single value or a list (one per output).
        out_shapes = out_shape if isinstance(out_shape, list) else [out_shape]
        results = result if isinstance(result, list) else [result]
        for idx, shape in enumerate(out_shapes):
            out = tvm.nd.empty(shape, device=session.device)
            out = rt_mod.get_output(idx, out)
            tvm.testing.assert_allclose(out.numpy(), results[idx], rtol=TOL, atol=TOL)


@tvm.testing.requires_micro
@pytest.mark.requires_hardware
def test_byoc_microtvm(board, arduino_cli_cmd, microtvm_debug, workspace_dir):
    """This is a simple test case to check BYOC capabilities of microTVM"""
    model = test_utils.ARDUINO_BOARDS[board]
    build_config = {"debug": microtvm_debug}
    x = relay.var("x", shape=(10, 10))
    w0 = relay.var("w0", shape=(10, 10))
    w1 = relay.var("w1", shape=(10, 10))
    w2 = relay.var("w2", shape=(10, 10))
    w3 = relay.var("w3", shape=(10, 10))
    w4 = relay.var("w4", shape=(10, 10))
    w5 = relay.var("w5", shape=(10, 10))
    w6 = relay.var("w6", shape=(10, 10))
    w7 = relay.var("w7", shape=(10, 10))

    # C compiler
    z0 = relay.add(x, w0)
    p0 = relay.subtract(z0, w1)
    q0 = relay.multiply(p0, w2)
    z1 = relay.add(x, w3)
    p1 = relay.subtract(z1, w4)
    q1 = relay.multiply(p1, w5)

    # Other parts on TVM
    z2 = relay.add(x, w6)
    q2 = relay.subtract(z2, w7)

    r = relay.concatenate((q0, q1, q2), axis=0)
    f = relay.Function([x, w0, w1, w2, w3, w4, w5, w6, w7], r)
    mod = tvm.IRModule()
    # CcompilerAnnotator marks the first two subgraphs for the external "C
    # compiler" codegen; PartitionGraph then splits them out.
    ann = byoc.CcompilerAnnotator()
    mod["main"] = ann.visit(f)
    mod = tvm.relay.transform.PartitionGraph()(mod)
    mod = tvm.relay.transform.InferType()(mod)

    x_data = np.random.rand(10, 10).astype("float32")
    w_data = []
    for _ in range(8):
        w_data.append(np.random.rand(10, 10).astype("float32"))
    map_inputs = {"w{}".format(i): w_data[i] for i in range(8)}
    map_inputs["x"] = x_data
    # Expected result computed on the host with numpy, mirroring the graph above.
    check_result(
        relay_mod=mod,
        map_inputs=map_inputs,
        out_shape=(30, 10),
        result=np.concatenate(
            (
                ((x_data + w_data[0]) - w_data[1]) * w_data[2],
                ((x_data + w_data[3]) - w_data[4]) * w_data[5],
                x_data + w_data[6] - w_data[7],
            ),
            axis=0,
        ),
        model=model,
        build_config=build_config,
        arduino_board=board,
        arduino_cli_cmd=arduino_cli_cmd,
        workspace_dir=workspace_dir,
    )


def _make_add_sess_with_shape(
    model, arduino_board, arduino_cli_cmd, workspace_dir, shape, build_config
):
    """Session running C[i] = A[i] + A[i] over int8 for an arbitrary `shape`."""
    A = tvm.te.placeholder(shape, dtype="int8")
    C = tvm.te.compute(A.shape, lambda i: A[i] + A[i], name="C")
    sched = tvm.te.create_schedule(C.op)
    return _make_sess_from_op(
        model, arduino_board, arduino_cli_cmd, workspace_dir, "add", sched, [A, C], build_config
    )


@pytest.mark.parametrize(
    "shape,",
    [
        pytest.param((1 * 1024,), id="(1*1024)"),
        pytest.param((4 * 1024,), id="(4*1024)"),
        pytest.param((16 * 1024,), id="(16*1024)"),
    ],
)
@tvm.testing.requires_micro
@pytest.mark.requires_hardware
def test_rpc_large_array(board, arduino_cli_cmd, microtvm_debug, workspace_dir, shape):
    """Test large RPC array transfer."""
    model = test_utils.ARDUINO_BOARDS[board]
    build_config = {"debug": microtvm_debug}

    # NOTE: run test in a nested function so cPython will delete arrays before closing the session.
    def test_tensors(sess):
        # Only the round-trip of the data is checked here; the kernel is not run.
        a_np = np.random.randint(low=-128, high=127, size=shape, dtype="int8")

        A_data = tvm.nd.array(a_np, device=sess.device)
        assert (A_data.numpy() == a_np).all()
        C_data = tvm.nd.array(np.zeros(shape, dtype="int8"), device=sess.device)
        assert (C_data.numpy() == np.zeros(shape)).all()

    with _make_add_sess_with_shape(
        model, board, arduino_cli_cmd, workspace_dir, shape, build_config
    ) as sess:
        test_tensors(sess)


if __name__ == "__main__":
    tvm.testing.main()
https://github.com/zk-ml/tachikoma
tests/micro/arduino/test_arduino_workflow.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

import pathlib
import re
import shutil
import sys

import pytest

import tvm.testing

import test_utils

"""
This unit test simulates a simple user workflow, where we:
1. Generate a base sketch using a simple audio model
2. Modify the .ino file, much like a user would
3. Compile the sketch for the target board
-- If physical hardware is present --
4. Upload the sketch to a connected board
5. Open a serial connection to the board
6. Use serial connection to ensure model behaves correctly
"""


# Since these tests are sequential, we'll use the same project/workspace
# directory for all tests in this file. Note that --board can't be loaded
# from the fixture, since the fixture is function scoped (it has to be
# for the tests to be named correctly via parameterization).
@pytest.fixture(scope="module")
def workflow_workspace_dir(request):
    board = request.config.getoption("--board")
    return test_utils.make_workspace_dir("arduino_workflow", board)


@pytest.fixture(scope="module")
def project_dir(workflow_workspace_dir):
    # The generated sketch lives under <workspace>/project.
    return workflow_workspace_dir / "project"


# We MUST pass workspace_dir, not project_dir, or the workspace will be dereferenced
# too soon. We can't use the board fixture either for the reason mentioned above.
@pytest.fixture(scope="module")
def project(request, arduino_cli_cmd, microtvm_debug, workflow_workspace_dir):
    board = request.config.getoption("--board")
    return test_utils.make_kws_project(
        board, arduino_cli_cmd, microtvm_debug, workflow_workspace_dir
    )


def _get_directory_elements(directory):
    # Names (not paths) of the direct children of `directory`.
    return set(f.name for f in directory.iterdir())


def test_project_folder_structure(project_dir, project):
    # The project root must contain at least the API server, the sketch and src/.
    assert set(["microtvm_api_server.py", "project.ino", "src"]).issubset(
        _get_directory_elements(project_dir)
    )

    source_dir = project_dir / "src"
    assert _get_directory_elements(source_dir) == set(
        ["model", "standalone_crt", "model.c", "model.h"]
    )


def test_project_model_integrity(project_dir, project):
    # Generated model sources plus the original model.tar must all be present.
    model_dir = project_dir / "src" / "model"
    assert _get_directory_elements(model_dir) == set(
        ["default_lib0.c", "default_lib1.c", "default_lib2.c", "model.tar"]
    )


def test_model_header_templating(project_dir, project):
    # Ensure model.h was templated with correct WORKSPACE_SIZE
    with (project_dir / "src" / "model.h").open() as f:
        model_h = f.read()
        workspace_size_defs = re.findall(r"\#define WORKSPACE_SIZE ([0-9]*)", model_h)
        assert workspace_size_defs
        assert len(workspace_size_defs) == 1

        # Make sure the WORKSPACE_SIZE we define is a reasonable size. We don't want
        # to set an exact value, as this test shouldn't break if an improvement to
        # TVM causes the amount of memory needed to decrease.
        workspace_size = int(workspace_size_defs[0])
        assert workspace_size < 30000
        assert workspace_size > 10000


def test_import_rerouting(project_dir, project):
    # Check one file to ensure imports were rerouted
    runtime_path = project_dir / "src" / "standalone_crt" / "src" / "runtime"
    c_backend_api_path = runtime_path / "crt" / "common" / "crt_backend_api.c"
    assert c_backend_api_path.exists()

    with c_backend_api_path.open() as f:
        c_backend_api_c = f.read()
        assert '#include "inttypes.h"' in c_backend_api_c
        assert "include/tvm/runtime/crt/platform.h" in c_backend_api_c


# Build on top of the generated project by replacing the
# top-level .ino file and adding data input files, much
# like a user would
@pytest.fixture(scope="module")
def modified_project(project_dir, project):
    this_dir = pathlib.Path(__file__).parent
    kws_testdata_dir = this_dir.parent / "testdata" / "kws"
    arduino_testdata_dir = this_dir / "testdata"

    shutil.copy2(arduino_testdata_dir / "project.ino", project_dir / "project.ino")

    # Copy in one pre-recorded audio sample per KWS category.
    project_data_dir = project_dir / "src" / "data"
    project_data_dir.mkdir()
    for sample in ["yes.c", "no.c", "silence.c", "unknown.c"]:
        shutil.copy2(kws_testdata_dir / sample, project_data_dir / sample)

    return project


@pytest.fixture(scope="module")
def compiled_project(modified_project):
    modified_project.build()
    return modified_project


def test_compile_yes_no_project(project_dir, project, compiled_project):
    # A successful build leaves at least one artifact in build/.
    build_dir = project_dir / "build"
    assert build_dir.exists()
    first_build_file = next(build_dir.iterdir(), None)
    assert first_build_file is not None


"""------------------------------------------------------------
If we're not running on real hardware, no further tests are run
------------------------------------------------------------"""


@pytest.fixture(scope="module")
def uploaded_project(compiled_project):
    compiled_project.flash()
    return compiled_project


""" Sample serial output:

category,runtime,yes,no,silence,unknown
yes,56762,115,-123,-125,-123,
no,56762,-128,4,-123,-9,
silence,56792,-128,-118,107,-117,
unknown,56792,-128,-125,-128,125,
"""
SERIAL_OUTPUT_HEADERS = "category,runtime,yes,no,silence,unknown"


@pytest.fixture(scope="module")
def serial_output(uploaded_project):
    # Read up to 2048 bytes (60 s timeout -- presumably; confirm against the
    # transport API) from the board's serial port and parse the CSV table
    # printed by the sketch (see sample above).
    transport = uploaded_project.transport()
    transport.open()
    out = transport.read(2048, 60)
    out_str = out.decode("utf-8")
    out_lines = out_str.split("\r\n")

    assert SERIAL_OUTPUT_HEADERS in out_lines
    headers_index = out_lines.index(SERIAL_OUTPUT_HEADERS)
    data_lines = out_lines[headers_index + 1 : headers_index + 5]
    split_lines = [line.split(",") for line in data_lines]

    # Each row: [category, runtime_us, yes, no, silence, unknown] with the
    # numeric fields converted to int.
    return [[line[0]] + list(map(int, line[1:6])) for line in split_lines]


TENSORFLOW_EVALUATIONS = {
    "yes": [115, -123, -125, -123],
    "no": [-128, 4, -123, -9],
    "silence": [-128, -118, 107, -117],
    "unknown": [-128, -125, -128, 125],
}
MAX_PREDICTION_DIFFERENCE = 2


@pytest.mark.requires_hardware
def test_project_inference_correctness(serial_output):
    predictions = {line[0]: line[2:] for line in serial_output}

    for sample, prediction in predictions.items():
        # Due to rounding issues, we don't get the *exact* same
        # values as Tensorflow gives, but they're pretty close
        reference_prediction = TENSORFLOW_EVALUATIONS[sample]
        deltas = [prediction[i] - reference_prediction[i] for i in range(4)]
        assert max(deltas) < MAX_PREDICTION_DIFFERENCE


MAX_INFERENCE_TIME_US = 200 * 1000
MAX_INFERENCE_TIME_RANGE_US = 1000


@pytest.mark.requires_hardware
def test_project_inference_runtime(serial_output):
    runtimes_us = [line[1] for line in serial_output]

    # Inference time will vary based on architecture
    # and clock speed. However, anything more than 200 ms
    # is way too long. Each inference takes ~60 ms on the
    # Sony spresense, running at 156 MHz
    assert max(runtimes_us) < MAX_INFERENCE_TIME_US

    # Clock speeds should be consistent for each input. On
    # the Sony spresense, they vary by <100 us. Note that
    # running with other attached hardware (like the
    # Spresense extension board) may cause this check to fail
    range_runtimes_us = max(runtimes_us) - min(runtimes_us)
    assert range_runtimes_us < MAX_INFERENCE_TIME_RANGE_US


if __name__ == "__main__":
    tvm.testing.main()
https://github.com/zk-ml/tachikoma
tests/micro/arduino/test_utils.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

import json
import pathlib
import requests  # NOTE(review): appears unused in this file -- candidate for removal
import datetime

import tvm.micro
import tvm.target.target
from tvm.micro import project  # NOTE(review): appears unused in this file
from tvm import relay
from tvm.relay.backend import Executor, Runtime
from tvm.testing.utils import fetch_model_from_url

# Location of the Arduino template project shipped with TVM, and its board list.
TEMPLATE_PROJECT_DIR = pathlib.Path(tvm.micro.get_microtvm_template_projects("arduino"))

BOARDS = TEMPLATE_PROJECT_DIR / "boards.json"


def arduino_boards() -> dict:
    """Returns a dict mapping board to target model"""
    with open(BOARDS) as f:
        board_properties = json.load(f)

    boards_model = {board: info["model"] for board, info in board_properties.items()}
    return boards_model


# Computed once at import time; maps board name -> target model string.
ARDUINO_BOARDS = arduino_boards()


def make_workspace_dir(test_name, board):
    """Create (and return as a tvm tempdir) a fresh, timestamped workspace
    directory next to this file, named workspace_<test>_<board>/<timestamp>.

    Appends "-<n>" if the timestamped path already exists (e.g. two calls
    within the same second).
    """
    filepath = pathlib.Path(__file__)
    board_workspace = (
        filepath.parent
        / f"workspace_{test_name}_{board}"
        / datetime.datetime.now().strftime("%Y-%m-%dT%H-%M-%S")
    )
    number = 0
    while board_workspace.exists():
        number += 1
        board_workspace = pathlib.Path(str(board_workspace) + f"-{number}")
    board_workspace.parent.mkdir(exist_ok=True, parents=True)
    t = tvm.contrib.utils.tempdir(board_workspace)
    return t


def make_kws_project(board, arduino_cli_cmd, microtvm_debug, workspace_dir):
    """Build the micro_speech (keyword spotting) TFLite model for `board` and
    generate an Arduino "example_project" for it under workspace_dir/project.

    Returns the generated (not yet built) tvm.micro project.
    """
    this_dir = pathlib.Path(__file__).parent
    model = ARDUINO_BOARDS[board]
    build_config = {"debug": microtvm_debug}

    # Download (with checksum verification) and import the TFLite model.
    mod, params = fetch_model_from_url(
        url="https://github.com/tensorflow/tflite-micro/raw/main/tensorflow/lite/micro/examples/micro_speech/micro_speech.tflite",
        model_format="tflite",
        sha256="09e5e2a9dfb2d8ed78802bf18ce297bff54281a66ca18e0c23d69ca14f822a83",
    )

    target = tvm.target.target.micro(model)
    runtime = Runtime("crt")
    executor = Executor("aot", {"unpacked-api": True})

    with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
        mod = relay.build(mod, target, runtime=runtime, executor=executor, params=params)

    return tvm.micro.generate_project(
        str(TEMPLATE_PROJECT_DIR),
        mod,
        workspace_dir / "project",
        {
            "board": board,
            "arduino_cli_cmd": arduino_cli_cmd,
            "project_type": "example_project",
            "verbose": bool(build_config.get("debug")),
        },
    )
https://github.com/zk-ml/tachikoma
tests/micro/common/__init__.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License.
https://github.com/zk-ml/tachikoma
tests/micro/common/conftest.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations pytest_plugins = [ "tvm.micro.testing.pytest_plugin", ]
https://github.com/zk-ml/tachikoma
tests/micro/common/test_autotune.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from io import StringIO import json import numpy as np import pytest import tvm import tvm.testing import tvm.micro.testing from tvm.testing.utils import fetch_model_from_url TUNING_RUNS_PER_OPERATOR = 2 @pytest.mark.requires_hardware @tvm.testing.requires_micro def test_kws_autotune_workflow(platform, board, tmp_path): mod, params = fetch_model_from_url( url="https://github.com/tensorflow/tflite-micro/raw/main/tensorflow/lite/micro/examples/micro_speech/micro_speech.tflite", model_format="tflite", sha256="09e5e2a9dfb2d8ed78802bf18ce297bff54281a66ca18e0c23d69ca14f822a83", ) target = tvm.micro.testing.get_target(platform, board) str_io_logs = tvm.micro.testing.tune_model( platform, board, target, mod, params, TUNING_RUNS_PER_OPERATOR ) assert isinstance(str_io_logs, StringIO) str_logs = str_io_logs.getvalue().rstrip().split("\n") logs = list(map(json.loads, str_logs)) # Some tuning tasks don't have any config space, and will only be run once with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}): tasks = tvm.autotvm.task.extract_from_program(mod["main"], {}, target) assert len(tasks) <= len(logs) <= len(tasks) * TUNING_RUNS_PER_OPERATOR # Check we tested both operators 
op_names = list(map(lambda x: x["input"][1], logs)) assert op_names[0] == op_names[1] == "conv2d_nhwc_spatial_pack.arm_cpu" # Make sure we tested different code. != does deep comparison in Python 3 assert logs[0]["config"]["index"] != logs[1]["config"]["index"] assert logs[0]["config"]["entity"] != logs[1]["config"]["entity"] # Compile the best model with AOT and connect to it str_io_logs.seek(0) with tvm.micro.testing.create_aot_session( platform, board, target, mod, params, build_dir=tmp_path, tune_logs=str_io_logs, ) as session: aot_executor = tvm.runtime.executor.aot_executor.AotModule(session.create_aot_executor()) samples = ( np.random.randint(low=-127, high=128, size=(1, 1960), dtype=np.int8) for x in range(3) ) # Validate perforance across random runs runtimes = [ runtime for _, runtime in tvm.micro.testing.predict_labels_aot( session, aot_executor, samples, runs_per_sample=20 ) ] # `time` is the average time taken to execute model inference on the # device, measured in seconds. It does not include the time to upload # the input data via RPC. On slow boards like the Arduino Due, time # is around 0.12 (120 ms), so this gives us plenty of buffer. assert np.median(runtimes) < 1 if __name__ == "__main__": tvm.testing.main()
https://github.com/zk-ml/tachikoma
tests/micro/common/test_tvmc.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import pytest import subprocess import shlex import sys import logging import tempfile import pathlib import sys import os import shutil import tvm import tvm.testing from tvm.contrib.download import download_testdata TVMC_COMMAND = [sys.executable, "-m", "tvm.driver.tvmc"] MODEL_URL = "https://github.com/tensorflow/tflite-micro/raw/main/tensorflow/lite/micro/examples/micro_speech/micro_speech.tflite" MODEL_FILE = "micro_speech.tflite" # TODO(mehrdadh): replace this with _main from tvm.driver.tvmc.main # Issue: https://github.com/apache/tvm/issues/9612 def _run_tvmc(cmd_args: list, *args, **kwargs): """Run a tvmc command and return the results""" cmd_args_list = TVMC_COMMAND + cmd_args cwd_str = "" if "cwd" not in kwargs else f" (in cwd: {kwargs['cwd']})" logging.debug("run%s: %s", cwd_str, " ".join(shlex.quote(a) for a in cmd_args_list)) return subprocess.check_call(cmd_args_list, *args, **kwargs) @tvm.testing.requires_micro def test_tvmc_exist(platform, board): cmd_result = _run_tvmc(["micro", "-h"]) assert cmd_result == 0 @tvm.testing.requires_micro @pytest.mark.parametrize( "output_dir,", [pathlib.Path("./tvmc_relative_path_test"), pathlib.Path(tempfile.mkdtemp())], ) def 
test_tvmc_model_build_only(platform, board, output_dir): target = tvm.micro.testing.get_target(platform, board) if not os.path.isabs(output_dir): out_dir_temp = os.path.abspath(output_dir) if os.path.isdir(out_dir_temp): shutil.rmtree(out_dir_temp) os.mkdir(out_dir_temp) model_path = download_testdata(MODEL_URL, MODEL_FILE, module="data") tar_path = str(output_dir / "model.tar") project_dir = str(output_dir / "project") runtime = "crt" executor = "graph" cmd_result = _run_tvmc( [ "compile", model_path, f"--target={target}", f"--runtime={runtime}", f"--runtime-crt-system-lib", str(1), f"--executor={executor}", "--executor-graph-link-params", str(0), "--output", tar_path, "--output-format", "mlf", "--pass-config", "tir.disable_vectorize=1", "--disabled-pass=AlterOpLayout", ] ) assert cmd_result == 0, "tvmc failed in step: compile" create_project_cmd = [ "micro", "create-project", project_dir, tar_path, platform, "--project-option", "project_type=host_driven", f"board={board}", ] cmd_result = _run_tvmc(create_project_cmd) assert cmd_result == 0, "tvmc micro failed in step: create-project" build_cmd = ["micro", "build", project_dir, platform] cmd_result = _run_tvmc(build_cmd) assert cmd_result == 0, "tvmc micro failed in step: build" shutil.rmtree(output_dir) @pytest.mark.requires_hardware @tvm.testing.requires_micro @pytest.mark.parametrize( "output_dir,", [pathlib.Path("./tvmc_relative_path_test"), pathlib.Path(tempfile.mkdtemp())], ) def test_tvmc_model_run(platform, board, output_dir): target = tvm.micro.testing.get_target(platform, board) if not os.path.isabs(output_dir): out_dir_temp = os.path.abspath(output_dir) if os.path.isdir(out_dir_temp): shutil.rmtree(out_dir_temp) os.mkdir(out_dir_temp) model_path = model_path = download_testdata(MODEL_URL, MODEL_FILE, module="data") tar_path = str(output_dir / "model.tar") project_dir = str(output_dir / "project") runtime = "crt" executor = "graph" cmd_result = _run_tvmc( [ "compile", model_path, f"--target={target}", 
f"--runtime={runtime}", f"--runtime-crt-system-lib", str(1), f"--executor={executor}", "--executor-graph-link-params", str(0), "--output", tar_path, "--output-format", "mlf", "--pass-config", "tir.disable_vectorize=1", "--disabled-pass=AlterOpLayout", ] ) assert cmd_result == 0, "tvmc failed in step: compile" create_project_cmd = [ "micro", "create-project", project_dir, tar_path, platform, "--project-option", "project_type=host_driven", f"board={board}", ] cmd_result = _run_tvmc(create_project_cmd) assert cmd_result == 0, "tvmc micro failed in step: create-project" build_cmd = ["micro", "build", project_dir, platform] cmd_result = _run_tvmc(build_cmd) assert cmd_result == 0, "tvmc micro failed in step: build" flash_cmd = ["micro", "flash", project_dir, platform] cmd_result = _run_tvmc(flash_cmd) assert cmd_result == 0, "tvmc micro failed in step: flash" run_cmd = [ "run", "--device", "micro", project_dir, ] run_cmd += ["--fill-mode", "random"] cmd_result = _run_tvmc(run_cmd) assert cmd_result == 0, "tvmc micro failed in step: run" shutil.rmtree(output_dir) if __name__ == "__main__": tvm.testing.main()
https://github.com/zk-ml/tachikoma
tests/micro/project_api/test_arduino_microtvm_api_server.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import subprocess import sys from pathlib import Path from unittest import mock from packaging import version import pytest import tvm from tvm.micro.project_api import server sys.path.insert(0, tvm.micro.get_microtvm_template_projects("arduino")) import microtvm_api_server sys.path.pop(0) class TestGenerateProject: DEFAULT_OPTIONS = {"arduino_cli_cmd": "arduino-cli", "board": "nano33ble"} def _set_pathlib_path_exists(self, value): with mock.patch.object(Path, "exists") as mock_exists: mock_exists.return_value = value @mock.patch("pathlib.Path") def test_find_modified_include_path(self, mock_pathlib_path): handler = microtvm_api_server.Handler() project_dir = mock_pathlib_path("/dummy/project") file_path = ( project_dir / "src" / "standalone_crt" / "src" / "runtime" / "crt" / "graph_executor" / "load_json.c" ) # Should return C standard libs unmodified clib_output = handler._find_modified_include_path(project_dir, file_path, "math.h") assert clib_output == "math.h" # If import already works, should return unmodified valid_arduino_import = "../../../../include/tvm/runtime/crt/platform.h" self._set_pathlib_path_exists(True) valid_output = handler._find_modified_include_path( project_dir, file_path, 
valid_arduino_import ) assert valid_output == valid_arduino_import # Format for arduino-cli v0.18.2 BOARD_CONNECTED_V18 = ( "Port Type Board Name FQBN Core \n" "/dev/ttyACM0 Serial Port (USB) Arduino Nano 33 BLE arduino:mbed_nano:nano33ble arduino:mbed_nano\n" "/dev/ttyACM1 Serial Port (USB) Arduino Nano 33 arduino:mbed_nano:nano33 arduino:mbed_nano\n" "/dev/ttyS4 Serial Port Unknown \n" "\n" ) # Format for arduino-cli v0.21.1 and above BOARD_CONNECTED_V21 = ( "Port Protocol Type Board Name FQBN Core \n" "/dev/ttyACM0 serial arduino:mbed_nano:nano33ble arduino:mbed_nano\n" "\n" ) BOARD_DISCONNECTED_V21 = ( "Port Protocol Type Board Name FQBN Core\n" "/dev/ttyS4 serial Serial Port Unknown\n" "\n" ) def test_parse_connected_boards(self): h = microtvm_api_server.Handler() boards = h._parse_connected_boards(self.BOARD_CONNECTED_V21) assert list(boards) == [ { "port": "/dev/ttyACM0", "protocol": "serial", "type": "", "board name": "", "fqbn": "arduino:mbed_nano:nano33ble", "core": "arduino:mbed_nano", } ] @mock.patch("subprocess.run") def test_auto_detect_port(self, mock_run): process_mock = mock.Mock() handler = microtvm_api_server.Handler() # Test it returns the correct port when a board is connected mock_run.return_value.stdout = bytes(self.BOARD_CONNECTED_V18, "utf-8") assert handler._auto_detect_port(self.DEFAULT_OPTIONS) == "/dev/ttyACM0" # Should work with old or new arduino-cli version mock_run.return_value.stdout = bytes(self.BOARD_CONNECTED_V21, "utf-8") assert handler._auto_detect_port(self.DEFAULT_OPTIONS) == "/dev/ttyACM0" # Test it raises an exception when no board is connected mock_run.return_value.stdout = bytes(self.BOARD_DISCONNECTED_V21, "utf-8") with pytest.raises(microtvm_api_server.BoardAutodetectFailed): handler._auto_detect_port(self.DEFAULT_OPTIONS) # Test that the FQBN needs to match EXACTLY handler._get_fqbn = mock.MagicMock(return_value="arduino:mbed_nano:nano33") mock_run.return_value.stdout = bytes(self.BOARD_CONNECTED_V18, "utf-8") assert 
( handler._auto_detect_port({**self.DEFAULT_OPTIONS, "board": "nano33"}) == "/dev/ttyACM1" ) BAD_CLI_VERSION = "arduino-cli Version: 0.7.1 Commit: 7668c465 Date: 2019-12-31T18:24:32Z\n" GOOD_CLI_VERSION = "arduino-cli Version: 0.21.1 Commit: 9fcbb392 Date: 2022-02-24T15:41:45Z\n" @mock.patch("subprocess.run") def test_auto_detect_port(self, mock_run): handler = microtvm_api_server.Handler() mock_run.return_value.stdout = bytes(self.GOOD_CLI_VERSION, "utf-8") arduino_cli_cmd = self.DEFAULT_OPTIONS.get("arduino_cli_cmd") warning_as_error = self.DEFAULT_OPTIONS.get("warning_as_error") cli_command = handler._get_arduino_cli_cmd(arduino_cli_cmd) handler._check_platform_version(cli_command=cli_command, warning_as_error=warning_as_error) assert handler._version == version.parse("0.21.1") handler = microtvm_api_server.Handler() mock_run.return_value.stdout = bytes(self.BAD_CLI_VERSION, "utf-8") with pytest.raises(server.ServerError) as error: handler._check_platform_version(cli_command=cli_command, warning_as_error=True) mock_run.reset_mock() @mock.patch("subprocess.run") def test_flash_retry(self, mock_run): mock_run.return_value.stdout = bytes(self.GOOD_CLI_VERSION, "utf-8") def side_effect(cmd, *args, **kwargs): if cmd[1] == "flash": raise subprocess.TimeoutExpired(cmd, kwargs["timeout"]) return mock.DEFAULT mock_run.side_effect = side_effect handler = microtvm_api_server.Handler() handler._port = "/dev/ttyACM0" # handler.flash will try flashing `handler.FLASH_MAX_RETRIES` times, # after which it will raise a TimeoutExpired exception of its own with pytest.raises(RuntimeError): handler.flash(self.DEFAULT_OPTIONS) # Test we checked version then called upload once per retry attempt, # plus once to verify arduino-cli version. 
assert mock_run.call_count == handler.FLASH_MAX_RETRIES + 1 @mock.patch("subprocess.run") def test_flash(self, mock_run): mock_run.return_value.stdout = bytes(self.GOOD_CLI_VERSION, "utf-8") handler = microtvm_api_server.Handler() handler._port = "/dev/ttyACM0" # Test no exception thrown when command works handler.flash(self.DEFAULT_OPTIONS) # Test we checked version then called upload assert mock_run.call_count == 2 assert mock_run.call_args_list[0][0] == (["arduino-cli", "version"],) assert mock_run.call_args_list[1][0][0][0:2] == ["make", "flash"] mock_run.reset_mock() # Test exception raised when `arduino-cli upload` returns error code mock_run.side_effect = subprocess.CalledProcessError(2, []) with pytest.raises(subprocess.CalledProcessError): handler.flash(self.DEFAULT_OPTIONS) # Version information should be cached and not checked again mock_run.assert_called_once() assert mock_run.call_args[0][0][0:2] == ["make", "flash"] if __name__ == "__main__": tvm.testing.main()
https://github.com/zk-ml/tachikoma
tests/micro/project_api/test_project_api.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import sys import numpy as np import tvm from tvm import relay from tvm.micro.project_api import server from tvm.relay.backend import Runtime from tvm.micro.testing import get_target API_GENERATE_PROJECT = "generate_project" API_BUILD = "build" API_FLASH = "flash" API_OPEN_TRANSPORT = "open_transport" PLATFORM_ARDUINO = "arduino" PLATFORM_ZEPHYR = "zephyr" platform = tvm.testing.parameter(PLATFORM_ARDUINO, PLATFORM_ZEPHYR) @tvm.testing.requires_micro def test_default_options_exist(platform): sys.path.insert(0, tvm.micro.get_microtvm_template_projects(platform)) import microtvm_api_server platform_options = microtvm_api_server.PROJECT_OPTIONS default_options = server.default_project_options() option_names = [] for option in platform_options: option_names.append(option.name) for option in default_options: assert option.name in option_names @tvm.testing.requires_micro def test_project_minimal_options(platform): """Test template project with minimum projectOptions""" shape = (10,) dtype = "int8" x = relay.var("x", relay.TensorType(shape=shape, dtype=dtype)) xx = relay.multiply(x, x) z = relay.add(xx, relay.const(np.ones(shape=shape, dtype=dtype))) func = relay.Function([x], z) ir_mod = 
tvm.IRModule.from_expr(func) if platform == "arduino": board = "due" elif platform == "zephyr": board = "qemu_x86" runtime = Runtime("crt", {"system-lib": True}) target = get_target(platform, board) with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}): mod = tvm.relay.build(ir_mod, target=target, runtime=runtime) project_options = { "project_type": "host_driven", "board": board, } temp_dir = tvm.contrib.utils.tempdir() project = tvm.micro.generate_project( tvm.micro.get_microtvm_template_projects(platform), mod, temp_dir / "project", project_options, ) project.build() if __name__ == "__main__": tvm.testing.main()
https://github.com/zk-ml/tachikoma
tests/micro/stm32/conftest.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import pytest import tvm.target.target
https://github.com/zk-ml/tachikoma
tests/micro/stm32/test_code_emitter.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import os import shutil import struct import sys import numpy as np import tensorflow as tf import tvm import tvm.relay as relay from tvm.micro.contrib import stm32 from tvm.contrib.download import download from tvm import testing import conftest NUM_ITERATIONS = 10 # ========================================================= # get_data # ========================================================= def get_data(in_data_shapes, in_data_dtypes): """Generate a uint8 image.""" assert len(in_data_shapes) == 1, "Only single input models are supported." 
in_data = OrderedDict() for shape_name, shape in in_data_shapes.items(): for dtype_name, dtype in in_data_dtypes.items(): if dtype_name == shape_name: in_data[shape_name] = np.random.uniform(size=shape).astype(dtype) in_data = np.random.uniform(size=shape).astype("uint8") break if shape_name not in in_data.keys(): raise ValueError("Shape and dtype dictionaries do not fit.") return in_data # ================================================================== # dump_image # ================================================================== def dump_image(filename, image): # Flatten image image_data = image.flatten() outputRaw = [] # Raw binary format for i in range(0, len(image_data)): outputRaw.append(struct.pack("<B", int(image_data[i]) & 0xFF)) # Dump image in raw binary format f = open(filename, "wb") for i in range(0, len(outputRaw)): f.write(outputRaw[i]) f.close() # ================================================================== # scale_input_data # ================================================================== def scale_input_data(input_details, data): if input_details["dtype"] == np.uint8 or input_details["dtype"] == np.int8: input_scale, input_zero_point = input_details["quantization"] print( "== TFLite input quantization: scale={}, zero={}".format(input_scale, input_zero_point) ) data = data / input_scale + input_zero_point data = data.astype(input_details["dtype"]) return data # ================================================================== # scale_output_data # ================================================================== def scale_output_data(output_details, data): if output_details["dtype"] == np.uint8 or output_details["dtype"] == np.int8: output_scale, output_zero_point = output_details["quantization"] print( "== TFLite output quantization: scale={}, zero={}".format( output_scale, output_zero_point ) ) data = data.astype(np.float32) data = (data - output_zero_point) * output_scale return data # 
======================================================== # get_tflite_model # ======================================================== def get_tflite_model(model_path): # # Load TFLite model and allocate tensors. # interpreter = tf.lite.Interpreter(model_path=model_path) interpreter.allocate_tensors() # # Get input and output tensors. # input_details = interpreter.get_input_details() output_details = interpreter.get_output_details() # # Figure out shapes and # shape_dict = {} dtype_dict = {} for input in input_details: input_name = input["name"] input_shape = input["shape"].tolist() input_dtype = str(np.dtype(input["dtype"])) shape_dict[input_name] = input_shape dtype_dict[input_name] = input_dtype # # Save the model # # # Load the TFLite Model for TVM: # # https://docs.tvm.ai/tutorials/frontend/from_tflite.html # https://jackwish.net/tflite/docs/ model_buf = open(model_path, "rb").read() # # Get TFLite model from buffer # try: import tflite model = tflite.Model.GetRootAsModel(model_buf, 0) assert isinstance(model, tflite.Model) except AttributeError: import tflite.Model model = tflite.Model.Model.GetRootAsModel(model_buf, 0) assert isinstance(model, tflite.Model.Model) print("TVM: Importing a TFLite model ...") return model, shape_dict, dtype_dict # ======================================================== # extract_tflite_quantization # ======================================================== def _make_qnn_params(quantization): qnn_params = {} qnn_params["min"] = quantization.MinAsNumpy() qnn_params["max"] = quantization.MaxAsNumpy() qnn_params["scale"] = quantization.ScaleAsNumpy() qnn_params["zero_point"] = quantization.ZeroPointAsNumpy() qnn_params["dim"] = quantization.QuantizedDimension() # print(" Quantization: ({}, {}), s={}, z={}, dim={}".format(min, max, scale, zero_point, dim)) return qnn_params def extract_tflite_quantization(model): assert model.SubgraphsLength() == 1, "only support one subgraph (main subgraph)" subgraph = model.Subgraphs(0) 
quantization_info = {} # model inputs / outputs model_inputs = subgraph.InputsAsNumpy() model_outputs = subgraph.OutputsAsNumpy() for node_id in model_inputs: tensor = subgraph.Tensors(node_id) tensor_name = tensor.Name().decode("utf-8") tensor_type = tensor.Type() dl_tensor_name = stm32.get_input_tensor_name(tensor_name) quantization = tensor.Quantization() if quantization is not None: qnn_params = _make_qnn_params(quantization) quantization_info[dl_tensor_name] = qnn_params for node_id in model_outputs: tensor = subgraph.Tensors(node_id) tensor_name = tensor.Name().decode("utf-8") tensor_type = tensor.Type() # # TODO: TVM does not preserve the output tensor names. # Eventually, we should be able to form a valid name. # dl_tensor_name = stm32.get_output_tensor_name(tensor_name, 0) quantization = tensor.Quantization() if quantization is not None: qnn_params = _make_qnn_params(quantization) quantization_info[dl_tensor_name] = qnn_params return quantization_info # ======================================================== # run_tflite_model # ======================================================== def run_tflite_model(model_path, image_data): # # Load TFLite model and allocate tensors. # interpreter = tf.lite.Interpreter(model_path=model_path) interpreter.allocate_tensors() # # Get input and output tensors. 
# input_details = interpreter.get_input_details()[0] output_details = interpreter.get_output_details()[0] # # Run test images # tf_results = np.empty(shape=[NUM_ITERATIONS, 10], dtype=np.float) for i, image in enumerate(image_data): # # Normalize the input data # image = image / 255.0 image = scale_input_data(input_details, image) interpreter.set_tensor(input_details["index"], image) interpreter.invoke() tf_results[i] = interpreter.get_tensor(output_details["index"]) tf_results[i] = scale_output_data(output_details, tf_results[i]) print(f"== [{i}] TFLite Output:") print(tf_results[i]) return tf_results # ======================================================== # run_tvm_model # ======================================================== def run_tvm_model(build_dir, model_name, target_dir, image_path): curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) tvm_results_name = os.path.join(build_dir, "tvm_results.txt") # # Build the model # tvm_dir = os.path.join(curr_path, "..", "..", "..") test_dir = os.path.join(tvm_dir, "tests", "crt", "contrib", "stm32") command = f"make -f {test_dir}/Makefile TVM_PATH={tvm_dir} MODEL_PATH={target_dir} BUILD_PATH={build_dir} IMAGE_PATH={image_path}" print(f"{command}") os.system(command) # # Run # command = f"{target_dir}/{model_name}.exe" print(f"{command}") os.system(command) tvm_results = np.loadtxt(tvm_results_name) print(f"== TVM Output:\n {tvm_results}") # # Clean temporary image files # if os.path.exists(tvm_results_name): os.remove(tvm_results_name) return tvm_results # ======================================================== # check_network # ======================================================== def check_network(build_dir, target_name, model_path, image_path): model_name = "network" model, shape_dict, dtype_dict = get_tflite_model(model_path) # # Generate random input data # image_data = [] for i in range(NUM_ITERATIONS): assert len(shape_dict) == 1, "Only single input models are supported." 
image_shape = list(shape_dict.values())[0] in_data = np.random.randint(0, 255, size=image_shape).astype("uint8") # Write raw data for using with the TVM implementation filename = os.path.join(image_path, "{:02d}.raw".format(i)) dump_image(filename, in_data) image_data.append(in_data) mod, params = relay.frontend.from_tflite(model, shape_dict, dtype_dict) # # Build a TVM C module for the ARM CPU (without compiling the kernels # library to the object code form): # with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}): rt_module = relay.build(mod, target="c -device=arm_cpu", params=params) # # Export model library format # target_dir = os.path.join(build_dir, target_name + "_gen") if os.path.exists(target_dir): print(f'Removing existing "{target_dir}" directory') try: shutil.rmtree(target_dir) except OSError as err: raise ValueError(f"emit_code.Error: {target_dir} : {err.strerror}") mlf_tar_path = os.path.join(build_dir, target_name + "_lib.tar") import tvm.micro as micro micro.export_model_library_format(rt_module, mlf_tar_path) emitter = stm32.CodeEmitter() quantization = extract_tflite_quantization(model) emitter.parse_library_format(mlf_tar_path, quantization) emitter.emit_code(target_dir, model_name) # # Results # tf_results = run_tflite_model(model_path, image_data) tvm_results = run_tvm_model(build_dir, model_name, target_dir, image_path) check_result(tf_results, tvm_results) # ======================================================== # check_result # ======================================================== def check_result(tflite_results, tvm_results): """Helper function to verify results""" # # MNIST quantized uint8 results in one single difference of # ~ 0.004 so just escape this # ATOL = 1e-3 RTOL = 0.5 tvm.testing.assert_allclose(tflite_results, tvm_results, rtol=RTOL, atol=ATOL) # ======================================================== # test_mnist # ======================================================== def test_mnist(): 
DEBUG = False tempdir_root = None if DEBUG: tempdir_root = os.path.join( curr_path, f"workspace", "test_mnist", datetime.datetime.now().strftime("%Y-%m-%dT%H-%M-%S"), ) curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) model_path = os.path.join(curr_path, "models/mnist.tflite") build_dir = tvm.contrib.utils.tempdir(tempdir_root) model_url = "https://storage.googleapis.com/download.tensorflow.org/models/tflite/digit_classifier/mnist.tflite" download(model_url, model_path) check_network(build_dir.path, "mnist", model_path, build_dir.path) if __name__ == "__main__": sys.exit(pytest.main([os.path.dirname(__file__)] + sys.argv[1:]))
https://github.com/zk-ml/tachikoma
tests/micro/testdata/kws/no.c
/* * This work is a derivative of "Speech Commands V2" by Google, used under CC BY 4.0. */ static const char input_no[1960] = { 0x80, 0x80, 0x80, 0xc5, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0xc5, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0xc5, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0xb4, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0xcf, 0xe4, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0xdb, 0xe4, 0xc5, 0x80, 
0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x2f, 0x1e, 0x7, 0xe4, 0xc5, 0xb4, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x52, 0x41, 0x4b, 0x3a, 0x20, 0xf6, 0xcf, 0xb4, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0xb4, 0x80, 0x80, 0xb4, 0x80, 0x80, 0x80, 0xc5, 0xb4, 0x80, 0x80, 0x80, 0xb4, 0x80, 0x80, 0x62, 0x53, 0x5d, 0x51, 0x4a, 0xf9, 0xe4, 0xb4, 0xc5, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0xb4, 0x80, 0x80, 0x80, 0xc5, 0x80, 0x80, 0x80, 0xc5, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0xb4, 0xc5, 0x80, 0xcf, 0x80, 0x41, 0x49, 0x6a, 0x5d, 0x75, 0x62, 0x75, 0x63, 0x7a, 0x65, 0x7b, 0x64, 0x78, 0x62, 0x75, 0x5d, 0x71, 0x5b, 0x37, 0xd, 0x3, 0xf6, 0xec, 0xd6, 0x32, 0x2a, 0x1a, 0xf6, 0x42, 0x4b, 0x3f, 0xe0, 0xe4, 0xcf, 0xf3, 0xef, 0xf3, 0xfb, 0x3, 0x0, 0x6d, 0x56, 0x6e, 0x57, 0x69, 0x55, 0x72, 0x5d, 0x66, 0x52, 0x6e, 0x5d, 0x6f, 0x46, 0x64, 0x52, 0x62, 0x42, 0x4e, 0x29, 0x32, 0xe, 0x25, 0x35, 0x56, 0x49, 0x4d, 0x42, 0x5d, 0x57, 0x61, 0x34, 0x1c, 0x5, 0x20, 0x17, 0x17, 0x17, 0x24, 0x20, 0x76, 0x65, 0x7a, 0x63, 0x7b, 0x65, 0x7b, 0x5d, 0x70, 0x53, 0x73, 0x61, 0x70, 0x53, 0x66, 0x57, 0x63, 0x52, 0x5c, 0x3a, 0x54, 0x4d, 0x6b, 0x5f, 0x78, 0x66, 0x7a, 0x64, 0x7b, 0x64, 0x75, 0x56, 0x5a, 0x46, 0x4b, 0x3d, 0x46, 0x3e, 0x4e, 0x3f, 0x68, 0x58, 0x6e, 0x57, 0x6d, 0x5f, 
0x76, 0x5a, 0x6e, 0x57, 0x75, 0x5d, 0x67, 0x53, 0x68, 0x50, 0x67, 0x53, 0x6c, 0x59, 0x68, 0x5a, 0x6a, 0x53, 0x65, 0x5a, 0x74, 0x56, 0x6d, 0x5c, 0x6b, 0x4a, 0x50, 0x46, 0x58, 0x48, 0x66, 0x56, 0x59, 0x46, 0x5e, 0x43, 0x61, 0x44, 0x61, 0x50, 0x6e, 0x55, 0x67, 0x5a, 0x63, 0x4e, 0x5f, 0x3b, 0x63, 0x52, 0x5e, 0x4e, 0x67, 0x4d, 0x62, 0x51, 0x6a, 0x4e, 0x62, 0x48, 0x69, 0x55, 0x66, 0x50, 0x62, 0x50, 0x59, 0x40, 0x4c, 0x41, 0x6c, 0x55, 0x5a, 0x3f, 0x58, 0x3c, 0x5b, 0x28, 0x50, 0x3d, 0x62, 0x4b, 0x5b, 0x55, 0x62, 0x43, 0x5d, 0x3c, 0x50, 0x37, 0x55, 0x2d, 0x55, 0x49, 0x59, 0x48, 0x53, 0x3e, 0x53, 0x46, 0x64, 0x53, 0x61, 0x3f, 0x5e, 0x2e, 0x4d, 0x39, 0x4e, 0x41, 0x61, 0x4a, 0x53, 0x36, 0x52, 0x35, 0x55, 0x2a, 0x4f, 0x3a, 0x5a, 0x3e, 0x55, 0x4f, 0x5e, 0x37, 0x4d, 0x34, 0x4c, 0x37, 0x4e, 0x28, 0x50, 0x36, 0x53, 0x39, 0x49, 0x2b, 0x4f, 0x39, 0x5c, 0x47, 0x51, 0x35, 0x5d, 0x1b, 0x3f, 0x2b, 0x46, 0x3b, 0x5d, 0x44, 0x5a, 0x35, 0x4d, 0x35, 0x4e, 0x30, 0x4b, 0x3f, 0x57, 0x35, 0x59, 0x3f, 0x45, 0xd, 0x2b, 0x4, 0x45, 0x26, 0x48, 0x36, 0x47, 0x26, 0x44, 0x39, 0x50, 0x2e, 0x46, 0x2f, 0x55, 0x43, 0x4c, 0x23, 0x52, 0x2f, 0x3f, 0x25, 0x43, 0x2d, 0x3b, 0xf9, 0x4d, 0x29, 0x44, 0x1b, 0x35, 0x38, 0x48, 0x3a, 0x46, 0x3c, 0x5d, 0x29, 0x43, 0x5, 0x4a, 0xd, 0x26, 0xb4, 0x28, 0xcf, 0x3c, 0x13, 0x25, 0x2, 0x32, 0xf9, 0x2f, 0x1e, 0x4d, 0x19, 0x3a, 0x2, 0x3c, 0x7, 0x3c, 0x12, 0x3c, 0x10, 0xdb, 0x80, 0x37, 0x24, 0x42, 0x21, 0x3a, 0x30, 0x4a, 0x28, 0x32, 0x31, 0x48, 0xe7, 0x2d, 0x80, 0x19, 0xf9, 0x2d, 0xf3, 0x32, 0x2, 0x24, 0xb4, 0x14, 0x80, 0x22, 0xb4, 0x35, 0x3, 0x40, 0xf, 0x30, 0x80, 0x26, 0x80, 0x26, 0xcf, 0x21, 0x80, 0x80, 0x80, 0xf5, 0xef, 0x28, 0x80, 0x4b, 0x34, 0x3c, 0xdb, 0x34, 0x12, 0x44, 0xe0, 0x26, 0x80, 0x1d, 0x80, 0xd6, 0x80, 0x21, 0xe4, 0x80, 0x80, 0xb4, 0x80, 0xf6, 0x11, 0x2b, 0xff, 0x3e, 0x16, 0x1f, 0x80, 0x21, 0xf6, 0x14, 0xd6, 0x27, 0xcf, 0x80, 0x80, 0x0, 0xec, 0x48, 0xd6, 0x3b, 0x0, 0x36, 0x1d, 0x28, 0xcf, 0x2d, 0xef, 0x25, 0x80, 0xcf, 0x80, 0xf5, 0x80, 0xa, 0x80, 0x11, 0x80, 0x80, 
0x80, 0xf8, 0xe4, 0x10, 0xea, 0x2a, 0xf1, 0x21, 0x80, 0xcf, 0x80, 0x3, 0xe7, 0x1a, 0xb4, 0x80, 0x80, 0xe0, 0xdb, 0x31, 0xe0, 0x32, 0xc, 0x30, 0x80, 0x0, 0xc5, 0x34, 0x80, 0x2, 0x80, 0xf1, 0x80, 0xcf, 0x80, 0xb4, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x2, 0x80, 0x14, 0x80, 0xd6, 0x80, 0x80, 0x80, 0xfb, 0xdb, 0x8, 0x80, 0x80, 0x80, 0xe4, 0xe7, 0x28, 0xc5, 0x1e, 0xdb, 0x2a, 0xb4, 0x80, 0x80, 0x30, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0xf8, 0xb4, 0x17, 0x80, 0xcf, 0x80, 0x80, 0x80, 0x0, 0xcf, 0x12, 0x80, 0x80, 0x80, 0xdb, 0xb4, 0xe4, 0x80, 0x21, 0xb4, 0x2a, 0x80, 0x80, 0x80, 0x13, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0xf3, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0xfd, 0x80, 0x80, 0x80, 0xe0, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0xe4, 0x80, 0xb4, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 
0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 
0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80};
https://github.com/zk-ml/tachikoma
tests/micro/testdata/kws/silence.c
/* * This work is a derivative of "Speech Commands V2" by Google, used under CC BY 4.0. */ static const char input_silence[1960] = { 0x23, 0x17, 0xe0, 0x3, 0x9, 0xe7, 0xe7, 0xdb, 0xcf, 0xc5, 0xe0, 0xdb, 0xc5, 0xcf, 0xef, 0xcf, 0xcf, 0xdb, 0xef, 0xdb, 0xe7, 0xc5, 0x5, 0x3, 0xfc, 0xe7, 0xf6, 0xdb, 0xcf, 0xe7, 0x9, 0xef, 0xef, 0xdb, 0xcf, 0xe7, 0xe0, 0xe7, 0xe0, 0xc5, 0xff, 0xe0, 0x4, 0xcf, 0xdb, 0xb4, 0x80, 0xdb, 0xef, 0x80, 0xc5, 0xe4, 0x9, 0xe4, 0xcf, 0xc5, 0xdb, 0xcf, 0xdb, 0xcf, 0xf5, 0xdb, 0xe7, 0xcf, 0xef, 0xe4, 0xe7, 0xe4, 0xe7, 0xdb, 0xdb, 0xcf, 0xc5, 0xdb, 0xcf, 0xcf, 0xcf, 0xb4, 0xcf, 0xcf, 0x13, 0xef, 0xf5, 0x80, 0x80, 0x80, 0xc5, 0xcf, 0xcf, 0x80, 0x80, 0xcf, 0xf5, 0xcf, 0x80, 0x80, 0x80, 0x80, 0x80, 0xcf, 0xf9, 0xdb, 0xcf, 0x80, 0x80, 0xcf, 0xe7, 0xdb, 0xfb, 0xe4, 0xdb, 0xcf, 0xe7, 0xcf, 0xe7, 0xb4, 0xdb, 0xe4, 0xcf, 0xb4, 0xfb, 0x0, 0x6, 0xd6, 0xec, 0xb4, 0x80, 0xb4, 0x80, 0x80, 0x80, 0x80, 0xf3, 0xb4, 0xdb, 0xdb, 0xc5, 0xb4, 0xc5, 0x80, 0xcf, 0xb4, 0xdb, 0xb4, 0xb4, 0x80, 0xcf, 0x80, 0xdb, 0xb4, 0xb4, 0x80, 0xc5, 0x80, 0xdb, 0xcf, 0xdb, 0xcf, 0xcf, 0xb4, 0xff, 0xcf, 0xdb, 0x80, 0xb4, 0x80, 0x80, 0xd6, 0xcf, 0xcf, 0x80, 0xcf, 0xcf, 0xcf, 0xe4, 0xcf, 0xc5, 0x80, 0x80, 0x80, 0xdb, 0x80, 0xb4, 0x80, 0xdb, 0x80, 0xb4, 0x80, 0xb4, 0xb4, 0xdb, 0xcf, 0xec, 0xe0, 0xcf, 0xe0, 0xe4, 0xd6, 0xdb, 0x80, 0xef, 0xf6, 0xea, 0xd6, 0xb4, 0xd6, 0xec, 0xc5, 0xec, 0xcf, 0xc5, 0x80, 0xdb, 0x80, 0x80, 0x80, 0x80, 0xb4, 0xdb, 0xcf, 0xdb, 0xd6, 0xe4, 0xc5, 0xdb, 0xb4, 0xcf, 0xc5, 0xcf, 0xd6, 0xe4, 0xc5, 0xf3, 0xe0, 0xec, 0xe0, 0xfd, 0xe7, 0xcf, 0xb4, 0x24, 0x1a, 0x0, 0xf1, 0x19, 0xe0, 0xec, 0xe0, 0xb4, 0xcf, 0xdb, 0xd6, 0xb4, 0xb4, 0xb4, 0x80, 0xdb, 0x80, 0xdb, 0xc5, 0xf1, 0xe7, 0xea, 0xf8, 0xec, 0xc5, 0xe4, 0xe0, 0xec, 0xc5, 0xcf, 0xb4, 0xe4, 0xd6, 0xe4, 0xdb, 0xf1, 0xdb, 0xdb, 0xc5, 0x22, 0xea, 0xe7, 0x80, 0xea, 0xf3, 0xec, 0xfb, 0xec, 0xe0, 0xdb, 0xb4, 0xe4, 0xe0, 0xec, 0xd6, 0xf3, 0xb4, 0xb4, 0x80, 0xd6, 0xd6, 0xe4, 0xdb, 0xcf, 0xb4, 0xdb, 0xdb, 0xf1, 0xe4, 0xcf, 0xb4, 
0xe4, 0xcf, 0xe4, 0xea, 0xea, 0xe4, 0xe4, 0xd6, 0xef, 0xb4, 0xc5, 0xc5, 0xd6, 0xc5, 0xe4, 0x80, 0x80, 0x80, 0xb4, 0x80, 0xcf, 0xc5, 0x0, 0xdb, 0xb4, 0xb4, 0xdb, 0x80, 0xb4, 0x80, 0x80, 0x80, 0xb4, 0x80, 0x80, 0x80, 0xb4, 0xc5, 0xcf, 0xb4, 0xcf, 0xcf, 0xe0, 0xcf, 0xcf, 0x80, 0xb4, 0x80, 0xec, 0xd6, 0xe0, 0xc5, 0xb4, 0xb4, 0xcf, 0x80, 0xcf, 0xb4, 0xcf, 0x80, 0xd6, 0xc5, 0x80, 0x80, 0xdb, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0xcf, 0x80, 0x80, 0x80, 0xcf, 0xb4, 0xd6, 0xb4, 0xd6, 0xb4, 0xf1, 0xc5, 0xc5, 0x80, 0xb4, 0x80, 0x11, 0xc5, 0xb4, 0x80, 0x80, 0x80, 0xb4, 0x80, 0xb4, 0x80, 0x80, 0x80, 0xc5, 0xcf, 0xb4, 0x80, 0xe4, 0xb4, 0x80, 0xb4, 0x80, 0x80, 0x80, 0x80, 0xcf, 0x80, 0xb4, 0x80, 0x80, 0x80, 0xb4, 0xb4, 0xd6, 0xc5, 0xb4, 0x80, 0xc5, 0x80, 0xb4, 0x80, 0xcf, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0xb4, 0xc5, 0xe4, 0xc5, 0xb4, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0xef, 0x80, 0xc5, 0xb4, 0xc5, 0xc5, 0xc5, 0xcf, 0xd6, 0xc5, 0xf5, 0xb4, 0xcf, 0x80, 0xe4, 0xc5, 0xb4, 0xe0, 0xd6, 0xb4, 0xcf, 0x80, 0xb4, 0xc5, 0xcf, 0x80, 0xe0, 0xc5, 0xd6, 0x80, 0x80, 0x80, 0xb4, 0x80, 0x80, 0x80, 0xb4, 0xb4, 0xc5, 0x80, 0xd6, 0xb4, 0xe0, 0xb4, 0xb4, 0xc5, 0xc5, 0xb4, 0xc5, 0x80, 0xc5, 0xc5, 0xd6, 0x80, 0x80, 0x80, 0xf8, 0x80, 0x80, 0xb4, 0xd6, 0x80, 0xd6, 0xb4, 0xb4, 0x80, 0xb4, 0x80, 0x80, 0x80, 0x80, 0xb4, 0xcf, 0xcf, 0xe7, 0x80, 0xb4, 0x80, 0xc5, 0x80, 0xc5, 0x80, 0xb4, 0x80, 0xb4, 0xb4, 0xc5, 0x80, 0xb4, 0x80, 0xc5, 0x80, 0xe0, 0x80, 0xef, 0x80, 0xcf, 0x80, 0xb4, 0x80, 0x80, 0x80, 0x80, 0x80, 0xb4, 0xb4, 0xfd, 0xb4, 0x80, 0xb4, 0xe0, 0x80, 0xcf, 0xb4, 0xb4, 0x80, 0xe7, 0xb4, 0xe7, 0xb4, 0xb4, 0xd6, 0xb4, 0x80, 0xe0, 0xc5, 0x80, 0x80, 0xc5, 0xc5, 0xd6, 0x80, 0xc5, 0x80, 0xdb, 0xc5, 0xea, 0x80, 0x80, 0x80, 0xb4, 0x80, 0xb4, 0x80, 0xe0, 0x80, 0x80, 0x80, 0xc5, 0xb4, 0x80, 0x80, 0xd6, 0x80, 0xb4, 0x80, 0xb4, 0x80, 0x80, 0xb4, 0xb4, 0x80, 0x80, 0x80, 0x80, 0x80, 0xb4, 0x80, 0xe7, 0xb4, 0xc5, 0x80, 0xd6, 0x80, 0xe7, 0xc5, 0xdb, 0x80, 0xdb, 
0xcf, 0xe0, 0x80, 0x80, 0x80, 0xc5, 0xb4, 0xdb, 0x80, 0xef, 0xc5, 0x80, 0x80, 0x80, 0x80, 0xc5, 0xb4, 0x80, 0x80, 0xb4, 0x80, 0x80, 0x80, 0xb4, 0x80, 0xd6, 0x80, 0xc5, 0xb4, 0xdb, 0x80, 0xb4, 0x80, 0x80, 0x80, 0xe0, 0x80, 0x80, 0xb4, 0xf6, 0xdb, 0xc5, 0x80, 0x80, 0x80, 0xc5, 0x80, 0x80, 0x80, 0xb4, 0x80, 0xc5, 0x80, 0xb4, 0xb4, 0xd6, 0xb4, 0xd6, 0x80, 0x80, 0xb4, 0xd6, 0xb4, 0x80, 0x80, 0xdb, 0xb4, 0xf3, 0xb4, 0xdb, 0x80, 0x80, 0x80, 0xc5, 0x80, 0x1d, 0xcf, 0x16, 0x12, 0x17, 0xc, 0x23, 0x2, 0x1, 0xc5, 0xc5, 0xb4, 0x80, 0x80, 0x80, 0x80, 0x80, 0xc5, 0xd6, 0xc5, 0xb4, 0xc5, 0xdb, 0x80, 0x80, 0x80, 0x80, 0x80, 0xb4, 0xb4, 0xdb, 0xc5, 0xe4, 0x80, 0xdb, 0x80, 0xc5, 0xb4, 0x80, 0x80, 0x78, 0x64, 0x7a, 0x64, 0x76, 0x60, 0x67, 0x55, 0x5a, 0x3a, 0x37, 0x24, 0xf6, 0xc5, 0x14, 0x17, 0x1e, 0x18, 0x31, 0x39, 0x44, 0x43, 0x49, 0x3e, 0x39, 0x23, 0x18, 0x17, 0x42, 0x41, 0x40, 0x34, 0x39, 0x34, 0x37, 0x30, 0x38, 0x23, 0x22, 0x9, 0x75, 0x63, 0x73, 0x63, 0x77, 0x58, 0x73, 0x5f, 0x64, 0x4d, 0x57, 0x41, 0x58, 0x46, 0x36, 0x32, 0x45, 0x51, 0x64, 0x56, 0x72, 0x61, 0x67, 0x57, 0x60, 0x52, 0x49, 0x4e, 0x61, 0x53, 0x62, 0x57, 0x67, 0x50, 0x66, 0x56, 0x63, 0x52, 0x5e, 0x3d, 0x6b, 0x5a, 0x70, 0x5d, 0x72, 0x50, 0x6c, 0x56, 0x67, 0x5a, 0x69, 0x49, 0x5a, 0x4f, 0x56, 0x50, 0x61, 0x50, 0x6c, 0x5d, 0x71, 0x5d, 0x6e, 0x56, 0x6c, 0x58, 0x69, 0x55, 0x6c, 0x57, 0x65, 0x57, 0x6c, 0x56, 0x68, 0x4c, 0x61, 0x58, 0x66, 0x44, 0x68, 0x52, 0x6b, 0x56, 0x6c, 0x60, 0x6e, 0x52, 0x72, 0x4e, 0x5b, 0x4d, 0x56, 0x4e, 0x68, 0x51, 0x69, 0x5a, 0x6a, 0x5a, 0x72, 0x54, 0x6f, 0x5d, 0x75, 0x5f, 0x67, 0x57, 0x65, 0x48, 0x5c, 0x4c, 0x66, 0x52, 0x68, 0x52, 0x63, 0x53, 0x64, 0x44, 0x5f, 0x44, 0x60, 0x49, 0x69, 0x60, 0x71, 0x51, 0x6c, 0x59, 0x6c, 0x53, 0x62, 0x4b, 0x5c, 0x4e, 0x61, 0x4c, 0x6a, 0x5c, 0x69, 0x4b, 0x6b, 0x56, 0x6b, 0x40, 0x5d, 0x43, 0x6c, 0x55, 0x60, 0x3f, 0x5f, 0x4d, 0x69, 0x52, 0x64, 0x4d, 0x64, 0x41, 0x59, 0x3b, 0x55, 0x35, 0x67, 0x55, 0x71, 0x5a, 0x69, 0x58, 0x65, 0x48, 0x5e, 0x4e, 0x6a, 0x55, 0x69, 0x55, 0x73, 
0x5c, 0x68, 0x35, 0x64, 0x57, 0x6a, 0x43, 0x57, 0x42, 0x63, 0x4c, 0x71, 0x57, 0x60, 0x43, 0x5a, 0x44, 0x5c, 0x3e, 0x5d, 0x3e, 0x57, 0x31, 0x46, 0x7, 0x56, 0x4b, 0x73, 0x52, 0x64, 0x4b, 0x5b, 0x4a, 0x66, 0x4f, 0x69, 0x4d, 0x69, 0x56, 0x6e, 0x3e, 0x4b, 0x37, 0x5c, 0x44, 0x56, 0x24, 0x4f, 0x2a, 0x46, 0x3b, 0x61, 0x4e, 0x61, 0x43, 0x5d, 0x45, 0x5e, 0x44, 0x50, 0x3c, 0x56, 0x2d, 0x45, 0x4, 0x50, 0x40, 0x64, 0x57, 0x69, 0x4d, 0x64, 0x50, 0x62, 0x4e, 0x67, 0x4e, 0x62, 0x56, 0x67, 0x3c, 0x48, 0x23, 0x58, 0x43, 0x53, 0x28, 0x3b, 0xcf, 0x48, 0x48, 0x5c, 0x40, 0x4d, 0x37, 0x4e, 0x3c, 0x56, 0x20, 0x3d, 0x11, 0x37, 0xc5, 0x4a, 0xd6, 0x2d, 0x2b, 0x57, 0x4e, 0x5a, 0x44, 0x60, 0x43, 0x5a, 0x3f, 0x5c, 0x41, 0x67, 0x50, 0x60, 0x2f, 0x36, 0x1c, 0x54, 0x3e, 0x4f, 0xc, 0x2d, 0x80, 0x36, 0x22, 0x50, 0x41, 0x5f, 0x3e, 0x50, 0x3f, 0x5f, 0x3d, 0x46, 0x19, 0x41, 0xfd, 0x33, 0xd6, 0x25, 0x2, 0x40, 0x2f, 0x59, 0x3a, 0x4f, 0x3d, 0x47, 0x23, 0x52, 0x32, 0x5c, 0x3e, 0x45, 0xcf, 0xd, 0xdb, 0x42, 0x2a, 0x3f, 0x80, 0x15, 0x80, 0xe4, 0xb4, 0x36, 0x28, 0x49, 0x39, 0x52, 0x3a, 0x5a, 0x39, 0x52, 0xb, 0x26, 0x80, 0x27, 0xc5, 0x2f, 0xf6, 0x45, 0x24, 0x40, 0x29, 0x52, 0x33, 0x43, 0xfc, 0x33, 0x1d, 0x44, 0x17, 0x2e, 0x80, 0x80, 0x80, 0xb4, 0x80, 0x80, 0x80, 0x24, 0x80, 0xb4, 0x80, 0x34, 0x32, 0x4c, 0x32, 0x4b, 0x30, 0x54, 0x3f, 0x51, 0x30, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0xe4, 0x80, 0x1, 0x80, 0x26, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0xfd, 0x80, 0x80, 0x80, 0xb4, 0x80, 0x29, 0xe0, 0xe0, 0xc5, 0x27, 0x80, 0x1b, 0x7, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x23, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0xf9, 0x80, 0x80, 0x80, 0x80, 0x80, 0xd6, 0x80, 0x80, 0x80, 0xb4, 0x80, 0xf5, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0xe0, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x1d, 0xe4, 0x11, 0xb4, 0x32, 0xa, 0x6, 0x80, 
0x80, 0x80, 0xd6, 0x80, 0x1c, 0xd, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x15, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0xf8, 0xcf, 0x10, 0x80, 0x17, 0x80, 0x1e, 0x80, 0xff, 0xec, 0x25, 0x80, 0x1c, 0x23, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x11, 0xb4, 0x2, 0x80, 0x30, 0x8, 0x15, 0x80, 0x6, 0x20, 0x36, 0xf8, 0x2e, 0x18, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0xf3, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0xd, 0x4, 0xa, 0xea, 0x37, 0x24, 0x2a, 0xc, 0x39, 0x26, 0x43, 0x5, 0x2d, 0x1f, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x14, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x7, 0xcf, 0xf, 0xef, 0x32, 0xd, 0x2a, 0x14, 0x37, 0x1, 0x32, 0x0, 0x38, 0x10, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x1c, 0x80, 0x80, 0x80, 0x28, 0xdb, 0xe4, 0xe0, 0xb4, 0x80, 0x16, 0xcf, 0x1b, 0xb4, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0xb4, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 
0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80};
https://github.com/zk-ml/tachikoma
tests/micro/testdata/kws/unknown.c
/* * This work is a derivative of "Speech Commands V2" by Google, used under CC BY 4.0. */ static const char input_unknown[1960] = { 0x78, 0x66, 0x7a, 0x63, 0x78, 0x62, 0x6d, 0x52, 0x58, 0x19, 0x0, 0xcf, 0x80, 0x80, 0x80, 0x80, 0xcf, 0xc5, 0xc5, 0xc5, 0x80, 0x80, 0x80, 0xc5, 0xc5, 0xe7, 0xe0, 0x80, 0x80, 0xc5, 0x80, 0xcf, 0xc5, 0xc5, 0x80, 0xc5, 0xcf, 0xe7, 0xe0, 0xdb, 0x72, 0x4b, 0x65, 0x60, 0x70, 0x50, 0x73, 0x59, 0x60, 0x4f, 0x4d, 0x3c, 0x11, 0xff, 0xc5, 0xc5, 0xdb, 0xdb, 0xcf, 0xec, 0xe7, 0xcf, 0xcf, 0x2, 0x31, 0x4d, 0x4c, 0xe7, 0xdb, 0xc5, 0x80, 0xcf, 0xef, 0xe4, 0x4, 0xff, 0xf5, 0xec, 0xef, 0x5, 0x6c, 0x4b, 0x56, 0x54, 0x6a, 0x47, 0x6f, 0x5b, 0x63, 0x55, 0x4c, 0x41, 0x2d, 0x22, 0x20, 0x3a, 0x4e, 0xf1, 0xcf, 0xfc, 0x19, 0xf3, 0xe7, 0x2d, 0x48, 0x4e, 0x5b, 0x80, 0xcf, 0xcf, 0x80, 0x80, 0x80, 0xdb, 0x3, 0xfb, 0xf5, 0xea, 0x0, 0xf5, 0x62, 0x40, 0x46, 0x47, 0x62, 0x41, 0x68, 0x53, 0x5f, 0x51, 0x57, 0x4e, 0x5b, 0x51, 0x58, 0x4b, 0x62, 0x2b, 0xef, 0x44, 0x5d, 0x41, 0x49, 0x5c, 0x62, 0x56, 0x58, 0x2f, 0xc5, 0xb4, 0xcf, 0xcf, 0xc5, 0xe0, 0xf9, 0xe7, 0x7, 0xf5, 0xa, 0xfc, 0x5b, 0x39, 0x35, 0x3d, 0x5c, 0x37, 0x5d, 0x49, 0x57, 0x49, 0x63, 0x57, 0x61, 0x55, 0x5e, 0x4d, 0x64, 0x4b, 0x63, 0x58, 0x5c, 0x49, 0x5f, 0x57, 0x6a, 0x56, 0x68, 0x41, 0x15, 0xf1, 0x7, 0xf1, 0xf9, 0xef, 0xfd, 0xfb, 0xc, 0xf6, 0x5, 0xef, 0x5a, 0x40, 0x4a, 0x44, 0x69, 0x57, 0x55, 0x50, 0x63, 0x49, 0x67, 0x5a, 0x72, 0x60, 0x70, 0x5a, 0x71, 0x61, 0x77, 0x63, 0x75, 0x5e, 0x71, 0x52, 0x6f, 0x5f, 0x78, 0x64, 0x78, 0x5d, 0x56, 0x57, 0x56, 0x28, 0x39, 0x3b, 0x58, 0x49, 0x3d, 0x33, 0x58, 0x3f, 0x2a, 0x50, 0x6c, 0x53, 0x6a, 0x5b, 0x69, 0x57, 0x6e, 0x5e, 0x73, 0x60, 0x74, 0x5a, 0x75, 0x61, 0x76, 0x60, 0x75, 0x59, 0x6e, 0x4c, 0x6b, 0x4c, 0x6b, 0x58, 0x74, 0x61, 0x6e, 0x36, 0x49, 0x41, 0x5b, 0x5d, 0x6e, 0x57, 0x5e, 0x44, 0x50, 0x30, 0x3a, 0x46, 0x5f, 0x3c, 0x64, 0x4e, 0x5d, 0x53, 0x69, 0x55, 0x6a, 0x57, 0x69, 0x52, 0x71, 0x5a, 0x6b, 0x47, 0x5f, 0x4d, 0x61, 0x43, 0x5b, 0x37, 0x59, 0x3e, 0x57, 0x3f, 0x53, 0xe, 0x44, 
0x47, 0x5c, 0x43, 0x62, 0x51, 0x5d, 0x3f, 0x4a, 0x2a, 0x39, 0x3f, 0x59, 0x37, 0x5c, 0x40, 0x58, 0x50, 0x65, 0x4e, 0x65, 0x52, 0x67, 0x54, 0x6f, 0x52, 0x59, 0x3b, 0x57, 0x48, 0x61, 0x49, 0x54, 0xf8, 0x3e, 0x2d, 0x4e, 0x3e, 0x50, 0xc, 0x3e, 0x53, 0x67, 0x2d, 0x4c, 0x3b, 0x4f, 0x2a, 0x43, 0x14, 0x46, 0x37, 0x50, 0x23, 0x58, 0x36, 0x57, 0x48, 0x63, 0x46, 0x67, 0x4e, 0x65, 0x55, 0x6d, 0x4c, 0x55, 0x35, 0x41, 0x3b, 0x58, 0x3f, 0x53, 0x2f, 0x44, 0x25, 0x48, 0x37, 0x58, 0xe4, 0x4d, 0x48, 0x53, 0x2b, 0x41, 0x28, 0x4a, 0x2d, 0x3d, 0x5, 0x44, 0x29, 0x44, 0x1c, 0x5c, 0x3b, 0x53, 0x35, 0x5a, 0x3b, 0x60, 0x45, 0x61, 0x50, 0x64, 0x3a, 0x43, 0x1f, 0x35, 0x23, 0x4d, 0x4a, 0x5e, 0x3c, 0x4d, 0x30, 0x51, 0x2e, 0x51, 0xf3, 0x4d, 0x3e, 0x50, 0x1a, 0x34, 0xfc, 0x44, 0x27, 0x37, 0xf8, 0x3a, 0x9, 0x32, 0x33, 0x5d, 0x37, 0x57, 0x35, 0x5d, 0x3b, 0x58, 0x31, 0x60, 0x45, 0x50, 0xff, 0x3a, 0xe0, 0x24, 0x3, 0x24, 0x3a, 0x4f, 0xe, 0x32, 0x1d, 0x46, 0x2d, 0x45, 0x4, 0x56, 0x3d, 0x50, 0x7, 0xa, 0x80, 0x3a, 0x1f, 0x31, 0xe0, 0x43, 0x3, 0x26, 0x3a, 0x5b, 0x34, 0x56, 0x30, 0x58, 0x2e, 0x53, 0x1f, 0x61, 0x3f, 0x3f, 0x80, 0x2f, 0xe4, 0x2f, 0x14, 0x30, 0x1e, 0x50, 0xe0, 0x22, 0x0, 0x4b, 0x2d, 0x39, 0xdb, 0x56, 0x3e, 0x46, 0x34, 0x2d, 0x80, 0x29, 0x5, 0x2f, 0xc5, 0x46, 0xfb, 0x1c, 0x3a, 0x56, 0x26, 0x53, 0x2b, 0x4e, 0x8, 0x53, 0x25, 0x65, 0x3a, 0xf, 0x80, 0xf5, 0x80, 0xb, 0xd6, 0x1e, 0x7, 0x55, 0xd6, 0x6, 0x80, 0x2c, 0x0, 0x11, 0xe4, 0x3e, 0x26, 0x41, 0x25, 0x2c, 0x80, 0x1d, 0x2, 0x2a, 0xd6, 0x45, 0xec, 0x4, 0x3c, 0x54, 0x20, 0x4d, 0x12, 0x49, 0xf6, 0x57, 0x32, 0x61, 0x23, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0xb, 0xe7, 0x3b, 0x80, 0xc5, 0x80, 0xc5, 0x80, 0xcf, 0xdb, 0x14, 0x1d, 0x3d, 0x36, 0x3f, 0x80, 0x19, 0xfc, 0x1f, 0x80, 0x40, 0xea, 0x8, 0x3c, 0x52, 0x22, 0x3a, 0xf8, 0x49, 0x3, 0x58, 0x21, 0x3c, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0xc5, 0x80, 0xf6, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x37, 0x2d, 0x3b, 0x1b, 0x31, 0x80, 0x16, 0xf5, 0xf3, 0x80, 0x3e, 0xcf, 0xec, 0x3b, 0x4e, 0x12, 0x4, 0x80, 
0x4f, 0x26, 0x5a, 0x1a, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0xfc, 0xb4, 0x2c, 0x0, 0x1b, 0x2a, 0x2f, 0x80, 0xc, 0xdb, 0xd6, 0x80, 0x44, 0xfd, 0x11, 0x33, 0x44, 0xd6, 0x8, 0x80, 0x4e, 0xe, 0x26, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0xb4, 0x80, 0x80, 0x80, 0x80, 0x80, 0x1, 0x80, 0xe7, 0x80, 0x80, 0x80, 0x80, 0x80, 0x14, 0xdb, 0xf8, 0x80, 0x48, 0x0, 0x7, 0xe7, 0x18, 0x80, 0xef, 0x80, 0x36, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0xdb, 0x80, 0x80, 0x80, 0x80, 0x80, 0x17, 0x80, 0x80, 0x80, 0x48, 0x6, 0x10, 0x80, 0xf1, 0x80, 0x24, 0x80, 0x7, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x3c, 0xf1, 0x7, 0x80, 0xc5, 0x80, 0x33, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0xe0, 0x80, 0x26, 0x80, 0xcf, 0x80, 0x80, 0x80, 0x80, 0x80, 0xb4, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0xf6, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 
0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 
0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 
0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80};
https://github.com/zk-ml/tachikoma
tests/micro/testdata/kws/yes.c
/* * This work is a derivative of "Speech Commands V2" by Google, used under CC BY 4.0. */ static const char input_yes[1960] = { 0x7c, 0x66, 0x79, 0x65, 0x7d, 0x67, 0x7c, 0x67, 0x7c, 0x66, 0x7c, 0x67, 0x7c, 0x67, 0x7d, 0x66, 0x7c, 0x67, 0x7d, 0x66, 0x7c, 0x67, 0x7d, 0x66, 0x7c, 0x67, 0x7d, 0x67, 0x7d, 0x67, 0x7d, 0x67, 0x7d, 0x67, 0x7d, 0x67, 0x7d, 0x67, 0x7d, 0x67, 0x52, 0x57, 0x78, 0x5a, 0x67, 0x53, 0x6f, 0x4b, 0x6d, 0x5c, 0x71, 0x52, 0x66, 0x4d, 0x6e, 0x56, 0x73, 0x50, 0x5f, 0x54, 0x6d, 0x55, 0x6a, 0x5b, 0x6f, 0x57, 0x68, 0x50, 0x71, 0x58, 0x6d, 0x57, 0x69, 0x55, 0x6a, 0x55, 0x6c, 0x59, 0x6c, 0x5a, 0x5b, 0x3c, 0x54, 0x44, 0x58, 0x4f, 0x66, 0x30, 0x58, 0x50, 0x61, 0x3d, 0x67, 0x36, 0x5b, 0x4d, 0x64, 0x51, 0x6a, 0x4d, 0x60, 0x4b, 0x61, 0x53, 0x69, 0x54, 0x60, 0x47, 0x5c, 0x4d, 0x63, 0x45, 0x64, 0x4d, 0x63, 0x4b, 0x67, 0x50, 0x68, 0x4d, 0x64, 0x4b, 0x64, 0x4e, 0x5f, 0x3d, 0x53, 0x42, 0x59, 0x39, 0x57, 0x43, 0x5e, 0x3a, 0x44, 0x3b, 0x56, 0x3c, 0x5c, 0x46, 0x66, 0x4c, 0x61, 0x3e, 0x5d, 0x49, 0x55, 0x48, 0x5d, 0x45, 0x5a, 0x48, 0x5f, 0x41, 0x59, 0x49, 0x5a, 0x46, 0x5d, 0x3b, 0x51, 0x3d, 0x4c, 0x44, 0x57, 0x37, 0x54, 0x43, 0x4f, 0xa, 0x32, 0x28, 0x5b, 0x3a, 0x5e, 0x47, 0x4d, 0x2b, 0x57, 0x4a, 0x5d, 0x34, 0x52, 0x3e, 0x50, 0x38, 0x54, 0x30, 0x53, 0x41, 0x57, 0x39, 0x5c, 0x3c, 0x53, 0x41, 0x5a, 0x1e, 0x4e, 0x41, 0x4d, 0x2c, 0x3e, 0x18, 0x4c, 0x1c, 0x36, 0x11, 0x4b, 0x32, 0x52, 0x2f, 0x50, 0x2d, 0x4e, 0x20, 0x50, 0x3c, 0x4a, 0x16, 0x44, 0x22, 0x48, 0x29, 0x4d, 0x34, 0x4e, 0x2c, 0x52, 0x2e, 0x46, 0x35, 0x4b, 0x14, 0x50, 0x33, 0x53, 0x3e, 0x50, 0x2d, 0x4a, 0x0, 0x4b, 0x3a, 0x47, 0x16, 0x45, 0x32, 0x45, 0x10, 0x42, 0x23, 0x49, 0x39, 0x41, 0x10, 0x48, 0x32, 0x4e, 0x30, 0x40, 0x34, 0x46, 0x39, 0x54, 0xf5, 0x49, 0x38, 0x53, 0x2c, 0x4a, 0x37, 0x51, 0x2c, 0x46, 0x2f, 0x4c, 0x2a, 0x4d, 0x2b, 0x3d, 0x2f, 0x4e, 0x20, 0x1e, 0x7, 0x41, 0x8, 0x39, 0xd, 0x46, 0x20, 0x3b, 0x2a, 0x3f, 0x20, 0x40, 0xe, 0x4e, 0x2e, 0x3e, 0x21, 0x4f, 0x16, 0x2e, 0x35, 0x54, 0x32, 0x41, 0x1c, 0x48, 0x2a, 
0x44, 0xc, 0x48, 0x21, 0x41, 0x19, 0x48, 0x2a, 0x3d, 0x21, 0x44, 0xb4, 0x41, 0x14, 0x3e, 0x2b, 0x45, 0x23, 0x50, 0x28, 0x3e, 0x1f, 0x43, 0x26, 0x46, 0x1b, 0x48, 0x12, 0x44, 0x2d, 0x47, 0x22, 0x3c, 0x32, 0x48, 0x26, 0x2f, 0x21, 0x45, 0x17, 0x43, 0x22, 0x43, 0x1d, 0x44, 0x28, 0x4d, 0x14, 0x56, 0x23, 0x40, 0x2c, 0x34, 0x80, 0x44, 0xf, 0x37, 0x16, 0x49, 0x21, 0x34, 0x1e, 0x3f, 0x22, 0x2b, 0x16, 0x34, 0x28, 0x43, 0x2d, 0x43, 0x11, 0x49, 0x1a, 0x46, 0x20, 0x46, 0x21, 0x3d, 0x17, 0x3d, 0x28, 0x3e, 0xf5, 0x33, 0x15, 0x39, 0x20, 0x4d, 0x2d, 0x36, 0x80, 0x1a, 0xdb, 0x3e, 0x17, 0x3b, 0x1f, 0x40, 0x17, 0x2b, 0xcf, 0x39, 0x2d, 0x4d, 0x2b, 0x35, 0xf6, 0x44, 0x29, 0x3d, 0x24, 0x30, 0x17, 0x3b, 0x28, 0x44, 0xd, 0x38, 0x20, 0x3b, 0xf3, 0x45, 0x19, 0x4c, 0x24, 0x37, 0x15, 0xf3, 0xb4, 0x3c, 0x28, 0x36, 0xf3, 0x44, 0x1b, 0x48, 0x25, 0x1d, 0xd6, 0x25, 0xcf, 0x3a, 0x9, 0x3f, 0xfc, 0x31, 0xf1, 0x41, 0x24, 0x44, 0x17, 0x45, 0x20, 0x42, 0x2, 0x33, 0xb4, 0x31, 0x1b, 0x43, 0x18, 0x2c, 0x14, 0x44, 0xa, 0x43, 0x7, 0x4, 0x80, 0x2b, 0xf3, 0x49, 0x2a, 0x47, 0xea, 0x3b, 0xec, 0x30, 0xfb, 0x3c, 0x18, 0x35, 0xff, 0x14, 0x18, 0x39, 0x7, 0x3c, 0x5, 0xa, 0xf, 0x35, 0x12, 0x3a, 0x0, 0x2d, 0xc, 0x46, 0x13, 0x3e, 0x23, 0x3f, 0x18, 0x3a, 0x16, 0x35, 0xf5, 0x3a, 0x1b, 0x4e, 0x2d, 0x3c, 0xef, 0x3c, 0xfc, 0x2e, 0xa, 0x32, 0xb4, 0x23, 0xfb, 0x3e, 0x16, 0x40, 0xe, 0x24, 0x3, 0x44, 0x24, 0x3b, 0xa, 0x19, 0x80, 0x28, 0x1a, 0x3b, 0xfb, 0x2a, 0xf, 0x31, 0x4, 0x3a, 0x4, 0x2d, 0xec, 0x29, 0xa, 0x25, 0xb4, 0x20, 0xb4, 0x35, 0x1b, 0x31, 0xb4, 0x7, 0xc, 0x4b, 0x1b, 0x1c, 0x80, 0x28, 0xd6, 0x23, 0x16, 0x2d, 0xf8, 0x35, 0xf6, 0x45, 0x11, 0x1d, 0xc5, 0x2a, 0xf6, 0x37, 0xea, 0x36, 0x11, 0x3f, 0x7, 0x36, 0x11, 0x2e, 0xf1, 0x3b, 0x11, 0x16, 0x2a, 0x3a, 0x6, 0x37, 0xcf, 0x18, 0x80, 0x30, 0xd6, 0x14, 0xf1, 0x16, 0xfc, 0x28, 0xe4, 0x3d, 0xe0, 0x2d, 0x80, 0x26, 0xec, 0x3d, 0xf8, 0x36, 0xcf, 0x11, 0xef, 0x2c, 0x16, 0x2d, 0xff, 0x35, 0x12, 0x3e, 0xa, 0x35, 0xd, 0x2f, 0xf9, 0x3f, 0x2d, 0x40, 0x80, 0xe7, 0x6, 0x2a, 0x80, 0x34, 0x4, 
0x5, 0x1d, 0x3d, 0x12, 0x1e, 0xa, 0x3f, 0x26, 0x2b, 0xfb, 0x2b, 0x80, 0x26, 0x80, 0x1e, 0x15, 0x24, 0xdb, 0x2a, 0xd6, 0x2b, 0x80, 0x6, 0xdb, 0x26, 0xfd, 0x37, 0xec, 0x2a, 0xec, 0x2, 0x1c, 0x3c, 0xe7, 0x11, 0x80, 0xf3, 0xfd, 0x3a, 0x1, 0x28, 0x17, 0x3a, 0xdb, 0xf6, 0x80, 0x2, 0xd6, 0x21, 0xcf, 0x2a, 0xdb, 0xf, 0x80, 0x2b, 0x17, 0x24, 0xcf, 0x2e, 0xcf, 0x30, 0xf8, 0xa, 0xf1, 0x26, 0xe7, 0x2d, 0xf5, 0x31, 0xef, 0x25, 0x80, 0x1, 0xfb, 0xd6, 0x80, 0x19, 0x1c, 0x37, 0xfb, 0x39, 0x11, 0x2c, 0x80, 0x23, 0x18, 0x33, 0xf8, 0x2e, 0xd, 0x34, 0xcf, 0x2b, 0xf1, 0x21, 0x80, 0x29, 0x80, 0x1f, 0xe4, 0xe, 0xb, 0x25, 0xc5, 0x1f, 0xc5, 0x21, 0x0, 0x19, 0x80, 0xef, 0x80, 0xb, 0xe4, 0x1c, 0xcf, 0x33, 0x16, 0x3e, 0x7, 0x21, 0xf5, 0x2f, 0x0, 0x2e, 0xef, 0x23, 0x6, 0x3d, 0xe7, 0x23, 0xe7, 0x26, 0xd6, 0x40, 0xfd, 0x30, 0x80, 0xa, 0xf5, 0x35, 0x0, 0x32, 0xf8, 0x20, 0xcf, 0x2d, 0xef, 0x32, 0x13, 0x3c, 0x1c, 0x0, 0xfc, 0x26, 0xe0, 0x26, 0xd6, 0xec, 0x80, 0x16, 0xf3, 0xb4, 0xf1, 0x31, 0xcf, 0x1f, 0x80, 0x7, 0xf6, 0x19, 0xfd, 0xe7, 0x80, 0x1, 0x80, 0x1c, 0x2, 0x2f, 0x80, 0x2f, 0x80, 0x26, 0x4, 0x1c, 0xb4, 0x4, 0xdb, 0x1e, 0xcf, 0x2a, 0x80, 0xdb, 0x80, 0x1a, 0xea, 0x31, 0xa, 0x18, 0x23, 0x39, 0xf8, 0x36, 0x22, 0x25, 0xc5, 0x1f, 0x80, 0x26, 0xef, 0x34, 0x80, 0x19, 0xe7, 0x2d, 0xe0, 0x17, 0xe4, 0x2f, 0x17, 0x34, 0x7, 0x31, 0xef, 0x25, 0xe0, 0x1e, 0xf8, 0x1d, 0xdb, 0xfd, 0xb, 0x11, 0x80, 0x11, 0x80, 0xe7, 0xcf, 0x32, 0x80, 0xc, 0xdb, 0xa, 0x80, 0xf9, 0x80, 0x14, 0x14, 0x35, 0x80, 0x2c, 0xf9, 0x1f, 0xdb, 0x1b, 0xea, 0x11, 0x80, 0x26, 0xc5, 0xb, 0xb4, 0xb, 0x80, 0x7, 0xef, 0x22, 0x6, 0x20, 0xe0, 0x0, 0x80, 0x1a, 0x1c, 0x25, 0xfb, 0x2f, 0x80, 0x80, 0xea, 0x31, 0x19, 0x3c, 0xf, 0x23, 0x80, 0x16, 0x0, 0x38, 0xf1, 0x21, 0xea, 0x2c, 0x80, 0x1e, 0xec, 0x2a, 0xe4, 0x7, 0x80, 0xf8, 0x80, 0x9, 0xd6, 0x20, 0xc5, 0x18, 0x80, 0x0, 0x14, 0x2a, 0xcf, 0x1d, 0x80, 0xc, 0xe4, 0x1c, 0xa, 0x3a, 0x24, 0x1b, 0x80, 0xf8, 0x80, 0x8, 0x80, 0x9, 0x80, 0x20, 0xdb, 0x20, 0xd6, 0x2d, 0x19, 0x1a, 0xd6, 0x25, 0x80, 0xb4, 0x80, 
0x38, 0x12, 0x17, 0xec, 0x14, 0x80, 0x20, 0xb4, 0x13, 0xdb, 0xb, 0x80, 0xfc, 0x15, 0x2f, 0x0, 0xdb, 0x80, 0xf5, 0x0, 0x8, 0xcf, 0xf8, 0xe4, 0xc, 0x13, 0x34, 0x80, 0x17, 0x80, 0xe7, 0x80, 0x11, 0xcf, 0x2f, 0xf6, 0x5, 0xdb, 0x27, 0x6, 0xf1, 0x80, 0x11, 0xc5, 0x24, 0x80, 0x11, 0xea, 0xa, 0x80, 0x23, 0x1, 0x16, 0xf3, 0xfb, 0x80, 0x15, 0x13, 0x33, 0x6, 0xfc, 0x80, 0xd6, 0x80, 0x10, 0x80, 0x1a, 0xf5, 0x11, 0x80, 0x9, 0xc5, 0xf, 0xcf, 0xef, 0xc5, 0x1b, 0xf9, 0x8, 0x80, 0x20, 0xc5, 0x1c, 0xdb, 0x1f, 0x80, 0x1e, 0xf3, 0x12, 0xea, 0x26, 0xcf, 0x16, 0xcf, 0x2, 0xd6, 0x7, 0x80, 0x24, 0x80, 0xf9, 0xcf, 0x1a, 0xb4, 0x26, 0xc5, 0xfb, 0x80, 0xfc, 0xc5, 0xef, 0xcf, 0x28, 0x80, 0x19, 0xcf, 0x28, 0xea, 0x2c, 0xc5, 0x2f, 0xc, 0x1, 0xec, 0x2d, 0xb4, 0x14, 0x80, 0xc, 0xec, 0xf5, 0xdb, 0x0, 0xc5, 0x20, 0x80, 0x21, 0x1, 0x0, 0x80, 0xa, 0x80, 0x29, 0x80, 0xdb, 0x7, 0xf, 0xb4, 0x23, 0xfb, 0x27, 0xdb, 0x22, 0xec, 0x21, 0x80, 0xd6, 0xb4, 0x15, 0xd6, 0x11, 0x80, 0x1f, 0xc5, 0x1a, 0xb4, 0x7, 0xe0, 0x21, 0xcf, 0x14, 0x16, 0x2a, 0x80, 0x80, 0x80, 0xa, 0xe7, 0x6, 0x80, 0xb4, 0x80, 0xf, 0x80, 0xfc, 0xe4, 0x13, 0x80, 0x19, 0xb4, 0xd, 0xb4, 0xdb, 0xc5, 0x18, 0x80, 0x21, 0xb4, 0x2d, 0xc5, 0xf1, 0xdb, 0xf, 0x80, 0x23, 0xd6, 0x28, 0x80, 0xea, 0xd6, 0xe7, 0xcf, 0x11, 0xe4, 0xec, 0x2, 0x20, 0xb4, 0x29, 0xdb, 0x6, 0x80, 0xef, 0x80, 0xe0, 0x80, 0x4, 0xc5, 0x32, 0xb4, 0x2f, 0x80, 0x7, 0xb4, 0xe0, 0x80, 0xf5, 0x80, 0x5, 0xb4, 0x8, 0xcf, 0x1f, 0xf6, 0x28, 0xdb, 0x1b, 0xff, 0x12, 0x80, 0x2a, 0xff, 0x2f, 0xfc, 0xcf, 0x80, 0xc, 0xf1, 0x21, 0x80, 0x2, 0x1, 0x2d, 0xf8, 0xf9, 0xf3, 0x25, 0x80, 0xdb, 0x80, 0xd6, 0x80, 0xc, 0xe4, 0x1b, 0xc5, 0xe0, 0xec, 0xec, 0x80, 0x6, 0xb4, 0xf5, 0xcf, 0xc, 0x80, 0x1, 0xf6, 0x1d, 0x80, 0xe7, 0x80, 0xf3, 0x80, 0xc5, 0x80, 0xf6, 0x80, 0x1b, 0xcf, 0x11, 0x80, 0xd6, 0x80, 0x80, 0x80, 0xdb, 0x80, 0xec, 0x80, 0x19, 0xe0, 0x2, 0x80, 0x19, 0xef, 0x16, 0x80, 0xd6, 0x80, 0xe7, 0x80, 0x11, 0xd6, 0xfc, 0x80, 0xa, 0xd6, 0x17, 0xe7, 0xe4, 0x80, 0xb4, 0xb4, 0x1d, 0xb4, 0xf, 0x80, 0x32, 0xfb, 0x1b, 
0xdb, 0x25, 0xec, 0xf5, 0x80, 0xd6, 0xef, 0x23, 0xec, 0x14, 0x80, 0xe0, 0xdb, 0xf9, 0x80, 0xcf, 0x80, 0xff, 0xb4, 0xd, 0x80, 0xe4, 0x80, 0x0, 0xc5, 0x1f, 0xdb, 0x23, 0xe0, 0x1, 0x80, 0x80, 0x80, 0xcf, 0x80, 0xb4, 0x80, 0xe0, 0xf6, 0x1d, 0xcf, 0xdb, 0x80, 0xdb, 0x80, 0x80, 0xb4, 0xb, 0x80, 0x80, 0x80, 0x1d, 0x80, 0x4, 0xe4, 0xf5, 0x80, 0x80, 0x80, 0x4, 0x80, 0xe4, 0x80, 0xfc, 0x80, 0xd6, 0x80, 0xf9, 0x80, 0x80, 0xb4, 0xc, 0x80, 0x26, 0xf9, 0x80, 0x80, 0xb4, 0x80, 0xf1, 0x80, 0x80, 0x80, 0xf3, 0xb4, 0x0, 0x80, 0x2, 0xcf, 0xb4, 0xea, 0x14, 0x80, 0x18, 0x80, 0xcf, 0x80, 0xd, 0x80, 0xe0, 0x80, 0x16, 0x80, 0xf8, 0xc5, 0x11, 0xb4, 0xf8, 0x80, 0x80, 0x80, 0x80, 0x80, 0xe4, 0xe, 0x1c, 0x80, 0xfc, 0xb4, 0x2a, 0x6, 0x31, 0x10, 0x1c, 0x80, 0xfd, 0xfc, 0xc, 0xe7, 0xea, 0x80, 0xe7, 0xd6, 0xd, 0xb4, 0x22, 0xf1, 0x7, 0xb4, 0x1d, 0xf6, 0x11, 0xd6, 0x28, 0x80, 0xc5, 0xb4, 0x1f, 0xe0, 0x80, 0x80, 0x80, 0x80, 0xfb, 0xe7, 0xc, 0x80, 0xdb, 0x80, 0xcf, 0x80, 0x80, 0x80, 0xd6, 0xc5, 0xf, 0x80, 0x80, 0xb4, 0x1b, 0x80, 0x0, 0xdb, 0xf5, 0x80, 0x80, 0x80, 0x15, 0xec, 0xf, 0x80, 0xd6, 0x80, 0x80, 0xb4, 0xc, 0xd6, 0xd6, 0x80, 0xd6, 0xd6, 0x9, 0x80, 0x80, 0x80, 0x3, 0xc5, 0x9, 0x80, 0x80, 0x80, 0xe4, 0x80, 0xf3, 0x80, 0x10, 0xea, 0xb4, 0x80, 0xdb, 0xf3, 0xa, 0x80, 0xc5, 0x80, 0xef, 0x80, 0xc5, 0x80, 0xec, 0x80, 0xff, 0x80, 0xa, 0xc5, 0xf1, 0x80, 0xb4, 0x80, 0xe0, 0x80, 0xfb, 0x80, 0xf8, 0x80, 0x3, 0x80, 0xc, 0xcf, 0x80, 0xd6, 0xe0, 0x80, 0x80, 0xb4, 0xcf, 0xc5, 0x28, 0xd6, 0x17, 0x80, 0x80, 0x80, 0xc5, 0xec, 0x14, 0x80, 0xf3, 0x80, 0xf8, 0x80, 0xf3, 0x80, 0xcf, 0x80, 0xf8, 0xe0, 0xea, 0x80, 0xc5, 0x0, 0x35, 0xea, 0x3, 0x80, 0x80, 0x80, 0x17, 0xf, 0x16, 0x80, 0x19, 0xd6, 0x80, 0x80, 0x80, 0x80, 0xe0, 0x80, 0xfd, 0x80, 0x4, 0xfc, 0x1e, 0x80, 0xef, 0x80, 0xef, 0xf1, 0x1f, 0x80, 0xfc, 0x80, 0xe7, 0x80, 0xff, 0x80, 0xf8, 0x80, 0x80, 0x80, 0x17, 0x80, 0xcf, 0xfb, 0x1c, 0x0, 0x26, 0x11, 0x16, 0x80, 0x80, 0xb4, 0x80, 0x80, 0x80, 0xcf, 0xf3, 0x80, 0x14, 0xb4, 0xdb, 0x5, 0x19, 0x80, 0xd6, 0x80, 0xf5, 
0x80, 0x17, 0xc5, 0x0, 0xc5, 0xcf, 0xc5, 0x4, 0x80, 0x5, 0x80, 0xa, 0x80, 0x19, 0xd6, 0x28, 0x5, 0xea, 0x80, 0x80, 0x80, 0x80, 0xec, 0xd, 0x80, 0x80, 0x80, 0x2, 0x80, 0xf1, 0x80, 0x80, 0x80, 0xd6, 0x80, 0xd6, 0x80, 0xdb, 0x80, 0xf3, 0x80, 0xff, 0x80, 0x80, 0xc5, 0x20, 0x80, 0xea, 0x80, 0xb4, 0x80, 0x22, 0x80, 0x80, 0x80, 0x80, 0x80, 0xc5, 0x80, 0x15, 0x80, 0x24, 0xc5, 0xfc, 0x80, 0xb, 0xe4, 0xcf, 0x80, 0x80, 0x80, 0xe7, 0xa, 0x1, 0xdb, 0x12, 0x80, 0xf5, 0x80, 0x80, 0x80, 0xa, 0xd6, 0xfd, 0xf5, 0xfc, 0xcf, 0xe, 0x80, 0xd6, 0x80, 0x80, 0x80, 0xef, 0x80, 0xfd, 0xc5, 0x12, 0xea, 0x20, 0x80, 0xe0, 0xdb, 0xc5, 0xd6, 0x1a, 0x80, 0x80, 0xd6, 0x14, 0xc5, 0x80, 0x80, 0x80, 0xb4, 0x80, 0x80, 0xc5, 0xb4, 0xe4, 0xb4, 0xf6, 0x3, 0xfc, 0x80, 0x80, 0x80, 0xfb, 0x80, 0x0, 0xe4, 0x80, 0x80, 0xb4, 0x80, 0x5, 0xb4, 0x80, 0x80, 0x19, 0xd6, 0xe0, 0x80, 0x80, 0x80, 0xb4, 0xc5, 0xb4, 0x80, 0xfb, 0x4, 0x13, 0x80, 0xf, 0xc5, 0x2, 0xec, 0xb4, 0xb4, 0xef, 0x80, 0xe0, 0x80, 0xcf, 0xf5, 0x1, 0x80, 0xe4, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x1, 0x1c, 0x80, 0x80, 0x80, 0xc5, 0xcf, 0xc, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0xfb, 0xc5, 0xcf, 0xdb, 0xcf, 0x80, 0xd6, 0x80, 0xea, 0x80, 0x80, 0x80, 0xd6, 0x80, 0x80, 0xcf, 0xf9, 0xdb, 0xf8, 0x80, 0xdb, 0xb4, 0xff, 0xe0, 0xb4, 0x80, 0x80, 0x80, 0x80, 0x80, 0xea, 0x80, 0xc5, 0x80, 0x80, 0x80, 0xe0, 0xec, 0xf5, 0x80, 0x80, 0x80, 0x17, 0x80, 0xcf, 0x80, 0xf8, 0xf6, 0xe7, 0x80, 0xd6, 0x80, 0xcf, 0x80, 0x80, 0x80, 0xb4, 0x80, 0xe4, 0x80, 0xf8, 0x80, 0x80, 0x80, 0xdb, 0x80, 0xfb, 0x80, 0x80, 0x80, 0xf3, 0x80, 0x11, 0xc5, 0x80, 0x80, 0xb4, 0x80, 0x80, 0x80, 0xd6, 0x80, 0xec, 0xb4, 0x14, 0xb4, 0xf3, 0x80, 0xf9, 0x80, 0x8, 0x80, 0x80, 0x80, 0xe7, 0x80, 0x80, 0xc5, 0xf1, 0x80, 0xf3, 0x80};
https://github.com/zk-ml/tachikoma
tests/micro/zephyr/conftest.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. pytest_plugins = [ "tvm.micro.testing.pytest_plugin", ] import pytest def pytest_addoption(parser): parser.addoption( "--west-cmd", default="west", help="Path to `west` command for flashing device." ) parser.addoption( "--use-fvp", action="store_true", default=False, help="If set true, use the FVP emulator to run the test", ) @pytest.fixture(scope="session") def west_cmd(request): return request.config.getoption("--west-cmd") @pytest.fixture def use_fvp(request): return request.config.getoption("--use-fvp") @pytest.fixture(autouse=True) def xfail_on_fvp(request, use_fvp): """mark the tests as xfail if running on fvp.""" if request.node.get_closest_marker("xfail_on_fvp"): if use_fvp: request.node.add_marker( pytest.mark.xfail(reason="checking corstone300 reliability on CI") ) def pytest_configure(config): config.addinivalue_line( "markers", "xfail_on_fvp(): mark test as xfail on fvp", )
https://github.com/zk-ml/tachikoma
tests/micro/zephyr/test_utils.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
"""Shared helpers for the microTVM Zephyr tests: board metadata lookup,
project generation/build, and C-header generation for test tensors."""

import io
import os
import json
import pathlib
import tarfile
import tempfile
import logging

import numpy as np
from urllib.request import urlopen, urlretrieve
from urllib.error import HTTPError
# NOTE(review): duplicate of the `import json` above — harmless, kept as-is.
import json
import requests

import tvm.micro
from tvm.micro import export_model_library_format
from tvm.micro.model_library_format import generate_c_interface_header
from tvm.micro.testing.utils import (
    mlf_extract_workspace_size_bytes,
    aot_transport_init_wait,
    aot_transport_find_message,
)

# Location of the Zephyr project template shipped with TVM; `boards.json`
# inside it describes every supported board (model, is_qemu, fpu, ...).
TEMPLATE_PROJECT_DIR = pathlib.Path(tvm.micro.get_microtvm_template_projects("zephyr"))
BOARDS = TEMPLATE_PROJECT_DIR / "boards.json"

_LOG = logging.getLogger(__name__)


def zephyr_boards() -> dict:
    """Returns a dict mapping board name to its target model string."""
    with open(BOARDS) as f:
        board_properties = json.load(f)

    boards_model = {board: info["model"] for board, info in board_properties.items()}
    return boards_model


# Computed once at import time; tests index it by board name.
ZEPHYR_BOARDS = zephyr_boards()


def qemu_boards(board: str):
    """Returns True if `board` is emulated under QEMU (per boards.json)."""
    with open(BOARDS) as f:
        board_properties = json.load(f)

    qemu_boards = [name for name, board in board_properties.items() if board["is_qemu"]]
    return board in qemu_boards


def has_fpu(board: str):
    """Returns True if `board` has an FPU (per boards.json)."""
    with open(BOARDS) as f:
        board_properties = json.load(f)

    fpu_boards = [name for name, board in board_properties.items() if board["fpu"]]
    return board in fpu_boards


def build_project(
    temp_dir, zephyr_board, west_cmd, mod, build_config, simd=False, extra_files_tar=None
):
    """Export `mod` in Model Library Format, generate a Zephyr AOT demo
    project from it under `temp_dir / "project"`, and build it.

    Returns (project, project_dir).
    """
    project_dir = temp_dir / "project"

    with tempfile.TemporaryDirectory() as tar_temp_dir:
        model_tar_path = pathlib.Path(tar_temp_dir) / "model.tar"
        export_model_library_format(mod, model_tar_path)
        # Workspace size is read back from the exported MLF metadata.
        workspace_size = mlf_extract_workspace_size_bytes(model_tar_path)
        project_options = {
            "extra_files_tar": extra_files_tar,
            "project_type": "aot_standalone_demo",
            "west_cmd": west_cmd,
            "verbose": bool(build_config.get("debug")),
            "board": zephyr_board,
            "compile_definitions": [
                # TODO(mehrdadh): It fails without offset.
                f"-DWORKSPACE_SIZE={workspace_size + 128}",
            ],
        }
        if simd:
            # CMSIS/SIMD builds need a larger main stack.
            project_options["config_main_stack_size"] = 1536

        project = tvm.micro.project.generate_project_from_mlf(
            str(TEMPLATE_PROJECT_DIR), project_dir, model_tar_path, project_options
        )
        project.build()
    return project, project_dir


def create_header_file(tensor_name, npy_data, output_path, tar_file):
    """
    This method generates a header file containing the data contained in the numpy array provided.
    It is used to capture the tensor data (for both inputs and expected outputs).

    The header is written directly into `tar_file` (an open tarfile) at
    `output_path/{tensor_name}.h`; nothing is written to disk.
    """
    header_file = io.StringIO()
    header_file.write("#include <stddef.h>\n")
    header_file.write("#include <stdint.h>\n")
    header_file.write("#include <dlpack/dlpack.h>\n")
    header_file.write(f"const size_t {tensor_name}_len = {npy_data.size};\n")

    # Map the numpy dtype to the matching C element type.
    # NOTE(review): the int8 branch lacks the trailing space the other
    # branches have; the emitted C is still valid, so it is left untouched.
    if npy_data.dtype == "int8":
        header_file.write(f"int8_t {tensor_name}[] =")
    elif npy_data.dtype == "int32":
        header_file.write(f"int32_t {tensor_name}[] = ")
    elif npy_data.dtype == "uint8":
        header_file.write(f"uint8_t {tensor_name}[] = ")
    elif npy_data.dtype == "float32":
        header_file.write(f"float {tensor_name}[] = ")
    else:
        raise ValueError("Data type not expected.")

    # Emit every element (any rank) as a flat C initializer list.
    header_file.write("{")
    for i in np.ndindex(npy_data.shape):
        header_file.write(f"{npy_data[i]}, ")
    header_file.write("};\n\n")

    header_file_bytes = bytes(header_file.getvalue(), "utf-8")
    raw_path = pathlib.Path(output_path) / f"{tensor_name}.h"
    ti = tarfile.TarInfo(name=str(raw_path))
    ti.size = len(header_file_bytes)
    ti.mode = 0o644
    ti.type = tarfile.REGTYPE
    tar_file.addfile(ti, io.BytesIO(header_file_bytes))


# TODO move CMSIS integration to microtvm_api_server.py
# see https://discuss.tvm.apache.org/t/tvm-capturing-dependent-libraries-of-code-generated-tir-initially-for-use-in-model-library-format/11080
def loadCMSIS(temp_dir):
    """Download the CMSIS DSP/NN include headers from GitHub into `temp_dir`.

    Uses the GitHub trees API to enumerate the include directories, then
    fetches each blob via raw.githubusercontent.com. Requires network access.
    """
    REPO_PATH = "ARM-software/CMSIS_5"
    BRANCH = "master"
    API_PATH_URL = f"https://api.github.com/repos/{REPO_PATH}/git/trees"
    RAW_PATH_URL = f"https://raw.githubusercontent.com/{REPO_PATH}/{BRANCH}"

    url = "https://api.github.com/repos/ARM-software/CMSIS_5/git/trees/master?recursive=1"
    r = requests.get(url)
    res = r.json()

    # Collect the tree SHAs of the three include directories we need.
    include_trees = {}
    for file in res["tree"]:
        if file["path"] in {"CMSIS/DSP/Include", "CMSIS/DSP/Include/dsp", "CMSIS/NN/Include"}:
            include_trees.update({file["path"]: file["sha"]})

    for path, sha in include_trees.items():
        url = f"{API_PATH_URL}/{sha}"
        content = json.load(urlopen(url))
        # The dsp/ subdirectory keeps its own folder; everything else goes
        # directly into temp_dir.
        temp_path = f"{temp_dir}"
        if path == "CMSIS/DSP/Include/dsp":
            temp_path = f"{temp_dir}/dsp"
        if not os.path.isdir(temp_path):
            os.makedirs(temp_path)
        for item in content["tree"]:
            if item["type"] == "blob":
                file_name = item["path"]
                file_url = f"{RAW_PATH_URL}/{path}/{file_name}"
                print(file_name, " ", file_url)
                # Best-effort download: a missing file is reported, not fatal.
                try:
                    urlretrieve(file_url, f"{temp_path}/{file_name}")
                except HTTPError as e:
                    print(f"Failed to download {file_url}: {e}")


def run_model(project):
    """Flash the built project, trigger one inference over the transport,
    and parse the "result:<label>:<ms>" reply.

    Returns (result, time) as ints (predicted label, inference time in ms).
    """
    project.flash()
    with project.transport() as transport:
        aot_transport_init_wait(transport)
        # '%' terminates a command on the device side — TODO confirm against
        # the aot_standalone_demo firmware.
        transport.write(b"infer%", timeout_sec=5)
        result_line = aot_transport_find_message(transport, "result", timeout_sec=60)
        result_line = result_line.strip("\n")
        result_line = result_line.split(":")
        result = int(result_line[1])
        time = int(result_line[2])
        _LOG.info(f"Result: {result}\ttime: {time} ms")
    return result, time


def generate_project(
    temp_dir, board, west_cmd, lowered, build_config, sample, output_shape, output_type, load_cmsis
):
    """Assemble the extra-files tarball (interface header, input/output data
    headers, optionally CMSIS includes) and build the Zephyr project for the
    `lowered` module.

    Returns (project, project_dir) from build_project().
    """
    with tempfile.NamedTemporaryFile() as tar_temp_file:
        with tarfile.open(tar_temp_file.name, "w:gz") as tf:
            with tempfile.TemporaryDirectory() as tar_temp_dir:
                model_files_path = os.path.join(tar_temp_dir, "include")
                os.mkdir(model_files_path)
                if load_cmsis:
                    # Pull CMSIS headers into include/ and ship them in the tar.
                    loadCMSIS(model_files_path)
                    tf.add(
                        model_files_path, arcname=os.path.relpath(model_files_path, tar_temp_dir)
                    )
                header_path = generate_c_interface_header(
                    lowered.libmod_name, ["input_1"], ["Identity"], [], {}, [], 0, model_files_path
                )
                tf.add(header_path, arcname=os.path.relpath(header_path, tar_temp_dir))

            # Test input plus a zeroed buffer for the expected output shape.
            create_header_file("input_data", sample, "include", tf)
            create_header_file(
                "output_data", np.zeros(shape=output_shape, dtype=output_type), "include", tf
            )

        project, project_dir = build_project(
            temp_dir,
            board,
            west_cmd,
            lowered,
            build_config,
            simd=load_cmsis,
            extra_files_tar=tar_temp_file.name,
        )

    return project, project_dir
https://github.com/zk-ml/tachikoma
tests/micro/zephyr/test_zephyr.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
"""End-to-end microTVM tests on Zephyr boards (QEMU, FVP, or hardware):
host-driven RPC sessions exercising TE ops, Relay graphs, ONNX models,
BYOC partitioning, large array transfer, and AutoTVM tuning."""

import logging
import os
import pathlib
# NOTE(review): duplicate of the `import logging` above — harmless.
import logging

import pytest
import numpy as np
import onnx
from PIL import Image

import tvm
import tvm.testing
import tvm.relay as relay
from tvm.relay.backend import Executor, Runtime
from tvm.relay.testing import byoc

from tvm.contrib import utils
from tvm.micro.testing.utils import check_tune_log

import test_utils

_LOG = logging.getLogger(__name__)


def _make_sess_from_op(
    temp_dir, model, zephyr_board, west_cmd, op_name, sched, arg_bufs, build_config, use_fvp
):
    """Build a single TE op for the micro target and open a host-driven
    session running it on the board."""
    runtime = Runtime("crt", {"system-lib": True})
    target = tvm.target.target.micro(model)
    target = tvm.target.Target(target=target, host=target)
    with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
        mod = tvm.build(sched, arg_bufs, target=target, runtime=runtime, name=op_name)

    return _make_session(temp_dir, zephyr_board, west_cmd, mod, build_config, use_fvp)


def _make_session(temp_dir, zephyr_board, west_cmd, mod, build_config, use_fvp):
    """Generate, build and flash a host_driven Zephyr project for `mod` and
    return an open tvm.micro.Session over its transport."""
    config_main_stack_size = None
    # QEMU boards need a larger main stack for the host-driven runtime.
    if test_utils.qemu_boards(zephyr_board):
        config_main_stack_size = 1536

    project_options = {
        "project_type": "host_driven",
        "west_cmd": west_cmd,
        "verbose": bool(build_config.get("debug")),
        "board": zephyr_board,
        "arm_fvp_path": "/opt/arm/FVP_Corstone_SSE-300/models/Linux64_GCC-6.4/FVP_Corstone_SSE-300_Ethos-U55",
        "use_fvp": bool(use_fvp),
    }
    if config_main_stack_size is not None:
        project_options["config_main_stack_size"] = config_main_stack_size

    project = tvm.micro.generate_project(
        str(test_utils.TEMPLATE_PROJECT_DIR),
        mod,
        temp_dir / "project",
        project_options,
    )
    project.build()
    project.flash()
    return tvm.micro.Session(project.transport())


def _make_add_sess(temp_dir, model, zephyr_board, west_cmd, build_config, use_fvp, dtype="int8"):
    """Session running `C[i] = A[i] + B[0]` (A: 2 elems, B: 1 elem) on-device."""
    A = tvm.te.placeholder((2,), dtype=dtype)
    B = tvm.te.placeholder((1,), dtype=dtype)
    C = tvm.te.compute(A.shape, lambda i: A[i] + B[0], name="C")
    sched = tvm.te.create_schedule(C.op)
    return _make_sess_from_op(
        temp_dir, model, zephyr_board, west_cmd, "add", sched, [A, B, C], build_config, use_fvp
    )


# The same test code can be executed on both the QEMU simulation and on real hardware.
@tvm.testing.requires_micro
@pytest.mark.skip_boards(["mps2_an521"])
@pytest.mark.xfail_on_fvp()
def test_add_uint(workspace_dir, board, west_cmd, microtvm_debug, use_fvp):
    """Test compiling the on-device runtime."""
    model = test_utils.ZEPHYR_BOARDS[board]
    build_config = {"debug": microtvm_debug}

    # NOTE: run test in a nested function so cPython will delete arrays before closing the session.
    def test_basic_add(sess):
        A_data = tvm.nd.array(np.array([2, 3], dtype="int8"), device=sess.device)
        assert (A_data.numpy() == np.array([2, 3])).all()
        B_data = tvm.nd.array(np.array([4], dtype="int8"), device=sess.device)
        assert (B_data.numpy() == np.array([4])).all()
        C_data = tvm.nd.array(np.array([0, 0], dtype="int8"), device=sess.device)
        assert (C_data.numpy() == np.array([0, 0])).all()

        system_lib = sess.get_system_lib()
        system_lib.get_function("add")(A_data, B_data, C_data)
        # [2,3] + broadcast 4 -> [6,7]
        assert (C_data.numpy() == np.array([6, 7])).all()

    with _make_add_sess(workspace_dir, model, board, west_cmd, build_config, use_fvp) as sess:
        test_basic_add(sess)


# The same test code can be executed on both the QEMU simulation and on real hardware.
@tvm.testing.requires_micro
@pytest.mark.skip_boards(["mps2_an521"])
@pytest.mark.xfail_on_fvp()
def test_add_float(workspace_dir, board, west_cmd, microtvm_debug, use_fvp):
    """Test compiling the on-device runtime."""
    model = test_utils.ZEPHYR_BOARDS[board]
    # float32 add requires hardware floating point support.
    if not test_utils.has_fpu(board):
        pytest.skip(f"FPU not enabled for {board}")

    build_config = {"debug": microtvm_debug}

    # NOTE: run test in a nested function so cPython will delete arrays before closing the session.
    def test_basic_add(sess):
        A_data = tvm.nd.array(np.array([2.5, 3.5], dtype="float32"), device=sess.device)
        assert (A_data.numpy() == np.array([2.5, 3.5])).all()
        B_data = tvm.nd.array(np.array([4.5], dtype="float32"), device=sess.device)
        assert (B_data.numpy() == np.array([4.5])).all()
        C_data = tvm.nd.array(np.array([0, 0], dtype="float32"), device=sess.device)
        assert (C_data.numpy() == np.array([0, 0])).all()

        system_lib = sess.get_system_lib()
        system_lib.get_function("add")(A_data, B_data, C_data)
        assert (C_data.numpy() == np.array([7, 8])).all()

    with _make_add_sess(
        workspace_dir, model, board, west_cmd, build_config, use_fvp, dtype="float32"
    ) as sess:
        test_basic_add(sess)


@tvm.testing.requires_micro
@pytest.mark.skip_boards(["mps2_an521"])
@pytest.mark.xfail_on_fvp()
def test_platform_timer(workspace_dir, board, west_cmd, microtvm_debug, use_fvp):
    """Test compiling the on-device runtime."""
    model = test_utils.ZEPHYR_BOARDS[board]
    build_config = {"debug": microtvm_debug}

    # NOTE: run test in a nested function so cPython will delete arrays before closing the session.
    def test_basic_add(sess):
        A_data = tvm.nd.array(np.array([2, 3], dtype="int8"), device=sess.device)
        assert (A_data.numpy() == np.array([2, 3])).all()
        B_data = tvm.nd.array(np.array([4], dtype="int8"), device=sess.device)
        assert (B_data.numpy() == np.array([4])).all()
        C_data = tvm.nd.array(np.array([0, 0], dtype="int8"), device=sess.device)
        assert (C_data.numpy() == np.array([0, 0])).all()

        system_lib = sess.get_system_lib()
        # Exercise the device-side platform timer via time_evaluator.
        time_eval_f = system_lib.time_evaluator(
            "add", sess.device, number=20, repeat=3, min_repeat_ms=40
        )
        result = time_eval_f(A_data, B_data, C_data)
        assert (C_data.numpy() == np.array([6, 7])).all()
        assert result.mean > 0
        assert len(result.results) == 3

    with _make_add_sess(workspace_dir, model, board, west_cmd, build_config, use_fvp) as sess:
        test_basic_add(sess)


@tvm.testing.requires_micro
@pytest.mark.skip_boards(["mps2_an521"])
@pytest.mark.xfail_on_fvp()
def test_relay(workspace_dir, board, west_cmd, microtvm_debug, use_fvp):
    """Testing a simple relay graph"""
    model = test_utils.ZEPHYR_BOARDS[board]
    build_config = {"debug": microtvm_debug}
    shape = (10,)
    dtype = "int8"

    # Construct Relay program: y = x*x + 1.
    x = relay.var("x", relay.TensorType(shape=shape, dtype=dtype))
    xx = relay.multiply(x, x)
    z = relay.add(xx, relay.const(np.ones(shape=shape, dtype=dtype)))
    func = relay.Function([x], z)
    ir_mod = tvm.IRModule.from_expr(func)

    runtime = Runtime("crt", {"system-lib": True})
    target = tvm.target.target.micro(model)
    with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
        mod = tvm.relay.build(ir_mod, target=target, runtime=runtime)

    with _make_session(workspace_dir, board, west_cmd, mod, build_config, use_fvp) as session:
        graph_mod = tvm.micro.create_local_graph_executor(
            mod.get_graph_json(), session.get_system_lib(), session.device
        )
        graph_mod.set_input(**mod.get_params())
        x_in = np.random.randint(10, size=shape[0], dtype=dtype)
        graph_mod.run(x=x_in)
        result = graph_mod.get_output(0).numpy()
        # Input must round-trip unchanged; output matches the host formula.
        tvm.testing.assert_allclose(graph_mod.get_input(0).numpy(), x_in)
        tvm.testing.assert_allclose(result, x_in * x_in + 1)


@tvm.testing.requires_micro
@pytest.mark.skip_boards(["mps2_an521"])
@pytest.mark.xfail_on_fvp()
def test_onnx(workspace_dir, board, west_cmd, microtvm_debug, use_fvp):
    """Testing a simple ONNX model."""
    model = test_utils.ZEPHYR_BOARDS[board]
    build_config = {"debug": microtvm_debug}

    this_dir = pathlib.Path(os.path.dirname(__file__))
    mnist_testdata = this_dir.parent / "testdata" / "mnist"
    digit_2 = Image.open(mnist_testdata / "digit-2.jpg").resize((28, 28))
    digit_2 = np.asarray(digit_2).astype("float32")
    digit_2 = np.expand_dims(digit_2, axis=0)

    digit_9 = Image.open(mnist_testdata / "digit-9.jpg").resize((28, 28))
    digit_9 = np.asarray(digit_9).astype("float32")
    digit_9 = np.expand_dims(digit_9, axis=0)

    # Load ONNX model and convert to Relay.
    onnx_model = onnx.load(mnist_testdata / "mnist-8.onnx")
    shape = {"Input3": (1, 1, 28, 28)}
    relay_mod, params = relay.frontend.from_onnx(onnx_model, shape=shape, freeze_params=True)
    relay_mod = relay.transform.DynamicToStatic()(relay_mod)

    # We add the link-params=True option to ensure the model parameters are compiled in.
    # There is currently a bug preventing the host_driven environment from receiving
    # the model weights when set using graph_mod.set_input().
    # See: https://github.com/apache/tvm/issues/7567
    target = tvm.target.target.micro(model)
    with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
        executor = Executor("graph", {"link-params": True})
        runtime = Runtime("crt", {"system-lib": True})
        lowered = relay.build(relay_mod, target, params=params, executor=executor, runtime=runtime)
        graph = lowered.get_graph_json()

    with _make_session(workspace_dir, board, west_cmd, lowered, build_config, use_fvp) as session:
        graph_mod = tvm.micro.create_local_graph_executor(
            graph, session.get_system_lib(), session.device
        )

        # Send the digit-2 image and confirm that the correct result is returned.
        graph_mod.set_input("Input3", tvm.nd.array(digit_2))
        graph_mod.run()
        result = graph_mod.get_output(0).numpy()
        assert np.argmax(result) == 2

        # Send the digit-9 image and confirm that the correct result is returned.
        graph_mod.set_input("Input3", tvm.nd.array(digit_9))
        graph_mod.run()
        result = graph_mod.get_output(0).numpy()
        assert np.argmax(result) == 9


def check_result(
    temp_dir,
    relay_mod,
    model,
    zephyr_board,
    west_cmd,
    map_inputs,
    out_shape,
    result,
    build_config,
    use_fvp,
):
    """Helper function to verify results: builds `relay_mod` for the board,
    runs it with `map_inputs`, and compares each output against `result`
    (scalar-or-list symmetric with `out_shape`) to 1e-5 tolerance."""
    TOL = 1e-5
    runtime = Runtime("crt", {"system-lib": True})
    target = tvm.target.target.micro(model)
    with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
        mod = tvm.relay.build(relay_mod, target=target, runtime=runtime)

    with _make_session(temp_dir, zephyr_board, west_cmd, mod, build_config, use_fvp) as session:
        rt_mod = tvm.micro.create_local_graph_executor(
            mod.get_graph_json(), session.get_system_lib(), session.device
        )
        rt_mod.set_input(**mod.get_params())
        for name, data in map_inputs.items():
            rt_mod.set_input(name, data)
        # NOTE(review): params are set a second time here — redundant but
        # harmless; kept as-is.
        rt_mod.set_input(**mod.get_params())
        rt_mod.run()

        out_shapes = out_shape if isinstance(out_shape, list) else [out_shape]
        results = result if isinstance(result, list) else [result]

        for idx, shape in enumerate(out_shapes):
            out = tvm.nd.empty(shape, device=session.device)
            out = rt_mod.get_output(idx, out)
            tvm.testing.assert_allclose(out.numpy(), results[idx], rtol=TOL, atol=TOL)


@tvm.testing.requires_micro
@pytest.mark.skip_boards(["mps2_an521"])
@pytest.mark.xfail_on_fvp()
def test_byoc_microtvm(workspace_dir, board, west_cmd, microtvm_debug, use_fvp):
    """This is a simple test case to check BYOC capabilities of microTVM"""
    model = test_utils.ZEPHYR_BOARDS[board]
    build_config = {"debug": microtvm_debug}
    x = relay.var("x", shape=(10, 10))
    w0 = relay.var("w0", shape=(10, 10))
    w1 = relay.var("w1", shape=(10, 10))
    w2 = relay.var("w2", shape=(10, 10))
    w3 = relay.var("w3", shape=(10, 10))
    w4 = relay.var("w4", shape=(10, 10))
    w5 = relay.var("w5", shape=(10, 10))
    w6 = relay.var("w6", shape=(10, 10))
    w7 = relay.var("w7", shape=(10, 10))

    # C compiler: two (add -> subtract -> multiply) chains offloaded to the
    # external "ccompiler" codegen by the annotator below.
    z0 = relay.add(x, w0)
    p0 = relay.subtract(z0, w1)
    q0 = relay.multiply(p0, w2)

    z1 = relay.add(x, w3)
    p1 = relay.subtract(z1, w4)
    q1 = relay.multiply(p1, w5)

    # Other parts on TVM
    z2 = relay.add(x, w6)
    q2 = relay.subtract(z2, w7)

    r = relay.concatenate((q0, q1, q2), axis=0)
    f = relay.Function([x, w0, w1, w2, w3, w4, w5, w6, w7], r)
    mod = tvm.IRModule()
    ann = byoc.CcompilerAnnotator()
    mod["main"] = ann.visit(f)
    mod = tvm.relay.transform.PartitionGraph()(mod)
    mod = tvm.relay.transform.InferType()(mod)

    x_data = np.random.rand(10, 10).astype("float32")
    w_data = []
    for _ in range(8):
        w_data.append(np.random.rand(10, 10).astype("float32"))

    map_inputs = {"w{}".format(i): w_data[i] for i in range(8)}
    map_inputs["x"] = x_data
    check_result(
        temp_dir=workspace_dir,
        relay_mod=mod,
        map_inputs=map_inputs,
        out_shape=(30, 10),
        result=np.concatenate(
            (
                ((x_data + w_data[0]) - w_data[1]) * w_data[2],
                ((x_data + w_data[3]) - w_data[4]) * w_data[5],
                x_data + w_data[6] - w_data[7],
            ),
            axis=0,
        ),
        model=model,
        zephyr_board=board,
        west_cmd=west_cmd,
        build_config=build_config,
        use_fvp=use_fvp,
    )


def _make_add_sess_with_shape(
    temp_dir, model, zephyr_board, west_cmd, shape, build_config, use_fvp
):
    """Session running `C[i] = A[i] + A[i]` for an arbitrary 1-D shape."""
    A = tvm.te.placeholder(shape, dtype="int8")
    C = tvm.te.compute(A.shape, lambda i: A[i] + A[i], name="C")
    sched = tvm.te.create_schedule(C.op)
    return _make_sess_from_op(
        temp_dir, model, zephyr_board, west_cmd, "add", sched, [A, C], build_config, use_fvp
    )


@pytest.mark.parametrize(
    "shape,",
    [
        pytest.param((1 * 1024,), id="(1*1024)"),
        pytest.param((4 * 1024,), id="(4*1024)"),
        pytest.param((16 * 1024,), id="(16*1024)"),
    ],
)
@tvm.testing.requires_micro
@pytest.mark.skip_boards(["mps2_an521"])
@pytest.mark.xfail_on_fvp()
def test_rpc_large_array(workspace_dir, board, west_cmd, microtvm_debug, shape, use_fvp):
    """Test large RPC array transfer."""
    model = test_utils.ZEPHYR_BOARDS[board]
    build_config = {"debug": microtvm_debug}

    # NOTE: run test in a nested function so cPython will delete arrays before closing the session.
    def test_tensors(sess):
        a_np = np.random.randint(low=-128, high=127, size=shape, dtype="int8")

        A_data = tvm.nd.array(a_np, device=sess.device)
        assert (A_data.numpy() == a_np).all()
        C_data = tvm.nd.array(np.zeros(shape, dtype="int8"), device=sess.device)
        assert (C_data.numpy() == np.zeros(shape)).all()

    with _make_add_sess_with_shape(
        workspace_dir, model, board, west_cmd, shape, build_config, use_fvp
    ) as sess:
        test_tensors(sess)


@pytest.mark.xfail(strict=False, reason="See https://github.com/apache/tvm/issues/10297")
@tvm.testing.requires_micro
def test_autotune_conv2d(workspace_dir, board, west_cmd, microtvm_debug, use_fvp):
    """Test AutoTune for microTVM Zephyr"""
    if board != "qemu_x86":
        pytest.xfail(f"Autotune fails on {board}.")

    runtime = Runtime("crt", {"system-lib": True})
    model = test_utils.ZEPHYR_BOARDS[board]
    build_config = {"debug": microtvm_debug}

    # Create a Relay model
    data_shape = (1, 3, 16, 16)
    weight_shape = (8, 3, 5, 5)
    data = relay.var("data", relay.TensorType(data_shape, "float32"))
    weight = relay.var("weight", relay.TensorType(weight_shape, "float32"))
    y = relay.nn.conv2d(
        data,
        weight,
        padding=(2, 2),
        kernel_size=(5, 5),
        kernel_layout="OIHW",
        out_dtype="float32",
    )
    f = relay.Function([data, weight], y)
    mod = tvm.IRModule.from_expr(f)
    mod = relay.transform.InferType()(mod)

    data_sample = np.random.rand(data_shape[0], data_shape[1], data_shape[2], data_shape[3]).astype(
        "float32"
    )
    weight_sample = np.random.rand(
        weight_shape[0], weight_shape[1], weight_shape[2], weight_shape[3]
    ).astype("float32")
    params = {mod["main"].params[1].name_hint: weight_sample}

    target = tvm.target.target.micro(model)
    pass_context = tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True})
    with pass_context:
        tasks = tvm.autotvm.task.extract_from_program(mod["main"], {}, target)
    assert len(tasks) > 0

    config_main_stack_size = None
    if test_utils.qemu_boards(board):
        config_main_stack_size = 1536

    project_options = {
        "board": board,
        "west_cmd": west_cmd,
        "verbose": 1,
        "project_type": "host_driven",
        "use_fvp": bool(use_fvp),
    }
    if config_main_stack_size is not None:
        project_options["config_main_stack_size"] = config_main_stack_size

    module_loader = tvm.micro.AutoTvmModuleLoader(
        template_project_dir=test_utils.TEMPLATE_PROJECT_DIR,
        project_options=project_options,
    )

    timeout = 200
    builder = tvm.autotvm.LocalBuilder(
        timeout=timeout,
        n_parallel=1,
        build_kwargs={"build_option": {"tir.disable_vectorize": True}},
        do_fork=True,
        build_func=tvm.micro.autotvm_build_func,
        runtime=runtime,
    )
    runner = tvm.autotvm.LocalRunner(
        number=1, repeat=1, timeout=timeout, module_loader=module_loader
    )

    measure_option = tvm.autotvm.measure_option(builder=builder, runner=runner)

    # Start from a clean tuning log so check_tune_log sees only this run.
    log_path = pathlib.Path("zephyr_autotune.log")
    if log_path.exists():
        log_path.unlink()

    n_trial = 10
    for task in tasks:
        tuner = tvm.autotvm.tuner.GATuner(task)
        tuner.tune(
            n_trial=n_trial,
            measure_option=measure_option,
            callbacks=[
                tvm.autotvm.callback.log_to_file(str(log_path)),
                tvm.autotvm.callback.progress_bar(n_trial, si_prefix="M"),
            ],
            si_prefix="M",
        )
        assert tuner.best_flops > 0

    check_tune_log(log_path)

    # Build without tuning
    with pass_context:
        lowered = tvm.relay.build(mod, target=target, runtime=runtime, params=params)

    temp_dir = utils.tempdir()
    with _make_session(temp_dir, board, west_cmd, lowered, build_config, use_fvp) as session:
        graph_mod = tvm.micro.create_local_graph_executor(
            lowered.get_graph_json(), session.get_system_lib(), session.device
        )
        graph_mod.set_input(**lowered.get_params())
        graph_mod.run(data=data_sample)
        expected_output = graph_mod.get_output(0).numpy()
        del graph_mod

    # Build using autotune logs
    with tvm.autotvm.apply_history_best(str(log_path)):
        with pass_context:
            lowered_tuned = tvm.relay.build(mod, target=target, runtime=runtime, params=params)

    temp_dir = utils.tempdir()
    with _make_session(temp_dir, board, west_cmd, lowered_tuned, build_config, use_fvp) as session:
        graph_mod = tvm.micro.create_local_graph_executor(
lowered_tuned.get_graph_json(), session.get_system_lib(), session.device ) graph_mod.set_input(**lowered_tuned.get_params()) graph_mod.run(data=data_sample) output = graph_mod.get_output(0).numpy() del graph_mod tvm.testing.assert_allclose(output, expected_output, rtol=1e-4, atol=1e-5) @tvm.testing.requires_micro def test_schedule_build_with_cmsis_dependency( workspace_dir, board, west_cmd, microtvm_debug, use_fvp ): """Test Relay schedule with CMSIS dependency. This test shows if microTVM Auto tuning with Zephyr breaks if CMSIS dependency was required for a schedule. """ model = test_utils.ZEPHYR_BOARDS[board] build_config = {"debug": microtvm_debug} target = tvm.target.target.micro(model, options=["-keys=arm_cpu,cpu"]) if not target.features.has_dsp: pytest.skip(f"ISA does not support DSP. target: {target}") # Create a Relay conv2d data_shape = (1, 16, 16, 3) weight_shape = (5, 5, 8, 3) data = relay.var("data", relay.TensorType(data_shape, "int8")) weight = relay.var("weight", relay.TensorType(weight_shape, "int8")) y = relay.nn.conv2d( data, weight, padding=(2, 2), kernel_size=(5, 5), data_layout="NHWC", kernel_layout="HWOI", out_dtype="int32", ) func = relay.Function([data, weight], y) ir_mod = tvm.IRModule.from_expr(func) runtime = Runtime("crt", {"system-lib": True}) with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}): mod = tvm.relay.build(ir_mod, target=target, runtime=runtime) project_options = { "project_type": "host_driven", "west_cmd": west_cmd, "verbose": bool(build_config.get("debug")), "board": board, "cmsis_path": os.getenv("CMSIS_PATH"), "use_fvp": bool(use_fvp), } project_dir = workspace_dir / "project" project = tvm.micro.generate_project( str(test_utils.TEMPLATE_PROJECT_DIR), mod, project_dir, project_options, ) project.build() with open(project_dir / "CMakeLists.txt", "r") as cmake_f: cmake_content = cmake_f.read() assert "CMSIS/DSP/Include" in cmake_content assert "CMSIS/DSP/Include/dsp" in cmake_content assert 
"CMSIS/DSP/Include" in cmake_content assert "CMSIS/NN/Include" in cmake_content if __name__ == "__main__": tvm.testing.main()
https://github.com/zk-ml/tachikoma
tests/micro/zephyr/test_zephyr_aot_exec.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import logging import os import pathlib import sys import logging import pytest import numpy as np import onnx from PIL import Image import tvm import tvm.testing import tvm.relay as relay from tvm.relay.backend import Executor, Runtime from tvm.relay.testing import byoc from tvm.contrib import utils from tvm.micro.testing.utils import check_tune_log from tvm._ffi import get_global_func, register_func import test_utils def _make_session(workspace_dir, zephyr_board, west_cmd, mod, build_config, use_fvp): config_main_stack_size = None if test_utils.qemu_boards(zephyr_board): # fyi: qemu_riscv64 seems to be the greediest stack user config_main_stack_size = 4096 else: # increase stack size for HW platforms config_main_stack_size = 2048 project_options = { "project_type": "host_driven", "west_cmd": west_cmd, "verbose": bool(build_config.get("debug")), "board": zephyr_board, "arm_fvp_path": "/opt/arm/FVP_Corstone_SSE-300/models/Linux64_GCC-6.4/FVP_Corstone_SSE-300_Ethos-U55", "use_fvp": bool(use_fvp), } if config_main_stack_size is not None: project_options["config_main_stack_size"] = config_main_stack_size project = tvm.micro.generate_project( str(test_utils.TEMPLATE_PROJECT_DIR), mod, workspace_dir / 
"project", project_options, ) project.build() project.flash() return tvm.micro.Session(project.transport()) @tvm.testing.requires_micro @pytest.mark.skip_boards(["mps2_an521"]) @pytest.mark.xfail_on_fvp() def test_relay(workspace_dir, board, west_cmd, microtvm_debug, use_fvp): """Testing a simple relay graph""" model = test_utils.ZEPHYR_BOARDS[board] build_config = {"debug": microtvm_debug} shape = (10,) dtype = "int8" # Construct Relay program. x = relay.var("x", relay.TensorType(shape=shape, dtype=dtype)) xx = relay.multiply(x, x) z = relay.add(xx, relay.const(np.ones(shape=shape, dtype=dtype))) func = relay.Function([x], z) ir_mod = tvm.IRModule.from_expr(func) runtime = Runtime("crt", {"system-lib": True}) executor = Executor("aot") target = tvm.target.target.micro(model) with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}): mod = tvm.relay.build(ir_mod, target=target, runtime=runtime, executor=executor) with _make_session(workspace_dir, board, west_cmd, mod, build_config, use_fvp) as session: aot_executor = tvm.runtime.executor.aot_executor.AotModule(session.create_aot_executor()) x_in = np.random.randint(10, size=shape[0], dtype=dtype) aot_executor.run(x=x_in) result = aot_executor.get_output(0).numpy() tvm.testing.assert_allclose(aot_executor.get_input(0).numpy(), x_in) tvm.testing.assert_allclose(result, x_in * x_in + 1) @tvm.testing.requires_micro @pytest.mark.skip_boards(["mps2_an521"]) @pytest.mark.xfail_on_fvp() def test_aot_executor(workspace_dir, board, west_cmd, microtvm_debug, use_fvp): """Test use of the AOT executor with microTVM.""" model = test_utils.ZEPHYR_BOARDS[board] build_config = {"debug": microtvm_debug} shape = (10,) dtype = "int8" print("test_relay: construct relay program\n") # Construct Relay program. 
relay_mod = tvm.parser.fromtext( """ #[version = "0.0.5"] def @main(%a : Tensor[(1, 2), uint8], %b : Tensor[(1, 2), uint8]) { %0 = %a + %b; %0 }""" ) runtime = Runtime("crt", {"system-lib": True}) executor = Executor("aot") target = tvm.target.target.micro(model) with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}): mod = tvm.relay.build(relay_mod, target=target, runtime=runtime, executor=executor) def do_test(): aot_executor = tvm.runtime.executor.aot_executor.AotModule(session.create_aot_executor()) assert aot_executor.get_input_index("a") == 0 assert aot_executor.get_input_index("b") == 1 assert aot_executor.get_num_inputs() == 2 assert aot_executor.get_num_outputs() == 1 A_np = np.array([[2, 3]], dtype="uint8") B_np = np.array([[4, 7]], dtype="uint8") A_data = aot_executor.get_input("a").copyfrom(A_np) B_data = aot_executor.get_input("b").copyfrom(B_np) aot_executor.run() out = aot_executor.get_output(0) assert (out.numpy() == np.array([6, 10])).all() B_np_new = np.array([[5, 8]]) aot_executor.set_input("b", B_np_new) assert (B_data.numpy() == B_np_new).all() with _make_session(workspace_dir, board, west_cmd, mod, build_config, use_fvp) as session: do_test() if __name__ == "__main__": tvm.testing.main()
https://github.com/zk-ml/tachikoma
tests/micro/zephyr/test_zephyr_aot_exec_standalone.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import io import logging import os import sys import logging import pathlib import tarfile import tempfile import pytest import numpy as np import tvm import tvm.testing from tvm.micro.project_api import server import tvm.relay as relay from tvm.relay.backend import Executor, Runtime from tvm.contrib.download import download_testdata import test_utils @tvm.testing.requires_micro @pytest.mark.skip_boards(["mps2_an521", "mps3_an547"]) def test_tflite(workspace_dir, board, west_cmd, microtvm_debug): """Testing a TFLite model.""" model = test_utils.ZEPHYR_BOARDS[board] input_shape = (1, 49, 10, 1) output_shape = (1, 12) build_config = {"debug": microtvm_debug} model_url = "https://github.com/tlc-pack/web-data/raw/25fe99fb00329a26bd37d3dca723da94316fd34c/testdata/microTVM/model/keyword_spotting_quant.tflite" model_path = download_testdata(model_url, "keyword_spotting_quant.tflite", module="model") # Import TFLite model tflite_model_buf = open(model_path, "rb").read() try: import tflite tflite_model = tflite.Model.GetRootAsModel(tflite_model_buf, 0) except AttributeError: import tflite.Model tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buf, 0) # Load TFLite model and convert to Relay 
relay_mod, params = relay.frontend.from_tflite( tflite_model, shape_dict={"input_1": input_shape}, dtype_dict={"input_1 ": "int8"} ) target = tvm.target.target.micro(model) executor = Executor( "aot", {"unpacked-api": True, "interface-api": "c", "workspace-byte-alignment": 4} ) runtime = Runtime("crt") with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}): lowered = relay.build(relay_mod, target, params=params, runtime=runtime, executor=executor) sample_url = "https://github.com/tlc-pack/web-data/raw/967fc387dadb272c5a7f8c3461d34c060100dbf1/testdata/microTVM/data/keyword_spotting_int8_6.pyc.npy" sample_path = download_testdata(sample_url, "keyword_spotting_int8_6.pyc.npy", module="data") sample = np.load(sample_path) project, _ = test_utils.generate_project( workspace_dir, board, west_cmd, lowered, build_config, sample, output_shape, "int8", load_cmsis=False, ) result, time = test_utils.run_model(project) assert result == 6 @tvm.testing.requires_micro @pytest.mark.skip_boards(["mps2_an521", "mps3_an547"]) def test_qemu_make_fail(workspace_dir, board, west_cmd, microtvm_debug): """Testing QEMU make fail.""" if board not in ["qemu_x86", "mps2_an521", "mps3_an547"]: pytest.skip(msg="Only for QEMU targets.") model = test_utils.ZEPHYR_BOARDS[board] build_config = {"debug": microtvm_debug} shape = (10,) dtype = "float32" # Construct Relay program. 
x = relay.var("x", relay.TensorType(shape=shape, dtype=dtype)) xx = relay.multiply(x, x) z = relay.add(xx, relay.const(np.ones(shape=shape, dtype=dtype))) func = relay.Function([x], z) ir_mod = tvm.IRModule.from_expr(func) target = tvm.target.target.micro(model) executor = Executor("aot") runtime = Runtime("crt") with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}): lowered = relay.build(ir_mod, target, executor=executor, runtime=runtime) sample = np.zeros(shape=shape, dtype=dtype) project, project_dir = test_utils.generate_project( workspace_dir, board, west_cmd, lowered, build_config, sample, shape, dtype, load_cmsis=False, ) file_path = pathlib.Path(project_dir) / "build" / "build.ninja" assert file_path.is_file(), f"[{file_path}] does not exist." # Remove a file to create make failure. os.remove(file_path) project.flash() with pytest.raises(server.JSONRPCError) as excinfo: project.transport().open() assert "QEMU setup failed" in str(excinfo.value) if __name__ == "__main__": tvm.testing.main()
https://github.com/zk-ml/tachikoma
tests/micro/zephyr/test_zephyr_armv7m.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from json import load import logging import os import pathlib import sys import tarfile import tempfile import pytest import numpy as np import tvm import tvm.rpc import tvm.micro import tvm.testing from tvm import relay from tvm.contrib.download import download_testdata from tvm.relay.backend import Executor, Runtime import test_utils _LOG = logging.getLogger(__name__) def _open_tflite_model(): # Import TFLite model model_url = "https://github.com/tlc-pack/web-data/raw/b2f3c02427b67267a00fd968ba1fce28fc833028/testdata/microTVM/model/mnist_model_quant.tflite" model_path = download_testdata(model_url, "mnist_model_quant.tflite", module="model") tflite_model_buf = open(model_path, "rb").read() try: import tflite tflite_model = tflite.Model.GetRootAsModel(tflite_model_buf, 0) except AttributeError: import tflite.Model tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buf, 0) relay_mod, params = relay.frontend.from_tflite(tflite_model) return relay_mod, params def _get_test_data(testdata_dir): from PIL import Image image_files = ["digit-2.jpg"] for file in image_files: img = Image.open(testdata_dir / file).resize((28, 28)) img = np.asarray(img).astype("uint8") sample = np.reshape(img, -1) 
output_shape = (1, 10) return sample, output_shape def _apply_desired_layout_simd(relay_mod): desired_layouts = {"qnn.conv2d": ["NHWC", "HWOI"], "nn.conv2d": ["NHWC", "HWOI"]} seq = tvm.transform.Sequential( [relay.transform.RemoveUnusedFunctions(), relay.transform.ConvertLayout(desired_layouts)] ) with tvm.transform.PassContext(opt_level=3): return seq(relay_mod) def _apply_desired_layout_no_simd(relay_mod): desired_layouts = {"qnn.conv2d": ["NHWC", "HWIO"], "nn.conv2d": ["NHWC", "HWIO"]} seq = tvm.transform.Sequential( [relay.transform.RemoveUnusedFunctions(), relay.transform.ConvertLayout(desired_layouts)] ) with tvm.transform.PassContext(opt_level=3): return seq(relay_mod) @tvm.testing.requires_micro @pytest.mark.skip_boards(["mps2_an521"]) @pytest.mark.xfail(reason="due https://github.com/apache/tvm/issues/12619") def test_armv7m_intrinsic(workspace_dir, board, west_cmd, microtvm_debug): """Testing a ARM v7m SIMD extension.""" if board not in [ "mps2_an521", "stm32f746g_disco", "nucleo_f746zg", "nucleo_l4r5zi", "nrf5340dk_nrf5340_cpuapp", ]: pytest.skip(msg="Platform does not support ARM v7m SIMD extension.") model = test_utils.ZEPHYR_BOARDS[board] build_config = {"debug": microtvm_debug} this_dir = pathlib.Path(os.path.dirname(__file__)) testdata_dir = this_dir.parent / "testdata" / "mnist" relay_mod, params = _open_tflite_model() sample, output_shape = _get_test_data(testdata_dir) relay_mod_simd = _apply_desired_layout_simd(relay_mod) # kernel layout "HWIO" is not supported by arm_cpu SIMD extension (see tvm\python\relay\op\strategy\arm_cpu.py) relay_mod_no_simd = _apply_desired_layout_no_simd(relay_mod) target = tvm.target.target.micro(model, options=["-keys=cpu"]) target_simd = tvm.target.target.micro(model, options=["-keys=arm_cpu,cpu"]) executor = Executor("aot", {"unpacked-api": True, "interface-api": "c"}) runtime = Runtime("crt") workspace_dir_simd = workspace_dir / "simd" workspace_dir_no_simd = workspace_dir / "nosimd" 
os.makedirs(workspace_dir_simd, exist_ok=True) os.makedirs(workspace_dir_no_simd, exist_ok=True) with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}): lowered_simd = relay.build( relay_mod_simd, target_simd, params=params, runtime=runtime, executor=executor ) lowered_no_simd = relay.build( relay_mod_no_simd, target, params=params, runtime=runtime, executor=executor ) simd_project, _ = test_utils.generate_project( workspace_dir_simd, board, west_cmd, lowered_simd, build_config, sample, output_shape, "float32", load_cmsis=True, ) result_simd, time_simd = test_utils.run_model(simd_project) no_simd_project, _ = test_utils.generate_project( workspace_dir_no_simd, board, west_cmd, lowered_no_simd, build_config, sample, output_shape, "float32", load_cmsis=False, ) result_no_simd, time_no_simd = test_utils.run_model(no_simd_project) assert result_no_simd == result_simd == 2 # Time performance measurements on QEMU emulator are always equal to zero. if board not in [ "mps2_an521", "mps3_an547", ]: assert time_no_simd > time_simd if __name__ == "__main__": tvm.testing.main()
https://github.com/zk-ml/tachikoma
tests/python/all-platform-minimal-test/test_minimal_target_codegen_llvm.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """LLVM enablement tests.""" import tvm import tvm.testing from tvm import te from tvm import topi from tvm.contrib import utils import numpy as np import ctypes import math import re @tvm.testing.requires_llvm def test_llvm_add_pipeline(): """all-platform-minimal-test: Check LLVM enablement.""" nn = 1024 n = tvm.runtime.convert(nn) A = te.placeholder((n,), name="A") B = te.placeholder((n,), name="B") AA = te.compute((n,), lambda *i: A(*i), name="A") BB = te.compute((n,), lambda *i: B(*i), name="B") T = te.compute(A.shape, lambda *i: AA(*i) + BB(*i), name="T") C = te.compute(A.shape, lambda *i: T(*i), name="C") s = te.create_schedule(C.op) xo, xi = s[C].split(C.op.axis[0], factor=4) xo1, xo2 = s[C].split(xo, factor=13) s[C].parallel(xo2) s[C].pragma(xo1, "parallel_launch_point") s[C].pragma(xo2, "parallel_stride_pattern") s[C].pragma(xo2, "parallel_barrier_when_finish") s[C].vectorize(xi) def check_llvm(): # Specifically allow offset to test codepath when offset is available Ab = tvm.tir.decl_buffer( A.shape, A.dtype, elem_offset=te.size_var("Aoffset"), offset_factor=8, name="A" ) binds = {A: Ab} # BUILD and invoke the kernel. 
f = tvm.build(s, [A, B, C], "llvm", binds=binds) dev = tvm.cpu(0) # launch the kernel. n = nn a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev) b = tvm.nd.array(np.random.uniform(size=n).astype(B.dtype), dev) c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev) f(a, b, c) tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy()) check_llvm()
https://github.com/zk-ml/tachikoma
tests/python/all-platform-minimal-test/test_runtime_ndarray.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Basic runtime enablement test.""" import tvm from tvm import te import numpy as np import tvm.testing @tvm.testing.uses_gpu def test_nd_create(): for target, dev in tvm.testing.enabled_targets(): for dtype in ["uint8", "int8", "uint16", "int16", "uint32", "int32", "float32"]: x = np.random.randint(0, 10, size=(3, 4)) x = np.array(x, dtype=dtype) y = tvm.nd.array(x, device=dev) z = y.copyto(dev) assert y.dtype == x.dtype assert y.shape == x.shape assert isinstance(y, tvm.nd.NDArray) np.testing.assert_equal(x, y.numpy()) np.testing.assert_equal(x, z.numpy()) # no need here, just to test usablity dev.sync() def test_fp16_conversion(): n = 100 for (src, dst) in [("float32", "float16"), ("float16", "float32")]: A = te.placeholder((n,), dtype=src) B = te.compute((n,), lambda i: A[i].astype(dst)) s = te.create_schedule([B.op]) func = tvm.build(s, [A, B], "llvm") x_tvm = tvm.nd.array(100 * np.random.randn(n).astype(src) - 50) y_tvm = tvm.nd.array(100 * np.random.randn(n).astype(dst) - 50) func(x_tvm, y_tvm) expected = x_tvm.numpy().astype(dst) real = y_tvm.numpy() tvm.testing.assert_allclose(expected, real) def test_dtype(): dtype = tvm.DataType("handle") assert dtype.type_code == tvm.DataTypeCode.HANDLE 
if __name__ == "__main__": test_nd_create() test_fp16_conversion() test_dtype()
https://github.com/zk-ml/tachikoma
tests/python/all-platform-minimal-test/test_runtime_packed_func.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Test packed function FFI.""" import tvm from tvm import te import tvm.testing import numpy as np def test_get_global(): targs = (10, 10.0, "hello") # register into global function table @tvm.register_func def my_packed_func(*args): assert tuple(args) == targs return 10 # get it out from global function table f = tvm.get_global_func("my_packed_func") assert isinstance(f, tvm.runtime.PackedFunc) y = f(*targs) assert y == 10 def test_get_callback_with_node(): x = tvm.runtime.convert(10) def test(y): assert y.handle != x.handle return y f2 = tvm.runtime.convert(test) # register into global function table @tvm.register_func def my_callback_with_node(y, f): assert y == x return f(y) # get it out from global function table f = tvm.get_global_func("my_callback_with_node") assert isinstance(f, tvm.runtime.PackedFunc) y = f(x, f2) assert y.value == 10 def test_return_func(): def addy(y): def add(x): return tvm.runtime.convert(x + y) return add myf = tvm.runtime.convert(addy) f = myf(10) assert f(11).value == 21 def test_convert(): # convert a function to tvm function targs = (10, 10.0, "hello", 10) def myfunc(*args): assert tuple(args) == targs f = tvm.runtime.convert(myfunc) assert isinstance(f, 
tvm.runtime.PackedFunc) def test_byte_array(): s = "hello" a = bytearray(s, encoding="ascii") def myfunc(ss): assert ss == a f = tvm.runtime.convert(myfunc) f(a) def test_empty_array(): def myfunc(ss): assert tuple(ss) == () x = tvm.runtime.convert(()) tvm.runtime.convert(myfunc)(x) def test_device(): def test_device_func(dev): assert tvm.cuda(7) == dev return tvm.cpu(0) x = test_device_func(tvm.cuda(7)) assert x == tvm.cpu(0) x = tvm.opencl(10) x = tvm.testing.device_test(x, x.device_type, x.device_id) assert x == tvm.opencl(10) def test_rvalue_ref(): def callback(x, expected_count): assert expected_count == tvm.testing.object_use_count(x) return x f = tvm.runtime.convert(callback) def check0(): x = tvm.tir.Var("x", "int32") assert tvm.testing.object_use_count(x) == 1 f(x, 2) y = f(x._move(), 1) assert x.handle.value == None def check1(): x = tvm.tir.Var("x", "int32") assert tvm.testing.object_use_count(x) == 1 y = f(x, 2) z = f(x._move(), 2) assert x.handle.value == None assert y.handle.value is not None check0() check1() def test_numpy_scalar(): maxint = (1 << 63) - 1 assert tvm.testing.echo(np.int64(maxint)) == maxint def test_ndarray_args(): def check(arr): assert not arr.is_view assert tvm.testing.object_use_count(arr) == 2 fcheck = tvm.runtime.convert(check) x = tvm.nd.array([1, 2, 3]) fcheck(x) assert tvm.testing.object_use_count(x) == 1 if __name__ == "__main__": test_ndarray_args() test_numpy_scalar() test_rvalue_ref() test_empty_array() test_get_global() test_get_callback_with_node() test_convert() test_return_func() test_byte_array() test_device()
https://github.com/zk-ml/tachikoma
tests/python/ci/__init__.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Infrastructure and tests for CI scripts"""
https://github.com/zk-ml/tachikoma
tests/python/ci/test_ci.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Test various CI scripts and GitHub Actions workflows""" import shutil import subprocess import json import textwrap import sys import logging from pathlib import Path import pytest import tvm.testing from .test_utils import REPO_ROOT, TempGit, run_script # pylint: disable=wrong-import-position,wrong-import-order sys.path.insert(0, str(REPO_ROOT / "ci")) sys.path.insert(0, str(REPO_ROOT / "ci" / "scripts")) import scripts # pylint: enable=wrong-import-position,wrong-import-order def parameterize_named(**kwargs): keys = next(iter(kwargs.values())).keys() return pytest.mark.parametrize( ",".join(keys), [tuple(d.values()) for d in kwargs.values()], ids=kwargs.keys() ) # pylint: disable=line-too-long TEST_DATA_SKIPPED_BOT = { "found-diff-no-additional": { "main_xml_file": "unittest/file1.xml", "main_xml_content": """<?xml version="1.0" encoding="utf-8"?> <testsuites> <testsuite errors="0" failures="0" hostname="13e7c5f749d8" name="python-unittest-gpu-0-shard-1-ctypes" skipped="102" tests="165" time="79.312" timestamp="2022-08-10T22:39:36.673781"> <testcase classname="ctypes.tests.python.unittest.test_auto_scheduler_search_policy" name="test_sketch_search_policy_cuda_rpc_runner" time="9.679"> </testcase> 
</testsuite> </testsuites> """, "pr_xml_file": "unittest/file2.xml", "pr_xml_content": """<?xml version="1.0" encoding="utf-8"?> <testsuites> <testsuite errors="0" failures="0" hostname="13e7c5f749d8" name="python-unittest-gpu-0-shard-1-ctypes" skipped="102" tests="165" time="79.312" timestamp="2022-08-10T22:39:36.673781"> <testcase classname="ctypes.tests.python.unittest.test_auto_scheduler_search_policy" name="test_sketch_search_policy_cuda_rpc_runner" time="9.679"> <skipped message="This test is skipped" type="pytest.skip"> Skipped </skipped> </testcase> <testcase classname="ctypes.tests.python.unittest.test_roofline" name="test_estimate_peak_bandwidth[cuda]" time="4.679"> <skipped message="This is another skippe test" type="pytest.skip"> Skipped </skipped> </testcase> </testsuite> </testsuites> """, "additional_tests_to_check": """{ "unittest": ["dummy_class#dummy_test"], "unittest_GPU": ["another_dummy_class#another_dummy_test"] } """, "target_url": "https://ci.tlcpack.ai/job/tvm/job/PR-11594/3/display/redirect", "s3_prefix": "tvm-jenkins-artifacts-prod", "jenkins_prefix": "ci.tlcpack.ai", "common_main_build": """{"build_number": "4115", "state": "success"}""", "commit_sha": "sha1234", "expected_body": "The list below shows tests that ran in main sha1234 but were skipped in the CI build of sha1234:\n```\nunittest -> ctypes.tests.python.unittest.test_auto_scheduler_search_policy#test_sketch_search_policy_cuda_rpc_runner\nunittest -> ctypes.tests.python.unittest.test_roofline#test_estimate_peak_bandwidth[cuda]\n```\nA detailed report of ran tests is [here](https://ci.tlcpack.ai/job/tvm/job/PR-11594/3/testReport/).", }, "found-diff-skipped-additional": { "main_xml_file": "unittest/file1.xml", "main_xml_content": """<?xml version="1.0" encoding="utf-8"?> <testsuites> <testsuite errors="0" failures="0" hostname="13e7c5f749d8" name="python-unittest-gpu-0-shard-1-ctypes" skipped="102" tests="165" time="79.312" timestamp="2022-08-10T22:39:36.673781"> <testcase 
classname="ctypes.tests.python.unittest.test_auto_scheduler_search_policy" name="test_sketch_search_policy_cuda_rpc_runner" time="9.679"> </testcase> </testsuite> </testsuites> """, "pr_xml_file": "unittest/file2.xml", "pr_xml_content": """<?xml version="1.0" encoding="utf-8"?> <testsuites> <testsuite errors="0" failures="0" hostname="13e7c5f749d8" name="python-unittest-gpu-0-shard-1-ctypes" skipped="102" tests="165" time="79.312" timestamp="2022-08-10T22:39:36.673781"> <testcase classname="ctypes.tests.python.unittest.test_auto_scheduler_search_policy" name="test_sketch_search_policy_cuda_rpc_runner" time="9.679"> <skipped message="This test is skipped" type="pytest.skip"> Skipped </skipped> </testcase> <testcase classname="ctypes.tests.python.unittest.test_roofline" name="test_estimate_peak_bandwidth[cuda]" time="4.679"> <skipped message="This is another skippe test" type="pytest.skip"> Skipped </skipped> </testcase> </testsuite> </testsuites> """, "additional_tests_to_check": """{ "unittest": ["ctypes.tests.python.unittest.test_auto_scheduler_search_policy#test_sketch_search_policy_cuda_rpc_runner", "dummy_class#dummy_test"], "unittest_GPU": ["another_dummy_class#another_dummy_test"] } """, "target_url": "https://ci.tlcpack.ai/job/tvm/job/PR-11594/3/display/redirect", "s3_prefix": "tvm-jenkins-artifacts-prod", "jenkins_prefix": "ci.tlcpack.ai", "common_main_build": """{"build_number": "4115", "state": "success"}""", "commit_sha": "sha1234", "expected_body": "The list below shows tests that ran in main sha1234 but were skipped in the CI build of sha1234:\n```\nunittest -> ctypes.tests.python.unittest.test_auto_scheduler_search_policy#test_sketch_search_policy_cuda_rpc_runner\nunittest -> ctypes.tests.python.unittest.test_roofline#test_estimate_peak_bandwidth[cuda]\n```\n\nAdditional tests that were skipped in the CI build and present in the [`required_tests_to_run`](https://github.com/apache/tvm/blob/main/ci/scripts/required_tests_to_run.json) 
file:\n```\nunittest -> ctypes.tests.python.unittest.test_auto_scheduler_search_policy#test_sketch_search_policy_cuda_rpc_runner\n```\nA detailed report of ran tests is [here](https://ci.tlcpack.ai/job/tvm/job/PR-11594/3/testReport/).", }, "no-diff": { "main_xml_file": "unittest/file1.xml", "main_xml_content": """<?xml version="1.0" encoding="utf-8"?> <testsuites> <testsuite errors="0" failures="0" hostname="13e7c5f749d8" name="python-unittest-gpu-0-shard-1-ctypes" skipped="102" tests="165" time="79.312" timestamp="2022-08-10T22:39:36.673781"> <testcase classname="ctypes.tests.python.unittest.test_auto_scheduler_search_policy" name="test_sketch_search_policy_cuda_rpc_runner" time="9.679"> <skipped message="This test is skipped" type="pytest.skip"> Skipped </skipped> </testcase> </testsuite> </testsuites> """, "pr_xml_file": "unittest/file2.xml", "pr_xml_content": """<?xml version="1.0" encoding="utf-8"?> <testsuites> <testsuite errors="0" failures="0" hostname="13e7c5f749d8" name="python-unittest-gpu-0-shard-1-ctypes" skipped="102" tests="165" time="79.312" timestamp="2022-08-10T22:39:36.673781"> <testcase classname="ctypes.tests.python.unittest.test_auto_scheduler_search_policy" name="test_sketch_search_policy_cuda_rpc_runner" time="9.679"> <skipped message="This test is skipped" type="pytest.skip"> Skipped </skipped> </testcase> </testsuite> </testsuites> """, "additional_tests_to_check": """{ } """, "target_url": "https://ci.tlcpack.ai/job/tvm/job/PR-11594/3/display/redirect", "s3_prefix": "tvm-jenkins-artifacts-prod", "jenkins_prefix": "ci.tlcpack.ai", "common_main_build": """{"build_number": "4115", "state": "success"}""", "commit_sha": "sha1234", "expected_body": "No diff in skipped tests with main found in this branch for commit sha1234.\nA detailed report of ran tests is [here](https://ci.tlcpack.ai/job/tvm/job/PR-11594/3/testReport/).", }, "no-diff-skipped-additional": { "main_xml_file": "unittest/file1.xml", "main_xml_content": """<?xml version="1.0" 
encoding="utf-8"?> <testsuites> <testsuite errors="0" failures="0" hostname="13e7c5f749d8" name="python-unittest-gpu-0-shard-1-ctypes" skipped="102" tests="165" time="79.312" timestamp="2022-08-10T22:39:36.673781"> <testcase classname="ctypes.tests.python.unittest.test_auto_scheduler_search_policy" name="test_sketch_search_policy_cuda_rpc_runner" time="9.679"> <skipped message="This test is skipped" type="pytest.skip"> Skipped </skipped> </testcase> </testsuite> </testsuites> """, "pr_xml_file": "unittest/file2.xml", "pr_xml_content": """<?xml version="1.0" encoding="utf-8"?> <testsuites> <testsuite errors="0" failures="0" hostname="13e7c5f749d8" name="python-unittest-gpu-0-shard-1-ctypes" skipped="102" tests="165" time="79.312" timestamp="2022-08-10T22:39:36.673781"> <testcase classname="ctypes.tests.python.unittest.test_auto_scheduler_search_policy" name="test_sketch_search_policy_cuda_rpc_runner" time="9.679"> <skipped message="This test is skipped" type="pytest.skip"> Skipped </skipped> </testcase> </testsuite> </testsuites> """, "additional_tests_to_check": """{ "unittest": ["dummy_class#dummy_test", "ctypes.tests.python.unittest.test_auto_scheduler_search_policy#test_sketch_search_policy_cuda_rpc_runner"], "unittest_GPU": ["another_dummy_class#another_dummy_test"] } """, "target_url": "https://ci.tlcpack.ai/job/tvm/job/PR-11594/3/display/redirect", "s3_prefix": "tvm-jenkins-artifacts-prod", "jenkins_prefix": "ci.tlcpack.ai", "common_main_build": """{"build_number": "4115", "state": "success"}""", "commit_sha": "sha1234", "expected_body": "No diff in skipped tests with main found in this branch for commit sha1234.\n\nAdditional tests that were skipped in the CI build and present in the [`required_tests_to_run`](https://github.com/apache/tvm/blob/main/ci/scripts/required_tests_to_run.json) file:\n```\nunittest -> ctypes.tests.python.unittest.test_auto_scheduler_search_policy#test_sketch_search_policy_cuda_rpc_runner\n```\nA detailed report of ran tests is 
[here](https://ci.tlcpack.ai/job/tvm/job/PR-11594/3/testReport/).", }, "unable-to-run": { "main_xml_file": "unittest/file1.xml", "main_xml_content": """<?xml version="1.0" encoding="utf-8"?> <testsuites> </testsuites> """, "pr_xml_file": "unittest/file2.xml", "pr_xml_content": """<?xml version="1.0" encoding="utf-8"?> <testsuites> </testsuites> """, "additional_tests_to_check": """{ "unittest": ["ctypes.tests.python.unittest.test_auto_scheduler_search_policy#test_sketch_search_policy_cuda_rpc_runner", "dummy_class#dummy_test"], "unittest_GPU": ["another_dummy_class#another_dummy_test"] } """, "target_url": "https://ci.tlcpack.ai/job/tvm/job/PR-11594/3/display/redirect", "s3_prefix": "tvm-jenkins-artifacts-prod", "jenkins_prefix": "ci.tlcpack.ai", "common_main_build": """{"build_number": "4115", "state": "failed"}""", "commit_sha": "sha1234", "expected_body": "Unable to run tests bot because main failed to pass CI at sha1234.", }, } # pylint: enable=line-too-long @tvm.testing.skip_if_wheel_test @parameterize_named(**TEST_DATA_SKIPPED_BOT) # pylint: enable=line-too-long def test_skipped_tests_comment( caplog, tmpdir_factory, main_xml_file, main_xml_content, pr_xml_file, pr_xml_content, additional_tests_to_check, target_url, s3_prefix, jenkins_prefix, common_main_build, commit_sha, expected_body, ): """ Test that a comment with a link to the docs is successfully left on PRs """ def write_xml_file(root_dir, xml_file, xml_content): shutil.rmtree(root_dir, ignore_errors=True) file = root_dir / xml_file file.parent.mkdir(parents=True) with open(file, "w") as f: f.write(textwrap.dedent(xml_content)) git = TempGit(tmpdir_factory.mktemp("tmp_git_dir")) pr_test_report_dir = Path(git.cwd) / "pr-reports" write_xml_file(pr_test_report_dir, pr_xml_file, pr_xml_content) main_test_report_dir = Path(git.cwd) / "main-reports" write_xml_file(main_test_report_dir, main_xml_file, main_xml_content) with open(Path(git.cwd) / "required_tests_to_run.json", "w") as f: 
f.write(additional_tests_to_check) pr_data = { "commits": { "nodes": [ { "commit": { "oid": commit_sha, "statusCheckRollup": { "contexts": { "nodes": [ { "context": "tvm-ci/pr-head", "targetUrl": target_url, } ] } }, } } ] } } with caplog.at_level(logging.INFO): comment = scripts.github_skipped_tests_comment.get_skipped_tests_comment( pr=pr_data, github=None, s3_prefix=s3_prefix, jenkins_prefix=jenkins_prefix, common_commit_sha=commit_sha, pr_test_report_dir=pr_test_report_dir, main_test_report_dir=main_test_report_dir, common_main_build=json.loads(common_main_build), additional_tests_to_check_file=Path(git.cwd) / "required_tests_to_run.json", ) assert_in(expected_body, comment) assert_in(f"with target {target_url}", caplog.text) @tvm.testing.skip_if_wheel_test @parameterize_named( doc_link=dict( target_url="https://ci.tlcpack.ai/job/tvm/job/PR-11594/3/display/redirect", base_url="https://pr-docs.tlcpack.ai", commit_sha="SHA", expected_body="Built docs for commit SHA can be found " "[here](https://pr-docs.tlcpack.ai/PR-11594/3/docs/index.html).", ) ) def test_docs_comment(target_url, base_url, commit_sha, expected_body): """ Test that a comment with a link to the docs is successfully left on PRs """ pr_data = { "commits": { "nodes": [ { "commit": { "oid": commit_sha, "statusCheckRollup": { "contexts": { "nodes": [ { "context": "tvm-ci/pr-head", "targetUrl": target_url, } ] } }, } } ] } } comment = scripts.github_docs_comment.get_doc_url( pr=pr_data, base_docs_url=base_url, ) assert_in(expected_body, comment) @tvm.testing.skip_if_wheel_test @parameterize_named( cc_no_one=dict( pr_body="abc", requested_reviewers=[], existing_review_users=[], expected_reviewers=[] ), cc_abc=dict( pr_body="cc @abc", requested_reviewers=[], existing_review_users=[], expected_reviewers=["abc"], ), bad_cc_line=dict( pr_body="cc @", requested_reviewers=[], existing_review_users=[], expected_reviewers=[] ), cc_multiple=dict( pr_body="cc @abc @def", requested_reviewers=[], 
existing_review_users=[], expected_reviewers=["abc", "def"], ), with_existing=dict( pr_body="some text cc @abc @def something else", requested_reviewers=[], existing_review_users=[], expected_reviewers=["abc", "def"], ), with_existing_split=dict( pr_body="some text cc @abc @def something else\n\n another cc @zzz z", requested_reviewers=[], existing_review_users=[], expected_reviewers=["abc", "def", "zzz"], ), with_existing_request=dict( pr_body="some text cc @abc @def something else\n\n another cc @zzz z", requested_reviewers=["abc"], existing_review_users=[], expected_reviewers=["def", "zzz"], ), with_existing_reviewers=dict( pr_body="some text cc @abc @def something else\n\n another cc @zzz z", requested_reviewers=["abc"], existing_review_users=["abc"], expected_reviewers=["def", "zzz"], ), with_no_reviewers=dict( pr_body="some text cc @abc @def something else\n\n another cc @zzz z", requested_reviewers=[], existing_review_users=["abc"], expected_reviewers=["def", "zzz"], ), ) def test_cc_reviewers( tmpdir_factory, pr_body, requested_reviewers, existing_review_users, expected_reviewers ): """ Test that reviewers are added from 'cc @someone' messages in PRs """ reviewers_script = REPO_ROOT / "ci" / "scripts" / "github_cc_reviewers.py" git = TempGit(tmpdir_factory.mktemp("tmp_git_dir")) reviews = [{"user": {"login": r}} for r in existing_review_users] requested_reviewers = [{"login": r} for r in requested_reviewers] proc = run_script( [reviewers_script, "--dry-run", "--testing-reviews-json", json.dumps(reviews)], env={ "PR": json.dumps( {"number": 1, "body": pr_body, "requested_reviewers": requested_reviewers} ) }, cwd=git.cwd, ) assert f"After filtering existing reviewers, adding: {expected_reviewers}" in proc.stdout @parameterize_named( # Missing expected tvm-ci/branch test missing_tvm_ci_branch=dict( statuses=[ { "context": "test", "state": "SUCCESS", } ], expected_rc=1, expected_output="No good commits found in the last 1 commits", ), # Only has the right 
passing test has_expected_test=dict( statuses=[ { "context": "tvm-ci/branch", "state": "SUCCESS", } ], expected_rc=0, expected_output="Found last good commit: 123: hello", ), # Check with many statuses many_statuses=dict( statuses=[ { "context": "tvm-ci/branch", "state": "SUCCESS", }, { "context": "tvm-ci/branch2", "state": "SUCCESS", }, { "context": "tvm-ci/branch3", "state": "FAILED", }, ], expected_rc=1, expected_output="No good commits found in the last 1 commits", ), many_success_statuses=dict( statuses=[ { "context": "tvm-ci/branch", "state": "SUCCESS", }, { "context": "tvm-ci/branch2", "state": "SUCCESS", }, { "context": "tvm-ci/branch3", "state": "SUCCESS", }, ], expected_rc=0, expected_output="Found last good commit: 123: hello", ), ) def test_update_branch(tmpdir_factory, statuses, expected_rc, expected_output): """ Test that the last-successful branch script updates successfully """ update_script = REPO_ROOT / "ci" / "scripts" / "update_branch.py" git = TempGit(tmpdir_factory.mktemp("tmp_git_dir")) commit = { "statusCheckRollup": {"contexts": {"nodes": statuses}}, "oid": "123", "messageHeadline": "hello", } data = { "data": { "repository": { "defaultBranchRef": {"target": {"history": {"edges": [], "nodes": [commit]}}} } } } proc = run_script( [update_script, "--dry-run", "--testonly-json", json.dumps(data)], cwd=git.cwd, check=False, ) if proc.returncode != expected_rc: raise RuntimeError(f"Wrong return code:\nstdout:\n{proc.stdout}\n\nstderr:\n{proc.stderr}") if expected_output not in proc.stdout: raise RuntimeError( f"Missing {expected_output}:\nstdout:\n{proc.stdout}\n\nstderr:\n{proc.stderr}" ) # pylint: disable=line-too-long @parameterize_named( author_gate=dict( pr_author="abc", comments=[], expected="Skipping comment for author abc", ), new_comment=dict( pr_author="driazati", comments=[], expected="No existing comment found", ), update_comment=dict( pr_author="driazati", comments=[ { "author": {"login": "github-actions"}, "databaseId": 
"comment456", "body": "<!---bot-comment--> abc", } ], expected="PATCH to https://api.github.com/repos/apache/tvm/issues/comments/comment456", ), new_body=dict( pr_author="driazati", comments=[], expected="Commenting " + textwrap.dedent( """ <!---bot-comment--> Thanks for contributing to TVM! Please refer to the contributing guidelines https://tvm.apache.org/docs/contribute/ for useful information and tips. Please request code reviews from [Reviewers](https://github.com/apache/incubator-tvm/blob/master/CONTRIBUTORS.md#reviewers) by @-ing them in a comment. <!--bot-comment-ccs-start--> * the cc<!--bot-comment-ccs-end--><!--bot-comment-skipped-tests-start--> * the skipped tests<!--bot-comment-skipped-tests-end--><!--bot-comment-docs-start--> * the docs<!--bot-comment-docs-end--> """ ).strip(), ), update_body=dict( pr_author="driazati", comments=[ { "author": {"login": "github-actions"}, "databaseId": "comment456", "body": textwrap.dedent( """ <!---bot-comment--> Thanks for contributing to TVM! Please refer to the contributing guidelines https://tvm.apache.org/docs/contribute/ for useful information and tips. Please request code reviews from [Reviewers](https://github.com/apache/incubator-tvm/blob/master/CONTRIBUTORS.md#reviewers) by @-ing them in a comment. <!--bot-comment-ccs-start--> * the cc<!--bot-comment-ccs-end--><!--bot-comment-something-tests-start--> * something else<!--bot-comment-something-tests-end--><!--bot-comment-docs-start--> * the docs<!--bot-comment-docs-end--> """ ).strip(), } ], expected="Commenting " + textwrap.dedent( """ <!---bot-comment--> Thanks for contributing to TVM! Please refer to the contributing guidelines https://tvm.apache.org/docs/contribute/ for useful information and tips. Please request code reviews from [Reviewers](https://github.com/apache/incubator-tvm/blob/master/CONTRIBUTORS.md#reviewers) by @-ing them in a comment. 
<!--bot-comment-ccs-start--> * the cc<!--bot-comment-ccs-end--><!--bot-comment-something-tests-start--> * something else<!--bot-comment-something-tests-end--><!--bot-comment-docs-start--> * the docs<!--bot-comment-docs-end--><!--bot-comment-skipped-tests-start--> * the skipped tests<!--bot-comment-skipped-tests-end--> """ ).strip(), ), ) # pylint: enable=line-too-long def test_pr_comment(tmpdir_factory, pr_author, comments, expected): """ Test the PR commenting bot """ comment_script = REPO_ROOT / "ci" / "scripts" / "github_pr_comment.py" git = TempGit(tmpdir_factory.mktemp("tmp_git_dir")) target_url = "https://ci.tlcpack.ai/job/tvm/job/PR-11594/3/display/redirect" commit = { "commit": { "oid": "sha1234", "statusCheckRollup": { "contexts": { "nodes": [ { "context": "tvm-ci/pr-head", "targetUrl": target_url, } ] } }, } } data = { "[1] POST - https://api.github.com/graphql": {}, "[2] POST - https://api.github.com/graphql": { "data": { "repository": { "pullRequest": { "number": 1234, "comments": { "nodes": comments, }, "author": { "login": pr_author, }, "commits": { "nodes": [commit], }, } } } }, } comments = { "ccs": "the cc", "docs": "the docs", "skipped-tests": "the skipped tests", } proc = run_script( [ comment_script, "--dry-run", "--test-data", json.dumps(data), "--test-comments", json.dumps(comments), "--pr", "1234", ], stderr=subprocess.STDOUT, cwd=git.cwd, ) assert_in(expected, proc.stdout) @parameterize_named( dont_skip_main=dict( commands=[], should_skip=False, pr_title="[skip ci] test", why="ci should not be skipped on main", ), dont_skip_main_with_commit=dict( commands=[ ["commit", "--allow-empty", "--message", "[skip ci] commit 1"], ], should_skip=False, pr_title="[skip ci] test", why="ci should not be skipped on main", ), skip_on_new_branch=dict( commands=[ ["checkout", "-b", "some_new_branch"], ["commit", "--allow-empty", "--message", "[skip ci] commit 1"], ], should_skip=True, pr_title="[skip ci] test", why="ci should be skipped on a branch with [skip 
ci] in the last commit", ), no_skip_in_pr_title=dict( commands=[ ["checkout", "-b", "some_new_branch"], ["commit", "--allow-empty", "--message", "[skip ci] commit 1"], ], should_skip=False, pr_title="[no skip ci] test", why="ci should not be skipped on a branch with " "[skip ci] in the last commit but not the PR title", ), skip_in_pr_title=dict( commands=[ ["checkout", "-b", "some_new_branch"], ["commit", "--allow-empty", "--message", "[skip ci] commit 1"], ["commit", "--allow-empty", "--message", "commit 2"], ], should_skip=True, pr_title="[skip ci] test", why="ci should be skipped with [skip ci] in the PR title", ), skip_in_pr_title_many_commits=dict( commands=[ ["checkout", "-b", "some_new_branch"], ["commit", "--allow-empty", "--message", "commit 1"], ["commit", "--allow-empty", "--message", "commit 2"], ["commit", "--allow-empty", "--message", "commit 3"], ["commit", "--allow-empty", "--message", "commit 4"], ], should_skip=True, pr_title="[skip ci] test", why="ci should be skipped with [skip ci] in the PR title", ), skip_anywhere_in_title=dict( commands=[ ["checkout", "-b", "some_new_branch"], ], should_skip=True, pr_title="[something][skip ci] test", why="skip ci tag should work anywhere in title", ), ) def test_skip_ci(tmpdir_factory, commands, should_skip, pr_title, why): """ Test that CI is skipped when it should be """ skip_ci_script = REPO_ROOT / "ci" / "scripts" / "git_skip_ci.py" git = TempGit(tmpdir_factory.mktemp("tmp_git_dir")) git.run("config", "user.name", "ci") git.run("config", "user.email", "[email protected]") git.run("commit", "--allow-empty", "--message", "base commit") for command in commands: git.run(*command) pr_number = "1234" proc = run_script( [skip_ci_script, "--pr", pr_number, "--pr-title", pr_title], cwd=git.cwd, check=False, ) expected = 0 if should_skip else 1 if proc.returncode != expected: raise RuntimeError( f"Unexpected return code {proc.returncode} " f"(expected {expected}) in {why}:\n{proc.stdout}" ) @parameterize_named( 
no_file=dict(files=[], should_skip=True), readme=dict(files=["README.md"], should_skip=True), c_file=dict(files=["test.c"], should_skip=False), c_and_readme=dict(files=["test.c", "README.md"], should_skip=False), src_file_and_readme=dict( files=["src/autotvm/feature_visitor.cc", "README.md"], should_skip=False ), yaml_and_readme=dict(files=[".asf.yaml", "docs/README.md"], should_skip=True), ) def test_skip_globs(tmpdir_factory, files, should_skip): """ Test that CI is skipped if only certain files are edited """ script = REPO_ROOT / "ci" / "scripts" / "git_skip_ci_globs.py" git = TempGit(tmpdir_factory.mktemp("tmp_git_dir")) proc = run_script( [ script, "--files", ",".join(files), ], check=False, cwd=git.cwd, ) if should_skip: assert proc.returncode == 0 else: assert proc.returncode == 1 def all_time_keys(time): return { "updatedAt": time, "lastEditedAt": time, "createdAt": time, "publishedAt": time, } @parameterize_named( draft=dict( pull_request={ "isDraft": True, "number": 2, }, check="Checking 0 of 1 fetched", ), not_draft=dict( pull_request={ "isDraft": False, "number": 2, }, check="Checking 0 of 1 fetched", ), week_old=dict( pull_request={ "number": 123, "url": "https://github.com/apache/tvm/pull/123", "body": "cc @someone", "isDraft": False, "author": {"login": "user"}, "reviews": {"nodes": []}, **all_time_keys("2022-01-18T17:54:19Z"), "comments": {"nodes": []}, }, check="Pinging reviewers ['someone'] on https://github.com/apache/tvm/pull/123", ), # Old comment, ping old_comment=dict( pull_request={ "number": 123, "url": "https://github.com/apache/tvm/pull/123", "body": "cc @someone", "isDraft": False, "author": {"login": "user"}, "reviews": {"nodes": []}, **all_time_keys("2022-01-18T17:54:19Z"), "comments": { "nodes": [ { **all_time_keys("2022-01-18T17:54:19Z"), "bodyText": "abc", }, ] }, }, check="Pinging reviewers ['someone'] on https://github.com/apache/tvm/pull/123", ), # New comment, don't ping new_comment=dict( pull_request={ "number": 123, "url": 
"https://github.com/apache/tvm/pull/123", "body": "cc @someone", "isDraft": False, "author": {"login": "user"}, "reviews": {"nodes": []}, **all_time_keys("2022-01-18T17:54:19Z"), "comments": { "nodes": [ {**all_time_keys("2022-01-27T17:54:19Z"), "bodyText": "abc"}, ] }, }, check="Not pinging PR 123", ), ) def test_ping_reviewers(tmpdir_factory, pull_request, check): """ Test that reviewers are messaged after a time period of inactivity """ reviewers_script = REPO_ROOT / "ci" / "scripts" / "ping_reviewers.py" git = TempGit(tmpdir_factory.mktemp("tmp_git_dir")) data = { "data": { "repository": { "pullRequests": { "nodes": [pull_request], "edges": [], } } } } proc = run_script( [ reviewers_script, "--dry-run", "--wait-time-minutes", "1", "--cutoff-pr-number", "5", "--pr-json", json.dumps(data), "--now", "2022-01-26T17:54:19Z", ], cwd=git.cwd, ) assert_in(check, proc.stdout) def assert_in(needle: str, haystack: str): """ Check that 'needle' is in 'haystack' """ if needle not in haystack: raise AssertionError(f"item not found:\n{needle}\nin:\n{haystack}") @tvm.testing.skip_if_wheel_test @parameterize_named( no_cc=dict( source_type="ISSUE", data={ "title": "A title", "number": 1234, "user": { "login": "person5", }, "labels": [{"name": "abc"}], "body": textwrap.dedent( """ hello """.strip() ), }, check="No one to cc, exiting", ), no_additional_cc=dict( source_type="ISSUE", data={ "title": "A title", "number": 1234, "user": { "login": "person5", }, "labels": [{"name": "abc"}], "body": textwrap.dedent( """ hello cc @test """.strip() ), }, check="No one to cc, exiting", ), cc_update=dict( source_type="ISSUE", data={ "title": "A title", "number": 1234, "user": { "login": "person5", }, "labels": [{"name": "something"}], "body": textwrap.dedent( """ hello something""" ), }, check="would have updated issues/1234 with {'body': " "'\\nhello\\n\\nsomething\\n\\ncc @person1 @person2 @person4'}", ), already_cced=dict( source_type="ISSUE", data={ "title": "A title", "number": 1234, 
"user": { "login": "person5", }, "labels": [{"name": "something"}], "body": textwrap.dedent( """ hello cc @person1 @person2 @person4""" ), }, check="No one to cc, exiting", ), not_already_cced=dict( source_type="ISSUE", data={ "title": "[something] A title", "number": 1234, "user": { "login": "person5", }, "labels": [{"name": "something2"}], "body": textwrap.dedent( """ hello something""" ), }, check="would have updated issues/1234 with {'body': " "'\\nhello\\n\\nsomething\\n\\ncc @person1 @person2 @person4'}", ), no_new_ccs=dict( source_type="ISSUE", data={ "title": "[something] A title", "number": 1234, "user": { "login": "person5", }, "labels": [{"name": "something2"}], "body": textwrap.dedent( """ hello cc @person1 @person2 @person4""" ), }, check="No one to cc, exiting", ), mismatching_tags=dict( source_type="PR", data={ "title": "[something] A title", "number": 1234, "draft": False, "user": { "login": "person5", }, "labels": [{"name": "something2"}], "body": textwrap.dedent( """ hello cc @person1 @person2 @person4""" ), }, check="No one to cc, exiting", ), draft_pr=dict( source_type="PR", data={ "title": "[something] A title", "number": 1234, "draft": True, "user": { "login": "person5", }, "labels": [{"name": "something2"}], "body": textwrap.dedent( """ hello cc @person1 @person2 @person4""" ), }, check="Terminating since 1234 is a draft", ), edit_inplace=dict( source_type="ISSUE", data={ "title": "[something] A title", "number": 1234, "user": { "login": "person5", }, "labels": [{"name": "something2"}], "body": "`mold` and `lld` can be a much faster alternative to `ld` from gcc. " "We should modify our CMakeLists.txt to detect and use these when possible. cc @person1" "\n\ncc @person4", }, check="would have updated issues/1234 with {'body': '`mold` and `lld` can be a much" " faster alternative to `ld` from gcc. We should modify our CMakeLists.txt to " "detect and use these when possible. 
cc @person1\\n\\ncc @person2 @person4'}", ), edit_out_of_place=dict( source_type="ISSUE", data={ "title": "[something3] A title", "number": 1234, "user": { "login": "person5", }, "labels": [{"name": "something2"}], "body": "@person2 @SOME1-ONE-", }, check="Dry run, would have updated issues/1234 with" " {'body': '@person2 @SOME1-ONE-\\n\\ncc @person1'}", ), atted_but_not_cced=dict( source_type="ISSUE", data={ "title": "[] A title", "number": 1234, "user": { "login": "person5", }, "labels": [], "body": "@person2 @SOME1-ONE-", }, check="No one to cc, exiting", ), ) def test_github_tag_teams(tmpdir_factory, source_type, data, check): """ Check that individuals are tagged from team headers """ tag_script = REPO_ROOT / "ci" / "scripts" / "github_tag_teams.py" git = TempGit(tmpdir_factory.mktemp("tmp_git_dir")) issue_body = """ some text [temporary] opt-in: @person5 - something: @person1 @person2 - something3: @person1 @person2 @SOME1-ONE- - something else @person1 @person2 - something else2: @person1 @person2 - something-else @person1 @person2 """ comment1 = """ another thing: @person3 another-thing @person3 """ comment2 = """ something @person4 @person5 """ teams = { "data": { "repository": { "issue": { "body": issue_body, "comments": {"nodes": [{"body": comment1}, {"body": comment2}]}, } } } } env = { source_type: json.dumps(data), } proc = run_script( [ tag_script, "--dry-run", "--team-issue-json", json.dumps(teams), ], stderr=subprocess.STDOUT, cwd=git.cwd, env=env, ) assert_in(check, proc.stdout) @tvm.testing.skip_if_wheel_test @parameterize_named( same_tags=dict( tlcpackstaging_body={ "results": [ { "last_updated": "2022-06-01T00:00:00.123456Z", "name": "123-123-abc", }, ] }, tlcpack_body={ "results": [ { "last_updated": "2022-06-01T00:00:00.123456Z", "name": "123-123-abc", }, ] }, expected="Tag names were the same, no update needed", expected_images=[], ), staging_update=dict( tlcpackstaging_body={ "results": [ { "last_updated": "2022-06-01T01:00:00.123456Z", 
"name": "234-234-abc-staging", }, { "last_updated": "2022-06-01T00:00:00.123456Z", "name": "456-456-abc", }, ] }, tlcpack_body={ "results": [ { "last_updated": "2022-06-01T00:00:00.123456Z", "name": "123-123-abc", }, ] }, expected="Using tlcpackstaging tag on tlcpack", expected_images=[ "ci_arm = 'tlcpack/ci-arm:456-456-abc'", ], ), tlcpack_update=dict( tlcpackstaging_body={ "results": [ { "last_updated": "2022-06-01T00:00:00.123456Z", "name": "123-123-abc", }, ] }, tlcpack_body={ "results": [ { "last_updated": "2022-06-01T00:01:00.123456Z", "name": "234-234-abc", }, ] }, expected="Found newer image, using: tlcpack", expected_images=[ "ci_arm = 'tlcpack/ci-arm:234-234-abc'", ], ), ) def test_open_docker_update_pr( tmpdir_factory, tlcpackstaging_body, tlcpack_body, expected, expected_images ): """Test workflow to open a PR to update Docker images""" tag_script = REPO_ROOT / "ci" / "scripts" / "open_docker_update_pr.py" git = TempGit(tmpdir_factory.mktemp("tmp_git_dir")) git.run("config", "user.name", "ci") git.run("config", "user.email", "[email protected]") images = [ "ci_arm", "ci_cortexm", "ci_cpu", "ci_gpu", "ci_hexagon", "ci_i386", "ci_lint", "ci_minimal", "ci_riscv", "ci_wasm", ] docker_data = {} for image in images: docker_data[f"repositories/tlcpackstaging/{image}/tags"] = tlcpackstaging_body docker_data[f"repositories/tlcpack/{image.replace('_', '-')}/tags"] = tlcpack_body proc = run_script( [ tag_script, "--dry-run", "--testing-docker-data", json.dumps(docker_data), ], cwd=git.cwd, env={"GITHUB_TOKEN": "1234"}, stderr=subprocess.STDOUT, ) for line in expected_images: if line not in proc.stdout: raise RuntimeError(f"Missing line {line} in output:\n{proc.stdout}") assert_in(expected, proc.stdout) @parameterize_named( use_tlcpack=dict( images=["ci_arm=tlcpack/ci-arm:abc-abc-123", "ci_lint=tlcpack/ci-lint:abc-abc-234"], expected={ "ci_arm": "tlcpack/ci-arm:abc-abc-123", "ci_lint": "tlcpack/ci-lint:abc-abc-234", }, ), use_staging=dict( 
images=["ci_arm2=tlcpack/ci-arm2:abc-abc-123"], expected={ "ci_arm2": "tlcpackstaging/ci_arm2:abc-abc-123", }, ), ) def test_determine_docker_images(tmpdir_factory, images, expected): """Test script to decide whether to use tlcpack or tlcpackstaging for images""" script = REPO_ROOT / "ci" / "scripts" / "determine_docker_images.py" git_dir = tmpdir_factory.mktemp("tmp_git_dir") docker_data = { "repositories/tlcpack/ci-arm/tags/abc-abc-123": {}, "repositories/tlcpack/ci-lint/tags/abc-abc-234": {}, } run_script( [ script, "--testing-docker-data", json.dumps(docker_data), "--base-dir", git_dir, ] + images, cwd=git_dir, ) for expected_filename, expected_image in expected.items(): with open(Path(git_dir) / expected_filename) as f: actual_image = f.read() assert actual_image == expected_image @parameterize_named( invalid_name=dict( changed_files=[], name="abc", check="Image abc is not using new naming scheme", expected_code=1, ), no_hash=dict( changed_files=[], name="123-123-abc", check="No extant hash found", expected_code=1 ), no_changes=dict( changed_files=[["test.txt"]], name=None, check="Did not find changes, no rebuild necessary", expected_code=0, ), docker_changes=dict( changed_files=[["test.txt"], ["docker/test.txt"]], name=None, check="Found docker changes", expected_code=2, ), ) def test_should_rebuild_docker(tmpdir_factory, changed_files, name, check, expected_code): """ Check that the Docker images are built when necessary """ tag_script = REPO_ROOT / "ci" / "scripts" / "should_rebuild_docker.py" git = TempGit(tmpdir_factory.mktemp("tmp_git_dir")) git.run("config", "user.name", "ci") git.run("config", "user.email", "[email protected]") git_path = Path(git.cwd) for i, commits in enumerate(changed_files): for filename in commits: path = git_path / filename path.parent.mkdir(exist_ok=True, parents=True) path.touch() git.run("add", filename) git.run("commit", "-m", f"message {i}") if name is None: ref = "HEAD" if len(changed_files) > 1: ref = 
f"HEAD~{len(changed_files) - 1}" proc = git.run("rev-parse", ref, stdout=subprocess.PIPE) last_hash = proc.stdout.strip() name = f"123-123-{last_hash}" docker_data = { "repositories/tlcpack": { "results": [ { "name": "ci-something", }, { "name": "something-else", }, ], }, "repositories/tlcpack/ci-something/tags": { "results": [{"name": name}, {"name": name + "old"}], }, } proc = run_script( [ tag_script, "--testing-docker-data", json.dumps(docker_data), ], stderr=subprocess.STDOUT, cwd=git.cwd, check=False, ) assert_in(check, proc.stdout) assert proc.returncode == expected_code @parameterize_named( passing=dict( title="[something] a change", body="something", expected="All checks passed", expected_code=0, ), period=dict( title="[something] a change.", body="something", expected="trailing_period: FAILED", expected_code=1, ), empty_body=dict( title="[something] a change", body=None, expected="non_empty: FAILED", expected_code=1, ), ) def test_pr_linter(title, body, expected, expected_code): """ Test the PR linter """ tag_script = REPO_ROOT / "ci" / "scripts" / "check_pr.py" pr_data = { "title": title, "body": body, } proc = run_script( [ tag_script, "--pr", 1234, "--pr-data", json.dumps(pr_data), ], check=False, ) assert proc.returncode == expected_code assert_in(expected, proc.stdout) if __name__ == "__main__": tvm.testing.main()
https://github.com/zk-ml/tachikoma
tests/python/ci/test_script_converter.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Test the conversion of bash to rst """ import sys import tvm from tvm.contrib import utils # this has to be after the sys.path patching, so ignore pylint # pylint: disable=wrong-import-position,wrong-import-order from .test_utils import REPO_ROOT sys.path.insert(0, str(REPO_ROOT / "docs")) from script_convert import ( bash_to_python, BASH, BASH_IGNORE, BASH_MULTILINE_COMMENT_START, BASH_MULTILINE_COMMENT_END, ) # pylint: enable=wrong-import-position,wrong-import-order def test_bash_cmd(): """Test that a bash command gets turned into a rst code block""" temp = utils.tempdir() src_path = temp / "src.sh" dest_path = temp / "dest.py" with open(src_path, "w") as src_f: src_f.write(BASH) src_f.write("\n") src_f.write("tvmc\n") src_f.write(BASH) src_f.write("\n") bash_to_python(src_path, dest_path) with open(dest_path, "r") as dest_f: generated_cmd = dest_f.read() expected_cmd = "# .. 
code-block:: bash\n" "#\n" "#\t tvmc\n" "#\n" assert generated_cmd == expected_cmd def test_bash_ignore_cmd(): """Test that ignored bash commands are not turned into code blocks""" temp = utils.tempdir() src_path = temp / "src.sh" dest_path = temp / "dest.py" with open(src_path, "w") as src_f: src_f.write("# start\n") src_f.write(BASH_IGNORE) src_f.write("\n") src_f.write("tvmc\n") src_f.write(BASH_IGNORE) src_f.write("\n") src_f.write("# end\n") bash_to_python(src_path, dest_path) with open(dest_path, "r") as dest_f: generated_cmd = dest_f.read() expected_cmd = "# start\n" "# end\n" assert generated_cmd == expected_cmd def test_no_command(): """Test a file with no code blocks""" temp = utils.tempdir() src_path = temp / "src.sh" dest_path = temp / "dest.py" with open(src_path, "w") as src_f: src_f.write("# start\n") src_f.write("# description\n") src_f.write("end\n") bash_to_python(src_path, dest_path) with open(dest_path, "r") as dest_f: generated_cmd = dest_f.read() expected_cmd = "# start\n" "# description\n" "end\n" assert generated_cmd == expected_cmd def test_text_and_bash_command(): """Test a file with a bash code block""" temp = utils.tempdir() src_path = temp / "src.sh" dest_path = temp / "dest.py" with open(src_path, "w") as src_f: src_f.write("# start\n") src_f.write(BASH) src_f.write("\n") src_f.write("tvmc\n") src_f.write(BASH) src_f.write("\n") src_f.write("# end\n") bash_to_python(src_path, dest_path) with open(dest_path, "r") as dest_f: generated_cmd = dest_f.read() expected_cmd = "# start\n" "# .. 
code-block:: bash\n" "#\n" "#\t tvmc\n" "#\n" "# end\n" assert generated_cmd == expected_cmd def test_last_line_break(): """Test that line endings are correct""" temp = utils.tempdir() src_path = temp / "src.sh" dest_path = temp / "dest.py" with open(src_path, "w") as src_f: src_f.write("# start\n") src_f.write("# end\n") bash_to_python(src_path, dest_path) with open(dest_path, "r") as dest_f: generated_cmd = dest_f.read() expected_cmd = "# start\n" "# end\n" assert generated_cmd == expected_cmd def test_multiline_comment(): """Test that bash comments are inserted correctly""" temp = utils.tempdir() src_path = temp / "src.sh" dest_path = temp / "dest.py" with open(src_path, "w") as src_f: src_f.write(BASH_MULTILINE_COMMENT_START) src_f.write("\n") src_f.write("comment\n") src_f.write(BASH_MULTILINE_COMMENT_END) src_f.write("\n") bash_to_python(src_path, dest_path) with open(dest_path, "r") as dest_f: generated_cmd = dest_f.read() expected_cmd = '"""\n' "comment\n" '"""\n' assert generated_cmd == expected_cmd if __name__ == "__main__": tvm.testing.main()
https://github.com/zk-ml/tachikoma
tests/python/ci/test_tvmbot.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Test the @tvm-bot merge code """ import json from pathlib import Path from typing import Dict, Any import tvm from .test_utils import REPO_ROOT, TempGit, run_script SUCCESS_EXPECTED_OUTPUT = """ Dry run, would have merged with url=pulls/10786/merge and data={ "commit_title": "[Hexagon] 2-d allocation cleanup (#10786)", "commit_message": "- Added device validity check in allocation. HexagonDeviceAPI should only be called for CPU/Hexagon types.\\n\\n- Check for \\"global.vtcm\\" scope instead of \\"vtcm\\". The ccope of N-d allocations produced by `LowerVtcmAlloc` should be `\\"global.vtcm\\"`. The previous check allowed unsupported scope such as `\\"local.vtcm\\"`.\\n\\n- Remove `vtcmallocs` entry after calling free.\\n\\nPreviously, the vtcm allocation map kept dangling pointers to `HexagonBuffer` objects after they had been freed.\\n\\n- Rename N-d alloc and free packed functions. 
Since most of the similar device functions use snake case, renaming `*.AllocND` to `*.alloc_nd` and `*.FreeND` to `*.free_nd`.\\n\\n\\ncc someone\\n\\n\\nCo-authored-by: Adam Straw <[email protected]>", "sha": "6f04bcf57d07f915a98fd91178f04d9e92a09fcd", "merge_method": "squash" } """.strip() class _TvmBotTest: NUMBER = 10786 def preprocess_data(self, data: Dict[str, Any]): """ Used to pre-process PR data before running the test. Override as necessary to edit data for specific test cases. """ return data @tvm.testing.skip_if_wheel_test def test(self, tmpdir_factory): """ Run the tvm-bot script using the data from preprocess_data """ mergebot_script = REPO_ROOT / "ci" / "scripts" / "github_tvmbot.py" test_json_dir = Path(__file__).resolve().parent / "sample_prs" with open(test_json_dir / f"pr{self.NUMBER}.json") as f: test_data = json.load(f) # Update testing data with replacements / additions test_data = self.preprocess_data(test_data) git = TempGit(tmpdir_factory.mktemp("tmp_git_dir")) comment = { "body": self.COMMENT, "id": 123, "user": { "login": self.USER, }, } allowed_users = [{"login": "abc"}, {"login": "other-abc"}] proc = run_script( [ mergebot_script, "--pr", self.NUMBER, "--dry-run", "--run-url", "https://example.com", "--testing-pr-json", json.dumps(test_data), "--testing-collaborators-json", json.dumps(allowed_users), "--testing-mentionable-users-json", json.dumps(allowed_users), "--trigger-comment-json", json.dumps(comment), ], env={ "TVM_BOT_JENKINS_TOKEN": "123", "GH_ACTIONS_TOKEN": "123", }, cwd=git.cwd, ) if self.EXPECTED not in proc.stderr: raise RuntimeError(f"{proc.stderr}\ndid not contain\n{self.EXPECTED}") class TestNoRequest(_TvmBotTest): """ A PR for which the mergebot runs but no merge is requested """ COMMENT = "@tvm-bot do something else" USER = "abc" EXPECTED = "Command 'do something else' did not match anything" def preprocess_data(self, data: Dict[str, Any]): data["reviews"]["nodes"][0]["body"] = "nothing" return data class 
TestSuccessfulMerge(_TvmBotTest): """ Everything is fine so this PR will merge """ COMMENT = "@tvm-bot merge" USER = "abc" EXPECTED = SUCCESS_EXPECTED_OUTPUT class TestBadCI(_TvmBotTest): """ A PR which failed CI and cannot merge """ COMMENT = "@tvm-bot merge" USER = "abc" EXPECTED = "Cannot merge, these CI jobs are not successful on" def preprocess_data(self, data: Dict[str, Any]): # Mark the Jenkins build as failed contexts = data["commits"]["nodes"][0]["commit"]["statusCheckRollup"]["contexts"]["nodes"] for context in contexts: if "context" in context and context["context"] == "tvm-ci/pr-head": context["state"] = "FAILED" return data class TestOldReview(_TvmBotTest): """ A PR with passing CI and approving reviews on an old commit so it cannot merge """ COMMENT = "@tvm-bot merge" USER = "abc" EXPECTED = "Cannot merge, did not find any approving reviews" def preprocess_data(self, data: Dict[str, Any]) -> Dict[str, Any]: data["reviews"]["nodes"][0]["commit"]["oid"] = "abc12345" return data class TestMissingJob(_TvmBotTest): """ PR missing an expected CI job and cannot merge """ COMMENT = "@tvm-bot merge" USER = "abc" EXPECTED = "Cannot merge, missing expected jobs" def preprocess_data(self, data: Dict[str, Any]) -> Dict[str, Any]: contexts = data["commits"]["nodes"][0]["commit"]["statusCheckRollup"]["contexts"]["nodes"] for context in contexts: if "context" in context and context["context"] == "tvm-ci/pr-head": context["context"] = "something" return data class TestInvalidAuthor(_TvmBotTest): """ Merge requester is not a committer and cannot merge """ COMMENT = "@tvm-bot merge" USER = "not-abc" EXPECTED = "Failed auth check 'collaborators', quitting" class TestUnauthorizedComment(_TvmBotTest): """ Check that a merge comment not from a CONTRIBUTOR is rejected """ COMMENT = "@tvm-bot merge" USER = "not-abc2" EXPECTED = "Failed auth check 'collaborators'" class TestNoReview(_TvmBotTest): """ Check that a merge request without any reviews is rejected """ COMMENT = 
"@tvm-bot merge" USER = "abc" EXPECTED = "Cannot merge, did not find any approving reviews from users with write access" def preprocess_data(self, data: Dict[str, Any]) -> Dict[str, Any]: data["reviews"]["nodes"] = [] return data class TestChangesRequested(_TvmBotTest): """ Check that a merge request with a 'Changes Requested' review is rejected """ COMMENT = "@tvm-bot merge" USER = "abc" EXPECTED = "Cannot merge, found [this review]" def preprocess_data(self, data: Dict[str, Any]) -> Dict[str, Any]: data["reviews"]["nodes"][0]["state"] = "CHANGES_REQUESTED" data["reviews"]["nodes"][0]["url"] = "http://example.com" return data class TestCoAuthors(_TvmBotTest): """ Check that a merge request with co-authors generates the correct commit message """ COMMENT = "@tvm-bot merge" USER = "abc" EXPECTED = "Co-authored-by: Some One <[email protected]>" def preprocess_data(self, data: Dict[str, Any]) -> Dict[str, Any]: data["authorCommits"]["nodes"][0]["commit"]["authors"]["nodes"].append( {"name": "Some One", "email": "[email protected]"} ) return data class TestRerunCI(_TvmBotTest): """ Start a new CI job """ COMMENT = "@tvm-bot rerun" USER = "abc" EXPECTED = "Rerunning ci with" class TestRerunPermissions(_TvmBotTest): """ Start a new CI job as an unauthorized user """ COMMENT = "@tvm-bot rerun" USER = "someone" EXPECTED = "Failed auth check 'metionable_users', quitting" class TestRerunNonAuthor(_TvmBotTest): """ Start a new CI job as a mentionable user """ COMMENT = "@tvm-bot rerun" USER = "other-abc" EXPECTED = "Passed auth check 'metionable_users', continuing" class TestIgnoreJobs(_TvmBotTest): """ Ignore GitHub Actions jobs that don't start with CI / """ COMMENT = "@tvm-bot merge" USER = "abc" EXPECTED = "Dry run, would have merged" if __name__ == "__main__": tvm.testing.main()
https://github.com/zk-ml/tachikoma
tests/python/ci/test_utils.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Constants used in various CI tests """ import subprocess import pathlib from typing import List, Any REPO_ROOT = pathlib.Path(__file__).resolve().parent.parent.parent.parent class TempGit: """ A wrapper to run commands in a directory (specifically for use in CI tests) """ def __init__(self, cwd): self.cwd = cwd # Jenkins git is too old and doesn't have 'git init --initial-branch', # so init and checkout need to be separate steps self.run("init", stderr=subprocess.PIPE, stdout=subprocess.PIPE) self.run("checkout", "-b", "main", stderr=subprocess.PIPE) self.run("remote", "add", "origin", "https://github.com/apache/tvm.git") def run(self, *args, **kwargs): """ Run a git command based on *args """ proc = subprocess.run( ["git"] + list(args), encoding="utf-8", cwd=self.cwd, check=False, **kwargs ) if proc.returncode != 0: raise RuntimeError(f"git command failed: '{args}'") return proc def run_script(command: List[Any], check: bool = True, **kwargs): """ Wrapper to run a script and print its output if there was an error """ command = [str(c) for c in command] kwargs_to_send = { "stdout": subprocess.PIPE, "stderr": subprocess.PIPE, "encoding": "utf-8", } kwargs_to_send.update(kwargs) proc = 
subprocess.run( command, check=False, **kwargs_to_send, ) if check and proc.returncode != 0: raise RuntimeError(f"Process failed:\nstdout:\n{proc.stdout}\n\nstderr:\n{proc.stderr}") return proc
https://github.com/zk-ml/tachikoma
tests/python/conftest.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import sys import tvm import pytest collect_ignore = [] if sys.platform.startswith("win"): collect_ignore.append("frontend/caffe") collect_ignore.append("frontend/caffe2") collect_ignore.append("frontend/coreml") collect_ignore.append("frontend/darknet") collect_ignore.append("frontend/keras") collect_ignore.append("frontend/mxnet") collect_ignore.append("frontend/pytorch") collect_ignore.append("frontend/tensorflow") collect_ignore.append("frontend/tflite") collect_ignore.append("frontend/onnx") collect_ignore.append("driver/tvmc/test_autoscheduler.py") collect_ignore.append("unittest/test_auto_scheduler_cost_model.py") # stack overflow # collect_ignore.append("unittest/test_auto_scheduler_measure.py") # exception ignored collect_ignore.append("unittest/test_auto_scheduler_search_policy.py") # stack overflow # collect_ignore.append("unittest/test_auto_scheduler_measure.py") # exception ignored collect_ignore.append("unittest/test_tir_intrin.py") def pytest_addoption(parser): parser.addoption( "--enable-corstone300-tests", action="store_true", default=False, help="Run Corstone-300 FVP tests", ) def pytest_collection_modifyitems(config, items): if not config.getoption("--enable-corstone300-tests"): for 
item in items: if "corstone300" in item.keywords: item.add_marker( pytest.mark.skip( reason="Need --enable-corstone300-tests option to run this test" ) )
https://github.com/zk-ml/tachikoma
tests/python/contrib/test_arm_compute_lib/__init__.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Infrastructure and tests for Arm Compute Library"""
https://github.com/zk-ml/tachikoma
tests/python/contrib/test_arm_compute_lib/infrastructure.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from itertools import zip_longest, combinations import json import os import warnings import numpy as np import tvm from tvm import relay from tvm import rpc from tvm.contrib import graph_executor from tvm.relay.op.contrib import arm_compute_lib from tvm.contrib import utils from tvm.autotvm.measure import request_remote class Device: """ Configuration for Arm Compute Library tests. Check tests/python/contrib/arm_compute_lib/ for the presence of an test_config.json file. This file can be used to override the default configuration here which will attempt to run the Arm Compute Library runtime tests locally if the runtime is available. Changing the configuration will allow these runtime tests to be offloaded to a remote Arm device via a tracker for example. Notes ----- The test configuration will be loaded once when the class is created. If the configuration changes between tests, any changes will not be picked up. Parameters ---------- device : RPCSession Allows tests to connect to and use remote device. Attributes ---------- connection_type : str Details the type of RPC connection to use. 
Options: local - Use the local device, tracker - Connect to a tracker to request a remote device, remote - Connect to a remote device directly. host : str Specify IP address or hostname of remote target. port : int Specify port number of remote target. target : str The compilation target. device_key : str The device key of the remote target. Use when connecting to a remote device via a tracker. cross_compile : str Specify path to cross compiler to use when connecting a remote device from a non-arm platform. """ connection_type = "local" host = "127.0.0.1" port = 9090 target = "llvm -mtriple=aarch64-linux-gnu -mattr=+neon" device_key = "" cross_compile = "" def __init__(self): """Keep remote device for lifetime of object.""" self.device = self._get_remote() @classmethod def _get_remote(cls): """Get a remote (or local) device to use for testing.""" if cls.connection_type == "tracker": device = request_remote(cls.device_key, cls.host, cls.port, timeout=1000) elif cls.connection_type == "remote": device = rpc.connect(cls.host, cls.port) elif cls.connection_type == "local": device = rpc.LocalSession() else: raise ValueError( "connection_type in test_config.json should be one of: " "local, tracker, remote." ) return device @classmethod def load(cls, file_name): """Load test config Load the test configuration by looking for file_name relative to the test_arm_compute_lib directory. """ location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) config_file = os.path.join(location, file_name) if not os.path.exists(config_file): warnings.warn( "Config file doesn't exist, resuming Arm Compute Library tests with default config." 
) return with open(config_file, mode="r") as config: test_config = json.load(config) cls.connection_type = test_config["connection_type"] cls.host = test_config["host"] cls.port = test_config["port"] cls.target = test_config["target"] cls.device_key = test_config.get("device_key") or "" cls.cross_compile = test_config.get("cross_compile") or "" def get_cpu_op_count(mod): """Traverse graph counting ops offloaded to TVM.""" class Counter(tvm.relay.ExprVisitor): def __init__(self): super().__init__() self.count = 0 def visit_call(self, call): if isinstance(call.op, tvm.ir.Op): self.count += 1 super().visit_call(call) c = Counter() c.visit(mod["main"]) return c.count def skip_runtime_test(): """Skip test if it requires the runtime and it's not present.""" # ACL codegen not present. if not tvm.get_global_func("relay.ext.arm_compute_lib", True): print("Skip because Arm Compute Library codegen is not available.") return True # Remote device is in use or ACL runtime not present # Note: Ensure that the device config has been loaded before this check if ( not Device.connection_type != "local" and not arm_compute_lib.is_arm_compute_runtime_enabled() ): print("Skip because runtime isn't present or a remote device isn't being used.") return True def skip_codegen_test(): """Skip test if it requires the ACL codegen and it's not present.""" if not tvm.get_global_func("relay.ext.arm_compute_lib", True): print("Skip because Arm Compute Library codegen is not available.") return True def build_module( mod, target, params=None, enable_acl=True, tvm_ops=0, acl_partitions=1, disabled_ops=["concatenate"], ): """Build module with option to build for ACL.""" if isinstance(mod, tvm.relay.expr.Call): mod = tvm.IRModule.from_expr(mod) with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]): if enable_acl: mod = arm_compute_lib.partition_for_arm_compute_lib( mod, params, disabled_ops=disabled_ops ) tvm_op_count = get_cpu_op_count(mod) assert tvm_op_count == tvm_ops, "Got 
{} TVM operators, expected {}".format( tvm_op_count, tvm_ops ) partition_count = 0 for global_var in mod.get_global_vars(): if "arm_compute_lib" in global_var.name_hint: partition_count += 1 assert ( acl_partitions == partition_count ), "Got {} Arm Compute Library partitions, expected {}".format( partition_count, acl_partitions ) relay.backend.te_compiler.get().clear() return relay.build(mod, target=target, params=params) def build_and_run( mod, inputs, outputs, params, device, enable_acl=True, no_runs=1, tvm_ops=0, acl_partitions=1, config=None, disabled_ops=["concatenate"], ): """Build and run the relay module.""" if config is None: config = {} try: lib = build_module( mod, device.target, params, enable_acl, tvm_ops, acl_partitions, disabled_ops ) except Exception as e: err_msg = "The module could not be built.\n" if config: err_msg += f"The test failed with the following parameters: {config}\n" err_msg += str(e) raise Exception(err_msg) lib = update_lib(lib, device.device, device.cross_compile) gen_module = graph_executor.GraphModule(lib["default"](device.device.cpu(0))) gen_module.set_input(**inputs) out = [] for _ in range(no_runs): gen_module.run() out.append([gen_module.get_output(i) for i in range(outputs)]) return out def update_lib(lib, device, cross_compile): """Export the library to the remote/local device.""" lib_name = "mod.so" temp = utils.tempdir() lib_path = temp.relpath(lib_name) if cross_compile: lib.export_library(lib_path, cc=cross_compile) else: lib.export_library(lib_path) device.upload(lib_path) lib = device.load_module(lib_name) return lib def verify(answers, atol, rtol, verify_saturation=False, config=None): """Compare the array of answers. 
Each entry is a list of outputs.""" if config is None: config = {} if len(answers) < 2: raise RuntimeError(f"No results to compare: expected at least two, found {len(answers)}") for answer in zip_longest(*answers): for outs in combinations(answer, 2): try: if verify_saturation: assert ( np.count_nonzero(outs[0].numpy() == 255) < 0.25 * outs[0].numpy().size ), "Output is saturated: {}".format(outs[0]) assert ( np.count_nonzero(outs[0].numpy() == 0) < 0.25 * outs[0].numpy().size ), "Output is saturated: {}".format(outs[0]) tvm.testing.assert_allclose(outs[0].numpy(), outs[1].numpy(), rtol=rtol, atol=atol) except AssertionError as e: err_msg = "Results not within the acceptable tolerance.\n" if config: err_msg += f"The test failed with the following parameters: {config}\n" err_msg += str(e) raise AssertionError(err_msg) def extract_acl_modules(module): """Get the ACL module(s) from llvm module.""" return list( filter(lambda mod: mod.type_key == "arm_compute_lib", module.get_lib().imported_modules) ) def verify_codegen( module, known_good_codegen, num_acl_modules=1, tvm_ops=0, target="llvm -mtriple=aarch64-linux-gnu -mattr=+neon", disabled_ops=["concatenate"], ): """Check acl codegen against a known good output.""" module = build_module( module, target, tvm_ops=tvm_ops, acl_partitions=num_acl_modules, disabled_ops=disabled_ops, ) acl_modules = extract_acl_modules(module) assert len(acl_modules) == num_acl_modules, ( f"The number of Arm Compute Library modules produced ({len(acl_modules)}) does not " f"match the expected value ({num_acl_modules})." 
) for mod in acl_modules: source = mod.get_source("json") codegen = json.loads(source)["nodes"] # remove input and const names as these cannot be predetermined for node in range(len(codegen)): if codegen[node]["op"] == "input" or codegen[node]["op"] == "const": codegen[node]["name"] = "" codegen_str = json.dumps(codegen, sort_keys=True, indent=2) known_good_codegen_str = json.dumps(known_good_codegen, sort_keys=True, indent=2) assert codegen_str == known_good_codegen_str, ( f"The JSON produced by codegen does not match the expected result. \n" f"Actual={codegen_str} \n" f"Expected={known_good_codegen_str}" )
https://github.com/zk-ml/tachikoma
tests/python/contrib/test_arm_compute_lib/test_add.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Arm Compute Library integration reshape tests.""" import numpy as np import tvm import tvm.testing from tvm import relay from test_arm_compute_lib.infrastructure import ( skip_runtime_test, skip_codegen_test, build_and_run, verify, verify_codegen, ) from test_arm_compute_lib.infrastructure import Device _qnn_params = { "lhs_scale": relay.const(0.0156863, "float32"), "lhs_zero_point": relay.const(127, "int32"), "rhs_scale": relay.const(0.0117647, "float32"), "rhs_zero_point": relay.const(85, "int32"), "output_scale": relay.const(0.0235294, "float32"), "output_zero_point": relay.const(128, "int32"), } def _get_model(shape, dtype, var_names, op, op_params): a = relay.var(next(var_names), shape=shape, dtype=dtype) b = relay.var(next(var_names), shape=shape, dtype=dtype) return op(a, b, **op_params) def _get_expected_codegen(shape, dtype, op_name, qnn_params): input_a = {"op": "input", "name": "", "attrs": {"shape": [[list(shape)]], "dtype": [[dtype]]}} input_b = {"op": "input", "name": "", "attrs": {"shape": [[list(shape)]], "dtype": [[dtype]]}} input_qnn = [ { "op": "const", "name": "", "attrs": { "shape": [[list(qnn_params[_].data.shape)]], "dtype": [[qnn_params[_].data.dtype]], }, } for _ in 
qnn_params ] inputs = [input_a, input_b, *input_qnn] node = { "op": "kernel", "name": op_name, "inputs": [[_, 0, 0] for _ in range(len(inputs))], "attrs": { "num_inputs": str(len(inputs)), "num_outputs": "1", "shape": [[list(shape)]], "dtype": [[dtype]], }, } if qnn_params: node["attrs"]["lhs_axis"] = [["-1"]] node["attrs"]["rhs_axis"] = [["-1"]] return [*inputs, node] def test_runtime_add(): Device.load("test_config.json") if skip_runtime_test(): return device = Device() np.random.seed(0) for dtype, low, high, atol, rtol, op, op_params in [ ("float32", -127, 128, 1e-7, 1e-7, relay.add, {}), ("uint8", 0, 255, 0.0, 1.0, relay.qnn.op.add, _qnn_params), ]: shape = (2, 2) for inputs in [ { "a": tvm.nd.array(np.random.uniform(low, high, shape).astype(dtype)), "b": tvm.nd.array(np.random.uniform(low, high, shape).astype(dtype)), } ]: outputs = [] func = _get_model(shape, dtype, iter(inputs), op, op_params) for acl in [True, False]: outputs.append(build_and_run(func, inputs, 1, None, device, enable_acl=acl)[0]) config = { "shape": shape, "dtype": dtype, "inputs": inputs, "operation": op, "op_params": op_params, } verify(outputs, atol=atol, rtol=rtol, config=config, verify_saturation=False) def test_codegen_add(): if skip_codegen_test(): return inputs = {"a", "b"} for dtype, op_name, op, qnn_params in [ ("float32", "add", relay.add, {}), ("uint8", "qnn.add", relay.qnn.op.add, _qnn_params), ]: for shape in [(1, 1), (2, 2, 2), (3, 3, 3, 3)]: func = _get_model(shape, dtype, iter(inputs), op, qnn_params) exp_codegen = _get_expected_codegen(shape, dtype, op_name, qnn_params) verify_codegen(func, exp_codegen, 1) if __name__ == "__main__": test_codegen_add() test_runtime_add()
https://github.com/zk-ml/tachikoma
tests/python/contrib/test_arm_compute_lib/test_concatenate.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm Compute Library integration concatenate tests."""

import numpy as np

import tvm
from tvm import relay
from tvm import testing

from test_arm_compute_lib.infrastructure import (
    skip_runtime_test,
    skip_codegen_test,
    build_and_run,
    verify,
    verify_codegen,
)
from test_arm_compute_lib.infrastructure import Device


def _get_model(input_shape_a, input_shape_b, input_shape_c, axis, dtype, var_names):
    """Return a three-input concatenate model along ``axis``.

    Parameters
    ----------
    input_shape_a, input_shape_b, input_shape_c : sequence of int
        Shapes of the three inputs; they must agree on every axis except
        the concatenation axis.
    axis : int
        Axis to concatenate along (may be negative).
    dtype : str
        Data type shared by the three inputs.
    var_names : iterator of str
        Yields the names for the three relay variables.
    """
    a = relay.var(next(var_names), shape=input_shape_a, dtype=dtype)
    b = relay.var(next(var_names), shape=input_shape_b, dtype=dtype)
    c = relay.var(next(var_names), shape=input_shape_c, dtype=dtype)
    out = relay.concatenate([a, b, c], axis)
    return out


def _get_expected_codegen(input_shape_a, input_shape_b, input_shape_c, axis, dtype):
    """Return the expected JSON graph for the partitioned concatenate op.

    NOTE(review): the output shape is hard-coded to [6, 234, 234, 256],
    which only matches the fixed shapes/axis used by
    test_codegen_concatenate ([1|2|3, 234, 234, 256] along axis 0) —
    this helper is not general over its shape arguments.
    """
    node = {
        "op": "kernel",
        "name": "concatenate",
        "inputs": [
            [0, 0, 0],
            [1, 0, 0],
            [2, 0, 0],
        ],
        "attrs": {
            "num_outputs": "1",
            "num_inputs": "3",
            "dtype": [[dtype]],
            "axis": [[str(axis)]],
            "shape": [[[6, 234, 234, 256]]],
        },
    }

    input_a = {
        "op": "input",
        "name": "",
        "attrs": {
            "shape": [[input_shape_a]],
            "dtype": [[dtype]],
        },
    }

    input_b = {
        "op": "input",
        "name": "",
        "attrs": {
            "shape": [[input_shape_b]],
            "dtype": [[dtype]],
        },
    }

    input_c = {
        "op": "input",
        "name": "",
        "attrs": {
            "shape": [[input_shape_c]],
            "dtype": [[dtype]],
        },
    }

    return [input_a, input_b, input_c, node]


def test_concatenate():
    """Run concatenate with and without ACL over several axes/dtypes and compare."""
    Device.load("test_config.json")

    if skip_runtime_test():
        return

    device = Device()
    np.random.seed(0)

    # Covers positive, negative (-1) and fully-negative (-4 == 0 for rank-4)
    # axes, for both float32 and uint8.
    for input_shape_a, input_shape_b, input_shape_c, axis, dtype in [
        ([1, 234, 234, 256], [2, 234, 234, 256], [3, 234, 234, 256], 0, "float32"),
        ([1, 1, 234, 256], [1, 2, 234, 256], [1, 3, 234, 256], 1, "float32"),
        ([1, 234, 234, 1], [1, 234, 234, 2], [1, 234, 234, 3], -1, "float32"),
        ([1, 234, 234, 256], [2, 234, 234, 256], [3, 234, 234, 256], -4, "float32"),
        ([1, 234, 234, 256], [2, 234, 234, 256], [3, 234, 234, 256], 0, "uint8"),
        ([1, 1, 234, 256], [1, 2, 234, 256], [1, 3, 234, 256], 1, "uint8"),
        ([1, 234, 234, 1], [1, 234, 234, 2], [1, 234, 234, 3], -1, "uint8"),
        ([1, 234, 234, 256], [2, 234, 234, 256], [3, 234, 234, 256], -4, "uint8"),
    ]:
        outputs = []
        inputs = {
            "a": tvm.nd.array(np.random.randn(*input_shape_a).astype(dtype)),
            "b": tvm.nd.array(np.random.randn(*input_shape_b).astype(dtype)),
            "c": tvm.nd.array(np.random.randn(*input_shape_c).astype(dtype)),
        }
        func = _get_model(
            inputs["a"].shape, inputs["b"].shape, inputs["c"].shape, axis, dtype, iter(inputs)
        )
        # Build once with plain TVM, once with ACL offload, then verify the
        # two runs agree.
        for acl in [False, True]:
            outputs.append(
                build_and_run(func, inputs, 1, None, device, enable_acl=acl, disabled_ops=[])[0]
            )

        config = {
            "input_shape_a": input_shape_a,
            "input_shape_b": input_shape_b,
            "input_shape_c": input_shape_c,
            "axis": axis,
            "dtype": dtype,
        }
        verify(outputs, atol=1e-7, rtol=1e-7, config=config)


def test_codegen_concatenate():
    """Check the generated JSON graph for the concatenate operator."""
    if skip_codegen_test():
        return
    # Fixed shapes matching the hard-coded output shape in
    # _get_expected_codegen (1 + 2 + 3 = 6 along axis 0).
    shape_a = [1, 234, 234, 256]
    shape_b = [2, 234, 234, 256]
    shape_c = [3, 234, 234, 256]
    axis = 0
    inputs = {"a", "b", "c"}
    for dtype in ["float32"]:
        args = (shape_a, shape_b, shape_c, axis, dtype)
        func = _get_model(*args, iter(inputs))
        exp_codegen = _get_expected_codegen(*args)
        verify_codegen(func, exp_codegen, 1, disabled_ops=[])


if __name__ == "__main__":
    test_concatenate()
    test_codegen_concatenate()
https://github.com/zk-ml/tachikoma
tests/python/contrib/test_arm_compute_lib/test_conv2d.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm Compute Library integration conv2d tests."""

import numpy as np

import tvm
from tvm import relay

from test_arm_compute_lib.infrastructure import (
    skip_runtime_test,
    skip_codegen_test,
    build_and_run,
    verify,
    verify_codegen,
)
from test_arm_compute_lib.infrastructure import Device


def _get_model(
    shape,
    kernel_h,
    kernel_w,
    padding,
    strides,
    dilation,
    groups,
    dtype,
    channels,
    var_names,
    has_bias=False,
    has_activation=False,
    has_pad=False,
):
    """Return a (possibly padded / biased / relu'd) NHWC conv2d model and its params.

    When ``has_pad`` is set an explicit nn.pad is inserted and the conv's own
    padding is zeroed; otherwise the input shape is grown to account for the
    conv padding so trials stay shape-comparable.
    """
    a = relay.var(next(var_names), shape=shape, dtype=dtype)
    if has_pad:
        p = ((0, 0), (padding[0], padding[0]), (padding[1], padding[1]), (0, 0))
        a = relay.nn.pad(a, pad_width=p)
        padding = (0, 0, 0, 0)
    else:
        if len(padding) == 2:
            padding = (padding[0], padding[1], padding[0], padding[1])
        shape = (shape[0], shape[1] + padding[0] * 2, shape[2] + padding[1] * 2, shape[3])
    # Depthwise iff input channels == output channels == groups; the kernel
    # layout differs between the two cases.
    is_depthwise = shape[3] == channels == groups
    weight_format = "HWOI" if is_depthwise else "HWIO"
    if weight_format == "HWIO":
        weight_shape = (kernel_h, kernel_w, shape[3] // groups, channels)
    else:
        weight_shape = (kernel_h, kernel_w, channels, shape[3] // groups)
    w = tvm.nd.array(np.random.uniform(-128, 127, weight_shape).astype(dtype))
    weights = relay.const(w, dtype)
    out = relay.nn.conv2d(
        a,
        weights,
        kernel_size=(kernel_h, kernel_w),
        data_layout="NHWC",
        kernel_layout=weight_format,
        dilation=dilation,
        strides=strides,
        padding=padding,
        groups=groups,
        channels=channels,
        out_dtype=dtype,
    )
    params = {"w": w}
    if has_bias:
        # Bias length equals the channel dimension of the chosen kernel layout.
        bias_shape = weight_shape[2] if is_depthwise else weight_shape[3]
        b = tvm.nd.array(np.random.uniform(-128, 127, bias_shape).astype(dtype))
        biasc = relay.const(b, dtype)
        out = relay.nn.bias_add(out, biasc, axis=3)
        params["b"] = b
    if has_activation:
        out = relay.nn.relu(out)
    return out, params


def _get_qnn_params(input_zp, input_sc, kernel_zp, kernel_sc, kernel_h, kernel_w, channels):
    """Get output qnn parameters given input and kernel parameters.

    Derives an output scale/zero-point wide enough to cover the extreme
    accumulator values a uint8 conv with the given quantization could produce.
    """
    input_max = input_sc * (255 - input_zp)
    input_min = -input_sc * input_zp
    kernel_max = kernel_sc * (255 - kernel_zp)
    kernel_min = -kernel_sc * kernel_zp
    # All four sign combinations of (input extreme) x (kernel extreme),
    # accumulated over the whole receptive field.
    output_limits = [
        kernel_max * kernel_h * kernel_w * channels * input_max,
        kernel_min * kernel_h * kernel_w * channels * input_max,
        kernel_min * kernel_h * kernel_w * channels * input_min,
        kernel_max * kernel_h * kernel_w * channels * input_min,
    ]
    output_max = max(output_limits)
    output_min = min(output_limits)
    output_sc = (output_max - output_min) / 255
    output_zp = -int(output_min / output_sc)
    return output_zp, output_sc


def _get_qnn_model(
    shape,
    kernel_h,
    kernel_w,
    padding,
    strides,
    dilation,
    groups,
    dtype,
    channels,
    input_zp,
    input_sc,
    kernel_zp,
    kernel_sc,
    output_zp,
    output_sc,
    var_names,
    has_bias=False,
    has_activation=False,
    has_pad=False,
):
    """Return a quantized conv2d model (qnn.conv2d + requantize) and its params.

    Mirrors :func:`_get_model` but builds the uint8 quantized pipeline:
    int32 accumulation followed by a requantize back to uint8.
    """
    a = relay.var(next(var_names), shape=shape, dtype=dtype)
    if has_pad:
        # Pad with the input zero point so padding is a quantized zero.
        p = ((0, 0), (padding[0], padding[0]), (padding[1], padding[1]), (0, 0))
        a = relay.nn.pad(a, pad_width=p, pad_value=input_zp, pad_mode="constant")
        padding = (0, 0, 0, 0)
    else:
        if len(padding) == 2:
            padding = (padding[0], padding[1], padding[0], padding[1])
        shape = (shape[0], shape[1] + padding[0] * 2, shape[2] + padding[1] * 2, shape[3])
    is_depthwise = shape[3] == channels == groups
    weight_format = "HWOI" if is_depthwise else "HWIO"
    if weight_format == "HWIO":
        weight_shape = (kernel_h, kernel_w, shape[3] // groups, channels)
    else:
        weight_shape = (kernel_h, kernel_w, channels, shape[3] // groups)
    w = tvm.nd.array(np.random.uniform(0, 255, weight_shape).astype(dtype))
    weights = relay.const(w, dtype)
    out = relay.qnn.op.conv2d(
        a,
        weights,
        input_zero_point=relay.const(input_zp, "int32"),
        kernel_zero_point=relay.const(kernel_zp, "int32"),
        input_scale=relay.const(input_sc, "float32"),
        kernel_scale=relay.const(kernel_sc, "float32"),
        kernel_size=(kernel_h, kernel_w),
        data_layout="NHWC",
        kernel_layout=weight_format,
        dilation=dilation,
        strides=strides,
        padding=padding,
        groups=groups,
        channels=channels,
        out_dtype="int32",
    )
    params = {"w": w}
    if has_bias:
        bias_shape = weight_shape[2] if is_depthwise else weight_shape[3]
        b = tvm.nd.array(np.random.uniform(-128, 127, bias_shape).astype("int32"))
        biasc = relay.const(b, "int32")
        out = relay.nn.bias_add(out, biasc, axis=3)
        params["b"] = b
    if has_activation:
        out = relay.nn.relu(out)
    # Requantize the int32 accumulator back to uint8 output space.
    req = relay.qnn.op.requantize(
        out,
        relay.const(input_sc * kernel_sc, "float32"),  # input scale
        relay.const(0, "int32"),  # input zero point
        relay.const(output_sc, "float32"),  # output scale
        relay.const(output_zp, "int32"),  # output zero point
        out_dtype="uint8",
    )
    return req, params


def _get_expected_codegen(
    shape,
    kernel_h,
    kernel_w,
    padding,
    strides,
    dilation,
    groups,
    dtype,
    channels,
    has_bias=False,
    has_activation=False,
):
    """Return the expected JSON graph for the partitioned (qnn.)conv2d operator.

    uint8 dtype implies the quantized variant: extra scalar const inputs for
    the input/kernel/output quantization params and an int32 out_dtype.
    """
    if len(padding) == 2:
        padding = (padding[0], padding[1], padding[0], padding[1])
    output_height = ((shape[1] - kernel_h + padding[0] + padding[2]) / strides[0]) + 1
    output_width = ((shape[2] - kernel_w + padding[1] + padding[3]) / strides[1]) + 1
    output_shape = (1, int(output_height), int(output_width), channels)
    out_dtype = "int32" if dtype == "uint8" else "float32"

    is_depthwise = shape[3] == channels == groups
    # After ACL conversion the kernel layout is IHWO (depthwise) / OHWI.
    weight_format = "IHWO" if is_depthwise else "OHWI"
    if weight_format == "IHWO":
        weight_shape = (shape[3] // groups, kernel_h, kernel_w, channels)
    else:
        weight_shape = (channels, kernel_h, kernel_w, shape[3] // groups)
    if is_depthwise:
        name = "nn.depthwise_conv2d"
    else:
        name = "nn.conv2d"

    node = {
        "op": "kernel",
        "name": name,
        "inputs": [],
        "attrs": {
            "groups": [[str(groups)]],
            "num_outputs": "1",
            "data_layout": [["NHWC"]],
            "kernel_layout": [[weight_format]],
            "channels": [[str(channels)]],
            "dilation": [[str(dilation[0]), str(dilation[1])]],
            "out_layout": [[""]],
            "out_dtype": [[out_dtype]],
            "kernel_size": [[str(kernel_h), str(kernel_w)]],
            "shape": [[list(output_shape)]],
            "dtype": [[dtype]],
            "padding": [[str(p) for p in padding]],
            "strides": [[str(s) for s in strides]],
        },
    }

    if has_activation:
        node["attrs"]["activation_type"] = [["relu"]]

    inputs = [
        {"op": "input", "name": "", "attrs": {"shape": [[list(shape)]], "dtype": [[str(dtype)]]}},
        {
            "op": "const",
            "name": "",
            "attrs": {"shape": [[list(weight_shape)]], "dtype": [[str(dtype)]]},
        },
    ]

    # qnn.conv2d params, input and kernel
    if dtype == "uint8":
        node["name"] = "qnn." + node["name"].split(".")[1]
        # Two int32 zero points then two float32 scales, all scalar consts.
        for param_dtype in ["int32", "float32"]:
            for _ in range(2):
                inputs.append(
                    {
                        "op": "const",
                        "name": "",
                        "attrs": {"shape": [[[]]], "dtype": [[param_dtype]]},
                    }
                )

    if has_bias:
        bias_dtype = "int32" if dtype == "uint8" else "float32"
        inputs.append(
            {
                "op": "const",
                "name": "",
                "attrs": {
                    "shape": [[[1, 1, 1, weight_shape[3] if is_depthwise else weight_shape[0]]]],
                    "dtype": [[bias_dtype]],
                },
            }
        )

    # qnn.conv2d params, output
    if dtype == "uint8":
        for param_dtype in ["float32", "int32"]:
            inputs.append(
                {"op": "const", "name": "", "attrs": {"shape": [[[]]], "dtype": [[param_dtype]]}}
            )

    # Wire every input node into the kernel node in order.
    input_idx = 0
    for _ in range(len(inputs)):
        node["inputs"].append([input_idx, 0, 0])
        input_idx += 1
    node["attrs"]["num_inputs"] = str(len(inputs))
    inputs.append(node)
    return inputs


def test_conv2d():
    """Run fp32 conv2d trials with and without ACL and compare the outputs."""
    Device.load("test_config.json")

    if skip_runtime_test():
        return

    device = Device()
    np.random.seed(0)

    dtype = "float32"
    # Each trial: [kernel_h, kernel_w, pad, stride, dilation, out_channels,
    #              shape(HWC), (has_pad, has_bias, has_activation), is_depthwise]
    trials = [
        # Normal convolution
        [2, 2, (1, 1), (1, 1), (1, 1), 4, (10, 10, 14), (False, False, False), False],
        [2, 1, (2, 2), (1, 1), (1, 1), 7, (12, 15, 16), (False, False, True), False],
        [3, 3, (2, 1), (1, 1), (1, 1), 4, (10, 10, 14), (False, True, False), False],
        [3, 3, (1, 1), (1, 1), (1, 1), 16, (12, 15, 16), (False, False, False), False],
        [5, 5, (1, 1), (2, 2), (1, 1), 4, (10, 10, 14), (True, False, False), False],
        [1, 3, (1, 1), (1, 1), (1, 1), 7, (20, 20, 20), (False, False, True), False],
        [2, 2, (2, 2), (1, 1), (1, 1), 4, (20, 20, 20), (False, True, False), False],
        [5, 5, (1, 1), (2, 2), (1, 1), 4, (10, 10, 14), (True, False, False), False],
        [3, 3, (2, 1), (1, 1), (1, 1), 7, (20, 20, 20), (False, False, False), False],
        [3, 3, (1, 1), (2, 2), (1, 1), 16, (10, 10, 14), (False, True, True), False],
        # Depth-wise convolution
        [3, 3, (1, 1), (1, 1), (1, 1), 20, (20, 20, 20), (False, False, True), True],
        [5, 5, (2, 2), (1, 1), (1, 1), 20, (20, 20, 20), (False, True, False), True],
        [3, 3, (2, 2), (2, 2), (1, 1), 14, (10, 10, 14), (True, False, False), True],
        [5, 5, (0, 0), (1, 1), (1, 1), 20, (20, 20, 20), (False, False, False), True],
        [3, 3, (1, 1), (2, 2), (1, 1), 14, (10, 10, 14), (False, True, True), True],
    ]
    for (
        kernel_h,
        kernel_w,
        pad,
        stride,
        dilation,
        out_channels,
        shape,
        composite,
        is_depthwise,
    ) in trials:
        shape = (1, *shape)
        if is_depthwise:
            groups = shape[3]
        else:
            groups = 1
        outputs = []
        inputs = {
            "a": tvm.nd.array(np.random.uniform(-128, 127, shape).astype(dtype)),
        }

        func, params = _get_model(
            shape,
            kernel_h,
            kernel_w,
            pad,
            stride,
            dilation,
            groups,
            dtype,
            out_channels,
            iter(inputs),
            has_pad=composite[0],
            has_bias=composite[1],
            has_activation=composite[2],
        )
        for acl in [False, True]:
            outputs.append(build_and_run(func, inputs, 1, params, device, enable_acl=acl)[0])

        config = {
            "shape": shape,
            "groups": groups,
            "kernel size": (kernel_h, kernel_w),
            "padding": pad,
            "stride": stride,
            "dilation": dilation,
            "out channels": out_channels,
            "composite operators (pad, bias, activation)": composite,
        }
        verify(outputs, atol=0.002, rtol=0.01, config=config)


def test_codegen_conv2d():
    """Check the generated JSON graph for each fp32 conv2d trial."""
    if skip_codegen_test():
        return

    dtype = "float32"
    trials = [
        # Normal convolution
        [2, 2, (1, 1), (1, 1), (1, 1), 4, (10, 10, 14), (False, False, False), False],
        [2, 1, (2, 2), (1, 1), (1, 1), 7, (12, 15, 16), (False, False, True), False],
        [3, 3, (2, 1), (1, 1), (1, 1), 4, (10, 10, 14), (False, True, False), False],
        [3, 3, (1, 1), (1, 1), (1, 1), 16, (12, 15, 16), (False, False, False), False],
        [5, 5, (1, 1), (2, 2), (1, 1), 4, (10, 10, 14), (True, False, False), False],
        [1, 3, (1, 1), (1, 1), (1, 1), 7, (20, 20, 20), (False, False, True), False],
        [2, 2, (2, 2), (1, 1), (1, 1), 4, (20, 20, 20), (False, True, False), False],
        [5, 5, (1, 1), (2, 2), (1, 1), 4, (10, 10, 14), (True, False, False), False],
        [3, 3, (2, 1), (1, 1), (1, 1), 7, (20, 20, 20), (False, False, False), False],
        [3, 3, (1, 1), (2, 2), (1, 1), 16, (10, 10, 14), (False, True, True), False],
        # Depth-wise convolution
        [3, 3, (1, 1), (1, 1), (1, 1), 20, (20, 20, 20), (False, False, True), True],
        [5, 5, (2, 2), (1, 1), (1, 1), 20, (20, 20, 20), (False, True, False), True],
        [3, 3, (2, 2), (2, 2), (1, 1), 14, (10, 10, 14), (True, False, False), True],
        [5, 5, (0, 0), (1, 1), (1, 1), 20, (20, 20, 20), (False, False, False), True],
        [3, 3, (1, 1), (2, 2), (1, 1), 14, (10, 10, 14), (False, True, True), True],
    ]
    for (
        kernel_h,
        kernel_w,
        pad,
        stride,
        dilation,
        out_channels,
        shape,
        composite,
        is_depthwise,
    ) in trials:
        shape = (1, *shape)
        if is_depthwise:
            groups = shape[3]
        else:
            groups = 1
        inputs = {"a"}

        args = (shape, kernel_h, kernel_w, pad, stride, dilation, groups, dtype, out_channels)

        func, params = _get_model(
            *args,
            var_names=iter(inputs),
            has_pad=composite[0],
            has_bias=composite[1],
            has_activation=composite[2],
        )
        exp_codegen = _get_expected_codegen(
            *args, has_bias=composite[1], has_activation=composite[2]
        )
        verify_codegen(func, exp_codegen, 1)


def test_qnn_conv2d():
    """Run uint8 qnn.conv2d trials with and without ACL and compare outputs."""
    Device.load("test_config.json")

    if skip_runtime_test():
        return

    device = Device()
    np.random.seed(0)

    dtype = "uint8"
    trials = [
        # Normal convolution
        [2, 2, (1, 1), (1, 1), (1, 1), 4, (10, 10, 14), (False, False, False), False],
        [2, 1, (2, 2), (1, 1), (1, 1), 7, (12, 15, 16), (False, False, True), False],
        [3, 3, (2, 1), (1, 1), (1, 1), 4, (10, 10, 14), (False, True, False), False],
        [3, 3, (1, 1), (1, 1), (1, 1), 16, (12, 15, 16), (False, False, False), False],
        [5, 5, (1, 1), (2, 2), (1, 1), 4, (10, 10, 14), (True, False, False), False],
        [1, 3, (1, 1), (1, 1), (1, 1), 7, (20, 20, 20), (False, False, True), False],
        [2, 2, (2, 2), (1, 1), (1, 1), 4, (20, 20, 20), (False, True, False), False],
        [5, 5, (1, 1), (2, 2), (1, 1), 4, (10, 10, 14), (True, False, False), False],
        [3, 3, (2, 1), (1, 1), (1, 1), 7, (20, 20, 20), (False, False, False), False],
        [3, 3, (1, 1), (2, 2), (1, 1), 16, (10, 10, 14), (False, True, True), False],
        # Depth-wise convolution
        [3, 3, (1, 1), (1, 1), (1, 1), 20, (20, 20, 20), (False, False, True), True],
        [5, 5, (2, 2), (1, 1), (1, 1), 20, (20, 20, 20), (False, True, False), True],
        [3, 3, (2, 2), (2, 2), (1, 1), 14, (10, 10, 14), (True, False, False), True],
        [5, 5, (0, 0), (1, 1), (1, 1), 20, (20, 20, 20), (False, False, False), True],
        [3, 3, (1, 1), (2, 2), (1, 1), 14, (10, 10, 14), (False, True, True), True],
    ]
    for (
        kernel_h,
        kernel_w,
        pad,
        stride,
        dilation,
        out_channels,
        shape,
        composite,
        is_depthwise,
    ) in trials:
        shape = (1, *shape)
        if is_depthwise:
            groups = shape[3]
        else:
            groups = 1
        outputs = []
        inputs = {"a": tvm.nd.array(np.random.uniform(0, 255, shape).astype(dtype))}
        # Fixed input/kernel quantization; output params derived to avoid
        # accumulator clipping.
        input_zp = 100
        input_sc = 0.5
        kernel_zp = 25
        kernel_sc = 0.03
        output_zp, output_sc = _get_qnn_params(
            input_zp, input_sc, kernel_zp, kernel_sc, kernel_h, kernel_w, shape[3]
        )

        func, params = _get_qnn_model(
            shape,
            kernel_h,
            kernel_w,
            pad,
            stride,
            dilation,
            groups,
            dtype,
            out_channels,
            input_zp,
            input_sc,
            kernel_zp,
            kernel_sc,
            output_zp,
            output_sc,
            iter(inputs),
            has_pad=composite[0],
            has_bias=composite[1],
            has_activation=composite[2],
        )
        for acl in [False, True]:
            outputs.append(build_and_run(func, inputs, 1, params, device, enable_acl=acl)[0])

        config = {
            "shape": shape,
            "groups": groups,
            "kernel size": (kernel_h, kernel_w),
            "padding": pad,
            "stride": stride,
            "dilation": dilation,
            "out channels": out_channels,
            "composite operators (pad, bias, activation)": composite,
            "input scale": input_sc,
            "input zero point": input_zp,
            "kernel scale": kernel_sc,
            "kernel zero point": kernel_zp,
            "output scale": output_sc,
            "output zero point": output_zp,
        }
        # Depthwise rounding differs slightly more between backends, hence
        # the wider absolute tolerance.
        atol = 2 if is_depthwise else 1
        verify(outputs, atol=atol, rtol=0, config=config, verify_saturation=True)


def test_codegen_qnn_conv2d():
    """Check the generated JSON graph for each uint8 qnn.conv2d trial."""
    if skip_codegen_test():
        return

    dtype = "uint8"
    trials = [
        # Normal convolution
        [2, 2, (1, 1), (1, 1), (1, 1), 4, (10, 10, 14), (False, False, False), False],
        [2, 1, (2, 2), (1, 1), (1, 1), 7, (12, 15, 16), (False, False, True), False],
        [3, 3, (2, 1), (1, 1), (1, 1), 4, (10, 10, 14), (False, True, False), False],
        [3, 3, (1, 1), (1, 1), (1, 1), 16, (12, 15, 16), (False, False, False), False],
        [5, 5, (1, 1), (2, 2), (1, 1), 4, (10, 10, 14), (True, False, False), False],
        [1, 3, (1, 1), (1, 1), (1, 1), 7, (20, 20, 20), (False, False, True), False],
        [2, 2, (2, 2), (1, 1), (1, 1), 4, (20, 20, 20), (False, True, False), False],
        [5, 5, (1, 1), (2, 2), (1, 1), 4, (10, 10, 14), (True, False, False), False],
        [3, 3, (2, 1), (1, 1), (1, 1), 7, (20, 20, 20), (False, False, False), False],
        [3, 3, (1, 1), (2, 2), (1, 1), 16, (10, 10, 14), (False, True, True), False],
        # Depth-wise convolution
        [3, 3, (1, 1), (1, 1), (1, 1), 20, (20, 20, 20), (False, False, True), True],
        [5, 5, (2, 2), (1, 1), (1, 1), 20, (20, 20, 20), (False, True, False), True],
        [3, 3, (2, 2), (2, 2), (1, 1), 14, (10, 10, 14), (True, False, False), True],
        [5, 5, (0, 0), (1, 1), (1, 1), 20, (20, 20, 20), (False, False, False), True],
        [3, 3, (1, 1), (2, 2), (1, 1), 14, (10, 10, 14), (False, True, True), True],
    ]
    for (
        kernel_h,
        kernel_w,
        pad,
        stride,
        dilation,
        out_channels,
        shape,
        composite,
        is_depthwise,
    ) in trials:
        shape = (1, *shape)
        if is_depthwise:
            groups = shape[3]
        else:
            groups = 1
        inputs = {"a"}
        input_zp = 100
        input_sc = 0.5
        kernel_zp = 25
        kernel_sc = 0.03
        output_zp, output_sc = _get_qnn_params(
            input_zp, input_sc, kernel_zp, kernel_sc, kernel_h, kernel_w, shape[3]
        )

        args = (shape, kernel_h, kernel_w, pad, stride, dilation, groups, dtype, out_channels)

        func, params = _get_qnn_model(
            *args,
            input_zp=input_zp,
            input_sc=input_sc,
            kernel_zp=kernel_zp,
            kernel_sc=kernel_sc,
            output_zp=output_zp,
            output_sc=output_sc,
            var_names=iter(inputs),
            has_pad=composite[0],
            has_bias=composite[1],
            has_activation=composite[2],
        )
        exp_codegen = _get_expected_codegen(
            *args, has_bias=composite[1], has_activation=composite[2]
        )
        verify_codegen(func, exp_codegen, 1)


if __name__ == "__main__":
    test_conv2d()
    test_qnn_conv2d()
    test_codegen_conv2d()
    test_codegen_qnn_conv2d()
https://github.com/zk-ml/tachikoma