file_path | content | repo
---|---|---|
include/tvm/topi/x86/default.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file x86/default.h
* \brief default x86 schedule
*/
#ifndef TVM_TOPI_X86_DEFAULT_H_
#define TVM_TOPI_X86_DEFAULT_H_
#include <tvm/target/generic_func.h>
#include <tvm/te/operation.h>
#include <tvm/te/schedule_pass.h>
#include <tvm/topi/detail/fuse.h>
#include <tvm/topi/tags.h>
namespace tvm {
namespace topi {
using namespace tvm::te;
namespace x86 {
/*!
* \brief Helper to create a default x86 schedule for the given ops.
*
* \param target The target to generate a schedule for.
* \param outs The output tensors.
* \param auto_inline Whether to apply the auto inline step.
*
* \return A schedule for the given ops.
*/
inline Schedule MakeDefaultSchedule(const Target& target, const Array<Tensor>& outs,
bool auto_inline) {
Array<Operation> out_ops;
for (auto t : outs) {
out_ops.push_back(t->op);
}
auto s = create_schedule(out_ops);
auto x = outs[0];
auto axis = s[x]->op.as<ComputeOpNode>()->axis;
if (auto_inline) {
tvm::te::AutoInlineInjective(s);
if (axis.size() > 0) {
detail::Fuse(s[x], axis);
}
return s;
}
if (axis.size() == 4) {
auto n = axis[0];
auto c = axis[1];
auto fused = detail::Fuse(s[x], {n, c});  // fuse the two outermost axes (n and h for NHWC layout)
s[x].parallel(fused);
} else {
s[x].parallel(axis[0]);
}
return s;
}
/*!
* \brief Create a default x86 schedule for the given ops.
*
* \param target The target to generate a schedule for.
* \param outs The output tensors.
*
* \return A schedule for the given ops.
*/
inline Schedule default_schedule(const Target& target, const Array<Tensor>& outs) {
return MakeDefaultSchedule(target, outs, false);
}
/*!
* \brief Create a default x86 schedule for the given ops, with auto inline
*
* \param target The target to generate a schedule for.
* \param outs The output tensors.
*
* \return A schedule for the given ops.
*/
inline Schedule default_schedule_auto_inline(const Target& target, const Array<Tensor>& outs) {
return MakeDefaultSchedule(target, outs, true);
}
} // namespace x86
} // namespace topi
} // namespace tvm
#endif // TVM_TOPI_X86_DEFAULT_H_
| https://github.com/zk-ml/tachikoma |
include/tvm/topi/x86/injective.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file x86/injective.h
* \brief x86 schedule for injective ops
*/
#ifndef TVM_TOPI_X86_INJECTIVE_H_
#define TVM_TOPI_X86_INJECTIVE_H_
#include <tvm/target/generic_func.h>
#include <tvm/te/operation.h>
#include <tvm/topi/detail/fuse.h>
#include <tvm/topi/tags.h>
namespace tvm {
namespace topi {
using namespace tvm::te;
namespace x86 {
/*!
* \brief Updates an existing schedule for the given injective ops.
*
* \param sch The schedule to update.
* \param out The tensor representing the injective op.
*
* \return The updated schedule.
*/
inline Schedule schedule_injective_from_existing(Schedule sch, const Tensor& out) {
auto axis = sch[out]->op.as<ComputeOpNode>()->axis;
if (axis.size() == 4) {
auto n = axis[0];
auto c = axis[1];
auto fused = detail::Fuse(sch[out], {n, c});  // fuse the two outermost axes (n and h for NHWC layout)
sch[out].parallel(fused);
} else {
sch[out].parallel(axis[0]);
}
return sch;
}
/*!
* \brief Create an x86 schedule for the given injective ops.
*
* \param target The target to generate a schedule for.
* \param outs The output tensors.
*
* \return A schedule for the given ops.
*/
inline Schedule schedule_injective(const Target& target, const Array<Tensor>& outs) {
Array<Operation> out_ops;
for (auto t : outs) {
out_ops.push_back(t->op);
}
auto s = create_schedule(out_ops);
tvm::te::AutoInlineInjective(s);
auto x = outs[0];
schedule_injective_from_existing(s, x);
return s;
}
} // namespace x86
} // namespace topi
} // namespace tvm
#endif // TVM_TOPI_X86_INJECTIVE_H_
| https://github.com/zk-ml/tachikoma |
jvm/core/src/main/java/org/apache/tvm/API.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.tvm;
import java.util.HashMap;
import java.util.Map;
/**
* TVM API functions.
*/
public final class API {
private static ThreadLocal<Map<String, Function>> apiFuncs
= new ThreadLocal<Map<String, Function>>() {
@Override
protected Map<String, Function> initialValue() {
return new HashMap<String, Function>();
}
};
/**
* Get a TVM API function by name.
* @param name function name.
* @return a TVM Function.
*/
public static Function get(final String name) {
Function func = apiFuncs.get().get(name);
if (func == null) {
func = Function.getFunction(name);
apiFuncs.get().put(name, func);
}
return func;
}
/**
* Cannot be instantiated.
*/
private API() {
}
}
| https://github.com/zk-ml/tachikoma |
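A minimal usage sketch (hypothetical, not part of this repo): API.get resolves a registered global packed function and caches it in a per-thread map, so repeated lookups are cheap. The global name "runtime.RuntimeEnabled" is borrowed from Module.enabled further below; the example class itself is an assumption.

import org.apache.tvm.API;
import org.apache.tvm.Function;
import org.apache.tvm.TVMValue;

public class ApiGetExample {
  public static void main(String[] args) {
    // Look up a registered global function; the handle is cached per thread.
    Function enabled = API.get("runtime.RuntimeEnabled");
    TVMValue ret = enabled.pushArg("cpu").invoke();
    System.out.println("cpu runtime enabled: " + (ret.asLong() != 0));
    ret.release();
  }
}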
jvm/core/src/main/java/org/apache/tvm/APIInternal.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.tvm;
/**
* Internal API functions.
*/
public final class APIInternal {
/**
* Get a TVM API function by name.
* @param name function name.
* @return a TVM Function.
*/
public static Function get(final String name) {
return API.get(name);
}
/**
* Cannot be instantiated.
*/
private APIInternal() {
}
}
| https://github.com/zk-ml/tachikoma |
jvm/core/src/main/java/org/apache/tvm/ArgTypeCode.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.tvm;
// Type code used in API calls
public enum ArgTypeCode {
INT(0), UINT(1), FLOAT(2), HANDLE(3), NULL(4), TVM_TYPE(5),
DLDEVICE(6), ARRAY_HANDLE(7), NODE_HANDLE(8), MODULE_HANDLE(9),
FUNC_HANDLE(10), STR(11), BYTES(12), NDARRAY_CONTAINER(13);
public final int id;
private ArgTypeCode(int id) {
this.id = id;
}
@Override
public String toString() {
return String.valueOf(id);
}
}
| https://github.com/zk-ml/tachikoma |
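A small sketch (hypothetical class) of how this enum is used: the numeric id mirrors the C runtime's argument type codes, and it is the value that crosses the JNI boundary; toString() deliberately prints that id.

import org.apache.tvm.ArgTypeCode;

public class ArgTypeCodeExample {
  public static void main(String[] args) {
    // The numeric id is what the native side sees in API calls.
    System.out.println(ArgTypeCode.FUNC_HANDLE.id);     // 10
    System.out.println(ArgTypeCode.NDARRAY_CONTAINER);  // "13" via toString()
  }
}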
jvm/core/src/main/java/org/apache/tvm/Base.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.tvm;
import org.apache.tvm.NativeLibraryLoader.Action;
import java.io.File;
import java.io.IOException;
/**
* Initializing methods and types.
*/
final class Base {
/**
* Hold Long reference for JNI.
*/
public static class RefLong {
public final long value;
public RefLong(final long value) {
this.value = value;
}
public RefLong() {
this(0L);
}
}
/**
* Hold TVMValue reference for JNI.
*/
public static class RefTVMValue {
public final TVMValue value;
public RefTVMValue(TVMValue value) {
this.value = value;
}
public RefTVMValue() {
this(null);
}
}
public static final LibInfo _LIB = new LibInfo();
static {
boolean loadNativeRuntimeLib = true;
try {
try {
tryLoadLibraryOS("tvm4j");
} catch (UnsatisfiedLinkError e) {
System.err.println("[WARN] TVM native library not found in path. "
+ "Copying native library from the archive. "
+ "Consider installing the library somewhere in the path "
+ "(for Windows: PATH, for Linux: LD_LIBRARY_PATH), "
+ "or specifying by Java cmd option -Djava.library.path=[lib path].");
NativeLibraryLoader.loadLibrary("tvm4j");
}
} catch (Throwable e) {
System.err.println("[WARN] Couldn't find native library tvm4j.");
e.printStackTrace();
System.err.println("Try to load tvm4j (runtime packed version) ...");
try {
System.loadLibrary("tvm4j_runtime_packed");
// if tvm runtime is packed in libtvm4j, we do not need to dlopen libtvm_runtime.so.
loadNativeRuntimeLib = false;
} catch (UnsatisfiedLinkError errFull) {
System.err.println("[ERROR] Couldn't find native library tvm4j_runtime_packed.");
throw new RuntimeException(errFull);
}
}
System.err.println("libtvm4j loads successfully.");
if (loadNativeRuntimeLib) {
String tvmLibFilename = System.getProperty("libtvm.so.path");
if (tvmLibFilename == null || !new File(tvmLibFilename).isFile()
|| _LIB.nativeLibInit(tvmLibFilename) != 0) {
try {
String runtimeLibname;
String os = System.getProperty("os.name");
// ref: http://lopica.sourceforge.net/os.html
if (os.startsWith("Linux")) {
runtimeLibname = "libtvm_runtime.so";
} else if (os.startsWith("Mac")) {
runtimeLibname = "libtvm_runtime.dylib";
} else {
// TODO(yizhi) support windows later
throw new UnsatisfiedLinkError(os + " not supported currently");
}
NativeLibraryLoader.extractResourceFileToTempDir(runtimeLibname, new Action() {
@Override public void invoke(File target) {
System.err.println("Loading tvm runtime from " + target.getPath());
checkCall(_LIB.nativeLibInit(target.getPath()));
}
});
} catch (IOException e) {
throw new RuntimeException(e);
}
}
} else {
_LIB.nativeLibInit(null);
}
Runtime.getRuntime().addShutdownHook(new Thread() {
@Override public void run() {
_LIB.shutdown();
}
});
}
/**
* Load the JNI library for the current OS.
* @param libname library name.
* @throws UnsatisfiedLinkError if loading fails.
*/
private static void tryLoadLibraryOS(String libname) throws UnsatisfiedLinkError {
try {
System.err.println(String.format("Try loading %s from native path.", libname));
System.loadLibrary(libname);
} catch (UnsatisfiedLinkError e) {
String os = System.getProperty("os.name");
// ref: http://lopica.sourceforge.net/os.html
if (os.startsWith("Linux")) {
tryLoadLibraryXPU(libname, "linux-x86_64");
} else if (os.startsWith("Mac")) {
tryLoadLibraryXPU(libname, "osx-x86_64");
} else {
// TODO(yizhi) support windows later
throw new UnsatisfiedLinkError("Windows not supported currently");
}
}
}
/**
* Load native library for different architectures.
* @param libname library name.
* @param arch architecture.
* @throws UnsatisfiedLinkError if loading fails
*/
private static void tryLoadLibraryXPU(String libname, String arch) throws UnsatisfiedLinkError {
System.err.println(String.format("Try loading %s-%s from native path.", libname, arch));
System.loadLibrary(String.format("%s-%s", libname, arch));
}
// helper function definitions
/**
* Check the return value of C API call.
* <p>
* This function raises an exception when an error occurs.
* Wrap every API call with this function.
* </p>
* @param ret return value from API calls
*/
public static void checkCall(int ret) throws TVMError {
if (ret != 0) {
throw new TVMError(_LIB.tvmGetLastError());
}
}
/**
* TVM Runtime error.
*/
static class TVMError extends RuntimeException {
public TVMError(String err) {
super(err);
}
}
/**
* Cannot be instantiated.
*/
private Base() {
}
}
| https://github.com/zk-ml/tachikoma |
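Base is package-private, so the following sketch (an assumption, placed in org.apache.tvm) only illustrates the convention the class establishes: native calls return an int status and write results through Ref* holders, and checkCall turns a non-zero status into a TVMError carrying tvmGetLastError().

package org.apache.tvm;

// Hypothetical in-package sketch of the status/out-parameter convention.
class BaseUsageSketch {
  static long lookupGlobal(String name) {
    Base.RefLong handle = new Base.RefLong();
    // checkCall raises Base.TVMError with the last error message on failure.
    Base.checkCall(Base._LIB.tvmFuncGetGlobal(name, handle));
    return handle.value;  // 0 means the global function was not found
  }
}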
jvm/core/src/main/java/org/apache/tvm/Device.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.tvm;
import java.util.HashMap;
import java.util.Map;
import org.apache.tvm.rpc.RPC;
public class Device {
/**
* Provides the same information as the C++ enums DLDeviceType and
* TVMDeviceExtType.
*/
static final int kDLCPU = 1, kDLCUDA = 2, kDLCUDAHost = 3, kDLOpenCL = 4, kDLVulkan = 7,
kDLMetal = 8, kDLVPI = 9, kDLROCM = 10, kDLROCMHost = 11, kDLExtDev = 12,
kDLCUDAManaged = 13, kDLOneAPI = 14, kDLWebGPU = 15, kDLHexagon = 16,
kDLAOCL = 32, kDLSDAccel = 33, kOpenGL = 34, kDLMicroDev = 35;
private static final Map<Integer, String> MASK2STR = new HashMap<Integer, String>();
private static final Map<String, Integer> STR2MASK = new HashMap<String, Integer>();
static {
MASK2STR.put(kDLCPU, "cpu");
MASK2STR.put(kDLCUDA, "cuda");
MASK2STR.put(kDLOpenCL, "opencl");
MASK2STR.put(kDLVulkan, "vulkan");
MASK2STR.put(kDLMetal, "metal");
MASK2STR.put(kDLVPI, "vpi");
MASK2STR.put(kDLHexagon, "hexagon");
STR2MASK.put("cpu", kDLCPU);
STR2MASK.put("cuda", kDLCUDA);
STR2MASK.put("cl", kDLOpenCL);
STR2MASK.put("opencl", kDLOpenCL);
STR2MASK.put("vulkan", kDLVulkan);
STR2MASK.put("metal", kDLMetal);
STR2MASK.put("vpi", kDLVPI);
STR2MASK.put("hexagon", kDLHexagon);
}
/**
* Construct a CPU device.
* @param devId The device id
* @return The created device
*/
public static Device cpu(int devId) {
return new Device(kDLCPU, devId);
}
public static Device cpu() {
return cpu(0);
}
/**
* Construct a CUDA GPU device.
* @param devId The device id
* @return The created device
*/
public static Device cuda(int devId) {
return new Device(kDLCUDA, devId);
}
public static Device cuda() {
return cuda(0);
}
/**
* Construct an OpenCL device.
* @param devId The device id
* @return The created device
*/
public static Device opencl(int devId) {
return new Device(kDLOpenCL, devId);
}
public static Device opencl() {
return opencl(0);
}
/**
* Construct a Vulkan device.
* @param devId The device id
* @return The created device
*/
public static Device vulkan(int devId) {
return new Device(kDLVulkan, devId);
}
public static Device vulkan() {
return vulkan(0);
}
/**
* Construct a metal device.
* @param devId The device id
* @return The created device
*/
public static Device metal(int devId) {
return new Device(kDLMetal, devId);
}
public static Device metal() {
return metal(0);
}
/**
* Construct a VPI simulated device.
* @param devId The device id
* @return The created device
*/
public static Device vpi(int devId) {
return new Device(kDLVPI, devId);
}
public static Device vpi() {
return vpi(0);
}
/**
* Construct a Hexagon device.
* @param devId The device id
* @return The created device
*/
public static Device hexagon(int devId) {
return new Device(kDLHexagon, devId);
}
public static Device hexagon() {
return hexagon(0);
}
public final int deviceType;
public final int deviceId;
public Device(int deviceType, int deviceId) {
this.deviceType = deviceType;
this.deviceId = deviceId;
}
public Device(String deviceType, int deviceId) {
this(STR2MASK.get(deviceType), deviceId);
}
/**
* Whether this device exists.
* @return true if it exists.
*/
public boolean exist() {
TVMValue ret =
APIInternal.get("_GetDeviceAttr").pushArg(deviceType).pushArg(deviceId).pushArg(0).invoke();
return ((TVMValueLong) ret).value != 0;
}
/**
* Maximum number of threads on each block.
* @return the maximum thread number.
*/
public long maxThreadsPerBlock() {
TVMValue ret =
APIInternal.get("_GetDeviceAttr").pushArg(deviceType).pushArg(deviceId).pushArg(1).invoke();
return ((TVMValueLong) ret).value;
}
/**
* Number of threads that execute concurrently (the warp size).
* @return the thread number.
*/
public long warpSize() {
TVMValue ret =
APIInternal.get("_GetDeviceAttr").pushArg(deviceType).pushArg(deviceId).pushArg(2).invoke();
return ((TVMValueLong) ret).value;
}
/**
* Synchronize until all queued jobs on the device have finished.
*/
public void sync() {
Base.checkCall(Base._LIB.tvmSynchronize(deviceType, deviceId));
}
@Override
public int hashCode() {
return (deviceType << 16) | deviceId;
}
@Override
public boolean equals(Object other) {
if (other != null && other instanceof Device) {
Device obj = (Device) other;
return deviceId == obj.deviceId && deviceType == obj.deviceType;
}
return false;
}
@Override
public String toString() {
if (deviceType >= RPC.RPC_SESS_MASK) {
int tblId = deviceType / RPC.RPC_SESS_MASK - 1;
int devType = deviceType % RPC.RPC_SESS_MASK;
return String.format("remote[%d]:%s(%d)", tblId, MASK2STR.get(devType), deviceId);
}
return String.format("%s(%d)", MASK2STR.get(deviceType), deviceId);
}
}
| https://github.com/zk-ml/tachikoma |
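A short usage sketch (the example class is hypothetical): devices are cheap value objects, and the attribute queries go through the "_GetDeviceAttr" packed function shown above.

import org.apache.tvm.Device;

public class DeviceExample {
  public static void main(String[] args) {
    Device dev = Device.cpu();  // same as Device.cpu(0)
    if (dev.exist()) {
      // exist()/warpSize() query device attributes via _GetDeviceAttr.
      System.out.println(dev + " warp size: " + dev.warpSize());
    }
    dev.sync();  // block until queued work on the device has finished
  }
}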
jvm/core/src/main/java/org/apache/tvm/Function.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.tvm;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
/**
* TVM Packed Function.
*/
public class Function extends TVMValue {
final long handle;
public final boolean isResident;
private boolean isReleased = false;
/**
* Get registered function.
* @param name full function name.
* @return TVM function.
*/
public static Function getFunction(final String name) {
for (String fullName : listGlobalFuncNames()) {
if (fullName.equals(name)) {
return getGlobalFunc(fullName, true, false);
}
}
return null;
}
/**
* Get list of global functions registered.
* @return List of global function names.
*/
private static List<String> listGlobalFuncNames() {
List<String> names = new ArrayList<String>();
Base.checkCall(Base._LIB.tvmFuncListGlobalNames(names));
return Collections.unmodifiableList(names);
}
/**
* Get a global function by name.
* @param name The name of the function.
* @param isResident Whether it is a global 'resident' function.
* @param allowMissing Whether to allow a missing function instead of raising an error.
* @return The function, or null if it is missing.
*/
private static Function getGlobalFunc(String name, boolean isResident, boolean allowMissing) {
Base.RefLong handle = new Base.RefLong();
Base.checkCall(Base._LIB.tvmFuncGetGlobal(name, handle));
if (handle.value != 0) {
return new Function(handle.value, isResident);
} else {
if (allowMissing) {
return null;
} else {
throw new IllegalArgumentException("Cannot find global function " + name);
}
}
}
/**
* Initialize the function with handle.
* @param handle the handle to the underlying function.
* @param isResident Whether this is a resident function in jvm
*/
Function(long handle, boolean isResident) {
super(ArgTypeCode.FUNC_HANDLE);
this.handle = handle;
this.isResident = isResident;
}
Function(long handle) {
this(handle, false);
}
@Override protected void finalize() throws Throwable {
release();
super.finalize();
}
/**
* Convenience method to get this instance from a returned TVMValue.
* @return this
*/
@Override public Function asFunction() {
return this;
}
@Override long asHandle() {
return handle;
}
/**
* Release the Function.
* <p>
* We highly recommend doing this manually, since the GC strategy is lazy.
* </p>
*/
@Override public void release() {
if (!isReleased) {
if (!isResident) {
Base.checkCall(Base._LIB.tvmFuncFree(handle));
isReleased = true;
}
}
}
/**
* Invoke the function.
* @return the result.
*/
public TVMValue invoke() {
Base.RefTVMValue ret = new Base.RefTVMValue();
Base.checkCall(Base._LIB.tvmFuncCall(handle, ret));
return ret.value;
}
/**
* Push argument to the function.
* @param arg int argument.
* @return this
*/
public Function pushArg(int arg) {
Base._LIB.tvmFuncPushArgLong(arg);
return this;
}
/**
* Push argument to the function.
* @param arg long argument.
* @return this
*/
public Function pushArg(long arg) {
Base._LIB.tvmFuncPushArgLong(arg);
return this;
}
/**
* Push argument to the function.
* @param arg float argument.
* @return this
*/
public Function pushArg(float arg) {
Base._LIB.tvmFuncPushArgDouble(arg);
return this;
}
/**
* Push argument to the function.
* @param arg double argument.
* @return this
*/
public Function pushArg(double arg) {
Base._LIB.tvmFuncPushArgDouble(arg);
return this;
}
/**
* Push argument to the function.
* @param arg String argument.
* @return this
*/
public Function pushArg(String arg) {
Base._LIB.tvmFuncPushArgString(arg);
return this;
}
/**
* Push argument to the function.
* @param arg NDArray.
* @return this
*/
public Function pushArg(NDArrayBase arg) {
int id = arg.isView ? ArgTypeCode.ARRAY_HANDLE.id : ArgTypeCode.NDARRAY_CONTAINER.id;
Base._LIB.tvmFuncPushArgHandle(arg.handle, id);
return this;
}
/**
* Push argument to the function.
* @param arg Module.
* @return this
*/
public Function pushArg(Module arg) {
Base._LIB.tvmFuncPushArgHandle(arg.handle, ArgTypeCode.MODULE_HANDLE.id);
return this;
}
/**
* Push argument to the function.
* @param arg Function.
* @return this
*/
public Function pushArg(Function arg) {
Base._LIB.tvmFuncPushArgHandle(arg.handle, ArgTypeCode.FUNC_HANDLE.id);
return this;
}
/**
* Push argument to the function.
* @param arg bytes.
* @return this
*/
public Function pushArg(byte[] arg) {
Base._LIB.tvmFuncPushArgBytes(arg);
return this;
}
/**
* Invoke function with arguments.
* @param args Can be Integer, Long, Float, Double, String, NDArray.
* @return the result.
*/
public TVMValue call(Object... args) {
for (Object arg : args) {
pushArgToStack(arg);
}
return invoke();
}
private static void pushArgToStack(Object arg) {
if (arg instanceof Integer) {
Base._LIB.tvmFuncPushArgLong((Integer) arg);
} else if (arg instanceof Long) {
Base._LIB.tvmFuncPushArgLong((Long) arg);
} else if (arg instanceof Float) {
Base._LIB.tvmFuncPushArgDouble((Float) arg);
} else if (arg instanceof Double) {
Base._LIB.tvmFuncPushArgDouble((Double) arg);
} else if (arg instanceof String) {
Base._LIB.tvmFuncPushArgString((String) arg);
} else if (arg instanceof byte[]) {
Base._LIB.tvmFuncPushArgBytes((byte[]) arg);
} else if (arg instanceof NDArrayBase) {
NDArrayBase nd = (NDArrayBase) arg;
int id = nd.isView ? ArgTypeCode.ARRAY_HANDLE.id : ArgTypeCode.NDARRAY_CONTAINER.id;
Base._LIB.tvmFuncPushArgHandle(nd.handle, id);
} else if (arg instanceof Module) {
Base._LIB.tvmFuncPushArgHandle(((Module) arg).handle, ArgTypeCode.MODULE_HANDLE.id);
} else if (arg instanceof Function) {
Base._LIB.tvmFuncPushArgHandle(((Function) arg).handle, ArgTypeCode.FUNC_HANDLE.id);
} else if (arg instanceof TVMValue) {
TVMValue tvmArg = (TVMValue) arg;
switch (tvmArg.typeCode) {
case UINT:
case INT:
Base._LIB.tvmFuncPushArgLong(tvmArg.asLong());
break;
case FLOAT:
Base._LIB.tvmFuncPushArgDouble(tvmArg.asDouble());
break;
case STR:
Base._LIB.tvmFuncPushArgString(tvmArg.asString());
break;
case BYTES:
Base._LIB.tvmFuncPushArgBytes(tvmArg.asBytes());
break;
case HANDLE:
case ARRAY_HANDLE:
case MODULE_HANDLE:
case FUNC_HANDLE:
Base._LIB.tvmFuncPushArgHandle(tvmArg.asHandle(), tvmArg.typeCode.id);
break;
default:
throw new IllegalArgumentException("Invalid argument: " + arg);
}
} else {
throw new IllegalArgumentException("Invalid argument: " + arg);
}
}
public static interface Callback {
public Object invoke(TVMValue... args);
}
/**
* Register user-defined global function.
* @param name The function name.
* @param function The function to be registered.
* @param override Whether override existing entry.
*/
public static void register(String name, Callback function, boolean override) {
Base.RefLong createdFuncHandleRef = new Base.RefLong();
Base.checkCall(Base._LIB.tvmFuncCreateFromCFunc(function, createdFuncHandleRef));
int ioverride = override ? 1 : 0;
Base.checkCall(Base._LIB.tvmFuncRegisterGlobal(name, createdFuncHandleRef.value, ioverride));
}
/**
* Register user-defined global function, do not override existing entry.
* @param name The function name.
* @param function The function to be registered.
*/
public static void register(String name, Callback function) {
register(name, function, false);
}
/**
* Convert a Java function to TVM function.
* @param function Java function.
* @return TVM function.
*/
public static Function convertFunc(Callback function) {
Base.RefLong createdFuncHandleRef = new Base.RefLong();
Base.checkCall(Base._LIB.tvmFuncCreateFromCFunc(function, createdFuncHandleRef));
return new Function(createdFuncHandleRef.value);
}
private static Object invokeRegisteredCbFunc(Callback cb, TVMValue[] args) {
if (cb == null) {
System.err.println("[ERROR] Failed to get registered function");
return null;
}
return cb.invoke(args);
}
}
| https://github.com/zk-ml/tachikoma |
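A usage sketch mirroring the API above (the example class is hypothetical): convertFunc wraps a Java callback as a TVM packed function, and pushArg/invoke drive the per-thread argument stack.

import org.apache.tvm.Function;
import org.apache.tvm.TVMValue;

public class FunctionExample {
  public static void main(String[] args) {
    // Wrap a Java callback as a TVM packed function.
    Function sum = Function.convertFunc(new Function.Callback() {
      @Override public Object invoke(TVMValue... values) {
        long res = 0L;
        for (TVMValue v : values) {
          res += v.asLong();
        }
        return res;
      }
    });
    TVMValue ret = sum.pushArg(1).pushArg(2).pushArg(3).invoke();
    System.out.println(ret.asLong());  // 6
    ret.release();
    sum.release();  // release manually; the GC strategy is lazy
  }
}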
jvm/core/src/main/java/org/apache/tvm/LibInfo.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.tvm;
import java.util.List;
class LibInfo {
native int nativeLibInit(String tvmLibFile);
native int shutdown();
native String tvmGetLastError();
// Function
native void tvmFuncPushArgLong(long arg);
native void tvmFuncPushArgDouble(double arg);
native void tvmFuncPushArgString(String arg);
native void tvmFuncPushArgBytes(byte[] arg);
native void tvmFuncPushArgHandle(long arg, int argType);
native int tvmFuncListGlobalNames(List<String> funcNames);
native int tvmFuncFree(long handle);
native int tvmFuncGetGlobal(String name, Base.RefLong handle);
native int tvmFuncCall(long handle, Base.RefTVMValue retVal);
native int tvmFuncCreateFromCFunc(Function.Callback function, Base.RefLong handle);
native int tvmFuncRegisterGlobal(String name, long handle, int override);
// Module
native int tvmModFree(long handle);
native int tvmModGetFunction(long handle, String name,
int queryImports, Base.RefLong retHandle);
native int tvmModImport(long mod, long dep);
// NDArray
native int tvmArrayFree(long handle);
native int tvmArrayAlloc(long[] shape, int dtypeCode, int dtypeBits, int dtypeLanes,
int deviceType, int deviceId, Base.RefLong refHandle);
native int tvmArrayGetShape(long handle, List<Long> shape);
native int tvmArrayCopyFromTo(long from, long to);
native int tvmArrayCopyFromJArray(byte[] fromRaw, long from, long to);
native int tvmArrayCopyToJArray(long from, byte[] to);
// Device
native int tvmSynchronize(int deviceType, int deviceId);
}
| https://github.com/zk-ml/tachikoma |
jvm/core/src/main/java/org/apache/tvm/Module.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.tvm;
import java.util.HashMap;
import java.util.Map;
/**
* Container of compiled functions of TVM.
*/
public class Module extends TVMValue {
public final long handle;
private boolean isReleased = false;
private static ThreadLocal<Map<String, Function>> apiFuncs
= new ThreadLocal<Map<String, Function>>() {
@Override
protected Map<String, Function> initialValue() {
return new HashMap<String, Function>();
}
};
private static Function getApi(String name) {
Function func = apiFuncs.get().get(name);
if (func == null) {
func = Function.getFunction("runtime." + name);
apiFuncs.get().put(name, func);
}
return func;
}
Module(long handle) {
super(ArgTypeCode.MODULE_HANDLE);
this.handle = handle;
}
private Function entry = null;
private final String entryName = "__tvm_main__";
@Override protected void finalize() throws Throwable {
release();
super.finalize();
}
/**
* Convenience method to get this instance from a returned TVMValue.
* @return this
*/
@Override public Module asModule() {
return this;
}
@Override long asHandle() {
return handle;
}
/**
* Release the Module.
* <p>
* We highly recommend doing this manually, since the GC strategy is lazy.
* </p>
*/
@Override public void release() {
if (!isReleased) {
Base.checkCall(Base._LIB.tvmModFree(handle));
isReleased = true;
}
}
/**
* Get the entry function.
* @return The entry function if it exists
*/
public Function entryFunc() {
if (entry == null) {
entry = getFunction(entryName);
}
return entry;
}
/**
* Get function from the module.
* @param name The name of the function.
* @param queryImports Whether to also query modules imported by this module.
* @return The result function.
*/
public Function getFunction(String name, boolean queryImports) {
Base.RefLong retHandle = new Base.RefLong();
Base.checkCall(Base._LIB.tvmModGetFunction(
handle, name, queryImports ? 1 : 0, retHandle));
if (retHandle.value == 0) {
throw new IllegalArgumentException("Module has no function " + name);
}
return new Function(retHandle.value, false);
}
public Function getFunction(String name) {
return getFunction(name, false);
}
/**
* Add module to the import list of current one.
* @param module The other module.
*/
public void importModule(Module module) {
Base.checkCall(Base._LIB.tvmModImport(handle, module.handle));
}
/**
* Get type key of the module.
* @return type key of the module.
*/
public String typeKey() {
return getApi("ModuleGetTypeKey").pushArg(this).invoke().asString();
}
/**
* Load module from file.
* @param path The path to the module file.
* @param fmt The format of the file,
* if not specified it will be inferred from the file suffix.
* @return The loaded module
*/
public static Module load(String path, String fmt) {
TVMValue ret = getApi("ModuleLoadFromFile").pushArg(path).pushArg(fmt).invoke();
assert ret.typeCode == ArgTypeCode.MODULE_HANDLE;
return ret.asModule();
}
public static Module load(String path) {
return load(path, "");
}
/**
* Whether the module runtime is enabled for a target.
* For example, Module.enabled("cuda") checks whether the CUDA runtime is enabled.
* @param target The target device type.
* @return Whether runtime is enabled.
*/
public static boolean enabled(String target) {
TVMValue ret = getApi("RuntimeEnabled").pushArg(target).invoke();
return ret.asLong() != 0;
}
}
| https://github.com/zk-ml/tachikoma |
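A loading sketch; the module path "add.so" and function name "myadd" are hypothetical placeholders for a module you compiled with TVM.

import org.apache.tvm.Function;
import org.apache.tvm.Module;

public class ModuleExample {
  public static void main(String[] args) {
    Module mod = Module.load("add.so");  // format inferred from the suffix
    Function myadd = mod.getFunction("myadd");
    // ... push NDArray arguments and invoke() as shown for Function ...
    myadd.release();
    mod.release();  // release manually; the GC strategy is lazy
  }
}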
jvm/core/src/main/java/org/apache/tvm/NDArray.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.tvm;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.ArrayList;
import java.util.List;
/**
* Lightweight NDArray class of TVM runtime.
*/
public class NDArray extends NDArrayBase {
private final TVMType dtype;
private final Device device;
NDArray(long handle, boolean isView, TVMType dtype, Device dev) {
super(handle, isView);
this.dtype = dtype;
this.device = dev;
}
@Override
protected void finalize() throws Throwable {
super.finalize();
}
/**
* Copy from a native array.
* The NDArray type must be float64
* @param sourceArray the source data
*/
public void copyFrom(double[] sourceArray) {
checkCopySize(sourceArray.length);
if (dtype.typeCode != TVMType.FLOAT || dtype.bits != 64) {
throw new IllegalArgumentException("Cannot set double[] for " + dtype.toString() + " array");
}
byte[] nativeArr = new byte[sourceArray.length * dtype.numOfBytes];
for (int i = 0; i < sourceArray.length; ++i) {
wrapBytes(nativeArr, i * dtype.numOfBytes, dtype.numOfBytes).putDouble(sourceArray[i]);
}
NDArray tmpArr = empty(shape(), this.dtype);
Base.checkCall(Base._LIB.tvmArrayCopyFromJArray(nativeArr, tmpArr.handle, handle));
tmpArr.release();
}
/**
* Copy from a native array.
* The NDArray type must be float32
* @param sourceArray the source data
*/
public void copyFrom(float[] sourceArray) {
checkCopySize(sourceArray.length);
if (dtype.typeCode != TVMType.FLOAT || dtype.bits != 32) {
throw new IllegalArgumentException("Cannot set float[] for " + dtype.toString() + " array");
}
byte[] nativeArr = new byte[sourceArray.length * dtype.numOfBytes];
for (int i = 0; i < sourceArray.length; ++i) {
wrapBytes(nativeArr, i * dtype.numOfBytes, dtype.numOfBytes).putFloat(sourceArray[i]);
}
NDArray tmpArr = empty(shape(), this.dtype);
Base.checkCall(Base._LIB.tvmArrayCopyFromJArray(nativeArr, tmpArr.handle, handle));
tmpArr.release();
}
/**
* Copy from a native array.
* The NDArray type must be int64
* @param sourceArray the source data
*/
public void copyFrom(long[] sourceArray) {
checkCopySize(sourceArray.length);
if (dtype.typeCode != TVMType.INT || dtype.bits != 64) {
throw new IllegalArgumentException("Cannot set long[] for " + dtype.toString() + " array");
}
byte[] nativeArr = new byte[sourceArray.length * dtype.numOfBytes];
for (int i = 0; i < sourceArray.length; ++i) {
wrapBytes(nativeArr, i * dtype.numOfBytes, dtype.numOfBytes).putLong(sourceArray[i]);
}
NDArray tmpArr = empty(shape(), this.dtype);
Base.checkCall(Base._LIB.tvmArrayCopyFromJArray(nativeArr, tmpArr.handle, handle));
tmpArr.release();
}
/**
* Copy from a native array.
* The NDArray type must be int32
* @param sourceArray the source data
*/
public void copyFrom(int[] sourceArray) {
checkCopySize(sourceArray.length);
if (dtype.typeCode != TVMType.INT || dtype.bits != 32) {
throw new IllegalArgumentException("Cannot set int[] for " + dtype.toString() + " array");
}
byte[] nativeArr = new byte[sourceArray.length * dtype.numOfBytes];
for (int i = 0; i < sourceArray.length; ++i) {
wrapBytes(nativeArr, i * dtype.numOfBytes, dtype.numOfBytes).putInt(sourceArray[i]);
}
NDArray tmpArr = empty(shape(), this.dtype);
Base.checkCall(Base._LIB.tvmArrayCopyFromJArray(nativeArr, tmpArr.handle, handle));
tmpArr.release();
}
/**
* Copy from a native array.
* The NDArray type must be int16
* @param sourceArray the source data
*/
public void copyFrom(short[] sourceArray) {
checkCopySize(sourceArray.length);
if (dtype.typeCode != TVMType.INT || dtype.bits != 16) {
throw new IllegalArgumentException("Cannot set short[] for " + dtype.toString() + " array");
}
byte[] nativeArr = new byte[sourceArray.length * dtype.numOfBytes];
for (int i = 0; i < sourceArray.length; ++i) {
wrapBytes(nativeArr, i * dtype.numOfBytes, dtype.numOfBytes).putShort(sourceArray[i]);
}
NDArray tmpArr = empty(shape(), this.dtype);
Base.checkCall(Base._LIB.tvmArrayCopyFromJArray(nativeArr, tmpArr.handle, handle));
tmpArr.release();
}
/**
* Copy from a native array.
* The NDArray type must be int8
* @param sourceArray the source data
*/
public void copyFrom(byte[] sourceArray) {
checkCopySize(sourceArray.length);
if (dtype.typeCode != TVMType.INT || dtype.bits != 8) {
throw new IllegalArgumentException("Cannot set byte[] for " + dtype.toString() + " array");
}
copyFromRaw(sourceArray);
}
/**
* Copy from a native array.
* The NDArray type must be uint16
* @param sourceArray the source data
*/
public void copyFrom(char[] sourceArray) {
checkCopySize(sourceArray.length);
if (dtype.typeCode != TVMType.UINT || dtype.bits != 16) {
throw new IllegalArgumentException("Cannot set char[] for " + dtype.toString() + " array");
}
byte[] nativeArr = new byte[sourceArray.length * dtype.numOfBytes];
for (int i = 0; i < sourceArray.length; ++i) {
wrapBytes(nativeArr, i * dtype.numOfBytes, dtype.numOfBytes).putChar(sourceArray[i]);
}
NDArray tmpArr = empty(shape(), this.dtype);
Base.checkCall(Base._LIB.tvmArrayCopyFromJArray(nativeArr, tmpArr.handle, handle));
tmpArr.release();
}
private void checkCopySize(int sourceLength) {
long arrSize = size();
if (arrSize != sourceLength) {
throw new IllegalArgumentException(
String.format("Array shape size not match: %d v.s. %d", sourceLength, size()));
}
}
/**
* Copy from a raw byte array.
* @param sourceArray the source data
*/
public void copyFromRaw(byte[] sourceArray) {
NDArray tmpArr = empty(shape(), this.dtype);
Base.checkCall(Base._LIB.tvmArrayCopyFromJArray(sourceArray, tmpArr.handle, handle));
tmpArr.release();
}
/**
* Get shape of current NDArray.
* @return an array representing shape of current ndarray
*/
public long[] shape() {
List<Long> data = new ArrayList<Long>();
Base.checkCall(Base._LIB.tvmArrayGetShape(handle, data));
long[] shapeArr = new long[data.size()];
for (int i = 0; i < shapeArr.length; ++i) {
shapeArr[i] = data.get(i);
}
return shapeArr;
}
/**
* Get the total number of elements in the current NDArray.
* @return the element count of the current NDArray.
*/
public long size() {
long product = 1L;
long[] shapeArr = shape();
for (int i = 0; i < shapeArr.length; ++i) {
product *= shapeArr[i];
}
return product;
}
/**
* Return a copied flat java array of current array (row-major).
* The NDArray dtype must be float64
* @return A copy of array content.
*/
public double[] asDoubleArray() {
if (dtype.typeCode != TVMType.FLOAT || dtype.bits != 64) {
throw new IllegalArgumentException(
"Cannot set convert to double[] for " + dtype.toString() + " array");
}
byte[][] units = groupInternalBytes();
double[] array = new double[units.length];
for (int i = 0; i < units.length; ++i) {
array[i] = wrapBytes(units[i]).getDouble();
}
return array;
}
/**
* Return a copied flat java array of current array (row-major).
* The NDArray dtype must be float32
* @return A copy of array content.
*/
public float[] asFloatArray() {
if (dtype.typeCode != TVMType.FLOAT || dtype.bits != 32) {
throw new IllegalArgumentException(
"Cannot set convert to float[] for " + dtype.toString() + " array");
}
byte[][] units = groupInternalBytes();
float[] array = new float[units.length];
for (int i = 0; i < units.length; ++i) {
array[i] = wrapBytes(units[i]).getFloat();
}
return array;
}
/**
* Return a copied flat java array of current array (row-major).
* The NDArray dtype must be int64
* @return A copy of array content.
*/
public long[] asLongArray() {
if (dtype.typeCode != TVMType.INT || dtype.bits != 64) {
throw new IllegalArgumentException(
"Cannot set convert to long[] for " + dtype.toString() + " array");
}
byte[][] units = groupInternalBytes();
long[] array = new long[units.length];
for (int i = 0; i < units.length; ++i) {
array[i] = wrapBytes(units[i]).getLong();
}
return array;
}
/**
* Return a copied flat java array of current array (row-major).
* The NDArray dtype must be int32
* @return A copy of array content.
*/
public int[] asIntArray() {
if (dtype.typeCode != TVMType.INT || dtype.bits != 32) {
throw new IllegalArgumentException(
"Cannot set convert to int[] for " + dtype.toString() + " array");
}
byte[][] units = groupInternalBytes();
int[] array = new int[units.length];
for (int i = 0; i < units.length; ++i) {
array[i] = wrapBytes(units[i]).getInt();
}
return array;
}
/**
* Return a copied flat java array of current array (row-major).
* The NDArray dtype must be int16
* @return A copy of array content.
*/
public short[] asShortArray() {
if (dtype.typeCode != TVMType.INT || dtype.bits != 16) {
throw new IllegalArgumentException(
"Cannot set convert to short[] for " + dtype.toString() + " array");
}
byte[][] units = groupInternalBytes();
short[] array = new short[units.length];
for (int i = 0; i < units.length; ++i) {
array[i] = wrapBytes(units[i]).getShort();
}
return array;
}
/**
* Return a copied flat java array of current array (row-major).
* The NDArray dtype must be uint16
* @return A copy of array content.
*/
public char[] asCharArray() {
if (dtype.typeCode != TVMType.UINT || dtype.bits != 16) {
throw new IllegalArgumentException(
"Cannot set convert to char[] for " + dtype.toString() + " array");
}
byte[][] units = groupInternalBytes();
char[] array = new char[units.length];
for (int i = 0; i < units.length; ++i) {
array[i] = wrapBytes(units[i]).getChar();
}
return array;
}
/**
* Return a copied flat java array of current array (row-major).
* The NDArray dtype must be int8
* @return A copy of array content.
*/
public byte[] asByteArray() {
if (dtype.typeCode != TVMType.INT || dtype.bits != 8) {
throw new IllegalArgumentException(
"Cannot set convert to byte[] for " + dtype.toString() + " array");
}
return internal();
}
/**
* Return a copied internal byte array of current array (row-major).
* @return A copy of array content.
*/
public byte[] internal() {
NDArray tmp = NDArray.empty(shape(), dtype);
copyTo(tmp);
int arrLength = dtype.numOfBytes * (int) size();
byte[] arr = new byte[arrLength];
Base.checkCall(Base._LIB.tvmArrayCopyToJArray(tmp.handle, arr));
return arr;
}
private byte[][] groupInternalBytes() {
byte[] raw = internal();
int unitSize = dtype.numOfBytes;
if (raw.length <= 0 || raw.length % unitSize != 0) {
throw new IllegalArgumentException(String.format(
"%s size %d cannot divide byte array size %d", dtype.toString(), unitSize, raw.length));
}
int numOfUnits = raw.length / unitSize;
byte[][] units = new byte[numOfUnits][unitSize];
for (int i = 0; i < numOfUnits; ++i) {
System.arraycopy(raw, i * unitSize, units[i], 0, unitSize);
}
return units;
}
/**
* Get the device of current array.
* @return the device.
*/
public Device device() {
return device;
}
/**
* Create an empty array given shape, type and device.
* @param shape The shape of the array.
* @param dtype The data type of the array.
* @param dev The device of the array.
* @return The created NDArray.
*/
public static NDArray empty(long[] shape, TVMType dtype, Device dev) {
Base.RefLong refHandle = new Base.RefLong();
Base.checkCall(Base._LIB.tvmArrayAlloc(
shape, dtype.typeCode, dtype.bits, dtype.lanes, dev.deviceType, dev.deviceId, refHandle));
return new NDArray(refHandle.value, false, dtype, dev);
}
/**
* Create an empty array on cpu given shape and type.
* @param shape The shape of the array.
* @param dtype The data type of the array.
* @return The created NDArray.
*/
public static NDArray empty(long[] shape, TVMType dtype) {
return empty(shape, dtype, Device.cpu(0));
}
/**
* Create an empty float32 array on cpu given shape.
* @param shape The shape of the array.
* @return The created NDArray.
*/
public static NDArray empty(long[] shape) {
return empty(shape, new TVMType("float32", 1), Device.cpu(0));
}
/**
* Create an empty float32 array given shape and device.
* @param shape The shape of the array.
* @param dev The device of the array.
* @return The created NDArray.
*/
public static NDArray empty(long[] shape, Device dev) {
return empty(shape, new TVMType("float32", 1), dev);
}
private static ByteBuffer wrapBytes(byte[] bytes) {
ByteBuffer bb = ByteBuffer.wrap(bytes);
bb.order(ByteOrder.LITTLE_ENDIAN);
return bb;
}
private static ByteBuffer wrapBytes(byte[] bytes, int offset, int length) {
ByteBuffer bb = ByteBuffer.wrap(bytes, offset, length);
bb.order(ByteOrder.LITTLE_ENDIAN);
return bb;
}
}
| https://github.com/zk-ml/tachikoma |
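A round-trip sketch (the example class is hypothetical): copyFrom checks both the dtype and the element count, and asFloatArray returns a flat row-major copy.

import org.apache.tvm.NDArray;
import org.apache.tvm.TVMType;

public class NDArrayExample {
  public static void main(String[] args) {
    // float32 array on cpu(0); copyFrom validates dtype and size.
    NDArray arr = NDArray.empty(new long[]{2, 2}, new TVMType("float32"));
    arr.copyFrom(new float[]{1f, 2f, 3f, 4f});
    float[] back = arr.asFloatArray();  // flat, row-major copy
    System.out.println(back[3]);        // 4.0
    arr.release();  // release manually; the GC strategy is lazy
  }
}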
jvm/core/src/main/java/org/apache/tvm/NDArrayBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.tvm;
/**
* Base class of NDArray, used to handle callback arrays.
* Only deep copy is supported.
*/
public class NDArrayBase extends TVMValue {
protected final long handle;
protected final boolean isView;
private boolean isReleased = false;
NDArrayBase(long handle, boolean isView) {
super(ArgTypeCode.ARRAY_HANDLE);
this.handle = handle;
this.isView = isView;
}
NDArrayBase(long handle) {
this(handle, true);
}
@Override public NDArrayBase asNDArray() {
return this;
}
@Override long asHandle() {
return handle;
}
/**
* Copy array to target.
* @param target The target array to copy to; it must have the same shape as this array.
* @return target
*/
public NDArrayBase copyTo(NDArrayBase target) {
Base.checkCall(Base._LIB.tvmArrayCopyFromTo(handle, target.handle));
return target;
}
/**
* Release the NDArray memory.
* <p>
* We highly recommend you to do this manually since the GC strategy is lazy.
* </p>
*/
public void release() {
if (!isReleased) {
if (!isView) {
Base.checkCall(Base._LIB.tvmArrayFree(handle));
isReleased = true;
}
}
}
@Override protected void finalize() throws Throwable {
release();
super.finalize();
}
}
| https://github.com/zk-ml/tachikoma |
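A deep-copy sketch (hypothetical class): copyTo goes through tvmArrayCopyFromTo, so source and target must have the same shape.

import org.apache.tvm.NDArray;

public class CopyToExample {
  public static void main(String[] args) {
    NDArray src = NDArray.empty(new long[]{4});  // float32 on cpu(0)
    NDArray dst = NDArray.empty(new long[]{4});
    src.copyFrom(new float[]{1f, 2f, 3f, 4f});
    src.copyTo(dst);  // deep copy between arrays of the same shape
    System.out.println(dst.asFloatArray()[0]);  // 1.0
    src.release();
    dst.release();
  }
}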
jvm/core/src/main/java/org/apache/tvm/NativeLibraryLoader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.tvm;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
class NativeLibraryLoader {
private static final String libPathInJar = "/lib/native/";
private static File tempDir;
static {
try {
tempDir = File.createTempFile("tvm4j", "");
if (!tempDir.delete() || !tempDir.mkdir()) {
throw new IOException("Couldn't create directory " + tempDir.getAbsolutePath());
}
/*
* Different cleanup strategies for Windows and Linux.
* TODO: shutdown hook won't work on Windows
*/
if (!"Windows".equals(getUnifiedOSName())) {
Runtime.getRuntime().addShutdownHook(new Thread() {
@Override public void run() {
for (File f : tempDir.listFiles()) {
System.err.println("Deleting " + f.getAbsolutePath());
if (!f.delete()) {
System.err.println("[WARN] Couldn't delete temporary file " + f.getAbsolutePath());
}
}
System.err.println("Deleting " + tempDir.getAbsolutePath());
if (!tempDir.delete()) {
System.err.println(
"[WARN] Couldn't delete temporary directory " + tempDir.getAbsolutePath());
}
}
});
} else {
throw new RuntimeException("Windows not supported yet.");
}
} catch (IOException ex) {
System.err.println("Couldn't create temporary directory: " + ex.getMessage());
throw new RuntimeException(ex);
}
}
/**
* Find the library as a resource in the jar, copy it to a temp file
* and load it using System.load(). The name of the library has to be the
* base name; it is mapped to the corresponding system name using
* System.mapLibraryName(). E.g., the library "foo" is called "libfoo.so"
* under Linux and "foo.dll" under Windows, but you just have to pass "foo"
* to loadLibrary().
*
* @param libname basename of the library
* @throws UnsatisfiedLinkError if library not found.
* @throws IOException if file not found.
*/
public static void loadLibrary(String libname) throws UnsatisfiedLinkError, IOException {
String mappedLibname = System.mapLibraryName(libname);
String loadLibname = mappedLibname;
if (mappedLibname.endsWith("dylib")) {
System.err.println("Replaced .dylib with .jnilib");
loadLibname = mappedLibname.replace(".dylib", ".jnilib");
}
System.err.println("Attempting to load " + loadLibname);
extractResourceFileToTempDir(loadLibname, new Action() {
@Override public void invoke(File target) {
System.err.println("Loading library from " + target.getPath());
System.load(target.getPath());
}
});
}
/**
* Translate all Windows variants ("Windows XP", "Windows Vista", "Windows 7", etc.) to "Windows".
*/
private static String unifyOSName(String osname) {
if (osname.startsWith("Windows")) {
return "Windows";
}
return osname;
}
private static String getUnifiedOSName() {
return unifyOSName(System.getProperty("os.name"));
}
private static File createTempFile(String name) throws IOException {
return new File(tempDir + File.separator + name);
}
static interface Action {
public void invoke(File file);
}
/**
* Copies the resource file to a temp file and performs an action.
* @param filename source file name (in lib/native).
* @param action callback function to deal with the copied file.
*/
public static void extractResourceFileToTempDir(String filename, Action action)
throws IOException {
final String libFileInJar = libPathInJar + filename;
InputStream is = NativeLibraryLoader.class.getResourceAsStream(libFileInJar);
if (is == null) {
throw new UnsatisfiedLinkError("Couldn't find the resource " + filename);
}
System.err.println(String.format("Loading %s from %s", filename, libPathInJar));
try {
File tempfile = createTempFile(filename);
OutputStream os = new FileOutputStream(tempfile);
final long savedTime = System.currentTimeMillis();
byte[] buf = new byte[8192];
int len = is.read(buf);
while (len > 0) {
os.write(buf, 0, len);
len = is.read(buf);
}
os.flush();
final FileInputStream lock = new FileInputStream(tempfile);
os.close();
double seconds = (double) (System.currentTimeMillis() - savedTime) / 1e3;
System.err.println(String.format("Copying took %.2f seconds.", seconds));
action.invoke(tempfile);
lock.close();
} catch (IOException io) {
System.err.println("[ERROR] Could not create the temp file: " + io.toString());
throw io;
} catch (UnsatisfiedLinkError ule) {
System.err.println("Couldn't load copied link file: " + ule.toString());
throw ule;
}
}
}
| https://github.com/zk-ml/tachikoma |
jvm/core/src/main/java/org/apache/tvm/TVMType.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.tvm;
public class TVMType {
public static final int INT = 0;
public static final int UINT = 1;
public static final int FLOAT = 2;
public static final int HANDLE = 4;
public final int typeCode;
public final int bits;
public final int numOfBytes;
public final int lanes;
/**
* TVMType constructor.
* @param typeStr type name, e.g., "float32", "float64", "uint8", etc.
* @param lanes NDArray lanes.
*/
public TVMType(String typeStr, int lanes) {
this.lanes = lanes;
int bitsTemp = 0;
String bitsStr = null;
if (typeStr.startsWith("int")) {
typeCode = INT;
bitsStr = typeStr.substring(3);
} else if (typeStr.startsWith("uint")) {
typeCode = UINT;
bitsStr = typeStr.substring(4);
} else if (typeStr.startsWith("float")) {
typeCode = FLOAT;
bitsStr = typeStr.substring(5);
} else if (typeStr.startsWith("handle")) {
typeCode = HANDLE;
bitsTemp = 64;
} else {
throw new IllegalArgumentException("Do not know how to handle type " + typeStr);
}
// A plain "int" or "float" has an empty width suffix; it stays 0 here and defaults to 32 below.
if (bitsStr != null && !bitsStr.isEmpty()) {
bitsTemp = Integer.parseInt(bitsStr);
}
bits = (bitsTemp == 0) ? 32 : bitsTemp;
if ((bits & (bits - 1)) != 0 || bits < 8) {
throw new IllegalArgumentException("Unsupported bit width " + bits + " in type " + typeStr);
}
numOfBytes = bits / 8;
}
public TVMType(String typeStr) {
this(typeStr, 1);
}
@Override public int hashCode() {
return (typeCode << 16) | (bits << 8) | lanes;
}
@Override public boolean equals(Object other) {
if (other != null && other instanceof TVMType) {
TVMType otherInst = (TVMType) other;
return (bits == otherInst.bits)
&& (typeCode == otherInst.typeCode) && (lanes == otherInst.lanes);
}
return false;
}
@Override public String toString() {
String typeCodeStr;
switch (typeCode) {
case INT:
typeCodeStr = "int";
break;
case UINT:
typeCodeStr = "uint";
break;
case FLOAT:
typeCodeStr = "float";
break;
case HANDLE:
typeCodeStr = "handle";
break;
default:
typeCodeStr = "Unknown";
break;
}
String str = typeCodeStr + bits;
if (lanes != 1) {
// Follow the TVM dtype string convention for vector types, e.g. "float32x4".
str += "x" + lanes;
}
return str;
}
}
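// A minimal usage sketch, not part of the original file; it assumes only the
// public TVMType API shown above.
class TVMTypeExample {
  public static void main(String[] args) {
    TVMType scalar = new TVMType("float32");
    System.out.println(scalar + " uses " + scalar.numOfBytes + " bytes per lane");
    TVMType vector = new TVMType("int8", 4);  // 4 lanes
    System.out.println(vector);               // prints "int8x4"
  }
}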
| https://github.com/zk-ml/tachikoma |
jvm/core/src/main/java/org/apache/tvm/TVMValue.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.tvm;
public class TVMValue {
public final ArgTypeCode typeCode;
public TVMValue(ArgTypeCode tc) {
typeCode = tc;
}
public void release() {
}
public long asLong() {
throw new UnsupportedOperationException();
}
public double asDouble() {
throw new UnsupportedOperationException();
}
public byte[] asBytes() {
throw new UnsupportedOperationException();
}
public Module asModule() {
throw new UnsupportedOperationException();
}
public Function asFunction() {
throw new UnsupportedOperationException();
}
public NDArrayBase asNDArray() {
throw new UnsupportedOperationException();
}
public String asString() {
throw new UnsupportedOperationException();
}
// easy for JNI to use.
long asHandle() {
throw new UnsupportedOperationException();
}
}
| https://github.com/zk-ml/tachikoma |
jvm/core/src/main/java/org/apache/tvm/TVMValueBytes.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.tvm;
public class TVMValueBytes extends TVMValue {
public final byte[] value;
public TVMValueBytes(byte[] value) {
super(ArgTypeCode.BYTES);
this.value = value;
}
@Override public byte[] asBytes() {
return value;
}
}
| https://github.com/zk-ml/tachikoma |
jvm/core/src/main/java/org/apache/tvm/TVMValueDouble.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.tvm;
public class TVMValueDouble extends TVMValue {
public final double value;
public TVMValueDouble(double value) {
super(ArgTypeCode.FLOAT);
this.value = value;
}
@Override public double asDouble() {
return value;
}
}
| https://github.com/zk-ml/tachikoma |
jvm/core/src/main/java/org/apache/tvm/TVMValueHandle.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.tvm;
/**
* Java class related to TVM handles (ArgTypeCode.HANDLE).
*/
public class TVMValueHandle extends TVMValue {
public final long value;
public TVMValueHandle(long value) {
super(ArgTypeCode.HANDLE);
this.value = value;
}
@Override public long asHandle() {
return value;
}
}
| https://github.com/zk-ml/tachikoma |
jvm/core/src/main/java/org/apache/tvm/TVMValueLong.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.tvm;
public class TVMValueLong extends TVMValue {
public final long value;
public TVMValueLong(long value) {
super(ArgTypeCode.INT);
this.value = value;
}
@Override public long asLong() {
return value;
}
}
| https://github.com/zk-ml/tachikoma |
jvm/core/src/main/java/org/apache/tvm/TVMValueNull.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.tvm;
public class TVMValueNull extends TVMValue {
public TVMValueNull() {
super(ArgTypeCode.NULL);
}
}
| https://github.com/zk-ml/tachikoma |
jvm/core/src/main/java/org/apache/tvm/TVMValueString.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.tvm;
public class TVMValueString extends TVMValue {
public final String value;
public TVMValueString(String value) {
super(ArgTypeCode.STR);
this.value = value;
}
@Override public String asString() {
return value;
}
}
| https://github.com/zk-ml/tachikoma |
jvm/core/src/main/java/org/apache/tvm/contrib/GraphExecutor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.tvm.contrib;
import org.apache.tvm.Device;
import org.apache.tvm.Function;
import org.apache.tvm.Module;
import org.apache.tvm.TVMValue;
import org.apache.tvm.rpc.RPC;
import org.apache.tvm.rpc.RPCSession;
import org.apache.tvm.rpc.TVMRemoteDevice;
import java.lang.reflect.Field;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
public class GraphExecutor {
/**
* Create a runtime executor module given a graph and module.
* @param graphJson The graph to deploy, in JSON format, as produced by the compiler.
* @param libmod The module of the corresponding function.
* @param dev The local or remote device to deploy the module.
* @return Runtime graph module that can be used to execute the graph.
*/
public static GraphModule create(String graphJson, Module libmod, Device dev) {
Function fcreate = Function.getFunction("tvm.graph_executor.create");
if (fcreate == null) {
throw new RuntimeException("Cannot find global function tvm.graph_executor.create. "
+ "Did you compile tvm_runtime with the correct version?");
}
Module graphModule = fcreate.pushArg(graphJson)
.pushArg(libmod).pushArg(dev.deviceType).pushArg(dev.deviceId)
.invoke().asModule();
return new GraphModule(graphModule, dev);
}
private static Object reflectionGetField(Object obj, String fieldName) {
try {
Field field = obj.getClass().getDeclaredField(fieldName);
field.setAccessible(true);
return field.get(obj);
} catch (NoSuchFieldException e) {
throw new RuntimeException(e);
} catch (IllegalAccessException e) {
throw new RuntimeException(e);
}
}
private static Object reflectionStaticCall(Class<?> clazz, String methodName, Object ... args) {
Class<?>[] types = new Class<?>[args.length];
for (int i = 0; i < args.length; ++i) {
types[i] = args[i].getClass();
}
try {
Method method = clazz.getDeclaredMethod(methodName, types);
method.setAccessible(true);
return method.invoke(null, args);
} catch (NoSuchMethodException e) {
throw new RuntimeException(e);
} catch (IllegalAccessException e) {
throw new RuntimeException(e);
} catch (InvocationTargetException e) {
throw new RuntimeException(e);
}
}
}
| https://github.com/zk-ml/tachikoma |
jvm/core/src/main/java/org/apache/tvm/contrib/GraphModule.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.tvm.contrib;
import org.apache.tvm.Device;
import org.apache.tvm.Function;
import org.apache.tvm.Module;
import org.apache.tvm.NDArray;
/**
* Wrapper runtime module.
* This is a thin wrapper of the underlying TVM module.
* You can also directly call set_input, run, and get_output
* on the underlying module functions.
*/
public class GraphModule {
private Module module;
private Device device;
private Function fsetInput;
private Function frun;
private Function fgetOutput;
private Function fgetInput;
private Function fdebugGetOutput;
private Function floadParams;
GraphModule(Module module, Device dev) {
this.module = module;
this.device = dev;
fsetInput = module.getFunction("set_input");
frun = module.getFunction("run");
fgetInput = module.getFunction("get_input");
fgetOutput = module.getFunction("get_output");
try {
fdebugGetOutput = module.getFunction("debug_get_output");
} catch (IllegalArgumentException ignored) {
// ignore
}
floadParams = module.getFunction("load_params");
}
/**
* Release the GraphModule.
* <p>
* We highly recommend calling this manually, since the GC strategy is lazy.
* </p>
*/
public void release() {
fsetInput.release();
frun.release();
fgetInput.release();
fgetOutput.release();
if (fdebugGetOutput != null) {
fdebugGetOutput.release();
}
floadParams.release();
module.release();
}
/**
* Set inputs to the module.
* @param key The input key.
* @param value The input value.
* @return self.
*/
public GraphModule setInput(String key, NDArray value) {
NDArray input = value;
if (!value.device().equals(device)) {
input = NDArray.empty(value.shape(), device);
value.copyTo(input);
}
fsetInput.pushArg(key).pushArg(input).invoke();
return this;
}
/**
* Set inputs to the module.
* @param key The input key.
* @param value The input value.
* @return self.
*/
public GraphModule setInput(int key, NDArray value) {
NDArray input = value;
if (!value.device().equals(device)) {
input = NDArray.empty(value.shape(), device);
value.copyTo(input);
}
fsetInput.pushArg(key).pushArg(input).invoke();
return this;
}
/**
* Run forward execution of the graph.
* @return self.
*/
public GraphModule run() {
frun.invoke();
return this;
}
/**
* Get index-th input to out.
* @param index The input index.
* @param out The output array container.
* @return out.
*/
public NDArray getInput(int index, NDArray out) {
fgetInput.pushArg(index).pushArg(out).invoke();
return out;
}
/**
* Get index-th output to out.
* @param index The output index.
* @param out The output array container.
* @return out.
*/
public NDArray getOutput(int index, NDArray out) {
fgetOutput.pushArg(index).pushArg(out).invoke();
return out;
}
/**
* Run graph up to node and get the output to out.
* @param node The node name.
* @param out The output array container.
* @return out.
*/
public NDArray debugGetOutput(String node, NDArray out) {
if (fdebugGetOutput != null) {
fdebugGetOutput.pushArg(node).pushArg(out).invoke();
} else {
throw new RuntimeException("Please compile runtime with USE_PROFILER = ON");
}
return out;
}
/**
* Run graph up to node and get the output to out.
* @param node The node index.
* @param out The output array container.
* @return out.
*/
public NDArray debugGetOutput(int node, NDArray out) {
if (fdebugGetOutput != null) {
fdebugGetOutput.pushArg(node).pushArg(out).invoke();
} else {
throw new RuntimeException("Please compile runtime with USE_PROFILER = ON");
}
return out;
}
/**
* Load parameters from serialized byte array of parameter dict.
* @param params The serialized parameter.
* @return self.
*/
public GraphModule loadParams(byte[] params) {
floadParams.pushArg(params).invoke();
return this;
}
/**
* Get internal module function.
* @param key The key to the module.
* @return The function.
* @throws IllegalArgumentException if function does not exist.
*/
public Function getFunction(String key) {
return module.getFunction(key);
}
}
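// A minimal end-to-end sketch, not part of the original file. The file paths,
// the input name "data", and the shapes are hypothetical and depend on how the
// model was compiled; only the GraphExecutor/GraphModule calls are from this API.
class GraphModuleExample {
  public static void main(String[] args) throws java.io.IOException {
    String graphJson = new String(java.nio.file.Files.readAllBytes(
        java.nio.file.Paths.get("/path/to/deploy_graph.json")));
    byte[] params = java.nio.file.Files.readAllBytes(
        java.nio.file.Paths.get("/path/to/deploy_params.params"));
    Module lib = Module.load("/path/to/deploy_lib.so");
    Device dev = Device.cpu(0);
    GraphModule graph = GraphExecutor.create(graphJson, lib, dev);
    NDArray input = NDArray.empty(new long[]{1, 3, 224, 224}, dev);
    NDArray output = NDArray.empty(new long[]{1, 1000}, dev);
    graph.loadParams(params);
    graph.setInput("data", input).run();
    graph.getOutput(0, output);
    System.out.println("first output value: " + output.asFloatArray()[0]);
    graph.release();
  }
}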
| https://github.com/zk-ml/tachikoma |
jvm/core/src/main/java/org/apache/tvm/rpc/Client.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.tvm.rpc;
import org.apache.tvm.Function;
import org.apache.tvm.TVMValue;
public class Client {
/**
* Connect to RPC Server.
* @param url The url of the host.
* @param port The port to connect to.
* @param key Additional key to match server.
* @return The connected session.
*/
public static RPCSession connect(String url, int port, String key) {
Function doConnect = RPC.getApi("Connect");
if (doConnect == null) {
throw new RuntimeException("Please compile with USE_RPC=1");
}
TVMValue sess = doConnect.pushArg(url).pushArg(port).pushArg(key).invoke();
return new RPCSession(sess.asModule());
}
/**
* Connect to RPC Server.
* @param url The url of the host.
* @param port The port to connect to.
* @return The connected session.
*/
public static RPCSession connect(String url, int port) {
return connect(url, port, "");
}
}
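// A minimal connection sketch, not part of the original file; the host and
// port are hypothetical and must point at a running RPC server.
class ClientExample {
  public static void main(String[] args) {
    RPCSession sess = Client.connect("localhost", 9090);
    org.apache.tvm.Device dev = sess.device("cpu");
    System.out.println("connected, remote device type: " + dev.deviceType);
  }
}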
| https://github.com/zk-ml/tachikoma |
jvm/core/src/main/java/org/apache/tvm/rpc/ConnectProxyServerProcessor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.tvm.rpc;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.net.SocketAddress;
/**
* Server processor for proxy connection.
*/
public class ConnectProxyServerProcessor implements ServerProcessor {
private final String host;
private final int port;
private final String key;
private volatile Socket currSocket = new Socket();
private Runnable callback;
/**
* Construct proxy server processor.
* @param host Proxy server host.
* @param port Proxy server port.
* @param key Proxy server key.
*/
public ConnectProxyServerProcessor(String host, int port, String key) {
this.host = host;
this.port = port;
this.key = "server:" + key;
}
/**
* Set a callback for when a connection is received, e.g., to record the
* start time for a watchdog.
* @param callback Runnable object.
*/
public void setStartTimeCallback(Runnable callback) {
this.callback = callback;
}
/**
* Close the socket.
*/
@Override public void terminate() {
Utils.closeQuietly(currSocket);
}
@Override public void run() {
try {
SocketAddress address = new InetSocketAddress(host, port);
currSocket.connect(address, 6000);
final InputStream in = currSocket.getInputStream();
final OutputStream out = currSocket.getOutputStream();
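// Handshake wire format: 4-byte little-endian RPC_MAGIC, then the length of
// "server:<key>" as 4 bytes, then the raw key bytes. The proxy replies with a
// 4-byte status magic followed by its own length-prefixed key.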
out.write(Utils.toBytes(RPC.RPC_MAGIC));
out.write(Utils.toBytes(key.length()));
out.write(Utils.toBytes(key));
int magic = Utils.wrapBytes(Utils.recvAll(in, 4)).getInt();
if (magic == RPC.RPC_MAGIC + 1) {
throw new RuntimeException(
String.format("key: %s has already been used in proxy", key));
} else if (magic == RPC.RPC_MAGIC + 2) {
System.err.println("RPCProxy do not have matching client key " + key);
} else if (magic != RPC.RPC_MAGIC) {
throw new RuntimeException(address + " is not RPC Proxy");
}
// Get key from remote
int keylen = Utils.wrapBytes(Utils.recvAll(in, 4)).getInt();
String remoteKey = Utils.decodeToStr(Utils.recvAll(in, keylen));
System.err.println("RPCProxy connected to " + address);
if (callback != null) {
callback.run();
}
SocketChannel sockChannel = new SocketChannel(currSocket);
new NativeServerLoop(sockChannel.getFsend(), sockChannel.getFrecv()).run();
System.err.println("Finish serving " + address);
} catch (Throwable e) {
e.printStackTrace();
throw new RuntimeException(e);
} finally {
terminate();
}
}
}
| https://github.com/zk-ml/tachikoma |
jvm/core/src/main/java/org/apache/tvm/rpc/ConnectTrackerServerProcessor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.tvm.rpc;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.BindException;
import java.net.ConnectException;
import java.net.InetSocketAddress;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.SocketAddress;
import java.net.SocketException;
import java.net.SocketTimeoutException;
/**
* Server processor with tracker connection (based on standalone).
* This RPC Server registers itself with an RPC Tracker for a specific queue
* (using its device key) and listens for incoming requests.
*/
public class ConnectTrackerServerProcessor implements ServerProcessor {
private ServerSocket server;
private final String trackerHost;
private final int trackerPort;
// device key
private final String key;
// device key plus randomly generated key (per-session)
private final String matchKey;
private int serverPort = 5001;
public static final int MAX_SERVER_PORT = 5555;
// time to wait before aborting tracker connection (ms)
public static final int TRACKER_TIMEOUT = 6000;
// time to wait before retrying tracker connection (ms)
public static final int RETRY_PERIOD = TRACKER_TIMEOUT;
// time to wait for a connection before refreshing tracker connection (ms)
public static final int STALE_TRACKER_TIMEOUT = 300000;
// time to wait if no timeout value is specified (seconds)
public static final int HARD_TIMEOUT_DEFAULT = 300;
private RPCWatchdog watchdog;
private Socket trackerSocket;
/**
* Construct tracker server processor.
* @param trackerHost Tracker host.
* @param trackerPort Tracker port.
* @param key Device key.
* @param watchdog watch for timeout, etc.
* @throws java.io.IOException when socket fails to open.
*/
public ConnectTrackerServerProcessor(String trackerHost, int trackerPort, String key,
RPCWatchdog watchdog) throws IOException {
while (true) {
try {
this.server = new ServerSocket(serverPort);
server.setSoTimeout(STALE_TRACKER_TIMEOUT);
break;
} catch (BindException e) {
System.err.println("port " + serverPort + " already in use: " + e);
serverPort++;
if (serverPort > MAX_SERVER_PORT) {
throw e;
}
}
}
System.err.println("using port: " + serverPort);
this.trackerHost = trackerHost;
this.trackerPort = trackerPort;
this.key = key;
this.matchKey = key + ":" + Math.random();
this.watchdog = watchdog;
}
public String getMatchKey() {
return matchKey;
}
@Override public void terminate() {
try {
server.close();
} catch (IOException e) {
e.printStackTrace();
}
}
@Override public void run() {
String recvKey = null;
try {
trackerSocket = connectToTracker();
// open a socket and handshake with tracker
register();
Socket socket = null;
InputStream in = null;
OutputStream out = null;
while (true) {
try {
System.err.println("waiting for requests...");
// wait for client request
socket = server.accept();
in = socket.getInputStream();
out = socket.getOutputStream();
int magic = Utils.wrapBytes(Utils.recvAll(in, 4)).getInt();
if (magic != RPC.RPC_MAGIC) {
out.write(Utils.toBytes(RPC.RPC_CODE_MISMATCH));
System.err.println("incorrect RPC magic");
Utils.closeQuietly(socket);
continue;
}
recvKey = Utils.recvString(in);
System.err.println("matchKey:" + matchKey);
System.err.println("key: " + recvKey);
// incorrect key
if (recvKey.indexOf(matchKey) == -1) {
out.write(Utils.toBytes(RPC.RPC_CODE_MISMATCH));
System.err.println("key mismatch, expected: " + matchKey + " got: " + recvKey);
Utils.closeQuietly(socket);
continue;
}
// successfully got client request and completed handshake with client
break;
} catch (SocketTimeoutException e) {
System.err.println("no incoming connections, refreshing...");
// re-register unless our matchKey is still pending on the tracker; if the
// tracker died we should see a socket-closed exception
if (!needRefreshKey()) {
System.err.println("reregistering...");
register();
}
}
}
int timeout = HARD_TIMEOUT_DEFAULT;
int timeoutArgIndex = recvKey.indexOf(RPC.TIMEOUT_ARG);
if (timeoutArgIndex != -1) {
timeout = Integer.parseInt(recvKey.substring(timeoutArgIndex + RPC.TIMEOUT_ARG.length()));
}
System.err.println("alloted timeout: " + timeout);
if (!recvKey.startsWith("client:")) {
System.err.println("recv key mismatch...");
out.write(Utils.toBytes(RPC.RPC_CODE_MISMATCH));
} else {
out.write(Utils.toBytes(RPC.RPC_MAGIC));
// send server key to the client
Utils.sendString(out, recvKey);
}
System.err.println("Connection from " + socket.getRemoteSocketAddress().toString());
// received timeout in seconds
watchdog.startTimeout(timeout * 1000);
SocketChannel sockChannel = new SocketChannel(socket);
new NativeServerLoop(sockChannel.getFsend(), sockChannel.getFrecv()).run();
System.err.println("Finish serving " + socket.getRemoteSocketAddress().toString());
Utils.closeQuietly(socket);
} catch (ConnectException e) {
// if the tracker connection failed, wait a bit before retrying
try {
Thread.sleep(RETRY_PERIOD);
} catch (InterruptedException e_) {
System.err.println("interrupted before retrying to connect to tracker...");
}
} catch (Throwable e) {
e.printStackTrace();
} finally {
try {
if (trackerSocket != null) {
trackerSocket.close();
}
server.close();
} catch (Throwable e) {
e.printStackTrace();
}
}
}
private Socket connectToTracker() throws IOException {
trackerSocket = new Socket();
SocketAddress address = new InetSocketAddress(trackerHost, trackerPort);
trackerSocket.connect(address, TRACKER_TIMEOUT);
InputStream trackerIn = trackerSocket.getInputStream();
OutputStream trackerOut = trackerSocket.getOutputStream();
trackerOut.write(Utils.toBytes(RPC.RPC_TRACKER_MAGIC));
int trackerMagic = Utils.wrapBytes(Utils.recvAll(trackerIn, 4)).getInt();
if (trackerMagic != RPC.RPC_TRACKER_MAGIC) {
throw new SocketException("failed to connect to tracker (WRONG MAGIC)");
}
String infoJSON = generateCinfo(key);
Utils.sendString(trackerOut, infoJSON);
int recvCode = Integer.parseInt(Utils.recvString(trackerIn));
if (recvCode != RPC.TrackerCode.SUCCESS) {
throw new SocketException("failed to connect to tracker (not SUCCESS)");
}
return trackerSocket;
}
/*
* Register the RPC Server with the RPC Tracker.
*/
private void register() throws IOException {
InputStream trackerIn = trackerSocket.getInputStream();
OutputStream trackerOut = trackerSocket.getOutputStream();
// send a JSON with PUT, device key, RPC server port, and the randomly
// generated key
String putJSON = generatePut(RPC.TrackerCode.PUT, key, serverPort, matchKey);
Utils.sendString(trackerOut, putJSON);
int recvCode = Integer.parseInt(Utils.recvString(trackerIn));
if (recvCode != RPC.TrackerCode.SUCCESS) {
throw new SocketException("failed to register with tracker (not SUCCESS)");
}
System.err.println("registered with tracker...");
}
/*
* Check whether the RPC Tracker still has our matchKey pending.
*/
private boolean needRefreshKey() throws IOException {
InputStream trackerIn = trackerSocket.getInputStream();
OutputStream trackerOut = trackerSocket.getOutputStream();
String getJSON = generateGetPendingMatchKeys(RPC.TrackerCode.GET_PENDING_MATCHKEYS);
Utils.sendString(trackerOut, getJSON);
String recvJSON = Utils.recvString(trackerIn);
System.err.println("pending matchkeys: " + recvJSON);
// fairly expensive string operation...
return recvJSON.contains(matchKey);
}
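// The tracker messages below are handcrafted JSON arrays; illustrative examples
// (values are hypothetical):
//   UPDATE_INFO: [5, {"key" : "server:rasp", "addr": [null, "5001"]}]
//   PUT: [3, "rasp", [5001, "rasp:0.3721"], null]
//   GET_PENDING_MATCHKEYS: [7]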
// handcrafted JSON
private String generateCinfo(String key) {
String cinfo = "{\"key\" : " + "\"server:" + key + "\", \"addr\": [null, \""
+ serverPort + "\"]}";
return "[" + RPC.TrackerCode.UPDATE_INFO + ", " + cinfo + "]";
}
// handcrafted JSON
private String generatePut(int code, String key, int port, String matchKey) {
return "[" + code + ", " + "\"" + key + "\"" + ", " + "[" + port + ", "
+ "\"" + matchKey + "\"" + "]" + ", " + "null" + "]";
}
// handcrafted JSON
private String generateGetPendingMatchKeys(int code) {
return "[" + code + "]";
}
}
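// A minimal sketch, not part of the original file: serve one session for the
// hypothetical device key "rasp" through a tracker at localhost:9190.
class ConnectTrackerExample {
  public static void main(String[] args) throws java.io.IOException {
    RPCWatchdog watchdog = new RPCWatchdog();
    watchdog.start();
    ServerProcessor processor =
        new ConnectTrackerServerProcessor("localhost", 9190, "rasp", watchdog);
    processor.run();  // registers with the tracker, then blocks serving one request
  }
}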
| https://github.com/zk-ml/tachikoma |
jvm/core/src/main/java/org/apache/tvm/rpc/NativeServerLoop.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.tvm.rpc;
import org.apache.tvm.Function;
import org.apache.tvm.Module;
import org.apache.tvm.TVMValue;
import java.io.File;
import java.io.IOException;
/**
* Call native ServerLoop on socket file descriptor.
*/
public class NativeServerLoop implements Runnable {
private final Function fsend;
private final Function frecv;
/**
* Constructor for NativeServerLoop.
* @param fsend socket.send function.
* @param frecv socket.recv function.
*/
public NativeServerLoop(final Function fsend, final Function frecv) {
this.fsend = fsend;
this.frecv = frecv;
}
@Override public void run() {
File tempDir = null;
try {
tempDir = serverEnv();
System.err.println("starting server loop...");
RPC.getApi("ServerLoop").pushArg(fsend).pushArg(frecv).invoke();
System.err.println("done server loop...");
} catch (IOException e) {
e.printStackTrace();
} finally {
if (tempDir != null) {
// File.list() returns null if the directory has already been removed.
String[] entries = tempDir.list();
if (entries != null) {
for (String s : entries) {
File currentFile = new File(tempDir.getPath(), s);
if (!currentFile.delete()) {
System.err.println(
"[WARN] Couldn't delete temporary file " + currentFile.getAbsolutePath());
}
}
}
if (!tempDir.delete()) {
System.err.println(
"[WARN] Couldn't delete temporary directory " + tempDir.getAbsolutePath());
}
}
}
}
private static File serverEnv() throws IOException {
// Set up the server environment and return the session temp dir.
final File tempDir = File.createTempFile("tvm4j_rpc_", "");
if (!tempDir.delete() || !tempDir.mkdir()) {
throw new IOException("Couldn't create directory " + tempDir.getAbsolutePath());
}
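// Register the server-side helpers that RPC clients rely on: "workpath" maps a
// bare file name into this session's temp dir, and "load_module" loads a module
// that was previously uploaded there.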
Function.register("tvm.rpc.server.workpath", new Function.Callback() {
@Override public Object invoke(TVMValue... args) {
return tempDir + File.separator + args[0].asString();
}
}, true);
Function.register("tvm.rpc.server.load_module", new Function.Callback() {
@Override public Object invoke(TVMValue... args) {
String filename = args[0].asString();
String path = tempDir + File.separator + filename;
System.err.println("Load module from " + path);
return Module.load(path);
}
}, true);
return tempDir;
}
}
| https://github.com/zk-ml/tachikoma |
jvm/core/src/main/java/org/apache/tvm/rpc/RPC.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.tvm.rpc;
import org.apache.tvm.Function;
import java.util.HashMap;
import java.util.Map;
public class RPC {
public static final int RPC_TRACKER_MAGIC = 0x2f271;
public static final int RPC_MAGIC = 0xff271;
public static final int RPC_CODE_MISMATCH = RPC_MAGIC + 2;
public static final int RPC_SESS_MASK = 128;
public static final String TIMEOUT_ARG = "-timeout=";
public class TrackerCode {
public static final int PUT = 3;
public static final int UPDATE_INFO = 5;
public static final int GET_PENDING_MATCHKEYS = 7;
public static final int SUCCESS = 0;
}
private static ThreadLocal<Map<String, Function>> apiFuncs
= new ThreadLocal<Map<String, Function>>() {
@Override
protected Map<String, Function> initialValue() {
return new HashMap<String, Function>();
}
};
/**
* Get an internal function from the "rpc." namespace, caching it per thread.
* @param name function name.
* @return the function, or null if it does not exist.
*/
static Function getApi(String name) {
Function func = apiFuncs.get().get(name);
if (func == null) {
func = Function.getFunction("rpc." + name);
if (func == null) {
return null;
}
apiFuncs.get().put(name, func);
}
return func;
}
}
| https://github.com/zk-ml/tachikoma |
jvm/core/src/main/java/org/apache/tvm/rpc/RPCSession.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.tvm.rpc;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.HashMap;
import java.util.Map;
import org.apache.tvm.Device;
import org.apache.tvm.Function;
import org.apache.tvm.Module;
/**
* RPC Client session module.
* Do not directly create the object, use Client.connect.
*/
public class RPCSession {
private final Module session;
private final int tblIndex;
private final Map<String, Function> remoteFuncs = new HashMap<String, Function>();
RPCSession(Module sess) {
session = sess;
tblIndex = (int) RPC.getApi("SessTableIndex").pushArg(session).invoke().asLong();
}
/**
* Get function from the session.
* @param name The name of the function.
* @return The result function.
*/
public Function getFunction(String name) {
return session.getFunction(name);
}
/**
* Construct a remote device.
* @param devType device type.
* @param devId device id.
* @return The corresponding encoded remote device.
*/
public Device device(String devType, int devId) {
Device dev = new Device(devType, devId);
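// Remote devices are encoded by offsetting the device type with
// (sessionTableIndex + 1) * RPC.RPC_SESS_MASK, so the runtime can tell
// them apart from local devices.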
int encode = (tblIndex + 1) * RPC.RPC_SESS_MASK;
return new TVMRemoteDevice(dev.deviceType + encode, devId, this);
}
/**
* Construct a remote device.
* @param devType device type.
* @return The corresponding encoded remote device.
*/
public Device device(String devType) {
return device(devType, 0);
}
/**
* Construct a remote device.
* @param devType device type.
* @param devId device id.
* @return The corresponding encoded remote device.
*/
public Device device(int devType, int devId) {
int encode = (tblIndex + 1) * RPC.RPC_SESS_MASK;
return new TVMRemoteDevice(devType + encode, devId, this);
}
/**
* Construct a remote device.
* @param devType device type.
* @return The corresponding encoded remote device.
*/
public Device device(int devType) {
return device(devType, 0);
}
/**
* Construct remote CPU device.
* @param devId device id.
* @return Remote CPU device.
*/
public Device cpu(int devId) {
return Device.cpu(devId);
}
/**
* Construct remote CPU device.
* @return Remote CPU device.
*/
public Device cpu() {
return cpu(0);
}
/**
* Construct remote CUDA GPU device.
* @param devId device id.
* @return Remote CUDA GPU device.
*/
public Device cuda(int devId) {
return Device.cuda(devId);
}
/**
* Construct remote CUDA GPU device.
* @return Remote CUDA GPU device.
*/
public Device cuda() {
return cuda(0);
}
/**
* Construct remote OpenCL device.
* @param devId device id.
* @return Remote OpenCL device.
*/
public Device cl(int devId) {
return Device.opencl(devId);
}
/**
* Construct remote OpenCL device.
* @return Remote OpenCL device.
*/
public Device cl() {
return cl(0);
}
/**
* Construct remote Vulkan device.
* @param devId device id.
* @return Remote Vulkan device.
*/
public Device vulkan(int devId) {
return Device.vulkan(devId);
}
/**
* Construct remote Vulkan device.
* @return Remote Vulkan device.
*/
public Device vulkan() {
return vulkan(0);
}
/**
* Construct remote Metal device.
* @param devId device id.
* @return Remote metal device.
*/
public Device metal(int devId) {
return Device.metal(devId);
}
/**
* Construct remote Metal device.
* @return Remote metal device.
*/
public Device metal() {
return metal(0);
}
/**
* Upload binary to remote runtime temp folder.
* @param data The binary in local to upload.
* @param target The path in remote, cannot be null.
*/
public void upload(byte[] data, String target) {
if (target == null) {
throw new IllegalArgumentException("Please specify the upload target");
}
final String funcName = "upload";
Function remoteFunc = remoteFuncs.get(funcName);
if (remoteFunc == null) {
remoteFunc = getFunction("tvm.rpc.server.upload");
remoteFuncs.put(funcName, remoteFunc);
}
remoteFunc.pushArg(target).pushArg(data).invoke();
}
/**
* Upload file to remote runtime temp folder.
* @param data The file in local to upload.
* @param target The path in remote.
* @throws java.io.IOException for network failure.
*/
public void upload(File data, String target) throws IOException {
byte[] blob = getBytesFromFile(data);
upload(blob, target);
}
/**
* Upload file to remote runtime temp folder.
* @param data The file in local to upload.
* @throws java.io.IOException for network failure.
*/
public void upload(File data) throws IOException {
upload(data, data.getName());
}
/**
* Download file from remote temp folder.
* @param path The relative location to remote temp folder.
* @return The result blob from the file.
*/
public byte[] download(String path) {
final String name = "download";
Function func = remoteFuncs.get(name);
if (func == null) {
func = getFunction("tvm.rpc.server.download");
remoteFuncs.put(name, func);
}
return func.pushArg(path).invoke().asBytes();
}
/**
* Load a remote module; the file needs to be uploaded first.
* @param path The relative location to remote temp folder.
* @return The remote module containing remote function.
*/
public Module loadModule(String path) {
return RPC.getApi("LoadRemoteModule").pushArg(session).pushArg(path).invoke().asModule();
}
private static byte[] getBytesFromFile(File file) throws IOException {
// Get the size of the file
long length = file.length();
if (length > Integer.MAX_VALUE) {
throw new IOException("File " + file.getName() + " is too large!");
}
// cannot create an array using a long type.
byte[] bytes = new byte[(int) length];
// Read in the bytes
int offset = 0;
int numRead = 0;
InputStream is = new FileInputStream(file);
try {
while (
offset < bytes.length && (numRead = is.read(bytes, offset, bytes.length - offset)) >= 0) {
offset += numRead;
}
} finally {
is.close();
}
// Ensure all the bytes have been read in
if (offset < bytes.length) {
throw new IOException("Could not completely read file " + file.getName());
}
return bytes;
}
}
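// A minimal upload/load sketch, not part of the original file; the library
// path and the function name "myadd" are hypothetical.
class RPCSessionExample {
  public static void main(String[] args) throws IOException {
    RPCSession sess = Client.connect("localhost", 9090);
    sess.upload(new File("/path/to/compiled_lib.so"));
    Module mod = sess.loadModule("compiled_lib.so");
    Function fadd = mod.getFunction("myadd");
    System.out.println("remote function loaded: " + fadd);
    fadd.release();
    mod.release();
  }
}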
| https://github.com/zk-ml/tachikoma |
jvm/core/src/main/java/org/apache/tvm/rpc/RPCWatchdog.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.tvm.rpc;
/**
* Watchdog for RPC.
*/
public class RPCWatchdog extends Thread {
private int timeout = 0;
private boolean started = false;
public RPCWatchdog() {
super();
}
/**
* Start a timeout with watchdog (must be called before finishTimeout).
* @param timeout watchdog timeout in ms.
*/
public synchronized void startTimeout(int timeout) {
this.timeout = timeout;
started = true;
this.notify();
}
/**
* Finish a timeout with watchdog (must be called after startTimeout).
*/
public synchronized void finishTimeout() {
started = false;
this.notify();
}
/**
* Wait and kill RPC if timeout is exceeded.
*/
@Override public void run() {
while (true) {
// timeout not started
synchronized (this) {
while (!started) {
try {
this.wait();
} catch (InterruptedException e) {
System.err.println("watchdog interrupted...");
}
}
}
synchronized (this) {
while (started) {
try {
System.err.println("waiting for timeout: " + timeout);
this.wait(timeout);
if (!started) {
System.err.println("watchdog woken up, ok...");
} else {
System.err.println("watchdog woke up!");
System.err.println("terminating...");
terminate();
}
} catch (InterruptedException e) {
System.err.println("watchdog interrupted...");
}
}
}
}
}
/**
* Default method to terminate the running RPCActivity process.
*/
protected void terminate() {
System.exit(0);
}
}
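// A minimal sketch, not part of the original file, of the intended protocol:
// arm the watchdog before serving a request and disarm it afterwards. The
// override avoids killing the JVM in this toy example.
class RPCWatchdogExample {
  public static void main(String[] args) {
    RPCWatchdog watchdog = new RPCWatchdog() {
      @Override protected void terminate() {
        System.err.println("timeout exceeded, would terminate the RPC process");
      }
    };
    watchdog.start();             // the watchdog thread waits until armed
    watchdog.startTimeout(5000);  // arm: 5 seconds for this request
    // ... serve the request here ...
    watchdog.finishTimeout();     // disarm before the timeout fires
  }
}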
| https://github.com/zk-ml/tachikoma |
jvm/core/src/main/java/org/apache/tvm/rpc/Server.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.tvm.rpc;
import java.io.IOException;
/**
* RPC Server.
*/
public class Server {
private final WorkerThread worker;
private static class WorkerThread extends Thread {
private volatile boolean running = true;
private final ServerProcessor processor;
public WorkerThread(ServerProcessor processor) {
this.processor = processor;
}
@Override public void run() {
while (running) {
processor.run();
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
e.printStackTrace();
}
}
}
public void terminate() {
running = false;
processor.terminate();
}
}
/**
* Start a standalone server.
* @param serverPort Port.
* @throws IOException if failed to bind localhost:port.
*/
public Server(int serverPort) throws IOException {
worker = new WorkerThread(new StandaloneServerProcessor(serverPort));
}
/**
* Start a server connected to proxy.
* Use sun.misc.SharedSecrets.getJavaIOFileDescriptorAccess
* to get file descriptor for the socket.
* @param proxyHost The proxy server host.
* @param proxyPort The proxy server port.
* @param key The key to identify the server.
*/
public Server(String proxyHost, int proxyPort, String key) {
worker = new WorkerThread(
new ConnectProxyServerProcessor(proxyHost, proxyPort, key));
}
/**
* Start the server.
*/
public void start() {
worker.start();
}
/**
* Stop the server.
*/
public void terminate() {
worker.terminate();
}
}
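// A minimal sketch, not part of the original file: run a standalone RPC
// server on a hypothetical local port.
class ServerExample {
  public static void main(String[] args) throws IOException {
    Server server = new Server(9090);  // binds localhost:9090
    server.start();
    // ... later: server.terminate();
  }
}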
| https://github.com/zk-ml/tachikoma |
jvm/core/src/main/java/org/apache/tvm/rpc/ServerProcessor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.tvm.rpc;
/**
* Abstract runnable class for RPC server process.
*/
public interface ServerProcessor extends Runnable {
public void terminate();
}
| https://github.com/zk-ml/tachikoma |
jvm/core/src/main/java/org/apache/tvm/rpc/SocketChannel.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.tvm.rpc;
import org.apache.tvm.Function;
import org.apache.tvm.TVMValue;
import org.apache.tvm.TVMValueBytes;
import java.io.IOException;
import java.net.Socket;
public class SocketChannel {
private final Socket socket;
SocketChannel(Socket sock) {
socket = sock;
}
private Function fsend = Function.convertFunc(new Function.Callback() {
@Override public Object invoke(TVMValue... args) {
byte[] data = args[0].asBytes();
try {
socket.getOutputStream().write(data);
} catch (IOException e) {
e.printStackTrace();
return -1;
}
return data.length;
}
});
private Function frecv = Function.convertFunc(new Function.Callback() {
@Override public Object invoke(TVMValue... args) {
long size = args[0].asLong();
try {
return new TVMValueBytes(Utils.recvAll(socket.getInputStream(), (int) size));
} catch (IOException e) {
e.printStackTrace();
return -1;
}
}
});
public Function getFsend() {
return fsend;
}
public Function getFrecv() {
return frecv;
}
}
| https://github.com/zk-ml/tachikoma |
jvm/core/src/main/java/org/apache/tvm/rpc/StandaloneServerProcessor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.tvm.rpc;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.ServerSocket;
import java.net.Socket;
/**
* Server processor for standalone.
*/
public class StandaloneServerProcessor implements ServerProcessor {
private final ServerSocket server;
public StandaloneServerProcessor(int serverPort) throws IOException {
this.server = new ServerSocket(serverPort);
}
@Override public void terminate() {
try {
server.close();
} catch (IOException e) {
e.printStackTrace();
}
}
@Override public void run() {
try {
final Socket socket = server.accept();
final InputStream in = socket.getInputStream();
final OutputStream out = socket.getOutputStream();
int magic = Utils.wrapBytes(Utils.recvAll(in, 4)).getInt();
if (magic != RPC.RPC_MAGIC) {
Utils.closeQuietly(socket);
return;
}
int keyLen = Utils.wrapBytes(Utils.recvAll(in, 4)).getInt();
String key = Utils.decodeToStr(Utils.recvAll(in, keyLen));
if (!key.startsWith("client:")) {
out.write(Utils.toBytes(RPC.RPC_MAGIC + 2));
} else {
out.write(Utils.toBytes(RPC.RPC_MAGIC));
// send server key to the client
String serverKey = "server:java";
out.write(Utils.toBytes(serverKey.length()));
out.write(Utils.toBytes(serverKey));
}
SocketChannel sockChannel = new SocketChannel(socket);
System.err.println("Connection from " + socket.getRemoteSocketAddress().toString());
new NativeServerLoop(sockChannel.getFsend(), sockChannel.getFrecv()).run();
System.err.println("Finish serving " + socket.getRemoteSocketAddress().toString());
Utils.closeQuietly(socket);
} catch (Throwable e) {
e.printStackTrace();
}
}
}
| https://github.com/zk-ml/tachikoma |
jvm/core/src/main/java/org/apache/tvm/rpc/TVMRemoteDevice.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.tvm.rpc;
import org.apache.tvm.Device;
// Always tied to an RPCSession; cannot be constructed by users.
public class TVMRemoteDevice extends Device {
public final RPCSession rpcSession;
TVMRemoteDevice(int deviceType, int deviceId, RPCSession rpcSession) {
super(deviceType, deviceId);
this.rpcSession = rpcSession;
}
}
| https://github.com/zk-ml/tachikoma |
jvm/core/src/main/java/org/apache/tvm/rpc/Utils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.tvm.rpc;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.Socket;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
/**
* Utilities for RPC.
*/
class Utils {
public static byte[] recvAll(final InputStream in, final int numBytes) throws IOException {
byte[] res = new byte[numBytes];
int numRead = 0;
while (numRead < numBytes) {
int chunk = in.read(res, numRead, Math.min(numBytes - numRead, 1024));
if (chunk < 0) {
// Fail fast on EOF; otherwise numRead would decrease and the loop would never finish.
throw new IOException("Stream closed after " + numRead + " of " + numBytes + " bytes");
}
numRead += chunk;
}
return res;
}
public static void closeQuietly(Socket socket) {
if (socket != null) {
try {
socket.shutdownInput();
socket.shutdownOutput();
socket.close();
} catch (IOException ioe) {
// close quietly, do nothing.
}
}
}
public static ByteBuffer wrapBytes(byte[] bytes) {
ByteBuffer bb = ByteBuffer.wrap(bytes);
bb.order(ByteOrder.LITTLE_ENDIAN);
return bb;
}
public static byte[] toBytes(int number) {
ByteBuffer bb = ByteBuffer.allocate(4);
bb.order(ByteOrder.LITTLE_ENDIAN);
return bb.putInt(number).array();
}
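// Note: toBytes(String) and decodeToStr below assume single-byte (ASCII)
// characters; each char is simply truncated to, or widened from, one byte.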
public static byte[] toBytes(String str) {
byte[] bytes = new byte[str.length()];
for (int i = 0; i < str.length(); ++i) {
bytes[i] = (byte) str.charAt(i);
}
return bytes;
}
public static String decodeToStr(byte[] bytes) {
StringBuilder builder = new StringBuilder();
for (byte bt : bytes) {
builder.append((char) bt);
}
return builder.toString();
}
public static String recvString(InputStream in) throws IOException {
int len = wrapBytes(recvAll(in, 4)).getInt();
return decodeToStr(recvAll(in, len));
}
public static void sendString(OutputStream out, String string) throws IOException {
out.write(toBytes(string.length()));
out.write(toBytes(string));
}
}
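// A small round-trip sketch, not part of the original file, showing the
// 4-byte little-endian length framing used by sendString/recvString.
class UtilsExample {
  public static void main(String[] args) throws IOException {
    java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream();
    Utils.sendString(out, "client:java");
    java.io.ByteArrayInputStream in =
        new java.io.ByteArrayInputStream(out.toByteArray());
    System.out.println(Utils.recvString(in));  // prints "client:java"
  }
}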
| https://github.com/zk-ml/tachikoma |
jvm/core/src/test/java/org/apache/tvm/FunctionTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.tvm;
import org.junit.Test;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
public class FunctionTest {
@Test
public void test_reg_sum_number() {
Function.register("sum_number", new Function.Callback() {
@Override public Object invoke(TVMValue... args) {
long res = 0L;
for (TVMValue arg : args) {
res += arg.asLong();
}
return res;
}
});
Function func = Function.getFunction("sum_number");
TVMValue res = func.pushArg(10).pushArg(20).invoke();
assertEquals(30, res.asLong());
res.release();
func.release();
}
@Test
public void test_add_string() {
Function func = Function.convertFunc(new Function.Callback() {
@Override public Object invoke(TVMValue... args) {
String res = "";
for (TVMValue arg : args) {
res += arg.asString();
}
return res;
}
});
TVMValue res = func.pushArg("Hello").pushArg(" ").pushArg("World!").invoke();
assertEquals("Hello World!", res.asString());
res.release();
func.release();
}
@Test
public void test_sum_first_byte() {
Function func = Function.convertFunc(new Function.Callback() {
@Override public Object invoke(TVMValue... args) {
byte[] bt = new byte[1];
for (TVMValue arg : args) {
bt[0] += arg.asBytes()[0];
}
return bt;
}
});
TVMValue res = func.pushArg(new byte[]{1}).pushArg(new byte[]{2, 3}).invoke();
assertArrayEquals(new byte[]{3}, res.asBytes());
res.release();
func.release();
}
@Test
public void test_sum_ndarray() {
final long[] shape = new long[]{2, 1};
Function func = Function.convertFunc(new Function.Callback() {
@Override public Object invoke(TVMValue... args) {
double sum = 0.0;
for (TVMValue arg : args) {
NDArray arr = NDArray.empty(shape, new TVMType("float32"));
arg.asNDArray().copyTo(arr);
float[] nativeArr = arr.asFloatArray();
for (int i = 0; i < nativeArr.length; ++i) {
sum += nativeArr[i];
}
arr.release();
}
return sum;
}
});
NDArray arr = NDArray.empty(shape, new TVMType("float32"));
arr.copyFrom(new float[]{2f, 3f});
TVMValue res = func.pushArg(arr).pushArg(arr).invoke();
assertEquals(10.0, res.asDouble(), 1e-3);
res.release();
func.release();
}
@Test
public void test_return_function() {
Function myFunc = Function.convertFunc(new Function.Callback() {
@Override public Object invoke(TVMValue... args) {
final long y = args[0].asLong();
return Function.convertFunc(new Function.Callback() {
@Override public Object invoke(TVMValue... args) {
final long x = args[0].asLong();
return x + y;
}
});
}
});
Function func = myFunc.pushArg(10).invoke().asFunction();
TVMValue res = func.pushArg(20).invoke();
assertEquals(30, res.asLong());
func.release();
myFunc.release();
}
}
| https://github.com/zk-ml/tachikoma |
jvm/core/src/test/java/org/apache/tvm/ModuleTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.tvm;
import org.junit.BeforeClass;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.junit.Assert.*;
import java.io.File;
import java.util.Random;
public class ModuleTest {
private final Logger logger = LoggerFactory.getLogger(ModuleTest.class);
private static String loadingDir;
@BeforeClass
public static void beforeClass() {
loadingDir = System.getProperty("test.tempdir");
}
@Test
public void test_load_add_func_cpu() {
Module fadd = Module.load(loadingDir + File.separator + "add_cpu.so");
Device dev = new Device("cpu", 0);
long[] shape = new long[]{2};
NDArray arr = NDArray.empty(shape, dev);
arr.copyFrom(new float[]{3f, 4f});
NDArray res = NDArray.empty(shape, dev);
fadd.entryFunc().pushArg(arr).pushArg(arr).pushArg(res).invoke();
assertArrayEquals(new float[]{6f, 8f}, res.asFloatArray(), 1e-3f);
// test call() api
fadd.entryFunc().call(arr, arr, res);
assertArrayEquals(new float[]{6f, 8f}, res.asFloatArray(), 1e-3f);
arr.release();
res.release();
fadd.release();
}
@Test
public void test_load_add_func_cuda() {
final Random RND = new Random(0);
Device dev = new Device("cuda", 0);
if (!dev.exist()) {
logger.warn("CUDA GPU does not exist. Skip the test.");
return;
}
Module fadd = Module.load(loadingDir + File.separator + "add_cuda.so");
Module faddDev = Module.load(loadingDir + File.separator + "add_cuda.ptx");
fadd.importModule(faddDev);
final int dim = 100;
long[] shape = new long[]{dim};
NDArray arr = NDArray.empty(shape, dev);
float[] data = new float[dim];
float[] dataX2 = new float[dim];
for (int i = 0; i < dim; ++i) {
data[i] = RND.nextFloat();
dataX2[i] = data[i] * 2;
}
arr.copyFrom(data);
NDArray res = NDArray.empty(shape, dev);
fadd.entryFunc().pushArg(arr).pushArg(arr).pushArg(res).invoke();
assertArrayEquals(dataX2, res.asFloatArray(), 1e-3f);
arr.release();
res.release();
faddDev.release();
fadd.release();
}
}
| https://github.com/zk-ml/tachikoma |
jvm/core/src/test/java/org/apache/tvm/NDArrayTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.tvm;
import org.junit.Test;
import static org.junit.Assert.*;
public class NDArrayTest {
@Test
public void test_from_float32() {
NDArray ndarray = NDArray.empty(new long[]{2, 2}, new TVMType("float32"));
ndarray.copyFrom(new float[]{1, 2, 3, 4});
assertArrayEquals(new float[]{1f, 2f, 3f, 4f}, ndarray.asFloatArray(), 1e-3f);
ndarray.release();
}
@Test
public void test_from_float64() {
NDArray ndarray = NDArray.empty(new long[]{2, 2}, new TVMType("float64"));
ndarray.copyFrom(new double[]{1, 2, 3, 4});
assertArrayEquals(new double[]{1.0, 2.0, 3.0, 4.0}, ndarray.asDoubleArray(), 1e-3);
ndarray.release();
}
@Test
public void test_from_int8() {
NDArray ndarray = NDArray.empty(new long[]{2, 2}, new TVMType("int8"));
ndarray.copyFrom(new byte[]{1, 2, 3, 4});
assertArrayEquals(new byte[]{1, 2, 3, 4}, ndarray.asByteArray());
ndarray.release();
}
@Test
public void test_from_int16() {
NDArray ndarray = NDArray.empty(new long[]{2, 2}, new TVMType("int16"));
ndarray.copyFrom(new short[]{1, 2, 3, 4});
assertArrayEquals(new short[]{1, 2, 3, 4}, ndarray.asShortArray());
ndarray.release();
}
@Test
public void test_from_int32() {
NDArray ndarray = NDArray.empty(new long[]{2, 2}, new TVMType("int32"));
ndarray.copyFrom(new int[]{1, 2, 3, 4});
assertArrayEquals(new int[]{1, 2, 3, 4}, ndarray.asIntArray());
ndarray.release();
}
@Test
public void test_from_int64() {
NDArray ndarray = NDArray.empty(new long[]{2, 2}, new TVMType("int64"));
ndarray.copyFrom(new long[]{1, 2, 3, 4});
assertArrayEquals(new long[]{1, 2, 3, 4}, ndarray.asLongArray());
ndarray.release();
}
@Test
public void test_from_uint16() {
NDArray ndarray = NDArray.empty(new long[]{2, 2}, new TVMType("uint16"));
ndarray.copyFrom(new char[]{65535, 2, 3, 4});
assertArrayEquals(new char[]{65535, 2, 3, 4}, ndarray.asCharArray());
ndarray.release();
}
}
| https://github.com/zk-ml/tachikoma |
jvm/core/src/test/java/org/apache/tvm/TestUtils.java | /*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.tvm;
import org.apache.tvm.rpc.Server;
import java.io.IOException;
public class TestUtils {
public static class RefInt {
public int value;
}
public static Server startServer(RefInt portRef) {
Server server = null;
int port = 9981;
for (int i = 0; i < 10; ++i) {
try {
server = new Server(port + i);
server.start();
portRef.value = port + i;
return server;
} catch (IOException e) {
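        // Port already in use; try the next one.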
}
}
throw new RuntimeException("Cannot find an available port.");
}
}
| https://github.com/zk-ml/tachikoma |
jvm/core/src/test/java/org/apache/tvm/contrib/GraphExecutorTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.tvm.contrib;
import org.apache.tvm.Module;
import org.apache.tvm.NDArray;
import org.apache.tvm.Device;
import org.apache.tvm.TestUtils;
import org.apache.tvm.rpc.Client;
import org.apache.tvm.rpc.RPCSession;
import org.apache.tvm.rpc.Server;
import org.junit.BeforeClass;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.util.Scanner;
import static org.junit.Assert.assertArrayEquals;
public class GraphExecutorTest {
  private final Logger logger = LoggerFactory.getLogger(GraphExecutorTest.class);
private static String loadingDir;
@BeforeClass
public static void beforeClass() {
loadingDir = System.getProperty("test.tempdir");
}
@Test
public void test_add_one_local() throws IOException {
Module libmod = Module.load(loadingDir + File.separator + "graph_addone_lib.so");
String graphJson = new Scanner(new File(
loadingDir + File.separator + "graph_addone.json"))
.useDelimiter("\\Z").next();
Device dev = Device.cpu();
GraphModule graph = GraphExecutor.create(graphJson, libmod, dev);
long[] shape = new long[]{4};
NDArray arr = NDArray.empty(shape, dev);
arr.copyFrom(new float[]{1f, 2f, 3f, 4f});
NDArray out = NDArray.empty(shape, dev);
graph.setInput("x", arr).run();
graph.getOutput(0, out);
assertArrayEquals(new float[]{2f, 3f, 4f, 5f}, out.asFloatArray(), 1e-3f);
arr.release();
out.release();
graph.release();
}
@Test
public void test_add_one_remote() throws IOException {
if (!Module.enabled("rpc")) {
logger.warn("RPC is not enabled. Skip.");
return;
}
String libPath = loadingDir + File.separator + "graph_addone_lib.so";
String graphJson = new Scanner(new File(
loadingDir + File.separator + "graph_addone.json"))
.useDelimiter("\\Z").next();
TestUtils.RefInt port = new TestUtils.RefInt();
Server server = null;
try {
server = TestUtils.startServer(port);
RPCSession remote = Client.connect("127.0.0.1", port.value);
Device dev = remote.cpu();
remote.upload(new File(libPath));
Module mlib = remote.loadModule("graph_addone_lib.so");
GraphModule graph = GraphExecutor.create(graphJson, mlib, dev);
long[] shape = new long[]{4};
NDArray arr = NDArray.empty(shape, dev);
arr.copyFrom(new float[]{1f, 2f, 3f, 4f});
NDArray out = NDArray.empty(shape, dev);
graph.setInput("x", arr).run();
graph.getOutput(0, out);
assertArrayEquals(new float[]{2f, 3f, 4f, 5f}, out.asFloatArray(), 1e-3f);
arr.release();
out.release();
graph.release();
} finally {
if (server != null) {
server.terminate();
}
}
}
}
| https://github.com/zk-ml/tachikoma |
jvm/core/src/test/java/org/apache/tvm/rpc/RPCTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.tvm.rpc;
import org.apache.tvm.Function;
import org.apache.tvm.Module;
import org.apache.tvm.TVMValue;
import org.apache.tvm.TestUtils;
import org.junit.Ignore;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.junit.Assert.assertEquals;
public class RPCTest {
private final Logger logger = LoggerFactory.getLogger(RPCTest.class);
@Test
public void test_addone() {
if (!Module.enabled("rpc")) {
logger.warn("RPC is not enabled. Skip.");
return;
}
Function.register("test.rpc.addone", new Function.Callback() {
@Override public Object invoke(TVMValue... args) {
return args[0].asLong() + 1L;
}
});
TestUtils.RefInt port = new TestUtils.RefInt();
Server server = null;
try {
server = TestUtils.startServer(port);
RPCSession client = Client.connect("127.0.0.1", port.value);
Function func = client.getFunction("test.rpc.addone");
assertEquals(11L, func.call(10).asLong());
} finally {
if (server != null) {
server.terminate();
}
}
}
@Test
public void test_strcat() {
if (!Module.enabled("rpc")) {
logger.warn("RPC is not enabled. Skip.");
return;
}
Function.register("test.rpc.strcat", new Function.Callback() {
@Override public Object invoke(TVMValue... args) {
return args[0].asString() + ":" + args[1].asLong();
}
});
TestUtils.RefInt port = new TestUtils.RefInt();
Server server = null;
try {
server = TestUtils.startServer(port);
RPCSession client = Client.connect("127.0.0.1", port.value);
Function func = client.getFunction("test.rpc.strcat");
assertEquals("abc:11", func.call("abc", 11L).asString());
} finally {
if (server != null) {
server.terminate();
}
}
}
@Ignore("Proxy server may not have been ready when this test runs,"
+ " will add retry when callback function can deal with Java exception."
+ " After that we'll enable this test.")
@Test
public void test_connect_proxy_server() {
String proxyHost = System.getProperty("test.rpc.proxy.host");
int proxyPort = Integer.parseInt(System.getProperty("test.rpc.proxy.port"));
Function.register("test.rpc.proxy.addone", new Function.Callback() {
@Override public Object invoke(TVMValue... tvmValues) {
return tvmValues[0].asLong() + 1L;
}
});
Server server = null;
try {
server = new Server(proxyHost, proxyPort, "x1");
server.start();
RPCSession client = Client.connect(proxyHost, proxyPort, "x1");
Function f1 = client.getFunction("test.rpc.proxy.addone");
assertEquals(11L, f1.call(10L).asLong());
} finally {
if (server != null) {
server.terminate();
}
}
}
}
| https://github.com/zk-ml/tachikoma |
jvm/core/src/test/scripts/test_add_cpu.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import tvm
from tvm import te
from tvm.contrib import cc
def test_add(target_dir):
n = te.var("n")
A = te.placeholder((n,), name="A")
B = te.placeholder((n,), name="B")
C = te.compute(A.shape, lambda i: A[i] + B[i], name="C")
s = te.create_schedule(C.op)
fadd = tvm.build(s, [A, B, C], "llvm", name="myadd")
fadd.save(os.path.join(target_dir, "add_cpu.o"))
cc.create_shared(
os.path.join(target_dir, "add_cpu.so"), [os.path.join(target_dir, "add_cpu.o")]
)
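    # Note: the resulting add_cpu.so is what the JVM side loads in
    # ModuleTest.test_load_add_func_cpu via Module.load().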
if __name__ == "__main__":
import sys
if len(sys.argv) != 2:
sys.exit(-1)
test_add(sys.argv[1])
| https://github.com/zk-ml/tachikoma |
jvm/core/src/test/scripts/test_add_gpu.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import tvm
from tvm import te
from tvm.contrib import cc, nvcc
@tvm.register_func("tvm_callback_cuda_compile", override=True)
def tvm_callback_cuda_compile(code):
ptx = nvcc.compile_cuda(code, target_format="ptx")
return ptx
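# Hedged note: registering this callback (with override=True) replaces TVM's
# default CUDA compilation step, so device code is compiled to PTX via nvcc.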
def test_add(target_dir):
if not tvm.runtime.enabled("cuda"):
print("skip %s because cuda is not enabled..." % __file__)
return
n = te.var("n")
A = te.placeholder((n,), name="A")
B = te.placeholder((n,), name="B")
C = te.compute(A.shape, lambda i: A[i] + B[i], name="C")
s = te.create_schedule(C.op)
bx, tx = s[C].split(C.op.axis[0], factor=64)
s[C].bind(bx, te.thread_axis("blockIdx.x"))
s[C].bind(tx, te.thread_axis("threadIdx.x"))
fadd_cuda = tvm.build(s, [A, B, C], tvm.target.Target("cuda", host="llvm"), name="myadd")
fadd_cuda.save(os.path.join(target_dir, "add_cuda.o"))
fadd_cuda.imported_modules[0].save(os.path.join(target_dir, "add_cuda.ptx"))
cc.create_shared(
os.path.join(target_dir, "add_cuda.so"), [os.path.join(target_dir, "add_cuda.o")]
)
if __name__ == "__main__":
import sys
if len(sys.argv) != 2:
sys.exit(-1)
test_add(sys.argv[1])
| https://github.com/zk-ml/tachikoma |
jvm/core/src/test/scripts/test_graph_executor.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import tvm
from tvm import te
import json
from tvm.contrib import graph_executor
def dump_graph_lib(target_dir):
dim = 4
A = te.placeholder((dim,), name="A")
B = te.compute(A.shape, lambda *i: A(*i) + 1.0, name="B")
sched = te.create_schedule(B.op)
node0 = {"op": "null", "name": "x", "inputs": []}
node1 = {
"op": "tvm_op",
"name": "add",
"inputs": [[0, 0, 0]],
"attrs": {"func_name": "myadd", "flatten_data": "1", "num_inputs": "1", "num_outputs": "1"},
}
nodes = [node0, node1]
arg_nodes = [0]
node_row_ptr = [0, 1, 2]
outputs = [[1, 0, 0]]
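    # Hedged note: entries such as [1, 0, 0] mean [node_id, output_index, version];
    # node_row_ptr gives the first data-entry index of each node (one output each here).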
shape = (4,)
attrs = {
"shape": ["list_shape", [shape, shape]],
"dltype": ["list_str", ["float32", "float32"]],
"storage_id": ["list_int", [0, 1]],
}
graph = {
"nodes": nodes,
"arg_nodes": arg_nodes,
"node_row_ptr": node_row_ptr,
"heads": outputs,
"attrs": attrs,
}
graph = json.dumps(graph)
mlib = tvm.build(sched, [A, B], "llvm", name="myadd")
mlib.export_library(os.path.join(target_dir, "graph_addone_lib.so"))
with open(os.path.join(target_dir, "graph_addone.json"), "w") as fo:
fo.write(graph)
if __name__ == "__main__":
import sys
if len(sys.argv) != 2:
sys.exit(-1)
dump_graph_lib(sys.argv[1])
| https://github.com/zk-ml/tachikoma |
jvm/core/src/test/scripts/test_rpc_proxy_server.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import time
from tvm.rpc import proxy
def start_proxy_server(port, timeout):
prox = proxy.Proxy("127.0.0.1", port=port, port_end=port + 1)
    if timeout > 0:
        time.sleep(timeout)
        prox.terminate()
prox.terminate()
else:
prox.proc.join()
if __name__ == "__main__":
import sys
if len(sys.argv) < 2:
sys.exit(-1)
port = int(sys.argv[1])
timeout = 0 if len(sys.argv) == 2 else float(sys.argv[2])
start_proxy_server(port, timeout)
| https://github.com/zk-ml/tachikoma |
jvm/native/src/main/native/jni_helper_func.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file jni_helper_func.h
* \brief Helper functions for operating JVM objects
*/
#ifndef TVM4J_JNI_MAIN_NATIVE_JNI_HELPER_FUNC_H_
#define TVM4J_JNI_MAIN_NATIVE_JNI_HELPER_FUNC_H_
#include <jni.h>
// Helper functions for RefXXX getter & setter
jlong getLongField(JNIEnv* env, jobject obj) {
jclass refClass = env->FindClass("org/apache/tvm/Base$RefLong");
jfieldID refFid = env->GetFieldID(refClass, "value", "J");
jlong ret = env->GetLongField(obj, refFid);
env->DeleteLocalRef(refClass);
return ret;
}
jint getIntField(JNIEnv* env, jobject obj) {
jclass refClass = env->FindClass("org/apache/tvm/Base$RefInt");
jfieldID refFid = env->GetFieldID(refClass, "value", "I");
jint ret = env->GetIntField(obj, refFid);
env->DeleteLocalRef(refClass);
return ret;
}
void setIntField(JNIEnv* env, jobject obj, jint value) {
jclass refClass = env->FindClass("org/apache/tvm/Base$RefInt");
jfieldID refFid = env->GetFieldID(refClass, "value", "I");
env->SetIntField(obj, refFid, value);
env->DeleteLocalRef(refClass);
}
void setLongField(JNIEnv* env, jobject obj, jlong value) {
jclass refClass = env->FindClass("org/apache/tvm/Base$RefLong");
jfieldID refFid = env->GetFieldID(refClass, "value", "J");
env->SetLongField(obj, refFid, value);
env->DeleteLocalRef(refClass);
}
void setStringField(JNIEnv* env, jobject obj, const char* value) {
jclass refClass = env->FindClass("org/apache/tvm/Base$RefString");
jfieldID refFid = env->GetFieldID(refClass, "value", "Ljava/lang/String;");
env->SetObjectField(obj, refFid, env->NewStringUTF(value));
env->DeleteLocalRef(refClass);
}
// Helper functions for TVMValue
jlong getTVMValueLongField(JNIEnv* env, jobject obj,
const char* clsname = "org/apache/tvm/TVMValueLong") {
jclass cls = env->FindClass(clsname);
jfieldID fid = env->GetFieldID(cls, "value", "J");
jlong ret = env->GetLongField(obj, fid);
env->DeleteLocalRef(cls);
return ret;
}
jdouble getTVMValueDoubleField(JNIEnv* env, jobject obj) {
jclass cls = env->FindClass("org/apache/tvm/TVMValueDouble");
jfieldID fid = env->GetFieldID(cls, "value", "D");
jdouble ret = env->GetDoubleField(obj, fid);
env->DeleteLocalRef(cls);
return ret;
}
jstring getTVMValueStringField(JNIEnv* env, jobject obj) {
jclass cls = env->FindClass("org/apache/tvm/TVMValueString");
jfieldID fid = env->GetFieldID(cls, "value", "Ljava/lang/String;");
jstring ret = static_cast<jstring>(env->GetObjectField(obj, fid));
env->DeleteLocalRef(cls);
return ret;
}
jobject newTVMValueHandle(JNIEnv* env, jlong value) {
jclass cls = env->FindClass("org/apache/tvm/TVMValueHandle");
jmethodID constructor = env->GetMethodID(cls, "<init>", "(J)V");
jobject object = env->NewObject(cls, constructor, value);
env->DeleteLocalRef(cls);
return object;
}
jobject newTVMValueLong(JNIEnv* env, jlong value) {
jclass cls = env->FindClass("org/apache/tvm/TVMValueLong");
jmethodID constructor = env->GetMethodID(cls, "<init>", "(J)V");
jobject object = env->NewObject(cls, constructor, value);
env->DeleteLocalRef(cls);
return object;
}
jobject newTVMValueDouble(JNIEnv* env, jdouble value) {
jclass cls = env->FindClass("org/apache/tvm/TVMValueDouble");
jmethodID constructor = env->GetMethodID(cls, "<init>", "(D)V");
jobject object = env->NewObject(cls, constructor, value);
env->DeleteLocalRef(cls);
return object;
}
jobject newTVMValueString(JNIEnv* env, const char* value) {
jstring jvalue = env->NewStringUTF(value);
jclass cls = env->FindClass("org/apache/tvm/TVMValueString");
jmethodID constructor = env->GetMethodID(cls, "<init>", "(Ljava/lang/String;)V");
jobject object = env->NewObject(cls, constructor, jvalue);
env->DeleteLocalRef(cls);
env->DeleteLocalRef(jvalue);
return object;
}
jobject newTVMValueBytes(JNIEnv* env, const TVMByteArray* arr) {
jbyteArray jarr = env->NewByteArray(arr->size);
env->SetByteArrayRegion(jarr, 0, arr->size,
reinterpret_cast<jbyte*>(const_cast<char*>(arr->data)));
jclass cls = env->FindClass("org/apache/tvm/TVMValueBytes");
jmethodID constructor = env->GetMethodID(cls, "<init>", "([B)V");
jobject object = env->NewObject(cls, constructor, jarr);
env->DeleteLocalRef(cls);
env->DeleteLocalRef(jarr);
return object;
}
jobject newModule(JNIEnv* env, jlong value) {
jclass cls = env->FindClass("org/apache/tvm/Module");
jmethodID constructor = env->GetMethodID(cls, "<init>", "(J)V");
jobject object = env->NewObject(cls, constructor, value);
env->DeleteLocalRef(cls);
return object;
}
jobject newFunction(JNIEnv* env, jlong value) {
jclass cls = env->FindClass("org/apache/tvm/Function");
jmethodID constructor = env->GetMethodID(cls, "<init>", "(J)V");
jobject object = env->NewObject(cls, constructor, value);
env->DeleteLocalRef(cls);
return object;
}
jobject newNDArray(JNIEnv* env, jlong handle, jboolean isview) {
jclass cls = env->FindClass("org/apache/tvm/NDArrayBase");
jmethodID constructor = env->GetMethodID(cls, "<init>", "(JZ)V");
jobject object = env->NewObject(cls, constructor, handle, isview);
env->DeleteLocalRef(cls);
return object;
}
jobject newObject(JNIEnv* env, const char* clsname) {
jclass cls = env->FindClass(clsname);
jmethodID constructor = env->GetMethodID(cls, "<init>", "()V");
jobject object = env->NewObject(cls, constructor);
env->DeleteLocalRef(cls);
return object;
}
void fromJavaDType(JNIEnv* env, jobject jdtype, DLDataType* dtype) {
jclass tvmTypeClass = env->FindClass("org/apache/tvm/DLDataType");
dtype->code = (uint8_t)(env->GetIntField(jdtype, env->GetFieldID(tvmTypeClass, "typeCode", "I")));
dtype->bits = (uint8_t)(env->GetIntField(jdtype, env->GetFieldID(tvmTypeClass, "bits", "I")));
dtype->lanes = (uint16_t)(env->GetIntField(jdtype, env->GetFieldID(tvmTypeClass, "lanes", "I")));
env->DeleteLocalRef(tvmTypeClass);
}
void fromJavaDevice(JNIEnv* env, jobject jdev, DLDevice* dev) {
jclass deviceClass = env->FindClass("org/apache/tvm/Device");
dev->device_type = static_cast<DLDeviceType>(
env->GetIntField(jdev, env->GetFieldID(deviceClass, "deviceType", "I")));
dev->device_id =
static_cast<int>(env->GetIntField(jdev, env->GetFieldID(deviceClass, "deviceId", "I")));
env->DeleteLocalRef(deviceClass);
}
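// Maps a TVM runtime return value to the matching Java wrapper object.
// Hedged note: DLTensor handles become NDArray *views* (the JVM side does not
// own the buffer), while NDArray handles become owning NDArray objects.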
jobject tvmRetValueToJava(JNIEnv* env, TVMValue value, int tcode) {
switch (tcode) {
case kDLUInt:
case kDLInt:
return newTVMValueLong(env, static_cast<jlong>(value.v_int64));
case kDLFloat:
return newTVMValueDouble(env, static_cast<jdouble>(value.v_float64));
case kTVMOpaqueHandle:
return newTVMValueHandle(env, reinterpret_cast<jlong>(value.v_handle));
case kTVMModuleHandle:
return newModule(env, reinterpret_cast<jlong>(value.v_handle));
case kTVMPackedFuncHandle:
return newFunction(env, reinterpret_cast<jlong>(value.v_handle));
case kTVMDLTensorHandle:
return newNDArray(env, reinterpret_cast<jlong>(value.v_handle), true);
case kTVMNDArrayHandle:
return newNDArray(env, reinterpret_cast<jlong>(value.v_handle), false);
case kTVMStr:
return newTVMValueString(env, value.v_str);
case kTVMBytes:
return newTVMValueBytes(env, reinterpret_cast<TVMByteArray*>(value.v_handle));
case kTVMNullptr:
return newObject(env, "org/apache/tvm/TVMValueNull");
default:
LOG(FATAL) << "Do NOT know how to handle return type code " << tcode;
}
return NULL;
}
#endif // TVM4J_JNI_MAIN_NATIVE_JNI_HELPER_FUNC_H_
| https://github.com/zk-ml/tachikoma |
python/gen_requirements.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""TVM Python requirements.txt generator.
This script generates a set of requirements.txt files (stored in `./requirements`) that describe
TVM's Python dependencies.
## Pieces
TVM can be roughly broken into these named pieces along the lines of Python dependencies:
- "core": A core piece, which is intended to be buildable with very few external dependencies. Users
can use Relay, compile models, and run autotuning with this part.
- "importer-<tool>": Model importers, which convert models defined in various other tools (i.e.
TensorFlow, PyTorch, etc) into Relay models.
- Extra features (e.g. XGBoost in AutoTVM). These enhance TVM's functionality, but aren't required
for basic operation.
## What this tool does
From these pieces, this tool builds:
- requirements/<name>.txt - Python dependencies for each named piece above, `<name>` is the same as
the quoted piece name.
- requirements/all.txt - Consolidated Python dependencies for all pieces, excluding dev below.
- requirements/dev.txt - Python dependencies needed to develop TVM, such as lint and test tools.
The data representing each piece is contained in the two maps below.
"""
import argparse
import collections
import os
import re
import textwrap
import sys
import typing
RequirementsByPieceType = typing.List[typing.Tuple[str, typing.Tuple[str, typing.List[str]]]]
# Maps named TVM piece (see description above) to a list of names of Python packages. Please use
# alphabetical order for each package list, and do not add version constraints here!
REQUIREMENTS_BY_PIECE: RequirementsByPieceType = [
# Base requirements needed to install tvm.
(
"core",
(
"Base requirements needed to install tvm",
[
"attrs",
"cloudpickle",
"decorator",
"numpy",
"psutil",
"scipy",
"synr",
"tornado",
],
),
),
# Provide support for Arm(R) Ethos(TM)-U NPU.
(
"ethosu",
(
"Requirements for using Arm(R) Ethos(TM)-U NPU",
[
"ethos-u-vela",
],
),
),
# Relay frontends.
(
"importer-caffe",
(
"Requirements for the Caffe importer",
[
"numpy",
"protobuf",
"scikit-image",
"six",
],
),
),
(
"importer-caffe2",
(
"Requirements for the Caffe2 importer",
[
"future", # Hidden dependency of torch.
"torch",
],
),
),
("importer-coreml", ("Requirements for the CoreML importer", ["coremltools"])),
("importer-darknet", ("Requirements for the DarkNet importer", ["opencv-python"])),
(
"importer-keras",
("Requirements for the Keras importer", ["tensorflow", "tensorflow-estimator"]),
),
(
"importer-onnx",
(
"Requirements for the ONNX importer",
[
"future", # Hidden dependency of torch.
"onnx",
"onnxoptimizer",
"onnxruntime",
"torch",
"torchvision",
],
),
),
(
"importer-paddle",
("Requirements for the PaddlePaddle importer", ["paddlepaddle"]),
),
(
"importer-pytorch",
(
"Requirements for the PyTorch importer",
[
"future", # Hidden dependency of torch.
"torch",
"torchvision",
],
),
),
(
"importer-tensorflow",
("Requirements for the TensorFlow importer", ["tensorflow", "tensorflow-estimator"]),
),
(
"importer-tflite",
("Requirements for the TFLite importer", ["tensorflow", "tensorflow-estimator", "tflite"]),
),
(
"tvmc",
(
"Requirements for the tvmc command-line tool",
[
"ethos-u-vela",
"future", # Hidden dependency of torch.
"onnx",
"onnxoptimizer",
"onnxruntime",
"paddlepaddle",
"tensorflow",
"tflite",
"torch",
"torchvision",
"xgboost",
],
),
),
# Vitis AI requirements
(
"vitis-ai",
(
"Requirements for the Vitis AI codegen",
[
"h5py",
"progressbar",
],
),
),
# XGBoost, useful for autotuning on some targets.
(
"xgboost",
(
"Requirements for XGBoost autotuning",
[
"future", # Hidden dependency of torch.
"torch",
"xgboost",
],
),
),
# Development requirements
(
"dev",
(
"Requirements to develop TVM -- lint, docs, testing, etc.",
[
"astroid", # pylint requirement, listed so a hard constraint can be included.
"autodocsumm",
"black",
"commonmark",
"cpplint",
"docutils",
"image",
"matplotlib",
"pillow",
"pylint",
"sphinx",
"sphinx_autodoc_annotation",
"sphinx_gallery",
"sphinx_rtd_theme",
"types-psutil",
],
),
),
]
ConstraintsType = typing.List[typing.Tuple[str, typing.Union[None, str]]]
# Maps a named Python package (which should appear in REQUIREMENTS_BY_PIECE above) to a
# semver or pip version constraint. Semver constraints are translated into requirements.txt-friendly
# constraints.
#
# These constraints serve only to record technical reasons why a particular version can't be used.
# They are the default install_requires used in setup.py. These can be further narrowed to restrict
# dependencies to those tested or used in CI; however, that process is not done here.
#
# Policy for constraints listed here:
# 1. Each package specified in REQUIREMENTS_BY_PIECE must be included here.
# 2. If TVM will functionally break against an old version of a dependency, specify a >= relation
# here. Include a comment linking to context or explaining why the constraint is in place.
CONSTRAINTS = [
("astroid", None),
("attrs", None),
("autodocsumm", None),
("black", "==20.8b1"),
("cloudpickle", None),
("commonmark", ">=0.7.3"), # From PR #213.
("coremltools", None),
("cpplint", None),
("decorator", None),
(
"docutils",
"<0.17",
), # Work around https://github.com/readthedocs/sphinx_rtd_theme/issues/1115
("ethos-u-vela", "==3.2.0"),
("future", None),
("h5py", "==2.10.0"),
("image", None),
("matplotlib", None),
("numpy", None),
("onnx", None),
("onnxoptimizer", None),
("onnxruntime", None),
("opencv-python", None),
("paddlepaddle", None),
("pillow", None),
("progressbar", None),
("protobuf", None),
("psutil", None),
("pylint", None),
("scikit-image", None),
("scipy", None),
("six", None),
("sphinx", None),
("sphinx_autodoc_annotation", None),
("sphinx_gallery", None),
("sphinx_rtd_theme", None),
("synr", "==0.6.0"),
("tensorflow", None),
("tensorflow-estimator", None),
("tflite", None),
("torch", None),
("torchvision", None),
("tornado", None),
("xgboost", ">=1.1.0,<1.6.0"), # From PR #4953 & Issue #12009
]
################################################################################
# End of configuration options.
################################################################################
# Required keys in REQUIREMENTS_BY_PIECE.
REQUIRED_PIECES: typing.List[str] = ["core", "dev"]
# Regex to validates piece names.
PIECE_REGEX: typing.Pattern = re.compile(r"^[a-z0-9][a-z0-9-]*", re.IGNORECASE)
# Regex to match the leading operator and version of a constraint specification. Comma-separated
# constraint lists are passed through verbatim; only the first entry is validated here.
CONSTRAINT_REGEX: typing.Pattern = re.compile(r"(?:\^|\<|(?:~=)|(?:<=)|(?:==)|(?:>=)|\>)[^<>=\^,]+")
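# e.g. accepts "==0.6.0", ">=1.1.0", "<0.17", and caret forms such as "^0.6.0".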
# Regex for parsing semantic versions. See
# https://semver.org/#is-there-a-suggested-regular-expression-regex-to-check-a-semver-string
SEMVER_REGEX: typing.Pattern = re.compile(
r"^(?P<major>0|[1-9]\d*)\.(?P<minor>0|[1-9]\d*)\.(?P<patch>0|[1-9]\d*)(?:-(?P<prerelease>(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+(?P<buildmetadata>[0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$"
)
def validate_requirements_by_piece() -> typing.List[str]:
"""Validate REQUIREMENTS_BY_PIECE, returning a list of problems.
Returns
-------
list[str] :
A list of strings, each one describing a distinct problem with REQUIREMENTS_BY_PIECE.
"""
problems = []
unseen_required_pieces = set(REQUIRED_PIECES)
seen_pieces = set()
# Ensure that core is listed first and dev is listed last.
saw_core = False
saw_dev = False
if not isinstance(REQUIREMENTS_BY_PIECE, (list, tuple)):
problems.append(f"must be list or tuple, see {REQUIREMENTS_BY_PIECE!r}")
return problems
for piece, value in REQUIREMENTS_BY_PIECE:
if not isinstance(piece, str):
problems.append(f"piece {piece!r}: must be str")
continue
if piece in unseen_required_pieces:
unseen_required_pieces.remove(piece)
piece_lower = piece.lower()
if piece_lower in seen_pieces:
problems.append(f"piece {piece}: listed twice")
seen_pieces.add(piece_lower)
if not saw_core and piece != "core":
problems.append(f'piece {piece}: must list after "core" (core must be first)')
elif piece == "core":
saw_core = True
if saw_dev:
problems.append(f'piece {piece}: must list before "dev" (dev must be last)')
elif piece == "dev":
saw_dev = True
if not isinstance(value, (tuple, list)) or len(value) != 2:
problems.append(
f'piece {piece}: should be formatted like ("{piece}", ("<requirements.txt comment>", ["dep1", "dep2", ...])). got: {value!r}'
)
continue
description, deps = value
if not isinstance(description, str):
problems.append(f"piece {piece}: description should be a string, got {description!r}")
if not isinstance(deps, (list, tuple)) or any(not isinstance(d, str) for d in deps):
problems.append(f"piece {piece}: deps should be a list of strings, got {deps!r}")
continue
if list(sorted(deps)) != list(deps):
problems.append(
f"piece {piece}: deps must be sorted. Correct order:\n {list(sorted(deps))!r}"
)
piece_deps = set()
for d in deps:
if CONSTRAINT_REGEX.search(d):
problems.append(
f"piece {piece}: dependency {d} should not specify a version. "
"Add it to CONSTRAINTS instead."
)
if d.lower() in piece_deps:
problems.append(f"piece {piece}: dependency {d} listed twice")
piece_deps.add(d.lower())
extras_pieces = [
k for (k, _) in REQUIREMENTS_BY_PIECE if k not in ("dev", "core") if isinstance(k, str)
]
sorted_extras_pieces = list(sorted(extras_pieces))
if sorted_extras_pieces != list(extras_pieces):
problems.append(
'pieces other than "core" and "dev" must appear in alphabetical order: '
f"{sorted_extras_pieces}"
)
return problems
def parse_semver(
package: str, constraint: str, problems: typing.List[str]
) -> typing.Tuple[typing.List[str], int, int]:
"""Parse a semantic versioning constraint of the form "^X.[.Y[.Z[...]]]]"
Parameters
----------
package : str
Name of the package specifying this constraint, for reporting problems.
constraint : str
The semver constraint. Must start with "^"
problems : List[str]
A list of strings describing problems that have occurred validating the configuration.
Problems encountered while validating constraint are appended to this list.
Returns
-------
tuple[list[str], int, int] :
A 3-tuple. The first element is a list containing an entry for each component in the
semver string (components separated by "."). The second element is the index of the
component in the list which must not change to meet the semver constraint. The third element
is an integer, the numeric value of the changing component (this can be non-trivial when
        the patch is the changing part but pre-release or build metadata is attached).
See "Caret requirements" at https://python-poetry.org/docs/versions/.
"""
m = SEMVER_REGEX.match(constraint[1:])
if not m:
problems.append(f"{package}: invalid semver constraint {constraint}")
return [], 0, 0
min_ver_parts = [
m.group("major"),
m.group("minor"),
m.group("patch")
+ (f"-{m.group('prerelease')}" if m.group("prerelease") else "")
+ (f"+{m.group('buildmetadata')}" if m.group("buildmetadata") else ""),
]
# Major/minor version handling is simple
for i, p in enumerate(min_ver_parts[:2]):
x = int(p.strip())
if x:
return min_ver_parts, i, x
# For patch version, consult only the numeric patch
if m.group("patch"):
patch_int = int(m.group("patch"))
if patch_int or min_ver_parts[2] != m.group("patch"):
return min_ver_parts, 2, patch_int
# All 0's
return min_ver_parts, 0, 0
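# Hedged usage sketch (traced by hand from the logic above):
#   parse_semver("synr", "^0.6.0", [])    -> (["0", "6", "0"], 1, 6)
#   parse_semver("tornado", "^1.2.3", []) -> (["1", "2", "3"], 0, 1)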
def validate_constraints() -> typing.List[str]:
"""Validate CONSTRAINTS, returning a list of problems found.
Returns
-------
list[str] :
A list of strings, each one describing a distinct problem found in CONSTRAINTS.
"""
problems = []
if not isinstance(CONSTRAINTS, (list, tuple)):
problems.append(f"must be list or tuple, see: {CONSTRAINTS!r}")
seen_packages = set()
all_deps = set()
for _, (_, deps) in REQUIREMENTS_BY_PIECE:
for d in deps:
all_deps.add(d.lower())
for package, constraint in CONSTRAINTS:
if package in seen_packages:
problems.append(f"{package}: specified twice")
seen_packages.add(package)
if package.lower() not in all_deps:
problems.append(f"{package}: not specified in REQUIREMENTS_BY_PIECE")
if constraint is None: # None is just a placeholder that allows for comments.
continue
if not CONSTRAINT_REGEX.match(constraint):
problems.append(
f'{package}: constraint "{constraint}" does not look like a valid constraint'
)
if constraint.startswith("^"):
parse_semver(package, constraint, problems)
all_constrained_packages = [p for (p, _) in CONSTRAINTS]
sorted_constrained_packages = list(sorted(all_constrained_packages))
if sorted_constrained_packages != all_constrained_packages:
problems.append(
"CONSTRAINTS entries should be in this sorted order: " f"{sorted_constrained_packages}"
)
return problems
class ValidationError(Exception):
"""Raised when a validation error occurs."""
@staticmethod
def format_problems(config: str, problems: typing.List[str]) -> str:
"""Format a list of problems with a global config variable into human-readable output.
Parameters
----------
config : str
Name of the global configuration variable of concern. Prepended to the output.
problems: list[str]
A list of strings, each one a distinct problem with that config variable.
Returns
-------
str :
A human-readable string suitable for console, listing the problems as bullet points.
"""
formatted = []
for p in problems:
assert isinstance(p, str), f"problems element not a str: {p}"
formatted.append(
"\n".join(
textwrap.wrap(
f"{config}: {p}", width=80, initial_indent=" * ", subsequent_indent=" "
)
)
)
return "\n".join(formatted)
def __init__(self, config: str, problems: typing.List[str]):
"""Describes an error that occurs validating one of the global config variables.
Parameters
----------
config : str
Name of the global configuration variable of concern. Prepended to the output.
problems: list[str]
A list of strings, each one a distinct problem with that config variable.
"""
super(ValidationError, self).__init__(self.format_problems(config, problems))
self.problems = problems
def validate_or_raise():
problems = validate_requirements_by_piece()
if problems:
raise ValidationError("REQUIREMENTS_BY_PIECE", problems)
problems = validate_constraints()
if problems:
raise ValidationError("CONSTRAINTS", problems)
def semver_to_requirements(dep: str, constraint: str, joined_deps: typing.List[str]):
"""Convert a SemVer-style constraint to a setuptools-compatible constraint.
Parameters
----------
dep : str
Name of the PyPI package to depend on.
constraint : str
The SemVer constraint, of the form "^<semver constraint>"
joined_deps : list[str]
A list of strings, each a setuptools-compatible constraint which could be written to
a line in requirements.txt. The converted constraint is appended to this list.
"""
problems: typing.List[str] = []
min_ver_parts, fixed_index, fixed_part = parse_semver(dep, constraint, problems)
text_problems = "\n" + "\n".join(f" * {p}" for p in problems)
assert (
not problems
), f"should not happen: validated semver {constraint} parses with problems:{text_problems}"
max_ver_parts = (
min_ver_parts[:fixed_index]
+ [str(fixed_part + 1)]
+ ["0" for _ in min_ver_parts[fixed_index + 1 :]]
)
joined_deps.append(f'{dep}>={".".join(min_ver_parts)},<{".".join(max_ver_parts)}')
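# Worked example (hedged): a caret constraint pins everything left of the first
# non-zero component, so "^1.2.3" yields "dep>=1.2.3,<2.0.0" while "^0.6.0"
# yields "dep>=0.6.0,<0.7.0".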
def join_requirements() -> typing.Dict[str, typing.Tuple[str, typing.List[str]]]:
"""Validate, then join REQUIRMENTS_BY_PIECE against CONSTRAINTS and return the result.
Returns
-------
An OrderedDict containing REQUIREMENTS_BY_PIECE, except any dependency mentioned in CONSTRAINTS
is replaced by a setuptools-compatible constraint.
"""
validate_or_raise()
constraints_map = collections.OrderedDict([(p.lower(), c) for (p, c) in CONSTRAINTS])
to_return = collections.OrderedDict()
all_deps = set()
for piece, (description, deps) in REQUIREMENTS_BY_PIECE:
joined_deps = []
for d in deps:
constraint = constraints_map.get(d.lower())
if constraint is None:
joined_deps.append(d)
continue
if constraint[0] == "^":
semver_to_requirements(d, constraint, joined_deps)
else:
joined_deps.append(f"{d}{constraint}")
if piece != "dev":
all_deps.update(joined_deps)
to_return[piece] = (description, joined_deps)
to_return["all-prod"] = (
"Combined dependencies for all TVM pieces, excluding dev",
list(sorted(all_deps)),
)
return to_return
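# Hedged sketch of the returned shape:
#   OrderedDict([
#       ("core", ("Base requirements needed to install tvm",
#                 ["attrs", ..., "synr==0.6.0", "tornado"])),
#       ...,
#       ("all-prod", ("Combined dependencies for all TVM pieces, excluding dev", [...])),
#   ])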
def join_and_write_requirements(args: argparse.Namespace):
try:
joined_deps = join_requirements()
except ValidationError as e:
print(f"ERROR: invalid requirements configuration in {__file__}:", file=sys.stderr)
print(str(e), file=sys.stderr)
sys.exit(2)
if args.lint:
sys.exit(0)
output_dir = os.path.join(os.path.dirname(__file__), "requirements")
if not os.path.exists(output_dir):
os.makedirs(output_dir)
elif not os.path.isdir(output_dir):
print(
f"ERROR: output directory {output_dir} exists but is not a dir. Delete it",
file=sys.stderr,
)
sys.exit(2)
for piece, (description, deps) in joined_deps.items():
with open(os.path.join(output_dir, f"{piece}.txt"), "w") as f:
f.write(
f"# AUTOGENERATED by python/gen_requirements.py{os.linesep}"
f"#{os.linesep}"
f"# {description}{os.linesep}"
)
for d in deps:
f.write(f"{d}{os.linesep}")
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser()
parser.add_argument(
"--lint", action="store_true", help="Just lint dependencies, don't generate anything"
)
return parser.parse_args()
def main():
args = parse_args()
join_and_write_requirements(args)
if __name__ == "__main__":
main()
| https://github.com/zk-ml/tachikoma |
python/setup.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, exec-used
"""Setup TVM package."""
import os
import shutil
import sys
import sysconfig
import pathlib
import platform
from setuptools import find_packages
from setuptools.dist import Distribution
# need to use distutils.core for correct placement of cython dll
if "--inplace" in sys.argv:
from distutils.core import setup
from distutils.extension import Extension
else:
from setuptools import setup
from setuptools.extension import Extension
CURRENT_DIR = os.path.dirname(__file__)
FFI_MODE = os.environ.get("TVM_FFI", "auto")
CONDA_BUILD = os.getenv("CONDA_BUILD") is not None
def get_lib_path():
"""Get library path, name and version"""
    # We cannot import `libinfo.py` in setup.py directly, since that would
    # trigger tvm/__init__.py and pull in extra dependencies.
libinfo_py = os.path.join(CURRENT_DIR, "./tvm/_ffi/libinfo.py")
libinfo = {"__file__": libinfo_py}
exec(compile(open(libinfo_py, "rb").read(), libinfo_py, "exec"), libinfo, libinfo)
version = libinfo["__version__"]
if not CONDA_BUILD:
lib_path = libinfo["find_lib_path"]()
libs = [lib_path[0]]
if "runtime" not in libs[0]:
for name in lib_path[1:]:
if "runtime" in name:
libs.append(name)
break
# Add standalone_crt, if present
for name in lib_path:
candidate_path = os.path.join(os.path.dirname(name), "standalone_crt")
if os.path.isdir(candidate_path):
libs.append(candidate_path)
break
# Add microTVM template projects
for name in lib_path:
candidate_path = os.path.join(os.path.dirname(name), "microtvm_template_projects")
if os.path.isdir(candidate_path):
libs.append(candidate_path)
break
# Add tvmc configuration json files
for name in lib_path:
candidate_path = os.path.abspath(os.path.join(os.path.dirname(name), "..", "configs"))
if os.path.isdir(candidate_path):
libs.append(candidate_path)
break
else:
libs = None
return libs, version
def git_describe_version(original_version):
"""Get git describe version."""
ver_py = os.path.join(CURRENT_DIR, "..", "version.py")
libver = {"__file__": ver_py}
exec(compile(open(ver_py, "rb").read(), ver_py, "exec"), libver, libver)
_, gd_version = libver["git_describe_version"]()
if gd_version != original_version and "--inplace" not in sys.argv:
print("Use git describe based version %s" % gd_version)
return gd_version
LIB_LIST, __version__ = get_lib_path()
__version__ = git_describe_version(__version__)
def config_cython():
"""Try to configure cython and return cython configuration"""
if FFI_MODE not in ("cython"):
if os.name == "nt" and not CONDA_BUILD:
print("WARNING: Cython is not supported on Windows, will compile without cython module")
return []
sys_cflags = sysconfig.get_config_var("CFLAGS")
if sys_cflags and "i386" in sys_cflags and "x86_64" in sys_cflags:
print("WARNING: Cython library may not be compiled correctly with both i386 and x64")
return []
try:
from Cython.Build import cythonize
# from setuptools.extension import Extension
if sys.version_info >= (3, 0):
subdir = "_cy3"
else:
subdir = "_cy2"
ret = []
path = "tvm/_ffi/_cython"
extra_compile_args = ["-std=c++17", "-DDMLC_USE_LOGGING_LIBRARY=<tvm/runtime/logging.h>"]
if os.name == "nt":
library_dirs = ["tvm", "../build/Release", "../build"]
libraries = ["tvm"]
extra_compile_args = [
"/std:c++17",
"/D DMLC_USE_LOGGING_LIBRARY=<tvm/runtime/logging.h>",
]
# library is available via conda env.
if CONDA_BUILD:
library_dirs = [os.environ["LIBRARY_LIB"]]
else:
library_dirs = None
libraries = None
for fn in os.listdir(path):
if not fn.endswith(".pyx"):
continue
ret.append(
Extension(
"tvm._ffi.%s.%s" % (subdir, fn[:-4]),
["tvm/_ffi/_cython/%s" % fn],
include_dirs=[
"../include/",
"../3rdparty/dmlc-core/include",
"../3rdparty/dlpack/include",
],
extra_compile_args=extra_compile_args,
library_dirs=library_dirs,
libraries=libraries,
language="c++",
)
)
return cythonize(ret, compiler_directives={"language_level": 3})
except ImportError as error:
if FFI_MODE == "cython":
raise error
print("WARNING: Cython is not installed, will compile without cython module")
return []
class BinaryDistribution(Distribution):
def has_ext_modules(self):
return True
def is_pure(self):
return False
setup_kwargs = {}
if not CONDA_BUILD:
with open("MANIFEST.in", "w") as fo:
for path in LIB_LIST:
if os.path.isfile(path):
shutil.copy(path, os.path.join(CURRENT_DIR, "tvm"))
_, libname = os.path.split(path)
fo.write(f"include tvm/{libname}\n")
if os.path.isdir(path):
_, libname = os.path.split(path)
shutil.copytree(path, os.path.join(CURRENT_DIR, "tvm", libname))
fo.write(f"recursive-include tvm/{libname} *\n")
setup_kwargs = {"include_package_data": True}
def get_package_data_files():
# Relay standard libraries
return ["relay/std/prelude.rly", "relay/std/core.rly"]
def long_description_contents():
with open(pathlib.Path(CURRENT_DIR).resolve().parent / "README.md", encoding="utf-8") as readme:
description = readme.read()
return description
# Temporarily add this directory to the path so we can import the requirements generator
# tool.
sys.path.insert(0, os.path.dirname(__file__))
import gen_requirements
sys.path.pop(0)
requirements = gen_requirements.join_requirements()
extras_require = {
piece: deps for piece, (_, deps) in requirements.items() if piece not in ("all", "core")
}
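# Each remaining piece becomes a pip extra, e.g. `pip install tvm[importer-onnx]`
# (hypothetical invocation; the available extras are defined by gen_requirements).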
setup(
name="tvm",
version=__version__,
description="TVM: An End to End Tensor IR/DSL Stack for Deep Learning Systems",
long_description=long_description_contents(),
long_description_content_type="text/markdown",
url="https://tvm.apache.org/",
download_url="https://github.com/apache/tvm/tags",
author="Apache TVM",
license="Apache",
# See https://pypi.org/classifiers/
classifiers=[
"License :: OSI Approved :: Apache Software License",
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
],
keywords="machine learning",
zip_safe=False,
entry_points={"console_scripts": ["tvmc = tvm.driver.tvmc.main:main"]},
install_requires=requirements["core"][1],
extras_require=extras_require,
packages=find_packages(),
package_dir={"tvm": "tvm"},
package_data={"tvm": get_package_data_files()},
distclass=BinaryDistribution,
ext_modules=config_cython(),
**setup_kwargs,
)
if not CONDA_BUILD:
# Wheel cleanup
os.remove("MANIFEST.in")
for path in LIB_LIST:
_, libname = os.path.split(path)
path_to_be_removed = f"tvm/{libname}"
if os.path.isfile(path_to_be_removed):
os.remove(path_to_be_removed)
if os.path.isdir(path_to_be_removed):
shutil.rmtree(path_to_be_removed)
| https://github.com/zk-ml/tachikoma |
python/tvm/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=redefined-builtin, wildcard-import
"""TVM: Open Deep Learning Compiler Stack."""
import multiprocessing
import sys
import os
import traceback
# top-level alias
# tvm._ffi
from ._ffi.base import TVMError, __version__, _RUNTIME_ONLY
from ._ffi.runtime_ctypes import DataTypeCode, DataType
from ._ffi import register_object, register_func, register_extension, get_global_func
# top-level alias
# tvm.runtime
from .runtime.object import Object
from .runtime.ndarray import device, cpu, cuda, gpu, opencl, cl, vulkan, metal, mtl
from .runtime.ndarray import vpi, rocm, ext_dev, hexagon
from .runtime import ndarray as nd
# tvm.error
from . import error
# tvm.ir
from .ir import IRModule
from .ir import transform
from .ir import instrument
from .ir import container
from .ir import PoolInfo
from .ir import WorkspacePoolInfo
from .ir import ConstantPoolInfo
from .ir import PoolInfoProperties
from .ir import WorkspaceMemoryPools
from .ir import ConstantMemoryPools
from . import ir
# tvm.tir
from . import tir
# tvm.target
from . import target
# tvm.te
from . import te
# tvm.driver
from .driver import build, lower
# tvm.parser
from . import parser
# others
from . import arith
# support infra
from . import support
# Contrib initializers
from .contrib import rocm as _rocm, nvcc as _nvcc, sdaccel as _sdaccel
if not _RUNTIME_ONLY and support.libinfo().get("USE_MICRO", "OFF") == "ON":
from . import micro
# NOTE: This file should be python2 compatible so we can
# raise a proper error message when users run the package using
# an older version of python
def _should_print_backtrace():
in_pytest = "PYTEST_CURRENT_TEST" in os.environ
tvm_backtrace = os.environ.get("TVM_BACKTRACE", "0")
try:
tvm_backtrace = bool(int(tvm_backtrace))
except ValueError:
raise ValueError(
"invalid value for TVM_BACKTRACE {}, please set to 0 or 1.".format(tvm_backtrace)
)
return in_pytest or tvm_backtrace
def tvm_wrap_excepthook(exception_hook):
"""Wrap given excepthook with TVM additional work."""
def wrapper(exctype, value, trbk):
"""Clean subprocesses when TVM is interrupted."""
if exctype is error.DiagnosticError and not _should_print_backtrace():
# TODO(@jroesch): consider moving to C++?
print("note: run with `TVM_BACKTRACE=1` environment variable to display a backtrace.")
else:
exception_hook(exctype, value, trbk)
if hasattr(multiprocessing, "active_children"):
# pylint: disable=not-callable
for p in multiprocessing.active_children():
p.terminate()
return wrapper
sys.excepthook = tvm_wrap_excepthook(sys.excepthook)
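# --- Illustrative note (not part of the module): the hook above only suppresses
# backtraces for DiagnosticError. Assuming a built TVM, either of these restores
# the full backtrace for such errors:
#
#   TVM_BACKTRACE=1 python my_script.py   # opt in via environment variable
#   pytest my_test.py                     # pytest sets PYTEST_CURRENT_TEST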
| https://github.com/zk-ml/tachikoma |
python/tvm/_ffi/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""C interfacing code.
This namespace contains everything that interacts with C code.
Most TVM C-related objects are ctypes compatible, which means
they contain a handle field that is ctypes.c_void_p and can
be used via ctypes function calls.
Some performance-critical functions are implemented in cython
and have a ctypes fallback implementation.
"""
from . import _pyversion
from .base import register_error
from .registry import register_object, register_func, register_extension
from .registry import _init_api, get_global_func, get_object_type_index
| https://github.com/zk-ml/tachikoma |
python/tvm/_ffi/_ctypes/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""ctypes specific implementation of FFI"""
| https://github.com/zk-ml/tachikoma |
python/tvm/_ffi/_ctypes/ndarray.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Runtime NDArray api"""
import ctypes
from ..base import _LIB, check_call, c_str
from ..runtime_ctypes import TVMArrayHandle
from .types import RETURN_SWITCH, C_TO_PY_ARG_SWITCH, _wrap_arg_func, _return_handle
TVMPyCapsuleDestructor = ctypes.CFUNCTYPE(None, ctypes.c_void_p)
_c_str_dltensor = c_str("dltensor")
_c_str_used_dltensor = c_str("used_dltensor")
# used for PyCapsule manipulation
if hasattr(ctypes, "pythonapi"):
ctypes.pythonapi.PyCapsule_GetName.restype = ctypes.c_char_p
ctypes.pythonapi.PyCapsule_GetPointer.restype = ctypes.c_void_p
ctypes.pythonapi.PyCapsule_New.restype = ctypes.py_object
def _from_dlpack(dltensor):
dltensor = ctypes.py_object(dltensor)
if ctypes.pythonapi.PyCapsule_IsValid(dltensor, _c_str_dltensor):
ptr = ctypes.pythonapi.PyCapsule_GetPointer(dltensor, _c_str_dltensor)
# enforce type to make sure it works for all ctypes
ptr = ctypes.cast(ptr, ctypes.c_void_p)
handle = TVMArrayHandle()
check_call(_LIB.TVMArrayFromDLPack(ptr, ctypes.byref(handle)))
ctypes.pythonapi.PyCapsule_SetName(dltensor, _c_str_used_dltensor)
ctypes.pythonapi.PyCapsule_SetDestructor(dltensor, TVMPyCapsuleDestructor(0))
return _make_array(handle, False, False)
raise ValueError("Expect a dltensor field, PyCapsule can only be consumed once")
def _dlpack_deleter(pycapsule):
pycapsule = ctypes.cast(pycapsule, ctypes.py_object)
if ctypes.pythonapi.PyCapsule_IsValid(pycapsule, _c_str_dltensor):
ptr = ctypes.pythonapi.PyCapsule_GetPointer(pycapsule, _c_str_dltensor)
# enforce type to make sure it works for all ctypes
ptr = ctypes.cast(ptr, ctypes.c_void_p)
_LIB.TVMDLManagedTensorCallDeleter(ptr)
ctypes.pythonapi.PyCapsule_SetDestructor(pycapsule, None)
_c_dlpack_deleter = TVMPyCapsuleDestructor(_dlpack_deleter)
class NDArrayBase(object):
"""A simple Device/CPU Array object in runtime."""
__slots__ = ["handle", "is_view"]
# pylint: disable=no-member
def __init__(self, handle, is_view=False):
"""Initialize the function with handle
Parameters
----------
handle : TVMArrayHandle
the handle to the underlying C++ TVMArray
"""
self.handle = handle
self.is_view = is_view
def __del__(self):
if not self.is_view and _LIB:
check_call(_LIB.TVMArrayFree(self.handle))
@property
def _tvm_handle(self):
return ctypes.cast(self.handle, ctypes.c_void_p).value
def _copyto(self, target_nd):
"""Internal function that implements copy to target ndarray."""
check_call(_LIB.TVMArrayCopyFromTo(self.handle, target_nd.handle, None))
return target_nd
@property
def shape(self):
"""Shape of this array"""
return tuple(self.handle.contents.shape[i] for i in range(self.handle.contents.ndim))
def to_dlpack(self):
"""Produce an array from a DLPack Tensor without copying memory
Returns
-------
dlpack : DLPack tensor view of the array data
"""
handle = ctypes.c_void_p()
check_call(_LIB.TVMArrayToDLPack(self.handle, ctypes.byref(handle)))
return ctypes.pythonapi.PyCapsule_New(handle, _c_str_dltensor, _c_dlpack_deleter)
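# --- Illustrative sketch (not executed here): zero-copy DLPack round trip via the
# public wrappers over _from_dlpack/to_dlpack above. Assumes a built TVM and numpy.
#
#   import numpy as np
#   import tvm
#
#   arr = tvm.nd.array(np.arange(4, dtype="float32"))
#   capsule = arr.to_dlpack()            # PyCapsule named "dltensor"
#   view = tvm.nd.from_dlpack(capsule)   # consumes it; capsule renamed "used_dltensor"
#   assert view.shape == (4,)            # same memory, no copy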
def _make_array(handle, is_view, is_container):
global _TVM_ND_CLS
handle = ctypes.cast(handle, TVMArrayHandle)
if is_container:
tindex = ctypes.c_uint()
check_call(_LIB.TVMArrayGetTypeIndex(handle, ctypes.byref(tindex)))
cls = _TVM_ND_CLS.get(tindex.value, _CLASS_NDARRAY)
else:
cls = _CLASS_NDARRAY
ret = cls.__new__(cls)
ret.handle = handle
ret.is_view = is_view
return ret
_TVM_COMPATS = ()
def _reg_extension(cls, fcreate):
global _TVM_COMPATS
_TVM_COMPATS += (cls,)
if fcreate:
fret = lambda x: fcreate(_return_handle(x))
RETURN_SWITCH[cls._tvm_tcode] = fret
C_TO_PY_ARG_SWITCH[cls._tvm_tcode] = _wrap_arg_func(fret, cls._tvm_tcode)
_TVM_ND_CLS = {}
def _register_ndarray(index, cls):
global _TVM_ND_CLS
_TVM_ND_CLS[index] = cls
_CLASS_NDARRAY = None
def _set_class_ndarray(cls):
global _CLASS_NDARRAY
_CLASS_NDARRAY = cls
| https://github.com/zk-ml/tachikoma |
python/tvm/_ffi/_ctypes/object.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Runtime Object api"""
import ctypes
from ..base import _LIB, check_call
from .types import ArgTypeCode, RETURN_SWITCH, C_TO_PY_ARG_SWITCH, _wrap_arg_func
from .ndarray import _register_ndarray, NDArrayBase
ObjectHandle = ctypes.c_void_p
__init_by_constructor__ = None
"""Maps object type index to its constructor"""
OBJECT_TYPE = {}
"""Maps object type to its type index"""
OBJECT_INDEX = {}
_CLASS_OBJECT = None
def _set_class_object(object_class):
global _CLASS_OBJECT
_CLASS_OBJECT = object_class
def _register_object(index, cls):
"""register object class"""
if issubclass(cls, NDArrayBase):
_register_ndarray(index, cls)
return
OBJECT_TYPE[index] = cls
OBJECT_INDEX[cls] = index
def _get_object_type_index(cls):
"""get the type index of object class"""
return OBJECT_INDEX.get(cls)
def _return_object(x):
handle = x.v_handle
if not isinstance(handle, ObjectHandle):
handle = ObjectHandle(handle)
tindex = ctypes.c_uint()
check_call(_LIB.TVMObjectGetTypeIndex(handle, ctypes.byref(tindex)))
cls = OBJECT_TYPE.get(tindex.value, _CLASS_OBJECT)
if issubclass(cls, PyNativeObject):
obj = _CLASS_OBJECT.__new__(_CLASS_OBJECT)
obj.handle = handle
return cls.__from_tvm_object__(cls, obj)
# Avoid calling __init__ of cls, instead directly call __new__
# This allows child class to implement their own __init__
obj = cls.__new__(cls)
obj.handle = handle
return obj
RETURN_SWITCH[ArgTypeCode.OBJECT_HANDLE] = _return_object
C_TO_PY_ARG_SWITCH[ArgTypeCode.OBJECT_HANDLE] = _wrap_arg_func(
_return_object, ArgTypeCode.OBJECT_HANDLE
)
C_TO_PY_ARG_SWITCH[ArgTypeCode.OBJECT_RVALUE_REF_ARG] = _wrap_arg_func(
_return_object, ArgTypeCode.OBJECT_RVALUE_REF_ARG
)
class PyNativeObject:
"""Base class of all TVM objects that also subclass python's builtin types."""
__slots__ = []
def __init_tvm_object_by_constructor__(self, fconstructor, *args):
"""Initialize the internal tvm_object by calling constructor function.
Parameters
----------
fconstructor : Function
Constructor function.
args: list of objects
The arguments to the constructor
Note
----
We have a special calling convention to call constructor functions.
So the return object is directly set into the object
"""
# pylint: disable=assigning-non-slot
obj = _CLASS_OBJECT.__new__(_CLASS_OBJECT)
obj.__init_handle_by_constructor__(fconstructor, *args)
self.__tvm_object__ = obj
class ObjectBase(object):
"""Base object for all object types"""
__slots__ = ["handle"]
def __del__(self):
if _LIB is not None:
try:
handle = self.handle
except AttributeError:
return
check_call(_LIB.TVMObjectFree(handle))
def __init_handle_by_constructor__(self, fconstructor, *args):
"""Initialize the handle by calling constructor function.
Parameters
----------
fconstructor : Function
Constructor function.
args: list of objects
The arguments to the constructor
Note
----
We have a special calling convention to call constructor functions.
So the return handle is directly set into the Node object
instead of creating a new Node.
"""
# assign handle first to avoid error raising
# pylint: disable=not-callable
self.handle = None
handle = __init_by_constructor__(fconstructor, args)
if not isinstance(handle, ObjectHandle):
handle = ObjectHandle(handle)
self.handle = handle
def same_as(self, other):
"""Check object identity.
Parameters
----------
other : object
The other object to compare against.
Returns
-------
result : bool
The comparison result.
"""
if not isinstance(other, ObjectBase):
return False
if self.handle is None:
return other.handle is None
return self.handle.value == other.handle.value
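# --- Illustrative sketch: same_as compares handle identity, not structural
# equality. Hypothetical usage, assuming a built TVM:
#
#   import tvm
#   x = tvm.tir.Var("x", "int32")
#   y = tvm.tir.Var("x", "int32")
#   assert x.same_as(x)        # same underlying C++ object
#   assert not x.same_as(y)    # equal-looking but distinct objects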
| https://github.com/zk-ml/tachikoma |
python/tvm/_ffi/_ctypes/packed_func.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=invalid-name, protected-access, too-many-branches, global-statement, unused-import
"""Function configuration API."""
import ctypes
import traceback
from numbers import Number, Integral
from ..base import _LIB, get_last_ffi_error, py2cerror, check_call
from ..base import c_str, string_types
from ..runtime_ctypes import DataType, TVMByteArray, Device, ObjectRValueRef
from . import ndarray as _nd
from .ndarray import NDArrayBase, _make_array
from .types import TVMValue, ArgTypeCode
from .types import TVMPackedCFunc, TVMCFuncFinalizer
from .types import RETURN_SWITCH, C_TO_PY_ARG_SWITCH, _wrap_arg_func, _device_to_int64
from .object import ObjectBase, PyNativeObject, _set_class_object
from . import object as _object
PackedFuncHandle = ctypes.c_void_p
ModuleHandle = ctypes.c_void_p
ObjectHandle = ctypes.c_void_p
TVMRetValueHandle = ctypes.c_void_p
def _ctypes_free_resource(rhandle):
"""callback to free resources when it is not needed."""
pyobj = ctypes.cast(rhandle, ctypes.py_object)
ctypes.pythonapi.Py_DecRef(pyobj)
# Global callback that is always alive
TVM_FREE_PYOBJ = TVMCFuncFinalizer(_ctypes_free_resource)
ctypes.pythonapi.Py_IncRef(ctypes.py_object(TVM_FREE_PYOBJ))
def _make_packed_func(handle, is_global):
"""Make a packed function class"""
obj = _CLASS_PACKED_FUNC.__new__(_CLASS_PACKED_FUNC)
obj.is_global = is_global
obj.handle = handle
return obj
def convert_to_tvm_func(pyfunc):
"""Convert a python function to TVM function
Parameters
----------
pyfunc : python function
The python function to be converted.
Returns
-------
tvmfunc: tvm.nd.Function
The converted tvm function.
"""
local_pyfunc = pyfunc
def cfun(args, type_codes, num_args, ret, _):
"""ctypes function"""
num_args = num_args.value if isinstance(num_args, ctypes.c_int) else num_args
pyargs = (C_TO_PY_ARG_SWITCH[type_codes[i]](args[i]) for i in range(num_args))
# pylint: disable=broad-except
try:
rv = local_pyfunc(*pyargs)
except Exception:
msg = traceback.format_exc()
msg = py2cerror(msg)
_LIB.TVMAPISetLastError(c_str(msg))
return -1
if rv is not None:
if isinstance(rv, tuple):
raise ValueError("PackedFunction can only support one return value")
temp_args = []
values, tcodes, _ = _make_tvm_args((rv,), temp_args)
if not isinstance(ret, TVMRetValueHandle):
ret = TVMRetValueHandle(ret)
if _LIB.TVMCFuncSetReturn(ret, values, tcodes, ctypes.c_int(1)) != 0:
raise get_last_ffi_error()
_ = temp_args
_ = rv
return 0
handle = PackedFuncHandle()
f = TVMPackedCFunc(cfun)
# NOTE: We will need to use python-api to increase ref count of the f
# TVM_FREE_PYOBJ will be called after it is no longer needed.
pyobj = ctypes.py_object(f)
ctypes.pythonapi.Py_IncRef(pyobj)
if _LIB.TVMFuncCreateFromCFunc(f, pyobj, TVM_FREE_PYOBJ, ctypes.byref(handle)) != 0:
raise get_last_ffi_error()
return _make_packed_func(handle, False)
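# --- Illustrative sketch: wrapping a Python callable as a PackedFunc. The usual
# entry points are tvm.runtime.convert / tvm.register_func; shown with this
# module's helper for illustration, assuming a built TVM:
#
#   fadd = convert_to_tvm_func(lambda a, b: a + b)
#   assert fadd(1, 2) == 3     # the call crosses into C++ and back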
def _make_tvm_args(args, temp_args):
"""Pack arguments into c args tvm call accept"""
num_args = len(args)
values = (TVMValue * num_args)()
type_codes = (ctypes.c_int * num_args)()
for i, arg in enumerate(args):
if isinstance(arg, ObjectBase):
values[i].v_handle = arg.handle
type_codes[i] = ArgTypeCode.OBJECT_HANDLE
elif arg is None:
values[i].v_handle = None
type_codes[i] = ArgTypeCode.NULL
elif isinstance(arg, NDArrayBase):
values[i].v_handle = ctypes.cast(arg.handle, ctypes.c_void_p)
type_codes[i] = (
ArgTypeCode.NDARRAY_HANDLE if not arg.is_view else ArgTypeCode.DLTENSOR_HANDLE
)
elif isinstance(arg, PyNativeObject):
values[i].v_handle = arg.__tvm_object__.handle
type_codes[i] = ArgTypeCode.OBJECT_HANDLE
elif isinstance(arg, _nd._TVM_COMPATS):
values[i].v_handle = ctypes.c_void_p(arg._tvm_handle)
type_codes[i] = arg.__class__._tvm_tcode
elif isinstance(arg, Integral):
values[i].v_int64 = arg
type_codes[i] = ArgTypeCode.INT
elif isinstance(arg, Number):
values[i].v_float64 = arg
type_codes[i] = ArgTypeCode.FLOAT
elif isinstance(arg, DataType):
values[i].v_str = c_str(str(arg))
type_codes[i] = ArgTypeCode.STR
elif isinstance(arg, Device):
values[i].v_int64 = _device_to_int64(arg)
type_codes[i] = ArgTypeCode.DLDEVICE
elif isinstance(arg, (bytearray, bytes)):
# from_buffer only takes in bytearray.
if isinstance(arg, bytes):
byte_arr = bytearray(arg)
temp_args.append(byte_arr)
arg = byte_arr
arr = TVMByteArray()
arr.data = ctypes.cast(
(ctypes.c_byte * len(arg)).from_buffer(arg), ctypes.POINTER(ctypes.c_byte)
)
arr.size = len(arg)
values[i].v_handle = ctypes.c_void_p(ctypes.addressof(arr))
temp_args.append(arr)
type_codes[i] = ArgTypeCode.BYTES
elif isinstance(arg, string_types):
values[i].v_str = c_str(arg)
type_codes[i] = ArgTypeCode.STR
elif isinstance(arg, (list, tuple, dict, _CLASS_OBJECT_GENERIC)):
arg = _FUNC_CONVERT_TO_OBJECT(arg)
values[i].v_handle = arg.handle
type_codes[i] = ArgTypeCode.OBJECT_HANDLE
temp_args.append(arg)
elif isinstance(arg, _CLASS_MODULE):
values[i].v_handle = arg.handle
type_codes[i] = ArgTypeCode.MODULE_HANDLE
elif isinstance(arg, PackedFuncBase):
values[i].v_handle = arg.handle
type_codes[i] = ArgTypeCode.PACKED_FUNC_HANDLE
elif isinstance(arg, ctypes.c_void_p):
values[i].v_handle = arg
type_codes[i] = ArgTypeCode.HANDLE
elif isinstance(arg, ObjectRValueRef):
values[i].v_handle = ctypes.cast(ctypes.byref(arg.obj.handle), ctypes.c_void_p)
type_codes[i] = ArgTypeCode.OBJECT_RVALUE_REF_ARG
elif callable(arg):
arg = convert_to_tvm_func(arg)
values[i].v_handle = arg.handle
type_codes[i] = ArgTypeCode.PACKED_FUNC_HANDLE
temp_args.append(arg)
else:
raise TypeError("Don't know how to handle type %s" % type(arg))
return values, type_codes, num_args
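# --- Illustrative summary of the dispatch above (Python value -> ArgTypeCode):
#   None -> NULL, int -> INT, float -> FLOAT, str -> STR, bytes/bytearray -> BYTES,
#   DataType -> STR, Device -> DLDEVICE, NDArray -> NDARRAY_HANDLE (DLTENSOR_HANDLE
#   for views), Object -> OBJECT_HANDLE, Module -> MODULE_HANDLE,
#   list/tuple/dict -> converted to Object, callables -> PACKED_FUNC_HANDLE.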
class PackedFuncBase(object):
"""Function base."""
__slots__ = ["handle", "is_global"]
# pylint: disable=no-member
def __init__(self, handle, is_global):
"""Initialize the function with handle
Parameters
----------
handle : PackedFuncHandle
the handle to the underlying function.
is_global : bool
Whether this is a global function in python
"""
self.handle = handle
self.is_global = is_global
def __del__(self):
if not self.is_global and _LIB is not None:
if _LIB.TVMFuncFree(self.handle) != 0:
raise get_last_ffi_error()
def __call__(self, *args):
"""Call the function with positional arguments
Parameters
----------
args : list
The positional arguments to the function call.
"""
temp_args = []
values, tcodes, num_args = _make_tvm_args(args, temp_args)
ret_val = TVMValue()
ret_tcode = ctypes.c_int()
if (
_LIB.TVMFuncCall(
self.handle,
values,
tcodes,
ctypes.c_int(num_args),
ctypes.byref(ret_val),
ctypes.byref(ret_tcode),
)
!= 0
):
raise get_last_ffi_error()
_ = temp_args
_ = args
return RETURN_SWITCH[ret_tcode.value](ret_val)
def __init_handle_by_constructor__(fconstructor, args):
"""Initialize handle by constructor"""
temp_args = []
values, tcodes, num_args = _make_tvm_args(args, temp_args)
ret_val = TVMValue()
ret_tcode = ctypes.c_int()
if (
_LIB.TVMFuncCall(
fconstructor.handle,
values,
tcodes,
ctypes.c_int(num_args),
ctypes.byref(ret_val),
ctypes.byref(ret_tcode),
)
!= 0
):
raise get_last_ffi_error()
_ = temp_args
_ = args
assert ret_tcode.value == ArgTypeCode.OBJECT_HANDLE
handle = ret_val.v_handle
return handle
def _return_module(x):
"""Return function"""
handle = x.v_handle
if not isinstance(handle, ModuleHandle):
handle = ModuleHandle(handle)
return _CLASS_MODULE(handle)
def _handle_return_func(x):
"""Return function"""
handle = x.v_handle
if not isinstance(handle, PackedFuncHandle):
handle = PackedFuncHandle(handle)
return _CLASS_PACKED_FUNC(handle, False)
def _get_global_func(name, allow_missing=False):
handle = PackedFuncHandle()
check_call(_LIB.TVMFuncGetGlobal(c_str(name), ctypes.byref(handle)))
if handle.value:
return _make_packed_func(handle, False)
if allow_missing:
return None
raise ValueError("Cannot find global function %s" % name)
# setup return handle for function type
_object.__init_by_constructor__ = __init_handle_by_constructor__
RETURN_SWITCH[ArgTypeCode.PACKED_FUNC_HANDLE] = _handle_return_func
RETURN_SWITCH[ArgTypeCode.MODULE_HANDLE] = _return_module
RETURN_SWITCH[ArgTypeCode.NDARRAY_HANDLE] = lambda x: _make_array(x.v_handle, False, True)
C_TO_PY_ARG_SWITCH[ArgTypeCode.PACKED_FUNC_HANDLE] = _wrap_arg_func(
_handle_return_func, ArgTypeCode.PACKED_FUNC_HANDLE
)
C_TO_PY_ARG_SWITCH[ArgTypeCode.MODULE_HANDLE] = _wrap_arg_func(
_return_module, ArgTypeCode.MODULE_HANDLE
)
C_TO_PY_ARG_SWITCH[ArgTypeCode.DLTENSOR_HANDLE] = lambda x: _make_array(x.v_handle, True, False)
C_TO_PY_ARG_SWITCH[ArgTypeCode.NDARRAY_HANDLE] = _wrap_arg_func(
lambda x: _make_array(x.v_handle, False, True), ArgTypeCode.NDARRAY_HANDLE
)
_CLASS_MODULE = None
_CLASS_PACKED_FUNC = None
_CLASS_OBJECT_GENERIC = None
_FUNC_CONVERT_TO_OBJECT = None
def _set_class_module(module_class):
"""Initialize the module."""
global _CLASS_MODULE
_CLASS_MODULE = module_class
def _set_class_packed_func(packed_func_class):
global _CLASS_PACKED_FUNC
_CLASS_PACKED_FUNC = packed_func_class
def _set_class_object_generic(object_generic_class, func_convert_to_object):
global _CLASS_OBJECT_GENERIC
global _FUNC_CONVERT_TO_OBJECT
_CLASS_OBJECT_GENERIC = object_generic_class
_FUNC_CONVERT_TO_OBJECT = func_convert_to_object
| https://github.com/zk-ml/tachikoma |
python/tvm/_ffi/_ctypes/types.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The C Types used in API."""
# pylint: disable=invalid-name
import ctypes
import struct
from ..base import py_str, check_call, _LIB
from ..runtime_ctypes import TVMByteArray, ArgTypeCode, Device
class TVMValue(ctypes.Union):
"""TVMValue in C API"""
_fields_ = [
("v_int64", ctypes.c_int64),
("v_float64", ctypes.c_double),
("v_handle", ctypes.c_void_p),
("v_str", ctypes.c_char_p),
]
TVMPackedCFunc = ctypes.CFUNCTYPE(
ctypes.c_int,
ctypes.POINTER(TVMValue),
ctypes.POINTER(ctypes.c_int),
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_void_p,
)
TVMCFuncFinalizer = ctypes.CFUNCTYPE(None, ctypes.c_void_p)
def _return_handle(x):
"""return handle"""
handle = x.v_handle
if not isinstance(handle, ctypes.c_void_p):
handle = ctypes.c_void_p(handle)
return handle
def _return_bytes(x):
"""return bytes"""
handle = x.v_handle
if not isinstance(handle, ctypes.c_void_p):
handle = ctypes.c_void_p(handle)
arr = ctypes.cast(handle, ctypes.POINTER(TVMByteArray))[0]
size = arr.size
res = bytearray(size)
rptr = (ctypes.c_byte * size).from_buffer(res)
if not ctypes.memmove(rptr, arr.data, size):
raise RuntimeError("memmove failed")
return res
def _return_device(value):
"""return Device"""
# use bit unpacking from int64 view
# We use this to get around a ctypes issue with Unions of Structures
data = struct.pack("=q", value.v_int64)
arr = struct.unpack("=ii", data)
return Device(arr[0], arr[1])
def _wrap_arg_func(return_f, type_code):
def _wrap_func(x):
tcode = ctypes.c_int(type_code)
check_call(_LIB.TVMCbArgToReturn(ctypes.byref(x), ctypes.byref(tcode)))
return return_f(x)
return _wrap_func
def _device_to_int64(dev):
"""Pack context into int64 in native endian"""
data = struct.pack("=ii", dev.device_type, dev.device_id)
return struct.unpack("=q", data)[0]
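# --- Illustrative sketch: the int64 packing used by _device_to_int64 and
# _return_device is just two native-endian int32s side by side:
#
#   import struct
#   packed = struct.unpack("=q", struct.pack("=ii", 2, 0))[0]   # kDLCUDA, id 0
#   assert struct.unpack("=ii", struct.pack("=q", packed)) == (2, 0)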
RETURN_SWITCH = {
ArgTypeCode.INT: lambda x: x.v_int64,
ArgTypeCode.FLOAT: lambda x: x.v_float64,
ArgTypeCode.HANDLE: _return_handle,
ArgTypeCode.NULL: lambda x: None,
ArgTypeCode.STR: lambda x: py_str(x.v_str),
ArgTypeCode.BYTES: _return_bytes,
ArgTypeCode.DLDEVICE: _return_device,
}
C_TO_PY_ARG_SWITCH = {
ArgTypeCode.INT: lambda x: x.v_int64,
ArgTypeCode.FLOAT: lambda x: x.v_float64,
ArgTypeCode.HANDLE: _return_handle,
ArgTypeCode.NULL: lambda x: None,
ArgTypeCode.STR: lambda x: py_str(x.v_str),
ArgTypeCode.BYTES: _return_bytes,
ArgTypeCode.DLDEVICE: _return_device,
}
| https://github.com/zk-ml/tachikoma |
python/tvm/_ffi/_cy2/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""cython2 namespace"""
| https://github.com/zk-ml/tachikoma |
python/tvm/_ffi/_cy3/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""cython3 namespace"""
| https://github.com/zk-ml/tachikoma |
python/tvm/_ffi/_pyversion.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Python version check
"""
import sys
# ----------------------------
# Python3 version.
# ----------------------------
if sys.version_info < (3, 6):
PY3STATEMENT = "The minimal Python requirement is Python 3.6"
raise Exception(PY3STATEMENT)
| https://github.com/zk-ml/tachikoma |
python/tvm/_ffi/base.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=invalid-name, import-outside-toplevel
"""Base library for TVM FFI."""
import sys
import os
import ctypes
import numpy as np
from . import libinfo
# ----------------------------
# library loading
# ----------------------------
string_types = (str,)
integer_types = (int, np.int32)
numeric_types = integer_types + (float, np.float16, np.float32)
# this function is needed for python3
# to convert ctypes.c_char_p.value back to python str
if sys.platform == "win32":
def _py_str(x):
try:
return x.decode("utf-8")
except UnicodeDecodeError:
encoding = "cp" + str(ctypes.cdll.kernel32.GetACP())
return x.decode(encoding)
py_str = _py_str
else:
py_str = lambda x: x.decode("utf-8")
def _load_lib():
"""Load libary by searching possible path."""
lib_path = libinfo.find_lib_path()
# The dll search path need to be added explicitly in
# windows after python 3.8
if sys.platform.startswith("win32") and sys.version_info >= (3, 8):
for path in libinfo.get_dll_directories():
os.add_dll_directory(path)
lib = ctypes.CDLL(lib_path[0], ctypes.RTLD_GLOBAL)
lib.TVMGetLastError.restype = ctypes.c_char_p
return lib, os.path.basename(lib_path[0])
try:
# The following import is needed for TVM to work with pdb
import readline # pylint: disable=unused-import
except ImportError:
pass
# version number
__version__ = libinfo.__version__
# library instance
_LIB, _LIB_NAME = _load_lib()
# Whether we are runtime only
_RUNTIME_ONLY = "runtime" in _LIB_NAME
# The FFI mode of TVM
_FFI_MODE = os.environ.get("TVM_FFI", "auto")
# ----------------------------
# helper function in ctypes.
# ----------------------------
def c_str(string):
"""Create ctypes char * from a python string
Parameters
----------
string : string type
python string
Returns
-------
str : c_char_p
A char pointer that can be passed to C API
"""
return ctypes.c_char_p(string.encode("utf-8"))
def c_array(ctype, values):
"""Create ctypes array from a python array
Parameters
----------
ctype : ctypes data type
data type of the array we want to convert to
values : tuple or list
data content
Returns
-------
out : ctypes array
Created ctypes array
"""
return (ctype * len(values))(*values)
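# For example, c_array(ctypes.c_int, [1, 2, 3]) yields a ctypes array of
# three c_int values suitable for passing to C APIs.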
def decorate(func, fwrapped):
"""A wrapper call of decorator package, differs to call time
Parameters
----------
func : function
The original function
fwrapped : function
The wrapped function
"""
import decorator
return decorator.decorate(func, fwrapped)
# -----------------------------------------
# Base code for structured error handling.
# -----------------------------------------
# Maps error type to its constructor
ERROR_TYPE = {}
class TVMError(RuntimeError):
"""Default error thrown by TVM functions.
TVMError will be raised if you do not give any error type specification.
"""
def register_error(func_name=None, cls=None):
"""Register an error class so it can be recognized by the ffi error handler.
Parameters
----------
func_name : str or function or class
The name of the error function.
cls : function
The function to create the class
Returns
-------
fregister : function
Register function if f is not specified.
Examples
--------
.. code-block:: python
@tvm.error.register_error
class MyError(RuntimeError):
pass
err_inst = tvm.error.create_ffi_error("MyError: xyz")
assert isinstance(err_inst, MyError)
"""
if callable(func_name):
cls = func_name
func_name = cls.__name__
def register(mycls):
"""internal register function"""
err_name = func_name if isinstance(func_name, str) else mycls.__name__
ERROR_TYPE[err_name] = mycls
return mycls
if cls is None:
return register
return register(cls)
def _valid_error_name(name):
"""Check whether name is a valid error name."""
return all(x.isalnum() or x in "_." for x in name)
def _find_error_type(line):
"""Find the error name given the first line of the error message.
Parameters
----------
line : str
The first line of error message.
Returns
-------
name : str
The error name
"""
if sys.platform == "win32":
# Stack traces aren't logged on Windows due to a DMLC limitation,
# so we should try to get the underlying error another way.
# DMLC formats errors "[timestamp] file:line: ErrorMessage"
# ErrorMessage is usually formatted "ErrorType: message"
# We can try to extract the error type using the final ":"
end_pos = line.rfind(":")
if end_pos == -1:
return None
start_pos = line.rfind(":", 0, end_pos)
if start_pos == -1:
err_name = line[:end_pos].strip()
else:
err_name = line[start_pos + 1 : end_pos].strip()
if _valid_error_name(err_name):
return err_name
return None
end_pos = line.find(":")
if end_pos == -1:
return None
err_name = line[:end_pos]
if _valid_error_name(err_name):
return err_name
return None
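# --- Illustrative sketch (hypothetical messages):
#
#   _find_error_type("ValueError: bad input")            # -> "ValueError"
#   _find_error_type("no colon here")                    # -> None
#   _find_error_type("tvm.error.InternalError: oops")    # -> "tvm.error.InternalError"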
def c2pyerror(err_msg):
"""Translate C API error message to python style.
Parameters
----------
err_msg : str
The error message.
Returns
-------
new_msg : str
Translated message.
err_type : str
Detected error type.
"""
arr = err_msg.split("\n")
if arr[-1] == "":
arr.pop()
err_type = _find_error_type(arr[0])
trace_mode = False
stack_trace = []
message = []
for line in arr:
if trace_mode:
if line.startswith("        ") and len(stack_trace) > 0:
# deeper-indented lines continue the previous stack frame
stack_trace[-1] += "\n" + line
elif line.startswith("  "):
stack_trace.append(line)
else:
trace_mode = False
if not trace_mode:
if line.startswith("Stack trace"):
trace_mode = True
else:
message.append(line)
out_msg = ""
if stack_trace:
out_msg += "Traceback (most recent call last):\n"
out_msg += "\n".join(reversed(stack_trace)) + "\n"
out_msg += "\n".join(message)
return out_msg, err_type
def py2cerror(err_msg):
"""Translate python style error message to C style.
Parameters
----------
err_msg : str
The error message.
Returns
-------
new_msg : str
Translated message.
"""
arr = err_msg.split("\n")
if arr[-1] == "":
arr.pop()
trace_mode = False
stack_trace = []
message = []
for line in arr:
if trace_mode:
if line.startswith("  "):
stack_trace.append(line)
else:
trace_mode = False
if not trace_mode:
if line.find("Traceback") != -1:
trace_mode = True
else:
message.append(line)
# Remove the first error name if there are two of them.
# RuntimeError: MyErrorName: message => MyErrorName: message
head_arr = message[0].split(":", 3)
if len(head_arr) >= 3 and _valid_error_name(head_arr[1].strip()):
head_arr[1] = head_arr[1].strip()
message[0] = ":".join(head_arr[1:])
# reverse the stack trace.
out_msg = "\n".join(message)
if stack_trace:
out_msg += "\nStack trace:\n"
out_msg += "\n".join(reversed(stack_trace)) + "\n"
return out_msg
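# --- Illustrative sketch: c2pyerror/py2cerror are (roughly) inverse translations.
# Hypothetical C-side message, for illustration only:
#
#   c_msg = "ValueError: bad input\nStack trace:\n  0: frame_a\n  1: frame_b\n"
#   py_msg, err_type = c2pyerror(c_msg)
#   # err_type == "ValueError"; py_msg lists frame_b before frame_a under
#   # "Traceback (most recent call last):"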
def get_last_ffi_error():
"""Create error object given result of TVMGetLastError.
Returns
-------
err : object
The error object based on the err_msg
"""
c_err_msg = py_str(_LIB.TVMGetLastError())
py_err_msg, err_type = c2pyerror(c_err_msg)
if err_type is not None and err_type.startswith("tvm.error."):
err_type = err_type[10:]
return ERROR_TYPE.get(err_type, TVMError)(py_err_msg)
def check_call(ret):
"""Check the return value of C API call
This function will raise exception when error occurs.
Wrap every API call with this function
Parameters
----------
ret : int
return value from API calls
"""
if ret != 0:
raise get_last_ffi_error()
| https://github.com/zk-ml/tachikoma |
python/tvm/_ffi/libinfo.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Library information."""
import sys
import os
def split_env_var(env_var, split):
"""Splits environment variable string.
Parameters
----------
env_var : str
Name of environment variable.
split : str
String to split env_var on.
Returns
-------
splits : list(string)
If env_var exists, split env_var. Otherwise, empty list.
"""
if os.environ.get(env_var, None):
return [p.strip() for p in os.environ[env_var].split(split)]
return []
def get_dll_directories():
"""Get the possible dll directories"""
# NB: This will either be the source directory (if TVM is run
# inplace) or the install directory (if TVM is installed).
# An installed TVM's curr_path will look something like:
# $PREFIX/lib/python3.6/site-packages/tvm/_ffi
ffi_dir = os.path.dirname(os.path.realpath(os.path.expanduser(__file__)))
source_dir = os.path.join(ffi_dir, "..", "..", "..")
install_lib_dir = os.path.join(ffi_dir, "..", "..", "..", "..")
dll_path = []
if os.environ.get("TVM_LIBRARY_PATH", None):
dll_path.append(os.environ["TVM_LIBRARY_PATH"])
if sys.platform.startswith("linux") or sys.platform.startswith("freebsd"):
dll_path.extend(split_env_var("LD_LIBRARY_PATH", ":"))
dll_path.extend(split_env_var("PATH", ":"))
elif sys.platform.startswith("darwin"):
dll_path.extend(split_env_var("DYLD_LIBRARY_PATH", ":"))
dll_path.extend(split_env_var("PATH", ":"))
elif sys.platform.startswith("win32"):
dll_path.extend(split_env_var("PATH", ";"))
# Pip lib directory
dll_path.append(os.path.join(ffi_dir, ".."))
# Default cmake build directory
dll_path.append(os.path.join(source_dir, "build"))
dll_path.append(os.path.join(source_dir, "build", "Release"))
# Default make build directory
dll_path.append(os.path.join(source_dir, "lib"))
dll_path.append(install_lib_dir)
if os.path.isdir(source_dir):
dll_path.append(os.path.join(source_dir, "web", "dist", "wasm"))
dll_path.append(os.path.join(source_dir, "web", "dist"))
dll_path = [os.path.realpath(x) for x in dll_path]
return [x for x in dll_path if os.path.isdir(x)]
def find_lib_path(name=None, search_path=None, optional=False):
"""Find dynamic library files.
Parameters
----------
name : list of str
List of names to be found.
Returns
-------
lib_path : list(string)
List of all found paths to the libraries
"""
use_runtime = os.environ.get("TVM_USE_RUNTIME_LIB", False)
dll_path = get_dll_directories()
if search_path is not None:
if isinstance(search_path, list):
dll_path = dll_path + search_path
else:
dll_path.append(search_path)
if name is not None:
if isinstance(name, list):
lib_dll_path = []
for n in name:
lib_dll_path += [os.path.join(p, n) for p in dll_path]
else:
lib_dll_path = [os.path.join(p, name) for p in dll_path]
runtime_dll_path = []
else:
if sys.platform.startswith("win32"):
lib_dll_names = ["libtvm.dll", "tvm.dll"]
runtime_dll_names = ["libtvm_runtime.dll", "tvm_runtime.dll"]
elif sys.platform.startswith("darwin"):
lib_dll_names = ["libtvm.dylib"]
runtime_dll_names = ["libtvm_runtime.dylib"]
else:
lib_dll_names = ["libtvm.so"]
runtime_dll_names = ["libtvm_runtime.so"]
name = lib_dll_names + runtime_dll_names
lib_dll_path = [os.path.join(p, name) for name in lib_dll_names for p in dll_path]
runtime_dll_path = [os.path.join(p, name) for name in runtime_dll_names for p in dll_path]
if not use_runtime:
# try to find lib_dll_path
lib_found = [p for p in lib_dll_path if os.path.exists(p) and os.path.isfile(p)]
lib_found += [p for p in runtime_dll_path if os.path.exists(p) and os.path.isfile(p)]
else:
# try to find runtime_dll_path
use_runtime = True
lib_found = [p for p in runtime_dll_path if os.path.exists(p) and os.path.isfile(p)]
if not lib_found:
if not optional:
message = (
f"Cannot find libraries: {name}\n"
+ "List of candidates:\n"
+ "\n".join(lib_dll_path + runtime_dll_path)
)
raise RuntimeError(message)
return None
if use_runtime:
sys.stderr.write("Loading runtime library %s... exec only\n" % lib_found[0])
sys.stderr.flush()
return lib_found
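# --- Illustrative sketch: resolving the runtime-only library. Assumes a built
# TVM tree; the resulting path is hypothetical:
#
#   os.environ["TVM_USE_RUNTIME_LIB"] = "1"
#   paths = find_lib_path()
#   # e.g. ["/path/to/tvm/build/libtvm_runtime.so"] -- runtime candidates only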
def find_include_path(name=None, search_path=None, optional=False):
"""Find header files for C compilation.
Parameters
----------
name : list of str
List of directory names to be searched.
Returns
-------
include_path : list(string)
List of all found paths to header files.
"""
ffi_dir = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
source_dir = os.path.join(ffi_dir, "..", "..", "..")
third_party_dir = os.path.join(source_dir, "3rdparty")
header_path = []
if os.environ.get("TVM_INCLUDE_PATH", None):
header_path.append(os.environ["TVM_INCLUDE_PATH"])
header_path.append(source_dir)
header_path.append(third_party_dir)
header_path = [os.path.abspath(x) for x in header_path]
if search_path is not None:
if isinstance(search_path, list):
header_path = header_path + search_path
else:
header_path.append(search_path)
if name is not None:
if isinstance(name, list):
tvm_include_path = []
for n in name:
tvm_include_path += [os.path.join(p, n) for p in header_path]
else:
tvm_include_path = [os.path.join(p, name) for p in header_path]
dlpack_include_path = []
dmlc_include_path = []
else:
tvm_include_path = [os.path.join(p, "include") for p in header_path]
dlpack_include_path = [os.path.join(p, "dlpack/include") for p in header_path]
dmlc_include_path = [os.path.join(p, "dmlc-core/include") for p in header_path]
# try to find include path
include_found = [p for p in tvm_include_path if os.path.exists(p) and os.path.isdir(p)]
include_found += [p for p in dlpack_include_path if os.path.exists(p) and os.path.isdir(p)]
include_found += [p for p in dmlc_include_path if os.path.exists(p) and os.path.isdir(p)]
if not include_found:
message = (
"Cannot find the files.\n"
+ "List of candidates:\n"
+ str("\n".join(tvm_include_path + dlpack_include_path))
)
if not optional:
raise RuntimeError(message)
return None
return include_found
# current version
# We use the version of the incoming release for code
# that is under development.
# The following line is set by tvm/python/update_version.py
__version__ = "0.11.dev0"
| https://github.com/zk-ml/tachikoma |
python/tvm/_ffi/registry.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-import
"""FFI registry to register function and objects."""
import sys
import ctypes
from .base import _LIB, check_call, py_str, c_str, string_types, _FFI_MODE, _RUNTIME_ONLY
try:
# pylint: disable=wrong-import-position,unused-import
if _FFI_MODE == "ctypes":
raise ImportError()
from ._cy3.core import _register_object, _get_object_type_index
from ._cy3.core import _reg_extension
from ._cy3.core import convert_to_tvm_func, _get_global_func, PackedFuncBase
except (RuntimeError, ImportError) as error:
# pylint: disable=wrong-import-position,unused-import
if _FFI_MODE == "cython":
raise error
from ._ctypes.object import _register_object, _get_object_type_index
from ._ctypes.ndarray import _reg_extension
from ._ctypes.packed_func import convert_to_tvm_func, _get_global_func, PackedFuncBase
def register_object(type_key=None):
"""register object type.
Parameters
----------
type_key : str or cls
The type key of the node
Examples
--------
The following code registers MyObject
using type key "test.MyObject"
.. code-block:: python
@tvm.register_object("test.MyObject")
class MyObject(Object):
pass
"""
object_name = type_key if isinstance(type_key, str) else type_key.__name__
def register(cls):
"""internal register function"""
if hasattr(cls, "_type_index"):
tindex = cls._type_index
else:
tidx = ctypes.c_uint()
if not _RUNTIME_ONLY:
check_call(_LIB.TVMObjectTypeKey2Index(c_str(object_name), ctypes.byref(tidx)))
else:
# directly skip unknown objects during runtime.
ret = _LIB.TVMObjectTypeKey2Index(c_str(object_name), ctypes.byref(tidx))
if ret != 0:
return cls
tindex = tidx.value
_register_object(tindex, cls)
return cls
if isinstance(type_key, str):
return register
return register(type_key)
def get_object_type_index(cls):
"""
Get type index of object type
Parameters
----------
cls : type
The object type to get type index for.
Returns
-------
type_index : Optional[int]
The type index, or None if type not found in the registry.
"""
return _get_object_type_index(cls)
def register_extension(cls, fcreate=None):
"""Register a extension class to TVM.
After the class is registered, the class will be able
to directly pass as Function argument generated by TVM.
Parameters
----------
cls : class
The class object to be registered as extension.
fcreate : function, optional
The creation function to create a class object given handle value.
Note
----
The registered class is required to have one property: _tvm_handle.
If the registered class is a subclass of NDArray,
it is required to have a class attribute _array_type_code.
Otherwise, it is required to have a class attribute _tvm_tcode.
- ```_tvm_handle``` returns an integer representing the address of the handle.
- ```_tvm_tcode``` or ```_array_type_code``` gives an integer representing the
type code of the class.
Returns
-------
cls : class
The class being registered.
Example
-------
The following code registers user defined class
MyTensor to be DLTensor compatible.
.. code-block:: python
@tvm.register_extension
class MyTensor(object):
_tvm_tcode = tvm.ArgTypeCode.ARRAY_HANDLE
def __init__(self):
self.handle = _LIB.NewDLTensor()
@property
def _tvm_handle(self):
return self.handle.value
"""
assert hasattr(cls, "_tvm_tcode")
if fcreate:
raise ValueError("Extension with fcreate is no longer supported")
_reg_extension(cls, fcreate)
return cls
def register_func(func_name, f=None, override=False):
"""Register global function
Parameters
----------
func_name : str or function
The function name
f : function, optional
The function to be registered.
override : bool, optional
Whether override existing entry.
Returns
-------
fregister : function
Register function if f is not specified.
Examples
--------
The following code registers my_packed_func as global function.
Note that we simply get it back from global function table to invoke
it from python side. However, we can also invoke the same function
from C++ backend, or in the compiled TVM code.
.. code-block:: python
targs = (10, 10.0, "hello")
@tvm.register_func
def my_packed_func(*args):
assert(tuple(args) == targs)
return 10
# Get it out from global function table
f = tvm.get_global_func("my_packed_func")
assert isinstance(f, tvm.PackedFunc)
y = f(*targs)
assert y == 10
"""
if callable(func_name):
f = func_name
func_name = f.__name__
if not isinstance(func_name, str):
raise ValueError("expect string function name")
ioverride = ctypes.c_int(override)
def register(myf):
"""internal register function"""
if not isinstance(myf, PackedFuncBase):
myf = convert_to_tvm_func(myf)
check_call(_LIB.TVMFuncRegisterGlobal(c_str(func_name), myf.handle, ioverride))
return myf
if f:
return register(f)
return register
def get_global_func(name, allow_missing=False):
"""Get a global function by name
Parameters
----------
name : str
The name of the global function
allow_missing : bool
Whether to allow a missing function or to raise an error.
Returns
-------
func : PackedFunc
The function to be returned, None if function is missing.
"""
return _get_global_func(name, allow_missing)
def list_global_func_names():
"""Get list of global functions registered.
Returns
-------
names : list
List of global functions names.
"""
plist = ctypes.POINTER(ctypes.c_char_p)()
size = ctypes.c_uint()
check_call(_LIB.TVMFuncListGlobalNames(ctypes.byref(size), ctypes.byref(plist)))
fnames = []
for i in range(size.value):
fnames.append(py_str(plist[i]))
return fnames
def extract_ext_funcs(finit):
"""
Extract the extension PackedFuncs from a C module.
Parameters
----------
finit : ctypes function
a ctypes that takes signature of TVMExtensionDeclarer
Returns
-------
fdict : dict of str to Function
The extracted functions
"""
fdict = {}
def _list(name, func):
fdict[name] = func
myf = convert_to_tvm_func(_list)
ret = finit(myf.handle)
_ = myf
if ret != 0:
raise RuntimeError("cannot initialize with %s" % finit)
return fdict
def remove_global_func(name):
"""Remove a global function by name
Parameters
----------
name : str
The name of the global function
"""
check_call(_LIB.TVMFuncRemoveGlobal(c_str(name)))
def _get_api(f):
flocal = f
flocal.is_global = True
return flocal
def _init_api(namespace, target_module_name=None):
"""Initialize api for a given module name
Parameters
----------
namespace : str
The namespace of the source registry
target_module_name : str
The target module name if different from namespace
"""
target_module_name = target_module_name if target_module_name else namespace
if namespace.startswith("tvm."):
_init_api_prefix(target_module_name, namespace[4:])
else:
_init_api_prefix(target_module_name, namespace)
def _init_api_prefix(module_name, prefix):
module = sys.modules[module_name]
for name in list_global_func_names():
if not name.startswith(prefix):
continue
fname = name[len(prefix) + 1 :]
target_module = module
if fname.find(".") != -1:
continue
f = get_global_func(name)
ff = _get_api(f)
ff.__name__ = fname
ff.__doc__ = "TVM PackedFunc %s. " % fname
setattr(target_module, ff.__name__, ff)
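# --- Illustrative sketch: how _init_api surfaces C++ registry entries.
# Hypothetical global function name, for illustration:
#
#   # Suppose the C++ side registered "ir.SomeFunc".  After
#   #   _init_api("tvm.ir")
#   # the module tvm.ir gains an attribute SomeFunc wrapping that PackedFunc,
#   # callable as tvm.ir.SomeFunc(...).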
| https://github.com/zk-ml/tachikoma |
python/tvm/_ffi/runtime_ctypes.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Common runtime ctypes."""
# pylint: disable=invalid-name
import ctypes
import json
import numpy as np
from .base import _LIB, check_call
tvm_shape_index_t = ctypes.c_int64
class ArgTypeCode(object):
"""Type code used in API calls"""
INT = 0
UINT = 1
FLOAT = 2
HANDLE = 3
NULL = 4
TVM_TYPE = 5
DLDEVICE = 6
DLTENSOR_HANDLE = 7
OBJECT_HANDLE = 8
MODULE_HANDLE = 9
PACKED_FUNC_HANDLE = 10
STR = 11
BYTES = 12
NDARRAY_HANDLE = 13
OBJECT_RVALUE_REF_ARG = 14
EXT_BEGIN = 15
class TVMByteArray(ctypes.Structure):
"""Temp data structure for byte array."""
_fields_ = [("data", ctypes.POINTER(ctypes.c_byte)), ("size", ctypes.c_size_t)]
class DataTypeCode(object):
"""DataType code in DLTensor."""
INT = 0
UINT = 1
FLOAT = 2
HANDLE = 3
BFLOAT = 4
class DataType(ctypes.Structure):
"""TVM datatype structure"""
_fields_ = [("type_code", ctypes.c_uint8), ("bits", ctypes.c_uint8), ("lanes", ctypes.c_uint16)]
CODE2STR = {
DataTypeCode.INT: "int",
DataTypeCode.UINT: "uint",
DataTypeCode.FLOAT: "float",
DataTypeCode.HANDLE: "handle",
DataTypeCode.BFLOAT: "bfloat",
}
NUMPY2STR = {
np.dtype(np.bool_): "bool",
np.dtype(np.int8): "int8",
np.dtype(np.int16): "int16",
np.dtype(np.int32): "int32",
np.dtype(np.int64): "int64",
np.dtype(np.uint8): "uint8",
np.dtype(np.uint16): "uint16",
np.dtype(np.uint32): "uint32",
np.dtype(np.uint64): "uint64",
np.dtype(np.float16): "float16",
np.dtype(np.float32): "float32",
np.dtype(np.float64): "float64",
np.dtype(np.float_): "float64",
}
STR2DTYPE = {
"bool": {"type_code": DataTypeCode.UINT, "bits": 1, "lanes": 1},
"int8": {"type_code": DataTypeCode.INT, "bits": 8, "lanes": 1},
"int16": {"type_code": DataTypeCode.INT, "bits": 16, "lanes": 1},
"int32": {"type_code": DataTypeCode.INT, "bits": 32, "lanes": 1},
"int64": {"type_code": DataTypeCode.INT, "bits": 64, "lanes": 1},
"uint8": {"type_code": DataTypeCode.UINT, "bits": 8, "lanes": 1},
"uint16": {"type_code": DataTypeCode.UINT, "bits": 16, "lanes": 1},
"uint32": {"type_code": DataTypeCode.UINT, "bits": 32, "lanes": 1},
"uint64": {"type_code": DataTypeCode.UINT, "bits": 64, "lanes": 1},
"float16": {"type_code": DataTypeCode.FLOAT, "bits": 16, "lanes": 1},
"float32": {"type_code": DataTypeCode.FLOAT, "bits": 32, "lanes": 1},
"float64": {"type_code": DataTypeCode.FLOAT, "bits": 64, "lanes": 1},
}
def __init__(self, type_str):
super(DataType, self).__init__()
numpy_str_map = DataType.NUMPY2STR
if type_str in numpy_str_map:
type_str = numpy_str_map[type_str]
elif isinstance(type_str, np.dtype):
type_str = str(type_str)
assert isinstance(type_str, str)
str_dtype_map = DataType.STR2DTYPE
if type_str in str_dtype_map:
dtype_map = str_dtype_map[type_str]
self.bits = dtype_map["bits"]
self.type_code = dtype_map["type_code"]
self.lanes = dtype_map["lanes"]
return
arr = type_str.split("x")
head = arr[0]
self.lanes = int(arr[1]) if len(arr) > 1 else 1
bits = 32
if head.startswith("int"):
self.type_code = DataTypeCode.INT
head = head[3:]
elif head.startswith("uint"):
self.type_code = DataTypeCode.UINT
head = head[4:]
elif head.startswith("float"):
self.type_code = DataTypeCode.FLOAT
head = head[5:]
elif head.startswith("handle"):
self.type_code = DataTypeCode.HANDLE
bits = 64
head = ""
elif head.startswith("bfloat"):
self.type_code = DataTypeCode.BFLOAT
head = head[6:]
elif head.startswith("custom"):
# pylint: disable=import-outside-toplevel
import tvm.runtime._ffi_api
low, high = head.find("["), head.find("]")
# str.find returns -1 when a bracket is missing, so check explicitly
if low == -1 or high == -1 or low >= high:
raise ValueError("Badly formatted custom type string %s" % type_str)
type_name = head[low + 1 : high]
self.type_code = tvm.runtime._ffi_api._datatype_get_type_code(type_name)
head = head[high + 1 :]
else:
raise ValueError("Do not know how to handle type %s" % type_str)
bits = int(head) if head else bits
self.bits = bits
def __repr__(self):
# pylint: disable=import-outside-toplevel
if self.bits == 1 and self.lanes == 1:
return "bool"
if self.type_code in DataType.CODE2STR:
type_name = DataType.CODE2STR[self.type_code]
else:
import tvm.runtime._ffi_api
type_name = "custom[%s]" % tvm.runtime._ffi_api._datatype_get_type_name(self.type_code)
x = "%s%d" % (type_name, self.bits)
if self.lanes != 1:
x += "x%d" % self.lanes
return x
def __eq__(self, other):
return (
self.bits == other.bits
and self.type_code == other.type_code
and self.lanes == other.lanes
)
def __ne__(self, other):
return not self.__eq__(other)
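# A minimal usage sketch (illustrative, not part of the original module):
# DataType accepts TVM-style type strings such as "float32" or "int8x4" as
# well as numpy dtypes, and round-trips through str().
def _example_data_type():  # hypothetical helper, for illustration only
    t = DataType("float32x4")
    assert t.type_code == DataTypeCode.FLOAT
    assert t.bits == 32 and t.lanes == 4
    assert str(t) == "float32x4"
    # numpy dtypes are accepted as well
    assert str(DataType(np.dtype(np.int8))) == "int8"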
RPC_SESS_MASK = 128
class Device(ctypes.Structure):
"""TVM device strucure.
Typically constructed using convenience function
:meth:`tvm.runtime.device`.
Exposes uniform interface to device-specific APIs such as CUDA or
OpenCL. Some properties may return None depending on whether an
API exposes that particular property.
NOTE! The integer values in MASK2STR and STR2MASK *must* correspond
to the values provided by the DLDeviceType and TVMDeviceExtType enums.
"""
kDLCPU = 1
kDLCUDA = 2
kDLCUDAHost = 3
kDLOpenCL = 4
kDLVulkan = 7
kDLMetal = 8
kDLVPI = 9
kDLROCM = 10
kDLROCMHost = 11
kDLExtDev = 12
kDLCUDAManaged = 13
kDLOneAPI = 14
kDLWebGPU = 15
kDLHexagon = 16
kDLAOCL = 32
kDLSDAccel = 33
kOpenGL = 34
kDLMicroDev = 35
_fields_ = [("device_type", ctypes.c_int), ("device_id", ctypes.c_int)]
MASK2STR = {
kDLCPU: "cpu",
kDLCUDA: "cuda",
kDLCUDAHost: "cuda_host",
kDLCUDAManaged: "cuda_managed",
kDLOpenCL: "opencl",
kDLVulkan: "vulkan",
kDLMetal: "metal",
kDLVPI: "vpi",
kDLROCM: "rocm",
kDLROCMHost: "rocm_host",
kDLExtDev: "ext_dev",
kDLOneAPI: "oneapi",
kDLWebGPU: "webgpu",
kDLHexagon: "hexagon",
kDLAOCL: "aocl",
kDLSDAccel: "sdaccel",
kOpenGL: "opengl",
kDLMicroDev: "microdev",
}
STR2MASK = {
"llvm": kDLCPU,
"stackvm": kDLCPU,
"cpu": kDLCPU,
"c": kDLCPU,
"test": kDLCPU,
"hybrid": kDLCPU,
"composite": kDLCPU,
"cuda": kDLCUDA,
"nvptx": kDLCUDA,
"cl": kDLOpenCL,
"opencl": kDLOpenCL,
"sdaccel": kDLOpenCL,
"aocl": kDLAOCL,
"aocl_sw_emu": kDLAOCL,
"vulkan": kDLVulkan,
"metal": kDLMetal,
"vpi": kDLVPI,
"rocm": kDLROCM,
"ext_dev": kDLExtDev,
"hexagon": kDLHexagon,
"webgpu": kDLWebGPU,
}
def __init__(self, device_type, device_id):
super(Device, self).__init__()
self.device_type = int(device_type)
self.device_id = device_id
def _GetDeviceAttr(self, device_type, device_id, attr_id):
"""Internal helper function to invoke runtime.GetDeviceAttr"""
# pylint: disable=import-outside-toplevel
import tvm.runtime._ffi_api
return tvm.runtime._ffi_api.GetDeviceAttr(device_type, device_id, attr_id)
@property
def exist(self):
"""Whether this device exists.
Returns True if TVM has support for the device, if the
physical device is present, and the device is accessible
through appropriate drivers (e.g. cuda/vulkan).
Returns
-------
exist : bool
True if the device exists
"""
return self._GetDeviceAttr(self.device_type, self.device_id, 0) != 0
@property
def max_threads_per_block(self):
"""Maximum number of threads on each block.
Returns device value for cuda, metal, rocm, opencl, and vulkan
devices. Returns remote device value for RPC devices.
Returns None for all other devices.
Returns
-------
max_threads_per_block : int or None
The number of threads on each block
"""
return self._GetDeviceAttr(self.device_type, self.device_id, 1)
@property
def warp_size(self):
"""Number of threads that execute concurrently.
Returns device value for cuda, rocm, and vulkan. Returns
1 for metal and opencl devices, regardless of the physical
device. Returns remote device value for RPC devices. Returns
None for all other devices.
Returns
-------
warp_size : int or None
Number of threads that execute concurrently
"""
return self._GetDeviceAttr(self.device_type, self.device_id, 2)
@property
def max_shared_memory_per_block(self):
"""Total amount of shared memory per block in bytes.
Returns device value for cuda, rocm, opencl, and vulkan.
Returns remote device value for RPC devices. Returns None for
all other devices.
Returns
-------
max_shared_memory_per_block : int or None
Total amount of shared memory per block in bytes
"""
return self._GetDeviceAttr(self.device_type, self.device_id, 3)
@property
def compute_version(self):
"""Get compute version number as string.
Returns maximum API version (e.g. CUDA/OpenCL/Vulkan)
supported by the device.
Returns device value for cuda, rocm, opencl, and
vulkan. Returns remote device value for RPC devices. Returns
None for all other devices.
Returns
-------
version : str or None
The version string in `major.minor` format.
"""
return self._GetDeviceAttr(self.device_type, self.device_id, 4)
@property
def device_name(self):
"""Return the vendor-specific name of device.
Returns device value for cuda, rocm, opencl, and vulkan.
Returns remote device value for RPC devices. Returns None for
all other devices.
Returns
-------
device_name : str or None
The name of the device.
"""
return self._GetDeviceAttr(self.device_type, self.device_id, 5)
@property
def max_clock_rate(self):
"""Return the max clock frequency of device (kHz).
Returns device value for cuda, rocm, and opencl. Returns
remote device value for RPC devices. Returns None for all
other devices.
Returns
-------
max_clock_rate : int or None
The maximum clock frequency of the device (kHz)
"""
return self._GetDeviceAttr(self.device_type, self.device_id, 6)
@property
def multi_processor_count(self):
"""Return the number of compute units in the device.
Returns device value for cuda, rocm, and opencl. Returns
remote device value for RPC devices. Returns None for all
other devices.
Returns
-------
multi_processor_count : int or None
The number of compute units in the device
"""
return self._GetDeviceAttr(self.device_type, self.device_id, 7)
@property
def max_thread_dimensions(self):
"""Return the maximum size of each thread axis
Returns device value for cuda, rocm, opencl, and vulkan.
Returns remote device value for RPC devices. Returns None for
all other devices.
Returns
-------
dims: List of int, or None
The maximum length of threadIdx.x, threadIdx.y, threadIdx.z
"""
return json.loads(self._GetDeviceAttr(self.device_type, self.device_id, 8))
@property
def api_version(self):
"""Returns version number of the SDK used to compile TVM.
For example, CUDA_VERSION for cuda or VK_HEADER_VERSION for
Vulkan.
Returns device value for cuda, rocm, opencl, and vulkan.
Returns remote device value for RPC devices. Returns None for
all other devices.
Returns
-------
version : int or None
The version of the SDK
"""
return self._GetDeviceAttr(self.device_type, self.device_id, 11)
@property
def driver_version(self):
"""Returns version number of the driver
Returns driver vendor's internal version number.
(e.g. "450.408.256" for nvidia-driver-450)
Returns device value for opencl and vulkan. Returns remote
device value for RPC devices. Returns None for all other
devices.
Returns
-------
version : str or None
The version string in `major.minor.patch` format.
"""
return self._GetDeviceAttr(self.device_type, self.device_id, 12)
def texture_spatial_limit(self):
"""Returns limits for textures by spatial dimensions
Returns
-------
limit : int or None
Maximum size of the texture by spatial dimensions
"""
return self._GetDeviceAttr(self.device_type, self.device_id, 12)
def create_raw_stream(self):
"""Create a new runtime stream at the context.
The user should free the stream after use.
Returns
-------
stream : TVMStreamHandle
The created runtime stream.
"""
stream = ctypes.c_void_p()
check_call(_LIB.TVMStreamCreate(self.device_type, self.device_id, ctypes.byref(stream)))
return stream
def free_raw_stream(self, stream):
"""Free a created stream handle.
Parameters
----------
stream : TVMStreamHandle
The stream to be released.
"""
check_call(_LIB.TVMStreamFree(self.device_type, self.device_id, stream))
def set_raw_stream(self, stream):
"""Set a created stream handle.
Parameters
----------
stream : TVMStreamHandle
The stream to be set to the device.
"""
check_call(_LIB.TVMSetStream(self.device_type, self.device_id, stream))
def sync(self, stream=None):
"""Synchronize until jobs finished at the context.
Parameters
----------
stream : TVMStreamHandle
The stream whose jobs will be waited on.
"""
check_call(_LIB.TVMSynchronize(self.device_type, self.device_id, stream))
def __eq__(self, other):
return (
isinstance(other, Device)
and self.device_id == other.device_id
and self.device_type == other.device_type
)
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(str(self))
def __repr__(self):
if self.device_type >= RPC_SESS_MASK:
tbl_id = self.device_type // RPC_SESS_MASK - 1
dev_type = self.device_type % RPC_SESS_MASK
return "remote[%d]:%s(%d)" % (tbl_id, Device.MASK2STR[dev_type], self.device_id)
return "%s(%d)" % (Device.MASK2STR[self.device_type], self.device_id)
class TVMArray(ctypes.Structure):
"""TVMValue in C API"""
_fields_ = [
("data", ctypes.c_void_p),
("device", Device),
("ndim", ctypes.c_int),
("dtype", DataType),
("shape", ctypes.POINTER(tvm_shape_index_t)),
("strides", ctypes.POINTER(tvm_shape_index_t)),
("byte_offset", ctypes.c_uint64),
]
class ObjectRValueRef:
"""Represent an RValue ref to an object that can be moved.
Parameters
----------
obj : tvm.runtime.Object
The object that this value refers to
"""
__slots__ = ["obj"]
def __init__(self, obj):
self.obj = obj
TVMArrayHandle = ctypes.POINTER(TVMArray)
| https://github.com/zk-ml/tachikoma |
python/tvm/arith/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Integer bound analysis, simplification and pattern detection."""
from .int_set import (
IntSet,
IntervalSet,
estimate_region_lower_bound,
estimate_region_strict_bound,
estimate_region_upper_bound,
)
from .analyzer import ModularSet, ConstIntBound, Analyzer
from .bound import deduce_bound
from .pattern import detect_linear_equation, detect_clip_bound
from .int_solver import solve_linear_equations, solve_linear_inequalities
from .iter_affine_map import IterMapExpr, IterMark, IterSplitExpr, IterSumExpr
from .iter_affine_map import (
detect_iter_map,
normalize_iter_map_to_expr,
subspace_divide,
inverse_affine_iter_map,
)
| https://github.com/zk-ml/tachikoma |
python/tvm/arith/_ffi_api.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI APIs for tvm.arith"""
import tvm._ffi
tvm._ffi._init_api("arith", __name__)
| https://github.com/zk-ml/tachikoma |
python/tvm/arith/analyzer.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arithmetic data structure and utility"""
import tvm._ffi
from tvm.runtime import Object
from . import _ffi_api
@tvm._ffi.register_object("arith.ModularSet")
class ModularSet(Object):
"""Represent range of (coeff * x + base) for x in Z"""
def __init__(self, coeff, base):
self.__init_handle_by_constructor__(_ffi_api.ModularSet, coeff, base)
@tvm._ffi.register_object("arith.ConstIntBound")
class ConstIntBound(Object):
"""Represent constant integer bound
Parameters
----------
min_value : int
The minimum value of the bound.
max_value : int
The maximum value of the bound.
"""
POS_INF = (1 << 63) - 1
NEG_INF = -POS_INF
def __init__(self, min_value, max_value):
self.__init_handle_by_constructor__(_ffi_api.ConstIntBound, min_value, max_value)
class ConstraintScope:
"""Constraint scope.
Parameters
----------
fenter : function
A function that will be called to create an enter context.
Note
----
Do not create object directly, use Analyzer.constraint_scope
"""
def __init__(self, fenter):
self._fenter = fenter
self._fexit = None
def __enter__(self):
self._fexit = self._fenter()
def __exit__(self, ptype, value, trace):
self._fexit()
class Analyzer:
"""Integer arithmetic analyzer
This is a stateful analyzer class that can
be used to perform various symbolic integer analysis.
"""
def __init__(self):
_mod = _ffi_api.CreateAnalyzer()
self._const_int_bound = _mod("const_int_bound")
self._const_int_bound_update = _mod("const_int_bound_update")
self._bind = _mod("bind")
self._modular_set = _mod("modular_set")
self._simplify = _mod("Simplify")
self._rewrite_simplify = _mod("rewrite_simplify")
self._canonical_simplify = _mod("canonical_simplify")
self._int_set = _mod("int_set")
self._enter_constraint_context = _mod("enter_constraint_context")
self._can_prove_equal = _mod("can_prove_equal")
def const_int_bound(self, expr):
"""Find constant integer bound for expr.
Parameters
----------
expr : PrimExpr
The expression.
Returns
-------
bound : ConstIntBound
The result bound
"""
return self._const_int_bound(expr)
def modular_set(self, expr):
"""Find a modular set that expr belongs to.
Parameters
----------
expr : PrimExpr
The expression.
Returns
-------
result : ModularSet
The result.
"""
return self._modular_set(expr)
def simplify(self, expr, steps=2):
"""Simplify expression via both rewrite and canonicalization.
Parameters
----------
expr : PrimExpr
The expression.
steps : int
The simplification runs in the order of
rewrite_simplify (step 1) -> canonical_simplify (step 2) ->
rewrite_simplify (step 3) -> canonical_simplify (step 4) -> ...
This parameter controls how many steps to run.
Default is 2, i.e., rewrite_simplify + canonical_simplify.
Returns
-------
result : Expr
The result.
"""
return self._simplify(expr, steps)
def rewrite_simplify(self, expr):
"""Simplify expression via rewriting rules.
Parameters
----------
expr : PrimExpr
The expression.
Returns
-------
result : Expr
The result.
"""
return self._rewrite_simplify(expr)
def canonical_simplify(self, expr):
"""Simplify expression via canonicalization.
Parameters
----------
expr : PrimExpr
The expression.
Returns
-------
result : Expr
The result.
"""
return self._canonical_simplify(expr)
def int_set(self, expr, dom_map):
"""Compute a symbolic IntSet that covers expr for all values in dom_map.
Parameters
----------
expr : PrimExpr
The expression.
dom_map : Dict[Var, tvm.arith.IntSet]
The domain for variables to be relaxed.
Returns
-------
result : IntSet
The result.
"""
return self._int_set(expr, dom_map)
def bind(self, var, expr):
"""Bind a variable to the expression.
Parameters
----------
var : tvm.tir.Var
The variable.
expr : PrimExpr
The expression.
"""
return self._bind(var, expr)
def constraint_scope(self, constraint):
"""Create a constraint scope.
Parameters
----------
constraint : PrimExpr
The constraint expression.
Returns
-------
scope : ConstraintScope
The constraint scope
Examples
--------
.. code-block:: python
x = te.var("x")
analyzer = tvm.arith.Analyzer()
with analyzer.constraint_scope(x % 3 == 0):
# constraint in effect
assert analyzer.modular_set(x).coeff == 3
# constraint no longer in effect
assert analyzer.modular_set(x).coeff != 3
"""
def _fenter():
return self._enter_constraint_context(constraint)
return ConstraintScope(_fenter)
def update(self, var, info, override=False):
"""Update infomation about var
Parameters
----------
var : tvm.tir.Var
The variable.
info : tvm.Object
Related information.
override : bool
Whether to allow override.
"""
if isinstance(info, ConstIntBound):
self._const_int_bound_update(var, info, override)
else:
raise TypeError("Do not know how to handle type {}".format(type(info)))
def can_prove_equal(self, lhs: "PrimExpr", rhs: "PrimExpr"):
"""Whether we can prove that lhs == rhs
Parameters
----------
lhs: PrimExpr
The left-hand side of the comparison
rhs: PrimExpr
The right-hand side of the comparison
Returns
-------
result: bool
Whether we can prove that lhs == rhs
"""
return self._can_prove_equal(lhs, rhs)
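# A minimal usage sketch (illustrative, not part of the original module):
# the analyzer folds bounds through arithmetic and proves simple equalities.
def _example_analyzer():  # hypothetical helper, for illustration only
    from tvm import te
    analyzer = Analyzer()
    x = te.var("x")
    analyzer.update(x, ConstIntBound(0, 10))
    bound = analyzer.const_int_bound(x + 1)
    assert bound.min_value == 1 and bound.max_value == 11
    assert analyzer.can_prove_equal(x * 2, x + x)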
| https://github.com/zk-ml/tachikoma |
python/tvm/arith/bound.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Bound deduction."""
from . import _ffi_api
def deduce_bound(var, cond, hint_map, relax_map):
"""Deduce the bound of the target variable in the cond.
Parameters
----------
var : Var
The target variable to be deduced.
cond : PrimExpr
The condition
hint_map : Map[Var, IntSet]
Domain of variables used to help deduction.
relax_map : Map[Var, IntSet]
The domain of the variables to be relaxed
using the provided domain.
"""
return _ffi_api.DeduceBound(var, cond, hint_map, relax_map)
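# A minimal usage sketch (illustrative, not part of the original module):
# deduce the admissible interval of x from the condition x + 1 < 10
# (no hints and nothing to relax, hence the empty maps).
def _example_deduce_bound():  # hypothetical helper, for illustration only
    from tvm import te
    x = te.var("x")
    bound = deduce_bound(x, x + 1 < 10, {}, {})
    print(bound)  # an interval set whose upper end is 8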
| https://github.com/zk-ml/tachikoma |
python/tvm/arith/int_set.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Integer set."""
import tvm._ffi
from tvm.runtime import Object
from . import _ffi_api
class IntSet(Object):
"""Represent a set of integer in one dimension."""
def is_nothing(self):
"""Whether the set represent nothing"""
return _ffi_api.IntSetIsNothing(self)
def is_everything(self):
"""Whether the set represent everything"""
return _ffi_api.IntSetIsEverything(self)
@staticmethod
def vector(vec):
"""Construct an integer set that covers the vector expr
Parameters
----------
vec : PrimExpr
The vector expression.
Returns
-------
rset : IntSet
The result set.
"""
return _ffi_api.intset_vector(vec)
@staticmethod
def single_point(point):
"""Construct a point set.
Parameters
----------
point : PrimExpr
The point expression.
Returns
-------
rset : IntSet
The result set.
"""
return _ffi_api.intset_single_point(point)
@tvm._ffi.register_object("arith.IntervalSet")
class IntervalSet(IntSet):
"""Represent set of continuous interval [min_value, max_value]
Parameters
----------
min_value : PrimExpr
The minimum value in the interval.
max_value : PrimExpr
The maximum value in the interval.
"""
def __init__(self, min_value, max_value):
self.__init_handle_by_constructor__(_ffi_api.IntervalSet, min_value, max_value)
def estimate_region_lower_bound(region, var_dom, predicate):
"""Analyze the region with affine map, given the domain of variables and their predicate
Some subregion may be discarded during the lower-bound analysis.
Parameters
----------
region : List[Range]
The region to be analyzed.
var_dom : Dict[Var, Range]
The ranges of the variables
predicate : PrimExpr
The predicate for the affine map
Returns
----------
region_int_set : Optional[List[IntSet]]
None if the detection fails, or an array of IntSets as the result of analysis
"""
return _ffi_api.EstimateRegionLowerBound(region, var_dom, predicate)
def estimate_region_strict_bound(region, var_dom, predicate):
"""Analyze the region with affine map, given the domain of variables and their predicate
The result should be strict, i.e. no region is discarded or relaxed.
Parameters
----------
region : List[Range]
The region to be analyzed.
var_dom : Dict[Var, Range]
The ranges of the variables
predicate : PrimExpr
The predicate for the affine map
Returns
----------
region_int_set : Optional[List[IntSet]]
None if the detection fails, or an array of IntSets as the result of analysis
"""
return _ffi_api.EstimateRegionStrictBound(region, var_dom, predicate)
def estimate_region_upper_bound(region, var_dom, predicate):
"""Analyze the region with affine map, given the domain of variables and their predicate
Relaxation of the region may be used in upper-bound analysis,
i.e. some extra region may be added to the result.
Parameters
----------
region : List[Range]
The region to be analyzed.
var_dom : Dict[Var, Range]
The ranges of the variables
predicate : PrimExpr
The predicate for the affine map
Returns
----------
region_int_set : List[IntSet]
an array of IntSets as the result of analysis
"""
return _ffi_api.EstimateRegionUpperBound(region, var_dom, predicate)
def pos_inf():
"""Returns the symbolic positive infinity
Returns
----------
pos_inf : Var
A symbolic var that indicates positive infinity
"""
return _ffi_api.PosInf()
def neg_inf():
"""Returns the symbolic positive infinity
Returns
----------
neg_inf : Var
A symbolic var that indicates positive infinity
"""
return _ffi_api.NegInf()
def union_lower_bound(sets):
"""Create a lower-bound of union set, where some of the segments may be dropped
Parameters
----------
sets : List[IntSet]
The sets to be combined
Returns
----------
union_lower_bound : IntSet
An N-dimensional integer set, the lower bound of the union
"""
return _ffi_api.UnionLowerBound(sets)
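# A minimal usage sketch (illustrative, not part of the original module):
# IntervalSet describes [min_value, max_value]; the symbolic infinities
# mark unbounded ends.
def _example_int_set():  # hypothetical helper, for illustration only
    s = IntervalSet(0, 10)
    assert not s.is_nothing()
    unbounded = IntervalSet(neg_inf(), pos_inf())
    assert unbounded.is_everything()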
| https://github.com/zk-ml/tachikoma |
python/tvm/arith/int_solver.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""integer constraints data structures and solvers"""
import tvm._ffi
from tvm.runtime import Object
from . import _ffi_api
@tvm._ffi.register_object("arith.IntGroupBounds")
class IntGroupBounds(Object):
"""Represent integer grouped bounds which are classified into
lower bounds (include), upper bounds (include) and equalities.
Parameters
----------
coef : tvm.ir.PrimExpr
The coefficient. Must be integer type.
coef * var >= lower
coef * var == equal
coef * var <= upper
lower : List[tvm.ir.PrimExpr]
the lower bounds (include)
equal : List[tvm.ir.PrimExpr]
equalities
upper : List[tvm.ir.PrimExpr]
the upper bounds (include)
"""
def __init__(self, coef, lower, equal, upper):
self.__init_handle_by_constructor__(_ffi_api.IntGroupBounds, coef, lower, equal, upper)
@staticmethod
def from_range(rng):
"""Construct a IntGroupedBounds by Range.
Parameters
----------
rng : tvm.ir.Range
Returns
-------
ret : IntGroupBounds
The constructed grouped bounds.
"""
return _ffi_api.IntGroupBounds_from_range(rng)
def find_best_range(self):
"""Return the best range from the grouped bounds.
None if (-inf, +inf).
"""
return _ffi_api.IntGroupBounds_FindBestRange(self)
@tvm._ffi.register_object("arith.IntConstraints")
class IntConstraints(Object):
"""Represent a set of integer constraints including variables, their ranges and
the relations between them (either equations or inequalities)
Parameters
----------
variables : List[tvm.tir.Var]
The variables in the constraints. Must be integers
ranges : Map[tvm.tir.Var, tvm.ir.Range]
The ranges of the variables.
relations : List[tvm.ir.PrimExpr]
The relations between the variables (either equations or inequalities)
"""
def __init__(self, variables, ranges, relations):
self.__init_handle_by_constructor__(_ffi_api.IntConstraints, variables, ranges, relations)
@tvm._ffi.register_object("arith.IntConstraintsTransform")
class IntConstraintsTransform(Object):
"""We can have different set of variables to represent the same integer constraints.
For example, the following two constraints are equivalent,
{a + b = 0 | a >= 0, b >= 0} and
{m - n = 0 | m >= 0, n <= 0}
This data structure represents the transformation
between two equivalent integer constraints.
In the above example,
src : {a + b = 0 | a >= 0, b >= 0}
dst : {m - n = 0 | m >= 0, n <= 0}
src_to_dst : {a -> m, b -> -n}
dst_to_src : {m -> a, n -> -b}
Parameters
----------
src : arith.IntConstraints
source integer constraints, e.g., {a + b = 0 | a >= 0, b >= 0}
dst : arith.IntConstraints
integer constraints equivalent to the source, e.g., {m - n = 0 | m >= 0, n <= 0}
src_to_dst : Map[tvm.tir.Var, tvm.ir.PrimExpr]
mapping from variables in the src to the variables in the dst,
e.g., {a -> m, b -> -n}
dst_to_src : Map[tvm.tir.Var, tvm.ir.PrimExpr]
mapping from variables in the dst to the variables in the src,
e.g., {m -> a, n -> -b}
"""
def __init__(self, src, dst, src_to_dst, dst_to_src):
self.__init_handle_by_constructor__(
_ffi_api.IntConstraintsTransform, src, dst, src_to_dst, dst_to_src
)
def solve_linear_equations(equations, variables=None, ranges=None):
"""Solve linear equations.
Parameters
----------
equations: List[tvm.ir.PrimExpr] or IntConstraints
The equations of the variables
variables : Optional[List[tvm.tir.Var]]
The variables in the system.
ranges : Optional[Map[tvm.tir.Var, tvm.ir.Range]]
The ranges of the variables.
Returns
-------
int_constraints_transform : IntConstraintsTransform
New integer constraints, with fewer variables (if the problem is NOT of full rank),
or no variable (if the problem is of full rank),
or an empty integer constraints (if the problem is unsolvable).
It also provides the ranges of the variables in the new system,
as well as inequalities inferred from the problem.
You can get the mapping from the original variables to the solution via
int_constraints_transform.src_to_dst.
"""
if isinstance(equations, IntConstraints):
return _ffi_api.SolveLinearEquations(equations)
return _ffi_api.SolveLinearEquations(variables, ranges, equations)
def solve_linear_inequalities(equations, variables=None, ranges=None, deskew_range=False):
"""Solve linear inequalities.
Parameters
----------
equations : List[tvm.ir.PrimExpr] or IntConstraints
The inequalities of the variables
variables : Optional[List[tvm.tir.Var]]
The variables in the system.
ranges : Optional[Map[tvm.tir.Var, tvm.ir.Range]]
The ranges of the variables.
deskew_range: Optional[bool]
Whether to deskew the result ranges so that they start from zero.
Defaults to False.
Returns
-------
ret_ranges: IntConstraints or IntConstraintsTransform
The result ranges for each variables.
Constraints that cannot be transformed to Range will be stored in IntConstraints.relations.
If deskew_range is set (=True), the result ranges will be deskewed to start from zero.
New variables are created accordingly, therefore an IntConstraintsTransform is returned.
"""
solver = (
_ffi_api.SolveInequalitiesDeskewRange if deskew_range else _ffi_api.SolveInequalitiesToRange
)
if isinstance(equations, IntConstraints):
assert variables is None
assert ranges is None
return solver(equations)
return solver(variables, ranges, equations)
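# A minimal usage sketch (illustrative, not part of the original module):
# solve the system {x + y == 20, x - y == 10} over bounded ranges; the
# unique solution is x = 15, y = 5.
def _example_solve_linear_equations():  # hypothetical helper
    import tvm
    from tvm import te
    x, y = te.var("x"), te.var("y")
    ranges = {x: tvm.ir.Range(0, 100), y: tvm.ir.Range(0, 100)}
    transform = solve_linear_equations([x + y == 20, x - y == 10], [x, y], ranges)
    print(transform.src_to_dst)  # maps x -> 15 and y -> 5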
| https://github.com/zk-ml/tachikoma |
python/tvm/arith/iter_affine_map.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Iterator (quasi)affine mapping patterns."""
from enum import IntEnum
import tvm._ffi
from tvm.runtime import Object
from tvm.ir import PrimExpr
from . import _ffi_api
class IterMapExpr(PrimExpr):
"""Base class of all IterMap expressions."""
@tvm._ffi.register_object("arith.IterMark")
class IterMark(Object):
"""Mark the source as an iterator in [0, extent).
Parameters
----------
source : PrimExpr.
The source expression.
extent : PrimExpr
The extent of the iterator.
"""
def __init__(self, source, extent):
self.__init_handle_by_constructor__(_ffi_api.IterMark, source, extent)
@tvm._ffi.register_object("arith.IterSplitExpr")
class IterSplitExpr(IterMapExpr):
"""Split of an iterator.
result = floormod(floordiv(source, lower_factor), extent) * scale
Parameters
----------
source : IterMark
The source marked iterator.
lower_factor : PrimExpr
The lower factor to split the domain.
extent : PrimExpr
The extent of the split.
scale : PrimExpr
Additional scale to the split.
"""
def __init__(self, source, lower_factor, extent, scale):
self.__init_handle_by_constructor__(
_ffi_api.IterSplitExpr, source, lower_factor, extent, scale
)
@tvm._ffi.register_object("arith.IterSumExpr")
class IterSumExpr(IterMapExpr):
"""Fuse multiple iterators by summing them with scaling.
result = sum(args) + base
Parameters
----------
args : List[IterSplitExpr]
The input to the sum expression.
base : PrimExpr
The base offset.
"""
def __init__(self, args, base):
self.__init_handle_by_constructor__(_ffi_api.IterSumExpr, args, base)
class IterMapLevel(IntEnum):
"""Possible kinds of iter mapping check level."""
Bijective = 0
Surjective = 1
NoCheck = 3
@staticmethod
def from_str(name: str):
"""Helper to create level enum from string"""
if name is None:
return IterMapLevel.NoCheck
name = name.lower()
if name == "bijective":
check_level = IterMapLevel.Bijective
elif name == "surjective":
check_level = IterMapLevel.Surjective
elif name == "nocheck":
check_level = IterMapLevel.NoCheck
else:
raise ValueError(f"Unknown check level {name}")
return check_level
def detect_iter_map(
indices,
input_iters,
predicate=True,
check_level=IterMapLevel.Surjective,
simplify_trivial_iterators=True,
):
"""Detect if indices can be written as mapped iters from input iters
Parameters
----------
indices : List[PrimExpr]
The input indices
input_iters : Map[Var, Range]
The domain of each input iterator.
predicate : PrimExpr
The predicate constraints on the input iterators
check_level : Union[str, IterMapLevel]
Checking level of iteration mapping
simplify_trivial_iterators: bool
If true, iterators with extent of 1 will be replaced with a
constant value.
Returns
-------
results : IterMapResult
The iter map matching result.
The result's `.indices` is an empty array if no match can be found.
"""
if isinstance(check_level, str):
check_level = IterMapLevel.from_str(check_level)
elif check_level is None:
check_level = IterMapLevel.NoCheck
return _ffi_api.DetectIterMap(
indices, input_iters, predicate, check_level, simplify_trivial_iterators
)
def normalize_iter_map_to_expr(expr):
"""Given an IterMapExpr, transform it to normal PrimExpr
Parameters
----------
expr : IterMapExpr
the input IterMapExpr
Returns
-------
result : PrimExpr
the corresponding normal PrimExpr
"""
return _ffi_api.NormalizeIterMapToExpr(expr)
def subspace_divide(
bindings, input_iters, sub_iters, predicate=True, check_level=IterMapLevel.Surjective
):
"""Detect if bindings can be written as
[a_0*e_0 + b_0 + c_0, a_1*e_1 + b_1, ..., a_n*e_n + b_n]
where a = some-quasi-affine-iter-map(input_iters set_minus sub_iters)
b = some-quasi-affine-iter-map(sub_iters)
c is constant symbols
e is the extent of b
For example, z*12 + y*3 + x + c = (z*4+y)*3 + x + c
bindings = [z*12 + y*3 + x + c]
input_iters = [z, y, x]
sub_iter = [x]
Then the result will be [a, b] where
a = [z*4 + y]
b = [x]
Parameters
----------
bindings : List[PrimExpr]
The input bindings
input_iters : Map[Var, Range]
The domain of input iterator, which is the basis of the whole space
sub_iters : Array[Var]
The subset of input_iters, which is the basis of the subspace
predicate : PrimExpr
The predicate constraints on the input iterators
check_level : Union[str, IterMapLevel]
Checking level of iteration mapping
Returns
-------
results : List[List[PrimExpr]]
The result list has length len(bindings) + 1
[0, len(bindings)): The iter map matching result. The inner list is of length 2.
The first expr is the basis of the quotient space.
The second expr is the basis of the subspace.
len(bindings): the predicate of outer space and inner space
Empty array if no match can be found.
"""
if isinstance(check_level, str):
check_level = IterMapLevel.from_str(check_level)
return _ffi_api.SubspaceDivide(bindings, input_iters, sub_iters, predicate, check_level)
def inverse_affine_iter_map(iter_map, outputs):
"""Apply the inverse of the affine transformation to the outputs.
Similar to the back-propagation, starting from the outputs, it visits the DAG of the expressions
in reverse topology order and applies the inverse of the affine transformation until it reaches
the input. The affine iter map is required to be bijective.
For example, iter_map = [l0 // 16, l0 % 16], outputs = [output_0, output_1],
the affine transformation specified by `iter_map` will be applied to `outputs` and the result
will be {l0: ((output_0*16) + output_1)}.
See also :any:`detect_iter_map`.
Parameters
----------
iter_map : List[IterSumExpr]
The bijective affine iter map.
outputs : List[PrimExpr]
The outputs of the affine transformation.
Returns
-------
results : Map[Var, PrimExpr]
The map from the input to the transformed result.
"""
return _ffi_api.InverseAffineIterMap(iter_map, outputs)
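# A minimal usage sketch (illustrative, not part of the original module):
# detect that x*16 + y is an affine map over x in [0, 8) and y in [0, 16),
# then invert it back to the inputs.
def _example_iter_affine_map():  # hypothetical helper, for illustration only
    import tvm
    from tvm import te
    x, y = te.var("x"), te.var("y")
    dom = {x: tvm.ir.Range(0, 8), y: tvm.ir.Range(0, 16)}
    res = detect_iter_map([x * 16 + y], dom, check_level="bijective")
    assert len(res.indices) == 1
    out = te.var("out")
    print(inverse_affine_iter_map(res.indices, [out]))  # x -> out // 16, y -> out % 16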
| https://github.com/zk-ml/tachikoma |
python/tvm/arith/pattern.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Detect common patterns."""
from . import _ffi_api
def detect_linear_equation(expr, var_list):
"""Match `expr = sum_{i=0}^{n-1} var[i] * coeff[i] + coeff[n]`
where coeff[i] and the base coeff[n] are invariant of var[j] for all i and j.
Parameters
----------
expr : PrimExpr
The expression to be matched.
var_list : List[tvm.tir.Var]
A list of variables.
Returns
-------
coeff : List[PrimExpr]
A list of coefficients if the match is successful.
An empty list if the match failed.
"""
return _ffi_api.DetectLinearEquation(expr, var_list)
def detect_clip_bound(expr, var_list):
"""Detect if expression corresponds to clip bound of the vars
Parameters
----------
expr : PrimExpr
The expression to be matched.
var_list : List[tvm.tir.Var]
A list of variables.
Returns
-------
coeff : List[PrimExpr]
`concat([min_value[i], max_value[i]] for i, v in enumerate(var_list))`
An empty list if the match failed.
"""
return _ffi_api.DetectClipBound(expr, var_list)
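# A minimal usage sketch (illustrative, not part of the original module):
# match 4*x + 8 against the linear form var[0]*coeff[0] + coeff[1].
def _example_detect_linear_equation():  # hypothetical helper
    from tvm import te
    x = te.var("x")
    coeff = detect_linear_equation(x * 4 + 8, [x])
    assert len(coeff) == 2  # coeff[0] == 4, coeff[1] == 8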
| https://github.com/zk-ml/tachikoma |
python/tvm/auto_scheduler/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-import, redefined-builtin
""" Namespace for TVM Auto-scheduler. """
from . import (
compute_dag,
dispatcher,
feature,
loop_state,
measure,
measure_record,
relay_integration,
search_policy,
search_task,
task_scheduler,
utils,
workload_registry,
)
# Shortcut
from .compute_dag import (
ComputeDAG,
LayoutRewriteOption,
get_shape_from_rewritten_layout,
)
from .cost_model import RandomModel, XGBModel
from .dispatcher import ApplyHistoryBest, ApplyHistoryBestOrSample, DispatchContext
from .measure import (
LocalBuilder,
LocalRPCMeasureContext,
LocalRunner,
MeasureInput,
MeasureResult,
RPCRunner,
register_task_input_check_func,
)
from .measure_record import (
RecordReader,
RecordToFile,
load_best_record,
load_records,
save_records,
)
from .relay_integration import (
extract_tasks,
is_auto_scheduler_enabled,
remove_index_check,
rewrite_compute_body,
rewrite_tensor_shape,
)
from .search_policy import (
EmptyPolicy,
PreloadCustomSketchRule,
PreloadMeasuredStates,
SketchPolicy,
)
from .search_task import (
HardwareParams,
SearchTask,
TuningOptions,
auto_schedule,
create_task,
)
from .task_scheduler import TaskScheduler
from .workload_registry import make_workload_key, register_workload
| https://github.com/zk-ml/tachikoma |
python/tvm/auto_scheduler/_ffi_api.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Register FFI APIs from C++ for the namespace tvm.auto_scheduler. """
import tvm._ffi
tvm._ffi._init_api("auto_scheduler", __name__)
| https://github.com/zk-ml/tachikoma |
python/tvm/auto_scheduler/compute_dag.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
""" The auto-scheduler's computational graph and related program analyses. """
import hashlib
import json
import tvm._ffi
from tvm.runtime import Object
from tvm.runtime._ffi_node_api import LoadJSON, SaveJSON
from . import _ffi_api
from .loop_state import State, StateObject
from .utils import get_const_tuple
from .workload_registry import workload_key_to_tensors
class LayoutRewriteOption:
"""
Options for applying layout rewrite.
The NO_REWRITE and INSERT_TRANSFORM_STAGE are expected to be used when tuning a standalone op,
and the REWRITE_FOR_PRE_TRANSFORMED is expected to be used when tuning ops inside a network.
"""
# Do not perform layout rewrite
NO_REWRITE = 0
# Insert layout transformation stages for input placeholders in the compute DAG
INSERT_TRANSFORM_STAGE = 1
# Do not insert layout transformation stages and assume the input placeholders
# are pre-transformed.
# Note: The lowered function with this option does not accept the original input shapes,
# so this option must be used along with `AutoSchedulerLayoutRewrite` pass in Relay.
REWRITE_FOR_PRE_TRANSFORMED = 2
@staticmethod
def get_target_default(target, in_relay_integration=False):
"""Get the default layout rewrite option for the specified target.
Currently we only enable layout rewrite for the cpu / mali backends.
Parameters
----------
target: tvm.target.Target
The compilation target.
in_relay_integration: bool
Whether this check is for Relay integration.
Returns
-------
layout_rewrite_option: LayoutRewriteOption
The default layout rewrite option for the specified target.
"""
layout_rewrite_option = LayoutRewriteOption.NO_REWRITE
if target.kind.name == "llvm" or (
"device" in target.attrs and target.attrs["device"] == "mali"
):
layout_rewrite_option = (
LayoutRewriteOption.REWRITE_FOR_PRE_TRANSFORMED
if in_relay_integration
else LayoutRewriteOption.INSERT_TRANSFORM_STAGE
)
return layout_rewrite_option
@tvm._ffi.register_object("auto_scheduler.ComputeDAG")
class ComputeDAG(Object):
"""
The auto-scheduler's computational graph and related program analyses.
We convert a compute declaration described by `tvm.compute` (could be a single operator or a
subgraph) to a ComputeDAG. It keeps the input/output tensors, all operations in the DAG, and
some static analysis results for the DAG (e.g. the total float operation count,
consumer/producer relations of operations, whether an operation stage should
be tiled/compute inlined).
These analyses can help the search policy to make decisions during the search.
ComputeDAG is also responsible for the interaction between auto-scheduler's `LoopState` and
TVM schedule (e.g. applying the `LoopState` transform steps to a TVM schedule, providing
`LoopState` with extra information got from TVM schedule).
Parameters
----------
compute_or_sche : Union[List[Tensor], str, tvm.te.Schedule]
Input/output tensors, a workload key, or a schedule for a compute declaration.
"""
def __init__(self, compute_or_sche):
if isinstance(compute_or_sche, str):
compute = workload_key_to_tensors(compute_or_sche)
sche = None
elif isinstance(compute_or_sche, (list, tvm.ir.container.Array)):
for item in compute_or_sche:
if not isinstance(item, tvm.te.Tensor):
raise ValueError(
"The input of ComputeDAG should be a list of Tensor, but got %s"
% type(item)
)
compute = compute_or_sche
sche = None
elif isinstance(compute_or_sche, tvm.te.Schedule):
compute = None
sche = compute_or_sche
else:
raise ValueError(
"Invalid compute type: %s. ComputeDAG expects string, list of Tensor, or Schedule"
% type(compute_or_sche)
)
self.__init_handle_by_constructor__(_ffi_api.ComputeDAG, compute, sche)
def get_init_state(self):
"""Get the init state of this ComputeDAG.
Returns
-------
state : State
The initial State without any transform steps.
"""
return State(self.init_state, self)
def apply_steps_from_state(self, state, layout_rewrite=LayoutRewriteOption.NO_REWRITE):
"""
Apply the history transform steps from a State to get a TVM schedule.
Parameters
----------
state : Union[State, StateObject]
The state from which we get transform steps.
layout_rewrite: LayoutRewriteOption = NO_REWRITE
Rewrite the layout of placeholders specified by "layout_free_placeholders" attr
to make it most friendly for the generated schedule to read from.
Returns
-------
A `te.Schedule` and a list of `te.Tensor` to be used in `tvm.lower` or `tvm.build`.
"""
state_obj = state if isinstance(state, StateObject) else state.state_object
return _ffi_api.ComputeDAGApplyStepsFromState(self, state_obj, layout_rewrite)
def print_python_code_from_state(self, state):
"""
Print transform steps in the history of a State as TVM's python schedule code.
This is used to print transformation steps for debugging.
Use `apply_steps_from_state` if you want to get a schedule for code generation.
Parameters
----------
state : Union[State, StateObject]
The state from which we get transform steps.
Returns
-------
code : str
The Python schedule code.
"""
state_obj = state if isinstance(state, StateObject) else state.state_object
return _ffi_api.ComputeDAGPrintPythonCodeFromState(self, state_obj)
def infer_bound_from_state(self, state):
"""
Infer and fill the bound of all iterators of a state.
The states may lose complete bound information after some transform steps
(e.g., compute_at).
We can call this function to infer and fill all the bound information.
This function calls TVM InferBound pass internally to get the bound.
The returned state of this function is guaranteed to have complete iterator extent
information.
Parameters
----------
state : Union[State, StateObject]
The state from which we get transform steps.
Returns
-------
updated_state : State
The State with complete bound information.
"""
state_obj = state if isinstance(state, StateObject) else state.state_object
updated_state = State(_ffi_api.ComputeDAGInferBoundFromState(self, state_obj), self)
# Copy the stage_id_map from the original state to make sure the old indices are still
# valid
if isinstance(state, State):
for k, v in state.stage_id_map.items():
updated_state.stage_id_map[k] = v
return updated_state
def rewrite_layout_from_state(self, state):
"""
Rewrite the layout of the DAG according to the history transform steps of a state.
Parameters
----------
state : Union[State, StateObject]
The state from which we get transform steps.
Returns
-------
updated_dag : ComputeDAG
The compute dag with rewritten layout.
"""
state_obj = state if isinstance(state, StateObject) else state.state_object
return _ffi_api.ComputeDAGRewriteLayoutFromState(self, state_obj)
def workload_key(self):
"""Return the workload key of this compute DAG.
The workload key is a JSON string from a tuple of (hash of DAG, tensor shapes...)
Returns
-------
key: str
The workload key of this compute DAG
"""
str_dag = _ffi_api.ComputeDAGPrintDAG(self, True)
hash_func = tvm._ffi.get_global_func(
"auto_scheduler.compute_dag.hash_func", allow_missing=True
)
if hash_func is None:
str_dag = str_dag.encode("utf-8")
hash_key = hashlib.md5(str_dag).hexdigest()
else:
hash_key = hash_func(str_dag)
io_shapes = []
for tensor in self.tensors:
io_shapes.append(get_const_tuple(tensor.shape))
return json.dumps([hash_key] + io_shapes)
def __str__(self):
# pretty print
MAX_LINE_WIDTH = 256
raw_lines = super().__str__().split("\n")
lines = []
for line in raw_lines:
if len(line) > MAX_LINE_WIDTH:
line = (
line[: MAX_LINE_WIDTH // 2] + " ..(OMITTED).. " + line[-MAX_LINE_WIDTH // 2 :]
)
lines.append(line)
return "\n".join(lines)
def __getstate__(self):
return {"tensors": SaveJSON(self.tensors)}
def __setstate__(self, state):
# Since we always use tensors to recover the ComputeDAG, we do not support
# (de)serialization of the ComputeDAG constructed by a schedule.
self.__init_handle_by_constructor__(_ffi_api.ComputeDAG, LoadJSON(state["tensors"]), None)
def get_shape_from_rewritten_layout(rewritten_layout, axis_names):
"""Get the orginal shape from a rewritten layout string.
Parameters
----------
rewritten_layout: str
The layout after rewrite
axis_names: List[str]
Specify the order of axes by names
Returns
-------
shape: List[PrimExpr]
The original shape
"""
return _ffi_api.GetShapeFromRewrittenLayout(rewritten_layout, axis_names)
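# A minimal usage sketch (illustrative, not part of the original module):
# build a ComputeDAG for a small matmul and inspect its initial state.
def _example_compute_dag():  # hypothetical helper, for illustration only
    from tvm import te
    A = te.placeholder((128, 128), name="A")
    B = te.placeholder((128, 128), name="B")
    k = te.reduce_axis((0, 128), name="k")
    C = te.compute((128, 128), lambda i, j: te.sum(A[i, k] * B[k, j], axis=k), name="C")
    dag = ComputeDAG([A, B, C])
    print(dag)                   # the analyzed compute declaration
    print(dag.get_init_state())  # initial loop structure, no transform steps yet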
| https://github.com/zk-ml/tachikoma |
python/tvm/auto_scheduler/cost_model/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-import, redefined-builtin
""" Cost model that estimates the performance of programs """
from .cost_model import RandomModel
from .xgb_model import XGBModel
| https://github.com/zk-ml/tachikoma |
python/tvm/auto_scheduler/cost_model/cost_model.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Cost models that estimate the performance of programs """
import ctypes
import numpy as np
import tvm._ffi
from tvm.runtime import Object
from .. import _ffi_api
@tvm._ffi.register_object("auto_scheduler.CostModel")
class CostModel(Object):
"""The base class for cost model"""
@tvm._ffi.register_object("auto_scheduler.RandomModel")
class RandomModel(CostModel):
"""A model that returns random estimation for all inputs"""
def __init__(self):
self.__init_handle_by_constructor__(_ffi_api.RandomModel)
def update(self, inputs, results):
"""Update the cost model according to new measurement results (training data).
Parameters
----------
inputs : List[auto_scheduler.measure.MeasureInput]
The measurement inputs
results : List[auto_scheduler.measure.MeasureResult]
The measurement results
"""
_ffi_api.CostModelUpdate(self, inputs, results)
def predict(self, search_task, states):
"""Predict the scores of states
Parameters
----------
search_task : SearchTask
The search task of states
states : List[State]
The input states
Returns
-------
scores: List[float]
The predicted scores for all states
"""
return [x.value for x in _ffi_api.CostModelPredict(self, search_task, states)]
@tvm._ffi.register_func("auto_scheduler.cost_model.random_fill_float")
def random_fill_float(size, return_ptr):
"""Fills a c++ float array with random numbers in [0, 1]
Parameters
----------
size: int
The size of the array
return_ptr:
A pointer to a c++ float array
"""
if size == 0:
return
return_ptr = ctypes.cast(return_ptr, ctypes.POINTER(ctypes.c_float))
array_wrapper = np.ctypeslib.as_array(return_ptr, shape=(size,))
array_wrapper[:] = np.random.uniform(0, 1, (size,))
@tvm._ffi.register_object("auto_scheduler.PythonBasedModel")
class PythonBasedModel(CostModel):
"""Base class for cost models implemented in python"""
def __init__(self):
def update_func(inputs, results):
self.update(inputs, results)
def predict_func(task, states, return_ptr):
return_ptr = ctypes.cast(return_ptr, ctypes.POINTER(ctypes.c_float))
array_wrapper = np.ctypeslib.as_array(return_ptr, shape=(len(states),))
array_wrapper[:] = self.predict(task, states)
def predict_stage_func(task, states, return_ptr):
ret = self.predict_stages(task, states)
return_ptr = ctypes.cast(return_ptr, ctypes.POINTER(ctypes.c_float))
array_wrapper = np.ctypeslib.as_array(return_ptr, shape=ret.shape)
array_wrapper[:] = ret
self.__init_handle_by_constructor__(
_ffi_api.PythonBasedModel, update_func, predict_func, predict_stage_func
)
def update(self, inputs, results):
"""Update the cost model according to new measurement results (training data).
Parameters
----------
inputs : List[auto_scheduler.measure.MeasureInput]
The measurement inputs
results : List[auto_scheduler.measure.MeasureResult]
The measurement results
"""
raise NotImplementedError
def predict(self, task, states):
"""Predict the scores of states
Parameters
----------
task : SearchTask
The search task of states
states : List[State]
The input states
Returns
-------
scores: List[float]
The predicted scores for all states
"""
raise NotImplementedError
def predict_stages(self, task, states):
"""Predict the scores of all stages in states. This is the breakdown version of `predict`.
Parameters
----------
task : SearchTask
The search task of states
states : List[State]
The input states
Returns
-------
scores: List[float]
The predicted scores for all stages in all states in the packed format
Note
----
For faster data copy between c++ and python, the python part returns scores in a
single flattened array using a packed format. The c++ part then unpacks the flattened array.
The packed format is:
{
float scores[N]; // scores[i] is the score for states[i].
int n_stage_0; // the number of stages in states[0]
float stage_scores_0[n_stage_0]; // the scores for all stages in states[0]
int n_stage_1; // the number of stages in states[1]
float stage_scores_1[n_stage_1]; // the scores for all stages in states[1]
...
int n_stage_i; // the number of stages in states[i]
float stage_scores_i[n_stage_i]; // the scores for all stages in states[i]
... // until i == N - 1
}
To implement this format, we also store int as float, so we can store all numbers
into a single float array.
"""
raise NotImplementedError
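# A minimal subclass sketch (illustrative, not part of the original module):
# a trivial python-side cost model that scores every state with a constant,
# useful only as a template for real models.
class _ConstantModel(PythonBasedModel):  # hypothetical, for illustration only
    def update(self, inputs, results):
        pass  # nothing to learn

    def predict(self, task, states):
        return np.ones(len(states), dtype=np.float32)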
| https://github.com/zk-ml/tachikoma |
python/tvm/auto_scheduler/cost_model/xgb_model.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Cost model based on xgboost"""
import multiprocessing
import logging
from collections import defaultdict
import numpy as np
from tvm.autotvm.tuner.metric import max_curve
from .cost_model import PythonBasedModel
from ..feature import get_per_store_features_from_measure_pairs, get_per_store_features_from_states
from ..measure_record import RecordReader
xgb = None
logger = logging.getLogger("auto_scheduler")
class XGBDMatrixContext:
"""A global context to hold additional attributes of xgb.DMatrix"""
def __init__(self):
self.context_dict = defaultdict(dict)
def get(self, key, matrix, default=None):
"""
        Get an attribute of an xgb.DMatrix
Parameters
----------
key: str
The name of the attribute
matrix: xgb.DMatrix
The matrix
default: Optional[Any]
The default value if the item does not exist
"""
return self.context_dict[key].get(matrix.handle.value, default)
def set(self, key, matrix, value):
"""
        Set an attribute for an xgb.DMatrix
Parameters
----------
key: str
The name of the attribute
matrix: xgb.DMatrix
The matrix
value: Optional[Any]
The new value
"""
self.context_dict[key][matrix.handle.value] = value
dmatrix_context = XGBDMatrixContext()
class XGBModel(PythonBasedModel):
"""Train a XGBoost model to predict the normalized throughputs of programs.
Let the normalized throughput be the score of a program (higher is better). We predict
the (approximate) score of a program = the sum of the scores of all stages in this program.
i.e. score(P) = score_s0 + score_s1 + ... + score_sn,
where score_si is the score of Stage i in Program P.
    We extract features for each stage and let xgboost predict the score for each stage.
We then sum up the predictions as the score of the whole program.
We use RMSE as the loss function. i.e. loss(P, y) = 1/2 * (score(P) - y)^2,
where P is the program and y is the normalized throughput according to
the ground truth (measurement).
    XGBoost does not support this loss function because `score(P)` is a sum of the predictions
    of several samples, so we implement a custom loss function and call it pack-sum-rmse.
It is called "pack-sum" because we combine several samples into a "pack" and sum up
their predictions.
Parameters
----------
verbose_eval: int = 25
Print training log every `verbose_eval` iterations.
num_warmup_sample: int = 100
The minimum number of samples to start to use the trained model.
If the number of samples is less than this number, the model outputs random predictions.
seed: Optional[int]
The random seed
model_file: Optional[str]
        If not None, save the model to this file after every update.
adaptive_training: bool = False
Whether to use adaptive training, which reduces the training frequency when there are
too many logs.
"""
def __init__(
self,
verbose_eval=25,
num_warmup_sample=100,
seed=None,
model_file=None,
adaptive_training=False,
):
global xgb
try:
if xgb is None:
xgb = __import__("xgboost")
except ImportError:
# add "from Node" to silence
# "During handling of the above exception, another exception occurred"
raise ImportError(
"XGBoost is required for XGBModel. "
"Please install its python package first. "
"Help: (https://xgboost.readthedocs.io/en/latest/) "
) from None
self.xgb_params = {
"max_depth": 10,
"gamma": 0.001,
"min_child_weight": 0,
"eta": 0.2,
# todo(merrymercy): automatically decrease learning rate when the loss is too large
"n_gpus": 0,
"nthread": multiprocessing.cpu_count() // 2,
"verbosity": 0,
"seed": seed or 43,
"disable_default_eval_metric": 1,
}
self.bst = None
self.plan_size = 32
self.num_warmup_sample = num_warmup_sample
self.verbose_eval = verbose_eval
self.model_file = model_file
self.adaptive_training = adaptive_training
super().__init__()
# cache measurement input/result pairs and extracted features
self.inputs = []
self.results = []
self.last_train_length = 0
self.inputs_feature_cache = []
def update(self, inputs, results):
"""Update the cost model according to new measurement results (training data).
XGBoost does not support incremental training, so we re-train a new model every time.
Parameters
----------
inputs : List[MeasureInput]
The measurement inputs
results : List[MeasureResult]
The measurement results
"""
if len(inputs) <= 0:
return
assert len(inputs) == len(results)
self.inputs.extend(inputs)
self.results.extend(results)
if (
self.adaptive_training
and len(self.inputs) - self.last_train_length < self.last_train_length / 5
):
# Set a training threshold related to `last_train_length` to reduce the training
# overhead when there're too many logs
return
self.last_train_length = len(self.inputs)
# extract feature
n_cached = len(self.inputs_feature_cache)
features, normalized_throughputs, task_ids = get_per_store_features_from_measure_pairs(
self.inputs, self.results, skip_first_n_feature_extraction=n_cached
)
if n_cached > 0:
features = list(features)
features[:n_cached] = self.inputs_feature_cache
features = np.array(features, dtype=object)
self.inputs_feature_cache = features
dtrain = pack_sum_xgbmatrix(
features, normalized_throughputs, task_ids, normalized_throughputs
)
# train xgb model
self.bst = xgb.train(
self.xgb_params,
dtrain,
num_boost_round=10000,
obj=pack_sum_square_error,
callbacks=[
custom_callback(
stopping_rounds=50,
metric="tr-p-rmse",
fevals=[
pack_sum_rmse,
pack_sum_average_peak_score(self.plan_size),
],
evals=[(dtrain, "tr")],
maximize=False,
verbose_eval=self.verbose_eval,
)
],
)
# Update the model file if it has been set
if self.model_file:
self.save(self.model_file)
def predict(self, task, states):
"""Predict the scores of states
Parameters
----------
search_task : SearchTask
The search task of states
        states : List[State]
The input states
Returns
-------
scores: List[float]
The predicted scores for all states
"""
features = get_per_store_features_from_states(states, task)
if self.bst is not None and len(self.inputs) > self.num_warmup_sample:
dtest, pack_ids = feature_to_pack_sum_xgbmatrix(features)
raw_preds = self.bst.predict(dtest)
ret = predict_throughput_pack_sum(raw_preds, pack_ids)
else:
ret = np.random.uniform(0, 1, (len(states),))
# Predict -inf for invalid states that failed to be lowered.
for idx, feature in enumerate(features):
if feature.min() == feature.max() == 0:
ret[idx] = float("-inf")
return ret
def predict_stages(self, task, states):
"""Predict the scores of all stages in states. This is the breakdown version of `predict`.
Parameters
----------
search_task : SearchTask
The search task of states
        states : List[State]
The input states
Returns
-------
scores: List[float]
The predicted scores for all stages in all states in the packed format
Note
----
        For faster data copy between c++ and python, the python part returns scores in a
        single flattened array using a packed format. The c++ part then unpacks the flattened array.
The packed format is:
{
float scores[N]; // scores[i] is the score for states[i].
int n_stage_0; // the number of stages in states[0]
            float stage_scores_0[n_stage_0]; // the scores for all stages in states[0]
int n_stage_1; // the number of stages in states[1]
float stage_scores_1[n_stage_1]; // the scores for all stages in states[1]
...
int n_stage_i; // the number of stages in states[i]
            float stage_scores_i[n_stage_i]; // the scores for all stages in states[i]
            ... // until i == N - 1
}
To implement this format, we also store int as float, so we can store all numbers
into a single float array.
"""
features = get_per_store_features_from_states(states, task)
if self.bst is not None and len(self.inputs) > self.num_warmup_sample:
dtest, pack_ids = feature_to_pack_sum_xgbmatrix(features)
raw_preds = self.bst.predict(dtest)
breakdown = predict_throughput_pack_sum(raw_preds, pack_ids)
stage_scores = [[] for _ in range(len(states))]
for pred, pack_id in zip(raw_preds, pack_ids):
stage_scores[pack_id].append(pred)
for idx, stage_score in enumerate(stage_scores):
breakdown = np.append(breakdown, len(stage_score))
breakdown = np.concatenate((breakdown, np.array(stage_score)))
else:
breakdown = np.concatenate(
(
np.random.uniform(0, 1, (len(states),)),
np.zeros(
len(states),
),
)
)
        # Predict -inf for invalid states that failed to be lowered.
for idx, feature in enumerate(features):
if feature.min() == feature.max() == 0:
breakdown[idx] = float("-inf")
return breakdown
def update_from_file(self, file_name, n_lines=None):
"""Load measure records from a log file to update the cost model.
This function can be used to pre-train the cost model with history log files.
Parameters
----------
file_name: str
The filename
n_lines: Optional[int]
Only load first n lines of the log file
"""
inputs, results = RecordReader(file_name).read_lines(n_lines)
logger.info("XGBModel: Loaded %s measurement records from %s", len(inputs), file_name)
self.update(inputs, results)
def save(self, file_name: str):
"""Save the model to a file
Parameters
----------
file_name: str
The filename
"""
self.bst.save_model(file_name)
def load(self, file_name: str):
"""Load the model from a file
Parameters
----------
file_name: str
The filename
"""
if self.bst is None:
self.bst = xgb.Booster(self.xgb_params)
self.bst.load_model(file_name)
self.num_warmup_sample = -1
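# A minimal usage sketch (assuming a measurement log "records.json" exists):
# pre-train the model from history records, persist it, and reload it later.
#
#   model = XGBModel(num_warmup_sample=100)
#   model.update_from_file("records.json")  # pre-train from a log file
#   model.save("xgb.model")                 # persist the booster
#   ...
#   model = XGBModel()
#   model.load("xgb.model")                 # load() also disables warmup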
def feature_to_pack_sum_xgbmatrix(xs):
"""Convert an extracted multi-stage feature vector to a xgbmatrx in pack-sum format
Parameters
----------
xs: np.ndarray
The feature vector
Returns
-------
dmatrix: xgb.DMatrix
The DMatrix
pack_ids: List[int]
pack ids information
"""
x_flatten = []
pack_ids = []
for ct, x in enumerate(xs):
for row in x:
x_flatten.append(row)
pack_ids.append(ct)
return xgb.DMatrix(np.array(x_flatten)), pack_ids
def pack_sum_xgbmatrix(xs, ys, gids=None, weights=None):
"""Convert (feature, label) pairs into a xgb matrix with pack-sum format
Parameters
----------
xs: np.ndarray
The feature vector
ys: np.ndarray
        The normalized throughput
gids: Optional[List[int]]
Group id (task id)
weights: Optional[np.ndarray]
The weight of samples
Returns
-------
dmatrix: xgb.DMatrix
The DMatrix with pack-sum information
"""
if gids is not None:
# sort by group
indices = gids.argsort()
xs, ys = xs[indices], ys[indices]
group_sizes = np.bincount(gids)
if weights is not None:
weights = weights[indices]
else:
# assume it has only one group
group_sizes = [len(xs)]
x_flatten = []
y_flatten = []
weights_flatten = []
pack_ids = []
if weights is not None:
for ct, (x, y, w) in enumerate(zip(xs, ys, weights)):
for row in x:
x_flatten.append(row)
y_flatten.append(y)
weights_flatten.append(w)
pack_ids.append(ct)
else:
for ct, (x, y) in enumerate(zip(xs, ys)):
for row in x:
x_flatten.append(row)
y_flatten.append(y)
pack_ids.append(ct)
ret = xgb.DMatrix(np.array(x_flatten), y_flatten)
if weights is not None:
ret.set_weight(weights_flatten)
dmatrix_context.set("pack_ids", ret, np.array(pack_ids))
dmatrix_context.set("group_sizes", ret, group_sizes)
return ret
def predict_throughput_pack_sum(raw_preds, pack_ids):
"""Predict the throughputs for predictions in pack-sum format
Parameters
----------
raw_preds: np.ndarray
The raw predictions
pack_ids: List[int]
The pack id for predictions
Returns
-------
throughputs: np.ndarray
The throughput
"""
sum_pred = np.bincount(pack_ids, weights=raw_preds)
return sum_pred
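# An illustrative check (not part of the TVM API) of the pack-sum aggregation:
# np.bincount with weights sums the per-stage predictions within each pack,
# matching score(P) = sum of the scores of its stages.
def _pack_sum_aggregation_sketch():
    raw_preds = np.array([0.2, 0.3, 0.5, 0.1])  # one prediction per stage
    pack_ids = [0, 0, 1, 1]  # stage -> state (pack) membership
    # state 0 -> 0.2 + 0.3 = 0.5, state 1 -> 0.5 + 0.1 = 0.6
    assert np.allclose(predict_throughput_pack_sum(raw_preds, pack_ids), [0.5, 0.6])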
def pack_sum_square_error(preds, dtrain):
"""Implement square error loss on pack-sum format as
a custom objective function for xgboost.
Parameters
----------
preds: np.ndarray
        The predictions
dtrain: xgb.DMatrix
The training set
Returns
-------
gradient: np.ndarray
hessian: np.ndarray
gradient and hessian according to the xgboost format
"""
pack_ids = dmatrix_context.get("pack_ids", dtrain)
weight = dtrain.get_weight()
sum_pred = np.bincount(pack_ids, weights=preds)
x = sum_pred[pack_ids]
y = dtrain.get_label()
gradient = x - y
hessian = np.ones_like(gradient)
if len(weight) == 0:
return gradient, hessian
return gradient * weight, hessian * weight
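# Derivation sketch: with one label y per pack and score(P) = sum of the pack's
# row predictions, the loss 1/2 * (score(P) - y)^2 gives, for every row j in the
# pack, d(loss)/d(preds_j) = score(P) - y and a diagonal hessian entry of 1.
# The illustrative check below (not part of the TVM API) verifies this gradient
# against a central finite difference on toy data.
def _pack_sum_gradient_check_sketch(eps=1e-4):
    preds = np.array([0.2, 0.3, 0.5])
    pack_ids = np.array([0, 0, 1])
    pack_labels = np.array([1.0, 0.4])  # one ground-truth label per pack
    def loss(p):
        return 0.5 * np.sum((np.bincount(pack_ids, weights=p) - pack_labels) ** 2)
    # analytic gradient, as computed in pack_sum_square_error above
    analytic = np.bincount(pack_ids, weights=preds)[pack_ids] - pack_labels[pack_ids]
    for j in range(len(preds)):
        step = np.zeros_like(preds)
        step[j] = eps
        numeric = (loss(preds + step) - loss(preds - step)) / (2 * eps)
        assert np.isclose(numeric, analytic[j], atol=1e-5)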
def pack_sum_rmse(raw_preds, labels):
"""Evaluate RMSE (rooted mean square error) in the pack-sum format
Parameters
----------
raw_preds: np.ndarray
The raw prediction
labels: xgb.DMatrix
        The ground-truth label matrix
Returns
-------
name: str
score: float
The name and score of this metric
"""
pack_ids = dmatrix_context.get("pack_ids", labels)
preds = predict_throughput_pack_sum(raw_preds, pack_ids)[pack_ids]
return "p-rmse", np.sqrt(np.mean(np.square((preds - labels.get_label()))))
def pack_sum_average_peak_score(N):
"""Return the evaluation function for average-peak-score@N
Parameters
----------
N: int
The "N" in "average-peak-score@N"
Returns
-------
The evaluation function
"""
def feval(preds, labels):
"""Evaluate average-peak-score@N in the pack-sum format
Parameters
----------
        preds: np.ndarray
            The raw predictions
        labels: xgb.DMatrix
            The ground-truth label matrix
Returns
-------
name: str
score: float
The name and score of this metric
"""
group_sizes = dmatrix_context.get("group_sizes", labels, [len(preds)])
pack_ids = dmatrix_context.get("pack_ids", labels)
preds = predict_throughput_pack_sum(preds, pack_ids)
labels = (
np.bincount(pack_ids, weights=labels.get_label())
/ np.unique(pack_ids, return_counts=True)[1]
)
scores = []
offset = 0
for size in group_sizes:
preds_group = preds[offset : offset + size]
labels_group = labels[offset : offset + size]
offset += size
trials = np.argsort(preds_group)[::-1][:N]
trial_scores = labels_group[trials]
curve = max_curve(trial_scores) / np.max(labels_group)
scores.append(np.mean(curve))
return "a-peak@%d" % N, np.mean(scores)
return feval
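# An illustrative sketch (not part of the TVM API) of average-peak-score@N on
# toy data: rank states by the model's predictions, take the top-N, and average
# the running best of their ground-truth throughputs normalized by the overall
# best. max_curve (imported above) computes the same running maximum.
def _average_peak_score_sketch(N=2):
    preds = np.array([0.9, 0.1, 0.5])  # model scores for 3 states
    labels = np.array([0.7, 1.0, 0.2])  # ground-truth normalized throughputs
    trials = np.argsort(preds)[::-1][:N]  # model picks states 0 and 2
    trial_scores = labels[trials]  # [0.7, 0.2]
    curve = np.maximum.accumulate(trial_scores) / np.max(labels)  # running best
    return np.mean(curve)  # (0.7 + 0.7) / 2 = 0.7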
def custom_callback(
stopping_rounds,
metric,
fevals,
evals=(),
log_file=None,
maximize=False,
verbose_eval=True,
skip_every=2,
):
"""Callback function for xgboost to support multiple custom evaluation functions"""
# pylint: disable=import-outside-toplevel
from xgboost.core import EarlyStopException
from xgboost.callback import _fmt_metric
try:
from xgboost.training import aggcv
except ImportError:
from xgboost.callback import _aggcv as aggcv
state = {}
metric_shortname = metric.split("-")[1]
def init(env):
"""internal function"""
bst = env.model
state["maximize_score"] = maximize
state["best_iteration"] = 0
if maximize:
state["best_score"] = float("-inf")
else:
state["best_score"] = float("inf")
if bst is not None:
if bst.attr("best_score") is not None:
state["best_score"] = float(bst.attr("best_score"))
state["best_iteration"] = int(bst.attr("best_iteration"))
state["best_msg"] = bst.attr("best_msg")
else:
bst.set_attr(best_iteration=str(state["best_iteration"]))
bst.set_attr(best_score=str(state["best_score"]))
else:
assert env.cvfolds is not None
def callback(env):
"""internal function"""
if not state:
init(env)
bst = env.model
i = env.iteration
cvfolds = env.cvfolds
res_dict = {}
if i % skip_every == 1:
return
##### evaluation #####
if cvfolds is not None:
for feval in fevals:
tmp = aggcv([f.eval(i, feval) for f in cvfolds])
for k, mean, std in tmp:
res_dict[k] = [mean, std]
else:
for feval in fevals:
bst_eval = bst.eval_set(evals, i, feval)
res = [x.split(":") for x in bst_eval.split()]
for kv in res[1:]:
res_dict[kv[0]] = [float(kv[1])]
eval_res = []
keys = list(res_dict.keys())
keys.sort(key=lambda x: x if metric_shortname not in x else "a" + x)
for key in keys:
v = res_dict[key]
eval_res.append([key] + v)
##### print eval result #####
if not isinstance(verbose_eval, bool) and verbose_eval and i % verbose_eval == 0:
infos = ["XGB iter: %3d" % i]
for item in eval_res:
if "null" in item[0]:
continue
infos.append("%s: %.6f" % (item[0], item[1]))
logger.debug("\t".join(infos))
if log_file:
with open(log_file, "a") as fout:
fout.write("\t".join(infos) + "\n")
##### choose score and do early stopping #####
score = None
for item in eval_res:
if item[0] == metric:
score = item[1]
break
assert score is not None
best_score = state["best_score"]
best_iteration = state["best_iteration"]
maximize_score = state["maximize_score"]
if (maximize_score and score > best_score) or (not maximize_score and score < best_score):
msg = "[%d] %s" % (env.iteration, "\t".join([_fmt_metric(x) for x in eval_res]))
state["best_msg"] = msg
state["best_score"] = score
state["best_iteration"] = env.iteration
# save the property to attributes, so they will occur in checkpoint.
if env.model is not None:
env.model.set_attr(
best_score=str(state["best_score"]),
best_iteration=str(state["best_iteration"]),
best_msg=state["best_msg"],
)
elif env.iteration - best_iteration >= stopping_rounds:
best_msg = state["best_msg"]
if verbose_eval and env.rank == 0:
logger.debug("XGB stopped. Best iteration: %s ", best_msg)
raise EarlyStopException(best_iteration)
return callback
| https://github.com/zk-ml/tachikoma |
python/tvm/auto_scheduler/dispatcher.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
The global context that dispatches best schedules to workloads.
In auto-scheduler, a state (loop_state.py::StateObject) saves the
schedule configuration by its transform_steps, so a state is used
as a schedule configuration here.
"""
# pylint: disable=invalid-name
import logging
import pathlib
from collections.abc import Iterable
import numpy as np
from tvm.contrib.utils import tempdir
from tvm.tir.expr import FloatImm
from .cost_model import RandomModel, XGBModel
from .measure import LocalRPCMeasureContext
from .measure_record import RecordToFile, load_records
from .search_policy import PreloadMeasuredStates, SketchPolicy
from .search_task import SearchTask, TuningOptions
from .utils import calc_workload_dis_factor, decode_workload_key
logger = logging.getLogger("auto_scheduler")
class DispatchContext(object):
"""
Base class of dispatch context.
"""
current = None
def __init__(self):
self._old_ctx = DispatchContext.current
def query(self, target, workload_key, has_complex_op, dag, func_name):
"""
Query the context to get the specific config for a workload.
If this function cannot find the result inside this context, it will query the result
from the upper contexts.
Parameters
----------
target: Target
The current target
workload_key : str
The workload key
has_complex_op: bool
Whether this workload has at least one complex op.
dag: ComputeDAG
The ComputeDAG of the workload.
func_name: str
The function name of this workload.
Returns
-------
state : StateObject
The state that stores schedule configuration for the workload
"""
ret = self._query_inside(target, workload_key, func_name)
if ret is None:
ret = self._old_ctx.query(target, workload_key, has_complex_op, dag, func_name)
return ret
def update(self, target, workload_key, state):
"""
Update the config for a workload
Parameters
----------
target: Target
The current target
workload_key : str
The current workload_key.
state : StateObject
The state that stores schedule configuration for the workload
"""
raise NotImplementedError()
def _query_inside(self, target, workload_key, func_name):
"""
Query the context to get the specific config for a workload.
This function only query config inside this context.
Parameters
----------
target: Target
The current target
workload_key : str
The current workload_key.
func_name: str
The function name of this workload.
Returns
-------
state : StateObject
The schedule configuration for the workload
"""
raise NotImplementedError()
def __enter__(self):
self._old_ctx = DispatchContext.current
DispatchContext.current = self
return self
def __exit__(self, ptype, value, trace):
DispatchContext.current = self._old_ctx
class ApplyHistoryBest(DispatchContext):
"""
Apply the history best config
Parameters
----------
records : str, list of str, or iterator of (auto_scheduler.measure.MeasureInput,\
auto_scheduler.measure.MeasureResult)
Collection of tuning records.
        If it is a str, it should be the filename of a records log file.
Each row of this file is an encoded record pair. If it is an iterator,
it can either be a set of str filenames which will be applied jointly,
or a set of (input, result) tuples.
n_lines: Optional[int]
if it is not None, only load the first `n_lines` lines of log.
include_compatible: bool
When set to True, compatible records will also be considered.
"""
def __init__(self, records, n_lines=None, include_compatible=False):
super(ApplyHistoryBest, self).__init__()
self.include_compatible = include_compatible
# Dict[str (target key),
# Dict[str (workload hash),
# Dict[tuple (workload args), tuple (State, cost)]]]
self.best_by_targetkey = {}
self.best_by_model = {}
self._best_user_defined = {}
self.load(records, n_lines)
@staticmethod
def get_workload_entry(best_records, target_key, workload_key):
"""Get the entry of the target key and workload key hash in the given best record map.
Parameters
----------
best_records: Dict[str, Dict[str, Dict[str, Any]]]
The best record map.
target_key: str
The first key to the best_records.
workload_key: str
The workload key that can be decoded to workload hash and args.
Returns
-------
entry: Dict[str, Any]
The entry in best_records with target key and workload hash.
workload_hash: str
The workload hash decoded from workload_key.
workload_args: Tuple[Any, ...]
The hashable tuple of workload args decoded from workload_key.
"""
workload_hash, workload_args = decode_workload_key(workload_key)
if target_key not in best_records:
best_records[target_key] = {}
if workload_hash not in best_records[target_key]:
best_records[target_key][workload_hash] = {}
return best_records[target_key][workload_hash], workload_hash, workload_args
def load(self, records, n_lines=None):
"""Load records to this dispatch context
Parameters
----------
records : str or iterator of (auto_scheduler.measure.MeasureInput,\
auto_scheduler.measure.MeasureResult)
Collection of tuning records.
            If it is a str, it should be the filename of a records log file.
Each row of this file is an encoded record pair. Otherwise, it is an iterator.
n_lines: Optional[int]
if it is not None, only load the first `n_lines` lines of log
"""
joint_records = []
if not isinstance(records, Iterable) or isinstance(records, str):
records = [records]
for rec in records:
if isinstance(rec, pathlib.Path):
rec = str(rec)
if isinstance(rec, str):
rec = load_records(rec)
joint_records += rec
else:
if rec is not None:
joint_records.append(rec)
if not joint_records:
return
best_by_targetkey = self.best_by_targetkey
best_by_model = self.best_by_model
counter = 0
for inp, res in joint_records:
if n_lines is not None and counter >= n_lines:
break
counter += 1
if res.error_no != 0:
continue
costs = [x.value for x in res.costs if isinstance(x, FloatImm)]
cost = np.mean(costs)
# use target keys in tvm target system as key to build best map
for k in inp.task.target.keys:
entry, _, workload_args = self.get_workload_entry(
best_by_targetkey, k, inp.task.workload_key
)
if workload_args not in entry:
entry[workload_args] = (inp.state, cost)
else:
_, other_cost = entry[workload_args]
if other_cost > cost:
entry[workload_args] = (inp.state, cost)
# use model as key to build best map
entry, _, workload_args = self.get_workload_entry(
best_by_model, inp.task.target.model, inp.task.workload_key
)
if workload_args not in entry:
if inp.task.target.model != "unknown":
entry[workload_args] = (inp.state, cost)
else:
_, other_cost = entry[workload_args]
if other_cost > cost:
entry[workload_args] = (inp.state, cost)
logger.debug("Finish loading %d records", counter)
def _query_inside(self, target, workload_key, func_name):
if target is None:
raise RuntimeError(
"Need a target context to find the history best. "
"Hint: If your target is llvm, use `with tvm.target.create('llvm'):`"
" above the dispatcher call. So does other target. "
)
def match_record(best_records, target_key, workload_key):
"""The helper function to match the record in the given map
and return the matched state, or None if no match.
"""
ret = None
entry, workload_hash, workload_args = self.get_workload_entry(
best_records, target_key, workload_key
)
if workload_args in entry:
ret = entry[workload_args][0]
elif self.include_compatible:
best_cost = float("inf")
for args, val in entry.items():
dis_f = calc_workload_dis_factor(
(workload_hash, workload_args), (workload_hash, args)
)
if dis_f == float("inf"):
continue
state, cost = val
cost *= dis_f
if ret is None or cost < best_cost:
best_cost = cost
ret = state
return ret
# first try matching by model
ret = match_record(self._best_user_defined, target.model, workload_key)
if ret is not None:
return ret
ret = match_record(self.best_by_model, target.model, workload_key)
if ret is not None:
return ret
# then try matching by target key
for k in target.keys:
ret = match_record(self._best_user_defined, k, workload_key)
if ret is not None:
return ret
ret = match_record(self.best_by_targetkey, k, workload_key)
if ret is not None:
return ret
return None
def update(self, target, workload_key, state):
entry, _, workload_args = self.get_workload_entry(
self._best_user_defined, target.model, workload_key
)
entry[workload_args] = (state, 1)
for k in target.keys:
entry, _, _ = self.get_workload_entry(self._best_user_defined, k, workload_key)
entry[workload_args] = (state, 1)
class ApplyHistoryBestOrSample(ApplyHistoryBest):
"""
Apply the history best config, or sample a valid schedule if no config is found.
Parameters
----------
records : str or iterator of (auto_scheduler.measure.MeasureInput,\
auto_scheduler.measure.MeasureResult)
Collection of tuning records.
        If it is a str, it should be the filename of a records log file.
Each row of this file is an encoded record pair. Otherwise, it is an iterator.
sample_simple_workloads: bool
When False, sampling will not apply to simple workloads (w/o reduction).
cost_model_file: str
The filename of the pre-trained XGBoost cost model. If not present, then random
model will be used.
num_measure: int
        Measure the top-N ranked sampled schedules on the device. The default -1 means
        no measurement; simply return the top-1 schedule ranked by the cost model.
"""
def __init__(
self, records, sample_simple_workloads=False, cost_model_file=None, num_measure=-1
):
self.sample_simple_workloads = sample_simple_workloads
self.num_measure = num_measure
self.log_dir = tempdir()
if cost_model_file is None:
self.cost_model = RandomModel()
else:
self.cost_model = XGBModel()
self.cost_model.load(cost_model_file)
super(ApplyHistoryBestOrSample, self).__init__(
records, n_lines=None, include_compatible=True
)
def query(self, target, workload_key, has_complex_op, dag, func_name):
if has_complex_op or self.sample_simple_workloads:
ret = self._query_inside(target, workload_key, func_name)
else:
ret = super(ApplyHistoryBestOrSample, self)._query_inside(
target, workload_key, func_name
)
if ret is None:
ret = self._old_ctx.query(target, workload_key, has_complex_op, dag, func_name)
return ret
def _query_inside(self, target, workload_key, func_name):
ret = super(ApplyHistoryBestOrSample, self)._query_inside(target, workload_key, func_name)
if ret is not None:
return ret
# Sampling valid schedules when no existing records can be used.
task = SearchTask(workload_key=workload_key, target=target)
measure_ctx = LocalRPCMeasureContext(min_repeat_ms=300)
log_file = self.log_dir.relpath("%s.log" % decode_workload_key(workload_key)[0])
while ret is None:
tune_option = TuningOptions(
num_measure_trials=self.num_measure,
runner=measure_ctx.runner,
measure_callbacks=[RecordToFile(log_file)],
verbose=0,
)
search_policy = SketchPolicy(
task,
self.cost_model,
params={
"eps_greedy": 0.01,
"sample_init_min_population": 64,
"evolutionary_search_num_iters": 0,
},
init_search_callbacks=[PreloadMeasuredStates(log_file)],
verbose=0,
)
task.tune(tune_option, search_policy)
# Load the sampled records and query again.
self.load(log_file)
ret = super(ApplyHistoryBestOrSample, self)._query_inside(
target, workload_key, func_name
)
del measure_ctx
return ret
class FallbackContext(DispatchContext):
"""
A fallback dispatch context.
This is used as the root context.
"""
def __init__(self):
super(FallbackContext, self).__init__()
self.memory = {}
# Verbose level:
# 0: Completely silent.
# 1: Warning the missing configs for querying complex tasks.
# 2: Warning the missing configs for querying all tasks.
self.verbose = 1
# a set to prevent print duplicated message
self.messages = set()
def query(self, target, workload_key, has_complex_op, dag, func_name):
key = (str(target), workload_key)
if key in self.memory:
return self.memory[key]
if self.verbose == 2 or (has_complex_op and self.verbose == 1):
msg = (
f"-----------------------------------\n"
f"{func_name}\n"
f"Cannot find tuned schedules for target={target}, workload_key={workload_key}. "
f"A fallback TOPI schedule is used, "
f"which may bring great performance regression or even compilation failure. "
f"Compute DAG info:\n{dag}"
)
if msg not in self.messages:
self.messages.add(msg)
logger.warning(msg)
state = None
# cache this config to avoid duplicated warning message
self.memory[key] = state
return state
def _query_inside(self, target, workload_key, func_name):
_ = target = workload_key = func_name
raise RuntimeError("This function should never be called")
def update(self, target, workload_key, state):
key = (str(target), workload_key)
self.memory[key] = state
DispatchContext.current = FallbackContext()
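# A minimal usage sketch (assuming a tuning log "records.json" and a built
# relay module exist; mod, target, and params are placeholders): compile with
# the best tuned schedules applied by entering the dispatch context.
#
#   with auto_scheduler.ApplyHistoryBest("records.json"):
#       with tvm.transform.PassContext(
#           opt_level=3, config={"relay.backend.use_auto_scheduler": True}
#       ):
#           lib = relay.build(mod, target=target, params=params)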
| https://github.com/zk-ml/tachikoma |
python/tvm/auto_scheduler/feature.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""""
Python API for Feature extraction. The extracted features vector are used by cost models.
We extract one feature vector per BufferStoreNode statement in a TIR Stmt,
so we call this feature as "per-store" feature.
The cost model also does prediction for each BufferStoreNode statement and aggregates
the predicted score of each BufferStoreNode as the score of a TIR Stmt.
The feature specification is defined by `src/auto_scheduler/feature.cc::FeatureSet`
"""
from typing import List, Tuple, Union, Optional, Dict
import struct
import numpy as np
from .loop_state import State, StateObject
from .measure import MeasureInput, MeasureResult
from . import _ffi_api
from ..tir import PrimFunc
# The maximum number of extracted buffers for one statement
DEFAULT_MAX_N_BUFS = 5
# The length of the feature vector
DEFAULT_FEATURE_VEC_LEN = 164
# The size of int and float in bytes
SIZE_OF_INT32 = 4
SIZE_OF_FLOAT32 = 4
def unpack_feature(byte_arr: bytearray) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""Unpack the flatten feature (in byte array format) from c++
Parameters
----------
byte_arr: bytearray
The two-dimensional feature vector in serialized byte array format
Returns
-------
features: np.ndarray
Feature vectors
normalized_throughputs: np.ndarray
Normalized throughputs
task_ids: np.ndarray
Task ids
Note
----
    For faster data copy between c++ and python, the c++ part returns features in a single
    flattened array using a packed format. The python part then unpacks the flattened array.
The packed format for n records is:
{
int n;
int sizes[n+2]; // The sizes for the following arrays
float features_0[size[0]]; // The features for record 0
float features_1[size[1]]; // The features for record 1
...
float features_i[size[i]]; // The features for record i
... // until i == n - 1
float throughputs[sizes[n]]; // The normalized throughputs for n records
int task_ids[size[n+1]]; // The task ids for n records
}
To implement this format, we also store int as float, so we can store all numbers
into a single float array.
"""
vec_len = DEFAULT_FEATURE_VEC_LEN
# unpack sizes
offset = 0
n = struct.unpack_from("1i", byte_arr, offset=offset)[0]
offset += SIZE_OF_INT32
sizes = struct.unpack_from("%di" % (n + 2), byte_arr, offset=offset)
offset += SIZE_OF_INT32 * (n + 2)
# unpack features
features = []
for size in sizes[:-2]:
row = []
# Now, we need to unpack the feature for multiple statements.
# The format is:
# {
# int n_stage; // The number of stages
# float feature_vecs[n_stage][vec_len] // The feature vector for each stage
# }
# where vec_len can be calculated by `(size - 1) / n_stmts`
if size == 0:
# failed during lowering
features.append(np.zeros((1, vec_len)))
else:
n_stmts = struct.unpack_from("f", byte_arr, offset=offset)
offset += SIZE_OF_FLOAT32
n_stmts = int(n_stmts[0] + 0.5)
tmp_vec_len = (size - 1) // n_stmts
assert (
tmp_vec_len == vec_len
), "The length of feature vector is wrong. Expected %d but got %d." % (
vec_len,
tmp_vec_len,
)
assert tmp_vec_len * n_stmts == size - 1
for _ in range(n_stmts):
x = struct.unpack_from("%df" % vec_len, byte_arr, offset=offset)
offset += vec_len * SIZE_OF_FLOAT32
row.append(x)
features.append(np.array(row))
# unpack normalized_throughputs
m = sizes[-2]
normalized_throughputs = struct.unpack_from("%df" % m, byte_arr, offset=offset)
offset += m * SIZE_OF_FLOAT32
# unpack task_ids
m = sizes[-1]
task_ids = struct.unpack_from("%di" % m, byte_arr, offset=offset)
offset += m * SIZE_OF_INT32
assert offset == len(byte_arr), "%d vs %d" % (offset, len(byte_arr))
return np.array(features, dtype=object), np.array(normalized_throughputs), np.array(task_ids)
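# The packing counterpart below is an illustrative sketch (not part of the TVM
# API) of how the c++ side lays out the byte array that unpack_feature decodes.
# Note that per-record stage counts are stored as floats, and round-tripping
# through unpack_feature requires each feature matrix to have
# DEFAULT_FEATURE_VEC_LEN columns.
def _pack_feature_sketch(features, throughputs, task_ids):
    n = len(features)
    sizes = [1 + f.size for f in features] + [len(throughputs), len(task_ids)]
    buf = struct.pack("%di" % (1 + n + 2), n, *sizes)
    for f in features:
        # each record starts with its number of statements, stored as a float
        buf += struct.pack("%df" % (1 + f.size), float(f.shape[0]), *f.ravel())
    buf += struct.pack("%df" % len(throughputs), *throughputs)
    buf += struct.pack("%di" % len(task_ids), *task_ids)
    return bytearray(buf)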
def get_per_store_features_from_file(
filename: str, max_lines: int, max_n_bufs: Optional[int] = None
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""Get per-store features from a log file
Parameters
----------
filename: str
The input filename
max_lines: int
Only extract the first n lines of the file
max_n_bufs: Optional[int]
The maximum number of extracted buffers for one statement
Returns
-------
features: np.ndarray
Feature vectors
normalized_throughputs: np.ndarray
Normalized throughputs
task_ids: np.ndarray
Task ids
"""
byte_arr = _ffi_api.GetPerStoreFeaturesFromFile(
filename, max_lines, max_n_bufs or DEFAULT_MAX_N_BUFS
)
return unpack_feature(byte_arr)
def get_per_store_features_from_measure_pairs(
inputs: List[MeasureInput],
results: List[MeasureResult],
skip_first_n_feature_extraction: int = 0,
max_n_bufs: Optional[int] = None,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""Get per-store features from measurement input/result pairs
Parameters
----------
inputs: List[MeasureInput]
The measure inputs
results: List[MeasureResult]
The measure results
skip_first_n_feature_extraction: int
Skip feature extraction for the first n states
max_n_bufs: int
The maximum number of extracted buffers for one statement
Returns
-------
features: np.ndarray
Feature vectors
normalized_throughputs: np.ndarray
Normalized throughputs
task_ids: np.ndarray
Task ids
"""
byte_arr = _ffi_api.GetPerStoreFeaturesFromMeasurePairs(
inputs, results, skip_first_n_feature_extraction, max_n_bufs or DEFAULT_MAX_N_BUFS
)
return unpack_feature(byte_arr)
def get_per_store_features_from_states(
states: List[Union[State, StateObject]], task: "SearchTask", max_n_bufs: Optional[int] = None
) -> np.ndarray:
"""Get per-store features from measurement input/result pairs
Parameters
----------
states: List[Union[State, StateObject]]
The input states
task: SearchTask
The search task of the input states
max_n_bufs: Optional[int]
The maximum number of extracted buffers for one statement
Returns
-------
features: np.ndarray
Feature vectors
"""
    if isinstance(states[0], State):
        state_objects = [s.state_object for s in states]
    elif isinstance(states[0], StateObject):
        state_objects = states
    else:
        raise ValueError("The elements of states must be State or StateObject")
byte_arr = _ffi_api.GetPerStoreFeaturesFromStates(
state_objects, task, max_n_bufs or DEFAULT_MAX_N_BUFS
)
return unpack_feature(byte_arr)[0]
def get_per_store_feature_names(max_n_bufs: Optional[int] = None) -> List[str]:
"""Get the name of every element in the feature vector. Use this for debug and inspection.
Parameters
----------
max_n_bufs: int
The maximum number of extracted buffers for one statement
Returns
-------
names: List[str]
The names of elements in the flatten feature vector
"""
return _ffi_api.GetPerStoreFeatureNames(max_n_bufs or DEFAULT_MAX_N_BUFS)
def features_from_primfunc(
func: PrimFunc,
cache_line_bytes: int = 64,
max_n_bufs: Optional[int] = None,
log_scale: bool = False,
) -> Optional[np.ndarray]:
"""Extract performance features from a PrimFunc.
Parameters
----------
func: PrimFunc
PrimFunc from which features will be extracted. Each store operation to
a unique buffer in the function will result in one row of features in
the output.
cache_line_bytes: int, optional
Size of a cache line in bytes. Defaults to 64 which is the size for
most x86 processors.
max_n_bufs: int, optional
Maximum number of buffers in generated features. This determines the
length of the resulting feature vector.
log_scale: bool
Should entries in the feature vector be scaled by log2(x + 1). Defaults
to False. Use True if using features with a cost model.
Returns
-------
Optional[np.ndarray]
Output features, one row per store into a unique buffer statement in `func`.
"""
return _ffi_api.FeaturesFromPrimFunc(
func, cache_line_bytes, max_n_bufs or DEFAULT_MAX_N_BUFS, log_scale
).numpy()
def named_features_from_primfunc(
func: PrimFunc,
cache_line_bytes: int = 64,
max_n_bufs: Optional[int] = None,
log_scale: bool = False,
) -> Optional[Dict[str, np.ndarray]]:
"""Extract performance features and associated names from a PrimFunc.
Parameters
----------
func: PrimFunc
PrimFunc from which features will be extracted. Each store operation to
a unique buffer in the function will result in one row of features in
the output.
cache_line_bytes: int, optional
Size of a cache line in bytes. Defaults to 64 which is the size for
most x86 processors.
max_n_bufs: int, optional
Maximum number of buffers in generated features. This determines the
length of the resulting feature vector.
log_scale: bool
Should entries in the feature vector be scaled by log2(x + 1). Defaults
to False. Use True if using features with a cost model.
Returns
-------
Optional[Dict[str, np.ndarray]]
Mapping from feature name to features. One element per store into a
unique buffer statement in `func`.
"""
features = features_from_primfunc(func, cache_line_bytes, max_n_bufs, log_scale)
names = get_per_store_feature_names(max_n_bufs)
return {name: features[:, i] for i, name in enumerate(names)}
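# A minimal usage sketch (illustrative; the identifiers below are examples, not
# part of this module): extract per-store features from a simple matmul PrimFunc
# built with te.create_prim_func.
#
#   import tvm
#   from tvm import te
#   A = te.placeholder((128, 128), name="A")
#   B = te.placeholder((128, 128), name="B")
#   k = te.reduce_axis((0, 128), name="k")
#   C = te.compute((128, 128), lambda i, j: te.sum(A[i, k] * B[j, k], axis=k))
#   func = te.create_prim_func([A, B, C])
#   feats = features_from_primfunc(func)        # one row per buffer store
#   named = named_features_from_primfunc(func)  # dict: feature name -> column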
| https://github.com/zk-ml/tachikoma |
python/tvm/auto_scheduler/loop_state.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-import
"""
The definition of the "state" in the search.
Each LoopState corresponds to a schedule for its ComputeDAG.
A LoopState consists of: 1. a current loop structure; 2. a list of transformation steps used to
construct the loop structure.
The loop structure keeps a preview of what the schedule will finally look like after lowering the
current state (e.g. number of iterators, the extent of each iterator, the compute_at locations
...).
During the schedule search process, the loop structure can provide search policy with necessary
information on how to manipulate the current state.
The transform history is a sequence of `TransformStep` which will finally be mapped to TVM
schedule primitives. The steps are also used for the serialization of a state.
The LoopState can be seen as a lightweight loop structure IR specifically for schedule search.
We don't use the existing TVM IR but instead extend a new structure on it because:
1. We want fast incremental changes to the loop structures. The search policy needs to get the
immediate loop structure updates rather than waiting for TVM lowering;
2. We want serializable transform history for replay, backtracking, and mutation;
3. We may create some macro schedule primitives that represent the combination of several
TVM schedule primitives.
When the search is finished, we will lower the state to TVM IR with TVM's schedule primitives.
Since we share a lot of common objects during search, the transformation is implemented in
copy-on-write style. All objects are immutable, which is similar to TVM IR.
"""
import tvm._ffi
from tvm.te.tensor import Operation, Tensor
from tvm.runtime import Object
from . import _ffi_api
@tvm._ffi.register_object("auto_scheduler.Iterator")
class Iterator(Object):
"""A loop iterator structure."""
@tvm._ffi.register_object("auto_scheduler.Stage")
class Stage(Object):
"""A stage in the compute declaration. Similar to tvm.te.schedule.Stage."""
# Static trans table for compute_at location
# This is used to transform the compute_at location to C++ enum
COMPUTE_AT_TRANS_TABLE = {"root": 0, "inlined": 1, "iter": 2}
@tvm._ffi.register_object("auto_scheduler.State")
class StateObject(Object):
"""The internal State object"""
def __eq__(self, other):
return _ffi_api.StateEqual(self, other)
class State:
"""
A state in the search process. It consists of the current loop structure
and a list of transformation steps used to construct it.
Each State corresponds to a specific schedule for its ComputeDAG.
Parameters
----------
state_object : StateObject
The StateObject corresponding to C++ internal State object.
dag : ComputeDAG
The original ComputeDAG of this State.
Notes
-----
    This is a wrapper class of StateObject to deal with the copy-on-write property.
"""
# Static trans table for thread bind and annotation
# This is used to transform the annotation name to C++ enum
ANNOTATION_TRANS_TABLE = {
"none": 0,
"unroll": 1,
"vectorize": 2,
"parallel": 3,
"vthread": 4,
"blockIdx.x": 5,
"threadIdx.x": 6,
"blockIdx.y": 7,
"threadIdx.y": 8,
"blockIdx.z": 9,
"threadIdx.z": 10,
"tensorize": 11,
}
def __init__(self, state_object, dag):
self.state_object = state_object
self.compute_dag = dag
self.stage_id_map = {} # A dict maps operation to stage id
self._update_stage_id_map()
@property
def stages(self):
"""
Returns
-------
stages : List[Stage]
"""
return self.state_object.stages
@property
def transform_steps(self):
"""
Returns
-------
transform_steps : List[transform_steps]
"""
return self.state_object.transform_steps
@property
def stage_ops(self):
"""
Returns
-------
ops: List[Operation]
"""
return [stage.op for stage in self.stages]
def bind(self, stage, iterator, thread_name):
"""Schedule primitive corresponding to `te.Stage.bind`.
See also the `te.Stage` for more details.
Parameters
----------
stage : Union[int, Operation, Tensor]
            The Stage to be bound, which can be specified by the integer index, Operation,
or output tensor of the stage.
iterator : Iterator
            The iterator to be bound.
thread_name : str
            The thread type to be bound. Candidates:
- vthread
- blockIdx.x
- threadIdx.x
- blockIdx.y
- threadIdx.y
- blockIdx.z
- threadIdx.z
Returns
-------
res_it : Iterator
            The bound Iterator.
"""
        if thread_name not in State.ANNOTATION_TRANS_TABLE:
            raise ValueError(f"Invalid thread_name: {thread_name}")
self.state_object, res = _ffi_api.StateBind(
self.state_object,
self._resolve_stage_id(stage),
iterator,
State.ANNOTATION_TRANS_TABLE[thread_name],
)
return res
def parallel(self, stage, iterator):
"""Schedule primitive corresponding to `te.Stage.parallel`.
See also the `te.Stage` for more details.
Parameters
----------
stage : Union[int, Operation, Tensor]
The Stage to be paralleled, which can be specified by the integer index, Operation,
or output tensor of the stage.
iterator : Iterator
The iterator to be paralleled.
Returns
-------
res_it : Iterator
The paralleled Iterator.
"""
self.state_object, res = _ffi_api.StateParallel(
self.state_object, self._resolve_stage_id(stage), iterator
)
return res
def unroll(self, stage, iterator, max_unroll=None):
"""Schedule primitive corresponding to `te.Stage.unroll`.
See also the `te.Stage` for more details.
Parameters
----------
stage : Union[int, Operation, Tensor]
The Stage to be unrolled, which can be specified by the integer index, Operation,
or output tensor of the stage.
iterator : Iterator
The iterator to be unrolled.
max_unroll : Optional[int]
The max unroll limit. Iterator with extent larger than this limit will be skipped.
Returns
-------
res_it : Iterator
The unrolled Iterator.
"""
self.state_object, res = _ffi_api.StateUnroll(
self.state_object,
self._resolve_stage_id(stage),
iterator,
max_unroll if max_unroll else -1,
)
return res
def vectorize(self, stage, iterator):
"""Schedule primitive corresponding to `te.Stage.vectorize`.
See also the `te.Stage` for more details.
Parameters
----------
stage : Union[int, Operation, Tensor]
The Stage to be vectorized, which can be specified by the integer index, Operation,
or output tensor of the stage.
iterator : Iterator
The iterator to be vectorized.
Returns
-------
res_it : Iterator
The vectorized Iterator.
"""
self.state_object, res = _ffi_api.StateVectorize(
self.state_object, self._resolve_stage_id(stage), iterator
)
return res
def fuse(self, stage, iters):
"""Schedule primitive corresponding to `te.Stage.fuse`.
See also the `te.Stage` for more details.
Parameters
----------
stage : Union[int, Operation, Tensor]
The Stage to be fused, which can be specified by the integer index, Operation,
or output tensor of the stage.
iters : List[Iterator]
The iterators to be fused.
Returns
-------
res_it : Iterator
The fused Iterator.
Notes
-----
        If the iterators to be fused have stages attached to them (by compute_at), the fused
        result will become the new attach point.
"""
self.state_object, res = _ffi_api.StateFuse(
self.state_object, self._resolve_stage_id(stage), iters
)
return res
def pragma(self, stage, iterator, pragma_type):
"""Schedule primitive corresponding to `te.Stage.pragma`.
See also the `te.Stage` for more details.
Parameters
----------
stage : Union[int, Operation, Tensor]
The Stage to add pragma, which can be specified by the integer index, Operation,
or output tensor of the stage.
iterator : Iterator
The iterator to add pragma.
pragma_type : str
The pragma string.
"""
self.state_object = _ffi_api.StatePragma(
self.state_object, self._resolve_stage_id(stage), iterator, pragma_type
)
def reorder(self, stage, order):
"""Schedule primitive corresponding to `te.Stage.reorder`.
See also the `te.Stage` for more details.
Parameters
----------
stage : Union[int, Operation, Tensor]
The Stage to be reordered, which can be specified by the integer index, Operation,
or output tensor of the stage.
order : List[Iterator]
Iterators in the expected order.
"""
self.state_object = _ffi_api.StateReorder(
self.state_object, self._resolve_stage_id(stage), order
)
def split(self, stage, iterator, lengths, inner_to_outer=True):
"""Schedule primitive corresponding to `te.Stage.split`.
See also the `te.Stage` for more details.
This API supports multiple split factors. (e.g. with 2 split factors, the original iterator
will be split to 3 parts, use `inner_to_outer` to control the split order)
Parameters
----------
stage : Union[int, Operation, Tensor]
The Stage to be split, which can be specified by the integer index, Operation,
or output tensor of the stage.
iterator : Iterator
The iterator to be split.
lengths: List[int]
The multiple split factors. Can be None to be filled by search policy.
inner_to_outer: boolean = True
            Whether the factors go from inner to outer, or from outer to inner.
Returns
-------
res_its : List[Iterator]
            The newly split Iterators.
Notes
-----
        If we split an iterator which has stages attached to it (by compute_at), the innermost
        iterator of the split results will become the new attach point.
"""
self.state_object, res = _ffi_api.StateSplit(
self.state_object, self._resolve_stage_id(stage), iterator, lengths, inner_to_outer
)
return res
def follow_split(self, stage, iterator, src_step_id, n_split):
"""The schedule primitive similar to split, but uses split factors from previous steps.
This step splits the iterator by the same factors as the given SplitStep.
Notes
------
        This step is useful in a scenario where we have a subgraph Dense -> ReLU,
        and we want to compute the Dense stage at ReLU. In this case, we need them to have
        the same tiling structure of common outer loops.
        The follow_split step could be used here to split the Dense stage and make sure its
        splitting factors are the same as the given split step for the ReLU stage.
Parameters
----------
stage : Union[int, Operation, Tensor]
The Stage to be split, which can be specified by the integer index, Operation,
or output tensor of the stage.
iterator : Iterator
The iterator to split.
src_step_id : int
The index of the split step to be followed in the history.
n_split : int
            The number of split levels.
Returns
-------
res_its : List[Iterator]
            The newly split Iterators.
"""
self.state_object, res = _ffi_api.StateFollowSplit(
self.state_object, self._resolve_stage_id(stage), iterator, src_step_id, n_split
)
return res
def follow_fused_split(self, stage, iterator, src_step_ids, level, factor_or_nparts):
"""Schedule primitive extends to split step.
This step is used to split an iterator by the same factors
as the given list of SplitSteps and FuseSteps.
Notes
------
        This step is useful in a scenario where we have a subgraph
in GPU schedule: Input -> Dense
for [email protected] = ... : Bind to blockIdx.x
for [email protected] = ... : Bind to threadIdx.x
for [email protected] = ...
Input_shared = Input ...
for k = ...
Dense = ...
We intend to apply cooperative fetching with the input stage, while the threadIdx.x
axis is bound to an iterator generated by split & fuse step.
        The follow_fused_split step is used to split the iterator into 2 parts, where the split
        factor matches the final extent of the threadIdx.x-bound iterator.
Parameters
----------
stage : Union[int, Operation, Tensor]
The Stage to be split, which can be specified by the integer index, Operation,
or output tensor of the stage.
iterator : Iterator
The iterator to split.
src_step_ids : List[int]
The indices of the split steps to be followed in the history.
level : int
Use the length in this split level.
factor_or_nparts : bool
True to use `factor` for split from inner to outer,
False to use `nparts` for split from outer to inner.
Returns
-------
res_its : List[Iterator]
            The newly split Iterators.
"""
self.state_object, res = _ffi_api.StateFollowFusedSplit(
self.state_object,
self._resolve_stage_id(stage),
iterator,
src_step_ids,
level,
factor_or_nparts,
)
return res
def storage_align(self, stage, iterator, factor, offset):
"""Schedule primitive corresponding to `te.Stage.storage_align`.
See also the `te.Stage` for more details.
Parameters
----------
stage : Union[int, Operation, Tensor]
The Stage to be storage aligned, which can be specified by the integer index,
Operation, or output tensor of the stage.
iterator : Iterator
The iterator to be aligned.
factor : int
The factor in alignment specification.
offset : int
The offset in the alignment specification.
"""
self.state_object = _ffi_api.StateStorageAlign(
self.state_object, self._resolve_stage_id(stage), iterator, factor, offset
)
def compute_at(self, stage, target_stage, target_iter):
"""Schedule primitive corresponding to `te.Stage.compute_at`.
See also the `te.Stage` for more details.
Parameters
----------
stage : Union[int, Operation, Tensor]
The source Stage of computed at, which can be specified by the integer index,
Operation, or output tensor of the stage.
target_stage : Union[int, Operation, Tensor]
The target stage of compute_at, which can be specified by the integer index, Operation,
or output tensor of the stage.
target_iter : Iterator
The target Iterator of compute_at.
Notes
-----
After compute_at, we need careful dependency analysis to compute the accurate bound
information. However, it is relatively expensive and complicated, so we just fill "None"
as bound for the newly created iterators.
Call ComputeDAG::InferBound on the returned state to get the complete bound information.
"""
self.state_object = _ffi_api.StateComputeAt(
self.state_object,
self._resolve_stage_id(stage),
self._resolve_stage_id(target_stage),
target_iter,
)
def compute_inline(self, stage):
"""Schedule primitive corresponding to `te.Stage.compute_inline`, see also the `te.Stage`
for more details.
Parameters
----------
stage : Union[int, Operation, Tensor]
The Stage to be marked compute inlined, which can be specified by the integer index,
Operation, or output tensor of the stage.
"""
self.state_object = _ffi_api.StateComputeInline(
self.state_object, self._resolve_stage_id(stage)
)
def compute_root(self, stage):
"""Schedule primitive corresponding to `te.Stage.compute_root`.
        See also the `te.Stage` for more details.
Parameters
----------
stage : Union[int, Operation, Tensor]
The Stage to be marked compute at root, which can be specified by the integer index,
Operation, or output tensor of the stage.
Notes
-----
After compute_root, we need careful dependency analysis to compute the accurate bound
information. However, it is relatively expensive and complicated, so we just fill "None"
as bound for the newly created iterators.
Call ComputeDAG::InferBound on the returned state to get the complete bound information.
"""
self.state_object = _ffi_api.StateComputeRoot(
self.state_object, self._resolve_stage_id(stage)
)
def cache_read(self, stage, scope_name, reader_stages):
"""Schedule primitive corresponding to `te.Schedule.cache_read`.
See also the `te.Schedule` for more details.
Parameters
----------
stage : Union[int, Operation, Tensor]
The Stage to be cache_read, which can be specified by the integer index, Operation,
or output tensor of the stage.
scope_name : str
The scope name of the newly added read stage.
reader_stages : List[Union[int, Operation, Tensor]]
            The reader stages. Each element of the list can be specified by the integer index, Operation,
or output tensor of the stage.
Returns
-------
new_stage_op : Operator
            The Operator of the newly added stage.
Notes
-----
Cache read step will insert an extra stage to the original ComputeDAG (at the back of the
target stage).
"""
reader_stage_ids = [self._resolve_stage_id(i) for i in reader_stages]
self.state_object, new_stage_id = _ffi_api.StateCacheRead(
self.state_object,
self._resolve_stage_id(stage),
scope_name,
reader_stage_ids,
self.compute_dag,
)
        # Adding a new stage will change all ops behind the added stage. But we still want to
        # keep the original ops map, so we apply a stage id offset to stage_id_map to keep it valid.
self._apply_stage_id_offset(int(new_stage_id))
self._update_stage_id_map()
return self.stages[int(new_stage_id)].op
def cache_write(self, stage, scope_name):
"""Schedule primitive corresponding to `te.Schedule.cache_write`.
See also the `te.Schedule` for more details.
Parameters
----------
stage : Union[int, Operation, Tensor]
The Stage to be cache_write, which can be specified by the integer index, Operation,
or output tensor of the stage.
scope_name : str
The scope name of the newly added compute stage.
Returns
-------
new_stage_op : Operator
            The Operator of the newly added stage.
Notes
-----
Cache write step will insert an extra stage to the original ComputeDAG (in the front of the
target stage).
This step will cache write all output tensors of the target stage.
"""
self.state_object, new_stage_id = _ffi_api.StateCacheWrite(
self.state_object, self._resolve_stage_id(stage), scope_name, self.compute_dag
)
        # Adding a new stage changes the indices of all ops behind it. To keep the
        # original ops map valid, apply a stage id offset to stage_id_map.
self._apply_stage_id_offset(int(new_stage_id))
self._update_stage_id_map()
return self.stages[int(new_stage_id)].op
def rfactor(self, stage, iterator, factor_iter_id):
"""Schedule primitive corresponding to `te.Schedule.rfactor`.
See also the `te.Schedule` for more details.
Parameters
----------
stage : Union[int, Operation, Tensor]
The Stage to be factored, which can be specified by the integer index, Operation,
or output tensor of the stage.
iterator : Iterator
The reduction iterator to be factored.
factor_iter_id : int
The position where the new iterator is placed.
Returns
-------
new_stage_op : Operator
            The Operator of the newly added stage.
Notes
-----
        The rfactor step will insert an extra stage into the original ComputeDAG
        (right before the target stage).
"""
self.state_object, new_stage_id = _ffi_api.StateRfactor(
self.state_object,
self._resolve_stage_id(stage),
iterator,
factor_iter_id,
self.compute_dag,
)
        # Adding a new stage changes the indices of all ops behind it. To keep the
        # original ops map valid, apply a stage id offset to stage_id_map.
self._apply_stage_id_offset(int(new_stage_id))
self._update_stage_id_map()
return self.stages[int(new_stage_id)].op
def copy(self):
"""Do deep copy of this State."""
state = State(self.state_object, self.compute_dag)
state.stage_id_map = self.stage_id_map.copy()
return state
def _resolve_stage_id(self, stage_id):
if isinstance(stage_id, Operation):
return self.stage_id_map[stage_id]
if isinstance(stage_id, Tensor):
return self.stage_id_map[stage_id.op]
if isinstance(stage_id, int):
return stage_id
        raise ValueError(
            "Invalid stage: " + str(stage_id) + ". Expected an int, Operation, or Tensor"
        )
def _update_stage_id_map(self):
for index, stage in enumerate(self.stages):
self.stage_id_map[stage.op] = index
def _apply_stage_id_offset(self, start_id, offset=1):
for key, value in self.stage_id_map.items():
if value >= start_id:
self.stage_id_map[key] = value + offset
def __getitem__(self, key):
if isinstance(key, Tensor):
key = key.op
if isinstance(key, Operation):
return self.stages[self.stage_id_map[key]]
raise ValueError("Invalid item: " + key + " . Expect to be a Operation or Tensor")
def __str__(self):
return str(self.state_object)
def __eq__(self, other):
return _ffi_api.StateEqual(self.state_object, other.state_object)
| https://github.com/zk-ml/tachikoma |
python/tvm/auto_scheduler/measure.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Distributed measurement infrastructure to measure the runtime costs of tensor programs.
These functions are responsible for building the tvm module, uploading it to
remote devices, recording the running time costs, and checking the correctness of the output.
We separate the measurement into two steps: build and run.
A builder builds the executable binary files and a runner runs the binary files to
get the measurement results. The flow of data structures is
. `ProgramBuilder` `ProgramRunner`
`MeasureInput` -----------------> `BuildResult` ----------------> `MeasureResult`
We implement these in python to utilize python's multiprocessing and error handling.
"""
import logging
import multiprocessing
import os
import shutil
import tempfile
import time
import tvm._ffi
from tvm.autotvm.env import AutotvmGlobalScope, reset_global_scope
from tvm.contrib import ndk, tar
from tvm.contrib.popen_pool import PopenPoolExecutor, PopenWorker, StatusKind
from tvm.driver import build_module
from tvm.ir import transform
from tvm.runtime import Object, module, ndarray
from tvm.target import Target
from . import _ffi_api
from .loop_state import StateObject
from .utils import (
call_func_with_timeout,
check_remote,
get_const_tuple,
get_func_name,
make_traceback_info,
request_remote,
)
from .workload_registry import (
deserialize_workload_registry_entry,
serialize_workload_registry_entry,
)
# pylint: disable=invalid-name
logger = logging.getLogger("auto_scheduler")
# The time cost for measurements with errors
# We use 1e10 instead of sys.float_info.max for better readability in log
MAX_FLOAT = 1e10
class BuildFunc:
"""store build_func name and callable to class variable.
name: str = "default"
The name of registered build function.
build_func: callable = tar.tar
The callable of registered build function.
"""
name = "default"
build_func = tar.tar
@tvm._ffi.register_object("auto_scheduler.MeasureCallback")
class MeasureCallback(Object):
"""The base class of measurement callback functions."""
@tvm._ffi.register_object("auto_scheduler.PythonBasedMeasureCallback")
class PythonBasedMeasureCallback(MeasureCallback):
"""Base class for measure callbacks implemented in python"""
def __init__(self):
def callback_func(policy, inputs, results):
self.callback(policy, inputs, results)
self.__init_handle_by_constructor__(_ffi_api.PythonBasedMeasureCallback, callback_func)
def callback(self, policy, inputs, results):
"""The callback function.
Parameters
----------
policy: auto_scheduler.search_policy.SearchPolicy
The search policy.
inputs : List[auto_scheduler.measure.MeasureInput]
The measurement inputs
results : List[auto_scheduler.measure.MeasureResult]
The measurement results
"""
raise NotImplementedError
@tvm._ffi.register_object("auto_scheduler.MeasureInput")
class MeasureInput(Object):
"""Store the input of a measurement.
Parameters
----------
task : SearchTask
The SearchTask of this measurement.
state : Union[State, StateObject]
The State to be measured.
"""
def __init__(self, task, state):
state = state if isinstance(state, StateObject) else state.state_object
self.__init_handle_by_constructor__(_ffi_api.MeasureInput, task, state)
def serialize(self):
"""Custom serialization to workaround MeasureInput not exposing all its
members to the TVM ffi interface.
Note that we do not implement __getstate__ as it does not seem to work
with initialization of the workload registry (maybe because of
initialization order?).
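        Examples
        --------
        A round-trip sketch, assuming ``inp`` is an existing MeasureInput:
        .. code-block:: python
            data = inp.serialize()
            inp2 = MeasureInput.deserialize(data)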
"""
return [
_ffi_api.SerializeMeasureInput(self),
serialize_workload_registry_entry(self.task.workload_key),
]
@staticmethod
def deserialize(data):
inp = _ffi_api.DeserializeMeasureInput(data[0])
deserialize_workload_registry_entry(data[1])
return recover_measure_input(inp)
@tvm._ffi.register_object("auto_scheduler.BuildResult")
class BuildResult(Object):
"""Store the result of a build.
Parameters
----------
filename : Optional[str]
        The filename of the built binary file.
args : List[Tensor]
The arguments.
error_no : int
The error code.
error_msg : Optional[str]
The error message if there is any error.
time_cost : float
The time cost of build.
"""
def __init__(self, filename, args, error_no, error_msg, time_cost):
filename = filename if filename else ""
error_msg = error_msg if error_msg else ""
self.__init_handle_by_constructor__(
_ffi_api.BuildResult, filename, args, error_no, error_msg, time_cost
)
@tvm._ffi.register_object("auto_scheduler.MeasureResult")
class MeasureResult(Object):
"""Store the results of a measurement.
Parameters
----------
costs : List[float]
The time costs of execution.
error_no : int
The error code.
error_msg : Optional[str]
The error message if there is any error.
all_cost : float
The time cost of build and run.
timestamp : float
        The time stamp of this measurement.
"""
def __init__(self, costs, error_no, error_msg, all_cost, timestamp):
error_msg = error_msg if error_msg else ""
self.__init_handle_by_constructor__(
_ffi_api.MeasureResult, costs, error_no, error_msg, all_cost, timestamp
)
def recover_measure_input(inp, rebuild_state=False):
"""
Recover a deserialized MeasureInput by rebuilding the missing fields.
    1. Rebuild the compute_dag in inp.task
2. (Optional) Rebuild the stages in inp.state
Parameters
----------
inp: MeasureInput
The deserialized MeasureInput
rebuild_state: bool = False
        Whether to rebuild the stages in MeasureInput.State
Returns
-------
new_input: MeasureInput
The fully recovered MeasureInput with all fields rebuilt.
"""
# pylint: disable=import-outside-toplevel
from .search_task import SearchTask # lazily import to avoid recursive dependency
task = inp.task
task.target, task.target_host = Target.canon_target_and_host(task.target, task.target_host)
new_task = SearchTask(
workload_key=task.workload_key,
target=task.target,
hardware_params=task.hardware_params,
layout_rewrite_option=task.layout_rewrite_option,
task_inputs=list(task.task_input_names),
)
if rebuild_state:
new_state = new_task.compute_dag.infer_bound_from_state(inp.state)
else:
new_state = inp.state
return MeasureInput(new_task, new_state)
@tvm._ffi.register_object("auto_scheduler.ProgramBuilder")
class ProgramBuilder(Object):
"""The base class of ProgramBuilders."""
def build(self, measure_inputs, verbose=1):
"""Build programs and return results.
Parameters
----------
measure_inputs : List[MeasureInput]
A List of MeasureInput.
verbose: int = 1
Verbosity level. 0 for silent, 1 to output information during program building.
Returns
-------
res : List[BuildResult]
"""
return _ffi_api.ProgramBuilderBuild(self, measure_inputs, verbose)
@tvm._ffi.register_object("auto_scheduler.ProgramRunner")
class ProgramRunner(Object):
"""The base class of ProgramRunners."""
def run(self, measure_inputs, build_results, verbose=1):
"""Run measurement and return results.
Parameters
----------
measure_inputs : List[MeasureInput]
A List of MeasureInput.
build_results : List[BuildResult]
            A List of BuildResult to be run.
verbose: int = 1
Verbosity level. 0 for silent, 1 to output information during program running.
Returns
-------
res : List[MeasureResult]
"""
return _ffi_api.ProgramRunnerRun(self, measure_inputs, build_results, verbose)
@tvm._ffi.register_object("auto_scheduler.ProgramMeasurer")
class ProgramMeasurer(Object):
"""
Measurer that measures the time costs of tvm programs
This class combines ProgramBuilder and ProgramRunner, and provides a simpler API.
Parameters
----------
builder : ProgramBuilder
The ProgramBuilder to build programs
runner : ProgramRunner
The ProgramRunner to measure programs.
callbacks : List[MeasureCallback]
Callbacks to be called after each measurement batch
verbose : int
        The verbosity level: 0 for silent, 1 to output information during program measuring.
    max_continuous_error : Optional[int]
        The maximum number of allowed continuous errors before stopping the tuning.
"""
def __init__(self, builder, runner, callbacks, verbose, max_continuous_error=None):
max_continuous_error = max_continuous_error or -1 # -1 means using the default value
self.__init_handle_by_constructor__(
_ffi_api.ProgramMeasurer, builder, runner, callbacks, verbose, max_continuous_error
)
@tvm._ffi.register_object("auto_scheduler.LocalBuilder")
class LocalBuilder(ProgramBuilder):
"""LocalBuilder use local CPU cores to build programs in parallel.
Parameters
----------
timeout : int = 15
The timeout limit (in second) for each build thread.
This is used in a wrapper of the multiprocessing.Process.join().
n_parallel : int = multiprocessing.cpu_count()
Number of threads used to build in parallel.
build_func: callable or str = "default"
        If it is 'default', use the default build function.
        If it is 'ndk', use the build function for Android NDK.
        If it is callable, use it as a custom build function; it is expected to provide
        an `output_format` field.
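    Examples
    --------
    A usage sketch; the builder is typically passed to a TuningOptions
    (the file name is illustrative):
    .. code-block:: python
        builder = auto_scheduler.LocalBuilder(timeout=20)
        tune_option = auto_scheduler.TuningOptions(
            num_measure_trials=64,
            builder=builder,
            measure_callbacks=[auto_scheduler.RecordToFile("matmul.json")],
        )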
"""
def __init__(self, timeout=15, n_parallel=multiprocessing.cpu_count(), build_func="default"):
if build_func == "default":
BuildFunc.name = "default"
BuildFunc.build_func = tar.tar
elif build_func == "ndk":
BuildFunc.name = "ndk"
BuildFunc.build_func = ndk.create_shared
elif callable(build_func):
BuildFunc.name = "custom"
BuildFunc.build_func = build_func
else:
raise ValueError("Invalid build_func" + build_func)
self.__init_handle_by_constructor__(
_ffi_api.LocalBuilder, timeout, n_parallel, BuildFunc.name
)
@tvm._ffi.register_object("auto_scheduler.LocalRunner")
class LocalRunner(ProgramRunner):
"""LocalRunner that uses local CPU/GPU to measures the time cost of programs.
Parameters
----------
timeout : int = 10
The timeout limit (in second) for each run.
This is used in a wrapper of the multiprocessing.Process.join().
number : int = 3
        The number of times to run the generated code for taking the average.
        We call these runs one `repeat` of measurement.
repeat : int = 1
The number of times to repeat the measurement.
In total, the generated code will be run (1 + number x repeat) times,
where the first "1" is warm up and will be discarded.
The returned result contains `repeat` costs,
each of which is an average of `number` costs.
min_repeat_ms : int = 100
The minimum duration of one `repeat` in milliseconds.
By default, one `repeat` contains `number` runs. If this parameter is set,
        the parameter `number` will be dynamically adjusted to meet the
minimum duration requirement of one `repeat`.
i.e., When the run time of one `repeat` falls below this time, the `number` parameter
will be automatically increased.
cooldown_interval : float = 0.0
The cool down interval between two measurements in seconds.
enable_cpu_cache_flush: bool = False
Whether to flush cache on CPU between repeated measurements.
Flushing cache can make the measured latency of one operator closer to
its actual latency during end-to-end inference.
To make this option effective, the argument `number` should also be set to 1.
        This only has an effect on CPU tasks.
device: int = 0
Which device to run on if multiple are available.
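    Examples
    --------
    A usage sketch for CPU tuning; the runner is typically passed to a
    TuningOptions (the file name is illustrative):
    .. code-block:: python
        runner = auto_scheduler.LocalRunner(repeat=10, enable_cpu_cache_flush=True)
        tune_option = auto_scheduler.TuningOptions(
            num_measure_trials=64,
            runner=runner,
            measure_callbacks=[auto_scheduler.RecordToFile("matmul.json")],
        )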
"""
def __init__(
self,
timeout=10,
number=3,
repeat=1,
min_repeat_ms=100,
cooldown_interval=0.0,
enable_cpu_cache_flush=False,
device=0,
):
if enable_cpu_cache_flush:
number = 1
min_repeat_ms = 0
self.__init_handle_by_constructor__(
_ffi_api.LocalRunner,
timeout,
number,
repeat,
min_repeat_ms,
cooldown_interval,
enable_cpu_cache_flush,
device,
)
@tvm._ffi.register_object("auto_scheduler.RPCRunner")
class RPCRunner(ProgramRunner):
"""RPCRunner that uses RPC call to measures the time cost of programs on remote devices.
Or sometime we may need to use RPC even in local running to insulate the thread environment.
(e.g. running CUDA programs)
Parameters
----------
key : str
The key of the device registered in the RPC tracker.
host : str
The host address of the RPC Tracker.
port : int
The port of RPC Tracker.
priority : int = 1
        The priority of this run request; a larger value means a higher priority.
n_parallel : int = 1
The number of tasks run in parallel.
timeout : int = 10
The timeout limit (in second) for each run.
This is used in a wrapper of the multiprocessing.Process.join().
number : int = 3
        The number of times to run the generated code for taking the average.
        We call these runs one `repeat` of measurement.
repeat : int = 1
The number of times to repeat the measurement.
In total, the generated code will be run (1 + number x repeat) times,
where the first "1" is warm up and will be discarded.
The returned result contains `repeat` costs,
each of which is an average of `number` costs.
min_repeat_ms : int = 100
The minimum duration of one `repeat` in milliseconds.
By default, one `repeat` contains `number` runs. If this parameter is set,
        the parameter `number` will be dynamically adjusted to meet the
minimum duration requirement of one `repeat`.
i.e., When the run time of one `repeat` falls below this time, the `number` parameter
will be automatically increased.
cooldown_interval : float = 0.0
The cool down interval between two measurements in seconds.
enable_cpu_cache_flush: bool = False
Whether to flush cache on CPU between repeated measurements.
Flushing cache can make the measured latency of one operator closer to
its actual latency during end-to-end inference.
To make this option effective, the argument `number` should also be set to 1.
        This only has an effect on CPU tasks.
device: int = 0
Which device to run on if multiple are available.
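    Examples
    --------
    A sketch assuming a device is registered under the key "rasp4b" on a tracker
    listening at 0.0.0.0:9190 (key, host, and port are illustrative):
    .. code-block:: python
        runner = auto_scheduler.RPCRunner(
            key="rasp4b", host="0.0.0.0", port=9190, timeout=30, repeat=3
        )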
"""
def __init__(
self,
key,
host,
port,
priority=1,
n_parallel=1,
timeout=10,
number=3,
repeat=1,
min_repeat_ms=100,
cooldown_interval=0.0,
enable_cpu_cache_flush=False,
device=0,
):
self.__init_handle_by_constructor__(
_ffi_api.RPCRunner,
key,
host,
port,
priority,
n_parallel,
timeout,
number,
repeat,
min_repeat_ms,
cooldown_interval,
enable_cpu_cache_flush,
device,
)
if check_remote(key, host, port, priority, timeout):
print("Get devices for measurement successfully!")
else:
raise RuntimeError(
"Cannot get remote devices from the tracker. "
"Please check the status of tracker by "
"'python -m tvm.exec.query_rpc_tracker --port [THE PORT YOU USE]' "
"and make sure you have free devices on the queue status."
)
class LocalRPCMeasureContext:
"""A context wrapper for running RPCRunner locally.
This will launch a local RPC Tracker and local RPC Server.
Parameters
----------
priority : int = 1
        The priority of this run request; a larger value means a higher priority.
n_parallel : int = 1
The number of tasks run in parallel.
timeout : int = 10
The timeout limit (in second) for each run.
This is used in a wrapper of the multiprocessing.Process.join().
number : int = 3
        The number of times to run the generated code for taking the average.
        We call these runs one `repeat` of measurement.
repeat : int = 1
The number of times to repeat the measurement.
In total, the generated code will be run (1 + number x repeat) times,
where the first "1" is warm up and will be discarded.
The returned result contains `repeat` costs,
each of which is an average of `number` costs.
min_repeat_ms : int = 0
The minimum duration of one `repeat` in milliseconds.
By default, one `repeat` contains `number` runs. If this parameter is set,
        the parameter `number` will be dynamically adjusted to meet the
minimum duration requirement of one `repeat`.
i.e., When the run time of one `repeat` falls below this time, the `number` parameter
will be automatically increased.
cooldown_interval : float = 0.0
The cool down interval between two measurements in seconds.
enable_cpu_cache_flush: bool = False
Whether to flush cache on CPU between repeated measurements.
Flushing cache can make the measured latency of one operator closer to
its actual latency during end-to-end inference.
To make this option effective, the argument `number` should also be set to 1.
        This only has an effect on CPU tasks.
device: int = 0
Which device to run on if multiple are available.
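    Examples
    --------
    A common pattern for tuning GPU tasks locally; note the context object must be
    kept alive while its runner is in use (the file name is illustrative):
    .. code-block:: python
        measure_ctx = auto_scheduler.LocalRPCMeasureContext(min_repeat_ms=300)
        tune_option = auto_scheduler.TuningOptions(
            num_measure_trials=64,
            runner=measure_ctx.runner,
            measure_callbacks=[auto_scheduler.RecordToFile("conv2d.json")],
        )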
"""
def __init__(
self,
priority=1,
n_parallel=1,
timeout=10,
number=3,
repeat=1,
min_repeat_ms=0,
cooldown_interval=0.0,
enable_cpu_cache_flush=False,
device=0,
):
# pylint: disable=import-outside-toplevel
from tvm.rpc.server import Server
from tvm.rpc.tracker import Tracker
self.tracker = Tracker(port=9000, port_end=10000, silent=True)
device_key = "$local$device$%d" % self.tracker.port
self.server = Server(
port=self.tracker.port,
port_end=10000,
key=device_key,
silent=True,
tracker_addr=("127.0.0.1", self.tracker.port),
)
self.runner = RPCRunner(
device_key,
"127.0.0.1",
self.tracker.port,
priority,
n_parallel,
timeout,
number,
repeat,
min_repeat_ms,
cooldown_interval,
enable_cpu_cache_flush,
device,
)
# Wait for the processes to start
time.sleep(0.5)
def __del__(self):
# Close the tracker and server before exit
self.tracker.terminate()
self.server.terminate()
time.sleep(0.5)
class MeasureErrorNo(object):
"""Error type for MeasureResult."""
NO_ERROR = 0 # No error
INSTANTIATION_ERROR = 1 # Errors happen when apply transform steps from init state
COMPILE_HOST = 2 # Errors happen when compiling code on host (e.g., tvm.build)
COMPILE_DEVICE = 3 # Errors happen when compiling code on device
# (e.g. OpenCL JIT on the device)
RUNTIME_DEVICE = 4 # Errors happen when run program on device
WRONG_ANSWER = 5 # Answer is wrong when compared to a reference output
BUILD_TIMEOUT = 6 # Timeout during compilation
RUN_TIMEOUT = 7 # Timeout during run
UNKNOWN_ERROR = 8 # Unknown error
def _local_build_worker(inp_serialized, build_func, verbose):
tic = time.time()
inp = MeasureInput.deserialize(inp_serialized)
task = inp.task
task.target, task.target_host = Target.canon_target_and_host(task.target, task.target_host)
error_no = MeasureErrorNo.NO_ERROR
error_msg = None
args = []
try:
sch, args = task.compute_dag.apply_steps_from_state(
inp.state, layout_rewrite=task.layout_rewrite_option
)
# pylint: disable=broad-except
except Exception:
error_no = MeasureErrorNo.INSTANTIATION_ERROR
error_msg = make_traceback_info()
if error_no == 0:
dirname = tempfile.mkdtemp()
filename = os.path.join(dirname, "tmp_func." + build_func.output_format)
try:
with transform.PassContext().current():
func = build_module.build(sch, args, target=task.target)
func.export_library(filename, build_func)
# pylint: disable=broad-except
except Exception:
error_no = MeasureErrorNo.COMPILE_HOST
error_msg = make_traceback_info()
else:
filename = ""
if verbose >= 1:
if error_no == MeasureErrorNo.NO_ERROR:
print(".", end="", flush=True)
else:
print(".E", end="", flush=True) # Build error
return filename, args, error_no, error_msg, time.time() - tic
def local_build_worker(args):
"""
    Build function of LocalBuilder, to be run in the Builder thread pool.
Parameters
----------
args: Tuple[MeasureInput, callable, int]
inputs, build-func, verbose args passed to local_builder_build
Returns
-------
res : BuildResult
The build result of this Builder thread.
"""
inp, build_func, verbose = args
return _local_build_worker(inp, build_func, verbose)
@tvm._ffi.register_func("auto_scheduler.local_builder.build")
def local_builder_build(inputs, timeout, n_parallel, build_func="default", verbose=1):
"""
Build function of LocalBuilder to build the MeasureInputs to runnable modules.
Parameters
----------
inputs : List[MeasureInput]
The MeasureInputs to be built.
timeout : int
The timeout limit (in second) for each build thread.
This is used in a wrapper of the multiprocessing.Process.join().
n_parallel : int
Number of threads used to build in parallel.
build_func : str = 'default'
The name of build function to process the built module.
verbose: int = 1
Verbosity level. 0 for silent, 1 to output information during program building.
Returns
-------
res : List[BuildResult]
The build results of these MeasureInputs.
"""
assert build_func == BuildFunc.name, (
"BuildFunc.name: " + BuildFunc.name + ", but args is: " + build_func
)
executor = PopenPoolExecutor(
n_parallel, timeout, reset_global_scope, (AutotvmGlobalScope.current,)
)
tuple_res = executor.map_with_error_catching(
local_build_worker,
[
(
i.serialize(),
BuildFunc.build_func,
verbose,
)
for i in inputs
],
)
results = []
for res in tuple_res:
if res.status == StatusKind.COMPLETE:
results.append(BuildResult(*res.value))
elif res.status == StatusKind.TIMEOUT:
if verbose >= 1:
print(".T", end="", flush=True) # Build timeout
results.append(BuildResult(None, [], MeasureErrorNo.BUILD_TIMEOUT, None, timeout))
elif res.status == StatusKind.EXCEPTION:
if verbose >= 1:
print(".E", end="", flush=True) # Build error
results.append(
BuildResult(None, [], MeasureErrorNo.COMPILE_HOST, repr(res.value), timeout)
)
else:
raise ValueError("Result status is not expected. Unreachable branch")
return results
TASK_INPUT_CHECK_FUNC_REGISTRY = {}
def register_task_input_check_func(func_name, f=None, override=False):
"""Register a function that checks the input buffer map.
    The registered function should take a list of Tensors, which are the input/output
    tensors of a TVM subgraph, and return a map from each special input Tensor to its
    buffer name.
Parameters
----------
    func_name : Union[Function, str]
        The check function to be registered, or its function name.
f : Optional[Function]
The check function to be registered.
override : boolean = False
Whether to override existing entry.
Examples
--------
.. code-block:: python
@auto_scheduler.register_task_input_check_func
def check_task_input_by_placeholder_name(args : List[Tensor]):
tensor_input_map = {}
for arg in args:
if isinstance(arg.op, tvm.te.PlaceholderOp):
if arg.op.name != "placeholder":
tensor_input_map[arg] = arg.op.name
return tensor_input_map
"""
global TASK_INPUT_CHECK_FUNC_REGISTRY
if callable(func_name):
f = func_name
func_name = get_func_name(f)
if not isinstance(func_name, str):
raise ValueError("expect string function name")
def register(myf):
"""internal register function"""
if func_name in TASK_INPUT_CHECK_FUNC_REGISTRY and not override:
raise RuntimeError("%s has been registered already" % func_name)
TASK_INPUT_CHECK_FUNC_REGISTRY[func_name] = myf
return myf
if f:
return register(f)
return register
def prepare_input_map(args, workload_key=None):
"""This function deals with special task inputs. Map the input Tensor of a TVM subgraph
to a specific buffer name in the global buffer map.
Parameters
----------
args : List[Tensor]
Input/output Tensor of a TVM subgraph.
workload_key: Optional[str]
        The workload key for which these inputs are being prepared. This is used
        to identify whether an input has a pre-registered buffer
        (see `register_task_input_buffer`).
Returns
-------
Dict[Tensor, str] :
Map from the input Tensor to its buffer name.
Notes
-----
    The buffer names are specially designed, and these buffers should be provided in
    `SearchTask(..., task_inputs={...})`.
"""
# pylint: disable=import-outside-toplevel
global TASK_INPUT_CHECK_FUNC_REGISTRY
from .search_task import TASK_INPUT_BUFFER_TABLE
# A dict that maps the input tensor arg to a buffer name
tensor_input_map = {}
# Case 0: Check placeholder name
for arg in args:
if isinstance(arg.op, tvm.te.PlaceholderOp):
if (
workload_key
and workload_key in TASK_INPUT_BUFFER_TABLE
and arg.op.name in TASK_INPUT_BUFFER_TABLE[workload_key]
):
tensor_input_map[arg] = arg.op.name
# Case 1: Check specific tensor inputs
for func_name in TASK_INPUT_CHECK_FUNC_REGISTRY:
func = TASK_INPUT_CHECK_FUNC_REGISTRY[func_name]
tensor_input_map.update(func(args))
return tensor_input_map
def prepare_runner_args(inp, build_res):
"""This function prepares the pre-defined arguments in `TASK_INPUT_BUFFER_TABLE` for local/rpc
runner in main process
Parameters
----------
inp : MeasureInput
Measure input to be measured.
build_res : BuildResult
Build result to be measured.
Returns
-------
List[Optional[numpy.ndarray]] :
List of arguments for running the program. If the argument does not have a pre-defined input
buffer, None is added to the list as a placeholder.
"""
# pylint: disable=import-outside-toplevel
from .search_task import get_task_input_buffer # lazily import to avoid recursive dependency
task_input_names = inp.task.task_input_names
tensor_input_map = prepare_input_map(build_res.args, inp.task.workload_key)
if not task_input_names:
tensor_input_map = {}
args = []
task_inputs_count = 0
for arg in build_res.args:
if arg in tensor_input_map:
tensor_name = tensor_input_map[arg]
if tensor_name in task_input_names:
task_input_buffer = get_task_input_buffer(inp.task.workload_key, tensor_name)
# convert tvm.NDArray to picklable numpy.ndarray
args.append(task_input_buffer.numpy())
task_inputs_count += 1
else:
raise ValueError(
"%s not found in task_inputs, " % (tensor_name)
+ "should provide with `SearchTask(..., task_inputs={...})`"
)
else:
args.append(None)
if task_inputs_count != len(task_input_names):
raise RuntimeError("task_inputs not fully matched, check if there's any unexpected error")
return args
def _timed_eval_func(
inp_serialized,
build_res,
args,
number,
repeat,
min_repeat_ms,
cooldown_interval,
enable_cpu_cache_flush,
verbose,
device,
):
inp = MeasureInput.deserialize(inp_serialized)
tic = time.time()
error_no = 0
error_msg = None
try:
func = module.load_module(build_res.filename)
dev = ndarray.device(str(inp.task.target), device)
# Limitation:
# We can not get PackFunction directly in the remote mode as it is wrapped
# under the std::function. We could lift the restriction later once we fold
# the PackedFunc as an object. Currently, we pass function name to work
# around it.
f_prepare = "cache_flush_cpu_non_first_arg" if enable_cpu_cache_flush else ""
time_f = func.time_evaluator(
func.entry_name,
dev,
number=number,
repeat=repeat,
min_repeat_ms=min_repeat_ms,
f_preproc=f_prepare,
)
# pylint: disable=broad-except
except Exception:
costs = (MAX_FLOAT,)
error_no = MeasureErrorNo.COMPILE_DEVICE
error_msg = make_traceback_info()
if error_no == 0:
try:
random_fill = tvm.get_global_func("tvm.contrib.random.random_fill", True)
assert random_fill, "Please make sure USE_RANDOM is ON in the config.cmake"
assert len(args) == len(build_res.args)
loc_args = []
# pylint: disable=consider-using-enumerate
for idx in range(len(args)):
if args[idx] is None:
build_res_arg = build_res.args[idx]
empty_array = ndarray.empty(
get_const_tuple(build_res_arg.shape), build_res_arg.dtype, dev
)
random_fill(empty_array)
loc_args.append(empty_array)
else:
loc_args.append(ndarray.array(args[idx], dev))
dev.sync()
costs = time_f(*loc_args).results
# pylint: disable=broad-except
except Exception:
costs = (MAX_FLOAT,)
error_no = MeasureErrorNo.RUNTIME_DEVICE
error_msg = make_traceback_info()
shutil.rmtree(os.path.dirname(build_res.filename))
toc = time.time()
time.sleep(cooldown_interval)
if verbose >= 1:
if error_no == MeasureErrorNo.NO_ERROR:
print("*", end="", flush=True)
else:
print("*E", end="", flush=True) # Run error
return costs, error_no, error_msg, toc - tic + build_res.time_cost, toc
@tvm._ffi.register_func("auto_scheduler.local_runner.run")
def local_run(
inputs,
build_results,
timeout=10,
number=3,
repeat=1,
min_repeat_ms=0,
cooldown_interval=0,
enable_cpu_cache_flush=False,
verbose=1,
device=0,
):
"""
Run function of LocalRunner to test the performance of the input BuildResults.
Parameters
----------
inputs : List[MeasureInput]
The MeasureInputs to be measured.
build_results : List[BuildResult]
The BuildResults to be measured.
timeout : int = 10
The timeout limit (in second) for each run.
This is used in a wrapper of the multiprocessing.Process.join().
number : int = 3
        The number of times to run the generated code for taking the average.
        We call these runs one `repeat` of measurement.
repeat : int = 1
The number of times to repeat the measurement.
In total, the generated code will be run (1 + number x repeat) times,
where the first "1" is warm up and will be discarded.
The returned result contains `repeat` costs,
each of which is an average of `number` costs.
min_repeat_ms : int = 0
The minimum duration of one `repeat` in milliseconds.
By default, one `repeat` contains `number` runs. If this parameter is set,
        the parameter `number` will be dynamically adjusted to meet the
minimum duration requirement of one `repeat`.
i.e., When the run time of one `repeat` falls below this time, the `number` parameter
will be automatically increased.
cooldown_interval : float = 0.0
The cool down interval between two measurements in seconds.
enable_cpu_cache_flush: bool = False
Whether to flush cache on CPU between repeated measurements.
Flushing cache can make the measured latency of one operator closer to
its actual latency during end-to-end inference.
To make this option effective, the argument `number` should also be set to 1.
        This only has an effect on CPU tasks.
verbose: int = 1
Verbosity level. 0 for silent, 1 to output information during program measuring.
device: int = 0
Which device to run on if multiple are available.
Returns
-------
res : List[MeasureResult]
The measure results of these MeasureInputs.
"""
measure_results = []
assert len(inputs) == len(build_results), "Measure input size should be equal to build results"
worker = PopenWorker()
for inp, build_res in zip(inputs, build_results):
if build_res.error_no != 0:
res = (
(MAX_FLOAT,),
build_res.error_no,
build_res.error_msg,
build_res.time_cost,
time.time(),
)
else:
args = prepare_runner_args(inp, build_res)
res = call_func_with_timeout(
worker,
timeout,
_timed_eval_func,
args=(
inp.serialize(),
build_res,
args,
number,
repeat,
min_repeat_ms,
cooldown_interval,
enable_cpu_cache_flush,
verbose,
device,
),
)
if isinstance(res, TimeoutError):
if verbose >= 1:
print("*T", end="", flush=True) # Run timeout
res = (
(MAX_FLOAT,),
MeasureErrorNo.RUN_TIMEOUT,
None,
build_res.time_cost + timeout,
time.time(),
)
elif isinstance(res, Exception):
if verbose >= 1:
print("*E", end="", flush=True) # Run error
res = (
(MAX_FLOAT,),
MeasureErrorNo.RUNTIME_DEVICE,
str(res),
build_res.time_cost + timeout,
time.time(),
)
measure_results.append(MeasureResult(*res))
if verbose >= 1:
print("", flush=True)
return measure_results
def _rpc_run(
inp_serialized,
build_res,
args,
key,
host,
port,
priority,
timeout,
number,
repeat,
min_repeat_ms,
cooldown_interval,
enable_cpu_cache_flush,
verbose,
device,
):
inp = MeasureInput.deserialize(inp_serialized)
tic = time.time()
error_no = 0
error_msg = None
try:
# upload built module
remote = request_remote(key, host, port, priority, timeout)
remote.upload(build_res.filename)
func = remote.load_module(os.path.split(build_res.filename)[1])
dev = remote.device(str(inp.task.target), device)
# Limitation:
# We can not get PackFunction directly in the remote mode as it is wrapped
# under the std::function. We could lift the restriction later once we fold
# the PackedFunc as an object. Currently, we pass function name to work
# around it.
f_prepare = "cache_flush_cpu_non_first_arg" if enable_cpu_cache_flush else ""
time_f = func.time_evaluator(
func.entry_name,
dev,
number=number,
repeat=repeat,
min_repeat_ms=min_repeat_ms,
f_preproc=f_prepare,
)
# pylint: disable=broad-except
except Exception:
costs = (MAX_FLOAT,)
error_no = MeasureErrorNo.COMPILE_DEVICE
error_msg = make_traceback_info()
if error_no == 0:
try:
stream = dev.create_raw_stream()
dev.set_raw_stream(stream)
random_fill = remote.get_function("tvm.contrib.random.random_fill")
assert (
random_fill
), "Please make sure USE_RANDOM is ON in the config.cmake on the remote devices"
assert len(args) == len(build_res.args)
loc_args = []
# pylint: disable=consider-using-enumerate
for idx in range(len(args)):
if args[idx] is None:
build_res_arg = build_res.args[idx]
empty_array = ndarray.empty(
get_const_tuple(build_res_arg.shape), build_res_arg.dtype, dev
)
random_fill(empty_array)
loc_args.append(empty_array)
else:
loc_args.append(ndarray.array(args[idx], dev))
dev.sync()
            # First run to check that the kernel is correct
func.entry_func(*loc_args)
dev.sync()
costs = time_f(*loc_args).results
# clean up remote files
remote.remove(build_res.filename)
remote.remove(os.path.splitext(build_res.filename)[0] + ".so")
remote.remove("")
dev.free_raw_stream(stream)
# pylint: disable=broad-except
except Exception:
dev.free_raw_stream(stream)
costs = (MAX_FLOAT,)
error_no = MeasureErrorNo.RUNTIME_DEVICE
error_msg = make_traceback_info()
shutil.rmtree(os.path.dirname(build_res.filename))
toc = time.time()
time.sleep(cooldown_interval)
if verbose >= 1:
if error_no == MeasureErrorNo.NO_ERROR:
print("*", end="")
else:
print("*E", end="") # Run error
return costs, error_no, error_msg, toc - tic + build_res.time_cost, toc
def _rpc_run_worker(args):
"""Function to be ran in the RPCRunner thread pool.
Parameters
----------
args : Tuple[MeasureInput, BuildResult, ...]
Single input and build result plus the rest of the arguments to `rpc_runner_run`.
Returns
-------
res : MeasureResult
The measure result of this Runner thread.
"""
_, build_res, _, _, _, _, _, timeout, _, _, _, _, _, verbose, _ = args
if build_res.error_no != MeasureErrorNo.NO_ERROR:
return (
(MAX_FLOAT,),
build_res.error_no,
build_res.error_msg,
build_res.time_cost,
time.time(),
)
try:
res = _rpc_run(*args)
# pylint: disable=broad-except
except Exception:
if verbose >= 1:
print("*E", end="") # Run error
res = (
(MAX_FLOAT,),
MeasureErrorNo.RUNTIME_DEVICE,
make_traceback_info(),
build_res.time_cost + timeout,
time.time(),
)
return res
@tvm._ffi.register_func("auto_scheduler.rpc_runner.run")
def rpc_runner_run(
inputs,
build_results,
key,
host,
port,
priority=1,
n_parallel=1,
timeout=10,
number=3,
repeat=1,
min_repeat_ms=0,
cooldown_interval=0.0,
enable_cpu_cache_flush=False,
verbose=1,
device=0,
):
"""Run function of RPCRunner to test the performance of the input BuildResults.
Parameters
----------
inputs : List[MeasureInput]
The MeasureInputs to be measured.
build_results : List[BuildResult]
The BuildResults to be measured.
key : str
The key of the device registered in the RPC tracker.
host : str
The host address of the RPC Tracker.
port : int
The port of RPC Tracker.
priority : int = 1
        The priority of this run request; a larger value means a higher priority.
n_parallel : int = 1
The number of tasks run in parallel.
timeout : int = 10
The timeout limit (in second) for each run.
This is used in a wrapper of the multiprocessing.Process.join().
number : int = 3
        The number of times to run the generated code for taking the average.
        We call these runs one `repeat` of measurement.
repeat : int = 1
The number of times to repeat the measurement.
In total, the generated code will be run (1 + number x repeat) times,
where the first "1" is warm up and will be discarded.
The returned result contains `repeat` costs,
each of which is an average of `number` costs.
min_repeat_ms : int = 0
The minimum duration of one `repeat` in milliseconds.
By default, one `repeat` contains `number` runs. If this parameter is set,
        the parameter `number` will be dynamically adjusted to meet the
minimum duration requirement of one `repeat`.
i.e., When the run time of one `repeat` falls below this time, the `number` parameter
will be automatically increased.
cooldown_interval : float = 0.0
The cool down interval between two measurements in seconds.
enable_cpu_cache_flush: bool = False
Whether to flush cache on CPU between repeated measurements.
Flushing cache can make the measured latency of one operator closer to
its actual latency during end-to-end inference.
To make this option effective, the argument `number` should also be set to 1.
        This only has an effect on CPU tasks.
verbose: int = 1
Verbosity level. 0 for silent, 1 to output information during program measuring.
device: int = 0
Which device to run on if multiple are available.
Returns
-------
res : List[MeasureResult]
The measure results of these MeasureInputs.
"""
assert len(inputs) == len(build_results), "Measure input size should be equal to build results"
    # This pool is not doing computationally intensive work; it mostly waits on RPC calls
executor = PopenPoolExecutor(n_parallel)
tuple_res = executor.map_with_error_catching(
_rpc_run_worker,
[
(
inp.serialize(),
build_res,
prepare_runner_args(inp, build_res),
key,
host,
port,
priority,
timeout,
number,
repeat,
min_repeat_ms,
cooldown_interval,
enable_cpu_cache_flush,
verbose,
device,
)
for inp, build_res in zip(inputs, build_results)
],
)
results = []
for i, res in enumerate(tuple_res):
if res.status == StatusKind.COMPLETE:
results.append(MeasureResult(*res.value))
else:
assert res.status == StatusKind.TIMEOUT
if verbose >= 1:
print("*T", end="") # Run timeout
build_res = build_results[i]
results.append(
MeasureResult(
(MAX_FLOAT,),
MeasureErrorNo.RUN_TIMEOUT,
None,
build_res.time_cost + timeout,
time.time(),
)
)
if verbose >= 1:
print("")
return results
| https://github.com/zk-ml/tachikoma |
python/tvm/auto_scheduler/measure_record.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, pointless-string-statement
""" Serialization and other I/O support for measurement records (tuning logs). """
import argparse
import logging
import os
import itertools
import numpy as np
import tvm._ffi
from tvm.runtime import Object
from .measure import MeasureErrorNo, MeasureCallback
from .utils import calc_workload_dis_factor, decode_workload_key
from . import _ffi_api
logger = logging.getLogger("auto_scheduler")
@tvm._ffi.register_object("auto_scheduler.RecordToFile")
class RecordToFile(MeasureCallback):
"""
A measurement callback that writes measurement records into a file.
Parameters
----------
filename : str
File name for this callback to write log to.
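    Examples
    --------
    A sketch of attaching the callback to a tuning run, assuming ``task`` is an
    existing SearchTask (the file name is illustrative):
    .. code-block:: python
        log_file = "matmul.json"
        tune_option = auto_scheduler.TuningOptions(
            num_measure_trials=64,
            measure_callbacks=[auto_scheduler.RecordToFile(log_file)],
        )
        task.tune(tune_option)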
"""
def __init__(self, filename):
dirname = os.path.dirname(os.path.abspath(filename))
if not os.path.exists(dirname):
os.makedirs(dirname)
self.__init_handle_by_constructor__(_ffi_api.RecordToFile, filename)
@tvm._ffi.register_object("auto_scheduler.RecordReader")
class RecordReader(Object):
"""
Reader of the json log file.
Parameters
----------
filename : str
File name for this reader to load log from.
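    Examples
    --------
    The reader is iterable, yielding (MeasureInput, MeasureResult) pairs
    (the file name is illustrative):
    .. code-block:: python
        for inp, res in auto_scheduler.RecordReader("matmul.json"):
            if res.error_no == 0:
                print(inp.task.workload_key)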
"""
def __init__(self, filename):
if not os.path.exists(filename):
logger.warning("%s does not exist!", filename)
# a set to prevent print duplicated message
self.messages = set()
self.__init_handle_by_constructor__(_ffi_api.RecordReader, filename)
def check_workload_key(self, inputs):
"""Check and throw warnings for records with old format workload key.
Parameters
----------
inputs: List[MeasureInput]
The measure inputs to be checked.
Notes
-----
This checker could be deprecated in the future.
"""
for inp in inputs:
_, args = decode_workload_key(inp.task.workload_key)
if args is None:
continue
if not args:
msg = (
"MeasureInput with old format workload key %s should be updated "
"using the script from https://github.com/apache/tvm/pull/7317."
% inp.task.workload_key
)
if msg not in self.messages:
self.messages.add(msg)
logger.warning(msg)
def read_lines(self, max_lines=None, skip_lines=0):
"""Read multiple lines from the log file.
Parameters
----------
max_lines : Optional[int]
The maximum number of lines. None to read all lines.
skip_lines : int = 0
Skip the first n lines.
Returns
-------
inputs : List[auto_scheduler.measure.MeasureInput]
The MeasureInputs loaded from the log file.
results : List[auto_scheduler.measure.MeasureResult]
The MeasureResults loaded from the log file.
Notes
-----
Some unimportant and expensive fields in the returned MeasureInput are not deserialized
for faster read speed (e.g. input.task.compute_dag, input.state.stages).
If you want to use them, you can call the :code:`recover_measure_input` below
to rebuild these fields.
"""
inputs, results = _ffi_api.RecordReaderReadLines(
self, max_lines if max_lines else -1, skip_lines
)
self.check_workload_key(inputs)
return inputs, results
def __iter__(self):
while True:
ret = _ffi_api.RecordReaderReadNext(self)
if not ret:
break
self.check_workload_key([ret[0]])
yield ret[0], ret[1] # (input, result)
def load_record_from_string(record):
"""
Load the measure record from string.
Parameters
----------
record: str
        A record string, including the serialized MeasureInput and MeasureResult.
Returns
-------
ret: Tuple[MeasureInput, MeasureResult]
A tuple of MeasureInput, MeasureResult.
"""
return _ffi_api.ReadMeasureRecord(record)
def dump_record_to_string(inp, res):
"""
Dump the measure record to a string.
Parameters
----------
inp: MeasureInput
The measure input.
res: MeasureResult
The measure result.
Returns
-------
ret: str
The dumped string.
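    Examples
    --------
    A round-trip sketch with `load_record_from_string`, assuming ``inp`` and
    ``res`` are an existing measurement pair:
    .. code-block:: python
        line = dump_record_to_string(inp, res)
        inp2, res2 = load_record_from_string(line)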
"""
return _ffi_api.WriteMeasureRecords(inp, res)
def load_records(filename):
"""
Load measurement records from a file.
Parameters
----------
filename : str
File name to load log from.
Returns
-------
logs : List[auto_scheduler.measure.MeasureInput, auto_scheduler.measure.MeasureResult]
Notes
-----
Some unimportant and expensive fields in the returned MeasureInput are not deserialized
for faster read speed (e.g., input.task.compute_dag, input.state.stages).
If you want to use them, you can call the :code:`recover_measure_input` below
to rebuild these fields.
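    Examples
    --------
    A sketch of iterating over the loaded records (the file name is illustrative):
    .. code-block:: python
        for inp, res in auto_scheduler.load_records("matmul.json"):
            if res.error_no == 0:
                print(inp.task.workload_key)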
"""
return zip(*RecordReader(filename).read_lines())
def save_records(filename, inputs, results):
"""
Append measure records to file.
Parameters
----------
filename : str
File name to write log to.
inputs: List[MeasureInputs]
The MeasureInputs to be written.
results: List[MeasureResults]
The MeasureResults to be written.
"""
dirname = os.path.dirname(os.path.abspath(filename))
if not os.path.exists(dirname):
os.makedirs(dirname)
_ffi_api.SaveRecords(filename, inputs, results)
def load_best_record(filename, workload_key=None, target=None, include_compatible=False):
"""Return the best measurement pair form a log file. This may return none results if
there is no legal measure pair with the specified workload_key/target found from the log file.
Parameters
----------
filename : str
File name to load log from.
workload_key : Optional[str]
The workload key of the compute declaration.
With `None`, this returns the best measure pair of all workloads.
target : Optional[tvm.target.Target]
The target device.
With `None`, this returns the best measure pair of all target devices.
include_compatible: bool
When set to True, all compatible records in the log file will be considered.
Returns
-------
input : auto_scheduler.measure.MeasureInput
        The best State's MeasureInput from this log file.
    result : auto_scheduler.measure.MeasureResult
        The best State's MeasureResult from this log file.
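    Examples
    --------
    A sketch of rebuilding the best schedule of a task from a log file, assuming
    ``task`` is an existing SearchTask (the file name is illustrative):
    .. code-block:: python
        inp, _ = auto_scheduler.load_best_record("matmul.json", task.workload_key)
        sch, args = task.compute_dag.apply_steps_from_state(inp.state)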
"""
log_reader = RecordReader(filename)
best_cost = 1e30
best_inp = None
best_res = None
for inp, res in log_reader:
if res.error_no != MeasureErrorNo.NO_ERROR:
continue
if target and inp.task.target.kind.name != target.kind.name:
continue
costs = [v.value for v in res.costs]
cost = np.mean(costs)
if workload_key is not None:
dis_f = calc_workload_dis_factor(
decode_workload_key(workload_key), decode_workload_key(inp.task.workload_key)
)
if dis_f == float("inf"):
continue
if not include_compatible and dis_f != 1:
continue
# Since different workloads have different FLOPS, we multiply the factor to
# eliminate this difference, which is basically the concept of throughput.
cost *= dis_f
if cost < best_cost:
best_cost = cost
best_inp = inp
best_res = res
return best_inp, best_res
def distill_record_file(in_file, out_file):
"""
Pick the best entries from a record file and store them to another file.
This function distills the useful log entries from a large log file.
If out_file already exists, the best entries from both
in_file and out_file will be saved.
Parameters
----------
    in_file: str
        The filename of the input record file.
    out_file: str or file
        The filename of the output record file.
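    Examples
    --------
    A sketch (file names illustrative); the same functionality is available from
    the command line, see the usage note at the end of this module:
    .. code-block:: python
        distill_record_file("full_log.json", "full_log.best.json")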
"""
# pylint: disable=import-outside-toplevel
from .dispatcher import ApplyHistoryBest
context = load_records(in_file)
dirname = os.path.dirname(os.path.abspath(out_file))
if not os.path.exists(dirname):
os.makedirs(dirname)
if os.path.isfile(out_file):
out_context = load_records(out_file)
context = itertools.chain(context, out_context)
def measure_input_str_key(inp):
return _ffi_api.SerializeMeasureInput(inp)
# Dict[target key,
# Dict[workload hash,
# Dict[workload args, (cost, (MeasureInput, MeasureResult))]]]
    # Full type: Dict[str, Dict[str, Dict[Tuple, Tuple[float, Tuple[MeasureInput, MeasureResult]]]]]
best_records = {}
for inp, res in context:
if res.error_no != 0:
continue
# Keep the best record for each target and workload.
costs = [x.value for x in res.costs if isinstance(x, tvm.tir.expr.FloatImm)]
cost = np.mean(costs)
for k in inp.task.target.keys:
entry, _, workload_args = ApplyHistoryBest.get_workload_entry(
best_records, k, inp.task.workload_key
)
if workload_args not in entry or cost < entry[workload_args][0]:
entry[workload_args] = (cost, (inp, res))
# Remove duplications by multiple target keys.
out_records = {}
for target_entry in best_records.values():
for workload_entry in target_entry.values():
for _, (inp, res) in workload_entry.values():
out_records[measure_input_str_key(inp)] = (inp, res)
inputs = []
results = []
for inp, res in out_records.values():
inputs.append(inp)
results.append(res)
# create a new file and save the best records
    open(out_file, "w").close()  # truncate any existing content
save_records(out_file, inputs, results)
logger.info("Extract %d best records from %s to %s", len(inputs), in_file, out_file)
def main():
"""The main function for CLI."""
parser = argparse.ArgumentParser()
parser.add_argument("--mode", choices=["distill"], default="distill")
parser.add_argument("-i", "--input", type=str, help="input file")
parser.add_argument("-o", "--output", type=str, default=None, help="output file")
args = parser.parse_args()
logging.basicConfig()
logger.setLevel(logging.INFO)
if args.mode == "distill":
args.output = args.output or args.input + ".best.json"
distill_record_file(args.input, args.output)
"""
Usage:
* Distill the best entries from a large log file
e.g. python -m tvm.auto_scheduler.measure_record --mode distill -i input.json
"""
if __name__ == "__main__":
main()
| https://github.com/zk-ml/tachikoma |
python/tvm/auto_scheduler/relay_integration.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-variable,invalid-name
"""
Integrate auto_scheduler into relay. It implements the following items:
1. Extract search tasks from a relay program
2. Provide auto-scheduling for all TOPI compute functions
"""
import json
import logging
import threading
import traceback
import tvm
from tvm import autotvm, transform
from tvm._ffi.base import TVMError
from tvm.ir.transform import PassContext
from tvm.runtime import convert_to_object
from tvm.target import Target
from tvm.te.tensor import ComputeOp, PlaceholderOp, Tensor
from tvm.tir import Reduce
from tvm.tir import expr as _expr
from . import _ffi_api
from .compute_dag import ComputeDAG, LayoutRewriteOption
from .dispatcher import DispatchContext
from .search_task import SearchTask
from .utils import get_const_tuple
from .workload_registry import register_workload_tensors
logger = logging.getLogger("auto_scheduler")
def call_all_topi_funcs(mod, params, target, error_list, opt_level=3):
"""Call all TOPI compute to extract auto_scheduler tasks in a Relay program"""
# pylint: disable=import-outside-toplevel
from tvm import relay
# Turn off AutoTVM config not found warnings
old_autotvm_silent = autotvm.GLOBAL_SCOPE.silent
autotvm.GLOBAL_SCOPE.silent = True
with transform.PassContext(
opt_level=opt_level,
config={
"relay.backend.use_auto_scheduler": True,
},
disabled_pass={"AutoSchedulerLayoutRewrite"},
):
compiler = relay.vm.VMCompiler()
if params:
compiler.set_params(params)
mod = tvm.IRModule.from_expr(mod) if isinstance(mod, relay.Function) else mod
try:
compiler.lower(mod, target)
except TVMError:
error_list.append(f"{traceback.format_exc()}")
finally:
autotvm.GLOBAL_SCOPE.silent = old_autotvm_silent
def extract_tasks(
mod,
params,
target,
target_host=None,
hardware_params=None,
include_simple_tasks=False,
dump_workload_to_dag_log=None,
opt_level=3,
):
"""Extract tuning tasks from a relay program.
Parameters
----------
mod: tvm.IRModule or relay.function.Function
The module or function to tune
params: dict of str to numpy array
The associated parameters of the program
target: Union[tvm.target.Target, str]
The compilation target
target_host: Optional[Union[tvm.target.Target, str]]
The host compilation target
hardware_params : Optional[HardwareParams]
Hardware parameters used for the search tasks
include_simple_tasks: bool
Whether to extract simple tasks that do not include complicated ops.
dump_workload_to_dag_log: Optional[str]
A file to dump an association between the workload keys and the actual DAG
opt_level : Optional[int]
The optimization level of the task extractions.
Returns
-------
tasks: List[SearchTask]
The tasks in this network
weights: List[int]
        The weight (i.e. the number of appearances) of the extracted tasks
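    Examples
    --------
    A typical sketch, assuming ``mod`` and ``params`` come from a Relay frontend
    importer:
    .. code-block:: python
        tasks, task_weights = auto_scheduler.extract_tasks(mod["main"], params, target)
        for task, weight in zip(tasks, task_weights):
            print(task.workload_key, weight)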
"""
# pylint: disable=import-outside-toplevel
target, target_host = Target.canon_target_and_host(target, target_host)
# Run the compiler to collect all TOPI calls during compilation.
env = TracingEnvironment(
TracingMode.EXTRACT_TASK if include_simple_tasks else TracingMode.EXTRACT_COMPLEX_TASK_ONLY
)
dispatch_ctx = DispatchContext.current
old_verbose = dispatch_ctx.verbose
dispatch_ctx.verbose = 0
errors = []
with env:
# Wrap build call in a new thread to avoid the conflict
# between python's multiprocessing and tvm's thread pool
build_thread = threading.Thread(
target=call_all_topi_funcs, args=(mod, params, target, errors, opt_level)
)
build_thread.start()
build_thread.join()
if errors:
error_strings = ["Task extraction had the following errors:"] + errors
raise TVMError("\n".join(error_strings))
dispatch_ctx.verbose = old_verbose
# create search tasks
tasks = []
weights = []
for wkl_key, (weight, func_names) in env.wkl_key_to_weight.items():
tasks.append(
SearchTask(
workload_key=wkl_key,
target=target,
hardware_params=hardware_params,
                # When the auto-scheduler is used on an end-to-end network, try to apply
                # layout rewrite to improve the overall performance
layout_rewrite_option=LayoutRewriteOption.get_target_default(target, True),
task_inputs=(
env.wkl_key_to_input_names[wkl_key]
if wkl_key in env.wkl_key_to_input_names
else None
),
task_inputs_save_to_file=True,
desc=",".join(func_names),
)
)
weights.append(int(weight))
if dump_workload_to_dag_log is not None:
with open(dump_workload_to_dag_log, "w") as f:
json.dump({task.workload_key: str(task.compute_dag) for task in tasks}, f)
return tasks, weights
class TracingMode:
"""Two modes for tracing"""
EXTRACT_TASK = 0 # trace all topi calls to extract tasks
# same as EXTRACT_TASK but ignore the task without complex ops
EXTRACT_COMPLEX_TASK_ONLY = 1
PREPARE_LAYOUT_REWRITE = 2 # trace topi calls to prepare layout rewrite
class TracingEnvironment:
"""Global environment for tracing all topi function calls"""
current = None
def __init__(self, tracing_mode):
self.tracing_mode = tracing_mode
self.relay_disable_build_cache = "false"
self.func_name_to_wkl_key = {}
self.wkl_key_to_weight = {}
self.wkl_key_to_input_names = {}
def __enter__(self):
TracingEnvironment.current = self
return self
def __exit__(self, exc_type, exc_val, exc_tb):
TracingEnvironment.current = None
def add_workload_key(self, func_name, workload_key):
"""Add the workload key of a search task.
Parameters
----------
func_name: str
The function name of the task.
workload_key: str
The workload key of a task.
"""
self.func_name_to_wkl_key[func_name] = workload_key
if workload_key not in self.wkl_key_to_weight:
self.wkl_key_to_weight[workload_key] = (0, set())
weight, func_names = self.wkl_key_to_weight[workload_key]
func_names.add(func_name)
self.wkl_key_to_weight[workload_key] = (weight + 1, func_names)
def add_workload_input_names(self, workload_key, input_names):
"""Add special task inputs to this workload.
Parameters
----------
workload_key : str
The workload key of a task.
input_names : List[str]
A list of input names.
"""
self.wkl_key_to_input_names[workload_key] = input_names
@tvm._ffi.register_func("auto_scheduler.enter_layout_rewrite")
def enter_layout_rewrite():
"""Enter layout rewrite tracing environment"""
env = TracingEnvironment(TracingMode.PREPARE_LAYOUT_REWRITE)
env.__enter__()
@tvm._ffi.register_func("auto_scheduler.exit_layout_rewrite")
def exit_layout_rewrite():
"""Exit layout rewrite tracing environment"""
env = TracingEnvironment.current
env.__exit__(None, None, None)
def traverse_to_get_io_tensors(outs):
"""Traverse from a list of output tensors to get input/output tensors and
other useful information.
Parameters
----------
outs: List[Tensor]
The output tensors
Returns
-------
io_tensors: List[Tensor]
The input and output tensors with static shape
has_layout_free: bool
Whether the compute DAG has layout_free placeholders
has_complex_op: bool
Whether the topi compute function includes at least one complex (reduce) op
"""
layout_free_ops = []
inputs = []
has_complex_op = False
visited = set()
def traverse(t):
nonlocal has_complex_op
# We cannot directly add tensors to the set, because the comparison of
# two tensors with ndim=0 is ambiguous.
assert t.handle is not None
if t.handle.value in visited:
return
if isinstance(t.op, PlaceholderOp):
inputs.append(t)
elif isinstance(t.op, ComputeOp):
has_complex_op = has_complex_op or any([isinstance(e, Reduce) for e in t.op.body])
if "layout_free_placeholders" in t.op.attrs:
layout_free_ops.append(t.op)
for x in t.op.input_tensors:
traverse(x)
visited.add(t.handle.value)
for t in outs:
traverse(t)
io_tensors = inputs + list(outs)
for tensor in io_tensors:
# Reject the compute if any of its I/O tensors has dynamic shape.
if any([not isinstance(v, int) for v in get_const_tuple(tensor.shape)]):
return ([], False, False)
return (io_tensors, len(layout_free_ops) > 0, has_complex_op)
@tvm._ffi.register_func("auto_scheduler.relay_integration.auto_schedule_topi_compute")
def auto_schedule_topi(func_name, outs):
"""Use auto-scheduler to schedule any topi compute function.
Note: This is used internally for relay integration. Do
not use this as a general user-facing API.
Parameters
----------
func_name: str
The name of the function being scheduled.
outs: List[Tensor]
The output tensors of topi compute functions
Returns
-------
sch: Optional[te.Schedule]
        A tuned schedule or None (if not tuned) in the final build mode;
        None in the tracing mode so that the fallback topi schedule will be used.
"""
# pylint: disable=import-outside-toplevel
from tvm.auto_scheduler.measure import ( # lazily import to avoid recursive dependency
prepare_input_map,
)
io_tensors, has_layout_free, has_complex_op = traverse_to_get_io_tensors(outs)
if not io_tensors: # The compute includes dynamic shapes which are not supported yet.
return None
try:
dag = ComputeDAG(io_tensors)
except tvm.error.TVMError as err:
logger.info("Failed to create a ComputeDAG for auto_scheduler: %s", str(err))
return None
workload_key = dag.workload_key()
key = register_workload_tensors(workload_key, io_tensors)
target = tvm.target.Target.current()
dispatch_ctx = DispatchContext.current
state = dispatch_ctx.query(target, key, has_complex_op, dag, func_name)
schedule = None
env = TracingEnvironment.current
if env is None:
# in the final build mode
if state is None:
return None
schedule, _ = dag.apply_steps_from_state(state)
return schedule
if env.tracing_mode in [TracingMode.EXTRACT_TASK, TracingMode.EXTRACT_COMPLEX_TASK_ONLY]:
# in the task extraction mode
if has_complex_op or env.tracing_mode == TracingMode.EXTRACT_TASK:
env.add_workload_key(func_name, key)
input_map = prepare_input_map(io_tensors, workload_key)
if input_map:
env.add_workload_input_names(key, list(input_map.values()))
elif env.tracing_mode == TracingMode.PREPARE_LAYOUT_REWRITE:
# in prepare_layout_rewrite mode
if (
LayoutRewriteOption.get_target_default(target, True) != LayoutRewriteOption.NO_REWRITE
and has_layout_free
):
if state is None:
return None
# rewrite the layout and update the context for the new dag
new_dag = dag.rewrite_layout_from_state(state)
new_key = new_dag.workload_key()
if new_key != key:
dispatch_ctx.update(target, new_key, state)
else:
raise ValueError("Invalid tracing mode: " + env.tracing_mode)
return schedule
@tvm._ffi.register_func("auto_scheduler.relay_integration.te_compiler_update_weights")
def te_compiler_update_weights(function_weights):
"""A callback for updating the weights of extracted tasks. When using the TE compiler
that avoids compiling the same function multiple times by caching, all extracted tasks
have weight 1, so the TE compiler invokes this callback at the end. In this case,
    we override existing weights with the use_count in the TE compiler cache.
Parameters
----------
function_weights: Dict[str, int]
Mapping from function names to their weights.
"""
env = TracingEnvironment.current
if env is not None:
# Override this map with the weights in the TE compiler.
env.wkl_key_to_weight = {}
for func_name, weight in function_weights.items():
# If the function name is not in the map, then it means we are not interested in
# this function during task extraction (e.g., a function without reduction).
if func_name not in env.func_name_to_wkl_key:
continue
workload_key = env.func_name_to_wkl_key[func_name]
if workload_key not in env.wkl_key_to_weight:
env.wkl_key_to_weight[workload_key] = (0, set())
            # Note that a function appearing multiple times in a model will be renamed
            # to make sure function names are unique, so we use the workload key generated
            # from the function's TE compute to determine its weight.
old_weight, func_names = env.wkl_key_to_weight[workload_key]
func_names.add(func_name)
env.wkl_key_to_weight[workload_key] = (old_weight + weight, func_names)
def tensor_no_check_call(self, *indices):
"""An indexing function without any check.
This is the same as `tvm.te.Tensor::__call__` except that the safety
check is removed.
"""
indices = convert_to_object(indices)
args = []
for x in indices:
if isinstance(x, _expr.PrimExpr):
args.append(x)
elif isinstance(x, _expr.IterVar):
args.append(x.var)
else:
raise ValueError("The indices must be expression")
return _expr.ProducerLoad(self, args)
def remove_index_check(tensor):
"""Remove the safety check in the indexing function for a tensor.
This is done by monkey patching its indexing function.
After removing the check, we are allowed to create a
temporary wrong IR and fix it later in other places.
Parameters
----------
tensor: Tensor
        The tensor whose index check will be removed.
"""
# Monkey patch the indexing function
tensor.__call__ = tensor_no_check_call.__get__(tensor, Tensor)
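# Sketch of the monkey patch in action (illustrative; assumes `te` is available):
#
#   A = te.placeholder((8, 8), name="A")
#   remove_index_check(A)
#   load = A(9, 9)  # out-of-range indices are tolerated until the IR is fixed later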
def rewrite_compute_body(compute_tensor, new_layout):
"""Rewrite the body of a ComputeOp according to a new layout of a placeholder"""
op = compute_tensor.op
# Get layout free placeholders
layout_free_placeholders = op.attrs["layout_free_placeholders"]
assert len(layout_free_placeholders) == 1, "Only support one layout free placeholder"
placeholder_op = layout_free_placeholders[0].op
# Rewrite the index expression in body
body = []
for b in op.body:
body.append(_ffi_api.RewriteIndexForNewLayout(placeholder_op, new_layout, b))
op_node = tvm.te._ffi_api.ComputeOp(op.name, op.tag, op.attrs, op.axis, body)
num = op_node.num_outputs
outputs = tuple(op_node.output(i) for i in range(num))
return outputs[0] if num == 1 else outputs
def rewrite_tensor_shape(tensor, shape):
"""Rewrite the tensor shape"""
_ffi_api.RewriteTensorShape(tensor, shape)
def is_auto_scheduler_enabled():
"""Return whether the auto-scheduler is enabled.
    Returns
    -------
    enabled: bool
        Whether the auto-scheduler is enabled
"""
return PassContext.current().config.get(
"relay.backend.use_auto_scheduler",
False,
)
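# Usage sketch: the flag is read from the current PassContext, e.g.
#
#   with tvm.transform.PassContext(
#       opt_level=3, config={"relay.backend.use_auto_scheduler": True}
#   ):
#       assert is_auto_scheduler_enabled()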
| https://github.com/zk-ml/tachikoma |
python/tvm/auto_scheduler/search_policy.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
The search policies of TVM auto-scheduler.
The auto-scheduler constructs a search space according to the compute declaration.
It then randomly samples programs from the search space and uses evolutionary search with a
learned cost model to fine tune the sampled programs.
The final optimized programs are sent to actual hardware for measurement.
The above process is repeated until the auto-scheduler runs out of time budget.
Reference:
L. Zheng, C. Jia, M. Sun, Z. Wu, C. Yu, et al. "Ansor : Generating High-Performance Tensor
Programs for Deep Learning." (OSDI 2020).
"""
import random
import tvm._ffi
from tvm.runtime import Object
from .cost_model import RandomModel
from . import _ffi_api
@tvm._ffi.register_object("auto_scheduler.SearchCallback")
class SearchCallback(Object):
"""Callback function before or after search process"""
@tvm._ffi.register_object("auto_scheduler.PreloadMeasuredStates")
class PreloadMeasuredStates(SearchCallback):
"""A SearchCallback to load measured states from the log file for a search policy.
This can resume the state of the search policy:
- Making sure an already measured state in former searches will never be measured again.
    - The history states can be used to speed up the search process (e.g. SketchPolicy
      uses history states as starting points to perform evolutionary search).
Parameters
----------
filename : str
The name of the record file.
"""
def __init__(self, filename):
self.__init_handle_by_constructor__(_ffi_api.PreloadMeasuredStates, filename)
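# Usage sketch: resume a previous search by preloading its records (hypothetical file):
#
#   policy = SketchPolicy(task, init_search_callbacks=[PreloadMeasuredStates("records.json")])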
@tvm._ffi.register_object("auto_scheduler.PreloadCustomSketchRule")
class PreloadCustomSketchRule(SearchCallback):
"""
A SearchCallback for SketchSearchPolicy that allows users to add
custom sketch rule.
Notes
-----
This is an advanced feature. Make sure you're clear how it works and this should only be used
in SketchSearchPolicy.
Parameters
----------
meet_condition_func: Callable
        A function with `(policy, state, stage_id) -> int`. It should return one of the
        result enumeration values below.
apply_func: Callable
A function with `(policy, state, stage_id) -> [[State, int], ...]`.
rule_name: str = "CustomSketchRule"
The name of this custom sketch rule.
"""
# Result enumeration of the condition function.
PASS = 0 # Skip this rule and continue to try the next rules.
APPLY = 1 # Apply this rule and continue to try the next rules.
    APPLY_AND_SKIP_REST = 2  # Apply this rule and skip the remaining rules.
def __init__(self, meet_condition_func, apply_func, rule_name="CustomSketchRule"):
self.__init_handle_by_constructor__(
_ffi_api.PreloadCustomSketchRule, meet_condition_func, apply_func, rule_name
)
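# A minimal custom sketch rule (hypothetical condition/apply functions; illustrative only):
#
#   def meet_condition(policy, state, stage_id):
#       return PreloadCustomSketchRule.APPLY
#
#   def apply_rule(policy, state, stage_id):
#       return [[state, stage_id - 1]]  # keep the state and move to the previous stage
#
#   callback = PreloadCustomSketchRule(meet_condition, apply_rule, "MyRule")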
@tvm._ffi.register_object("auto_scheduler.SearchPolicy")
class SearchPolicy(Object):
"""The base class of search policies."""
def continue_search_one_round(self, num_measure, measurer):
"""
Continue the search by doing an additional search round.
Parameters
----------
num_measure: int
The number of programs to measure in this round
measurer: ProgramMeasurer
The program measurer to measure programs
Returns
-------
inputs: List[MeasureInput]
            The inputs of measurements in this search round
        results: List[MeasureResult]
            The results of measurements in this search round
"""
return _ffi_api.SearchPolicyContinueSearchOneRound(self, num_measure, measurer)
def set_verbose(self, verbose):
"""
Set the verbosity level of the search policy.
Parameters
----------
verbose: int
The verbosity level
"""
return _ffi_api.SearchPolicySetVerbose(self, verbose)
@tvm._ffi.register_object("auto_scheduler.EmptyPolicy")
class EmptyPolicy(SearchPolicy):
"""A simple example of the search policy which always returns
the initial naive schedule (state).
Parameters
----------
task : SearchTask
The SearchTask for the computation declaration.
init_search_callbacks : Optional[List[SearchCallback]]
Callback functions called before the search process.
"""
def __init__(self, task, init_search_callbacks=None):
self.__init_handle_by_constructor__(_ffi_api.EmptyPolicy, task, init_search_callbacks)
@tvm._ffi.register_object("auto_scheduler.SketchPolicy")
class SketchPolicy(SearchPolicy):
"""The search policy that searches in a hierarchical search space defined by sketches.
    The policy randomly samples programs from the space defined by sketches and uses
    evolutionary search to fine-tune them.
Parameters
----------
task : SearchTask
The SearchTask for the computation declaration.
program_cost_model : CostModel = RandomModel()
The cost model to estimate the complete schedules.
params : Optional[Dict[str, Any]]
Parameters of the search policy.
See `src/auto_scheduler/search_policy/sketch_search_policy.h` for the definitions.
See `DEFAULT_PARAMS` below to find the default values.
seed : Optional[int]
Random seed.
verbose : int = 1
Verbosity level. 0 for silent, 1 to output information during schedule search.
init_search_callbacks : Optional[List[SearchCallback]]
Callback functions called before the search process, usually used to do extra
initializations.
Possible callbacks:
- auto_scheduler.PreloadMeasuredStates
- auto_scheduler.PreloadCustomSketchRule
"""
DEFAULT_PARAMS = {
"eps_greedy": 0.05,
"retry_search_one_round_on_empty": 1,
"sample_init_min_population": 50,
"sample_init_use_measured_ratio": 0.2,
"evolutionary_search_population": 2048,
"evolutionary_search_num_iters": 4,
"evolutionary_search_mutation_prob": 0.85,
"cpu_multi_level_tiling_structure": "SSRSRS",
"gpu_multi_level_tiling_structure": "SSSRRSRS",
        # Notice: the default GPU thread binding policy assumes the tiling structure
        # has at least 3 spatial tiling levels at the outermost.
"max_innermost_split_factor": 64,
"max_vectorize_size": 16,
"disable_change_compute_location": 0,
}
def __init__(
self,
task,
program_cost_model=RandomModel(),
params=None,
seed=None,
verbose=1,
init_search_callbacks=None,
):
if params is None:
params = SketchPolicy.DEFAULT_PARAMS
else:
for key, value in SketchPolicy.DEFAULT_PARAMS.items():
if key not in params:
params[key] = value
self.__init_handle_by_constructor__(
_ffi_api.SketchPolicy,
task,
program_cost_model,
params,
seed or random.randint(1, 1 << 30),
verbose,
init_search_callbacks,
)
def generate_sketches(self, print_for_debug=False):
"""Generate the sketches.
This python interface is mainly used for debugging and testing.
The actual search is all done in c++.
Parameters
----------
print_for_debug : bool = False
            Whether to print the sketches for debugging.
Returns
-------
sketches : List[State]
The generated sketches of this search task.
"""
sketches = _ffi_api.SketchPolicyGenerateSketches(self)
if print_for_debug:
for i, s in enumerate(sketches):
print("=" * 20 + " %d " % i + "=" * 20)
print(s)
return sketches
def sample_initial_population(self):
"""Sample initial population.
This python interface is mainly used for debugging and testing.
The actual search is all done in c++.
Returns
-------
states: List[State]
The sampled states
"""
states = _ffi_api.SketchPolicySampleInitialPopulation(self)
return states
def evolutionary_search(self, init_populations, out_size):
"""Perform evolutionary search.
This python interface is mainly used for debugging and testing.
The actual search is all done in c++.
Parameters
----------
init_populations: List[State]
The initial population states
out_size : int
The size of generated states
Returns
-------
states: List[State]
The generated states
"""
states = _ffi_api.SketchPolicyEvolutionarySearch(self, init_populations, out_size)
return states
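# Debugging-flow sketch tying the three helpers above together (illustrative only):
#
#   policy = SketchPolicy(task, verbose=0)
#   sketches = policy.generate_sketches(print_for_debug=True)
#   population = policy.sample_initial_population()
#   best_states = policy.evolutionary_search(population, out_size=10)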
| https://github.com/zk-ml/tachikoma |
python/tvm/auto_scheduler/search_task.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" The definiton of SearchTask """
import json
import os
import logging
import numpy as np
import tvm._ffi
from tvm.runtime import Object, ndarray
from tvm.driver.build_module import build
from tvm.target import Target
from .measure import LocalBuilder, LocalRunner
from .measure_record import load_best_record
from .workload_registry import make_workload_key
from .compute_dag import ComputeDAG, LayoutRewriteOption
from .cost_model import XGBModel
from .search_policy import SketchPolicy
from .workload_registry import WORKLOAD_FUNC_REGISTRY, register_workload_tensors
from . import _ffi_api
# pylint: disable=invalid-name
logger = logging.getLogger("auto_scheduler")
@tvm._ffi.register_object("auto_scheduler.HardwareParams")
class HardwareParams(Object):
"""The parameters of target hardware used to guide the search policy.
    When a parameter isn't provided, the default value for the current machine will be
    used if a target is specified.
TODO(jcf94): This is considered to be merged with the new Target specification:
https://discuss.tvm.apache.org/t/rfc-tvm-target-specification/6844
Parameters
----------
num_cores : int, optional
The number of device cores.
vector_unit_bytes : int, optional
The width of vector units in bytes.
cache_line_bytes : int, optional
The size of cache line in bytes.
max_shared_memory_per_block : int, optional
The max shared memory per block in bytes.
max_local_memory_per_block : int, optional
The max local memory per block in bytes.
max_threads_per_block : int, optional
The max number of threads per block.
max_vthread_extent : int, optional
The max vthread extent.
warp_size : int, optional
The thread numbers of a warp.
target : str or Target, optional
The compilation target. Used to determine default values if provided.
target_host : str or Target, optional
The compilation target host. Used to determine default values if provided.
"""
def __init__(
self,
num_cores=None,
vector_unit_bytes=None,
cache_line_bytes=None,
max_shared_memory_per_block=None,
max_local_memory_per_block=None,
max_threads_per_block=None,
max_vthread_extent=None,
warp_size=None,
target=None,
target_host=None,
):
        # If target is provided, get the default parameters for this machine.
if target is not None:
if isinstance(target, str):
target = tvm.target.Target(target)
if isinstance(target_host, str):
target_host = tvm.target.Target(target_host)
default_params = _ffi_api.GetDefaultHardwareParams(target, target_host)
if num_cores is None:
num_cores = default_params.num_cores
if vector_unit_bytes is None:
vector_unit_bytes = default_params.vector_unit_bytes
if cache_line_bytes is None:
cache_line_bytes = default_params.cache_line_bytes
if max_shared_memory_per_block is None:
max_shared_memory_per_block = default_params.max_shared_memory_per_block
if max_local_memory_per_block is None:
max_local_memory_per_block = default_params.max_local_memory_per_block
if max_threads_per_block is None:
max_threads_per_block = default_params.max_threads_per_block
if max_vthread_extent is None:
max_vthread_extent = default_params.max_vthread_extent
if warp_size is None:
warp_size = default_params.warp_size
self.__init_handle_by_constructor__(
_ffi_api.HardwareParams,
num_cores,
vector_unit_bytes,
cache_line_bytes,
max_shared_memory_per_block,
max_local_memory_per_block,
max_threads_per_block,
max_vthread_extent,
warp_size,
)
def __str__(self):
"""Pretty printing for hardware parameter configuration."""
format_str = (
"HardwareParams:\n"
f" num_cores: {self.num_cores}\n"
f" vector_unit_bytes: {self.vector_unit_bytes}\n"
f" cache_line_bytes: {self.cache_line_bytes}\n"
f" max_shared_memory_per_block: {self.max_shared_memory_per_block}\n"
f" max_local_memory_per_block: {self.max_local_memory_per_block}\n"
f" max_threads_per_block: {self.max_threads_per_block}\n"
f" max_vthread_extent: {self.max_vthread_extent}\n"
f" warp_size: {self.warp_size}\n"
)
return format_str
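# Construction sketch: unspecified fields fall back to the target's defaults when a
# target is given (values below are illustrative):
#
#   params = HardwareParams(target="llvm")  # query defaults for the local machine
#   params = HardwareParams(num_cores=8, vector_unit_bytes=32, cache_line_bytes=64)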
@tvm._ffi.register_object("auto_scheduler.TuningOptions")
class TuningOptions(Object):
"""This controls the options of performance tuning.
Parameters
----------
num_measure_trials: int = 0
The number of measurement trials.
The search policy measures `num_measure_trials` schedules in total and returns the best one
among them.
With `num_measure_trials` == 0, the policy will do the schedule search but won't involve
measurement. This can be used to get a runnable schedule quickly without auto-tuning.
early_stopping: Optional[int]
Stop the tuning early if getting no improvement after n measurements.
num_measures_per_round: int = 64
The number of schedules to be measured at each search round.
The whole schedule search process will try a total number of `num_measure_trials` in several
rounds.
verbose: int = 1
Verbosity level. 0 for silent, 1 to output information during schedule search.
builder: Union[ProgramBuilder, str] = 'local'
ProgramBuilder which builds the program.
runner: Union[ProgramRunner, str] = 'local'
ProgramRunner which runs the program and measures time costs.
measure_callbacks: Optional[List[MeasureCallback]]
Callback functions called after each measurement.
Candidates:
- auto_scheduler.RecordToFile
"""
def __init__(
self,
num_measure_trials=0,
early_stopping=None,
num_measures_per_round=64,
verbose=1,
builder="local",
runner="local",
measure_callbacks=None,
):
if isinstance(builder, str):
if builder == "local":
builder = LocalBuilder()
else:
raise ValueError("Invalid builder: " + builder)
elif not isinstance(builder, tvm.auto_scheduler.measure.ProgramBuilder):
raise ValueError(
"Invalid builder: "
+ builder
+ " . TuningOptions expects a ProgramBuilder or string."
)
if isinstance(runner, str):
if runner == "local":
runner = LocalRunner()
else:
raise ValueError("Invalid runner: " + runner)
elif not isinstance(runner, tvm.auto_scheduler.measure.ProgramRunner):
raise ValueError(
"Invalid runner: " + runner + " . TuningOptions expects a ProgramRunner or string."
)
self.__init_handle_by_constructor__(
_ffi_api.TuningOptions,
num_measure_trials,
early_stopping or -1,
num_measures_per_round,
verbose,
builder,
runner,
measure_callbacks,
)
# The map stores special registered buffer for measurement.
# This can be used for sparse workloads when we cannot use random tensors for measurement.
# {
# "workload_key_0": {
# "task_input_0": Tensor(...),
# "task_input_1": Tensor(...)
# },
# "workload_key_1": {
# "task_input_2": Tensor(...),
# "task_input_3": Tensor(...)
# },
# ...
# }
TASK_INPUT_BUFFER_TABLE = {}
def _save_buffer_to_file(buffer_name, buffer_data):
"""Save the current Tensor buffer to a numpy file.
File name will be: {buffer_name}.{buffer_shape}_{buffer_data_type}.npy
"""
np_data = buffer_data.numpy()
buffer_name += "."
for i in np_data.shape:
buffer_name += "%d_" % (i)
buffer_name += "%s" % (np_data.dtype)
buffer_name += ".npy"
np_data.tofile(buffer_name, " ")
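# For example, a float32 buffer of shape (128, 16) named "W" is saved by the helper
# above as "W.128_16_float32.npy".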
def _try_load_buffer_from_file(buffer_name):
"""Try to load buffer from a numpy file, if not found, return None.
    The file name has the same format as in `_save_buffer_to_file`.
"""
filelist = os.listdir()
for file in filelist:
if file.startswith(buffer_name + "."):
meta_info = file.split(".")[-2].split("_")
shape = [int(i) for i in meta_info[:-1]]
dtype = meta_info[-1]
buffer_data = np.fromfile(file, dtype=dtype, sep=" ")
buffer_data = buffer_data.reshape(shape)
return ndarray.array(buffer_data)
return None
def register_task_input_buffer(
workload_key,
input_name,
input_data,
overwrite=False,
save_to_file=False,
):
"""Register special buffer for measurement.
Parameters
----------
workload_key : str
The workload key of the SearchTask.
input_name : str
The name of input buffer.
input_data : tvm.nd.NDArray
The input Tensor data.
overwrite : bool = False
        Whether to overwrite the data if the name has already been registered.
save_to_file : bool = False
Whether to save the data to a local file as well. This can be reused to resume the last
tuning process.
Returns
-------
tvm.nd.NDArray
        The actual registered Tensor data of this input_name. With `overwrite` set to
        False, this returns the original data if the name has already been registered.
"""
global TASK_INPUT_BUFFER_TABLE
if workload_key not in TASK_INPUT_BUFFER_TABLE:
TASK_INPUT_BUFFER_TABLE[workload_key] = {}
input_table = TASK_INPUT_BUFFER_TABLE[workload_key]
if not overwrite:
if input_name not in input_table.keys():
# Try to load buffer data from local file
tensor_from_file = _try_load_buffer_from_file(input_name)
if tensor_from_file:
input_table[input_name] = tensor_from_file
elif input_name in input_table.keys():
raise RuntimeError(
"Tensor %s exists in TASK_INPUT_BUFFER_TABLE, %s"
% (input_name, "set overwrite to True or this Tensor will not be registered")
)
input_table[input_name] = input_data
if save_to_file:
_save_buffer_to_file(input_name, input_data)
return input_data
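# Registration sketch for a sparse workload (hypothetical names and shapes):
#
#   w_np = np.random.rand(1024, 256).astype("float32")
#   register_task_input_buffer(task.workload_key, "W_data", ndarray.array(w_np))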
def get_task_input_buffer(workload_key, input_name):
"""Get special buffer for measurement.
The buffers are registered by `register_task_input_buffer`.
Parameters
----------
workload_key : str
The workload key of the SearchTask.
input_name : str
The name of input buffer.
Returns
-------
tvm.nd.NDArray
The registered input buffer.
"""
global TASK_INPUT_BUFFER_TABLE
if workload_key not in TASK_INPUT_BUFFER_TABLE:
TASK_INPUT_BUFFER_TABLE[workload_key] = {}
input_table = TASK_INPUT_BUFFER_TABLE[workload_key]
if input_name not in input_table:
# Try to load buffer data from local file
tensor_from_file = _try_load_buffer_from_file(input_name)
if tensor_from_file:
input_table[input_name] = tensor_from_file
    # Then check the default table; the input names extracted from a relay model are
    # stored there because we are not able to get the workload_key at that time.
if input_name not in input_table:
input_table = TASK_INPUT_BUFFER_TABLE["default"]
if input_name in input_table:
return input_table[input_name]
raise ValueError(
"%s not found in TASK_INPUT_BUFFER_TABLE, " % (input_name)
+ "should provide with `SearchTask(..., task_inputs={...})`"
)
@tvm._ffi.register_object("auto_scheduler.SearchTask")
class SearchTask(Object):
"""The computation information and hardware parameters for a schedule search task.
Parameters
----------
func : Union[Function, str]
The function that returns the compute declaration Tensors.
        Can be a function or a function name.
args : Union[Tuple[Any, ...], List[Any]]
The args of the function.
compute_dag : ComputeDAG
The ComputeDAG for the corresponding compute declaration.
workload_key : str
The workload key for the corresponding compute declaration.
target : any target-like object, see Target.canon_target
The target device of this search task.
target_host : None or any target-like object, see Target.canon_target
The target host device of this search task.
hardware_params : Optional[HardwareParams]
Hardware parameters used in this search task.
layout_rewrite_option : Optional[LayoutRewriteOption]
The layout rewrite option used for measuring programs. If None, the default value will be
set depending on the specified target.
Auto_scheduler will find a better schedule for the specified layout rewrite option.
The NO_REWRITE and INSERT_TRANSFORM_STAGE are expected to be used when tuning a standalone
op, and the REWRITE_FOR_PRE_TRANSFORMED is expected to be used when tuning ops inside a
network.
task_inputs : Union[Dict[str, tvm.nd.NDArray], List[str]]
        A dict that maps input names to input tensors, or a list of input names.
        These are special tensors used as inputs when measuring programs. Usually we do not
        need to care about them, but for special workloads like sparse computation the
        sparse tensor inputs are meaningful and we cannot use random inputs directly.
task_inputs_overwrite : bool = False
        Whether to overwrite the data if the name is already in the global table.
task_inputs_save_to_file : bool = False
Whether to save the data to a local file as well. This can be reused to resume the last
tuning process.
desc: str = ""
The description string of this task.
Examples
--------
.. code-block:: python
# We support two ways to create a search task
# Way 1: create a task by a workload generation function.
# The `workload_func` is a function decorated by @auto_scheduler.register_workload
task = SearchTask(func=workload_func, args=args, target=target)
# Way 2: create a task by a workload_key.
# The `workload_key` is a string, which can be either a hash key or a json-serialized
# tuple(func, args).
task = SearchTask(workload_key=workload_key, target=target)
"""
def __init__(
self,
func=None,
args=None,
compute_dag=None,
workload_key=None,
target=None,
target_host=None,
hardware_params=None,
layout_rewrite_option=None,
task_inputs=None,
task_inputs_overwrite=False,
task_inputs_save_to_file=False,
desc="",
):
assert (
func is not None or workload_key is not None
), "Either a workload generation function or a workload key should be provided"
if func is not None:
workload_key = make_workload_key(func, args)
if compute_dag is None:
compute_dag = ComputeDAG(workload_key)
assert target is not None, "Must specify a target."
target, target_host = Target.canon_target_and_host(target, target_host)
if layout_rewrite_option is None:
layout_rewrite_option = LayoutRewriteOption.get_target_default(target)
task_input_names = []
if isinstance(task_inputs, list):
task_input_names = task_inputs
elif isinstance(task_inputs, dict):
for input_name in task_inputs:
register_task_input_buffer(
workload_key,
input_name,
task_inputs[input_name],
task_inputs_overwrite,
task_inputs_save_to_file,
)
task_input_names.append(input_name)
elif task_inputs is not None:
raise ValueError("task_inputs should be a dict or a list.")
self.__init_handle_by_constructor__(
_ffi_api.SearchTask,
compute_dag,
workload_key,
target,
target_host,
hardware_params,
layout_rewrite_option,
task_input_names,
desc,
)
def tune(self, tuning_options, search_policy=None, adaptive_training=False):
"""Run auto scheduling search for a task
Parameters
----------
tuning_options : TuningOptions
Tuning and measurement options.
search_policy : Optional[SearchPolicy]
The search policy to be used for schedule search.
"""
if search_policy is None:
cost_model = XGBModel(adaptive_training=adaptive_training)
search_policy = SketchPolicy(self, cost_model)
_ffi_api.AutoSchedule(search_policy, tuning_options)
def apply_best(self, log_file, include_compatible=False, layout_rewrite_option=None):
"""Apply the history best from a log file and return the schedule.
Parameters
----------
log_file : str
The name of the log file.
include_compatible: bool
When set to True, all compatible records in the log file will be considered.
layout_rewrite_option : Optional[LayoutRewriteOption]
The layout rewrite option.
Returns
-------
        A `te.Schedule` and a list of `te.Tensor` to be used in `tvm.lower` or `tvm.build`.
"""
inp, _ = load_best_record(
log_file, self.workload_key, include_compatible=include_compatible
)
if inp is None:
raise RuntimeError(
"Cannot find any valid schedule for %s in file %s" % (self.workload_key, log_file)
)
sch, args = self.compute_dag.apply_steps_from_state(
inp.state, layout_rewrite_option or self.layout_rewrite_option
)
return sch, args
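    # Usage sketch after tuning (hypothetical log file):
    #
    #   sch, args = task.apply_best("records.json")
    #   lib = tvm.build(sch, args, target=task.target)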
def print_best(self, log_file, print_mode="schedule"):
"""Print the best schedule as python schedule API code or CUDA source code.
Parameters
----------
log_file : str
The name of the log file
print_mode: str
if "schedule", print the best schedule as python schedule API code.
if "cuda", print the best schedule as CUDA source code.
Returns
-------
code: str
The best schedule code in python API or CUDA source code
"""
inp, _ = load_best_record(log_file, self.workload_key)
if inp is None:
raise RuntimeError(
"Cannot find any valid schedule for %s in file %s" % (self.workload_key, log_file)
)
if print_mode == "schedule":
return self.compute_dag.print_python_code_from_state(inp.state)
if print_mode == "cuda":
assert self.target.kind.name == "cuda"
sch, args = self.compute_dag.apply_steps_from_state(inp.state)
func = build(sch, args, "cuda")
return func.imported_modules[0].get_source()
raise ValueError("Invalid print_mode: %s" % print_mode)
def __getstate__(self):
self.target, self.target_host = Target.canon_target_and_host(self.target, self.target_host)
return {
"compute_dag": self.compute_dag,
"workload_key": self.workload_key,
"target": self.target,
"target_host": self.target_host,
"hardware_params": self.hardware_params,
"layout_rewrite_option": self.layout_rewrite_option,
"task_input_names": self.task_input_names,
"desc": self.desc,
}
def __setstate__(self, state):
# Register the workload if needed
try:
workload = json.loads(state["workload_key"])
except Exception: # pylint: disable=broad-except
raise RuntimeError("Invalid workload key %s" % state["workload_key"])
# workload[0] is either the compute function name or the ComputeDAG hash.
# The compute functions are already registered when importing TVM, so here
# we only register the ComputeDAG workloads. If the same workload has
        # already been registered, the later registration overrides the previous one.
if workload[0] not in WORKLOAD_FUNC_REGISTRY:
register_workload_tensors(state["workload_key"], state["compute_dag"].tensors)
state["target"], state["target_host"] = Target.canon_target_and_host(
state["target"], state["target_host"]
)
self.__init_handle_by_constructor__(
_ffi_api.SearchTask,
state["compute_dag"],
state["workload_key"],
state["target"],
state["target"].host,
state["hardware_params"],
state["layout_rewrite_option"],
state["task_input_names"],
state["desc"],
)
def create_task(func, args, target, target_host=None, hardware_params=None):
"""THIS API IS DEPRECATED.
Create a search task.
Parameters
----------
func : Union[Function, str]
The function that returns the compute declaration Tensors.
        Can be a function or a function name.
args : Union[Tuple[Any, ...], List[Any]]
The args of the function.
target : Union[tvm.target.Target, str]
The target device of this search task.
target_host : Optional[Union[tvm.target.Target, str]]
The target host device of this search task.
hardware_params : Optional[HardwareParams]
Hardware parameters used in this search task.
Returns
-------
SearchTask: the created task
"""
raise ValueError(
'The API "auto_scheduler.create_task" is deprecated.'
"See https://github.com/apache/tvm/pull/7028 for the upgrade guide"
)
def auto_schedule(task, search_policy=None, tuning_options=TuningOptions()):
"""THIS API IS DEPRECATED.
Run auto scheduling search for a task.
Parameters
----------
task : SearchTask
The SearchTask for the computation declaration.
search_policy : Optional[SearchPolicy]
The search policy to be used for schedule search.
tuning_options : Optional[TuningOptions]
Tuning and measurement options.
Returns
-------
    A `te.Schedule` and a list of `te.Tensor` to be used in `tvm.lower` or `tvm.build`.
"""
raise ValueError(
'The API "auto_scheduler.create_task" is deprecated.'
"See https://github.com/apache/tvm/pull/7028 for the upgrade guide."
)
| https://github.com/zk-ml/tachikoma |
python/tvm/auto_scheduler/task_scheduler.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
""" The task scheduler that allocates the time resources when tuning multiple tasks together
The details of the "gradient" strategy below can be found in the section 6 of this paper:
L. Zheng, C. Jia, M. Sun, Z. Wu, C. Yu, et al. "Ansor : Generating High-Performance Tensor
Programs for Deep Learning." (OSDI 2020).
"""
import os
import time
import math
import logging
import numpy as np
from .search_policy import SearchPolicy, SketchPolicy, PreloadMeasuredStates
from .cost_model import RandomModel, XGBModel
from .utils import array_mean
from .measure import ProgramMeasurer
from .measure_record import RecordReader
from . import _ffi_api
logger = logging.getLogger("auto_scheduler")
def make_search_policies(
search_policy,
search_policy_params,
tasks,
num_measures_per_round,
verbose,
load_model_file=None,
load_log_file=None,
adaptive_training=False,
):
"""Make a list of search policies for a list of search tasks.
It creates one policy per task.
Parameters
----------
search_policy: Union[str, List[SearchPolicy]]
The name of search policy.
search_policy_params: Dict[str, Any]]
The parameters of the search policy.
tasks: List[SearchTask]
The list of all tasks
num_measures_per_round: int
The number of schedules to be measured at each search round.
This should be the same as `TuningOptions.num_measures_per_round`
verbose: int
The verbosity level. 0 for silent.
load_model_file: Optional[str]
Load pre-trained model from this file. If this is None, the cost model will
be trained from scratch.
load_log_file: Optional[str]
Load measurement records from this file. If it is not None, the status of the
task scheduler, search policies and cost models will be restored according to this file.
adaptive_training: bool = False
        Option used by XGBModel to reduce the model training frequency when there are
        too many logs.
Returns
-------
policies: List[SearchPolicy]
The list of search policies
"""
if search_policy == "default":
search_policy = "sketch.xgb"
if isinstance(search_policy, str):
policy_type, model_type = search_policy.split(".")
if model_type == "xgb":
cost_model = XGBModel(
num_warmup_sample=len(tasks) * num_measures_per_round,
model_file=load_model_file,
adaptive_training=adaptive_training,
)
if load_model_file and os.path.isfile(load_model_file):
logger.info("TaskScheduler: Load pretrained model...")
cost_model.load(load_model_file)
elif load_log_file:
logger.info("TaskScheduler: Reload measured states and train the model...")
cost_model.update_from_file(load_log_file)
elif model_type == "random":
cost_model = RandomModel()
else:
raise ValueError("Invalid search policy: " + search_policy)
if policy_type == "sketch":
if load_log_file:
# use the log file to restore the status of search policies.
init_search_callbacks = [PreloadMeasuredStates(load_log_file)]
else:
init_search_callbacks = None
search_policies = [
SketchPolicy(
task,
cost_model,
params=search_policy_params,
verbose=verbose,
init_search_callbacks=init_search_callbacks,
)
for task in tasks
]
else:
raise ValueError("Invalid search policy: " + search_policy)
else:
# check type
assert isinstance(search_policy, (tuple, list))
for item in search_policy:
assert isinstance(item, SearchPolicy)
search_policies = search_policy
return search_policies
def derive_similarity_tag(dag, log_base=1.618):
"""Derive the tag for similarity check from one computational DAG.
The DAGs with the same tag are considered as similar tasks.
The tag format is <op1-tag>_<op2-tag> ... <log(flop)>.
If the tag is "", then the task is not considered to be similar to any other tasks.
Parameters
----------
dag: ComputeDAG
The input computational DAG
log_base: float = 1.618
The base of log to normalize FLOPS
Returns
-------
tag: str
The tag of this computational DAG.
"""
ret = ""
for op in dag.ops:
tag = op.attrs.get("auto_scheduler_task_scheduler_tag", None)
if tag:
ret += op.attrs["auto_scheduler_task_scheduler_tag"] + "_"
if ret:
ret += "%d" % int(math.log(dag.flop_ct + 1, log_base))
return ret
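# For instance, two dense tasks that share the op tag "dense" and have similar FLOP
# counts map to the same tag string (e.g. "dense_60") and end up in one group
# (values illustrative).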
class TaskScheduler:
"""
Allocate the time resources when tuning multiple tasks together.
This implements two strategies: "round-robin" and "gradient".
Parameters
----------
tasks: List[SearchTask]
All tasks to tune
task_weights: Optional[List[float]]
The weights of tasks.
        If provided, the task scheduler will set the objective function to
        sum(weight[t] * latency[t]), where weight[t] is the weight of a task
        and latency[t] is the latency of the task.
        If not provided, the task scheduler will assign equal weights to all
        tasks (i.e., the objective function is sum(latency[t])).
objective_func: Optional[Callable[List[float] -> float]]
The objective function to be minimized.
The objective function accepts the current latencies of all tasks and returns the
objective.
If not provided, the objective is the weighted sum of the latencies of all tasks.
strategy: str = "gradient"
The scheduling strategy.
"round-robin": Tune tasks in round robin order.
"gradient" : Tune tasks with gradient descent.
load_model_file: Optional[str]
Load pre-trained model from this file. If this is None, the cost model will
be trained from scratch.
load_log_file: Optional[str]
Load measurement records from this file. If it is not None, the status of the
task scheduler, search policies and cost models will be restored according to this file.
verbose: int = 1
The level of verbosity. 0 means silent.
alpha: float = 0.2
The parameter used for 'gradient' strategy
beta: float = 2
The parameter used for 'gradient' strategy
backward_window_size: int = 3
The parameter used for 'gradient' strategy
callbacks: Optional[List[TaskSchedulerCallback]]
The task scheduler callbacks that will be called before and after tuning a task.
If None, PrintTableInfo and LogEstimatedLatency callback will be used.
"""
def __init__(
self,
tasks,
task_weights=None,
objective_func=None,
strategy="gradient",
load_model_file: str = None,
load_log_file: str = None,
alpha: float = 0.2,
beta: float = 2,
gamma: float = 0.5,
backward_window_size: int = 3,
callbacks=None,
):
self.tasks = tasks
if objective_func: # use custom objective function
self.objective_func = objective_func
else: # use weighted sum
if task_weights:
self.objective_func = lambda costs: sum(c * w for c, w in zip(costs, task_weights))
else:
self.objective_func = sum
self.strategy = strategy
self.load_log_file = load_log_file
self.load_model_file = load_model_file
self.alpha = alpha
self.beta = beta
self.gamma = gamma
self.backward_window_size = backward_window_size
self.callbacks = (
callbacks
if callbacks is not None
else [PrintTableInfo(), LogEstimatedLatency("total_latency.tsv")]
)
assert len(self.tasks) != 0, "No tasks"
assert self.strategy in ["round-robin", "gradient"]
# task_cts[i] saves how many times task i is tuned
self.task_cts = [0 for _ in range(len(self.tasks))]
        # task_best_cts[i] saves the round in which task i found its best latency
self.task_best_cts = [0 for _ in range(len(self.tasks))]
# task_costs_history[i] saves the latency history of task i
self.task_costs_history = [[] for _ in range(len(self.tasks))]
# best_costs[i] saves the best latency of task i
self.best_costs = 1e10 * np.ones(len(self.tasks))
self.cur_score = self._compute_score(self.best_costs)
self.tune_option = self.measurer = self.search_policies = None
self.ct = self.best_ct = self.best_score = self.tic = None
self.num_measures_per_round = None
self.dead_tasks = set()
# Build similarity groups
self.task_tags = [] # task_id -> tag
self.tag_to_group_id = {} # tag -> group_id
self.group_task_ids = [] # group_id -> all task ids in this group
self.flop_cts = [] # task_id -> the number of floating ops
for i, task in enumerate(self.tasks):
tag = derive_similarity_tag(task.compute_dag)
self.task_tags.append(tag)
self.flop_cts.append(task.compute_dag.flop_ct)
if not tag:
continue
if tag not in self.tag_to_group_id:
self.tag_to_group_id[tag] = len(self.tag_to_group_id)
self.group_task_ids.append([])
self.group_task_ids[self.tag_to_group_id[tag]].append(i)
def tune(
self,
tune_option,
search_policy="default",
search_policy_params=None,
adaptive_training=False,
per_task_early_stopping=None,
):
"""Tune a batch of tasks together.
Parameters
----------
tune_option: TuningOptions
The tuning options applied to all tasks.
search_policy: : Union[str, List[SearchPolicy]] = "default"
The list of search policies.
If it is str,
"default" for the default policy (SketchPolicy + XGBModel),
"sketch.xgb" for SketchPolicy + XGBModel,
"sketch.random" for SketchPolicy + RandomModel.
search_policy_params : Optional[Dict[str, Any]]
The parameters of the search policy
adaptive_training : bool = False
            Option used by XGBModel to reduce the model training frequency when there are
            too many logs.
per_task_early_stopping : Optional[int]
Stop tuning a task early if getting no improvement after n measurements.
"""
# init members
self.tune_option = tune_option
self.early_stopping_all = (
1e20 if tune_option.early_stopping < 0 else tune_option.early_stopping
)
self.early_stopping_task = (
1e20 if per_task_early_stopping is None else per_task_early_stopping
)
self.measurer = ProgramMeasurer(
tune_option.builder,
tune_option.runner,
tune_option.measure_callbacks,
tune_option.verbose,
)
self.ct = self.best_ct = 0
self.tic = time.time()
# reset num_measures_per_round to make sure every task is tuned at least once
self.num_measures_per_round = min(
tune_option.num_measures_per_round, tune_option.num_measure_trials // len(self.tasks)
)
if self.num_measures_per_round <= 0:
raise ValueError(
"num_measure_trials is too small. Please set it to a higher value."
f"It should be at least {len(self.tasks)} for this model."
)
# restore the status of the task scheduler from a log file
if self.load_log_file:
self._restore_status(self.load_log_file, self.num_measures_per_round)
# make one search policy for one task
self.search_policies = make_search_policies(
search_policy,
search_policy_params,
self.tasks,
self.num_measures_per_round,
tune_option.verbose,
self.load_model_file,
self.load_log_file,
adaptive_training,
)
# do a round robin first to warm up
for idx in range(len(self.tasks)):
# skip warming up this task if it has been tuned before (restored from the log file)
if not self.task_cts[idx]:
self._tune_task(idx)
self.best_ct = self.ct
self.best_score = self.cur_score
# use the specific strategy to choose workload to tune
task_idx = -1
while self.ct < tune_option.num_measure_trials and len(self.dead_tasks) < len(self.tasks):
if self.strategy == "round-robin":
task_idx = (task_idx + 1) % len(self.tasks)
while task_idx in self.dead_tasks:
task_idx = (task_idx + 1) % len(self.tasks)
elif self.strategy == "gradient":
gradients = []
for i in range(len(self.tasks)):
if i in self.dead_tasks:
gradients.append(0)
continue
# compute gradient from chain rule : (delta f / delta g_i)
delta = 1e-4
new_costs = list(self.best_costs)
new_costs[i] -= delta
chain_grad = (
self._compute_score(self.best_costs) - self._compute_score(new_costs)
) / delta
# compute (g_i(t_i) - g(t_i - \Delta t)) / (\Delta t)
if (
self.task_cts[i] - 1 < len(self.task_costs_history[i])
and self.task_cts[i] - 1 - self.backward_window_size >= 0
):
backward_grad = (
self.task_costs_history[i][self.task_cts[i] - 1]
- self.task_costs_history[i][
self.task_cts[i] - 1 - self.backward_window_size
]
) / self.backward_window_size
else:
backward_grad = 0
# compute (g_i(t_i + \Delta t) - g(t_i)) / (\Delta t)
g_next_1 = self.best_costs[i] - (self.best_costs[i] / self.task_cts[i])
g_next_2 = self.beta * 1e30
group_id = self.tag_to_group_id.get(self.task_tags[i], None)
if group_id is not None and len(self.group_task_ids[group_id]) > 1:
best_flops = max(
[
self.flop_cts[j] / self.best_costs[j]
for j in self.group_task_ids[group_id]
]
)
g_next_2 = self.beta * self.flop_cts[i] / best_flops
g_next = min(g_next_1, g_next_2)
forward_grad = g_next - self.best_costs[i]
# combine all grads
grad = chain_grad * (
self.alpha * backward_grad + (1 - self.alpha) * forward_grad
)
assert grad <= 0
gradients.append(grad)
if max(gradients) == min(gradients):
task_idx = np.random.choice(len(gradients))
else:
task_idx = np.argmin(gradients)
else:
raise ValueError("Invalid strategy: " + self.strategy)
self._tune_task(task_idx)
self._adjust_similarity_group(task_idx)
if self.cur_score < self.best_score:
self.best_score = self.cur_score
self.best_ct = self.ct
elif self.ct - self.best_ct >= self.early_stopping_all and all(
cost < 1e9 for cost in self.best_costs
):
if self.tune_option.verbose >= 1:
print(
"Stop early since no performance improvement in the last "
+ str(self.early_stopping_all)
+ " measurement trials."
)
break
def _tune_task(self, task_idx):
"""Tune the select task for one round"""
# Run pre-tune callbacks
for callback in self.callbacks:
callback.pre_tune(self, task_idx)
measure_inputs, measure_results = self.search_policies[task_idx].continue_search_one_round(
self.num_measures_per_round, self.measurer
)
self.task_cts[task_idx] += 1
for res in measure_results:
cost = array_mean(res.costs)
if cost < self.best_costs[task_idx]:
self.task_best_cts[task_idx] = self.task_cts[task_idx]
self.best_costs[task_idx] = cost
# Stop tuning this task in the rest of the process if its search space has been
# fully explored or it has no improvement for a long while.
no_change_trials = (
self.task_cts[task_idx] - self.task_best_cts[task_idx]
) * self.num_measures_per_round
if len(measure_inputs) == 0 or no_change_trials > self.early_stopping_task:
self.dead_tasks.add(task_idx)
self.task_costs_history[task_idx].append(self.best_costs[task_idx])
self.ct += len(measure_inputs)
self.cur_score = self._compute_score(self.best_costs)
# Run post-tune callbacks
for callback in self.callbacks:
callback.post_tune(self, task_idx)
def _compute_score(self, costs):
"""compute the objective function"""
# Make sure to return float.
score = self.objective_func(costs)
return score.value if hasattr(score, "value") else score
def _adjust_similarity_group(self, task_idx):
"""adjust the similarity group for the selected task"""
group_id = self.tag_to_group_id.get(self.task_tags[task_idx], None)
if group_id is None or len(self.group_task_ids[group_id]) <= 1:
return
group_ids = self.group_task_ids[group_id]
best_group_flops = max([self.flop_cts[j] / self.best_costs[j] for j in group_ids])
cur_flops = self.flop_cts[task_idx] / self.best_costs[task_idx]
# if we tune a task for many times but it still cannot achieve
# a similar speed to the fastest one in its group, this means this task
# is actually not similar to other tasks in its group.
# So we will remove it from its original group.
if cur_flops < best_group_flops / self.beta and self.task_cts[task_idx] > 5 + max(
self.task_cts[j] for j in group_ids if j != task_idx
):
self.task_tags[task_idx] = None
group_ids.remove(task_idx)
def _restore_status(self, log_file, num_measures_per_round):
"""restore task_cts and best_costs from a log file"""
str_target = str(self.tasks[0].target)
workload_key_to_task_id = {t.workload_key: i for i, t in enumerate(self.tasks)}
total_ct = -1
for total_ct, (inp, res) in enumerate(RecordReader(log_file)):
if str(inp.task.target) != str_target:
continue
task_idx = workload_key_to_task_id.get(inp.task.workload_key, None)
if task_idx is None:
continue
self.task_cts[task_idx] += 1
if res.error_no == 0:
cost = array_mean(res.costs)
if cost < self.best_costs[task_idx]:
self.best_costs[task_idx] = cost
self.task_best_cts[task_idx] = self.task_cts[task_idx]
for idx in range(len(self.tasks)):
if self.task_cts[idx] - self.task_best_cts[idx] > self.early_stopping_task:
self.dead_tasks.add(idx)
            # The computation of task_cts is just an estimation.
# The estimation may not be accurate if the log file is changed externally or
# `num_measures_per_round` is different from the last tuning.
self.task_cts[idx] = int(self.task_cts[idx] / num_measures_per_round + 0.5)
self.task_best_cts[idx] = int(self.task_best_cts[idx] / num_measures_per_round + 0.5)
self.task_costs_history[idx].append(self.best_costs[idx])
self.cur_score = self._compute_score(self.best_costs)
logger.info("TaskScheduler: Loaded %d measurement records from %s", total_ct + 1, log_file)
class TaskSchedulerCallback:
"""The base class of task scheduler callback functions."""
def pre_tune(self, task_scheduler, task_id):
"""The callback before tuning each task.
Parameters
----------
task_scheduler: TaskScheduler
The task scheduler.
task_id: int
The task ID going to be tuned.
"""
# Do nothing by default
def post_tune(self, task_scheduler, task_id):
"""The callback after tuning each task.
Parameters
----------
task_scheduler: TaskScheduler
The task scheduler.
task_id: int
            The task ID that was tuned.
"""
# Do nothing by default
class PrintTableInfo(TaskSchedulerCallback):
"""The callback that prints a table of current progress."""
def pre_tune(self, task_scheduler, task_id):
if task_scheduler.tune_option.verbose < 1:
return
_ffi_api.PrintTitle("Task Scheduler")
print(
"| ID "
"| Task Description "
"| Latency (ms) | Speed (GFLOPS) | Trials |"
)
print(
"----------------------------------------------------------------"
"-------------------------------------------------"
)
# content
for i in range(len(task_scheduler.tasks)):
id_str = "%d" % i
latency_str = (
"%.3f" % (1e3 * task_scheduler.best_costs[i])
if task_scheduler.best_costs[i] < 1e9
else "-"
)
task_desc = task_scheduler.tasks[i].desc
speed_str = (
"%.2f"
% (task_scheduler.tasks[i].compute_dag.flop_ct / task_scheduler.best_costs[i] / 1e9)
if task_scheduler.best_costs[i] < 1e9
else "-"
)
trials_str = "%d" % (task_scheduler.task_cts[i] * task_scheduler.num_measures_per_round)
print(
"| %4s | %61s | %12s | % 14s | %6s |"
% (id_str, task_desc, latency_str, speed_str, trials_str)
)
print(
"----------------------------------------------------------------"
"-------------------------------------------------"
)
# overall info
if all(cost < 1e9 for cost in task_scheduler.best_costs):
total_latency_str = "%.3f" % (task_scheduler.cur_score * 1e3)
else:
total_latency_str = "-"
print(
"Estimated total latency: %s ms\tTrials: %d\tUsed time : %.0f s\tNext ID: %d\t"
% (
total_latency_str,
task_scheduler.ct,
time.time() - task_scheduler.tic,
task_id,
)
)
class LogEstimatedLatency(TaskSchedulerCallback):
"""Log the estimated latency to the file after tuning a task.
Parameters
----------
log_file: str
The log file path.
"""
def __init__(self, log_file):
if os.path.exists(log_file): # Remove existing log
os.remove(log_file)
self.log_file = log_file
def post_tune(self, task_scheduler, task_id):
if all(cost < 1e9 for cost in task_scheduler.best_costs):
total_latency_str = "%.3f" % (task_scheduler.cur_score * 1e3)
else:
total_latency_str = "N/A"
with open(self.log_file, "a") as filep:
filep.write(
"ElapsedTime(s)\t%.0f\tEstimatedLatency(ms)\t%s\tTrials\t%d\n"
% (
time.time() - task_scheduler.tic,
total_latency_str,
task_scheduler.ct,
)
)
filep.flush()
| https://github.com/zk-ml/tachikoma |
python/tvm/auto_scheduler/testing/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-import, redefined-builtin
"""Testing utilities in auto scheduler."""
# NOTE: Do not import any module here by default
| https://github.com/zk-ml/tachikoma |
python/tvm/auto_scheduler/testing/tune_onnx.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-docstring
from distutils.util import strtobool
import argparse
import json
import os
import onnx # type: ignore
import tvm
from tvm import auto_scheduler
from tvm import meta_schedule as ms
from tvm import relay
from tvm.meta_schedule.testing.custom_builder_runner import run_module_via_rpc
from tvm.meta_schedule.testing.tune_utils import generate_input_data, create_timer
from tvm.meta_schedule.utils import cpu_count
from tvm.relay.frontend import from_onnx
from tvm.support import describe
def _parse_args():
args = argparse.ArgumentParser()
args.add_argument(
"--model-name",
type=str,
required=True,
)
args.add_argument(
"--onnx-path",
type=str,
required=True,
)
args.add_argument(
"--input-shape",
type=str,
required=True,
help='example: `[{"name": "input1", "dtype": "int64", "shape": [1, 1, 8]}]`',
)
args.add_argument(
"--target",
type=str,
required=True,
)
args.add_argument(
"--num-trials",
type=int,
required=True,
)
args.add_argument(
"--rpc-host",
type=str,
required=True,
)
args.add_argument(
"--rpc-port",
type=int,
required=True,
)
args.add_argument(
"--rpc-key",
type=str,
required=True,
)
args.add_argument(
"--work-dir",
type=str,
required=True,
)
args.add_argument(
"--number",
type=int,
default=3,
)
args.add_argument(
"--repeat",
type=int,
default=1,
)
args.add_argument(
"--min-repeat-ms",
type=int,
default=100,
)
args.add_argument(
"--adaptive-training",
type=lambda x: bool(strtobool(x)),
help="example: True / False",
default=True,
)
args.add_argument(
"--cpu-flush",
type=lambda x: bool(strtobool(x)),
help="example: True / False",
required=True,
)
args.add_argument(
"--backend",
type=str,
choices=["graph", "vm"],
help="example: graph / vm",
required=True,
)
parsed = args.parse_args()
parsed.target = tvm.target.Target(parsed.target)
parsed.input_shape = json.loads(parsed.input_shape)
parsed.rpc_config = ms.runner.RPCConfig(
tracker_host=parsed.rpc_host,
tracker_port=parsed.rpc_port,
tracker_key=parsed.rpc_key,
session_timeout_sec=600,
)
return parsed
ARGS = _parse_args()
def main():
log_file = os.path.join(ARGS.work_dir, f"{ARGS.model_name}.json")
runner = auto_scheduler.RPCRunner(
key=ARGS.rpc_key,
host=ARGS.rpc_host,
port=ARGS.rpc_port,
n_parallel=cpu_count(logical=True),
number=ARGS.number,
repeat=ARGS.repeat,
min_repeat_ms=ARGS.min_repeat_ms,
enable_cpu_cache_flush=ARGS.cpu_flush,
timeout=ARGS.rpc_config.session_timeout_sec,
)
if ARGS.target.kind.name == "llvm":
hardware_params = auto_scheduler.HardwareParams(
num_cores=int(ARGS.target.attrs["num-cores"]),
target=ARGS.target,
)
elif ARGS.target.kind.name == "cuda":
hardware_params = auto_scheduler.HardwareParams(
num_cores=-1,
vector_unit_bytes=16,
cache_line_bytes=64,
max_shared_memory_per_block=int(ARGS.target.attrs["max_shared_memory_per_block"]),
max_threads_per_block=int(ARGS.target.attrs["max_threads_per_block"]),
# The value `max_local_memory_per_block` is not used in AutoScheduler,
# but is required by the API.
max_local_memory_per_block=12345678,
max_vthread_extent=8,
warp_size=32,
)
else:
raise NotImplementedError(f"Unsupported target {ARGS.target}")
describe()
print(f"Workload: {ARGS.model_name}")
onnx_model = onnx.load(ARGS.onnx_path)
shape_dict = {}
for item in ARGS.input_shape:
print(f" input_name : {item['name']}")
print(f" input_shape: {item['shape']}")
print(f" input_dtype: {item['dtype']}")
shape_dict[item["name"]] = item["shape"]
mod, params = from_onnx(onnx_model, shape_dict, freeze_params=True)
input_data = {
item["name"]: generate_input_data(item["shape"], item["dtype"]) for item in ARGS.input_shape
}
with ms.Profiler() as profiler:
tasks, task_weights = auto_scheduler.extract_tasks(
mod["main"],
params,
target=ARGS.target,
hardware_params=hardware_params,
)
for idx, (task, task_weight) in enumerate(zip(tasks, task_weights)):
print(
f"==== Task {idx}: {task.desc} "
f"(weight {task_weight} key: {task.workload_key}) ====="
)
print(task.compute_dag)
if ARGS.num_trials > 0:
tuner = auto_scheduler.TaskScheduler(tasks, task_weights)
tuner.tune(
auto_scheduler.TuningOptions(
num_measure_trials=ARGS.num_trials,
runner=runner,
measure_callbacks=[
auto_scheduler.RecordToFile(log_file),
],
),
adaptive_training=ARGS.adaptive_training,
)
relay_build = {"graph": relay.build, "vm": relay.vm.compile}[ARGS.backend]
with auto_scheduler.ApplyHistoryBest(log_file):
with tvm.transform.PassContext(
opt_level=3,
config={"relay.backend.use_auto_scheduler": True},
):
lib = relay_build(
mod,
target=ARGS.target,
params=params,
)
print("Tuning Time:")
print(profiler.table())
run_module_via_rpc(
rpc_config=ARGS.rpc_config,
lib=lib,
dev_type=ARGS.target.kind.name,
args=input_data,
continuation=create_timer(ARGS.backend),
backend=ARGS.backend,
)
if __name__ == "__main__":
main()
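# Example invocation (illustrative sketch; the model path, RPC tracker settings,
# and target string below are placeholders, not values from this repository):
#   python -m tvm.auto_scheduler.testing.tune_onnx \
#       --model-name resnet50 \
#       --onnx-path /path/to/resnet50.onnx \
#       --input-shape '[{"name": "data", "dtype": "float32", "shape": [1, 3, 224, 224]}]' \
#       --target "llvm -num-cores 8" \
#       --num-trials 2000 \
#       --rpc-host 127.0.0.1 --rpc-port 9190 --rpc-key local \
#       --work-dir /tmp/tune_onnx_logs \
#       --cpu-flush True --backend graph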
| https://github.com/zk-ml/tachikoma |
python/tvm/auto_scheduler/testing/tune_relay.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-docstring
import argparse
import json
import os
from distutils.util import strtobool
import tvm
from tvm import auto_scheduler
from tvm import meta_schedule as ms
from tvm import relay
from tvm.meta_schedule.testing.custom_builder_runner import run_module_via_rpc
from tvm.meta_schedule.testing.relay_workload import get_network
from tvm.meta_schedule.testing.tune_utils import create_timer, generate_input_data
from tvm.meta_schedule.utils import cpu_count
from tvm.support import describe
def _parse_args():
args = argparse.ArgumentParser()
args.add_argument(
"--workload",
type=str,
required=True,
)
args.add_argument(
"--input-shape",
type=str,
required=True,
)
args.add_argument(
"--target",
type=str,
required=True,
)
args.add_argument(
"--num-trials",
type=int,
required=True,
)
args.add_argument(
"--rpc-host",
type=str,
required=True,
)
args.add_argument(
"--rpc-port",
type=int,
required=True,
)
args.add_argument(
"--rpc-key",
type=str,
required=True,
)
args.add_argument(
"--work-dir",
type=str,
required=True,
)
args.add_argument(
"--layout",
type=str,
default=None,
)
args.add_argument(
"--cache-dir",
type=str,
default=None,
)
args.add_argument(
"--number",
type=int,
default=3,
)
args.add_argument(
"--repeat",
type=int,
default=1,
)
args.add_argument(
"--min-repeat-ms",
type=int,
default=100,
)
args.add_argument(
"--adaptive-training",
type=lambda x: bool(strtobool(x)),
help="example: True / False",
default=True,
)
args.add_argument(
"--cpu-flush",
type=lambda x: bool(strtobool(x)),
help="example: True / False",
required=True,
)
args.add_argument(
"--backend",
type=str,
choices=["graph", "vm"],
help="example: graph / vm",
required=True,
)
parsed = args.parse_args()
parsed.target = tvm.target.Target(parsed.target)
parsed.input_shape = json.loads(parsed.input_shape)
parsed.rpc_config = ms.runner.RPCConfig(
tracker_host=parsed.rpc_host,
tracker_port=parsed.rpc_port,
tracker_key=parsed.rpc_key,
session_timeout_sec=600,
)
return parsed
ARGS = _parse_args()
def main():
log_file = os.path.join(ARGS.work_dir, f"{ARGS.workload}.json")
runner = auto_scheduler.RPCRunner(
key=ARGS.rpc_key,
host=ARGS.rpc_host,
port=ARGS.rpc_port,
n_parallel=cpu_count(logical=True),
number=ARGS.number,
repeat=ARGS.repeat,
min_repeat_ms=ARGS.min_repeat_ms,
enable_cpu_cache_flush=ARGS.cpu_flush,
timeout=ARGS.rpc_config.session_timeout_sec,
)
if ARGS.target.kind.name == "llvm":
hardware_params = auto_scheduler.HardwareParams(
num_cores=int(ARGS.target.attrs["num-cores"]),
target=ARGS.target,
)
elif ARGS.target.kind.name == "cuda":
hardware_params = auto_scheduler.HardwareParams(
num_cores=-1,
vector_unit_bytes=16,
cache_line_bytes=64,
max_shared_memory_per_block=int(ARGS.target.attrs["max_shared_memory_per_block"]),
max_threads_per_block=int(ARGS.target.attrs["max_threads_per_block"]),
# The value `max_local_memory_per_block` is not used in AutoScheduler,
# but is required by the API.
max_local_memory_per_block=12345678,
max_vthread_extent=8,
warp_size=32,
)
else:
raise NotImplementedError(f"Unsupported target {ARGS.target}")
describe()
print(f"Workload: {ARGS.workload}")
mod, params, (input_name, input_shape, input_dtype) = get_network(
ARGS.workload,
ARGS.input_shape,
layout=ARGS.layout,
cache_dir=ARGS.cache_dir,
)
input_info = [
{
"name": input_name,
"shape": input_shape,
"dtype": input_dtype,
},
]
input_data = {
item["name"]: generate_input_data(item["shape"], item["dtype"]) for item in input_info
}
for item in input_info:
print(f" input_name : {item['name']}")
print(f" input_shape: {item['shape']}")
print(f" input_dtype: {item['dtype']}")
with ms.Profiler() as profiler:
with ms.Profiler.timeit("TaskExtraction"):
tasks, task_weights = auto_scheduler.extract_tasks(
mod["main"],
params,
target=ARGS.target,
hardware_params=hardware_params,
)
for idx, (task, task_weight) in enumerate(zip(tasks, task_weights)):
print(
f"==== Task {idx}: {task.desc} "
f"(weight {task_weight} key: {task.workload_key}) ====="
)
print(task.compute_dag)
with ms.Profiler.timeit("Tuning"):
if ARGS.num_trials > 0:
tuner = auto_scheduler.TaskScheduler(tasks, task_weights)
tuner.tune(
auto_scheduler.TuningOptions(
num_measure_trials=ARGS.num_trials,
runner=runner,
measure_callbacks=[
auto_scheduler.RecordToFile(log_file),
],
),
adaptive_training=ARGS.adaptive_training,
)
relay_build = {"graph": relay.build, "vm": relay.vm.compile}[ARGS.backend]
with ms.Profiler.timeit("PostTuningCompilation"):
with auto_scheduler.ApplyHistoryBest(log_file):
with tvm.transform.PassContext(
opt_level=3,
config={"relay.backend.use_auto_scheduler": True},
):
lib = relay_build(
mod,
target=ARGS.target,
params=params,
)
print("Tuning Time:")
print(profiler.table())
run_module_via_rpc(
rpc_config=ARGS.rpc_config,
lib=lib,
dev_type=ARGS.target.kind.name,
args=input_data,
continuation=create_timer(ARGS.backend),
backend=ARGS.backend,
)
if __name__ == "__main__":
main()
| https://github.com/zk-ml/tachikoma |
python/tvm/auto_scheduler/testing/tune_te.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-docstring
from distutils.util import strtobool
import argparse
import os
import tvm
from tvm import auto_scheduler
from tvm import meta_schedule as ms
from tvm.meta_schedule.testing.te_workload import CONFIGS
from tvm.meta_schedule.utils import cpu_count
from tvm.support import describe
def _parse_args():
args = argparse.ArgumentParser()
args.add_argument(
"--workload",
type=str,
required=True,
)
args.add_argument(
"--target",
type=str,
required=True,
)
args.add_argument(
"--num-trials",
type=int,
required=True,
)
args.add_argument(
"--rpc-host",
type=str,
required=True,
)
args.add_argument(
"--rpc-port",
type=int,
required=True,
)
args.add_argument(
"--rpc-key",
type=str,
required=True,
)
args.add_argument(
"--work-dir",
type=str,
required=True,
)
args.add_argument(
"--number",
type=int,
default=3,
)
args.add_argument(
"--repeat",
type=int,
default=1,
)
args.add_argument(
"--min-repeat-ms",
type=int,
default=100,
)
args.add_argument(
"--adaptive-training",
type=lambda x: bool(strtobool(x)),
required=False,
help="example: True / False",
default=True,
)
args.add_argument(
"--cpu-flush",
type=lambda x: bool(strtobool(x)),
help="example: True / False",
required=True,
)
parsed = args.parse_args()
parsed.target = tvm.target.Target(parsed.target)
parsed.rpc_config = ms.runner.RPCConfig(
tracker_host=parsed.rpc_host,
tracker_port=parsed.rpc_port,
tracker_key=parsed.rpc_key,
session_timeout_sec=60,
)
return parsed
ARGS = _parse_args()
def main():
log_file = os.path.join(ARGS.work_dir, f"{ARGS.workload}.json")
runner = auto_scheduler.RPCRunner(
key=ARGS.rpc_key,
host=ARGS.rpc_host,
port=ARGS.rpc_port,
n_parallel=cpu_count(logical=True),
number=ARGS.number,
repeat=ARGS.repeat,
min_repeat_ms=ARGS.min_repeat_ms,
enable_cpu_cache_flush=ARGS.cpu_flush,
timeout=ARGS.rpc_config.session_timeout_sec,
)
if ARGS.target.kind.name == "llvm":
hardware_params = auto_scheduler.HardwareParams(
num_cores=int(ARGS.target.attrs["num-cores"]),
target=ARGS.target,
)
elif ARGS.target.kind.name == "cuda":
hardware_params = auto_scheduler.HardwareParams(
num_cores=-1,
vector_unit_bytes=16,
cache_line_bytes=64,
max_shared_memory_per_block=int(ARGS.target.attrs["max_shared_memory_per_block"]),
max_threads_per_block=int(ARGS.target.attrs["max_threads_per_block"]),
# The value `max_local_memory_per_block` is not used in AutoScheduler,
# but is required by the API.
max_local_memory_per_block=12345678,
max_vthread_extent=8,
warp_size=32,
)
else:
raise NotImplementedError(f"Unsupported target {ARGS.target}")
describe()
print(f"Workload: {ARGS.workload}")
with ms.Profiler() as profiler:
# Same as MetaSchedule Tune TE
# Does not count ApplyHistoryBest time
workload_func, params = CONFIGS[ARGS.workload]
params = params[0] # type: ignore
workload_func = auto_scheduler.register_workload(workload_func)
task = auto_scheduler.SearchTask(
func=workload_func,
args=params,
target=ARGS.target,
hardware_params=hardware_params,
)
# Inspect the computational graph
print("Computational DAG:")
print(task.compute_dag)
tune_option = auto_scheduler.TuningOptions(
num_measure_trials=ARGS.num_trials,
measure_callbacks=[auto_scheduler.RecordToFile(log_file)],
verbose=2,
runner=runner,
)
if ARGS.num_trials > 0:
print("Running AutoTuning:")
task.tune(tune_option, adaptive_training=ARGS.adaptive_training)
print("Tuning Time:")
print(profiler.table())
print("History Best:")
print(task.print_best(log_file))
sch, args = task.apply_best(log_file)
print("Lowered TIR:")
print(tvm.lower(sch, args, simple_mode=True))
if __name__ == "__main__":
main()
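# Example invocation (illustrative sketch; "GMM" is assumed to be a key of
# tvm.meta_schedule.testing.te_workload.CONFIGS, and the RPC settings are placeholders):
#   python -m tvm.auto_scheduler.testing.tune_te \
#       --workload GMM --target "llvm -num-cores 8" --num-trials 1000 \
#       --rpc-host 127.0.0.1 --rpc-port 9190 --rpc-key local \
#       --work-dir /tmp/tune_te_logs --cpu-flush True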
| https://github.com/zk-ml/tachikoma |
python/tvm/auto_scheduler/utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
""" Common utilities for auto_scheduler. """
from typing import Hashable
import json
import signal
import threading
import traceback
import os
import numpy as np
try:
import psutil
except ImportError:
psutil = None
import tvm
from tvm import rpc
from tvm.tir import expr
from tvm.tir.transform import Simplify
from tvm.ir.transform import Sequential
from ..te import Tensor, placeholder
def decode_workload_key(workload_key):
"""Decode the workload key from a string to the name and arguments. The wokrload key
is expected to be a list of "[func_name/hash, args ...]" in a JSON string. If not,
then simply return the workload key as the name without arguments.
Parameters
----------
workload_key: str
The workload key in string. Format: "[func_name/hash, args ...]".
Returns
-------
name: str
The workload function name or the DAG hash.
args: Optional[Tuple[Any, ...]]
The flattened arguments in a tuple, or None if the workload key format cannot be decoded.
"""
def flatten_list(inp):
ret = []
for elt in inp:
if isinstance(elt, list):
ret += flatten_list(elt)
else:
ret.append(elt)
return ret
try:
key_list = json.loads(workload_key)
if isinstance(key_list, list) and len(key_list) >= 1:
return key_list[0], tuple(flatten_list(key_list[1:]))
except json.decoder.JSONDecodeError:
pass
return workload_key, None
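# Example (illustrative): a JSON workload key decodes into the function name (or
# DAG hash) plus a flattened argument tuple; non-JSON keys fall through unchanged.
# >>> decode_workload_key('["matmul", 128, 128, 128]')
# ('matmul', (128, 128, 128))
# >>> decode_workload_key("not-a-json-key")
# ('not-a-json-key', None)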
def calc_workload_dis_factor(target_workload_pair, workload_pair):
"""Calculate the distance factor of the workload to the target workload.
If two workloads are not compatible at all (i.e., different compute DAG or function),
then the distance factor is "inf". Otherwise, we calculate the factor by traversing
the workload arguments, which are the arguments of the compute function,
or the output shapes for the ComputeDAG. The factor is calculated by the following rules:
1. For non-zero integer values: `product(target_arg / candidate_arg)` if each target
value is divisible by its candidate value, else "inf".
2. For non-integer or zero values: "inf" if not equal else 1.
As a result, factor=1 is the optimal when two workloads are identical.
Parameters
----------
target_workload_pair: Tuple[str, Optional[Tuple[Any, ...]]]
The target workload pair: (hash, argument tuple).
workload_pair: Tuple[str, Optional[Tuple[Any, ...]]]
The candidate workload pair: (hash, argument tuple).
Returns
-------
dis_f: float
The distance factor.
"""
target_key, target_args = target_workload_pair
target_args = target_args if target_args is not None else []
key, args = workload_pair
args = args if args is not None else []
# Not even the same func/DAG.
if key != target_key or len(target_args) != len(args):
return float("inf")
dis_f = 1
for target_arg, arg in zip(target_args, args):
if isinstance(target_arg, int):
if target_arg == 0 or arg == 0:
if target_arg != arg:
return float("inf")
elif target_arg % arg != 0:
return float("inf")
else:
dis_f *= target_arg / arg
elif target_arg != arg:
return float("inf")
return dis_f
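# Example (illustrative): two workloads with the same function name and divisible
# integer arguments get a finite factor, the product of the size ratios.
# >>> calc_workload_dis_factor(("matmul", (128, 128)), ("matmul", (64, 32)))
# 8.0
# >>> calc_workload_dis_factor(("matmul", (128, 128)), ("conv2d", (128, 128)))
# inf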
def get_func_name(func):
"""Get name of a function.
Parameters
----------
func: Function
The input function.
Returns
-------
name: str
The function name.
"""
return func.func_name if hasattr(func, "func_name") else func.__qualname__
def get_const_int(exp):
"""Verifies expr is integer and get the constant value.
Parameters
----------
exp : Union[tvm.tir.expr, int]
The input expression.
Returns
-------
out_value : int
The output.
"""
if isinstance(exp, int):
return exp
if not isinstance(exp, expr.IntImm):
opt = Sequential([Simplify()])
exp = opt(exp)
if not isinstance(exp, expr.IntImm):
raise ValueError("Expect value to be constant int")
return exp.value
def get_const_tuple(in_tuple):
"""Verifies input tuple is IntImm, returns tuple of int.
Parameters
----------
in_tuple : Tuple[tvm.tir.expr]
The input.
Returns
-------
out_tuple : Tuple[Union[int,tvm.tir.Var,tvm.tir.Any]]
The output tuple of int. The dynamic shape variables (Var or Any) will be preserved.
"""
ret = []
for elem in in_tuple:
if isinstance(elem, (tvm.tir.Var, tvm.tir.expr.Any)):
ret.append(elem)
else:
ret.append(get_const_int(elem))
return tuple(ret)
def list_to_tuple(x):
"""Convert a list to a tuple recursively."""
assert isinstance(x, list)
return tuple(list_to_tuple(y) if isinstance(y, list) else y for y in x)
def serialize_args(args):
"""
Serialize arguments of a function to a hashable and jsonable tuple.
Currently this is mainly used for tvm.tensor.Tensor
"""
ret = []
if args is None:
return tuple(ret)
for t in args:
if isinstance(t, Tensor):
t = ("TENSOR", get_const_tuple(t.shape), t.dtype)
elif isinstance(t, list):
t = list_to_tuple(t)
assert isinstance(t, Hashable), str(t) + " is not hashable"
ret.append(t)
return tuple(ret)
def deserialize_args(args):
"""The inverse function of :code:`serialize_args`"""
ret = []
for t in args:
if isinstance(t, (tuple, list)) and t[0] == "TENSOR":
ret.append(placeholder(shape=t[1], dtype=t[2]))
else:
ret.append(t)
return ret
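# Example (illustrative): a te.Tensor serializes to a ("TENSOR", shape, dtype)
# tuple that deserializes back into a fresh placeholder.
# >>> serialize_args([placeholder((4, 4), dtype="float32"), 8])
# (('TENSOR', (4, 4), 'float32'), 8)
# >>> deserialize_args((("TENSOR", (4, 4), "float32"), 8))  # -> [new placeholder, 8]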
def kill_child_processes(parent_pid, sig=signal.SIGTERM):
"""kill all child processes recursively"""
if not psutil:
raise ImportError("psutil not found, try `pip install psutil` to fix this")
try:
parent = psutil.Process(parent_pid)
except psutil.NoSuchProcess:
return
try:
children = parent.children(recursive=True)
for process in children:
process.send_signal(sig)
except psutil.NoSuchProcess:
return
# The maximum length of traceback information
MAX_TRACEBACK_INFO_LEN = 512
def make_traceback_info():
"""Get the error message from traceback."""
info = str(traceback.format_exc())
if len(info) > MAX_TRACEBACK_INFO_LEN:
info = (
info[: MAX_TRACEBACK_INFO_LEN // 2] + "\n...\n" + info[-MAX_TRACEBACK_INFO_LEN // 2 :]
)
return info
class PropagatingThread(threading.Thread):
"""A thread that propagates the exception to the main thread"""
def run(self):
self.exc = None
try:
self.ret = self._target(*self._args, **self._kwargs)
except Exception as e: # pylint: disable=broad-except
self.exc = e
def join(self, timeout=None):
super(PropagatingThread, self).join(timeout)
if self.exc:
raise self.exc
return self.ret
def call_func_with_thread(func, args, kwargs):
"""Call a function within a new thread"""
res = []
def wrapper():
res.append(func(*args, **kwargs))
t = PropagatingThread(target=wrapper)
t.start()
t.join()
return res[0]
def call_func_with_timeout(
worker, timeout, func, args=(), kwargs=None
): # pylint: disable=unused-argument
"""Call a function with timeout"""
worker.send(func, args, kwargs, timeout)
try:
res = worker.recv()
except Exception: # pylint: disable=broad-except
res = Exception(make_traceback_info())
return res
def request_remote(device_key, host=None, port=None, priority=1, timeout=60):
"""Request a remote session.
Parameters
----------
device_key : str
The device key of registered device in tracker.
host : Optional[str]
The host address of rpc tracker.
If is none, will use environment variable "TVM_TRACKER_HOST".
port : Optional[int]
The port of rpc tracker.
If is none, will use environment variable "TVM_TRACKER_PORT".
priority : int = 1
The priority of this request; a larger value means a higher priority.
timeout : int = 60
The timeout of this session in seconds.
Returns
-------
remote : RPCSession
The connected remote RPCSession.
"""
# connect to the tracker
host = host or os.environ["TVM_TRACKER_HOST"]
port = port or int(os.environ["TVM_TRACKER_PORT"])
tracker = rpc.connect_tracker(host, port)
remote = tracker.request(device_key, priority=priority, session_timeout=timeout)
return remote
def check_remote(device_key, host=None, port=None, priority=100, timeout=10):
"""
Check the availability of a remote device.
Parameters
----------
device_key: str
device key of registered device in tracker.
host: Optional[str]
The host address of rpc tracker.
If is none, will use environment variable "TVM_TRACKER_HOST".
port: Optional[int]
The port address of rpc tracker.
If is none, will use environment variable "TVM_TRACKER_PORT".
priority: int = 100
The priority of this request; a larger value means a higher priority.
timeout: int = 10
The timeout of this check in seconds.
Returns
-------
available: bool
True if can find available device.
"""
def _check():
request_remote(device_key, host, port, priority)
t = threading.Thread(
target=_check,
)
t.start()
t.join(timeout)
return not t.is_alive()
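# Example (illustrative): probing a tracker at a placeholder address for a free
# device registered under the key "local"; returns False if none answers in time.
# >>> check_remote("local", host="127.0.0.1", port=9190, timeout=10)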
def array_mean(arr):
"""Compute mean of the elments in a TVM Array<PrimExpr>
Parameters
----------
arr: Array
A TVM Array<PrimExpr>
Returns
-------
mean: float
The mean of the elements in the array
"""
return sum(x.value for x in arr) / len(arr)
def to_str_round(x, decimal=6):
"""Convert an object to str and round float numbers
Parameters
----------
x: Union[str, list, int, float, np.ndarray]
The input object
decimal: int
The precision of decimal fraction
Returns
-------
ret: str
The string format of these objects
"""
if isinstance(x, str):
return x
if isinstance(x, (list, tuple, np.ndarray)):
return "[" + ", ".join([to_str_round(y, decimal=decimal) for y in x]) + "]"
if isinstance(x, dict):
return str({k: to_str_round(v) for k, v in x.items()})
if isinstance(x, int):
return str(x)
if isinstance(x, (np.float32, np.float64, float)):
format_str = "%%.%df" % decimal
return format_str % x
raise ValueError("Invalid value: " + str(x) + "\ttype: " + str(type(x)))
| https://github.com/zk-ml/tachikoma |
python/tvm/auto_scheduler/workload_registry.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""
Workload registration and serialization.
We use a json string to represent a workload (a computation graph).
The format of the string is `[func_name, [args...]]`.
The dag should be the return value of this `func_name(*args)`.
Rationale: The workload is actually a compute DAG defined by the TVM DSL. But serializing
compute DAGs and matching them efficiently is not easy. Therefore, we use the above string
to encode a compute DAG.
These strings are efficient for serialization/matching and won't be too long.
When we need the DAG, we decode the string and call the function, which will return the DAG.
"""
import json
import logging
import pickle
import tvm._ffi
from tvm.runtime._ffi_node_api import LoadJSON, SaveJSON
from .utils import deserialize_args, get_func_name, serialize_args
logger = logging.getLogger("auto_scheduler")
# Global workload function and hash key registry
# It stores two types of workload:
# 1. User registered tasks. This type of workload is registered
# by the decorator "register_workload"
# 2. Extracted tasks from a relay program. This type of workload is
# registered by function "register_workload_tensors".
#
# For 1, the dictionary maps a function name to its function pointer
# For 2, the dictionary maps a hash key to a list of input/output tensors
WORKLOAD_FUNC_REGISTRY = {}
def register_workload(func_name, f=None, override=False):
"""Register a function that generates a certain workload.
The input function should take hashable and jsonable arguments
(int, float, tuple of int, tvm.tensor.Tensor, ...) and return a list of tvm.tensor.Tensor.
Parameters
----------
func_name : Union[Function, str]
The generation function that returns the compute declaration Tensors or its function name.
f : Optional[Function]
The generation function to be registered.
override : boolean = False
Whether to override existing entry.
Examples
--------
.. code-block:: python
@auto_scheduler.register_workload
def matmul(N, M, K):
A = te.placeholder((N, K), name='A')
B = te.placeholder((K, M), name='B')
k = te.reduce_axis((0, K), name='k')
C = te.compute((N, M), lambda i, j: tvm.sum(A[i][k] * B[k][j], axis=[k]), name='C')
return [A, B, C]
"""
global WORKLOAD_FUNC_REGISTRY
if callable(func_name):
f = func_name
func_name = get_func_name(f)
if not isinstance(func_name, str):
raise ValueError("expect string function name")
def register(myf):
"""internal register function"""
if func_name in WORKLOAD_FUNC_REGISTRY and not override:
raise RuntimeError("%s has been registered already" % func_name)
WORKLOAD_FUNC_REGISTRY[func_name] = myf
return myf
if f:
return register(f)
return register
def register_workload_tensors(workload_key, tensors, override=True):
"""Register a workload by provding input/output tensors. Since this function is used
when extracting/deserializing tasks, it expects duplicated registrations by default.
Parameters
----------
workload_key: str
The workload key of the compute DAG in JSON string.
tensors: List[Tensor]
The input/output tensors of a compute DAG
override : boolean = True
Whether to override existing entry.
Returns
-------
workload_key: str
The workload key of the compute DAG in JSON string.
"""
register_workload(workload_key, override=override)(tensors)
return workload_key
def make_workload_key(func, args):
"""Make a workload key by function and arguments.
Parameters
----------
func : Union[Function, str]
The function that returns the compute declaration Tensors.
Can be a function or its function name.
args : Args
The args of the function.
Returns
-------
workload_key : str
The workload key of the function.
"""
global WORKLOAD_FUNC_REGISTRY
if callable(func):
func_name = get_func_name(func)
elif isinstance(func, str):
func_name = func
else:
raise ValueError(
"Invalid function: "
+ str(func)
+ " . `make_workload_key` expects a callable function or its function name"
)
if func_name not in WORKLOAD_FUNC_REGISTRY:
raise ValueError(
"%s is not registered. " % func,
"Please register it with @auto_scheduler.register_workload",
)
args = serialize_args(args)
return json.dumps((func_name,) + args)
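# Example (illustrative): with the `matmul` function from the register_workload
# docstring above registered, the key is just the JSON-encoded (name, args) tuple.
# >>> make_workload_key(matmul, (128, 128, 128))
# '["matmul", 128, 128, 128]'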
@tvm._ffi.register_func("auto_scheduler.workload_key_to_tensors")
def workload_key_to_tensors(workload_key):
"""Get the input/output tensors from the workload key.
This method is usually used to create a ComputeDAG by workload key.
Parameters
----------
workload_key : str
The input workload key in JSON string. The format is either (func_name, arguments...)
for compute functions, or (hash, shapes...) for ComputeDAG.
Returns
-------
tensors : List[Tensor]
The registered compute declaration Tensors.
"""
global WORKLOAD_FUNC_REGISTRY
# We register ComputeDAG with both hash and arguments, which are fixed in ComputeDAG,
# so we use an entire workload key to query the ComputeDAG.
if workload_key in WORKLOAD_FUNC_REGISTRY:
return WORKLOAD_FUNC_REGISTRY[workload_key]
# We register compute function with only the function name since
# it does not bind to specific arguments, so we use the function name to query
# the function and call the function with arguments to get the tensors.
workload = json.loads(workload_key)
name = workload[0]
value = WORKLOAD_FUNC_REGISTRY[name]
assert callable(value)
args = deserialize_args(workload[1:])
result = value(*args)
if isinstance(result, tuple):
result = list(result)
return result
def serialize_workload_registry_entry(workload_key):
"""
Serialize a workload registry entry.
This is used when the start method of multiprocessing is spawn.
We need to serialize the entry and register it in the new processes.
Parameters
----------
workload_key : str
The workload key
Returns
-------
data: Tuple
The serialized pickable data
"""
global WORKLOAD_FUNC_REGISTRY
if workload_key in WORKLOAD_FUNC_REGISTRY:
sname = workload_key
else:
workload = json.loads(workload_key)
sname = workload[0]
svalue = WORKLOAD_FUNC_REGISTRY[sname]
if not callable(svalue):
# pylint: disable=assignment-from-no-return
svalue = SaveJSON(svalue)
return sname, svalue
def deserialize_workload_registry_entry(data):
"""
Deserialize a workload registry entry.
This should be used along with :code:`serialize_workload_registry_entry`
Parameters
----------
data: Tuple
The return value of :code:`serialize_workload_registry_entry`
"""
global WORKLOAD_FUNC_REGISTRY
name, value = data
if name not in WORKLOAD_FUNC_REGISTRY:
# pylint: disable=assignment-from-no-return
if not callable(value):
value = LoadJSON(value)
WORKLOAD_FUNC_REGISTRY[name] = value
def save_workload_func_registry(filename):
"""Dump workload function registry to a pickle binary file.
Parameters
----------
filename : str
The filename to dump workload function registry to.
"""
global WORKLOAD_FUNC_REGISTRY
pickle.dump(WORKLOAD_FUNC_REGISTRY, open(filename, "wb"))
def load_workload_func_registry(filename):
"""Load workload function registry from a pickle binary file.
Parameters
----------
filename : str
The filename to load workload function registry from.
"""
global WORKLOAD_FUNC_REGISTRY
WORKLOAD_FUNC_REGISTRY = pickle.load(open(filename, "rb"))
| https://github.com/zk-ml/tachikoma |
python/tvm/autotvm/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The auto-tuning module of tvm
This module includes:
* Tuning space definition API
* Efficient auto-tuners
* Tuning result and database support
* Distributed measurement to scale up tuning
"""
from . import database
from . import feature
from . import measure
from . import record
from . import task
from . import tuner
from . import utils
from . import env
from . import tophub
# some shortcuts
from .measure import (
measure_option,
MeasureInput,
MeasureResult,
MeasureErrorNo,
LocalBuilder,
LocalRunner,
RPCRunner,
)
from .tuner import callback
from .task import (
get_config,
create,
ConfigSpace,
ConfigEntity,
register_topi_compute,
register_topi_schedule,
template,
DispatchContext,
FallbackContext,
ApplyHistoryBest as apply_history_best,
ApplyGraphBest as apply_graph_best,
ApplyFixedConfig as apply_fixed_config,
)
from .env import GLOBAL_SCOPE
| https://github.com/zk-ml/tachikoma |
python/tvm/autotvm/database.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=consider-using-enumerate,invalid-name
"""
Database of MeasureInput/MeasureResult pair.
This can be used for replaying measurement.
"""
import os
from .record import encode, decode, measure_str_key
class Database(object):
"""
Base class for a record database object.
"""
def load(self, inp, get_all=False):
"""
Load a result based on an input's string key
Parameters
----------
inp: MeasureInput
to be translated into key for RedisDB
get_all: bool, optional
Whether the latest result (or all matching results) should be returned
Returns
-------
rec: MeasureResult if previously saved, otherwise None
"""
raise NotImplementedError()
def save(self, inp, res, extend=False):
"""
Save a result based on an input's string key
Parameters
----------
inp: MeasureInput
to be translated into key for RedisDB
res: MeasureResult
to associate with key
extend: bool, optional
Whether to extend existing MeasureResults if they exist
"""
raise NotImplementedError()
def filter_inputs(db, measure_inputs, retry=False):
"""
Filter a measure_inputs batch based on saved db results
Parameters
----------
db: Database
database object
measure_inputs: Array of MeasureInput
measure_inputs as expected in measure_batch
retry: bool
whether to retry if the saved result is a failure
Returns
-------
partial_results: Array of MeasureResult
a full list of result, where None denotes no corresponding saved result
unsaved: Array of MeasureInput
a list that only contains unsaved inputs
"""
partial_results = list()
unsaved = list()
for inp in measure_inputs:
res = db.load(inp)
if res is None or (retry and res.error_no != 0):
unsaved.append(inp)
partial_results.append(None)
else:
partial_results.append(res)
return partial_results, unsaved
class RedisDatabase(Database):
"""
Redis version of record database
"""
REDIS_PROD = 15
REDIS_LOCA = 14
REDIS_TEST = 13 # for unit test
REDIS_NIGHT_TEMP = 12 # for nightly report (will be flushed after every workload)
MAGIC_SPLIT = "$"
def __init__(self, db_index=REDIS_PROD):
# pylint: disable=import-outside-toplevel
import redis
if db_index == RedisDatabase.REDIS_TEST:
host = "127.0.0.1"
else:
host = os.environ.get("TVM_FLEET_HOST")
self.db = redis.StrictRedis(host=host, port=6379, db=db_index)
self.db_index = db_index
def set(self, key, value):
self.db.set(key, value)
def get(self, key):
current = self.db.get(key)
return current.decode() if isinstance(current, bytes) else current
def load(self, inp, get_all=False):
current = self.get(measure_str_key(inp))
if current is not None:
records = [decode(x) for x in current.split(RedisDatabase.MAGIC_SPLIT)]
results = [rec[1] for rec in records if rec is not None]
if get_all:
return results
return max(results, key=lambda result: result.timestamp)
return current
def save(self, inp, res, extend=False):
current = self.get(measure_str_key(inp))
if not extend or current is None:
self.set(measure_str_key(inp), RedisDatabase.MAGIC_SPLIT.join([encode(inp, res)]))
else:
current = current.split(RedisDatabase.MAGIC_SPLIT)
self.set(
measure_str_key(inp), RedisDatabase.MAGIC_SPLIT.join(current + [encode(inp, res)])
)
def filter(self, func):
"""
Dump all of the records that match the given rule
Parameters
----------
func: callable
The signature of the function is (MeasureInput, [MeasureResult]) -> bool
Returns
-------
list of records in tuple (MeasureInput, MeasureResult) matching the rule
Examples
--------
get records for a target
>>> db.filter(lambda inp, results: "cuda" in inp.target.keys)
get records with errors
>>> db.filter(lambda inp, results: any(r.error_no != 0 for r in results))
"""
matched_records = list()
# may consider filtering in iterator in the future
for key in self.db.keys():
current = self.get(key)
try:
records = [decode(x) for x in current.split(RedisDatabase.MAGIC_SPLIT)]
records = [rec for rec in records if rec is not None]
except TypeError: # got a badly formatted/old format record
continue
if not records:
continue
inps, results = zip(*records)
inp = inps[0]
if not func(inp, results):
continue
result = max(results, key=lambda res: res.timestamp)
matched_records.append((inp, result))
return matched_records
def flush(self):
self.db.flushdb()
class DummyDatabase(RedisDatabase):
"""
A database based on python dictionary for testing.
"""
def __init__(self):
# pylint: disable=super-init-not-called
self.db = {}
def set(self, key, value):
self.db[key] = value
def flush(self):
self.db = {}
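# Example (illustrative): replaying a cached measurement through the in-memory
# database; `inp` and `res` stand for a previously measured MeasureInput/MeasureResult.
# >>> db = DummyDatabase()
# >>> db.save(inp, res)
# >>> _, unsaved = filter_inputs(db, [inp])
# >>> unsaved  # the input is already cached, so nothing is left to measure
# []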
| https://github.com/zk-ml/tachikoma |
python/tvm/autotvm/env.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Global configuration/variable scope for autotvm"""
class AutotvmGlobalScope(object):
"""The global autotvm scope."""
current = None
def __init__(self):
self._old = AutotvmGlobalScope.current
AutotvmGlobalScope.current = self
self.in_tuning = False
self.silent = False
def deep_copy(self, global_scope):
"""Deep copy from another instance of AutotvmGlobalScope."""
self._old = AutotvmGlobalScope.current
self.in_tuning = global_scope.in_tuning
self.silent = global_scope.silent
GLOBAL_SCOPE = AutotvmGlobalScope()
def reset_global_scope(global_scope):
"""Reset global autotvm state. This is needed to initialize PopenPool workers."""
global GLOBAL_SCOPE
GLOBAL_SCOPE.deep_copy(global_scope)
AutotvmGlobalScope.current = global_scope
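# Example (illustrative): measurement code toggles the flag around a tuning
# session so that other components can tell tuning-time runs from normal ones.
# >>> GLOBAL_SCOPE.in_tuning = True
# >>> # ... run measurements ...
# >>> GLOBAL_SCOPE.in_tuning = False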
| https://github.com/zk-ml/tachikoma |
python/tvm/autotvm/feature.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,
"""Extract feature of iter vars
There are two types of feature
1) Itervar feature
This feature is extracted based on loop variables.
Different loop structures will result in different shapes of feature
2) Curve sample feature (relation feature)
This feature is extracted by sampling relation curve.
This feature is invariant of loop structure.
"""
import struct
import numpy as np
import tvm._ffi
from tvm.target import Target
from tvm.driver import build_module
def ana_lower(sch, args, binds=None, simple_mode=True):
"""Do lower while keeping all axes in IR
i.e. do not eliminate loops with an extent of 1, and do not vectorize, unroll, or inject virtual threads
"""
sch = sch.normalize()
# Phase 0
context = tvm.transform.PassContext(config={"tir.debug_keep_trivial_loop": True})
with context:
mod = build_module.schedule_to_module(sch, args, binds=binds)
mod = tvm.tir.transform.StorageFlatten(64)(mod._move())
mod = tvm.tir.transform.Simplify()(mod._move())
assert simple_mode
return mod["main"].body
try:
_get_buffer_curve_sample_flatten = tvm._ffi.get_global_func(
"autotvm.feature.GetCurveSampleFeatureFlatten"
)
_get_itervar_feature = tvm._ffi.get_global_func("autotvm.feature.GetItervarFeature")
_get_itervar_feature_flatten = tvm._ffi.get_global_func(
"autotvm.feature.GetItervarFeatureFlatten"
)
except ValueError as e:
def raise_error(*args, **kwargs): # pylint: disable=unused-argument
raise RuntimeError("Cannot load autotvm c++ API")
_get_buffer_curve_sample_flatten = (
_get_itervar_feature
) = _get_itervar_feature_flatten = raise_error
def get_itervar_feature(sch, args, take_log=False):
"""get features of iter vars
Parameters
----------
sch: tvm.te.schedule.Schedule
args: Array of te.tensor.Tensor
the buffer args for lower
take_log: bool
whether to take the log of numerical statistics
Returns
-------
features of every axis in the IR, see doc/features.md for detail
"""
stmt = ana_lower(sch, args, simple_mode=True)
feas = _get_itervar_feature(stmt, take_log)
# convert tvm node to python type
ret = []
for row in feas:
tmp = []
tmp.append([row[0][0].value, row[0][1]])
for item in row[1:]:
tmp.append([item[0].value] + [x.value for x in item[1:]])
ret.append(tmp)
return ret
def flatten_itervar_feature(fea):
"""flatten features into one-dimensional feature vectors
Parameters
----------
fea: list
return value of get_itervar_feature
Returns
-------
flatten_feature: np.ndarray
one-dimensional vector
"""
flatten = []
for axis in fea:
for pair in axis[1:]:
flatten.append(pair[1:])
return np.concatenate(flatten)
def get_itervar_feature_flatten(sch, args, take_log=True):
"""get flatten features of iter vars
this is equivalent to get_itervar_feature + flatten_itervar_feature, but much faster.
Parameters
----------
sch: tvm.te.schedule.Schedule
args: Array of te.tensor.Tensor
the buffer args for lower
take_log: bool
whether to take the log of numerical statistics
Returns
-------
flatten_feature: np.ndarray
one-dimensional vector
"""
stmt = ana_lower(sch, args, simple_mode=True)
feas = _get_itervar_feature_flatten(stmt, take_log)
feas = struct.unpack("%df" % (len(feas) // 4), feas)
return feas
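# Example (illustrative sketch, assuming a TVM build with the autotvm C++ API):
# extracting the flat feature vector of a small elementwise schedule.
# >>> from tvm import te
# >>> A = te.placeholder((16, 16), name="A")
# >>> B = te.compute((16, 16), lambda i, j: A[i, j] + 1.0, name="B")
# >>> s = te.create_schedule(B.op)
# >>> fea = get_itervar_feature_flatten(s, [A, B], take_log=True)
# >>> len(fea) > 0
# True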
def get_flatten_name(fea):
"""Get names of feature after flatten.
Parameters
----------
fea: list or str
return value of get_itervar_feature or a line of logfile
Returns
-------
feature_names: Array of str
"""
feature_name = {
"_attr_": ["length", "nest_level", "topdown", "bottomup"]
+ ["ann_%d" % i for i in range(20)],
"_arith_": ["add", "mul", "div"],
"buf_touch": ["stride", "mod", "count", "reuse", "T_count", "T_reuse"],
}
if isinstance(fea, str):
# pylint: disable=import-outside-toplevel
from .record import decode
# flatten line to feature
line = fea
ret = decode(line)
if ret is None:
raise ValueError("Unsupported AutoTVM log format")
inp, _ = ret
target = Target(inp.target)
with target:
s, args = inp.template.instantiate(inp.config)
fea = get_itervar_feature(s, args)
names = []
ct = 0
for row in fea:
var_name = str(row[0][1])
for pair in row[1:]:
key = pair[0]
if key in feature_name:
name_list = feature_name[key]
else:
name_list = feature_name["buf_touch"]
for i in range(len(pair[1:])):
names.append(".".join(["f%d" % ct, var_name, key, name_list[i]]))
ct += 1
return names
def get_buffer_curve_sample_flatten(sch, args, sample_n=30):
"""
Get flatten curve sample feature (relation feature)
Parameters
----------
sch: tvm.te.schedule.Schedule
args: Array of te.tensor.Tensor
the buffer args for lower
sample_n: int
number of sample points along one dimension
Returns
-------
flatten_feature: np.ndarray
one-dimensional vector
"""
stmt = ana_lower(sch, args, simple_mode=True)
feas = _get_buffer_curve_sample_flatten(stmt, sample_n, False)
feas = struct.unpack("%df" % (len(feas) // 4), feas)
return feas
| https://github.com/zk-ml/tachikoma |
python/tvm/autotvm/graph_tuner/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Autotvm graph tuner API."""
from __future__ import absolute_import as _abs
from . import _base
from . import base_graph_tuner
from .base_graph_tuner import BaseGraphTuner
from .dynamic_programming_tuner import DPTuner
from .pbqp_tuner import PBQPTuner
| https://github.com/zk-ml/tachikoma |
python/tvm/autotvm/graph_tuner/_base.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Helper functions and global data"""
# We set a large time to represent an invalid layout-transformation.
# This number is set to be 10e9 seconds to align with autotvm.
INVALID_LAYOUT_TIME = 10e9
MAX_OUTPUT_NODES = 16
OPT_OUT_OP = ["layout_transform"]
| https://github.com/zk-ml/tachikoma |
python/tvm/autotvm/graph_tuner/base_graph_tuner.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=too-many-arguments,too-many-locals,too-many-statements,too-many-instance-attributes,too-many-branches,too-many-nested-blocks,invalid-name,unused-argument,unused-variable,no-member,no-value-for-parameter
"""Base class for graph tuner."""
import logging
from abc import abstractmethod
import numpy as np
from tvm import topi
import tvm
from tvm import te
from tvm import autotvm, relay
from tvm.autotvm.task import get_config
from tvm.autotvm.record import encode, load_from_file
from tvm.autotvm.measure import MeasureResult, MeasureInput
from tvm.target import Target
from .utils import (
is_boundary_node,
get_in_nodes,
get_out_nodes,
has_multiple_inputs,
bind_inputs,
expr2graph,
)
from ._base import INVALID_LAYOUT_TIME
from ._base import OPT_OUT_OP
def get_infer_layout(task_name):
if task_name.startswith("conv2d"):
return topi.nn.conv2d_infer_layout
if task_name.startswith("depthwise_conv2d"):
return topi.nn.depthwise_conv2d_infer_layout
raise ValueError("Cannot find infer layout for task %s" % task_name)
@autotvm.template("layout_transform")
def layout_transform(*args):
"""Autotvm layout transform template."""
cfg = get_config()
cfg.add_flop(-1)
data = args[0]
out = topi.layout_transform(*args)
sch = topi.generic.schedule_injective([out])
return sch, [data, out]
class BaseGraphTuner(object):
"""Class to search schedules considering both kernel execution time and
layout transformation time.
Before creating a Graph Executor instance, schedule candidates for all kernels in
graph should be provided through tensor-level tuning.
"""
def __init__(
self,
graph,
input_shapes,
records,
target_ops,
target,
max_sch_num=20,
dtype="float32",
verbose=True,
log_file="graph_tuner.log",
log_level=logging.DEBUG,
name="graph_tuner",
):
"""Create a GlobalTuner instance. Local schedule searching for all nodes with
target_op in the input graph and layout transformation benchmark need to be
executed before initialization.
Parameters
----------
graph : tvm.relay.function.Function
Input graph
input_shapes : dict of str to tuple.
Input shapes of graph
records : str or iterator of (MeasureInput, MeasureResult)
Collection of kernel level tuning records.
If it is str, then it should be the filename of a records log file.
Each row of this file is an encoded record pair.
Otherwise, it is an iterator.
target_ops : List of tvm.ir.Op
Target tuning operators.
target : str or tvm.target
Compilation target.
max_sch_num : int, optional
Maximum number of schedule candidates for each workload.
dtype : str, optional
Data type.
log_file : str, optional
graph tuner log file name
name : str, optional
Name of global tuner.
"""
self._node_list = []
self._layout_transform_perf_records = {}
self._layout_transform_interlayer_cost = {}
self._input_shapes = input_shapes
self._target_ops = target_ops
self._name = name
self._max_sch_num = max_sch_num
self._optimal_sch_dict = {}
self._records = records
self._dtype = dtype
if isinstance(target, str):
target = Target(target)
self._target = target
self._optimal_record_dict = {}
# Set up logger
self._verbose = verbose
self._logger = logging.getLogger(name + "_logger")
need_file_handler = need_console_handler = True
for handler in self._logger.handlers:
if handler.__class__.__name__ == "FileHandler":
need_file_handler = False
if handler.__class__.__name__ == "StreamHandler":
need_console_handler = False
self._log_level = log_level
self._log_file = log_file
self._formatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s")
self._logger.setLevel(log_level)
if need_file_handler:
file_handler = logging.FileHandler(log_file)
file_handler.setFormatter(self._formatter)
self._logger.addHandler(file_handler)
if self._verbose and need_console_handler:
console_handler = logging.StreamHandler()
console_handler.setFormatter(self._formatter)
self._logger.addHandler(console_handler)
self._logger.setLevel(log_level)
self._logger.propagate = False
# Generate workload and schedule dictionaries.
if isinstance(graph, tvm.IRModule):
graph = graph["main"]
if isinstance(graph, relay.function.Function):
node_dict = {}
graph = bind_inputs(graph, input_shapes, dtype)
expr2graph(graph, self._target_ops, node_dict, self._node_list, target)
else:
raise RuntimeError("Unsupported graph type: %s" % str(type(graph)))
self._graph = graph
self._in_nodes_dict = get_in_nodes(self._node_list, self._target_ops, input_shapes.keys())
if len(self._in_nodes_dict) == 0:
raise RuntimeError(
"Could not find any input nodes with whose "
"operator is one of %s" % self._target_ops
)
self._out_nodes_dict = get_out_nodes(self._in_nodes_dict)
self._fetch_cfg()
self._opt_out_op = OPT_OUT_OP
# Setup infer_layout for elemwise-like nodes
# Note: graph tuner currently only supports tuning of single input and single output
# op as target op, such as conv2d, dense and conv2d_transpose. In this case, we can
# reuse infer_layout function from target ops for elemwise-like nodes. The behavior
# is to modify the first tensor shape of input workload to the output shape of
# elemwise-like node, and use infer_layout function from input op to generate layouts.
input_names = self._input_shapes.keys()
for idx in sorted(self._in_nodes_dict.keys()):
if has_multiple_inputs(self._node_list, idx, input_names, self._opt_out_op):
node_entry = self._node_list[idx]
node_entry["topi_op"] = []
node_entry["workloads"] = []
for input_idx in self._in_nodes_dict[idx]:
input_node = self._node_list[input_idx]
if not is_boundary_node(input_node, input_names):
input_topi_op = input_node["topi_op"][0]
node_entry["topi_op"].append(input_topi_op)
# Only replace the first input tensor
input_workload = input_node["workloads"][0]
first_tensor = input_workload[1]
dtype = first_tensor[-1]
new_shape = tuple([val.value for val in node_entry["types"][0].shape])
actual_workload = (
(input_workload[0],)
+ (("TENSOR", new_shape, dtype),)
+ input_workload[2:]
)
node_entry["workloads"].append(actual_workload)
if "record_candidates" not in node_entry:
node_entry["record_candidates"] = input_node["record_candidates"]
else:
node_entry["topi_op"].append(None)
node_entry["workloads"].append(None)
def _fetch_cfg(self):
"""Read and pre-process input schedules."""
if isinstance(self._records, str):
records = load_from_file(self._records)
else:
records = self._records
cfg_dict = {}
for record in records:
in_measure, _ = record
workload = in_measure.task.workload
if workload not in cfg_dict:
cfg_dict[workload] = []
cfg_dict[workload].append(record)
cache_dict = {}
for key in self._in_nodes_dict:
node_entry = self._node_list[key]
if node_entry["op"] not in self._target_ops:
continue
workload = node_entry["workloads"][0]
if workload in cache_dict:
node_entry["record_candidates"] = cache_dict[workload]
continue
record_candidates = []
infer_layout_func = get_infer_layout(node_entry["topi_op"][0])
layout_tracking_dict = {}
for record in cfg_dict[workload]:
in_measure, out_measure = record
workload = in_measure.task.workload
cfg = in_measure.config
                # For multiple cfgs which produce the same in/out layouts,
                # only the most efficient one is preserved.
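                # E.g. (illustrative layouts), two configs that both map NCHW -> NCHW8c
                # are collapsed to the one with the lower measured cost.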
with self._target:
layouts = infer_layout_func(workload, cfg)
if layouts in layout_tracking_dict:
cost = out_measure.costs[0]
current_best_cost = layout_tracking_dict[layouts][1].costs[0]
if cost < current_best_cost:
layout_tracking_dict[layouts] = record
else:
layout_tracking_dict[layouts] = record
sorted_records = sorted(
layout_tracking_dict.values(), key=lambda item: item[1].costs[0]
)
for i in range(min(self._max_sch_num, len(sorted_records))):
record_candidates.append(sorted_records[i])
node_entry["record_candidates"] = record_candidates
cache_dict[workload] = record_candidates
def _iterate_layout_transform(self, callback):
"""Iterate all possible layout transformations and execute callback for each
iteration. callback function accepts 6 arguments: from_node_idx, to_node_idx,
from_sch_idx, to_sch_idx, args which represent the argument list of layout
transformation and is_valid showing whether this is a valid layout transformation.
"""
input_names = self._input_shapes.keys()
pair_tracker = set()
for key, val in self._in_nodes_dict.items():
node_entry = self._node_list[key]
target_input_idx = -1
target_input_pos = -1
if has_multiple_inputs(self._node_list, key, input_names, self._opt_out_op):
for i, item in enumerate(val):
node = self._node_list[item]
if not is_boundary_node(node, input_names):
target_input_idx = item
target_input_pos = i
break
for i, item in enumerate(val):
i_idx = item
in_node_entry = self._node_list[i_idx]
if is_boundary_node(in_node_entry, input_names):
continue
if node_entry["op"] in self._target_ops:
o_idx = key
o_infer_layout_func = get_infer_layout(node_entry["topi_op"][0])
o_wkl = node_entry["workloads"][0]
i_topi_op = in_node_entry["topi_op"][0]
i_wkl = in_node_entry["workloads"][0]
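                    # Boundary inputs were recorded as None placeholders in __init__;
                    # advance pivot past them to the first real workload and topi op.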
pivot = 0
while not i_wkl:
pivot += 1
i_topi_op = in_node_entry["topi_op"][pivot]
i_wkl = in_node_entry["workloads"][pivot]
i_infer_layout_func = get_infer_layout(i_topi_op)
else:
o_idx = target_input_idx
if i <= target_input_pos:
continue
o_infer_layout_func = get_infer_layout(node_entry["topi_op"][0])
o_wkl = node_entry["workloads"][target_input_pos]
i_infer_layout_func = get_infer_layout(node_entry["topi_op"][i])
i_wkl = node_entry["workloads"][i]
if (i_idx, o_idx) in pair_tracker:
continue
pair_tracker.add((i_idx, o_idx))
for m, i_record in enumerate(in_node_entry["record_candidates"]):
for n, o_record in enumerate(node_entry["record_candidates"]):
i_cfg, o_cfg = i_record[0].config, o_record[0].config
with self._target:
i_input_info, i_output_info = i_infer_layout_func(i_wkl, i_cfg)
o_input_info, o_output_info = o_infer_layout_func(o_wkl, o_cfg)
if (
len(i_input_info) > 1
or len(i_output_info) > 1
or len(o_input_info) > 1
or len(o_output_info) > 1
):
raise RuntimeError(
"Graph tuner only supports target operator "
"with single input and single output. "
"Please check target_ops argument."
)
in_shape, in_layout = i_output_info[0]
if node_entry["op"] in self._target_ops:
_, out_layout = o_input_info[0]
else:
_, out_layout = o_output_info[0]
data_placeholder = te.placeholder(in_shape, name="data", dtype=self._dtype)
args = [data_placeholder, in_layout, out_layout]
callback(i_idx, o_idx, m, n, args)
def _create_matrix_callback(self, from_node_idx, to_node_idx, from_sch_idx, to_sch_idx, args):
"""Create dictionary containing matrix format of layout transformation
between nodes."""
in_layout, out_layout = args[1], args[2]
ltf_workload = autotvm.task.args_to_workload(args, "layout_transform")
idx_pair_key = (from_node_idx, to_node_idx)
if in_layout == out_layout:
layout_transform_time = 0
else:
layout_transform_time = self._layout_transform_perf_records[ltf_workload][1].costs[0]
if idx_pair_key not in self._layout_transform_interlayer_cost:
self._layout_transform_interlayer_cost[idx_pair_key] = []
if len(self._layout_transform_interlayer_cost[idx_pair_key]) <= from_sch_idx:
self._layout_transform_interlayer_cost[idx_pair_key].append([])
self._layout_transform_interlayer_cost[idx_pair_key][from_sch_idx].append(
layout_transform_time
)
def benchmark_layout_transform(
self,
min_exec_num=100,
timeout=10,
use_rpc=False,
device_key=None,
host="127.0.0.1",
port=9190,
n_parallel=1,
build_func="default",
layout_records=None,
target_host=None,
infer_layout=False,
runner=None,
):
"""Benchmark all possible layout transformation in the graph,
given a set of schedule candidates for each workload of target operator.
Parameters
----------
min_exec_num : int, optional
            Minimum number of executions. The final execution time is the average of
            all execution times.
        timeout : int, optional
            Timeout for each execution.
use_rpc : boolean, optional
Whether to use rpc mode for benchmarking.
device_key : str, optional
Remote device key which can be queried by
python -m tvm.exec.query_rpc_tracker --host=0.0.0.0 --port=9190
host : str, optional
IP address used to create RPC tracker on host machine.
port : int, optional
Port number used to create RPC tracker on host machine.
n_parallel: int, optional
            The number of measurement tasks that can run in parallel.
            Set this according to the number of CPU cores (for compilation) and
            the number of devices you have (for measuring the generated code).
build_func: str or callable, optional
'default': call default builder. This works for normal target (llvm, cuda)
'ndk': use Android NDK to create shared library. Use this for android target.
callable: customized build function for other backends (e.g. VTA).
See autotvm/measure/measure_methods.py::default_build_func for example.
        layout_records : str or iterator of (MeasureInput, MeasureResult), optional
Collection of layout_transform benchmarking records.
            If it is a str, then it should be the filename of a records log file.
Each row of this file is an encoded record pair.
Otherwise, it is an iterator.
If this argument is set, graph tuner will first check whether layout_transform
workload already exists in records and skip benchmarking if possible.
        target_host : str or :any:`tvm.target.Target`, optional
            Host compilation target, if target is a device.
            When TVM compiles device specific programs such as CUDA,
            we also need host (CPU) side code to interact with the driver
            to set up the dimensions and parameters correctly.
            target_host is used to specify the host side codegen target.
            By default, llvm is used if it is enabled;
            otherwise a StackVM interpreter is used.
infer_layout : bool, optional
Whether to infer layout transformation time if it doesn't exist in records, instead
of benchmarking on target device.
            This might bring a performance loss compared to benchmarking the layout transformation.
runner : Runner, optional
Accept a user-supplied runner
"""
self._logger.info("Start to benchmark layout transformation...")
self._target, target_host = Target.canon_target_and_host(self._target, target_host)
if layout_records is None and infer_layout:
raise RuntimeError("Requires some records to infer layout transformation time.")
        if isinstance(layout_records, str):
            layout_records = load_from_file(layout_records)
            if not layout_records and infer_layout:
                raise RuntimeError("Records must be non-empty to infer layout transformation time.")
num_flops, total_time = 0, 0
if layout_records is not None:
for record in layout_records:
ltf_wkl = record[0].task.workload
self._layout_transform_perf_records[ltf_wkl] = record
input_shape = ltf_wkl[1][1]
flops = np.prod(input_shape)
num_flops += flops
total_time += record[1].costs[0]
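        # Average measured time per element over all recorded transforms; used below to
        # extrapolate the cost of unseen layout_transform workloads when infer_layout=True.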
avg_time = total_time / num_flops if num_flops > 0 else 0
args_list = []
def _fetch_args_callback(from_node_idx, to_node_idx, from_sch_idx, to_sch_idx, args):
"""Callback function to fetch layout transform args"""
_, in_layout, out_layout = args
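            # Identity transforms cost nothing, so only genuine layout changes are
            # queued for benchmarking.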
if in_layout != out_layout:
args_list.append(args)
self._iterate_layout_transform(_fetch_args_callback)
def _log_to_list(record_list):
"""Callback to log result to a list."""
def _callback(_, inputs, results):
"""Callback implementation"""
record_list.append((inputs[0], results[0]))
return _callback
builder = autotvm.LocalBuilder(n_parallel=n_parallel, build_func=build_func)
if use_rpc:
if device_key is None:
raise RuntimeError("device_key need to be set to use rpc tracker mode.")
runner = autotvm.measure.RPCRunner(
device_key,
host,
port,
n_parallel=n_parallel,
number=min_exec_num,
repeat=1,
timeout=timeout,
)
elif not runner:
runner = autotvm.LocalRunner(number=min_exec_num, repeat=1, timeout=timeout)
measure_option = autotvm.measure_option(builder=builder, runner=runner)
for args in args_list:
data, in_layout, out_layout = args
ltf_workload = autotvm.task.args_to_workload(args, "layout_transform")
if ltf_workload in self._layout_transform_perf_records:
continue
if infer_layout:
input_shape = ltf_workload[1][1]
flops = 1
for i in input_shape:
flops *= i
# Rule out invalid layout transformations
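                # A layout transform must preserve the element count; if the output
                # element count differs from the input's, this schedule pair is
                # incompatible and is penalized with INVALID_LAYOUT_TIME below.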
out = topi.layout_transform(data, in_layout, out_layout)
out_flops = 1
for i in topi.utils.get_const_tuple(out.shape):
out_flops *= i
if flops != out_flops:
inferred_time = INVALID_LAYOUT_TIME
else:
inferred_time = flops * avg_time
record_input = MeasureInput(target=self._target, task=None, config=None)
record_output = MeasureResult(
costs=(inferred_time,), error_no=0, all_cost=-1, timestamp=-1
)
self._layout_transform_perf_records[ltf_workload] = (record_input, record_output)
continue
records = []
task = autotvm.task.create("layout_transform", args=args, target=self._target)
tuner = autotvm.tuner.GridSearchTuner(task)
tuner.tune(n_trial=1, measure_option=measure_option, callbacks=[_log_to_list(records)])
if not isinstance(records[0][1].costs[0], float):
records[0] = (records[0][0], records[0][1]._replace(costs=(INVALID_LAYOUT_TIME,)))
self._layout_transform_perf_records[ltf_workload] = records[0]
self._iterate_layout_transform(self._create_matrix_callback)
self._logger.info("Benchmarking layout transformation successful.")
@property
def layout_transform_perf_records(self):
"""Get layout transformation dictionary for input graph.
Returns
-------
layout_transform_perf_records : dict of tuple to (MeasureInput, MeasureResult)
Layout transformation dictionary for input graph.
"""
return self._layout_transform_perf_records
def get_optimal_records(self):
"""Convert optimal record dictionary to a list of records
        in ascending order of node index in the graph.
Returns
-------
sch_list : list of tuple
            List of records in ascending order of node index in the graph.
"""
ordered_index_list = sorted(self._optimal_record_dict.keys())
ret = []
for index in ordered_index_list:
node_entry = self._node_list[index]
if node_entry["op"] not in self._target_ops:
continue
ret.append(node_entry["record_candidates"][self._optimal_record_dict[index]])
return ret
def write_opt_sch2record_file(self, record_file="graph_opt_schedule.log"):
"""Write graph level optimal schedules into file.
Parameters
----------
record_file : str, optional
Output schedule file.
"""
with open(record_file, "a") as out_file:
records = self.get_optimal_records()
for record in records:
out_file.write(encode(record[0], record[1]) + "\n")
msg = "Writing optimal schedules to %s successfully." % record_file
self._logger.info(msg)
@abstractmethod
def run(self, **kwargs):
"""Run graph tuning."""
| https://github.com/zk-ml/tachikoma |
python/tvm/autotvm/graph_tuner/dynamic_programming_stage.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=too-many-instance-attributes,too-many-branches,too-many-statements,too-many-arguments,too-many-locals,invalid-name
"""Stage class for dynamic programming tuner"""
import numpy as np
from .utils import is_boundary_node
class DPStage(object):
"""Class to represent node in Markov decision process. A stage has states
to represent different schedules of the current node. Since in this problem
the action is the schedule selected for current node, action can be fully
represented by states. No extra attribute needs for action.
In most cases, instance of this class should be created through DPTuner.
"""
def __init__(
self,
idx,
input_shapes,
node_list,
counted_nodes_set,
layout_transform_interlayer_cost,
stage_dict,
in_nodes_dict,
out_nodes_dict,
dep_dict,
target_ops,
dtype="float32",
):
"""Initialize a stage and create all states.
Parameters
----------
idx : int
Index for current node.
input_shapes : dict of string to tuple of int
Input shapes for current graph.
node_list : list of dict
List of all nodes for current graph.
counted_nodes_set : set of int
Global set recording whether the execution time of a node has been counted.
layout_transform_interlayer_cost : dict of tuple to list
Dictionary maps node index pair to layout transformation time between them.
stage_dict : dict of int to Stage
Global dictionary for all stages mapping node index to stage.
        in_nodes_dict : dict of int to list of int
            Dictionary mapping node index to the corresponding input node indices.
        out_nodes_dict : dict of int to list of int
            Dictionary mapping node index to the corresponding output node indices.
        dep_dict : dict of int to set of int
            Dictionary mapping node index to dependent node indices.
target_ops : list of str
Target operators
dtype : str, optional
Data type.
"""
self._global_input_shapes = input_shapes
self._global_input_names = input_shapes.keys()
self._global_node_list = node_list
self._global_counted_nodes_set = counted_nodes_set
self._global_layout_transform_interlayer_cost = layout_transform_interlayer_cost
self._global_stage_dict = stage_dict
self._global_in_nodes_dict = in_nodes_dict
self._global_out_nodes_dict = out_nodes_dict
self._global_dep_dict = dep_dict
self._idx = idx
self._node_entry = self._global_node_list[idx]
self._target_ops = target_ops
self._wkl = self._node_entry["workloads"][0]
self._record_list = self._node_entry["record_candidates"]
self._dep = []
self._dtype = dtype
self._states = None
self._full_states = None
self._full_states_idx = None
self._create_states()
def _create_states(self):
"""Create states."""
node = self._global_node_list[self._idx]
if node["op"] in self._target_ops:
self._create_op_states()
else:
self._create_multi_inputs_states()
def _create_op_states(self):
"""State creation routine for nodes with target_op."""
input_idx = self._global_in_nodes_dict[self._idx][0]
input_node_entry = self._global_node_list[input_idx]
if is_boundary_node(input_node_entry, self._global_input_names):
self._full_states = np.array([record[1].costs[0] for record in self._record_list])
self._states = self._full_states
else:
input_stage = self._global_stage_dict[input_idx]
input_dep = input_stage.dep
input_states = input_stage.states
input_flatten_states = input_states.flatten()
input_record_list = input_node_entry["record_candidates"]
num_schedules = len(self._record_list)
num_input_schedules = len(input_record_list)
num_input_states = input_flatten_states.shape[0]
full_states_shape = tuple(
[num_schedules, num_input_schedules]
+ [
len(self._global_node_list[dep_idx]["record_candidates"])
for dep_idx in input_dep
]
)
self._full_states = np.zeros(full_states_shape).flatten().astype("float32")
self._full_states_idx = [self._idx, input_idx] + input_dep
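            # Product of all dependency dimensions; for a flattened state index j,
            # j // dep_multiplier recovers the input schedule index.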
dep_multiplier = 1
for i in range(2, len(full_states_shape)):
dep_multiplier *= full_states_shape[i]
input_node_time_counted = input_idx in self._global_counted_nodes_set
for i in range(num_schedules):
current_sch_time = float(self._record_list[i][1].costs[0])
for j in range(num_input_states):
input_sch_idx = j // dep_multiplier
layout_transform_time = self._global_layout_transform_interlayer_cost[
(input_idx, self._idx)
][input_sch_idx][i]
if input_node_time_counted:
total_time = current_sch_time + layout_transform_time
else:
total_time = (
current_sch_time + layout_transform_time + input_flatten_states[j]
)
current_state_idx = i * num_input_states + j
self._full_states[current_state_idx] = total_time
if not input_node_time_counted:
self._global_counted_nodes_set.add(input_idx)
self._full_states = self._full_states.reshape(full_states_shape)
# If out degree of input node is 1, we can remove the dimension of input node,
# since the states of input node will not be needed any more. Otherwise, input
# node should become a dependency.
if len(self._global_out_nodes_dict[input_idx]) == 1:
self._states = np.amin(self._full_states, axis=1)
self._dep = list(input_dep)
else:
self._states = self._full_states
self._dep = [
input_idx,
] + input_dep
# Update global dependency dictionary.
# This is to monitor the dependency states to decide
# when a dependency can be eliminated, so that total
# number of states can be largely reduced.
for dep_idx in self._dep:
self._global_dep_dict[dep_idx].remove(self._idx)
for child in self._global_out_nodes_dict[self._idx]:
self._global_dep_dict[dep_idx].add(child)
if len(self._global_out_nodes_dict[self._idx]) > 1:
self._global_dep_dict[self._idx] = set()
for child in self._global_out_nodes_dict[self._idx]:
self._global_dep_dict[self._idx].add(child)
def _create_multi_inputs_states(self):
"""State creation routine for multi_input operator
In tvm, layout transformation for an elemwise-like follow the rule which
all input operators transform their layouts to the leftmost input operator
layout. For example:
elemwise-sum
| | |
| | |
op0 op1 op2
In this block, the possible layout transformations are: op1 -> op0 and op2 -> op0.
In graph tuning, a 3-D array with shape (k0, k1, k2) can represent the layout
        transformations between these three nodes. It is also possible that some earlier states
        belonging to other nodes (we name them dependencies) are required for dynamic programming.
The final states array for this elemwise-sum can be with shape (e0, k0, k1, e1, k2).
To iterate through all states, we first align the shape of op0, op1 and op2 to be
(e0, k0, k1, e1, k2) by broadcasting the original states. We also record the axis of
each input node in the states array, together with the multiplier. For example,
the axis index for op0 is 1, and multiplier is k1 * e1 * k2. If current iterating index
in the flatten array is i, the index of op0 can be computed as:
i % (k0 * k1 * e1 * k2) // (k1 * e1 * k2).
"""
full_input_node_list = list(self._global_in_nodes_dict[self._idx])
input_index_list = []
# Remove input and ruled_out nodes
for input_idx in full_input_node_list:
input_node = self._global_node_list[input_idx]
if not is_boundary_node(input_node, self._global_input_names):
input_index_list.append(input_idx)
# Generate new states
states_list, aligned_node_list = DPStage.align_states(
input_index_list, self._global_stage_dict, self._global_node_list
)
target_node_idx, target_major_axis, target_multiplier, target_states = states_list[0]
aligned_shape = target_states.shape
self._full_states = np.zeros(aligned_shape).astype("float32").flatten()
self._full_states_idx = list(aligned_node_list)
num_states = self._full_states.shape[0]
node_time_counted = [item[0] in self._global_counted_nodes_set for item in states_list]
target_states = target_states.flatten()
src_states_list = [states_list[i][3].flatten() for i in range(1, len(states_list))]
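        # For a flattened index i, a node whose axis sits at major_axis with stride
        # `multiplier` has schedule index (i % (multiplier * dim)) // multiplier,
        # matching the formula in the docstring above.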
for i in range(num_states):
target_sch_idx = (
i % (target_multiplier * aligned_shape[target_major_axis])
) // target_multiplier
if node_time_counted[0]:
new_state = 0
else:
new_state = target_states[i]
for j in range(1, len(states_list)):
src_states = src_states_list[j - 1]
src_node_idx, src_major_axis, src_multiplier, _ = states_list[j]
src_sch_idx = (
i % (src_multiplier * aligned_shape[src_major_axis])
) // src_multiplier
layout_transform_time = self._global_layout_transform_interlayer_cost[
(src_node_idx, target_node_idx)
][src_sch_idx][target_sch_idx]
if node_time_counted[j]:
new_state += layout_transform_time
else:
new_state += layout_transform_time + src_states[i]
self._full_states[i] = new_state
for i, node_counted in enumerate(node_time_counted):
if not node_counted:
self._global_counted_nodes_set.add(states_list[i][0])
self._full_states = self._full_states.reshape(aligned_shape)
# Remove dependency to reduce states
reduced_states = np.array(self._full_states)
reduced_states_transpose = [states_list[0][1]]
reduced_states_dep_list = []
self._dep = []
for i in range(len(reduced_states.shape)):
if i != states_list[0][1]:
reduced_states_transpose.append(i)
reduced_states_dep_list.append(aligned_node_list[i])
reduced_states = np.transpose(reduced_states, reduced_states_transpose)
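        # `shift` counts dimensions already reduced away, so axis (i + 1 - shift)
        # still addresses the dimension that belongs to dep.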
shift = 0
for i, dep in enumerate(reduced_states_dep_list):
if dep not in self._global_dep_dict or len(self._global_dep_dict[dep]) == 1:
self._global_dep_dict.pop(dep, None)
reduced_states = np.amin(reduced_states, axis=i + 1 - shift)
shift += 1
else:
self._dep.append(dep)
self._states = reduced_states
# Update dependency
for dep in self._dep:
self._global_dep_dict[dep].remove(self._idx)
for child in self._global_out_nodes_dict[self._idx]:
self._global_dep_dict[dep].add(child)
if len(self._global_out_nodes_dict[self._idx]) > 1:
self._global_dep_dict[self._idx] = set()
for child in self._global_out_nodes_dict[self._idx]:
self._global_dep_dict[self._idx].add(child)
@property
def dep(self):
"""Get dependency list."""
return self._dep
@property
def states(self):
"""Get states."""
return self._states
@property
def full_states(self):
"""Get complete states."""
return self._full_states
@property
def full_states_idx(self):
"""Get node index of complete states."""
return self._full_states_idx
@staticmethod
def align_states(input_index_list, stage_dict, node_list):
"""Align all input node states shapes to be the same and transpose/reshape properly.
This is used in creating multi_input operator states.
Parameters
----------
input_index_list : list of int
List of input node index.
stage_dict : dict of int to Stage
Global dictionary of node index to stage.
node_list : list of dict
List of all nodes for current graph.
Returns
-------
states_list : list of tuple
List of aligned states.
        aligned_node_list : list of int
List of node index for aligned states.
"""
aligned_node_list = list(input_index_list)
states_list = []
for input_idx in input_index_list:
input_node_stage = stage_dict[input_idx]
for dep_idx in input_node_stage.dep:
if dep_idx not in aligned_node_list:
aligned_node_list.append(dep_idx)
aligned_shape = []
for idx in aligned_node_list:
aligned_shape.append(len(node_list[idx]["record_candidates"]))
for input_idx in input_index_list:
input_node_stage = stage_dict[input_idx]
input_node_shape_idx_list = [input_idx] + input_node_stage.dep
transpose_idx_list = []
reshape_list = []
major_axis = -1
for i, idx in enumerate(aligned_node_list):
if input_idx == idx:
major_axis = i
if idx in input_node_shape_idx_list:
transpose_idx_list.append(idx)
reshape_list.append(aligned_shape[i])
else:
reshape_list.append(1)
transpose_list = [input_node_shape_idx_list.index(idx) for idx in transpose_idx_list]
input_node_states = np.transpose(input_node_stage.states, tuple(transpose_list))
input_node_states = np.reshape(input_node_states, tuple(reshape_list))
input_node_states = np.broadcast_to(input_node_states, aligned_shape)
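            # Stride of this node's axis in the flattened aligned array: the product
            # of all dimensions to its right, used together with major_axis to recover
            # the node's schedule index from a flattened state index.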
multiplier = 1
for i in range(major_axis + 1, len(aligned_shape)):
multiplier *= aligned_shape[i]
states_list.append((input_idx, major_axis, multiplier, input_node_states))
return states_list, aligned_node_list
| https://github.com/zk-ml/tachikoma |