file_path | content | repo
---|---|---
apps/pt_tvmdsoop/tests/test_boolean_tensor.py | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test script for boolean tensor support"""
import tempfile
import torch
import tvm
import tvm.testing
from tvm.contrib.torch import as_torch, optimize_torch
from tvm.script import tir as T
def negate(x):
return x.logical_not()
def sum_up_tensor(x):
return x.size(dim=0) - torch.sum(x.int())
def tensor_boolean_operation(x):
arr1 = (x + 0.3).floor().bool()
arr2 = (~((x + 0.7).int().bool())).bool()
ret = ((arr1 & arr2).byte() + 0.5).half()
return ~(ret.bool())
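# A brief note on the helpers above (derived from the code itself): negate flips a
# boolean tensor, sum_up_tensor counts the False entries (length minus the sum of the
# int-cast values), and tensor_boolean_operation round-trips through float, int, byte
# and half casts, so the tests below exercise boolean handling inside optimize_torch.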
def test_bool_tensor_negate():
input = torch.ones(1, dtype=torch.bool)
optimized_negate = optimize_torch(
negate,
input,
)
with tempfile.NamedTemporaryFile(suffix=".pt") as tmp:
torch.save(optimized_negate, tmp.name)
loaded_mod = torch.load(tmp.name)
output = loaded_mod(negate(input))
tvm.testing.assert_allclose(input.numpy(), output.numpy(), atol=1e-5, rtol=1e-5)
def test_sum_up_tensor():
x = torch.randint(0, 2, (16,))
y = x.bool()
optimized_func = optimize_torch(
sum_up_tensor,
(y,),
)
ret1 = (x[x == 0]).size(dim=0)
ret2 = optimized_func(y).numpy()
tvm.testing.assert_allclose(ret1, ret2, atol=1e-5, rtol=1e-5)
def test_tensor_boolean_operation():
input = torch.rand(200)
model = optimize_torch(
tensor_boolean_operation,
input,
)
ret1 = tensor_boolean_operation(input)
ret2 = model(input)
tvm.testing.assert_allclose(ret1, ret2, atol=1e-5, rtol=1e-5)
@as_torch
@T.prim_func
def negate_tvmscript(
X: T.Buffer[(8, 8), "bool"],
Y: T.Buffer[(8, 8), "float32"],
Z: T.Buffer[(8, 8), "bool"],
U: T.Buffer[(8, 8), "float32"],
) -> None:
for i, j in T.grid(8, 8):
with T.block():
if Y[i, j] > 0.0:
Z[i, j] = X[i, j]
U[i, j] = Y[i, j]
else:
Z[i, j] = not X[i, j]
U[i, j] = 0.0 - Y[i, j]
def negate_vanilla(x, y):
z = torch.zeros(8, 8).bool()
for i in range(8):
for j in range(8):
if y[i, j] > 0:
z[i, j] = x[i, j]
else:
z[i, j] = ~x[i, j]
return z
def test_tvmscript_torch_decorator():
q1 = (torch.rand(8, 8) + 0.5).int().bool()
q2 = torch.rand(8, 8) - 0.5
q3 = torch.zeros(8, 8).bool()
q4 = torch.zeros(8, 8)
std1 = negate_vanilla(q1, q2)
std2 = torch.abs(q2)
negate_tvmscript(q1, q2, q3, q4)
tvm.testing.assert_allclose(std1.numpy(), q3.numpy(), atol=1e-5, rtol=1e-5)
tvm.testing.assert_allclose(std2.numpy(), q4.numpy(), atol=1e-5, rtol=1e-5)
if __name__ == "__main__":
test_tvmscript_torch_decorator()
test_bool_tensor_negate()
test_sum_up_tensor()
test_tensor_boolean_operation()
| https://github.com/zk-ml/tachikoma |
apps/pt_tvmdsoop/tests/test_optimize_torch.py | # pylint: disable=missing-class-docstring
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test script for tvm torch module"""
import tempfile
import torch
from torch.utils import benchmark
from torchvision.models import resnet18
import tvm
import tvm.testing
from tvm.contrib.torch import optimize_torch
from tvm.meta_schedule import TuneConfig
def test_matmul_tuning_relay():
def matmul(x, w):
return torch.matmul(x, w)
x = torch.randn(15, 20)
w = torch.randn(20, 30)
example_inputs = (x, w)
rt_mod = optimize_torch(matmul, example_inputs)
torch_answer = torch.matmul(x, w).numpy()
tvm_answer = rt_mod(x, w).numpy()
tvm.testing.assert_allclose(torch_answer, tvm_answer, atol=1e-5, rtol=1e-5)
class InnerModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(1, 20, 5)
def forward(self, x):
return torch.nn.functional.relu(self.conv(x))
class SimpleModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(20, 20, 5)
self.relu = InnerModel()
def forward(self, x):
x = self.relu(x)
return torch.nn.functional.relu(self.conv(x))
def test_nested_module():
simple_module = SimpleModel()
example_input = torch.randn(20, 1, 10, 10)
optimized_module = optimize_torch(simple_module, example_input)
ret1 = simple_module(example_input).detach().numpy()
ret2 = optimized_module(example_input).detach().numpy()
tvm.testing.assert_allclose(ret1, ret2, atol=1e-5, rtol=1e-5)
def test_save_load_function():
def foo(x):
return 2 * x + 1
example_input = torch.rand(3)
opt_foo = optimize_torch(foo, example_input)
ret1 = opt_foo(example_input)
with tempfile.NamedTemporaryFile(suffix=".pt") as tmp:
torch.save(opt_foo, tmp.name)
loaded_mod = torch.load(tmp.name)
ret2 = loaded_mod(example_input)
tvm.testing.assert_allclose(ret1.numpy(), ret2.numpy(), atol=1e-5, rtol=1e-5)
class MyResNet18(torch.nn.Module):
def __init__(self, config, target=None):
super(MyResNet18, self).__init__()
self.means = torch.nn.Parameter(
torch.tensor([103.939, 116.779, 123.68]).resize_(1, 3, 1, 1)
).cuda()
self.resnet = optimize_torch(resnet18(), [torch.rand(1, 3, 224, 224)], config, target)
def forward(self, input):
return self.resnet(input - self.means)
class JitModule(torch.nn.Module):
def __init__(self):
super(JitModule, self).__init__()
self.means = torch.nn.Parameter(
torch.tensor([103.939, 116.779, 123.68]).resize_(1, 3, 1, 1)
).cuda()
self.resnet = torch.jit.optimize_for_inference(torch.jit.script(resnet18().cuda().eval()))
def forward(self, input):
return self.resnet(input - self.means)
# default config for testing
config = TuneConfig(
strategy="evolutionary",
num_trials_per_iter=4,
max_trials_per_task=8,
max_trials_global=16,
)
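# A deliberately small evolutionary-search budget (4 trials per iteration, at most 8 per
# task and 16 overall) so these tests finish quickly; a real tuning run would typically
# use a far larger trial budget.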
if torch.cuda.is_available():
target_cuda = "nvidia/geforce-rtx-3070"
meta_module_resnet18 = MyResNet18(config, target_cuda)
jit_module_resnet18 = JitModule()
def compare_optimize_resnet18_to_torchscript():
results = []
for i in range(20):
test_input = torch.rand(1, 3, 224, 224).half().cuda()
sub_label = f"[test {i}]"
results.append(
benchmark.Timer(
stmt="meta_module_resnet18(test_input)",
setup="from __main__ import meta_module_resnet18",
globals={"test_input": test_input},
sub_label=sub_label,
description="tuning by meta",
).blocked_autorange()
)
results.append(
benchmark.Timer(
stmt="jit_module_resnet18(test_input)",
setup="from __main__ import jit_module_resnet18",
globals={"test_input": test_input},
sub_label=sub_label,
description="tuning by jit",
).blocked_autorange()
)
compare = benchmark.Compare(results)
compare.print()
if __name__ == "__main__":
test_matmul_tuning_relay()
test_nested_module()
test_save_load_function()
if torch.cuda.is_available():
compare_optimize_resnet18_to_torchscript()
| https://github.com/zk-ml/tachikoma |
apps/pt_tvmdsoop/tests/test_torch_compile_cpu.py | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test script for torch module"""
import torch
import time
import tvm
from tvm.contrib.torch import compile
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x: torch.Tensor):
return x * x
model = Model()
x = torch.rand([1, 3, 224, 224])
model_jit = torch.jit.trace(model, x)
print(model_jit.graph)
print("run torchscript...")
for i in range(20):
t = time.time()
model_jit(x)
print(time.time() - t)
option = {
"input_infos": [
("x", (1, 3, 224, 224)),
],
"default_dtype": "float16",
"export_dir": "pytorch_compiled",
"num_outputs": 1,
"tuning_n_trials": 1, # set zero to skip tuning
"tuning_log_file": "tuning.log",
"target": "llvm",
"device": tvm.cpu(),
}
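# The option dict above configures compile(): static input shapes, default dtype,
# export directory, output count, tuning trial count/log, and the llvm target with a
# CPU device. The compiled module is then scripted, saved, and timed over 20 forward
# calls below. (Interpretation of individual keys is inferred from their names here.)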
pytorch_tvm_module = compile(model_jit, option)
torch.jit.script(pytorch_tvm_module).save("model_tvm.pt")
print("Run PyTorch...")
for i in range(20):
t = time.time()
outputs = pytorch_tvm_module.forward([x.cpu()])
print(1000 * (time.time() - t))
print(outputs[0].shape)
| https://github.com/zk-ml/tachikoma |
apps/pt_tvmdsoop/tests/test_torch_compile_gpu.py | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test script for torch module"""
import torch
import time
from torchvision.models import resnet50
import tvm
from tvm.contrib.torch import compile
model = resnet50().half().cuda()
x = torch.rand([1, 3, 224, 224]).half().cuda()
model_jit = torch.jit.trace(model, x)
print(model_jit.graph)
print("run torchscript...")
for i in range(20):
t = time.time()
model_jit(x)
torch.cuda.synchronize()
print(time.time() - t)
option = {
"input_infos": [
("x", (1, 3, 224, 224)),
],
"default_dtype": "float16",
"export_dir": "pytorch_compiled",
"num_outputs": 1,
"tuning_n_trials": 1, # set zero to skip tuning
"tuning_log_file": "tuning.log",
"target": "cuda",
"device": tvm.cuda(0),
}
pytorch_tvm_module = compile(model_jit, option)
torch.jit.script(pytorch_tvm_module).save("model_tvm.pt")
print("Run PyTorch...")
for i in range(20):
t = time.time()
outputs = pytorch_tvm_module.forward([x])
torch.cuda.synchronize()
print(1000 * (time.time() - t))
print(outputs[0].shape)
| https://github.com/zk-ml/tachikoma |
apps/pt_tvmdsoop/tests/test_torch_graph_module.py | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test script for torch module"""
import tempfile
import os
import logging
import torch
import numpy as np
import tvm
import tvm.testing
from tvm import te, relay
import tvm.contrib.torch
from tvm.contrib import graph_runtime
TVM_ASSETS = ["mod.so", "graph.json", "params"]
def test_use_pt_graph_module():
"""main test function"""
def build_export_graph(device):
"""relay build & export graph"""
x = relay.var("x", shape=(10, 5))
y = relay.var("y", shape=(1, 5))
z = relay.add(x, y)
z = relay.exp(z)
func = relay.Function([x, y], z)
x_data = np.random.rand(10, 5).astype("float32")
y_data = np.random.rand(1, 5).astype("float32")
params = {"y": y_data}
pt_device = torch.device(device)
if pt_device.type == "cuda":
target = "cuda"
ctx = tvm.cuda(pt_device.index)
else:
target = "llvm"
ctx = tvm.cpu(0)
graph, lib, params = relay.build(tvm.IRModule.from_expr(func), target=target, params=params)
mod = graph_runtime.create(graph, lib, device=ctx)
mod.set_input(**params)
mod.set_input(x=x_data)
mod.run()
res = mod.get_output(0).asnumpy()
ref_res = np.exp(y_data + x_data)
tvm.testing.assert_allclose(res, ref_res, atol=1e-5, rtol=1e-5)
# export to tempdir
export_dir = tempfile.mkdtemp("tvm_export")
lib.export_library(os.path.join(export_dir, TVM_ASSETS[0]))
with open(os.path.join(export_dir, TVM_ASSETS[1]), "w") as fout:
fout.write(graph)
with open(os.path.join(export_dir, TVM_ASSETS[2]), "wb") as fout:
fout.write(relay.save_param_dict(params))
return export_dir
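# build_export_graph compiles exp(x + y) with Relay for the requested device, sanity
# checks the result against numpy, and exports mod.so / graph.json / params into a
# temporary directory that the PyTorch GraphModule wrapper loads in test_pt_run below.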
def test_pt_run(device, trace=True, to_device=None):
"""test add lib with Pytorch wrapper"""
print("\n############## Test on device:", device, "#################")
export_dir = build_export_graph(device)
engine = tvm.contrib.torch.GraphModule(num_inputs=2, num_outputs=1).to(device)
x = np.random.rand(10, 5).astype("float32")
y = np.random.rand(1, 5).astype("float32")
expect = np.exp(y + x)
def get_inputs_by_device(device):
inps = [torch.Tensor(x), torch.Tensor(y)]
if device == "cpu":
return inps
else:
device_type, device_id = device.split(":")
assert device_type == "cuda"
return [inp.cuda(int(device_id)) for inp in inps]
assets = [os.path.join(export_dir, i) for i in TVM_ASSETS]
engine.init((x.shape, y.shape), *assets)
outputs = engine.forward(get_inputs_by_device(device))
tvm.testing.assert_allclose(outputs[0].cpu(), expect, atol=1e-5, rtol=1e-5)
if trace:
print("\n################ Test trace and load #################")
scripted = torch.jit.script(engine)
scripted_dir = tempfile.mkdtemp("scripted")
scripted_path = os.path.join(scripted_dir, "model.pt")
scripted.save(scripted_path)
loaded = torch.jit.load(scripted_path)
outputs = loaded.forward(get_inputs_by_device(device))
tvm.testing.assert_allclose(outputs[0].cpu(), expect, atol=1e-5, rtol=1e-5)
del scripted
del loaded
if to_device:
print(
"\n################ Test move from [{}] to [{}] #################".format(
device, to_device
)
)
engine = engine.to(to_device)
outputs = engine.forward(get_inputs_by_device(to_device))
tvm.testing.assert_allclose(outputs[0].cpu(), expect, atol=1e-5, rtol=1e-5)
del engine
test_pt_run(device="cuda:0", trace=True, to_device="cuda:1")
test_pt_run(device="cpu", trace=True)
if __name__ == "__main__":
test_use_pt_graph_module()
| https://github.com/zk-ml/tachikoma |
apps/pt_tvmdsoop/tests/test_torch_script.py | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test script for torch module"""
import os
import torch
import time
import numpy as np
import tvm
import tvm.testing
import tempfile
from tvm.contrib.torch import PyTorchTVMModule, compile
class Model(torch.nn.Module):
def forward(self, x, y):
return torch.matmul(x, y.softmax(1))
model = Model()
model.cuda().half()
x = torch.rand([1280, 2464, 4]).cuda().half()
y = torch.rand([1280, 4, 1]).cuda().half()
for i in range(20):
t = time.time()
o = model(x, y)
torch.cuda.synchronize()
print(1000 * (time.time() - t))
print(o.shape)
model_jit = torch.jit.script(model)
print(model_jit.graph)
input_shapes = [("x", list(x.shape)), ("y", list(y.shape))]
dtype = "float16"
export_dir = tempfile.mkdtemp("pytorch_compiled")
print("tmp export_dir:", export_dir)
mod = PyTorchTVMModule()
print("Converting...")
mod.from_pytorch(model_jit, input_shapes, dtype)
log_file = os.path.join(export_dir, "tuning.log")
if not os.path.exists(log_file):
print("Tuning...")
mod.tune_tvm(log_file=log_file, n_trial=20)
print("Building...")
tvm_mod = mod.build_tvm(export_dir)
pytorch_mod = mod.build_pytorch_module(num_inputs=2, num_outputs=1)
## Or you can load from a prebuilt tvm module
# mod = PyTorchTVMModule()
# tvm_mod = mod.load_tvm(export_dir)
# pytorch_mod = mod.build_pytorch_module(num_inputs=2, num_outputs=1, input_infos=input_shapes)
print("Run TVM...")
tvm_x = tvm.nd.array(x.cpu().numpy().astype(dtype), device=tvm.gpu(0))
tvm_y = tvm.nd.array(y.cpu().numpy().astype(dtype), device=tvm.gpu(0))
for i in range(20):
t = time.time()
tvm_mod.run(x=tvm_x, y=tvm_y)
print(1000 * (time.time() - t))
tvm_output = tvm_mod.get_output(0)
print(tvm_output.shape)
print("Run PyTorch...")
for i in range(20):
t = time.time()
outputs = pytorch_mod.forward([x, y])
torch.cuda.synchronize()
print(1000 * (time.time() - t))
print(outputs[0].shape)
class EnsembleModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.layer = torch.jit.script(pytorch_mod)
def forward(self, x, y, z) -> torch.Tensor:
if x > 1:
out = self.layer(y, z)[0]
else:
out = torch.ones([1280, 2464, 1])
return out
print("Exporting...")
scripted = torch.jit.script(EnsembleModel())
print(scripted.graph)
scripted_path = os.path.join(export_dir, "model_tvm.pt")
scripted.save(scripted_path)
# print(o == outputs[0])
# print(o - outputs[0])
| https://github.com/zk-ml/tachikoma |
apps/pt_tvmdsoop/tests/test_torch_vm_module.py | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test script for torch vm module"""
import tempfile
import os
import logging
import torch
import numpy as np
import tvm
from tvm.contrib.torch.pytorch_tvm import TVM_ASSETS
import tvm.testing
from tvm import te, relay
import tvm.contrib.torch
from tvm.contrib import graph_runtime
TVM_ASSETS = ["mod.so", "code.ro"]
def test_use_pt_vm_module():
"""main test function"""
def build_export_vm(device):
"""relay build & export graph"""
x = relay.var("x", shape=(10, 5))
y = relay.var("y", shape=(1, 5))
z = relay.add(x, y)
z = relay.exp(z)
func = relay.Function([x, y], z)
x_data = np.random.rand(10, 5).astype("float32")
y_data = np.random.rand(1, 5).astype("float32")
pt_device = torch.device(device)
if pt_device.type == "cuda":
target = "cuda"
ctx = tvm.cuda(pt_device.index)
else:
target = "llvm"
ctx = tvm.cpu(0)
exe = relay.vm.compile(tvm.IRModule.from_expr(func), target=target, params={})
code, lib = exe.save()
export_dir = tempfile.mkdtemp("tvm_export")
# export to tempdir
lib.export_library(os.path.join(export_dir, TVM_ASSETS[0]))
with open(os.path.join(export_dir, TVM_ASSETS[1]), "wb") as fout:
fout.write(code)
vm = tvm.runtime.vm.VirtualMachine(exe, ctx)
res = vm.run(x_data, y_data)
ref_res = np.exp(y_data + x_data)
tvm.testing.assert_allclose(res.numpy(), ref_res, atol=1e-5, rtol=1e-5)
return export_dir
def test_pt_run(device, trace=True, to_device=None, inp_on_cuda=False):
"""test add lib with Pytorch wrapper"""
print("\n############## Test on device:", device, "#################")
export_dir = build_export_vm(device)
engine = tvm.contrib.torch.VMModule(num_inputs=2, num_outputs=1).to(device)
x = np.random.rand(10, 5).astype("float32")
y = np.random.rand(1, 5).astype("float32")
expect = np.exp(y + x)
def get_inputs_by_device(device):
inps = [torch.Tensor(x), torch.Tensor(y)]
if device == "cpu":
return inps
else:
device_type, device_id = device.split(":")
assert device_type == "cuda"
return [inp.cuda(int(device_id)) for inp in inps]
assets = [os.path.join(export_dir, i) for i in TVM_ASSETS]
engine.init((x.shape, y.shape), *assets)
outputs = engine.forward(get_inputs_by_device(device))
tvm.testing.assert_allclose(outputs[0].cpu(), expect, atol=1e-5, rtol=1e-5)
if trace:
print("\n################ Test trace and load #################")
scripted = torch.jit.script(engine)
scripted_dir = tempfile.mkdtemp("scripted")
scripted_path = os.path.join(scripted_dir, "model.pt")
scripted.save(scripted_path)
loaded = torch.jit.load(scripted_path)
outputs = loaded.forward(get_inputs_by_device(device))
tvm.testing.assert_allclose(outputs[0].cpu(), expect, atol=1e-5, rtol=1e-5)
del scripted
del loaded
if to_device:
print(
"\n################ Test move from [{}] to [{}] #################".format(
device, to_device
)
)
engine = engine.to(to_device)
outputs = engine.forward(get_inputs_by_device(to_device))
tvm.testing.assert_allclose(outputs[0].cpu(), expect, atol=1e-5, rtol=1e-5)
del engine
test_pt_run(device="cuda:0", trace=True, to_device="cuda:1", inp_on_cuda=True)
test_pt_run(device="cpu", trace=True, inp_on_cuda=False)
if __name__ == "__main__":
test_use_pt_vm_module()
| https://github.com/zk-ml/tachikoma |
apps/pt_tvmdsoop/tests/test_trace_tvm_module.py | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test script for torch module"""
import torch
import time
import tvm
from tvm.contrib.torch import compile, TraceTvmModule, pytorch_tvm
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x: torch.Tensor, y: torch.Tensor):
return x * y
model = Model()
x = torch.rand([1, 2, 3])
y = torch.rand([1, 2, 3])
model_jit = torch.jit.script(model)
option = {
"input_infos": [("x", (1, 2, 3)), ("y", (1, 2, 3))],
"default_dtype": "float32",
"export_dir": "pytorch_compiled",
"num_outputs": 1,
"tuning_n_trials": 0, # set zero to skip tuning
"tuning_log_file": "tuning.log",
"target": "llvm",
"device": tvm.cpu(),
}
# use TraceTvmModule to convert List[Tensor] input/output
# to tuple of Tensors
pytorch_tvm_module = compile(model_jit, option)
scripted = torch.jit.script(pytorch_tvm_module)
traced = torch.jit.trace(TraceTvmModule(scripted), (x, y))
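# torch.jit.trace only handles Tensor (or tuple-of-Tensor) inputs and outputs, which is
# why the scripted TVM module is wrapped in TraceTvmModule before tracing (see the
# comment above); the traced module then takes x and y directly instead of a list.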
res_traced = traced.forward(x, y)
res_expected = pytorch_tvm_module.forward([x, y])[0]
tvm.testing.assert_allclose(res_traced, res_expected)
| https://github.com/zk-ml/tachikoma |
apps/sgx/build.rs | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
use std::process::Command;
macro_rules! mf_dir {
($p:literal) => {
concat!(env!("CARGO_MANIFEST_DIR"), $p)
};
}
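// mf_dir! concatenates a literal path onto this crate's manifest directory at compile
// time, letting the build script find build_model.py and the in-tree TVM/NNVM Python
// packages relative to the repository checkout.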
fn main() {
let out_dir = std::env::var("OUT_DIR").unwrap();
let build_output = Command::new(mf_dir!("/src/build_model.py"))
.arg(&out_dir)
.env(
"PYTHONPATH",
concat!(
mf_dir!("/../../python"),
":",
mf_dir!("/../../nnvm/python")
),
)
.output()
.expect("Failed to build model");
assert!(
["model.o", "graph.json", "params.bin"]
.iter()
.all(|f| { std::path::Path::new(&format!("{}/{}", out_dir, f)).exists() }),
"Could not build tvm lib: STDOUT:\n\n{}\n\nSTDERR\n\n{}",
String::from_utf8(build_output.stdout).unwrap().trim(),
String::from_utf8(build_output.stderr).unwrap().trim()
);
let sysroot_output = Command::new("rustc")
.args(&["--print", "sysroot"])
.output()
.expect("Failed to get sysroot");
let sysroot = String::from_utf8(sysroot_output.stdout).unwrap();
let sysroot = sysroot.trim();
let mut llvm_tools_path = std::path::PathBuf::from(&sysroot);
llvm_tools_path.push("lib/rustlib/x86_64-unknown-linux-gnu/bin");
Command::new("rustup")
.args(&["component", "add", "llvm-tools-preview"])
.output()
.expect("failed to install llvm tools");
std::process::Command::new(llvm_tools_path.join("llvm-objcopy"))
.arg("--globalize-symbol=__tvm_module_startup")
.arg("--remove-section=.ctors")
.arg(&format!("{}/model.o", out_dir))
.output()
.expect("gould not gloablize startup function");
std::process::Command::new(llvm_tools_path.join("llvm-ar"))
.arg("rcs")
.arg(&format!("{}/libmodel.a", out_dir))
.arg(&format!("{}/model.o", out_dir))
.output()
.expect("failed to package model archive");
println!("cargo:rustc-link-lib=static=model");
println!("cargo:rustc-link-search=native={}", out_dir);
}
| https://github.com/zk-ml/tachikoma |
apps/sgx/read_results.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import struct
import sys
import numpy as np
def float_bytes(l):
for i in range(0, len(l), 4):
yield l[i : i + 4]
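# The comprehension below decodes stdin as a flat stream of native-endian float32 values.
# Quick sanity check (assuming this file is run directly as read_results.py):
#   python -c "import struct,sys; sys.stdout.buffer.write(struct.pack('ff', 1.0, 2.0))" \
#     | python read_results.py
# which should print: [1. 2.]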
floats = [struct.unpack("f", f)[0] for f in float_bytes(sys.stdin.buffer.read())]
print(np.array(floats))
| https://github.com/zk-ml/tachikoma |
apps/sgx/src/build_model.py | #!/usr/bin/python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Creates a simple TVM modules."""
import os
from os import path as osp
import sys
from tvm import relay, runtime
from tvm.relay import testing
import tvm
from tvm import te
def main():
dshape = (1, 28, 28)
net, params = relay.testing.mlp.get_workload(batch_size=dshape[0], dtype="float32")
dshape = (1, 3, 224, 224)
net, params = relay.testing.resnet.get_workload(
layers=18, batch_size=dshape[0], image_shape=dshape[1:]
)
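# Note: the ResNet-18 workload above replaces the MLP workload created a few lines
# earlier, so only ResNet-18 is actually built and exported below.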
with tvm.transform.PassContext(opt_level=3):
graph, lib, params = relay.build(
net,
"llvm",
params=params,
runtime=tvm.relay.backend.Runtime("cpp", {"system-lib": True}),
)
build_dir = osp.abspath(sys.argv[1])
if not osp.isdir(build_dir):
os.makedirs(build_dir, exist_ok=True)
lib.save(osp.join(build_dir, "model.o"))
with open(osp.join(build_dir, "graph.json"), "w") as f_graph_json:
f_graph_json.write(graph)
with open(osp.join(build_dir, "params.bin"), "wb") as f_params:
f_params.write(runtime.save_param_dict(params))
if __name__ == "__main__":
main()
| https://github.com/zk-ml/tachikoma |
apps/sgx/src/main.rs | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
extern crate tvm_runtime;
use std::{
convert::TryFrom as _,
io::{Read as _, Write as _},
};
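// main() links in the graph/params emitted by build.rs, loads them into a TVM graph
// executor backed by the system library, then serves raw tensors over TCP on
// 127.0.0.1:4242: each connection writes the "data" input bytes and reads back the
// bytes of output 0.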
fn main() {
let syslib = tvm_runtime::SystemLibModule::default();
let graph_json = include_str!(concat!(env!("OUT_DIR"), "/graph.json"));
let params_bytes = include_bytes!(concat!(env!("OUT_DIR"), "/params.bin"));
let params = tvm_runtime::load_param_dict(params_bytes).unwrap();
let graph = tvm_runtime::Graph::try_from(graph_json).unwrap();
let mut exec = tvm_runtime::GraphExecutor::new(graph, &syslib).unwrap();
exec.load_params(params);
let listener = std::net::TcpListener::bind("127.0.0.1:4242").unwrap();
for stream in listener.incoming() {
let mut stream = stream.unwrap();
if let Err(_) =
stream.read_exact(exec.get_input("data").unwrap().data().view().as_mut_slice())
{
continue;
}
exec.run();
if let Err(_) = stream.write_all(exec.get_output(0).unwrap().data().as_slice()) {
continue;
}
}
}
| https://github.com/zk-ml/tachikoma |
apps/tf_tvmdsoop/tests/test_tfop_module.py | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test script for tf op module"""
import tempfile
import os
import logging
import tensorflow as tf
import numpy as np
import tvm
from tvm import te
from tvm.contrib import tf_op
def test_use_tvmdso_op():
"""main test function"""
def export_cpu_add_lib():
"""create cpu add op lib"""
n = te.var("n")
ph_a = te.placeholder((n,), name="ph_a")
ph_b = te.placeholder((n,), name="ph_b")
ph_c = te.compute(ph_a.shape, lambda i: ph_a[i] + ph_b[i], name="ph_c")
sched = te.create_schedule(ph_c.op)
fadd_dylib = tvm.build(sched, [ph_a, ph_b, ph_c], "c", name="vector_add")
lib_path = tempfile.mktemp("tvm_add_dll.so")
fadd_dylib.export_library(lib_path)
return lib_path
def export_gpu_add_lib():
"""create gpu add op lib"""
n = te.var("n")
ph_a = te.placeholder((n,), name="ph_a")
ph_b = te.placeholder((n,), name="ph_b")
ph_c = te.compute(ph_a.shape, lambda i: ph_a[i] + ph_b[i], name="ph_c")
sched = te.create_schedule(ph_c.op)
b_axis, t_axis = sched[ph_c].split(ph_c.op.axis[0], factor=64)
sched[ph_c].bind(b_axis, te.thread_axis("blockIdx.x"))
sched[ph_c].bind(t_axis, te.thread_axis("threadIdx.x"))
fadd_dylib = tvm.build(sched, [ph_a, ph_b, ph_c], "cuda", name="vector_add")
lib_path = tempfile.mktemp("tvm_add_cuda_dll.so")
fadd_dylib.export_library(lib_path)
return lib_path
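# Both exporters above build the same element-wise vector_add kernel; the GPU variant
# additionally splits the loop and binds it to CUDA block/thread axes before exporting
# the shared library that test_add loads as a TensorFlow op.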
def test_add(session, lib_path, tf_device):
"""test add lib with TensorFlow wrapper"""
module = tf_op.OpModule(lib_path)
left = tf.placeholder("float32", shape=[4])
right = tf.placeholder("float32", shape=[4])
feed_dict = {left: [1.0, 2.0, 3.0, 4.0], right: [5.0, 6.0, 7.0, 8.0]}
expect = np.asarray([6.0, 8.0, 10.0, 12.0])
add1 = module.func("vector_add", output_shape=[4], output_dtype="float")
add2 = module.func("vector_add", output_shape=tf.shape(left), output_dtype="float")
add3 = module.func("vector_add", output_shape=[tf.shape(left)[0]], output_dtype="float")
with tf.device(tf_device):
output1 = session.run(add1(left, right), feed_dict)
np.testing.assert_equal(output1, expect)
output2 = session.run(add2(left, right), feed_dict)
np.testing.assert_equal(output2, expect)
output3 = session.run(add3(left, right), feed_dict)
np.testing.assert_equal(output3, expect)
def cpu_test(session):
"""test function for cpu"""
cpu_lib = None
try:
cpu_lib = export_cpu_add_lib()
test_add(session, cpu_lib, "/cpu:0")
finally:
if cpu_lib is not None:
os.remove(cpu_lib)
def gpu_test(session):
"""test function for gpu"""
gpu_lib = None
try:
gpu_lib = export_gpu_add_lib()
test_add(session, gpu_lib, "/gpu:0")
finally:
if gpu_lib is not None:
os.remove(gpu_lib)
with tf.Session() as session:
if tvm.runtime.enabled("cpu"):
logging.info("Test TensorFlow op on cpu kernel")
cpu_test(session)
if tvm.runtime.enabled("gpu"):
logging.info("Test TensorFlow op on gpu kernel")
gpu_test(session)
if __name__ == "__main__":
test_use_tvmdso_op()
| https://github.com/zk-ml/tachikoma |
apps/topi_recipe/broadcast/test_broadcast_map.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import tvm
from tvm import te
from tvm.contrib import nvcc
import numpy as np
from tvm import topi
TASK = "reduce_map"
USE_MANUAL_CODE = False
@tvm.register_func("tvm_callback_cuda_compile", override=True)
def tvm_callback_cuda_compile(code):
ptx = nvcc.compile_cuda(code, target_format="ptx")
return ptx
def write_code(code, fname):
with open(fname, "w") as f:
f.write(code)
@tvm.register_func
def tvm_callback_cuda_postproc(code):
if not os.path.exists("perf"):
os.mkdir("perf")
write_code(code, "perf/%s_generated.cu" % TASK)
if USE_MANUAL_CODE:
code = open("perf/%s_manual.cu" % TASK).read()
return code
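# The two callbacks above dump the generated CUDA source to perf/<TASK>_generated.cu
# and, when USE_MANUAL_CODE is set, substitute a hand-written kernel read from
# perf/<TASK>_manual.cu before it is compiled to PTX.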
def test_broadcast_to(in_shape, out_shape):
global TASK
TASK = (
"bcast_to_i"
+ "_".join([str(ele) for ele in in_shape])
+ "o"
+ "_".join([str(ele) for ele in out_shape])
)
# Build the logic and compile the function
A = te.placeholder(shape=in_shape, name="A")
B = topi.broadcast_to(A, out_shape)
s = topi.cuda.schedule_broadcast(B)
fcuda = tvm.build(s, [A, B], "cuda", name="broadcast_to")
data_npy = np.random.uniform(size=in_shape).astype(A.dtype)
out_npy = np.broadcast_to(data_npy, out_shape)
data_nd = tvm.nd.array(data_npy, tvm.cuda())
out_nd = tvm.nd.array(np.empty(out_shape).astype(B.dtype), tvm.cuda())
for _ in range(2):
fcuda(data_nd, out_nd)
tvm.testing.assert_allclose(out_nd.numpy(), out_npy)
def test_broadcast_binary_op(lhs_shape, rhs_shape, typ="add"):
global TASK
TASK = (
"bcast_binary_"
+ typ
+ "_lhs"
+ "_".join([str(ele) for ele in lhs_shape])
+ "rhs"
+ "_".join([str(ele) for ele in rhs_shape])
)
A = te.placeholder(shape=lhs_shape, name="A")
B = te.placeholder(shape=rhs_shape, name="B")
if typ == "add":
C = topi.broadcast_add(A, B)
elif typ == "sub":
C = topi.broadcast_sub(A, B)
elif typ == "div":
C = topi.broadcast_div(A, B)
elif typ == "mul":
C = topi.broadcast_mul(A, B)
elif typ == "maximum":
C = topi.broadcast_maximum(A, B)
elif typ == "minimum":
C = topi.broadcast_minimum(A, B)
else:
raise NotImplementedError
s = topi.cuda.schedule_broadcast(C)
fcuda = tvm.build(s, [A, B, C], "cuda", name="broadcast_binary" + "_" + typ)
lhs_npy = np.random.uniform(size=lhs_shape).astype(A.dtype)
rhs_npy = np.random.uniform(size=rhs_shape).astype(A.dtype)
if typ == "add":
out_npy = lhs_npy + rhs_npy
elif typ == "sub":
out_npy = lhs_npy - rhs_npy
elif typ == "div":
rhs_npy = np.abs(rhs_npy) + 0.001
out_npy = lhs_npy / rhs_npy
elif typ == "mul":
out_npy = lhs_npy * rhs_npy
elif typ == "maximum":
out_npy = np.maximum(lhs_npy, rhs_npy)
elif typ == "minimum":
out_npy = np.minimum(lhs_npy, rhs_npy)
lhs_nd = tvm.nd.array(lhs_npy, tvm.cuda())
rhs_nd = tvm.nd.array(rhs_npy, tvm.cuda())
out_nd = tvm.nd.array(np.empty(out_npy.shape).astype(B.dtype), tvm.cuda())
for _ in range(2):
fcuda(lhs_nd, rhs_nd, out_nd)
tvm.testing.assert_allclose(out_nd.numpy(), out_npy)
if __name__ == "__main__":
test_broadcast_to((1,), (10,))
test_broadcast_to((1, 1, 5, 4), (3, 4, 4, 4, 5, 4))
test_broadcast_to((1, 128, 1, 32), (64, 128, 64, 32))
test_broadcast_binary_op((5, 2, 3), (2, 1), typ="add")
test_broadcast_binary_op((5, 64, 128), (2, 5, 64, 1), typ="mul")
test_broadcast_binary_op((2, 3, 1, 32), (64, 32), typ="div")
test_broadcast_binary_op((1, 32), (64, 32), typ="sub")
test_broadcast_binary_op((32,), (64, 32), typ="maximum")
test_broadcast_binary_op((1, 2, 2, 1, 32), (64, 32), typ="minimum")
| https://github.com/zk-ml/tachikoma |
apps/topi_recipe/conv/depthwise_conv2d_test.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import tvm
from tvm import te
import numpy as np
from scipy import signal
from tvm.contrib import nvcc
from tvm import topi
from tvm.topi.utils import get_const_tuple
from tvm.topi.cuda.depthwise_conv2d import (
schedule_depthwise_conv2d_nchw,
schedule_depthwise_conv2d_nhwc,
)
TASK = "depthwise_conv2d"
USE_MANUAL_CODE = False
@tvm.register_func("tvm_callback_cuda_compile", override=True)
def tvm_callback_cuda_compile(code):
ptx = nvcc.compile_cuda(code, target_format="ptx")
return ptx
def write_code(code, fname):
with open(fname, "w") as f:
f.write(code)
@tvm.register_func
def tvm_callback_cuda_postproc(code):
if not os.path.exists("perf"):
os.mkdir("perf")
write_code(code, "perf/%s_generated.cu" % TASK)
if USE_MANUAL_CODE:
code = open("perf/%s_manual.cu" % TASK).read()
return code
def test_depthwise_conv2d_nchw():
"""You may test different settings."""
batch = 1
in_channel = 256
in_height = 96
in_width = 96
filter_channel = in_channel
channel_multiplier = 1
filter_height = 3
filter_width = 3
stride_h = 1
stride_w = 1
padding = "SAME" # or 'VALID'
# Placeholder
Input = te.placeholder((batch, in_channel, in_height, in_width), name="Input")
Filter = te.placeholder(
(filter_channel, channel_multiplier, filter_height, filter_width), name="Filter"
)
Stride = [stride_h, stride_w]
Scale = te.placeholder((in_channel * channel_multiplier,), name="Scale")
Shift = te.placeholder((in_channel * channel_multiplier,), name="Shift")
# Declare
DepthwiseConv2d = topi.nn.depthwise_conv2d_nchw(Input, Filter, Stride, padding)
ScaleShift = topi.nn.scale_shift_nchw(DepthwiseConv2d, Scale, Shift)
Relu = topi.nn.relu(ScaleShift)
# Schedule
s1 = schedule_depthwise_conv2d_nchw(DepthwiseConv2d)
s2 = schedule_depthwise_conv2d_nchw(ScaleShift)
s3 = schedule_depthwise_conv2d_nchw(Relu)
input_np = np.random.uniform(size=get_const_tuple(Input.shape)).astype(Input.dtype)
filter_np = np.random.uniform(size=get_const_tuple(Filter.shape)).astype(Filter.dtype)
scale_np = np.random.uniform(size=(in_channel * channel_multiplier)).astype(Scale.dtype)
shift_np = np.random.uniform(size=(in_channel * channel_multiplier)).astype(Shift.dtype)
def check_device(device):
if not tvm.runtime.enabled(device):
print("Skip because %s is not enabled" % device)
return
dev = tvm.device(device, 0)
# Build the kernel
f1 = tvm.build(s1, [Input, Filter, DepthwiseConv2d], device)
f2 = tvm.build(s2, [Input, Filter, Scale, Shift, ScaleShift], device)
f3 = tvm.build(s3, [Input, Filter, Scale, Shift, Relu], device)
# Prepare data
input_tvm = tvm.nd.array(input_np, dev)
filter_tvm = tvm.nd.array(filter_np, dev)
scale_tvm = tvm.nd.array(scale_np, dev)
shift_tvm = tvm.nd.array(shift_np, dev)
depthwise_conv2d_tvm = tvm.nd.array(
np.zeros(shape=get_const_tuple(DepthwiseConv2d.shape), dtype=DepthwiseConv2d.dtype), dev
)
scale_shift_tvm = tvm.nd.array(
np.zeros(shape=get_const_tuple(ScaleShift.shape), dtype=ScaleShift.dtype), dev
)
relu_tvm = tvm.nd.array(np.zeros(shape=get_const_tuple(Relu.shape), dtype=Relu.dtype), dev)
# Measure time cost of kernel 1 (depthwise_conv2d)
timer_1 = f1.time_evaluator(f1.entry_name, dev, number=1000)
tcost_1 = timer_1(input_tvm, filter_tvm, depthwise_conv2d_tvm).mean
# Measure time cost of kernel 2 (depthwise_conv2d + scale_shift)
timer_2 = f2.time_evaluator(f2.entry_name, dev, number=1000)
tcost_2 = timer_2(input_tvm, filter_tvm, scale_tvm, shift_tvm, scale_shift_tvm).mean
# Measure time cost of kernel 3 (depthwise_conv2d + scale_shift + relu)
timer_3 = f3.time_evaluator(f3.entry_name, dev, number=1000)
tcost_3 = timer_3(input_tvm, filter_tvm, scale_tvm, shift_tvm, relu_tvm).mean
print("Input shape = " + str(get_const_tuple(Input.shape)))
print("Filter shape = " + str(get_const_tuple(Filter.shape)))
print("Stride = (%d, %d)" % (stride_h, stride_w))
print("padding = %s\n" % padding)
print("Output shape = " + str(get_const_tuple(DepthwiseConv2d.shape)))
print("average time cost of 1000 runs (depthwise_conv2d) = %g us" % (tcost_1 * 1e6))
print(
"average time cost of 1000 runs (depthwise_conv2d + scale_shift) = %g us"
% (tcost_2 * 1e6)
)
print(
"average time cost of 1000 runs (depthwise_conv2d + scale_shift + relu) = %g us"
% (tcost_3 * 1e6)
)
# correctness
depthwise_conv2d_scipy = tvm.topi.testing.depthwise_conv2d_python_nchw(
input_np, filter_np, stride=[stride_h, stride_w], padding=padding
)
scale_shift_scipy = np.zeros(shape=get_const_tuple(ScaleShift.shape))
for c in range(in_channel * channel_multiplier):
scale_shift_scipy[:, c, :, :] = (
depthwise_conv2d_scipy[:, c, :, :] * scale_np[c] + shift_np[c]
)
relu_scipy = np.maximum(scale_shift_scipy, 0)
tvm.testing.assert_allclose(depthwise_conv2d_tvm.numpy(), depthwise_conv2d_scipy, rtol=1e-5)
tvm.testing.assert_allclose(scale_shift_tvm.numpy(), scale_shift_scipy, rtol=1e-5)
tvm.testing.assert_allclose(relu_tvm.numpy(), relu_scipy, rtol=1e-5)
print("success")
for device in ["cuda", "opencl", "rocm"]:
with tvm.transform.PassContext(
config={"tir.UnrollLoop": {"auto_max_step": 128, "explicit_unroll": device != "rocm"}}
):
check_device(device)
def test_depthwise_conv2d_nhwc():
"""You may test different settings."""
batch = 1
in_channel = 256
in_height = 96
in_width = 96
filter_channel = in_channel
channel_multiplier = 1
filter_height = 3
filter_width = 3
stride_h = 1
stride_w = 1
padding = "SAME" # or 'VALID'
# Placeholder
Input = te.placeholder((batch, in_height, in_width, in_channel), name="Input")
Filter = te.placeholder(
(filter_height, filter_width, filter_channel, channel_multiplier), name="Filter"
)
Stride = [stride_h, stride_w]
Scale = te.placeholder((in_channel * channel_multiplier,), name="Scale")
Shift = te.placeholder((in_channel * channel_multiplier,), name="Shift")
# Declare
DepthwiseConv2d = topi.nn.depthwise_conv2d_nhwc(Input, Filter, Stride, padding)
ScaleShift = topi.nn.scale_shift_nhwc(DepthwiseConv2d, Scale, Shift)
Relu = topi.nn.relu(ScaleShift)
# Schedule
s1 = schedule_depthwise_conv2d_nhwc(DepthwiseConv2d)
s2 = schedule_depthwise_conv2d_nhwc(ScaleShift)
s3 = schedule_depthwise_conv2d_nhwc(Relu)
input_np = np.random.uniform(size=get_const_tuple(Input.shape)).astype(Input.dtype)
filter_np = np.random.uniform(size=get_const_tuple(Filter.shape)).astype(Filter.dtype)
scale_np = np.random.uniform(size=(in_channel * channel_multiplier)).astype(Scale.dtype)
shift_np = np.random.uniform(size=(in_channel * channel_multiplier)).astype(Shift.dtype)
def check_device(device):
if not tvm.runtime.enabled(device):
print("Skip because %s is not enabled" % device)
return
dev = tvm.device(device, 0)
# Build the kernel
f1 = tvm.build(s1, [Input, Filter, DepthwiseConv2d], device)
f2 = tvm.build(s2, [Input, Filter, Scale, Shift, ScaleShift], device)
f3 = tvm.build(s3, [Input, Filter, Scale, Shift, Relu], device)
# Prepare data
input_tvm = tvm.nd.array(input_np, dev)
filter_tvm = tvm.nd.array(filter_np, dev)
scale_tvm = tvm.nd.array(scale_np, dev)
shift_tvm = tvm.nd.array(shift_np, dev)
depthwise_conv2d_tvm = tvm.nd.array(
np.zeros(shape=get_const_tuple(DepthwiseConv2d.shape), dtype=DepthwiseConv2d.dtype), dev
)
scale_shift_tvm = tvm.nd.array(
np.zeros(shape=get_const_tuple(ScaleShift.shape), dtype=ScaleShift.dtype), dev
)
relu_tvm = tvm.nd.array(np.zeros(shape=get_const_tuple(Relu.shape), dtype=Relu.dtype), dev)
# Measure time cost of kernel 1 (depthwise_conv2d)
timer_1 = f1.time_evaluator(f1.entry_name, dev, number=1000)
tcost_1 = timer_1(input_tvm, filter_tvm, depthwise_conv2d_tvm).mean
# Measure time cost of kernel 2 (depthwise_conv2d + scale_shift)
timer_2 = f2.time_evaluator(f2.entry_name, dev, number=1000)
tcost_2 = timer_2(input_tvm, filter_tvm, scale_tvm, shift_tvm, scale_shift_tvm).mean
# Measure time cost of kernel 3 (depthwise_conv2d + scale_shift + relu)
timer_3 = f3.time_evaluator(f3.entry_name, dev, number=1000)
tcost_3 = timer_3(input_tvm, filter_tvm, scale_tvm, shift_tvm, relu_tvm).mean
print("Input shape = " + str(get_const_tuple(Input.shape)))
print("Filter shape = " + str(get_const_tuple(Filter.shape)))
print("Stride = (%d, %d)" % (stride_h, stride_w))
print("padding = %s\n" % padding)
print("Output shape = " + str(get_const_tuple(DepthwiseConv2d.shape)))
print("average time cost of 1000 runs (depthwise_conv2d) = %g us" % (tcost_1 * 1e6))
print(
"average time cost of 1000 runs (depthwise_conv2d + scale_shift) = %g us"
% (tcost_2 * 1e6)
)
print(
"average time cost of 1000 runs (depthwise_conv2d + scale_shift + relu) = %g us"
% (tcost_3 * 1e6)
)
# correctness
depthwise_conv2d_scipy = tvm.topi.testing.depthwise_conv2d_python_nhwc(
input_np, filter_np, stride=[stride_h, stride_w], padding=padding
)
scale_shift_scipy = np.zeros(shape=get_const_tuple(ScaleShift.shape))
for c in range(in_channel * channel_multiplier):
scale_shift_scipy[:, :, :, c] = (
depthwise_conv2d_scipy[:, :, :, c] * scale_np[c] + shift_np[c]
)
relu_scipy = np.maximum(scale_shift_scipy, 0)
tvm.testing.assert_allclose(depthwise_conv2d_tvm.numpy(), depthwise_conv2d_scipy, rtol=1e-5)
tvm.testing.assert_allclose(scale_shift_tvm.numpy(), scale_shift_scipy, rtol=1e-5)
tvm.testing.assert_allclose(relu_tvm.numpy(), relu_scipy, rtol=1e-5)
print("success")
for device in ["cuda", "opencl", "rocm"]:
with tvm.transform.PassContext(
config={"tir.UnrollLoop": {"auto_max_step": 128, "explicit_unroll": device != "cuda"}}
):
check_device(device)
if __name__ == "__main__":
test_depthwise_conv2d_nchw()
test_depthwise_conv2d_nhwc()
| https://github.com/zk-ml/tachikoma |
apps/topi_recipe/conv/test_conv2d_hwcn_map.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Example code to do convolution."""
import os
import numpy as np
import scipy.signal
import tvm
from tvm import te
from tvm.contrib import nvcc
from tvm import topi
from tvm.topi.utils import get_const_tuple
TASK = "conv2d_hwcn_map"
USE_MANUAL_CODE = False
@tvm.register_func("tvm_callback_cuda_compile", override=True)
def tvm_callback_cuda_compile(code):
ptx = nvcc.compile_cuda(code, target_format="ptx")
return ptx
def write_code(code, fname):
with open(fname, "w") as f:
f.write(code)
@tvm.register_func
def tvm_callback_cuda_postproc(code):
if not os.path.exists("perf"):
os.mkdir("perf")
write_code(code, "perf/%s_generated.cu" % TASK)
if USE_MANUAL_CODE:
code = open("perf/%s_manual.cu" % TASK).read()
return code
def test_conv2d_hwcn_map():
batch = 64
in_channel = 128
in_height = 16
in_width = 16
num_filter = 128
kernel = 3
stride = 2
padding = "SAME"
A = te.placeholder((in_height, in_width, in_channel, batch), name="A")
W = te.placeholder((kernel, kernel, in_channel, num_filter), name="W")
B = topi.nn.conv2d_hwcn(A, W, stride, padding)
C = topi.nn.relu(B)
s1 = topi.cuda.schedule_conv2d_hwcn([B])
s2 = topi.cuda.schedule_conv2d_hwcn([C])
a_np = np.random.uniform(size=get_const_tuple(A.shape)).astype(A.dtype)
w_np = np.random.uniform(size=get_const_tuple(W.shape)).astype(W.dtype)
b_np = tvm.topi.testing.conv2d_hwcn_python(a_np, w_np, stride, padding)
c_np = np.maximum(b_np, 0)
def check_device(device):
if not tvm.runtime.enabled(device):
print("Skip because %s is not enabled" % device)
return
dev = tvm.device(device, 0)
a = tvm.nd.array(a_np, dev)
w = tvm.nd.array(w_np, dev)
b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), dev)
c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
with tvm.transform.PassContext(
config={
"tir.UrollLoop": {"auto_unroll_max_step": 128, "explicit_unroll": device == "rocm"}
}
):
func1 = tvm.build(s1, [A, W, B], device)
func1(a, w, b)
tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5)
func2 = tvm.build(s2, [A, W, C], device)
func2(a, w, c)
tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5)
for device in ["cuda", "opencl", "rocm"]:
check_device(device)
if __name__ == "__main__":
test_conv2d_hwcn_map()
| https://github.com/zk-ml/tachikoma |
apps/topi_recipe/conv/test_conv_int8_arm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable-msg=too-many-arguments, too-many-locals, assignment-from-no-return
""" Conv Int8 functional and performance testing"""
import sys
import logging
import numpy as np
import tvm
from tvm import te
from tvm import topi
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
LOGGER = logging.getLogger("test_conv_int8_arm")
LOGGER.disabled = False
# All the WORKLOADS from Resnet except first layer
# Workload is ['height', 'width', 'in_filter', 'out_filter',
# 'hkernel', 'wkernel', 'hpad', 'wpad', 'hstride', 'wstride'])
WORKLOADS = [
(56, 56, 64, 64, 3, 3, 1, 1, 1, 1),
(56, 56, 64, 64, 1, 1, 0, 0, 1, 1),
(56, 56, 64, 128, 3, 3, 1, 1, 2, 2),
(56, 56, 64, 128, 1, 1, 0, 0, 2, 2),
(28, 28, 128, 128, 3, 3, 1, 1, 1, 1),
(28, 28, 128, 256, 3, 3, 1, 1, 2, 2),
(28, 28, 128, 256, 1, 1, 0, 0, 2, 2),
(14, 14, 256, 256, 3, 3, 1, 1, 1, 1),
(14, 14, 256, 512, 3, 3, 1, 1, 2, 2),
(14, 14, 256, 512, 1, 1, 0, 0, 2, 2),
(7, 7, 512, 512, 3, 3, 1, 1, 1, 1),
(56, 56, 64, 256, 1, 1, 0, 0, 1, 1),
(56, 56, 256, 64, 1, 1, 0, 0, 1, 1),
(56, 56, 256, 128, 1, 1, 0, 0, 2, 2),
(28, 28, 128, 512, 1, 1, 0, 0, 1, 1),
(56, 56, 256, 512, 1, 1, 0, 0, 2, 2),
(28, 28, 512, 128, 1, 1, 0, 0, 1, 1),
(28, 28, 512, 256, 1, 1, 0, 0, 2, 2),
(14, 14, 256, 1024, 1, 1, 0, 0, 1, 1),
(28, 28, 512, 1024, 1, 1, 0, 0, 2, 2),
(14, 14, 1024, 256, 1, 1, 0, 0, 1, 1),
(14, 14, 1024, 512, 1, 1, 0, 0, 2, 2),
(7, 7, 512, 2048, 1, 1, 0, 0, 1, 1),
(14, 14, 1024, 2048, 1, 1, 0, 0, 2, 2),
(7, 7, 2048, 512, 1, 1, 0, 0, 1, 1),
]
TARGET_NAME = "llvm -device=arm_cpu -mtriple=aarch64-linux-gnu -mattr=+v8.2a,+dotprod"
NUM_VEC_LANES = 16
DEV = tvm.device(TARGET_NAME, 0)
def get_shape(
im_height, im_width, in_filter, out_filter, k_h, k_w, hpad, wpad, hstride, wstride, out_dtype
):
"""
Finds out the shape of all data structures
"""
data_shape = (1, in_filter // NUM_VEC_LANES, im_height, im_width, NUM_VEC_LANES)
if out_dtype == "int32" or out_dtype == "uint32":
kernel_shape = (
out_filter // NUM_VEC_LANES,
in_filter // NUM_VEC_LANES,
k_h,
k_w,
NUM_VEC_LANES // 4,
NUM_VEC_LANES,
4,
)
elif out_dtype == "float32":
kernel_shape = (
out_filter // NUM_VEC_LANES,
in_filter // NUM_VEC_LANES,
k_h,
k_w,
NUM_VEC_LANES,
NUM_VEC_LANES,
)
out_height = (im_height + 2 * hpad - k_h) // hstride + 1
out_width = (im_width + 2 * wpad - k_w) // wstride + 1
o_shape = (1, out_filter // NUM_VEC_LANES, out_height, out_width, NUM_VEC_LANES)
return (data_shape, kernel_shape, o_shape)
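# Layout note (inferred from the shapes above): data is NCHWc with c = NUM_VEC_LANES,
# and for int32/uint32 outputs the kernel carries an extra innermost factor of 4,
# presumably to line up with the 4-way dot-product instructions enabled by +dotprod in
# TARGET_NAME; the float32 kernel uses a plain NUM_VEC_LANES x NUM_VEC_LANES inner block.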
def run_inference(
data_dtype,
kernel_dtype,
out_dtype,
im_height,
im_width,
in_filter,
out_filter,
k_h,
k_w,
hpad,
wpad,
hstride,
wstride,
):
"""
Runs the inference and checks the functional correctness between
compute and schedule outputs
"""
(data_shape, kernel_shape, o_shape) = get_shape(
im_height,
im_width,
in_filter,
out_filter,
k_h,
k_w,
hpad,
wpad,
hstride,
wstride,
out_dtype,
)
# Create TVM placeholders
data = te.placeholder(data_shape, name="data", dtype=data_dtype)
kernel = te.placeholder(kernel_shape, name="kernel", dtype=kernel_dtype)
# Create the numpy arrays to be used for executing conv models
if data_dtype == "float32":
data_array = tvm.nd.array(np.random.rand(*data_shape).astype(dtype=data_dtype), DEV)
kernel_array = tvm.nd.array(np.random.rand(*kernel_shape).astype(dtype=kernel_dtype), DEV)
else:
data_array = tvm.nd.array(np.random.randint(100, size=data_shape).astype(data_dtype))
kernel_array = tvm.nd.array(np.random.randint(100, size=kernel_shape).astype(kernel_dtype))
    # c_orig will be used for the declaration (default-schedule) output
# c_sch will be used for scheduled computation output
c_orig = tvm.nd.array(np.zeros(o_shape, dtype=out_dtype), DEV)
c_sch = tvm.nd.array(np.zeros(o_shape, dtype=out_dtype), DEV)
with tvm.target.Target(TARGET_NAME):
if out_dtype == "float32":
conv = topi.nn.conv2d_NCHWc(
data,
kernel,
stride=hstride,
padding=hpad,
dilation=(1, 1),
layout="NCHWc",
out_layout="NCHWc",
out_dtype=out_dtype,
)
else:
conv = topi.nn.conv2d_NCHWc_int8(
data,
kernel,
strides=hstride,
padding=hpad,
dilation=(1, 1),
layout="NCHWc",
out_layout="NCHWc",
out_dtype=out_dtype,
)
out = topi.nn.relu(conv)
sch = te.create_schedule(out.op)
func = tvm.build(sch, [data, kernel, out], target=TARGET_NAME, name="out")
func(data_array, kernel_array, c_orig)
LOGGER.debug(tvm.lower(sch, [data, kernel], simple_mode=True))
# Generate and run the optimized schedule
if out_dtype == "float32":
sconv = topi.generic.nn.schedule_conv2d_NCHWc(outs=[out])
else:
sconv = topi.generic.nn.schedule_conv2d_NCHWc_int8(outs=[out])
func = tvm.build(sconv, [data, kernel, out], target=TARGET_NAME, name="conv")
func(data_array, kernel_array, c_sch)
# Functional check
if data_dtype == "uint8":
np.testing.assert_equal(c_orig.numpy(), c_sch.numpy())
else:
assert np.allclose(c_orig.numpy(), c_sch.numpy())
evaluator = func.time_evaluator(func.entry_name, DEV, number=1000)
LOGGER.debug(tvm.lower(sconv, [data, kernel], simple_mode=True))
return evaluator(data_array, kernel_array, c_sch).mean
if __name__ == "__main__":
LOGGER.info("Workload, Kernel_size, FP32_time, INT8_time, Speedup")
SPEEDUP_ARRAY = []
for i, wkl in enumerate(WORKLOADS):
for dtype in ["uint", "int"]:
fp32_time = run_inference("float32", "float32", "float32", *wkl)
int8_time = run_inference("%s8" % dtype, "%s8" % dtype, "%s32" % dtype, *wkl)
kernel_h = wkl[4]
kernel_w = wkl[5]
LOGGER.info(
"[%s] Workload#" % dtype
+ str(i)
+ ", "
+ str(kernel_h)
+ "x"
+ str(kernel_w)
+ ", "
+ str(fp32_time)
+ ", "
+ str(int8_time)
+ ", "
+ str(fp32_time / int8_time)
)
SPEEDUP_ARRAY.append(fp32_time / int8_time)
LOGGER.info("Average speedup --> %s" % str(sum(SPEEDUP_ARRAY) / float(len(SPEEDUP_ARRAY))))
| https://github.com/zk-ml/tachikoma |
apps/topi_recipe/conv/test_conv_int8_intel.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable-msg=too-many-arguments, too-many-locals, assignment-from-no-return
""" Conv Int8 functional and performance testing"""
import sys
import logging
import numpy as np
import tvm
from tvm import te
from tvm import topi
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
LOGGER = logging.getLogger("test_conv_int8_intel")
LOGGER.disabled = False
# All the WORKLOADS from Resnet except first layer
# Workload is ('height', 'width', 'in_filter', 'out_filter',
# 'hkernel', 'wkernel', 'hpad', 'wpad', 'hstride', 'wstride')
WORKLOADS = [
(56, 56, 64, 64, 3, 3, 1, 1, 1, 1),
(56, 56, 64, 64, 1, 1, 0, 0, 1, 1),
(56, 56, 64, 128, 3, 3, 1, 1, 2, 2),
(56, 56, 64, 128, 1, 1, 0, 0, 2, 2),
(28, 28, 128, 128, 3, 3, 1, 1, 1, 1),
(28, 28, 128, 256, 3, 3, 1, 1, 2, 2),
(28, 28, 128, 256, 1, 1, 0, 0, 2, 2),
(14, 14, 256, 256, 3, 3, 1, 1, 1, 1),
(14, 14, 256, 512, 3, 3, 1, 1, 2, 2),
(14, 14, 256, 512, 1, 1, 0, 0, 2, 2),
(7, 7, 512, 512, 3, 3, 1, 1, 1, 1),
(56, 56, 64, 256, 1, 1, 0, 0, 1, 1),
(56, 56, 256, 64, 1, 1, 0, 0, 1, 1),
(56, 56, 256, 128, 1, 1, 0, 0, 2, 2),
(28, 28, 128, 512, 1, 1, 0, 0, 1, 1),
(56, 56, 256, 512, 1, 1, 0, 0, 2, 2),
(28, 28, 512, 128, 1, 1, 0, 0, 1, 1),
(28, 28, 512, 256, 1, 1, 0, 0, 2, 2),
(14, 14, 256, 1024, 1, 1, 0, 0, 1, 1),
(28, 28, 512, 1024, 1, 1, 0, 0, 2, 2),
(14, 14, 1024, 256, 1, 1, 0, 0, 1, 1),
(14, 14, 1024, 512, 1, 1, 0, 0, 2, 2),
(7, 7, 512, 2048, 1, 1, 0, 0, 1, 1),
(14, 14, 1024, 2048, 1, 1, 0, 0, 2, 2),
(7, 7, 2048, 512, 1, 1, 0, 0, 1, 1),
]
TARGET_NAME = "llvm -mcpu=skylake-avx512"
NUM_VEC_LANES = 16
DEV = tvm.device(TARGET_NAME, 0)
def get_shape(
im_height, im_width, in_filter, out_filter, k_h, k_w, hpad, wpad, hstride, wstride, out_dtype
):
"""
    Compute the shapes of the data, kernel, and output tensors.
"""
## Find shapes
data_shape = (1, in_filter // NUM_VEC_LANES, im_height, im_width, NUM_VEC_LANES)
if out_dtype == "int32":
kernel_shape = (
out_filter // NUM_VEC_LANES,
in_filter // NUM_VEC_LANES,
k_h,
k_w,
NUM_VEC_LANES // 4,
NUM_VEC_LANES,
4,
)
elif out_dtype == "float32":
kernel_shape = (
out_filter // NUM_VEC_LANES,
in_filter // NUM_VEC_LANES,
k_h,
k_w,
NUM_VEC_LANES,
NUM_VEC_LANES,
)
out_height = (im_height + 2 * hpad - k_h) // hstride + 1
out_width = (im_width + 2 * wpad - k_w) // wstride + 1
o_shape = (1, out_filter // NUM_VEC_LANES, out_height, out_width, NUM_VEC_LANES)
return (data_shape, kernel_shape, o_shape)
def run_inference(
data_dtype,
kernel_dtype,
out_dtype,
im_height,
im_width,
in_filter,
out_filter,
k_h,
k_w,
hpad,
wpad,
hstride,
wstride,
):
"""
Runs the inference and checks the functional correctness between
compute and schedule outputs
"""
(data_shape, kernel_shape, o_shape) = get_shape(
im_height,
im_width,
in_filter,
out_filter,
k_h,
k_w,
hpad,
wpad,
hstride,
wstride,
out_dtype,
)
# Create TVM placeholders
data = te.placeholder(data_shape, name="data", dtype=data_dtype)
kernel = te.placeholder(kernel_shape, name="kernel", dtype=kernel_dtype)
# Create the numpy arrays to be used for executing conv models
if data_dtype == "float32":
data_array = tvm.nd.array(np.random.rand(*data_shape).astype(dtype=data_dtype), DEV)
kernel_array = tvm.nd.array(np.random.rand(*kernel_shape).astype(dtype=kernel_dtype), DEV)
else:
data_array = tvm.nd.array(np.random.randint(100, size=data_shape).astype(data_dtype))
kernel_array = tvm.nd.array(np.random.randint(100, size=kernel_shape).astype(kernel_dtype))
    # c_orig will be used for the declaration (default-schedule) output
# c_sch will be used for scheduled computation output
c_orig = tvm.nd.array(np.zeros(o_shape, dtype=out_dtype), DEV)
c_sch = tvm.nd.array(np.zeros(o_shape, dtype=out_dtype), DEV)
with tvm.target.Target(TARGET_NAME):
conv = topi.nn.conv2d_NCHWc(
data,
kernel,
stride=hstride,
padding=hpad,
dilation=(1, 1),
layout="NCHWc",
out_layout="NCHWc",
out_dtype=out_dtype,
)
out = topi.nn.relu(conv)
sch = te.create_schedule(out.op)
func = tvm.build(sch, [data, kernel, out], target=TARGET_NAME, name="out")
func(data_array, kernel_array, c_orig)
LOGGER.debug(tvm.lower(sch, [data, kernel], simple_mode=True))
# Generate and run the optimized schedule
sconv = topi.generic.nn.schedule_conv2d_NCHWc(outs=[out])
func = tvm.build(sconv, [data, kernel, out], target=TARGET_NAME, name="conv")
func(data_array, kernel_array, c_sch)
# Functional check
if data_dtype == "uint8":
np.testing.assert_equal(c_orig.numpy(), c_sch.numpy())
else:
assert np.allclose(c_orig.numpy(), c_sch.numpy())
evaluator = func.time_evaluator(func.entry_name, DEV, number=1000)
LOGGER.debug(tvm.lower(sconv, [data, kernel], simple_mode=True))
return evaluator(data_array, kernel_array, c_sch).mean
if __name__ == "__main__":
LOGGER.info("Workload, Kernel_size, FP32_time, INT8_time, Speedup")
SPEEDUP_ARRAY = []
for i, wkl in enumerate(WORKLOADS):
fp32_time = run_inference("float32", "float32", "float32", *wkl)
int8_time = run_inference("uint8", "int8", "int32", *wkl)
kernel_h = wkl[4]
kernel_w = wkl[5]
LOGGER.info(
"Workload#"
+ str(i)
+ ", "
+ str(kernel_h)
+ "x"
+ str(kernel_w)
+ ", "
+ str(fp32_time)
+ ", "
+ str(int8_time)
+ ", "
+ str(fp32_time / int8_time)
)
SPEEDUP_ARRAY.append(fp32_time / int8_time)
LOGGER.info("Average speedup --> %s" % str(sum(SPEEDUP_ARRAY) / float(len(SPEEDUP_ARRAY))))
| https://github.com/zk-ml/tachikoma |
apps/topi_recipe/gemm/android_gemm_square.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Example code to do square matrix multiplication on Android Phone."""
import tvm
from tvm import te
import os
from tvm import rpc
from tvm.contrib import utils, ndk
import numpy as np
# Set to be address of tvm proxy.
proxy_host = os.environ["TVM_ANDROID_RPC_PROXY_HOST"]
proxy_port = 9090
key = "android"
# Change target configuration.
# Run `adb shell cat /proc/cpuinfo` to find the arch.
arch = "arm64"
target = "llvm -mtriple=%s-linux-android" % arch
def ngflops(N):
return 2.0 * float(N * N * N) / (10**9)
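# Illustrative check: ngflops(1024) = 2 * 1024**3 / 1e9 ~= 2.15, i.e. roughly
# 2.15 GFLOP for one 1024 x 1024 square matmul.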
dtype = "float32"
def evaluate(func, dev, N, times):
a_np = np.random.uniform(size=(N, N)).astype(dtype)
b_np = np.random.uniform(size=(N, N)).astype(dtype)
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(b_np, dev)
c = tvm.nd.array(np.zeros((N, N), dtype=dtype), dev)
time_f = func.time_evaluator(func.entry_name, dev, number=times)
cost = time_f(a, b, c).mean
gf = ngflops(N) / cost
print("%g secs/op, %g GFLOPS" % (cost, gf))
np.testing.assert_almost_equal(c.numpy(), a_np.dot(b_np), decimal=2)
def test_gemm_gpu(N, times, bn, num_block, num_thread):
assert bn <= N
assert num_thread * num_thread * 16 <= N
assert num_block * num_block * 2 <= N
A = te.placeholder((N, N), name="A")
B = te.placeholder((N, N), name="Btmp")
k = te.reduce_axis((0, N), name="k")
packedB = te.compute((N, N / bn, bn), lambda x, y, z: B[x, y * bn + z], name="B")
C = te.compute(
(N, N), lambda ii, jj: te.sum(A[ii, k] * packedB[k, jj / bn, jj % bn], axis=k), name="C"
)
s = te.create_schedule(C.op)
CC = s.cache_write(C, "local")
block_x = te.thread_axis("blockIdx.x")
block_y = te.thread_axis("blockIdx.y")
thread_x = te.thread_axis("threadIdx.x")
thread_y = te.thread_axis("threadIdx.y")
thread_xz = te.thread_axis((0, 2), "vthread", name="vx")
thread_yz = te.thread_axis((0, 2), "vthread", name="vy")
pby, pbi = s[packedB].split(packedB.op.axis[0], nparts=num_thread)
pbx, pbj = s[packedB].split(packedB.op.axis[1], nparts=num_thread)
s[packedB].bind(pby, thread_y)
s[packedB].bind(pbx, thread_x)
pbz, pbk = s[packedB].split(packedB.op.axis[2], factor=8)
s[packedB].vectorize(pbk)
by, yi = s[C].split(C.op.axis[0], nparts=num_block)
bx, xi = s[C].split(C.op.axis[1], nparts=num_thread)
s[C].bind(by, block_y)
s[C].bind(bx, thread_y)
s[C].reorder(by, bx, yi, xi)
tyz, yi = s[C].split(yi, nparts=2)
ty, yi = s[C].split(yi, nparts=num_block)
txz, xi = s[C].split(xi, nparts=2)
tx, xi = s[C].split(xi, nparts=num_thread)
s[C].reorder(tyz, txz, ty, tx, yi, xi)
s[C].bind(tyz, thread_yz)
s[C].bind(txz, thread_xz)
s[C].bind(ty, block_x)
s[C].bind(tx, thread_x)
xyi, xxi = s[C].split(xi, factor=8)
s[C].reorder(tyz, txz, ty, tx, yi, xyi, xxi)
s[C].vectorize(xxi)
s[CC].compute_at(s[C], yi)
yo, xo = CC.op.axis
s[CC].reorder(k, yo, xo)
xo, xi = s[CC].split(xo, factor=8)
s[CC].vectorize(xi)
ko, ki = s[CC].split(k, factor=2)
s[CC].unroll(ki)
print(tvm.lower(s, [A, B, C], simple_mode=True))
f = tvm.build(s, [A, B, C], tvm.target.Target("opencl", host=target), name="gemm_gpu")
temp = utils.tempdir()
path_dso = temp.relpath("gemm_gpu.so")
f.export_library(path_dso, ndk.create_shared)
# connect to the proxy
remote = rpc.connect(proxy_host, proxy_port, key=key)
dev = remote.cl(0)
remote.upload(path_dso)
f = remote.load_module("gemm_gpu.so")
evaluate(f, dev, N, times)
if __name__ == "__main__":
test_gemm_gpu(1024, times=5, bn=8, num_block=2, num_thread=8)
| https://github.com/zk-ml/tachikoma |
apps/topi_recipe/gemm/cuda_gemm_square.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Example code to do square matrix multiplication."""
import tvm
from tvm import te
import os
from tvm.contrib import nvcc
from tvm.contrib import spirv
import numpy as np
import tvm.testing
TASK = "gemm"
USE_MANUAL_CODE = False
def test_gemm():
# graph
nn = 2048
n = te.var("n")
n = tvm.runtime.convert(nn)
m, l = n, n
A = te.placeholder((l, n), name="A")
B = te.placeholder((l, m), name="B")
k = te.reduce_axis((0, l), name="k")
C = te.compute((m, n), lambda ii, jj: te.sum(A[k, jj] * B[k, ii], axis=k), name="C")
# schedule
s = te.create_schedule(C.op)
AA = s.cache_read(A, "shared", [C])
BB = s.cache_read(B, "shared", [C])
AL = s.cache_read(AA, "local", [C])
BL = s.cache_read(BB, "local", [C])
CC = s.cache_write(C, "local")
scale = 8
num_thread = 8
block_factor = scale * num_thread
block_x = te.thread_axis("blockIdx.x")
thread_x = te.thread_axis((0, num_thread), "threadIdx.x")
block_y = te.thread_axis("blockIdx.y")
thread_y = te.thread_axis((0, num_thread), "threadIdx.y")
thread_xz = te.thread_axis((0, 2), "vthread", name="vx")
thread_yz = te.thread_axis((0, 2), "vthread", name="vy")
by, yi = s[C].split(C.op.axis[0], factor=block_factor)
bx, xi = s[C].split(C.op.axis[1], factor=block_factor)
s[C].bind(by, block_y)
s[C].bind(bx, block_x)
s[C].reorder(by, bx, yi, xi)
tyz, yi = s[C].split(yi, nparts=2)
ty, yi = s[C].split(yi, nparts=num_thread)
txz, xi = s[C].split(xi, nparts=2)
tx, xi = s[C].split(xi, nparts=num_thread)
s[C].bind(tyz, thread_yz)
s[C].bind(txz, thread_xz)
s[C].bind(ty, thread_y)
s[C].bind(tx, thread_x)
s[C].reorder(tyz, txz, ty, tx, yi, xi)
s[CC].compute_at(s[C], tx)
yo, xo = CC.op.axis
ko, ki = s[CC].split(k, factor=8)
kt, ki = s[CC].split(ki, factor=1)
s[CC].reorder(ko, kt, ki, yo, xo)
s[AA].compute_at(s[CC], ko)
s[BB].compute_at(s[CC], ko)
s[CC].unroll(kt)
s[AL].compute_at(s[CC], kt)
s[BL].compute_at(s[CC], kt)
# Schedule for A's shared memory load
ty, xi = s[AA].split(s[AA].op.axis[0], nparts=num_thread)
_, xi = s[AA].split(s[AA].op.axis[1], factor=num_thread * 4)
tx, xi = s[AA].split(xi, nparts=num_thread)
s[AA].bind(ty, thread_y)
s[AA].bind(tx, thread_x)
s[AA].vectorize(xi)
    # Schedule for B's shared memory load
ty, xi = s[BB].split(s[BB].op.axis[0], nparts=num_thread)
_, xi = s[BB].split(s[BB].op.axis[1], factor=num_thread * 4)
tx, xi = s[BB].split(xi, nparts=num_thread)
s[BB].bind(ty, thread_y)
s[BB].bind(tx, thread_x)
s[BB].vectorize(xi)
s[AA].double_buffer()
s[BB].double_buffer()
# correctness
def check_device(device):
dev = tvm.device(device, 0)
if not dev.exist:
print("Skip because %s is not enabled" % device)
return
print("Device %s" % device)
f = tvm.build(s, [A, B, C], device)
# launch the kernel.
n, m, l = nn, nn, nn
a_np = np.random.uniform(size=(n, l)).astype(A.dtype)
b_np = np.random.uniform(size=(m, l)).astype(B.dtype)
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(b_np, dev)
c = tvm.nd.array(np.zeros((n, m), dtype=C.dtype), dev)
for i in range(2):
f(a, b, c)
tvm.testing.assert_allclose(c.numpy(), np.dot(b_np.T, a_np), rtol=1e-5)
num_flops = 2 * nn * nn * nn
num_runs = 10
timer_f = f.time_evaluator(f.entry_name, dev, number=num_runs)
t = timer_f(a, b, c).mean
GFLOPS = num_flops / (t * 1e3) / 1e6
print("average time cost of %d runs = %g ms, %g GFLOPS." % (num_runs, t * 1e3, GFLOPS))
for device in ["cuda", "opencl", "rocm", "nvptx", "vulkan"]:
with tvm.transform.PassContext(
config={"tir.UnrollLoop": {"auto_max_step": 128, "explicit_unroll": device != "cuda"}}
):
check_device(device)
if __name__ == "__main__":
test_gemm()
| https://github.com/zk-ml/tachikoma |
apps/topi_recipe/gemm/gemm_int8.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"Example code to perform int8 GEMM"
import logging
import sys
import numpy as np
import tvm
import tvm.testing
from tvm import te
from tvm import autotvm
from tvm.topi.cuda.tensor_intrin import dp4a
DO_TUNING = True
PRETUNED_INDEX = 75333
intrin_dp4a = dp4a("local", "local", "local")
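# Note: dp4a yields a tensor intrinsic for the CUDA dp4a instruction, which multiplies
# four int8 pairs and accumulates the products into an int32 lane; it is tensorized
# over the innermost reduction axis of length 4 in gemm_int8 below.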
@autotvm.template
def gemm_int8(n, m, l):
A = te.placeholder((n, l), name="A", dtype="int8")
B = te.placeholder((m, l), name="B", dtype="int8")
k = te.reduce_axis((0, l), name="k")
C = te.compute(
(n, m),
lambda i, j: te.sum(A[i, k].astype("int32") * B[j, k].astype("int32"), axis=k),
name="C",
)
cfg = autotvm.get_config()
s = te.create_schedule(C.op)
y, x = C.op.axis
AA = s.cache_read(A, "shared", [C])
BB = s.cache_read(B, "shared", [C])
AL = s.cache_read(AA, "local", [C])
BL = s.cache_read(BB, "local", [C])
CC = s.cache_write(C, "local")
k = CC.op.reduce_axis[0]
cfg.define_split(
"tile_k",
cfg.axis(k),
num_outputs=3,
filter=lambda entity: entity.size[2] == 4 and entity.size[0] * 2 >= entity.size[1],
)
ko, kt, ki = cfg["tile_k"].apply(s, CC, k)
s[CC].tensorize(ki, intrin_dp4a)
block_x = te.thread_axis("blockIdx.x")
block_y = te.thread_axis("blockIdx.y")
thread_x = te.thread_axis("threadIdx.x")
thread_y = te.thread_axis("threadIdx.y")
def block_size_filter(entity):
return (
entity.size[0] * 2 >= entity.size[1] * 2
and entity.size[1] <= 16
and entity.size[3] <= 4
)
cfg.define_split("tile_y", cfg.axis(y), num_outputs=4, filter=block_size_filter)
cfg.define_split("tile_x", cfg.axis(x), num_outputs=4, filter=block_size_filter)
by, tyz, ty, yi = cfg["tile_y"].apply(s, C, y)
bx, txz, tx, xi = cfg["tile_x"].apply(s, C, x)
s[C].bind(by, block_y)
s[C].bind(bx, block_x)
s[C].bind(tyz, te.thread_axis("vthread"))
s[C].bind(txz, te.thread_axis("vthread"))
s[C].bind(ty, thread_y)
s[C].bind(tx, thread_x)
s[C].reorder(by, bx, tyz, txz, ty, tx, yi, xi)
s[CC].compute_at(s[C], tx)
yo, xo = CC.op.axis
s[CC].reorder(ko, kt, yo, xo, ki)
s[CC].unroll(kt)
for stage in [AL, BL]:
s[stage].compute_at(s[CC], kt)
_, xi = s[stage].split(stage.op.axis[1], factor=4)
s[stage].vectorize(xi)
s[stage].double_buffer()
cfg.define_knob("storage_align", [16, 48])
for stage in [AA, BB]:
s[stage].storage_align(s[stage].op.axis[0], cfg["storage_align"].val, 0)
s[stage].compute_at(s[CC], ko)
fused = s[stage].fuse(*s[stage].op.axis)
ty, tx = s[stage].split(fused, nparts=cfg["tile_y"].size[2])
tx, xi = s[stage].split(tx, nparts=cfg["tile_x"].size[2])
_, xi = s[stage].split(xi, factor=16)
s[stage].bind(ty, thread_y)
s[stage].bind(tx, thread_x)
s[stage].vectorize(xi)
cfg.define_knob("auto_unroll_max_step", [512, 1500])
s[C].pragma(by, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
s[C].pragma(by, "unroll_explicit", False)
cfg.add_flop(n * m * l * 2)
return s, [A, B, C]
if __name__ == "__main__":
N = 2048
n = m = l = N
logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
task = autotvm.task.create(gemm_int8, args=(n, m, l), target="cuda")
print(task.config_space)
measure_option = autotvm.measure_option(
builder=autotvm.LocalBuilder(),
runner=autotvm.LocalRunner(repeat=3, min_repeat_ms=100, timeout=4),
)
log_name = "gemm_int8.log"
if DO_TUNING:
tuner = autotvm.tuner.XGBTuner(task)
tuner.tune(
n_trial=1000,
measure_option=measure_option,
callbacks=[autotvm.callback.log_to_file(log_name)],
)
dispatch_context = autotvm.apply_history_best(log_name)
best_config = dispatch_context.query(task.target, task.workload)
print("\nBest config:")
print(best_config)
else:
config = task.config_space.get(PRETUNED_INDEX)
dispatch_context = autotvm.task.ApplyConfig(config)
print("Using pretuned config:")
print(config)
with dispatch_context:
with tvm.target.Target("cuda"):
s, arg_bufs = gemm_int8(n, m, l)
f = tvm.build(s, arg_bufs, "cuda", name="gemm_int8")
dev = tvm.device("cuda", 0)
a_np = np.random.randint(size=(n, l), low=-128, high=127, dtype="int8")
b_np = np.random.randint(size=(m, l), low=-128, high=127, dtype="int8")
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(b_np, dev)
c = tvm.nd.array(np.zeros((n, m), dtype="int32"), dev)
f(a, b, c)
tvm.testing.assert_allclose(
c.numpy(), np.dot(a_np.astype("int32"), b_np.T.astype("int32")), rtol=1e-5
)
num_ops = 2 * l * m * n
num_runs = 1000
timer_f = f.time_evaluator(f.entry_name, dev, number=num_runs)
t = timer_f(a, b, c).mean
GOPS = num_ops / (t * 1e3) / 1e6
print("average time cost of %d runs = %g ms, %g GOPS." % (num_runs, t * 1e3, GOPS))
| https://github.com/zk-ml/tachikoma |
apps/topi_recipe/reduce/test_reduce_map.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import tvm
import tvm.testing
from tvm import te
from tvm.contrib import nvcc
import numpy as np
from tvm import topi
TASK = "reduce_map"
USE_MANUAL_CODE = False
def write_code(code, fname):
with open(fname, "w") as f:
f.write(code)
@tvm.register_func
def tvm_callback_cuda_postproc(code):
if not os.path.exists("perf"):
os.mkdir("perf")
write_code(code, "perf/%s_generated.cu" % TASK)
if USE_MANUAL_CODE:
code = open("perf/%s_manual.cu" % TASK).read()
return code
def test_reduce_map(in_shape, axis, keepdims, type="sum", test_id=0):
global TASK
# Build the logic and compile the function
A = te.placeholder(shape=in_shape, name="A")
if type == "sum":
TASK = "sum_map_id%d" % test_id
B = topi.sum(A, axis=axis, keepdims=keepdims)
elif type == "max":
TASK = "max_map_id%d" % test_id
B = topi.max(A, axis=axis, keepdims=keepdims)
elif type == "min":
TASK = "min_map_id%d" % test_id
B = topi.min(A, axis=axis, keepdims=keepdims)
else:
raise NotImplementedError
s = topi.cuda.schedule_reduce(B)
with tvm.transform.PassContext(
config={
"tir.UnrollLoop": {
"auto_max_step": 16,
}
}
):
fcuda = tvm.build(s, [A, B], "cuda", name="sum")
# Test
in_npy = np.random.normal(size=in_shape).astype(np.float32)
if type == "sum":
out_npy = in_npy.sum(axis=axis, keepdims=keepdims)
elif type == "max":
out_npy = in_npy.max(axis=axis, keepdims=keepdims)
elif type == "min":
out_npy = in_npy.min(axis=axis, keepdims=keepdims)
else:
raise NotImplementedError
data_tvm = tvm.nd.array(in_npy, device=tvm.cuda())
out_tvm = tvm.nd.empty(shape=out_npy.shape, device=tvm.cuda())
for _ in range(2):
fcuda(data_tvm, out_tvm)
tvm.testing.assert_allclose(out_tvm.numpy(), out_npy, rtol=4e-4, atol=4e-4)
if __name__ == "__main__":
test_reduce_map(
in_shape=(128, 24, 128, 24), axis=(1, 2, 3), keepdims=True, type="sum", test_id=0
)
test_reduce_map(in_shape=(128, 24 * 128 * 24), axis=(1,), keepdims=False, type="max", test_id=1)
test_reduce_map(in_shape=(32, 128, 24), axis=None, keepdims=True, type="sum", test_id=2)
test_reduce_map(in_shape=(128, 24, 128, 24), axis=(0, 2), keepdims=False, type="min", test_id=3)
| https://github.com/zk-ml/tachikoma |
apps/topi_recipe/rnn/lstm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""LSTM Example, still work in progress.."""
import tvm
from tvm import te
import os
from tvm.contrib import nvcc
import numpy as np
# Quick knobs
TASK = "lstm"
USE_MANUAL_CODE = False
PERSIST_KERNEL = True
DETECT_GLOBAL_BARRIER = PERSIST_KERNEL
SKIP_CHECK = False
UNROLL_WLOAD = True
@tvm.register_func("tvm_callback_cuda_compile", override=True)
def tvm_callback_cuda_compile(code):
"""Use nvcc compiler for better perf."""
ptx = nvcc.compile_cuda(code, target_format="ptx")
return ptx
def write_code(code, fname):
with open(fname, "w") as f:
f.write(code)
@tvm.register_func
def tvm_callback_cuda_postproc(code):
if not os.path.exists("perf"):
os.mkdir("perf")
write_code(code, "perf/%s_generated.cu" % TASK)
if USE_MANUAL_CODE:
code = open("perf/%s_manual.cu" % TASK).read()
return code
def lstm():
if not PERSIST_KERNEL:
raise ValueError("Non persist LSTM not yet supported")
num_thread_y = 8
num_thread_x = 16 * 3 // 2
num_sm = 24
n_num_step = 128
num_step = te.var("num_step")
num_hidden = 1152 // 2
batch_size = 1
# Global transition matrix
    # The input-to-hidden contribution can be pre-calculated by a GEMM
Xi2h = te.placeholder((num_step, batch_size, 4, num_hidden), name="Xi2h")
# Only handle hidden transition, saves space.
Wh2h = te.placeholder((4, num_hidden, num_hidden), name="Wh2h")
# h: output hidden state, c: cell state.
s_state_h = te.placeholder((num_step, batch_size, num_hidden))
s_state_c = te.placeholder((num_step, batch_size, num_hidden))
s_init_c = te.compute((1, batch_size, num_hidden), lambda *i: 0.0, name="init_c")
s_init_h = te.compute((1, batch_size, num_hidden), lambda *i: 0.0, name="init_h")
# LSTM transition
k = te.reduce_axis((0, num_hidden), name="ki2h")
s_h2h = te.compute(
(num_step, batch_size, 4, num_hidden),
lambda t, i, x, j: te.sum(s_state_h[t - 1, i, k] * Wh2h[x, j, k], axis=k),
name="s_h2h",
)
# Gate rules
gates = te.compute(Xi2h.shape, lambda *i: Xi2h(*i) + s_h2h(*i), name="gates")
gshape = (num_step, batch_size, num_hidden)
in_gate = te.compute(gshape, lambda t, i, j: te.sigmoid(gates[t, i, 0, j]), name="in_gate")
in_transform = te.compute(
gshape, lambda t, i, j: te.tanh(gates[t, i, 1, j]), name="in_transform"
)
forget_gate = te.compute(
gshape, lambda t, i, j: te.sigmoid(gates[t, i, 2, j]), name="forget_gate"
)
out_gate = te.compute(gshape, lambda t, i, j: te.sigmoid(gates[t, i, 3, j]), name="out_gate")
next_c = te.compute(
gshape,
lambda t, i, j: forget_gate[t, i, j] * s_state_c[t - 1, i, j]
+ in_gate[t, i, j] * in_transform[t, i, j],
name="next_c",
)
next_h = te.compute(
gshape, lambda t, i, j: out_gate[t, i, j] * te.tanh(next_c[t, i, j]), name="next_h"
)
update_c = te.compute(gshape, lambda *i: next_c(*i), name="update_c")
update_h = te.compute(gshape, lambda *i: next_h(*i), name="update_h")
# schedule
scan_h, scan_c = tvm.te.scan(
[s_init_h, s_init_c],
[update_h, update_c],
[s_state_h, s_state_c],
inputs=[Xi2h],
name="lstm_scan",
)
# schedule
s = te.create_schedule(scan_h.op)
# Inline gate computations
s[gates].compute_inline()
s[in_gate].compute_inline()
s[in_transform].compute_inline()
s[forget_gate].compute_inline()
s[out_gate].compute_inline()
block_x = te.thread_axis((0, num_sm), "blockIdx.x")
thread_x = te.thread_axis((0, num_thread_x), "threadIdx.x")
thread_y = te.thread_axis((0, num_thread_y), "threadIdx.y")
s_state_h_S = s.cache_read(s_state_h, "shared", [s_h2h])
s_state_c_S = s.cache_read(s_state_c, "shared", [next_c])
Wh2hL = s.cache_read(Wh2h, "local", [s_h2h])
ko, ki = s[s_h2h].split(s[s_h2h].op.reduce_axis[0], nparts=num_thread_y)
s_h2h_rf = s.rfactor(s_h2h, ko)
s[s_h2h].bind(s[s_h2h].op.reduce_axis[0], thread_y)
s[s_h2h_rf].compute_at(s[s_h2h], s[s_h2h].op.reduce_axis[0])
if PERSIST_KERNEL:
s[scan_h.op].env_threads([block_x, thread_y, thread_x])
s[Wh2hL].compute_at(s[scan_h.op], thread_x)
else:
s[Wh2hL].compute_at(s[s_h2h], s[s_h2h].op.axis[3])
if UNROLL_WLOAD:
s[Wh2hL].unroll(Wh2hL.op.axis[0])
s[Wh2hL].unroll(Wh2hL.op.axis[2])
s[s_state_h_S].compute_at(s[s_h2h_rf], s[s_h2h_rf].op.axis[3])
s[s_state_c_S].compute_at(s[scan_h.op], s[scan_h].op.scan_axis)
for ss in [s_state_h_S]:
xo, xi = s[ss].split(ss.op.axis[2], factor=num_thread_x * num_thread_y)
ty, xi = s[ss].split(xi, nparts=num_thread_y)
tx, xi = s[ss].split(xi, nparts=num_thread_x)
s[ss].bind(ty, thread_y)
s[ss].bind(tx, thread_x)
for init in [s_init_c, s_init_h]:
bx, xi = s[init].split(init.op.axis[2], nparts=num_sm)
tx, xi = s[init].split(xi, nparts=num_thread_x)
s[init].bind(bx, block_x)
s[init].bind(tx, thread_x)
s[next_c].set_store_predicate(thread_y.equal(0))
s[next_h].set_store_predicate(thread_y.equal(0))
for update in [update_c, update_h]:
bx, xi = s[update].split(s[update].op.axis[2], nparts=num_sm)
tx, xi = s[update].split(xi, nparts=num_thread_x)
s[update].bind(bx, block_x)
s[update].bind(tx, thread_x)
s[update].set_store_predicate(thread_y.equal(0))
# verify we can lower correctly
def check_device(target):
num_step = n_num_step
flstm = tvm.build(s, [Xi2h, Wh2h, scan_h, scan_c], target)
dev = tvm.cuda(0) if target == "cuda" else tvm.cl(0)
# launch the kernel.
scan_h_np = np.zeros((num_step, batch_size, num_hidden)).astype("float32")
scan_c_np = np.zeros((num_step, batch_size, num_hidden)).astype("float32")
Xi2h_np = np.random.normal(size=(num_step, batch_size, 4, num_hidden)).astype("float32")
Wh2h_np = np.random.normal(size=(4, num_hidden, num_hidden)).astype("float32")
scan_h_a = tvm.nd.array(scan_h_np, dev)
scan_c_a = tvm.nd.array(scan_c_np, dev)
Xi2h_a = tvm.nd.array(Xi2h_np, dev)
Wh2h_a = tvm.nd.array(Wh2h_np, dev)
flstm(Xi2h_a, Wh2h_a, scan_h_a, scan_c_a)
dev.sync()
# measure time cost of second step.
evaluator = flstm.time_evaluator(flstm.entry_name, dev, 1, repeat=1000)
eval_result = evaluator(Xi2h_a, Wh2h_a, scan_h_a, scan_c_a)
print("Time cost=%g" % eval_result.mean)
# set unroll_explicit for more readable code.
with tvm.transform.PassContext(
config={
"tir.UnrollLoop": {
"auto_max_step": 128,
},
"tir.detect_global_barrier": DETECT_GLOBAL_BARRIER,
}
):
check_device("cuda")
if __name__ == "__main__":
lstm()
| https://github.com/zk-ml/tachikoma |
apps/topi_recipe/rnn/matexp.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Matrix exponential example.
This is an example for matrix exponential,
which calculates the following recursion formula
```math
X[t] = dot(X[t-1], W)
```
"""
import tvm
import tvm.testing
from tvm import te
import time
import os
import argparse
from tvm.contrib import nvcc
import numpy as np
# Quick knobs
TASK = "matexp"
USE_MANUAL_CODE = False
PERSIST_KERNEL = True
DETECT_GLOBAL_BARRIER = PERSIST_KERNEL
SKIP_CHECK = False
@tvm.register_func("tvm_callback_cuda_compile", override=True)
def tvm_callback_cuda_compile(code):
"""Use nvcc compiler for better perf."""
ptx = nvcc.compile_cuda(code, target_format="ptx")
return ptx
def write_code(code, fname):
with open(fname, "w") as f:
f.write(code)
@tvm.register_func
def tvm_callback_cuda_postproc(code):
if not os.path.exists("perf"):
os.mkdir("perf")
write_code(code, "perf/%s_generated.cu" % TASK)
if USE_MANUAL_CODE:
code = open("perf/%s_manual.cu" % TASK).read()
return code
def rnn_matexp():
n_num_step = 128
n_num_hidden = 1152
n_batch_size = 4
detect_global_barrier = DETECT_GLOBAL_BARRIER
num_step = te.var("num_step")
num_hidden = tvm.runtime.convert(n_num_hidden)
batch_size = tvm.runtime.convert(n_batch_size)
num_thread_y = 8
num_thread_x = 16 * 3
num_sm = 24
Whh = te.placeholder((num_hidden, num_hidden), name="Whh")
s_init = te.compute((1, batch_size, num_hidden), lambda _, i, j: 1.0, name="init")
s_state = te.placeholder((num_step, batch_size, num_hidden))
kh = te.reduce_axis((0, num_hidden), name="kh")
s_update = te.compute(
(num_step, batch_size, num_hidden),
lambda t, i, j: te.sum(s_state[t - 1, i, kh] * Whh[kh, j], axis=kh),
name="update",
)
s_scan = tvm.te.scan(s_init, s_update, s_state)
# schedule
s = te.create_schedule(s_scan.op)
CL = s_update
SS = s.cache_read(s_state, "shared", [CL])
SL = s.cache_read(SS, "local", [CL])
WhhL = s.cache_read(Whh, "local", [CL])
ko, ki = s[CL].split(s[CL].op.reduce_axis[0], nparts=num_thread_y)
CLF = s.rfactor(CL, ko)
block_x = te.thread_axis((0, num_sm), "blockIdx.x")
thread_x = te.thread_axis((0, num_thread_x), "threadIdx.x")
thread_y = te.thread_axis((0, num_thread_y), "threadIdx.y")
if PERSIST_KERNEL:
s[s_scan.op].env_threads([block_x, thread_y, thread_x])
bx, xi = s[s_init].split(s_init.op.axis[2], nparts=num_sm)
tx, xi = s[s_init].split(xi, nparts=num_thread_x)
s[s_init].bind(bx, block_x)
s[s_init].bind(tx, thread_x)
bx, xi = s[s_update].split(s[CL].op.axis[2], nparts=num_sm)
tx, xi = s[s_update].split(xi, nparts=num_thread_x)
s[s_update].bind(bx, block_x)
s[s_update].bind(tx, thread_x)
s[CL].bind(s[CL].op.reduce_axis[0], thread_y)
s[CLF].compute_at(s[CL], s[CL].op.reduce_axis[0])
# Duplicate store predicate.
s[CL].set_store_predicate(thread_y.equal(0))
if PERSIST_KERNEL:
s[WhhL].compute_at(s[s_scan], thread_x)
s[WhhL].unroll(WhhL.op.axis[0])
else:
s[WhhL].compute_at(s[CLF], CLF.op.axis[3])
kr, ki = s[CLF].split(CLF.op.reduce_axis[0], nparts=1)
ko, ki = s[CLF].split(ki, factor=4)
s[SS].compute_at(s[CLF], kr)
s[SL].compute_at(s[CLF], ko)
xo, xi = s[SS].split(SS.op.axis[2], factor=num_thread_x * num_thread_y * 3)
ty, xi = s[SS].split(xi, nparts=num_thread_y)
tx, xi = s[SS].split(xi, nparts=num_thread_x)
s[SS].bind(ty, thread_y)
s[SS].bind(tx, thread_x)
def check_device(target):
with tvm.transform.PassContext(
config={
"tir.UnrollLoop": {
"auto_max_step": 128,
},
"tir.detect_global_barrier": detect_global_barrier,
}
):
f = tvm.build(s, [s_scan, Whh], target)
dev = tvm.cuda(0) if target == "cuda" else tvm.cl(0)
# launch the kernel.
res_np = np.zeros((n_num_step, n_batch_size, n_num_hidden)).astype("float32")
Whh_np = np.zeros((n_num_hidden, n_num_hidden)).astype("float32")
Whh_np[:] = 2.0 / n_num_hidden
Whh_np[:, n_num_hidden // 2 :] = 0
res_a = tvm.nd.array(res_np, dev)
Whh_a = tvm.nd.array(Whh_np, dev)
# Skip first pass as it is compilation
f(res_a, Whh_a)
dev.sync()
# measure time cost of second step.
tstart = time.time()
f(res_a, Whh_a)
dev.sync()
tgap = time.time() - tstart
print("Time cost=%g" % tgap)
# correctness
if not SKIP_CHECK:
res_cuda = res_a.numpy()
res_cmp = np.ones_like(res_np).astype("float64")
Whh_np = Whh_np.astype("float64")
for t in range(1, n_num_step):
res_cmp[t][:] = np.dot(res_cmp[t - 1], Whh_np)
for i in range(n_num_step):
for j in range(n_num_hidden):
if abs(res_cmp[i, 0, j] - res_cuda[i, 0, j]) > 1e-5:
print("%d, %d: %g vs %g" % (i, j, res_cmp[i, 0, j], res_cuda[i, 0, j]))
tvm.testing.assert_allclose(res_cuda, res_cmp, rtol=1e-3)
check_device("cuda")
if __name__ == "__main__":
rnn_matexp()
| https://github.com/zk-ml/tachikoma |
apps/uma/_template/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Template files for UMA tutorial
"""
| https://github.com/zk-ml/tachikoma |
apps/uma/_template/backend.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""UMA backend for the my_ai_hw accelerator"""
from passes import MyAiHwConv2dPass
from tvm.relay.backend.contrib.uma.api.utils import PassPhase
from tvm.relay.backend.contrib.uma.backend import UMABackend
from codegen import gen_includes
from patterns import conv2d_pattern
class MyAiHwBackend(UMABackend):
"""UMA backend for the MyAiHw accelerator."""
def __init__(self):
super().__init__()
# Target configuration
self._register_target_attr("dimension")
# Relay Pattern registration
self._register_pattern("conv2d", conv2d_pattern())
# Relay to TIR function registration
self._register_tir_pass(PassPhase.TIR_PHASE_0, MyAiHwConv2dPass())
# TIR to runtime function registration
self._register_codegen(fmt="c", includes=gen_includes)
@property
def target_name(self):
return "my_ai_hw"
| https://github.com/zk-ml/tachikoma |
apps/uma/_template/codegen.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""UMA codegen for the my_ai_hw accelerator"""
import tvm
import pathlib
def gen_includes() -> str:
topdir = pathlib.Path(__file__).parent.absolute()
includes = ""
includes += f'#include "{topdir}/conv2dnchw.cc"'
return includes
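# Illustrative result (the absolute path is machine-dependent):
#   gen_includes() -> '#include "/.../apps/uma/_template/conv2dnchw.cc"'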
| https://github.com/zk-ml/tachikoma |
apps/uma/_template/passes.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Transform passes for the my_ai_hw accelerator"""
import tvm
from tvm import tir
from tvm.relay.backend.contrib.uma.api.utils import add_llvm_to_block
@tvm.tir.transform.prim_func_pass(opt_level=2)
class MyAiHwConv2dPass:
_EXTERNAL_FUNCTION_NAME = "my_ai_hw_conv2dnchw"
_TVM_BLOCK_MATCH_NAME = "conv2d_nchw"
def transform_function(
self, func: tvm.tir.PrimFunc, mod: tvm.ir.IRModule, ctx: tvm.ir.transform.PassContext
) -> tvm.tir.PrimFunc:
return self._my_ai_hw_conv2d_pass(func, mod, ctx)
@classmethod
def _my_ai_hw_conv2d_pass(cls, func, mod, ctx):
_loops = dict()
_handles = []
_entry_node = None
def _has_block(name: str, func: tvm.tir.PrimFunc) -> bool:
"""
            Determine whether a tir.Block with `name` exists in `func`
"""
def _hb(op):
if isinstance(op, tvm.tir.Block):
_found_blocks.append(op.name_hint)
_found_blocks = []
tvm.tir.stmt_functor.post_order_visit(func.body, _hb)
return name in _found_blocks
def _detect_and_replace_conv2d(
func: tvm.tir.PrimFunc, mod: tvm.ir.IRModule, ctx: tvm.ir.transform.PassContext
) -> tvm.tir.PrimFunc:
def _replace_conv2d(op):
if op == _entry_node:
irb = tvm.tir.ir_builder.create()
                    # Collect the buffer addresses (handles) of the PrimFunc arguments
buffers = [b[1].data for b in _handles]
                    # Extract the loop extents in the order expected by the external kernel
for k, v in _loops.items():
assert v.min.value == 0
offset_order = ["co", "w", "h", "ci", "kh", "kw"]
offsets = [_loops[i].extent.value for i in offset_order]
args = buffers + offsets
irb.emit(tir_call(irb, True, cls._EXTERNAL_FUNCTION_NAME, *args))
irb_result = irb.get()
return irb_result
elif isinstance(op, tvm.tir.SeqStmt):
                # Drop the pad block added by TOPI's conv2d NCHW by returning only the second statement
return op.seq[1]
return op
sch = tir.Schedule(func)
if _has_block(cls._TVM_BLOCK_MATCH_NAME, func):
conv2d_block = sch.get_block(cls._TVM_BLOCK_MATCH_NAME)
rv_loops = sch.get_loops(conv2d_block)
assert len(rv_loops) == 7
loops = dict(
n=rv_loops[0],
co=rv_loops[1],
h=rv_loops[2],
w=rv_loops[3],
ci=rv_loops[4],
kh=rv_loops[5],
kw=rv_loops[6],
)
_entry_node = sch.get(rv_loops[1])
_loops = {k: sch.get(v) for k, v in loops.items()}
_handles = func.buffer_map.items()
x = tvm.tir.stmt_functor.ir_transform(
func.body, None, _replace_conv2d, ["tir.For", "tir.SeqStmt"]
)
return func.with_body(x)
else:
return func
r = _detect_and_replace_conv2d(func, mod, ctx)
return r
def tir_call(ib: tvm.tir.ir_builder, extern: bool, name: str, *args):
"""
ib: ir_builder
extern: bool
True --> tvm.tir.call_extern
False --> tvm.tir.call_packed
name: str
function name
*args:
arguments for function call
"""
def buf_from_array(ib, arr, dtype):
# Allocate enough memory to store the whole array
var = ib.allocate("int32", (len(arr),), scope="global")
for i, v in enumerate(arr):
var[i] = v
# Declare a buffer, which is basically a view on the chunk of memory that we allocated
buf = tvm.tir.decl_buffer((len(arr),), dtype, data=var, scope="global")
return buf
if extern:
args = [i.data if isinstance(i, tvm.tir.Buffer) else i for i in args]
return tvm.tir.call_extern("int32", name, *args)
else:
args = [
buf_from_array(ib, i, "int32")
if isinstance(i, (tuple, list, tvm.ir.container.Array))
else i
for i in args
]
return tvm.tir.call_packed(name, *args)
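# Illustrative usage inside the pass above (argument names are placeholders):
#   irb.emit(tir_call(irb, True, "my_ai_hw_conv2dnchw", ifmap_buf, weight_buf, result_buf,
#            oc, iw, ih, ic, kh, kw))
# which lowers to a tvm.tir.call_extern of the external C function.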
| https://github.com/zk-ml/tachikoma |
apps/uma/_template/patterns.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Relay graph patterns for the my_ai_hw accelerator"""
from tvm.relay.dataflow_pattern import is_op, wildcard
def conv2d_pattern():
pattern = is_op("nn.conv2d")(wildcard(), wildcard())
pattern = pattern.has_attr({"strides": [1, 1], "groups": 1})
return pattern
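# Illustrative usage (assumes `expr` is a Relay nn.conv2d call expression):
#   conv2d_pattern().match(expr)  # True only when strides == [1, 1] and groups == 1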
def dense_pattern():
pattern = is_op("nn.dense")(wildcard(), wildcard())
return pattern
| https://github.com/zk-ml/tachikoma |
apps/uma/_template/run.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from tvm.micro.testing.aot_test_utils import AOT_DEFAULT_RUNNER
import tvm
from tvm import relay
from backend import MyAiHwBackend
from tvm.relay import transform
from collections import OrderedDict
import numpy as np
from tvm.testing.aot import (
AOTTestModel as AOTModel,
AOTTestRunner as AOTRunner,
generate_ref_data,
compile_and_run,
)
def create_conv2d(groups=1, runner=AOT_DEFAULT_RUNNER, weight_shape=32):
dtype = "float32"
ishape = (1, 32, 14, 14)
wshape = (32, weight_shape, 3, 3)
pass_config = {"tir.usmp.enable": True}
runner = AOTRunner(
makefile=runner.makefile,
prologue=runner.prologue,
epilogue=runner.epilogue,
includes=runner.includes,
parameters=runner.parameters,
pass_config=pass_config,
)
data0 = relay.var("data", shape=ishape, dtype=dtype)
weight0 = relay.var("weight", shape=wshape, dtype=dtype)
out = relay.nn.conv2d(data0, weight0, kernel_size=(3, 3), padding=(1, 1), groups=groups)
main_f = relay.Function([data0, weight0], out)
mod = tvm.IRModule()
mod["main"] = main_f
mod = transform.InferType()(mod)
i_data = np.random.uniform(0, 1, ishape).astype(dtype)
w1_data = np.random.uniform(0, 1, wshape).astype(dtype)
inputs = OrderedDict([("data", i_data), ("weight", w1_data)])
output_list = generate_ref_data(mod, inputs)
return mod, inputs, output_list, runner
def main():
mod, inputs, output_list, runner = create_conv2d()
uma_backend = MyAiHwBackend()
uma_backend.register()
mod = uma_backend.partition(mod)
target = tvm.target.Target("my_ai_hw", host=tvm.target.Target("c"))
export_directory = tvm.contrib.utils.tempdir(keep_for_debug=True).path
print(f"Generated files are in {export_directory}")
compile_and_run(
AOTModel(module=mod, inputs=inputs, outputs=output_list),
runner,
interface_api="c",
use_unpacked_api=True,
target=target,
test_dir=str(export_directory),
)
if __name__ == "__main__":
main()
| https://github.com/zk-ml/tachikoma |
apps/uma/_template/strategies.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Strategies for the my_ai_hw accelerator"""
# Example of how to integrate a custom conv1d strategy:
# @relay.op.strategy.override_native_generic_func("custom_conv1d_strategy")
# def custom_conv1d_strategy(attrs, inputs, out_type, target):
#     strategy = _op.OpStrategy()
#     strategy.add_implementation(
#         wrap_compute_conv1d(custom_conv1d_compute),
#         wrap_topi_schedule(custom_conv1d_schedule),
#         name="custom_conv1d.generic",
#     )
#     return strategy
#
# For further details see:
# - github.com/apache/tvm-rfcs/blob/main/rfcs/0060_UMA_Unified_Modular_Accelerator_Interface.md
# - $TVM_HOME/python/tvm/relay/op/strategy/x86.py
| https://github.com/zk-ml/tachikoma |
apps/uma/uma_cli.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
UMA Command Line Interface (CLI)
Tool to create code skeletons for easy integration of
new AI hardware accelerators/libraries into TVM using UMA.
"""
import argparse
import os
import shutil
import sys
import pathlib
from inflection import camelize, underscore
def _parse_args():
parser = argparse.ArgumentParser(description="UMA Interface command line interface")
parser.add_argument(
"--add_hardware",
type=str,
required=True,
)
parser.add_argument(
"--tutorial",
type=str,
)
args = parser.parse_args()
return args
def replace_template_name(
files: list, template_name: str, add_hw_name: str, template_source: str = "_template"
) -> None:
"""
    Replace the template hardware name in the skeleton code with the new hardware name.
"""
for f in files:
with open(f) as read_file:
data = read_file.read()
for case in [underscore, camelize]:
data = data.replace(case(template_name), case(add_hw_name))
data = data.replace(template_source, underscore(add_hw_name))
with open(f, "w") as write_file:
write_file.write(data)
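# Example (illustrative): with template_name="my_ai_hw" and add_hw_name="vanilla_accelerator",
# "my_ai_hw" becomes "vanilla_accelerator", "MyAiHw" becomes "VanillaAccelerator", and the
# "_template" source name is rewritten to "vanilla_accelerator" as well.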
def main():
"""
UMA Command Line Interface (CLI)
"""
args = _parse_args()
add_hw_name = args.add_hardware
uma_template_path = pathlib.Path(os.getcwd(), "_template").absolute()
add_hw_path = os.path.join(uma_template_path.parent, add_hw_name)
if os.path.exists(add_hw_path):
print(
f"Hardware with name {add_hw_name} already exists in UMA file structure: {add_hw_path}"
)
sys.exit(-1)
else:
os.mkdir(add_hw_path)
uma_files = ["backend.py", "codegen.py", "passes.py", "patterns.py", "run.py", "strategies.py"]
if args.tutorial == "vanilla":
uma_files.append("conv2dnchw.cc")
source_files = [os.path.join(uma_template_path, f) for f in uma_files]
destination_files = [os.path.join(add_hw_path, f) for f in uma_files]
for src, dst in zip(source_files, destination_files):
shutil.copyfile(src, dst)
template_name = "my_ai_hw"
replace_template_name(destination_files, template_name, add_hw_name)
print(f"Success: added {add_hw_name} to {add_hw_path}")
if __name__ == "__main__":
main()
| https://github.com/zk-ml/tachikoma |
apps/wasm-standalone/wasm-graph/build.rs | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
fn main() {
let out_dir = concat!(env!("CARGO_MANIFEST_DIR"), "/lib");
println!("cargo:rustc-link-search=native={}", out_dir);
}
| https://github.com/zk-ml/tachikoma |
apps/wasm-standalone/wasm-graph/src/lib.rs | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate serde_derive;
mod types;
mod utils;
use std::{collections::HashMap, convert::TryFrom, env, sync::Mutex};
use tvm_graph_rt::{Graph, GraphExecutor, SystemLibModule, Tensor as TVMTensor};
use types::Tensor;
extern "C" {
fn __wasm_call_ctors();
}
lazy_static! {
static ref SYSLIB: SystemLibModule = SystemLibModule::default();
static ref GRAPH_EXECUTOR: Mutex<GraphExecutor<'static, 'static>> = {
unsafe {
// This is necessary to invoke TVMBackendRegisterSystemLibSymbol
// API calls.
__wasm_call_ctors();
}
let graph = Graph::try_from(include_str!(concat!(
env!("CARGO_MANIFEST_DIR"),
"/lib/graph.json"
)))
.unwrap();
let params_bytes =
include_bytes!(concat!(env!("CARGO_MANIFEST_DIR"), "/lib/graph.params"));
let params = tvm_graph_rt::load_param_dict(params_bytes)
.unwrap()
.into_iter()
.map(|(k, v)| (k, v.to_owned()))
.collect::<HashMap<String, TVMTensor<'static>>>();
let mut exec = GraphExecutor::new(graph, &*SYSLIB).unwrap();
exec.load_params(params);
Mutex::new(exec)
};
}
#[no_mangle]
pub extern "C" fn run(wasm_addr: i32, in_size: i32) -> i32 {
let in_tensor = unsafe { utils::load_input(wasm_addr, in_size as usize) };
let input: TVMTensor = in_tensor.as_dltensor().into();
// since this executor is not multi-threaded, we can acquire lock once
let mut executor = GRAPH_EXECUTOR.lock().unwrap();
executor.set_input("data", input);
executor.run();
let output = executor.get_output(0).unwrap().as_dltensor(false);
let out_tensor: Tensor = output.into();
let out_size = unsafe { utils::store_output(wasm_addr, out_tensor) };
out_size as i32
}
| https://github.com/zk-ml/tachikoma |
apps/wasm-standalone/wasm-graph/src/types.rs | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
use std::{
any::TypeId,
os::raw::{c_int, c_void},
slice,
};
pub use tvm_sys::ffi::DLTensor;
use tvm_sys::ffi::{
DLDataType, DLDataTypeCode_kDLFloat, DLDataTypeCode_kDLInt, DLDevice, DLDeviceType_kDLCPU,
};
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
pub enum DataType {
FP32,
INT32,
INT8,
}
impl DataType {
pub fn as_dldtype(&self) -> DLDataType {
match self {
DataType::INT32 => DLDataType {
code: DLDataTypeCode_kDLInt as u8,
bits: 32u8,
lanes: 1u16,
},
DataType::INT8 => DLDataType {
code: DLDataTypeCode_kDLInt as u8,
bits: 8u8,
lanes: 1u16,
},
DataType::FP32 => DLDataType {
code: DLDataTypeCode_kDLFloat as u8,
bits: 32u8,
lanes: 1u16,
},
}
}
/// Returns whether this `DataType` represents primitive type `T`.
pub fn is_type<T: 'static>(&self) -> bool {
let typ = TypeId::of::<T>();
typ == TypeId::of::<i32>() || typ == TypeId::of::<i8>() || typ == TypeId::of::<f32>()
}
}
impl From<DLDataType> for DataType {
fn from(dl_dtype: DLDataType) -> Self {
if dl_dtype.code == DLDataTypeCode_kDLInt as u8 && dl_dtype.bits == 32u8 {
DataType::INT32
} else if dl_dtype.code == DLDataTypeCode_kDLInt as u8 && dl_dtype.bits == 8u8 {
DataType::INT8
} else if dl_dtype.code == DLDataTypeCode_kDLFloat as u8 && dl_dtype.bits == 32u8 {
DataType::FP32
} else {
DataType::FP32
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Tensor {
pub(crate) dtype: DataType,
pub(crate) shape: Vec<i64>,
pub(crate) strides: Option<Vec<usize>>,
pub(crate) data: Vec<u8>,
}
#[allow(dead_code)]
impl Tensor {
pub fn new(dtype: DataType, shape: Vec<i64>, strides: Vec<usize>, data: Vec<u8>) -> Self {
Tensor {
dtype,
shape,
strides: Some(strides),
data,
}
}
pub fn dtype(&self) -> DataType {
self.dtype.clone()
}
pub fn ndim(&self) -> usize {
self.shape.len()
}
pub fn shape(&self) -> Vec<i64> {
self.shape.clone()
}
pub fn data(&self) -> Vec<u8> {
self.data.clone()
}
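    /// Builds a borrowed `DLTensor` view (CPU device, no copy) over this tensor's
    /// buffers; the `Tensor` must outlive the returned `DLTensor`.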
pub fn as_dltensor(&self) -> DLTensor {
DLTensor {
data: self.data.as_ptr() as *mut c_void,
device: DLDevice {
device_type: DLDeviceType_kDLCPU,
device_id: 0 as c_int,
},
ndim: self.shape.len() as c_int,
dtype: self.dtype().as_dldtype(),
shape: self.shape.as_ptr() as *mut i64,
strides: self.strides.as_ref().unwrap().as_ptr() as *mut i64,
byte_offset: 0,
..Default::default()
}
}
/// Returns the data of this `Tensor` as a `Vec`.
///
/// # Panics
///
/// Panics if the `Tensor` does not contain elements of type `T`.
pub fn to_vec<T: 'static + std::fmt::Debug + Clone>(&self) -> Vec<T> {
assert!(self.dtype().is_type::<T>());
unsafe {
slice::from_raw_parts(
self.data().as_ptr() as *const T,
self.shape().iter().map(|v| *v as usize).product::<usize>() as usize,
)
.to_vec()
}
}
}
impl Default for Tensor {
fn default() -> Self {
Self {
dtype: DataType::FP32,
shape: Vec::new(),
strides: None,
data: Vec::new(),
}
}
}
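// Copies shape, strides and raw bytes out of a `DLTensor` so the resulting
// `Tensor` owns its data independently of the runtime buffer.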
impl From<DLTensor> for Tensor {
fn from(dlt: DLTensor) -> Self {
unsafe {
let shape = slice::from_raw_parts_mut(dlt.shape, dlt.ndim as usize).to_vec();
let size = shape.iter().map(|v| *v as usize).product::<usize>() as usize;
let itemsize: usize = (dlt.dtype.bits >> 3).into();
let data = slice::from_raw_parts(dlt.data as *const u8, size * itemsize).to_vec();
Self {
dtype: DataType::from(dlt.dtype),
shape,
strides: if dlt.strides.is_null() {
None
} else {
Some(
slice::from_raw_parts_mut(dlt.strides as *mut usize, dlt.ndim as usize)
.to_vec(),
)
},
data,
}
}
}
}
| https://github.com/zk-ml/tachikoma |
apps/wasm-standalone/wasm-graph/src/utils.rs | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
use super::types::*;
use serde_json;
use std::ptr;
pub unsafe fn load_input(in_addr: i32, in_size: usize) -> Tensor {
let in_addr = in_addr as *mut u8;
println!("DEBUG: in_addr {:?}, in_size {:?}", in_addr, in_size);
let data_vec = unsafe { std::slice::from_raw_parts(in_addr, in_size) };
let input = serde_json::from_slice(&data_vec);
match input {
Ok(result) => {
println!("DEBUG: SER SUCCEED!!! and Ok");
result
}
Err(e) => {
panic!("DEBUG: SER SUCCEED!!! but Err, {:?}", &e);
}
}
}
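/// Serializes `output` as JSON and writes it byte-by-byte starting at `out_addr`,
/// returning the number of bytes written; the caller is responsible for having
/// enough linear memory reserved at that address.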
pub unsafe fn store_output(out_addr: i32, output: Tensor) -> usize {
let out_addr = out_addr as *mut u8;
let data_vec = serde_json::to_vec(&output).unwrap();
let data_size = data_vec.len();
for i in 0..data_size {
ptr::write(out_addr.offset(i as isize), *data_vec.get(i).unwrap());
}
data_size
}
| https://github.com/zk-ml/tachikoma |
apps/wasm-standalone/wasm-graph/tools/build_graph_lib.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Builds a simple resnet50 graph for testing."""
import argparse
import os
import subprocess
import sys
import onnx
import tvm
from tvm import relay, runtime
from tvm.contrib.download import download_testdata
from tvm.contrib import graph_executor
from PIL import Image
import numpy as np
# This example uses resnet50-v2-7 model
model_url = (
"https://github.com/onnx/models/raw/main/"
"vision/classification/resnet/model/"
"resnet50-v2-7.onnx"
)
def build_graph_lib(opt_level):
"""Compiles the pre-trained model with TVM"""
out_dir = os.path.join(sys.path[0], "../lib")
if not os.path.exists(out_dir):
os.makedirs(out_dir)
# Follow the tutorial to download and compile the model
model_path = download_testdata(model_url, "resnet50-v2-7.onnx", module="onnx")
onnx_model = onnx.load(model_path)
img_url = "https://s3.amazonaws.com/model-server/inputs/kitten.jpg"
img_path = download_testdata(img_url, "imagenet_cat.png", module="data")
# Resize it to 224x224
resized_image = Image.open(img_path).resize((224, 224))
img_data = np.asarray(resized_image).astype("float32")
# Our input image is in HWC layout while ONNX expects CHW input, so convert the array
img_data = np.transpose(img_data, (2, 0, 1))
# Normalize according to the ImageNet input specification
imagenet_mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))
imagenet_stddev = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))
norm_img_data = (img_data / 255 - imagenet_mean) / imagenet_stddev
# Add the batch dimension, as we are expecting 4-dimensional input: NCHW.
img_data = np.expand_dims(norm_img_data, axis=0)
input_name = "data"
shape_dict = {input_name: img_data.shape}
mod, params = relay.frontend.from_onnx(onnx_model, shape_dict)
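    # Compile for the wasm32 target with SIMD128 enabled and bundle everything
    # into a system library so the wasm-graph crate can link it statically.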
target = "llvm -mtriple=wasm32-unknown-unknown -mattr=+simd128"
with tvm.transform.PassContext(opt_level=opt_level):
factory = relay.build(
mod,
target=target,
params=params,
runtime=tvm.relay.backend.Runtime("cpp", {"system-lib": True}),
)
# Save the model artifacts to obj_file
obj_file = os.path.join(out_dir, "graph.o")
factory.get_lib().save(obj_file)
# Run llvm-ar to archive obj_file into lib_file
lib_file = os.path.join(out_dir, "libgraph_wasm32.a")
cmds = [os.environ.get("LLVM_AR", "llvm-ar-10"), "rcs", lib_file, obj_file]
    subprocess.run(cmds, check=True)
# Save the json and params
with open(os.path.join(out_dir, "graph.json"), "w") as f_graph:
f_graph.write(factory.get_graph_json())
with open(os.path.join(out_dir, "graph.params"), "wb") as f_params:
f_params.write(runtime.save_param_dict(factory.get_params()))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="ONNX model build example")
parser.add_argument(
"-O",
"--opt-level",
type=int,
default=0,
help="level of optimization. 0 is non-optimized and 3 is the highest level",
)
args = parser.parse_args()
build_graph_lib(args.opt_level)
| https://github.com/zk-ml/tachikoma |
apps/wasm-standalone/wasm-runtime/src/graph.rs | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
use anyhow::Result;
use wasmtime::*;
use wasmtime_wasi::{WasiCtx, WasiCtxBuilder};
use super::Tensor;
pub struct GraphExecutor {
pub(crate) wasm_addr: i32,
pub(crate) input_size: i32,
pub(crate) output_size: i32,
pub(crate) store: Option<Store<WasiCtx>>,
    // Non-WASI version:
// pub(crate) store: Option<Store<()>>,
pub(crate) instance: Option<Instance>,
}
#[allow(dead_code)]
impl GraphExecutor {
pub fn new() -> Self {
Self {
wasm_addr: 0,
input_size: 0,
output_size: 0,
store: None,
instance: None,
}
}
pub fn instantiate(&mut self, wasm_graph_file: String) -> Result<()> {
        // WASI does not seem strictly necessary for this example.
        // Non-WASI version (works without SIMD):
// let engine = Engine::new(Config::new().wasm_simd(true)).unwrap();
// let mut store = Store::new(&engine, ());
// let module = Module::from_file(store.engine(), &wasm_graph_file)?;
// let instance = Instance::new(&mut store, &module, &[])?;
// self.instance = Some(instance);
// self.store = Some(store);
// Ok(())
// WASI version:
let engine = Engine::new(Config::new().wasm_simd(true)).unwrap();
// First set up our linker which is going to be linking modules together. We
// want our linker to have wasi available, so we set that up here as well.
let mut linker = Linker::new(&engine);
wasmtime_wasi::add_to_linker(&mut linker, |s| s)?;
// Create an instance of `Wasi` which contains a `WasiCtx`. Note that
// `WasiCtx` provides a number of ways to configure what the target program
// will have access to.
let wasi = WasiCtxBuilder::new()
.inherit_stdio()
.inherit_args()?
.build();
let mut store = Store::new(&engine, wasi);
let module = Module::from_file(&engine, &wasm_graph_file)?;
self.instance = Some(linker.instantiate(&mut store, &module)?);
self.store = Some(store);
Ok(())
}
pub fn set_input(&mut self, input_data: Tensor) -> Result<()> {
let memory = self
.instance
.as_ref()
.unwrap()
.get_memory(self.store.as_mut().unwrap(), "memory")
.ok_or_else(|| anyhow::format_err!("failed to find `memory` export"))?;
        // Use the current memory size as the wasm address where the input will be
        // written, i.e. append past the data already in linear memory.
let wasm_addr = memory.data_size(self.store.as_mut().unwrap());
// Serialize the data into a JSON string.
let in_data = serde_json::to_vec(&input_data)?;
let in_size = in_data.len();
        // Grow the linear memory by enough 64 KiB pages to hold `in_size` bytes
        // before writing, so the write below stays in bounds.
memory.grow(self.store.as_mut().unwrap(), (in_size >> 16) as u32 + 1)?;
memory.write(self.store.as_mut().unwrap(), wasm_addr, &in_data)?;
self.wasm_addr = wasm_addr as i32;
self.input_size = in_size as i32;
Ok(())
}
pub fn run(&mut self) -> Result<()> {
// Invoke `run` export.
let run = self
.instance
.as_ref()
.unwrap()
.get_func(self.store.as_mut().unwrap(), "run")
.ok_or_else(|| anyhow::format_err!("failed to find `run` function export!"))?;
let params = [Val::I32(self.wasm_addr), Val::I32(self.input_size)];
let out_size = run.call(self.store.as_mut().unwrap(), ¶ms[..])?;
let out_size = (*out_size)[0].unwrap_i32();
if out_size == 0 {
panic!("graph run failed!");
}
self.output_size = out_size;
Ok(())
}
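    /// Reads back the `output_size` bytes of JSON that the wasm module wrote at
    /// `wasm_addr` and deserializes them into a `Tensor`.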
pub fn get_output(&mut self) -> Result<Tensor> {
let memory = self
.instance
.as_ref()
.unwrap()
.get_memory(self.store.as_mut().unwrap(), "memory")
.ok_or_else(|| anyhow::format_err!("failed to find `memory` export"))?;
let mut out_data = vec![0 as u8; self.output_size as _];
memory.read(
self.store.as_mut().unwrap(),
self.wasm_addr as _,
&mut out_data,
)?;
let out_vec: Tensor = serde_json::from_slice(&out_data).unwrap();
Ok(out_vec)
}
}
impl Default for GraphExecutor {
fn default() -> Self {
Self::new()
}
}
| https://github.com/zk-ml/tachikoma |
apps/wasm-standalone/wasm-runtime/src/lib.rs | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#[macro_use]
extern crate serde_derive;
mod graph;
mod types;
pub use graph::GraphExecutor;
pub use types::Tensor;
| https://github.com/zk-ml/tachikoma |
apps/wasm-standalone/wasm-runtime/src/types.rs | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
use std::{any::TypeId, mem, slice};
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
pub enum DataType {
FP32,
INT32,
INT8,
}
impl DataType {
/// Returns whether this `DataType` represents primitive type `T`.
pub fn is_type<T: 'static>(&self) -> bool {
let typ = TypeId::of::<T>();
typ == TypeId::of::<i32>() || typ == TypeId::of::<i8>() || typ == TypeId::of::<f32>()
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Tensor {
pub(crate) dtype: DataType,
pub(crate) shape: Vec<i64>,
pub(crate) strides: Option<Vec<usize>>,
pub(crate) data: Vec<u8>,
}
#[allow(dead_code)]
impl Tensor {
pub fn new(dtype: DataType, shape: Vec<i64>, strides: Vec<usize>, data: Vec<u8>) -> Self {
Tensor {
dtype,
shape,
strides: Some(strides),
data,
}
}
pub fn dtype(&self) -> DataType {
self.dtype.clone()
}
pub fn ndim(&self) -> usize {
self.shape.len()
}
pub fn shape(&self) -> Vec<i64> {
self.shape.clone()
}
pub fn data(&self) -> Vec<u8> {
self.data.clone()
}
/// Returns the data of this `Tensor` as a `Vec`.
///
/// # Panics
///
/// Panics if the `Tensor` does not contain elements of type `T`.
pub fn to_vec<T: 'static + std::fmt::Debug + Clone>(&self) -> Vec<T> {
assert!(self.dtype().is_type::<T>());
unsafe {
slice::from_raw_parts(
self.data().as_ptr() as *const T,
self.shape().iter().map(|v| *v as usize).product::<usize>() as usize,
)
.to_vec()
}
}
}
impl Default for Tensor {
fn default() -> Self {
Self {
dtype: DataType::FP32,
shape: Vec::new(),
strides: None,
data: Vec::new(),
}
}
}
/// `From` conversions to `Tensor` for `ndarray::Array`.
/// Takes ownership of the array and copies its contents into the `Tensor`'s byte buffer.
macro_rules! impl_tensor_from_ndarray {
($type:ty, $typecode:expr) => {
impl<D: ndarray::Dimension> From<ndarray::Array<$type, D>> for Tensor {
fn from(arr: ndarray::Array<$type, D>) -> Self {
Tensor {
dtype: $typecode,
shape: arr.shape().iter().map(|v| *v as i64).collect(),
strides: Some(arr.strides().iter().map(|v| *v as usize).collect()),
data: unsafe {
slice::from_raw_parts(
arr.as_ptr() as *const u8,
arr.len() * mem::size_of::<$type>(),
)
.to_vec()
},
}
}
}
};
}
impl_tensor_from_ndarray!(f32, DataType::FP32);
impl_tensor_from_ndarray!(i32, DataType::INT32);
impl_tensor_from_ndarray!(i8, DataType::INT8);
| https://github.com/zk-ml/tachikoma |
apps/wasm-standalone/wasm-runtime/tests/test_graph_resnet50/src/main.rs | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
use getopts::Options;
use image::{FilterType, GenericImageView};
use ndarray::Array;
use std::{collections::HashMap, env, fs::File, io::BufReader};
use wasm_runtime::{GraphExecutor, Tensor};
const IMG_HEIGHT: usize = 224;
const IMG_WIDTH: usize = 224;
fn print_usage(program: &str, opts: Options) {
let brief = format!("Usage: {} [options]", program);
print!("{}", opts.usage(&brief));
}
fn main() {
let args: Vec<String> = env::args().collect();
let program = args[0].clone();
let mut opts = Options::new();
opts.optopt(
"g",
"wasm-graph-file",
"set the path to wasm graph file",
"FILE_PATH",
);
opts.optopt(
"i",
"input-data-file",
"set the path to input image file",
"FILE_PATH",
);
opts.optopt(
"l",
"label-class-file",
"set the path to label class file",
"FILE_PATH",
);
opts.optflag("h", "help", "print this help menu");
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
        Err(f) => panic!("{}", f),
};
if matches.opt_present("h") {
print_usage(&program, opts);
return;
}
let wasm_graph_file: String = match matches.opt_str("g") {
Some(s) => s,
None => String::from(""),
};
let input_data_file: String = match matches.opt_str("i") {
Some(s) => s,
None => String::from(""),
};
let label_class_file: String = match matches.opt_str("l") {
Some(s) => s,
None => String::from(""),
};
let img = image::open(input_data_file).unwrap();
let input = data_preprocess(img);
let mut graph_exec = GraphExecutor::new();
graph_exec.instantiate(wasm_graph_file).unwrap();
graph_exec.set_input(input).unwrap();
graph_exec.run().unwrap();
let output: Tensor = match graph_exec.get_output() {
Ok(m) => m,
        Err(f) => panic!("{}", f),
};
output_assert(output, label_class_file);
}
fn data_preprocess(img: image::DynamicImage) -> Tensor {
println!("original image dimensions: {:?}", img.dimensions());
let img = img
.resize_exact(IMG_HEIGHT as u32, IMG_WIDTH as u32, FilterType::Nearest)
.to_rgb();
println!("resized image dimensions: {:?}", img.dimensions());
let mut pixels: Vec<f32> = vec![];
for pixel in img.pixels() {
let tmp = pixel.data;
// normalize the RGB channels using mean, std of imagenet1k
let tmp = [
(tmp[0] as f32 - 123.0) / 58.395, // R
(tmp[1] as f32 - 117.0) / 57.12, // G
(tmp[2] as f32 - 104.0) / 57.375, // B
];
for e in &tmp {
pixels.push(*e);
}
}
// (H,W,C) -> (C,H,W)
let arr = Array::from_shape_vec((IMG_HEIGHT, IMG_WIDTH, 3), pixels).unwrap();
let arr = arr.permuted_axes([2, 0, 1]);
    let arr = Array::from_iter(arr.into_iter().copied());
Tensor::from(arr)
}
fn output_assert(out_tensor: Tensor, label_class_file: String) {
let output = out_tensor.to_vec::<f32>();
// Find the maximum entry in the output and its index.
let mut argmax = -1;
let mut max_prob = 0.;
for i in 0..output.len() {
if output[i] > max_prob {
max_prob = output[i];
argmax = i as i32;
}
}
// Create a hash map of (class id, class name)
let mut synset: HashMap<i32, String> = HashMap::new();
let mut rdr = csv::ReaderBuilder::new().from_reader(BufReader::new(
File::open(label_class_file.as_str()).unwrap(),
));
for result in rdr.records() {
let record = result.unwrap();
let id: i32 = record[0].parse().unwrap();
let cls = record[1].to_string();
synset.insert(id, cls);
}
println!(
"input image belongs to the class `{}`",
synset
.get(&argmax)
.expect("cannot find the class id for argmax")
);
}
| https://github.com/zk-ml/tachikoma |
ci/jenkins/generate.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import jinja2
import argparse
import difflib
import datetime
import re
import textwrap
from pathlib import Path
from typing import List
REPO_ROOT = Path(__file__).resolve().parent.parent.parent
JENKINSFILE_TEMPLATE = REPO_ROOT / "ci" / "jenkins" / "Jenkinsfile.j2"
JENKINSFILE = REPO_ROOT / "Jenkinsfile"
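# Sentinel values describing how the regenerated Jenkinsfile differs from the one
# on disk: only Docker image tags changed, nothing changed, or a full change.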
class Change:
IMAGES_ONLY = object()
NONE = object()
FULL = object()
data = {
"images": [
{
"name": "ci_arm",
"platform": "ARM",
},
{
"name": "ci_cortexm",
"platform": "CPU",
},
{
"name": "ci_cpu",
"platform": "CPU",
},
{
"name": "ci_gpu",
"platform": "CPU",
},
{
"name": "ci_hexagon",
"platform": "CPU",
},
{
"name": "ci_i386",
"platform": "CPU",
},
{
"name": "ci_lint",
"platform": "CPU",
},
{
"name": "ci_minimal",
"platform": "CPU",
},
{
"name": "ci_riscv",
"platform": "CPU",
},
{
"name": "ci_wasm",
"platform": "CPU",
},
]
}
def lines_without_generated_tag(content):
return [
line for line in content.splitlines(keepends=True) if not line.startswith("// Generated at")
]
def change_type(lines: List[str]) -> Change:
"""
Return True if 'line' only edits an image tag or if 'line' is not a changed
line in a diff
"""
added_images = []
removed_images = []
diff_lines = []
for line in lines[2:]:
if not line.startswith("-") and not line.startswith("+"):
# not a diff line, ignore it
continue
diff_lines.append(line)
if len(diff_lines) == 0:
# no changes made
return Change.NONE
for line in diff_lines:
is_add = line.startswith("+")
line = line.strip().lstrip("+").lstrip("-")
match = re.search(
r"^(ci_[a-zA-Z0-9]+) = \'.*\'$",
line.strip().lstrip("+").lstrip("-"),
flags=re.MULTILINE,
)
if match is None:
# matched a non-image line, quit early
return Change.FULL
if is_add:
added_images.append(match.groups()[0])
else:
removed_images.append(match.groups()[0])
# make sure that the added image lines match the removed image lines
if len(added_images) > 0 and added_images == removed_images:
return Change.IMAGES_ONLY
else:
return Change.FULL
if __name__ == "__main__":
help = "Regenerate Jenkinsfile from template"
parser = argparse.ArgumentParser(description=help)
parser.add_argument("--force", action="store_true", help="always overwrite timestamp")
parser.add_argument("--check", action="store_true", help="just verify the output didn't change")
args = parser.parse_args()
with open(JENKINSFILE) as f:
content = f.read()
data["generated_time"] = datetime.datetime.now().isoformat()
timestamp_match = re.search(r"^// Generated at (.*)$", content, flags=re.MULTILINE)
if not timestamp_match:
raise RuntimeError("Could not find timestamp in Jenkinsfile")
original_timestamp = timestamp_match.groups()[0]
environment = jinja2.Environment(
loader=jinja2.FileSystemLoader(REPO_ROOT),
undefined=jinja2.StrictUndefined,
lstrip_blocks=True,
trim_blocks=True,
keep_trailing_newline=True,
)
template = environment.get_template(str(JENKINSFILE_TEMPLATE.relative_to(REPO_ROOT)))
new_content = template.render(**data)
diff = [
line
for line in difflib.unified_diff(
lines_without_generated_tag(content), lines_without_generated_tag(new_content)
)
]
change = change_type(diff)
    if (not args.force and change == Change.IMAGES_ONLY) or change == Change.NONE:
if change != Change.NONE:
print("Detected only Docker-image name changes, skipping timestamp update")
new_content = new_content.replace(data["generated_time"], original_timestamp)
diff = "".join(diff)
if args.check:
if not diff:
print("Success, the newly generated Jenkinsfile matched the one on disk")
exit(0)
else:
print(
textwrap.dedent(
"""
Newly generated Jenkinsfile did not match the one on disk! If you have made
            edits to the Jenkinsfile, move them to 'ci/jenkins/Jenkinsfile.j2' and
            regenerate the Jenkinsfile from the template with
                python3 -m pip install -r ci/jenkins/requirements.txt
                python3 ci/jenkins/generate.py
Diffed changes:
"""
).strip()
)
print(diff)
exit(1)
else:
with open(JENKINSFILE, "w") as f:
f.write(new_content)
if not diff:
print(f"Wrote output to {JENKINSFILE.relative_to(REPO_ROOT)}, no changes made")
else:
print(f"Wrote output to {JENKINSFILE.relative_to(REPO_ROOT)}, changes:")
print(diff)
| https://github.com/zk-ml/tachikoma |
ci/scripts/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Package to enable testing of CI scripts"""
from . import github_skipped_tests_comment, github_pr_comment, github_tag_teams, github_docs_comment
| https://github.com/zk-ml/tachikoma |
ci/scripts/check_pr.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import re
import os
import json
import textwrap
from dataclasses import dataclass
from typing import Any, List, Callable
from git_utils import GitHubRepo, parse_remote, git
from cmd_utils import init_log, tags_from_title
GITHUB_USERNAME_REGEX = re.compile(r"(@[a-zA-Z0-9-]+)", flags=re.MULTILINE)
OK = object()
FAIL = object()
@dataclass
class Check:
# check to run, returning OK means it passed, anything else means it failed
check: Callable[[str], Any]
# function to call to generate the error message
error_fn: Callable[[Any], str]
def non_empty(s: str):
if len(s) == 0:
return FAIL
return OK
def usernames(s: str):
m = GITHUB_USERNAME_REGEX.findall(s)
return m if m else OK
def tags(s: str):
items = tags_from_title(s)
if len(items) == 0:
return FAIL
return OK
def trailing_period(s: str):
if s.endswith("."):
return FAIL
return OK
title_checks = [
Check(check=non_empty, error_fn=lambda d: "PR must have a title but title was empty"),
    Check(check=trailing_period, error_fn=lambda d: "PR must not end in a trailing '.'"),
# TODO(driazati): enable this check once https://github.com/apache/tvm/issues/12637 is done
# Check(
# check=usernames,
# error_fn=lambda d: f"PR title must not tag anyone but found these usernames: {d}",
# ),
]
body_checks = [
Check(check=non_empty, error_fn=lambda d: "PR must have a body but body was empty"),
# TODO(driazati): enable this check once https://github.com/apache/tvm/issues/12637 is done
# Check(
# check=usernames,
# error_fn=lambda d: f"PR body must not tag anyone but found these usernames: {d}",
# ),
]
def run_checks(checks: List[Check], s: str, name: str) -> bool:
print(f"Running checks for {name}")
print(textwrap.indent(s, prefix=" "))
passed = True
print(" Checks:")
for i, check in enumerate(checks):
result = check.check(s)
if result == OK:
print(f" [{i+1}] {check.check.__name__}: PASSED")
else:
passed = False
msg = check.error_fn(result)
print(f" [{i+1}] {check.check.__name__}: FAILED: {msg}")
return passed
if __name__ == "__main__":
init_log()
help = "Check a PR's title and body for conformance to guidelines"
parser = argparse.ArgumentParser(description=help)
parser.add_argument("--pr", required=True)
parser.add_argument("--remote", default="origin", help="ssh remote to parse")
parser.add_argument(
"--pr-data", help="(testing) PR data to use instead of fetching from GitHub"
)
args = parser.parse_args()
try:
pr = int(args.pr)
except ValueError:
print(f"PR was not a number: {args.pr}")
exit(0)
if args.pr_data:
pr = json.loads(args.pr_data)
else:
remote = git(["config", "--get", f"remote.{args.remote}.url"])
user, repo = parse_remote(remote)
github = GitHubRepo(token=os.environ["GITHUB_TOKEN"], user=user, repo=repo)
pr = github.get(f"pulls/{args.pr}")
body = "" if pr["body"] is None else pr["body"].strip()
title = "" if pr["title"] is None else pr["title"].strip()
title_passed = run_checks(checks=title_checks, s=title, name="PR title")
print("")
body_passed = run_checks(checks=body_checks, s=body, name="PR body")
if title_passed and body_passed:
print("All checks passed!")
exit(0)
else:
print(
"Some checks failed, please review the logs above and edit your PR on GitHub accordingly"
)
exit(1)
| https://github.com/zk-ml/tachikoma |
ci/scripts/cmd_utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import subprocess
import os
import logging
import sys
import re
from pathlib import Path
from typing import List
REPO_ROOT = Path(__file__).resolve().parent.parent.parent
class RelativePathFilter(logging.Filter):
def filter(self, record):
path = Path(record.pathname).resolve()
record.relativepath = str(path.relative_to(REPO_ROOT))
return True
def init_log():
logging.basicConfig(
format="[%(relativepath)s:%(lineno)d %(levelname)-1s] %(message)s", level=logging.INFO
)
    logging.root.handlers[0].addFilter(RelativePathFilter())
    # Flush on every log call (logging and then calling subprocess.run can make
    # the output look confusing)
    logging.root.handlers[0].flush = sys.stderr.flush
class Sh:
def __init__(self, env=None, cwd=None):
self.env = os.environ.copy()
if env is not None:
self.env.update(env)
self.cwd = cwd
def run(self, cmd: str, **kwargs):
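        """Run `cmd` through the shell, logging it first; raises on non-zero exit by default."""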
logging.info(f"+ {cmd}")
defaults = {
"check": True,
"shell": True,
"env": self.env,
"encoding": "utf-8",
"cwd": self.cwd,
}
defaults.update(kwargs)
return subprocess.run(cmd, **defaults)
def tags_from_title(title: str) -> List[str]:
tags = re.findall(r"\[(.*?)\]", title)
tags = [t.strip() for t in tags]
return tags
| https://github.com/zk-ml/tachikoma |
ci/scripts/determine_docker_images.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import datetime
import json
import logging
import urllib.error
from pathlib import Path
from typing import Dict, Any
from http_utils import get
from cmd_utils import init_log, REPO_ROOT
DOCKER_API_BASE = "https://hub.docker.com/v2/"
PAGE_SIZE = 25
TEST_DATA = None
def docker_api(url: str, use_pagination: bool = False) -> Dict[str, Any]:
"""
    Fetch a URL from the public Docker Hub API, optionally requesting the first page of a paginated endpoint
"""
if TEST_DATA is not None:
if url not in TEST_DATA:
raise urllib.error.HTTPError(url, 404, "Not found", {}, None)
return TEST_DATA[url]
pagination = ""
if use_pagination:
pagination = f"?page_size={PAGE_SIZE}&page=1"
url = DOCKER_API_BASE + url + pagination
r, headers = get(url)
reset = headers.get("x-ratelimit-reset")
if reset is not None:
reset = datetime.datetime.fromtimestamp(int(reset))
reset = reset.isoformat()
logging.info(
f"Docker API Rate Limit: {headers.get('x-ratelimit-remaining')} / {headers.get('x-ratelimit-limit')} (reset at {reset})"
)
return r
def image_exists(spec: str) -> bool:
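    """Return True if the image `spec` ("name:tag") is present on Docker Hub."""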
name, tag = spec.split(":")
try:
r = docker_api(f"repositories/{name}/tags/{tag}")
logging.info(f"Image exists, got response: {json.dumps(r, indent=2)}")
return True
except urllib.error.HTTPError as e:
# Image was not found
logging.exception(e)
return False
if __name__ == "__main__":
init_log()
parser = argparse.ArgumentParser(
description="Writes out Docker images names to be used to .docker-image-names/"
)
parser.add_argument(
"--testing-docker-data",
help="(testing only) JSON data to mock response from Docker Hub API",
)
parser.add_argument(
"--base-dir",
default=".docker-image-names",
help="(testing only) Folder to write image names to",
)
args, other = parser.parse_known_args()
name_dir = Path(args.base_dir)
images = {}
for item in other:
name, tag = item.split("=")
images[name] = tag
if args.testing_docker_data is not None:
TEST_DATA = json.loads(args.testing_docker_data)
logging.info(f"Checking if these images exist in tlcpack: {images}")
name_dir.mkdir(exist_ok=True)
images_to_use = {}
for filename, spec in images.items():
if image_exists(spec):
logging.info(f"{spec} found in tlcpack")
images_to_use[filename] = spec
else:
logging.info(f"{spec} not found in tlcpack, using tlcpackstaging")
part, tag = spec.split(":")
user, repo = part.split("/")
tlcpackstaging_tag = f"tlcpackstaging/{repo.replace('-', '_')}:{tag}"
images_to_use[filename] = tlcpackstaging_tag
for filename, image in images_to_use.items():
logging.info(f"Writing image {image} to {name_dir / filename}")
with open(name_dir / filename, "w") as f:
f.write(image)
| https://github.com/zk-ml/tachikoma |
ci/scripts/git_skip_ci.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import logging
import argparse
from git_utils import git, GitHubRepo, parse_remote
from cmd_utils import tags_from_title, init_log
if __name__ == "__main__":
help = "Exits with 0 if CI should be skipped, 1 otherwise"
parser = argparse.ArgumentParser(description=help)
parser.add_argument("--pr", required=True)
parser.add_argument("--remote", default="origin", help="ssh remote to parse")
parser.add_argument(
"--pr-title", help="(testing) PR title to use instead of fetching from GitHub"
)
args = parser.parse_args()
init_log()
branch = git(["rev-parse", "--abbrev-ref", "HEAD"])
log = git(["log", "--format=%s", "-1"])
# Check the PR's title (don't check this until everything else passes first)
def check_pr_title():
remote = git(["config", "--get", f"remote.{args.remote}.url"])
user, repo = parse_remote(remote)
if args.pr_title:
title = args.pr_title
else:
github = GitHubRepo(token=os.environ["GITHUB_TOKEN"], user=user, repo=repo)
pr = github.get(f"pulls/{args.pr}")
title = pr["title"]
logging.info(f"pr title: {title}")
tags = tags_from_title(title)
logging.info(f"Found title tags: {tags}")
return "skip ci" in tags
if args.pr != "null" and args.pr.strip() != "" and branch != "main" and check_pr_title():
logging.info("PR title starts with '[skip ci]', skipping...")
exit(0)
else:
logging.info(f"Not skipping CI:\nargs.pr: {args.pr}\nbranch: {branch}\ncommit: {log}")
exit(1)
| https://github.com/zk-ml/tachikoma |
ci/scripts/git_skip_ci_globs.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import fnmatch
from typing import Optional
from git_utils import git
globs = [
"*.md",
"conda/*",
".github/*",
".asf.yaml",
".gitignore",
"LICENSE",
"NOTICE",
"KEYS",
# microTVM
"apps/microtvm/poetry.lock",
"apps/microtvm/pyproject.toml",
"tests/lint/*",
"tests/scripts/task_lint.sh",
]
def match_any(f: str) -> Optional[str]:
for glob in globs:
if fnmatch.fnmatch(f, glob):
return glob
return None
if __name__ == "__main__":
help = "Exits with code 1 if a change only touched files, indicating that CI could be skipped for this changeset"
parser = argparse.ArgumentParser(description=help)
parser.add_argument("--files", help="(testing only) comma separated list of files to check")
args = parser.parse_args()
print(args)
if args.files is not None:
diff = [x for x in args.files.split(",") if x.strip() != ""]
else:
diff = git(["diff", "--no-commit-id", "--name-only", "-r", "origin/main"])
diff = diff.split("\n")
diff = [d.strip() for d in diff]
diff = [d for d in diff if d != ""]
print(f"Changed files:\n{diff}")
if len(diff) == 0:
print("Found no changed files, skipping CI")
exit(0)
print(f"Checking with globs:\n{globs}")
for file in diff:
match = match_any(file)
if match is None:
print(f"{file} did not match any globs, running CI")
exit(1)
else:
print(f"{file} matched glob {match}")
print("All files matched a glob, skipping CI")
exit(0)
| https://github.com/zk-ml/tachikoma |
ci/scripts/git_utils.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import subprocess
import re
import os
import base64
import logging
from urllib import request, error
from typing import Dict, Tuple, Any, Optional, List
DRY_RUN = object()
def compress_query(query: str) -> str:
query = query.replace("\n", "")
    query = re.sub(r"\s+", " ", query)
return query
def post(url: str, body: Optional[Any] = None, auth: Optional[Tuple[str, str]] = None):
logging.info(f"Requesting POST to", url, "with", body)
headers = {}
req = request.Request(url, headers=headers, method="POST")
if auth is not None:
auth_str = base64.b64encode(f"{auth[0]}:{auth[1]}".encode())
req.add_header("Authorization", f"Basic {auth_str.decode()}")
if body is None:
body = ""
req.add_header("Content-Type", "application/json; charset=utf-8")
data = json.dumps(body)
data = data.encode("utf-8")
req.add_header("Content-Length", len(data))
with request.urlopen(req, data) as response:
return response.read()
def dry_run_token(is_dry_run: bool) -> Any:
if is_dry_run:
return DRY_RUN
return os.environ["GITHUB_TOKEN"]
class GitHubRepo:
GRAPHQL_URL = "https://api.github.com/graphql"
def __init__(self, user, repo, token, test_data=None):
self.token = token
self.user = user
self.repo = repo
self.test_data = test_data
self.num_calls = 0
self.base = f"https://api.github.com/repos/{user}/{repo}/"
def headers(self):
return {
"Authorization": f"Bearer {self.token}",
}
def dry_run(self) -> bool:
return self.token == DRY_RUN
def graphql(self, query: str, variables: Optional[Dict[str, str]] = None) -> Dict[str, Any]:
query = compress_query(query)
if variables is None:
variables = {}
response = self._request(
self.GRAPHQL_URL,
{"query": query, "variables": variables},
method="POST",
)
if self.dry_run():
return self.testing_response("POST", self.GRAPHQL_URL)
if "data" not in response:
msg = f"Error fetching data with query:\n{query}\n\nvariables:\n{variables}\n\nerror:\n{json.dumps(response, indent=2)}"
raise RuntimeError(msg)
return response
def testing_response(self, method: str, url: str) -> Any:
self.num_calls += 1
key = f"[{self.num_calls}] {method} - {url}"
if self.test_data is not None and key in self.test_data:
return self.test_data[key]
logging.info(f"Unknown URL in dry run: {key}")
return {}
def _request(self, full_url: str, body: Dict[str, Any], method: str) -> Dict[str, Any]:
if self.dry_run():
logging.info(f"Dry run, would have requested a {method} to {full_url} with {body}")
return self.testing_response(method, full_url)
logging.info(f"Requesting {method} to {full_url} with {body}")
req = request.Request(full_url, headers=self.headers(), method=method.upper())
req.add_header("Content-Type", "application/json; charset=utf-8")
data = json.dumps(body)
data = data.encode("utf-8")
req.add_header("Content-Length", len(data))
try:
with request.urlopen(req, data) as response:
content = response.read()
except error.HTTPError as e:
msg = str(e)
error_data = e.read().decode()
raise RuntimeError(f"Error response: {msg}\n{error_data}")
logging.info(f"Got response from {full_url}: {content}")
try:
response = json.loads(content)
except json.decoder.JSONDecodeError as e:
return content
return response
def put(self, url: str, data: Dict[str, Any]) -> Dict[str, Any]:
return self._request(self.base + url, data, method="PUT")
def patch(self, url: str, data: Dict[str, Any]) -> Dict[str, Any]:
return self._request(self.base + url, data, method="PATCH")
def post(self, url: str, data: Dict[str, Any]) -> Dict[str, Any]:
return self._request(self.base + url, data, method="POST")
def get(self, url: str) -> Dict[str, Any]:
if self.dry_run():
logging.info(f"Dry run, would have requested a GET to {url}")
return self.testing_response("GET", url)
url = self.base + url
logging.info(f"Requesting GET to {url}")
req = request.Request(url, headers=self.headers())
with request.urlopen(req) as response:
response = json.loads(response.read())
return response
def delete(self, url: str) -> Dict[str, Any]:
if self.dry_run():
logging.info(f"Dry run, would have requested a DELETE to {url}")
return self.testing_response("DELETE", url)
url = self.base + url
logging.info(f"Requesting DELETE to {url}")
req = request.Request(url, headers=self.headers(), method="DELETE")
with request.urlopen(req) as response:
response = json.loads(response.read())
return response
def parse_remote(remote: str) -> Tuple[str, str]:
"""
Get a GitHub (user, repo) pair out of a git remote
"""
if remote.startswith("https://"):
# Parse HTTP remote
parts = remote.split("/")
if len(parts) < 2:
raise RuntimeError(f"Unable to parse remote '{remote}'")
user, repo = parts[-2], parts[-1].replace(".git", "")
else:
# Parse SSH remote
m = re.search(r":(.*)/(.*)\.git", remote)
if m is None or len(m.groups()) != 2:
raise RuntimeError(f"Unable to parse remote '{remote}'")
user, repo = m.groups()
user = os.getenv("DEBUG_USER", user)
repo = os.getenv("DEBUG_REPO", repo)
return user, repo
def git(command, **kwargs):
command = ["git"] + command
logging.info(f"Running {command}")
proc = subprocess.run(command, stdout=subprocess.PIPE, encoding="utf-8", **kwargs)
if proc.returncode != 0:
raise RuntimeError(f"Command failed {command}:\nstdout:\n{proc.stdout}")
return proc.stdout.strip()
def find_ccs(body: str) -> List[str]:
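    """Collect the unique usernames mentioned in 'cc @user1 @user2' lines of a PR or issue body."""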
matches = re.findall(r"(cc( @[-A-Za-z0-9]+)+)", body, flags=re.MULTILINE)
matches = [full for full, last in matches]
reviewers = []
for match in matches:
if match.startswith("cc "):
match = match.replace("cc ", "")
users = [x.strip() for x in match.split("@")]
reviewers += users
reviewers = set(x for x in reviewers if x != "")
return list(reviewers)
| https://github.com/zk-ml/tachikoma |
ci/scripts/github_cc_reviewers.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import os
import json
import argparse
import re
from urllib import error
from typing import Dict, Any, List
from git_utils import git, GitHubRepo, parse_remote
def find_reviewers(body: str) -> List[str]:
print(f"Parsing body:\n{body}")
matches = re.findall(r"(cc( @[-A-Za-z0-9]+)+)", body, flags=re.MULTILINE)
matches = [full for full, last in matches]
print("Found matches:", matches)
reviewers = []
for match in matches:
if match.startswith("cc "):
match = match.replace("cc ", "")
users = [x.strip() for x in match.split("@")]
reviewers += users
reviewers = set(x for x in reviewers if x != "")
return sorted(list(reviewers))
if __name__ == "__main__":
help = "Add @cc'ed people in a PR body as reviewers"
parser = argparse.ArgumentParser(description=help)
parser.add_argument("--remote", default="origin", help="ssh remote to parse")
parser.add_argument("--testing-reviews-json", help="(testing only) reviews as JSON")
parser.add_argument(
"--dry-run",
action="store_true",
default=False,
help="run but don't send any request to GitHub",
)
args = parser.parse_args()
remote = git(["config", "--get", f"remote.{args.remote}.url"])
user, repo = parse_remote(remote)
pr = json.loads(os.environ["PR"])
number = pr["number"]
body = pr["body"]
if body is None:
body = ""
new_reviewers = find_reviewers(body)
print("Found these reviewers:", new_reviewers)
if args.testing_reviews_json:
existing_reviews = json.loads(args.testing_reviews_json)
else:
github = GitHubRepo(token=os.environ["GITHUB_TOKEN"], user=user, repo=repo)
existing_reviews = github.get(f"pulls/{number}/reviews")
existing_review_users = [review["user"]["login"] for review in existing_reviews]
print("PR has reviews from these users:", existing_review_users)
existing_review_users = set(r.lower() for r in existing_review_users)
existing_reviewers = [review["login"] for review in pr["requested_reviewers"]]
print("PR already had these reviewers requested:", existing_reviewers)
existing_reviewers_lower = {
existing_reviewer.lower() for existing_reviewer in existing_reviewers
}
to_add = []
for new_reviewer in new_reviewers:
if (
new_reviewer.lower() in existing_reviewers_lower
or new_reviewer.lower() in existing_review_users
):
print(f"{new_reviewer} is already review requested, skipping")
else:
to_add.append(new_reviewer)
print(f"After filtering existing reviewers, adding: {to_add}")
if not args.dry_run:
github = GitHubRepo(token=os.environ["GITHUB_TOKEN"], user=user, repo=repo)
# Add reviewers 1 by 1 since GitHub will error out if any of the
# requested reviewers aren't members / contributors
for reviewer in to_add:
try:
github.post(f"pulls/{number}/requested_reviewers", {"reviewers": [reviewer]})
except KeyboardInterrupt:
sys.exit()
except (RuntimeError, error.HTTPError) as e:
# Catch any exception so other reviewers can be processed
print(f"Failed to add reviewer {reviewer}: {e}")
| https://github.com/zk-ml/tachikoma |
ci/scripts/github_commenter.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import re
import logging
from typing import Dict, Tuple, Any, Optional, List, Union
from git_utils import GitHubRepo
BOT_COMMENT_START = "<!---bot-comment-->"
WELCOME_TEXT = "Thanks for contributing to TVM! Please refer to the contributing guidelines https://tvm.apache.org/docs/contribute/ for useful information and tips. Please request code reviews from [Reviewers](https://github.com/apache/incubator-tvm/blob/master/CONTRIBUTORS.md#reviewers) by @-ing them in a comment."
class BotCommentBuilder:
ALLOWLIST_USERS = {"driazati", "gigiblender", "areusch"}
def __init__(self, github: GitHubRepo, data: Dict[str, Any]):
self.github = github
self.pr_number = data["number"]
self.comment_data = data["comments"]["nodes"]
self.author = data["author"]["login"]
def find_bot_comment(self) -> Optional[Dict[str, Any]]:
"""
Return the existing bot comment or None if it does not exist
"""
for comment in self.comment_data:
logging.info(f"Checking comment {comment}")
if (
comment["author"]["login"] == "github-actions"
and BOT_COMMENT_START in comment["body"]
):
logging.info("Found existing comment")
return comment
logging.info("No existing comment found")
return None
def find_existing_body(self) -> Dict[str, str]:
"""
Find existing dynamic bullet point items
"""
existing_comment = self.find_bot_comment()
if existing_comment is None:
logging.info(f"No existing comment while searching for body items")
return {}
matches = re.findall(
r"<!--bot-comment-([a-z][a-z-]+)-start-->([\S\s]*?)<!--bot-comment-([a-z-]+)-end-->",
existing_comment["body"],
flags=re.MULTILINE,
)
logging.info(f"Fetch body item matches: {matches}")
items = {}
for start, text, end in matches:
if start != end:
raise RuntimeError(
f"Malformed comment found: {start} marker did not have matching end, found instead {end}"
)
items[start] = text.strip().lstrip("* ")
logging.info(f"Found body items: {items}")
return items
def _post_comment(self, body_items: Dict[str, str]):
comment = BOT_COMMENT_START + "\n\n" + WELCOME_TEXT + "\n\n"
for key, content in body_items.items():
line = self.start_key(key) + "\n * " + content.strip() + self.end_key(key)
logging.info(f"Adding line {line}")
comment += line
comment += "\n\n<sub>Generated by [tvm-bot](https://github.com/apache/tvm/blob/main/ci/README.md#github-actions)</sub>"
data = {"body": comment}
url = f"issues/{self.pr_number}/comments"
logging.info(f"Commenting {comment} on {url}")
if self.author not in self.ALLOWLIST_USERS:
logging.info(f"Skipping comment for author {self.author}")
return
existing_comment = self.find_bot_comment()
if existing_comment is None:
# Comment does not exist, post it
r = self.github.post(url, data)
else:
# Comment does exist, update it
comment_url = f"issues/comments/{existing_comment['databaseId']}"
r = self.github.patch(comment_url, data)
logging.info(f"Got response from posting comment: {r}")
def start_key(self, key: str) -> str:
return f"<!--bot-comment-{key}-start-->"
def end_key(self, key: str) -> str:
return f"<!--bot-comment-{key}-end-->"
def post_items(self, items: List[Tuple[str, str]]):
"""
Update or post bullet points in the PR based on 'items' which is a
list of (key, text) pairs
"""
# Find the existing bullet points
body_items = self.find_existing_body()
# Add or update the requested items
for key, text in items:
if text is None or text.strip() == "":
logging.info(f"Skipping {key} since it was empty")
continue
logging.info(f"Updating comment items {key} with {text}")
body_items[key] = text.strip()
# Post or update the comment
# print(body_items)
self._post_comment(body_items=body_items)
| https://github.com/zk-ml/tachikoma |
ci/scripts/github_docs_comment.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Dict, Any
def build_docs_url(base_url_docs, pr_number, build_number):
return f"{base_url_docs}/PR-{str(pr_number)}/{str(build_number)}/docs/index.html"
def find_target_url(pr_head: Dict[str, Any]):
for status in pr_head["statusCheckRollup"]["contexts"]["nodes"]:
if status.get("context", "") == "tvm-ci/pr-head":
return status["targetUrl"]
raise RuntimeError(f"Unable to find tvm-ci/pr-head status in {pr_head}")
def get_pr_and_build_numbers(target_url):
target_url = target_url[target_url.find("PR-") : len(target_url)]
split = target_url.split("/")
pr_number = split[0].strip("PR-")
build_number = split[1]
return {"pr_number": pr_number, "build_number": build_number}
def get_doc_url(pr: Dict[str, Any], base_docs_url: str = "https://pr-docs.tlcpack.ai") -> str:
pr_head = pr["commits"]["nodes"][0]["commit"]
target_url = find_target_url(pr_head)
pr_and_build = get_pr_and_build_numbers(target_url)
commit_sha = pr_head["oid"]
docs_url = build_docs_url(
base_docs_url, pr_and_build["pr_number"], pr_and_build["build_number"]
)
return f"Built docs for commit {commit_sha} can be found [here]({docs_url})."
| https://github.com/zk-ml/tachikoma |
ci/scripts/github_pr_comment.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import os
import json
from git_utils import git, GitHubRepo, parse_remote, DRY_RUN
from cmd_utils import init_log
from github_commenter import BotCommentBuilder
from github_skipped_tests_comment import get_skipped_tests_comment
from github_tag_teams import get_tags
from github_docs_comment import get_doc_url
PR_QUERY = """
query ($owner: String!, $name: String!, $number: Int!) {
repository(owner: $owner, name: $name) {
pullRequest(number: $number) {
title
body
state
number
author {
login
}
labels(first:100) {
nodes {
name
}
}
comments(last: 100) {
pageInfo {
hasPreviousPage
}
nodes {
author {
login
}
databaseId
body
}
}
commits(last: 1) {
nodes {
commit {
oid
statusCheckRollup {
contexts(first: 100) {
pageInfo {
hasNextPage
}
nodes {
... on StatusContext {
state
context
targetUrl
}
}
}
}
}
}
}
}
}
}
"""
if __name__ == "__main__":
help = "Comment a welcome message on PRs"
parser = argparse.ArgumentParser(description=help)
parser.add_argument("--remote", default="origin", help="ssh remote to parse")
parser.add_argument("--pr", required=True)
parser.add_argument("--test-data", help="(testing) mock GitHub API data")
parser.add_argument("--test-comments", help="(testing) testing comments")
parser.add_argument(
"--dry-run",
action="store_true",
default=False,
help="run but don't send any request to GitHub",
)
args = parser.parse_args()
init_log()
remote = git(["config", "--get", f"remote.{args.remote}.url"])
user, repo = parse_remote(remote)
test_data = None
if args.test_data is not None:
test_data = json.loads(args.test_data)
github = GitHubRepo(
user=user,
repo=repo,
token=DRY_RUN if args.dry_run else os.environ["GITHUB_TOKEN"],
test_data=test_data,
)
pr_data = github.graphql(
PR_QUERY,
{
"owner": user,
"name": repo,
"number": int(args.pr),
},
)
pr_data = pr_data["data"]["repository"]["pullRequest"]
commenter = BotCommentBuilder(github=github, data=pr_data)
if args.test_comments is not None:
test_comments = json.loads(args.test_comments)
skipped_tests = test_comments["skipped-tests"]
ccs = test_comments["ccs"]
docs_info = test_comments["docs"]
else:
skipped_tests = get_skipped_tests_comment(pr_data, github=github)
ccs = get_tags(pr_data, github, team_issue=10317)
docs_info = get_doc_url(pr_data)
items = {
"ccs": ccs,
"skipped-tests": skipped_tests,
"docs": docs_info,
}
commenter.post_items(items=items.items())
| https://github.com/zk-ml/tachikoma |
ci/scripts/github_skipped_tests_comment.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import inspect
import json
import os
import logging
import subprocess
from xml.etree import ElementTree
from pathlib import Path
from typing import Dict, Any, Optional
def run_subprocess(command):
logging.info(f"Running command {command}")
proc = subprocess.run(command, shell=True, stdout=subprocess.PIPE, encoding="utf-8")
if proc.returncode != 0:
raise RuntimeError(f"Command failed {command}:\nstdout:\n{proc.stdout}")
return proc
def retrieve_test_report(s3_url, target_dir):
command = f"aws --region us-west-2 s3 cp {s3_url} {target_dir} --recursive --no-sign-request"
run_subprocess(command)
def get_common_commit_sha():
command = "git merge-base origin/main HEAD"
proc = run_subprocess(command)
return proc.stdout.strip()
def get_main_jenkins_build_number(github, common_commit):
json = github.get(f"commits/{common_commit}/status")
for status in reversed(json["statuses"]):
if status["context"] != "tvm-ci/branch":
continue
state = status["state"]
target_url = str(status["target_url"])
build_number = (
target_url[target_url.find("job/main") : len(target_url)]
.strip("job/main/")
.strip("/display/redirect")
)
assert build_number.isdigit()
return {"build_number": build_number, "state": state}
raise RuntimeError(f"Failed to find main build number for commit {common_commit}")
def retrieve_test_reports(
common_main_build, pr_number, build_number, s3_prefix, pr_test_report_dir, main_test_report_dir
):
cur_build_s3_link = (
f"s3://{s3_prefix}/tvm/PR-{str(pr_number)}/{str(build_number)}/pytest-results"
)
retrieve_test_report(cur_build_s3_link, pr_test_report_dir)
common_build_s3_link = f"s3://{s3_prefix}/tvm/main/{common_main_build}/pytest-results"
retrieve_test_report(common_build_s3_link, main_test_report_dir)
def get_pr_and_build_numbers(target_url):
target_url = target_url[target_url.find("PR-") : len(target_url)]
split = target_url.split("/")
pr_number = split[0].strip("PR-")
build_number = split[1]
return {"pr_number": pr_number, "build_number": build_number}
def build_test_set(directory):
directory = Path(directory)
subdir_to_skipped = {}
subdirs = [
item for item in os.listdir(directory) if os.path.isdir(os.path.join(directory, item))
]
for subdir in subdirs:
subdir_to_skipped[subdir] = set()
for root, _, files in os.walk(directory / subdir):
for file in files:
test_report = ElementTree.parse(Path(root) / file)
for testcase in test_report.iter("testcase"):
skipped = testcase.find("skipped")
if skipped is not None:
key = testcase.attrib["classname"] + "#" + testcase.attrib["name"]
subdir_to_skipped[subdir].add(key)
return subdir_to_skipped
def to_node_name(dir_name: str):
return dir_name.replace("_", ": ", 1)
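# Example (illustrative): to_node_name("unittest_GPU") -> "unittest: GPU"
# (only the first underscore in the report directory name is replaced).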
def build_diff_comment_with_main(
common_commit_sha,
skipped_list,
commit_sha,
):
if len(skipped_list) == 0:
return f"No diff in skipped tests with main found in this branch for commit {commit_sha}.\n"
text = (
f"The list below shows tests that ran in main {common_commit_sha} but were "
f"skipped in the CI build of {commit_sha}:\n"
f"```\n"
)
for skip in skipped_list:
text += skip + "\n"
text += f"```\n"
return text
def build_comment(
common_commit_sha,
common_main_build,
skipped_list,
additional_skipped_list,
pr_number,
build_number,
commit_sha,
jenkins_prefix,
):
if common_main_build["state"] != "success":
return f"Unable to run tests bot because main failed to pass CI at {common_commit_sha}."
text = build_diff_comment_with_main(common_commit_sha, skipped_list, commit_sha)
if len(additional_skipped_list) != 0:
text += "\n"
text += (
f"Additional tests that were skipped in the CI build and present in the [`required_tests_to_run`]"
f"(https://github.com/apache/tvm/blob/main/ci/scripts/required_tests_to_run.json) file:"
f"\n```\n"
)
for skip in additional_skipped_list:
text += skip + "\n"
text += f"```\n"
text += (
f"A detailed report of ran tests is [here](https://{jenkins_prefix}/job/tvm/job/PR-{str(pr_number)}"
f"/{str(build_number)}/testReport/)."
)
return text
def find_target_url(pr_head: Dict[str, Any]):
for status in pr_head["statusCheckRollup"]["contexts"]["nodes"]:
if status.get("context", "") == "tvm-ci/pr-head":
return status["targetUrl"]
raise RuntimeError(f"Unable to find tvm-ci/pr-head status in {pr_head}")
def get_skipped_tests_comment(
pr: Dict[str, Any],
github,
s3_prefix: str = "tvm-jenkins-artifacts-prod",
jenkins_prefix: str = "ci.tlcpack.ai",
pr_test_report_dir: str = "pr-reports",
main_test_report_dir: str = "main-reports",
common_commit_sha: Optional[str] = None,
common_main_build: Optional[Dict[str, Any]] = None,
additional_tests_to_check_file: str = "required_tests_to_run.json",
) -> str:
pr_head = pr["commits"]["nodes"][0]["commit"]
target_url = find_target_url(pr_head)
pr_and_build = get_pr_and_build_numbers(target_url)
logging.info(f"Getting comment for {pr_head} with target {target_url}")
commit_sha = pr_head["oid"]
is_dry_run = common_commit_sha is not None
if not is_dry_run:
logging.info("Fetching common commit sha and build info")
common_commit_sha = get_common_commit_sha()
common_main_build = get_main_jenkins_build_number(github, common_commit_sha)
retrieve_test_reports(
common_main_build=common_main_build["build_number"],
pr_number=pr_and_build["pr_number"],
build_number=pr_and_build["build_number"],
s3_prefix=s3_prefix,
main_test_report_dir=main_test_report_dir,
pr_test_report_dir=pr_test_report_dir,
)
else:
logging.info("Dry run, expecting PR and main reports on disk")
main_tests = build_test_set(main_test_report_dir)
build_tests = build_test_set(pr_test_report_dir)
skipped_list = []
for subdir, skipped_set in build_tests.items():
        skipped_main = main_tests.get(subdir)
if skipped_main is None:
logging.warning(f"Could not find directory {subdir} in main.")
continue
diff_set = skipped_set - skipped_main
if len(diff_set) != 0:
for test in diff_set:
skipped_list.append(f"{to_node_name(subdir)} -> {test}")
# Sort the list to maintain an order in the output. Helps when validating the output in tests.
skipped_list.sort()
if len(skipped_list) == 0:
logging.info("No skipped tests found.")
if not is_dry_run:
current_file = Path(__file__).resolve()
additional_tests_to_check_file = Path(current_file).parent / "required_tests_to_run.json"
logging.info(
f"Checking additional tests in file {additional_tests_to_check_file} are not skipped."
)
try:
with open(additional_tests_to_check_file, "r") as f:
additional_tests_to_check = json.load(f)
except IOError:
logging.info(
f"Failed to read additional tests from file: {additional_tests_to_check_file}."
)
additional_tests_to_check = {}
# Assert that tests present in "required_tests_to_run.json" are not skipped.
additional_skipped_tests = []
for subdir, test_set in additional_tests_to_check.items():
if subdir not in build_tests.keys():
logging.warning(f"Could not find directory {subdir} in the build test set.")
continue
for test in test_set:
if test in build_tests[subdir]:
additional_skipped_tests.append(f"{to_node_name(subdir)} -> {test}")
if len(additional_skipped_tests) == 0:
logging.info("No skipped tests found in the additional list.")
body = build_comment(
common_commit_sha,
common_main_build,
skipped_list,
additional_skipped_tests,
pr_and_build["pr_number"],
pr_and_build["build_number"],
commit_sha,
jenkins_prefix,
)
return body
| https://github.com/zk-ml/tachikoma |
ci/scripts/github_tag_teams.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import json
import argparse
import logging
import re
from typing import Dict, Any, List, Tuple, Optional
from git_utils import git, GitHubRepo, parse_remote, find_ccs, dry_run_token
from cmd_utils import tags_from_title, init_log
GITHUB_NAME_REGEX = r"@[a-zA-Z0-9-]+"
def parse_line(line: str) -> Tuple[str, List[str]]:
line = line.lstrip(" -")
line = line.split()
# Parse out the name as anything up until the first tagged person
tag_items = []
tag_end = 0
for i, piece in enumerate(line):
if piece.startswith("@"):
tag_end = i
break
tag_items.append(piece)
tag = " ".join(tag_items).rstrip(":")
# From the last word that was part of the tag name, start looking for users
# tagged with a '@'
users = []
for piece in line[tag_end:]:
if piece.startswith("@"):
users.append(piece.lstrip("@"))
return (tag, list(sorted(users)))
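# Example (illustrative): parse_line("- example topic @user-b @user-a")
# -> ("example topic", ["user-a", "user-b"])  # tagged users are returned sorted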
def fetch_issue(github: GitHubRepo, issue_number: int):
query = """query($owner: String!, $name: String!, $number: Int!){
repository(owner: $owner, name: $name) {
issue(number: $number) {
body
comments(first:100) {
nodes {
body
}
}
}
}
}"""
r = github.graphql(
query,
variables={
"owner": github.user,
"name": github.repo,
"number": issue_number,
},
)
return r
def parse_teams(r: Dict[str, Any], issue_number: int) -> Dict[str, str]:
"""
Fetch an issue and parse out series of tagged people from the issue body
and comments
"""
issue = r["data"]["repository"]["issue"]
if issue is None or issue.get("body") is None:
raise RuntimeError(f"Could not find issue #{issue_number}\n\n{json.dumps(r, indent=2)}")
result = {}
def add_tag(tag, users):
if tag in result:
result[tag] += users
else:
result[tag] = users
# Parse the issue body (only bullets are looked at)
for line in issue["body"].split("\n"):
line = line.strip()
if not line.startswith("- "):
continue
if "@" not in line:
continue
tag, users = parse_line(line)
add_tag(tag, users)
# Parse comment bodies
for comment in issue["comments"]["nodes"]:
for line in comment["body"].split("\n"):
if "@" not in line:
continue
tag, users = parse_line(line)
add_tag(tag, users)
# De-duplicate users listed twice for the same tag
for tag in result:
result[tag] = list(set(result[tag]))
return {k.lower(): v for k, v in result.items() if k.strip()}
def tags_from_labels(labels: List[Dict[str, Any]]) -> List[str]:
return [label["name"] for label in labels]
def add_ccs_to_body(body: str, to_cc: List[str]) -> str:
lines = body.split("\n")
cc_line_idx = None
for i, line in enumerate(reversed(lines)):
if line.strip() == "":
continue
if line.startswith("cc @"):
cc_line_idx = len(lines) - i - 1
else:
break
def gen_cc_line(users):
users = sorted(users)
return "cc " + " ".join([f"@{user}" for user in users])
if cc_line_idx is None:
print("Did not find existing cc line")
lines.append("")
lines.append(gen_cc_line(to_cc))
else:
# Edit cc line in place
line = lines[cc_line_idx]
print(f"Found existing cc line at {cc_line_idx}: {line}")
existing_ccs = find_ccs(line)
print(f"Found cc's: {existing_ccs}")
if set(to_cc).issubset(set(existing_ccs)):
# Don't do anything if there is no update needed
return None
line = gen_cc_line(set(existing_ccs + to_cc))
lines[cc_line_idx] = line
return "\n".join(lines)
def determine_users_to_cc(
issue: Dict[str, Any], github: GitHubRepo, team_issue: str, issue_data: Optional[Dict[str, Any]]
) -> List[str]:
if issue_data is None:
issue_data = fetch_issue(github, issue_number=int(team_issue))
# Fetch the list of teams
teams = parse_teams(issue_data, issue_number=int(team_issue))
logging.info(f"Found these teams in issue #{team_issue}\n{json.dumps(teams, indent=2)}")
title = issue["title"]
if "author" in issue:
author = issue["author"]["login"]
else:
author = issue["user"]["login"]
tags = tags_from_title(title)
if isinstance(issue["labels"], dict):
tags += tags_from_labels(issue["labels"]["nodes"])
else:
tags += tags_from_labels(issue["labels"])
tags = [t.lower() for t in tags]
logging.info(f"Found tags: {tags}")
# Update the PR or issue based on tags in the title and GitHub tags
to_cc = [teams.get(t, []) for t in tags]
to_cc = list(set(item for sublist in to_cc for item in sublist))
to_cc = [user for user in to_cc if user != author]
return to_cc
def get_tags(pr_data: Dict[str, Any], github: GitHubRepo, team_issue: int) -> str:
to_cc = determine_users_to_cc(
issue=pr_data, github=github, team_issue=team_issue, issue_data=None
)
logging.info(f"Users to cc based on labels: {to_cc}")
description = "<sub>See [#10317](https://github.com/apache/tvm/issues/10317) for details</sub>"
if len(to_cc) == 0:
return "No users to tag found in teams " + description
return "cc " + ", ".join([f"@{user}" for user in to_cc]) + " " + description
if __name__ == "__main__":
help = "Automatically tag people based on PR / issue labels"
parser = argparse.ArgumentParser(description=help)
parser.add_argument("--remote", default="origin", help="ssh remote to parse")
parser.add_argument("--team-issue", default="10317", help="issue number to look at for ccs")
parser.add_argument(
"--team-issue-json", help="(testing only) issue JSON to parse rather than fetch from GitHub"
)
parser.add_argument(
"--dry-run",
action="store_true",
default=False,
help="run but don't send any request to GitHub",
)
args = parser.parse_args()
init_log()
remote = git(["config", "--get", f"remote.{args.remote}.url"])
user, repo = parse_remote(remote)
github = GitHubRepo(token=dry_run_token(args.dry_run), user=user, repo=repo)
if args.team_issue_json:
issue_data = json.loads(args.team_issue_json)
else:
issue_data = fetch_issue(github, issue_number=int(args.team_issue))
# Extract the payload from GitHub Actions
issue = json.loads(os.getenv("ISSUE", "null"))
pr = json.loads(os.getenv("PR", "null"))
if (issue is None and pr is None) or (issue is not None and pr is not None):
raise RuntimeError("Exactly one of $PR or $ISSUE must be set in the environment")
if pr is not None:
if pr["draft"]:
print(f"Terminating since {pr['number']} is a draft")
exit(0)
# PRs/issues have the same structure for the fields needed here
item = issue if issue is not None else pr
title = item["title"]
body = item["body"]
to_cc = determine_users_to_cc(
issue=item, github=github, team_issue=args.team_issue, issue_data=issue_data
)
existing_tags = list(set(re.findall(GITHUB_NAME_REGEX, body)))
existing_tags = set(tag.replace("@", "") for tag in existing_tags)
logging.info(f"Found existing tags: {existing_tags}")
to_cc = [user for user in to_cc if user not in existing_tags]
logging.info("Users to cc based on labels", to_cc)
# Create the new PR/issue body
if len(to_cc) == 0:
logging.info("No one to cc, exiting")
exit(0)
new_body = add_ccs_to_body(body, to_cc)
if new_body is None:
logging.info(f"Everyone to cc is already cc'ed, no update needed")
exit(0)
logging.info(f"Changing body from:\n----\n{body}\n----\nto:\n----\n{new_body}\n----")
# Set the PR/issue body on GitHub
data = {"body": new_body}
if issue is not None:
issue_number = issue["number"]
url = f"issues/{issue_number}"
elif pr is not None:
pr_number = pr["number"]
url = f"pulls/{pr_number}"
else:
raise RuntimeError("Unreachable, please report a bug with a link to the failed workflow")
if not args.dry_run:
github.post(url, data=data)
else:
logging.info(f"Dry run, would have updated {url} with {data}")
| https://github.com/zk-ml/tachikoma |
ci/scripts/github_tvmbot.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import json
import argparse
import warnings
import logging
import traceback
import re
from typing import Dict, Any, List, Optional, Callable, Union
from pathlib import Path
from git_utils import git, GitHubRepo, parse_remote, post
from cmd_utils import init_log
Review = Dict[str, Any]
CIJob = Dict[str, Any]
Comment = Dict[str, Any]
CommentChecker = Callable[[Comment], bool]
EXPECTED_JOBS = ["tvm-ci/pr-head"]
TVM_BOT_JENKINS_TOKEN = os.environ["TVM_BOT_JENKINS_TOKEN"]
GH_ACTIONS_TOKEN = os.environ["GH_ACTIONS_TOKEN"]
JENKINS_URL = "https://ci.tlcpack.ai/"
THANKS_MESSAGE = r"(\s*)Thanks for contributing to TVM! Please refer to guideline https://tvm.apache.org/docs/contribute/ for useful information and tips. After the pull request is submitted, please request code reviews from \[Reviewers\]\(https://github.com/apache/incubator-tvm/blob/master/CONTRIBUTORS.md#reviewers\) by them in the pull request thread.(\s*)"
def to_json_str(obj: Any) -> str:
return json.dumps(obj, indent=2)
COLLABORATORS_QUERY = """
query ($owner: String!, $name: String!, $user: String!) {
repository(owner: $owner, name: $name) {
collaborators(query: $user, first: 100) {
nodes {
login
}
}
}
}
"""
MENTIONABLE_QUERY = """
query ($owner: String!, $name: String!, $user: String!) {
repository(owner: $owner, name: $name) {
mentionableUsers(query: $user, first: 100) {
nodes {
login
}
}
}
}
"""
PR_QUERY = """
query ($owner: String!, $name: String!, $number: Int!) {
repository(owner: $owner, name: $name) {
pullRequest(number: $number) {
title
body
state
author {
login
}
comments(last: 100) {
pageInfo {
hasPreviousPage
}
nodes {
authorAssociation
author {
login
}
id
updatedAt
body
}
}
authorCommits:commits(last:100) {
nodes {
commit {
authors(first:100) {
nodes {
name
email
}
}
}
}
}
commits(last: 1) {
nodes {
commit {
oid
statusCheckRollup {
contexts(first: 100) {
pageInfo {
hasNextPage
}
nodes {
... on CheckRun {
name
databaseId
checkSuite {
workflowRun {
databaseId
workflow {
name
}
}
}
status
conclusion
url
}
... on StatusContext {
state
context
targetUrl
}
}
}
}
}
}
}
reviewDecision
reviews(last: 100) {
pageInfo {
hasPreviousPage
}
nodes {
body
updatedAt
url
id
authorCanPushToRepository
commit {
oid
}
author {
login
}
state
}
}
}
}
}
"""
def walk(obj, visitor, parent_key=None):
"""
Recursively call 'visitor' on all the children of a dictionary
"""
visitor(obj, parent_key)
if isinstance(obj, dict):
for k, v in obj.items():
walk(v, visitor, parent_key=k)
elif isinstance(obj, list):
for v in obj:
walk(v, visitor)
class PR:
def __init__(
self,
number: int,
owner: str,
repo: str,
dry_run: bool = False,
raw_data: Dict[str, Any] = None,
):
self.owner = owner
self.number = number
self.repo_name = repo
self.dry_run = dry_run
self.has_error = False
if dry_run and raw_data:
# In test mode there is no need to fetch anything
self.raw = raw_data
self.github = None
else:
self.github = GitHubRepo(user=owner, repo=repo, token=os.environ["GITHUB_TOKEN"])
if os.getenv("DEBUG", "0") == "1":
# For local runs fill in the requested data but cache it for
# later use
cached_path = Path("pr.json")
if not cached_path.exists():
self.raw = self.fetch_data()
with open(cached_path, "w") as f:
json.dump(self.raw, f, indent=2)
else:
with open(cached_path) as f:
self.raw = json.load(f)
else:
# Usual path, fetch the PR's data based on the number from
# GitHub
self.raw = self.fetch_data()
def checker(obj, parent_key):
"""
Verify that any paged results don't have extra data (if so the bot
may still work since most relevant comments will be more recent)
"""
if parent_key == "pageInfo":
if obj.get("hasPreviousPage", False):
warnings.warn(f"Found {obj} with a previous page, bot may be missing data")
if obj.get("hasNextPage", False):
warnings.warn(f"Found {obj} with a next page, bot may be missing data")
walk(self.raw, checker)
logging.info(f"Verified data, running with PR {to_json_str(self.raw)}")
def __repr__(self):
return json.dumps(self.raw, indent=2)
def react(self, comment: Dict[str, Any], content: str):
"""
React with a thumbs up to a comment
"""
url = f"issues/comments/{comment['id']}/reactions"
data = {"content": content}
if self.dry_run:
logging.info(f"Dry run, would have +1'ed to {url} with {data}")
else:
self.github.post(url, data=data)
def head_commit(self):
return self.raw["commits"]["nodes"][0]["commit"]
def co_authors(self) -> List[str]:
authors = []
for commit in self.raw["authorCommits"]["nodes"]:
# Co-authors always come after the main author according to the
# GitHub docs, so ignore the first item
for author in commit["commit"]["authors"]["nodes"][1:]:
name = author["name"]
email = author["email"]
authors.append(f"{name} <{email}>")
return list(set(authors))
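    # Example (illustrative): returns entries such as "Jane Doe <[email protected]>"
    # (hypothetical name/email), de-duplicated across all commits in the PR.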
def head_oid(self):
return self.head_commit()["oid"]
def ci_jobs(self) -> List[CIJob]:
"""
Get a list of all CI jobs (GitHub Actions and other) in a unified format
"""
jobs = []
for item in self.head_commit()["statusCheckRollup"]["contexts"]["nodes"]:
if "checkSuite" in item:
# GitHub Actions job, parse separately
status = item["conclusion"]
if status is None:
# If the 'conclusion' isn't filled out the job hasn't
# finished yet
status = "PENDING"
workflow_name = item["checkSuite"]["workflowRun"]["workflow"]["name"]
if workflow_name != "CI":
# Ignore all jobs that aren't in the main.yml workflow (these are mostly
# automation jobs that run on PRs for tagging / reviews)
continue
check_name = item["name"]
jobs.append(
{
"name": f"{workflow_name} / {check_name}",
"url": item["url"],
"status": status.upper(),
}
)
else:
# GitHub Status (e.g. from Jenkins)
jobs.append(
{
"name": item["context"],
"url": item["targetUrl"],
"status": item["state"].upper(),
}
)
logging.info(f"Found CI jobs for {self.head_commit()['oid']} {to_json_str(jobs)}")
return jobs
def reviews(self) -> List[Review]:
return self.raw["reviews"]["nodes"]
def head_commit_reviews(self) -> List[Review]:
"""
Find reviews associated with the head commit
"""
commits_to_review_status: Dict[str, List[Review]] = {}
for review in self.reviews():
if not review["authorCanPushToRepository"]:
# ignore reviews from non-committers
continue
oid = review["commit"]["oid"]
if oid in commits_to_review_status:
commits_to_review_status[oid].append(review)
else:
commits_to_review_status[oid] = [review]
# Only use the data for the head commit of the PR
head_reviews = commits_to_review_status.get(self.head_oid(), [])
return head_reviews
def fetch_data(self):
"""
Fetch the data for this PR from GitHub
"""
return self.github.graphql(
query=PR_QUERY,
variables={
"owner": self.owner,
"name": self.repo_name,
"number": self.number,
},
)["data"]["repository"]["pullRequest"]
def search_collaborator(self, user: str) -> List[Dict[str, Any]]:
"""
Query GitHub for collaborators matching 'user'
"""
return self.search_users(user, COLLABORATORS_QUERY)["collaborators"]["nodes"]
def search_users(self, user: str, query: str) -> List[Dict[str, Any]]:
return self.github.graphql(
query=query,
variables={
"owner": self.owner,
"name": self.repo_name,
"user": user,
},
)["data"]["repository"]
def search_mentionable_users(self, user: str) -> List[Dict[str, Any]]:
return self.search_users(user, MENTIONABLE_QUERY)["mentionableUsers"]["nodes"]
def comment(self, text: str) -> None:
"""
Leave the comment 'text' on this PR
"""
logging.info(f"Commenting:\n{text}")
# TODO: Update latest comment in-place if there has been no activity
data = {"body": text}
url = f"issues/{self.number}/comments"
if self.dry_run:
logging.info(
f"Dry run, would have commented on url={url} commenting with data={to_json_str(data)}"
)
return
self.github.post(url, data=data)
def state(self) -> str:
"""
PR state (OPEN, CLOSED, MERGED, etc)
"""
return self.raw["state"]
def processed_body(self) -> str:
body = self.raw["body"].strip().replace("\r", "")
# Remove any @-mentions of people
body = re.sub(r"(\s)@", "\g<1>", body)
# Remove the auto-inserted text since it's not useful to have in the commit log
body = re.sub(THANKS_MESSAGE, "\n\n", body)
return body.strip()
def body_with_co_authors(self) -> str:
"""
Add 'Co-authored-by' strings to the PR body based on the prior commits
in the PR
"""
body = self.processed_body()
author_lines = self.co_authors()
logging.info(f"Found co-authors: author_lines={author_lines}")
full_author_lines = [f"Co-authored-by: {author_line}" for author_line in author_lines]
authors_to_add = []
for author_line in author_lines:
if author_line not in body:
authors_to_add.append(f"Co-authored-by: {author_line}")
if len(authors_to_add) > 0:
# If the line isn't already in the PR body (it could have been
# added manually), put it in
full_author_text = "\n".join(authors_to_add)
body = f"{body}\n\n{full_author_text}"
return body
def merge(self) -> None:
"""
Request a merge of this PR via the GitHub API
"""
url = f"pulls/{self.number}/merge"
title = self.raw["title"] + f" (#{self.number})"
body = self.body_with_co_authors()
logging.info(f"Full commit:\n{title}\n\n{body}")
data = {
"commit_title": title,
"commit_message": body,
# The SHA is necessary in case there was an update right when this
# script ran, GitHub will sort out who won
"sha": self.head_oid(),
"merge_method": "squash",
}
if self.dry_run:
logging.info(f"Dry run, would have merged with url={url} and data={to_json_str(data)}")
return
r = self.github.put(url, data=data)
logging.info(f"GitHub merge response: {r}")
return r
def author(self) -> str:
return self.raw["author"]["login"]
def find_failed_ci_jobs(self) -> List[CIJob]:
# NEUTRAL is GitHub Action's way of saying cancelled
return [
job
for job in self.ci_jobs()
if job["status"] not in {"SUCCESS", "SUCCESSFUL", "SKIPPED"}
]
def find_missing_expected_jobs(self) -> List[str]:
# Map of job name: has seen in completed jobs
seen_expected_jobs = {name: False for name in EXPECTED_JOBS}
logging.info(f"Expected to see jobs: {seen_expected_jobs}")
missing_expected_jobs = []
for job in self.ci_jobs():
seen_expected_jobs[job["name"]] = True
for name, seen in seen_expected_jobs.items():
if not seen:
missing_expected_jobs.append(name)
return missing_expected_jobs
def trigger_gha_ci(self, sha: str) -> None:
logging.info(f"POST-ing a workflow_dispatch event to main.yml")
actions_github = GitHubRepo(
user=self.github.user, repo=self.github.repo, token=GH_ACTIONS_TOKEN
)
r = actions_github.post(
url="actions/workflows/main.yml/dispatches",
data={
"ref": "main",
},
)
logging.info(f"Successful workflow_dispatch: {r}")
def merge_if_passed_checks(self) -> Optional[Dict[str, Any]]:
failed_ci_jobs = self.find_failed_ci_jobs()
all_ci_passed = len(failed_ci_jobs) == 0
has_one_approval = False
if not all_ci_passed:
failed_jobs_msg = "\n".join(
[f" * [{job['name']} (`{job['status']}`)]({job['url']})" for job in failed_ci_jobs]
)
self.comment(
f"Cannot merge, these CI jobs are not successful on {self.head_oid()}:\n{failed_jobs_msg}"
)
return None
missing_expected_jobs = self.find_missing_expected_jobs()
if len(missing_expected_jobs) > 0:
missing_jobs_msg = "\n".join([f" * `{name}`" for name in missing_expected_jobs])
self.comment(f"Cannot merge, missing expected jobs:\n{missing_jobs_msg}")
return None
head_commit_reviews = self.head_commit_reviews()
for review in head_commit_reviews:
if review["state"] == "CHANGES_REQUESTED":
self.comment(
f"Cannot merge, found [this review]({review['url']}) on {self.head_oid()} with changes requested"
)
return None
if review["state"] == "APPROVED":
has_one_approval = True
logging.info(f"Found approving review: {to_json_str(review)}")
if has_one_approval and all_ci_passed:
return self.merge()
elif not has_one_approval:
self.comment(
f"Cannot merge, did not find any approving reviews from users with write access on {self.head_oid()}"
)
return None
elif not all_ci_passed:
self.comment(f"Cannot merge, CI did not pass on on {self.head_oid()}")
return None
def rerun_jenkins_ci(self) -> None:
url = JENKINS_URL + f"job/tvm/job/PR-{self.number}/buildWithParameters"
logging.info(f"Rerunning ci with URL={url}")
if self.dry_run:
logging.info("Dry run, not sending POST")
else:
post(url, auth=("tvm-bot", TVM_BOT_JENKINS_TOKEN))
def rerun_github_actions(self) -> None:
workflow_ids = []
for item in self.head_commit()["statusCheckRollup"]["contexts"]["nodes"]:
if "checkSuite" in item and item["conclusion"] == "FAILURE":
workflow_id = item["checkSuite"]["workflowRun"]["databaseId"]
workflow_ids.append(workflow_id)
workflow_ids = list(set(workflow_ids))
logging.info(f"Rerunning GitHub Actions workflows with IDs: {workflow_ids}")
if self.dry_run:
actions_github = None
else:
actions_github = GitHubRepo(
user=self.github.user, repo=self.github.repo, token=GH_ACTIONS_TOKEN
)
for workflow_id in workflow_ids:
if self.dry_run:
logging.info(f"Dry run, not restarting workflow {workflow_id}")
else:
try:
actions_github.post(f"actions/runs/{workflow_id}/rerun-failed-jobs", data={})
except RuntimeError as e:
logging.exception(e)
# Ignore errors about jobs that are part of the same workflow to avoid
# having to figure out which jobs are in which workflows ahead of time
if "The workflow run containing this job is already running" in str(e):
pass
else:
raise e
def comment_failure(self, msg: str, exceptions: Union[Exception, List[Exception]]):
if not isinstance(exceptions, list):
exceptions = [exceptions]
logging.info(f"Failed, commenting {exceptions}")
# Extract all the traceback strings
for item in exceptions:
try:
raise item
except Exception:
item.exception_msg = traceback.format_exc()
comment = f"{msg} in {args.run_url}\n\n"
for exception in exceptions:
comment += f"<details>\n\n```\n{exception.exception_msg}\n```\n\n"
if hasattr(exception, "read"):
comment += f"with response\n\n```\n{exception.read().decode()}\n```\n\n"
comment += "</details>"
pr.comment(comment)
pr.has_error = True
return exception
def check_author(pr, triggering_comment, args):
comment_author = triggering_comment["user"]["login"]
if pr.author() == comment_author:
logging.info("Comment user is PR author, continuing")
return True
return False
def search_users(name, triggering_comment, testing_json, search_fn):
logging.info(f"Checking {name}")
    comment_author = triggering_comment["user"]["login"]
    if testing_json:
        matching_users = json.loads(testing_json)
    else:
        matching_users = search_fn(comment_author)
    logging.info(f"Found {name}: {matching_users}")
    user_names = {user["login"] for user in matching_users}
    return len(matching_users) > 0 and comment_author in user_names
def check_collaborator(pr, triggering_comment, args):
return search_users(
name="collaborators",
triggering_comment=triggering_comment,
search_fn=pr.search_collaborator,
testing_json=args.testing_collaborators_json,
)
def check_mentionable_users(pr, triggering_comment, args):
return search_users(
name="mentionable users",
triggering_comment=triggering_comment,
search_fn=pr.search_mentionable_users,
testing_json=args.testing_mentionable_users_json,
)
AUTH_CHECKS = {
"metionable_users": check_mentionable_users,
"collaborators": check_collaborator,
"author": check_author,
}
# Stash the keys so they're accessible from the values
AUTH_CHECKS = {k: (k, v) for k, v in AUTH_CHECKS.items()}
class Merge:
triggers = [
"merge",
"merge this",
"merge this pr",
]
auth = [AUTH_CHECKS["collaborators"], AUTH_CHECKS["author"]]
@staticmethod
def run(pr: PR):
info = None
try:
info = pr.merge_if_passed_checks()
except Exception as e:
pr.comment_failure("Failed to process merge request", e)
raise e
if info is not None:
try:
pr.trigger_gha_ci(sha=info["sha"])
except Exception as e:
pr.comment_failure("Failed to trigger GitHub Actions", e)
raise e
class Rerun:
triggers = [
"rerun",
"rerun ci",
"re-run",
"re-run ci",
"run",
"run ci",
]
auth = [AUTH_CHECKS["metionable_users"]]
@staticmethod
def run(pr: PR):
errors = []
try:
pr.rerun_jenkins_ci()
except Exception as e:
errors.append(e)
try:
pr.rerun_github_actions()
except Exception as e:
errors.append(e)
if len(errors) > 0:
pr.comment_failure("Failed to re-run CI", errors)
if __name__ == "__main__":
help = "Check if a PR has comments trying to merge it, and do so based on reviews/CI status"
parser = argparse.ArgumentParser(description=help)
parser.add_argument("--remote", default="origin", help="ssh remote to parse")
parser.add_argument("--pr", required=True, help="pr number to check")
parser.add_argument("--run-url", required=True, help="workflow run URL")
parser.add_argument(
"--trigger-comment-json", required=True, help="json of the comment that triggered this run"
)
parser.add_argument("--testing-pr-json", help="(testing only) manual data for testing")
parser.add_argument(
"--testing-collaborators-json", help="(testing only) manual data for testing"
)
parser.add_argument(
"--testing-mentionable-users-json", help="(testing only) manual data for testing"
)
parser.add_argument(
"--dry-run",
action="store_true",
default=False,
help="run but don't send any request to GitHub",
)
args = parser.parse_args()
init_log()
comment = json.loads(args.trigger_comment_json)
body = comment["body"].strip()
# Check that the comment was addressed to tvm-bot
if not body.startswith("@tvm-bot "):
logging.info(f"Not a bot comment, '{body}' does not start with '@tvm-bot'")
exit(0)
# Find the code to run for the command from the user
user_command = body.lstrip("@tvm-bot").strip()
command_to_run = None
for command in [Merge, Rerun]:
if user_command in command.triggers:
command_to_run = command
break
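    # Example (illustrative): a comment body of "@tvm-bot rerun" yields
    # user_command == "rerun", which matches Rerun.triggers above.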
if command_to_run is None:
logging.info(f"Command '{user_command}' did not match anything")
exit(0)
# Find the remote for querying more data about the PR
remote = git(["config", "--get", f"remote.{args.remote}.url"])
logging.info(f"Using remote remote={remote}")
owner, repo = parse_remote(remote)
if args.pr.strip() == "":
logging.info("No PR number passed")
exit(0)
logging.info(f"Checking owner={owner} repo={repo}")
if args.testing_pr_json:
pr = PR(
number=int(args.pr),
owner=owner,
repo=repo,
dry_run=args.dry_run,
raw_data=json.loads(args.testing_pr_json),
)
else:
pr = PR(number=int(args.pr), owner=owner, repo=repo, dry_run=args.dry_run)
for name, check in command_to_run.auth:
if check(pr, comment, args):
logging.info(f"Passed auth check '{name}', continuing")
# Only one authorization check needs to pass (e.g. just mentionable
# or PR author), not all of them so quit
break
else:
logging.info(f"Failed auth check '{name}', quitting")
# Add a sad face
pr.react(comment, "confused")
exit(0)
# Acknowledge the comment with a react
pr.react(comment, "+1")
state = pr.state()
if state != "OPEN":
logging.info(f"Ignoring event on PR, state was not OPEN, instead was state={state}")
exit(0)
# Run the command
command_to_run.run(pr)
if pr.has_error:
raise RuntimeError("PR commented a failure")
| https://github.com/zk-ml/tachikoma |
ci/scripts/http_utils.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import logging
from urllib import request
from typing import Dict, Any, Optional, Tuple
def get(url: str, headers: Optional[Dict[str, str]] = None) -> Tuple[Dict[str, Any], Dict[str, str]]:
logging.info(f"Requesting GET to {url}")
if headers is None:
headers = {}
req = request.Request(url, headers=headers)
with request.urlopen(req) as response:
response_headers = {k: v for k, v in response.getheaders()}
response = json.loads(response.read())
return response, response_headers
| https://github.com/zk-ml/tachikoma |
ci/scripts/open_docker_update_pr.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import re
import logging
import datetime
import os
import json
from urllib import error
from typing import List, Dict, Any, Optional, Callable
from git_utils import git, parse_remote, GitHubRepo
from cmd_utils import REPO_ROOT, init_log, Sh
from should_rebuild_docker import docker_api
JENKINSFILE = REPO_ROOT / "ci" / "jenkins" / "Jenkinsfile.j2"
GENERATED_JENKINSFILE = REPO_ROOT / "Jenkinsfile"
GENERATE_SCRIPT = REPO_ROOT / "ci" / "jenkins" / "generate.py"
GITHUB_TOKEN = os.environ["GITHUB_TOKEN"]
BRANCH = "nightly-docker-update"
def _testing_docker_api(data: Dict[str, Any]) -> Callable[[str], Dict[str, Any]]:
"""Returns a function that can be used in place of docker_api"""
def mock(url: str) -> Dict[str, Any]:
if url in data:
return data[url]
else:
raise error.HTTPError(url, 404, f"Not found: {url}", {}, None)
return mock
def parse_docker_date(d: str) -> datetime.datetime:
"""Turn a date string from the Docker API into a datetime object"""
return datetime.datetime.strptime(d, "%Y-%m-%dT%H:%M:%S.%fZ")
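# Example (illustrative): parse_docker_date("2022-07-10T06:01:25.396219Z")
# -> datetime.datetime(2022, 7, 10, 6, 1, 25, 396219)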
def check_tag(tag: Dict[str, Any]) -> bool:
return re.match(r"^[0-9]+-[0-9]+-[a-z0-9]+$", tag["name"]) is not None
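# Example (illustrative): accepts nightly-style tags such as
# {"name": "20220710-060125-81b6aa23c"} and rejects tags like {"name": "latest"}
# that do not match the date-date-sha pattern.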
def latest_tag(user: str, repo: str) -> List[Dict[str, Any]]:
"""
Queries Docker Hub and finds the most recent tag for the specified image/repo pair
"""
r = docker_api(f"repositories/{user}/{repo}/tags")
results = r["results"]
for result in results:
result["last_updated"] = parse_docker_date(result["last_updated"])
results = list(sorted(results, key=lambda d: d["last_updated"]))
results = [tag for tag in results if check_tag(tag)]
return results[-1]
def latest_tlcpackstaging_image(source: str) -> Optional[str]:
"""
Finds the latest full tag to use in the Jenkinsfile or returns None if no
update is needed
"""
name, current_tag = source.split(":")
user, repo = name.split("/")
logging.info(
f"Running with name: {name}, current_tag: {current_tag}, user: {user}, repo: {repo}"
)
staging_repo = repo.replace("-", "_")
latest_tlcpackstaging_tag = latest_tag(user="tlcpackstaging", repo=staging_repo)
logging.info(f"Found latest tlcpackstaging tag:\n{latest_tlcpackstaging_tag}")
if latest_tlcpackstaging_tag["name"] == current_tag:
logging.info(f"tlcpackstaging tag is the same as the one in the Jenkinsfile")
latest_tlcpack_tag = latest_tag(user="tlcpack", repo=repo)
logging.info(f"Found latest tlcpack tag:\n{latest_tlcpack_tag}")
if latest_tlcpack_tag["name"] == latest_tlcpackstaging_tag["name"]:
logging.info("Tag names were the same, no update needed")
return None
if latest_tlcpack_tag["last_updated"] > latest_tlcpackstaging_tag["last_updated"]:
new_spec = f"tlcpack/{repo}:{latest_tlcpack_tag['name']}"
else:
# Even if the image doesn't exist in tlcpack, it will fall back to tlcpackstaging
# so hardcode the username here
new_spec = f"tlcpack/{repo}:{latest_tlcpackstaging_tag['name']}"
logging.info("Using tlcpackstaging tag on tlcpack")
logging.info(f"Found newer image, using: {new_spec}")
return new_spec
if __name__ == "__main__":
init_log()
help = "Open a PR to update the Docker images to use the latest available in tlcpackstaging"
parser = argparse.ArgumentParser(description=help)
parser.add_argument("--remote", default="origin", help="ssh remote to parse")
parser.add_argument("--dry-run", action="store_true", help="don't send PR to GitHub")
parser.add_argument("--testing-docker-data", help="JSON data to mock Docker Hub API response")
args = parser.parse_args()
# Install test mock if necessary
if args.testing_docker_data is not None:
docker_api = _testing_docker_api(data=json.loads(args.testing_docker_data))
remote = git(["config", "--get", f"remote.{args.remote}.url"])
user, repo = parse_remote(remote)
# Read the existing images from the Jenkinsfile
logging.info(f"Reading {JENKINSFILE}")
with open(JENKINSFILE) as f:
content = f.readlines()
# Build a new Jenkinsfile with the latest images from tlcpack or tlcpackstaging
new_content = []
replacements = {}
for line in content:
m = re.match(r"^(ci_[a-zA-Z0-9]+) = \'(.*)\'", line.strip())
if m is not None:
logging.info(f"Found match on line {line.strip()}")
groups = m.groups()
new_image = latest_tlcpackstaging_image(groups[1])
if new_image is None:
logging.info(f"No new image found")
new_content.append(line)
else:
logging.info(f"Using new image {new_image}")
new_line = f"{groups[0]} = '{new_image}'\n"
new_content.append(new_line)
replacements[line] = new_line
else:
new_content.append(line)
# Write out the new content
if args.dry_run:
logging.info(f"Dry run, would have written new content to {JENKINSFILE}")
else:
logging.info(f"Writing new content to {JENKINSFILE}")
with open(JENKINSFILE, "w") as f:
f.write("".join(new_content))
# Re-generate the Jenkinsfile
logging.info(f"Editing {GENERATED_JENKINSFILE}")
with open(GENERATED_JENKINSFILE) as f:
generated_content = f.read()
for original_line, new_line in replacements.items():
generated_content = generated_content.replace(original_line, new_line)
if args.dry_run:
print(f"Would have written:\n{generated_content}")
else:
with open(GENERATED_JENKINSFILE, "w") as f:
f.write(generated_content)
# Publish the PR
title = "[ci][docker] Nightly Docker image update"
body = "This bumps the Docker images to the latest versions from Docker Hub."
message = f"{title}\n\n\n{body}"
if args.dry_run:
logging.info("Dry run, would have committed Jenkinsfile")
else:
logging.info(f"Creating git commit")
git(["checkout", "-B", BRANCH])
git(["add", str(JENKINSFILE.relative_to(REPO_ROOT))])
git(["add", str(GENERATED_JENKINSFILE.relative_to(REPO_ROOT))])
git(["config", "user.name", "tvm-bot"])
git(["config", "user.email", "[email protected]"])
git(["commit", "-m", message])
git(["push", "--set-upstream", args.remote, BRANCH, "--force"])
logging.info(f"Sending PR to GitHub")
github = GitHubRepo(user=user, repo=repo, token=GITHUB_TOKEN)
data = {
"title": title,
"body": body,
"head": BRANCH,
"base": "main",
"maintainer_can_modify": True,
}
url = "pulls"
if args.dry_run:
logging.info(f"Dry run, would have sent {data} to {url}")
else:
try:
github.post(url, data=data)
except error.HTTPError as e:
# Ignore the exception if the PR already exists (which gives a 422). The
# existing PR will have been updated in place
if e.code == 422:
logging.info("PR already exists, ignoring error")
logging.exception(e)
else:
raise e
| https://github.com/zk-ml/tachikoma |
ci/scripts/ping_reviewers.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import argparse
import re
import datetime
import json
import textwrap
from typing import Dict, Any, List
from git_utils import git, GitHubRepo, parse_remote
GIT_DATE_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
def prs_query(user: str, repo: str, cursor: str = None):
after = ""
if cursor is not None:
after = f', before:"{cursor}"'
time_keys = "createdAt updatedAt lastEditedAt publishedAt"
return f"""
{{
repository(name: "{repo}", owner: "{user}") {{
pullRequests(states: [OPEN], last: 10{after}) {{
edges {{
cursor
}}
nodes {{
number
url
body
{time_keys}
isDraft
author {{
login
}}
reviews(last:100) {{
nodes {{
{time_keys}
bodyText
author {{ login }}
comments(last:100) {{
nodes {{
{time_keys}
bodyText
}}
}}
}}
}}
comments(last:100) {{
nodes {{
authorAssociation
bodyText
{time_keys}
author {{
login
}}
}}
}}
}}
}}
}}
}}
"""
def find_reviewers(body: str) -> List[str]:
matches = re.findall(r"(cc( @[-A-Za-z0-9]+)+)", body, flags=re.MULTILINE)
matches = [full for full, last in matches]
reviewers = []
for match in matches:
if match.startswith("cc "):
match = match.replace("cc ", "")
users = [x.strip() for x in match.split("@")]
reviewers += users
reviewers = set(x for x in reviewers if x != "")
return list(reviewers)
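# Example (illustrative): find_reviewers("Some PR body\ncc @alice @bob") returns
# ["alice", "bob"] in arbitrary order (duplicates are removed via a set).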
def check_pr(pr, wait_time, now):
last_action = None
author = pr["author"]["login"]
def update_last(new_time, description):
if isinstance(new_time, str):
new_time = datetime.datetime.strptime(new_time, GIT_DATE_FORMAT)
if new_time is None:
print(f" time not found: {description}")
return
nonlocal last_action
if last_action is None or new_time > last_action[0]:
last_action = (new_time, description)
def check_obj(obj, name):
update_last(obj["publishedAt"], f"{name} publishedAt: {obj}")
update_last(obj["updatedAt"], f"{name} updatedAt: {obj}")
update_last(obj["lastEditedAt"], f"{name} lastEditedAt: {obj}")
update_last(obj["createdAt"], f"{name} lastEditedAt: {obj}")
check_obj(pr, "pr")
# GitHub counts comments left as part of a review separately than standalone
# comments
reviews = pr["reviews"]["nodes"]
review_comments = []
for review in reviews:
review_comments += review["comments"]["nodes"]
check_obj(review, "review")
# Collate all comments
comments = pr["comments"]["nodes"] + review_comments
# Find the last date of any comment
for comment in comments:
check_obj(comment, "comment")
time_since_last_action = now - last_action[0]
# Find reviewers in the PR's body
pr_body_reviewers = find_reviewers(pr["body"])
# Pull out reviewers from any cc @... text in a comment
cc_reviewers = [find_reviewers(c["bodyText"]) for c in comments]
cc_reviewers = [r for revs in cc_reviewers for r in revs]
# Anyone that has left a review as a reviewer (this may include the PR
# author since their responses count as reviews)
review_reviewers = list(set(r["author"]["login"] for r in reviews))
reviewers = cc_reviewers + review_reviewers + pr_body_reviewers
reviewers = list(set(reviewers))
reviewers = [r for r in reviewers if r != author]
if time_since_last_action > wait_time:
print(
" Pinging reviewers",
reviewers,
"on",
pr["url"],
"since it has been",
time_since_last_action,
f"since anything happened on that PR (last action: {last_action[1]})",
)
return reviewers
else:
print(
f" Not pinging PR {pr['number']} since it has been only {time_since_last_action} since the last action: {last_action[1]}"
)
return None
def make_ping_message(pr, reviewers):
reviewers = [f"@{r}" for r in reviewers]
author = f'@{pr["author"]["login"]}'
text = (
"It has been a while since this PR was updated, "
+ " ".join(reviewers)
+ " please leave a review or address the outstanding comments. "
+ f"{author} if this PR is still a work in progress, please [convert it to a draft](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/changing-the-stage-of-a-pull-request#converting-a-pull-request-to-a-draft)"
" until it is ready for review."
)
return text
if __name__ == "__main__":
help = "Comment on languishing issues and PRs"
parser = argparse.ArgumentParser(description=help)
parser.add_argument("--remote", default="origin", help="ssh remote to parse")
parser.add_argument("--wait-time-minutes", required=True, type=int, help="ssh remote to parse")
parser.add_argument("--cutoff-pr-number", default=0, type=int, help="ssh remote to parse")
parser.add_argument("--dry-run", action="store_true", help="don't update GitHub")
parser.add_argument("--pr-json", help="(testing) data for testing to use instead of GitHub")
parser.add_argument("--now", help="(testing) custom string for current time")
args = parser.parse_args()
remote = git(["config", "--get", f"remote.{args.remote}.url"])
user, repo = parse_remote(remote)
wait_time = datetime.timedelta(minutes=int(args.wait_time_minutes))
cutoff_pr_number = int(args.cutoff_pr_number)
print(
"Running with:\n"
f" time cutoff: {wait_time}\n"
f" number cutoff: {cutoff_pr_number}\n"
f" dry run: {args.dry_run}\n"
f" user/repo: {user}/{repo}\n",
end="",
)
    # GitHub client used to query PR data and post comments (token from the environment)
    github = GitHubRepo(user=user, repo=repo, token=os.environ["GITHUB_TOKEN"])
    if args.pr_json:
r = json.loads(args.pr_json)
else:
q = prs_query(user, repo)
r = github.graphql(q)
now = datetime.datetime.utcnow()
if args.now:
now = datetime.datetime.strptime(args.now, GIT_DATE_FORMAT)
# Loop until all PRs have been checked
while True:
prs = r["data"]["repository"]["pullRequests"]["nodes"]
# Don't look at draft PRs at all
prs_to_check = []
for pr in prs:
if pr["isDraft"]:
print(f"Skipping #{pr['number']} since it's a draft")
elif pr["number"] <= cutoff_pr_number:
print(
f"Skipping #{pr['number']} since it's too old ({pr['number']} <= {cutoff_pr_number})"
)
else:
print(f"Checking #{pr['number']}")
prs_to_check.append(pr)
print(f"Summary: Checking {len(prs_to_check)} of {len(prs)} fetched")
# Ping reviewers on each PR in the response if necessary
for pr in prs_to_check:
print("Checking", pr["url"])
reviewers = check_pr(pr, wait_time, now)
if reviewers is not None:
message = make_ping_message(pr, reviewers)
if args.dry_run:
print(
f"Would have commented on #{pr['number']}:\n{textwrap.indent(message, prefix=' ')}"
)
else:
r = github.post(f"issues/{pr['number']}/comments", {"body": message})
print(r)
edges = r["data"]["repository"]["pullRequests"]["edges"]
if len(edges) == 0:
# No more results to check
break
cursor = edges[0]["cursor"]
r = github.graphql(prs_query(user, repo, cursor))
| https://github.com/zk-ml/tachikoma |
ci/scripts/pytest_ids.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import io
import argparse
from contextlib import redirect_stdout
class NodeidsCollector:
def pytest_collection_modifyitems(self, items):
self.nodeids = [item.nodeid for item in items]
def main(folder):
collector = NodeidsCollector()
f = io.StringIO()
with redirect_stdout(f):
pytest.main(["-qq", "--collect-only", folder], plugins=[collector])
for nodeid in collector.nodeids:
print(nodeid)
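# Example invocation (the folder name here is hypothetical):
#   python3 ci/scripts/pytest_ids.py --folder tests/python/unittest
# prints one collected pytest node id per line, e.g.
#   tests/python/unittest/test_foo.py::test_bar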
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="List pytest nodeids for a folder")
parser.add_argument("--folder", required=True, help="test folder to inspect")
args = parser.parse_args()
main(args.folder)
| https://github.com/zk-ml/tachikoma |
ci/scripts/pytest_wrapper.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import textwrap
import junitparser
import traceback
from pathlib import Path
from typing import List, Optional
import os
import urllib.parse
import logging
from cmd_utils import init_log
REPO_ROOT = Path(__file__).resolve().parent.parent.parent
def lstrip(s: str, prefix: str) -> str:
if s.startswith(prefix):
s = s[len(prefix) :]
return s
def classname_to_file(classname: str) -> str:
classname = lstrip(classname, "cython.")
classname = lstrip(classname, "ctypes.")
return classname.replace(".", "/") + ".py"
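# Illustrative mappings (hypothetical classnames, not from the original script):
#   classname_to_file("ctypes.tests.python.unittest.test_foo")
#       -> "tests/python/unittest/test_foo.py"
#   classname_to_file("tests.python.unittest.test_foo")
#       -> "tests/python/unittest/test_foo.py"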
def failed_test_ids() -> List[str]:
FAILURE_TYPES = (junitparser.Failure, junitparser.Error)
junit_dir = REPO_ROOT / "build" / "pytest-results"
failed_node_ids = []
for junit in junit_dir.glob("*.xml"):
xml = junitparser.JUnitXml.fromfile(str(junit))
for suite in xml:
# handle suites
for case in suite:
if case.result is None:
                    logging.warning(f"Incorrectly formatted JUnit found, result was None on {case}")
continue
if len(case.result) > 0 and isinstance(case.result[0], FAILURE_TYPES):
node_id = classname_to_file(case.classname) + "::" + case.name
failed_node_ids.append(node_id)
return list(set(failed_node_ids))
def repro_command(build_type: str, failed_node_ids: List[str]) -> Optional[str]:
"""
Parse available JUnit XML files and output a command that users can run to
reproduce CI failures locally
"""
test_args = [f"--tests {node_id}" for node_id in failed_node_ids]
test_args_str = " ".join(test_args)
return f"python3 tests/scripts/ci.py {build_type} {test_args_str}"
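# Sketch of the expected output (the build type and node id below are hypothetical):
#   repro_command("ci_cpu", ["tests/python/unittest/test_foo.py::test_bar"])
#   -> "python3 tests/scripts/ci.py ci_cpu --tests tests/python/unittest/test_foo.py::test_bar"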
def make_issue_url(failed_node_ids: List[str]) -> str:
names = [f"`{node_id}`" for node_id in failed_node_ids]
run_url = os.getenv("RUN_DISPLAY_URL", "<insert run URL>")
test_bullets = [f" - `{node_id}`" for node_id in failed_node_ids]
params = {
"labels": "test: flaky",
"title": "[Flaky Test] " + ", ".join(names),
"body": textwrap.dedent(
f"""
These tests were found to be flaky (intermittently failing on `main` or failed in a PR with unrelated changes). See [the docs](https://github.com/apache/tvm/blob/main/docs/contribute/ci.rst#handling-flaky-failures) for details.
### Tests(s)\n
"""
)
+ "\n".join(test_bullets)
+ f"\n\n### Jenkins Links\n\n - {run_url}",
}
return "https://github.com/apache/tvm/issues/new?" + urllib.parse.urlencode(params)
def show_failure_help(failed_suites: List[str]) -> None:
failed_node_ids = failed_test_ids()
if len(failed_node_ids) == 0:
return
build_type = os.getenv("PLATFORM")
if build_type is None:
raise RuntimeError("build type was None, cannot show command")
repro = repro_command(build_type=build_type, failed_node_ids=failed_node_ids)
if repro is None:
print("No test failures detected")
return
print(f"Report flaky test shortcut: {make_issue_url(failed_node_ids)}")
print("=============================== PYTEST FAILURES ================================")
print(
"These pytest suites failed to execute. The results can be found in the "
"Jenkins 'Tests' tab or by scrolling up through the raw logs here. "
"If there is no test listed below, the failure likely came from a segmentation "
"fault which you can find in the logs above.\n"
)
if failed_suites is not None and len(failed_suites) > 0:
print("\n".join([f" - {suite}" for suite in failed_suites]))
print("")
print("You can reproduce these specific failures locally with this command:\n")
print(textwrap.indent(repro, prefix=" "))
print("")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Print information about a failed pytest run")
args, other = parser.parse_known_args()
init_log()
try:
show_failure_help(failed_suites=other)
except Exception as e:
# This script shouldn't ever introduce failures since it's just there to
# add extra information, so ignore any errors
logging.exception(e)
| https://github.com/zk-ml/tachikoma |
ci/scripts/should_rebuild_docker.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import datetime
import json
import logging
import subprocess
from typing import Dict, Any, List, Tuple
from http_utils import get
from cmd_utils import Sh, init_log
DOCKER_API_BASE = "https://hub.docker.com/v2/"
PAGE_SIZE = 25
TEST_DATA = None
def docker_api(url: str) -> Dict[str, Any]:
"""
    Fetch one page of results (page_size=PAGE_SIZE) from the public Docker Hub API
"""
if TEST_DATA is not None:
return TEST_DATA[url]
pagination = f"?page_size={PAGE_SIZE}&page=1"
url = DOCKER_API_BASE + url + pagination
r, headers = get(url)
reset = headers.get("x-ratelimit-reset")
if reset is not None:
reset = datetime.datetime.fromtimestamp(int(reset))
reset = reset.isoformat()
logging.info(
f"Docker API Rate Limit: {headers.get('x-ratelimit-remaining')} / {headers.get('x-ratelimit-limit')} (reset at {reset})"
)
if "results" not in r:
raise RuntimeError(f"Error fetching data, no results found in: {r}")
return r
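# Illustrative usage (the image name below is hypothetical; the endpoint shape
# matches how this function is called in main()):
#   tags = docker_api("repositories/tlcpack/ci-cpu/tags")
#   # tags["results"] is then a list of tag dicts, e.g. [{"name": ...}, ...]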
def any_docker_changes_since(hash: str) -> Tuple[bool, str]:
    """
    Check the docker/ directory; return a (changed, diff) tuple where 'changed'
    is True if there have been any code changes since the specified hash
"""
sh = Sh()
cmd = f"git diff {hash} -- docker/"
proc = sh.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout = proc.stdout.strip()
return stdout != "", stdout
def does_commit_exist(hash: str) -> bool:
"""
Returns True if the hash exists in the repo
"""
sh = Sh()
cmd = f"git rev-parse -q {hash}"
proc = sh.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, check=False)
print(proc.stdout)
if proc.returncode == 0:
return True
if "unknown revision or path not in the working tree" in proc.stdout:
return False
raise RuntimeError(f"Unexpected failure when running: {cmd}")
def find_hash_for_tag(tag: Dict[str, Any]) -> str:
"""
Split the hash off of a name like <date>-<time>-<hash>
"""
name = tag["name"]
name_parts = name.split("-")
if len(name_parts) != 3:
raise RuntimeError(f"Image {name} is not using new naming scheme")
shorthash = name_parts[2]
return shorthash
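# For example, a tag named "20221013-060115-61c9742ea" (a hypothetical value in
# the <date>-<time>-<hash> scheme) yields the short hash "61c9742ea".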
def find_commit_in_repo(tags: Dict[str, Any]):
"""
Look through all the docker tags, find the most recent one which references
a commit that is present in the repo
"""
    for tag in tags["results"]:
        shorthash = find_hash_for_tag(tag)
        if does_commit_exist(shorthash):
            return shorthash, tag
        logging.info(f"Hash '{shorthash}' does not exist in repo")
raise RuntimeError(f"No extant hash found in tags:\n{tags}")
def main():
# Fetch all tlcpack images
images = docker_api("repositories/tlcpack")
# Ignore all non-ci images
relevant_images = [image for image in images["results"] if image["name"].startswith("ci-")]
image_names = [image["name"] for image in relevant_images]
logging.info(f"Found {len(relevant_images)} images to check: {', '.join(image_names)}")
for image in relevant_images:
# Check the tags for the image
tags = docker_api(f"repositories/tlcpack/{image['name']}/tags")
# Find the hash of the most recent tag
shorthash, tag = find_commit_in_repo(tags)
name = tag["name"]
logging.info(f"Looking for docker/ changes since {shorthash}")
any_docker_changes, diff = any_docker_changes_since(shorthash)
if any_docker_changes:
logging.info(f"Found docker changes from {shorthash} when checking {name}")
logging.info(diff)
exit(2)
logging.info("Did not find changes, no rebuild necessary")
exit(0)
if __name__ == "__main__":
init_log()
parser = argparse.ArgumentParser(
        description="Exits 0 if Docker images don't need to be rebuilt, non-zero otherwise"
)
parser.add_argument(
"--testing-docker-data",
help="(testing only) JSON data to mock response from Docker Hub API",
)
args = parser.parse_args()
if args.testing_docker_data is not None:
TEST_DATA = json.loads(args.testing_docker_data)
main()
| https://github.com/zk-ml/tachikoma |
ci/scripts/should_run_slow_tests.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import json
import argparse
import subprocess
import re
import textwrap
from urllib import request
from typing import Dict, Tuple, Any, List, Optional
from git_utils import GitHubRepo, parse_remote, git
SLOW_TEST_TRIGGERS = [
"@tvm-bot run slow tests",
"@tvm-bot run slow test",
"@tvm-bot run slow",
"@tvm-bot slow tests",
"@tvm-bot slow test",
"@tvm-bot slow",
]
def check_match(s: str, searches: List[str]) -> Tuple[bool, Optional[str]]:
for search in searches:
if search in s:
return True, search
return False, None
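# Illustrative examples (hypothetical PR bodies):
#   check_match("please @tvm-bot run slow", SLOW_TEST_TRIGGERS) -> (True, "@tvm-bot run slow")
#   check_match("LGTM", SLOW_TEST_TRIGGERS)                     -> (False, None)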
def display(long_str: str) -> str:
return textwrap.indent(long_str, " ")
if __name__ == "__main__":
help = "Exits with 1 if CI should run slow tests, 0 otherwise"
parser = argparse.ArgumentParser(description=help)
parser.add_argument("--pr", required=True)
parser.add_argument("--remote", default="origin", help="ssh remote to parse")
parser.add_argument(
"--pr-body", help="(testing) PR body to use instead of fetching from GitHub"
)
args = parser.parse_args()
branch = git(["rev-parse", "--abbrev-ref", "HEAD"])
# Don't skip slow tests on main or ci-docker-staging
skip_branches = {"main", "ci-docker-staging"}
if branch in skip_branches:
print(f"Branch {branch} is in {skip_branches}, running slow tests")
exit(1)
    print(f"Branch {branch} is not in {skip_branches}, checking PR body...")
if args.pr_body:
body = args.pr_body
else:
remote = git(["config", "--get", f"remote.{args.remote}.url"])
user, repo = parse_remote(remote)
github = GitHubRepo(token=os.environ["GITHUB_TOKEN"], user=user, repo=repo)
pr = github.get(f"pulls/{args.pr}")
body = pr["body"]
body_match, reason = check_match(body, SLOW_TEST_TRIGGERS)
if body_match:
print(f"Matched {reason} in PR body:\n{display(body)}, running slow tests")
exit(1)
print(
f"PR Body:\n{display(body)}\ndid not have any of {SLOW_TEST_TRIGGERS}, skipping slow tests"
)
exit(0)
| https://github.com/zk-ml/tachikoma |
ci/scripts/update_branch.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import json
import argparse
import tempfile
from typing import Any, Dict
from git_utils import git, GitHubRepo, parse_remote
_commit_query_fields = """
messageHeadline
oid
statusCheckRollup {
contexts(last:100) {
nodes {
... on CheckRun {
conclusion
status
name
checkSuite {
workflowRun {
workflow {
name
}
}
}
}
... on StatusContext {
context
state
}
}
}
}
"""
def commits_query(user: str, repo: str, cursor: str = None):
"""
Create a GraphQL query to find the last N commits along with their statuses
and some metadata (paginated after 'cursor')
"""
after = ""
if cursor is not None:
after = f', after:"{cursor}"'
return f"""
{{
repository(name: "{repo}", owner: "{user}") {{
defaultBranchRef {{
target {{
... on Commit {{
history(first: 15{after}) {{
edges {{ cursor }}
nodes {{
{_commit_query_fields}
}}
}}
}}
}}
}}
}}
}}
"""
def commit_passed_ci(commit: Dict[str, Any]) -> bool:
"""
Returns true if all of a commit's statuses are SUCCESS
"""
statuses = commit["statusCheckRollup"]["contexts"]["nodes"]
# GitHub Actions statuses are different from external GitHub statuses, so
# unify them into 1 representation
# https://docs.github.com/en/developers/webhooks-and-events/webhooks/webhook-events-and-payloads
unified_statuses = []
for status in statuses:
if "context" in status:
# Parse non-GHA status
unified_statuses.append((status["context"], status["state"] == "SUCCESS"))
else:
# Parse GitHub Actions item
workflow = status["checkSuite"]["workflowRun"]["workflow"]["name"]
name = f"{workflow} / {status['name']}"
unified_statuses.append((name, status["conclusion"] == "SUCCESS"))
print(f"Statuses on {commit['oid']}:", json.dumps(unified_statuses, indent=2))
# Assert that specific jobs are present in the commit statuses (i.e. don't
# approve if CI was broken and didn't schedule a job)
expected_jobs = {"tvm-ci/branch"}
job_names = {name for name, status in unified_statuses}
for job in expected_jobs:
if job not in job_names:
# Did not find expected job name
return False
passed_ci = all(status for name, status in unified_statuses)
return passed_ci
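# Illustration with hypothetical data: a commit whose statuses reduce to
# [("tvm-ci/branch", True), ("Build / lint", True)] passes, while
# [("Build / lint", True)] alone does not, since the required "tvm-ci/branch"
# job is missing from the rollup.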
def update_branch(user: str, repo: str, sha: str, branch_name: str) -> None:
git(["fetch", "origin", sha])
git(["reset", "--hard", "FETCH_HEAD"])
try:
git(["branch", "-D", branch_name])
except RuntimeError:
# Ignore failures (i.e. the branch did not exist in the first place)
pass
git(["checkout", "-b", branch_name])
# Create and push the branch
git(["push", "origin", "--force", branch_name])
print(f"Pushed branch {branch_name} with commit {sha}")
if __name__ == "__main__":
    help = "Push a branch to the last commit that passed all CI runs"
parser = argparse.ArgumentParser(description=help)
parser.add_argument("--remote", default="origin", help="ssh remote to parse")
parser.add_argument("--dry-run", action="store_true", help="don't submit to GitHub")
parser.add_argument("--branch", default="last-successful", help="branch name")
parser.add_argument(
"--testonly-json", help="(testing) data to use instead of fetching from GitHub"
)
args = parser.parse_args()
remote = git(["config", "--get", f"remote.{args.remote}.url"])
user, repo = parse_remote(remote)
# TODO: Remove this before landing
user, repo = ("apache", "tvm")
if args.testonly_json:
r = json.loads(args.testonly_json)
else:
github = GitHubRepo(token=os.environ["GITHUB_TOKEN"], user=user, repo=repo)
q = commits_query(user, repo)
r = github.graphql(q)
commits = r["data"]["repository"]["defaultBranchRef"]["target"]["history"]["nodes"]
# Limit GraphQL pagination
MAX_COMMITS_TO_CHECK = 50
i = 0
while i < MAX_COMMITS_TO_CHECK:
# Check each commit
for commit in commits:
if commit_passed_ci(commit):
print(f"Found last good commit: {commit['oid']}: {commit['messageHeadline']}")
if not args.dry_run:
update_branch(
user=user,
repo=repo,
sha=commit["oid"],
branch_name=args.branch,
)
# Nothing to do after updating the branch, exit early
exit(0)
# No good commit found, proceed to next page of results
edges = r["data"]["repository"]["defaultBranchRef"]["target"]["history"]["edges"]
if len(edges) == 0:
break
else:
q = commits_query(user, repo, cursor=edges[-1]["cursor"])
r = github.graphql(q)
commits = r["data"]["repository"]["defaultBranchRef"]["target"]["history"]["nodes"]
# Backstop to prevent looking through all the past commits
i += len(commits)
    print(f"No good commits found in the last {i} commits checked")
exit(1)
| https://github.com/zk-ml/tachikoma |
conda/render_cuda_dockerfiles.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import sys
import subprocess
from jinja2 import Template
CUDA_VERSIONS = ["10.0", "9.0"]
# Make sure that the cudnn version you set here is available
# for all the cuda versions that you want both from nvidia
# and from conda.
# These two must be in sync
CUDNN_FULL_VERSION = "7.6.0.64"
CUDNN_VERSION = "7.6.0"
condadir = os.path.dirname(sys.argv[0])
condadir = os.path.abspath(condadir)
srcdir = os.path.dirname(condadir)
with open(os.path.join(condadir, "Dockerfile.template")) as f:
docker_template = Template(f.read())
def render_dockerfile(version):
txt = docker_template.render(
cuda_version=version, cudnn_short_version=CUDNN_VERSION, cudnn_version=CUDNN_FULL_VERSION
)
fname = os.path.join(condadir, "../docker/Dockerfile.conda_cuda" + version.replace(".", ""))
with open(fname, "w") as f:
f.write(txt + "\n")
return fname
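# For example, render_dockerfile("10.0") writes ../docker/Dockerfile.conda_cuda100
# (relative to this directory) and returns that path.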
if __name__ == "__main__":
build_versions = CUDA_VERSIONS
if len(sys.argv) > 1:
build_versions = sys.argv[1:]
for version in build_versions:
render_dockerfile(version)
| https://github.com/zk-ml/tachikoma |
conftest.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import hashlib
import pytest
import sys
import os
from pathlib import Path
pytest_plugins = ["tvm.testing.plugin"]
IS_IN_CI = os.getenv("CI", "") == "true"
REPO_ROOT = Path(__file__).resolve().parent
# These are long running tests (manually curated and extracted from CI logs)
# that should be allocated to test shards in a round-robin fashion. They are
# the 20 slowest tests (an arbitrary cutoff) taken from
# https://ci.tlcpack.ai/job/tvm/job/main/2907/testReport
_slowest_tests = [
"tests/python/frontend/tensorflow/test_forward.py::test_forward_broadcast_args",
"tests/python/frontend/tensorflow/test_forward.py::test_forward_broadcast_to",
"tests/python/topi/python/test_topi_conv2d_int8.py::test_conv2d_nchw[int8]",
"tests/python/topi/python/test_topi_conv2d_int8.py::test_conv2d_nchw[uint8]",
"tests/python/topi/python/test_topi_upsampling.py::test_upsampling3d",
"tests/python/topi/python/test_topi_upsampling.py::test_upsampling3d",
"tests/python/topi/python/test_topi_conv2d_int8.py::test_conv2d_nchw[int8]",
"tests/python/frontend/tflite/test_forward.py::test_all_elemwise",
"tests/python/frontend/pytorch/test_object_detection.py::test_detection_models",
"tests/python/topi/python/test_topi_conv2d_int8.py::test_conv2d_nchw[uint8]",
"tests/python/topi/python/test_topi_conv2d_NCHWc.py::test_conv2d_NCHWc",
"tests/python/topi/python/test_topi_conv2d_hwnc_tensorcore.py::test_conv2d_hwnc_tensorcore",
"tests/python/contrib/test_tensorrt.py::test_binary[compile]",
"tests/python/frontend/pytorch/test_forward.py::test_segmentation_models",
"tests/python/topi/python/test_topi_conv2d_NCHWc.py::test_conv2d_NCHWc",
"tests/python/relay/test_py_converter.py::test_global_recursion",
"tests/python/frontend/tensorflow/test_forward.py::test_forward_ptb",
"tests/python/relay/test_op_level6.py::test_topk",
"tests/python/topi/python/test_topi_conv2d_winograd.py::test_conv2d_nchw",
"tests/python/relay/test_py_converter.py::test_global_recursion",
]
HARDCODED_ALLOCATIONS = {}
for idx, test in enumerate(_slowest_tests):
HARDCODED_ALLOCATIONS[test] = idx
# These rely on running on the same node to pass successfully
FIXED_ALLOCATION_PREFIXES = {
"tests/python/unittest/test_tvm_testing_features.py": 0,
}
def find_shard_index(nodeid: str, num_shards: int) -> int:
"""
Return the index of the shard that should run this test
"""
for prefix, target_shard_idx in FIXED_ALLOCATION_PREFIXES.items():
if nodeid.startswith(prefix):
if target_shard_idx >= num_shards:
raise RuntimeError(
f"Cannot collect sharded tests, {nodeid} has hardcoded shard index {target_shard_idx} among only {num_shards} shards"
)
return target_shard_idx
if nodeid in HARDCODED_ALLOCATIONS:
hash = HARDCODED_ALLOCATIONS[nodeid]
else:
hash = hashlib.md5(nodeid.encode())
hash = int(hash.hexdigest(), 16)
return hash % num_shards
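# Sketch of the behaviour (hypothetical values): with num_shards=4, a nodeid in
# HARDCODED_ALLOCATIONS at index 6 is assigned to shard 6 % 4 == 2; any other
# nodeid is assigned via the md5 hash of its name, so the mapping is stable
# across machines and runs.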
def pytest_collection_modifyitems(config, items):
if not all(k in os.environ for k in ["CI", "TVM_NUM_SHARDS", "TVM_SHARD_INDEX"]):
# Only apportion tests if in CI and in a job that is set up for it
return
num_shards = int(os.environ["TVM_NUM_SHARDS"])
shard_index = int(os.environ["TVM_SHARD_INDEX"])
print(f"Marking tests for shard {shard_index} of {num_shards}")
items_copy = list(items)
for item in items_copy:
item_shard_index = find_shard_index(item.nodeid, num_shards=num_shards)
if item_shard_index != shard_index:
items.remove(item)
def pytest_sessionstart():
if IS_IN_CI:
hook_script_dir = REPO_ROOT / "tests" / "scripts" / "request_hook"
sys.path.append(str(hook_script_dir))
import request_hook # pylint: disable=import-outside-toplevel
request_hook.init()
| https://github.com/zk-ml/tachikoma |
docs/conf.py | # -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# documentation build configuration file, created by
# sphinx-quickstart on Thu Jul 23 19:40:08 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import gc
import inspect
import os
from pathlib import Path
import re
import sys
import sphinx_gallery
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
curr_path = Path(__file__).expanduser().absolute().parent
if curr_path.name == "_staging":
# Can't use curr_path.parent, because sphinx_gallery requires a relative path.
tvm_path = Path(os.pardir, os.pardir)
else:
tvm_path = Path(os.pardir)
sys.path.insert(0, str(tvm_path.resolve() / "python"))
sys.path.insert(0, str(tvm_path.resolve() / "vta" / "python"))
sys.path.insert(0, str(tvm_path.resolve() / "docs"))
# -- General configuration ------------------------------------------------
# General information about the project.
project = "tvm"
author = "Apache Software Foundation"
copyright = "2020 - 2022, %s" % author
github_doc_root = "https://github.com/apache/tvm/tree/main/docs/"
os.environ["TVM_BUILD_DOC"] = "1"
def git_describe_version(original_version):
"""Get git describe version."""
ver_py = tvm_path.joinpath("version.py")
libver = {"__file__": ver_py}
exec(compile(open(ver_py, "rb").read(), ver_py, "exec"), libver, libver)
_, gd_version = libver["git_describe_version"]()
if gd_version != original_version:
print("Use git describe based version %s" % gd_version)
return gd_version
# Version information.
import tvm
from tvm import topi
from tvm import te
from tvm import testing
version = git_describe_version(tvm.__version__)
release = version
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.intersphinx",
"sphinx.ext.napoleon",
"sphinx.ext.mathjax",
"sphinx_gallery.gen_gallery",
"autodocsumm",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = [".rst", ".md"]
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# generate autosummary even if no references
autosummary_generate = True
# The main toctree document.
main_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build", "_staging"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme is set by the make target
html_theme = os.environ.get("TVM_THEME", "rtd")
on_rtd = os.environ.get("READTHEDOCS", None) == "True"
# only import rtd theme and set it if want to build docs locally
if not on_rtd and html_theme == "rtd":
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
html_theme_options = {
"analytics_id": "UA-75982049-2",
"logo_only": True,
}
html_logo = "_static/img/tvm-logo-small.png"
html_favicon = "_static/img/tvm-logo-square.png"
# Output file base name for HTML help builder.
htmlhelp_basename = project + "doc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(main_doc, "%s.tex" % project, project, author, "manual"),
]
intersphinx_mapping = {
"python": ("https://docs.python.org/{.major}".format(sys.version_info), None),
# "numpy": ("https://numpy.org/doc/stable", None),
# "scipy": ("https://docs.scipy.org/doc/scipy", None),
# "matplotlib": ("https://matplotlib.org/", None),
}
from sphinx_gallery.sorting import ExplicitOrder
examples_dirs = [
tvm_path.joinpath("gallery", "tutorial"),
tvm_path.joinpath("gallery", "how_to", "compile_models"),
tvm_path.joinpath("gallery", "how_to", "deploy_models"),
tvm_path.joinpath("gallery", "how_to", "work_with_relay"),
tvm_path.joinpath("gallery", "how_to", "work_with_schedules"),
tvm_path.joinpath("gallery", "how_to", "optimize_operators"),
tvm_path.joinpath("gallery", "how_to", "tune_with_autotvm"),
tvm_path.joinpath("gallery", "how_to", "tune_with_autoscheduler"),
tvm_path.joinpath("gallery", "how_to", "work_with_microtvm"),
tvm_path.joinpath("gallery", "how_to", "extend_tvm"),
tvm_path.joinpath("vta", "tutorials"),
]
gallery_dirs = [
"tutorial",
"how_to/compile_models",
"how_to/deploy_models",
"how_to/work_with_relay",
"how_to/work_with_schedules",
"how_to/optimize_operators",
"how_to/tune_with_autotvm",
"how_to/tune_with_autoscheduler",
"how_to/work_with_microtvm",
"how_to/extend_tvm",
"topic/vta/tutorials",
]
subsection_order = ExplicitOrder(
str(p)
for p in [
tvm_path / "vta" / "tutorials" / "frontend",
tvm_path / "vta" / "tutorials" / "optimize",
tvm_path / "vta" / "tutorials" / "autotvm",
]
)
# Explicitly define the order within a subsection.
# The listed files are sorted according to the list.
# The unlisted files are sorted by filenames.
# The unlisted files always appear after listed files.
within_subsection_order = {
    "tutorial": [
        "introduction.py",
        "install.py",
        "tvmc_command_line_driver.py",
        "tvmc_python.py",
        "autotvm_relay_x86.py",
        "tensor_expr_get_started.py",
        "autotvm_matmul_x86.py",
        "auto_scheduler_matmul_x86.py",
        "tensor_ir_blitz_course.py",
        "intro_topi.py",
"cross_compilation_and_rpc.py",
"relay_quick_start.py",
"uma.py",
],
"compile_models": [
"from_pytorch.py",
"from_tensorflow.py",
"from_mxnet.py",
"from_onnx.py",
"from_keras.py",
"from_tflite.py",
"from_coreml.py",
"from_darknet.py",
"from_caffe2.py",
"from_paddle.py",
],
"work_with_schedules": [
"schedule_primitives.py",
"reduction.py",
"intrin_math.py",
"scan.py",
"extern_op.py",
"tensorize.py",
"tuple_inputs.py",
"tedd.py",
],
"optimize_operators": [
"opt_gemm.py",
"opt_conv_cuda.py",
"opt_conv_tensorcore.py",
],
"tune_with_autotvm": [
"tune_conv2d_cuda.py",
"tune_relay_cuda.py",
"tune_relay_x86.py",
"tune_relay_arm.py",
"tune_relay_mobile_gpu.py",
],
"tune_with_autoscheduler": [
"tune_matmul_x86.py",
"tune_conv2d_layer_cuda.py",
"tune_network_x86.py",
"tune_network_cuda.py",
],
"extend_tvm": [
"low_level_custom_pass.py",
"use_pass_infra.py",
"use_pass_instrument.py",
"bring_your_own_datatypes.py",
    ],
    "work_with_microtvm": [
"micro_train.py",
"micro_autotune.py",
"micro_reference_vm.py",
"micro_tflite.py",
"micro_ethosu.py",
"micro_tvmc.py",
"micro_aot.py",
"micro_pytorch.py",
],
}
class WithinSubsectionOrder:
def __init__(self, src_dir):
self.src_dir = src_dir.split("/")[-1]
def __call__(self, filename):
# If the order is provided, use the provided order
if (
self.src_dir in within_subsection_order
and filename in within_subsection_order[self.src_dir]
):
index = within_subsection_order[self.src_dir].index(filename)
assert index < 1e10
return "\0%010d" % index
# Otherwise, sort by filename
return filename
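# For example (hypothetical call), WithinSubsectionOrder("gallery/tutorial")("install.py")
# returns "\0" followed by "0000000001", which sorts before any plain filename,
# while an unlisted file such as "zzz_extra.py" simply sorts by its own name.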
# When running the tutorials on GPUs we are dependent on the Python garbage collector
# collecting TVM packed function closures for any device memory to also be released. This
# is not a good setup for machines with lots of CPU ram but constrained GPU ram, so force
# a gc after each example.
def force_gc(gallery_conf, fname):
gc.collect()
sphinx_gallery_conf = {
"backreferences_dir": "gen_modules/backreferences",
"doc_module": ("tvm", "numpy"),
"reference_url": {
"tvm": None,
# "matplotlib": "https://matplotlib.org/",
# "numpy": "https://numpy.org/doc/stable",
},
"examples_dirs": examples_dirs,
"within_subsection_order": WithinSubsectionOrder,
"gallery_dirs": gallery_dirs,
"subsection_order": subsection_order,
"filename_pattern": os.environ.get("TVM_TUTORIAL_EXEC_PATTERN", ".py"),
"download_all_examples": False,
"min_reported_time": 60,
"expected_failing_examples": [],
"reset_modules": ("matplotlib", "seaborn", force_gc),
"promote_jupyter_magic": True,
}
autodoc_default_options = {
"member-order": "bysource",
}
# Maps the original namespace to list of potential modules
# that we can import alias from.
tvm_alias_check_map = {
"tvm.te": ["tvm.tir"],
"tvm.tir": ["tvm.ir", "tvm.runtime"],
"tvm.relay": ["tvm.ir", "tvm.tir"],
}
## Setup header and other configs
import tlcpack_sphinx_addon
footer_copyright = "© 2022 Apache Software Foundation | All rights reserved"
footer_note = " ".join(
"""
Copyright © 2022 The Apache Software Foundation. Apache TVM, Apache, the Apache feather,
and the Apache TVM project logo are either trademarks or registered trademarks of
the Apache Software Foundation.""".split(
"\n"
)
).strip()
header_logo = "https://tvm.apache.org/assets/images/logo.svg"
header_logo_link = "https://tvm.apache.org/"
header_links = [
("Community", "https://tvm.apache.org/community"),
("Download", "https://tvm.apache.org/download"),
("VTA", "https://tvm.apache.org/vta"),
("Blog", "https://tvm.apache.org/blog"),
("Docs", "https://tvm.apache.org/docs"),
("Conference", "https://tvmconf.org"),
("Github", "https://github.com/apache/tvm/"),
]
header_dropdown = {
"name": "ASF",
"items": [
("Apache Homepage", "https://apache.org/"),
("License", "https://www.apache.org/licenses/"),
("Sponsorship", "https://www.apache.org/foundation/sponsorship.html"),
("Security", "https://www.apache.org/security/"),
("Thanks", "https://www.apache.org/foundation/thanks.html"),
("Events", "https://www.apache.org/events/current-event"),
],
}
def fixup_tutorials(original_url: str) -> str:
if "docs/tutorial" in original_url:
# tutorials true source is in Python or .txt files, but Sphinx only sees
# the generated .rst files so this maps them back to the source
if original_url.endswith("index.rst"):
# for index pages, go to the README files
return re.sub(
r"docs/tutorial/(.*)index\.rst", "gallery/tutorial/\\1README.txt", original_url
)
else:
# otherwise for tutorials, redirect to python files
return re.sub(r"docs/tutorial/(.*)\.rst", "gallery/tutorial/\\1.py", original_url)
else:
# do nothing for normal non-tutorial .rst files
return original_url
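# Illustrative mappings (hypothetical paths):
#   "docs/tutorial/index.rst"   -> "gallery/tutorial/README.txt"
#   "docs/tutorial/install.rst" -> "gallery/tutorial/install.py"
#   "docs/arch/runtime.rst"     -> unchanged (not a tutorial page)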
html_context = {
"footer_copyright": footer_copyright,
"footer_note": footer_note,
"header_links": header_links,
"header_dropdown": header_dropdown,
"header_logo": header_logo,
"header_logo_link": header_logo_link,
"version_prefixes": ["main", "v0.8.0/", "v0.9.0/", "v0.10.0/"],
"display_github": True,
"github_user": "apache",
"github_repo": "tvm",
"github_version": "main/docs/",
"theme_vcs_pageview_mode": "edit",
"edit_link_hook_fn": fixup_tutorials,
}
# add additional overrides
templates_path += [tlcpack_sphinx_addon.get_templates_path()]
html_static_path += [tlcpack_sphinx_addon.get_static_path()]
def update_alias_docstring(name, obj, lines):
"""Update the docstring of alias functions.
This function checks if the obj is an alias of another documented object
in a different module.
If it is an alias, then it will append the alias information to the docstring.
Parameters
----------
name : str
The full name of the object in the doc.
obj : object
The original object.
lines : list
The docstring lines, need to be modified inplace.
"""
arr = name.rsplit(".", 1)
if len(arr) != 2:
return
target_mod, target_name = arr
if target_mod not in tvm_alias_check_map:
return
if not hasattr(obj, "__module__"):
return
obj_mod = obj.__module__
for amod in tvm_alias_check_map[target_mod]:
if not obj_mod.startswith(amod):
continue
if hasattr(sys.modules[amod], target_name):
obj_type = ":py:func" if callable(obj) else ":py:class"
lines.append(".. rubric:: Alias of %s:`%s.%s`" % (obj_type, amod, target_name))
def process_docstring(app, what, name, obj, options, lines):
"""Sphinx callback to process docstring"""
if callable(obj) or inspect.isclass(obj):
update_alias_docstring(name, obj, lines)
from legacy_redirect import build_legacy_redirect
def setup(app):
app.connect("autodoc-process-docstring", process_docstring)
app.connect("build-finished", build_legacy_redirect(tvm_path))
| https://github.com/zk-ml/tachikoma |
docs/legacy_redirect.py | # -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from string import Template
import json
import os
legacy_redirects = [
["dev/benchmark.html", "../arch/benchmark.html"],
["dev/convert_layout.html", "../arch/convert_layout.html"],
["dev/debugger.html", "../arch/debugger.html"],
["dev/device_target_interactions.html", "../arch/device_target_interactions.html"],
["dev/frontend/tensorflow.html", "../../arch/frontend/tensorflow.html"],
["dev/hybrid_script.html", "../arch/hybrid_script.html"],
["dev/index.html", "../arch/index.html"],
["dev/inferbound.html", "../arch/inferbound.html"],
[
"dev/introduction_to_module_serialization.html",
"../arch/introduction_to_module_serialization.html",
],
["dev/microtvm_design.html", "../arch/microtvm_design.html"],
["dev/model_library_format.html", "../arch/model_library_format.html"],
["dev/pass_infra.html", "../arch/pass_infra.html"],
["dev/relay_intro.html", "../arch/relay_intro.html"],
["dev/relay_op_strategy.html", "../arch/relay_op_strategy.html"],
["dev/runtime.html", "../arch/runtime.html"],
["dev/runtimes/vulkan.html", "../../arch/runtimes/vulkan.html"],
["dev/security.html", "../arch/security.html"],
["dev/virtual_machine.html", "../arch/virtual_machine.html"],
["dev/how_to.html", "index.html"],
["dev/pytest_target_parametrization.html", "how_to/pytest_target_parametrization.html"],
["dev/relay_add_op.html", "how_to/relay_add_op.html"],
["dev/relay_add_pass.html", "how_to/relay_add_pass.html"],
["dev/relay_bring_your_own_codegen.html", "how_to/relay_bring_your_own_codegen.html"],
["dev/codebase_walkthrough.html", "tutorial/codebase_walkthrough.html"],
["deploy/android.html", "../how_to/deploy/android.html"],
["deploy/arm_compute_lib.html", "../how_to/deploy/arm_compute_lib.html"],
["deploy/bnns.html", "../how_to/deploy/bnns.html"],
["deploy/cpp_deploy.html", "../how_to/deploy/cpp_deploy.html"],
["deploy/hls.html", "../how_to/deploy/hls.html"],
["deploy/index.html", "../how_to/deploy/index.html"],
["deploy/integrate.html", "../how_to/deploy/integrate.html"],
["deploy/tensorrt.html", "../how_to/deploy/tensorrt.html"],
["deploy/vitis_ai.html", "../how_to/deploy/vitis_ai.html"],
["profiling/index.html", "../how_to/profile/index.html"],
["profiling/papi.html", "../how_to/profile/papi.html"],
["api/links.html", "../reference/api/links.html"],
["api/python/auto_scheduler.html", "../../reference/api/python/auto_scheduler.html"],
["api/python/autotvm.html", "../../reference/api/python/autotvm.html"],
["api/python/contrib.html", "../../reference/api/python/contrib.html"],
["api/python/driver.html", "../../reference/api/python/driver.html"],
["api/python/error.html", "../../reference/api/python/error.html"],
["api/python/graph_executor.html", "../../reference/api/python/graph_executor.html"],
["api/python/index.html", "../../reference/api/python/index.html"],
["api/python/ir.html", "../../reference/api/python/ir.html"],
["api/python/micro.html", "../../reference/api/python/micro.html"],
["api/python/ndarray.html", "../../reference/api/python/ndarray.html"],
["api/python/relay/analysis.html", "../../../reference/api/python/relay/analysis.html"],
["api/python/relay/backend.html", "../../../reference/api/python/relay/backend.html"],
[
"api/python/relay/dataflow_pattern.html",
"../../../reference/api/python/relay/dataflow_pattern.html",
],
["api/python/relay/frontend.html", "../../../reference/api/python/relay/frontend.html"],
["api/python/relay/image.html", "../../../reference/api/python/relay/image.html"],
["api/python/relay/index.html", "../../../reference/api/python/relay/index.html"],
["api/python/relay/nn.html", "../../../reference/api/python/relay/nn.html"],
["api/python/relay/testing.html", "../../../reference/api/python/relay/testing.html"],
["api/python/relay/transform.html", "../../../reference/api/python/relay/transform.html"],
["api/python/relay/vision.html", "../../../reference/api/python/relay/vision.html"],
["api/python/rpc.html", "../../reference/api/python/rpc.html"],
["api/python/runtime.html", "../../reference/api/python/runtime.html"],
["api/python/target.html", "../../reference/api/python/target.html"],
["api/python/te.html", "../../reference/api/python/te.html"],
["api/python/tir.html", "../../reference/api/python/tir.html"],
["api/python/topi.html", "../../reference/api/python/topi.html"],
["api/python/vta/index.html", "../../../reference/api/python/vta/index.html"],
["langref/hybrid_script.html", "../reference/langref/hybrid_script.html"],
["langref/index.html", "../reference/langref/index.html"],
["langref/relay_adt.html", "../reference/langref/relay_adt.html"],
["langref/relay_expr.html", "../reference/langref/relay_expr.html"],
["langref/relay_op.html", "../reference/langref/relay_op.html"],
["langref/relay_pattern.html", "../reference/langref/relay_pattern.html"],
["langref/relay_type.html", "../reference/langref/relay_type.html"],
["microtvm/index.html", "../topic/microtvm/index.html"],
["vta/dev/config.html", "../../topic/vta/dev/config.html"],
["vta/dev/hardware.html", "../../topic/vta/dev/hardware.html"],
["vta/dev/index.html", "../../topic/vta/dev/index.html"],
["vta/index.html", "../topic/vta/index.html"],
["vta/install.html", "../topic/vta/install.html"],
["tutorials/index.html", "../tutorial/index.html"],
["tutorials/frontend/from_caffe2.html", "../../how_to/compile_models/from_caffe2.html"],
["tutorials/frontend/from_coreml.html", "../../how_to/compile_models/from_coreml.html"],
["tutorials/frontend/from_darknet.html", "../../how_to/compile_models/from_darknet.html"],
["tutorials/frontend/from_keras.html", "../../how_to/compile_models/from_keras.html"],
["tutorials/frontend/from_mxnet.html", "../../how_to/compile_models/from_mxnet.html"],
["tutorials/frontend/from_onnx.html", "../../how_to/compile_models/from_onnx.html"],
["tutorials/frontend/from_paddle.html", "../../how_to/compile_models/from_paddle.html"],
["tutorials/frontend/from_pytorch.html", "../../how_to/compile_models/from_pytorch.html"],
["tutorials/frontend/from_tensorflow.html", "../../how_to/compile_models/from_tensorflow.html"],
["tutorials/frontend/from_tflite.html", "../../how_to/compile_models/from_tflite.html"],
[
"tutorials/frontend/deploy_model_on_android.html",
"../../how_to/deploy_models/deploy_model_on_android.html",
],
[
"tutorials/frontend/deploy_model_on_rasp.html",
"../../how_to/deploy_models/deploy_model_on_rasp.html",
],
[
"tutorials/frontend/deploy_object_detection_pytorch.html",
"../../how_to/deploy_models/deploy_object_detection_pytorch.html",
],
[
"tutorials/frontend/deploy_prequantized.html",
"../../how_to/deploy_models/deploy_prequantized.html",
],
[
"tutorials/frontend/deploy_prequantized_tflite.html",
"../../how_to/deploy_models/deploy_prequantized_tflite.html",
],
[
"tutorials/frontend/deploy_quantized.html",
"../../how_to/deploy_models/deploy_quantized.html",
],
["tutorials/frontend/deploy_sparse.html", "../../how_to/deploy_models/deploy_sparse.html"],
[
"tutorials/frontend/deploy_ssd_gluoncv.html",
"../../how_to/deploy_models/deploy_ssd_gluoncv.html",
],
[
"tutorials/dev/bring_your_own_datatypes.html",
"../../how_to/extend_tvm/bring_your_own_datatypes.html",
],
[
"tutorials/dev/low_level_custom_pass.html",
"../../how_to/extend_tvm/low_level_custom_pass.html",
],
["tutorials/dev/use_pass_infra.html", "../../how_to/extend_tvm/use_pass_infra.html"],
["tutorials/dev/use_pass_instrument.html", "../../how_to/extend_tvm/use_pass_instrument.html"],
["tutorials/optimize/opt_conv_cuda.html", "../../how_to/optimize_operators/opt_conv_cuda.html"],
[
"tutorials/optimize/opt_conv_tensorcore.html",
"../../how_to/optimize_operators/opt_conv_tensorcore.html",
],
["tutorials/optimize/opt_gemm.html", "../../how_to/optimize_operators/opt_gemm.html"],
[
"tutorials/auto_scheduler/tune_conv2d_layer_cuda.html",
"../../how_to/tune_with_autoscheduler/tune_conv2d_layer_cuda.html",
],
[
"tutorials/auto_scheduler/tune_network_arm.html",
"../../how_to/tune_with_autoscheduler/tune_network_arm.html",
],
[
"tutorials/auto_scheduler/tune_network_cuda.html",
"../../how_to/tune_with_autoscheduler/tune_network_cuda.html",
],
[
"tutorials/auto_scheduler/tune_network_mali.html",
"../../how_to/tune_with_autoscheduler/tune_network_mali.html",
],
[
"tutorials/auto_scheduler/tune_network_x86.html",
"../../how_to/tune_with_autoscheduler/tune_network_x86.html",
],
[
"tutorials/auto_scheduler/tune_sparse_x86.html",
"../../how_to/tune_with_autoscheduler/tune_sparse_x86.html",
],
[
"tutorials/autotvm/tune_conv2d_cuda.html",
"../../how_to/tune_with_autotvm/tune_conv2d_cuda.html",
],
["tutorials/autotvm/tune_relay_arm.html", "../../how_to/tune_with_autotvm/tune_relay_arm.html"],
[
"tutorials/autotvm/tune_relay_cuda.html",
"../../how_to/tune_with_autotvm/tune_relay_cuda.html",
],
[
"tutorials/autotvm/tune_relay_mobile_gpu.html",
"../../how_to/tune_with_autotvm/tune_relay_mobile_gpu.html",
],
["tutorials/autotvm/tune_relay_x86.html", "../../how_to/tune_with_autotvm/tune_relay_x86.html"],
["tutorials/micro/micro_autotune.html", "../../how_to/work_with_microtvm/micro_autotune.html"],
[
"tutorials/micro/micro_reference_vm.html",
"../../how_to/work_with_microtvm/micro_reference_vm.html",
],
["tutorials/micro/micro_tflite.html", "../../how_to/work_with_microtvm/micro_tflite.html"],
["tutorials/frontend/build_gcn.html", "../../how_to/work_with_relay/build_gcn.html"],
[
"tutorials/frontend/using_external_lib.html",
"../../how_to/work_with_relay/using_external_lib.html",
],
["tutorials/language/extern_op.html", "../../how_to/work_with_schedules/extern_op.html"],
["tutorials/language/intrin_math.html", "../../how_to/work_with_schedules/intrin_math.html"],
["tutorials/language/reduction.html", "../../how_to/work_with_schedules/reduction.html"],
["tutorials/language/scan.html", "../../how_to/work_with_schedules/scan.html"],
[
"tutorials/language/schedule_primitives.html",
"../../how_to/work_with_schedules/schedule_primitives.html",
],
["tutorials/language/tedd.html", "../../how_to/work_with_schedules/tedd.html"],
["tutorials/language/tensorize.html", "../../how_to/work_with_schedules/tensorize.html"],
["tutorials/language/tuple_inputs.html", "../../how_to/work_with_schedules/tuple_inputs.html"],
[
"tutorials/get_started/auto_scheduler_matmul_x86.html",
"../../tutorial/auto_scheduler_matmul_x86.html",
],
["tutorials/get_started/autotvm_matmul_x86.html", "../../tutorial/autotvm_matmul_x86.html"],
["tutorials/get_started/autotvm_relay_x86.html", "../../tutorial/autotvm_relay_x86.html"],
[
"tutorials/get_started/cross_compilation_and_rpc.html",
"../../tutorial/cross_compilation_and_rpc.html",
],
["tutorials/get_started/install.html", "../../tutorial/install.html"],
["tutorials/topi/intro_topi.html", "../../tutorial/intro_topi.html"],
["tutorials/get_started/introduction.html", "../../tutorial/introduction.html"],
["tutorials/get_started/relay_quick_start.html", "../../tutorial/relay_quick_start.html"],
[
"tutorials/get_started/tensor_expr_get_started.html",
"../../tutorial/tensor_expr_get_started.html",
],
[
"tutorials/get_started/tvmc_command_line_driver.html",
"../../tutorial/tvmc_command_line_driver.html",
],
[
"tutorials/get_started/tvmc_python.html",
"../../tutorial/tvmc_python.html",
],
]
redirect_template = """
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="refresh" content="1; url=$to" />
<script>
window.location.href = "$to"
</script>
</head>
</html>
"""
def build_legacy_redirect(tvm_path):
def legacy_redirect(app, docname): # Sphinx expects two arguments
if app.builder.name == "html":
src = Template(redirect_template)
for frm, to in legacy_redirects:
frm = tvm_path.resolve() / "docs" / "_build" / "html" / frm
redirect = src.substitute({"to": to})
os.makedirs(os.path.dirname(frm), exist_ok=True)
with open(frm, "w") as f:
f.write(redirect)
return legacy_redirect
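# As an illustration, the first entry of legacy_redirects above causes this hook
# to write docs/_build/html/dev/benchmark.html containing a meta-refresh/JS
# redirect to "../arch/benchmark.html" whenever the HTML builder finishes.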
| https://github.com/zk-ml/tachikoma |
docs/script_convert.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import pathlib
BASH = "# bash"
BASH_IGNORE = "# bash-ignore"
BASH_MULTILINE_COMMENT_START = ": '"
BASH_MULTILINE_COMMENT_END = "'"
def bash_to_python(src_path: pathlib.Path, dest_path: pathlib.Path):
"""Convert a bash script file to a Python format compatible with Sphinx doc."""
with open(src_path, "r") as src_f:
with open(dest_path, "w") as dest_f:
line = src_f.readline()
bash_block = []
bash_detected = False
bash_ignore_detected = False
new_line_required = False
while line:
line = line.strip("\n").strip("\r")
if bash_detected:
if line == BASH:
# write the bash block to destination
if new_line_required:
dest_f.write("\n")
python_code = "# .. code-block:: bash\n#\n"
for bash_line in bash_block:
python_code += f"#\t {bash_line}\n"
python_code += "#"
dest_f.write(python_code)
bash_detected = False
bash_block = []
new_line_required = True
else:
# add new bash command line
bash_block.append(line)
elif bash_ignore_detected:
if line == BASH_IGNORE:
bash_ignore_detected = False
new_line_required = True
else:
                        new_line_required = False
else:
if line == BASH:
bash_detected = True
elif line == BASH_IGNORE:
bash_ignore_detected = True
elif line in [BASH_MULTILINE_COMMENT_START, BASH_MULTILINE_COMMENT_END]:
if new_line_required:
dest_f.write("\n")
dest_f.write('"""')
new_line_required = True
else:
if new_line_required:
dest_f.write("\n")
dest_f.write(f"{line}")
new_line_required = True
line = src_f.readline()
if new_line_required:
dest_f.write("\n")
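# Sketch of the conversion (hypothetical input): a block delimited by "# bash"
# lines and containing "export FOO=1" is emitted into the generated .py file as
#   # .. code-block:: bash
#   #
#   #<tab> export FOO=1
#   #
# while the ": '" / "'" multi-line comment markers are rewritten as '"""' fences.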
def main():
parser = argparse.ArgumentParser(description="Convert tutorial script to Python.")
parser.add_argument("script", type=str, help="Path to script file.")
args = parser.parse_args()
src_path = pathlib.Path(args.script)
dest_path = src_path.parent / f"{src_path.stem}.py"
bash_to_python(src_path, dest_path)
if __name__ == "__main__":
main()
| https://github.com/zk-ml/tachikoma |
gallery/how_to/compile_models/from_coreml.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Compile CoreML Models
=====================
**Author**: `Joshua Z. Zhang <https://zhreshold.github.io/>`_, \
`Kazutaka Morita <https://github.com/kazum>`_, \
`Zhao Wu <https://github.com/FrozenGene>`_
This article is an introductory tutorial to deploy CoreML models with Relay.
To begin, the coremltools module is required to be installed.
A quick solution is to install it via pip:
.. code-block:: bash
    pip install -U coremltools --user
or refer to the official site:
https://github.com/apple/coremltools
"""
# sphinx_gallery_start_ignore
from tvm import testing
testing.utils.install_request_hook(depth=3)
# sphinx_gallery_end_ignore
import tvm
from tvm import te
import tvm.relay as relay
from tvm.contrib.download import download_testdata
import coremltools as cm
import numpy as np
from PIL import Image
######################################################################
# Load pretrained CoreML model
# ----------------------------
# We will download and load a pretrained MobileNet classification network
# provided by Apple in this example
model_url = "https://docs-assets.developer.apple.com/coreml/models/MobileNet.mlmodel"
model_file = "mobilenet.mlmodel"
model_path = download_testdata(model_url, model_file, module="coreml")
# Now you have mobilenet.mlmodel on disk
mlmodel = cm.models.MLModel(model_path)
######################################################################
# Load a test image
# ------------------
# A single cat dominates the examples!
img_url = "https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true"
img_path = download_testdata(img_url, "cat.png", module="data")
img = Image.open(img_path).resize((224, 224))
# Mobilenet.mlmodel's input is in BGR format
img_bgr = np.array(img)[:, :, ::-1]
x = np.transpose(img_bgr, (2, 0, 1))[np.newaxis, :]
######################################################################
# Compile the model on Relay
# ---------------------------
# We should be familiar with the process right now.
target = "llvm"
shape_dict = {"image": x.shape}
# Parse CoreML model and convert into Relay computation graph
mod, params = relay.frontend.from_coreml(mlmodel, shape_dict)
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(mod, target, params=params)
######################################################################
# Execute on TVM
# -------------------
# The process is no different from other examples.
from tvm.contrib import graph_executor
dev = tvm.cpu(0)
dtype = "float32"
m = graph_executor.GraphModule(lib["default"](dev))
# set inputs
m.set_input("image", tvm.nd.array(x.astype(dtype)))
# execute
m.run()
# get outputs
tvm_output = m.get_output(0)
top1 = np.argmax(tvm_output.numpy()[0])
#####################################################################
# Look up synset name
# -------------------
# Look up prediction top 1 index in 1000 class synset.
synset_url = "".join(
[
"https://gist.githubusercontent.com/zhreshold/",
"4d0b62f3d01426887599d4f7ede23ee5/raw/",
"596b27d23537e5a1b5751d2b0481ef172f58b539/",
"imagenet1000_clsid_to_human.txt",
]
)
synset_name = "imagenet1000_clsid_to_human.txt"
synset_path = download_testdata(synset_url, synset_name, module="data")
with open(synset_path) as f:
synset = eval(f.read())
# You should see the following result: Top-1 id 282 class name tiger cat
print("Top-1 id", top1, "class name", synset[top1])
| https://github.com/zk-ml/tachikoma |
gallery/how_to/compile_models/from_darknet.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Compile YOLO-V2 and YOLO-V3 in DarkNet Models
=============================================
**Author**: `Siju Samuel <https://siju-samuel.github.io/>`_
This article is an introductory tutorial to deploy darknet models with TVM.
All the required models and libraries will be downloaded from the internet by the script.
This script runs the YOLO-V2 and YOLO-V3 models with the bounding boxes.
Darknet parsing has a dependency on the CFFI and CV2 libraries.
Please install CFFI and CV2 before executing this script:
.. code-block:: bash
pip install cffi
pip install opencv-python
"""
# sphinx_gallery_start_ignore
from tvm import testing
testing.utils.install_request_hook(depth=3)
# sphinx_gallery_end_ignore
# numpy and matplotlib
import numpy as np
import matplotlib.pyplot as plt
import sys
# tvm, relay
import tvm
from tvm import te
from tvm import relay
from ctypes import *
from tvm.contrib.download import download_testdata
from tvm.relay.testing.darknet import __darknetffi__
import tvm.relay.testing.yolo_detection
import tvm.relay.testing.darknet
######################################################################
# Choose the model
# -----------------------
# Models are: 'yolov2', 'yolov3' or 'yolov3-tiny'
# Model name
MODEL_NAME = "yolov3"
######################################################################
# Download required files
# -----------------------
# Download cfg and weights file if first time.
CFG_NAME = MODEL_NAME + ".cfg"
WEIGHTS_NAME = MODEL_NAME + ".weights"
REPO_URL = "https://github.com/dmlc/web-data/blob/main/darknet/"
CFG_URL = REPO_URL + "cfg/" + CFG_NAME + "?raw=true"
WEIGHTS_URL = "https://pjreddie.com/media/files/" + WEIGHTS_NAME
cfg_path = download_testdata(CFG_URL, CFG_NAME, module="darknet")
weights_path = download_testdata(WEIGHTS_URL, WEIGHTS_NAME, module="darknet")
# Download and Load darknet library
if sys.platform in ["linux", "linux2"]:
DARKNET_LIB = "libdarknet2.0.so"
DARKNET_URL = REPO_URL + "lib/" + DARKNET_LIB + "?raw=true"
elif sys.platform == "darwin":
DARKNET_LIB = "libdarknet_mac2.0.so"
DARKNET_URL = REPO_URL + "lib_osx/" + DARKNET_LIB + "?raw=true"
else:
err = "Darknet lib is not supported on {} platform".format(sys.platform)
raise NotImplementedError(err)
lib_path = download_testdata(DARKNET_URL, DARKNET_LIB, module="darknet")
DARKNET_LIB = __darknetffi__.dlopen(lib_path)
net = DARKNET_LIB.load_network(cfg_path.encode("utf-8"), weights_path.encode("utf-8"), 0)
dtype = "float32"
batch_size = 1
data = np.empty([batch_size, net.c, net.h, net.w], dtype)
shape_dict = {"data": data.shape}
print("Converting darknet to relay functions...")
mod, params = relay.frontend.from_darknet(net, dtype=dtype, shape=data.shape)
######################################################################
# Import the graph to Relay
# -------------------------
# compile the model
target = tvm.target.Target("llvm", host="llvm")
dev = tvm.cpu(0)
data = np.empty([batch_size, net.c, net.h, net.w], dtype)
shape = {"data": data.shape}
print("Compiling the model...")
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(mod, target=target, params=params)
[neth, netw] = shape["data"][2:] # Current image shape is 608x608
######################################################################
# Load a test image
# -----------------
test_image = "dog.jpg"
print("Loading the test image...")
img_url = REPO_URL + "data/" + test_image + "?raw=true"
img_path = download_testdata(img_url, test_image, "data")
data = tvm.relay.testing.darknet.load_image(img_path, netw, neth)
######################################################################
# Execute on TVM Runtime
# ----------------------
# The process is no different from other examples.
from tvm.contrib import graph_executor
m = graph_executor.GraphModule(lib["default"](dev))
# set inputs
m.set_input("data", tvm.nd.array(data.astype(dtype)))
# execute
print("Running the test image...")
# detection
# thresholds
thresh = 0.5
nms_thresh = 0.45
m.run()
# get outputs
tvm_out = []
if MODEL_NAME == "yolov2":
layer_out = {}
layer_out["type"] = "Region"
# Get the region layer attributes (n, out_c, out_h, out_w, classes, coords, background)
layer_attr = m.get_output(2).numpy()
layer_out["biases"] = m.get_output(1).numpy()
out_shape = (layer_attr[0], layer_attr[1] // layer_attr[0], layer_attr[2], layer_attr[3])
layer_out["output"] = m.get_output(0).numpy().reshape(out_shape)
layer_out["classes"] = layer_attr[4]
layer_out["coords"] = layer_attr[5]
layer_out["background"] = layer_attr[6]
tvm_out.append(layer_out)
elif MODEL_NAME == "yolov3":
for i in range(3):
layer_out = {}
layer_out["type"] = "Yolo"
# Get the yolo layer attributes (n, out_c, out_h, out_w, classes, total)
layer_attr = m.get_output(i * 4 + 3).numpy()
layer_out["biases"] = m.get_output(i * 4 + 2).numpy()
layer_out["mask"] = m.get_output(i * 4 + 1).numpy()
out_shape = (layer_attr[0], layer_attr[1] // layer_attr[0], layer_attr[2], layer_attr[3])
layer_out["output"] = m.get_output(i * 4).numpy().reshape(out_shape)
layer_out["classes"] = layer_attr[4]
tvm_out.append(layer_out)
elif MODEL_NAME == "yolov3-tiny":
for i in range(2):
layer_out = {}
layer_out["type"] = "Yolo"
# Get the yolo layer attributes (n, out_c, out_h, out_w, classes, total)
layer_attr = m.get_output(i * 4 + 3).numpy()
layer_out["biases"] = m.get_output(i * 4 + 2).numpy()
layer_out["mask"] = m.get_output(i * 4 + 1).numpy()
out_shape = (layer_attr[0], layer_attr[1] // layer_attr[0], layer_attr[2], layer_attr[3])
layer_out["output"] = m.get_output(i * 4).numpy().reshape(out_shape)
layer_out["classes"] = layer_attr[4]
tvm_out.append(layer_out)
thresh = 0.560
# do the detection and bring up the bounding boxes
img = tvm.relay.testing.darknet.load_image_color(img_path)
_, im_h, im_w = img.shape
dets = tvm.relay.testing.yolo_detection.fill_network_boxes(
(netw, neth), (im_w, im_h), thresh, 1, tvm_out
)
last_layer = net.layers[net.n - 1]
tvm.relay.testing.yolo_detection.do_nms_sort(dets, last_layer.classes, nms_thresh)
coco_name = "coco.names"
coco_url = REPO_URL + "data/" + coco_name + "?raw=true"
font_name = "arial.ttf"
font_url = REPO_URL + "data/" + font_name + "?raw=true"
coco_path = download_testdata(coco_url, coco_name, module="data")
font_path = download_testdata(font_url, font_name, module="data")
with open(coco_path) as f:
content = f.readlines()
names = [x.strip() for x in content]
tvm.relay.testing.yolo_detection.show_detections(img, dets, thresh, names, last_layer.classes)
tvm.relay.testing.yolo_detection.draw_detections(
font_path, img, dets, thresh, names, last_layer.classes
)
plt.imshow(img.transpose(1, 2, 0))
plt.show()
| https://github.com/zk-ml/tachikoma |
gallery/how_to/compile_models/from_keras.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Compile Keras Models
=====================
**Author**: `Yuwei Hu <https://Huyuwei.github.io/>`_
This article is an introductory tutorial to deploy Keras models with Relay.
To begin, Keras should be installed.
TensorFlow is also required since it is used as the default backend of Keras.
A quick solution is to install both via pip:
.. code-block:: bash
pip install -U keras --user
pip install -U tensorflow --user
or please refer to the official site:
https://keras.io/#installation
"""
# sphinx_gallery_start_ignore
from tvm import testing
testing.utils.install_request_hook(depth=3)
# sphinx_gallery_end_ignore
import tvm
from tvm import te
import tvm.relay as relay
from tvm.contrib.download import download_testdata
import keras
import tensorflow as tf
import numpy as np
######################################################################
# Load pretrained keras model
# ----------------------------
# We load a pretrained resnet-50 classification model provided by keras.
# Compare version components numerically so that, e.g., Keras 2.10 is not treated as older than 2.4.
if tuple(int(v) for v in keras.__version__.split(".")[:2]) < (2, 4):
weights_url = "".join(
[
"https://github.com/fchollet/deep-learning-models/releases/",
"download/v0.2/resnet50_weights_tf_dim_ordering_tf_kernels.h5",
]
)
weights_file = "resnet50_keras_old.h5"
else:
    weights_url = "".join(
        [
            "https://storage.googleapis.com/tensorflow/keras-applications/",
"resnet/resnet50_weights_tf_dim_ordering_tf_kernels.h5",
]
)
weights_file = "resnet50_keras_new.h5"
weights_path = download_testdata(weights_url, weights_file, module="keras")
keras_resnet50 = tf.keras.applications.resnet50.ResNet50(
include_top=True, weights=None, input_shape=(224, 224, 3), classes=1000
)
keras_resnet50.load_weights(weights_path)
######################################################################
# Load a test image
# ------------------
# A single cat dominates the examples!
from PIL import Image
from matplotlib import pyplot as plt
from tensorflow.keras.applications.resnet50 import preprocess_input
img_url = "https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true"
img_path = download_testdata(img_url, "cat.png", module="data")
img = Image.open(img_path).resize((224, 224))
plt.imshow(img)
plt.show()
# input preprocess
data = np.array(img)[np.newaxis, :].astype("float32")
data = preprocess_input(data).transpose([0, 3, 1, 2])
print("input_1", data.shape)
######################################################################
# Compile the model with Relay
# ----------------------------
# convert the Keras model (NHWC layout) to the Relay format (NCHW layout).
shape_dict = {"input_1": data.shape}
mod, params = relay.frontend.from_keras(keras_resnet50, shape_dict)
# compile the model
target = "cuda"
dev = tvm.cuda(0)
# TODO(mbs): opt_level=3 causes nn.contrib_conv2d_winograd_weight_transform
# to end up in the module which fails memory validation on cuda most likely
# due to a latent bug. Note that the pass context only has an effect within
# evaluate() and is not captured by create_executor().
with tvm.transform.PassContext(opt_level=0):
model = relay.build_module.create_executor("graph", mod, dev, target, params).evaluate()
######################################################################
# Execute on TVM
# ---------------
dtype = "float32"
tvm_out = model(tvm.nd.array(data.astype(dtype)))
top1_tvm = np.argmax(tvm_out.numpy()[0])
#####################################################################
# Look up synset name
# -------------------
# Look up prediction top 1 index in 1000 class synset.
synset_url = "".join(
[
"https://gist.githubusercontent.com/zhreshold/",
"4d0b62f3d01426887599d4f7ede23ee5/raw/",
"596b27d23537e5a1b5751d2b0481ef172f58b539/",
"imagenet1000_clsid_to_human.txt",
]
)
synset_name = "imagenet1000_clsid_to_human.txt"
synset_path = download_testdata(synset_url, synset_name, module="data")
with open(synset_path) as f:
synset = eval(f.read())
print("Relay top-1 id: {}, class name: {}".format(top1_tvm, synset[top1_tvm]))
# confirm correctness with keras output
keras_out = keras_resnet50.predict(data.transpose([0, 2, 3, 1]))
top1_keras = np.argmax(keras_out)
print("Keras top-1 id: {}, class name: {}".format(top1_keras, synset[top1_keras]))
| https://github.com/zk-ml/tachikoma |
gallery/how_to/compile_models/from_mxnet.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
.. _tutorial-from-mxnet:
Compile MXNet Models
====================
**Author**: `Joshua Z. Zhang <https://zhreshold.github.io/>`_, \
`Kazutaka Morita <https://github.com/kazum>`_
This article is an introductory tutorial to deploy mxnet models with Relay.
To begin, the mxnet module is required to be installed.
A quick solution is:
.. code-block:: bash
pip install mxnet --user
or please refer to the official installation guide:
https://mxnet.apache.org/versions/master/install/index.html
"""
# sphinx_gallery_start_ignore
from tvm import testing
testing.utils.install_request_hook(depth=3)
# sphinx_gallery_end_ignore
# some standard imports
import mxnet as mx
import tvm
import tvm.relay as relay
import numpy as np
######################################################################
# Download Resnet18 model from Gluon Model Zoo
# ---------------------------------------------
# In this section, we download a pretrained imagenet model and classify an image.
from tvm.contrib.download import download_testdata
from mxnet.gluon.model_zoo.vision import get_model
from PIL import Image
from matplotlib import pyplot as plt
block = get_model("resnet18_v1", pretrained=True)
img_url = "https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true"
img_name = "cat.png"
synset_url = "".join(
[
"https://gist.githubusercontent.com/zhreshold/",
"4d0b62f3d01426887599d4f7ede23ee5/raw/",
"596b27d23537e5a1b5751d2b0481ef172f58b539/",
"imagenet1000_clsid_to_human.txt",
]
)
synset_name = "imagenet1000_clsid_to_human.txt"
img_path = download_testdata(img_url, "cat.png", module="data")
synset_path = download_testdata(synset_url, synset_name, module="data")
with open(synset_path) as f:
synset = eval(f.read())
image = Image.open(img_path).resize((224, 224))
plt.imshow(image)
plt.show()
def transform_image(image):
image = np.array(image) - np.array([123.0, 117.0, 104.0])
image /= np.array([58.395, 57.12, 57.375])
image = image.transpose((2, 0, 1))
image = image[np.newaxis, :]
return image
x = transform_image(image)
print("x", x.shape)
######################################################################
# Compile the Graph
# -----------------
# Now we would like to port the Gluon model to a portable computational graph.
# It takes only a few lines.
# We support the MXNet static graph (symbol) and HybridBlock in mxnet.gluon.
shape_dict = {"data": x.shape}
mod, params = relay.frontend.from_mxnet(block, shape_dict)
## we want a probability so add a softmax operator
func = mod["main"]
func = relay.Function(func.params, relay.nn.softmax(func.body), None, func.type_params, func.attrs)
######################################################################
# now compile the graph
target = "cuda"
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(func, target, params=params)
######################################################################
# Execute the portable graph on TVM
# ---------------------------------
# Now, we would like to reproduce the same forward computation using TVM.
from tvm.contrib import graph_executor
dev = tvm.cuda(0)
dtype = "float32"
m = graph_executor.GraphModule(lib["default"](dev))
# set inputs
m.set_input("data", tvm.nd.array(x.astype(dtype)))
# execute
m.run()
# get outputs
tvm_output = m.get_output(0)
top1 = np.argmax(tvm_output.numpy()[0])
print("TVM prediction top-1:", top1, synset[top1])
######################################################################
# Use MXNet symbol with pretrained weights
# ----------------------------------------
# MXNet often uses `arg_params` and `aux_params` to store network parameters
# separately; here we show how to use these weights with the existing API.
def block2symbol(block):
data = mx.sym.Variable("data")
sym = block(data)
args = {}
auxs = {}
for k, v in block.collect_params().items():
args[k] = mx.nd.array(v.data().asnumpy())
return sym, args, auxs
mx_sym, args, auxs = block2symbol(block)
# usually we would save/load it as checkpoint
mx.model.save_checkpoint("resnet18_v1", 0, mx_sym, args, auxs)
# there are 'resnet18_v1-0000.params' and 'resnet18_v1-symbol.json' on disk
######################################################################
# for a normal mxnet model, we start from here
mx_sym, args, auxs = mx.model.load_checkpoint("resnet18_v1", 0)
# now we use the same API to get Relay computation graph
mod, relay_params = relay.frontend.from_mxnet(mx_sym, shape_dict, arg_params=args, aux_params=auxs)
# repeat the same steps to run this model using TVM
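######################################################################
# A minimal sketch of those repeated steps (reusing the same target,
# device, input and synset defined earlier in this tutorial), shown as
# a non-executed example:
#
# .. code-block:: python
#
#    with tvm.transform.PassContext(opt_level=3):
#        lib = relay.build(mod, target, params=relay_params)
#    m = graph_executor.GraphModule(lib["default"](dev))
#    m.set_input("data", tvm.nd.array(x.astype(dtype)))
#    m.run()
#    top1 = np.argmax(m.get_output(0).numpy()[0])
#    print("TVM prediction top-1:", top1, synset[top1])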
| https://github.com/zk-ml/tachikoma |
gallery/how_to/compile_models/from_oneflow.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Compile OneFlow Models
======================
**Author**: `Xiaoyu Zhang <https://github.com/BBuf/>`_
This article is an introductory tutorial to deploy OneFlow models with Relay.
To begin, the OneFlow package should be installed.
A quick solution is to install it via pip:
.. code-block:: bash
pip install flowvision==0.1.0
python3 -m pip install -f https://release.oneflow.info oneflow==0.7.0+cpu
or please refer to the official site:
https://github.com/Oneflow-Inc/oneflow
Currently, TVM supports OneFlow 0.7.0. Other versions may be unstable.
"""
# sphinx_gallery_start_ignore
from tvm import testing
testing.utils.install_request_hook(depth=3)
# sphinx_gallery_end_ignore
import os, math
from matplotlib import pyplot as plt
import numpy as np
from PIL import Image
# oneflow imports
import flowvision
import oneflow as flow
import oneflow.nn as nn
import tvm
from tvm import relay
from tvm.contrib.download import download_testdata
######################################################################
# Load a pretrained OneFlow model and save model
# ----------------------------------------------
model_name = "resnet18"
model = getattr(flowvision.models, model_name)(pretrained=True)
model = model.eval()
model_dir = "resnet18_model"
if not os.path.exists(model_dir):
flow.save(model.state_dict(), model_dir)
######################################################################
# Load a test image
# -----------------
# Classic cat example!
from PIL import Image
img_url = "https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true"
img_path = download_testdata(img_url, "cat.png", module="data")
img = Image.open(img_path).resize((224, 224))
# Preprocess the image and convert to tensor
from flowvision import transforms
my_preprocess = transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]
)
img = my_preprocess(img)
img = np.expand_dims(img.numpy(), 0)
######################################################################
# Import the graph to Relay
# -------------------------
# Convert OneFlow graph to Relay graph. The input name can be arbitrary.
class Graph(flow.nn.Graph):
def __init__(self, module):
super().__init__()
self.m = module
def build(self, x):
out = self.m(x)
return out
graph = Graph(model)
_ = graph._compile(flow.randn(1, 3, 224, 224))
mod, params = relay.frontend.from_oneflow(graph, model_dir)
######################################################################
# Relay Build
# -----------
# Compile the graph to llvm target with given input specification.
target = tvm.target.Target("llvm", host="llvm")
dev = tvm.cpu(0)
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(mod, target=target, params=params)
######################################################################
# Execute the portable graph on TVM
# ---------------------------------
# Now we can try deploying the compiled model on target.
target = "cuda"
with tvm.transform.PassContext(opt_level=10):
intrp = relay.build_module.create_executor("graph", mod, tvm.cuda(0), target)
print(type(img))
print(img.shape)
tvm_output = intrp.evaluate()(tvm.nd.array(img.astype("float32")), **params)
#####################################################################
# Look up synset name
# -------------------
# Look up prediction top 1 index in 1000 class synset.
synset_url = "".join(
[
"https://raw.githubusercontent.com/Cadene/",
"pretrained-models.pytorch/master/data/",
"imagenet_synsets.txt",
]
)
synset_name = "imagenet_synsets.txt"
synset_path = download_testdata(synset_url, synset_name, module="data")
with open(synset_path) as f:
synsets = f.readlines()
synsets = [x.strip() for x in synsets]
splits = [line.split(" ") for line in synsets]
key_to_classname = {spl[0]: " ".join(spl[1:]) for spl in splits}
class_url = "".join(
[
"https://raw.githubusercontent.com/Cadene/",
"pretrained-models.pytorch/master/data/",
"imagenet_classes.txt",
]
)
class_name = "imagenet_classes.txt"
class_path = download_testdata(class_url, class_name, module="data")
with open(class_path) as f:
class_id_to_key = f.readlines()
class_id_to_key = [x.strip() for x in class_id_to_key]
# Get top-1 result for TVM
top1_tvm = np.argmax(tvm_output.numpy()[0])
tvm_class_key = class_id_to_key[top1_tvm]
# Convert input to OneFlow variable and get OneFlow result for comparison
with flow.no_grad():
torch_img = flow.from_numpy(img)
output = model(torch_img)
# Get top-1 result for OneFlow
top_oneflow = np.argmax(output.numpy())
oneflow_class_key = class_id_to_key[top_oneflow]
print("Relay top-1 id: {}, class name: {}".format(top1_tvm, key_to_classname[tvm_class_key]))
print(
"OneFlow top-1 id: {}, class name: {}".format(top_oneflow, key_to_classname[oneflow_class_key])
)
| https://github.com/zk-ml/tachikoma |
gallery/how_to/compile_models/from_onnx.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Compile ONNX Models
===================
**Author**: `Joshua Z. Zhang <https://zhreshold.github.io/>`_
This article is an introductory tutorial to deploy ONNX models with Relay.
To begin, the ONNX package must be installed.
A quick solution is to install the protobuf compiler, and then:
.. code-block:: bash
pip install --user onnx onnxoptimizer
or please refer to the official site:
https://github.com/onnx/onnx
"""
# sphinx_gallery_start_ignore
from tvm import testing
testing.utils.install_request_hook(depth=3)
# sphinx_gallery_end_ignore
import onnx
import numpy as np
import tvm
from tvm import te
import tvm.relay as relay
from tvm.contrib.download import download_testdata
######################################################################
# Load pretrained ONNX model
# ---------------------------------------------
# The example super resolution model used here is exactly the same model as in the ONNX tutorial
# http://pytorch.org/tutorials/advanced/super_resolution_with_caffe2.html
# We skip the PyTorch model construction part and download the saved ONNX model.
model_url = "".join(
[
"https://gist.github.com/zhreshold/",
"bcda4716699ac97ea44f791c24310193/raw/",
"93672b029103648953c4e5ad3ac3aadf346a4cdc/",
"super_resolution_0.2.onnx",
]
)
model_path = download_testdata(model_url, "super_resolution.onnx", module="onnx")
# now you have super_resolution.onnx on disk
onnx_model = onnx.load(model_path)
######################################################################
# Load a test image
# ---------------------------------------------
# A single cat dominates the examples! This model takes a single input image of size
# 224x224 and outputs a scaled image that is 3x greater than the input along each
# axis, a 672x672 image. Re-scale the cat image to fit this input shape then
# convert to `YCbCr`. The super resolution model will then be applied to the
# luminance (`Y`) channel.
from PIL import Image
img_url = "https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true"
img_path = download_testdata(img_url, "cat.png", module="data")
img = Image.open(img_path).resize((224, 224))
img_ycbcr = img.convert("YCbCr") # convert to YCbCr
img_y, img_cb, img_cr = img_ycbcr.split()
x = np.array(img_y)[np.newaxis, np.newaxis, :, :]
######################################################################
# Compile the model with relay
# ---------------------------------------------
# Typically ONNX models mix model input values with parameter values, with
# the input having the name `1`. This is model dependent, and you should check
# with the documentation for your model to determine the full input and
# parameter name space.
#
# Passing in the shape dictionary to the `relay.frontend.from_onnx` method
# tells relay which ONNX parameters are inputs, and which are parameters, and
# provides a static definition of the input size.
target = "llvm"
input_name = "1"
shape_dict = {input_name: x.shape}
mod, params = relay.frontend.from_onnx(onnx_model, shape_dict)
with tvm.transform.PassContext(opt_level=1):
executor = relay.build_module.create_executor(
"graph", mod, tvm.cpu(0), target, params
).evaluate()
######################################################################
# Execute on TVM
# ---------------------------------------------
dtype = "float32"
tvm_output = executor(tvm.nd.array(x.astype(dtype))).numpy()
######################################################################
# Display results
# ---------------------------------------------
# We put the input and output images side by side. The luminance channel, `Y`, is the output
# from the model. The chroma channels `Cb` and `Cr` are resized to match with a simple
# bicubic algorithm. The image is then recombined and converted back to `RGB`.
from matplotlib import pyplot as plt
out_y = Image.fromarray(np.uint8((tvm_output[0, 0]).clip(0, 255)), mode="L")
out_cb = img_cb.resize(out_y.size, Image.BICUBIC)
out_cr = img_cr.resize(out_y.size, Image.BICUBIC)
result = Image.merge("YCbCr", [out_y, out_cb, out_cr]).convert("RGB")
canvas = np.full((672, 672 * 2, 3), 255)
canvas[0:224, 0:224, :] = np.asarray(img)
canvas[:, 672:, :] = np.asarray(result)
plt.imshow(canvas.astype(np.uint8))
plt.show()
######################################################################
# Notes
# ---------------------------------------------
# By default, ONNX defines models in terms of dynamic shapes. The ONNX importer
# retains that dynamism upon import, and the compiler attempts to convert the model
# into static shapes at compile time. If this fails, there may still be dynamic
# operations in the model (see the optional sketch at the end of this tutorial).
# Not all TVM kernels currently support dynamic shapes; please file an issue on
# discuss.tvm.apache.org if you hit an error with dynamic kernels.
#
# This particular model was built using an older version of ONNX. During the import
# phase, the ONNX importer will run the ONNX verifier, which may throw a `Mismatched attribute type`
# warning. Because TVM supports a number of different ONNX versions, the Relay model
# will still be valid.
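######################################################################
# The following is a minimal, optional sketch (not executed as part of
# this tutorial) of two knobs that can help when dynamic operations
# remain after import; it reuses the ``onnx_model`` and ``shape_dict``
# defined above.
#
# .. code-block:: python
#
#    # Fold the model parameters into constants at import time, which lets
#    # more shape information propagate through the graph.
#    mod, params = relay.frontend.from_onnx(onnx_model, shape_dict, freeze_params=True)
#
#    # Attempt to convert the remaining dynamic operations to static ones.
#    mod = relay.transform.DynamicToStatic()(mod)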
| https://github.com/zk-ml/tachikoma |
gallery/how_to/compile_models/from_paddle.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Compile PaddlePaddle Models
===========================
**Author**: `Ziyuan Ma <https://github.com/ZiyuanMa/>`_
This article is an introductory tutorial to deploy PaddlePaddle models with Relay.
To begin, PaddlePaddle>=2.1.3 is required to be installed.
A quick solution is:
.. code-block:: bash
pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple
or please refer to the official site:
https://www.paddlepaddle.org.cn/install/quick?docurl=/documentation/docs/zh/install/pip/linux-pip.html
"""
# sphinx_gallery_start_ignore
from tvm import testing
testing.utils.install_request_hook(depth=3)
# sphinx_gallery_end_ignore
import tarfile
import paddle
import numpy as np
import tvm
from tvm import relay
from tvm.contrib.download import download_testdata
######################################################################
# Load pretrained ResNet50 model
# ---------------------------------------------
# We load a pretrained ResNet50 provided by PaddlePaddle.
url = "https://bj.bcebos.com/x2paddle/models/paddle_resnet50.tar"
model_path = download_testdata(url, "paddle_resnet50.tar", module="model")
with tarfile.open(model_path) as tar:
names = tar.getnames()
for name in names:
tar.extract(name, "./")
model = paddle.jit.load("./paddle_resnet50/model")
######################################################################
# Load a test image
# ---------------------------------------------
# A single cat dominates the examples!
from PIL import Image
import paddle.vision.transforms as T
transforms = T.Compose(
[
T.Resize((256, 256)),
T.CenterCrop(224),
T.ToTensor(),
T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]
)
img_url = "https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true"
img_path = download_testdata(img_url, "cat.png", module="data")
img = Image.open(img_path).resize((224, 224))
img = transforms(img)
img = np.expand_dims(img, axis=0)
######################################################################
# Compile the model with relay
# ---------------------------------------------
target = "llvm"
shape_dict = {"inputs": img.shape}
mod, params = relay.frontend.from_paddle(model, shape_dict)
with tvm.transform.PassContext(opt_level=3):
executor = relay.build_module.create_executor(
"graph", mod, tvm.cpu(0), target, params
).evaluate()
######################################################################
# Execute on TVM
# ---------------------------------------------
dtype = "float32"
tvm_output = executor(tvm.nd.array(img.astype(dtype))).numpy()
######################################################################
# Look up synset name
# ---------------------------------------------
# Look up prediction top 1 index in 1000 class synset.
synset_url = "".join(
[
"https://gist.githubusercontent.com/zhreshold/",
"4d0b62f3d01426887599d4f7ede23ee5/raw/",
"596b27d23537e5a1b5751d2b0481ef172f58b539/",
"imagenet1000_clsid_to_human.txt",
]
)
synset_name = "imagenet1000_clsid_to_human.txt"
synset_path = download_testdata(synset_url, synset_name, module="data")
with open(synset_path) as f:
synset = f.readlines()
top1 = np.argmax(tvm_output[0])
print(f"TVM prediction top-1 id: {top1}, class name: {synset[top1]}")
| https://github.com/zk-ml/tachikoma |
gallery/how_to/compile_models/from_pytorch.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Compile PyTorch Models
======================
**Author**: `Alex Wong <https://github.com/alexwong/>`_
This article is an introductory tutorial to deploy PyTorch models with Relay.
To begin, PyTorch should be installed.
TorchVision is also required since we will be using it as our model zoo.
A quick solution is to install them via pip:
.. code-block:: bash
pip install torch==1.7.0
pip install torchvision==0.8.1
or please refer to the official site:
https://pytorch.org/get-started/locally/
PyTorch versions should be backwards compatible but should be used
with the proper TorchVision version.
Currently, TVM supports PyTorch 1.7 and 1.4. Other versions may
be unstable.
"""
# sphinx_gallery_start_ignore
from tvm import testing
testing.utils.install_request_hook(depth=3)
# sphinx_gallery_end_ignore
import tvm
from tvm import relay
import numpy as np
from tvm.contrib.download import download_testdata
# PyTorch imports
import torch
import torchvision
######################################################################
# Load a pretrained PyTorch model
# -------------------------------
model_name = "resnet18"
model = getattr(torchvision.models, model_name)(pretrained=True)
model = model.eval()
# We grab the TorchScripted model via tracing
input_shape = [1, 3, 224, 224]
input_data = torch.randn(input_shape)
scripted_model = torch.jit.trace(model, input_data).eval()
######################################################################
# Load a test image
# -----------------
# Classic cat example!
from PIL import Image
img_url = "https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true"
img_path = download_testdata(img_url, "cat.png", module="data")
img = Image.open(img_path).resize((224, 224))
# Preprocess the image and convert to tensor
from torchvision import transforms
my_preprocess = transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]
)
img = my_preprocess(img)
img = np.expand_dims(img, 0)
######################################################################
# Import the graph to Relay
# -------------------------
# Convert PyTorch graph to Relay graph. The input name can be arbitrary.
input_name = "input0"
shape_list = [(input_name, img.shape)]
mod, params = relay.frontend.from_pytorch(scripted_model, shape_list)
######################################################################
# Relay Build
# -----------
# Compile the graph to llvm target with given input specification.
target = tvm.target.Target("llvm", host="llvm")
dev = tvm.cpu(0)
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(mod, target=target, params=params)
######################################################################
# Execute the portable graph on TVM
# ---------------------------------
# Now we can try deploying the compiled model on target.
from tvm.contrib import graph_executor
dtype = "float32"
m = graph_executor.GraphModule(lib["default"](dev))
# Set inputs
m.set_input(input_name, tvm.nd.array(img.astype(dtype)))
# Execute
m.run()
# Get outputs
tvm_output = m.get_output(0)
#####################################################################
# Look up synset name
# -------------------
# Look up prediction top 1 index in 1000 class synset.
synset_url = "".join(
[
"https://raw.githubusercontent.com/Cadene/",
"pretrained-models.pytorch/master/data/",
"imagenet_synsets.txt",
]
)
synset_name = "imagenet_synsets.txt"
synset_path = download_testdata(synset_url, synset_name, module="data")
with open(synset_path) as f:
synsets = f.readlines()
synsets = [x.strip() for x in synsets]
splits = [line.split(" ") for line in synsets]
key_to_classname = {spl[0]: " ".join(spl[1:]) for spl in splits}
class_url = "".join(
[
"https://raw.githubusercontent.com/Cadene/",
"pretrained-models.pytorch/master/data/",
"imagenet_classes.txt",
]
)
class_name = "imagenet_classes.txt"
class_path = download_testdata(class_url, class_name, module="data")
with open(class_path) as f:
class_id_to_key = f.readlines()
class_id_to_key = [x.strip() for x in class_id_to_key]
# Get top-1 result for TVM
top1_tvm = np.argmax(tvm_output.numpy()[0])
tvm_class_key = class_id_to_key[top1_tvm]
# Convert input to PyTorch variable and get PyTorch result for comparison
with torch.no_grad():
torch_img = torch.from_numpy(img)
output = model(torch_img)
# Get top-1 result for PyTorch
top1_torch = np.argmax(output.numpy())
torch_class_key = class_id_to_key[top1_torch]
print("Relay top-1 id: {}, class name: {}".format(top1_tvm, key_to_classname[tvm_class_key]))
print("Torch top-1 id: {}, class name: {}".format(top1_torch, key_to_classname[torch_class_key]))
| https://github.com/zk-ml/tachikoma |
gallery/how_to/compile_models/from_tensorflow.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Compile Tensorflow Models
=========================
This article is an introductory tutorial to deploy tensorflow models with TVM.
To begin, the tensorflow python module is required to be installed.
Please refer to https://www.tensorflow.org/install
"""
# sphinx_gallery_start_ignore
from tvm import testing
testing.utils.install_request_hook(depth=3)
# sphinx_gallery_end_ignore
# tvm, relay
import tvm
from tvm import te
from tvm import relay
# os and numpy
import numpy as np
import os.path
# Tensorflow imports
import tensorflow as tf
# Ask tensorflow to limit its GPU memory to what's actually needed
# instead of gobbling everything that's available.
# https://www.tensorflow.org/guide/gpu#limiting_gpu_memory_growth
# This way this tutorial is a little more friendly to sphinx-gallery.
gpus = tf.config.list_physical_devices("GPU")
if gpus:
try:
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
print("tensorflow will use experimental.set_memory_growth(True)")
except RuntimeError as e:
print("experimental.set_memory_growth option is not available: {}".format(e))
try:
tf_compat_v1 = tf.compat.v1
except ImportError:
tf_compat_v1 = tf
# Tensorflow utility functions
import tvm.relay.testing.tf as tf_testing
# Base location for model related files.
repo_base = "https://github.com/dmlc/web-data/raw/main/tensorflow/models/InceptionV1/"
# Test image
img_name = "elephant-299.jpg"
image_url = os.path.join(repo_base, img_name)
######################################################################
# Tutorials
# ---------
# Please refer to docs/frontend/tensorflow.md for more details on various models
# from tensorflow.
model_name = "classify_image_graph_def-with_shapes.pb"
model_url = os.path.join(repo_base, model_name)
# Image label map
map_proto = "imagenet_2012_challenge_label_map_proto.pbtxt"
map_proto_url = os.path.join(repo_base, map_proto)
# Human readable text for labels
label_map = "imagenet_synset_to_human_label_map.txt"
label_map_url = os.path.join(repo_base, label_map)
# Target settings
# Use these commented settings to build for cuda.
# target = tvm.target.Target("cuda", host="llvm")
# layout = "NCHW"
# dev = tvm.cuda(0)
target = tvm.target.Target("llvm", host="llvm")
layout = None
dev = tvm.cpu(0)
######################################################################
# Download required files
# -----------------------
# Download files listed above.
from tvm.contrib.download import download_testdata
img_path = download_testdata(image_url, img_name, module="data")
model_path = download_testdata(model_url, model_name, module=["tf", "InceptionV1"])
map_proto_path = download_testdata(map_proto_url, map_proto, module="data")
label_path = download_testdata(label_map_url, label_map, module="data")
######################################################################
# Import model
# ------------
# Creates tensorflow graph definition from protobuf file.
with tf_compat_v1.gfile.GFile(model_path, "rb") as f:
graph_def = tf_compat_v1.GraphDef()
graph_def.ParseFromString(f.read())
graph = tf.import_graph_def(graph_def, name="")
# Call the utility to import the graph definition into default graph.
graph_def = tf_testing.ProcessGraphDefParam(graph_def)
# Add shapes to the graph.
with tf_compat_v1.Session() as sess:
graph_def = tf_testing.AddShapesToGraphDef(sess, "softmax")
######################################################################
# Decode image
# ------------
# .. note::
#
# tensorflow frontend import doesn't support preprocessing ops like JpegDecode.
#   JpegDecode is bypassed (it just returns the source node).
#   Hence we supply the decoded frame to TVM instead.
#
from PIL import Image
image = Image.open(img_path).resize((299, 299))
x = np.array(image)
######################################################################
# Import the graph to Relay
# -------------------------
# Import tensorflow graph definition to relay frontend.
#
# Results:
#   mod: relay module for the given tensorflow protobuf.
#   params: params converted from tensorflow params (tensor protobuf).
shape_dict = {"DecodeJpeg/contents": x.shape}
dtype_dict = {"DecodeJpeg/contents": "uint8"}
mod, params = relay.frontend.from_tensorflow(graph_def, layout=layout, shape=shape_dict)
print("Tensorflow protobuf imported to relay frontend.")
######################################################################
# Relay Build
# -----------
# Compile the graph to llvm target with given input specification.
#
# Results:
#   lib: compiled library (bundling the graph and params) which can be
#   deployed on the target with the TVM runtime.
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(mod, target, params=params)
######################################################################
# Execute the portable graph on TVM
# ---------------------------------
# Now we can try deploying the compiled model on target.
from tvm.contrib import graph_executor
dtype = "uint8"
m = graph_executor.GraphModule(lib["default"](dev))
# set inputs
m.set_input("DecodeJpeg/contents", tvm.nd.array(x.astype(dtype)))
# execute
m.run()
# get outputs
tvm_output = m.get_output(0, tvm.nd.empty(((1, 1008)), "float32"))
######################################################################
# Process the output
# ------------------
# Process the model output to human readable text for InceptionV1.
predictions = tvm_output.numpy()
predictions = np.squeeze(predictions)
# Creates node ID --> English string lookup.
node_lookup = tf_testing.NodeLookup(label_lookup_path=map_proto_path, uid_lookup_path=label_path)
# Print top 5 predictions from TVM output.
top_k = predictions.argsort()[-5:][::-1]
for node_id in top_k:
human_string = node_lookup.id_to_string(node_id)
score = predictions[node_id]
print("%s (score = %.5f)" % (human_string, score))
######################################################################
# Inference on tensorflow
# -----------------------
# Run the corresponding model on tensorflow
def create_graph():
    """Creates a graph from the saved GraphDef file."""
# Creates graph from saved graph_def.pb.
with tf_compat_v1.gfile.GFile(model_path, "rb") as f:
graph_def = tf_compat_v1.GraphDef()
graph_def.ParseFromString(f.read())
graph = tf.import_graph_def(graph_def, name="")
# Call the utility to import the graph definition into default graph.
graph_def = tf_testing.ProcessGraphDefParam(graph_def)
def run_inference_on_image(image):
"""Runs inference on an image.
Parameters
----------
image: String
Image file name.
Returns
-------
Nothing
"""
if not tf_compat_v1.gfile.Exists(image):
        tf_compat_v1.logging.fatal("File does not exist %s", image)
image_data = tf_compat_v1.gfile.GFile(image, "rb").read()
# Creates graph from saved GraphDef.
create_graph()
with tf_compat_v1.Session() as sess:
softmax_tensor = sess.graph.get_tensor_by_name("softmax:0")
predictions = sess.run(softmax_tensor, {"DecodeJpeg/contents:0": image_data})
predictions = np.squeeze(predictions)
# Creates node ID --> English string lookup.
node_lookup = tf_testing.NodeLookup(
label_lookup_path=map_proto_path, uid_lookup_path=label_path
)
# Print top 5 predictions from tensorflow.
top_k = predictions.argsort()[-5:][::-1]
print("===== TENSORFLOW RESULTS =======")
for node_id in top_k:
human_string = node_lookup.id_to_string(node_id)
score = predictions[node_id]
print("%s (score = %.5f)" % (human_string, score))
run_inference_on_image(img_path)
| https://github.com/zk-ml/tachikoma |
gallery/how_to/compile_models/from_tflite.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Compile TFLite Models
=====================
**Author**: `Zhao Wu <https://github.com/FrozenGene>`_
This article is an introductory tutorial to deploy TFLite models with Relay.
To get started, the TFLite package needs to be installed as a prerequisite.
.. code-block:: bash
# install tflite
pip install tflite==2.1.0 --user
or you could generate the TFLite package yourself. The steps are as follows:
.. code-block:: bash
# Get the flatc compiler.
# Please refer to https://github.com/google/flatbuffers for details
# and make sure it is properly installed.
flatc --version
# Get the TFLite schema.
wget https://raw.githubusercontent.com/tensorflow/tensorflow/r1.13/tensorflow/lite/schema/schema.fbs
# Generate TFLite package.
flatc --python schema.fbs
# Add current folder (which contains generated tflite module) to PYTHONPATH.
export PYTHONPATH=${PYTHONPATH:+$PYTHONPATH:}$(pwd)
Now please check that the TFLite package is installed successfully: ``python -c "import tflite"``
Below you can find an example on how to compile TFLite model using TVM.
"""
######################################################################
# Utils for downloading and extracting zip files
# ----------------------------------------------
# sphinx_gallery_start_ignore
from tvm import testing
testing.utils.install_request_hook(depth=3)
# sphinx_gallery_end_ignore
import os
def extract(path):
import tarfile
if path.endswith("tgz") or path.endswith("gz"):
dir_path = os.path.dirname(path)
tar = tarfile.open(path)
tar.extractall(path=dir_path)
tar.close()
else:
raise RuntimeError("Could not decompress the file: " + path)
######################################################################
# Load pretrained TFLite model
# ----------------------------
# Load mobilenet V1 TFLite model provided by Google
from tvm.contrib.download import download_testdata
model_url = "http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224.tgz"
# Download model tar file and extract it to get mobilenet_v1_1.0_224.tflite
model_path = download_testdata(model_url, "mobilenet_v1_1.0_224.tgz", module=["tf", "official"])
model_dir = os.path.dirname(model_path)
extract(model_path)
# Now we can open mobilenet_v1_1.0_224.tflite
tflite_model_file = os.path.join(model_dir, "mobilenet_v1_1.0_224.tflite")
tflite_model_buf = open(tflite_model_file, "rb").read()
# Get TFLite model from buffer
try:
import tflite
tflite_model = tflite.Model.GetRootAsModel(tflite_model_buf, 0)
except AttributeError:
import tflite.Model
tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buf, 0)
######################################################################
# Load a test image
# -----------------
# A single cat dominates the examples!
from PIL import Image
from matplotlib import pyplot as plt
import numpy as np
image_url = "https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true"
image_path = download_testdata(image_url, "cat.png", module="data")
resized_image = Image.open(image_path).resize((224, 224))
plt.imshow(resized_image)
plt.show()
image_data = np.asarray(resized_image).astype("float32")
# Add a dimension to the image so that we have NHWC format layout
image_data = np.expand_dims(image_data, axis=0)
# Preprocess image as described here:
# https://github.com/tensorflow/models/blob/edb6ed22a801665946c63d650ab9a0b23d98e1b1/research/slim/preprocessing/inception_preprocessing.py#L243
image_data[:, :, :, 0] = 2.0 / 255.0 * image_data[:, :, :, 0] - 1
image_data[:, :, :, 1] = 2.0 / 255.0 * image_data[:, :, :, 1] - 1
image_data[:, :, :, 2] = 2.0 / 255.0 * image_data[:, :, :, 2] - 1
print("input", image_data.shape)
######################################################################
# Compile the model with relay
# ----------------------------
# TFLite input tensor name, shape and type
input_tensor = "input"
input_shape = (1, 224, 224, 3)
input_dtype = "float32"
# Parse TFLite model and convert it to a Relay module
from tvm import relay, transform
mod, params = relay.frontend.from_tflite(
tflite_model, shape_dict={input_tensor: input_shape}, dtype_dict={input_tensor: input_dtype}
)
# Build the module against the x86 CPU target
target = "llvm"
with transform.PassContext(opt_level=3):
lib = relay.build(mod, target, params=params)
######################################################################
# Execute on TVM
# --------------
import tvm
from tvm import te
from tvm.contrib import graph_executor as runtime
# Create a runtime executor module
module = runtime.GraphModule(lib["default"](tvm.cpu()))
# Feed input data
module.set_input(input_tensor, tvm.nd.array(image_data))
# Run
module.run()
# Get output
tvm_output = module.get_output(0).numpy()
######################################################################
# Display results
# ---------------
# Load label file
label_file_url = "".join(
[
"https://raw.githubusercontent.com/",
"tensorflow/tensorflow/master/tensorflow/lite/java/demo/",
"app/src/main/assets/",
"labels_mobilenet_quant_v1_224.txt",
]
)
label_file = "labels_mobilenet_quant_v1_224.txt"
label_path = download_testdata(label_file_url, label_file, module="data")
# List of 1001 classes
with open(label_path) as f:
labels = f.readlines()
# Convert result to 1D data
predictions = np.squeeze(tvm_output)
# Get top 1 prediction
prediction = np.argmax(predictions)
# Convert id to class name and show the result
print("The image prediction result is: id " + str(prediction) + " name: " + labels[prediction])
| https://github.com/zk-ml/tachikoma |
gallery/how_to/deploy_models/deploy_model_on_android.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
.. _tutorial-deploy-model-on-android:
Deploy the Pretrained Model on Android
=======================================
**Author**: `Tomohiro Kato <https://tkat0.github.io/>`_
This is an example of using Relay to compile a Keras model and deploy it on an Android device.
"""
# sphinx_gallery_start_ignore
from tvm import testing
testing.utils.install_request_hook(depth=3)
# sphinx_gallery_end_ignore
import os
import numpy as np
from PIL import Image
import keras
from keras.applications.mobilenet_v2 import MobileNetV2
import tvm
from tvm import te
import tvm.relay as relay
from tvm import rpc
from tvm.contrib import utils, ndk, graph_executor as runtime
from tvm.contrib.download import download_testdata
######################################################################
# Setup Environment
# -----------------
# Since there are many required packages for Android, it is recommended to use the official Docker image.
#
# First, build and run the Docker image with the following commands.
#
# .. code-block:: bash
#
# git clone --recursive https://github.com/apache/tvm tvm
# cd tvm
# docker build -t tvm.demo_android -f docker/Dockerfile.demo_android ./docker
# docker run --pid=host -h tvm -v $PWD:/workspace \
# -w /workspace -p 9190:9190 --name tvm -it tvm.demo_android bash
#
# You are now inside the container. The cloned TVM directory is mounted at /workspace.
# Note that port 9190, used by the RPC tracker described later, is published by the `docker run` command above.
#
# .. note::
#
# Please execute the following steps in the container.
# We can execute :code:`docker exec -it tvm bash` to open a new terminal in the container.
#
# Next we build TVM.
#
# .. code-block:: bash
#
# mkdir build
# cd build
# cmake -DUSE_LLVM=llvm-config-8 \
# -DUSE_RPC=ON \
# -DUSE_SORT=ON \
# -DUSE_VULKAN=ON \
# -DUSE_GRAPH_EXECUTOR=ON \
# ..
# make -j10
#
# After building TVM successfully, please set PYTHONPATH.
#
# .. code-block:: bash
#
# echo 'export PYTHONPATH=/workspace/python:/workspace/vta/python:${PYTHONPATH}' >> ~/.bashrc
# source ~/.bashrc
#################################################################
# Start RPC Tracker
# -----------------
# TVM uses an RPC session to communicate with the Android device.
#
# To start an RPC tracker, run this command in the container. The tracker is
# required during the whole tuning process, so we need to open a new terminal for
# this command:
#
# .. code-block:: bash
#
# python3 -m tvm.exec.rpc_tracker --host=0.0.0.0 --port=9190
#
# The expected output is
#
# .. code-block:: bash
#
# INFO:RPCTracker:bind to 0.0.0.0:9190
#################################################################
# Register Android device to RPC Tracker
# --------------------------------------
# Now we can register our Android device to the tracker.
#
# Follow this `readme page <https://github.com/apache/tvm/tree/main/apps/android_rpc>`_ to
# install the TVM RPC APK on the Android device.
#
# Here is an example of config.mk with OpenCL and Vulkan enabled.
#
#
# .. code-block:: bash
#
# APP_ABI = arm64-v8a
#
# APP_PLATFORM = android-24
#
# # whether enable OpenCL during compile
# USE_OPENCL = 1
#
# # whether to enable Vulkan during compile
# USE_VULKAN = 1
#
# ifeq ($(USE_VULKAN), 1)
# # Statically linking vulkan requires API Level 24 or higher
# APP_PLATFORM = android-24
# endif
#
# # the additional include headers you want to add, e.g., SDK_PATH/adrenosdk/Development/Inc
# ADD_C_INCLUDES += /work/adrenosdk-linux-5_0/Development/Inc
# # downloaded from https://github.com/KhronosGroup/OpenCL-Headers
# ADD_C_INCLUDES += /usr/local/OpenCL-Headers/
#
# # the additional link libs you want to add, e.g., ANDROID_LIB_PATH/libOpenCL.so
# ADD_LDLIBS = /workspace/pull-from-android-device/libOpenCL.so
#
# .. note::
#
# At this time, don't forget to `create a standalone toolchain <https://github.com/apache/tvm/tree/main/apps/android_rpc#architecture-and-android-standalone-toolchain>`_ .
#
# for example
#
# .. code-block:: bash
#
# $ANDROID_NDK_HOME/build/tools/make-standalone-toolchain.sh \
# --platform=android-24 --use-llvm --arch=arm64 --install-dir=/opt/android-toolchain-arm64
# export TVM_NDK_CC=/opt/android-toolchain-arm64/bin/aarch64-linux-android-g++
#
# Next, start the Android application and enter the IP address and port of the RPC tracker.
# Your device is then registered.
#
# After registering the device, we can confirm it by querying the RPC tracker:
#
# .. code-block:: bash
#
# python3 -m tvm.exec.query_rpc_tracker --host=0.0.0.0 --port=9190
#
# For example, if we have one Android device,
# the output can be
#
# .. code-block:: bash
#
# Queue Status
# ----------------------------------
# key total free pending
# ----------------------------------
# android 1 1 0
# ----------------------------------
#
# To confirm that you can communicate with the Android device, we can run the following test script.
# If you use OpenCL and Vulkan, please set :code:`test_opencl` and :code:`test_vulkan` in the script.
#
# .. code-block:: bash
#
# export TVM_TRACKER_HOST=0.0.0.0
# export TVM_TRACKER_PORT=9190
#
# .. code-block:: bash
#
# cd /workspace/apps/android_rpc
# python3 tests/android_rpc_test.py
#
######################################################################
# Load pretrained keras model
# ---------------------------
# We load a pretrained MobileNetV2 (alpha=0.5) classification model provided by Keras.
keras.backend.clear_session() # Destroys the current TF graph and creates a new one.
weights_url = "".join(
[
"https://github.com/JonathanCMitchell/",
"mobilenet_v2_keras/releases/download/v1.1/",
"mobilenet_v2_weights_tf_dim_ordering_tf_kernels_0.5_224.h5",
]
)
weights_file = "mobilenet_v2_weights.h5"
weights_path = download_testdata(weights_url, weights_file, module="keras")
keras_mobilenet_v2 = MobileNetV2(
alpha=0.5, include_top=True, weights=None, input_shape=(224, 224, 3), classes=1000
)
keras_mobilenet_v2.load_weights(weights_path)
######################################################################
# In order to test our model, we download an image of a cat and
# transform its format.
img_url = "https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true"
img_name = "cat.png"
img_path = download_testdata(img_url, img_name, module="data")
image = Image.open(img_path).resize((224, 224))
dtype = "float32"
def transform_image(image):
image = np.array(image) - np.array([123.0, 117.0, 104.0])
image /= np.array([58.395, 57.12, 57.375])
image = image.transpose((2, 0, 1))
image = image[np.newaxis, :]
return image
x = transform_image(image)
######################################################################
# The synset is used to transform the label from an ImageNet class number
# into a human-readable word.
synset_url = "".join(
[
"https://gist.githubusercontent.com/zhreshold/",
"4d0b62f3d01426887599d4f7ede23ee5/raw/",
"596b27d23537e5a1b5751d2b0481ef172f58b539/",
"imagenet1000_clsid_to_human.txt",
]
)
synset_name = "imagenet1000_clsid_to_human.txt"
synset_path = download_testdata(synset_url, synset_name, module="data")
with open(synset_path) as f:
synset = eval(f.read())
######################################################################
# Compile the model with relay
# ----------------------------
# If we run the example on our x86 server for demonstration, we can simply
# set it as :code:`llvm`. If running it on the Android device, we need to
# specify its instruction set. Set :code:`local_demo` to False if you want
# to run this tutorial with a real device.
local_demo = True
# By default, the CPU target is used for execution.
# Select 'cpu', 'opencl', or 'vulkan' as the test target.
test_target = "cpu"
# Change target configuration.
# Run `adb shell cat /proc/cpuinfo` to find the arch.
arch = "arm64"
target = tvm.target.Target("llvm -mtriple=%s-linux-android" % arch)
if local_demo:
target = tvm.target.Target("llvm")
elif test_target == "opencl":
target = tvm.target.Target("opencl", host=target)
elif test_target == "vulkan":
target = tvm.target.Target("vulkan", host=target)
input_name = "input_1"
shape_dict = {input_name: x.shape}
mod, params = relay.frontend.from_keras(keras_mobilenet_v2, shape_dict)
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(mod, target=target, params=params)
# After `relay.build`, you will get three return values: graph,
# library and the new parameters, since we do some optimization that will
# change the parameters but keep the result of the model the same.
# Save the library at local temporary directory.
tmp = utils.tempdir()
lib_fname = tmp.relpath("net.so")
fcompile = ndk.create_shared if not local_demo else None
lib.export_library(lib_fname, fcompile)
######################################################################
# Deploy the Model Remotely by RPC
# --------------------------------
# With RPC, you can deploy the model remotely from your host machine
# to the remote android device.
tracker_host = os.environ.get("TVM_TRACKER_HOST", "127.0.0.1")
tracker_port = int(os.environ.get("TVM_TRACKER_PORT", 9190))
key = "android"
if local_demo:
remote = rpc.LocalSession()
else:
tracker = rpc.connect_tracker(tracker_host, tracker_port)
# When running a heavy model, we should increase the `session_timeout`
remote = tracker.request(key, priority=0, session_timeout=60)
if local_demo:
dev = remote.cpu(0)
elif test_target == "opencl":
dev = remote.cl(0)
elif test_target == "vulkan":
dev = remote.vulkan(0)
else:
dev = remote.cpu(0)
# upload the library to remote device and load it
remote.upload(lib_fname)
rlib = remote.load_module("net.so")
# create the remote runtime module
module = runtime.GraphModule(rlib["default"](dev))
######################################################################
# Execute on TVM
# --------------
# set input data
module.set_input(input_name, tvm.nd.array(x.astype(dtype)))
# run
module.run()
# get output
out = module.get_output(0)
# get top1 result
top1 = np.argmax(out.numpy())
print("TVM prediction top-1: {}".format(synset[top1]))
print("Evaluate inference time cost...")
print(module.benchmark(dev, number=1, repeat=10))
######################################################################
# Sample Output
# -------------
# The following are the results for 'cpu', 'opencl', and 'vulkan' using an Adreno 530 on a Snapdragon 820.
#
# Although we can run on the GPU, it is slower than the CPU.
# To speed it up, we need to write and optimize the schedule according to the GPU architecture.
#
# .. code-block:: bash
#
# # cpu
# TVM prediction top-1: tiger cat
# Evaluate inference time cost...
# Mean inference time (std dev): 37.92 ms (19.67 ms)
#
# # opencl
# TVM prediction top-1: tiger cat
# Evaluate inference time cost...
# Mean inference time (std dev): 419.83 ms (7.49 ms)
#
# # vulkan
# TVM prediction top-1: tiger cat
# Evaluate inference time cost...
# Mean inference time (std dev): 465.80 ms (4.52 ms)
| https://github.com/zk-ml/tachikoma |
gallery/how_to/deploy_models/deploy_model_on_nano.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
.. _tutorial-deploy-model-on-nano:
Deploy the Pretrained Model on Jetson Nano
===========================================
**Author**: `BBuf <https://github.com/BBuf>`_
This is an example of using Relay to compile a ResNet model and deploy
it on Jetson Nano.
"""
# sphinx_gallery_start_ignore
from tvm import testing
testing.utils.install_request_hook(depth=3)
# sphinx_gallery_end_ignore
import tvm
from tvm import te
import tvm.relay as relay
from tvm import rpc
from tvm.contrib import utils, graph_executor as runtime
from tvm.contrib.download import download_testdata
######################################################################
# .. _build-tvm-runtime-on-jetson-nano:
#
# Build TVM Runtime on Jetson Nano
# --------------------------------
#
# The first step is to build the TVM runtime on the remote device.
#
# .. note::
#
#   All instructions in both this section and the next should be
#   executed on the target device, e.g. a Jetson Nano, which we assume
#   is running Linux.
#
# Since we do compilation on the local machine, the remote device is only used
# for running the generated code. We only need to build the TVM runtime on
# the remote device.
#
# .. code-block:: bash
#
# git clone --recursive https://github.com/apache/tvm tvm
# cd tvm
# mkdir build
# cp cmake/config.cmake build
# cd build
# cmake ..
# make runtime -j4
# .. note::
#
# If we want to use Jetson Nano's GPU for inference,
# we need to enable the CUDA option in `config.cmake`,
# that is, `set(USE_CUDA ON)`
#
# After building the runtime successfully, we need to set environment variables
# in the :code:`~/.bashrc` file. We can edit :code:`~/.bashrc`
# using :code:`vi ~/.bashrc` and add the line below (Assuming your TVM
# directory is in :code:`~/tvm`):
#
# .. code-block:: bash
#
# export PYTHONPATH=$PYTHONPATH:~/tvm/python
#
# To update the environment variables, execute :code:`source ~/.bashrc`.
######################################################################
# Set Up RPC Server on Device
# ---------------------------
# To start an RPC server, run the following command on your remote device
# (which is the Jetson Nano in our example).
#
# .. code-block:: bash
#
# python -m tvm.exec.rpc_server --host 0.0.0.0 --port=9091
#
# If you see the line below, it means the RPC server started
# successfully on your device.
#
# .. code-block:: bash
#
# INFO:RPCServer:bind to 0.0.0.0:9091
#
######################################################################
# Prepare the Pre-trained Model
# -----------------------------
# Back on the host machine, which should have a full TVM installed (with LLVM).
#
# We will use pre-trained model from
# `MXNet Gluon model zoo <https://mxnet.apache.org/api/python/gluon/model_zoo.html>`_.
# You can find more details about this part in the tutorial :ref:`tutorial-from-mxnet`.
from mxnet.gluon.model_zoo.vision import get_model
from PIL import Image
import numpy as np
# one line to get the model
block = get_model("resnet18_v1", pretrained=True)
######################################################################
# In order to test our model, we download an image of a cat and
# transform its format.
img_url = "https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true"
img_name = "cat.png"
img_path = download_testdata(img_url, img_name, module="data")
image = Image.open(img_path).resize((224, 224))
def transform_image(image):
image = np.array(image) - np.array([123.0, 117.0, 104.0])
image /= np.array([58.395, 57.12, 57.375])
image = image.transpose((2, 0, 1))
image = image[np.newaxis, :]
return image
x = transform_image(image)
######################################################################
# The synset is used to transform the label from an ImageNet class number
# into a human-readable word.
synset_url = "".join(
[
"https://gist.githubusercontent.com/zhreshold/",
"4d0b62f3d01426887599d4f7ede23ee5/raw/",
"596b27d23537e5a1b5751d2b0481ef172f58b539/",
"imagenet1000_clsid_to_human.txt",
]
)
synset_name = "imagenet1000_clsid_to_human.txt"
synset_path = download_testdata(synset_url, synset_name, module="data")
with open(synset_path) as f:
synset = eval(f.read())
######################################################################
# Now we would like to port the Gluon model to a portable computational graph.
# It only takes a few lines.
# We support both the MXNet static graph (symbol) and HybridBlock in mxnet.gluon.
shape_dict = {"data": x.shape}
mod, params = relay.frontend.from_mxnet(block, shape_dict)
# we want a probability so add a softmax operator
func = mod["main"]
func = relay.Function(func.params, relay.nn.softmax(func.body), None, func.type_params, func.attrs)
######################################################################
# Here are some basic data workload configurations.
batch_size = 1
num_classes = 1000
image_shape = (3, 224, 224)
data_shape = (batch_size,) + image_shape
######################################################################
# Compile The Graph
# -----------------
# To compile the graph, we call the :py:func:`relay.build` function
# with the graph configuration and parameters. However, you cannot
# deploy an x86 program on a device with an ARM instruction set. This means
# Relay also needs to know the compilation options of the target device,
# in addition to the arguments :code:`net` and :code:`params` that specify the
# deep learning workload. The choice of options matters; different options
# will lead to very different performance.
######################################################################
# If we run the example on our x86 server for demonstration, we can simply
# set it as :code:`llvm`. If running it on the Jetson Nano, we need to
# set it as :code:`nvidia/jetson-nano`. Set :code:`local_demo` to False
# if you want to run this tutorial with a real device.
local_demo = True
if local_demo:
target = tvm.target.Target("llvm")
else:
target = tvm.target.Target("nvidia/jetson-nano")
assert target.kind.name == "cuda"
assert target.attrs["arch"] == "sm_53"
assert target.attrs["shared_memory_per_block"] == 49152
assert target.attrs["max_threads_per_block"] == 1024
assert target.attrs["thread_warp_size"] == 32
assert target.attrs["registers_per_block"] == 32768
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(func, target, params=params)
# After `relay.build`, you will get three return values: graph,
# library and the new parameters, since we do some optimization that will
# change the parameters but keep the result of the model the same.
# Save the library at local temporary directory.
tmp = utils.tempdir()
lib_fname = tmp.relpath("net.tar")
lib.export_library(lib_fname)
######################################################################
# Deploy the Model Remotely by RPC
# --------------------------------
# With RPC, you can deploy the model remotely from your host machine
# to the remote device.
# obtain an RPC session from remote device.
if local_demo:
remote = rpc.LocalSession()
else:
    # The following is my environment; change this to the IP address of your target device
host = "192.168.1.11"
port = 9091
remote = rpc.connect(host, port)
# upload the library to remote device and load it
remote.upload(lib_fname)
rlib = remote.load_module("net.tar")
# create the remote runtime module
if local_demo:
dev = remote.cpu(0)
else:
dev = remote.cuda(0)
module = runtime.GraphModule(rlib["default"](dev))
# set input data
module.set_input("data", tvm.nd.array(x.astype("float32")))
# run
module.run()
# get output
out = module.get_output(0)
# get top1 result
top1 = np.argmax(out.numpy())
print("TVM prediction top-1: {}".format(synset[top1]))
| https://github.com/zk-ml/tachikoma |
gallery/how_to/deploy_models/deploy_model_on_rasp.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
.. _tutorial-deploy-model-on-rasp:
Deploy the Pretrained Model on Raspberry Pi
===========================================
**Author**: `Ziheng Jiang <https://ziheng.org/>`_, \
`Hiroyuki Makino <https://makihiro.github.io/>`_
This is an example of using Relay to compile a ResNet model and deploy
it on Raspberry Pi.
"""
# sphinx_gallery_start_ignore
from tvm import testing
testing.utils.install_request_hook(depth=3)
# sphinx_gallery_end_ignore
import tvm
from tvm import te
import tvm.relay as relay
from tvm import rpc
from tvm.contrib import utils, graph_executor as runtime
from tvm.contrib.download import download_testdata
######################################################################
# .. _build-tvm-runtime-on-device:
#
# Build TVM Runtime on Device
# ---------------------------
#
# The first step is to build the TVM runtime on the remote device.
#
# .. note::
#
#   All instructions in both this section and the next should be
#   executed on the target device, e.g. a Raspberry Pi, which we assume
#   is running Linux.
#
# Since we do compilation on the local machine, the remote device is only used
# for running the generated code. We only need to build the TVM runtime on
# the remote device.
#
# .. code-block:: bash
#
# git clone --recursive https://github.com/apache/tvm tvm
# cd tvm
# mkdir build
# cp cmake/config.cmake build
# cd build
# cmake ..
# make runtime -j4
#
# After building the runtime successfully, we need to set environment variables
# in the :code:`~/.bashrc` file. We can edit :code:`~/.bashrc`
# using :code:`vi ~/.bashrc` and add the line below (Assuming your TVM
# directory is in :code:`~/tvm`):
#
# .. code-block:: bash
#
# export PYTHONPATH=$PYTHONPATH:~/tvm/python
#
# To update the environment variables, execute :code:`source ~/.bashrc`.
######################################################################
# Set Up RPC Server on Device
# ---------------------------
# To start an RPC server, run the following command on your remote device
# (which is the Raspberry Pi in our example).
#
# .. code-block:: bash
#
# python -m tvm.exec.rpc_server --host 0.0.0.0 --port=9090
#
# If you see the line below, it means the RPC server started
# successfully on your device.
#
# .. code-block:: bash
#
# INFO:root:RPCServer: bind to 0.0.0.0:9090
#
######################################################################
# Prepare the Pre-trained Model
# -----------------------------
# Back on the host machine, which should have a full TVM installed (with LLVM).
#
# We will use pre-trained model from
# `MXNet Gluon model zoo <https://mxnet.apache.org/api/python/gluon/model_zoo.html>`_.
# You can find more details about this part in the tutorial :ref:`tutorial-from-mxnet`.
from mxnet.gluon.model_zoo.vision import get_model
from PIL import Image
import numpy as np
# one line to get the model
block = get_model("resnet18_v1", pretrained=True)
######################################################################
# In order to test our model, we download an image of a cat and
# transform its format.
img_url = "https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true"
img_name = "cat.png"
img_path = download_testdata(img_url, img_name, module="data")
image = Image.open(img_path).resize((224, 224))
def transform_image(image):
image = np.array(image) - np.array([123.0, 117.0, 104.0])
image /= np.array([58.395, 57.12, 57.375])
image = image.transpose((2, 0, 1))
image = image[np.newaxis, :]
return image
x = transform_image(image)
######################################################################
# The synset is used to transform the label from an ImageNet class number
# into a human-readable word.
synset_url = "".join(
[
"https://gist.githubusercontent.com/zhreshold/",
"4d0b62f3d01426887599d4f7ede23ee5/raw/",
"596b27d23537e5a1b5751d2b0481ef172f58b539/",
"imagenet1000_clsid_to_human.txt",
]
)
synset_name = "imagenet1000_clsid_to_human.txt"
synset_path = download_testdata(synset_url, synset_name, module="data")
with open(synset_path) as f:
synset = eval(f.read())
######################################################################
# Now we would like to port the Gluon model to a portable computational graph.
# It only takes a few lines.
# We support both the MXNet static graph (symbol) and HybridBlock in mxnet.gluon.
shape_dict = {"data": x.shape}
mod, params = relay.frontend.from_mxnet(block, shape_dict)
# we want a probability so add a softmax operator
func = mod["main"]
func = relay.Function(func.params, relay.nn.softmax(func.body), None, func.type_params, func.attrs)
######################################################################
# Here are some basic data workload configurations.
batch_size = 1
num_classes = 1000
image_shape = (3, 224, 224)
data_shape = (batch_size,) + image_shape
######################################################################
# Compile The Graph
# -----------------
# To compile the graph, we call the :py:func:`relay.build` function
# with the graph configuration and parameters. However, you cannot
# deploy an x86 program on a device with an ARM instruction set. This means
# Relay also needs to know the compilation options of the target device,
# in addition to the arguments :code:`net` and :code:`params` that specify the
# deep learning workload. The choice of options matters; different options
# will lead to very different performance.
######################################################################
# If we run the example on our x86 server for demonstration, we can simply
# set it as :code:`llvm`. If running it on the Raspberry Pi, we need to
# specify its instruction set. Set :code:`local_demo` to False if you want
# to run this tutorial with a real device.
local_demo = True
if local_demo:
target = tvm.target.Target("llvm")
else:
target = tvm.target.arm_cpu("rasp3b")
# The above line is a simple form of
# target = tvm.target.Target('llvm -device=arm_cpu -model=bcm2837 -mtriple=armv7l-linux-gnueabihf -mattr=+neon')
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(func, target, params=params)
# After `relay.build`, you will get three return values: graph,
# library and the new parameters, since we do some optimization that will
# change the parameters but keep the result of the model the same.
# Save the library at local temporary directory.
tmp = utils.tempdir()
lib_fname = tmp.relpath("net.tar")
lib.export_library(lib_fname)
######################################################################
# Deploy the Model Remotely by RPC
# --------------------------------
# With RPC, you can deploy the model remotely from your host machine
# to the remote device.
# obtain an RPC session from remote device.
if local_demo:
remote = rpc.LocalSession()
else:
    # The following is my environment; change this to the IP address of your target device
host = "10.77.1.162"
port = 9090
remote = rpc.connect(host, port)
# upload the library to remote device and load it
remote.upload(lib_fname)
rlib = remote.load_module("net.tar")
# create the remote runtime module
dev = remote.cpu(0)
module = runtime.GraphModule(rlib["default"](dev))
# set input data
module.set_input("data", tvm.nd.array(x.astype("float32")))
# run
module.run()
# get output
out = module.get_output(0)
# get top1 result
top1 = np.argmax(out.numpy())
print("TVM prediction top-1: {}".format(synset[top1]))
| https://github.com/zk-ml/tachikoma |
gallery/how_to/deploy_models/deploy_object_detection_pytorch.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Compile PyTorch Object Detection Models
=======================================
This article is an introductory tutorial on deploying PyTorch object
detection models with the Relay VM.
To begin, PyTorch should be installed.
TorchVision is also required since we will be using it as our model zoo.
A quick solution is to install via pip
.. code-block:: bash
pip install torch==1.7.0
pip install torchvision==0.8.1
or please refer to official site
https://pytorch.org/get-started/locally/
PyTorch versions should be backwards compatible but should be used
with the proper TorchVision version.
Currently, TVM supports PyTorch 1.7 and 1.4. Other versions may
be unstable.
"""
# sphinx_gallery_start_ignore
from tvm import testing
testing.utils.install_request_hook(depth=3)
# sphinx_gallery_end_ignore
import tvm
from tvm import relay
from tvm.runtime.vm import VirtualMachine
from tvm.contrib.download import download_testdata
import numpy as np
import cv2
# PyTorch imports
import torch
import torchvision
######################################################################
# Load pre-trained maskrcnn from torchvision and do tracing
# ---------------------------------------------------------
in_size = 300
input_shape = (1, 3, in_size, in_size)
def do_trace(model, inp):
model_trace = torch.jit.trace(model, inp)
model_trace.eval()
return model_trace
def dict_to_tuple(out_dict):
if "masks" in out_dict.keys():
return out_dict["boxes"], out_dict["scores"], out_dict["labels"], out_dict["masks"]
return out_dict["boxes"], out_dict["scores"], out_dict["labels"]
class TraceWrapper(torch.nn.Module):
def __init__(self, model):
super().__init__()
self.model = model
def forward(self, inp):
out = self.model(inp)
return dict_to_tuple(out[0])
model_func = torchvision.models.detection.maskrcnn_resnet50_fpn
model = TraceWrapper(model_func(pretrained=True))
model.eval()
inp = torch.Tensor(np.random.uniform(0.0, 250.0, size=(1, 3, in_size, in_size)))
with torch.no_grad():
out = model(inp)
script_module = do_trace(model, inp)
######################################################################
# Download a test image and pre-process
# -------------------------------------
img_url = (
"https://raw.githubusercontent.com/dmlc/web-data/master/gluoncv/detection/street_small.jpg"
)
img_path = download_testdata(img_url, "test_street_small.jpg", module="data")
img = cv2.imread(img_path).astype("float32")
img = cv2.resize(img, (in_size, in_size))
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = np.transpose(img / 255.0, [2, 0, 1])
img = np.expand_dims(img, axis=0)
######################################################################
# Import the graph to Relay
# -------------------------
input_name = "input0"
shape_list = [(input_name, input_shape)]
mod, params = relay.frontend.from_pytorch(script_module, shape_list)
######################################################################
# Compile with Relay VM
# ---------------------
# Note: Currently only the CPU target is supported. For the x86 target, it is
# highly recommended to build TVM with Intel MKL and Intel OpenMP to get
# the best performance, due to the existence of large dense operators in
# torchvision RCNN models.
# Add "-libs=mkl" to get the best performance on the x86 target.
# For x86 machines that support AVX512, the complete target is
# "llvm -mcpu=skylake-avx512 -libs=mkl"
target = "llvm"
with tvm.transform.PassContext(opt_level=3, disabled_pass=["FoldScaleAxis"]):
vm_exec = relay.vm.compile(mod, target=target, params=params)
######################################################################
# Inference with Relay VM
# -----------------------
dev = tvm.cpu()
vm = VirtualMachine(vm_exec, dev)
vm.set_input("main", **{input_name: img})
tvm_res = vm.run()
######################################################################
# Get boxes with score larger than 0.9
# ------------------------------------
score_threshold = 0.9
boxes = tvm_res[0].numpy().tolist()
valid_boxes = []
for i, score in enumerate(tvm_res[1].numpy().tolist()):
if score > score_threshold:
valid_boxes.append(boxes[i])
else:
break
print("Get {} valid boxes".format(len(valid_boxes)))
| https://github.com/zk-ml/tachikoma |
gallery/how_to/deploy_models/deploy_prequantized.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Deploy a Framework-prequantized Model with TVM
==============================================
**Author**: `Masahiro Masuda <https://github.com/masahi>`_
This is a tutorial on loading models quantized by deep learning frameworks into TVM.
Pre-quantized model import is one of the quantization approaches supported in TVM. More details on
the quantization story in TVM can be found
`here <https://discuss.tvm.apache.org/t/quantization-story/3920>`_.
Here, we demonstrate how to load and run models quantized by PyTorch, MXNet, and TFLite.
Once loaded, we can run compiled, quantized models on any hardware TVM supports.
"""
# sphinx_gallery_start_ignore
from tvm import testing
testing.utils.install_request_hook(depth=3)
# sphinx_gallery_end_ignore
#################################################################################
# First, necessary imports
from PIL import Image
import numpy as np
import torch
from torchvision.models.quantization import mobilenet as qmobilenet
import tvm
from tvm import relay
from tvm.contrib.download import download_testdata
#################################################################################
# Helper functions to run the demo
def get_transform():
import torchvision.transforms as transforms
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
return transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
]
)
def get_real_image(im_height, im_width):
img_url = "https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true"
img_path = download_testdata(img_url, "cat.png", module="data")
return Image.open(img_path).resize((im_height, im_width))
def get_imagenet_input():
im = get_real_image(224, 224)
preprocess = get_transform()
pt_tensor = preprocess(im)
return np.expand_dims(pt_tensor.numpy(), 0)
def get_synset():
synset_url = "".join(
[
"https://gist.githubusercontent.com/zhreshold/",
"4d0b62f3d01426887599d4f7ede23ee5/raw/",
"596b27d23537e5a1b5751d2b0481ef172f58b539/",
"imagenet1000_clsid_to_human.txt",
]
)
synset_name = "imagenet1000_clsid_to_human.txt"
synset_path = download_testdata(synset_url, synset_name, module="data")
with open(synset_path) as f:
return eval(f.read())
def run_tvm_model(mod, params, input_name, inp, target="llvm"):
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(mod, target=target, params=params)
runtime = tvm.contrib.graph_executor.GraphModule(lib["default"](tvm.device(target, 0)))
runtime.set_input(input_name, inp)
runtime.run()
return runtime.get_output(0).numpy(), runtime
#################################################################################
# A mapping from label to class name, to verify that the outputs from models below
# are reasonable
synset = get_synset()
#################################################################################
# Everyone's favorite cat image for demonstration
inp = get_imagenet_input()
################################################################################
# Deploy a quantized PyTorch Model
# --------------------------------
# First, we demonstrate how to load deep learning models quantized by PyTorch,
# using our PyTorch frontend.
#
# Please refer to the PyTorch static quantization tutorial below to learn about
# their quantization workflow.
# https://pytorch.org/tutorials/advanced/static_quantization_tutorial.html
#
# We use this function to quantize PyTorch models.
# In short, this function takes a floating point model and converts it to uint8.
# The model is per-channel quantized.
def quantize_model(model, inp):
model.fuse_model()
model.qconfig = torch.quantization.get_default_qconfig("fbgemm")
torch.quantization.prepare(model, inplace=True)
# Dummy calibration
model(inp)
torch.quantization.convert(model, inplace=True)
##############################################################################
# Load quantization-ready, pretrained Mobilenet v2 model from torchvision
# -----------------------------------------------------------------------
# We choose MobileNet v2 because this model was trained with quantization-aware
# training. Other models require a full post-training calibration.
qmodel = qmobilenet.mobilenet_v2(pretrained=True).eval()
##############################################################################
# Quantize, trace and run the PyTorch Mobilenet v2 model
# ------------------------------------------------------
# The details are out of scope for this tutorial. Please refer to the tutorials
# on the PyTorch website to learn about quantization and jit.
pt_inp = torch.from_numpy(inp)
quantize_model(qmodel, pt_inp)
script_module = torch.jit.trace(qmodel, pt_inp).eval()
with torch.no_grad():
pt_result = script_module(pt_inp).numpy()
##############################################################################
# Convert quantized Mobilenet v2 to Relay-QNN using the PyTorch frontend
# ----------------------------------------------------------------------
# The PyTorch frontend has support for converting a quantized PyTorch model to
# an equivalent Relay module enriched with quantization-aware operators.
# We call this representation Relay QNN dialect.
#
# You can print the output from the frontend to see how quantized models are
# represented.
#
# You would see operators specific to quantization such as
# qnn.quantize, qnn.dequantize, qnn.requantize, and qnn.conv2d etc.
input_name = "input" # the input name can be be arbitrary for PyTorch frontend.
input_shapes = [(input_name, (1, 3, 224, 224))]
mod, params = relay.frontend.from_pytorch(script_module, input_shapes)
# print(mod) # comment in to see the QNN IR dump
##############################################################################
# Compile and run the Relay module
# --------------------------------
# Once we have obtained the quantized Relay module, the rest of the workflow
# is the same as running floating point models. Please refer to other
# tutorials for more details.
#
# Under the hood, quantization specific operators are lowered to a sequence of
# standard Relay operators before compilation.
target = "llvm"
tvm_result, rt_mod = run_tvm_model(mod, params, input_name, inp, target=target)
##########################################################################
# Compare the output labels
# -------------------------
# We should see identical labels printed.
pt_top3_labels = np.argsort(pt_result[0])[::-1][:3]
tvm_top3_labels = np.argsort(tvm_result[0])[::-1][:3]
print("PyTorch top3 labels:", [synset[label] for label in pt_top3_labels])
print("TVM top3 labels:", [synset[label] for label in tvm_top3_labels])
###########################################################################################
# However, due to the difference in numerics, in general the raw floating point
# outputs are not expected to be identical. Here, we print how many floating point
# output values are identical out of 1000 outputs from mobilenet v2.
print("%d in 1000 raw floating outputs identical." % np.sum(tvm_result[0] == pt_result[0]))
##########################################################################
# Measure performance
# -------------------------
# Here we give an example of how to measure performance of TVM compiled models.
n_repeat = 100 # should be bigger to make the measurement more accurate
dev = tvm.cpu(0)
print(rt_mod.benchmark(dev, number=1, repeat=n_repeat))
######################################################################
# .. note::
#
# We recommend this method for the following reasons:
#
# * Measurements are done in C++, so there is no Python overhead
# * It includes several warm up runs
# * The same method can be used to profile on remote devices (android etc.).
######################################################################
# .. note::
#
# Unless the hardware has special support for fast 8 bit instructions, quantized models are
# not expected to be any faster than FP32 models. Without fast 8 bit instructions, TVM does
# quantized convolution in 16 bit, even if the model itself is 8 bit.
#
# For x86, the best performance can be achieved on CPUs with AVX512 instructions set.
# In this case, TVM utilizes the fastest available 8 bit instructions for the given target.
# This includes support for the VNNI 8 bit dot product instruction (CascadeLake or newer).
#
#   Moreover, the following general tips for CPU performance equally apply:
#
# * Set the environment variable TVM_NUM_THREADS to the number of physical cores
# * Choose the best target for your hardware, such as "llvm -mcpu=skylake-avx512" or
# "llvm -mcpu=cascadelake" (more CPUs with AVX512 would come in the future)
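######################################################################
# For instance, the thread-count tip above can be applied by exporting the
# variable in the shell before launching this script (a sketch; the value
# below is illustrative and should match your machine's physical core count):
#
# .. code-block:: bash
#
#   export TVM_NUM_THREADS=4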
###############################################################################
# Deploy a quantized MXNet Model
# ------------------------------
# TODO
###############################################################################
# Deploy a quantized TFLite Model
# -------------------------------
# TODO
| https://github.com/zk-ml/tachikoma |
gallery/how_to/deploy_models/deploy_prequantized_tflite.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Deploy a Framework-prequantized Model with TVM - Part 3 (TFLite)
================================================================
**Author**: `Siju Samuel <https://github.com/siju-samuel>`_
Welcome to part 3 of the Deploy Framework-Prequantized Model with TVM tutorial.
In this part, we will start with a Quantized TFLite graph and then compile and execute it via TVM.
For more details on quantizing the model using TFLite, readers are encouraged to
go through `Converting Quantized Models
<https://www.tensorflow.org/lite/convert/quantization>`_.
The TFLite models can be downloaded from this `link
<https://www.tensorflow.org/lite/guide/hosted_models>`_.
To get started, TensorFlow and the TFLite package need to be installed as prerequisites.
.. code-block:: bash
# install tensorflow and tflite
pip install tensorflow==2.1.0
pip install tflite==2.1.0
Now please check whether the TFLite package is installed successfully with ``python -c "import tflite"``
"""
# sphinx_gallery_start_ignore
from tvm import testing
testing.utils.install_request_hook(depth=3)
# sphinx_gallery_end_ignore
###############################################################################
# Necessary imports
# -----------------
import os
import numpy as np
import tflite
import tvm
from tvm import relay
######################################################################
# Download pretrained Quantized TFLite model
# ------------------------------------------
# Download mobilenet V2 TFLite model provided by Google
from tvm.contrib.download import download_testdata
model_url = (
"https://storage.googleapis.com/download.tensorflow.org/models/"
"tflite_11_05_08/mobilenet_v2_1.0_224_quant.tgz"
)
# Download the model tar file and extract it to get mobilenet_v2_1.0_224_quant.tflite
model_path = download_testdata(
model_url, "mobilenet_v2_1.0_224_quant.tgz", module=["tf", "official"]
)
model_dir = os.path.dirname(model_path)
######################################################################
# Utils for downloading and extracting zip files
# ----------------------------------------------
def extract(path):
import tarfile
if path.endswith("tgz") or path.endswith("gz"):
dir_path = os.path.dirname(path)
tar = tarfile.open(path)
tar.extractall(path=dir_path)
tar.close()
else:
raise RuntimeError("Could not decompress the file: " + path)
extract(model_path)
######################################################################
# Load a test image
# -----------------
#######################################################################
# Get a real image for e2e testing
# --------------------------------
def get_real_image(im_height, im_width):
from PIL import Image
repo_base = "https://github.com/dmlc/web-data/raw/main/tensorflow/models/InceptionV1/"
img_name = "elephant-299.jpg"
image_url = os.path.join(repo_base, img_name)
img_path = download_testdata(image_url, img_name, module="data")
image = Image.open(img_path).resize((im_height, im_width))
x = np.array(image).astype("uint8")
data = np.reshape(x, (1, im_height, im_width, 3))
return data
data = get_real_image(224, 224)
######################################################################
# Load a tflite model
# -------------------
######################################################################
# Now we can open mobilenet_v2_1.0_224.tflite
tflite_model_file = os.path.join(model_dir, "mobilenet_v2_1.0_224_quant.tflite")
tflite_model_buf = open(tflite_model_file, "rb").read()
# Get TFLite model from buffer
try:
import tflite
tflite_model = tflite.Model.GetRootAsModel(tflite_model_buf, 0)
except AttributeError:
import tflite.Model
tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buf, 0)
###############################################################################
# Let's run the TFLite pre-quantized model inference and get the TFLite prediction.
def run_tflite_model(tflite_model_buf, input_data):
"""Generic function to execute TFLite"""
try:
from tensorflow import lite as interpreter_wrapper
except ImportError:
from tensorflow.contrib import lite as interpreter_wrapper
input_data = input_data if isinstance(input_data, list) else [input_data]
interpreter = interpreter_wrapper.Interpreter(model_content=tflite_model_buf)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
# set input
assert len(input_data) == len(input_details)
for i in range(len(input_details)):
interpreter.set_tensor(input_details[i]["index"], input_data[i])
# Run
interpreter.invoke()
# get output
tflite_output = list()
for i in range(len(output_details)):
tflite_output.append(interpreter.get_tensor(output_details[i]["index"]))
return tflite_output
###############################################################################
# Let's run the TVM-compiled pre-quantized model inference and get the TVM prediction.
def run_tvm(lib):
from tvm.contrib import graph_executor
rt_mod = graph_executor.GraphModule(lib["default"](tvm.cpu(0)))
rt_mod.set_input("input", data)
rt_mod.run()
tvm_res = rt_mod.get_output(0).numpy()
tvm_pred = np.squeeze(tvm_res).argsort()[-5:][::-1]
return tvm_pred, rt_mod
###############################################################################
# TFLite inference
# ----------------
###############################################################################
# Run TFLite inference on the quantized model.
tflite_res = run_tflite_model(tflite_model_buf, data)
tflite_pred = np.squeeze(tflite_res).argsort()[-5:][::-1]
###############################################################################
# TVM compilation and inference
# -----------------------------
###############################################################################
# We use the TFLite-Relay parser to convert the TFLite pre-quantized graph into Relay IR. Note that
# the frontend parser call for a pre-quantized model is exactly the same as the frontend parser call
# for an FP32 model. We encourage you to uncomment print(mod) and inspect the Relay module. You
# will see many QNN operators, such as Requantize, Quantize, and QNN Conv2D.
dtype_dict = {"input": data.dtype.name}
shape_dict = {"input": data.shape}
mod, params = relay.frontend.from_tflite(tflite_model, shape_dict=shape_dict, dtype_dict=dtype_dict)
# print(mod)
###############################################################################
# Let's now compile the Relay module. We use the "llvm" target here. Please replace it with the
# target platform that you are interested in.
target = "llvm"
with tvm.transform.PassContext(opt_level=3):
lib = relay.build_module.build(mod, target=target, params=params)
###############################################################################
# Finally, let's run inference on the TVM-compiled module.
tvm_pred, rt_mod = run_tvm(lib)
###############################################################################
# Accuracy comparison
# -------------------
###############################################################################
# Print the top-5 labels for TFLite and TVM inference.
# We check the labels because the requantize implementation differs between
# TFLite and Relay, which causes the final output numbers to mismatch. So, we compare accuracy via labels.
print("TVM Top-5 labels:", tvm_pred)
print("TFLite Top-5 labels:", tflite_pred)
##########################################################################
# Measure performance
# -------------------
# Here we give an example of how to measure performance of TVM compiled models.
n_repeat = 100 # should be bigger to make the measurement more accurate
dev = tvm.cpu(0)
print(rt_mod.benchmark(dev, number=1, repeat=n_repeat))
######################################################################
# .. note::
#
# Unless the hardware has special support for fast 8 bit instructions, quantized models are
# not expected to be any faster than FP32 models. Without fast 8 bit instructions, TVM does
# quantized convolution in 16 bit, even if the model itself is 8 bit.
#
# For x86, the best performance can be achieved on CPUs with AVX512 instructions set.
# In this case, TVM utilizes the fastest available 8 bit instructions for the given target.
# This includes support for the VNNI 8 bit dot product instruction (CascadeLake or newer).
# For EC2 C5.12x large instance, TVM latency for this tutorial is ~2 ms.
#
# Intel conv2d NCHWc schedule on ARM gives better end-to-end latency compared to ARM NCHW
# conv2d spatial pack schedule for many TFLite networks. ARM winograd performance is higher but
# it has a high memory footprint.
#
#   Moreover, the following general tips for CPU performance equally apply:
#
# * Set the environment variable TVM_NUM_THREADS to the number of physical cores
# * Choose the best target for your hardware, such as "llvm -mcpu=skylake-avx512" or
# "llvm -mcpu=cascadelake" (more CPUs with AVX512 would come in the future)
# * Perform autotuning - :ref:`Auto-tuning a convolution network for x86 CPU
# <tune_relay_x86>`.
# * To get best inference performance on ARM CPU, change target argument
# according to your device and follow :ref:`Auto-tuning a convolution
# network for ARM CPU <tune_relay_arm>`.
| https://github.com/zk-ml/tachikoma |
gallery/how_to/deploy_models/deploy_quantized.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Deploy a Quantized Model on Cuda
================================
**Author**: `Wuwei Lin <https://github.com/vinx13>`_
This article is an introductory tutorial on automatic quantization with TVM.
Automatic quantization is one of the quantization modes in TVM. More details on
the quantization story in TVM can be found
`here <https://discuss.tvm.apache.org/t/quantization-story/3920>`_.
In this tutorial, we will import a GluonCV pre-trained model on ImageNet to
Relay, quantize the Relay model and then perform the inference.
"""
# sphinx_gallery_start_ignore
from tvm import testing
testing.utils.install_request_hook(depth=3)
# sphinx_gallery_end_ignore
import tvm
from tvm import te
from tvm import relay
import mxnet as mx
from tvm.contrib.download import download_testdata
from mxnet import gluon
import logging
import os
batch_size = 1
model_name = "resnet18_v1"
target = "cuda"
dev = tvm.device(target)
###############################################################################
# Prepare the Dataset
# -------------------
# We will demonstrate how to prepare the calibration dataset for quantization.
# We first download the validation set of ImageNet and pre-process the dataset.
calibration_rec = download_testdata(
"http://data.mxnet.io.s3-website-us-west-1.amazonaws.com/data/val_256_q90.rec",
"val_256_q90.rec",
)
def get_val_data(num_workers=4):
mean_rgb = [123.68, 116.779, 103.939]
std_rgb = [58.393, 57.12, 57.375]
def batch_fn(batch):
return batch.data[0].asnumpy(), batch.label[0].asnumpy()
img_size = 299 if model_name == "inceptionv3" else 224
val_data = mx.io.ImageRecordIter(
path_imgrec=calibration_rec,
preprocess_threads=num_workers,
shuffle=False,
batch_size=batch_size,
resize=256,
data_shape=(3, img_size, img_size),
mean_r=mean_rgb[0],
mean_g=mean_rgb[1],
mean_b=mean_rgb[2],
std_r=std_rgb[0],
std_g=std_rgb[1],
std_b=std_rgb[2],
)
return val_data, batch_fn
###############################################################################
# The calibration dataset should be an iterable object. We define the
# calibration dataset as a generator object in Python. In this tutorial, we
# only use a few samples for calibration.
calibration_samples = 10
def calibrate_dataset():
val_data, batch_fn = get_val_data()
val_data.reset()
for i, batch in enumerate(val_data):
if i * batch_size >= calibration_samples:
break
data, _ = batch_fn(batch)
yield {"data": data}
###############################################################################
# Import the model
# ----------------
# We use the Relay MxNet frontend to import a model from the Gluon model zoo.
def get_model():
gluon_model = gluon.model_zoo.vision.get_model(model_name, pretrained=True)
img_size = 299 if model_name == "inceptionv3" else 224
data_shape = (batch_size, 3, img_size, img_size)
mod, params = relay.frontend.from_mxnet(gluon_model, {"data": data_shape})
return mod, params
###############################################################################
# Quantize the Model
# ------------------
# In quantization, we need to find the scale for each weight and intermediate
# feature map tensor of each layer.
#
# For weights, the scales are directly calculated based on the value of the
# weights. Two modes are supported: `power2` and `max`. Both modes find the
# maximum value within the weight tensor first. In `power2` mode, the maximum
# is rounded down to a power of two. If the scales of both weights and
# intermediate feature maps are powers of two, we can leverage bit shifting for
# multiplications. This makes it computationally more efficient. In `max` mode,
# the maximum is used as the scale. Without rounding, `max` mode might have
# better accuracy in some cases. When the scales are not powers of two, fixed
# point multiplications will be used.
#
# For intermediate feature maps, we can find the scales with data-aware
# quantization. Data-aware quantization takes a calibration dataset as the
# input argument. Scales are calculated by minimizing the KL divergence between
# the distributions of activations before and after quantization.
# Alternatively, we can also use pre-defined global scales. This saves the time
# needed for calibration, but the accuracy might be impacted.
def quantize(mod, params, data_aware):
if data_aware:
with relay.quantize.qconfig(calibrate_mode="kl_divergence", weight_scale="max"):
mod = relay.quantize.quantize(mod, params, dataset=calibrate_dataset())
else:
with relay.quantize.qconfig(calibrate_mode="global_scale", global_scale=8.0):
mod = relay.quantize.quantize(mod, params)
return mod
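###############################################################################
# To make the difference between the two weight-scale modes concrete, below is
# a minimal numpy sketch (the helper name is illustrative and not part of the
# relay.quantize API) that computes the scale a weight tensor would receive
# under `max` mode and under `power2` mode as described above.
import numpy as np
def weight_scale_sketch(weights):
    # `max` mode: the largest absolute value in the weight tensor is the scale.
    max_scale = float(np.abs(weights).max())
    # `power2` mode: round that maximum down to the nearest power of two so the
    # quantized multiplication can be implemented with bit shifts.
    power2_scale = float(2.0 ** np.floor(np.log2(max_scale)))
    return max_scale, power2_scale
print(weight_scale_sketch(np.array([0.3, -1.7, 0.9], dtype="float32")))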
###############################################################################
# Run Inference
# -------------
# We create a Relay VM to build and execute the model.
def run_inference(mod):
model = relay.create_executor("vm", mod, dev, target).evaluate()
val_data, batch_fn = get_val_data()
for i, batch in enumerate(val_data):
data, label = batch_fn(batch)
prediction = model(data)
if i > 10: # only run inference on a few samples in this tutorial
break
def main():
mod, params = get_model()
mod = quantize(mod, params, data_aware=True)
run_inference(mod)
if __name__ == "__main__":
main()
| https://github.com/zk-ml/tachikoma |
gallery/how_to/deploy_models/deploy_sparse.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Deploy a Hugging Face Pruned Model on CPU
=========================================
**Author**: `Josh Fromm <https://github.com/jwfromm>`_
This tutorial demonstrates how to take any pruned model, in this case `PruneBert
from Hugging Face
<https://huggingface.co/huggingface/prunebert-base-uncased-6-finepruned-w-distil-squad>`_,
and use TVM to leverage the model's sparsity support to produce real speedups. Although
the primary purpose of this tutorial is to realize speedups on already pruned
models, it may also be useful to estimate how fast a model would be *if* it were
pruned. To this end, we also provide a function that takes an unpruned model and
replaces its weights
with random and pruned weights at a specified sparsity. This may be a useful
feature when trying to decide if a model is worth pruning or not.
Before we get into the code, it's useful to discuss sparsity and pruning
and dig into the two
different types of sparsity: **structured** and **unstructured**.
Pruning is a technique primarily used to reduce the parameter size of a model
by replacing weight values with 0s. Although many methods exist for choosing which
weights should be set to 0, the most straightforward is to pick the
weights with the smallest value. Typically, weights are pruned to a desired
sparsity percentage. For example, a 95% sparse model would have only 5% of
its weights non-zero. Pruning to very high sparsities often requires
fine-tuning or full retraining as it tends to be a lossy approximation.
Although parameter size benefits are quite easy to obtain from a pruned model
through simple compression, leveraging sparsity to yield runtime speedups
is more complicated.
In structured sparsity, weights are pruned with the goal of clustering
pruned weights together. In other words, they are pruned using both their
value and location. The benefit of bunching up pruned weights is that it allows
an algorithm such as matrix multiplication to skip entire blocks. It turns out
that some degree of *block sparsity* is very important to realizing significant
speedups on most hardware available today.
This is because when loading memory in most CPUs or GPUs,
it doesn't save any work to skip reading a single value at a time; instead, an entire
chunk or tile is read in and processed using something like vectorized instructions.
Unstructured sparse weights are those that are pruned only on the value of
the original weights. They may appear to be scattered randomly throughout
a tensor rather than in chunks like we'd see in block sparse weights.
At low sparsities, unstructured pruning techniques are difficult to
accelerate. However, at high sparsities many blocks of all 0 values
will naturally appear, making it possible to accelerate.
This tutorial interacts with both structured and unstructured sparsity.
Hugging Face's PruneBert model is unstructured but 95% sparse, allowing us
to apply TVM's block sparse optimizations to it, even if not optimally.
When generating random sparse weights for an unpruned model, we do so with structured
sparsity. A fun exercise is comparing the real speed of PruneBert with the block
sparse speed using fake weights to see the benefit of structured sparsity.
"""
# sphinx_gallery_start_ignore
from tvm import testing
testing.utils.install_request_hook(depth=3)
# sphinx_gallery_end_ignore
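###############################################################################
# To make the structured/unstructured distinction above concrete, here is a
# small self-contained numpy sketch (purely illustrative, not part of this
# tutorial's pipeline) that prunes the same matrix both ways: element-wise by
# magnitude, and block-wise in 2x2 tiles.
import numpy as np
rng = np.random.default_rng(0)
dense = rng.standard_normal((8, 8)).astype("float32")
# Unstructured pruning: zero out the 75% smallest-magnitude individual weights.
threshold = np.sort(np.abs(dense).ravel())[int(0.75 * dense.size)]
unstructured = np.where(np.abs(dense) >= threshold, dense, 0.0)
# Structured pruning: zero out entire 2x2 blocks with the smallest total magnitude.
blocks = dense.reshape(4, 2, 4, 2)
block_norms = np.abs(blocks).sum(axis=(1, 3))
block_mask = block_norms >= np.sort(block_norms.ravel())[int(0.75 * block_norms.size)]
structured = (blocks * block_mask[:, None, :, None]).reshape(8, 8)
print("unstructured nonzeros:", np.count_nonzero(unstructured))
print("structured nonzeros:", np.count_nonzero(structured))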
###############################################################################
# Load Required Modules
# ---------------------
# Other than TVM, scipy, the latest transformers, and
# tensorflow 2.2+ are required.
import os
import tvm
import time
import itertools
import numpy as np
import tensorflow as tf
from tvm import relay, runtime
from tvm.contrib import graph_executor
from tvm.relay import data_dep_optimization as ddo
from tensorflow.python.framework.convert_to_constants import (
convert_variables_to_constants_v2,
)
import scipy.sparse as sp
# Ask tensorflow to limit its GPU memory to what's actually needed
# instead of gobbling everything that's available.
# https://www.tensorflow.org/guide/gpu#limiting_gpu_memory_growth
# This way this tutorial is a little more friendly to sphinx-gallery.
gpus = tf.config.list_physical_devices("GPU")
if gpus:
try:
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
print("tensorflow will use experimental.set_memory_growth(True)")
except RuntimeError as e:
print("experimental.set_memory_growth option is not available: {}".format(e))
###############################################################################
# Configure Settings
# ------------------
# Let's start by defining some parameters that define the type of model
# and sparsity to run.
# The name of the transformer model to download and run.
name = "huggingface/prunebert-base-uncased-6-finepruned-w-distil-squad"
# The number of batches in an input.
batch_size = 1
# The length of each input sequence.
seq_len = 128
# TVM platform identifier. Note that best cpu performance can be achieved by setting -mcpu
# appropriately for your specific machine. CUDA and ROCm are also supported.
target = "llvm"
# Which device to run on. Should be one of tvm.cpu() or tvm.cuda().
dev = tvm.cpu()
# If true, then a sparse variant of the network will be run and
# benchmarked.
measure_sparse = True
# The block size of structured sparsity to convert weight tensors
# into. Changing this parameter may yield speedups for some platforms.
bs_r = 1
# For models besides PruneBert (which is 95% sparse), this parameter
# determines how sparse the generated weights should be. The higher
# the sparsity, the faster the result.
sparsity = 0.85
###############################################################################
# Download and Convert Transformers Model
# ---------------------------------------
# Now we'll grab a model from the transformers module, download it,
# convert it into a TensorFlow graphdef in preparation for converting that graphdef into
# a relay graph that we can optimize and deploy.
def load_keras_model(module, name, seq_len, batch_size, report_runtime=True):
model = module.from_pretrained(name)
dummy_input = tf.keras.Input(shape=[seq_len], batch_size=batch_size, dtype="int32")
dummy_out = model(dummy_input) # Propagate shapes through the keras model.
if report_runtime:
np_input = np.random.uniform(size=[batch_size, seq_len], low=0, high=seq_len).astype(
"int32"
)
start = time.time()
repeats = 50
for i in range(repeats):
np_out = model(np_input)
end = time.time()
print("Keras Runtime: %f ms." % (1000 * ((end - start) / repeats)))
return model
def convert_to_graphdef(model, batch_size, seq_len):
model_func = tf.function(lambda x: model(x))
input_dict = model._saved_model_inputs_spec
input_spec = input_dict[list(input_dict.keys())[0]]
model_func = model_func.get_concrete_function(
tf.TensorSpec([batch_size, seq_len], input_spec.dtype)
)
frozen_func = convert_variables_to_constants_v2(model_func)
return frozen_func.graph.as_graph_def()
def download_model(name, batch_size, seq_len):
import transformers
module = getattr(transformers, "TFBertForSequenceClassification")
model = load_keras_model(module, name=name, batch_size=batch_size, seq_len=seq_len)
return convert_to_graphdef(model, batch_size, seq_len)
###############################################################################
# Convert to Relay Graph
# ----------------------
# We now have all the tooling to get a transformers model in the right format
# for relay conversion. Let's import it! In the following function we
# save the imported graph in relay's json format so that we don't have
# to reimport from tensorflow each time this script is run.
def import_graphdef(
name,
batch_size,
seq_len,
save_relay=True,
relay_file="model.json",
relay_params="model.params",
):
abs_path = os.path.dirname(os.path.abspath(__file__))
shape_dict = {"input_1": (batch_size, seq_len)}
relay_file = ("%s_%d_%d_%s" % (name, batch_size, seq_len, relay_file)).replace("/", "_")
relay_params = ("%s_%d_%d_%s" % (name, batch_size, seq_len, relay_params)).replace("/", "_")
if os.path.exists(os.path.join(abs_path, relay_file)) and os.path.exists(
os.path.join(abs_path, relay_params)
):
with open(os.path.join(abs_path, relay_file), "r") as fi:
mod = tvm.ir.load_json(fi.read())
with open(os.path.join(abs_path, relay_params), "rb") as fi:
params = relay.load_param_dict(fi.read())
else:
graph_def = download_model(name, batch_size, seq_len)
mod, params = relay.frontend.from_tensorflow(graph_def, shape=shape_dict)
if save_relay:
with open(os.path.join(abs_path, relay_file), "w") as fo:
fo.write(tvm.ir.save_json(mod))
with open(os.path.join(abs_path, relay_params), "wb") as fo:
fo.write(runtime.save_param_dict(params))
return mod, dict(params.items()), shape_dict
###############################################################################
# Run the Dense Graph
# -------------------
# Let's run the default version of the imported model. Note that even if
# the weights are sparse, we won't see any speedup because we are using
# regular dense matrix multiplications on these dense (but mostly zero)
# tensors instead of sparse-aware kernels.
def run_relay_graph(mod, params, shape_dict, target, dev):
with relay.build_config(opt_level=3):
lib = relay.build(mod, target=target, params=params)
input_shape = shape_dict["input_1"]
dummy_data = np.random.uniform(size=input_shape, low=0, high=input_shape[1]).astype("int32")
m = graph_executor.GraphModule(lib["default"](dev))
m.set_input(0, dummy_data)
m.run()
tvm_output = m.get_output(0)
print(m.benchmark(dev, repeat=5, number=5))
return tvm_output
def run_dense(mod, params, shape_dict, target, dev):
print("Dense Model Benchmark:")
return run_relay_graph(mod, params, shape_dict, target, dev)
###############################################################################
# Run the Sparse Graph
# --------------------
# Next we'll convert the graph into a sparse representation and generate
# fake sparse weights if needed. Then we'll use the same benchmarking
# script as dense to see how much faster we go! We apply a few relay passes
# to the graph to get it leveraging sparsity. First we use
# `simplify_fc_transpose` to fold the transposes on the weights of dense layers
# into the parameters. This makes it easier to convert the matrix multiplies
# that follow into sparse versions. Next we apply `bsr_dense.convert` to identify all
# weight matrices that can be made sparse, and automatically replace them.
#
# The `bsr_dense.convert` call below is doing the heavy lifting of identifying
# which weights in the model can be made sparse by checking if they are
# at least `sparsity_threshold` percent sparse. If so, it converts those
# weights into *Block Compressed Row Format (BSR)*. BSR is essentially
# a representation that indexes into the nonzero chunks of the tensor,
# making it easy for an algorithm to load those non-zero chunks and ignore
# the rest of the tensor. Once the sparse weights are in BSR format,
# `relay.transform.DenseToSparse` is applied to actually replace
# `relay.dense` operations with `relay.sparse_dense` calls that can be
# run faster.
def random_bsr_matrix(M, N, BS_R, BS_C, density, dtype="float32"):
Y = np.zeros((M, N), dtype=dtype)
assert M % BS_R == 0
assert N % BS_C == 0
nnz = int(density * M * N)
num_blocks = int(nnz / (BS_R * BS_C)) + 1
candidate_blocks = np.asarray(list(itertools.product(range(0, M, BS_R), range(0, N, BS_C))))
assert candidate_blocks.shape[0] == M // BS_R * N // BS_C
chosen_blocks = candidate_blocks[
np.random.choice(candidate_blocks.shape[0], size=num_blocks, replace=False)
]
for i in range(len(chosen_blocks)):
r, c = chosen_blocks[i]
Y[r : r + BS_R, c : c + BS_C] = np.random.uniform(-0.1, 0.1, (BS_R, BS_C))
s = sp.bsr_matrix(Y, blocksize=(BS_R, BS_C))
assert s.data.shape == (num_blocks, BS_R, BS_C)
assert s.data.size >= nnz
assert s.indices.shape == (num_blocks,)
assert s.indptr.shape == (M // BS_R + 1,)
return s.todense()
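###############################################################################
# As a quick, illustrative check of the BSR layout described above (the
# variable name below is just for demonstration), converting one of these
# matrices with scipy exposes the three arrays BSR stores: the dense non-zero
# blocks (`data`), the block column indices (`indices`), and the per-block-row
# offsets (`indptr`).
example_bsr = sp.bsr_matrix(random_bsr_matrix(8, 8, 2, 2, density=0.25), blocksize=(2, 2))
print("data shape:", example_bsr.data.shape)  # (num_nonzero_blocks, 2, 2)
print("indices:", example_bsr.indices)  # block column index of each stored block
print("indptr:", example_bsr.indptr)  # offsets into `indices` for each block row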
def random_sparse_bert_params(func, params, density, BS_R, BS_C):
def deepcopy(param_dic):
ret = {}
for k, v in param_dic.items():
ret[k] = tvm.nd.array(v.numpy())
return ret
new_params = deepcopy(params)
dense_weight_names = relay.analysis.sparse_dense._search_dense_op_weight(func)
for item in dense_weight_names:
name = str(item)
shape = new_params[name].shape
if shape[0] % BS_R == 0 and shape[1] % BS_C == 0:
new_w = random_bsr_matrix(shape[0], shape[1], BS_R, BS_C, density)
new_params[name] = tvm.nd.array(new_w)
return new_params
def run_sparse(mod, params, shape_dict, target, dev, bs_r, sparsity, gen_weights):
mod, params = ddo.simplify_fc_transpose.convert(mod["main"], params)
if gen_weights:
params = random_sparse_bert_params(mod, params, BS_R=bs_r, BS_C=1, density=1 - sparsity)
mod, params = ddo.bsr_dense.convert(mod, params, (bs_r, 1), sparsity_threshold=0.8)
print("Block Sparse Model with {blocksize}x1 blocks:".format(blocksize=bs_r))
return run_relay_graph(mod, params, shape_dict, target, dev)
###############################################################################
# Run All the Code!
# -----------------
# And that's it! Now we'll simply call all the needed functions to benchmark
# the model according to the set parameters. Note that to run this code
# you'll need to uncomment the last line first.
def benchmark():
mod, params, shape_dict = import_graphdef(name, batch_size, seq_len)
run_dense(mod, params, shape_dict, target, dev)
if measure_sparse:
gen_weights = "prune" not in name
run_sparse(mod, params, shape_dict, target, dev, bs_r, sparsity, gen_weights)
# benchmark()
###############################################################################
# Sample Output
# -------------
# For reference, below is the output of the script when run on an AMD CPU
# and shows about a 2.5X speedup from using sparsity.
# Dense Model Benchmark:
# Cannot find config for target=llvm, workload=('dense_nopack.x86', ('TENSOR', (1, 768), 'float32'), ('TENSOR', (2, 768), 'float32'), None, 'float32'). A fallback configuration is used, which may bring great performance regression.
# Cannot find config for target=llvm, workload=('dense_nopack.x86', ('TENSOR', (1, 768), 'float32'), ('TENSOR', (768, 768), 'float32'), None, 'float32'). A fallback configuration is used, which may bring great performance regression.
# Cannot find config for target=llvm, workload=('dense_nopack.x86', ('TENSOR', (128, 3072), 'float32'), ('TENSOR', (768, 3072), 'float32'), None, 'float32'). A fallback configuration is used, which may bring great performance regression.
# Cannot find config for target=llvm, workload=('dense_nopack.x86', ('TENSOR', (128, 768), 'float32'), ('TENSOR', (3072, 768), 'float32'), None, 'float32'). A fallback configuration is used, which may bring great performance regression.
# Cannot find config for target=llvm, workload=('dense_nopack.x86', ('TENSOR', (128, 768), 'float32'), ('TENSOR', (768, 768), 'float32'), None, 'float32'). A fallback configuration is used, which may bring great performance regression.
# Cannot find config for target=llvm, workload=('batch_matmul.x86', ('TENSOR', (12, 128, 128), 'float32'), ('TENSOR', (12, 64, 128), 'float32')). A fallback configuration is used, which may bring great performance regression.
# Cannot find config for target=llvm, workload=('batch_matmul.x86', ('TENSOR', (12, 128, 64), 'float32'), ('TENSOR', (12, 128, 64), 'float32')). A fallback configuration is used, which may bring great performance regression.
# Runtime: 165.26 ms (12.83 ms)
# Block Sparse Model with 1x1 blocks:
# Runtime: 67.75 ms (8.83 ms)
# Here is the output of this script on a GPU (GTX 1070) with the target "cuda -libs=cublas".
#
# Dense Model Benchmark:
# Cannot find config for target=cuda -keys=cuda,gpu -libs=cublas -max_num_threads=1024 -thread_warp_size=32, workload=('dense_cublas.cuda', ('TENSOR', (1, 768), 'float32'), ('TENSOR', (2, 768), 'float32'), None, 'float32'). A fallback configuration is used, which may bring great performance regression.
# Cannot find config for target=cuda -keys=cuda,gpu -libs=cublas -max_num_threads=1024 -thread_warp_size=32, workload=('dense_cublas.cuda', ('TENSOR', (1, 768), 'float32'), ('TENSOR', (768, 768), 'float32'), None, 'float32'). A fallback configuration is used, which may bring great performance regression.
# Cannot find config for target=cuda -keys=cuda,gpu -libs=cublas -max_num_threads=1024 -thread_warp_size=32, workload=('dense_cublas.cuda', ('TENSOR', (128, 3072), 'float32'), ('TENSOR', (768, 3072), 'float32'), None, 'float32'). A fallback configuration is used, which may bring great performance regression.
# Cannot find config for target=cuda -keys=cuda,gpu -libs=cublas -max_num_threads=1024 -thread_warp_size=32, workload=('dense_cublas.cuda', ('TENSOR', (128, 768), 'float32'), ('TENSOR', (3072, 768), 'float32'), None, 'float32'). A fallback configuration is used, which may bring great performance regression.
# Cannot find config for target=cuda -keys=cuda,gpu -libs=cublas -max_num_threads=1024 -thread_warp_size=32, workload=('dense_cublas.cuda', ('TENSOR', (128, 768), 'float32'), ('TENSOR', (768, 768), 'float32'), None, 'float32'). A fallback configuration is used, which may bring great performance regression.
# Cannot find config for target=cuda -keys=cuda,gpu -libs=cublas -max_num_threads=1024 -thread_warp_size=32, workload=('batch_matmul_cublas.cuda', ('TENSOR', (12, 128, 128), 'float32'), ('TENSOR', (12, 64, 128), 'float32'), (12, 128, 64)). A fallback configuration is used, which may bring great performance regression.
# Cannot find config for target=cuda -keys=cuda,gpu -libs=cublas -max_num_threads=1024 -thread_warp_size=32, workload=('batch_matmul_cublas.cuda', ('TENSOR', (12, 128, 64), 'float32'), ('TENSOR', (12, 128, 64), 'float32'), (12, 128, 128)). A fallback configuration is used, which may bring great performance regression.
# Runtime: 10.64 ms (0.29 ms)
# Block Sparse Model with 1x1 blocks:
# Runtime: 6.46 ms (0.05 ms)
| https://github.com/zk-ml/tachikoma |
gallery/how_to/deploy_models/deploy_ssd_gluoncv.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Deploy Single Shot Multibox Detector (SSD) model
=================================================
**Author**: `Yao Wang <https://github.com/kevinthesun>`_
`Leyuan Wang <https://github.com/Laurawly>`_
This article is an introductory tutorial to deploy SSD models with TVM.
We will use a GluonCV pre-trained SSD model and convert it to Relay IR.
"""
# sphinx_gallery_start_ignore
from tvm import testing
testing.utils.install_request_hook(depth=3)
# sphinx_gallery_end_ignore
import tvm
from tvm import te
from matplotlib import pyplot as plt
from tvm import relay
from tvm.contrib import graph_executor
from tvm.contrib.download import download_testdata
from gluoncv import model_zoo, data, utils
######################################################################
# Preliminary and Set parameters
# ------------------------------
# .. note::
#
# We support compiling SSD on both CPUs and GPUs now.
#
# To get best inference performance on CPU, change
# target argument according to your device and
# follow the :ref:`tune_relay_x86` to tune x86 CPU and
# :ref:`tune_relay_arm` for arm CPU.
#
# To get best inference performance on Intel graphics,
# change target argument to :code:`opencl -device=intel_graphics`.
# But when using Intel graphics on Mac, target needs to
#   be set to `opencl` only, because the Intel subgroup
#   extension is not supported on Mac.
#
# To get best inference performance on CUDA-based GPUs,
# change the target argument to :code:`cuda`; and for
# OPENCL-based GPUs, change target argument to
# :code:`opencl` followed by device argument according
# to your device.
supported_model = [
"ssd_512_resnet50_v1_voc",
"ssd_512_resnet50_v1_coco",
"ssd_512_resnet101_v2_voc",
"ssd_512_mobilenet1.0_voc",
"ssd_512_mobilenet1.0_coco",
"ssd_300_vgg16_atrous_voc" "ssd_512_vgg16_atrous_coco",
]
model_name = supported_model[0]
dshape = (1, 3, 512, 512)
######################################################################
# Download and pre-process demo image
im_fname = download_testdata(
"https://github.com/dmlc/web-data/blob/main/" + "gluoncv/detection/street_small.jpg?raw=true",
"street_small.jpg",
module="data",
)
x, img = data.transforms.presets.ssd.load_test(im_fname, short=512)
######################################################################
# Convert and compile model for CPU.
block = model_zoo.get_model(model_name, pretrained=True)
def build(target):
mod, params = relay.frontend.from_mxnet(block, {"data": dshape})
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(mod, target, params=params)
return lib
######################################################################
# Create TVM runtime and do inference
# .. note::
#
#   Use target = "cuda -libs=thrust" to enable thrust-based sort, if you
# enabled thrust during cmake by -DUSE_THRUST=ON.
def run(lib, dev):
# Build TVM runtime
m = graph_executor.GraphModule(lib["default"](dev))
tvm_input = tvm.nd.array(x.asnumpy(), device=dev)
m.set_input("data", tvm_input)
# execute
m.run()
# get outputs
class_IDs, scores, bounding_boxs = m.get_output(0), m.get_output(1), m.get_output(2)
return class_IDs, scores, bounding_boxs
for target in ["llvm", "cuda"]:
dev = tvm.device(target, 0)
if dev.exist:
lib = build(target)
class_IDs, scores, bounding_boxs = run(lib, dev)
######################################################################
# Display result
ax = utils.viz.plot_bbox(
img,
bounding_boxs.numpy()[0],
scores.numpy()[0],
class_IDs.numpy()[0],
class_names=block.classes,
)
plt.show()
| https://github.com/zk-ml/tachikoma |
gallery/how_to/extend_tvm/bring_your_own_datatypes.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Bring Your Own Datatypes to TVM
===============================
**Authors**: `Gus Smith <https://github.com/gussmith23>`_, `Andrew Liu <https://github.com/hypercubestart>`_
In this tutorial, we will show you how to utilize the Bring Your Own Datatypes framework to use your own custom datatypes in TVM.
Note that the Bring Your Own Datatypes framework currently only handles **software emulated versions of datatypes**.
The framework does not support compiling for custom accelerator datatypes out-of-the-box.
Datatype Libraries
------------------
The Bring Your Own Datatypes framework allows users to register their own datatype implementations alongside TVM's native datatypes (such as ``float``).
In the wild, these datatype implementations often appear as libraries.
For example:
- `libposit <https://github.com/cjdelisle/libposit>`_, a posit library
- `Stillwater Universal <https://github.com/stillwater-sc/universal>`_, a library with posits, fixed-point numbers, and other types
- `SoftFloat <https://github.com/ucb-bar/berkeley-softfloat-3>`_, Berkeley's software implementation of IEEE 754 floating-point
The Bring Your Own Datatypes framework enables users to plug these datatype implementations into TVM!
In this section, we will use an example library we have already implemented, located at ``3rdparty/byodt/myfloat.cc``.
This datatype, which we dubbed "myfloat", is really just an IEEE 754 float under the hood, but it serves as a useful example
to show that any datatype can be used in the BYODT framework.
Setup
-----
Since we do not use any 3rdparty library, there is no setup needed.
If you would like to try this with your own datatype library, first bring the library's functions into the process space with ``CDLL``:
.. code-block :: python
ctypes.CDLL('my-datatype-lib.so', ctypes.RTLD_GLOBAL)
"""
# sphinx_gallery_start_ignore
from tvm import testing
testing.utils.install_request_hook(depth=3)
# sphinx_gallery_end_ignore
######################
# A Simple TVM Program
# --------------------
#
# We'll begin by writing a simple program in TVM; afterwards, we will re-write it to use custom datatypes.
import tvm
from tvm import relay
# Our basic program: Z = X + Y
x = relay.var("x", shape=(3,), dtype="float32")
y = relay.var("y", shape=(3,), dtype="float32")
z = x + y
program = relay.Function([x, y], z)
module = tvm.IRModule.from_expr(program)
######################################################################
# Now, we create random inputs to feed into this program using numpy:
import numpy as np
np.random.seed(23) # for reproducibility
x_input = np.random.rand(3).astype("float32")
y_input = np.random.rand(3).astype("float32")
print("x: {}".format(x_input))
print("y: {}".format(y_input))
######################################################################
# Finally, we're ready to run the program:
z_output = relay.create_executor(mod=module).evaluate()(x_input, y_input)
print("z: {}".format(z_output))
######################################################################
# Adding Custom Datatypes
# -----------------------
# Now, we will do the same, but we will use a custom datatype for our intermediate computation.
#
# We use the same input variables ``x`` and ``y`` as above, but before adding ``x + y``, we first cast both ``x`` and ``y`` to a custom datatype via the ``relay.cast(...)`` call.
#
# Note how we specify the custom datatype: we indicate it using the special ``custom[...]`` syntax.
# Additionally, note the "32" after the datatype: this is the bitwidth of the custom datatype. This tells TVM that each instance of ``myfloat`` is 32 bits wide.
try:
with tvm.transform.PassContext(config={"tir.disable_vectorize": True}):
x_myfloat = relay.cast(x, dtype="custom[myfloat]32")
y_myfloat = relay.cast(y, dtype="custom[myfloat]32")
z_myfloat = x_myfloat + y_myfloat
z = relay.cast(z_myfloat, dtype="float32")
except tvm.TVMError as e:
# Print last line of error
print(str(e).split("\n")[-1])
######################################################################
# Trying to generate this program throws an error from TVM.
# TVM does not know how to handle any custom datatype out of the box!
# We first have to register the custom type with TVM, giving it a name and a type code:
tvm.target.datatype.register("myfloat", 150)
######################################################################
# Note that the type code, 150, is currently chosen manually by the user.
# See ``TVMTypeCode::kCustomBegin`` in `include/tvm/runtime/c_runtime_api.h <https://github.com/apache/tvm/blob/main/include/tvm/runtime/data_type.h>`_.
# Now we can generate our program again:
x_myfloat = relay.cast(x, dtype="custom[myfloat]32")
y_myfloat = relay.cast(y, dtype="custom[myfloat]32")
z_myfloat = x_myfloat + y_myfloat
z = relay.cast(z_myfloat, dtype="float32")
program = relay.Function([x, y], z)
module = tvm.IRModule.from_expr(program)
module = relay.transform.InferType()(module)
######################################################################
# Now we have a Relay program that uses myfloat!
print(program)
######################################################################
# Now that we can express our program without errors, let's try running it!
try:
with tvm.transform.PassContext(config={"tir.disable_vectorize": True}):
z_output_myfloat = relay.create_executor("graph", mod=module).evaluate()(x_input, y_input)
print("z: {}".format(y_myfloat))
except tvm.TVMError as e:
# Print last line of error
print(str(e).split("\n")[-1])
######################################################################
# Now, trying to compile this program throws an error.
# Let's dissect this error.
#
# The error is occurring during the process of lowering the custom datatype code to code that TVM can compile and run.
# TVM is telling us that it cannot find a *lowering function* for the ``Cast`` operation, when casting from source type 2 (``float``, in TVM), to destination type 150 (our custom datatype).
# When lowering custom datatypes, if TVM encounters an operation over a custom datatype, it looks for a user-registered *lowering function*, which tells it how to lower the operation to an operation over datatypes it understands.
# We have not told TVM how to lower ``Cast`` operations for our custom datatypes; thus, the source of this error.
#
# To fix this error, we simply need to specify a lowering function:
tvm.target.datatype.register_op(
tvm.target.datatype.create_lower_func(
{
(32, 32): "FloatToCustom32", # cast from float32 to myfloat32
}
),
"Cast",
"llvm",
"float",
"myfloat",
)
######################################################################
# The ``register_op(...)`` call takes a lowering function, and a number of parameters which specify exactly the operation which should be lowered with the provided lowering function.
# In this case, the arguments we pass specify that this lowering function is for lowering a ``Cast`` from ``float`` to ``myfloat`` for target ``"llvm"``.
#
# The lowering function passed into this call is very general: it should take an operation of the specified type (in this case, `Cast`) and return another operation which only uses datatypes which TVM understands.
#
# In the general case, we expect users to implement operations over their custom datatypes using calls to an external library.
# In our example, our ``myfloat`` library implements a ``Cast`` from ``float`` to 32-bit ``myfloat`` in the function ``FloatToCustom32``.
# To provide for the general case, we have made a helper function, ``create_lower_func(...)``,
# which does just this: given a dictionary, it replaces the given operation with a ``Call`` to the appropriate function name provided based on the op and the bit widths.
# It additionally removes usages of the custom datatype by storing the custom datatype in an opaque ``uint`` of the appropriate width; in our case, a ``uint32_t``.
# For more information, see `the source code <https://github.com/apache/tvm/blob/main/python/tvm/target/datatype.py>`_.
# We can now re-try running the program:
try:
with tvm.transform.PassContext(config={"tir.disable_vectorize": True}):
z_output_myfloat = relay.create_executor("graph", mod=module).evaluate()(x_input, y_input)
print("z: {}".format(z_output_myfloat))
except tvm.TVMError as e:
# Print last line of error
print(str(e).split("\n")[-1])
######################################################################
# This new error tells us that the ``Add`` lowering function is not found, which is good news, as it's no longer complaining about the ``Cast``!
# We know what to do from here: we just need to register the lowering functions for the other operations in our program.
#
# Note that for ``Add``, ``create_lower_func`` takes in a dict where the key is an integer.
# For ``Cast`` operations, we require a 2-tuple to specify the ``src_bit_length`` and the ``dest_bit_length``,
# while for all other operations, the bit length is the same between the operands so we only require one integer to specify ``bit_length``.
tvm.target.datatype.register_op(
tvm.target.datatype.create_lower_func({32: "Custom32Add"}),
"Add",
"llvm",
"myfloat",
)
tvm.target.datatype.register_op(
tvm.target.datatype.create_lower_func({(32, 32): "Custom32ToFloat"}),
"Cast",
"llvm",
"myfloat",
"float",
)
# Now, we can run our program without errors.
with tvm.transform.PassContext(config={"tir.disable_vectorize": True}):
z_output_myfloat = relay.create_executor(mod=module).evaluate()(x_input, y_input)
print("z: {}".format(z_output_myfloat))
print("x:\t\t{}".format(x_input))
print("y:\t\t{}".format(y_input))
print("z (float32):\t{}".format(z_output))
print("z (myfloat32):\t{}".format(z_output_myfloat))
# Perhaps as expected, the ``myfloat32`` results and the ``float32`` results are exactly the same!
######################################################################
# Running Models With Custom Datatypes
# ------------------------------------
#
# We will first choose the model which we would like to run with myfloat.
# In this case we use `Mobilenet <https://arxiv.org/abs/1704.04861>`_.
# We choose Mobilenet due to its small size.
# In this alpha state of the Bring Your Own Datatypes framework, we have not implemented any software optimizations for running software emulations of custom datatypes; the result is poor performance due to many calls into our datatype emulation library.
#
# First let us define two helper functions to get the mobilenet model and a cat image.
def get_mobilenet():
dshape = (1, 3, 224, 224)
from mxnet.gluon.model_zoo.vision import get_model
block = get_model("mobilenet0.25", pretrained=True)
shape_dict = {"data": dshape}
return relay.frontend.from_mxnet(block, shape_dict)
def get_cat_image():
from tvm.contrib.download import download_testdata
from PIL import Image
url = "https://gist.githubusercontent.com/zhreshold/bcda4716699ac97ea44f791c24310193/raw/fa7ef0e9c9a5daea686d6473a62aacd1a5885849/cat.png"
dst = "cat.png"
real_dst = download_testdata(url, dst, module="data")
img = Image.open(real_dst).resize((224, 224))
# CoreML's standard model image format is BGR
img_bgr = np.array(img)[:, :, ::-1]
img = np.transpose(img_bgr, (2, 0, 1))[np.newaxis, :]
return np.asarray(img, dtype="float32")
module, params = get_mobilenet()
######################################################################
# It's easy to execute MobileNet with native TVM:
ex = tvm.relay.create_executor("graph", mod=module, params=params)
input = get_cat_image()
result = ex.evaluate()(input).numpy()
# print first 10 elements
print(result.flatten()[:10])
######################################################################
# Now, we would like to change the model to use myfloat internally. To do so, we need to convert the network. To do this, we first define a function which will help us convert tensors:
def convert_ndarray(dst_dtype, array):
"""Converts an NDArray into the specified datatype"""
x = relay.var("x", shape=array.shape, dtype=str(array.dtype))
cast = relay.Function([x], x.astype(dst_dtype))
with tvm.transform.PassContext(config={"tir.disable_vectorize": True}):
return relay.create_executor("graph").evaluate(cast)(array)
######################################################################
# Now, to actually convert the entire network, we have written `a pass in Relay <https://github.com/gussmith23/tvm/blob/ea174c01c54a2529e19ca71e125f5884e728da6e/python/tvm/relay/frontend/change_datatype.py#L21>`_ which simply converts all nodes within the model to use the new datatype.
from tvm.relay.frontend.change_datatype import ChangeDatatype
src_dtype = "float32"
dst_dtype = "custom[myfloat]32"
module = relay.transform.InferType()(module)
# Currently, custom datatypes only work if you run simplify_inference beforehand
module = tvm.relay.transform.SimplifyInference()(module)
# Run type inference before changing datatype
module = tvm.relay.transform.InferType()(module)
# Change datatype from float to myfloat and re-infer types
cdtype = ChangeDatatype(src_dtype, dst_dtype)
expr = cdtype.visit(module["main"])
module = tvm.relay.transform.InferType()(module)
# We also convert the parameters:
params = {k: convert_ndarray(dst_dtype, v) for k, v in params.items()}
# We also need to convert our input:
input = convert_ndarray(dst_dtype, input)
# Finally, we can try to run the converted model:
try:
# Vectorization is not implemented with custom datatypes.
with tvm.transform.PassContext(config={"tir.disable_vectorize": True}):
result_myfloat = tvm.relay.create_executor("graph", mod=module).evaluate(expr)(
input, **params
)
except tvm.TVMError as e:
print(str(e).split("\n")[-1])
######################################################################
# When we attempt to run the model, we get a familiar error telling us that more functions need to be registered for myfloat.
#
# Because this is a neural network, many more operations are required.
# Here, we register all the needed functions:
tvm.target.datatype.register_op(
tvm.target.datatype.create_lower_func({32: "FloatToCustom32"}),
"FloatImm",
"llvm",
"myfloat",
)
tvm.target.datatype.register_op(
tvm.target.datatype.lower_ite, "Call", "llvm", "myfloat", intrinsic_name="tir.if_then_else"
)
tvm.target.datatype.register_op(
tvm.target.datatype.lower_call_pure_extern,
"Call",
"llvm",
"myfloat",
intrinsic_name="tir.call_pure_extern",
)
tvm.target.datatype.register_op(
tvm.target.datatype.create_lower_func({32: "Custom32Mul"}),
"Mul",
"llvm",
"myfloat",
)
tvm.target.datatype.register_op(
tvm.target.datatype.create_lower_func({32: "Custom32Div"}),
"Div",
"llvm",
"myfloat",
)
tvm.target.datatype.register_op(
tvm.target.datatype.create_lower_func({32: "Custom32Sqrt"}),
"Call",
"llvm",
"myfloat",
intrinsic_name="tir.sqrt",
)
tvm.target.datatype.register_op(
tvm.target.datatype.create_lower_func({32: "Custom32Sub"}),
"Sub",
"llvm",
"myfloat",
)
tvm.target.datatype.register_op(
tvm.target.datatype.create_lower_func({32: "Custom32Exp"}),
"Call",
"llvm",
"myfloat",
intrinsic_name="tir.exp",
)
tvm.target.datatype.register_op(
tvm.target.datatype.create_lower_func({32: "Custom32Max"}),
"Max",
"llvm",
"myfloat",
)
tvm.target.datatype.register_min_func(
tvm.target.datatype.create_min_lower_func({32: "MinCustom32"}, "myfloat"),
"myfloat",
)
######################################################################
# Note we are making use of two new functions: ``register_min_func`` and ``create_min_lower_func``.
#
# ``register_min_func`` registers a function that takes an integer ``num_bits`` for the bit length and returns an
# expression representing the minimum finite representable value for the custom data type with the specified bit length.
#
# Similar to ``register_op`` and ``create_lower_func``, the ``create_min_lower_func`` handles the general case
# where the minimum representable custom datatype value is implemented using calls to an external library.
#
# Now we can finally run the model:
# Vectorization is not implemented with custom datatypes.
with tvm.transform.PassContext(config={"tir.disable_vectorize": True}):
result_myfloat = relay.create_executor(mod=module).evaluate(expr)(input, **params)
result_myfloat = convert_ndarray(src_dtype, result_myfloat).numpy()
# print first 10 elements
print(result_myfloat.flatten()[:10])
# Again, note that the output using 32-bit myfloat is exactly the same as with 32-bit floats,
# because myfloat is exactly a float!
np.testing.assert_array_equal(result, result_myfloat)
| https://github.com/zk-ml/tachikoma |
gallery/how_to/extend_tvm/low_level_custom_pass.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Writing a Customized Pass
=========================
**Author**: `Jian Weng <https://were.github.io>`_
TVM is a framework that abstracts away the heterogeneity of machine learning accelerators.
Sometimes users may want to customize some analysis and IR transformations
to adapt TVM to their own specialized hardware. This tutorial helps users write
a customized pass in TVM.
Prerequisites
-------------
Before reading this tutorial, we assume readers are already familiar with these topics:
- Writing an algorithm in TVM and scheduling it. Otherwise, see example tutorials like
:ref:`opt-gemm`.
- The basic structure of HalideIR. Otherwise, see ``HalideIR/src/ir/IR.h`` to learn what
attributes of IR nodes are defined.
- Visitor design pattern. Otherwise, check the
`Python AST module <https://docs.python.org/3/library/ast.html>`_ to see how an AST
visitor is implemented.
- How a Schedule is lowered to either an IRModule class or a LLVM module. Otherwise,
take a look at ``python/tvm/build_module.py`` to get some basics.
"""
# sphinx_gallery_start_ignore
from tvm import testing
testing.utils.install_request_hook(depth=3)
# sphinx_gallery_end_ignore
import tvm
from tvm import te
import numpy as np
######################################################################
# We first write a very simple vector add and build it with the default schedule. Then, we use
# our customized lowering pass to manipulate the IR directly instead of using schedule primitives.
#
n = tvm.tir.const(128, "int32")
a = te.placeholder((n,), name="a")
b = te.placeholder((n,), name="b")
c = te.compute((n,), lambda i: a[i] + b[i], name="c")
sch = te.create_schedule(c.op)
ir = tvm.lower(sch, [a, b, c])
print(ir)
######################################################################
# Writing a Pass
# --------------
# Essentially, an "IR transformation pass" is a function which maps a statement to a new statement.
# Thus, we define this vectorize function and implement it step by step.
#
######################################################################
# TVM already provides two classes for users to both analyze and transform IR.
#
# IR Visitor
# ~~~~~~~~~~
# We can use ``tvm.tir.stmt_functor.post_order_visit(stmt, func)`` to gather information from the Halide IR.
# ``func`` is a function callback. This function will be called before exiting the current IR node,
# i.e. post-order visit. Then we leverage side effects to store the result of IR visit, because the
# return value of ``func`` will be ignored.
#
# .. note::
#
#     You MUST use some array to store the result of the IR visit, even if the result is a
#     single variable. This is mainly due to the constraints in the Python-C runtime. The variable
#     values will be refreshed every recursion but the array values will be preserved.
#
loops = []
def find_width8(op):
"""Find all the 'tir.For' nodes whose extent can be divided by 8."""
if isinstance(op, tvm.tir.For):
if isinstance(op.extent, tvm.tir.IntImm):
if op.extent.value % 8 == 0:
loops.append(op)
#####################################################################
# IR Transformation
# ~~~~~~~~~~~~~~~~~
# The transformation interface is slightly different from the visitor interface. There is only a
# post-order callback in the visitor, but the transformation visitor supports both a pre-order and a
# post-order callback. If you want to keep the original IR node, just return None. If you want to
# replace the current node with a new one, use the TVM IR builder interface to construct it and return
# this value.
#
# .. note::
#
# If the pre-order function is called and returns a value which is not None, the post-order
# function will be skipped.
#
def vectorize8(op):
"""Split can vectorize the loops found in `find_width8`."""
if op in loops:
extent = op.extent.value
name = op.loop_var.name
lo, li = te.var(name + ".outer"), te.var(name + ".inner")
body = tvm.tir.stmt_functor.substitute(op.body, {op.loop_var: lo * 8 + li})
body = tvm.tir.For(li, 0, 8, tvm.tir.ForKind.VECTORIZED, body)
body = tvm.tir.For(lo, 0, extent // 8, tvm.tir.ForKind.SERIAL, body)
return body
return None
@tvm.tir.transform.prim_func_pass(opt_level=0)
def vectorize(f, mod, ctx):
global loops
tvm.tir.stmt_functor.post_order_visit(f.body, find_width8)
if not loops:
return f
    # The last list argument indicates what kinds of nodes will be transformed.
# Thus, in this case only `For` nodes will call `vectorize8`
return f.with_body(tvm.tir.stmt_functor.ir_transform(f.body, None, vectorize8, ["tir.For"]))
#####################################################################
# Glue to Lowering
# ----------------
# So far, we are done with writing this IR transformation pass. What we need to do next is to glue
# this pass to TVM's lowering pass.
#
# In this case, we inject the pass written above into the TVM standard lowering
# pass by feeding **a list of tuples** as argument to ``tir.add_lower_pass``. The integer in each
# tuple selects the phase of lowering after which the pass is called. In TVM, there are four phases of lowering and user-customized ones will be
# called after each phase is done.
#
# .. note::
# Here are the essential transformations done by each phase:
# - Phase 0 generates the raw IR and loop levels.
# - Phase 1 flattens the array storage.
# - Phase 2 transforms loops, like unroll, vectorization and thread-binding.
# - Phase 3 does some cleanup work.
#
# Thus, a good place to put this transformation pass is just after Phase 1.
#
with tvm.transform.PassContext(config={"tir.add_lower_pass": [(1, vectorize)]}):
print(tvm.lower(sch, [a, b, c]))
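#####################################################################
# As a small variation (a sketch, not something the tutorial requires), several
# user passes can be registered at once: each tuple in the list pairs a phase
# number with a pass. The `dump_ir` pass below is a hypothetical no-op that
# simply prints the PrimFunc it receives, which is a convenient way to inspect
# the IR after a later phase has run.
@tvm.tir.transform.prim_func_pass(opt_level=0)
def dump_ir(f, mod, ctx):
    # Print the function after phase 3 and return it unchanged.
    print(f)
    return f
with tvm.transform.PassContext(config={"tir.add_lower_pass": [(1, vectorize), (3, dump_ir)]}):
    tvm.lower(sch, [a, b, c])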
#####################################################################
# Quick View
# ----------
# This tutorial gives a quick view of writing a customized IR transformation pass:
# - Use ``tvm.tir.stmt_functor.post_order_visit`` to gather information on each IR node.
# - Use ``tvm.tir.stmt_functor.ir_transform`` to transform IR nodes.
# - Wrap up the two above to write an IR-transformation function.
# - Use ``tvm.transform.PassContext`` to put this function into the TVM lowering pass
#
| https://github.com/zk-ml/tachikoma |
gallery/how_to/extend_tvm/use_pass_infra.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=line-too-long
"""
.. _tutorial-use-pass-infra:
How to Use TVM Pass Infra
=========================
**Author**: `Zhi Chen <https://github.com/zhiics>`_
As the number of optimization passes increases in Relay/tir, it becomes intractable to
execute them and maintain their dependencies manually. Therefore, we have
introduced an infrastructure to manage the optimization passes and make it
applicable to different layers of the IR in the TVM stack.
The optimizations of a Relay/tir program can be applied at various levels of granularity,
namely function-level and module-level using :py:class:`tvm.relay.transform.FunctionPass`/
:py:class:`tvm.tir.transform.PrimFuncPass` and :py:class:`tvm.transform.ModulePass`
respectively. Or users can rely on :py:class:`tvm.transform.Sequential` to apply a sequence of passes
on a Relay/tir program where the dependencies between passes can be resolved by the
pass infra. For more details about each type of these passes, please refer to
the :ref:`pass-infra`
This tutorial mainly demonstrates how developers can use the pass infra to perform
a certain optimization and create an optimization pipeline for a Relay program.
The same approach can be used for tir as well.
"""
# sphinx_gallery_start_ignore
from tvm import testing
testing.utils.install_request_hook(depth=3)
# sphinx_gallery_end_ignore
import numpy as np
import tvm
from tvm import te
import tvm.relay as relay
###############################################################################
# Create An Example Relay Program
# -------------------------------
# First of all, we create a simple Relay program for the tutorial. This program
# will be used by various optimizations of the examples in this tutorial.
# Similarly, users can write a tir primitive function and apply the tir passes.
def example():
shape = (1, 64, 54, 54)
c_data = np.empty(shape).astype("float32")
c = relay.const(c_data)
weight = relay.var("weight", shape=(64, 64, 3, 3))
x = relay.var("x", relay.TensorType((1, 64, 56, 56), "float32"))
conv = relay.nn.conv2d(x, weight)
y = relay.add(c, c)
y = relay.multiply(y, relay.const(2, "float32"))
y = relay.add(conv, y)
z = relay.add(y, c)
z1 = relay.add(y, c)
z2 = relay.add(z, z1)
return relay.Function([x, weight], z2)
###############################################################################
# Optimize the Program
# --------------------
# Now we would like to optimize the program. Relay features a host of
# optimizations. We will select some of them to apply on this example program.
#
# There are multiple ways to optimize a Relay program. Below we will provide
# examples for each of them.
#
# Manually Apply Optimization Passes
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Let's first create a relay Module which contains one or multiple Relay
# functions for optimization.
f = example()
mod = tvm.IRModule.from_expr(f)
# Now we can apply constant folding on the module.
# fold_const here is a callback that doesn't take any parameters.
fold_const = relay.transform.FoldConstant()
# Then, we can invoke the pass on the given module. Note that the constant
# folding pass works at the function-level. That is, the optimization will be
# applied to each function in the module. Users don't need to iterate
# through individual functions manually to apply this pass.
mod = fold_const(mod)
# We can see from the updated program that the constants are folded.
print(mod)
###############################################################################
# More optimizations can be applied in the similar manner. For instance, we can
# eliminate the common expressions that are used by `z` and `z1`.
mod = relay.transform.EliminateCommonSubexpr()(mod)
print(mod)
###############################################################################
# Some optimizations, such as fusion, are parametric as well. For example,
# opt level 0 will not allow operators to be fused together. Users can pass the
# `fuse_opt_level` to enable this.
mod = relay.transform.FuseOps(fuse_opt_level=0)(mod)
# We can observe that the optimized module contains functions that only have
# a single primitive op.
print(mod)
###############################################################################
# Use Sequential to Apply a Sequence of Passes
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Applying passes as above is actually tedious and it may require users to have
# a better understanding of the dependencies between them. For example, fusion
# currently doesn't work well on let bindings. Therefore, we would not be able
# to fuse operators that were fusable if :py:func:`relay.transform.ToANormalForm` is applied before
# fusion, as this pass generates let bindings for each expression to
# canonicalize a Relay program.
#
# Relay, hence, provides :py:class:`tvm.transform.Sequential` to relieve developers from handling
# these issues explicitly by specifying the required passes of each pass and
# packing them as a whole to execute. For example, the same passes can now be
# applied using the sequential style as the following. :py:class:`tvm.transform.Sequential` is
# similar to `torch.nn.sequential <https://pytorch.org/docs/stable/nn.html#torch.nn.Sequential>`_
# and `mxnet.gluon.block <https://mxnet.apache.org/api/python/docs/_modules/mxnet/gluon/block.html>`_.
# For example, `torch.nn.sequential` is used to contain a sequence of PyTorch
# `Modules` that will be added to build a network. It focuses on the network
# layers. Instead, the :py:class:`tvm.transform.Sequential` in our pass infra works on optimization
# passes.
# Now let's execute some passes through :py:class:`tvm.transform.Sequential`
f = example()
mod = tvm.IRModule.from_expr(f)
# Gather the passes we are interested in.
seq = tvm.transform.Sequential(
[
relay.transform.FoldConstant(),
relay.transform.EliminateCommonSubexpr(),
relay.transform.FuseOps(fuse_opt_level=2),
]
)
mod1 = seq(mod)
print(mod1)
###############################################################################
# From the transformed Relay program, we can see that there are still two
# identical addition operations. This is because ``EliminateCommonSubexpr``
# was not actually performed. The reason is that only the passes that have
# an optimization level less than or equal to 2 will be executed by default under
# :py:class:`tvm.transform.Sequential`. The pass infra,
# however, provides a configuration interface
# for users to customize the optimization level that they want to execute.
with tvm.transform.PassContext(opt_level=3):
mod2 = seq(mod)
print(mod2)
###############################################################################
# Now we can see that only one of the two identical additions is kept.
#
# In addition, users can selectively disable some passes using the
# `disabled_pass` config, which is similar to the `-fno-xxx` option used by
# general-purpose compilers, such as Clang and GCC. For example, we can disable
# EliminateCommonSubexpr as follows. The printed module will again show two
# identical addition operations.
with tvm.transform.PassContext(opt_level=3, disabled_pass=["EliminateCommonSubexpr"]):
mod3 = seq(mod)
print(mod3)
##############################################################################
# Implement a Pass Using Python Decorator
# ------------------------------------------
# The next example illustrates how we can orchestrate a customized optimization
# pipeline through the pass infra using Python decorators. This functionality
# greatly eases the implementation of passes. For example, users can simply
# define a decorated class to do function-level optimizations as the following
# example shows. Here, ``transform_function`` replaces every constant ``c`` in a
# function with ``multiplier * c``. Later on, each function in a given module will be
# visited and each constant in the function will be replaced when we invoke the
# customized pass.
@relay.transform.function_pass(opt_level=1)
class CustomPipeline:
"""Simple test function to replace one argument to another."""
def __init__(self, multiplier):
self.multiplier = multiplier
# This function can define a pass.
def transform_function(self, func, mod, ctx):
obj = self
class ReplaceConstant(tvm.relay.ExprMutator):
def visit_constant(self, c):
return relay.multiply(obj.multiplier, c)
return ReplaceConstant().visit(func)
f = example()
mod = tvm.IRModule.from_expr(f)
custom_pass = CustomPipeline(multiplier=relay.const(3, "float32"))
assert custom_pass.info.name == "CustomPipeline"
mod3 = custom_pass(mod)
print(mod3)
##############################################################################
# Debug a Pass
# ------------
# TVM provides a plug-and-play style debugging pass (``PrintIR``) that
# dumps the IR of the whole module after a certain pass is done.
# A slightly modified version of the sequential pass example
# could look like the following to enable IR dumping after the ``FoldConstant`` optimization.
f = example()
mod = tvm.IRModule.from_expr(f)
seq = tvm.transform.Sequential(
[
relay.transform.FoldConstant(),
tvm.transform.PrintIR(),
relay.transform.EliminateCommonSubexpr(),
relay.transform.FuseOps(),
]
)
###############################################################################
# By inserting the ``PrintIR`` pass after ``FoldConstant``, the pass infra will
# dump out the module IR when ``FoldConstant`` is done. Users can plug in this
# pass after any pass they want to debug for viewing the optimization effect.
#
# There is a more flexible debugging mechanism. One can implement a ``PassInstrument``
# class to execute arbitrary code not only before and/or after each pass but also
# at entering/exiting ``PassContext``. See :ref:`pass_instrument_cpp_backend`
# for more details.
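# As a minimal illustrative sketch (the class below is not used further in this
# tutorial), an instrument may implement any subset of these hooks, including the
# context-boundary ones:
@tvm.instrument.pass_instrument
class SketchInstrument:
    def enter_pass_ctx(self):
        print("entering PassContext")
    def exit_pass_ctx(self):
        print("exiting PassContext")
    def run_before_pass(self, mod, info):
        print("before pass:", info.name)
    def run_after_pass(self, mod, info):
        print("after pass:", info.name)
###############################################################################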
#
# Here we use the :py:func:`tvm.instrument.pass_instrument` decorator to implement
# a PassInstrument class that prints the IR before the execution of each pass:
@tvm.instrument.pass_instrument
class PrintIR:
"""Print the name of the pass, the IR, only before passes execute."""
def run_before_pass(self, mod, info):
print("Running pass: {}", info)
print(mod)
with tvm.transform.PassContext(opt_level=3, instruments=[PrintIR()]):
with tvm.target.Target("llvm"):
# Perform the optimizations.
mod = seq(mod)
print(mod)
print("done")
##############################################################################
# Summary
# -------
# This tutorial has covered how we can write and invoke passes in TVM more
# conveniently using the pass infra. Different ways of invoking a pass are also
# discussed. Using :py:class:`tvm.transform.Sequential` greatly eases the work of
# handling multiple optimization passes and their
# dependencies. In addition, an example is provided to illustrate
# how we can debug a pass using ``PrintIR`` and pass instrumentation.
| https://github.com/zk-ml/tachikoma |
gallery/how_to/extend_tvm/use_pass_instrument.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=line-too-long
"""
.. _tutorial-use-pass-instrument:
How to Use TVM Pass Instrument
==============================
**Author**: `Chi-Wei Wang <https://github.com/chiwwang>`_
As more and more passes are implemented, it becomes useful to instrument
pass execution, analyze per-pass effects, and observe various events.
We can instrument passes by providing a list of :py:class:`tvm.ir.instrument.PassInstrument`
instances to :py:class:`tvm.transform.PassContext`. We provide a pass instrument
for collecting timing information (:py:class:`tvm.ir.instrument.PassTimingInstrument`),
but an extension mechanism is available via the :py:func:`tvm.instrument.pass_instrument` decorator.
This tutorial demonstrates how developers can use ``PassContext`` to instrument
passes. Please also refer to the :ref:`pass-infra`.
"""
# sphinx_gallery_start_ignore
from tvm import testing
testing.utils.install_request_hook(depth=3)
# sphinx_gallery_end_ignore
import tvm
import tvm.relay as relay
from tvm.relay.testing import resnet
from tvm.contrib.download import download_testdata
from tvm.relay.build_module import bind_params_by_name
from tvm.ir.instrument import (
PassTimingInstrument,
pass_instrument,
)
###############################################################################
# Create An Example Relay Program
# -------------------------------
# We use a pre-defined ResNet-18 network in Relay.
batch_size = 1
num_of_image_class = 1000
image_shape = (3, 224, 224)
output_shape = (batch_size, num_of_image_class)
relay_mod, relay_params = resnet.get_workload(num_layers=18, batch_size=1, image_shape=image_shape)
print("Printing the IR module...")
print(relay_mod.astext(show_meta_data=False))
###############################################################################
# Create PassContext With Instruments
# -----------------------------------
# To run all passes with an instrument, pass it via the ``instruments`` argument to
# the ``PassContext`` constructor. A built-in ``PassTimingInstrument`` is used to
# profile the execution time of each pass.
timing_inst = PassTimingInstrument()
with tvm.transform.PassContext(instruments=[timing_inst]):
relay_mod = relay.transform.InferType()(relay_mod)
relay_mod = relay.transform.FoldScaleAxis()(relay_mod)
# before exiting the context, get profile results.
profiles = timing_inst.render()
print("Printing results of timing profile...")
print(profiles)
###############################################################################
# Use Current PassContext With Instruments
# ----------------------------------------
# One can also use the current ``PassContext`` and register
# ``PassInstrument`` instances with the ``override_instruments`` method.
# Note that ``override_instruments`` executes the ``exit_pass_ctx`` method
# of any instruments that already exist, then switches to the new instruments
# and calls their ``enter_pass_ctx`` method.
# Refer to the following sections and :py:func:`tvm.instrument.pass_instrument` for these methods.
cur_pass_ctx = tvm.transform.PassContext.current()
cur_pass_ctx.override_instruments([timing_inst])
relay_mod = relay.transform.InferType()(relay_mod)
relay_mod = relay.transform.FoldScaleAxis()(relay_mod)
profiles = timing_inst.render()
print("Printing results of timing profile...")
print(profiles)
###############################################################################
# Register empty list to clear existing instruments.
#
# Note that ``exit_pass_ctx`` of ``PassTimingInstrument`` is called.
# Profiles are cleared so nothing is printed.
cur_pass_ctx.override_instruments([])
# Uncomment the call to .render() to see a warning like:
# Warning: no passes have been profiled, did you enable pass profiling?
# profiles = timing_inst.render()
###############################################################################
# Create Customized Instrument Class
# ----------------------------------
# A customized instrument class can be created using the
# :py:func:`tvm.instrument.pass_instrument` decorator.
#
# Let's create an instrument class which calculates the change in number of
# occurrences of each operator caused by each pass. We can look at ``op.name`` to
# find the name of each operator. And we do this before and after passes to calculate the difference.
@pass_instrument
class RelayCallNodeDiffer:
def __init__(self):
self._op_diff = []
# Passes can be nested.
# Use stack to make sure we get correct before/after pairs.
self._op_cnt_before_stack = []
def enter_pass_ctx(self):
self._op_diff = []
self._op_cnt_before_stack = []
def exit_pass_ctx(self):
assert len(self._op_cnt_before_stack) == 0, "The stack is not empty. Something wrong."
def run_before_pass(self, mod, info):
self._op_cnt_before_stack.append((info.name, self._count_nodes(mod)))
def run_after_pass(self, mod, info):
# Pop out the latest recorded pass.
name_before, op_to_cnt_before = self._op_cnt_before_stack.pop()
assert name_before == info.name, "name_before: {}, info.name: {} doesn't match".format(
name_before, info.name
)
cur_depth = len(self._op_cnt_before_stack)
op_to_cnt_after = self._count_nodes(mod)
op_diff = self._diff(op_to_cnt_after, op_to_cnt_before)
# only record passes causing differences.
if op_diff:
self._op_diff.append((cur_depth, info.name, op_diff))
def get_pass_to_op_diff(self):
"""
return [
(depth, pass_name, {op_name: diff_num, ...}), ...
]
"""
return self._op_diff
@staticmethod
def _count_nodes(mod):
"""Count the number of occurrences of each operator in the module"""
ret = {}
def visit(node):
if isinstance(node, relay.expr.Call):
if hasattr(node.op, "name"):
op_name = node.op.name
else:
# Some CallNode may not have 'name' such as relay.Function
return
ret[op_name] = ret.get(op_name, 0) + 1
relay.analysis.post_order_visit(mod["main"], visit)
return ret
@staticmethod
def _diff(d_after, d_before):
"""Calculate the difference of two dictionary along their keys.
The result is values in d_after minus values in d_before.
"""
ret = {}
key_after, key_before = set(d_after), set(d_before)
for k in key_before & key_after:
tmp = d_after[k] - d_before[k]
if tmp:
ret[k] = d_after[k] - d_before[k]
for k in key_after - key_before:
ret[k] = d_after[k]
for k in key_before - key_after:
ret[k] = -d_before[k]
return ret
###############################################################################
# Apply Passes and Multiple Instrument Classes
# --------------------------------------------
# We can use multiple instrument classes in a ``PassContext``.
# However, it should be noted that instrument methods are executed sequentially,
# following the order of the ``instruments`` argument.
# So for instrument classes like ``PassTimingInstrument``, the execution time of
# the other instrument classes is inevitably counted into the final
# profile result.
call_node_inst = RelayCallNodeDiffer()
desired_layouts = {
"nn.conv2d": ["NHWC", "HWIO"],
}
pass_seq = tvm.transform.Sequential(
[
relay.transform.FoldConstant(),
relay.transform.ConvertLayout(desired_layouts),
relay.transform.FoldConstant(),
]
)
relay_mod["main"] = bind_params_by_name(relay_mod["main"], relay_params)
# timing_inst is put after call_node_inst.
# So the execution time of ``call_node_inst.run_after_pass()`` is also counted.
with tvm.transform.PassContext(opt_level=3, instruments=[call_node_inst, timing_inst]):
relay_mod = pass_seq(relay_mod)
profiles = timing_inst.render()
# Uncomment the next line to see timing-profile results.
# print(profiles)
###############################################################################
# We can see how many CallNode increase/decrease per op type.
from pprint import pprint
print("Printing the change in number of occurrences of each operator caused by each pass...")
pprint(call_node_inst.get_pass_to_op_diff())
###############################################################################
# Exception Handling
# ------------------
# Let's see what happens if an exception occurs in a method of a ``PassInstrument``.
#
# Define ``PassInstrument`` classes which raise exceptions in enter/exit ``PassContext``:
class PassExampleBase:
def __init__(self, name):
self._name = name
def enter_pass_ctx(self):
print(self._name, "enter_pass_ctx")
def exit_pass_ctx(self):
print(self._name, "exit_pass_ctx")
def should_run(self, mod, info):
print(self._name, "should_run")
return True
def run_before_pass(self, mod, pass_info):
print(self._name, "run_before_pass")
def run_after_pass(self, mod, pass_info):
print(self._name, "run_after_pass")
@pass_instrument
class PassFine(PassExampleBase):
pass
@pass_instrument
class PassBadEnterCtx(PassExampleBase):
def enter_pass_ctx(self):
print(self._name, "bad enter_pass_ctx!!!")
raise ValueError("{} bad enter_pass_ctx".format(self._name))
@pass_instrument
class PassBadExitCtx(PassExampleBase):
def exit_pass_ctx(self):
print(self._name, "bad exit_pass_ctx!!!")
raise ValueError("{} bad exit_pass_ctx".format(self._name))
###############################################################################
# If an exception occurs in ``enter_pass_ctx``, ``PassContext`` will disable the pass
# instrumentation and run the ``exit_pass_ctx`` of each ``PassInstrument``
# that successfully finished ``enter_pass_ctx``.
#
# In the following example, we can see that ``exit_pass_ctx`` of `PassFine_0` is executed after the exception.
demo_ctx = tvm.transform.PassContext(
instruments=[
PassFine("PassFine_0"),
PassBadEnterCtx("PassBadEnterCtx"),
PassFine("PassFine_1"),
]
)
try:
with demo_ctx:
relay_mod = relay.transform.InferType()(relay_mod)
except ValueError as ex:
print("Catching", str(ex).split("\n")[-1])
###############################################################################
# Exceptions in ``PassInstrument`` instances cause all instruments of the current ``PassContext``
# to be cleared, so nothing is printed when ``override_instruments`` is called.
demo_ctx.override_instruments([]) # no PassFine_0 exit_pass_ctx printed....etc
###############################################################################
# If an exception occurs in ``exit_pass_ctx``, the pass instrument is disabled and
# the exception is propagated. That means ``PassInstrument`` instances registered
# after the one throwing the exception do not execute ``exit_pass_ctx``.
demo_ctx = tvm.transform.PassContext(
instruments=[
PassFine("PassFine_0"),
PassBadExitCtx("PassBadExitCtx"),
PassFine("PassFine_1"),
]
)
try:
# PassFine_1 execute enter_pass_ctx, but not exit_pass_ctx.
with demo_ctx:
relay_mod = relay.transform.InferType()(relay_mod)
except ValueError as ex:
print("Catching", str(ex).split("\n")[-1])
###############################################################################
# Exceptions raised in ``should_run``, ``run_before_pass``, and ``run_after_pass``
# are not handled explicitly -- we rely on the context manager (the ``with`` syntax)
# to exit the ``PassContext`` safely.
#
# We use ``run_before_pass`` as an example:
@pass_instrument
class PassBadRunBefore(PassExampleBase):
def run_before_pass(self, mod, pass_info):
print(self._name, "bad run_before_pass!!!")
raise ValueError("{} bad run_before_pass".format(self._name))
demo_ctx = tvm.transform.PassContext(
instruments=[
PassFine("PassFine_0"),
PassBadRunBefore("PassBadRunBefore"),
PassFine("PassFine_1"),
]
)
try:
# All exit_pass_ctx are called.
with demo_ctx:
relay_mod = relay.transform.InferType()(relay_mod)
except ValueError as ex:
print("Catching", str(ex).split("\n")[-1])
###############################################################################
# Also note that pass instrumentation is not disabled in this case. So if we call
# ``override_instruments``, the ``exit_pass_ctx`` of the previously registered
# ``PassInstrument`` instances is called.
demo_ctx.override_instruments([])
###############################################################################
# If we don't wrap pass execution with the ``with`` syntax, ``exit_pass_ctx`` is not
# called. Let's try this with the current ``PassContext``:
cur_pass_ctx = tvm.transform.PassContext.current()
cur_pass_ctx.override_instruments(
[
PassFine("PassFine_0"),
PassBadRunBefore("PassBadRunBefore"),
PassFine("PassFine_1"),
]
)
###############################################################################
# Then call passes. ``exit_pass_ctx`` is not executed after the exception,
# as expected.
try:
# No ``exit_pass_ctx`` got executed.
relay_mod = relay.transform.InferType()(relay_mod)
except ValueError as ex:
print("Catching", str(ex).split("\n")[-1])
###############################################################################
# Clear instruments.
cur_pass_ctx.override_instruments([])
| https://github.com/zk-ml/tachikoma |
gallery/how_to/optimize_operators/opt_conv_cuda.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
.. _opt-conv-gpu:
How to optimize convolution on GPU
==================================
**Author**: `Haichen Shen <https://homes.cs.washington.edu/~haichen/>`_
In this tutorial, we will demonstrate how to write a high performance
convolution implementation in TVM. We use square size input tensors and filters
as an example, and assume the input to convolution has a large batch. In this
example, we use a different layout to store the data in order to achieve better
data locality. The buffer layout is HWCN, which stands for height, width,
channel, batch.
"""
# sphinx_gallery_start_ignore
from tvm import testing
testing.utils.install_request_hook(depth=3)
# sphinx_gallery_end_ignore
################################################################
# Preparation and Algorithm
# -------------------------
#
# We use fixed-size input tensors with 256 channels and 14 x 14
# dimensions. The batch size is 256. The convolution kernel contains 512 filters
# of size 3 x 3. We use stride size 1 and padding size 1 for the
# convolution. The following code defines the convolution algorithm in TVM.
#
import numpy as np
import tvm
from tvm import te
# The sizes of inputs and filters
batch = 256
in_channel = 256
out_channel = 512
in_size = 14
kernel = 3
pad = 1
stride = 1
# Algorithm
A = te.placeholder((in_size, in_size, in_channel, batch), name="A")
W = te.placeholder((kernel, kernel, in_channel, out_channel), name="W")
out_size = (in_size - kernel + 2 * pad) // stride + 1
# Pad input
Apad = te.compute(
(in_size + 2 * pad, in_size + 2 * pad, in_channel, batch),
lambda yy, xx, cc, nn: tvm.tir.if_then_else(
tvm.tir.all(yy >= pad, yy - pad < in_size, xx >= pad, xx - pad < in_size),
A[yy - pad, xx - pad, cc, nn],
tvm.tir.const(0.0, "float32"),
),
name="Apad",
)
# Create reduction variables
rc = te.reduce_axis((0, in_channel), name="rc")
ry = te.reduce_axis((0, kernel), name="ry")
rx = te.reduce_axis((0, kernel), name="rx")
# Compute the convolution
B = te.compute(
(out_size, out_size, out_channel, batch),
lambda yy, xx, ff, nn: te.sum(
Apad[yy * stride + ry, xx * stride + rx, rc, nn] * W[ry, rx, rc, ff], axis=[ry, rx, rc]
),
name="B",
)
###############################################################################
# Memory Hierarchy
# ----------------
#
# We first specify the memory hierarchy for buffers. The figure below shows the
# GPU memory hierarchy. One important difference from the CPU memory hierarchy is
# that the GPU provides a programmer-managed cache buffer called shared memory.
# Thus, maximizing data reuse in shared memory is
# critical for achieving high performance in GPU kernels.
#
# .. image:: https://github.com/dmlc/web-data/raw/main/tvm/tutorial/gpu_memory_hierarchy.png
# :align: center
# :height: 319px
# :width: 271px
#
# In this example, we load both Apad and W into buffers AA and WW, which are
# stored in the shared memory. These buffers will later be shared by all
# threads within the same thread block to compute the convolution. Each thread
# then loads its own part from the shared buffers into its local registers, AL and
# WL. BL is a local cache of the output B, which is also stored in the thread-local
# registers.
#
# Designate the memory hierarchy
s = te.create_schedule(B.op)
s[Apad].compute_inline() # compute Apad inline
AA = s.cache_read(Apad, "shared", [B])
WW = s.cache_read(W, "shared", [B])
AL = s.cache_read(AA, "local", [B])
WL = s.cache_read(WW, "local", [B])
BL = s.cache_write(B, "local")
###############################################################################
# Blocking
# --------
#
# The following code splits the workload into thread blocks and individual
# threads. We follow the blocking scheme in the matrix multiply. As shown in the
# figure below, given a pixel coordinate (y, x), a thread block is responsible
# for computing a region of block_factor x block_factor (64 x 64) for output
# channels and batch. Due to the limited shared memory space, we only load step
# x block_factor (8 x 64) data from Apad and W each time into buffers in the
# shared memory.
#
# .. image:: https://github.com/dmlc/web-data/raw/main/tvm/tutorial/conv_gpu_blocking.png
# :align: center
# :height: 308px
# :width: 317px
#
# tile consts
tile = 8
num_thread = 8
block_factor = tile * num_thread
step = 8
vthread = 2
# Get the GPU thread indices
block_x = te.thread_axis("blockIdx.x")
block_y = te.thread_axis("blockIdx.y")
block_z = te.thread_axis("blockIdx.z")
thread_x = te.thread_axis((0, num_thread), "threadIdx.x")
thread_y = te.thread_axis((0, num_thread), "threadIdx.y")
thread_xz = te.thread_axis((0, vthread), "vthread", name="vx")
thread_yz = te.thread_axis((0, vthread), "vthread", name="vy")
# Split the workloads
hi, wi, fi, ni = s[B].op.axis
bz = s[B].fuse(hi, wi)
by, fi = s[B].split(fi, factor=block_factor)
bx, ni = s[B].split(ni, factor=block_factor)
# Bind the iteration variables to GPU thread indices
s[B].bind(bz, block_z)
s[B].bind(by, block_y)
s[B].bind(bx, block_x)
###############################################################################
# Virtual Thread Split
# --------------------
#
# We further split the workload from a thread block to individual threads. To
# avoid *memory bank conflicts*, we use virtual threads to split the area into 4
# parts, and then tile it into 8x8 grids. Therefore, as shown in the figure below,
# each thread computes 4 strided grids, where the size of each grid is 4 x 4.
#
# .. image:: https://github.com/dmlc/web-data/raw/main/tvm/tutorial/conv_gpu_vthread.png
# :align: center
# :height: 188px
# :width: 268px
#
tyz, fi = s[B].split(fi, nparts=vthread) # virtual thread split
txz, ni = s[B].split(ni, nparts=vthread) # virtual thread split
ty, fi = s[B].split(fi, nparts=num_thread)
tx, ni = s[B].split(ni, nparts=num_thread)
s[B].reorder(bz, by, bx, tyz, txz, ty, tx, fi, ni)
s[B].bind(tyz, thread_yz)
s[B].bind(txz, thread_xz)
s[B].bind(ty, thread_y)
s[B].bind(tx, thread_x)
###############################################################################
# Cooperative Fetching
# --------------------
#
# As mentioned before, in each time step we need to transfer step x block_factor
# data from GPU global memory to shared memory. In order to reduce the memory
# transfer per thread, the following code lets threads in the same thread block
# cooperatively fetch dependent data from global memory.
#
# Schedule BL local write
s[BL].compute_at(s[B], tx)
yi, xi, fi, ni = s[BL].op.axis
ry, rx, rc = s[BL].op.reduce_axis
rco, rci = s[BL].split(rc, factor=step)
s[BL].reorder(rco, ry, rx, rci, fi, ni)
# Attach computation to iteration variables
s[AA].compute_at(s[BL], rx)
s[WW].compute_at(s[BL], rx)
s[AL].compute_at(s[BL], rci)
s[WL].compute_at(s[BL], rci)
# Schedule for A's shared memory load
yi, xi, ci, ni = s[AA].op.axis
ty, ci = s[AA].split(ci, nparts=num_thread)
tx, ni = s[AA].split(ni, nparts=num_thread)
_, ni = s[AA].split(ni, factor=4)
s[AA].reorder(ty, tx, yi, xi, ci, ni)
s[AA].bind(ty, thread_y)
s[AA].bind(tx, thread_x)
s[AA].vectorize(ni) # vectorize memory load
# Schedule for W's shared memory load
yi, xi, ci, fi = s[WW].op.axis
ty, ci = s[WW].split(ci, nparts=num_thread)
tx, fi = s[WW].split(fi, nparts=num_thread)
_, fi = s[WW].split(fi, factor=4)
s[WW].reorder(ty, tx, yi, xi, ci, fi)
s[WW].bind(ty, thread_y)
s[WW].bind(tx, thread_x)
s[WW].vectorize(fi) # vectorize memory load
###############################################################################
# Generate CUDA Kernel
# --------------------
#
# Finally we use TVM to generate and compile the CUDA kernel, and evaluate the
# latency of convolution.
#
func = tvm.build(s, [A, W, B], "cuda")
dev = tvm.cuda(0)
a_np = np.random.uniform(size=(in_size, in_size, in_channel, batch)).astype(A.dtype)
w_np = np.random.uniform(size=(kernel, kernel, in_channel, out_channel)).astype(W.dtype)
a = tvm.nd.array(a_np, dev)
w = tvm.nd.array(w_np, dev)
b = tvm.nd.array(np.zeros((out_size, out_size, out_channel, batch), dtype=B.dtype), dev)
func(a, w, b)
evaluator = func.time_evaluator(func.entry_name, dev, number=1)
print("Convolution: %f ms" % (evaluator(a, w, b).mean * 1e3))
| https://github.com/zk-ml/tachikoma |
gallery/how_to/optimize_operators/opt_conv_tensorcore.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
.. _opt-conv-tensorcore:
How to optimize convolution using TensorCores
=============================================
**Author**: `Siyuan Feng <https://github.com/Hzfengsy>`_
In this tutorial, we will demonstrate how to write a high performance convolution
schedule using TensorCores in TVM. In this example, we assume the input to
convolution has a large batch. We strongly recommend covering the :ref:`opt-conv-gpu` tutorial first.
"""
################################################################
# TensorCore Introduction
# -----------------------
# Each Tensor Core provides a 4x4x4 matrix processing array that operates
# :code:`D = A * B + C`, where A, B, C and D are 4x4 matrices as Figure shows.
# The matrix multiplication inputs A and B are FP16 matrices, while the accumulation
# matrices C and D may be FP16 or FP32 matrices.
#
# However, CUDA programmers can only use warp-level primitive
# :code:`wmma::mma_sync(acc_frag, a_frag, b_frag, acc_frag)` to perform
# 16x16x16 half-precision matrix multiplication on tensor cores. Before invoking
# the matrix multiplication, programmers must load data from memory into registers
# with primitive :code:`wmma::load_matrix_sync`, explicitly. The NVCC compiler translates
# that primitive into multiple memory load instructions. At run time, every thread loads
# 16 elements from matrix A and 16 elements from B.
################################################################
# Preparation and Algorithm
# -------------------------
# We use fixed-size input tensors with 256 channels and 14 x 14 dimensions.
# The batch size is 256. Convolution filters contain 512 filters of size 3 x 3.
# We use stride size 1 and padding size 1 for the convolution. In the example, we use the
# NHWCnc memory layout. The following code defines the convolution algorithm in TVM.
# sphinx_gallery_start_ignore
from tvm import testing
testing.utils.install_request_hook(depth=3)
# sphinx_gallery_end_ignore
import tvm
from tvm import te
import numpy as np
from tvm.contrib import nvcc
# The sizes of inputs and filters
batch_size = 256
height = 14
width = 14
in_channels = 256
out_channels = 512
kernel_h = 3
kernel_w = 3
pad_h = 1
pad_w = 1
stride_h = 1
stride_w = 1
# TensorCore shape
block_size = 16
assert batch_size % block_size == 0
assert in_channels % block_size == 0
assert out_channels % block_size == 0
# Input feature map: (N, H, W, IC, n, ic)
data_shape = (
batch_size // block_size,
height,
width,
in_channels // block_size,
block_size,
block_size,
)
# Kernel: (H, W, IC, OC, ic, oc)
kernel_shape = (
kernel_h,
kernel_w,
in_channels // block_size,
out_channels // block_size,
block_size,
block_size,
)
# Output feature map: (N, H, W, OC, n, oc)
output_shape = (
batch_size // block_size,
height,
width,
out_channels // block_size,
block_size,
block_size,
)
# Reduction axes
kh = te.reduce_axis((0, kernel_h), name="kh")
kw = te.reduce_axis((0, kernel_w), name="kw")
ic = te.reduce_axis((0, in_channels // block_size), name="ic")
ii = te.reduce_axis((0, block_size), name="ii")
# Algorithm
A = te.placeholder(data_shape, name="A", dtype="float16")
W = te.placeholder(kernel_shape, name="W", dtype="float16")
Apad = te.compute(
(
batch_size // block_size,
height + 2 * pad_h,
width + 2 * pad_w,
in_channels // block_size,
block_size,
block_size,
),
lambda n, h, w, i, nn, ii: tvm.tir.if_then_else(
tvm.tir.all(h >= pad_h, h - pad_h < height, w >= pad_w, w - pad_w < width),
A[n, h - pad_h, w - pad_w, i, nn, ii],
tvm.tir.const(0.0, "float16"),
),
name="Apad",
)
Conv = te.compute(
output_shape,
lambda n, h, w, o, nn, oo: te.sum(
Apad[n, h * stride_h + kh, w * stride_w + kw, ic, nn, ii].astype("float32")
* W[kh, kw, ic, o, ii, oo].astype("float32"),
axis=[ic, kh, kw, ii],
),
name="Conv",
)
s = te.create_schedule(Conv.op)
s[Apad].compute_inline()
###############################################################################
# Memory Scope
# ------------
# In a traditional GPU schedule, we have global, shared and local memory scopes.
# To support TensorCores, we add three more special memory scopes: :code:`wmma.matrix_a`,
# :code:`wmma.matrix_b` and :code:`wmma.accumulator`. On hardware, all fragment scopes
# are stored at the on-chip register level, the same place as local memory.
# Designate the memory hierarchy
AS = s.cache_read(Apad, "shared", [Conv])
WS = s.cache_read(W, "shared", [Conv])
AF = s.cache_read(AS, "wmma.matrix_a", [Conv])
WF = s.cache_read(WS, "wmma.matrix_b", [Conv])
ConvF = s.cache_write(Conv, "wmma.accumulator")
###############################################################################
# Define Tensor Intrinsic
# -----------------------
# In fact, TensorCore is a special hardware operation. So, we can just use tensorize
# to replace a unit of computation with the TensorCore instruction. The first thing we
# need to do is to define the tensor intrinsics.
#
# There are four basic operations in TensorCore: :code:`fill_fragment`, :code:`load_matrix`,
# :code:`mma_sync` and :code:`store_matrix`. Since :code:`fill_fragment` and :code:`mma_sync`
# are both used in matrix multiplication, we can just write the following three intrinsics.
def intrin_wmma_load_matrix(scope):
n = 16
A = te.placeholder((n, n), name="A", dtype="float16")
BA = tvm.tir.decl_buffer(A.shape, A.dtype, scope="shared", data_alignment=32, offset_factor=256)
C = te.compute((n, n), lambda i, j: A[i, j], name="C")
BC = tvm.tir.decl_buffer(C.shape, C.dtype, scope=scope, data_alignment=32, offset_factor=256)
def intrin_func(ins, outs):
ib = tvm.tir.ir_builder.create()
BA = ins[0]
BC = outs[0]
ib.emit(
tvm.tir.call_intrin(
"handle",
"tir.tvm_load_matrix_sync",
BC.data,
n,
n,
n,
BC.elem_offset // 256,
BA.access_ptr("r"),
n,
"row_major",
)
)
return ib.get()
return te.decl_tensor_intrin(C.op, intrin_func, binds={A: BA, C: BC})
def intrin_wmma_gemm():
n = 16
A = te.placeholder((n, n), name="A", dtype="float16")
B = te.placeholder((n, n), name="B", dtype="float16")
k = te.reduce_axis((0, n), name="k")
C = te.compute(
(n, n),
lambda ii, jj: te.sum(A[ii, k].astype("float") * B[k, jj].astype("float"), axis=k),
name="C",
)
BA = tvm.tir.decl_buffer(
A.shape, A.dtype, name="BA", scope="wmma.matrix_a", data_alignment=32, offset_factor=256
)
BB = tvm.tir.decl_buffer(
B.shape, B.dtype, name="BB", scope="wmma.matrix_b", data_alignment=32, offset_factor=256
)
BC = tvm.tir.decl_buffer(
C.shape, C.dtype, name="BC", scope="wmma.accumulator", data_alignment=32, offset_factor=256
)
def intrin_func(ins, outs):
BA, BB = ins
(BC,) = outs
def init():
ib = tvm.tir.ir_builder.create()
ib.emit(
tvm.tir.call_intrin(
"handle", "tir.tvm_fill_fragment", BC.data, n, n, n, BC.elem_offset // 256, 0.0
)
)
return ib.get()
def update():
ib = tvm.tir.ir_builder.create()
ib.emit(
tvm.tir.call_intrin(
"handle",
"tir.tvm_mma_sync",
BC.data,
BC.elem_offset // 256,
BA.data,
BA.elem_offset // 256,
BB.data,
BB.elem_offset // 256,
BC.data,
BC.elem_offset // 256,
)
)
return ib.get()
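        # te.decl_tensor_intrin accepts a (body, reset, update) tuple for reductions:
        # here the body and update stages issue tvm_mma_sync, while the reset stage
        # fills the accumulator fragment with zeros via tvm_fill_fragment.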
return update(), init(), update()
return te.decl_tensor_intrin(C.op, intrin_func, binds={A: BA, B: BB, C: BC})
def intrin_wmma_store_matrix():
n = 16
A = te.placeholder((n, n), name="A", dtype="float32")
BA = tvm.tir.decl_buffer(
A.shape, A.dtype, scope="wmma.accumulator", data_alignment=32, offset_factor=256
)
C = te.compute((n, n), lambda i, j: A[i, j], name="C")
BC = tvm.tir.decl_buffer(C.shape, C.dtype, scope="global", data_alignment=32, offset_factor=256)
def intrin_func(ins, outs):
ib = tvm.tir.ir_builder.create()
BA = ins[0]
BC = outs[0]
ib.emit(
tvm.tir.call_intrin(
"handle",
"tir.tvm_store_matrix_sync",
BA.data,
n,
n,
n,
BA.elem_offset // 256,
BC.access_ptr("w"),
n,
"row_major",
)
)
return ib.get()
return te.decl_tensor_intrin(C.op, intrin_func, binds={A: BA, C: BC})
###############################################################################
# Scheduling the Computation
# --------------------------
# To use TensorCores in TVM, we must schedule the computation into a specific structure
# that matches the tensor intrinsics. As in traditional GPU programs, we can also use
# shared memory to boost the speed. If you have any questions about blocking and shared
# memory, please refer to :ref:`opt-conv-gpu`.
#
# In this example, each block contains 2x4 warps, and each warp calls 4x2 TensorCore
# instructions. Thus, the output shape of each warp is 64x32 and each block outputs
# a 128x128 tile. Due to the limited shared memory space, we only load 2 blocks (2x128x128 tiles)
# at a time.
#
# .. note::
#
# *Warp-level Operation*
#
# Note that all TensorCore instructions are warp-level instructions, which means all 32 threads
# in a warp should execute this instruction simultaneously. Making threadIdx.x extent=32 is one of the
# easiest ways to solve this. Then we can bind threadIdx.x to any loops except those that contain
# TensorCore intrinsics directly or indirectly. Also note that this is not the only solution.
# The only thing we have to do is to make sure all threads in a warp can call TensorCore at the same time.
# Define tiling sizes
block_row_warps = 4
block_col_warps = 2
warp_row_tiles = 2
warp_col_tiles = 4
warp_size = 32
chunk = 2
block_x = te.thread_axis("blockIdx.x")
block_y = te.thread_axis("blockIdx.y")
block_z = te.thread_axis("blockIdx.z")
thread_x = te.thread_axis("threadIdx.x")
thread_y = te.thread_axis("threadIdx.y")
thread_z = te.thread_axis("threadIdx.z")
nc, hc, wc, oc, nnc, ooc = Conv.op.axis
block_k = s[Conv].fuse(hc, wc)
s[Conv].bind(block_k, block_z)
nc, nci = s[Conv].split(nc, factor=warp_row_tiles)
block_i, nc = s[Conv].split(nc, factor=block_row_warps)
oc, oci = s[Conv].split(oc, factor=warp_col_tiles)
block_j, oc = s[Conv].split(oc, factor=block_col_warps)
s[Conv].reorder(block_k, block_i, block_j, nc, oc, nci, oci, nnc, ooc)
s[Conv].bind(block_i, block_x)
s[Conv].bind(block_j, block_y)
s[Conv].bind(nc, thread_y)
s[Conv].bind(oc, thread_z)
# Schedule local computation
s[ConvF].compute_at(s[Conv], oc)
n, h, w, o, nnf, oof = ConvF.op.axis
ko, ki = s[ConvF].split(ic, factor=chunk)
s[ConvF].reorder(ko, kh, ki, kw, n, o, nnf, oof, ii)
# Move intermediate computation into each output compute tile
s[AF].compute_at(s[ConvF], kw)
s[WF].compute_at(s[ConvF], kw)
# Schedule for A's shared memory
s[AS].compute_at(s[ConvF], kh)
n, h, w, i, nn, ii = AS.op.axis
tx, xo = s[AS].split(n, nparts=block_row_warps)
ty, yo = s[AS].split(xo, nparts=block_col_warps)
t = s[AS].fuse(nn, ii)
to, ti = s[AS].split(t, factor=warp_size)
s[AS].bind(tx, thread_y)
s[AS].bind(ty, thread_z)
s[AS].bind(ti, thread_x)
# Schedule for W's shared memory
s[WS].compute_at(s[ConvF], kh)
kh, kw, ic, o, ii, oo = WS.op.axis
tx, xo = s[WS].split(o, nparts=block_row_warps)
ty, yo = s[WS].split(xo, nparts=block_col_warps)
t = s[WS].fuse(ii, oo)
to, ti = s[WS].split(t, nparts=warp_size)
s[WS].bind(tx, thread_y)
s[WS].bind(ty, thread_z)
s[WS].bind(to, thread_x)
s[WS].vectorize(ti)
print(tvm.lower(s, [A, W, Conv], simple_mode=True))
###############################################################################
# Lowering Computation to Intrinsics
# ----------------------------------
# The last phase is to lower the computation loops down to TensorCore hardware intrinsics
# by mapping the 2D convolution to the tensor intrinsics.
s[AF].tensorize(AF.op.axis[-2], intrin_wmma_load_matrix("wmma.matrix_a"))
s[WF].tensorize(WF.op.axis[-2], intrin_wmma_load_matrix("wmma.matrix_b"))
s[Conv].tensorize(nnc, intrin_wmma_store_matrix())
s[ConvF].tensorize(nnf, intrin_wmma_gemm())
print(tvm.lower(s, [A, W, Conv], simple_mode=True))
###############################################################################
# Generate CUDA Kernel
# --------------------
# Finally we use TVM to generate and compile the CUDA kernel, and evaluate the latency of convolution.
# Since TensorCores are only supported on NVIDIA GPUs with Compute Capability 7.0 or higher, it may not
# be able to run on our build server.
dev = tvm.cuda(0)
if nvcc.have_tensorcore(dev.compute_version):
with tvm.transform.PassContext(config={"tir.UnrollLoop": {"auto_max_step": 16}}):
func = tvm.build(s, [A, W, Conv], "cuda")
a_np = np.random.uniform(size=data_shape).astype(A.dtype)
w_np = np.random.uniform(size=kernel_shape).astype(W.dtype)
a = tvm.nd.array(a_np, dev)
w = tvm.nd.array(w_np, dev)
c = tvm.nd.array(np.zeros(output_shape, dtype=Conv.dtype), dev)
evaluator = func.time_evaluator(func.entry_name, dev, number=10)
print("conv2d with tensor core: %f ms" % (evaluator(a, w, c).mean * 1e3))
###############################################################################
# Summary
# -------
# This tutorial demonstrates how TVM scheduling primitives can be used to
# call TensorCores on specific GPUs.
| https://github.com/zk-ml/tachikoma |
gallery/how_to/optimize_operators/opt_gemm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
.. _opt-gemm:
How to optimize GEMM on CPU
===========================
**Author**: `Jian Weng <https://github.com/were>`_, \
`Ruofei Yu <https://github.com/yuruofeifei>`_
(TL;DR) TVM provides abstract interfaces which allow users to depict an algorithm and the
algorithm's implementing organization (the so-called schedule) separately. Typically, writing an
algorithm in a high-performance schedule breaks the algorithm's readability and modularity. Also,
trying various seemingly promising schedules is time-consuming. With the help of TVM, we can
try these schedules efficiently to enhance the performance.
In this tutorial, we will demonstrate how to use TVM to optimize square matrix multiplication
and achieve a 200x speedup over the baseline by simply adding 18 extra lines of code.
There are two important optimizations for compute-intensive applications executed on CPU:
1. Increase the cache hit rate of memory accesses. Both complex numerical computation and hot-spot
   memory access can be accelerated by a high cache hit rate. This requires us to transform the
   original memory access pattern into a pattern that fits the cache policy.
2. SIMD (single instruction, multiple data), also known as the vector processing unit. Each time, a
   small batch of data, rather than a single value, is processed. This requires us to
   transform the data access pattern in the loop body into a uniform pattern so that the LLVM
   backend can lower it to SIMD.
Actually, all the methodologies used in this tutorial are a subset of the tricks mentioned in this
`repo <https://github.com/flame/how-to-optimize-gemm>`_. Some of them have been applied by the TVM
abstraction automatically, but some of them cannot be simply applied due to TVM constraints.
All the experiment results mentioned below were obtained on a 2015 15-inch MacBook equipped with an
Intel i7-4770HQ CPU. The cache line size is 64 bytes for all x86 CPUs.
"""
# sphinx_gallery_start_ignore
from tvm import testing
testing.utils.install_request_hook(depth=3)
# sphinx_gallery_end_ignore
################################################################################################
# Preparation and Baseline
# ------------------------
# In this tutorial, we will demo how to use TVM to optimize matrix multiplication.
# Before actually demonstrating, we first define these variables.
# Then we write a baseline implementation, the simplest way to write a matrix multiplication in TVM.
import tvm
import tvm.testing
from tvm import te
import numpy
import timeit
# The size of the matrix
# (M, K) x (K, N)
# You are free to try out different shapes, sometimes TVM optimization outperforms numpy with MKL.
M = 1024
K = 1024
N = 1024
# The default tensor type in tvm
dtype = "float32"
# using the Intel AVX2 (Advanced Vector Extensions) ISA for SIMD
# To get the best performance, please change the following line
# to "llvm -mcpu=core-avx2", or the specific type of CPU you use
target = "llvm"
dev = tvm.device(target, 0)
# Random generated tensor for testing
a = tvm.nd.array(numpy.random.rand(M, K).astype(dtype), dev)
b = tvm.nd.array(numpy.random.rand(K, N).astype(dtype), dev)
np_repeat = 100
np_running_time = timeit.timeit(
setup="import numpy\n"
"M = " + str(M) + "\n"
"K = " + str(K) + "\n"
"N = " + str(N) + "\n"
'dtype = "float32"\n'
"a = numpy.random.rand(M, K).astype(dtype)\n"
"b = numpy.random.rand(K, N).astype(dtype)\n",
stmt="answer = numpy.dot(a, b)",
number=np_repeat,
)
print("Numpy running time: %f" % (np_runing_time / np_repeat))
answer = numpy.dot(a.numpy(), b.numpy())
# Algorithm
k = te.reduce_axis((0, K), "k")
A = te.placeholder((M, K), name="A")
B = te.placeholder((K, N), name="B")
C = te.compute((M, N), lambda m, n: te.sum(A[m, k] * B[k, n], axis=k), name="C")
# Default schedule
s = te.create_schedule(C.op)
func = tvm.build(s, [A, B, C], target=target, name="mmult")
assert func
c = tvm.nd.array(numpy.zeros((M, N), dtype=dtype), dev)
func(a, b, c)
tvm.testing.assert_allclose(c.numpy(), answer, rtol=1e-5)
evaluator = func.time_evaluator(func.entry_name, dev, number=1)
print("Baseline: %f" % evaluator(a, b, c).mean)
################################################################################################
# In TVM, we can always inspect lower level IR to debug or optimize our schedule.
# Here is the generated IR using our baseline schedule.
print(tvm.lower(s, [A, B, C], simple_mode=True))
################################################################################################
# Blocking
# --------
# An important trick to enhance the cache hit rate is blocking --- data chunks are computed
# block by block. The memory access inside a block is a small neighbourhood with high
# memory locality. In this tutorial, we pick 32 as the blocking factor. So a block will
# fill 32 * 32 * sizeof(float), which is 4KB, in the cache whose total size is 32KB (L1 data cache).
bn = 32
kfactor = 4
s = te.create_schedule(C.op)
# Blocking by loop tiling
mo, no, mi, ni = s[C].tile(C.op.axis[0], C.op.axis[1], bn, bn)
(kaxis,) = s[C].op.reduce_axis
ko, ki = s[C].split(kaxis, factor=kfactor)
# Hoist reduction domain outside the blocking loop
s[C].reorder(mo, no, ko, ki, mi, ni)
func = tvm.build(s, [A, B, C], target=target, name="mmult")
assert func
c = tvm.nd.array(numpy.zeros((M, N), dtype=dtype), dev)
func(a, b, c)
tvm.testing.assert_allclose(c.numpy(), answer, rtol=1e-5)
# By simply tiling the loop 32x32, and hoisting ko, ki outside the blocking loops,
# we can see a big speedup compared with the baseline.
evaluator = func.time_evaluator(func.entry_name, dev, number=10)
print("Opt1: %f" % evaluator(a, b, c).mean)
################################################################################################
# Here is the generated IR after blocking.
print(tvm.lower(s, [A, B, C], simple_mode=True))
###################################################################################################
# Vectorization
# -------------
# Another important trick is vectorization. When the memory access pattern is uniform,
# the compiler can detect this pattern and pass the contiguous memory to the vector processor. In TVM,
# we can use the `vectorize` interface to hint the compiler about this pattern, so that we can accelerate it
# vastly.
#
# In this tutorial, we chose to vectorize the inner loop row data since it is cache friendly.
s = te.create_schedule(C.op)
mo, no, mi, ni = s[C].tile(C.op.axis[0], C.op.axis[1], bn, bn)
(kaxis,) = s[C].op.reduce_axis
ko, ki = s[C].split(kaxis, factor=kfactor)
s[C].reorder(mo, no, ko, ki, mi, ni)
# Vectorization
s[C].vectorize(ni)
func = tvm.build(s, [A, B, C], target=target, name="mmult")
assert func
c = tvm.nd.array(numpy.zeros((M, N), dtype=dtype), dev)
func(a, b, c)
tvm.testing.assert_allclose(c.numpy(), answer, rtol=1e-5)
evaluator = func.time_evaluator(func.entry_name, dev, number=10)
print("Opt2: %f" % evaluator(a, b, c).mean)
################################################################################################
# Here is the generated IR after vectorization.
print(tvm.lower(s, [A, B, C], simple_mode=True))
###################################################################################################
# Loop Permutation
# ----------------
# If we look at the above IR, we can see the inner loop row data is vectorized for both B and C.
# Next we will look at the access pattern of A. In the current schedule, A is accessed column by column,
# which is not cache friendly. If we change the nested loop order of ki and the inner axis mi,
# the access pattern for the A matrix becomes more cache friendly.
s = te.create_schedule(C.op)
mo, no, mi, ni = s[C].tile(C.op.axis[0], C.op.axis[1], bn, bn)
(kaxis,) = s[C].op.reduce_axis
ko, ki = s[C].split(kaxis, factor=kfactor)
# re-ordering
s[C].reorder(mo, no, ko, mi, ki, ni)
s[C].vectorize(ni)
func = tvm.build(s, [A, B, C], target=target, name="mmult")
assert func
c = tvm.nd.array(numpy.zeros((M, N), dtype=dtype), dev)
func(a, b, c)
tvm.testing.assert_allclose(c.numpy(), answer, rtol=1e-5)
evaluator = func.time_evaluator(func.entry_name, dev, number=10)
print("Opt3: %f" % evaluator(a, b, c).mean)
################################################################################################
# Here is the generated IR after loop permutation.
print(tvm.lower(s, [A, B, C], simple_mode=True))
###################################################################################################
# Array Packing
# -------------
# Another important trick is array packing. The trick is to reorder the storage of a multi-
# dimensional array so that it is accessed sequentially after it is flattened and stored in one-
# dimensional memory.
#
# .. image:: https://github.com/dmlc/web-data/raw/main/tvm/tutorial/array-packing.png
# :align: center
#
# NOTE: This figure is a general illustration of how array packing works.
###################################################################################################
# We can use array packing to address the access pattern for B. Observe the array access pattern of
# B after flattening which is not sequential as we iterate over the K dimension. We can reorder B
# with dimensions [K][N] so that it has dimensions [N/bn][K][bn] where bn is the blocking factor and
# also the vector size for B in the inner loop. This reorder splits N into two dimensions ---
# bigN (N/bn) and littleN (bn) --- and the new dimensions [N/bn][K][bn] match the indexing of B
# from outer to inner loops (no, ko, ki, ni) resulting in a sequential access pattern for B after
# flattening.
# We have to re-write the algorithm slightly.
packedB = te.compute(
(N / bn, K, bn), lambda bigN, k, littleN: B[k, bigN * bn + littleN], name="packedB"
)
C = te.compute(
(M, N),
lambda m, n: te.sum(A[m, k] * packedB[n // bn, k, tvm.tir.indexmod(n, bn)], axis=k),
name="C",
)
s = te.create_schedule(C.op)
mo, no, mi, ni = s[C].tile(C.op.axis[0], C.op.axis[1], bn, bn)
(kaxis,) = s[C].op.reduce_axis
ko, ki = s[C].split(kaxis, factor=kfactor)
s[C].reorder(mo, no, ko, mi, ki, ni)
s[C].vectorize(ni)
bigN, _, littleN = s[packedB].op.axis
s[packedB].vectorize(littleN)
s[packedB].parallel(bigN)
func = tvm.build(s, [A, B, C], target=target, name="mmult")
assert func
c = tvm.nd.array(numpy.zeros((M, N), dtype=dtype), dev)
func(a, b, c)
tvm.testing.assert_allclose(c.numpy(), answer, rtol=1e-5)
evaluator = func.time_evaluator(func.entry_name, dev, number=10)
print("Opt4: %f" % evaluator(a, b, c).mean)
################################################################################################
# Here is the generated IR after array packing.
print(tvm.lower(s, [A, B, C], simple_mode=True))
################################################################################################
# Write cache for blocks
# ----------------------
# After blocking, the program writes results to C block by block, so the access pattern
# is not sequential. Therefore, we can use a sequential cache array to hold the block results and
# write them to C when all the block results are ready.
#
s = te.create_schedule(C.op)
# Allocate write cache
CC = s.cache_write(C, "global")
mo, no, mi, ni = s[C].tile(C.op.axis[0], C.op.axis[1], bn, bn)
# Write cache is computed at no
s[CC].compute_at(s[C], no)
# New inner axes
mc, nc = s[CC].op.axis
(kaxis,) = s[CC].op.reduce_axis
ko, ki = s[CC].split(kaxis, factor=kfactor)
s[CC].reorder(ko, mc, ki, nc)
s[CC].vectorize(nc)
# TODO: Add separate optimization step to discuss loop unrolling
# unrolling is a loop optimization strategy which can reduce branch
# prediction failures and increase the chance of concurrent execution
# unroll kfactor loops
s[CC].unroll(ki)
bigN, _, littleN = s[packedB].op.axis
s[packedB].vectorize(littleN)
s[packedB].parallel(bigN)
func = tvm.build(s, [A, B, C], target=target, name="mmult")
assert func
c = tvm.nd.array(numpy.zeros((M, N), dtype=dtype), dev)
func(a, b, c)
tvm.testing.assert_allclose(c.numpy(), answer, rtol=1e-5)
evaluator = func.time_evaluator(func.entry_name, dev, number=10)
print("Opt5: %f" % evaluator(a, b, c).mean)
################################################################################################
# Here is the generated IR after blocking.
print(tvm.lower(s, [A, B, C], simple_mode=True))
###################################################################################################
# Parallel
# --------
# Furthermore, we can also utilize multi-core processors to perform thread-level parallelization.
s = te.create_schedule(C.op)
CC = s.cache_write(C, "global")
mo, no, mi, ni = s[C].tile(C.op.axis[0], C.op.axis[1], bn, bn)
s[CC].compute_at(s[C], no)
mc, nc = s[CC].op.axis
(kaxis,) = s[CC].op.reduce_axis
ko, ki = s[CC].split(kaxis, factor=kfactor)
s[CC].reorder(ko, mc, ki, nc)
s[CC].vectorize(nc)
s[CC].unroll(ki)
# parallel
s[C].parallel(mo)
bigN, _, littleN = s[packedB].op.axis
s[packedB].vectorize(littleN)
s[packedB].parallel(bigN)
func = tvm.build(s, [A, B, C], target=target, name="mmult")
assert func
c = tvm.nd.array(numpy.zeros((M, N), dtype=dtype), dev)
func(a, b, c)
tvm.testing.assert_allclose(c.numpy(), answer, rtol=1e-5)
evaluator = func.time_evaluator(func.entry_name, dev, number=50)
opt6_time = evaluator(a, b, c).mean
print("Opt6: %f" % opt6_time)
################################################################################################
# Here is the generated IR after parallelization.
print(tvm.lower(s, [A, B, C], simple_mode=True))
###################################################################################################
# Summary
# -------
# After applying the above simple optimizations with only 18 lines of code,
# our generated code can achieve 60% of the `numpy` performance with MKL.
# Note that the outputs on the web page reflect the running times on a non-exclusive
# Docker container, so they are *unreliable*. It is highly encouraged to run the
# tutorial by yourself to observe the performance gain achieved by TVM.
| https://github.com/zk-ml/tachikoma |
gallery/how_to/tune_with_autoscheduler/tune_conv2d_layer_cuda.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
.. _auto-scheduler-conv-gpu:
Auto-scheduling a Convolution Layer for GPU
===========================================
**Author**: `Lianmin Zheng <https://github.com/merrymercy>`_, \
`Chengfan Jia <https://github.com/jcf94/>`_
This is a tutorial on how to use the auto-scheduler for GPUs.
Different from the template-based :ref:`autotvm <tutorials-autotvm-sec>` which relies on
manual templates to define the search space, the auto-scheduler does not require any templates.
Users only need to write the computation declaration without any schedule commands or templates.
The auto-scheduler can automatically generate a large search space and
find a good schedule in the space.
We use a convolution layer as an example in this tutorial.
Note that this tutorial will not run on Windows or recent versions of macOS. To
get it to run, you will need to wrap the body of this tutorial in a :code:`if
__name__ == "__main__":` block.
"""
# sphinx_gallery_start_ignore
from tvm import testing
testing.utils.install_request_hook(depth=3)
# sphinx_gallery_end_ignore
import os
import numpy as np
import tvm
from tvm import te, auto_scheduler, topi
from tvm.topi.testing import conv2d_nchw_python
######################################################################
# Define the computation
# ^^^^^^^^^^^^^^^^^^^^^^
# To begin with, let us define the computation of a convolution layer.
# The function should return the list of input/output tensors.
# From these tensors, the auto-scheduler can get the whole computational graph.
@auto_scheduler.register_workload
def conv2d_layer(N, H, W, CO, CI, KH, KW, stride, padding):
data = te.placeholder((N, CI, H, W), name="data")
kernel = te.placeholder((CO, CI, KH, KW), name="kernel")
bias = te.placeholder((1, CO, 1, 1), name="bias")
conv = topi.nn.conv2d_nchw(data, kernel, stride, padding, dilation=1, out_dtype="float32")
out = topi.nn.relu(conv + bias)
return [data, kernel, bias, out]
######################################################################
# Create the search task
# ^^^^^^^^^^^^^^^^^^^^^^
# We then create a search task for the last convolution layer in ResNet-50.
target = tvm.target.Target("cuda")
# Use the last layer in ResNet-50
N, H, W, CO, CI, KH, KW, strides, padding = 1, 7, 7, 512, 512, 3, 3, (1, 1), (1, 1)
task = auto_scheduler.SearchTask(
func=conv2d_layer, args=(N, H, W, CO, CI, KH, KW, strides, padding), target=target
)
# Inspect the computational graph
print("Computational DAG:")
print(task.compute_dag)
######################################################################
# Next, we set parameters for the auto-scheduler. These parameters
# mainly specify how we do the measurement during the search.
#
# * :code:`measure_ctx` launches a different process for measurement to
# provide isolation. It can protect the main process from GPU crashes
# during measurement and avoid other runtime conflicts.
# * :code:`min_repeat_ms` defines the minimum duration of one "repeat" in every measurement.
# This can warm up the GPU, which is necessary to get accurate measurement results.
# Typically, we recommend a value >= 300 ms.
# * :code:`num_measure_trials` is the number of measurement trials we can use during the search.
# We only make 10 trials in this tutorial for a fast demonstration. In practice, 1000 is a
# good value for the search to converge. You can do more trials according to your time budget.
# * In addition, we use :code:`RecordToFile` to dump measurement records into a file `conv2d.json`.
# The measurement records can be used to query the history best, resume the search,
# and do more analyses later.
# * see :any:`auto_scheduler.TuningOptions`,
# :any:`auto_scheduler.LocalRPCMeasureContext` for more parameters.
log_file = "conv2d.json"
measure_ctx = auto_scheduler.LocalRPCMeasureContext(min_repeat_ms=300)
tune_option = auto_scheduler.TuningOptions(
num_measure_trials=10, # change this to 1000 to achieve the best performance
runner=measure_ctx.runner,
measure_callbacks=[auto_scheduler.RecordToFile(log_file)],
verbose=2,
)
######################################################################
# Run the search
# ^^^^^^^^^^^^^^
# Now we have all the inputs ready. Pretty simple, isn't it?
# We can kick off the search and let the auto-scheduler do its magic.
# After some measurement trials, we can load the best schedule from the log
# file and apply it.
# Run auto-tuning (search)
task.tune(tune_option)
# Apply the best schedule
sch, args = task.apply_best(log_file)
# Kill the measurement process
del measure_ctx
######################################################################
# We can lower the schedule to see the IR after auto-scheduling.
# The auto-scheduler correctly performs optimizations including multi-level tiling,
# cooperative fetching, unrolling and operator fusion.
print("Lowered TIR:")
print(tvm.lower(sch, args, simple_mode=True))
######################################################################
# Check correctness and evaluate performance
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# We build the binary and check its correctness and performance.
func = tvm.build(sch, args, target)
# Check correctness
data_np = np.random.uniform(size=(N, CI, H, W)).astype(np.float32)
weight_np = np.random.uniform(size=(CO, CI, KH, KW)).astype(np.float32)
bias_np = np.random.uniform(size=(1, CO, 1, 1)).astype(np.float32)
conv_np = conv2d_nchw_python(data_np, weight_np, strides, padding)
out_np = np.maximum(conv_np + bias_np, 0.0)
dev = tvm.cuda()
data_tvm = tvm.nd.array(data_np, device=dev)
weight_tvm = tvm.nd.array(weight_np, device=dev)
bias_tvm = tvm.nd.array(bias_np, device=dev)
out_tvm = tvm.nd.empty(out_np.shape, device=dev)
func(data_tvm, weight_tvm, bias_tvm, out_tvm)
# Check results
np.testing.assert_allclose(out_np, out_tvm.numpy(), rtol=1e-3)
# Evaluate execution time
evaluator = func.time_evaluator(func.entry_name, dev, min_repeat_ms=500)
print(
"Execution time of this operator: %.3f ms"
% (np.median(evaluator(data_tvm, weight_tvm, bias_tvm, out_tvm).results) * 1000)
)
######################################################################
# Using the record file
# ^^^^^^^^^^^^^^^^^^^^^
# During the search, all measurement records are dumped into the record
# file "conv2d.json". The measurement records can be used to re-apply search results,
# resume the search, and perform other analyses.
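######################################################################
# As a small sketch of such an analysis (assuming the `conv2d.json` log file
# produced above), one way to inspect the raw records is to iterate over them
# with :code:`auto_scheduler.load_records` and look at the measured costs:
#
# .. code-block:: python
#
#     best_cost = float("inf")
#     num_valid = 0
#     for inp, res in auto_scheduler.load_records(log_file):
#         if res.error_no != 0:  # skip failed measurements
#             continue
#         num_valid += 1
#         # res.costs holds the measured run times (in seconds) of this trial
#         cost = sum(v.value for v in res.costs) / len(res.costs)
#         best_cost = min(best_cost, cost)
#     print("valid records: %d, best measured time: %.6f s" % (num_valid, best_cost))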
######################################################################
# Here is an example where we load the best schedule from a file,
# print the equivalent python schedule API and CUDA source code.
# They can be used for debugging and learning the behavior of the auto-scheduler.
print("Equivalent python schedule:")
print(task.print_best(log_file, print_mode="schedule"))
print("CUDA source code:")
print(task.print_best(log_file, print_mode="cuda"))
######################################################################
# A more complicated example is to resume the search.
# In this case, we need to create the search policy and cost model by ourselves
# and resume the status of search policy and cost model with the log file.
# In the example below, we resume the status and do 5 more trials.
def resume_search(task, log_file):
print("Resume search:")
cost_model = auto_scheduler.XGBModel()
cost_model.update_from_file(log_file)
search_policy = auto_scheduler.SketchPolicy(
task, cost_model, init_search_callbacks=[auto_scheduler.PreloadMeasuredStates(log_file)]
)
measure_ctx = auto_scheduler.LocalRPCMeasureContext(min_repeat_ms=300)
tune_option = auto_scheduler.TuningOptions(
num_measure_trials=5,
runner=measure_ctx.runner,
measure_callbacks=[auto_scheduler.RecordToFile(log_file)],
)
task.tune(tune_option, search_policy=search_policy)
# Kill the measurement process
del measure_ctx
resume_search(task, log_file)
| https://github.com/zk-ml/tachikoma |
gallery/how_to/tune_with_autoscheduler/tune_network_arm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Auto-scheduling a Neural Network for ARM CPU
=============================================
**Author**: `Thierry Moreau <https://github.com/tmoreau89>`_, \
`Lianmin Zheng <https://github.com/merrymercy>`_, \
`Chengfan Jia <https://github.com/jcf94/>`_
Auto-tuning for specific devices and workloads is critical for getting the
best performance. This is a tutorial on how to tune a whole neural
network for ARM CPU with the auto-scheduler via RPC.
To auto-tune a neural network, we partition the network into small subgraphs and
tune them independently. Each subgraph is treated as one search task.
A task scheduler slices the time and dynamically allocates time resources to
these tasks. The task scheduler predicts the impact of each task on the end-to-end
execution time and prioritizes the one that can reduce the execution time the most.
For each subgraph, we use the compute declaration in :code:`tvm/python/topi` to
get the computational DAG in the tensor expression form.
We then use the auto-scheduler to construct a search space of this DAG and search
for good schedules (low-level optimizations).
Different from the template-based :ref:`autotvm <tutorials-autotvm-sec>` which relies on
manual templates to define the search space, the auto-scheduler does not require any
schedule templates. In other words, the auto-scheduler only uses the compute declarations
in :code:`tvm/python/topi` and does not use existing schedule templates.
Note that this tutorial will not run on Windows or recent versions of macOS. To
get it to run, you will need to wrap the body of this tutorial in a :code:`if
__name__ == "__main__":` block.
"""
# sphinx_gallery_start_ignore
from tvm import testing
testing.utils.install_request_hook(depth=3)
# sphinx_gallery_end_ignore
import numpy as np
import os
import tvm
from tvm import relay, auto_scheduler
from tvm.relay import data_dep_optimization as ddo
import tvm.relay.testing
from tvm.contrib import graph_executor
from tvm.contrib.utils import tempdir
#################################################################
# Define a Network
# ----------------
# First, we need to define the network with relay frontend API.
# We can load some pre-defined network from :code:`tvm.relay.testing`.
# We can also load models from MXNet, ONNX, PyTorch, and TensorFlow
# (see :ref:`front end tutorials<tutorial-frontend>`).
#
# For convolutional neural networks, although auto-scheduler can work correctly
# with any layout, we found the best performance is typically achieved with NHWC layout.
# We also implemented more optimizations for NHWC layout with the auto-scheduler.
# So it is recommended to convert your models to NHWC layout to use the auto-scheduler.
# You can use :ref:`ConvertLayout <convert-layout-usage>` pass to do the layout conversion in TVM.
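#
# As a minimal sketch of that conversion (assuming a Relay module :code:`mod`
# whose convolutions are currently in NCHW layout), it could look like:
#
# .. code-block:: python
#
#     desired_layouts = {"nn.conv2d": ["NHWC", "default"]}
#     seq = tvm.transform.Sequential([relay.transform.ConvertLayout(desired_layouts)])
#     with tvm.transform.PassContext(opt_level=3):
#         mod = seq(mod)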
def get_network(name, batch_size, layout="NHWC", dtype="float32", use_sparse=False):
"""Get the symbol definition and random weight of a network"""
# auto-scheduler prefers NHWC layout
if layout == "NHWC":
image_shape = (224, 224, 3)
elif layout == "NCHW":
image_shape = (3, 224, 224)
else:
raise ValueError("Invalid layout: " + layout)
input_shape = (batch_size,) + image_shape
output_shape = (batch_size, 1000)
if name.startswith("resnet-"):
n_layer = int(name.split("-")[1])
mod, params = relay.testing.resnet.get_workload(
num_layers=n_layer,
batch_size=batch_size,
layout=layout,
dtype=dtype,
image_shape=image_shape,
)
elif name.startswith("resnet3d-"):
n_layer = int(name.split("-")[1])
mod, params = relay.testing.resnet.get_workload(
num_layers=n_layer,
batch_size=batch_size,
layout=layout,
dtype=dtype,
image_shape=image_shape,
)
elif name == "mobilenet":
mod, params = relay.testing.mobilenet.get_workload(
batch_size=batch_size, layout=layout, dtype=dtype, image_shape=image_shape
)
elif name == "squeezenet_v1.1":
assert layout == "NCHW", "squeezenet_v1.1 only supports NCHW layout"
mod, params = relay.testing.squeezenet.get_workload(
version="1.1",
batch_size=batch_size,
dtype=dtype,
image_shape=image_shape,
)
elif name == "inception_v3":
input_shape = (batch_size, 3, 299, 299) if layout == "NCHW" else (batch_size, 299, 299, 3)
mod, params = relay.testing.inception_v3.get_workload(batch_size=batch_size, dtype=dtype)
elif name == "mxnet":
# an example for mxnet model
from mxnet.gluon.model_zoo.vision import get_model
assert layout == "NCHW"
block = get_model("resnet50_v1", pretrained=True)
mod, params = relay.frontend.from_mxnet(block, shape={"data": input_shape}, dtype=dtype)
net = mod["main"]
net = relay.Function(
net.params, relay.nn.softmax(net.body), None, net.type_params, net.attrs
)
mod = tvm.IRModule.from_expr(net)
elif name == "mlp":
mod, params = relay.testing.mlp.get_workload(
batch_size=batch_size, dtype=dtype, image_shape=image_shape, num_classes=1000
)
else:
raise ValueError("Network not found.")
if use_sparse:
from tvm.topi.sparse.utils import convert_model_dense_to_sparse
mod, params = convert_model_dense_to_sparse(mod, params, random_params=True)
return mod, params, input_shape, output_shape
#################################################################
# Start RPC Tracker
# -----------------
# TVM uses RPC session to communicate with ARM boards.
# During tuning, the tuner will send the generated code to the board and
# measure the speed of code on the board.
#
# To scale up the tuning, TVM uses RPC Tracker to manage distributed devices.
# The RPC Tracker is a centralized controller node. We can register all devices to
# the tracker. For example, if we have 10 phones, we can register all of them
# to the tracker, and run 10 measurements in parallel, accelerating the tuning process.
#
# To start an RPC tracker, run this command on the host machine. The tracker is
# required during the whole tuning process, so we need to open a new terminal for
# this command:
#
# .. code-block:: bash
#
# python -m tvm.exec.rpc_tracker --host=0.0.0.0 --port=9190
#
# The expected output is
#
# .. code-block:: bash
#
# INFO:RPCTracker:bind to 0.0.0.0:9190
#################################################################
# Register Devices to RPC Tracker
# -----------------------------------
# Now we can register our devices to the tracker. The first step is to
# build the TVM runtime for the ARM devices.
#
# * For Linux:
# Follow this section :ref:`build-tvm-runtime-on-device` to build
# the TVM runtime on the device. Then register the device to tracker by
#
# .. code-block:: bash
#
# python -m tvm.exec.rpc_server --tracker=[HOST_IP]:9190 --key=rasp4b-64
#
# (replace :code:`[HOST_IP]` with the IP address of your host machine)
#
# * For Android:
# Follow this `readme page <https://github.com/apache/tvm/tree/main/apps/android_rpc>`_ to
# install the TVM RPC APK on the android device. Make sure you can pass the android rpc test.
# Then you have already registered your device. During tuning, you have to go to the developer options
# and enable "Keep screen awake during charging" and charge your phone to make it stable.
#
# After registering devices, we can confirm it by querying rpc_tracker
#
# .. code-block:: bash
#
# python -m tvm.exec.query_rpc_tracker --host=0.0.0.0 --port=9190
#
# For example, if we have 2 Huawei mate10 pro, 11 Raspberry Pi 4B with 64bit OS, and 2 rk3399,
# the output can be
#
# .. code-block:: bash
#
# Queue Status
# ----------------------------------
# key total free pending
# ----------------------------------
# mate10pro 2 2 0
# rk3399 2 2 0
# rasp4b-64 11 11 0
# ----------------------------------
#
# You can register multiple devices to the tracker to accelerate the measurement in tuning.
###########################################
# Set Tuning Options
# ------------------
# Before tuning, we should apply some configurations. Here we use a Raspberry Pi 4B (4GB) board
# with a 64-bit OS (Ubuntu 20.04) as an example. In your setting, you should modify the target
# and device_key accordingly.
# Set :code:`use_ndk` to True if you use an Android phone.
#### DEVICE CONFIG ####
# Replace "aarch64-linux-gnu" with the correct target of your board.
# This target is used for cross compilation. You can query it by running :code:`gcc -v` on your device.
# FIXME(tmoreau89, merrymercy): We leave '-device=arm_cpu' out of the target string
# because we're sharing x86 op strategy.
target = tvm.target.Target("llvm -mtriple=aarch64-linux-gnu -mattr=+neon")
# Also replace this with the device key, rpc host and rpc port in your tracker
device_key = "rasp4b-64"
rpc_host = "127.0.0.1"
rpc_port = 9190
# Set this to True if you use ndk tools for cross compiling
# Also set the environment variable below to point to the cross compiler.
use_ndk = False
# os.environ["TVM_NDK_CC"] = "/usr/bin/aarch64-linux-gnu-g++"
#### TUNING OPTION ####
network = "mobilenet"
use_sparse = False
batch_size = 1
layout = "NHWC"
dtype = "float32"
log_file = "%s-%s-B%d-%s.json" % (network, layout, batch_size, target.kind.name)
#################################################################
# Extract Search Tasks
# --------------------
# Next, we extract the search tasks and their weights from a network.
# The weight of a task is the number of appearances of the task's subgraph
# in the whole network.
# By using the weight, we can approximate the end-to-end latency of the network
# as :code:`sum(latency[t] * weight[t])`, where :code:`latency[t]` is the
# latency of a task and :code:`weight[t]` is the weight of the task.
# The task scheduler will just optimize this objective.
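#
# As an illustrative sketch (with :code:`latencies` standing in for hypothetical
# per-task latencies in seconds), the objective is simply the weighted sum:
#
# .. code-block:: python
#
#     estimated_total_latency = sum(
#         latency * weight for latency, weight in zip(latencies, task_weights)
#     )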
# Extract tasks from the network
print("Get model...")
mod, params, input_shape, output_shape = get_network(
network, batch_size, layout, dtype=dtype, use_sparse=use_sparse
)
print("Extract tasks...")
tasks, task_weights = auto_scheduler.extract_tasks(mod["main"], params, target)
for idx, task in enumerate(tasks):
print("========== Task %d (workload key: %s) ==========" % (idx, task.workload_key))
print(task.compute_dag)
#################################################################
# Tuning and Evaluation
# ---------------------
# Now, we set some options for tuning and launch the search tasks
#
# * :code:`num_measure_trials` is the number of measurement trials we can use during the tuning.
# You can set it to a small number (e.g., 200) for a fast demonstrative run.
# In practice, we recommend setting it around :code:`800 * len(tasks)`,
# which is typically enough for the search to converge.
# For example, there are 29 tasks in resnet-50, so we can set it as 20000.
# You can adjust this parameter according to your time budget.
# * In addition, we use :code:`RecordToFile` to dump measurement records into a log file,
# The measurement records can be used to query the history best, resume the search,
# and do more analyses later.
# * see :any:`auto_scheduler.TuningOptions`,
# :any:`auto_scheduler.LocalRunner` for more parameters.
#
# After auto-tuning, we can compile the network with the best schedules we found.
# All measurement records are dumped into the log file during auto-tuning,
# so we can read the log file and load the best schedules.
def tune_and_evaluate():
print("Begin tuning...")
tuner = auto_scheduler.TaskScheduler(tasks, task_weights)
tune_option = auto_scheduler.TuningOptions(
num_measure_trials=200, # change this to 20000 to achieve the best performance
builder=auto_scheduler.LocalBuilder(build_func="ndk" if use_ndk else "default"),
runner=auto_scheduler.RPCRunner(
device_key,
host=rpc_host,
port=rpc_port,
timeout=30,
repeat=1,
min_repeat_ms=200,
enable_cpu_cache_flush=True,
),
measure_callbacks=[auto_scheduler.RecordToFile(log_file)],
)
tuner.tune(tune_option)
# Compile with the history best
print("Compile...")
with auto_scheduler.ApplyHistoryBest(log_file):
with tvm.transform.PassContext(
opt_level=3, config={"relay.backend.use_auto_scheduler": True}
):
lib = relay.build(mod, target=target, params=params)
# Export library
tmp = tempdir()
if use_ndk:
from tvm.contrib import ndk
filename = "net.so"
lib.export_library(tmp.relpath(filename), ndk.create_shared)
else:
filename = "net.tar"
lib.export_library(tmp.relpath(filename))
# Upload module to device
print("Upload...")
remote = auto_scheduler.utils.request_remote(device_key, rpc_host, rpc_port, timeout=10000)
remote.upload(tmp.relpath(filename))
rlib = remote.load_module(filename)
# Create graph executor
dev = remote.cpu()
module = graph_executor.GraphModule(rlib["default"](dev))
data_tvm = tvm.nd.array((np.random.uniform(size=input_shape)).astype(dtype))
module.set_input("data", data_tvm)
# Evaluate
print("Evaluate inference time cost...")
print(module.benchmark(dev, repeat=3, min_repeat_ms=500))
# We do not run the tuning in our webpage server since the server doesn't have a Raspberry Pi
# or a device tracker running.
# Uncomment the following line to run it by yourself.
# tune_and_evaluate()
######################################################################
# .. note:: Explaining the printed information during tuning
#
# During the tuning, a lot of information will be printed on the console.
# They are used for debugging purposes. The most important info is the output
# of the task scheduler. The following table is a sample output.
#
# .. code-block:: c
#
# ----------------------------------------------------------------------
# ------------------------------ [ Task Scheduler ]
# ----------------------------------------------------------------------
# | ID | Latency (ms) | Speed (GFLOPS) | Trials |
# -------------------------------------------------
# | 0 | 0.013 | 0.31 | 64 |
# | 1 | 0.845 | 2.43 | 448 |
# | 2 | 0.046 | -0.00 | 64 |
# | 3 | 4.194 | 24.53 | 2112 |
# | 4 | 0.109 | 9.21 | 64 |
# | 5 | 1.759 | 29.27 | 896 |
# | 6 | 0.083 | 6.01 | 64 |
# | 7 | 3.084 | 33.38 | 7680 |
# | 8 | 0.136 | 14.78 | 384 |
# | 9 | 1.349 | 38.23 | 768 |
# | 10 | 0.133 | 7.55 | 128 |
# | 11 | 2.747 | 37.56 | 1536 |
# | 12 | 0.338 | 11.87 | 192 |
# | 13 | 1.295 | 40.00 | 704 |
# | 14 | 0.482 | 4.16 | 256 |
# | 15 | 2.686 | 38.56 | 1344 |
# | 16 | 0.884 | 9.08 | 448 |
# | 17 | 1.332 | 39.18 | 704 |
# | 18 | 1.045 | 3.84 | 576 |
# | 19 | 1.391 | 38.09 | 704 |
# | 20 | 0.777 | 10.34 | 448 |
# | 21 | 0.739 | 30.97 | 448 |
# -------------------------------------------------
# Estimated total latency: 38.347 ms Trials: 19992 Used time : 19260 s Next ID: 3
#
# This table lists the latency and (estimated) speed of all tasks.
# It also lists the allocation of measurement trials for all tasks.
# The last line prints the total weighted latency of these tasks,
# which can be a rough estimation of the end-to-end execution time
# of the network.
# The last line also prints the total number of measurement trials,
# total time spent on auto-tuning and the id of the next task to tune.
#
# There will also be some "dmlc::Error" messages, because the
# auto-scheduler will try some invalid schedules.
# You can safely ignore them if the tuning can continue, because these
# errors are isolated from the main process.
#
######################################################################
# .. note:: Terminate the tuning earlier
#
# You can terminate the tuning earlier by forcibly killing this process.
# As long as you get at least one valid schedule for each task in the log file,
# you should be able to do the compilation (the section below).
#
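######################################################################
# As a rough sanity check before compiling (a sketch, assuming the
# :code:`tasks` and :code:`log_file` defined above), you can verify that every
# task's workload key appears in at least one valid record:
#
# .. code-block:: python
#
#     measured = set()
#     for inp, res in auto_scheduler.load_records(log_file):
#         if res.error_no == 0:
#             measured.add(inp.task.workload_key)
#     missing = [t.workload_key for t in tasks if t.workload_key not in measured]
#     print("tasks without a valid record: %d" % len(missing))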
#################################################################
# Other Tips
# ----------
# 1. During the tuning, the auto-scheduler needs to compile many programs and
# extract features from them. This part is CPU-intensive,
# so a high-performance CPU with many cores is recommended for faster search.
# 2. You can use :code:`python3 -m tvm.auto_scheduler.measure_record --mode distill -i log.json`
# to distill the large log file and only save the best useful records.
# 3. You can resume a search from the previous log file. You just need to
# add a new argument :code:`load_log_file` when creating the task scheduler
# in the function :code:`tune_and_evaluate`. For example,
# :code:`tuner = auto_scheduler.TaskScheduler(tasks, task_weights, load_log_file=log_file)`
# 4. If you have multiple target CPUs, you can use all of them for measurements to
# parallelize the measurements. Check this :ref:`section <tutorials-autotvm-scale-up-rpc-tracker>`
# to learn how to use the RPC Tracker and RPC Server.
# To use the RPC Tracker in auto-scheduler, replace the runner in :code:`TuningOptions`
# with :any:`auto_scheduler.RPCRunner`.
| https://github.com/zk-ml/tachikoma |
gallery/how_to/tune_with_autoscheduler/tune_network_cuda.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Auto-scheduling a Neural Network for NVIDIA GPU
===============================================
**Author**: `Lianmin Zheng <https://github.com/merrymercy>`_
Auto-tuning for specific devices and workloads is critical for getting the
best performance. This is a tutorial on how to tune a whole neural
network for NVIDIA GPU with the auto-scheduler.
To auto-tune a neural network, we partition the network into small subgraphs and
tune them independently. Each subgraph is treated as one search task.
A task scheduler slices the time and dynamically allocates time resources to
these tasks. The task scheduler predicts the impact of each task on the end-to-end
execution time and prioritizes the one that can reduce the execution time the most.
For each subgraph, we use the compute declaration in :code:`tvm/python/topi` to
get the computational DAG in the tensor expression form.
We then use the auto-scheduler to construct a search space of this DAG and search
for good schedules (low-level optimizations).
Different from the template-based :ref:`autotvm <tutorials-autotvm-sec>` which relies on
manual templates to define the search space, the auto-scheduler does not require any
schedule templates. In other words, the auto-scheduler only uses the compute declarations
in :code:`tvm/python/topi` and does not use existing schedule templates.
Note that this tutorial will not run on Windows or recent versions of macOS. To
get it to run, you will need to wrap the body of this tutorial in a :code:`if
__name__ == "__main__":` block.
"""
# sphinx_gallery_start_ignore
from tvm import testing
testing.utils.install_request_hook(depth=3)
# sphinx_gallery_end_ignore
import numpy as np
import tvm
from tvm import relay, auto_scheduler
import tvm.relay.testing
from tvm.contrib import graph_executor
#################################################################
# Define a Network
# ----------------
# First, we need to define the network with relay frontend API.
# We can load some pre-defined network from :code:`tvm.relay.testing`.
# We can also load models from MXNet, ONNX, PyTorch, and TensorFlow
# (see :ref:`front end tutorials<tutorial-frontend>`).
#
# For convolutional neural networks, although auto-scheduler can work correctly
# with any layout, we found the best performance is typically achieved with NHWC layout.
# We also implemented more optimizations for NHWC layout with the auto-scheduler.
# So it is recommended to convert your models to NHWC layout to use the auto-scheduler.
# You can use :ref:`ConvertLayout <convert-layout-usage>` pass to do the layout conversion in TVM.
def get_network(name, batch_size, layout="NHWC", dtype="float32"):
"""Get the symbol definition and random weight of a network"""
# auto-scheduler prefers NHWC layout
if layout == "NHWC":
image_shape = (224, 224, 3)
elif layout == "NCHW":
image_shape = (3, 224, 224)
else:
raise ValueError("Invalid layout: " + layout)
input_shape = (batch_size,) + image_shape
output_shape = (batch_size, 1000)
if name.startswith("resnet-"):
n_layer = int(name.split("-")[1])
mod, params = relay.testing.resnet.get_workload(
num_layers=n_layer,
batch_size=batch_size,
layout=layout,
dtype=dtype,
image_shape=image_shape,
)
elif name.startswith("resnet3d-"):
n_layer = int(name.split("-")[1])
mod, params = relay.testing.resnet.get_workload(
num_layers=n_layer,
batch_size=batch_size,
layout=layout,
dtype=dtype,
image_shape=image_shape,
)
elif name == "mobilenet":
mod, params = relay.testing.mobilenet.get_workload(
batch_size=batch_size, layout=layout, dtype=dtype, image_shape=image_shape
)
elif name == "squeezenet_v1.1":
assert layout == "NCHW", "squeezenet_v1.1 only supports NCHW layout"
mod, params = relay.testing.squeezenet.get_workload(
version="1.1",
batch_size=batch_size,
dtype=dtype,
image_shape=image_shape,
)
elif name == "inception_v3":
input_shape = (batch_size, 3, 299, 299) if layout == "NCHW" else (batch_size, 299, 299, 3)
mod, params = relay.testing.inception_v3.get_workload(batch_size=batch_size, dtype=dtype)
elif name == "mxnet":
# an example for mxnet model
from mxnet.gluon.model_zoo.vision import get_model
assert layout == "NCHW"
block = get_model("resnet18_v1", pretrained=True)
mod, params = relay.frontend.from_mxnet(block, shape={"data": input_shape}, dtype=dtype)
net = mod["main"]
net = relay.Function(
net.params, relay.nn.softmax(net.body), None, net.type_params, net.attrs
)
mod = tvm.IRModule.from_expr(net)
    else:
        raise ValueError("Network not found.")
    return mod, params, input_shape, output_shape
# Define the neural network and compilation target
network = "resnet-18"
batch_size = 1
layout = "NHWC"
target = tvm.target.Target("cuda")
dtype = "float32"
log_file = "%s-%s-B%d-%s.json" % (network, layout, batch_size, target.kind.name)
#################################################################
# Extract Search Tasks
# --------------------
# Next, we extract the search tasks and their weights from a network.
# The weight of a task is the number of appearances of the task's subgraph
# in the whole network.
# By using the weight, we can approximate the end-to-end latency of the network
# as :code:`sum(latency[t] * weight[t])`, where :code:`latency[t]` is the
# latency of a task and :code:`weight[t]` is the weight of the task.
# The task scheduler will just optimize this objective.
# Extract tasks from the network
print("Extract tasks...")
mod, params, input_shape, output_shape = get_network(network, batch_size, layout, dtype=dtype)
tasks, task_weights = auto_scheduler.extract_tasks(mod["main"], params, target)
for idx, task in enumerate(tasks):
print("========== Task %d (workload key: %s) ==========" % (idx, task.workload_key))
print(task.compute_dag)
#################################################################
# Begin Tuning
# ------------
# Now, we set some options for tuning and launch the search tasks
#
# * :code:`measure_ctx` launches a different process for measurement to
# provide isolation. It can protect the main process from GPU crashes
# during measurement and avoid other runtime conflicts.
# * :code:`min_repeat_ms` defines the minimum duration of one "repeat" in every measurement.
# This can warm up the GPU, which is necessary to get accurate measurement results.
# Typically, we recommend a value >= 300 ms.
# * :code:`num_measure_trials` is the number of measurement trials we can use during the tuning.
# You can set it to a small number (e.g., 200) for a fast demonstrative run.
# In practice, we recommend setting it around :code:`900 * len(tasks)`,
# which is typically enough for the search to converge.
# For example, there are 24 tasks in resnet-18, so we can set it as 20000.
# You can adjust this parameter according to your time budget.
# * In addition, we use :code:`RecordToFile` to dump measurement records into a log file,
# The measurement records can be used to query the history best, resume the search,
# and do more analyses later.
# * see :any:`auto_scheduler.TuningOptions`,
# :any:`auto_scheduler.LocalRPCMeasureContext` for more parameters.
#
def run_tuning():
print("Begin tuning...")
measure_ctx = auto_scheduler.LocalRPCMeasureContext(repeat=1, min_repeat_ms=300, timeout=10)
tuner = auto_scheduler.TaskScheduler(tasks, task_weights)
tune_option = auto_scheduler.TuningOptions(
num_measure_trials=200, # change this to 20000 to achieve the best performance
runner=measure_ctx.runner,
measure_callbacks=[auto_scheduler.RecordToFile(log_file)],
)
tuner.tune(tune_option)
# We do not run the tuning in our webpage server since it takes too long.
# Uncomment the following line to run it by yourself.
# run_tuning()
######################################################################
# .. note:: Explain the printed information during tuning
#
# During the tuning, a lot of information will be printed on the console.
# They are used for debugging purposes. The most important info is the output
# of the task scheduler. The following table is a sample output.
#
# .. code-block:: c
#
# ----------------------------------------------------------------------
# ------------------------------ [ Task Scheduler ]
# ----------------------------------------------------------------------
# | ID | Latency (ms) | Speed (GFLOPS) | Trials |
# -------------------------------------------------
# | 0 | 0.005 | 0.88 | 64 |
# | 1 | 0.010 | 99.10 | 64 |
# | 2 | 0.006 | 0.00 | 64 |
# | 3 | 0.145 | 979.78 | 384 |
# | 4 | 0.130 | 1097.02 | 384 |
# | 5 | 0.143 | 992.69 | 384 |
# | 6 | 0.076 | 1526.86 | 192 |
# | 7 | 0.115 | 999.44 | 320 |
# | 8 | 0.079 | 1449.39 | 320 |
# | 9 | 0.122 | 938.73 | 384 |
# | 10 | 0.063 | 1832.98 | 192 |
# | 11 | 0.072 | 1763.62 | 256 |
# | 12 | 0.062 | 2036.40 | 192 |
# | 13 | 0.068 | 1874.44 | 192 |
# | 14 | 0.049 | 2346.50 | 128 |
# | 15 | 0.076 | 1694.31 | 256 |
# | 16 | 0.067 | 1933.30 | 448 |
# | 17 | 0.076 | 1680.90 | 256 |
# | 18 | 0.022 | 98.43 | 64 |
# | 19 | 0.076 | 3112.55 | 192 |
# | 20 | 0.013 | 2026.44 | 64 |
# | 21 | 0.011 | 1136.69 | 64 |
# | 22 | 0.013 | 992.47 | 64 |
# | 23 | 0.020 | 627.56 | 64 |
# -------------------------------------------------
# Estimated total latency: 1.587 ms Trials: 4992 Used time : 13296 s Next ID: 3
#
# This table lists the latency and (estimated) speed of all tasks.
# It also lists the allocation of measurement trials for all tasks.
# The last line prints the total weighted latency of these tasks,
# which can be a rough estimation of the end-to-end execution time
# of the network.
# The last line also prints the total number of measurement trials,
# total time spent on auto-tuning and the id of the next task to tune.
#
# There will also be some "tvm::Error"s and CUDA errors, because the
# auto-scheduler will try some invalid schedules.
# You can safely ignore them if the tuning can continue, because these
# errors are isolated from the main process.
#
######################################################################
# .. note:: Terminate the tuning earlier
#
# You can terminate the tuning earlier by forcibly killing this process.
# As long as you get at least one valid schedule for each task in the log file,
# you should be able to do the compilation (the section below).
#
#################################################################
# Compile and Evaluate
# --------------------
# After auto-tuning, we can compile the network with the best schedules we found.
# All measurement records are dumped into the log file during auto-tuning,
# so we can read the log file and load the best schedules.
# Compile with the history best
print("Compile...")
with auto_scheduler.ApplyHistoryBest(log_file):
with tvm.transform.PassContext(opt_level=3, config={"relay.backend.use_auto_scheduler": True}):
lib = relay.build(mod, target=target, params=params)
# Create graph executor
dev = tvm.device(str(target), 0)
module = graph_executor.GraphModule(lib["default"](dev))
data_tvm = tvm.nd.array((np.random.uniform(size=input_shape)).astype(dtype))
module.set_input("data", data_tvm)
# Evaluate
print("Evaluate inference time cost...")
print(module.benchmark(dev, repeat=3, min_repeat_ms=500))
#################################################################
# Other Tips
# ----------
# 1. During the tuning, the auto-scheduler needs to compile many programs and
# extract features from them. This part is CPU-intensive,
# so a high-performance CPU with many cores is recommended for faster search.
# 2. You can use :code:`python3 -m tvm.auto_scheduler.measure_record --mode distill -i log.json`
# to distill the large log file and only save the best useful records.
# 3. You can resume a search from the previous log file. You just need to
# add a new argument :code:`load_log_file` when creating the task scheduler
# in function :code:`run_tuning`. Say,
# :code:`tuner = auto_scheduler.TaskScheduler(tasks, task_weights, load_log_file=log_file)`
# 4. If you have multiple target GPUs, you can use all of them for measurements to
# parallelize the measurements. Check this :ref:`section <tutorials-autotvm-scale-up-rpc-tracker>`
# to learn how to use the RPC Tracker and RPC Server.
# To use the RPC Tracker in auto-scheduler, replace the runner in :code:`TuningOptions`
# with :any:`auto_scheduler.RPCRunner`, as in the sketch below.
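#
# As a hedged sketch (with :code:`"my-gpu"` as a placeholder device key
# registered in your RPC tracker), the runner replacement could look like:
#
# .. code-block:: python
#
#     tune_option = auto_scheduler.TuningOptions(
#         num_measure_trials=200,
#         runner=auto_scheduler.RPCRunner(
#             "my-gpu",  # placeholder device key registered in the RPC tracker
#             host="127.0.0.1",
#             port=9190,
#             repeat=3,
#             timeout=50,
#         ),
#         measure_callbacks=[auto_scheduler.RecordToFile(log_file)],
#     )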
| https://github.com/zk-ml/tachikoma |
gallery/how_to/tune_with_autoscheduler/tune_network_mali.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Auto-scheduling a Neural Network for Mali GPU
=============================================
**Author**: `Zhao Wu <https://github.com/FrozenGene>`_
Auto-tuning for specific devices and workloads is critical for getting the
best performance. This is a tutorial on how to tune a whole neural
network for Mali GPU with the auto-scheduler.
To auto-tune a neural network, we partition the network into small subgraphs and
tune them independently. Each subgraph is treated as one search task.
A task scheduler slices the time and dynamically allocates time resources to
these tasks. The task scheduler predicts the impact of each task on the end-to-end
execution time and prioritizes the one that can reduce the execution time the most.
For each subgraph, we use the compute declaration in :code:`tvm/python/topi` to
get the computational DAG in the tensor expression form.
We then use the auto-scheduler to construct a search space of this DAG and search
for good schedules (low-level optimizations).
Different from the template-based :ref:`autotvm <tutorials-autotvm-sec>` which relies on
manual templates to define the search space, the auto-scheduler does not require any
schedule templates. In other words, the auto-scheduler only uses the compute declarations
in :code:`tvm/python/topi` and does not use existing schedule templates.
Note that this tutorial will not run on Windows or recent versions of macOS. To
get it to run, you will need to wrap the body of this tutorial in a :code:`if
__name__ == "__main__":` block.
"""
# sphinx_gallery_start_ignore
from tvm import testing
testing.utils.install_request_hook(depth=3)
# sphinx_gallery_end_ignore
import numpy as np
import tvm
from tvm import relay, auto_scheduler
import tvm.relay.testing
from tvm.contrib import graph_executor
import os
#################################################################
# Define a Network
# ----------------
# First, we need to define the network with relay frontend API.
# We can load some pre-defined network from :code:`tvm.relay.testing`.
# We can also load models from MXNet, ONNX, PyTorch, and TensorFlow
# (see :ref:`front end tutorials<tutorial-frontend>`).
#
# For convolutional neural networks, although auto-scheduler can work correctly
# with any layout, we found the best performance is typically achieved with NHWC layout.
# We also implemented more optimizations for NHWC layout with the auto-scheduler.
# So it is recommended to convert your models to NHWC layout to use the auto-scheduler.
# You can use :ref:`ConvertLayout <convert-layout-usage>` pass to do the layout conversion in TVM.
def get_network(name, batch_size, layout="NHWC", dtype="float32"):
"""Get the symbol definition and random weight of a network"""
# auto-scheduler prefers NHWC layout
if layout == "NHWC":
image_shape = (224, 224, 3)
elif layout == "NCHW":
image_shape = (3, 224, 224)
else:
raise ValueError("Invalid layout: " + layout)
input_shape = (batch_size,) + image_shape
output_shape = (batch_size, 1000)
if name.startswith("resnet-"):
n_layer = int(name.split("-")[1])
mod, params = relay.testing.resnet.get_workload(
num_layers=n_layer,
batch_size=batch_size,
layout=layout,
dtype=dtype,
image_shape=image_shape,
)
elif name.startswith("resnet3d-"):
n_layer = int(name.split("-")[1])
mod, params = relay.testing.resnet.get_workload(
num_layers=n_layer,
batch_size=batch_size,
layout=layout,
dtype=dtype,
image_shape=image_shape,
)
elif name == "mobilenet":
mod, params = relay.testing.mobilenet.get_workload(
batch_size=batch_size, layout=layout, dtype=dtype, image_shape=image_shape
)
elif name == "squeezenet_v1.1":
assert layout == "NCHW", "squeezenet_v1.1 only supports NCHW layout"
mod, params = relay.testing.squeezenet.get_workload(
version="1.1",
batch_size=batch_size,
dtype=dtype,
image_shape=image_shape,
)
elif name == "inception_v3":
input_shape = (batch_size, 3, 299, 299) if layout == "NCHW" else (batch_size, 299, 299, 3)
mod, params = relay.testing.inception_v3.get_workload(batch_size=batch_size, dtype=dtype)
elif name == "mxnet":
# an example for mxnet model
from mxnet.gluon.model_zoo.vision import get_model
assert layout == "NCHW"
block = get_model("resnet50_v1", pretrained=True)
mod, params = relay.frontend.from_mxnet(block, shape={"data": input_shape}, dtype=dtype)
net = mod["main"]
net = relay.Function(
net.params, relay.nn.softmax(net.body), None, net.type_params, net.attrs
)
mod = tvm.IRModule.from_expr(net)
    else:
        raise ValueError("Network not found.")
    return mod, params, input_shape, output_shape
# Define the neural network and compilation target.
network = "mobilenet"
batch_size = 1
layout = "NHWC"
# Set this to True if you use ndk tools for cross compiling
use_ndk = True
# Path to cross compiler
os.environ["TVM_NDK_CC"] = "/usr/bin/aarch64-linux-gnu-g++"
target = tvm.target.Target("opencl -device=mali", host="llvm -mtriple=aarch64-linux-gnu")
dtype = "float32"
log_file = "%s-%s-B%d-%s.json" % (network, layout, batch_size, target.kind.name)
#################################################################
# Start an RPC Tracker and Register Devices to the Tracker
# --------------------------------------------------------
# Please refer to the "Start RPC Tracker" and "Register Devices to RPC Tracker" sections
# in this :ref:`tutorial <tutorials-autotvm-start-rpc-tracker>` to start an RPC tracker
# and register devices to the tracker.
# Replace this with the device key in your tracker
device_key = "rk3399"
#################################################################
# Extract Search Tasks
# --------------------
# Next, we extract the search tasks and their weights from a network.
# The weight of a task is the number of appearances of the task's subgraph
# in the whole network.
# By using the weight, we can approximate the end-to-end latency of the network
# as :code:`sum(latency[t] * weight[t])`, where :code:`latency[t]` is the
# latency of a task and :code:`weight[t]` is the weight of the task.
# The task scheduler will just optimize this objective.
# Extract tasks from the network
print("Extract tasks...")
mod, params, input_shape, output_shape = get_network(network, batch_size, layout, dtype=dtype)
tasks, task_weights = auto_scheduler.extract_tasks(mod["main"], params, target)
for idx, task in enumerate(tasks):
print("========== Task %d (workload key: %s) ==========" % (idx, task.workload_key))
print(task.compute_dag)
######################################################################
# .. note:: How to get the hardware parameters from remote device
#
# .. code-block:: python
#
# from tvm.auto_scheduler.utils import request_remote
# remote = request_remote(device_key, "127.0.0.1", 9190)
# dev = remote.cl()
# max_shared_memory_per_block = dev.max_shared_memory_per_block
# # There is no explicit local memory limitation
# # so we can use INT32_MAX to disable the check on local_memory.
# max_local_memory_per_block = 2147483647 # INT32_MAX
# max_threads_per_block = dev.max_threads_per_block
# max_vthread_extent = int(dev.warp_size / 4) if int(dev.warp_size / 4) > 1 else dev.warp_size
# warp_size = dev.warp_size
# hardware_params = auto_scheduler.HardwareParams(-1, 16, 64,
# max_shared_memory_per_block, max_local_memory_per_block,
# max_threads_per_block, max_vthread_extent, warp_size)
#
# Now you can pass it to the search task and tune
#
# .. code-block:: python
#
# tasks, task_weights = auto_scheduler.extract_tasks(
# mod["main"], params, target, hardware_params = hardware_params
# )
#
#################################################################
# Tuning and Evaluation
# ---------------------
# Now, we set some options for tuning, launch the search tasks and evaluate the end-to-end performance
#
# * :code:`num_measure_trials` is the number of measurement trials we can use during the tuning.
# You can set it to a small number (e.g., 200) for a fast demonstrative run.
# In practice, we recommend setting it around :code:`800 * len(tasks)`,
# which is typically enough for the search to converge.
# For example, there are 29 tasks in resnet-50, so we can set it as 20000.
# You can adjust this parameter according to your time budget.
# * In addition, we use :code:`RecordToFile` to dump measurement records into a log file,
# The measurement records can be used to query the history best, resume the search,
# and do more analyses later.
# * see :any:`auto_scheduler.TuningOptions`,
# :any:`auto_scheduler.LocalRunner` for more parameters.
#
def tune_and_evaluate():
print("Begin tuning...")
tuner = auto_scheduler.TaskScheduler(tasks, task_weights)
tune_option = auto_scheduler.TuningOptions(
num_measure_trials=200, # change this to 20000 to achieve the best performance
builder=auto_scheduler.LocalBuilder(build_func="ndk" if use_ndk else "default"),
runner=auto_scheduler.RPCRunner(
device_key, host="127.0.0.1", port=9190, repeat=3, timeout=50
),
measure_callbacks=[auto_scheduler.RecordToFile(log_file)],
)
tuner.tune(tune_option)
# Compile the whole network
print("Compile...")
with auto_scheduler.ApplyHistoryBest(log_file):
with tvm.transform.PassContext(
opt_level=3, config={"relay.backend.use_auto_scheduler": True}
):
lib = relay.build(mod, target, params=params)
# Create graph executor
print("=============== Request Remote ===============")
from tvm.auto_scheduler.utils import request_remote
remote = request_remote(device_key, "127.0.0.1", 9190)
dev = remote.cl()
from tvm.contrib import utils, ndk
temp = utils.tempdir()
filename = "deploy_lib.so"
path_lib = temp.relpath(filename)
lib.export_library(path_lib, ndk.create_shared)
remote.upload(path_lib)
loaded_lib = remote.load_module(filename)
module = graph_executor.GraphModule(loaded_lib["default"](dev))
data = (np.random.uniform(size=input_shape)).astype(dtype)
data_tvm = tvm.nd.array(data)
module.set_input("data", data_tvm)
# Evaluate
print("Evaluate inference time cost...")
print(module.benchmark(dev, repeat=3, min_repeat_ms=500))
# We do not run the tuning in our webpage server since the server doesn't have a Mali GPU.
# Uncomment the following line to run it by yourself.
# tune_and_evaluate()
######################################################################
# .. note:: Explain the printed information during tuning
#
# During the tuning, a lot of information will be printed on the console.
# They are used for debugging purposes. The most important info is the output
# of the task scheduler. The following table is a sample output.
#
# .. code-block:: c
#
# ----------------------------------------------------------------------
# ------------------------------ [ Task Scheduler ]
# ----------------------------------------------------------------------
# | ID | Latency (ms) | Speed (GFLOPS) | Trials |
# -------------------------------------------------
# | 0 | 0.010 | 0.40 | 64 |
# | 1 | 0.087 | 47.19 | 64 |
# | 2 | 0.008 | -0.00 | 64 |
# | 3 | 0.177 | 582.07 | 64 |
# | 4 | 0.268 | 862.37 | 256 |
# | 5 | 0.166 | 621.13 | 128 |
# | 6 | 0.170 | 605.10 | 128 |
# | 7 | 0.128 | 403.20 | 64 |
# | 8 | 0.189 | 545.71 | 64 |
# | 9 | 0.231 | 1001.01 | 448 |
# | 10 | 0.155 | 664.80 | 256 |
# | 11 | 0.155 | 662.86 | 256 |
# | 12 | 0.119 | 434.08 | 64 |
# | 13 | 0.199 | 522.13 | 64 |
# | 14 | 0.235 | 986.56 | 320 |
# | 15 | 0.149 | 689.13 | 128 |
# | 16 | 0.155 | 664.80 | 192 |
# | 17 | 0.151 | 340.64 | 64 |
# | 18 | 0.176 | 597.55 | 128 |
# | 19 | 0.220 | 1054.37 | 192 |
# | 20 | 0.150 | 686.01 | 128 |
# | 21 | 0.159 | 650.88 | 128 |
# | 22 | 0.073 | 358.19 | 64 |
# | 23 | 0.031 | 70.63 | 64 |
# | 24 | 0.251 | 947.73 | 128 |
# | 25 | 0.157 | 652.47 | 128 |
# | 26 | 0.215 | 954.84 | 128 |
# | 27 | 0.237 | 868.92 | 128 |
# | 28 | 0.266 | 774.06 | 128 |
# -------------------------------------------------
# Estimated total latency: 10.016 ms Trials: 3992 Used time : 1131 s Next ID: 15
#
# This table lists the latency and (estimated) speed of all tasks.
# It also lists the allocation of measurement trials for all tasks.
# The last line prints the total weighted latency of these tasks,
# which can be a rough estimation of the end-to-end execution time
# of the network.
# The last line also prints the total number of measurement trials,
# total time spent on auto-tuning and the id of the next task to tune.
#
# There will also be some "tvm::Error" messages, because the
# auto-scheduler will try some invalid schedules.
# You can safely ignore them if the tuning can continue, because these
# errors are isolated from the main process.
#
######################################################################
# .. note:: Terminate the tuning earlier
#
# You can terminate the tuning earlier by forcibly killing this process.
# As long as you get at least one valid schedule for each task in the log file,
# you should be able to do the compilation (the section below).
#
#################################################################
# Other Tips
# ----------
# 1. During the tuning, the auto-scheduler needs to compile many programs and
# extract features from them. This part is CPU-intensive,
# so a high-performance CPU with many cores is recommended for faster search.
# 2. You can use :code:`python3 -m tvm.auto_scheduler.measure_record --mode distill -i log.json`
# to distill the large log file and only save the best useful records.
# 3. You can resume a search from the previous log file. You just need to
# add a new argument :code:`load_log_file` when creating the task scheduler
# in the function :code:`tune_and_evaluate`. For example,
# :code:`tuner = auto_scheduler.TaskScheduler(tasks, task_weights, load_log_file=log_file)`
# 4. If you have multiple target GPUs, you can use all of them for measurements to
# parallelize the measurements. Check this :ref:`section <tutorials-autotvm-scale-up-rpc-tracker>`
# to learn how to use the RPC Tracker and RPC Server.
# To use the RPC Tracker in auto-scheduler, replace the runner in :code:`TuningOptions`
# with :any:`auto_scheduler.RPCRunner`.
| https://github.com/zk-ml/tachikoma |
gallery/how_to/tune_with_autoscheduler/tune_network_x86.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Auto-scheduling a Neural Network for x86 CPU
============================================
**Author**: `Lianmin Zheng <https://github.com/merrymercy>`_, \
`Chengfan Jia <https://github.com/jcf94/>`_
Auto-tuning for specific devices and workloads is critical for getting the
best performance. This is a tutorial on how to tune a whole neural
network for x86 CPU with the auto-scheduler.
To auto-tune a neural network, we partition the network into small subgraphs and
tune them independently. Each subgraph is treated as one search task.
A task scheduler slices the time and dynamically allocates time resources to
these tasks. The task scheduler predicts the impact of each task on the end-to-end
execution time and prioritizes the one that can reduce the execution time the most.
For each subgraph, we use the compute declaration in :code:`tvm/python/topi` to
get the computational DAG in the tensor expression form.
We then use the auto-scheduler to construct a search space of this DAG and search
for good schedules (low-level optimizations).
Different from the template-based :ref:`autotvm <tutorials-autotvm-sec>` which relies on
manual templates to define the search space, the auto-scheduler does not require any
schedule templates. In other words, the auto-scheduler only uses the compute declarations
in :code:`tvm/python/topi` and does not use existing schedule templates.
Note that this tutorial will not run on Windows or recent versions of macOS. To
get it to run, you will need to wrap the body of this tutorial in a :code:`if
__name__ == "__main__":` block.
"""
# sphinx_gallery_start_ignore
from tvm import testing
testing.utils.install_request_hook(depth=3)
# sphinx_gallery_end_ignore
import numpy as np
import tvm
from tvm import relay, auto_scheduler
from tvm.relay import data_dep_optimization as ddo
import tvm.relay.testing
from tvm.contrib import graph_executor
#################################################################
# Define a Network
# ----------------
# First, we need to define the network with relay frontend API.
# We can load some pre-defined network from :code:`tvm.relay.testing`.
# We can also load models from MXNet, ONNX, PyTorch, and TensorFlow
# (see :ref:`front end tutorials<tutorial-frontend>`).
#
# For convolutional neural networks, although auto-scheduler can work correctly
# with any layout, we found the best performance is typically achieved with NHWC layout.
# We also implemented more optimizations for NHWC layout with the auto-scheduler.
# So it is recommended to convert your models to NHWC layout to use the auto-scheduler.
# You can use :ref:`ConvertLayout <convert-layout-usage>` pass to do the layout conversion in TVM.
def get_network(name, batch_size, layout="NHWC", dtype="float32", use_sparse=False):
"""Get the symbol definition and random weight of a network"""
# auto-scheduler prefers NHWC layout
if layout == "NHWC":
image_shape = (224, 224, 3)
elif layout == "NCHW":
image_shape = (3, 224, 224)
else:
raise ValueError("Invalid layout: " + layout)
input_shape = (batch_size,) + image_shape
output_shape = (batch_size, 1000)
if name.startswith("resnet-"):
n_layer = int(name.split("-")[1])
mod, params = relay.testing.resnet.get_workload(
num_layers=n_layer,
batch_size=batch_size,
layout=layout,
dtype=dtype,
image_shape=image_shape,
)
elif name.startswith("resnet3d-"):
n_layer = int(name.split("-")[1])
mod, params = relay.testing.resnet.get_workload(
num_layers=n_layer,
batch_size=batch_size,
layout=layout,
dtype=dtype,
image_shape=image_shape,
)
elif name == "mobilenet":
mod, params = relay.testing.mobilenet.get_workload(
batch_size=batch_size, layout=layout, dtype=dtype, image_shape=image_shape
)
elif name == "squeezenet_v1.1":
assert layout == "NCHW", "squeezenet_v1.1 only supports NCHW layout"
mod, params = relay.testing.squeezenet.get_workload(
version="1.1",
batch_size=batch_size,
dtype=dtype,
image_shape=image_shape,
)
elif name == "inception_v3":
input_shape = (batch_size, 3, 299, 299) if layout == "NCHW" else (batch_size, 299, 299, 3)
mod, params = relay.testing.inception_v3.get_workload(batch_size=batch_size, dtype=dtype)
elif name == "mxnet":
# an example for mxnet model
from mxnet.gluon.model_zoo.vision import get_model
assert layout == "NCHW"
block = get_model("resnet50_v1", pretrained=True)
mod, params = relay.frontend.from_mxnet(block, shape={"data": input_shape}, dtype=dtype)
net = mod["main"]
net = relay.Function(
net.params, relay.nn.softmax(net.body), None, net.type_params, net.attrs
)
mod = tvm.IRModule.from_expr(net)
elif name == "mlp":
mod, params = relay.testing.mlp.get_workload(
batch_size=batch_size, dtype=dtype, image_shape=image_shape, num_classes=1000
)
else:
raise ValueError("Network not found.")
if use_sparse:
from tvm.topi.sparse.utils import convert_model_dense_to_sparse
mod, params = convert_model_dense_to_sparse(mod, params, bs_r=4, random_params=True)
return mod, params, input_shape, output_shape
# Define the neural network and compilation target.
# If the target machine supports avx512 instructions, replace the
# "llvm -mcpu=core-avx2" with "llvm -mcpu=skylake-avx512"
network = "resnet-50"
use_sparse = False
batch_size = 1
layout = "NHWC"
target = tvm.target.Target("llvm -mcpu=core-avx2")
dtype = "float32"
log_file = "%s-%s-B%d-%s.json" % (network, layout, batch_size, target.kind.name)
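######################################################################
# .. note:: Picking the target from the host CPU flags
#
#   A small sketch (Linux-only, an assumption for illustration rather than
#   part of the original tutorial) that selects the avx512 target string when
#   the host CPU supports it:
#
#   .. code-block:: python
#
#       import tvm
#
#       def pick_x86_target():
#           # Read the CPU flags from /proc/cpuinfo and prefer avx512 when available.
#           try:
#               with open("/proc/cpuinfo") as f:
#                   flags = f.read()
#           except OSError:
#               flags = ""
#           mcpu = "skylake-avx512" if "avx512f" in flags else "core-avx2"
#           return tvm.target.Target("llvm -mcpu=%s" % mcpu)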
#################################################################
# Extract Search Tasks
# --------------------
# Next, we extract the search tasks and their weights from a network.
# The weight of a task is the number of appearances of the task's subgraph
# in the whole network.
# By using the weight, we can approximate the end-to-end latency of the network
# as :code:`sum(latency[t] * weight[t])`, where :code:`latency[t]` is the
# latency of a task and :code:`weight[t]` is the weight of the task.
# The task scheduler will just optimize this objective.
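######################################################################
# .. note:: The objective in plain Python
#
#   A tiny sketch of the weighted-sum objective with made-up numbers (real
#   per-task latencies only become available while tuning runs):
#
#   .. code-block:: python
#
#       latency = [0.010, 0.087, 0.268]  # per-task latency in ms (hypothetical)
#       weight = [1, 2, 4]               # how often each subgraph appears in the network
#       estimated_total = sum(l * w for l, w in zip(latency, weight))
#       print("estimated end-to-end latency: %.3f ms" % estimated_total)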
# Extract tasks from the network
print("Get model...")
mod, params, input_shape, output_shape = get_network(
network,
batch_size,
layout,
dtype=dtype,
use_sparse=use_sparse,
)
print("Extract tasks...")
tasks, task_weights = auto_scheduler.extract_tasks(mod["main"], params, target)
for idx, task in enumerate(tasks):
print("========== Task %d (workload key: %s) ==========" % (idx, task.workload_key))
print(task.compute_dag)
#################################################################
# Begin Tuning
# ------------
# Now, we set some options for tuning and launch the search tasks
#
# * :code:`num_measure_trials` is the number of measurement trials we can use during the tuning.
# You can set it to a small number (e.g., 200) for a fast demonstrative run.
# In practice, we recommend setting it around :code:`800 * len(tasks)`,
# which is typically enough for the search to converge.
# For example, there are 29 tasks in resnet-50, so we can set it as 20000.
# You can adjust this parameter according to your time budget.
# * In addition, we use :code:`RecordToFile` to dump measurement records into a log file.
# The measurement records can be used to query the history best, resume the search,
# and do more analyses later.
# * See :any:`auto_scheduler.TuningOptions` and
# :any:`auto_scheduler.LocalRunner` for more parameters.
#
def run_tuning():
print("Begin tuning...")
tuner = auto_scheduler.TaskScheduler(tasks, task_weights)
tune_option = auto_scheduler.TuningOptions(
num_measure_trials=200, # change this to 20000 to achieve the best performance
runner=auto_scheduler.LocalRunner(repeat=10, enable_cpu_cache_flush=True),
measure_callbacks=[auto_scheduler.RecordToFile(log_file)],
)
if use_sparse:
from tvm.topi.sparse.utils import sparse_sketch_rules
search_policy = [
auto_scheduler.SketchPolicy(
task,
program_cost_model=auto_scheduler.XGBModel(),
init_search_callbacks=sparse_sketch_rules(),
)
for task in tasks
]
tuner.tune(tune_option, search_policy=search_policy)
else:
tuner.tune(tune_option)
# We do not run the tuning on our web server since it takes too long.
# Uncomment the following line to run it by yourself.
# run_tuning()
######################################################################
# .. note:: Explain the printed information during tuning
#
# During the tuning, a lot of information will be printed on the console.
# It is used for debugging purposes. The most important info is the output
# of the task scheduler. The following table is a sample output.
#
# .. code-block:: c
#
# ----------------------------------------------------------------------
# ------------------------------ [ Task Scheduler ]
# ----------------------------------------------------------------------
# | ID | Latency (ms) | Speed (GFLOPS) | Trials |
# -------------------------------------------------
# | 0 | 0.010 | 0.40 | 64 |
# | 1 | 0.087 | 47.19 | 64 |
# | 2 | 0.008 | -0.00 | 64 |
# | 3 | 0.177 | 582.07 | 64 |
# | 4 | 0.268 | 862.37 | 256 |
# | 5 | 0.166 | 621.13 | 128 |
# | 6 | 0.170 | 605.10 | 128 |
# | 7 | 0.128 | 403.20 | 64 |
# | 8 | 0.189 | 545.71 | 64 |
# | 9 | 0.231 | 1001.01 | 448 |
# | 10 | 0.155 | 664.80 | 256 |
# | 11 | 0.155 | 662.86 | 256 |
# | 12 | 0.119 | 434.08 | 64 |
# | 13 | 0.199 | 522.13 | 64 |
# | 14 | 0.235 | 986.56 | 320 |
# | 15 | 0.149 | 689.13 | 128 |
# | 16 | 0.155 | 664.80 | 192 |
# | 17 | 0.151 | 340.64 | 64 |
# | 18 | 0.176 | 597.55 | 128 |
# | 19 | 0.220 | 1054.37 | 192 |
# | 20 | 0.150 | 686.01 | 128 |
# | 21 | 0.159 | 650.88 | 128 |
# | 22 | 0.073 | 358.19 | 64 |
# | 23 | 0.031 | 70.63 | 64 |
# | 24 | 0.251 | 947.73 | 128 |
# | 25 | 0.157 | 652.47 | 128 |
# | 26 | 0.215 | 954.84 | 128 |
# | 27 | 0.237 | 868.92 | 128 |
# | 28 | 0.266 | 774.06 | 128 |
# -------------------------------------------------
# Estimated total latency: 10.016 ms Trials: 3992 Used time : 1131 s Next ID: 15
#
# This table lists the latency and (estimated) speed of all tasks.
# It also lists the allocation of measurement trials for all tasks.
# The last line prints the total weighted latency of these tasks,
# which can be a rough estimation of the end-to-end execution time
# of the network.
# It also prints the total number of measurement trials, the total time
# spent on auto-tuning, and the id of the next task to tune.
#
# There will also be some "tvm::Error" messages, because the
# auto-scheduler will try some invalid schedules.
# You can safely ignore them if the tuning can continue, because these
# errors are isolated from the main process.
#
######################################################################
# .. note:: Terminate the tuning early
#
# You can terminate the tuning early by forcibly killing this process.
# As long as you get at least one valid schedule for each task in the log file,
# you should be able to do the compilation (the section below).
#
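######################################################################
# .. note:: Checking the coverage of the log file
#
#   A sketch (it assumes the ``tasks`` and ``log_file`` variables defined
#   above) that counts how many tasks still lack a valid record:
#
#   .. code-block:: python
#
#       tuned_keys = set()
#       for inp, res in auto_scheduler.load_records(log_file):
#           if res.error_no == 0:  # 0 means the measurement succeeded
#               tuned_keys.add(inp.task.workload_key)
#       missing = [t for t in tasks if t.workload_key not in tuned_keys]
#       print("%d of %d tasks still have no valid schedule" % (len(missing), len(tasks)))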
#################################################################
# Compile and Evaluate
# --------------------
# After auto-tuning, we can compile the network with the best schedules we found.
# All measurement records are dumped into the log file during auto-tuning,
# so we can read the log file and load the best schedules.
# Compile with the history best
print("Compile...")
with auto_scheduler.ApplyHistoryBest(log_file):
with tvm.transform.PassContext(opt_level=3, config={"relay.backend.use_auto_scheduler": True}):
lib = relay.build(mod, target=target, params=params)
# Create graph executor
dev = tvm.device(str(target), 0)
module = graph_executor.GraphModule(lib["default"](dev))
data_tvm = tvm.nd.array((np.random.uniform(size=input_shape)).astype(dtype))
module.set_input("data", data_tvm)
# Evaluate
print("Evaluate inference time cost...")
print(module.benchmark(dev, repeat=3, min_repeat_ms=500))
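######################################################################
# .. note:: Inspecting the prediction
#
#   If you also want to look at the output itself, a short sketch using the
#   ``module`` created above (the input is random, so the class id is
#   meaningless):
#
#   .. code-block:: python
#
#       module.run()
#       out = module.get_output(0).numpy()
#       print("output shape:", out.shape, "argmax:", out.argmax())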
#################################################################
# Other Tips
# ----------
# 1. During the tuning, the auto-scheduler needs to compile many programs and
# extract features from them. This part is CPU-intensive,
# so a high-performance CPU with many cores is recommended for faster search.
# 2. You can use :code:`python3 -m tvm.auto_scheduler.measure_record --mode distill -i log.json`
# to distill the large log file and only save the best useful records.
# 3. You can resume a search from the previous log file. You just need to
# add a new argument :code:`load_log_file` when creating the task scheduler
# in the function :code:`run_tuning`. For example,
# :code:`tuner = auto_scheduler.TaskScheduler(tasks, task_weights, load_log_file=log_file)`
# 4. If you have multiple target CPUs, you can use all of them to parallelize
# the measurements. Check this :ref:`section <tutorials-autotvm-scale-up-rpc-tracker>`
# to learn how to use the RPC Tracker and RPC Server.
# To use the RPC Tracker in auto-scheduler, replace the runner in :code:`TuningOptions`
# with :any:`auto_scheduler.RPCRunner`.
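######################################################################
# .. note:: Tuning through an RPC tracker
#
#   A minimal sketch of swapping in :any:`auto_scheduler.RPCRunner`; the
#   tracker address and the device key ``"my-cpu"`` are hypothetical:
#
#   .. code-block:: python
#
#       tune_option = auto_scheduler.TuningOptions(
#           num_measure_trials=200,
#           runner=auto_scheduler.RPCRunner(
#               "my-cpu",          # device key registered on the tracker
#               host="127.0.0.1",
#               port=9190,
#               repeat=10,
#               enable_cpu_cache_flush=True,
#           ),
#           measure_callbacks=[auto_scheduler.RecordToFile(log_file)],
#       )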
| https://github.com/zk-ml/tachikoma |