file_path
stringlengths 7
180
| content
stringlengths 0
811k
| repo
stringclasses 11
values |
---|---|---|
tests/python/unittest/test_tir_transform_coproc_sync.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
# Register a TGlobalSymbol attribute on each coprocessor intrinsic so that
# the CoProcSync pass lowers them to named extern calls.
for _op_name, _global_symbol in (
    ("tir.cop.coproc_sync", "coproc_sync"),
    ("tir.cop.coproc_read_barrier", "coproc_readb"),
    ("tir.cop.coproc_write_barrier", "coproc_writeb"),
    ("tir.cop.coproc_dep_push", "coproc_dep_push"),
    ("tir.cop.coproc_dep_pop", "coproc_dep_pop"),
):
    tvm.ir.register_op_attr(_op_name, "TGlobalSymbol", _global_symbol)
def test_coproc_sync():
    """CoProcSync inserts read/write barriers and a sync around a coproc scope."""

    # Describe the special "global.cache" memory scope used by the allocation
    # below; CoProcSync queries this registry for barrier insertion.
    @tvm.register_func("tvm.info.mem.global.cache")
    def meminfo_cache():
        return tvm.ir.make_node(
            "MemoryInfo",
            unit_bits=8,
            max_simd_bits=32,
            max_num_bits=128,
            head_address=tvm.tir.call_extern("handle", "global_cache"),
        )

    ib = tvm.tir.ir_builder.create()
    n = te.size_var("n")
    cp = te.thread_axis((0, 1), "cop")
    A = ib.allocate("float32", 128, name="A", scope="global.cache")
    with ib.for_range(0, n, name="i") as i:
        A[i] = A[i] + 1
        with ib.for_range(0, 8, name="k") as k:
            with ib.for_range(0, 10, name="j") as j:
                # Only this innermost statement runs on the coprocessor.
                ib.scope_attr(cp, "coproc_scope", 1)
                A[j] = A[j + k * 10] + 2
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([n], stmt))
    stmt = tvm.tir.transform.CoProcSync()(mod)["main"].body
    body = stmt.body.body
    blist = tvm.tir.stmt_list(body)
    # Read barrier before the coproc region; args[3] == 80 elements covered.
    assert blist[1].value.op.same_as(tvm.ir.Op.get("tir.cop.coproc_read_barrier"))
    assert blist[1].value.args[3].value == 80
    # Sync then write barrier after the region; args[3] == 10 elements written.
    assert blist[-2].value.op.same_as(tvm.ir.Op.get("tir.cop.coproc_sync"))
    assert blist[-1].value.op.same_as(tvm.ir.Op.get("tir.cop.coproc_write_barrier"))
    assert blist[-1].value.args[3].value == 10
def test_coproc_sync2():
    """CoProcSync runs cleanly when coproc scopes mix with a virtual thread.

    Smoke test only: builds the IR and applies the pass without asserting
    on the transformed output.
    """
    ib = tvm.tir.ir_builder.create()
    n = te.size_var("n")
    cp = te.thread_axis((0, 1), "cop")
    ty = te.thread_axis("cthread")
    A = ib.allocate("float32", 128, name="A")
    ib.scope_attr(ty, "virtual_thread", 2)
    with ib.new_scope():
        ib.scope_attr(cp, "coproc_scope", 2)
        A[ty] = 0.0
    with ib.for_range(0, n, name="i") as i:
        # Two distinct coproc scopes inside the same loop iteration.
        with ib.new_scope():
            ib.scope_attr(cp, "coproc_scope", 1)
            A[ty] = 1.0
        with ib.new_scope():
            ib.scope_attr(cp, "coproc_scope", 2)
            A[ty] = 1.0
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([n], stmt))
    stmt = tvm.tir.transform.CoProcSync()(mod)["main"].body
def test_coproc_sync3():
    """CoProcSync inserts dep_push/dep_pop pairs between coproc scopes 2 and 3."""

    def __check_list(tvm_array, py_list):
        # Compare a TVM array of IntImm-like nodes against plain Python ints.
        for ti, li in zip(tvm_array, py_list):
            if ti.value != li:
                return False
        return True

    ib = tvm.tir.ir_builder.create()
    n = te.size_var("n")
    cp = te.thread_axis((0, 1), "cop")
    A = ib.allocate("float32", 128, name="A", scope="global.cache")
    with ib.for_range(0, n, name="i") as i:
        with ib.for_range(0, n, name="i") as j:
            with ib.new_scope():
                ib.scope_attr(cp, "coproc_scope", 1)
                A[i] = 1.0
            with ib.new_scope():
                ib.scope_attr(cp, "coproc_scope", 2)
                A[i] = 1.0
    # A final scope-3 region after the loop nest creates a cross-scope
    # dependency from scope 2 to scope 3.
    with ib.new_scope():
        ib.scope_attr(cp, "coproc_scope", 3)
        A[0] = 0.0
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([n], stmt))
    stmt = tvm.tir.transform.CoProcSync()(mod)["main"].body
    slist = tvm.tir.stmt_list(stmt[0].body)
    push_st = slist[2]
    slist = tvm.tir.stmt_list(slist[-1])
    pop_st = slist[0].body[0]
    # The push/pop pair carries the (from, to) queue ids as its arguments.
    assert push_st.value.op.same_as(tvm.ir.Op.get("tir.cop.coproc_dep_push"))
    assert __check_list(push_st.value.args, [2, 3])
    assert pop_st.value.op.same_as(tvm.ir.Op.get("tir.cop.coproc_dep_pop"))
    assert __check_list(pop_st.value.args, [2, 3])
if __name__ == "__main__":
    # Run every test in this file when executed as a script.
    for _test_fn in (test_coproc_sync, test_coproc_sync2, test_coproc_sync3):
        _test_fn()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_transform_decorate_device_scope.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
def test_decorate_device():
    """DecorateDeviceScope wraps the function body in a device_scope AttrStmt."""
    var_x = te.var("x")
    func = tvm.tir.PrimFunc([var_x], tvm.tir.Evaluate(var_x))
    module = tvm.IRModule.from_expr(func)
    decorated = tvm.tir.transform.DecorateDeviceScope()(module)
    body = decorated["main"].body
    assert body.attr_key == "device_scope"
# Allow running this test file directly as a script.
if __name__ == "__main__":
    test_decorate_device()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_transform_extract_constants.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
from tvm import tir
from tvm.script import tir as T
import tvm.testing
# Fixture module for ExtractPrimFuncConstants: constant1 and constant2 embed
# identical constant data (deduplicated by the pass), while constant3 embeds
# distinct data — so the module ends up with exactly two unique constants.
@tvm.script.ir_module
class Module4:
    @T.prim_func
    def constant1(a: T.handle) -> None:
        A = T.match_buffer(a, (10), "int32")
        B = T.alloc_buffer((10), "int32")
        K_data = T.allocate_const([1, 1, 1, 1, 1, 1, 1, 1, 1, 1], "int32", [10])
        K = T.buffer_decl(shape=(10), dtype="int32", data=K_data)
        for x in T.serial(0, 10):
            B[x] = A[x] + K[x]

    @T.prim_func
    def constant2(a: T.handle) -> None:
        A = T.match_buffer(a, (10), "int32")
        B = T.alloc_buffer((10), "int32")
        K_data = T.allocate_const([1, 1, 1, 1, 1, 1, 1, 1, 1, 1], "int32", [10])
        K = T.buffer_decl(shape=(10), dtype="int32", data=K_data)
        for x in T.serial(0, 10):
            B[x] = A[x] + K[x]

    @T.prim_func
    def constant3(a: T.handle) -> None:
        A = T.match_buffer(a, (10), "int32")
        B = T.alloc_buffer((10), "int32")
        K_data = T.allocate_const([1, 2, 3, 1, 1, 1, 1, 1, 1, 1], "int32", [10])
        K = T.buffer_decl(shape=(10), dtype="int32", data=K_data)
        for x in T.serial(0, 10):
            B[x] = A[x] + K[x]
def test_const_extraction():
    """ExtractPrimFuncConstants lifts and deduplicates function-level constants."""
    mod = tvm.tir.transform.ExtractPrimFuncConstants()(Module4)
    constants = mod.attrs["constants"]
    # constant1/constant2 share identical data, so only 2 unique arrays remain.
    assert len(constants) == 2

    def _visit(stmt):
        # Each AllocateConst must reference back into the module constant pool
        # via its irmod_storage_idx, with matching data.
        if isinstance(stmt, tvm.tir.AllocateConst):
            assert np.array_equal(stmt.data.numpy(), constants[int(stmt.irmod_storage_idx)].numpy())

    for n, f in mod.functions.items():
        tvm.tir.stmt_functor.post_order_visit(f.body, _visit)
    # The transformed module must still lower without error.
    tvm.lower(mod)
# Delegate to tvm.testing.main so pytest discovery/flags apply when run directly.
if __name__ == "__main__":
    tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_transform_flatten_buffer.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm import te
from tvm.script import tir as T
class BaseCompare(tvm.testing.CompareBeforeAfter):
    """Shared fixture: apply FlattenBuffer then Simplify to each `before`."""

    transform = tvm.transform.Sequential(
        [
            tvm.tir.transform.FlattenBuffer(),
            tvm.tir.transform.Simplify(),
        ]
    )
class TestElementwise(BaseCompare):
    """2-d buffers are flattened to 1-d"""

    def before(A: T.Buffer[(16, 16), "float32"], C: T.Buffer[(16, 16), "float32"]):
        for i in T.serial(0, 16):
            B_new = T.decl_buffer([1, 16], "float32")
            for j in T.serial(0, 16):
                B_new[0, j] = A[i, j] + 1.0
            for j in T.serial(0, 16):
                C[i, j] = B_new[0, j] * 2.0

    def expected(A: T.Buffer[256, "float32"], C: T.Buffer[256, "float32"]):
        T.preflattened_buffer(A, (16, 16), dtype="float32", data=A.data)
        T.preflattened_buffer(C, (16, 16), dtype="float32", data=C.data)
        for i in T.serial(0, 16):
            B_new_data = T.allocate([16], "float32", scope="global")
            B_new = T.buffer_decl([16], "float32", scope="global", data=B_new_data)
            for j in T.serial(0, 16):
                B_new[j] = A[((i * 16) + j)] + 1.0
            for j in T.serial(0, 16):
                C[((i * 16) + j)] = B_new[j] * 2.0
class TestElementwiseWithoutDeclBuffer(BaseCompare):
    """2-d buffers are flattened to 1-d

    Like TestElementwise, but the TIR doesn't have the DeclBuffer
    node.  The T.buffer_decl declaration applies only during the
    parsing the TVMScript, and doesn't occur in the TIR itself.  In
    this case, the allocation should be assumed to be targeting flat
    memory, and should be flattened to a 1-d allocation.
    """

    def before(A: T.Buffer[(16, 16), "float32"], C: T.Buffer[(16, 16), "float32"]):
        for i in T.serial(0, 16):
            B_new_data = T.allocate([1, 16], "float32", "global")
            B_new = T.buffer_decl([1, 16], "float32", data=B_new_data)
            for j in T.serial(0, 16):
                B_new[0, j] = A[i, j] + 1.0
            for j in T.serial(0, 16):
                C[i, j] = B_new[0, j] * 2.0

    def expected(A: T.Buffer[256, "float32"], C: T.Buffer[256, "float32"]):
        T.preflattened_buffer(A, (16, 16), dtype="float32", data=A.data)
        T.preflattened_buffer(C, (16, 16), dtype="float32", data=C.data)
        for i in T.serial(0, 16):
            B_new_data = T.allocate([16], "float32", "global")
            B_new = T.buffer_decl(16, "float32", data=B_new_data)
            for j in T.serial(0, 16):
                B_new[j] = A[((i * 16) + j)] + 1.0
            for j in T.serial(0, 16):
                C[((i * 16) + j)] = B_new[j] * 2.0
class TestGPU(BaseCompare):
    """Buffer flattening may have indices based on GPU thread vars"""

    def before(A: T.Buffer[(16, 16), "float32"], C: T.Buffer[(16, 16), "float32"]):
        i0 = T.env_thread("blockIdx.x")
        i1 = T.env_thread("threadIdx.x")
        i2 = T.env_thread("vthread")

        T.launch_thread(i0, 4)
        T.launch_thread(i1, 2)
        T.launch_thread(i2, 2)
        B = T.decl_buffer([1, 16], "float32", scope="local")
        for j in range(0, 16):
            B[0, j] = A[i0 * 4 + i1 * 2 + i2, j] + 1.0
        for j in range(0, 16):
            C[i0 * 4 + i1 * 2 + i2, j] = B[0, j] * 2.0

    def expected(A: T.Buffer[256, "float32"], C: T.Buffer[256, "float32"]):
        T.preflattened_buffer(A, (16, 16), dtype="float32", data=A.data)
        T.preflattened_buffer(C, (16, 16), dtype="float32", data=C.data)

        i0 = T.env_thread("blockIdx.x")
        i1 = T.env_thread("threadIdx.x")
        i2 = T.env_thread("vthread")

        T.launch_thread(i0, 4)
        T.launch_thread(i1, 2)
        T.launch_thread(i2, 2)
        B_data = T.allocate([16], "float32", scope="local")
        B = T.buffer_decl([16], "float32", scope="local", data=B_data)
        for j in range(0, 16):
            B[j] = A[i0 * 64 + i1 * 32 + i2 * 16 + j] + 1.0
        for j in range(0, 16):
            C[i0 * 64 + i1 * 32 + i2 * 16 + j] = B[j] * 2.0
class TestSymbolic(BaseCompare):
    """Dynamically-sized arrrays are flattened"""

    def before(a: T.handle, c: T.handle, n: T.int32, m: T.int32) -> None:
        A = T.match_buffer(a, (n, m), "float32")
        C = T.match_buffer(c, (n, m), "float32")

        for i in range(0, n):
            B = T.decl_buffer([m], "float32")
            for j in range(0, m):
                B[j] = A[i, j] + 1.0
            for j in range(0, m):
                C[i, j] = B[j] * 2.0

    def expected(a: T.handle, c: T.handle, n: T.int32, m: T.int32) -> None:
        # Flattened extents stay symbolic: n * m elements.
        A = T.match_buffer(a, n * m, "float32")
        C = T.match_buffer(c, n * m, "float32")
        T.preflattened_buffer(A, (n, m), "float32", data=A.data)
        T.preflattened_buffer(C, (n, m), "float32", data=C.data)

        for i in range(0, n):
            B_data = T.allocate([m], "float32", scope="global")
            B = T.buffer_decl([m], "float32", scope="global", data=B_data)
            for j in range(0, m):
                B[j] = A[i * m + j] + 1.0
            for j in range(0, m):
                C[i * m + j] = B[j] * 2.0
class TestMultiAlloc(BaseCompare):
    """If multiple allocations occur, all are flattened."""

    def before(A: T.Buffer[(4, 32), "float32"], D: T.Buffer[(4, 32), "float32"]):
        for i, j in T.grid(4, 32):
            B = T.decl_buffer((4, 32), "float32", scope="global")
            C = T.decl_buffer((4, 32), "float32", scope="global")
            B[i, j] = A[i, j] + 1.0
            C[i, j] = A[i, j] + B[i, j]
            D[i, j] = C[i, j] * 2.0

    def expected(A: T.Buffer[128, "float32"], D: T.Buffer[128, "float32"]):
        T.preflattened_buffer(A, (4, 32), "float32", data=A.data)
        T.preflattened_buffer(D, (4, 32), "float32", data=D.data)

        for i, j in T.grid(4, 32):
            B_data = T.allocate([128], "float32", scope="global")
            B = T.buffer_decl([128], "float32", scope="global", data=B_data)
            C_data = T.allocate([128], "float32", scope="global")
            C = T.buffer_decl([128], "float32", scope="global", data=C_data)
            B[i * 32 + j] = A[i * 32 + j] + 1.0
            C[i * 32 + j] = A[i * 32 + j] + B[i * 32 + j]
            D[i * 32 + j] = C[i * 32 + j] * 2.0
class TestStrided(BaseCompare):
    """Indices for flattened buffers use the specified striding."""

    def before(A: T.Buffer[(16, 16), "float32"], C: T.Buffer[(16, 16), "float32"]):
        for i0 in T.serial(4):
            # B_1 aliases B's data with row stride 17 (padded rows).
            B = T.decl_buffer([4, 17], "float32")
            B_1 = T.buffer_decl([4, 16], dtype="float32", data=B.data, strides=[17, 1])
            for i1, j in T.grid(4, 16):
                B_1[i1, j] = A[i0 * 4 + i1, j] + 1.0
            for i1, j in T.grid(4, 16):
                C[i0 * 4 + i1, j] = B_1[i1, j] * 2.0

    def expected(A: T.Buffer[256, "float32"], C: T.Buffer[256, "float32"]):
        T.preflattened_buffer(A, [16, 16], dtype="float32", data=A.data)
        T.preflattened_buffer(C, [16, 16], dtype="float32", data=C.data)
        for i0 in T.serial(0, 4):
            # 4 rows * stride 17 = 68 elements in the flattened allocation.
            B_new_data = T.allocate([68], "float32", scope="global")
            B_new = T.buffer_decl([68], "float32", scope="global", data=B_new_data)
            for i1 in T.serial(0, 4):
                for j in T.serial(0, 16):
                    B_new[i1 * 17 + j] = A[i0 * 64 + i1 * 16 + j] + 1.0
            for i1 in T.serial(0, 4):
                for j in T.serial(0, 16):
                    C[i0 * 64 + i1 * 16 + j] = B_new[i1 * 17 + j] * 2.0
class TestBoolean(BaseCompare):
    """Boolean buffers should be replaced by a backing int8 array"""

    def before(A: T.Buffer[10, "bool"], B: T.Buffer[10, "bool"]) -> None:
        for i0 in T.serial(10):
            B[i0] = A[i0]

    def expected(A: T.Buffer[10, "int8"], B: T.Buffer[10, "int8"]) -> None:
        T.preflattened_buffer(A, [10], dtype="bool", data=A.data)
        T.preflattened_buffer(B, [10], dtype="bool", data=B.data)
        # body
        for i0 in T.serial(10):
            B[i0] = T.cast(T.cast(A[i0], "bool"), "int8")
class TestLowerTE(BaseCompare):
    """FlattenBuffer should do nothing on TE-based functions"""

    def before(self):
        # Build a schedulable TE function; such functions are already
        # handled by the TE lowering flow, so the pass must not touch them.
        x = te.placeholder((1,))
        y = te.compute((1,), lambda i: x[i] + 2)
        s = te.create_schedule(y.op)
        mod = tvm.driver.build_module.schedule_to_module(s, [x, y])
        return mod["main"]

    expected = before
class TestFlattenInsideBlock(BaseCompare):
    """Flattening access inside a block flattens the accessed region."""

    def before():
        A = T.alloc_buffer([32, 32])
        for i, j in T.grid(32, 32):
            with T.block("block"):
                T.reads(A[i, j])
                T.evaluate(A[i, j])

    def expected():
        A = T.alloc_buffer([1024])
        for i, j in T.grid(32, 32):
            with T.block("block"):
                T.reads(A[i * 32 + j])
                T.evaluate(A[i * 32 + j])
class TestNoChangeTo2DPhysicalBuffer(BaseCompare):
    """Flattening preserves axis separators."""

    def before():
        # axis_separators=[1] marks this as a 2-d physical buffer,
        # which must survive flattening unchanged.
        A = T.alloc_buffer([32, 32], axis_separators=[1])
        for i, j in T.grid(32, 32):
            T.evaluate(A[i, j])

    expected = before
class TestFlattenAllocBufferWithAxisSeparators(BaseCompare):
    """Flattening preserves axis separators"""

    def before():
        A = T.alloc_buffer([2, 3, 5, 7, 11, 13], axis_separators=[3])
        for i0, i1, i2, i3, i4, i5 in T.grid(2, 3, 5, 7, 11, 13):
            T.evaluate(A[i0, i1, i2, i3, i4, i5])

    def expected():
        # Axes collapse on each side of the separator: 2*3*5=30, 7*11*13=1001.
        A = T.alloc_buffer([30, 1001], axis_separators=[1])
        for i0, i1, i2, i3, i4, i5 in T.grid(2, 3, 5, 7, 11, 13):
            T.evaluate(A[i0 * 15 + i1 * 5 + i2, i3 * 143 + i4 * 13 + i5])
class TestFlattenDeclBufferWithAxisSeparators(BaseCompare):
    """Flattening preserves axis separators

    Like TestFlattenAllocBufferWithAxisSeparators, but the allocations
    is done using Allocate/DeclBuffer, rather than through
    BlockNode::alloc_buffers.
    """

    def before():
        A = T.decl_buffer([2, 3, 5, 7, 11, 13], axis_separators=[3])
        for i0, i1, i2, i3, i4, i5 in T.grid(2, 3, 5, 7, 11, 13):
            T.evaluate(A[i0, i1, i2, i3, i4, i5])

    def expected():
        A_data = T.allocate([30, 1001], dtype="float32", scope="global")
        A = T.buffer_decl(
            [30, 1001], dtype="float32", scope="global", axis_separators=[1], data=A_data
        )
        for i0, i1, i2, i3, i4, i5 in T.grid(2, 3, 5, 7, 11, 13):
            T.evaluate(A[i0 * 15 + i1 * 5 + i2, i3 * 143 + i4 * 13 + i5])
def test_lower_2d_physical_memory():
    """Axis separators should preserve 2-d buffers through lowering.

    A catch-all test to ensure that defining axis_separators is
    sufficient to maintain non-flat buffer descriptions through all
    lowering steps.
    """

    # This test doesn't use CompareBeforeAfter, because the after step
    # is not currently expressible in TVMScript.  This test can be
    # re-written after https://github.com/apache/tvm/pull/12412.
    @T.prim_func
    def func():
        buf = T.alloc_buffer(
            [1, 1],
            dtype="int32",
            scope="global",
            axis_separators=[1],
        )
        buf[0, 0] = 0

    lowered = tvm.lower(func)["main"]
    assert isinstance(lowered.body, tvm.tir.Allocate)
    # Two extents [1, 1] prove the allocation stayed 2-d after lowering.
    assert list(lowered.body.extents) == [1, 1], (
        "Non-flat buffer allocations, "
        "marked by axis_separators, "
        "flattened to flat memory allocation."
    )
# Delegate to tvm.testing.main so pytest discovery/flags apply when run directly.
if __name__ == "__main__":
    tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_transform_helpers.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import tvm
from tvm.script import tir as T
import tvm.testing
def test_annotate_entry_func_single_primfunc():
    """AnnotateEntryFunc tags the only PrimFunc in a module as the entry."""

    @tvm.script.ir_module
    class MockModule:
        @T.prim_func
        def func1(A: T.Buffer[(16,), "float32"]):
            for i in T.serial(16):
                if i == 5:
                    if i == 5:
                        A[i] = 0.0

    mod = MockModule
    assert mod
    # Precondition: no attrs before the pass runs.
    assert mod["func1"].attrs is None
    after = tvm.tir.transform.AnnotateEntryFunc()(mod)
    assert (
        after["func1"].attrs
        and "tir.is_entry_func" in after["func1"].attrs
        and after["func1"].attrs["tir.is_entry_func"]
    )
# Test module
# Shared two-function fixture: having more than one PrimFunc makes the
# entry function ambiguous, which the tests below rely on.
@tvm.script.ir_module
class MockModule:
    @T.prim_func
    def func1(A: T.Buffer[(16,), "float32"]):
        for i in T.serial(16):
            if i == 5:
                if i == 5:
                    A[i] = 0.0

    @T.prim_func
    def func2(A: T.Buffer[(32,), "float32"]):
        for i in T.serial(32):
            if i == 15:
                if i == 15:
                    A[i] = 0.0
# AnnotateEntryFunc cannot pick an entry among multiple PrimFuncs,
# hence the expected failure.
@pytest.mark.xfail
def test_annotate_entry_func_multiple_primfunc():
    mod = MockModule
    assert mod
    assert mod["func1"].attrs is None
    assert mod["func2"].attrs is None
    # This should fail
    after = tvm.tir.transform.AnnotateEntryFunc()(mod)
def test_bind_target():
    """BindTarget attaches the given target to every PrimFunc in the module."""
    mod = MockModule
    assert mod

    target = tvm.target.Target("cuda")
    # Precondition: functions carry no attrs before the pass.
    assert mod["func1"].attrs is None
    assert mod["func2"].attrs is None
    after = tvm.tir.transform.BindTarget(target)(mod)
    assert after["func1"].attrs and "target" in after["func1"].attrs
    assert after["func1"].attrs["target"] == target
    assert after["func2"].attrs and "target" in after["func2"].attrs
    assert after["func2"].attrs["target"] == target
def test_filter_primfunc():
    """Filter keeps exactly the PrimFuncs whose predicate returns True."""
    mod = MockModule
    assert mod
    # Annotate each function for testing
    mod["func1"] = mod["func1"].with_attr("temp", "test1")
    mod["func2"] = mod["func2"].with_attr("temp", "test2")

    # Test condition that does not filter out anything
    def checker_filter_out_none(func: tvm.tir.PrimFunc):
        return (func.attrs is not None) and ("temp" in func.attrs)

    after = tvm.tir.transform.Filter(checker_filter_out_none)(mod)
    assert len(after.functions) == 2
    # Filtered functions should satisfy the given condition.
    assert checker_filter_out_none(after["func1"])
    assert checker_filter_out_none(after["func2"])

    # Test condition that selectively filters out primfuncs
    def checker_filter_out_one(func: tvm.tir.PrimFunc):
        return (func.attrs is not None) and ("temp" in func.attrs) and func.attrs["temp"] == "test1"

    after = tvm.tir.transform.Filter(checker_filter_out_one)(mod)
    assert len(after.functions) == 1
    # Filtered functions should satisfy the given condition.
    assert checker_filter_out_one(after["func1"])

    # Test condition that filters out everything
    def checker_filter_out_both(func: tvm.tir.PrimFunc):
        return (func.attrs is not None) and ("invalid_attr" in func.attrs)

    after = tvm.tir.transform.Filter(checker_filter_out_both)(mod)
    assert len(after.functions) == 0
# Delegate to tvm.testing.main so pytest discovery/flags apply when run directly.
if __name__ == "__main__":
    tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_transform_hoist_expression.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import tir
import tvm.testing
from tvm.script import tir as T
from tvm.tir.transform import HoistExpression, HoistedConditionals, HoistedLetBindings
class BaseBeforeAfter:
    """Shared fixture: run HoistExpression on `before`, compare with `expected`.

    Subclasses override `hoisted_conditionals` / `hoisted_let_bindings`
    to restrict which constructs the pass may hoist.
    """

    hoisted_conditionals = tvm.testing.parameter(HoistedConditionals.All)
    hoisted_let_bindings = tvm.testing.parameter(HoistedLetBindings.All)

    def test_hoist(self, hoisted_conditionals, hoisted_let_bindings):
        before = self.before
        before_mod = tvm.IRModule.from_expr(before)

        # Pass options are delivered via the PassContext config.
        config = {
            "tir.HoistExpression": {
                "hoisted_conditionals": hoisted_conditionals.value,
                "hoisted_let_bindings": hoisted_let_bindings.value,
            }
        }

        with tvm.transform.PassContext(config=config):
            after_mod = tvm.tir.transform.HoistExpression()(before_mod)
            after = after_mod["main"]

        expected = self.expected

        try:
            tvm.ir.assert_structural_equal(after, expected)
        except ValueError as err:
            # Include all three functions in the error for easier debugging.
            script = tvm.IRModule({"expected": expected, "after": after, "before": before}).script()
            raise ValueError(
                f"Function after simplification did not match expected:\n{script}"
            ) from err
class TestHoistToTop(BaseBeforeAfter):
    """A loop-invariant condition hoists above the loop."""

    hoisted_conditionals = tvm.testing.parameter(
        HoistedConditionals.IfElseStmt,
        HoistedConditionals.All,
    )

    @T.prim_func
    def before(A: T.Buffer[(16,), "float32"], n: T.int32):
        for i in T.serial(16):
            if n != 0:
                A[i] = 0.0

    @T.prim_func
    def expected(A: T.Buffer[(16,), "float32"], n: T.int32):
        if n != 0:
            for i in T.serial(16):
                A[i] = 0.0
class TestSuppressHoistIfElse(BaseBeforeAfter):
    """With IfElseStmt hoisting disabled, the if stays inside the loop."""

    hoisted_conditionals = tvm.testing.parameter(
        HoistedConditionals.Never,
        HoistedConditionals.IfElseExpr,
    )

    @T.prim_func
    def before(A: T.Buffer[(16,), "float32"], n: T.int32):
        for i in T.serial(16):
            if n != 0:
                A[i] = 0.0

    expected = before
class TestHoistBlockVar(BaseBeforeAfter):
    """A condition on an env-thread var hoists out of an inner serial loop."""

    @T.prim_func
    def before(A: T.Buffer[(128, 16), "float32"], n: T.int32):
        i = T.env_thread("threadIdx.x")
        T.launch_thread(i, 128)

        for j in T.serial(16):
            if i < 32:
                A[i, j] = 0.0

    @T.prim_func
    def expected(A: T.Buffer[(128, 16), "float32"], n: T.int32):
        i = T.env_thread("threadIdx.x")
        T.launch_thread(i, 128)

        if i < 32:
            for j in T.serial(16):
                A[i, j] = 0.0
class TestSuppressHoistBlockVar(BaseBeforeAfter):
    """With UsingBlockVar disabled, block-var conditions are not hoisted."""

    hoisted_conditionals = tvm.testing.parameter(
        HoistedConditionals.All & ~HoistedConditionals.UsingBlockVar
    )

    @T.prim_func
    def before(A: T.Buffer[(128, 16), "float32"], n: T.int32):
        thread_x = T.env_thread("threadIdx.x")
        T.launch_thread(thread_x, 128)

        for i in T.thread_binding(0, 128, thread="threadIdx.x"):
            if i < 32:
                for j in T.serial(16):
                    A[i, j] = 0.0

    expected = before
class TestHoistAcrossBlockVar(BaseBeforeAfter):
    """A condition independent of the block var hoists above the launch."""

    @T.prim_func
    def before(A: T.Buffer[(128, 16), "float32"], n: T.int32):
        thread_x = T.env_thread("threadIdx.x")
        T.launch_thread(thread_x, 128)

        for i in T.thread_binding(0, 128, thread="threadIdx.x"):
            if n == 0:
                for j in T.serial(16):
                    A[i, j] = 0.0

    @T.prim_func
    def expected(A: T.Buffer[(128, 16), "float32"], n: T.int32):
        thread_x = T.env_thread("threadIdx.x")

        if n == 0:
            T.launch_thread(thread_x, 128)
            for i in T.thread_binding(0, 128, thread="threadIdx.x"):
                for j in T.serial(16):
                    A[i, j] = 0.0
class TestSuppressHoistAcrossBlockVar(BaseBeforeAfter):
    """With UsingBlockVar disabled, hoisting stops below the thread binding."""

    hoisted_conditionals = tvm.testing.parameter(
        HoistedConditionals.All & ~HoistedConditionals.UsingBlockVar
    )

    @T.prim_func
    def before(A: T.Buffer[(128, 16), "float32"], n: T.int32):
        thread_x = T.env_thread("threadIdx.x")
        T.launch_thread(thread_x, 128)

        for i in T.thread_binding(0, 128, thread="threadIdx.x"):
            for j in T.serial(16):
                if n == 0:
                    A[i, j] = 0.0

    @T.prim_func
    def expected(A: T.Buffer[(128, 16), "float32"], n: T.int32):
        thread_x = T.env_thread("threadIdx.x")
        T.launch_thread(thread_x, 128)

        # The condition still hoists past the serial loop and thread binding,
        # but stays inside the launch_thread.
        if n == 0:
            for i in T.thread_binding(0, 128, thread="threadIdx.x"):
                for j in T.serial(16):
                    A[i, j] = 0.0
class TestHoistToMiddle(BaseBeforeAfter):
    """A condition depending on the outer loop var hoists only past the inner loop."""

    @T.prim_func
    def before(A: T.Buffer[(4, 4), "float32"]):
        for i in T.serial(4):
            for j in T.serial(4):
                if i < 3:
                    A[i, j] = 0.0

    @T.prim_func
    def expected(A: T.Buffer[(4, 4), "float32"]):
        for i in T.serial(4):
            if i < 3:
                for j in T.serial(4):
                    A[i, j] = 0.0
class TestHoistWithLet(BaseBeforeAfter):
    """A let binding and the condition that uses it hoist together."""

    @T.prim_func
    def before(A: T.Buffer[(4, 4), "float32"]):
        for i in T.serial(4):
            for j in T.serial(4):
                condition = i < 3
                if condition:
                    A[i, j] = 0.0

    @T.prim_func
    def expected(A: T.Buffer[(4, 4), "float32"]):
        for i in T.serial(4):
            condition = i < 3
            if condition:
                for j in T.serial(4):
                    A[i, j] = 0.0
class TestHoistDisableLet(BaseBeforeAfter):
    """As TestHoistWithLet, but forbid hoisting of LetStmt

    Because the condition depends on the let binding, it should no
    longer be hoisted.
    """

    hoisted_let_bindings = tvm.testing.parameter(HoistedLetBindings.Never)

    @T.prim_func
    def before(A: T.Buffer[(4, 4), "float32"]):
        for i in T.serial(4):
            for j in T.serial(4):
                condition = i < 3
                if condition:
                    A[i, j] = 0.0

    expected = before
class TestHoistIfElse(BaseBeforeAfter):
    """Hoisting an if/else duplicates the inner loop into both branches."""

    @T.prim_func
    def before(A: T.Buffer[(4, 4), "float32"]):
        for i in T.serial(4):
            for j in T.serial(4):
                if i < 3:
                    A[i, j] = 0.0
                else:
                    A[i, j] = 1.0

    @T.prim_func
    def expected(A: T.Buffer[(4, 4), "float32"]):
        for i in T.serial(4):
            if i < 3:
                for j in T.serial(4):
                    A[i, j] = 0.0
            else:
                for j in T.serial(4):
                    A[i, j] = 1.0
class TestHoistSequentialAssign(BaseBeforeAfter):
    """Multiple statements in each branch hoist as a unit."""

    @T.prim_func
    def before(A: T.Buffer[(4, 4), "float32"], B: T.Buffer[(4, 4), "float32"]):
        for i in T.serial(4):
            for j in T.serial(4):
                if i < 3:
                    A[i, j] = 0.0
                    B[i, j] = 0.0
                else:
                    A[i, j] = 1.0
                    B[i, j] = 1.0

    @T.prim_func
    def expected(A: T.Buffer[(4, 4), "float32"], B: T.Buffer[(4, 4), "float32"]):
        for i in T.serial(4):
            if i < 3:
                for j in T.serial(4):
                    A[i, j] = 0.0
                    B[i, j] = 0.0
            else:
                for j in T.serial(4):
                    A[i, j] = 1.0
                    B[i, j] = 1.0
class TestHoistMultiIf(BaseBeforeAfter):
    """Nested ifs hoist independently, each to its own loop level."""

    @T.prim_func
    def before(A: T.Buffer[(4, 4), "float32"]):
        for i in T.serial(4):
            for j in T.serial(4):
                for k in T.serial(4):
                    if j < 3:
                        if i < 2:
                            A[i, j] = 0.0

    @T.prim_func
    def expected(A: T.Buffer[(4, 4), "float32"]):
        for i in T.serial(4):
            if i < 2:
                for j in T.serial(4):
                    if j < 3:
                        for k in T.serial(4):
                            A[i, j] = 0.0
class TestHoistComplexConditional(BaseBeforeAfter):
    """An `and` condition is split so each term hoists to its own level."""

    @T.prim_func
    def before(A: T.Buffer[(4, 4), "float32"]):
        for i, j, k in T.grid(4, 4, 4):
            if j < 3 and i < 2:
                A[i, j] = 0.0

    @T.prim_func
    def expected(A: T.Buffer[(4, 4), "float32"]):
        for i in T.serial(4):
            if i < 2:
                for j in T.serial(4):
                    if j < 3:
                        for k in T.serial(4):
                            A[i, j] = 0.0
class TestSuppressSplittingConditional(BaseBeforeAfter):
    """With BooleanExpression disabled, the `and` hoists only as a whole."""

    hoisted_conditionals = tvm.testing.parameter(
        HoistedConditionals.All & ~HoistedConditionals.BooleanExpression
    )

    @T.prim_func
    def before(A: T.Buffer[(4, 4), "float32"]):
        for i, j, k in T.grid(4, 4, 4):
            if j < 3 and i < 2:
                A[i, j] = 0.0

    @T.prim_func
    def expected(A: T.Buffer[(4, 4), "float32"]):
        for i, j in T.grid(4, 4):
            if j < 3 and i < 2:
                for k in T.serial(4):
                    A[i, j] = 0.0
class TestHoistMultiIfElse(BaseBeforeAfter):
    """Nested if/else pairs hoist, duplicating loops into all four branches."""

    @T.prim_func
    def before(A: T.Buffer[(4, 4), "float32"]):
        for i in T.serial(4):
            for j in T.serial(4):
                for k in T.serial(4):
                    if j < 3:
                        if i < 2:
                            A[i, j] = 0.0
                        else:
                            A[i, j] = 1.0
                    else:
                        if i < 2:
                            A[i, j] = 2.0
                        else:
                            A[i, j] = 3.0

    @T.prim_func
    def expected(A: T.Buffer[(4, 4), "float32"]):
        for i in T.serial(4):
            if i < 2:
                for j in T.serial(4):
                    if j < 3:
                        for k in T.serial(4):
                            A[i, j] = 0.0
                    else:
                        for k in T.serial(4):
                            A[i, j] = 2.0
            else:
                for j in T.serial(4):
                    if j < 3:
                        for k in T.serial(4):
                            A[i, j] = 1.0
                    else:
                        for k in T.serial(4):
                            A[i, j] = 3.0
class TestHoistMultiIfElseDifferentBranches(BaseBeforeAfter):
    """Branches with different inner conditions (i<2 vs i<1) hoist separately."""

    @T.prim_func
    def before(A: T.Buffer[(4, 4), "float32"]):
        for i in T.serial(4):
            for j in T.serial(4):
                for k in T.serial(4):
                    if j < 3:
                        if i < 2:
                            A[i, j] = 0.0
                        else:
                            A[i, j] = 1.0
                    else:
                        if i < 1:
                            A[i, j] = 2.0
                        else:
                            A[i, j] = 3.0

    @T.prim_func
    def expected(A: T.Buffer[(4, 4), "float32"]):
        for i in T.serial(4):
            if i < 2:
                if i < 1:
                    for j in T.serial(4):
                        if j < 3:
                            for k in T.serial(4):
                                A[i, j] = 0.0
                        else:
                            for k in T.serial(4):
                                A[i, j] = 2.0
                else:
                    for j in T.serial(4):
                        if j < 3:
                            for k in T.serial(4):
                                A[i, j] = 0.0
                        else:
                            for k in T.serial(4):
                                A[i, j] = 3.0
            else:
                for j in T.serial(4):
                    if j < 3:
                        for k in T.serial(4):
                            A[i, j] = 1.0
                    else:
                        for k in T.serial(4):
                            A[i, j] = 3.0
class TestHoistIfElseExpr(BaseBeforeAfter):
    """A T.if_then_else expression hoists into an if/else statement."""

    @T.prim_func
    def before(A: T.Buffer[(4, 4), "float32"]):
        for i, j in T.grid(4, 4):
            A[i, j] = T.if_then_else(i < 2, 1.0, 2.0, dtype="float32")

    @T.prim_func
    def expected(A: T.Buffer[(4, 4), "float32"]):
        for i in T.serial(4):
            if i < 2:
                for j in T.serial(4):
                    A[i, j] = 1.0
            else:
                for j in T.serial(4):
                    A[i, j] = 2.0
class TestSuppressHoistIfElseExpr(TestHoistIfElseExpr):
    """With IfElseExpr disabled, the if_then_else expression stays in place."""

    hoisted_conditionals = tvm.testing.parameter(
        HoistedConditionals.All & ~HoistedConditionals.IfElseExpr
    )

    @T.prim_func
    def before(A: T.Buffer[(4, 4), "float32"]):
        for i, j in T.grid(4, 4):
            A[i, j] = T.if_then_else(i < 2, 1.0, 2.0, dtype="float32")

    expected = before
class TestHoistLetExpr(BaseBeforeAfter):
    """A T.Let expression whose value depends only on `i` hoists past `j`."""

    @T.prim_func
    def before(A: T.Buffer[(4, 4), "float32"]):
        for i, j in T.grid(4, 4):
            x = T.var("float32")
            A[i, j] = T.Let(x, T.cast(i + 1, "float32"), 5.0 * x + T.cast(j, "float32"))

    @T.prim_func
    def expected(A: T.Buffer[(4, 4), "float32"]):
        for i in T.serial(4):
            x = T.cast(i + 1, "float32")
            for j in T.serial(4):
                A[i, j] = 5.0 * x + T.cast(j, "float32")
class TestSuppressHoistLetExpr(BaseBeforeAfter):
    """With ``HoistedLetBindings.LetExpr`` masked out, the ``T.Let``
    expression must be left in place unchanged."""

    hoisted_let_bindings = tvm.testing.parameter(
        HoistedLetBindings.All & ~HoistedLetBindings.LetExpr
    )

    @T.prim_func
    def before(A: T.Buffer[(4, 4), "float32"]):
        for i, j in T.grid(4, 4):
            x = T.var("float32")
            A[i, j] = T.Let(x, T.cast(i + 1, "float32"), 5.0 * x + T.cast(j, "float32"))

    # No hoisting may occur, so the output equals the input.
    expected = before
# Allow running this test file directly via the TVM testing entry point.
if __name__ == "__main__":
    tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_transform_hoist_if.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
from tvm import relay
import numpy as np
import pytest
from tvm.testing import enabled_targets
var_list = []
def verify_structure(stmt, expected_struct):
    """Assert that ``stmt`` has the expected parent/child node structure.

    Walks ``stmt`` in post-order, recording each For / IfThenElse / AttrStmt
    node as a hashable descriptor tuple, then maps every descriptor to the
    descriptors of its relevant children and compares against
    ``expected_struct`` (a dict of descriptor -> tuple-of-child-descriptors,
    with ``None`` for children that are not themselves recorded nodes).

    Raises AssertionError on mismatch.
    """
    node_dict = {}
    struct = {}
    # Local accumulator for the variables appearing in an if-condition;
    # avoids mutating the module-level ``var_list`` global.
    cond_vars = []

    def _extract_vars(op):
        if isinstance(op, tvm.tir.Var):
            cond_vars.append(op.name)

    def _visit(op):
        key = op
        if isinstance(op, tvm.tir.IfThenElse):
            # Descriptor carries the names of all vars used in the condition.
            tvm.tir.stmt_functor.post_order_visit(op.condition, _extract_vars)
            val = [(op.then_case, op.else_case), ("tir.IfThenElse", tuple(cond_vars))]
            cond_vars.clear()
        elif isinstance(op, tvm.tir.For):
            val = [(op.body,), ("tir.For", op.loop_var.name)]
        elif isinstance(op, tvm.tir.AttrStmt):
            val = [(op.body,), ("tir.AttrStmt", op.attr_key, int(op.value))]
        else:
            return
        node_dict[key] = val

    tvm.tir.stmt_functor.post_order_visit(stmt, _visit)
    for key, val in node_dict.items():
        struct[val[1]] = tuple(
            node_dict[child][1] if child in node_dict else None for child in val[0]
        )
    assert struct == expected_struct, "Structure mismatch: expect %s but got %s" % (
        expected_struct,
        struct,
    )
def _opaque_eval(var):
    """Wrap ``var`` in an opaque extern call so later passes cannot remove it."""
    return tvm.tir.Evaluate(tvm.tir.call_extern("int32", "dummy", var))
def test_hoist_top_for():
    """An if depending only on the outermost loop var is hoisted to sit
    directly under that loop, above the j and k loops."""
    ib = tvm.tir.ir_builder.create()
    l = te.var("l")
    m = te.var("m")
    n = te.var("n")
    # (removed unused local: a "data" pointer was allocated but never read/written)
    with ib.for_range(0, l, "i") as i:
        with ib.for_range(0, m, "j") as j:
            with ib.for_range(0, n, "k") as k:
                # Condition uses only i, so it can be hoisted above j and k.
                with ib.if_scope(ib.likely(i < 2)):
                    ib.emit(_opaque_eval(m))
                with ib.else_scope():
                    ib.emit(_opaque_eval(n))
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    expected_struct = {
        ("tir.For", "k"): (None,),
        ("tir.For", "j"): (("tir.For", "k"),),
        ("tir.IfThenElse", ("i",)): (("tir.For", "j"), ("tir.For", "j")),
        ("tir.For", "i"): (("tir.IfThenElse", ("i",)),),
    }
    verify_structure(new_stmt, expected_struct)
def test_hoist_multi_var_if():
    """An if depending on two loop vars (i and j) is hoisted only above the
    loops it does not depend on, i.e. above k but below j."""
    ib = tvm.tir.ir_builder.create()
    l = te.var("l")
    m = te.var("m")
    n = te.var("n")
    # (removed unused local: a "data" pointer was allocated but never read/written)
    with ib.for_range(0, l, "i") as i:
        with ib.for_range(0, m, "j") as j:
            with ib.for_range(0, n, "k") as k:
                # Condition uses i and j, so hoisting must stop under the j loop.
                with ib.if_scope(ib.likely(i + j < 2)):
                    ib.emit(_opaque_eval(m))
                with ib.else_scope():
                    ib.emit(_opaque_eval(n))
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    new_mod = tvm.tir.transform.HoistIfThenElse()(mod)
    new_stmt = new_mod["main"].body
    expected_struct = {
        ("tir.For", "k"): (None,),
        ("tir.IfThenElse", ("i", "j")): (("tir.For", "k"), ("tir.For", "k")),
        ("tir.For", "j"): (("tir.IfThenElse", ("i", "j")),),
        ("tir.For", "i"): (("tir.For", "j"),),
    }
    verify_structure(new_stmt, expected_struct)
def test_hoist_no_match_for():
    """The j loop also contains a store before the k loop, so the if cannot be
    hoisted above j; it only moves above k."""
    ib = tvm.tir.ir_builder.create()
    l = te.var("l")
    m = te.var("m")
    n = te.var("n")
    data = ib.pointer("float32", name="data")
    with ib.for_range(0, l, "i") as i:
        with ib.for_range(0, m, "j") as j:
            # This unconditional store blocks hoisting past the j loop.
            data[i * 3 + j] = data[i * 3 + j] + 0.5
            with ib.for_range(0, n, "k") as k:
                with ib.if_scope(ib.likely(i < 2)):
                    ib.emit(_opaque_eval(m))
                with ib.else_scope():
                    ib.emit(_opaque_eval(n))
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    expected_struct = {
        ("tir.For", "k"): (None,),
        ("tir.IfThenElse", ("i",)): (("tir.For", "k"), ("tir.For", "k")),
        ("tir.For", "j"): (None,),
        ("tir.For", "i"): (("tir.For", "j"),),
    }
    verify_structure(new_stmt, expected_struct)
def test_no_else():
    """An if with no else branch is still hoisted to directly under the loop
    its condition depends on."""
    ib = tvm.tir.ir_builder.create()
    l = te.var("l")
    m = te.var("m")
    n = te.var("n")
    with ib.for_range(0, l, "i") as i:
        with ib.for_range(0, m, "j") as j:
            with ib.for_range(0, n, "k") as k:
                with ib.if_scope(ib.likely(i < 2)):
                    ib.emit(_opaque_eval(m))
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    expected_struct = {
        ("tir.For", "k"): (None,),
        ("tir.For", "j"): (("tir.For", "k"),),
        # else_case is None, reflected by the second child being None.
        ("tir.IfThenElse", ("i",)): (("tir.For", "j"), None),
        ("tir.For", "i"): (("tir.IfThenElse", ("i",)),),
    }
    verify_structure(new_stmt, expected_struct)
def test_attr_stmt():
    """Hoisting under thread_extent AttrStmts: the if moves above k but the
    thread_extent attributes stay at the top of the function."""
    ib = tvm.tir.ir_builder.create()
    dshape = (32, 64)
    data = ib.pointer("float32", name="data")
    l = te.var("l")
    m = te.var("m")
    n = te.var("n")
    tx = te.thread_axis("threadIdx.x")
    bx = te.thread_axis("blockIdx.x")
    ib.scope_attr(tx, "thread_extent", dshape[0])
    ib.scope_attr(bx, "thread_extent", dshape[1])
    with ib.for_range(0, l, "i") as i:
        with ib.for_range(0, m, "j") as j:
            with ib.for_range(0, n, "k") as k:
                # Condition depends on i and j only, so it hoists above k.
                with ib.if_scope(tvm.tir.any(i < 4, j >= 8)):
                    data[bx * j + tx * j * k] = data[bx * j + tx * j * k] + 0.5
                with ib.else_scope():
                    data[bx * j + tx * j * k] = data[bx * j + tx * j * k] + 1.0
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    expected_struct = {
        ("tir.For", "k"): (None,),
        ("tir.IfThenElse", ("i", "j")): (("tir.For", "k"), ("tir.For", "k")),
        ("tir.For", "j"): (("tir.IfThenElse", ("i", "j")),),
        ("tir.For", "i"): (("tir.For", "j"),),
        ("tir.AttrStmt", "thread_extent", 64): (("tir.For", "i"),),
        ("tir.AttrStmt", "thread_extent", 32): (("tir.AttrStmt", "thread_extent", 64),),
    }
    verify_structure(new_stmt, expected_struct)
def test_nested_for():
    """Two ifs at different nesting depths are each hoisted as far as their
    own condition variables allow."""
    ib = tvm.tir.ir_builder.create()
    data = ib.pointer("float32", name="data")
    with ib.for_range(0, 5, "i") as i:
        with ib.for_range(0, 10, "j") as j:
            # Outer if depends only on i: hoists above the j loop.
            with ib.if_scope(i >= 3):
                data[i * 3 + j] = data[i * 3 + j] + 0.5
                with ib.for_range(0, 15, "k") as k:
                    with ib.for_range(0, 20, "l") as l:
                        # Inner if depends on i and j: hoists above k and l.
                        with ib.if_scope(tvm.tir.any(i < 4, j >= 8)):
                            data[i * 3 + j + k + l] = data[i * 3 + j + k + l] * 2
                        with ib.else_scope():
                            data[i * 3 + j + k + l] = data[i * 3 + j + k + l] * 1.5
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    expected_struct = {
        ("tir.For", "l"): (None,),
        ("tir.For", "k"): (("tir.For", "l"),),
        ("tir.IfThenElse", ("i", "j")): (("tir.For", "k"), ("tir.For", "k")),
        ("tir.For", "j"): (None,),
        ("tir.IfThenElse", ("i",)): (("tir.For", "j"), None),
        ("tir.For", "i"): (("tir.IfThenElse", ("i",)),),
    }
    verify_structure(new_stmt, expected_struct)
def test_if_block():
    """Hoisting across two separate top-level loop nests in one body, with
    several ifs at different depths."""
    ib = tvm.tir.ir_builder.create()
    data = ib.pointer("float32", name="data")
    n = te.var("n")
    with ib.for_range(0, 5, "i") as i:
        with ib.for_range(0, 10, "j") as j:
            with ib.if_scope(i >= 3):
                data[i * 3 + j] = data[i * 3 + j] + 0.5
                with ib.for_range(0, 15, "k") as k:
                    with ib.for_range(0, 20, "l") as l:
                        with ib.if_scope(tvm.tir.any(i < 4, j >= 8)):
                            data[i * 3 + j + k + l] = data[i * 3 + j + k + l] * 2
                        with ib.else_scope():
                            data[i * 3 + j + k + l] = data[i * 3 + j + k + l] * 1.5
                        with ib.if_scope(j < 5):
                            data[i * 3 + j + k + l] = data[i * 3 + j + k + l] - 1

    # Second loop nest: the if depends only on the free var n, so it can be
    # hoisted above the k loop.
    with ib.for_range(0, 5, "i") as i:
        with ib.for_range(0, 10, "j") as j:
            with ib.for_range(0, 15, "k") as k:
                with ib.if_scope(n >= 3):
                    data[i * 3 + j + k] = data[i * 3 + j + k] + 0.6
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    # NOTE(review): both loop nests contribute a ("tir.For", "j") descriptor;
    # the dict keeps only the last one visited — presumably intentional here,
    # but worth confirming if this test is extended.
    expected_struct = {
        ("tir.IfThenElse", ("i", "j")): (None, None),
        ("tir.IfThenElse", ("j",)): (None, None),
        ("tir.For", "l"): (None,),
        ("tir.For", "k"): (None,),
        ("tir.For", "j"): (("tir.For", "j"),),
        ("tir.IfThenElse", ("i",)): (("tir.For", "j"), None),
        ("tir.For", "i"): (("tir.IfThenElse", ("i",)),),
        ("tir.IfThenElse", ("n",)): (("tir.For", "j"), None),
    }
    verify_structure(new_stmt, expected_struct)
def test_multi_if():
    """Two nested ifs on different loop vars each hoist to their own level:
    the i-test above the j loop, the j-test above the k loop."""
    builder = tvm.tir.ir_builder.create()
    buf = builder.pointer("float32", name="data")
    with builder.for_range(0, 10, "i") as i:
        with builder.for_range(0, 10, "j") as j:
            with builder.for_range(0, 10, "k") as k:
                with builder.if_scope(3 <= i):
                    with builder.if_scope(3 <= j):
                        buf[i * 100 + j * 10 + k] = buf[i * 100 + j * 10 + k] + 0.5
    body = builder.get()
    module = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], body))
    hoisted = tvm.tir.transform.HoistIfThenElse()(module)["main"].body
    expected_struct = {
        ("tir.For", "k"): (None,),
        ("tir.IfThenElse", ("j",)): (("tir.For", "k"), None),
        ("tir.For", "j"): (("tir.IfThenElse", ("j",)),),
        ("tir.IfThenElse", ("i",)): (("tir.For", "j"), None),
        ("tir.For", "i"): (("tir.IfThenElse", ("i",)),),
    }
    verify_structure(hoisted, expected_struct)
def test_no_hoisting_1():
    """No hoisting: the condition uses the innermost loop var k, so the
    statement must stay in place in both normal and block-scope modes."""
    ib = tvm.tir.ir_builder.create()
    data = ib.pointer("float32", name="data")
    # (removed unused local: te.var("n") was never referenced)
    with ib.for_range(0, 10, "i") as i:
        with ib.for_range(0, 10, "j") as j:
            with ib.for_range(0, 10, "k") as k:
                with ib.if_scope(k <= 3):
                    data[i * 100 + j * 10 + k] = data[i * 100 + j * 10 + k] + 0.5
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    tvm.ir.assert_structural_equal(new_stmt, stmt)

    with tvm.transform.PassContext(
        config={"tir.HoistIfThenElse": {"support_block_scope_hosting": True}}
    ):
        new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    tvm.ir.assert_structural_equal(new_stmt, stmt)
def test_no_hoisting_2():
    """No hoisting: the if shares the k-loop body with an unconditional
    store, so moving it would change which iterations run that store."""
    ib = tvm.tir.ir_builder.create()
    data = ib.pointer("float32", name="data")
    # (removed unused locals: te.var("n") and te.var("x") were never referenced)
    with ib.for_range(0, 10, "i") as i:
        with ib.for_range(0, 10, "j") as j:
            with ib.for_range(0, 10, "k") as k:
                with ib.if_scope(i <= 3):
                    data[i * 100 + j * 10 + k] = data[i * 100 + j * 10 + k] + 0.3
                data[i * 100 + j * 10 + k] = data[i * 100 + j * 10 + k] + 0.5
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    tvm.ir.assert_structural_equal(new_stmt, stmt)

    with tvm.transform.PassContext(
        config={"tir.HoistIfThenElse": {"support_block_scope_hosting": True}}
    ):
        new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    tvm.ir.assert_structural_equal(new_stmt, stmt)
@pytest.mark.xfail(reason="Inconsistent thread_extent", strict=True)
def test_no_hoisting_3():
    """No hoisting expected when inner thread_extent attrs conflict with the
    outer ones (xfail: the builder currently rejects this IR)."""
    ib = tvm.tir.ir_builder.create()
    dshape = (32, 64)
    dshape_inner = (33, 63)
    data = ib.pointer("float32", name="data")
    l = te.var("l")
    m = te.var("m")
    n = te.var("n")
    tx = te.thread_axis("threadIdx.x")
    bx = te.thread_axis("blockIdx.x")
    ib.scope_attr(tx, "thread_extent", dshape[0])
    ib.scope_attr(bx, "thread_extent", dshape[1])
    with ib.for_range(0, l, "i") as i:
        with ib.for_range(0, m, "j") as j:
            with ib.for_range(0, n, "k") as k:
                # Re-binds the same thread axes with different extents inside
                # the loop nest, which is inconsistent with the outer attrs.
                ib.scope_attr(tx, "thread_extent", dshape_inner[0])
                ib.scope_attr(bx, "thread_extent", dshape_inner[1])
                with ib.if_scope(tx < 3):
                    data[bx * j + tx * j * k] = data[bx * j + tx * j * k] + 0.3
                with ib.else_scope():
                    data[bx * j + tx * j * k] = data[bx * j + tx * j * k] + 1.3
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    tvm.ir.assert_structural_equal(new_stmt, stmt)

    with tvm.transform.PassContext(
        config={"tir.HoistIfThenElse": {"support_block_scope_hosting": True}}
    ):
        new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    tvm.ir.assert_structural_equal(new_stmt, stmt)
def test_no_hoisting_4():
    """No hoisting: tx's thread_extent attr lives inside the innermost loop,
    so the tx-dependent if cannot move above it in either mode."""
    ib = tvm.tir.ir_builder.create()
    dshape = (32, 64)
    dshape_inner = (33, 63)
    data = ib.pointer("float32", name="data")
    l = te.var("l")
    m = te.var("m")
    n = te.var("n")
    tx = te.thread_axis("threadIdx.x")
    bx = te.thread_axis("blockIdx.x")
    # Only bx is bound at the top; tx is bound inside the k loop below.
    ib.scope_attr(bx, "thread_extent", dshape[1])
    with ib.for_range(0, l, "i") as i:
        with ib.for_range(0, m, "j") as j:
            with ib.for_range(0, n, "k") as k:
                ib.scope_attr(tx, "thread_extent", dshape_inner[0])
                with ib.if_scope(tx < 3):
                    data[bx * j + tx * j * k] = data[bx * j + tx * j * k] + 0.3
                with ib.else_scope():
                    data[bx * j + tx * j * k] = data[bx * j + tx * j * k] + 1.3
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    tvm.ir.assert_structural_equal(new_stmt, stmt)

    with tvm.transform.PassContext(
        config={"tir.HoistIfThenElse": {"support_block_scope_hosting": True}}
    ):
        new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    tvm.ir.assert_structural_equal(new_stmt, stmt)
@pytest.mark.xfail(reason="Inconsistent thread_extent", strict=True)
def test_no_hoisting_5():
    """No hoisting expected with conflicting nested thread_extent attrs at
    two different loop levels (xfail: the builder rejects this IR)."""
    ib = tvm.tir.ir_builder.create()
    dshape = (32, 64)
    dshape_inner = (33, 63)
    data = ib.pointer("float32", name="data")
    l = te.var("l")
    m = te.var("m")
    n = te.var("n")
    tx = te.thread_axis("threadIdx.x")
    bx = te.thread_axis("blockIdx.x")
    ib.scope_attr(tx, "thread_extent", dshape[0])
    ib.scope_attr(bx, "thread_extent", dshape[1])
    with ib.for_range(0, l, "i") as i:
        with ib.for_range(0, m, "j") as j:
            # Inner re-binds with different extents conflict with the outer
            # attrs above.
            ib.scope_attr(bx, "thread_extent", dshape_inner[1])
            with ib.for_range(0, n, "k") as k:
                ib.scope_attr(tx, "thread_extent", dshape_inner[0])
                with ib.if_scope(tx < 3):
                    data[bx * j + tx * j * k] = data[bx * j + tx * j * k] + 0.3
                with ib.else_scope():
                    data[bx * j + tx * j * k] = data[bx * j + tx * j * k] + 1.3
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    tvm.ir.assert_structural_equal(new_stmt, stmt)

    with tvm.transform.PassContext(
        config={"tir.HoistIfThenElse": {"support_block_scope_hosting": True}}
    ):
        new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    tvm.ir.assert_structural_equal(new_stmt, stmt)
def test_no_hoisting_6():
    """No hoisting: the condition mixes the thread var tx with the innermost
    loop var k, so it cannot move above the k loop in either mode."""
    ib = tvm.tir.ir_builder.create()
    dshape = (32, 64)
    data = ib.pointer("float32", name="data")
    l = te.var("l")
    m = te.var("m")
    n = te.var("n")
    tx = te.thread_axis("threadIdx.x")
    bx = te.thread_axis("blockIdx.x")
    ib.scope_attr(tx, "thread_extent", dshape[0])
    ib.scope_attr(bx, "thread_extent", dshape[1])
    with ib.for_range(0, l, "i") as i:
        with ib.for_range(0, m, "j") as j:
            with ib.for_range(0, n, "k") as k:
                with ib.if_scope((tx + k) < 3):
                    data[bx * j + tx * j * k] = data[bx * j + tx * j * k] + 0.3
                with ib.else_scope():
                    data[bx * j + tx * j * k] = data[bx * j + tx * j * k] + 1.3
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    tvm.ir.assert_structural_equal(new_stmt, stmt)

    with tvm.transform.PassContext(
        config={"tir.HoistIfThenElse": {"support_block_scope_hosting": True}}
    ):
        new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    tvm.ir.assert_structural_equal(new_stmt, stmt)
def test_no_hoisting_7():
    """No hoisting: the inner if depends on tx and k (cannot move above the
    k loop), and the outer if already sits as high as its vars allow."""
    ib = tvm.tir.ir_builder.create()
    dshape = (32, 64)
    data = ib.pointer("float32", name="data")
    l = te.var("l")
    m = te.var("m")
    n = te.var("n")
    tx = te.thread_axis("threadIdx.x")
    bx = te.thread_axis("blockIdx.x")
    ib.scope_attr(tx, "thread_extent", dshape[0])
    ib.scope_attr(bx, "thread_extent", dshape[1])
    with ib.for_range(0, l, "i") as i:
        with ib.for_range(0, m, "j") as j:
            with ib.if_scope((tx + j) < 9):
                with ib.for_range(0, n, "k") as k:
                    with ib.if_scope((tx + k) < 3):
                        data[bx * j + tx * j * k] = data[bx * j + tx * j * k] + 0.3
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    tvm.ir.assert_structural_equal(new_stmt, stmt)

    with tvm.transform.PassContext(
        config={"tir.HoistIfThenElse": {"support_block_scope_hosting": True}}
    ):
        new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    tvm.ir.assert_structural_equal(new_stmt, stmt)
def test_hoisting_block_scope_1():
    """Block-scope hoisting on a scheduled rfactor reduction: the default
    pass leaves the IR unchanged, the block-scope mode rewrites it."""
    n = te.size_var("n")
    m = te.size_var("m")
    A = te.placeholder((n, m), name="A")
    k = te.reduce_axis((0, m), "k")
    B = te.compute((n,), lambda i: te.sum(A[i, k], axis=k), name="B")
    s = te.create_schedule(B.op)
    ko, ki = s[B].split(B.op.reduce_axis[0], factor=16)
    BF = s.rfactor(B, ki)
    xo, xi = s[B].split(s[B].op.axis[0], factor=32)
    s[B.op].bind(xo, te.thread_axis("blockIdx.x"))
    s[B.op].bind(xi, te.thread_axis("threadIdx.y"))
    s[B].bind(s[B].op.reduce_axis[0], te.thread_axis("threadIdx.x"))
    s[BF].compute_at(s[B], s[B].op.reduce_axis[0])
    mod = tvm.driver.build_module.schedule_to_module(s, [A, B], "main", None)
    # Normalize the IR before comparing transform results.
    mod = tvm.tir.transform.Simplify()(mod)
    mod = tvm.tir.transform.RemoveNoOp()(mod)
    stmt = mod["main"].body
    new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    tvm.ir.assert_structural_equal(new_stmt, stmt)

    with tvm.transform.PassContext(
        config={"tir.HoistIfThenElse": {"support_block_scope_hosting": True}}
    ):
        new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    # Block-scope mode must actually change the IR here.
    assert not tvm.ir.structural_equal(new_stmt, stmt)
def test_hoisting_block_scope_2():
    """Block-scope hoisting: the tx-dependent if sits under a bx thread_extent
    attr inside the loop nest, so only block-scope mode can hoist it."""
    ib = tvm.tir.ir_builder.create()
    dshape = (32, 64)
    # (removed unused local: dshape_inner was never referenced)
    data = ib.pointer("float32", name="data")
    l = te.var("l")
    m = te.var("m")
    n = te.var("n")
    tx = te.thread_axis("threadIdx.x")
    bx = te.thread_axis("blockIdx.x")
    ib.scope_attr(tx, "thread_extent", dshape[0])
    # bx's thread_extent is deliberately NOT attached here; it is attached
    # inside the innermost loop so hoisting past it needs block-scope mode.
    with ib.for_range(0, l, "i") as i:
        with ib.for_range(0, m, "j") as j:
            with ib.for_range(0, n, "k") as k:
                ib.scope_attr(bx, "thread_extent", dshape[1])
                with ib.if_scope(tx < 3):
                    data[bx * j + tx * j * k] = data[bx * j + tx * j * k] + 0.3
                with ib.else_scope():
                    data[bx * j + tx * j * k] = data[bx * j + tx * j * k] + 1.3
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    mod = tvm.tir.transform.Simplify()(mod)
    mod = tvm.tir.transform.RemoveNoOp()(mod)
    stmt = mod["main"].body
    new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    tvm.ir.assert_structural_equal(new_stmt, stmt)

    with tvm.transform.PassContext(
        config={"tir.HoistIfThenElse": {"support_block_scope_hosting": True}}
    ):
        new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    assert not tvm.ir.structural_equal(new_stmt, stmt)
@pytest.mark.xfail(reason="Inconsistent thread_extent", strict=True)
def test_hoisting_block_scope_3():
    """Block-scope hoisting with conflicting nested thread_extent attrs
    (xfail: the builder rejects this inconsistent IR)."""
    ib = tvm.tir.ir_builder.create()
    dshape = (32, 64)
    dshape_inner = (33, 63)
    data = ib.pointer("float32", name="data")
    l = te.var("l")
    m = te.var("m")
    n = te.var("n")
    tx = te.thread_axis("threadIdx.x")
    bx = te.thread_axis("blockIdx.x")
    ib.scope_attr(tx, "thread_extent", dshape[0])
    ib.scope_attr(bx, "thread_extent", dshape[1])
    with ib.for_range(0, l, "i") as i:
        with ib.for_range(0, m, "j") as j:
            # Inner re-binds with different extents conflict with the attrs
            # attached at the top of the function.
            ib.scope_attr(tx, "thread_extent", dshape_inner[0])
            ib.scope_attr(bx, "thread_extent", dshape_inner[1])
            with ib.for_range(0, n, "k") as k:
                with ib.if_scope(tx < 3):
                    data[bx * j + tx * j * k] = data[bx * j + tx * j * k] + 0.3
                with ib.else_scope():
                    data[bx * j + tx * j * k] = data[bx * j + tx * j * k] + 1.3
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    tvm.ir.assert_structural_equal(new_stmt, stmt)

    with tvm.transform.PassContext(
        config={"tir.HoistIfThenElse": {"support_block_scope_hosting": True}}
    ):
        new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    assert not tvm.ir.structural_equal(new_stmt, stmt)
def test_hoisting_block_scope_4():
    """Block-scope hoisting on a parallel/vectorized CPU schedule: only the
    block-scope mode changes the IR."""
    nn = 1024
    n = tvm.runtime.convert(nn)
    A = te.placeholder((n,), name="A")
    B = te.placeholder((n,), name="B")
    AA = te.compute((n,), lambda *i: A(*i), name="A")
    BB = te.compute((n,), lambda *i: B(*i), name="B")
    T = te.compute(A.shape, lambda *i: AA(*i) + BB(*i), name="T")
    C = te.compute(A.shape, lambda *i: T(*i), name="C")
    s = te.create_schedule(C.op)
    xo, xi = s[C].split(C.op.axis[0], factor=4)
    # factor=13 does not divide the extent evenly, producing a boundary
    # conditional that the pass can try to hoist.
    xo1, xo2 = s[C].split(xo, factor=13)
    s[C].parallel(xo2)
    s[C].pragma(xo1, "parallel_launch_point")
    s[C].pragma(xo2, "parallel_stride_pattern")
    s[C].pragma(xo2, "parallel_barrier_when_finish")
    s[C].vectorize(xi)
    mod = tvm.driver.build_module.schedule_to_module(s, [A, B, C], "main", None)
    mod = tvm.tir.transform.Simplify()(mod)
    stmt = mod["main"].body
    new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    tvm.ir.assert_structural_equal(new_stmt, stmt)

    with tvm.transform.PassContext(
        config={"tir.HoistIfThenElse": {"support_block_scope_hosting": True}}
    ):
        new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    assert not tvm.ir.structural_equal(new_stmt, stmt)
def test_hoisting_block_scope_5():
    """A condition reading loop-invariant buffer data (data[g]) is hoisted by
    the default pass; a second run in block-scope mode is then a no-op."""
    ib = tvm.tir.ir_builder.create()
    data = ib.pointer("float32", name="data", scope="global")
    l = te.var("l")
    m = te.var("m")
    n = te.var("n")
    g = te.var("g")
    ib.scope_attr(data, "storage_scope", "global")
    with ib.for_range(0, l, "i") as i:
        with ib.for_range(0, m, "j") as j:
            with ib.for_range(0, n, "k") as k:
                # Condition reads data[g], which no loop var indexes.
                with ib.if_scope(data[g] < 3):
                    data[9 * j + 3 * j * k] = data[9 * j + 3 * j * k] + 0.3
                with ib.else_scope():
                    data[9 * j + 3 * j * k] = data[9 * j + 3 * j * k] + 1.3
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    # The default pass already hoists this if.
    assert not tvm.ir.structural_equal(new_stmt, stmt)
    stmt = new_stmt
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    with tvm.transform.PassContext(
        config={"tir.HoistIfThenElse": {"support_block_scope_hosting": True}}
    ):
        new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    # Re-running on the already-hoisted IR changes nothing further.
    tvm.ir.assert_structural_equal(new_stmt, stmt)
def test_hoisting_block_scope_6():
    """The condition mixes tx with the free var n (loop-invariant): only the
    block-scope mode hoists it past the thread binding."""
    ib = tvm.tir.ir_builder.create()
    dshape = (32, 64)
    data = ib.pointer("float32", name="data")
    l = te.var("l")
    m = te.var("m")
    n = te.var("n")
    tx = te.thread_axis("threadIdx.x")
    bx = te.thread_axis("blockIdx.x")
    ib.scope_attr(tx, "thread_extent", dshape[0])
    ib.scope_attr(bx, "thread_extent", dshape[1])
    with ib.for_range(0, l, "i") as i:
        with ib.for_range(0, m, "j") as j:
            with ib.for_range(0, n, "k") as k:
                with ib.if_scope((tx + n) < 3):
                    data[bx * j + tx * j * k] = data[bx * j + tx * j * k] + 0.3
                with ib.else_scope():
                    data[bx * j + tx * j * k] = data[bx * j + tx * j * k] + 1.3
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    tvm.ir.assert_structural_equal(new_stmt, stmt)

    with tvm.transform.PassContext(
        config={"tir.HoistIfThenElse": {"support_block_scope_hosting": True}}
    ):
        new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    assert not tvm.ir.structural_equal(new_stmt, stmt)
def test_hoisting_block_scope_7():
    """The condition mixes tx with the outermost loop var i: only the
    block-scope mode can hoist it above the j and k loops."""
    ib = tvm.tir.ir_builder.create()
    dshape = (32, 64)
    data = ib.pointer("float32", name="data")
    l = te.var("l")
    m = te.var("m")
    n = te.var("n")
    tx = te.thread_axis("threadIdx.x")
    bx = te.thread_axis("blockIdx.x")
    ib.scope_attr(tx, "thread_extent", dshape[0])
    ib.scope_attr(bx, "thread_extent", dshape[1])
    with ib.for_range(0, l, "i") as i:
        with ib.for_range(0, m, "j") as j:
            with ib.for_range(0, n, "k") as k:
                with ib.if_scope((tx + i) < 3):
                    data[bx * j + tx * j * k] = data[bx * j + tx * j * k] + 0.3
                with ib.else_scope():
                    data[bx * j + tx * j * k] = data[bx * j + tx * j * k] + 1.3
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    tvm.ir.assert_structural_equal(new_stmt, stmt)

    with tvm.transform.PassContext(
        config={"tir.HoistIfThenElse": {"support_block_scope_hosting": True}}
    ):
        new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    assert not tvm.ir.structural_equal(new_stmt, stmt)
@pytest.mark.skip()
def test_hoisting_op_conv():
    """Benchmark-style end-to-end check (skipped by default): build a conv2d
    with and without block-scope hoisting and compare mean run times."""
    dtype = "float32"
    dshape = (1, 80, 73, 73)
    kshape = (192, 80, 3, 3)
    padding = (1, 1)
    groups = 1
    dilation = (1, 1)
    kernel_size = (3, 3)
    channels = 192
    scale = 1
    x = relay.var("x", shape=dshape, dtype=dtype)
    w = relay.var("w", shape=kshape, dtype=dtype)
    y = relay.nn.conv2d(
        x,
        w,
        padding=padding,
        dilation=dilation,
        groups=groups,
        channels=channels,
        kernel_size=kernel_size,
    )
    func = relay.Function([x, w], y)
    mod = tvm.IRModule()
    mod["main"] = func
    mod = relay.transform.InferType()(mod)
    data = np.random.uniform(-scale, scale, size=dshape).astype(dtype)
    kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype)
    params = {"w": tvm.nd.array(kernel)}
    for target, dev in enabled_targets():
        # Baseline: default pass configuration.
        with tvm.transform.PassContext(opt_level=3):
            lib = relay.build_module.build(mod, target=target, params=params)
            m = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))
            # NOTE(review): this rebinds the relay var `x` to an unused numpy
            # array; looks like leftover code — the module input is data_tvm.
            x = np.random.uniform(size=dshape)
            data_tvm = tvm.nd.array(data)
            m.set_input("x", data_tvm)
            m.run()
            e = m.module.time_evaluator("run", dev, number=300, repeat=3)
            t1 = e(data_tvm).results
            t1 = np.array(t1) * 1000
            print("{} ms".format(t1.mean()))
        # Same build with block-scope hoisting enabled.
        with tvm.transform.PassContext(
            opt_level=3, config={"tir.HoistIfThenElse": {"support_block_scope_hosting": True}}
        ):
            lib = relay.build_module.build(mod, target=target, params=params)
            m = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))
            x = np.random.uniform(size=dshape)
            data_tvm = tvm.nd.array(data)
            m.set_input("x", data_tvm)
            m.set_input(**params)
            m.run()
            e = m.module.time_evaluator("run", dev, number=300, repeat=3)
            t2 = e(data_tvm).results
            t2 = np.array(t2) * 1000
            print("{} ms".format(t2.mean()))
        # Loose tolerance: timings should be in the same ballpark.
        tvm.testing.assert_allclose(t1.mean(), t2.mean(), atol=1, rtol=1e-1)
# Allow running this test file directly with pytest.
if __name__ == "__main__":
    pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_transform_inject_copy_intrin.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm import te
from tvm.driver.build_module import schedule_to_module
def test_copy2d():
    """InjectCopyIntrin on a plain 2-D copy: the callback sees contiguous
    strides and the full source shape."""
    m = te.var("m")
    l = te.var("l")
    A = te.placeholder((m, l), name="A")
    B = te.compute((m, l), lambda i, j: A[i, j], name="B")
    s = te.create_schedule(B.op)
    # Mark the outer axis so the pass lowers the copy via our callback.
    s[B].pragma(B.op.axis[0], "memcpy")
    bounds = tvm.te.schedule.InferBound(s)
    stmt = tvm.te.schedule.ScheduleOps(s, bounds)
    func = tvm.te.schedule.SchedulePostProcToPrimFunc([A, B], stmt, None)
    mod = tvm.IRModule.from_expr(func)
    mod = tvm.tir.transform.StorageFlatten(64)(mod)

    def cb(src, dst, pad_before, pad_after, pad_value):
        # Row-major layout: row stride l, unit column stride, no padding.
        assert dst.strides[0] == l
        assert dst.strides[1].value == 1
        assert src.strides[0] == l
        assert tuple(src.shape) == (m, l)
        return tvm.tir.Evaluate(0)

    stmt = tvm.tir.transform.InjectCopyIntrin("memcpy", cb)(mod)["main"].body
def test_copy_pad():
    """InjectCopyIntrin on a copy with one row of padding on each side: the
    callback receives the pad extents and the pad value."""
    m = te.var("m")
    l = te.var("l")
    A = te.placeholder((m, l), name="A")
    B = te.compute(
        (m + 2, l),
        lambda i, j: tvm.tir.if_then_else(tvm.tir.all(i >= 1, i < m + 1), A[i - 1, j], 1.0),
        name="B",
    )
    s = te.create_schedule(B.op)
    s[B].pragma(B.op.axis[0], "memcpy")
    mod = schedule_to_module(s, [A, B])
    mod = tvm.tir.transform.StorageFlatten(64)(mod)

    def cb(src, dst, pad_before, pad_after, pad_value):
        tvm.testing.assert_prim_expr_equal(src.elem_offset, 0)
        # One padded row before and after, none along the inner axis.
        assert pad_before[0].value == 1
        assert pad_before[1].value == 0
        assert pad_after[0].value == 1
        assert pad_after[1].value == 0
        assert pad_value.value == 1.0
        return tvm.tir.Evaluate(0)

    stmt = tvm.tir.transform.InjectCopyIntrin("memcpy", cb)(mod)["main"].body
def test_single_point_test():
    """InjectCopyIntrin degenerate case: a 1-element copy still reports zero
    offsets and unit strides to the callback."""
    A = te.placeholder((1,), name="A")
    B = te.compute((1,), lambda i: A[i], name="B")
    s = te.create_schedule(B.op)
    s[B].pragma(B.op.axis[0], "memcpy")
    mod = schedule_to_module(s, [A, B])
    mod = tvm.tir.transform.StorageFlatten(64)(mod)

    def cb(src, dst, pad_before, pad_after, pad_value):
        tvm.testing.assert_prim_expr_equal(src.elem_offset, 0)
        tvm.testing.assert_prim_expr_equal(dst.elem_offset, 0)
        tvm.testing.assert_prim_expr_equal(src.strides[0], 1)
        tvm.testing.assert_prim_expr_equal(dst.strides[0], 1)
        return tvm.tir.Evaluate(0)

    stmt = tvm.tir.transform.InjectCopyIntrin("memcpy", cb)(mod)["main"].body
def test_copy_pad_split():
    """InjectCopyIntrin when the padded copy is compute_at a split loop: the
    per-tile pad extents and source extents depend on the outer loop var."""
    m = 4 * 3
    A = te.placeholder((m,), name="A")
    Apad = te.compute(
        (m + 2,), lambda i: tvm.tir.if_then_else(tvm.tir.all(i >= 1, i <= m), A[i - 1], 0.0), "Apad"
    )
    B = te.compute((m,), lambda i: Apad[i] + Apad[i + 1] + Apad[i + 2])
    s = te.create_schedule(B.op)
    xo, xi = s[B].split(B.op.axis[0], factor=4)
    s[Apad].compute_at(s[B], xo)
    s[Apad].pragma(s[Apad].op.axis[0], "memcpy")
    mod = schedule_to_module(s, [A, B])
    mod = tvm.tir.transform.StorageFlatten(64)(mod._move())
    mod = tvm.tir.transform.Simplify()(mod._move())

    def cb(src, dst, pad_before, pad_after, pad_value):
        assert dst.elem_offset.value == 0
        tvm.testing.assert_prim_expr_equal(src.elem_offset, tvm.te.max(xo * 4, 1) - 1)
        # Padding only occurs in the first tile (before) / last tile (after).
        rpad_before = tvm.te.max(1 - xo * 4, 0)
        rpad_after = tvm.te.max(xo * 4 - 7, 0)
        tvm.testing.assert_prim_expr_equal(pad_before[0], rpad_before)
        tvm.testing.assert_prim_expr_equal(pad_after[0], rpad_after)
        tvm.testing.assert_prim_expr_equal(src.shape[0], 6 - rpad_before - rpad_after)
        return tvm.tir.Evaluate(0)

    stmt = tvm.tir.transform.InjectCopyIntrin("memcpy", cb)(mod)["main"].body
# Run all tests in this file when executed directly.
if __name__ == "__main__":
    test_copy2d()
    test_copy_pad()
    test_copy_pad_split()
    test_single_point_test()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_transform_inject_double_buffer.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
def test_double_buffer():
    """InjectDoubleBuffer doubles the shared buffer's extent, and the
    double-buffered loop then needs 4 storage syncs per iteration."""
    # (removed unused local: dtype = "int64" was never referenced; the
    # buffers below are all float32)
    n = 100
    m = 4
    tx = te.thread_axis("threadIdx.x")
    ib = tvm.tir.ir_builder.create()
    A = ib.pointer("float32", name="A")
    C = ib.pointer("float32", name="C")
    ib.scope_attr(tx, "thread_extent", 1)
    with ib.for_range(0, n) as i:
        B = ib.allocate("float32", m, name="B", scope="shared")
        with ib.new_scope():
            # Mark B for double buffering.
            ib.scope_attr(B.asobject().data, "double_buffer_scope", 1)
            with ib.for_range(0, m) as j:
                B[j] = A[i * 4 + j]
        with ib.for_range(0, m) as j:
            C[j] = B[j] + 1
    stmt = ib.get()
    mod = tvm.IRModule({"db": tvm.tir.PrimFunc([A.asobject(), C.asobject()], stmt)})
    opt = tvm.transform.Sequential(
        [tvm.tir.transform.InjectDoubleBuffer(), tvm.tir.transform.Simplify()]
    )
    with tvm.transform.PassContext(config={"tir.InjectDoubleBuffer": {"split_loop": 2}}):
        mod = opt(mod)
    stmt = mod["db"].body
    # Double buffering must allocate twice the original extent.
    assert isinstance(stmt.body, tvm.tir.Allocate)
    assert list(stmt.body.extents) == [m * 2]
    f = tvm.tir.transform.ThreadSync("shared")(mod)["db"]
    count = [0]

    def count_sync(op):
        if isinstance(op, tvm.tir.Call) and op.op.same_as(tvm.ir.Op.get("tir.tvm_storage_sync")):
            count[0] += 1

    tvm.tir.stmt_functor.post_order_visit(f.body, count_sync)
    assert count[0] == 4
# Entry point for running this file as a plain script.
if __name__ == "__main__":
    test_double_buffer()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_transform_inject_ptx_async_copy.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm.script import tir as T
import numpy as np
import tvm.testing
def count_cp_async(stmt):
    """Return the number of ``tir.ptx_cp_async`` intrinsic calls inside *stmt*."""
    counter = [0]

    def _visit(node):
        if isinstance(node, tvm.tir.Call) and str(node.op) == "tir.ptx_cp_async":
            counter[0] += 1

    tvm.tir.stmt_functor.post_order_visit(stmt, _visit)
    return counter[0]
def generate_global_to_shared_vectorized_copy(dtype, vector_size):
    """Build a PrimFunc that copies a (32, 128) *dtype* buffer to shared memory
    with vectorized stores of *vector_size* elements inside an async_scope,
    then copies shared memory back out to B."""
    num_iters = 128 // vector_size
    vector_size_expr = tvm.runtime.convert(vector_size)

    @T.prim_func
    def ptx_global_to_shared_copy(
        A: T.Buffer[(32, 128), dtype], B: T.Buffer[(32, 128), dtype]
    ) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        bx = T.env_thread("blockIdx.x")
        tx = T.env_thread("threadIdx.x")
        T.launch_thread(bx, 1)
        T.launch_thread(tx, 32)
        with T.block():
            A_shared = T.alloc_buffer([32, 128], dtype, scope="shared")
            T.reads(A[0:32, 0:128])
            T.writes(B[0:32, 0:128])
            # Mark the copy loop asynchronous so InjectPTXAsyncCopy rewrites it.
            T.attr("default", "async_scope", 1)
            for i in T.serial(num_iters):
                for j in T.vectorized(vector_size):
                    A_shared[tx, i * vector_size_expr + j] = A[tx, i * vector_size_expr + j]
            T.evaluate(T.ptx_commit_group(dtype=""))
            T.evaluate(T.ptx_wait_group(0, dtype=""))
            for i in range(128):
                B[tx, i] = A_shared[tx, i]

    return ptx_global_to_shared_copy
# Scalar (non-vectorized) fp32 global->shared copy inside an async_scope;
# input fixture for InjectPTXAsyncCopy with vector size 1.
@T.prim_func
def ptx_global_to_shared_copy_fp32x1(
    A: T.Buffer[(32, 128), "float32"], B: T.Buffer[(32, 128), "float32"]
) -> None:
    T.func_attr({"global_symbol": "main", "tir.noalias": True})
    bx = T.env_thread("blockIdx.x")
    tx = T.env_thread("threadIdx.x")
    T.launch_thread(bx, 1)
    T.launch_thread(tx, 32)
    with T.block():
        A_shared = T.alloc_buffer([32, 128], "float32", scope="shared")
        T.reads(A[0:32, 0:128])
        T.writes(B[0:32, 0:128])
        T.attr("default", "async_scope", 1)
        for i in T.serial(128):
            A_shared[tx, i] = A[tx, i]
        T.evaluate(T.ptx_commit_group(dtype=""))
        T.evaluate(T.ptx_wait_group(0, dtype=""))
        for i in range(128):
            B[tx, i] = A_shared[tx, i]
# Two interleaved fp16x8 copies into dynamic ("shared.dyn") shared memory
# inside one async_scope; input fixture expecting two cp.async rewrites.
@T.prim_func
def ptx_global_to_shared_dyn_copy_fp16x8(
    A: T.Buffer[(32, 128), "float16"],
    B: T.Buffer[(32, 128), "float16"],
    C: T.Buffer[(32, 128), "float16"],
) -> None:
    T.func_attr({"global_symbol": "main", "tir.noalias": True})
    bx = T.env_thread("blockIdx.x")
    tx = T.env_thread("threadIdx.x")
    T.launch_thread(bx, 1)
    T.launch_thread(tx, 32)
    with T.block():
        A_shared = T.alloc_buffer([32, 128], "float16", scope="shared.dyn")
        B_shared = T.alloc_buffer([32, 128], "float16", scope="shared.dyn")
        T.reads(A[0:32, 0:128], B[0:32, 0:128])
        T.writes(C[0:32, 0:128])
        T.attr("default", "async_scope", 1)
        for i in T.serial(16):
            for j in T.vectorized(8):
                A_shared[tx, i * 8 + j] = A[tx, i * 8 + j]
                B_shared[tx, i * 8 + j] = B[tx, i * 8 + j]
        T.evaluate(T.ptx_commit_group(dtype=""))
        T.evaluate(T.ptx_wait_group(0, dtype=""))
        for i in range(128):
            C[tx, i] = A_shared[tx, i] + B_shared[tx, i]
@tvm.testing.requires_cuda
def test_inject_async_copy():
    """InjectPTXAsyncCopy should emit exactly one cp.async per copy loop, and
    the built kernel must produce a faithful copy on Ampere-or-newer GPUs."""
    configs = [("float16", 8), ("float16", 4), ("float32", 4), ("float32", 1)]
    for dtype, vec_size in configs:
        if vec_size == 1:
            func = ptx_global_to_shared_copy_fp32x1
        else:
            func = generate_global_to_shared_vectorized_copy(dtype, vec_size)
        mod = tvm.IRModule.from_expr(func)
        mod = tvm.tir.transform.LowerOpaqueBlock()(mod)
        mod = tvm.tir.transform.FlattenBuffer()(mod)
        if vec_size > 1:
            mod = tvm.tir.transform.VectorizeLoop()(mod)
        mod = tvm.tir.transform.InjectPTXAsyncCopy()(mod)
        assert count_cp_async(mod["main"].body) == 1
        # Execution requires cp.async hardware support.
        if not tvm.testing.is_ampere_or_newer():
            continue
        with tvm.transform.PassContext(config={"tir.use_async_copy": 1}):
            built = tvm.build(tvm.IRModule.from_expr(func), target="cuda")
        dev = tvm.cuda(0)
        host_in = np.random.rand(32, 128).astype(dtype)
        dev_in = tvm.nd.array(host_in, device=dev)
        dev_out = tvm.nd.array(np.zeros((32, 128)).astype(dtype), device=dev)
        built(dev_in, dev_out)
        tvm.testing.assert_allclose(dev_out.numpy(), host_in)
@tvm.testing.requires_cuda
def test_inject_async_copy_shared_dyn():
    """Two interleaved copies into dynamic shared memory should yield two
    cp.async calls after merging the dynamic shared allocations."""
    mod = tvm.IRModule.from_expr(ptx_global_to_shared_dyn_copy_fp16x8)
    for transform in (
        tvm.tir.transform.LowerOpaqueBlock(),
        tvm.tir.transform.FlattenBuffer(),
        tvm.tir.transform.VectorizeLoop(),
        tvm.tir.transform.MergeDynamicSharedMemoryAllocations(),
        tvm.tir.transform.InjectPTXAsyncCopy(),
    ):
        mod = transform(mod)
    assert count_cp_async(mod["main"].body) == 2
    # Execution requires cp.async hardware support.
    if not tvm.testing.is_ampere_or_newer():
        return
    with tvm.transform.PassContext(config={"tir.use_async_copy": 1}):
        built = tvm.build(
            tvm.IRModule.from_expr(ptx_global_to_shared_dyn_copy_fp16x8), target="cuda"
        )
    dev = tvm.cuda(0)
    a_host = np.random.rand(32, 128).astype("float16")
    b_host = np.random.rand(32, 128).astype("float16")
    a_dev = tvm.nd.array(a_host, device=dev)
    b_dev = tvm.nd.array(b_host, device=dev)
    c_dev = tvm.nd.array(np.zeros((32, 128)).astype("float16"), device=dev)
    built(a_dev, b_dev, c_dev)
    tvm.testing.assert_allclose(c_dev.numpy(), a_host + b_host)
# Entry point for running this file as a plain script.
if __name__ == "__main__":
    test_inject_async_copy()
    test_inject_async_copy_shared_dyn()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_transform_inject_rolling_buffer.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.script
from tvm.script import tir as T
from tvm import te
from tvm import topi
from tvm.driver.build_module import get_binds
import numpy as np
import pytest
def _tile_nd(s, tensor, tile):
    """Split each axis of *tensor* by the matching size in *tile* and reorder
    so all outer loops precede all inner loops.

    Returns (outer_axes, inner_axes)."""
    outer_axes = []
    inner_axes = []
    for dim, factor in enumerate(tile):
        axis_outer, axis_inner = s[tensor].split(tensor.op.axis[dim], factor)
        outer_axes.append(axis_outer)
        inner_axes.append(axis_inner)
    s[tensor].reorder(*outer_axes, *inner_axes)
    return outer_axes, inner_axes
@tvm.tir.transform.prim_func_pass(opt_level=0)
def remove_rolling_buffer_attr(func, mod, ctx):
    """Pass that strips every rolling_buffer_scope AttrStmt, keeping its body."""

    def _strip(node):
        is_marker = (
            isinstance(node, tvm.tir.AttrStmt) and node.attr_key == "rolling_buffer_scope"
        )
        return node.body if is_marker else node

    return func.with_body(
        tvm.tir.stmt_functor.ir_transform(
            func.body, None, postorder=_strip, only_enable=["tir.AttrStmt"]
        )
    )
@tvm.tir.transform.prim_func_pass(opt_level=0)
def verify_no_rolling_buffer_attr(func, mod, ctx):
    """Pass that asserts no rolling_buffer_scope attribute survived lowering."""

    def _assert_absent(node):
        if isinstance(node, tvm.tir.AttrStmt):
            assert node.attr_key != "rolling_buffer_scope", "Failed to lower rolling buffers"

    tvm.tir.stmt_functor.post_order_visit(func.body, _assert_absent)
    return func
def _verify_schedule(sch, inputs, output):
    """Build *sch* twice — once with rolling-buffer attrs stripped, once with
    InjectRollingBuffer — and check both binaries agree on random int8 data."""
    user_pass_lists = [
        [(0, remove_rolling_buffer_attr), (0, verify_no_rolling_buffer_attr)],
        [(0, tvm.tir.transform.InjectRollingBuffer()), (0, verify_no_rolling_buffer_attr)],
    ]
    built_funcs = []
    for passes in user_pass_lists:
        with tvm.transform.PassContext(config={"tir.add_lower_pass": passes}):
            built_funcs.append(tvm.build(sch, inputs + [output]))
    ctx = tvm.cpu(0)
    input_data = [
        tvm.nd.array(
            np.random.randint(
                low=-100, high=100, size=[dim.value for dim in tensor.shape]
            ).astype("int8"),
            ctx,
        )
        for tensor in inputs
    ]
    out_shape = [dim.value for dim in output.shape]
    results = []
    for func in built_funcs:
        out = tvm.nd.array(np.zeros(out_shape, dtype="int8"), ctx)
        func(*input_data, out)
        results.append(out.numpy())
    np.testing.assert_equal(results[0], results[1])
@pytest.mark.parametrize("tile_shape", [(1, 4, 8, 16), (1, 8, 7, 11), (1, 8, 3, 8), (1, 7, 5, 3)])
def test_tile_shapes(tile_shape):
    """A two-pooling chain stays correct under several (uneven) tilings when
    the intermediate pooling is a rolling buffer."""
    A = te.placeholder((1, 12, 14, 16), name="A", dtype="int8")
    pool_a = topi.nn.pool2d(A, (3, 3), (1, 1), (1, 1), (0, 0, 0, 0), "max", layout="NHWC")
    pool_b = topi.nn.pool2d(pool_a, (3, 5), (1, 1), (1, 1), (0, 0, 0, 0), "max", layout="NHWC")
    sch = tvm.te.create_schedule([pool_b.op])
    outer_axes, _ = _tile_nd(sch, pool_b, tile_shape)
    sch[pool_a].compute_at(sch[pool_b], outer_axes[-1])
    sch[pool_a].rolling_buffer()
    _verify_schedule(sch, [A], pool_b)
def test_implied_split():
    """A single explicit split (rather than full n-d tiling) should still let
    the intermediate pooling become a rolling buffer."""
    A = te.placeholder((1, 12, 12, 16), name="A", dtype="int8")
    pool_a = topi.nn.pool2d(A, (3, 3), (1, 1), (1, 1), (0, 0, 0, 0), "max", layout="NHWC")
    pool_b = topi.nn.pool2d(pool_a, (3, 3), (1, 1), (1, 1), (0, 0, 0, 0), "max", layout="NHWC")
    sch = tvm.te.create_schedule([pool_b.op])
    _, _, w, _ = pool_b.op.axis
    w_outer, _ = sch[pool_b].split(w, 4)
    sch[pool_a].compute_at(sch[pool_b], w_outer)
    sch[pool_a].rolling_buffer()
    _verify_schedule(sch, [A], pool_b)
@pytest.mark.parametrize("kernel_shape", [(1, 1), (3, 3)])
def test_upscale(kernel_shape):
    """Rolling buffers feeding a 2x nearest-neighbour upscale stage."""
    output_shape = (1, 24, 24, 16)
    # Input must cover the downscaled output plus the pooling halos.
    input_shape = (
        output_shape[0],
        output_shape[1] // 2 + 2 * (kernel_shape[0] - 1),
        output_shape[2] // 2 + 2 * (kernel_shape[1] - 1),
        output_shape[3],
    )
    A = te.placeholder(input_shape, name="A", dtype="int8")
    pool_a = topi.nn.pool2d(A, kernel_shape, (1, 1), (1, 1), (0, 0, 0, 0), "max", layout="NHWC")
    pool_b = topi.nn.pool2d(
        pool_a, kernel_shape, (1, 1), (1, 1), (0, 0, 0, 0), "max", layout="NHWC"
    )
    upscale = te.compute((1, 24, 24, 16), lambda nn, hh, ww, cc: pool_b[nn, hh // 2, ww // 2, cc])
    sch = tvm.te.create_schedule([upscale.op])
    outer_axes, _ = _tile_nd(sch, upscale, (1, 5, 5, 16))
    for stage in (pool_b, pool_a):
        sch[stage].compute_at(sch[upscale], outer_axes[-1])
        sch[stage].rolling_buffer()
    _verify_schedule(sch, [A], upscale)
@pytest.mark.parametrize("tile_shape", [(1, 4, 8, 16), (1, 8, 7, 11), (1, 8, 3, 8), (1, 7, 5, 3)])
def test_3_tiled_poolings(tile_shape):
    """Three chained poolings with both intermediates as rolling buffers."""
    A = te.placeholder((1, 14, 14, 16), name="A", dtype="int8")
    pool_a = topi.nn.pool2d(A, (3, 3), (1, 1), (1, 1), (0, 0, 0, 0), "max", layout="NHWC")
    pool_b = topi.nn.pool2d(pool_a, (3, 3), (1, 1), (1, 1), (0, 0, 0, 0), "max", layout="NHWC")
    pool_c = topi.nn.pool2d(pool_b, (3, 3), (1, 1), (1, 1), (0, 0, 0, 0), "max", layout="NHWC")
    sch = tvm.te.create_schedule([pool_c.op])
    outer_axes, _ = _tile_nd(sch, pool_c, tile_shape)
    for stage in (pool_b, pool_a):
        sch[stage].compute_at(sch[pool_c], outer_axes[-1])
        sch[stage].rolling_buffer()
    _verify_schedule(sch, [A], pool_c)
@pytest.mark.parametrize("tile_shape", [(1, 4, 8, 16), (1, 8, 7, 11), (1, 8, 3, 8), (1, 7, 5, 3)])
def test_tiled_added_poolings(tile_shape):
    """Pool two inputs, add the results, pool again, with every intermediate
    stage tiled and converted to a rolling buffer.

    Fix: the second placeholder was previously created with ``name="A"``,
    giving two distinct tensors the same name in the generated IR; it is now
    named "B" to match the Python variable.
    """
    A = te.placeholder((1, 12, 12, 16), name="A", dtype="int8")
    B = te.placeholder((1, 14, 14, 16), name="B", dtype="int8")
    pool_a = topi.nn.pool2d(A, (3, 3), (1, 1), (1, 1), (0, 0, 0, 0), "max", layout="NHWC")
    pool_b = topi.nn.pool2d(B, (5, 5), (1, 1), (1, 1), (0, 0, 0, 0), "max", layout="NHWC")
    add = topi.add(pool_a, pool_b)
    pool_c = topi.nn.pool2d(add, (3, 3), (1, 1), (1, 1), (0, 0, 0, 0), "max", layout="NHWC")
    sch = tvm.te.create_schedule([pool_c.op])
    oi, ii = _tile_nd(sch, pool_c, tile_shape)
    sch[add].compute_at(sch[pool_c], oi[-1])
    sch[add].rolling_buffer()
    sch[pool_b].compute_at(sch[pool_c], oi[-1])
    sch[pool_b].rolling_buffer()
    sch[pool_a].compute_at(sch[pool_c], oi[-1])
    sch[pool_a].rolling_buffer()
    _verify_schedule(sch, [A, B], pool_c)
@pytest.mark.parametrize("make_rolling", [(0, 0), (1, 0), (0, 1), (1, 1)])
def test_mixed_buffers(make_rolling):
    """Every combination of rolling/non-rolling intermediates must agree."""
    A = te.placeholder((1, 14, 14, 16), name="A", dtype="int8")
    pool_a = topi.nn.pool2d(A, (3, 3), (1, 1), (1, 1), (0, 0, 0, 0), "max", layout="NHWC")
    pool_b = topi.nn.pool2d(pool_a, (3, 3), (1, 1), (1, 1), (0, 0, 0, 0), "max", layout="NHWC")
    pool_c = topi.nn.pool2d(pool_b, (3, 3), (1, 1), (1, 1), (0, 0, 0, 0), "max", layout="NHWC")
    sch = tvm.te.create_schedule([pool_c.op])
    outer_axes, _ = _tile_nd(sch, pool_c, (1, 4, 8, 16))
    for stage, rolling in zip((pool_b, pool_a), make_rolling):
        sch[stage].compute_at(sch[pool_c], outer_axes[-1])
        if rolling:
            sch[stage].rolling_buffer()
    _verify_schedule(sch, [A], pool_c)
# fmt: off
@tvm.script.ir_module
class PreRollingBuffer:
    # Input IR for the transform test: tensor_2 carries the
    # rolling_buffer_scope attribute and is realized per ax1_outer tile.
    @T.prim_func
    def main(A: T.handle, tensor: T.handle) -> None:
        # function attr dict
        T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
        # buffer definition
        tensor_2 = T.buffer_decl([1, 10, 12, 16], dtype="int8", elem_offset=0, align=64, offset_factor=1)
        A_1 = T.match_buffer(A, [1, 12, 14, 16], dtype="int8", elem_offset=0, align=64, offset_factor=1)
        tensor_1 = T.match_buffer(tensor, [1, 8, 8, 16], dtype="int8", elem_offset=0, align=64, offset_factor=1)
        # body
        T.realize(tensor_1[0:1, 0:8, 0:8, 0:16], "")
        for ax1_outer in T.serial(0, 2):
            T.realize(tensor_2[0:1, (ax1_outer*4):((ax1_outer*4) + 6), 0:12, 0:16], "")
            T.attr(tensor_2, "rolling_buffer_scope", True)
            for ax1 in T.serial(0, 6):
                for ax2 in T.serial(0, 12):
                    for ax3 in T.serial(0, 16):
                        tensor_2[0, (ax1 + (ax1_outer*4)), ax2, ax3] = T.int8(0)
                        for dh in T.serial(0, 3):
                            for dw in T.serial(0, 3):
                                tensor_2[0, (ax1 + (ax1_outer*4)), ax2, ax3] = T.max(tensor_2[0, (ax1 + (ax1_outer*4)), ax2, ax3], A_1[0, ((ax1 + (ax1_outer*4)) + dh), (ax2 + dw), ax3])
            for ax1_inner in T.serial(0, 4):
                for ax2_inner in T.serial(0, 8):
                    for ax3_inner in T.serial(0, 16):
                        tensor_1[0, (ax1_inner + (ax1_outer*4)), ax2_inner, ax3_inner] = T.int8(0)
                        for dh_1 in T.serial(0, 3):
                            for dw_1 in T.serial(0, 5):
                                tensor_1[0, (ax1_inner + (ax1_outer*4)), ax2_inner, ax3_inner] = T.max(tensor_1[0, (ax1_inner + (ax1_outer*4)), ax2_inner, ax3_inner], tensor_2[0, ((ax1_inner + (ax1_outer*4)) + dh_1), (ax2_inner + dw_1), ax3_inner])
    __tvm_meta__ = None
@tvm.script.ir_module
class PostRollingBuffer:
    # Expected IR after InjectRollingBuffer: tensor_2 is realized once with a
    # rolled extent of 6 rows, writes are guarded with T.likely so rows already
    # produced by the previous tile are skipped, and row indices wrap via
    # T.floormod.
    @T.prim_func
    def main(A: T.handle, tensor: T.handle) -> None:
        # function attr dict
        T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
        # buffer definition
        tensor_2 = T.buffer_decl([1, 10, 12, 16], dtype="int8", elem_offset=0, align=64, offset_factor=1)
        A_1 = T.match_buffer(A, [1, 12, 14, 16], dtype="int8", elem_offset=0, align=64, offset_factor=1)
        tensor_1 = T.match_buffer(tensor, [1, 8, 8, 16], dtype="int8", elem_offset=0, align=64, offset_factor=1)
        # body
        T.realize(tensor_1[0:1, 0:8, 0:8, 0:16], "")
        T.realize(tensor_2[0:1, 0:6, 0:12, 0:16], "")
        for ax1_outer in T.serial(0, 2):
            for ax1 in T.serial(0, 6):
                for ax2 in T.serial(0, 12):
                    for ax3 in T.serial(0, 16):
                        if T.likely(((ax1_outer < 1) or (ax1 >= 2)), dtype='bool') :
                            tensor_2[0, T.floormod((ax1 + (ax1_outer*4)), 6), ax2, ax3] = T.int8(0)
                        for dh in T.serial(0, 3):
                            for dw in T.serial(0, 3):
                                if T.likely(((ax1_outer < 1) or (ax1 >= 2)), dtype='bool'):
                                    tensor_2[0, T.floormod((ax1 + (ax1_outer*4)), 6), ax2, ax3] = T.max(tensor_2[0, T.floormod((ax1 + (ax1_outer*4)), 6), ax2, ax3], A_1[0, ((ax1 + (ax1_outer*4)) + dh), (ax2 + dw), ax3])
            for ax1_inner in T.serial(0, 4):
                for ax2_inner in T.serial(0, 8):
                    for ax3_inner in T.serial(0, 16):
                        tensor_1[0, (ax1_inner + (ax1_outer*4)), ax2_inner, ax3_inner] = T.int8(0)
                        for dh_1 in T.serial(0, 3):
                            for dw_1 in T.serial(0, 5):
                                tensor_1[0, (ax1_inner + (ax1_outer*4)), ax2_inner, ax3_inner] = T.max(tensor_1[0, (ax1_inner + (ax1_outer*4)), ax2_inner, ax3_inner], tensor_2[0, T.floormod(((ax1_inner + (ax1_outer*4)) + dh_1), 6), (ax2_inner + dw_1), ax3_inner])
    __tvm_meta__ = None
# fmt: on
def test_rolling_buffer_ir_transform():
    """Lowering PreRollingBuffer must produce PostRollingBuffer, round-tripped
    through TVMScript text to also exercise printing/parsing."""
    lowered = tvm.tir.transform.InjectRollingBuffer()(PreRollingBuffer)
    script = lowered.script(show_meta=True)
    roundtripped = tvm.script.from_source(script)
    tvm.ir.assert_structural_equal(roundtripped["main"], PostRollingBuffer["main"], True)
# Run through pytest so the parametrized tests are collected properly.
if __name__ == "__main__":
    pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_transform_inject_software_pipeline.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import numpy as np
import pytest
import tvm
import tvm.testing
import tvm.tir.tensor_intrin.cuda
from tvm import TVMError, te, tir
from tvm.meta_schedule.testing import te_workload
from tvm.script import tir as T
from tvm.testing.tir import mma_schedule
from tvm.tir.tensor_intrin.cuda import (
LDMATRIX_16x16_A_DYN_INTRIN,
LDMATRIX_16x16_B_DYN_INTRIN,
MMA_f16f16f32_INTRIN,
MMA_fill_16x16_f32_INTRIN,
MMA_store_16x16_f32_global_INTRIN,
shared_16x16_to_ldmatrix_32x8_layout,
)
def _check(original, transformed):
    """Run InjectSoftwarePipeline + Simplify on *original* and require the
    result to be structurally equal to *transformed*."""
    mod = tvm.IRModule.from_expr(original)
    for transform in (tvm.tir.transform.InjectSoftwarePipeline(), tvm.tir.transform.Simplify()):
        mod = transform(mod)
    tvm.ir.assert_structural_equal(mod["main"], transformed, True)
def _check_error(func):
    """The software-pipeline pass is expected to reject *func*."""
    mod = tvm.IRModule.from_expr(func)
    pipeline_pass = tvm.tir.transform.InjectSoftwarePipeline()
    with pytest.raises(ValueError):
        pipeline_pass(mod)
# Input fixture: a 2-stage pipeline over a single-iteration loop.
@T.prim_func
def trivial_pipeline(A: T.Buffer[(16, 1), "float32"], C: T.Buffer[(16, 1), "float32"]):
    for tx in T.thread_binding(0, 16, thread="threadIdx.x"):
        for i in T.serial(
            0, 1, annotations={"software_pipeline_stage": [0, 1], "software_pipeline_order": [0, 1]}
        ):
            with T.block():
                T.reads(A[tx, i])
                T.writes(C[tx, i])
                B = T.alloc_buffer((16, 1), dtype="float32", scope="shared")
                with T.block():
                    T.reads(A[tx, i])
                    T.writes(B[tx, 0])
                    B[tx, 0] = A[tx, i] * T.float32(2)
                with T.block():
                    T.reads(B[tx, 0])
                    T.writes(C[tx, i])
                    C[tx, i] = B[tx, 0] + T.float32(1)
# Expected output for trivial_pipeline: prologue, empty steady-state body,
# and epilogue, with B double-buffered along a new leading axis of extent 2.
@T.prim_func
def transformed_trivial_pipeline(
    A: T.Buffer[(16, 1), "float32"], C: T.Buffer[(16, 1), "float32"]
) -> None:
    for tx in T.thread_binding(16, thread="threadIdx.x"):
        with T.block():
            T.reads(A[tx, 0])
            T.writes(C[tx, 0])
            B = T.alloc_buffer([2, 16, 1], dtype="float32", scope="shared")
            with T.block():
                T.reads(A[tx, 0])
                T.writes(B[0, tx, 0])
                B[0, tx, 0] = A[tx, 0] * T.float32(2)
            with T.block():
                T.reads()
                T.writes()
                T.evaluate(0)
            with T.block():
                T.reads(B[0, tx, 0])
                T.writes(C[tx, 0])
                C[tx, 0] = B[0, tx, 0] + T.float32(1)
def gen_simple_compute(num_stages):
    """Return a pipelined PrimFunc whose producer runs *num_stages* stages
    ahead of the consumer."""

    @T.prim_func
    def simple_compute(A: T.Buffer[(16, 16), "float32"], C: T.Buffer[(16, 16), "float32"]):
        for tx in T.thread_binding(0, 16, thread="threadIdx.x"):
            for i in T.serial(
                0,
                16,
                annotations={
                    "software_pipeline_stage": [0, num_stages],
                    "software_pipeline_order": [0, 1],
                },
            ):
                with T.block("compute"):
                    T.reads(A[tx, i])
                    T.writes(C[tx, i])
                    B = T.alloc_buffer((16, 1), dtype="float32", scope="shared")
                    with T.block():
                        T.reads(A[tx, i])
                        T.writes(B[tx, 0])
                        B[tx, 0] = A[tx, i] * T.float32(2)
                    with T.block():
                        T.reads(B[tx, 0])
                        T.writes(C[tx, i])
                        C[tx, i] = B[tx, 0] + T.float32(1)

    return simple_compute
# Expected output for gen_simple_compute(1): prologue (i=0 produce), 15-step
# steady state with B double-buffered on (i % 2), and epilogue (i=15 consume).
@T.prim_func
def transformed_simple_compute(
    A: T.Buffer[(16, 16), "float32"], C: T.Buffer[(16, 16), "float32"]
) -> None:
    for tx in T.thread_binding(0, 16, thread="threadIdx.x"):
        with T.block():
            T.reads([A[tx, 0:16]])
            T.writes([C[tx, 0:16]])
            B = T.alloc_buffer([2, 16, 1], dtype="float32", scope="shared")
            with T.block():
                T.reads([A[tx, 0]])
                T.writes([B[0, tx, 0]])
                B[0, tx, 0] = A[tx, 0] * T.float32(2)
            with T.block():
                T.reads([A[tx, 1:16], B[0:2, tx, 0]])
                T.writes([B[0:2, tx, 0], C[tx, 0:15]])
                for i in T.serial(0, 15):
                    with T.block():
                        T.reads([A[tx, i + 1]])
                        T.writes([B[(i + 1) % 2, tx, 0]])
                        B[(i + 1) % 2, tx, 0] = A[tx, i + 1] * T.float32(2)
                    with T.block():
                        T.reads([B[i % 2, tx, 0]])
                        T.writes([C[tx, i]])
                        C[tx, i] = B[i % 2, tx, 0] + T.float32(1)
            with T.block():
                T.reads([B[1, tx, 0]])
                T.writes([C[tx, 15]])
                C[tx, 15] = B[1, tx, 0] + T.float32(1)
# Same as gen_simple_compute(1), but the pipelined loop also carries an
# unrelated annotation that must survive the transform.
@T.prim_func
def simple_compute_with_other_annotation(
    A: T.Buffer[(16, 16), "float32"], C: T.Buffer[(16, 16), "float32"]
):
    for tx in T.thread_binding(0, 16, thread="threadIdx.x"):
        for i in T.serial(
            0,
            16,
            annotations={
                "software_pipeline_stage": [0, 1],
                "software_pipeline_order": [0, 1],
                "pragma_loop_partition_hint": True,
            },
        ):
            with T.block("compute"):
                T.reads(A[tx, i])
                T.writes(C[tx, i])
                B = T.alloc_buffer((16, 1), dtype="float32", scope="shared")
                with T.block():
                    T.reads(A[tx, i])
                    T.writes(B[tx, 0])
                    B[tx, 0] = A[tx, i] * T.float32(2)
                with T.block():
                    T.reads(B[tx, 0])
                    T.writes(C[tx, i])
                    C[tx, i] = B[tx, 0] + T.float32(1)
# Expected output: identical to transformed_simple_compute, except the
# steady-state loop keeps the pragma_loop_partition_hint annotation.
@T.prim_func
def transformed_simple_compute_with_other_annotation(
    A: T.Buffer[(16, 16), "float32"], C: T.Buffer[(16, 16), "float32"]
) -> None:
    for tx in T.thread_binding(0, 16, thread="threadIdx.x"):
        with T.block():
            T.reads([A[tx, 0:16]])
            T.writes([C[tx, 0:16]])
            B = T.alloc_buffer([2, 16, 1], dtype="float32", scope="shared")
            with T.block():
                T.reads([A[tx, 0]])
                T.writes([B[0, tx, 0]])
                B[0, tx, 0] = A[tx, 0] * T.float32(2)
            with T.block():
                T.reads([A[tx, 1:16], B[0:2, tx, 0]])
                T.writes([B[0:2, tx, 0], C[tx, 0:15]])
                for i in T.serial(
                    0,
                    15,
                    annotations={"pragma_loop_partition_hint": True},
                ):
                    with T.block():
                        T.reads([A[tx, i + 1]])
                        T.writes([B[(i + 1) % 2, tx, 0]])
                        B[(i + 1) % 2, tx, 0] = A[tx, i + 1] * T.float32(2)
                    with T.block():
                        T.reads([B[i % 2, tx, 0]])
                        T.writes([C[tx, i]])
                        C[tx, i] = B[i % 2, tx, 0] + T.float32(1)
            with T.block():
                T.reads([B[1, tx, 0]])
                T.writes([C[tx, 15]])
                C[tx, 15] = B[1, tx, 0] + T.float32(1)
# Input fixture: a 3-stage pipeline (A -> B -> C -> D) over 16 iterations.
@T.prim_func
def three_stage_compute(A: T.Buffer[(16, 16), "float32"], D: T.Buffer[(16, 16), "float32"]):
    for tx in T.thread_binding(0, 16, thread="threadIdx.x"):
        for i in T.serial(
            0,
            16,
            annotations={
                "software_pipeline_stage": [0, 1, 2],
                "software_pipeline_order": [0, 1, 2],
            },
        ):
            with T.block("compute"):
                T.reads(A[tx, i])
                T.writes(D[tx, i])
                B = T.alloc_buffer((16, 1), dtype="float32", scope="shared")
                C = T.alloc_buffer((16, 1), dtype="float32", scope="shared")
                with T.block():
                    T.reads(A[tx, i])
                    T.writes(B[tx, 0])
                    B[tx, 0] = A[tx, i] * T.float32(2)
                with T.block():
                    T.reads(B[tx, 0])
                    T.writes(C[tx, 0])
                    C[tx, 0] = B[tx, 0] + T.float32(2)
                with T.block():
                    T.reads(C[tx, 0])
                    T.writes(D[tx, i])
                    D[tx, i] = C[tx, 0] + T.float32(1)
# Expected output for three_stage_compute: 2-iteration unrolled prologue
# (with T.where predicates), 14-step steady state, and 2-iteration epilogue;
# B and C are double-buffered on a new leading axis of extent 2.
@T.prim_func
def transformed_three_stage_compute(
    A: T.Buffer[(16, 16), "float32"], D: T.Buffer[(16, 16), "float32"]
) -> None:
    for tx in T.thread_binding(16, thread="threadIdx.x"):
        with T.block():
            T.reads(A[tx, 0:16])
            T.writes(D[tx, 0:16])
            B = T.alloc_buffer([2, 16, 1], dtype="float32", scope="shared")
            C = T.alloc_buffer([2, 16, 1], dtype="float32", scope="shared")
            with T.block():
                T.reads(A[tx, 0:2], B[0:2, tx, 0])
                T.writes(B[0:2, tx, 0], C[0:2, tx, 0])
                for i in T.unroll(2):
                    with T.block():
                        T.reads(A[tx, i])
                        T.writes(B[0:2, tx, 0])
                        B[i, tx, 0] = A[tx, i] * T.float32(2)
                    with T.block():
                        T.where(i == 1)
                        T.reads(B[0:2, tx, 0])
                        T.writes(C[0:2, tx, 0])
                        C[(i + 1) % 2, tx, 0] = B[(i + 1) % 2, tx, 0] + T.float32(2)
            with T.block():
                T.reads(A[tx, 2:16], B[0:2, tx, 0], C[0:2, tx, 0])
                T.writes(B[0:2, tx, 0], C[0:2, tx, 0], D[tx, 0:14])
                for i in T.serial(14):
                    with T.block():
                        T.reads(A[tx, i + 2])
                        T.writes(B[0:2, tx, 0])
                        B[i % 2, tx, 0] = A[tx, i + 2] * T.float32(2)
                    with T.block():
                        T.reads(B[0:2, tx, 0])
                        T.writes(C[0:2, tx, 0])
                        C[(i + 1) % 2, tx, 0] = B[(i + 1) % 2, tx, 0] + T.float32(2)
                    with T.block():
                        T.reads(C[0:2, tx, 0])
                        T.writes(D[tx, i])
                        D[tx, i] = C[i % 2, tx, 0] + T.float32(1)
            with T.block():
                T.reads(B[0:2, tx, 0], C[0:2, tx, 0])
                T.writes(C[0:2, tx, 0], D[tx, 14:16])
                for i in T.unroll(2):
                    with T.block():
                        T.where(i < 1)
                        T.reads(B[0:2, tx, 0])
                        T.writes(C[0:2, tx, 0])
                        C[(i + 1) % 2, tx, 0] = B[(i + 1) % 2, tx, 0] + T.float32(2)
                    with T.block():
                        T.reads(C[0:2, tx, 0])
                        T.writes(D[tx, i + 14])
                        D[tx, i + 14] = C[i, tx, 0] + T.float32(1)
# Input fixture: two producer chains (A->AS->AL, B->BS->BL) feeding one
# consumer; the order annotation interleaves the shared and local copies.
@T.prim_func
def dag_interleaving(
    A: T.Buffer[(16, 16), "float32"],
    B: T.Buffer[(16, 16), "float32"],
    C: T.Buffer[(16, 16), "float32"],
) -> None:
    for tx in T.thread_binding(0, 16, thread="threadIdx.x"):
        for i in T.serial(
            0,
            16,
            annotations={
                "software_pipeline_stage": [0, 0, 0, 0, 1],
                "software_pipeline_order": [0, 2, 1, 3, 4],
            },
        ):
            with T.block():
                T.reads(A[tx, i])
                T.writes(C[tx, i])
                AS = T.alloc_buffer((16, 1), dtype="float32", scope="shared")
                BS = T.alloc_buffer((16, 1), dtype="float32", scope="shared")
                AL = T.alloc_buffer((1, 1), dtype="float32", scope="local")
                BL = T.alloc_buffer((1, 1), dtype="float32", scope="local")
                with T.block():
                    T.reads(A[tx, i])
                    T.writes(AS[tx, 0])
                    AS[tx, 0] = A[tx, i] * T.float32(2)
                with T.block():
                    T.reads(AS[tx, 0])
                    T.writes(AL[0, 0])
                    AL[0, 0] = AS[tx, 0]
                with T.block():
                    T.reads(B[tx, i])
                    T.writes(BS[tx, 0])
                    BS[tx, 0] = B[tx, i] + T.float32(2)
                with T.block():
                    T.reads(BS[tx, 0])
                    T.writes(BL[0, 0])
                    BL[0, 0] = BS[tx, 0]
                with T.block():
                    T.reads(AL[0, 0], BL[0, 0])
                    T.writes(C[tx, i])
                    C[tx, i] = AL[0, 0] * BL[0, 0]
# Expected output for dag_interleaving: stage-0 producers reordered per the
# order annotation; only the local buffers AL/BL are double-buffered.
@T.prim_func
def transformed_dag_interleaving(
    A: T.Buffer[(16, 16), "float32"],
    B: T.Buffer[(16, 16), "float32"],
    C: T.Buffer[(16, 16), "float32"],
) -> None:
    for tx in T.thread_binding(16, thread="threadIdx.x"):
        with T.block():
            T.reads(A[tx, 0:16], B[tx, 0:16])
            T.writes(C[tx, 0:16])
            AS = T.alloc_buffer([16, 1], dtype="float32", scope="shared")
            BS = T.alloc_buffer([16, 1], dtype="float32", scope="shared")
            AL = T.alloc_buffer([2, 1, 1], dtype="float32", scope="local")
            BL = T.alloc_buffer([2, 1, 1], dtype="float32", scope="local")
            with T.block():
                T.reads(A[tx, 0], B[tx, 0], AS[tx, 0], BS[tx, 0])
                T.writes(AS[tx, 0], BS[tx, 0], AL[0, 0, 0], BL[0, 0, 0])
                with T.block():
                    T.reads(A[tx, 0])
                    T.writes(AS[tx, 0])
                    AS[tx, 0] = A[tx, 0] * T.float32(2)
                with T.block():
                    T.reads(B[tx, 0])
                    T.writes(BS[tx, 0])
                    BS[tx, 0] = B[tx, 0] + T.float32(2)
                with T.block():
                    T.reads(AS[tx, 0])
                    T.writes(AL[0, 0, 0])
                    AL[0, 0, 0] = AS[tx, 0]
                with T.block():
                    T.reads(BS[tx, 0])
                    T.writes(BL[0, 0, 0])
                    BL[0, 0, 0] = BS[tx, 0]
            with T.block():
                T.reads(
                    A[tx, 1:16], B[tx, 1:16], AS[tx, 0], BS[tx, 0], AL[0:2, 0, 0], BL[0:2, 0, 0]
                )
                T.writes(AS[tx, 0], BS[tx, 0], AL[0:2, 0, 0], BL[0:2, 0, 0], C[tx, 0:15])
                for i in T.serial(15):
                    with T.block():
                        T.reads(A[tx, i + 1])
                        T.writes(AS[tx, 0])
                        AS[tx, 0] = A[tx, i + 1] * T.float32(2)
                    with T.block():
                        T.reads(B[tx, i + 1])
                        T.writes(BS[tx, 0])
                        BS[tx, 0] = B[tx, i + 1] + T.float32(2)
                    with T.block():
                        T.reads(AS[tx, 0])
                        T.writes(AL[(i + 1) % 2, 0, 0])
                        AL[(i + 1) % 2, 0, 0] = AS[tx, 0]
                    with T.block():
                        T.reads(BS[tx, 0])
                        T.writes(BL[(i + 1) % 2, 0, 0])
                        BL[(i + 1) % 2, 0, 0] = BS[tx, 0]
                    with T.block():
                        T.reads(AL[i % 2, 0, 0], BL[i % 2, 0, 0])
                        T.writes(C[tx, i])
                        C[tx, i] = AL[i % 2, 0, 0] * BL[i % 2, 0, 0]
            with T.block():
                T.reads(AL[1, 0, 0], BL[1, 0, 0])
                T.writes(C[tx, 15])
                C[tx, 15] = AL[1, 0, 0] * BL[1, 0, 0]
# Input fixture: an outer pipelined loop whose stage-1 body itself contains a
# pipelined inner loop.
@T.prim_func
def nested_pipeline_simple(
    A: T.Buffer[(16, 16, 16), "float32"], C: T.Buffer[(16, 16, 16), "float32"]
):
    for tx in T.thread_binding(0, 16, thread="threadIdx.x"):
        for i in T.serial(
            0,
            16,
            annotations={
                "software_pipeline_stage": [0, 1, 1, 1],
                "software_pipeline_order": [0, 1, 2, 3],
            },
        ):
            with T.block():
                T.reads(A[tx, i, 0:16])
                T.writes(C[tx, i, 0:16])
                A_shared = T.alloc_buffer((16, 1, 16), dtype="float32", scope="shared")
                for j in T.serial(0, 16):
                    with T.block():
                        T.reads(A[tx, i, j])
                        T.writes(A_shared[tx, 0, j])
                        A_shared[tx, 0, j] = A[tx, i, j]
                for j in T.serial(
                    0,
                    16,
                    annotations={
                        "software_pipeline_stage": [0, 1],
                        "software_pipeline_order": [0, 1],
                    },
                ):
                    with T.block():
                        T.reads(A_shared[tx, 0, j])
                        T.writes(C[tx, i, j])
                        B = T.alloc_buffer((16, 1, 1), dtype="float32", scope="shared")
                        with T.block():
                            T.reads(A_shared[tx, i, j])
                            T.writes(B[tx, i, 0])
                            B[tx, i, 0] = A_shared[tx, 0, j] * T.float32(2)
                        with T.block():
                            T.reads(B[tx, i, 0])
                            T.writes(C[tx, i, j])
                            C[tx, i, j] = B[tx, i, 0] + T.float32(1)
# Expected output for nested_pipeline_simple: both the outer and inner
# pipelines are expanded into prologue / steady-state / epilogue, with
# A_shared and B double-buffered on new leading axes of extent 2.
@T.prim_func
def transformed_nested_pipeline_simple(
    A: T.Buffer[(16, 16, 16), "float32"], C: T.Buffer[(16, 16, 16), "float32"]
) -> None:
    for tx in T.thread_binding(0, 16, thread="threadIdx.x"):
        with T.block():
            T.reads([A[tx, 0:16, 0:16]])
            T.writes([C[tx, 0:16, 0:16]])
            A_shared = T.alloc_buffer([2, 16, 1, 16], dtype="float32", scope="shared")
            B = T.alloc_buffer([2, 16, 1, 1], dtype="float32", scope="shared")
            with T.block():
                T.reads([A[tx, 0, 0:16]])
                T.writes([A_shared[0, tx, 0, 0:16]])
                for j in T.serial(0, 16):
                    with T.block():
                        T.reads([A[tx, 0, j]])
                        T.writes([A_shared[0, tx, 0, j]])
                        A_shared[0, tx, 0, j] = A[tx, 0, j]
            with T.block():
                T.reads([A[tx, 1:16, 0:16], A_shared[0:2, tx, 0:15, 0:16], B[0:2, tx, 0:15, 0]])
                T.writes([A_shared[0:2, tx, 0, 0:16], B[0:2, tx, 0:15, 0], C[tx, 0:15, 0:16]])
                for i in T.serial(0, 15):
                    with T.block():
                        T.reads([A[tx, i + 1, 0:16]])
                        T.writes([A_shared[(i + 1) % 2, tx, 0, 0:16]])
                        for j in T.serial(0, 16):
                            with T.block():
                                T.reads([A[tx, i + 1, j]])
                                T.writes([A_shared[(i + 1) % 2, tx, 0, j]])
                                A_shared[(i + 1) % 2, tx, 0, j] = A[tx, i + 1, j]
                    with T.block():
                        T.reads([A_shared[i % 2, tx, i, 0]])
                        T.writes([B[0, tx, i, 0]])
                        B[0, tx, i, 0] = A_shared[i % 2, tx, 0, 0] * T.float32(2)
                    with T.block():
                        T.reads([A_shared[i % 2, tx, i, 1:16], B[0:2, tx, i, 0]])
                        T.writes([B[0:2, tx, i, 0], C[tx, i, 0:15]])
                        for j in T.serial(0, 15):
                            with T.block():
                                T.reads([A_shared[i % 2, tx, i, j + 1]])
                                T.writes([B[(j + 1) % 2, tx, i, 0]])
                                B[(j + 1) % 2, tx, i, 0] = A_shared[
                                    i % 2, tx, 0, j + 1
                                ] * T.float32(2)
                            with T.block():
                                T.reads([B[j % 2, tx, i, 0]])
                                T.writes([C[tx, i, j]])
                                C[tx, i, j] = B[j % 2, tx, i, 0] + T.float32(1)
                    with T.block():
                        T.reads([B[1, tx, i, 0]])
                        T.writes([C[tx, i, 15]])
                        C[tx, i, 15] = B[1, tx, i, 0] + T.float32(1)
            with T.block():
                T.reads([A_shared[1, tx, 15, 0:16], B[0:2, tx, 15, 0]])
                T.writes([B[0:2, tx, 15, 0], C[tx, 15, 0:16]])
                with T.block():
                    T.reads([A_shared[1, tx, 15, 0]])
                    T.writes([B[0, tx, 15, 0]])
                    B[0, tx, 15, 0] = A_shared[1, tx, 0, 0] * T.float32(2)
                with T.block():
                    T.reads([A_shared[1, tx, 15, 1:16], B[0:2, tx, 15, 0]])
                    T.writes([B[0:2, tx, 15, 0], C[tx, 15, 0:15]])
                    for j in T.serial(0, 15):
                        with T.block():
                            T.reads([A_shared[1, tx, 15, j + 1]])
                            T.writes([B[(j + 1) % 2, tx, 15, 0]])
                            B[(j + 1) % 2, tx, 15, 0] = A_shared[1, tx, 0, j + 1] * T.float32(2)
                        with T.block():
                            T.reads([B[j % 2, tx, 15, 0]])
                            T.writes([C[tx, 15, j]])
                            C[tx, 15, j] = B[j % 2, tx, 15, 0] + T.float32(1)
                with T.block():
                    T.reads([B[1, tx, 15, 0]])
                    T.writes([C[tx, 15, 15]])
                    C[tx, 15, 15] = B[1, tx, 15, 0] + T.float32(1)
# Input fixture: like nested_pipeline_simple, but the order annotation hoists
# the inner pipeline's first block into stage 0 (prefetching it).
@T.prim_func
def nested_pipeline_prefetch_inner(
    A: T.Buffer[(16, 16, 16), "float32"], C: T.Buffer[(16, 16, 16), "float32"]
):
    for tx in T.thread_binding(0, 16, thread="threadIdx.x"):
        for i in T.serial(
            0,
            16,
            annotations={
                "software_pipeline_stage": [0, 0, 1, 1],
                "software_pipeline_order": [0, 2, 1, 3],
            },
        ):
            with T.block():
                T.reads(A[tx, i, 0:16])
                T.writes(C[tx, i, 0:16])
                A_shared = T.alloc_buffer((16, 1, 16), dtype="float32", scope="shared")
                for j in T.serial(0, 16):
                    with T.block():
                        T.reads(A[tx, i, j])
                        T.writes(A_shared[tx, 0, j])
                        A_shared[tx, 0, j] = A[tx, i, j]
                for j in T.serial(
                    0,
                    16,
                    annotations={
                        "software_pipeline_stage": [0, 1],
                        "software_pipeline_order": [0, 1],
                    },
                ):
                    with T.block():
                        T.reads(A_shared[tx, 0, j])
                        T.writes(C[tx, i, j])
                        B = T.alloc_buffer((16, 1, 1), dtype="float32", scope="shared")
                        with T.block():
                            T.reads(A_shared[tx, i, j])
                            T.writes(B[tx, i, 0])
                            B[tx, i, 0] = A_shared[tx, 0, j] * T.float32(2)
                        with T.block():
                            T.reads(B[tx, i, 0])
                            T.writes(C[tx, i, j])
                            C[tx, i, j] = B[tx, i, 0] + T.float32(1)
# Expected output of InjectSoftwarePipeline for nested_pipeline_prefetch_inner.
# A_shared and B gain a leading version dimension of extent 2, and both the
# outer i-loop and inner j-loop are split into prologue / steady-state /
# epilogue. Do NOT edit the statement structure: the test compares this IR
# structurally against the pass output.
@T.prim_func
def transformed_nested_pipeline_prefetch_inner(
    A: T.Buffer[(16, 16, 16), "float32"], C: T.Buffer[(16, 16, 16), "float32"]
) -> None:
    for tx in T.thread_binding(0, 16, thread="threadIdx.x"):
        with T.block():
            T.reads([A[tx, 0:16, 0:16]])
            T.writes([C[tx, 0:16, 0:16]])
            A_shared = T.alloc_buffer([2, 16, 1, 16], dtype="float32", scope="shared")
            B = T.alloc_buffer([2, 16, 1, 1], dtype="float32", scope="shared")
            # Prologue: iteration i == 0 (copy A -> A_shared, first B element).
            with T.block():
                T.reads([A[tx, 0, 0:16], A_shared[0, tx, 0, 0]])
                T.writes([A_shared[0, tx, 0, 0:16], B[0, tx, 0, 0]])
                with T.block():
                    T.reads([A[tx, 0, 0:16]])
                    T.writes([A_shared[0, tx, 0, 0:16]])
                    for j in T.serial(0, 16):
                        with T.block():
                            T.reads([A[tx, 0, j]])
                            T.writes([A_shared[0, tx, 0, j]])
                            A_shared[0, tx, 0, j] = A[tx, 0, j]
                with T.block():
                    T.reads([A_shared[0, tx, 0, 0]])
                    T.writes([B[0, tx, 0, 0]])
                    B[0, tx, 0, 0] = A_shared[0, tx, 0, 0] * T.float32(2)
            # Steady state: i in [0, 15), prefetching iteration i + 1.
            with T.block():
                T.reads([A[tx, 1:16, 0:16], A_shared[0:2, tx, 0:16, 0:16], B[0:2, tx, 0:15, 0]])
                T.writes([A_shared[0:2, tx, 0, 0:16], B[0:2, tx, 0:16, 0], C[tx, 0:15, 0:16]])
                for i in T.serial(0, 15):
                    with T.block():
                        T.reads([A[tx, i + 1, 0:16]])
                        T.writes([A_shared[(i + 1) % 2, tx, 0, 0:16]])
                        for j in T.serial(0, 16):
                            with T.block():
                                T.reads([A[tx, i + 1, j]])
                                T.writes([A_shared[(i + 1) % 2, tx, 0, j]])
                                A_shared[(i + 1) % 2, tx, 0, j] = A[tx, i + 1, j]
                    with T.block():
                        T.reads([A_shared[i % 2, tx, i, 1:16], B[0:2, tx, i, 0]])
                        T.writes([B[0:2, tx, i, 0], C[tx, i, 0:15]])
                        for j in T.serial(0, 15):
                            with T.block():
                                T.reads([A_shared[i % 2, tx, i, j + 1]])
                                T.writes([B[(j + 1) % 2, tx, i, 0]])
                                B[(j + 1) % 2, tx, i, 0] = A_shared[
                                    i % 2, tx, 0, j + 1
                                ] * T.float32(2)
                            with T.block():
                                T.reads([B[j % 2, tx, i, 0]])
                                T.writes([C[tx, i, j]])
                                C[tx, i, j] = B[j % 2, tx, i, 0] + T.float32(1)
                    with T.block():
                        T.reads([A_shared[(i + 1) % 2, tx, i + 1, 0]])
                        T.writes([B[0, tx, i + 1, 0]])
                        B[0, tx, i + 1, 0] = A_shared[(i + 1) % 2, tx, 0, 0] * T.float32(2)
                    with T.block():
                        T.reads([B[1, tx, i, 0]])
                        T.writes([C[tx, i, 15]])
                        C[tx, i, 15] = B[1, tx, i, 0] + T.float32(1)
            # Epilogue: iteration i == 15 (drain the inner pipeline).
            with T.block():
                T.reads([A_shared[1, tx, 15, 0:16], B[0:2, tx, 15, 0]])
                T.writes([B[0:2, tx, 15, 0], C[tx, 15, 0:16]])
                with T.block():
                    T.reads([A_shared[1, tx, 15, 0]])
                    T.writes([B[0, tx, 15, 0]])
                    B[0, tx, 15, 0] = A_shared[1, tx, 0, 0] * T.float32(2)
                with T.block():
                    T.reads([A_shared[1, tx, 15, 1:16], B[0:2, tx, 15, 0]])
                    T.writes([B[0:2, tx, 15, 0], C[tx, 15, 0:15]])
                    for j in T.serial(0, 15):
                        with T.block():
                            T.reads([A_shared[1, tx, 15, j + 1]])
                            T.writes([B[(j + 1) % 2, tx, 15, 0]])
                            B[(j + 1) % 2, tx, 15, 0] = A_shared[1, tx, 0, j + 1] * T.float32(2)
                        with T.block():
                            T.reads([B[j % 2, tx, 15, 0]])
                            T.writes([C[tx, 15, j]])
                            C[tx, 15, j] = B[j % 2, tx, 15, 0] + T.float32(1)
                with T.block():
                    T.reads([B[1, tx, 15, 0]])
                    T.writes([C[tx, 15, 15]])
                    C[tx, 15, 15] = B[1, tx, 15, 0] + T.float32(1)
# Input fixture: a nested pipeline with an extra shared -> local copy stage.
# The outer order annotation [0, 2, 3, 1, 4] interleaves the A_local copy and
# the inner-pipeline prologue between the other stages. Paired with
# transformed_nested_pipeline_interleaving below.
@T.prim_func
def nested_pipeline_interleaving(
    A: T.Buffer[(16, 16, 16), "float32"], C: T.Buffer[(16, 16, 16), "float32"]
):
    for tx in T.thread_binding(0, 16, thread="threadIdx.x"):
        for i in T.serial(
            0,
            16,
            annotations={
                "software_pipeline_stage": [0, 0, 0, 1, 1],
                "software_pipeline_order": [0, 2, 3, 1, 4],
            },
        ):
            with T.block():
                T.reads(A[tx, i, 0:16])
                T.writes(C[tx, i, 0:16])
                A_shared = T.alloc_buffer((16, 1, 16), dtype="float32", scope="shared")
                A_local = T.alloc_buffer((1, 1, 16), dtype="float32", scope="local")
                for j in T.serial(0, 16):
                    with T.block():
                        T.reads(A[tx, i, j])
                        T.writes(A_shared[tx, 0, j])
                        A_shared[tx, 0, j] = A[tx, i, j]
                for j in T.serial(0, 16):
                    with T.block():
                        T.reads(A_shared[tx, 0, j])
                        T.writes(A_local[0, 0, j])
                        A_local[0, 0, j] = A_shared[tx, i, j]
                for j in T.serial(
                    0,
                    16,
                    annotations={
                        "software_pipeline_stage": [0, 1],
                        "software_pipeline_order": [0, 1],
                    },
                ):
                    with T.block():
                        T.reads(A_local[0, 0, j])
                        T.writes(C[tx, i, j])
                        B = T.alloc_buffer((16, 1, 1), dtype="float32", scope="shared")
                        with T.block():
                            T.reads(A_local[tx, i, j])
                            T.writes(B[tx, i, 0])
                            B[tx, i, 0] = A_local[0, 0, j] * T.float32(2)
                        with T.block():
                            T.reads(B[tx, i, 0])
                            T.writes(C[tx, i, j])
                            C[tx, i, j] = B[tx, i, 0] + T.float32(1)
# Expected output of InjectSoftwarePipeline for nested_pipeline_interleaving.
# Only B is multi-versioned (leading extent 2); A_shared/A_local keep their
# original shapes. Do NOT edit the statement structure: the test compares this
# IR structurally against the pass output.
@T.prim_func
def transformed_nested_pipeline_interleaving(
    A: T.Buffer[(16, 16, 16), "float32"], C: T.Buffer[(16, 16, 16), "float32"]
) -> None:
    for tx in T.thread_binding(0, 16, thread="threadIdx.x"):
        with T.block():
            T.reads([A[tx, 0:16, 0:16]])
            T.writes([C[tx, 0:16, 0:16]])
            A_shared = T.alloc_buffer([16, 1, 16], dtype="float32", scope="shared")
            A_local = T.alloc_buffer([1, 1, 16], dtype="float32", scope="local")
            B = T.alloc_buffer([2, 16, 1, 1], dtype="float32", scope="shared")
            # Prologue: iteration i == 0.
            with T.block():
                T.reads([A[tx, 0, 0:16], A_shared[tx, 0, 0:16], A_local[tx, 0, 0]])
                T.writes([A_shared[tx, 0, 0:16], A_local[0, 0, 0:16], B[0, tx, 0, 0]])
                with T.block():
                    T.reads([A[tx, 0, 0:16]])
                    T.writes([A_shared[tx, 0, 0:16]])
                    for j in T.serial(0, 16):
                        with T.block():
                            T.reads([A[tx, 0, j]])
                            T.writes([A_shared[tx, 0, j]])
                            A_shared[tx, 0, j] = A[tx, 0, j]
                with T.block():
                    T.reads([A_shared[tx, 0, 0:16]])
                    T.writes([A_local[0, 0, 0:16]])
                    for j in T.serial(0, 16):
                        with T.block():
                            T.reads([A_shared[tx, 0, j]])
                            T.writes([A_local[0, 0, j]])
                            A_local[0, 0, j] = A_shared[tx, 0, j]
                with T.block():
                    T.reads([A_local[tx, 0, 0]])
                    T.writes([B[0, tx, 0, 0]])
                    B[0, tx, 0, 0] = A_local[0, 0, 0] * T.float32(2)
            # Steady state: i in [0, 15).
            with T.block():
                T.reads(
                    [
                        A[tx, 1:16, 0:16],
                        A_local[tx, 0:16, 0:16],
                        B[0:2, tx, 0:15, 0],
                        A_shared[tx, 0, 0:16],
                    ]
                )
                T.writes(
                    [
                        A_shared[tx, 0, 0:16],
                        B[0:2, tx, 0:16, 0],
                        C[tx, 0:15, 0:16],
                        A_local[0, 0, 0:16],
                    ]
                )
                for i in T.serial(0, 15):
                    with T.block():
                        T.reads([A[tx, i + 1, 0:16]])
                        T.writes([A_shared[tx, 0, 0:16]])
                        for j in T.serial(0, 16):
                            with T.block():
                                T.reads([A[tx, i + 1, j]])
                                T.writes([A_shared[tx, 0, j]])
                                A_shared[tx, 0, j] = A[tx, i + 1, j]
                    with T.block():
                        T.reads([A_local[tx, i, 1:16], B[0:2, tx, i, 0]])
                        T.writes([B[0:2, tx, i, 0], C[tx, i, 0:15]])
                        for j in T.serial(0, 15):
                            with T.block():
                                T.reads([A_local[tx, i, j + 1]])
                                T.writes([B[(j + 1) % 2, tx, i, 0]])
                                B[(j + 1) % 2, tx, i, 0] = A_local[0, 0, j + 1] * T.float32(2)
                            with T.block():
                                T.reads([B[j % 2, tx, i, 0]])
                                T.writes([C[tx, i, j]])
                                C[tx, i, j] = B[j % 2, tx, i, 0] + T.float32(1)
                    with T.block():
                        T.reads([A_shared[tx, 0, 0:16]])
                        T.writes([A_local[0, 0, 0:16]])
                        for j in T.serial(0, 16):
                            with T.block():
                                T.reads([A_shared[tx, 0, j]])
                                T.writes([A_local[0, 0, j]])
                                A_local[0, 0, j] = A_shared[tx, i + 1, j]
                    with T.block():
                        T.reads([A_local[tx, i + 1, 0]])
                        T.writes([B[0, tx, i + 1, 0]])
                        B[0, tx, i + 1, 0] = A_local[0, 0, 0] * T.float32(2)
                    with T.block():
                        T.reads([B[1, tx, i, 0]])
                        T.writes([C[tx, i, 15]])
                        C[tx, i, 15] = B[1, tx, i, 0] + T.float32(1)
            # Epilogue: iteration i == 15.
            with T.block():
                T.reads([A_local[tx, 15, 1:16], B[0:2, tx, 15, 0]])
                T.writes([B[0:2, tx, 15, 0], C[tx, 15, 0:16]])
                with T.block():
                    T.reads([A_local[tx, 15, 1:16], B[0:2, tx, 15, 0]])
                    T.writes([B[0:2, tx, 15, 0], C[tx, 15, 0:15]])
                    for j in T.serial(0, 15):
                        with T.block():
                            T.reads([A_local[tx, 15, j + 1]])
                            T.writes([B[(j + 1) % 2, tx, 15, 0]])
                            B[(j + 1) % 2, tx, 15, 0] = A_local[0, 0, j + 1] * T.float32(2)
                        with T.block():
                            T.reads([B[j % 2, tx, 15, 0]])
                            T.writes([C[tx, 15, j]])
                            C[tx, 15, j] = B[j % 2, tx, 15, 0] + T.float32(1)
                with T.block():
                    T.reads([B[1, tx, 15, 0]])
                    T.writes([C[tx, 15, 15]])
                    C[tx, 15, 15] = B[1, tx, 15, 0] + T.float32(1)
# Input fixture: identical to nested_pipeline_interleaving except that the
# shared -> local copy block carries the "double_buffer_scope" attribute, so
# the pass is expected to also version A_local. Paired with
# transformed_nested_pipeline_double_buffer below.
@T.prim_func
def nested_pipeline_double_buffer(
    A: T.Buffer[(16, 16, 16), "float32"], C: T.Buffer[(16, 16, 16), "float32"]
):
    for tx in T.thread_binding(0, 16, thread="threadIdx.x"):
        for i in T.serial(
            0,
            16,
            annotations={
                "software_pipeline_stage": [0, 0, 0, 1, 1],
                "software_pipeline_order": [0, 2, 3, 1, 4],
            },
        ):
            with T.block():
                T.reads(A[tx, i, 0:16])
                T.writes(C[tx, i, 0:16])
                A_shared = T.alloc_buffer((16, 1, 16), dtype="float32", scope="shared")
                A_local = T.alloc_buffer((1, 1, 16), dtype="float32", scope="local")
                for j in T.serial(0, 16):
                    with T.block():
                        T.reads(A[tx, i, j])
                        T.writes(A_shared[tx, 0, j])
                        A_shared[tx, 0, j] = A[tx, i, j]
                for j in T.serial(0, 16):
                    with T.block():
                        T.block_attr({"double_buffer_scope": 0})
                        T.reads(A_shared[tx, 0, j])
                        T.writes(A_local[0, 0, j])
                        A_local[0, 0, j] = A_shared[tx, i, j]
                for j in T.serial(
                    0,
                    16,
                    annotations={
                        "software_pipeline_stage": [0, 1],
                        "software_pipeline_order": [0, 1],
                    },
                ):
                    with T.block():
                        T.reads(A_local[0, 0, j])
                        T.writes(C[tx, i, j])
                        B = T.alloc_buffer((16, 1, 1), dtype="float32", scope="shared")
                        with T.block():
                            T.reads(A_local[tx, i, j])
                            T.writes(B[tx, i, 0])
                            B[tx, i, 0] = A_local[0, 0, j] * T.float32(2)
                        with T.block():
                            T.reads(B[tx, i, 0])
                            T.writes(C[tx, i, j])
                            C[tx, i, j] = B[tx, i, 0] + T.float32(1)
# Expected output of InjectSoftwarePipeline for nested_pipeline_double_buffer.
# Because of the "double_buffer_scope" block attribute, A_local is versioned
# (leading extent 2) in addition to B; A_shared keeps its original shape.
# Do NOT edit the statement structure: the test compares this IR structurally.
@T.prim_func
def transformed_nested_pipeline_double_buffer(
    A: T.Buffer[(16, 16, 16), "float32"], C: T.Buffer[(16, 16, 16), "float32"]
) -> None:
    for tx in T.thread_binding(0, 16, thread="threadIdx.x"):
        with T.block():
            T.reads([A[tx, 0:16, 0:16]])
            T.writes([C[tx, 0:16, 0:16]])
            A_shared = T.alloc_buffer([16, 1, 16], dtype="float32", scope="shared")
            A_local = T.alloc_buffer([2, 1, 1, 16], dtype="float32", scope="local")
            B = T.alloc_buffer([2, 16, 1, 1], dtype="float32", scope="shared")
            # Prologue: iteration i == 0.
            with T.block():
                T.reads([A[tx, 0, 0:16], A_shared[tx, 0, 0:16], A_local[0, tx, 0, 0]])
                T.writes([A_shared[tx, 0, 0:16], A_local[0, 0, 0, 0:16], B[0, tx, 0, 0]])
                with T.block():
                    T.reads([A[tx, 0, 0:16]])
                    T.writes([A_shared[tx, 0, 0:16]])
                    for j in T.serial(0, 16):
                        with T.block():
                            T.reads([A[tx, 0, j]])
                            T.writes([A_shared[tx, 0, j]])
                            A_shared[tx, 0, j] = A[tx, 0, j]
                with T.block():
                    T.reads([A_shared[tx, 0, 0:16]])
                    T.writes([A_local[0, 0, 0, 0:16]])
                    for j in T.serial(0, 16):
                        with T.block():
                            T.reads([A_shared[tx, 0, j]])
                            T.writes([A_local[0, 0, 0, j]])
                            T.block_attr({"double_buffer_scope": 0})
                            A_local[0, 0, 0, j] = A_shared[tx, 0, j]
                with T.block():
                    T.reads([A_local[0, tx, 0, 0]])
                    T.writes([B[0, tx, 0, 0]])
                    B[0, tx, 0, 0] = A_local[0, 0, 0, 0] * T.float32(2)
            # Steady state: i in [0, 15).
            with T.block():
                T.reads(
                    [
                        A[tx, 1:16, 0:16],
                        A_local[0:2, tx, 0:16, 0:16],
                        B[0:2, tx, 0:15, 0],
                        A_shared[tx, 0, 0:16],
                    ]
                )
                T.writes(
                    [
                        A_shared[tx, 0, 0:16],
                        B[0:2, tx, 0:16, 0],
                        C[tx, 0:15, 0:16],
                        A_local[0:2, 0, 0, 0:16],
                    ]
                )
                for i in T.serial(0, 15):
                    with T.block():
                        T.reads([A[tx, i + 1, 0:16]])
                        T.writes([A_shared[tx, 0, 0:16]])
                        for j in T.serial(0, 16):
                            with T.block():
                                T.reads([A[tx, i + 1, j]])
                                T.writes([A_shared[tx, 0, j]])
                                A_shared[tx, 0, j] = A[tx, i + 1, j]
                    with T.block():
                        T.reads([A_local[i % 2, tx, i, 1:16], B[0:2, tx, i, 0]])
                        T.writes([B[0:2, tx, i, 0], C[tx, i, 0:15]])
                        for j in T.serial(0, 15):
                            with T.block():
                                T.reads([A_local[i % 2, tx, i, j + 1]])
                                T.writes([B[(j + 1) % 2, tx, i, 0]])
                                B[(j + 1) % 2, tx, i, 0] = A_local[i % 2, 0, 0, j + 1] * T.float32(
                                    2
                                )
                            with T.block():
                                T.reads([B[j % 2, tx, i, 0]])
                                T.writes([C[tx, i, j]])
                                C[tx, i, j] = B[j % 2, tx, i, 0] + T.float32(1)
                    with T.block():
                        T.reads([A_shared[tx, 0, 0:16]])
                        T.writes([A_local[(i + 1) % 2, 0, 0, 0:16]])
                        for j in T.serial(0, 16):
                            with T.block():
                                T.reads([A_shared[tx, 0, j]])
                                T.writes([A_local[(i + 1) % 2, 0, 0, j]])
                                T.block_attr({"double_buffer_scope": 0})
                                A_local[(i + 1) % 2, 0, 0, j] = A_shared[tx, i + 1, j]
                    with T.block():
                        T.reads([A_local[(i + 1) % 2, tx, i + 1, 0]])
                        T.writes([B[0, tx, i + 1, 0]])
                        B[0, tx, i + 1, 0] = A_local[(i + 1) % 2, 0, 0, 0] * T.float32(2)
                    with T.block():
                        T.reads([B[1, tx, i, 0]])
                        T.writes([C[tx, i, 15]])
                        C[tx, i, 15] = B[1, tx, i, 0] + T.float32(1)
            # Epilogue: iteration i == 15.
            with T.block():
                T.reads([A_local[1, tx, 15, 1:16], B[0:2, tx, 15, 0]])
                T.writes([B[0:2, tx, 15, 0], C[tx, 15, 0:16]])
                with T.block():
                    T.reads([A_local[1, tx, 15, 1:16], B[0:2, tx, 15, 0]])
                    T.writes([B[0:2, tx, 15, 0], C[tx, 15, 0:15]])
                    for j in T.serial(0, 15):
                        with T.block():
                            T.reads([A_local[1, tx, 15, j + 1]])
                            T.writes([B[(j + 1) % 2, tx, 15, 0]])
                            B[(j + 1) % 2, tx, 15, 0] = A_local[1, 0, 0, j + 1] * T.float32(2)
                        with T.block():
                            T.reads([B[j % 2, tx, 15, 0]])
                            T.writes([C[tx, 15, j]])
                            C[tx, 15, j] = B[j % 2, tx, 15, 0] + T.float32(1)
                with T.block():
                    T.reads([B[1, tx, 15, 0]])
                    T.writes([C[tx, 15, 15]])
                    C[tx, 15, 15] = B[1, tx, 15, 0] + T.float32(1)
# Negative fixture for test_error_reorder: the order annotation [0, 2, 1]
# places the C-producing block after its consumer within the same stage,
# which InjectSoftwarePipeline is expected to reject.
@T.prim_func
def simple_compute_incorrect_reorder(
    A: T.Buffer[(16, 16), "float32"], D: T.Buffer[(16, 16), "float32"]
):
    for tx in T.thread_binding(0, 16, thread="threadIdx.x"):
        for i in T.serial(
            0,
            16,
            annotations={
                "software_pipeline_stage": [0, 1, 1],
                "software_pipeline_order": [0, 2, 1],
            },
        ):
            with T.block():
                T.reads(A[tx, i])
                T.writes(D[tx, i])
                B = T.alloc_buffer((16, 1), dtype="float32", scope="shared")
                C = T.alloc_buffer((16, 1), dtype="float32", scope="shared")
                with T.block():
                    T.reads(A[tx, i])
                    T.writes(B[tx, 0])
                    B[tx, 0] = A[tx, i] * T.float32(2)
                with T.block():
                    T.reads(B[tx, 0])
                    T.writes(C[tx, 0])
                    C[tx, 0] = B[tx, 0] + T.float32(2)
                with T.block():
                    T.reads(C[tx, 0])
                    T.writes(D[tx, i])
                    D[tx, i] = C[tx, 0] + T.float32(1)
# Negative fixture for test_error_conflicting_order: two blocks share the same
# order index (annotation [0, 1, 1]), which InjectSoftwarePipeline is expected
# to reject.
@T.prim_func
def simple_compute_conflicting_order(
    A: T.Buffer[(16, 16), "float32"], D: T.Buffer[(16, 16), "float32"]
):
    for tx in T.thread_binding(0, 16, thread="threadIdx.x"):
        for i in T.serial(
            0,
            16,
            annotations={
                "software_pipeline_stage": [0, 1, 1],
                "software_pipeline_order": [0, 1, 1],
            },
        ):
            with T.block():
                T.reads(A[tx, i])
                T.writes(D[tx, i])
                B = T.alloc_buffer((16, 1), dtype="float32", scope="shared")
                C = T.alloc_buffer((16, 1), dtype="float32", scope="shared")
                with T.block():
                    T.reads(A[tx, i])
                    T.writes(B[tx, 0])
                    B[tx, 0] = A[tx, i] * T.float32(2)
                with T.block():
                    T.reads(B[tx, 0])
                    T.writes(C[tx, 0])
                    C[tx, 0] = B[tx, 0] + T.float32(2)
                with T.block():
                    T.reads(C[tx, 0])
                    T.writes(D[tx, i])
                    D[tx, i] = C[tx, 0] + T.float32(1)
# Negative fixture for test_error_missing_annotation: the loop carries
# "software_pipeline_stage" but no "software_pipeline_order", which
# InjectSoftwarePipeline is expected to reject.
@T.prim_func
def simple_compute_missing_annotation(
    A: T.Buffer[(16, 16), "float32"], C: T.Buffer[(16, 16), "float32"]
):
    for tx in T.thread_binding(0, 16, thread="threadIdx.x"):
        for i in T.serial(0, 16, annotations={"software_pipeline_stage": [0, 1]}):
            with T.block():
                T.reads(A[tx, i])
                T.writes(C[tx, i])
                B = T.alloc_buffer((16, 1), dtype="float32", scope="shared")
                with T.block():
                    T.reads(A[tx, i])
                    T.writes(B[tx, 0])
                    B[tx, 0] = A[tx, i] * T.float32(2)
                with T.block():
                    T.reads(B[tx, 0])
                    T.writes(C[tx, i])
                    C[tx, i] = B[tx, 0] + T.float32(1)
# Positive cases: _check runs InjectSoftwarePipeline on the first fixture and
# asserts structural equality with the second. Negative cases: _check_error
# asserts the pass rejects the (invalid) fixture.
def test_simple_compute():
    _check(gen_simple_compute(1), transformed_simple_compute)
def test_simple_compute_with_other_annotation():
    _check(simple_compute_with_other_annotation, transformed_simple_compute_with_other_annotation)
def test_trivial_pipeline():
    _check(trivial_pipeline, transformed_trivial_pipeline)
def test_three_stage_compute():
    _check(three_stage_compute, transformed_three_stage_compute)
def test_dag_interleaving():
    _check(dag_interleaving, transformed_dag_interleaving)
def test_nest_pipeline_simple():
    _check(nested_pipeline_simple, transformed_nested_pipeline_simple)
def test_nest_pipeline_prefetch_inner():
    _check(nested_pipeline_prefetch_inner, transformed_nested_pipeline_prefetch_inner)
def test_nest_pipeline_interleaving():
    _check(nested_pipeline_interleaving, transformed_nested_pipeline_interleaving)
def test_nest_pipeline_double_buffer():
    _check(nested_pipeline_double_buffer, transformed_nested_pipeline_double_buffer)
def test_error_reorder():
    _check_error(simple_compute_incorrect_reorder)
def test_error_conflicting_order():
    _check_error(simple_compute_conflicting_order)
def test_error_missing_annotation():
    _check_error(simple_compute_missing_annotation)
def test_simple_compute_async():
    """Async pipelining: with "software_pipeline_async_stages" on stage 0, the
    producer stage is wrapped in async_commit_queue_scope/async_scope and the
    consumer waits via async_wait_queue_scope/async_wait_inflight_count.
    Checked for pipeline depths 1 and 3 against inline reference IR.
    """
    mod = tvm.IRModule.from_expr(gen_simple_compute(1))
    sch = tvm.tir.Schedule(mod)
    _, loop = sch.get_loops(sch.get_block("compute"))
    sch.annotate(loop, ann_key="software_pipeline_async_stages", ann_val=[0])
    mod = tvm.tir.transform.InjectSoftwarePipeline()(sch.mod)
    # Reference for depth 1: two-slot B, one in-flight async copy.
    @T.prim_func
    def ref(A: T.Buffer[(16, 16), "float32"], C: T.Buffer[(16, 16), "float32"]):
        for tx in T.thread_binding(16, thread="threadIdx.x"):
            with T.block():
                T.reads(A[tx, 0:16])
                T.writes(C[tx, 0:16])
                B = T.alloc_buffer([2, 16, 1], dtype="float32", scope="shared")
                with T.block():
                    T.reads(A[tx, 0])
                    T.writes(B[0, tx, 0])
                    with T.attr(0, "async_commit_queue_scope", 0):
                        with T.attr(0, "async_scope", 1):
                            B[T.FloorMod(0, 2), tx, 0] = A[tx, 0] * T.float32(2)
                with T.block():
                    T.reads(A[tx, 1:16], B[0:2, tx, 0])
                    T.writes(B[0:2, tx, 0], C[tx, 0:15])
                    for i in T.serial(15):
                        with T.block():
                            T.where(i + 1 < 16)
                            T.reads(A[tx, i + 1])
                            T.writes(B[(i + 1) % 2, tx, 0])
                            with T.attr(0, "async_commit_queue_scope", 0):
                                with T.attr(0, "async_scope", 1):
                                    B[(i + 1) % 2, tx, 0] = A[tx, i + 1] * T.float32(2)
                        with T.block():
                            T.where(i + 1 - 1 < 16)
                            T.reads(B[(i - 1 + 1) % 2, tx, 0])
                            T.writes(C[tx, i - 1 + 1])
                            with T.attr(0, "async_wait_queue_scope", 0):
                                with T.attr(0, "async_wait_inflight_count", 1):
                                    C[tx, i - 1 + 1] = B[(i - 1 + 1) % 2, tx, 0] + T.float32(1)
                with T.block():
                    T.reads(B[T.FloorMod(15, 2), tx, 0])
                    T.writes(C[tx, 15])
                    with T.attr(0, "async_wait_queue_scope", 0):
                        with T.attr(0, "async_wait_inflight_count", 0):
                            C[tx, 15] = B[T.FloorMod(15, 2), tx, 0] + T.float32(1)
    tvm.ir.assert_structural_equal(mod["main"], ref, True)
    mod = tvm.IRModule.from_expr(gen_simple_compute(3))
    sch = tvm.tir.Schedule(mod)
    _, loop = sch.get_loops(sch.get_block("compute"))
    sch.annotate(loop, ann_key="software_pipeline_async_stages", ann_val=[0])
    mod = tvm.tir.transform.InjectSoftwarePipeline()(sch.mod)
    # Reference for depth 3: four-slot B, up to three in-flight async copies.
    @T.prim_func
    def ref(A: T.Buffer[(16, 16), "float32"], C: T.Buffer[(16, 16), "float32"]) -> None:
        for tx in T.thread_binding(16, thread="threadIdx.x"):
            with T.block():
                T.reads(A[tx, 0:16])
                T.writes(C[tx, 0:16])
                B = T.alloc_buffer([4, 16, 1], dtype="float32", scope="shared")
                with T.block():
                    T.reads(A[tx, 0:3])
                    T.writes(B[0:3, tx, 0])
                    for i in T.unroll(3):
                        with T.block():
                            T.where(i < 16)
                            T.reads(A[tx, i])
                            T.writes(B[i % 4, tx, 0])
                            T.attr(0, "async_commit_queue_scope", 0)
                            T.attr(0, "async_scope", 1)
                            B[i % 4, tx, 0] = A[tx, i] * T.float32(2)
                with T.block():
                    T.reads(A[tx, 3:16], B[0:4, tx, 0])
                    T.writes(B[0:4, tx, 0], C[tx, 0:13])
                    for i in T.serial(13):
                        with T.block():
                            T.where(i + 3 < 16)
                            T.reads(A[tx, i + 3])
                            T.writes(B[(i + 3) % 4, tx, 0])
                            T.attr(0, "async_commit_queue_scope", 0)
                            T.attr(0, "async_scope", 1)
                            B[(i + 3) % 4, tx, 0] = A[tx, i + 3] * T.float32(2)
                        with T.block():
                            T.where(i + 3 - 3 < 16)
                            T.reads(B[0:4, tx, 0])
                            T.writes(C[tx, i - 3 + 3])
                            with T.attr(0, "async_wait_queue_scope", 0):
                                with T.attr(0, "async_wait_inflight_count", 3):
                                    C[tx, i - 3 + 3] = B[(i - 3 + 3) % 4, tx, 0] + T.float32(1)
                with T.block():
                    T.reads(B[0:4, tx, 0])
                    T.writes(C[tx, 13:16])
                    for i in T.unroll(3):
                        with T.block():
                            T.where(i + 16 - 3 < 16)
                            T.reads(B[0:4, tx, 0])
                            T.writes(C[tx, i - 3 + 16])
                            with T.attr(0, "async_wait_queue_scope", 0):
                                with T.attr(0, "async_wait_inflight_count", 2 - i):
                                    C[tx, i - 3 + 16] = B[(i - 3 + 16) % 4, tx, 0] + T.float32(1)
    tvm.ir.assert_structural_equal(mod["main"], ref, True)
def test_async_producer_interleaving():
    """Two async producers (A_shared, B_shared) in the same stage, with the
    consumer ordered between them (order [0, 2, 1]). The reference expects the
    in-flight count to account for both interleaved copy queues (5 in the
    steady state).
    """
    @T.prim_func
    def simple_compute(
        A: T.Buffer[(16, 16), "float32"],
        B: T.Buffer[(16, 16), "float32"],
        C: T.Buffer[(16, 16), "float32"],
    ):
        for tx in T.thread_binding(0, 16, thread="threadIdx.x"):
            for i in range(16):
                with T.block("compute"):
                    T.reads(A[tx, i])
                    T.writes(C[tx, i])
                    A_shared = T.alloc_buffer((16, 1), dtype="float32", scope="shared")
                    B_shared = T.alloc_buffer((16, 1), dtype="float32", scope="shared")
                    with T.block():
                        T.reads(A[tx, i])
                        T.writes(A_shared[tx, 0])
                        A_shared[tx, 0] = A[tx, i]
                    with T.block():
                        T.reads(B[tx, i])
                        T.writes(B_shared[tx, 0])
                        B_shared[tx, 0] = B[tx, i]
                    with T.block():
                        T.reads(A_shared[tx, 0], B_shared[tx, 0])
                        T.writes(C[tx, i])
                        C[tx, i] = A_shared[tx, 0] + B_shared[tx, 0]
    mod = tvm.IRModule.from_expr(simple_compute)
    sch = tvm.tir.Schedule(mod)
    _, loop = sch.get_loops(sch.get_block("compute"))
    sch.annotate(loop, ann_key="software_pipeline_stage", ann_val=[0, 0, 3])
    sch.annotate(loop, ann_key="software_pipeline_order", ann_val=[0, 2, 1])
    sch.annotate(loop, ann_key="software_pipeline_async_stages", ann_val=[0])
    mod = tvm.tir.transform.InjectSoftwarePipeline()(sch.mod)
    @T.prim_func
    def ref(
        A: T.Buffer[(16, 16), "float32"],
        B: T.Buffer[(16, 16), "float32"],
        C: T.Buffer[(16, 16), "float32"],
    ) -> None:
        for tx in T.thread_binding(16, thread="threadIdx.x"):
            with T.block():
                T.reads(A[tx, 0:16], B[tx, 0:16])
                T.writes(C[tx, 0:16])
                A_shared = T.alloc_buffer([4, 16, 1], dtype="float32", scope="shared")
                B_shared = T.alloc_buffer([4, 16, 1], dtype="float32", scope="shared")
                with T.block():
                    T.reads(A[tx, 0:3], B[tx, 0:3])
                    T.writes(A_shared[0:3, tx, 0], B_shared[0:3, tx, 0])
                    for i in T.unroll(3):
                        with T.block():
                            T.where(i < 16)
                            T.reads(A[tx, i], B[tx, i])
                            T.writes(A_shared[i % 4, tx, 0], B_shared[i % 4, tx, 0])
                            with T.attr(0, "async_commit_queue_scope", 0):
                                with T.attr(0, "async_scope", 1):
                                    A_shared[i % 4, tx, 0] = A[tx, i]
                                with T.attr(0, "async_scope", 1):
                                    B_shared[i % 4, tx, 0] = B[tx, i]
                with T.block():
                    T.reads(A[tx, 3:16], A_shared[0:4, tx, 0], B_shared[0:4, tx, 0], B[tx, 3:16])
                    T.writes(A_shared[0:4, tx, 0], C[tx, 0:13], B_shared[0:4, tx, 0])
                    for i in T.serial(13):
                        with T.block():
                            T.where(i + 3 < 16)
                            T.reads(A[tx, i + 3])
                            T.writes(A_shared[(i + 3) % 4, tx, 0])
                            with T.attr(0, "async_commit_queue_scope", 0):
                                with T.attr(0, "async_scope", 1):
                                    A_shared[(i + 3) % 4, tx, 0] = A[tx, i + 3]
                        with T.block():
                            T.where(i + 3 - 3 < 16)
                            T.reads(A_shared[0:4, tx, 0], B_shared[0:4, tx, 0])
                            T.writes(C[tx, i - 3 + 3])
                            with T.attr(0, "async_wait_queue_scope", 0):
                                with T.attr(0, "async_wait_inflight_count", 5):
                                    C[tx, i - 3 + 3] = (
                                        A_shared[(i - 3 + 3) % 4, tx, 0]
                                        + B_shared[(i - 3 + 3) % 4, tx, 0]
                                    )
                        with T.block():
                            T.where(i + 3 < 16)
                            T.reads(B[tx, i + 3])
                            T.writes(B_shared[(i + 3) % 4, tx, 0])
                            with T.attr(0, "async_commit_queue_scope", 0):
                                with T.attr(0, "async_scope", 1):
                                    B_shared[(i + 3) % 4, tx, 0] = B[tx, i + 3]
                with T.block():
                    T.reads(A_shared[0:4, tx, 0], B_shared[0:4, tx, 0])
                    T.writes(C[tx, 13:16])
                    for i in T.unroll(3):
                        with T.block():
                            T.where(i + 16 - 3 < 16)
                            T.reads(A_shared[0:4, tx, 0], B_shared[0:4, tx, 0])
                            T.writes(C[tx, i - 3 + 16])
                            with T.attr(0, "async_wait_queue_scope", 0):
                                with T.attr(0, "async_wait_inflight_count", 2 - i):
                                    C[tx, i - 3 + 16] = (
                                        A_shared[(i - 3 + 16) % 4, tx, 0]
                                        + B_shared[(i - 3 + 16) % 4, tx, 0]
                                    )
    tvm.ir.assert_structural_equal(mod["main"], ref, True)
def test_three_stage_compute_two_stage_async():
    """Three-stage pipeline with the first two stages async (queues 0 and 1):
    stage 1 both waits on queue 0 and commits into queue 1, and stage 2 waits
    on queue 1. Checked against inline reference IR.
    """
    mod = tvm.IRModule.from_expr(three_stage_compute)
    sch = tvm.tir.Schedule(mod)
    _, loop = sch.get_loops(sch.get_block("compute"))
    sch.annotate(loop, ann_key="software_pipeline_async_stages", ann_val=[0, 1])
    mod = tvm.tir.transform.InjectSoftwarePipeline()(sch.mod)
    @T.prim_func
    def ref(A: T.Buffer[(16, 16), "float32"], D: T.Buffer[(16, 16), "float32"]) -> None:
        for tx in T.thread_binding(16, thread="threadIdx.x"):
            with T.block():
                T.reads(A[tx, 0:16])
                T.writes(D[tx, 0:16])
                B = T.alloc_buffer([2, 16, 1], dtype="float32", scope="shared")
                C = T.alloc_buffer([2, 16, 1], dtype="float32", scope="shared")
                with T.block():
                    T.reads(A[tx, 0:2], B[0:2, tx, 0])
                    T.writes(B[0:2, tx, 0], C[0:2, tx, 0])
                    for i in T.unroll(2):
                        with T.block():
                            T.where(i < 16)
                            T.reads(A[tx, i])
                            T.writes(B[i % 2, tx, 0])
                            with T.attr(0, "async_commit_queue_scope", 0):
                                with T.attr(0, "async_scope", 1):
                                    B[i % 2, tx, 0] = A[tx, i] * T.float32(2)
                        with T.block():
                            T.where(i == 1 and i - 1 < 16)
                            T.reads(B[(i + 1) % 2, tx, 0])
                            T.writes(C[(i + 1) % 2, tx, 0])
                            with T.attr(0, "async_commit_queue_scope", 1):
                                with T.attr(0, "async_wait_queue_scope", 0):
                                    with T.attr(0, "async_wait_inflight_count", 1):
                                        with T.attr(0, "async_scope", 1):
                                            C[(i - 1) % 2, tx, 0] = B[
                                                (i - 1) % 2, tx, 0
                                            ] + T.float32(2)
                with T.block():
                    T.reads(A[tx, 2:16], B[0:2, tx, 0], C[0:2, tx, 0])
                    T.writes(B[0:2, tx, 0], C[0:2, tx, 0], D[tx, 0:14])
                    for i in T.serial(14):
                        with T.block():
                            T.where(i + 2 < 16)
                            T.reads(A[tx, i + 2])
                            T.writes(B[i % 2, tx, 0])
                            with T.attr(0, "async_commit_queue_scope", 0):
                                with T.attr(0, "async_scope", 1):
                                    B[(i + 2) % 2, tx, 0] = A[tx, i + 2] * T.float32(2)
                        with T.block():
                            T.where(i + 2 - 1 < 16)
                            T.reads(B[(i + 1) % 2, tx, 0])
                            T.writes(C[(i + 1) % 2, tx, 0])
                            with T.attr(0, "async_commit_queue_scope", 1):
                                with T.attr(0, "async_wait_queue_scope", 0):
                                    with T.attr(0, "async_wait_inflight_count", 1):
                                        with T.attr(0, "async_scope", 1):
                                            C[(i - 1 + 2) % 2, tx, 0] = B[
                                                (i - 1 + 2) % 2, tx, 0
                                            ] + T.float32(2)
                        with T.block():
                            T.where(i + 2 - 2 < 16)
                            T.reads(C[0:2, tx, 0])
                            T.writes(D[tx, i - 2 + 2])
                            with T.attr(0, "async_wait_queue_scope", 1):
                                with T.attr(0, "async_wait_inflight_count", 1):
                                    D[tx, i - 2 + 2] = C[(i - 2 + 2) % 2, tx, 0] + T.float32(1)
                with T.block():
                    T.reads(B[0:2, tx, 0], C[0:2, tx, 0])
                    T.writes(C[0:2, tx, 0], D[tx, 14:16])
                    for i in T.unroll(2):
                        with T.block():
                            T.where(i + 16 - 1 < 16)
                            T.reads(B[(i + 1) % 2, tx, 0])
                            T.writes(C[(i + 1) % 2, tx, 0])
                            with T.attr(0, "async_commit_queue_scope", 1):
                                with T.attr(0, "async_wait_queue_scope", 0):
                                    with T.attr(0, "async_wait_inflight_count", 0 - i):
                                        with T.attr(0, "async_scope", 1):
                                            C[(i - 1 + 16) % 2, tx, 0] = B[
                                                (i - 1 + 16) % 2, tx, 0
                                            ] + T.float32(2)
                        with T.block():
                            T.where(i + 16 - 2 < 16)
                            T.reads(C[0:2, tx, 0])
                            T.writes(D[tx, i - 2 + 16])
                            with T.attr(0, "async_wait_queue_scope", 1):
                                with T.attr(
                                    0,
                                    "async_wait_inflight_count",
                                    T.if_then_else(i + 16 - 1 < 16, 1, 0, dtype="int32"),
                                ):
                                    D[tx, i - 2 + 16] = C[(i - 2 + 16) % 2, tx, 0] + T.float32(1)
    tvm.ir.assert_structural_equal(mod["main"], ref, True)
# Square GEMM problem size shared by the MMA pipeline tests below.
N = K = M = 4096
def get_mma_schedule():
    """Build an fp16 x fp16 -> fp32 N x M x K matmul schedule via mma_schedule,
    using the dynamic-shared-memory ldmatrix/MMA tensor intrinsics and the
    shared_16x16_to_ldmatrix_32x8 index layout for A, B, and C.
    """
    # Tiling factors for the i / j / k loop splits used by mma_schedule.
    i_factors, j_factors, k_factors = [1, 32, 1, 4, 2], [16, 2, 4, 1, 2], [128, 2, 1]
    def index_map(i, j):
        # Map a 16x16 tile coordinate to the ldmatrix 32x8 register layout.
        return (
            i // 16,
            j // 16,
            *shared_16x16_to_ldmatrix_32x8_layout(i % 16, j % 16),
        )
    workload = te.create_prim_func(
        te_workload.matmul(N, M, K, in_dtype="float16", out_dtype="float32")
    )
    return mma_schedule(
        workload,
        16,
        "float16",
        False,
        i_factors,
        j_factors,
        k_factors,
        index_map,
        index_map,
        index_map,
        LDMATRIX_16x16_A_DYN_INTRIN,
        LDMATRIX_16x16_B_DYN_INTRIN,
        MMA_f16f16f32_INTRIN,
        MMA_fill_16x16_f32_INTRIN,
        MMA_store_16x16_f32_global_INTRIN,
        "shared.dyn",
    )
def build_and_run(sch):
    """Compile *sch* for CUDA with async copies enabled and check the result
    against a NumPy matmul reference. Skipped (no-op) on pre-Ampere GPUs.
    """
    if not tvm.testing.is_ampere_or_newer():
        return
    with tvm.transform.PassContext(config={"tir.use_async_copy": 1}):
        func = tvm.build(sch.mod["main"], target="cuda")
    device = tvm.device("cuda", 0)
    # Random fp16 inputs; the reference is accumulated in fp32 like the kernel.
    lhs_np = np.random.uniform(size=(N, K)).astype("float16")
    rhs_np = np.random.uniform(size=(K, M)).astype("float16")
    expected = np.dot(lhs_np.astype("float32"), rhs_np.astype("float32"))
    lhs = tvm.nd.array(lhs_np, device)
    rhs = tvm.nd.array(rhs_np, device)
    out = tvm.nd.array(np.zeros((N, M), dtype="float32"), device)
    func(lhs, rhs, out)
    tvm.testing.assert_allclose(out.numpy(), expected, rtol=1e-3)
@tvm.testing.requires_cuda
def test_async_pipelined_mma_gemm_simple():
    """Pipeline the outer k-loop of the MMA GEMM with an async copy stage and
    check (by walking the lowered IR) that prologue/body/epilogue carry the
    expected async commit/wait annotations, then build and validate on GPU.
    """
    sch = get_mma_schedule()
    k0 = sch.get_loops(sch.get_block("C_o_update"))[3]
    sch.annotate(k0, ann_key="software_pipeline_stage", ann_val=[0, 0, 3])
    sch.annotate(k0, ann_key="software_pipeline_order", ann_val=[0, 1, 2])
    sch.annotate(k0, ann_key="software_pipeline_async_stages", ann_val=[0])
    # Lowering prefix needed before InjectSoftwarePipeline can run.
    seq = tvm.transform.Sequential(
        [
            tvm.tir.transform.PlanAndUpdateBufferAllocationLocation(),
            tvm.tir.transform.ConvertBlocksToOpaque(),
            tvm.tir.transform.UnifyThreadBinding(),
            tvm.tir.transform.LowerMatchBuffer(),
            tvm.tir.transform.InjectSoftwarePipeline(),
        ]
    )
    mod = seq(sch.mod)
    # NOTE: these attribute chains hard-code the lowered IR nesting; they
    # break if the pass pipeline above changes the structure.
    pipeline = mod["main"].body.block.body.body.body.body.body.block.body[1].block.body
    prologue, body, epilogue = pipeline
    commit_queue_scope = prologue.block.body.body.block.body
    assert len(commit_queue_scope.body) == 2
    assert commit_queue_scope.value == 0
    commit_queue_scope = body.block.body.body[0].block.body
    assert len(commit_queue_scope.body) == 2
    assert commit_queue_scope.value == 0
    assert body.block.body.body[1].block.body.body.attr_key == "async_wait_inflight_count"
    assert body.block.body.body[1].block.body.body.value == 3
    assert epilogue.block.body.body.block.body.body.attr_key == "async_wait_inflight_count"
    assert str(epilogue.block.body.body.block.body.body.value) == "(2 - i2_0_0: int32)"
    build_and_run(sch)
@tvm.testing.requires_cuda
def test_async_nested_pipeline_mma_gemm_ideal_annotation():
    """Nested pipelining of both k-loops (outer k0 async) on the MMA GEMM;
    checks the async commit/wait annotations in the lowered IR, then builds
    and validates on GPU.
    """
    sch = get_mma_schedule()
    k0 = sch.get_loops(sch.get_block("C_o_update"))[3]
    k1 = sch.get_loops(sch.get_block("C_o_update"))[4]
    sch.annotate(k0, ann_key="software_pipeline_stage", ann_val=[0, 0, 2, 3, 3])
    sch.annotate(k0, ann_key="software_pipeline_order", ann_val=[0, 1, 3, 2, 4])
    sch.annotate(k0, ann_key="software_pipeline_async_stages", ann_val=[0])
    sch.annotate(k1, ann_key="software_pipeline_stage", ann_val=[0, 0, 1])
    sch.annotate(k1, ann_key="software_pipeline_order", ann_val=[0, 1, 2])
    # Lowering prefix needed before InjectSoftwarePipeline can run.
    seq = tvm.transform.Sequential(
        [
            tvm.tir.transform.PlanAndUpdateBufferAllocationLocation(),
            tvm.tir.transform.ConvertBlocksToOpaque(),
            tvm.tir.transform.UnifyThreadBinding(),
            tvm.tir.transform.LowerMatchBuffer(),
            tvm.tir.transform.InjectSoftwarePipeline(),
        ]
    )
    mod = seq(sch.mod)
    # NOTE: hard-coded attribute chains into the lowered IR, as in the test above.
    pipeline = mod["main"].body.block.body.body.body.body.body.block.body[1].block.body
    prologue, body, epilogue = pipeline
    commit_queue_scope = prologue.block.body.body[0].block.body
    assert len(commit_queue_scope.body) == 2
    assert commit_queue_scope.value == 0
    assert prologue.block.body.body[1].block.body.body.attr_key == "async_wait_inflight_count"
    assert prologue.block.body.body[1].block.body.body.value == 2
    commit_queue_scope = body.block.body.body[0].block.body
    assert len(commit_queue_scope.body) == 2
    assert commit_queue_scope.value == 0
    assert body.block.body.body[1].block.body.body.attr_key == "async_wait_inflight_count"
    assert body.block.body.body[1].block.body.body.value == 2
    assert str(epilogue.block.body.body[0].block.body.body.value) == "(1 - i2_0_0: int32)"
    build_and_run(sch)
# Allow running this test file directly (discovers and runs all tests).
if __name__ == "__main__":
    tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_transform_inject_virtual_thread.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm import te
from tvm.script import tir as T
# Parametrize the tests below over both virtual-thread axis tags.
vthread_name = tvm.testing.parameter("vthread", "cthread")
def test_vthread(vthread_name):
    """Check the shared-buffer allocation extent after InjectVirtualThread.

    Two virtual thread axes of extent ``nthread`` are attached to the loop.
    Per the expectations below, "vthread" expands the allocation by one
    factor of ``nthread`` while "cthread" expands it by ``nthread ** 2``
    (presumably one factor per lowered axis — see the pass for details).
    """
    # NOTE: the original version bound an unused `dtype` local; removed.
    n = 100
    m = 4
    nthread = 2

    def get_vthread(name):
        # Build a loop nest whose body reads/writes through the vthread axes
        # and calls an opaque extern, forcing the pass to privatize B.
        tx = te.thread_axis(name)
        ty = te.thread_axis(name)
        ib = tvm.tir.ir_builder.create()
        A = ib.pointer("float32", name="A")
        C = ib.pointer("float32", name="C")
        with ib.for_range(0, n) as i:
            ib.scope_attr(tx, "virtual_thread", nthread)
            ib.scope_attr(ty, "virtual_thread", nthread)
            B = ib.allocate("float32", m, name="B", scope="shared")
            B[i] = A[i * nthread + tx]
            bbuffer = B.asobject()
            ib.emit(
                tvm.tir.call_extern(
                    "int32",
                    "Run",
                    bbuffer.access_ptr("r"),
                    tvm.tir.call_intrin("int32", "tir.tvm_context_id"),
                )
            )
            C[i * nthread + tx] = B[i] + 1
        return ib.get()

    if vthread_name == "vthread":
        B_expected_alloc = m * nthread
    elif vthread_name == "cthread":
        B_expected_alloc = m * nthread * nthread
    stmt = tvm.tir.transform.InjectVirtualThread()(
        tvm.IRModule.from_expr(tvm.tir.PrimFunc([], get_vthread(vthread_name)))
    )["main"]
    assert list(stmt.body.body.extents) == [B_expected_alloc]
def test_vthread_extern(vthread_name):
    """Check allocation extents when buffers are passed to an extern call.

    A is written through one vthread axis, B through the other, and C is
    passed read-write to the extern. Per the expectations below, the
    read-write C is always expanded by ``nthread ** 2``, while A grows by
    one factor of ``nthread`` for "vthread" and two for "cthread".
    """
    # NOTE: the original version bound an unused `dtype` local; removed.
    n = 100
    m = 4
    nthread = 2

    def get_vthread(name):
        tx = te.thread_axis(name)
        ty = te.thread_axis(name)
        ib = tvm.tir.ir_builder.create()
        with ib.for_range(0, n) as i:
            ib.scope_attr(tx, "virtual_thread", nthread)
            ib.scope_attr(ty, "virtual_thread", nthread)
            A = ib.allocate("float32", m, name="A", scope="shared")
            B = ib.allocate("float32", m, name="B", scope="shared")
            C = ib.allocate("float32", m, name="C", scope="shared")
            abuffer = A.asobject()
            bbuffer = B.asobject()
            cbuffer = C.asobject()
            A[tx] = tx + 1.0
            B[ty] = ty + 1.0
            ib.emit(
                tvm.tir.call_extern(
                    "int32",
                    "Run",
                    abuffer.access_ptr("r"),
                    bbuffer.access_ptr("r"),
                    cbuffer.access_ptr("rw"),
                )
            )
        return ib.get()

    if vthread_name == "vthread":
        A_expected_alloc = m * nthread
    elif vthread_name == "cthread":
        A_expected_alloc = m * nthread * nthread
    C_expected_alloc = m * nthread * nthread
    stmt = tvm.tir.transform.InjectVirtualThread()(
        tvm.IRModule.from_expr(tvm.tir.PrimFunc([], get_vthread(vthread_name)))
    )["main"]
    assert list(stmt.body.body.extents) == [A_expected_alloc]
    assert list(stmt.body.body.body.body.extents) == [C_expected_alloc]
def test_vthread_if_then_else():
    """Vthread injection must keep/drop else branches as written.

    The first conditional store has an explicit else branch and must keep it
    after InjectVirtualThread; the second has none and must stay else-free.
    """
    nthread = 2
    tx = te.thread_axis("vthread")
    ib = tvm.tir.ir_builder.create()
    A = ib.pointer("float32", name="A")
    with ib.for_range(0, 100) as i:
        ib.scope_attr(tx, "virtual_thread", nthread)
        B = ib.allocate("float32", 128, name="B", scope="shared")
        with ib.if_scope(i == 0):
            B[i] = A[i * nthread + tx]
        with ib.else_scope():
            B[i] = A[i * nthread + tx] + 1
        with ib.if_scope(i == 0):
            B[i] = A[i * nthread + tx] + 2
    stmt = ib.get()
    stmt = tvm.tir.transform.InjectVirtualThread()(
        tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    )["main"]
    # Fix: compare to None with identity (`is`/`is not`), not equality.
    # PEP 8 mandates identity checks for None, and it avoids any __eq__
    # overloading on TVM node objects.
    assert stmt.body.body.body[0].else_case is not None
    assert stmt.body.body.body[1].else_case is None
def test_vthread_simplified():
    """Indices resulting from vthread injection should be simplified.

    This ensures that downstream passes that check for Ramp nodes do
    not need to each simplify the indices.
    """

    @T.prim_func
    def before_func():
        vthread = T.env_thread("vthread")
        T.launch_thread(vthread, 4)
        B_data = T.allocate([4], "int32", scope="shared")
        B = T.buffer_decl([4], "int32", data=B_data, scope="shared")
        B[0:4] = T.broadcast(vthread, 4)

    @T.prim_func
    def expected_func():
        # The 4-element buffer is expanded to 4 elements per vthread.
        B_data = T.allocate([16], "int32", scope="shared")
        B = T.buffer_decl([16], "int32", data=B_data, scope="shared")
        # The indices for B should each be a single Ramp node, and
        # should not be the sum of a Ramp and Broadcast node.
        B[T.Mul(0, 4) : T.Mul(0, 4) + 4] = T.broadcast(0, 4)
        B[T.Mul(1, 4) : T.Mul(1, 4) + 4] = T.broadcast(1, 4)
        B[T.Mul(2, 4) : T.Mul(2, 4) + 4] = T.broadcast(2, 4)
        B[T.Mul(3, 4) : T.Mul(3, 4) + 4] = T.broadcast(3, 4)

    before_mod = tvm.IRModule.from_expr(before_func)
    after_mod = tvm.tir.transform.InjectVirtualThread()(before_mod)
    after_func = after_mod["main"]
    tvm.ir.assert_structural_equal(after_func, expected_func)
def test_vthread_vectorized():
    """Use of vthread is compatible with vector allocations.

    After InjectVirtualThread + StorageRewrite, the per-vthread int32x4
    stores should address a vectorized (int32x4) buffer directly.
    """

    @T.prim_func
    def before_func():
        vthread = T.env_thread("vthread")
        T.launch_thread(vthread, 4)
        B_data = T.allocate([4], "int32", "shared")
        B = T.buffer_decl([4], "int32", data=B_data, scope="shared")
        B[0:4] = T.broadcast(vthread, 4)

    @T.prim_func
    def expected_func():
        # Allocation becomes 4 lanes wide; each vthread writes one vector slot.
        B_data = T.allocate([4], "int32x4", "shared")
        B = T.buffer_decl([4], "int32x4", data=B_data, scope="shared")
        B[T.Mul(0, 4) / 4] = T.broadcast(0, 4)
        B[T.Mul(1, 4) / 4] = T.broadcast(1, 4)
        B[T.Mul(2, 4) / 4] = T.broadcast(2, 4)
        B[T.Mul(3, 4) / 4] = T.broadcast(3, 4)

    before_mod = tvm.IRModule.from_expr(before_func)
    intermediate_mod = tvm.tir.transform.InjectVirtualThread()(before_mod)
    after_mod = tvm.tir.transform.StorageRewrite()(intermediate_mod)
    after_func = after_mod["main"]
    tvm.ir.assert_structural_equal(after_func, expected_func)
# Script entry point: let tvm.testing discover and run the tests in this file.
if __name__ == "__main__":
    tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_transform_instrument_bound_checkers.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import tvm
import tvm.testing
from tvm import te
import numpy as np
def collect_visit(stmt, f):
    """Apply ``f`` to every node of ``stmt`` in post order; return all results."""
    results = []

    def _record(node):
        results.append(f(node))

    tvm.tir.stmt_functor.post_order_visit(stmt, _record)
    return results
@tvm.testing.requires_llvm
@pytest.mark.xfail
def test_out_of_bounds_llvm(index_a, index_b):
    """Out-of-range element access must trip the instrumented bound check.

    Marked xfail: the shifted loads (i + index_a / i + index_b) read past the
    1024-element buffers, so the instrumented kernel is expected to fail.
    """
    n = te.size_var("n")
    A = te.placeholder((n,), name="A")
    B = te.placeholder((n,), name="B")
    C = te.compute(A.shape, lambda i: A[i + index_a] + B[i + index_b], name="C")
    s = te.create_schedule(C.op)
    tgt = "llvm"
    tgt_host = "llvm"
    stmt = tvm.lower(s, [A, B, C], simple_mode=True)
    print(stmt)
    tgt = tvm.target.Target(tgt, tgt_host)
    fadd = tvm.build(s, [A, B, C], target=tgt, name="myadd")
    dev = tvm.device(tgt.kind.name, 0)
    a = tvm.nd.array(np.random.uniform(size=1024).astype(A.dtype), dev)
    b = tvm.nd.array(np.random.uniform(size=1024).astype(B.dtype), dev)
    c = tvm.nd.array(np.zeros(1024, dtype=C.dtype), dev)
    # Expected to raise at runtime due to the out-of-bounds access.
    fadd(a, b, c)
@tvm.testing.requires_llvm
def test_in_bounds_llvm():
    """In-range elementwise add must run cleanly under bound-check instrumentation."""
    size = 1024
    n = te.size_var("n")
    A = te.placeholder((n,), name="A")
    B = te.placeholder((n,), name="B")
    C = te.compute(A.shape, lambda i: A[i] + B[i], name="C")
    sched = te.create_schedule(C.op)
    tvm.lower(sched, [A, B, C], simple_mode=True)
    target = tvm.target.Target("llvm", "llvm")
    fadd = tvm.build(sched, [A, B, C], target=target, name="myadd")
    dev = tvm.device(target.kind.name, 0)
    a = tvm.nd.array(np.random.uniform(size=size).astype(A.dtype), dev)
    b = tvm.nd.array(np.random.uniform(size=size).astype(B.dtype), dev)
    c = tvm.nd.array(np.zeros(size, dtype=C.dtype), dev)
    # Passes as long as no bound-check error is raised.
    fadd(a, b, c)
@tvm.testing.requires_llvm
@pytest.mark.xfail
def test_out_of_bounds_vectorize_llvm(nn, index_a, index_b):
    """Out-of-range access through a vectorized kernel must also be caught.

    Same shape of test as test_out_of_bounds_llvm, but the inner axis is
    split by 8 and vectorized, exercising bound checks on vector loads.
    Marked xfail: the shifted indices read outside the buffers.
    """
    n = tvm.runtime.convert(nn)
    a = te.placeholder((n), name="a")
    b = te.placeholder((n), name="b")
    c = te.compute((n,), lambda i: a[i + index_a] + b[i + index_b], name="c")
    s = te.create_schedule(c.op)
    xo, xi = s[c].split(c.op.axis[0], factor=8)
    s[c].parallel(xo)
    s[c].vectorize(xi)
    tgt = "llvm"
    tgt_host = "llvm"
    stmt = tvm.lower(s, [a, b, c], simple_mode=True)
    tgt = tvm.target.Target(tgt, tgt_host)
    f = tvm.build(s, [a, b, c], target=tgt, name="myaddvec")
    dev = tvm.cpu(0)
    n = nn
    a = tvm.nd.array(np.random.uniform(size=(n)).astype(a.dtype), dev)
    b = tvm.nd.array(np.random.uniform(size=(n)).astype(a.dtype), dev)
    c = tvm.nd.array(np.zeros(n, dtype=c.dtype), dev)
    # Expected to raise at runtime due to the out-of-bounds access.
    f(a, b, c)
@tvm.testing.requires_llvm
def test_in_bounds_vectorize_llvm():
    """Vectorized multi-lane (float32x2) pipeline stays in bounds and computes A + 1."""
    n = 512
    lanes = 2
    A = te.placeholder((n,), name="A", dtype="float32x%d" % lanes)
    B = te.compute((n,), lambda i: A[i], name="B")
    C = te.compute((n,), lambda i: B[i] + tvm.tir.const(1, A.dtype), name="C")
    s = te.create_schedule(C.op)
    xo, xi = s[C].split(C.op.axis[0], nparts=2)
    _, xi = s[C].split(xi, factor=2)
    s[C].parallel(xo)
    s[C].vectorize(xi)
    s[B].compute_at(s[C], xo)
    xo, xi = s[B].split(B.op.axis[0], factor=2)
    s[B].vectorize(xi)
    # build and invoke the kernel.
    lowered_func = tvm.lower(s, [A, C], "llvm", simple_mode=False)
    f = tvm.build(s, [A, C], "llvm")
    dev = tvm.cpu(0)
    # launch the kernel.
    a = tvm.nd.empty((n,), A.dtype).copyfrom(
        np.random.uniform(size=[n] + ([] if lanes == 1 else [lanes]))
    )
    c = tvm.nd.empty((n,), C.dtype, dev)
    f(a, c)
    tvm.testing.assert_allclose(c.numpy(), a.numpy() + 1)
@tvm.testing.requires_llvm
def test_in_bounds_loop_partition_basic_llvm():
    """Split (partitionable) loop with matching 32-element buffers runs in bounds."""
    n = te.size_var("n")
    A = te.placeholder((n,), name="A")
    B = te.placeholder((n,), name="B")
    T = te.compute((n,), lambda i: A[i] + B[i])
    s = te.create_schedule(T.op)
    xo, xi = s[T].split(T.op.axis[0], factor=4)
    lowered_func = tvm.lower(s, [A, B, T], "llvm", simple_mode=False)
    dev = tvm.cpu(0)
    f = tvm.build(s, [A, B, T], "llvm")
    a = tvm.nd.array(np.random.uniform(size=(32,)).astype(A.dtype), dev)
    b = tvm.nd.array(np.random.uniform(size=(32,)).astype(B.dtype), dev)
    t = tvm.nd.empty((32,), T.dtype, dev)
    f(a, b, t)
@tvm.testing.requires_llvm
@pytest.mark.xfail
def test_out_of_bounds_loop_partition_basic_llvm(index_a, index_b):
    """Shifted accesses in a split loop must trip the bound checker (xfail)."""
    n = te.size_var("n")
    A = te.placeholder((n,), name="A")
    B = te.placeholder((n,), name="B")
    T = te.compute((n,), lambda i: A[i + index_a] + B[i + index_b])
    s = te.create_schedule(T.op)
    xo, xi = s[T].split(T.op.axis[0], factor=4)
    lowered_func = tvm.lower(s, [A, B, T], "llvm", simple_mode=False)
    dev = tvm.cpu(0)
    f = tvm.build(s, [A, B, T], "llvm")
    a = tvm.nd.array(np.random.uniform(size=(32,)).astype(A.dtype), dev)
    b = tvm.nd.array(np.random.uniform(size=(32,)).astype(B.dtype), dev)
    t = tvm.nd.empty((32,), T.dtype, dev)
    # Expected to raise at runtime due to the out-of-bounds access.
    f(a, b, t)
def test_in_bounds_const_loop_partition_ir():
    """Count the instrumentation emitted for a partitioned constant loop.

    After lowering with bound-check instrumentation and const-loop
    partitioning, the IR is expected to contain 6 "buffer_bound" attributes
    (2 partitions x 3 buffers) and 2 IfThenElse check branches.
    """

    def check_attr_stmt(x):
        # True for a bound-check attribute whose recorded extent equals n.
        if (
            isinstance(x, tvm.tir.AttrStmt)
            and x.attr_key == "buffer_bound"
            and tvm.ir.structural_equal(x.value.args, [n])
        ):
            return True
        return False

    def check_branch_stmt(x):
        if isinstance(x, tvm.tir.IfThenElse):
            return True
        return False

    def assert_bound_instrumentation(stmt, f, nums):
        # Count nodes for which predicate f returned exactly True.
        count = 0
        for i in collect_visit(stmt, f):
            if i is True:
                count = count + 1
        assert count == nums

    def collect_branch_stmt(x):
        if isinstance(x, tvm.tir.IfThenElse):
            branch_collector.append(x)

    n = 21
    A = te.placeholder((n,), name="A")
    B = te.placeholder((n,), name="B")
    T = te.compute((n,), lambda i: A[i] + B[i])
    s = te.create_schedule(T.op)
    xo, xi = s[T].split(T.op.axis[0], factor=4)
    with tvm.transform.PassContext(
        config={
            "tir.instrument_bound_checkers": True,
            "tir.LoopPartition": {"partition_const_loop": True},
        }
    ):
        mod = tvm.driver.lower(s, [A, B, T], name="main")
    stmt = mod["main"].body
    # after instrumentation
    assert_bound_instrumentation(stmt, check_attr_stmt, 2 * 3)
    assert_bound_instrumentation(stmt, check_branch_stmt, 2)
    branch_collector = list()
    collect_visit(stmt, collect_branch_stmt)
    assert len(branch_collector) == 2
@tvm.testing.requires_llvm
def test_in_bounds_const_loop_partition_llvm():
    """Instrumented + const-partitioned kernel with matching sizes runs cleanly."""
    with tvm.transform.PassContext(
        config={
            "tir.instrument_bound_checkers": True,
            "tir.LoopPartition": {"partition_const_loop": True},
        }
    ):
        n = 21
        A = te.placeholder((n,), name="A")
        B = te.placeholder((n,), name="B")
        T = te.compute((n,), lambda i: A[i] + B[i])
        s = te.create_schedule(T.op)
        xo, xi = s[T].split(T.op.axis[0], factor=4)
        lowered_func = tvm.lower(s, [A, B, T], "llvm", simple_mode=False)
        dev = tvm.cpu(0)
        f = tvm.build(s, [A, B, T], "llvm")
        a = tvm.nd.array(np.random.uniform(size=(n,)).astype(A.dtype), dev)
        b = tvm.nd.array(np.random.uniform(size=(n,)).astype(B.dtype), dev)
        t = tvm.nd.empty((n,), T.dtype, dev)
        f(a, b, t)
@tvm.testing.requires_llvm
@pytest.mark.xfail
def test_out_of_bounds_const_loop_partition_llvm(index_a, index_b):
    """Instrumented + const-partitioned kernel with shifted reads must fail (xfail)."""
    with tvm.transform.PassContext(
        config={
            "tir.instrument_bound_checkers": True,
            "tir.LoopPartition": {"partition_const_loop": True},
        }
    ):
        n = 21
        A = te.placeholder((n,), name="A")
        B = te.placeholder((n,), name="B")
        T = te.compute((n,), lambda i: A[i + index_a] + B[i + index_b])
        s = te.create_schedule(T.op)
        xo, xi = s[T].split(T.op.axis[0], factor=4)
        lowered_func = tvm.lower(s, [A, B, T], "llvm", simple_mode=False)
        dev = tvm.cpu(0)
        f = tvm.build(s, [A, B, T], "llvm")
        a = tvm.nd.array(np.random.uniform(size=(n,)).astype(A.dtype), dev)
        b = tvm.nd.array(np.random.uniform(size=(n,)).astype(B.dtype), dev)
        t = tvm.nd.empty((n,), T.dtype, dev)
        # Expected to raise at runtime due to the out-of-bounds access.
        f(a, b, t)
@tvm.testing.requires_llvm
def test_in_bounds_conv_llvm(loop_tiling=False):
    """Direct NCHW conv2d with valid indices runs under instrumentation.

    loop_tiling: when True, tile the output height/width by 16x16 to also
    exercise bound checks on the tiled loop structure.
    """
    HSTR = WSTR = 1
    in_channel = 128
    kernel_height = kernel_width = 3
    out_channel = 64
    batch_size = 1
    in_height = in_width = 64
    out_height = out_width = in_height - kernel_height + 1
    data = te.placeholder((batch_size, in_channel, in_height, in_width), name="data")
    kernel = te.placeholder((kernel_height, kernel_width, in_channel, out_channel), name="kernel")
    ic = te.reduce_axis((0, in_channel), name="ic")
    kh = te.reduce_axis((0, kernel_height), name="kh")
    kw = te.reduce_axis((0, kernel_width), name="kw")
    conv = te.compute(
        (batch_size, out_channel, out_height, out_width),
        lambda n, oc, oh, ow: te.sum(
            data[n, ic, oh * HSTR + kh, ow * WSTR + kw] * kernel[kh, kw, ic, oc], axis=[ic, kh, kw]
        ),
        name="conv2d",
    )
    s = te.create_schedule(conv.op)
    n, oc, oh, ow = conv.op.axis
    if loop_tiling:
        oho, owo, ohi, owi = s[conv].tile(oh, ow, 16, 16)
    lowered_func = tvm.lower(s, [data, kernel, conv], simple_mode=True)
    dev = tvm.cpu(0)
    f = tvm.build(s, [data, kernel, conv], "llvm")
    data_input = tvm.nd.array(
        np.random.uniform(size=(batch_size, in_channel, in_height, in_width)).astype("float32"), dev
    )
    kernel_input = tvm.nd.array(
        np.random.uniform(size=(kernel_height, kernel_width, in_channel, out_channel)).astype(
            "float32"
        ),
        dev,
    )
    conv_out = tvm.nd.empty((batch_size, out_channel, out_height, out_width), "float32", dev)
    f(data_input, kernel_input, conv_out)
@tvm.testing.requires_llvm
@pytest.mark.xfail
def test_out_of_bounds_conv_llvm(data_offsets, kernel_offsets, loop_tiling=False):
    """Conv2d with per-dimension index offsets must trip the bound checker.

    data_offsets / kernel_offsets: 4-element offsets added to each index of
    the data / kernel accesses; callers pass a single nonzero entry to push
    exactly one dimension out of range. Marked xfail accordingly.
    """
    HSTR = WSTR = 1
    in_channel = 128
    kernel_height = kernel_width = 3
    out_channel = 64
    batch_size = 1
    in_height = in_width = 64
    out_height = out_width = in_height - kernel_height + 1
    data = te.placeholder((batch_size, in_channel, in_height, in_width), name="data")
    kernel = te.placeholder((kernel_height, kernel_width, in_channel, out_channel), name="kernel")
    ic = te.reduce_axis((0, in_channel), name="ic")
    kh = te.reduce_axis((0, kernel_height), name="kh")
    kw = te.reduce_axis((0, kernel_width), name="kw")
    conv = te.compute(
        (batch_size, out_channel, out_height, out_width),
        lambda n, oc, oh, ow: te.sum(
            data[
                n + data_offsets[0],
                ic + data_offsets[1],
                oh * HSTR + kh + data_offsets[2],
                ow * WSTR + kw + data_offsets[3],
            ]
            * kernel[
                kh + kernel_offsets[0],
                kw + kernel_offsets[1],
                ic + kernel_offsets[2],
                oc + kernel_offsets[3],
            ],
            axis=[ic, kh, kw],
        ),
        name="conv2d",
    )
    s = te.create_schedule(conv.op)
    n, oc, oh, ow = conv.op.axis
    if loop_tiling:
        oho, owo, ohi, owi = s[conv].tile(oh, ow, 16, 16)
    lowered_func = tvm.lower(s, [data, kernel, conv], simple_mode=True)
    dev = tvm.cpu(0)
    f = tvm.build(s, [data, kernel, conv], "llvm")
    data_input = tvm.nd.array(
        np.random.uniform(size=(batch_size, in_channel, in_height, in_width)).astype("float32"), dev
    )
    kernel_input = tvm.nd.array(
        np.random.uniform(size=(kernel_height, kernel_width, in_channel, out_channel)).astype(
            "float32"
        ),
        dev,
    )
    conv_out = tvm.nd.empty((batch_size, out_channel, out_height, out_width), "float32", dev)
    # Expected to raise at runtime due to the out-of-bounds access.
    f(data_input, kernel_input, conv_out)
@tvm.testing.requires_llvm
def test_in_bounds_tensors_with_same_shapes1D_llvm():
    """1-D elementwise multiply where every runtime buffer has shape (32,)."""
    n = te.size_var("n")
    k = te.size_var("k")
    m = te.size_var("m")
    A = te.placeholder((n,), name="A")
    B = te.placeholder((k,), name="B")
    T = te.compute((m,), lambda i: A[i] * B[i])
    sched = te.create_schedule(T.op)
    tvm.lower(sched, [A, B, T], "llvm", simple_mode=False)
    f = tvm.build(sched, [A, B, T], "llvm")
    dev = tvm.cpu(0)
    runtime_shape = (32,)
    a = tvm.nd.array(np.random.uniform(size=runtime_shape).astype(A.dtype), dev)
    b = tvm.nd.array(np.random.uniform(size=runtime_shape).astype(B.dtype), dev)
    t = tvm.nd.empty(runtime_shape, T.dtype, dev)
    f(a, b, t)
@tvm.testing.requires_llvm
@pytest.mark.xfail
def test_out_of_bounds_tensors_with_diff_shapes1D_llvm(a_shape, b_shape, c_shape):
    """Mismatched 1-D runtime shapes must trip the bound checker (xfail)."""
    n = te.size_var("n")
    k = te.size_var("k")
    m = te.size_var("m")
    A = te.placeholder((n,), name="A")
    B = te.placeholder((k,), name="B")
    T = te.compute((m,), lambda i: A[i] * B[i])
    s = te.create_schedule(T.op)
    lowered_func = tvm.lower(s, [A, B, T], "llvm", simple_mode=False)
    dev = tvm.cpu(0)
    f = tvm.build(s, [A, B, T], "llvm")
    a = tvm.nd.array(np.random.uniform(size=(a_shape,)).astype(A.dtype), dev)
    b = tvm.nd.array(np.random.uniform(size=(b_shape,)).astype(B.dtype), dev)
    t = tvm.nd.empty((c_shape,), T.dtype, dev)
    # Expected to raise at runtime: the shorter input is read past its end.
    f(a, b, t)
@tvm.testing.requires_llvm
def test_in_bounds_tensors_with_same_shapes2D_llvm():
    """2-D elementwise multiply where every runtime buffer has shape (32, 32)."""
    n = te.size_var("n")
    k = te.size_var("k")
    m = te.size_var("m")
    A = te.placeholder((n, n), name="A")
    B = te.placeholder((k, k), name="B")
    T = te.compute((m, m), lambda i, j: A[i][j] * B[i][j])
    s = te.create_schedule(T.op)
    lowered_func = tvm.lower(s, [A, B, T], "llvm", simple_mode=False)
    dev = tvm.cpu(0)
    f = tvm.build(s, [A, B, T], "llvm")
    a = tvm.nd.array(np.random.uniform(size=(32, 32)).astype(A.dtype), dev)
    b = tvm.nd.array(np.random.uniform(size=(32, 32)).astype(B.dtype), dev)
    t = tvm.nd.empty((32, 32), T.dtype, dev)
    f(a, b, t)
@tvm.testing.requires_llvm
@pytest.mark.xfail
def test_out_of_bounds_tensors_with_diff_shapes2D_llvm(a_shape, b_shape, c_shape):
    """Mismatched 2-D runtime shapes must trip the bound checker (xfail)."""
    n = te.size_var("n")
    k = te.size_var("k")
    m = te.size_var("m")
    A = te.placeholder((n, n), name="A")
    B = te.placeholder((k, k), name="B")
    T = te.compute((m, m), lambda i, j: A[i][j] * B[i][j])
    s = te.create_schedule(T.op)
    lowered_func = tvm.lower(s, [A, B, T], "llvm", simple_mode=False)
    dev = tvm.cpu(0)
    f = tvm.build(s, [A, B, T], "llvm")
    a = tvm.nd.array(np.random.uniform(size=(a_shape[0], a_shape[1])).astype(A.dtype), dev)
    b = tvm.nd.array(np.random.uniform(size=(b_shape[0], b_shape[1])).astype(B.dtype), dev)
    t = tvm.nd.empty((c_shape[0], c_shape[1]), T.dtype, dev)
    # Expected to raise at runtime: the smaller input is read past its end.
    f(a, b, t)
@tvm.testing.requires_llvm
def test_in_bounds_tensors_with_same_shapes3D_llvm():
    """3-D elementwise multiply where every runtime buffer has shape (32, 32, 32)."""
    n = te.size_var("n")
    k = te.size_var("k")
    m = te.size_var("m")
    A = te.placeholder((n, n, n), name="A")
    B = te.placeholder((k, k, k), name="B")
    T = te.compute((m, m, m), lambda i, j, p: A[i][j][p] * B[i][j][p])
    s = te.create_schedule(T.op)
    lowered_func = tvm.lower(s, [A, B, T], "llvm", simple_mode=False)
    dev = tvm.cpu(0)
    f = tvm.build(s, [A, B, T], "llvm")
    a = tvm.nd.array(np.random.uniform(size=(32, 32, 32)).astype(A.dtype), dev)
    b = tvm.nd.array(np.random.uniform(size=(32, 32, 32)).astype(B.dtype), dev)
    t = tvm.nd.empty((32, 32, 32), T.dtype, dev)
    f(a, b, t)
@tvm.testing.requires_llvm
@pytest.mark.xfail
def test_out_of_bounds_tensors_with_diff_shapes3D_llvm(a_shape, b_shape, c_shape):
    """Mismatched 3-D runtime shapes must trip the bound checker (xfail).

    a_shape / b_shape / c_shape: 3-element shapes for the A input, B input
    and output buffer; callers pass differing shapes so the computation over
    the largest shape reads past the end of the smaller inputs.
    """
    n = te.size_var("n")
    k = te.size_var("k")
    m = te.size_var("m")
    A = te.placeholder((n, n, n), name="A")
    B = te.placeholder((k, k, k), name="B")
    T = te.compute((m, m, m), lambda i, j, p: A[i][j][p] * B[i][j][p])
    s = te.create_schedule(T.op)
    lowered_func = tvm.lower(s, [A, B, T], "llvm", simple_mode=False)
    dev = tvm.cpu(0)
    f = tvm.build(s, [A, B, T], "llvm")
    # Fix: the A buffer previously mixed shape arguments
    # (size=(a_shape[0], a_shape[1], c_shape[2])); build each tensor from its
    # own shape argument, consistent with the 1D/2D variants of this test.
    a = tvm.nd.array(
        np.random.uniform(size=(a_shape[0], a_shape[1], a_shape[2])).astype(A.dtype), dev
    )
    b = tvm.nd.array(
        np.random.uniform(size=(b_shape[0], b_shape[1], b_shape[2])).astype(B.dtype), dev
    )
    t = tvm.nd.empty((c_shape[0], c_shape[1], c_shape[2]), T.dtype, dev)
    # Expected to raise at runtime: the smaller input is read past its end.
    f(a, b, t)
@tvm.testing.requires_llvm
@pytest.mark.xfail
def test_out_of_bounds_tensors_with_zero_shape_op_with_not_zero_shape_llvm():
    """Scalar (0-d) reduction whose index `k + k + k` overruns A (xfail)."""
    n = 64
    A = te.placeholder((n,), name="A")
    scale = te.placeholder((), name="scale")
    k = te.reduce_axis((0, n), name="k")
    # A[k + k + k] reaches index 3*(n-1), far past the n-element buffer.
    C = te.compute((), lambda: te.sum(A[k + k + k] * scale, axis=k), name="C")
    D = te.compute((), lambda: C + 1)
    s = te.create_schedule(D.op)
    stmt = tvm.lower(s, [A, scale, D], simple_mode=True)
    # build and invoke the kernel.
    f = tvm.build(s, [A, scale, D], "llvm")
    dev = tvm.cpu(0)
    # launch the kernel.
    a = tvm.nd.array(np.random.randint(0, 2, size=(n,)).astype(A.dtype), dev)
    sc = tvm.nd.array(np.random.randint(0, 2, size=()).astype(scale.dtype), dev)
    d = tvm.nd.empty((), D.dtype, dev)
    f(a, sc, d)
    d_np = np.sum(a.numpy()) * sc.numpy() + 1
    tvm.testing.assert_allclose(d.numpy(), d_np)
# Script entry point: run every bound-checker case with instrumentation
# enabled. The xfail-style cases are invoked directly here, so running this
# file as a script (rather than via pytest) is expected to abort on the
# first out-of-bounds case.
if __name__ == "__main__":
    with tvm.transform.PassContext(
        config={
            "tir.instrument_bound_checkers": True,
        }
    ):
        # zero scale
        test_out_of_bounds_tensors_with_zero_shape_op_with_not_zero_shape_llvm()
        # in bound
        test_in_bounds_llvm()
        # upper bound
        test_out_of_bounds_llvm(1, 0)
        test_out_of_bounds_llvm(0, 1)
        test_out_of_bounds_llvm(1, 1)
        test_out_of_bounds_llvm(10000, 0)
        test_out_of_bounds_llvm(0, 10000)
        test_out_of_bounds_llvm(10000, 10000)
        # lower bound
        test_out_of_bounds_llvm(-1, 0)
        test_out_of_bounds_llvm(0, -1)
        test_out_of_bounds_llvm(-1, -1)
        test_out_of_bounds_llvm(-10000, 0)
        test_out_of_bounds_llvm(0, -10000)
        test_out_of_bounds_llvm(-10000, -10000)
        # vectorize in bound
        test_in_bounds_vectorize_llvm()
        # vectorization upper bound
        test_out_of_bounds_vectorize_llvm(1024, 1000, 0)
        test_out_of_bounds_vectorize_llvm(1024, 0, 10000)
        # vectorization lower bound
        test_out_of_bounds_vectorize_llvm(1024, -1000, 0)
        test_out_of_bounds_vectorize_llvm(1024, 0, -10000)
        test_in_bounds_const_loop_partition_llvm()
        test_out_of_bounds_const_loop_partition_llvm(1, 0)
        test_out_of_bounds_const_loop_partition_llvm(0, 1)
        test_out_of_bounds_const_loop_partition_llvm(-1, 0)
        test_out_of_bounds_const_loop_partition_llvm(0, -1)
        test_in_bounds_loop_partition_basic_llvm()
        test_out_of_bounds_loop_partition_basic_llvm(32, 0)
        test_out_of_bounds_loop_partition_basic_llvm(0, 32)
        test_out_of_bounds_loop_partition_basic_llvm(-32, 0)
        test_out_of_bounds_loop_partition_basic_llvm(0, -32)
        # conv
        test_in_bounds_conv_llvm()
        test_out_of_bounds_conv_llvm([1, 0, 0, 0], [0, 0, 0, 0])
        test_out_of_bounds_conv_llvm([0, 1, 0, 0], [0, 0, 0, 0])
        test_out_of_bounds_conv_llvm([0, 0, 1, 0], [0, 0, 0, 0])
        test_out_of_bounds_conv_llvm([0, 0, 0, 1], [0, 0, 0, 0])
        test_out_of_bounds_conv_llvm([-1, 0, 0, 0], [0, 0, 0, 0])
        test_out_of_bounds_conv_llvm([0, -1, 0, 0], [0, 0, 0, 0])
        test_out_of_bounds_conv_llvm([0, 0, -1, 0], [0, 0, 0, 0])
        test_out_of_bounds_conv_llvm([0, 0, 0, -1], [0, 0, 0, 0])
        test_out_of_bounds_conv_llvm([0, 0, 0, 0], [1, 0, 0, 0])
        test_out_of_bounds_conv_llvm([0, 0, 0, 0], [0, 1, 0, 0])
        test_out_of_bounds_conv_llvm([0, 0, 0, 0], [0, 0, 1, 0])
        test_out_of_bounds_conv_llvm([0, 0, 0, 0], [0, 0, 0, 1])
        test_out_of_bounds_conv_llvm([0, 0, 0, 0], [-1, 0, 0, 0])
        test_out_of_bounds_conv_llvm([0, 0, 0, 0], [0, -1, 0, 0])
        test_out_of_bounds_conv_llvm([0, 0, 0, 0], [0, 0, -1, 0])
        test_out_of_bounds_conv_llvm([0, 0, 0, 0], [0, 0, 0, -1])
        # loop tiling
        test_in_bounds_conv_llvm(True)
        test_out_of_bounds_conv_llvm([1, 0, 0, 0], [0, 0, 0, 0], True)
        test_out_of_bounds_conv_llvm([0, 1, 0, 0], [0, 0, 0, 0], True)
        test_out_of_bounds_conv_llvm([0, 0, 1, 0], [0, 0, 0, 0], True)
        test_out_of_bounds_conv_llvm([0, 0, 0, 1], [0, 0, 0, 0], True)
        test_out_of_bounds_conv_llvm([-1, 0, 0, 0], [0, 0, 0, 0], True)
        test_out_of_bounds_conv_llvm([0, -1, 0, 0], [0, 0, 0, 0], True)
        test_out_of_bounds_conv_llvm([0, 0, -1, 0], [0, 0, 0, 0], True)
        test_out_of_bounds_conv_llvm([0, 0, 0, -1], [0, 0, 0, 0], True)
        test_out_of_bounds_conv_llvm([0, 0, 0, 0], [1, 0, 0, 0], True)
        test_out_of_bounds_conv_llvm([0, 0, 0, 0], [0, 1, 0, 0], True)
        test_out_of_bounds_conv_llvm([0, 0, 0, 0], [0, 0, 1, 0], True)
        test_out_of_bounds_conv_llvm([0, 0, 0, 0], [0, 0, 0, 1], True)
        test_out_of_bounds_conv_llvm([0, 0, 0, 0], [-1, 0, 0, 0], True)
        test_out_of_bounds_conv_llvm([0, 0, 0, 0], [0, -1, 0, 0], True)
        test_out_of_bounds_conv_llvm([0, 0, 0, 0], [0, 0, -1, 0], True)
        test_out_of_bounds_conv_llvm([0, 0, 0, 0], [0, 0, 0, -1], True)
        # tensors with diff shapes basic operation such as mul
        test_out_of_bounds_tensors_with_diff_shapes1D_llvm(32, 64, 64)
        test_out_of_bounds_tensors_with_diff_shapes1D_llvm(64, 32, 64)
        test_out_of_bounds_tensors_with_diff_shapes2D_llvm([64, 64], [32, 32], [64, 64])
        test_out_of_bounds_tensors_with_diff_shapes2D_llvm([32, 32], [64, 64], [64, 64])
        test_out_of_bounds_tensors_with_diff_shapes3D_llvm([64, 64, 64], [32, 32, 32], [64, 64, 64])
        test_out_of_bounds_tensors_with_diff_shapes3D_llvm([32, 32, 32], [64, 64, 64], [64, 64, 64])
        # check tensors with the same shapes
        test_in_bounds_tensors_with_same_shapes1D_llvm()
        test_in_bounds_tensors_with_same_shapes2D_llvm()
        test_in_bounds_tensors_with_same_shapes3D_llvm()
        # ir tests
        test_in_bounds_const_loop_partition_ir()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_transform_ir_utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import tvm
from tvm import tir, ir
def test_convert_ssa():
    """Reusing the same LetStmt (and its Var) twice must survive SSA conversion.

    Builds a SeqStmt containing the identical LetStmt node twice plus a load
    through the let-bound pointer, then runs a pass over it; the test passes
    if the pass completes without raising.
    """
    dtype = "int32"
    zero = tir.const(0)
    nop = tir.Evaluate(zero)
    var_type = ir.PointerType(ir.PrimType(dtype))
    v = tir.Var("i1", var_type)
    buf = tir.decl_buffer([16], dtype=dtype, data=v)
    let = tir.LetStmt(v, v, nop)
    load = tir.Evaluate(tir.BufferLoad(buf, [zero]))
    seq = tir.SeqStmt([let, let, load])
    func = tir.PrimFunc([], seq)
    mod = tvm.IRModule({"main": func})
    mod = tir.transform.InjectVirtualThread()(
        mod
    )  # Use pass InjectVirtualThread to invoke ConvertSSA
# Script entry point: run this file's tests through pytest directly.
if __name__ == "__main__":
    pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_transform_lift_attr_scope.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
def test_coproc_lift():
    """LiftAttrScope should hoist identical coproc_uop_scope attrs upward.

    Case 1: every statement under the outer loop carries the same attribute,
    so it lifts to the loop level. Case 2: only the last two inner loops
    share the attribute, so the lift stops at their common parent.
    """
    ib = tvm.tir.ir_builder.create()
    n = te.var("n")
    cp = te.thread_axis((0, 1), "cop")
    value = tvm.tir.StringImm("xxx")

    A = ib.allocate("float32", n, name="A", scope="global")
    with ib.for_range(0, n, name="i") as i:
        with ib.for_range(0, 10, name="j") as j:
            ib.scope_attr(cp, "coproc_uop_scope", value)
            A[i] = A[i] + 1
        with ib.if_scope(i.equal(0)):
            # NOTE(review): nesting of the two `+ 3` stores relative to this
            # inner loop is reconstructed from a whitespace-stripped dump —
            # confirm against upstream before relying on it.
            with ib.for_range(0, 10, name="j") as j:
                ib.scope_attr(cp, "coproc_uop_scope", value)
                A[j] = A[j] + 2
                A[j] = A[j] + 3
                A[j] = A[j] + 3
    body = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([n], body))
    body = tvm.tir.transform.LiftAttrScope("coproc_uop_scope")(mod)["main"]
    # Attribute lifted all the way up: the AttrStmt node is the scope axis.
    assert body.body.body.node == cp

    # only able to lift to the common pattern of the last two fors.
    ib = tvm.tir.ir_builder.create()
    A = ib.allocate("float32", n, name="A", scope="global")
    with ib.for_range(0, n, name="i") as i:
        with ib.for_range(0, 10, name="j") as j:
            A[j] = A[j] + 1
        with ib.for_range(0, 10, name="j") as j:
            ib.scope_attr(cp, "coproc_uop_scope", value)
            A[i] = A[i] + 1
        with ib.for_range(0, 10, name="j") as j:
            ib.scope_attr(cp, "coproc_uop_scope", value)
            A[i] = A[i] + 2
    body = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([n], body))
    body = tvm.tir.transform.LiftAttrScope("coproc_uop_scope")(mod)["main"]
    # Result: a 2-element SeqStmt whose second entry carries the lifted attr.
    assert body.body.body.body[1].node == cp
    assert len(body.body.body.body) == 2
# Script entry point: run the single test in this file directly.
if __name__ == "__main__":
    test_coproc_lift()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_transform_loop_partition.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm import te
from tvm.ir.module import IRModule
from tvm.script import tir as T
import numpy
def collect_visit(stmt, f):
    """Post-order visit of ``stmt``, returning ``f(node)`` for every node."""
    collected = []
    tvm.tir.stmt_functor.post_order_visit(stmt, lambda node: collected.append(f(node)))
    return collected
def test_basic():
    """LoopPartition on a symbolic split loop: main part loses its IfThenElse.

    After partitioning + simplification, the first partition must contain no
    conditional, while the tail partition still does.
    """
    n = te.size_var("n")
    A = te.placeholder((n,), name="A")
    B = te.placeholder((n,), name="B")
    T = te.compute((n,), lambda i: A[i] + B[i])
    s = te.create_schedule(T.op)
    xo, xi = s[T].split(T.op.axis[0], factor=4)
    bounds = tvm.te.schedule.InferBound(s)
    stmt = tvm.te.schedule.ScheduleOps(s, bounds)
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([n], stmt))
    mod = tvm.tir.transform.LoopPartition()(mod)
    stmt = tvm.tir.transform.Simplify()(mod)["main"]
    assert not any(collect_visit(stmt.body.body[0], lambda x: isinstance(x, tvm.tir.IfThenElse)))
    assert any(collect_visit(stmt.body.body[1], lambda x: isinstance(x, tvm.tir.IfThenElse)))
def test_const_loop():
    """With partition_const_loop, a constant-extent loop loses all IfThenElse."""
    n = 21
    A = te.placeholder((n,), name="A")
    B = te.placeholder((n,), name="B")
    T = te.compute((n,), lambda i: A[i] + B[i])
    s = te.create_schedule(T.op)
    xo, xi = s[T].split(T.op.axis[0], factor=4)
    bounds = tvm.te.schedule.InferBound(s)
    stmt = tvm.te.schedule.ScheduleOps(s, bounds)
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    with tvm.transform.PassContext(config={"tir.LoopPartition": {"partition_const_loop": True}}):
        mod = tvm.tir.transform.LoopPartition()(mod)
        stmt = tvm.tir.transform.Simplify()(mod)["main"].body
    assert not any(collect_visit(stmt, lambda x: isinstance(x, tvm.tir.IfThenElse)))
def test_no_unroll_loop():
    """no_unroll_loop_with_extent_one keeps extent-1 partitions as For loops.

    With the option set, the partitioned IR must still contain 4 For nodes
    rather than unrolling the single-iteration partitions away.
    """
    n = 21
    A = te.placeholder((n,), name="A")
    B = te.placeholder((n,), name="B")
    T = te.compute((n,), lambda i: A[i] + B[i])
    s = te.create_schedule(T.op)
    xo, xi = s[T].split(T.op.axis[0], factor=4)
    bounds = tvm.te.schedule.InferBound(s)
    stmt = tvm.te.schedule.ScheduleOps(s, bounds)
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    with tvm.transform.PassContext(
        config={
            "tir.LoopPartition": {
                "partition_const_loop": True,
                "no_unroll_loop_with_extent_one": True,
            }
        }
    ):
        mod = tvm.tir.transform.LoopPartition()(mod)
        mod = tvm.tir.transform.Simplify()(mod)
        stmt = tvm.tir.transform.RemoveNoOp()(mod)["main"].body
    assert sum(collect_visit(stmt, lambda x: isinstance(x, tvm.tir.For))) == 4
def test_multi_loop():
    """Partitioning a triple loop with a likely-condition drops the first branch.

    The likely(i*m + j + k < n) hint lets LoopPartition split the i loop so
    the first partition carries no IfThenElse.
    """
    ib = tvm.tir.ir_builder.create()
    m = te.size_var("m")
    n = te.size_var("n")
    with ib.for_range(0, 4, "i") as i:
        with ib.for_range(0, n, "j") as j:
            with ib.for_range(0, m, "k") as k:
                with ib.if_scope(ib.likely(i * m + j + k < n)):
                    ib.emit(tvm.tir.Evaluate(m))
                with ib.else_scope():
                    ib.emit(tvm.tir.Evaluate(n))
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([n, m], stmt))
    mod = tvm.tir.transform.LoopPartition()(mod)
    stmt = tvm.tir.transform.Simplify()(mod)["main"].body
    assert not any(collect_visit(stmt.body[0], lambda x: isinstance(x, tvm.tir.IfThenElse)))
def test_multi_if():
    """Two likely-conditions in one loop nest both get partitioned away.

    Like test_multi_loop but with a second if/else on i*m + j - k < n; the
    first partition must still contain no IfThenElse after simplification.
    """
    ib = tvm.tir.ir_builder.create()
    m = te.size_var("m")
    n = te.size_var("n")
    with ib.for_range(0, 4, "i") as i:
        with ib.for_range(0, n, "j") as j:
            with ib.for_range(0, m, "k") as k:
                with ib.if_scope(ib.likely(i * m + j + k < n)):
                    ib.emit(tvm.tir.Evaluate(m))
                with ib.else_scope():
                    ib.emit(tvm.tir.Evaluate(n))
                with ib.if_scope(ib.likely(i * m + j - k < n)):
                    ib.emit(tvm.tir.Evaluate(m))
                with ib.else_scope():
                    ib.emit(tvm.tir.Evaluate(n))
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    mod = tvm.tir.transform.LoopPartition()(mod)
    stmt = tvm.tir.transform.Simplify()(mod)["main"].body
    assert not any(collect_visit(stmt.body[0], lambda x: isinstance(x, tvm.tir.IfThenElse)))
def test_thread_axis():
    """LoopPartition handles a threadIdx-bound axis: main part is branch-free."""
    m = te.size_var("m")
    l = te.size_var("l")
    A = te.placeholder((m, l), name="A")
    B = te.compute((m, l), lambda i, j: A[i, j] + 3, name="B")
    s = te.create_schedule(B.op)
    s[B].set_scope("shared")
    num_thread = 16
    xo, xi = s[B].split(B.op.axis[0], 32)
    xi0, xi1 = s[B].split(xi, nparts=num_thread)
    s[B].bind(xi0, te.thread_axis("threadIdx.x"))
    bounds = tvm.te.schedule.InferBound(s)
    stmt = tvm.te.schedule.ScheduleOps(s, bounds)
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    mod = tvm.tir.transform.LoopPartition()(mod)
    stmt = tvm.tir.transform.Simplify()(mod)["main"]
    assert not any(collect_visit(stmt.body.body[0], lambda x: isinstance(x, tvm.tir.IfThenElse)))
def test_vectorize():
    """Partitioning keeps the vectorized inner axis out of the loop condition.

    The remaining IfThenElse condition must not reference the vectorized
    variable, and the then-branch must still contain a Ramp (i.e. stay
    vectorized).
    """
    n = te.size_var("n")
    A = te.placeholder((n,), name="A")
    B = te.placeholder((n,), name="B")
    bias = te.size_var("bias", dtype="float32")
    scale = te.size_var("scale", dtype="float32")
    C = te.compute(A.shape, lambda *i: A(*i) + B(*i) * scale + bias, name="C")
    # schedule
    s = te.create_schedule(C.op)
    # create iter var and assign them tags.
    num_thread = 32
    bx, x = s[C].split(C.op.axis[0], factor=num_thread * 4)
    tx, x = s[C].split(x, nparts=num_thread)
    _, x = s[C].split(x, factor=4)
    s[C].bind(bx, te.thread_axis("blockIdx.x"))
    s[C].bind(tx, te.thread_axis("threadIdx.x"))
    s[C].vectorize(x)
    stmt = tvm.lower(s, [A, B], name="main")["main"]
    body = stmt.body.body.body.body
    assert x.var.name not in str(body.condition)
    assert any(collect_visit(body.then_case, lambda x: isinstance(x, tvm.tir.Ramp)))
def test_condition():
    """A likely() inside a Select expression is eliminated by partitioning."""
    ib = tvm.tir.ir_builder.create()
    m = te.size_var("m")
    n = te.size_var("n")
    with ib.for_range(0, tvm.tir.truncdiv(n + 3, 4), "i") as i:
        with ib.for_range(0, 4, "j") as j:
            ib.emit(tvm.tir.Evaluate(tvm.tir.Select(ib.likely(i * 4 + j < n), m, n)))
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([m, n], stmt))
    mod = tvm.tir.transform.LoopPartition()(mod)
    stmt = tvm.tir.transform.Simplify()(mod)["main"].body
    assert not any(collect_visit(stmt[0], lambda x: isinstance(x, tvm.tir.Select)))
def test_condition_EQ():
    """An equality likely() condition partitions a constant loop (needs partition_const_loop)."""
    ib = tvm.tir.ir_builder.create()
    m = te.size_var("m")
    n = te.size_var("n")
    with ib.for_range(0, 10, "i") as i:
        ib.emit(tvm.tir.Evaluate(tvm.tir.Select(ib.likely(tvm.tir.EQ(i, 5)), m, n)))
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([m, n], stmt))
    with tvm.transform.PassContext(config={"tir.LoopPartition": {"partition_const_loop": True}}):
        mod = tvm.tir.transform.LoopPartition()(mod)
    stmt = tvm.tir.transform.Simplify()(mod)["main"].body
    assert not any(collect_visit(stmt[0], lambda x: isinstance(x, tvm.tir.Select)))
def test_thread_axis2():
    """After partitioning, the head loop's extent must not depend on threadIdx."""
    n = tvm.runtime.convert(4096)
    m = te.size_var("m")
    A = te.placeholder((n,), name="A")
    B = te.placeholder((n,), name="B")
    C = te.compute(A.shape, lambda i: A[i] + B[i], name="C")
    s = te.create_schedule(C.op)
    num_thread = 32
    bx, x = s[C].split(C.op.axis[0], factor=32)
    tx, x = s[C].split(x, nparts=num_thread)
    _, x = s[C].split(x, factor=m)
    s[C].bind(bx, te.thread_axis("blockIdx.x"))
    s[C].bind(tx, te.thread_axis("threadIdx.x"))
    stmt = tvm.lower(s, [A, B], name="main")["main"]
    for_body = stmt.body.body.body.body[0]
    assert "threadIdx" not in str(for_body.extent)
def test_everything_during_deduction():
    """A guard whose deduction covers everything must be preserved, not dropped."""
    m = te.size_var("m")
    n = te.size_var("n")
    ib = tvm.tir.ir_builder.create()
    with ib.for_range(0, n, "i") as i:
        with ib.for_range(0, 32, "j") as j:
            with ib.if_scope(ib.likely(tvm.tir.truncdiv(i, j) < m)):
                # this guard will produce everything during deduction
                ib.emit(tvm.tir.Evaluate(m))
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([m, n], stmt))
    mod = tvm.tir.transform.LoopPartition()(mod)
    stmt = tvm.tir.transform.Simplify()(mod)["main"].body
    # The IfThenElse must survive: partitioning could not prove it redundant.
    assert isinstance(stmt.body.body, tvm.tir.IfThenElse)
def test_single_likely():
    """An indivisible split (60 / 16) is fully partitioned with partition_const_loop."""
    n = 60
    A = te.placeholder((n,), name="A")
    B = te.placeholder((n,), name="B")
    T = te.compute((n,), lambda i: A[i] + B[i])
    s = te.create_schedule(T.op)
    x = T.op.axis[0]
    xo, xi = s[T].split(x, factor=16)
    bounds = tvm.te.schedule.InferBound(s)
    stmt = tvm.te.schedule.ScheduleOps(s, bounds)
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    with tvm.transform.PassContext(config={"tir.LoopPartition": {"partition_const_loop": True}}):
        mod = tvm.tir.transform.LoopPartition()(mod)
    stmt = tvm.tir.transform.Simplify()(mod)["main"].body
    assert not any(collect_visit(stmt, lambda x: isinstance(x, tvm.tir.IfThenElse)))
def test_multi_likely():
    """Two indivisible tiled axes (94/16, 62/16) partition with no residual guards."""
    n = 94
    m = 62
    A = te.placeholder((n, m), name="A")
    B = te.placeholder((n, m), name="B")
    T = te.compute((n, m), lambda i, j: A[i, j] + B[i, j])
    s = te.create_schedule(T.op)
    bounds = tvm.te.schedule.InferBound(s)
    stmt = tvm.te.schedule.ScheduleOps(s, bounds)
    x, y = T.op.axis
    xo, xi = s[T].split(x, factor=16)
    yo, yi = s[T].split(y, factor=16)
    s[T].reorder(xo, yo, xi, yi)
    bounds = tvm.te.schedule.InferBound(s)
    stmt = tvm.te.schedule.ScheduleOps(s, bounds)
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    with tvm.transform.PassContext(config={"tir.LoopPartition": {"partition_const_loop": True}}):
        mod = tvm.tir.transform.LoopPartition()(mod)
    stmt = tvm.tir.transform.Simplify()(mod)["main"].body
    assert not any(collect_visit(stmt, lambda x: isinstance(x, tvm.tir.IfThenElse)))
def test_oneD_pool():
    """Boundary guards of a hand-written 1-D pooling pattern are all partitioned away."""
    m = te.size_var("m")
    ib = tvm.tir.ir_builder.create()
    # data = te.placeholder((16,), name = 'data')
    data = ib.pointer("float32", name="A")
    out = ib.pointer("float32", name="A")
    # Interior region: 0 < ow < 15.
    with ib.for_range(0, 16, "ow") as ow:
        with ib.for_range(0, 3, "kw") as kw:
            with ib.if_scope(ib.likely(ow > 0)):
                with ib.if_scope(ib.likely(ow < 15)):
                    out[ow] = tvm.te.max(out[ow], data[ow + kw - 1])
    # Left border: ow == 0, valid kernel taps only.
    with ib.for_range(0, 16, "ow") as ow:
        with ib.for_range(0, 3, "kw") as kw:
            with ib.if_scope(ib.likely(ow < 1)):
                with ib.if_scope(ib.likely(kw > 0)):
                    out[ow] = tvm.te.max(out[ow], data[ow + kw - 1])
    # Right border: ow == 15, valid kernel taps only.
    with ib.for_range(0, 16, "ow") as ow:
        with ib.for_range(0, 3, "kw") as kw:
            with ib.if_scope(ib.likely(ow > 14)):
                with ib.if_scope(ib.likely(kw < 2)):
                    out[ow] = tvm.te.max(out[ow], data[ow + kw - 1])
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([m, data, out], stmt))
    with tvm.transform.PassContext(config={"tir.LoopPartition": {"partition_const_loop": True}}):
        mod = tvm.tir.transform.LoopPartition()(mod)
    stmt = tvm.tir.transform.Simplify()(mod)["main"].body
    assert not any(collect_visit(stmt, lambda x: isinstance(x, tvm.tir.IfThenElse)))
def test_cce_loop_1():
    """A likely() tail guard over a rectangular 11x160 nest is fully partitioned.

    The loop nest covers 1760 iterations but only the first 1600 are valid;
    with ``partition_const_loop`` enabled no IfThenElse may survive.
    """
    ib = tvm.tir.ir_builder.create()
    dtype = "float16"
    n = 514
    m = 514
    # NOTE: the original version also created unused te.placeholder objects
    # (_A, _B); only the declared buffers participate in the PrimFunc.
    Ab = tvm.tir.decl_buffer((n * m,), dtype, name="A")
    A = ib.buffer_ptr(Ab)
    Bb = tvm.tir.decl_buffer((n * m,), dtype, name="B")
    B = ib.buffer_ptr(Bb)
    # for i in 0 to n-1:
    with ib.for_range(0, 11, name="i") as i:
        with ib.for_range(0, 160, name="j") as j:
            with ib.if_scope(ib.likely(((i * 160) + j) < 1600)):
                A[(i + 1) * m + j + 1] = (
                    B[(i) * m + j + 1] + B[(i + 1) * m + j + 1] + B[(i + 2) * m + j + 1]
                )
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([Ab, Bb], stmt))
    with tvm.transform.PassContext(config={"tir.LoopPartition": {"partition_const_loop": True}}):
        mod = tvm.tir.transform.LoopPartition()(mod)
    stmt = tvm.tir.transform.Simplify()(mod)["main"].body
    assert not any(collect_visit(stmt, lambda x: isinstance(x, tvm.tir.IfThenElse)))
def test_cce_loop_2():
    """Head/tail tiling pattern (112 split by 32) loses its if/else after partitioning."""
    ib = tvm.tir.ir_builder.create()
    len = 112
    tile = 32
    loop = (len + tile - 1) // tile
    with ib.for_range(0, loop, "i") as i:
        head = i * tile
        # Only the last tile overruns `len`; partitioning peels it off.
        with ib.if_scope(ib.likely(head + tile > len)):
            tail = len
            ib.emit(tvm.tir.call_extern("float32", "cce_intrisic", head, tail))
        with ib.else_scope():
            tail = head + tile
            ib.emit(tvm.tir.call_extern("float32", "cce_intrisic", head, tail))
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    with tvm.transform.PassContext(config={"tir.LoopPartition": {"partition_const_loop": True}}):
        mod = tvm.tir.transform.LoopPartition()(mod)
    stmt = tvm.tir.transform.Simplify()(mod)["main"].body
    assert not any(collect_visit(stmt, lambda x: isinstance(x, tvm.tir.IfThenElse)))
def test_cce_loop_3():
    """Guard on a combination of two loop vars (i*4 + j < 39991) is partitioned away."""
    ib = tvm.tir.ir_builder.create()
    loop1 = 4
    loop2 = 9998
    tile = 39991
    with ib.for_range(0, loop2, "i") as i:
        with ib.for_range(0, loop1, "j") as j:
            head1 = i
            head2 = j
            with ib.if_scope(ib.likely(head1 * loop1 + head2 < tile)):
                ib.emit(tvm.tir.call_extern("float16", "cce_intrisic", head1))
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    with tvm.transform.PassContext(config={"tir.LoopPartition": {"partition_const_loop": True}}):
        mod = tvm.tir.transform.LoopPartition()(mod)
    stmt = tvm.tir.transform.Simplify()(mod)["main"].body
    assert not any(collect_visit(stmt, lambda x: isinstance(x, tvm.tir.IfThenElse)))
def test_conv_tiling():
    """Tiled conv2d (62x62 output tiled 16x16) partitions with no residual guards."""
    HSTR = WSTR = 1
    in_channel = 128
    kernel_height = kernel_width = 3
    out_channel = 64
    batch_size = 1
    in_height = in_width = 64
    out_height = out_width = in_height - kernel_height + 1
    data = te.placeholder((batch_size, in_channel, in_height, in_width), name="data")
    kernel = te.placeholder((kernel_height, kernel_width, in_channel, out_channel), name="kernel")
    ic = te.reduce_axis((0, in_channel), name="ic")
    kh = te.reduce_axis((0, kernel_height), name="kh")
    kw = te.reduce_axis((0, kernel_width), name="kw")
    conv = te.compute(
        (batch_size, out_channel, out_height, out_width),
        lambda n, oc, oh, ow: te.sum(
            data[n, ic, oh * HSTR + kh, ow * WSTR + kw] * kernel[kh, kw, ic, oc], axis=[ic, kh, kw]
        ),
        name="conv2d",
    )
    s = te.create_schedule(conv.op)
    n, oc, oh, ow = conv.op.axis
    oho, owo, ohi, owi = s[conv].tile(oh, ow, 16, 16)
    bounds = tvm.te.schedule.InferBound(s)
    stmt = tvm.te.schedule.ScheduleOps(s, bounds)
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    with tvm.transform.PassContext(config={"tir.LoopPartition": {"partition_const_loop": True}}):
        mod = tvm.tir.transform.LoopPartition()(mod)
    stmt = tvm.tir.transform.Simplify()(mod)["main"].body
    assert not any(collect_visit(stmt, lambda x: isinstance(x, tvm.tir.IfThenElse)))
def test_multilevel_splitting_with_indivisble_factors():
    """Two-level indivisible split of relu(130) keeps exactly 10 unrolled Max ops."""
    from tvm import topi
    A = te.placeholder((130,), dtype="float32")
    B = topi.nn.relu(A)
    s = te.create_schedule(B.op)
    (y,) = s[B].op.axis
    (yo, yi) = s[B].split(y, factor=8)
    (yoo, yoi) = s[B].split(yo, factor=16)
    s[B].reorder(yoo, yoi, yi)
    s[B].unroll(yi)
    ## But this does the right thing.
    with tvm.transform.PassContext(config={"tir.LoopPartition": {"partition_const_loop": True}}):
        lowered_body = tvm.lower(s, [A, B], name="x")["x"].body
        def visit_stmt(op):
            return isinstance(op, tvm.tir.Max)
        num_max = collect_visit(lowered_body, visit_stmt)
        # 8 unrolled Max in the main body + 2 in the peeled tail.
        assert num_max.count(True) == 10
def test_double_splitting_with_indivisible_factors():
    """End-to-end check: partitioned double split compiles and computes correctly.

    Verifies both that no IfThenElse survives in the lowered body and that the
    built LLVM function produces numerically correct results.
    """
    m = 48
    dtype = "float32"
    A = te.placeholder((m,), name="A", dtype=dtype)
    C = te.compute((m,), lambda i: A[i], name="C")
    D = te.compute((m,), lambda i: C[i], name="D")
    s = te.create_schedule(D.op)
    co, ci = s[C].split(C.op.axis[0], factor=10)
    do, di = s[D].split(D.op.axis[0], 32)
    s[C].compute_at(s[D], do)
    target = "llvm"
    with tvm.transform.PassContext(config={"tir.LoopPartition": {"partition_const_loop": True}}):
        f = tvm.lower(s, [A, C, D], name="fadd1", simple_mode=False)
        func = tvm.build(f, target=target)
    top_produce = f["fadd1"].body
    assert not any(collect_visit(top_produce, lambda x: isinstance(x, tvm.tir.IfThenElse)))
    # check functional correctness of generated code
    dev = tvm.device(target, 0)
    a = tvm.nd.array(
        numpy.ones(
            m,
        ).astype(dtype),
        dev,
    )
    c = tvm.nd.array(
        numpy.zeros(
            m,
        ).astype(dtype),
        dev,
    )
    d = tvm.nd.array(
        numpy.zeros(
            m,
        ).astype(dtype),
        dev,
    )
    func(a, c, d)
    tvm.testing.assert_allclose(c.numpy(), a.numpy(), rtol=1e-5)
    tvm.testing.assert_allclose(d.numpy(), a.numpy(), rtol=1e-5)
def test_simple_rfactor():
    """LoopPartition must actually change an rfactor-ed reduction (K = 68, split 16)."""
    K = 16 * 4 + 4
    k = te.reduce_axis((0, K), "k")
    A = te.placeholder((1, K), name="A")
    B = te.compute((1,), lambda b: te.sum(A[b, k], axis=k), name="B")
    s = te.create_schedule(B.op)
    ko, _ = s[B].split(s[B].op.reduce_axis[0], 16)
    BF = s.rfactor(B, ko, 0)
    s.normalize()
    bounds = tvm.te.schedule.InferBound(s)
    stmt1 = tvm.te.schedule.ScheduleOps(s, bounds)
    mod1 = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt1))
    stmt1 = tvm.tir.transform.Simplify()(mod1)["main"].body
    with tvm.transform.PassContext(config={"tir.LoopPartition": {"partition_const_loop": True}}):
        mod2 = tvm.tir.transform.LoopPartition()(mod1)
        stmt2 = tvm.tir.transform.Simplify()(mod2)["main"].body
    # make sure loop partition actually did something
    assert not tvm.ir.structural_equal(stmt1.body, stmt2.body)
# Expected IR for test_explicit_partition_hint: the i < 16 select of the
# concat is split into two back-to-back copy loops (compared structurally,
# so the code below must stay byte-exact).
@T.prim_func
def partitioned_concat(
    A: T.Buffer[(16,), "float32"], B: T.Buffer[(16,), "float32"], C: T.Buffer[(32,), "float32"]
) -> None:
    T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
    T.preflattened_buffer(A, [16], data=A.data)
    T.preflattened_buffer(B, [16], data=B.data)
    T.preflattened_buffer(C, [32], data=C.data)
    for i in T.serial(0, 16):
        C[i] = A[i]
    for i in T.serial(0, 16):
        C[i + 16] = B[i + 16]
def test_explicit_partition_hint():
    """A loop_partition_hint pragma splits an if_then_else concat into two loops."""
    A = te.placeholder((16,), name="A")
    B = te.placeholder((16,), name="B")
    C = te.compute((32,), lambda i: te.if_then_else(i < 16, A[i], B[i]), name="C")
    s = te.create_schedule(C.op)
    s.normalize()
    s[C].pragma(s[C].op.axis[0], "loop_partition_hint", True)
    mod = tvm.driver.build_module.schedule_to_module(s, [A, B, C], "main", None)
    with tvm.transform.PassContext(config={"tir.LoopPartition": {"partition_const_loop": True}}):
        mod = tvm.tir.transform.StorageFlatten(64)(mod)
        mod = tvm.tir.transform.LoopPartition()(mod)
        mod = tvm.tir.transform.Simplify()(mod)
    assert tvm.ir.structural_equal(mod["main"], partitioned_concat)
def partition_from_scheduled_tir(prim_func, pass_cfg):
    """Apply the LoopPartition lowering pipeline to *prim_func*.

    The passes run in a fixed order (LowerOpaqueBlock, FlattenBuffer,
    LoopPartition, Simplify, RemoveNoOp) under the given pass config, and the
    resulting IRModule is returned.
    """
    with tvm.transform.PassContext(config=pass_cfg):
        module = IRModule.from_expr(prim_func)
        for transform in (
            tvm.tir.transform.LowerOpaqueBlock(),
            tvm.tir.transform.FlattenBuffer(),
            tvm.tir.transform.LoopPartition(),
            tvm.tir.transform.Simplify(),
            tvm.tir.transform.RemoveNoOp(),
        ):
            module = transform(module)
    return module
# Expected IR for test_condition_mutually_exclusive: the three mutually
# exclusive branches of concat_func_3 become three independent copy nests
# (compared structurally, so the code below must stay byte-exact).
@T.prim_func
def partitioned_concat_3(
    placeholder: T.Buffer[(50176,), "int8"],
    placeholder_1: T.Buffer[(25088,), "int8"],
    placeholder_2: T.Buffer[(25088,), "int8"],
    T_concat: T.Buffer[(100352,), "int8"],
) -> None:
    T.preflattened_buffer(placeholder, [1, 64, 28, 28], "int8", data=placeholder.data)
    T.preflattened_buffer(placeholder_1, [1, 32, 28, 28], "int8", data=placeholder_1.data)
    T.preflattened_buffer(placeholder_2, [1, 32, 28, 28], "int8", data=placeholder_2.data)
    T.preflattened_buffer(T_concat, [1, 128, 28, 28], "int8", data=T_concat.data)
    for i1, i2, i3 in T.grid(64, 28, 28):
        T_concat[i1 * 784 + i2 * 28 + i3] = placeholder[i1 * 784 + i2 * 28 + i3]
    for i1, i2, i3 in T.grid(32, 28, 28):
        T_concat[i1 * 784 + i2 * 28 + i3 + 50176] = placeholder_1[i1 * 784 + i2 * 28 + i3]
    for i1, i2, i3 in T.grid(32, 28, 28):
        T_concat[i1 * 784 + i2 * 28 + i3 + 75264] = placeholder_2[i1 * 784 + i2 * 28 + i3]
# Input IR for test_condition_mutually_exclusive: a hinted i1 loop whose body
# has three mutually exclusive range conditions on i1 (a 3-way concat).
@T.prim_func
def concat_func_3(
    placeholder: T.Buffer[(50176,), "int8"],
    placeholder_1: T.Buffer[(25088,), "int8"],
    placeholder_2: T.Buffer[(25088,), "int8"],
    T_concat: T.Buffer[(100352,), "int8"],
) -> None:
    T.preflattened_buffer(placeholder, (1, 64, 28, 28), "int8", data=placeholder.data)
    T.preflattened_buffer(placeholder_1, (1, 32, 28, 28), "int8", data=placeholder_1.data)
    T.preflattened_buffer(placeholder_2, (1, 32, 28, 28), "int8", data=placeholder_2.data)
    T.preflattened_buffer(T_concat, (1, 128, 28, 28), "int8", data=T_concat.data)
    for i1 in T.serial(128, annotations={"pragma_loop_partition_hint": 1}):
        for i2, i3 in T.grid(28, 28):
            if 96 <= i1:
                T_concat[i1 * 784 + i2 * 28 + i3] = placeholder_2[i1 * 784 + i2 * 28 + i3 - 75264]
            if 64 <= i1 and i1 < 96:
                T_concat[i1 * 784 + i2 * 28 + i3] = placeholder_1[i1 * 784 + i2 * 28 + i3 - 50176]
            if i1 < 64:
                T_concat[i1 * 784 + i2 * 28 + i3] = placeholder[i1 * 784 + i2 * 28 + i3]
def test_condition_mutually_exclusive():
    """Mutually exclusive range conditions are split into separate loop nests."""
    pass_cfg = {"tir.LoopPartition": {"partition_const_loop": True}}
    result = partition_from_scheduled_tir(concat_func_3, pass_cfg)
    assert tvm.ir.structural_equal(result["main"], partitioned_concat_3)
def test_loop_partition_unroll_hint():
    """Partition-hinted loop with no-interval sections is unrolled into 4 nests."""
    # Input: hinted ax0 loop whose condition 3 <= ax0*2+ax2 < 227 creates
    # head/tail sections with empty intervals.
    @T.prim_func
    def main(A: T.Buffer[150528, "int8"], B: T.Buffer[25088, "int8"]) -> None:
        T.preflattened_buffer(A, [1, 3, 224, 224], "int8", data=A.data)
        T.preflattened_buffer(B, [1, 224, 7, 16], "int8", data=B.data)
        for ax0 in T.serial(
            112,
            annotations={"pragma_loop_partition_hint": True},
        ):
            for ax1, ax2, ax3 in T.grid(224, 7, 16):
                if 3 <= ax0 * 2 + ax2 and ax0 * 2 + ax2 < 227 and ax3 < 3:
                    B[ax1 * 112 + ax2 * 16 + ax3] = A[ax3 * 50176 + ax1 * 224 + ax0 * 2 + ax2 - 3]
    # Expected: two unrolled head nests, one 109-iteration body, one tail nest.
    @T.prim_func
    def partitioned_main(A: T.Buffer[150528, "int8"], B: T.Buffer[25088, "int8"]) -> None:
        T.preflattened_buffer(A, [1, 3, 224, 224], dtype="int8", data=A.data)
        T.preflattened_buffer(B, [1, 224, 7, 16], dtype="int8", data=B.data)
        # body
        for ax1, ax2, ax3 in T.grid(224, 7, 16):
            if 3 <= ax2 and ax3 < 3:
                B[ax1 * 112 + ax2 * 16 + ax3] = A[ax3 * 50176 + ax1 * 224 + ax2 - 3]
        for ax1, ax2, ax3 in T.grid(224, 7, 16):
            if 1 <= ax2 and ax3 < 3:
                B[ax1 * 112 + ax2 * 16 + ax3] = A[ax3 * 50176 + ax1 * 224 + ax2 - 1]
        for ax0, ax1, ax2, ax3 in T.grid(109, 224, 7, 16):
            if ax3 < 3:
                B[ax1 * 112 + ax2 * 16 + ax3] = A[ax3 * 50176 + ax1 * 224 + ax0 * 2 + ax2 + 1]
        for ax1, ax2, ax3 in T.grid(224, 7, 16):
            if ax2 < 5 and ax3 < 3:
                B[ax1 * 112 + ax2 * 16 + ax3] = A[ax3 * 50176 + ax1 * 224 + ax2 + 219]
    mod = partition_from_scheduled_tir(
        main,
        {
            "tir.LoopPartition": {
                "partition_const_loop": True,
                "unroll_loop_with_partition_hint_no_interval": True,
            }
        },
    )
    mod = tvm.tir.transform.UnrollLoop()(mod)
    mod = tvm.tir.transform.RemoveNoOp()(mod)
    mod = tvm.tir.transform.Simplify()(mod)
    assert tvm.ir.structural_equal(mod["main"], partitioned_main)
def test_loop_partition_keep_loop_annotations():
    """Non-hint loop annotations ("key": "value") survive on all partitioned loops."""
    @T.prim_func
    def before(A: T.Buffer[160, "int32"], B: T.Buffer[160, "int32"]) -> None:
        for i in T.serial(
            160,
            annotations={"pragma_loop_partition_hint": True, "key": "value"},
        ):
            if i < 10:
                B[i] = A[i] + 1
            elif 10 <= i and i < 150:
                B[i] = A[i] + 2
            else:
                B[i] = A[i] + 3
    @T.prim_func
    def after(A: T.Buffer[160, "int32"], B: T.Buffer[160, "int32"]) -> None:
        T.preflattened_buffer(A, [160], dtype="int32", data=A.data)
        T.preflattened_buffer(B, [160], dtype="int32", data=B.data)
        # The hint pragma is consumed, but "key": "value" is kept on each piece.
        for i in T.serial(10, annotations={"key": "value"}):
            B[i] = A[i] + 1
        for i in T.serial(140, annotations={"key": "value"}):
            B[i + 10] = A[i + 10] + 2
        for i in T.serial(10, annotations={"key": "value"}):
            B[i + 150] = A[i + 150] + 3
    mod = partition_from_scheduled_tir(
        before,
        {
            "tir.LoopPartition": {
                "partition_const_loop": True,
            }
        },
    )
    assert tvm.ir.structural_equal(mod["main"], after)
def test_loop_partition_with_unit_loop_in_condition():
    """Conditions referencing an enclosing unit loop (k of extent 1) still partition."""
    @T.prim_func
    def before(
        placeholder: T.Buffer[(50176,), "int8"],
        placeholder_1: T.Buffer[(25088,), "int8"],
        placeholder_2: T.Buffer[(25088,), "int8"],
        T_concat: T.Buffer[(100352,), "int8"],
    ) -> None:
        for k in range(1, annotations={"preserve_unit_loop": True}):
            for i1 in T.serial(128, annotations={"pragma_loop_partition_hint": 1}):
                for i2, i3 in T.grid(28, 28):
                    if 96 <= k * 128 + i1:
                        T_concat[k * i1 * 784 + i2 * 28 + i3] = placeholder_2[
                            i1 * 784 + i2 * 28 + i3 - 75264
                        ]
                    if 64 <= k * 128 + i1 and k * 128 + i1 < 96:
                        T_concat[i1 * 784 + i2 * 28 + i3] = placeholder_1[
                            i1 * 784 + i2 * 28 + i3 - 50176
                        ]
                    if k * 128 + i1 < 64:
                        T_concat[i1 * 784 + i2 * 28 + i3] = placeholder[i1 * 784 + i2 * 28 + i3]
    # Expected: the unit loop is preserved and the three branches are split
    # into independent nests with k substituted to 0.
    @T.prim_func
    def after(
        placeholder: T.Buffer[50176, "int8"],
        placeholder_1: T.Buffer[25088, "int8"],
        placeholder_2: T.Buffer[25088, "int8"],
        T_concat: T.Buffer[100352, "int8"],
    ) -> None:
        T.preflattened_buffer(placeholder, [50176], dtype="int8", data=placeholder.data)
        T.preflattened_buffer(placeholder_1, [25088], dtype="int8", data=placeholder_1.data)
        T.preflattened_buffer(placeholder_2, [25088], dtype="int8", data=placeholder_2.data)
        T.preflattened_buffer(T_concat, [100352], dtype="int8", data=T_concat.data)
        for _ in T.serial(1, annotations={"preserve_unit_loop": True}):
            for i1, i2, i3 in T.grid(64, 28, 28):
                T_concat[i1 * 784 + i2 * 28 + i3] = placeholder[i1 * 784 + i2 * 28 + i3]
            for i1, i2, i3 in T.grid(32, 28, 28):
                T_concat[i1 * 784 + i2 * 28 + i3 + 50176] = placeholder_1[i1 * 784 + i2 * 28 + i3]
            for i1, i2, i3 in T.grid(32, 28, 28):
                T_concat[i2 * 28 + i3] = placeholder_2[i1 * 784 + i2 * 28 + i3]
    mod = partition_from_scheduled_tir(
        before,
        {
            "tir.LoopPartition": {
                "partition_const_loop": True,
            }
        },
    )
    assert tvm.ir.structural_equal(mod["main"], after)
# Allow running this test file directly; tvm.testing.main discovers the tests.
if __name__ == "__main__":
    tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_transform_lower_cross_thread_reduction.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
import sys
import pytest
import tvm
import tvm.testing
from tvm import te
from tvm.script import tir as T
# pylint: disable=no-member,invalid-name,unused-variable,unexpected-keyword-arg
def _check(original, transformed):
    """Lower *original* with LowerCrossThreadReduction and compare to *transformed*."""
    module = tvm.IRModule.from_expr(original)
    lowered = tvm.tir.transform.LowerCrossThreadReduction()(module)
    tvm.ir.assert_structural_equal(lowered["main"], transformed, True)
def _check_fail(original):
    """Assert that LowerCrossThreadReduction rejects *original* with ValueError."""
    module = tvm.IRModule.from_expr(original)
    lower = tvm.tir.transform.LowerCrossThreadReduction()
    with pytest.raises(ValueError):
        lower(module)
# Input IR: row-sum reduction over 128 elements, split into a serial ko (4)
# and a threadIdx.x-bound ki (32) axis.
@T.prim_func
def loop_split(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [128, 128], dtype="float32")
    B = T.match_buffer(b, [128], dtype="float32")
    for i, ko in T.grid(128, 4):
        for ki in T.thread_binding(0, 32, thread="threadIdx.x"):
            with T.block("B"):
                vi = T.axis.S(128, i)
                vk = T.axis.R(128, ko * 32 + ki)
                T.reads([A[vi, vk]])
                T.writes([B[vi]])
                with T.init():
                    B[vi] = T.float32(0)
                B[vi] = B[vi] + A[vi, vk]
# Expected lowering of loop_split: per-thread init, serial in-thread reduction
# over ko, a tvm_thread_allreduce across ki, then a write-back block.
@T.prim_func
def lowered_loop_split(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [128, 128], dtype="float32")
    B = T.match_buffer(b, [128], dtype="float32")
    reduce_temp0 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
    normal_reduce_temp0 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
    for i in T.serial(0, 128):
        for ki in T.thread_binding(0, 32, thread="threadIdx.x"):
            with T.block("B_in_thread_init"):
                T.reads([])
                T.writes([normal_reduce_temp0[0]])
                normal_reduce_temp0[0] = T.float32(0)
            for ko in T.serial(0, 4):
                with T.block("B_normal_reduction"):
                    vi = T.axis.S(128, i)
                    vk = T.axis.R(128, ko * 32 + ki)
                    T.reads([A[vi, vk]])
                    T.writes([normal_reduce_temp0[0]])
                    normal_reduce_temp0[0] = normal_reduce_temp0[0] + A[vi, vk]
            with T.block("B_cross_thread_reduction"):
                T.reads([normal_reduce_temp0[0]])
                T.writes([reduce_temp0[0]])
                T.attr(
                    T.comm_reducer(lambda x, y: x + y, [T.float32(0)]),
                    "reduce_scope",
                    T.reinterpret(T.uint64(0), dtype="handle"),
                )
                T.evaluate(
                    T.tvm_thread_allreduce(
                        T.uint32(1),
                        normal_reduce_temp0[0],
                        True,
                        reduce_temp0[0],
                        ki,
                        dtype="handle",
                    )
                )
            with T.block("B_write_back"):
                vi = T.axis.S(128, i)
                T.reads([reduce_temp0[0]])
                T.writes([B[vi]])
                B[vi] = reduce_temp0[0]
# Input IR: the whole reduction axis (128) is thread-bound, so there is no
# in-thread (normal) reduction portion.
@T.prim_func
def no_normal_reduction(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [128, 128], dtype="float32")
    B = T.match_buffer(b, [128], dtype="float32")
    for i in T.serial(0, 128):
        for k in T.thread_binding(0, 128, thread="threadIdx.x"):
            with T.block("B"):
                vi, vk = T.axis.remap("SR", [i, k])
                T.reads([A[vi, vk]])
                T.writes([B[vi]])
                with T.init():
                    B[vi] = T.float32(0)
                B[vi] = B[vi] + A[vi, vk]
# Expected lowering of no_normal_reduction: the allreduce consumes A[vi, vk]
# directly, with no in-thread accumulation buffer.
@T.prim_func
def lowered_no_normal_reduction(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [128, 128], dtype="float32")
    B = T.match_buffer(b, [128], dtype="float32")
    reduce_temp0 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
    for i in T.serial(0, 128):
        for k in T.thread_binding(0, 128, thread="threadIdx.x"):
            with T.block("B_cross_thread_reduction"):
                vi, vk = T.axis.remap("SR", [i, k])
                T.reads([A[vi, vk]])
                T.writes([reduce_temp0[0]])
                T.attr(
                    T.comm_reducer(lambda x, y: x + y, [T.float32(0)]),
                    "reduce_scope",
                    T.reinterpret(T.uint64(0), dtype="handle"),
                )
                T.evaluate(
                    T.tvm_thread_allreduce(
                        T.uint32(1), A[vi, vk], True, reduce_temp0[0], k, dtype="handle"
                    )
                )
            with T.block("B_write_back"):
                vi = T.axis.spatial(128, i)
                T.reads([reduce_temp0[0]])
                T.writes([B[vi]])
                B[vi] = reduce_temp0[0]
# Input IR: the reduction axis is split across two thread-bound loops
# (threadIdx.x of 4 and threadIdx.y of 32).
@T.prim_func
def two_bound_loops(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [128, 128], dtype="float32")
    B = T.match_buffer(b, [128], dtype="float32")
    for i in T.serial(0, 128):
        for ko in T.thread_binding(0, 4, thread="threadIdx.x"):
            for ki in T.thread_binding(0, 32, thread="threadIdx.y"):
                with T.block("B"):
                    vi = T.axis.spatial(128, i)
                    vk = T.axis.reduce(128, ko * 32 + ki)
                    T.reads([A[vi, vk]])
                    T.writes([B[vi]])
                    with T.init():
                        B[vi] = T.float32(0)
                    B[vi] = B[vi] + A[vi, vk]
# Expected lowering of two_bound_loops: a single allreduce over both bound
# thread vars (ko and ki passed together).
@T.prim_func
def lowered_two_bound_loops(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [128, 128], dtype="float32")
    B = T.match_buffer(b, [128], dtype="float32")
    reduce_temp0 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
    for i in T.serial(0, 128):
        for ko in T.thread_binding(0, 4, thread="threadIdx.x"):
            for ki in T.thread_binding(0, 32, thread="threadIdx.y"):
                with T.block("B_cross_thread_reduction"):
                    vi = T.axis.spatial(128, i)
                    vk = T.axis.reduce(128, ko * 32 + ki)
                    T.reads([A[vi, vk]])
                    T.writes([reduce_temp0[0]])
                    T.attr(
                        T.comm_reducer(lambda x, y: x + y, [T.float32(0)]),
                        "reduce_scope",
                        T.reinterpret(T.uint64(0), dtype="handle"),
                    )
                    T.evaluate(
                        T.tvm_thread_allreduce(
                            T.uint32(1), A[vi, vk], True, reduce_temp0[0], ko, ki, dtype="handle"
                        )
                    )
                with T.block("B_write_back"):
                    vi = T.axis.spatial(128, i)
                    T.reads([reduce_temp0[0]])
                    T.writes([B[vi]])
                    B[vi] = reduce_temp0[0]
# Input IR: an rfactor-style pattern — a B_rf partial-reduction block and the
# final B reduction block both live under the thread-bound k0o loop.
@T.prim_func
def multiple_blocks_under_reduction_loop(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [16, 16, 16], dtype="float32")
    B = T.match_buffer(b, [16], dtype="float32")
    B_rf_local = T.alloc_buffer([16, 16], dtype="float32", scope="local")
    for i in T.thread_binding(0, 16, thread="blockIdx.x"):
        for k0o in T.thread_binding(0, 4, thread="threadIdx.x"):
            for k0i0, k1 in T.grid(4, 16):
                with T.block("B_rf"):
                    vk0 = T.axis.spatial(16, k0o * 4 + k0i0)
                    vi, vk1 = T.axis.remap("SR", [i, k1])
                    T.reads([A[vi, vk0, vk1]])
                    T.writes([B_rf_local[vk0, vi]])
                    with T.init():
                        B_rf_local[vk0, vi] = T.float32(0)
                    B_rf_local[vk0, vi] = B_rf_local[vk0, vi] + A[vi, vk0, vk1]
            for k0i1 in T.serial(0, 4):
                with T.block("B"):
                    vk0 = T.axis.reduce(16, k0o * 4 + k0i1)
                    vi = T.axis.spatial(16, i)
                    T.reads([B_rf_local[vk0, vi]])
                    T.writes([B[vi]])
                    with T.init():
                        B[vi] = T.float32(0)
                    B[vi] = B[vi] + B_rf_local[vk0, vi]
# Expected lowering of multiple_blocks_under_reduction_loop: only the final
# B block is converted to in-thread reduction + allreduce; B_rf is untouched.
@T.prim_func
def lowered_multiple_blocks_under_reduction_loop(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [16, 16, 16], dtype="float32")
    B = T.match_buffer(b, [16], dtype="float32")
    B_rf_local = T.alloc_buffer([16, 16], dtype="float32", scope="local")
    reduce_temp0 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
    normal_reduce_temp0 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
    for i in T.thread_binding(0, 16, thread="blockIdx.x"):
        for k0o in T.thread_binding(0, 4, thread="threadIdx.x"):
            with T.block("B_in_thread_init"):
                T.reads([])
                T.writes([normal_reduce_temp0[0]])
                normal_reduce_temp0[0] = T.float32(0)
            for k0i0, k1 in T.grid(4, 16):
                with T.block("B_rf"):
                    vk0 = T.axis.spatial(16, k0o * 4 + k0i0)
                    vi, vk1 = T.axis.remap("SR", [i, k1])
                    T.reads([A[vi, vk0, vk1]])
                    T.writes([B_rf_local[vk0, vi]])
                    with T.init():
                        B_rf_local[vk0, vi] = T.float32(0)
                    B_rf_local[vk0, vi] = B_rf_local[vk0, vi] + A[vi, vk0, vk1]
            for k0i1 in T.serial(0, 4):
                with T.block("B_normal_reduction"):
                    vk0 = T.axis.reduce(16, k0o * 4 + k0i1)
                    vi = T.axis.spatial(16, i)
                    T.reads([B_rf_local[vk0, vi]])
                    T.writes([normal_reduce_temp0[0]])
                    normal_reduce_temp0[0] = normal_reduce_temp0[0] + B_rf_local[vk0, vi]
            with T.block("B_cross_thread_reduction"):
                T.reads([normal_reduce_temp0[0]])
                T.writes([reduce_temp0[0]])
                T.attr(
                    T.comm_reducer(lambda x, y: x + y, [T.float32(0)]),
                    "reduce_scope",
                    T.reinterpret(T.uint64(0), dtype="handle"),
                )
                T.evaluate(
                    T.tvm_thread_allreduce(
                        T.uint32(1),
                        normal_reduce_temp0[0],
                        True,
                        reduce_temp0[0],
                        k0o,
                        dtype="handle",
                    )
                )
            with T.block("B_write_back"):
                vi = T.axis.spatial(16, i)
                T.reads([reduce_temp0[0]])
                T.writes([B[vi]])
                B[vi] = reduce_temp0[0]
# Input IR: reduction over 120 elements with a T.where predicate because the
# split 4*32 overshoots the extent.
@T.prim_func
def with_block_predicate(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [128, 120], dtype="float32")
    B = T.match_buffer(b, [128], dtype="float32")
    for i, ko in T.grid(128, 4):
        for ki in T.thread_binding(0, 32, thread="threadIdx.x"):
            with T.block("B"):
                vi = T.axis.spatial(128, i)
                vk = T.axis.reduce(120, ko * 32 + ki)
                T.where(ko * 32 + ki < 120)
                T.reads([A[vi, vk]])
                T.writes([B[vi]])
                with T.init():
                    B[vi] = T.float32(0)
                B[vi] = B[vi] + A[vi, vk]
# Expected lowering of with_block_predicate: the T.where predicate moves onto
# the in-thread (normal) reduction block; the allreduce itself is unguarded.
@T.prim_func
def lowered_with_block_predicate(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [128, 120], dtype="float32")
    B = T.match_buffer(b, [128], dtype="float32")
    reduce_temp0 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
    normal_reduce_temp0 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
    for i in T.serial(0, 128):
        for ki in T.thread_binding(0, 32, thread="threadIdx.x"):
            with T.block("B_in_thread_init"):
                T.reads([])
                T.writes([normal_reduce_temp0[0]])
                normal_reduce_temp0[0] = T.float32(0)
            for ko in T.serial(0, 4):
                with T.block("B_normal_reduction"):
                    vi = T.axis.spatial(128, i)
                    vk = T.axis.reduce(120, ko * 32 + ki)
                    T.where(ko * 32 + ki < 120)
                    T.reads([A[vi, vk]])
                    T.writes([normal_reduce_temp0[0]])
                    normal_reduce_temp0[0] = normal_reduce_temp0[0] + A[vi, vk]
            with T.block("B_cross_thread_reduction"):
                T.reads([normal_reduce_temp0[0]])
                T.writes([reduce_temp0[0]])
                T.attr(
                    T.comm_reducer(lambda x, y: x + y, [T.float32(0)]),
                    "reduce_scope",
                    T.reinterpret(T.uint64(0), dtype="handle"),
                )
                T.evaluate(
                    T.tvm_thread_allreduce(
                        T.uint32(1),
                        normal_reduce_temp0[0],
                        True,
                        reduce_temp0[0],
                        ki,
                        dtype="handle",
                    )
                )
            with T.block("B_write_back"):
                vi = T.axis.spatial(128, i)
                T.reads([reduce_temp0[0]])
                T.writes([B[vi]])
                B[vi] = reduce_temp0[0]
# Input IR: softmax-style kernel where 512 threads cover a reduction extent of
# only 256, so every reduction block carries a T.where predicate.
@T.prim_func
def single_reduction_loop_with_block_predicate(
    A: T.Buffer[(256, 256), "float32"], T_softmax_norm: T.Buffer[(256, 256), "float32"]
) -> None:
    T_softmax_maxelem_shared = T.alloc_buffer([256], dtype="float32", scope="shared")
    T_softmax_expsum_shared = T.alloc_buffer([256], dtype="float32", scope="shared")
    for i0 in T.serial(256):
        for ax0, ax1_0 in T.grid(1, 1):
            for ax1_1 in T.thread_binding(512, thread="threadIdx.x"):
                with T.block("T_softmax_maxelem"):
                    i0_1 = T.axis.spatial(256, i0 + ax0)
                    k = T.axis.reduce(256, ax1_0 * 512 + ax1_1)
                    T.where(ax1_0 * 512 + ax1_1 < 256)
                    T.reads(A[i0_1, k])
                    T.writes(T_softmax_maxelem_shared[i0_1])
                    with T.init():
                        T_softmax_maxelem_shared[i0_1] = T.float32(-3.4028234663852886e38)
                    T_softmax_maxelem_shared[i0_1] = T.max(
                        T_softmax_maxelem_shared[i0_1], A[i0_1, k]
                    )
        for ax0, ax1_0 in T.grid(1, 1):
            for ax1_1 in T.thread_binding(512, thread="threadIdx.x"):
                with T.block("T_softmax_expsum"):
                    i0_2 = T.axis.spatial(256, i0 + ax0)
                    k = T.axis.reduce(256, ax1_0 * 512 + ax1_1)
                    T.where(ax1_0 * 512 + ax1_1 < 256)
                    T.reads(A[i0_2, k], T_softmax_maxelem_shared[i0_2])
                    T.writes(T_softmax_expsum_shared[i0_2])
                    with T.init():
                        T_softmax_expsum_shared[i0_2] = T.float32(0)
                    T_softmax_expsum_shared[i0_2] = T_softmax_expsum_shared[i0_2] + T.exp(
                        A[i0_2, k] - T_softmax_maxelem_shared[i0_2], dtype="float32"
                    )
        for i1_0 in T.serial(1):
            for i1_1 in T.thread_binding(512, thread="threadIdx.x"):
                with T.block("T_softmax_norm"):
                    i0_3 = T.axis.spatial(256, i0)
                    i1 = T.axis.spatial(256, i1_0 * 512 + i1_1)
                    T.where(i1_0 * 512 + i1_1 < 256)
                    T.reads(
                        A[i0_3, i1], T_softmax_maxelem_shared[i0_3], T_softmax_expsum_shared[i0_3]
                    )
                    T.writes(T_softmax_norm[i0_3, i1])
                    T.block_attr({"axis": 1})
                    T_softmax_norm[i0_3, i1] = (
                        T.exp(A[i0_3, i1] - T_softmax_maxelem_shared[i0_3], dtype="float32")
                        / T_softmax_expsum_shared[i0_3]
                    )
# Expected output of LowerCrossThreadReduction for the fixture above: each
# predicated reduction becomes an in-thread accumulation (init + predicated
# update) followed by a tvm_thread_allreduce and a write-back block.
@T.prim_func
def lowered_single_reduction_loop_with_block_predicate(
    A: T.Buffer[(256, 256), "float32"], T_softmax_norm: T.Buffer[(256, 256), "float32"]
) -> None:
    T_softmax_maxelem_shared = T.alloc_buffer([256], dtype="float32", scope="shared")
    T_softmax_expsum_shared = T.alloc_buffer([256], dtype="float32", scope="shared")
    # Per-thread scratch buffers introduced by the pass (local scope).
    cross_thread_0 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
    in_thread_0 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
    cross_thread_1 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
    in_thread_1 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
    for i0 in T.serial(256):
        # Phase 1: lowered row-max reduction.
        for ax0 in T.serial(1):
            for ax1_1 in T.thread_binding(512, thread="threadIdx.x"):
                with T.block("T_softmax_maxelem_in_thread_init"):
                    T.reads()
                    T.writes(in_thread_0[0])
                    in_thread_0[0] = T.float32(-3.4028234663852886e38)
                for ax1_0 in T.serial(1):
                    with T.block("T_softmax_maxelem_in_thread"):
                        T.where(ax1_0 * 512 + ax1_1 < 256)
                        i0_1 = T.axis.spatial(256, i0 + ax0)
                        k = T.axis.reduce(256, ax1_0 * 512 + ax1_1)
                        T.reads(A[i0_1, k])
                        T.writes(in_thread_0[0])
                        in_thread_0[0] = T.max(in_thread_0[0], A[i0_1, k])
                with T.block("T_softmax_maxelem_cross_thread"):
                    T.reads(in_thread_0[0])
                    T.writes(cross_thread_0[0])
                    T.attr(
                        T.comm_reducer(
                            lambda x, y: T.max(x, y), [T.float32(-3.4028234663852886e38)]
                        ),
                        "reduce_scope",
                        T.reinterpret(T.uint64(0), dtype="handle"),
                    )
                    T.evaluate(
                        T.tvm_thread_allreduce(
                            T.uint32(1),
                            in_thread_0[0],
                            True,
                            cross_thread_0[0],
                            ax1_1,
                            dtype="handle",
                        )
                    )
                with T.block("T_softmax_maxelem_write_back"):
                    i0_2 = T.axis.spatial(256, i0 + ax0)
                    T.reads(cross_thread_0[0])
                    T.writes(T_softmax_maxelem_shared[i0_2])
                    T_softmax_maxelem_shared[i0_2] = cross_thread_0[0]
        # Phase 2: lowered exp-sum reduction.
        for ax0 in T.serial(1):
            for ax1_1 in T.thread_binding(512, thread="threadIdx.x"):
                with T.block("T_softmax_expsum_in_thread_init"):
                    T.reads()
                    T.writes(in_thread_1[0])
                    in_thread_1[0] = T.float32(0)
                for ax1_0 in T.serial(1):
                    with T.block("T_softmax_expsum_in_thread"):
                        T.where(ax1_0 * 512 + ax1_1 < 256)
                        i0_3 = T.axis.spatial(256, i0 + ax0)
                        k = T.axis.reduce(256, ax1_0 * 512 + ax1_1)
                        T.reads(A[i0_3, k], T_softmax_maxelem_shared[i0_3])
                        T.writes(in_thread_1[0])
                        in_thread_1[0] = in_thread_1[0] + T.exp(
                            A[i0_3, k] - T_softmax_maxelem_shared[i0_3], dtype="float32"
                        )
                with T.block("T_softmax_expsum_cross_thread"):
                    T.reads(in_thread_1[0])
                    T.writes(cross_thread_1[0])
                    T.attr(
                        T.comm_reducer(lambda x_1, y_1: x_1 + y_1, [T.float32(0)]),
                        "reduce_scope",
                        T.reinterpret(T.uint64(0), dtype="handle"),
                    )
                    T.evaluate(
                        T.tvm_thread_allreduce(
                            T.uint32(1),
                            in_thread_1[0],
                            True,
                            cross_thread_1[0],
                            ax1_1,
                            dtype="handle",
                        )
                    )
                with T.block("T_softmax_expsum_write_back"):
                    i0_4 = T.axis.spatial(256, i0 + ax0)
                    T.reads(cross_thread_1[0])
                    T.writes(T_softmax_expsum_shared[i0_4])
                    T_softmax_expsum_shared[i0_4] = cross_thread_1[0]
        # Phase 3: normalization is not a reduction, so it stays unchanged.
        for i1_0 in T.serial(1):
            for i1_1 in T.thread_binding(512, thread="threadIdx.x"):
                with T.block("T_softmax_norm"):
                    i0_5 = T.axis.spatial(256, i0)
                    i1 = T.axis.spatial(256, i1_0 * 512 + i1_1)
                    T.where(i1_0 * 512 + i1_1 < 256)
                    T.reads(
                        A[i0_5, i1], T_softmax_maxelem_shared[i0_5], T_softmax_expsum_shared[i0_5]
                    )
                    T.writes(T_softmax_norm[i0_5, i1])
                    T.block_attr({"axis": 1})
                    T_softmax_norm[i0_5, i1] = (
                        T.exp(A[i0_5, i1] - T_softmax_maxelem_shared[i0_5], dtype="float32")
                        / T_softmax_expsum_shared[i0_5]
                    )
# Fixture with a tensorized reduction body (match_buffer + LLVM dot-product
# intrinsic).  test_single_reduction_loop_with_tensorize below _checks it
# against itself, i.e. the pass is expected to leave it unchanged.
@T.prim_func
def single_reduction_loop_with_tensorize(
    input_A: T.Buffer[(1, 64, 7, 7, 32), "uint8"],
    input_B: T.Buffer[(16, 64, 1, 1, 8, 32, 4), "int8"],
    output: T.Buffer[(1, 16, 7, 7, 32), "int32"],
) -> None:
    # body
    # with T.block("root")
    for i1, i2, i3, i4, i5 in T.grid(16, 4, 98, 2, 32):
        with T.block("compute_o"):
            n = T.axis.spatial(1, 0)
            oc_chunk = T.axis.spatial(16, i1)
            oh = T.axis.spatial(7, (i2 * 6272 + i3 * 64 + i4 * 32 + i5) // 3584)
            ow = T.axis.spatial(7, (i2 * 6272 + i3 * 64 + i4 * 32 + i5) % 3584 // 512)
            kh = T.axis.reduce(1, 0)
            kw = T.axis.reduce(1, 0)
            ic_outer = T.axis.reduce(64, (i2 * 6272 + i3 * 64 + i4 * 32 + i5) % 512 // 8)
            ic_f_inner = T.axis.reduce(8, (i2 * 6272 + i3 * 64 + i4 * 32 + i5) % 8)
            T.reads(
                input_A[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 : ic_f_inner * 4 + 4],
                input_B[oc_chunk, ic_outer, kh, kw, ic_f_inner, 0:32, 0:4],
            )
            T.writes(output[n, oc_chunk, oh, ow, 0:32])
            with T.init():
                for x in T.serial(32):
                    with T.block("compute_init"):
                        oc_block_i_init = T.axis.spatial(32, x)
                        T.reads()
                        T.writes(output[n, oc_chunk, oh, ow, oc_block_i_init])
                        output[n, oc_chunk, oh, ow, oc_block_i_init] = 0
            # Inner tensorized block: remaps the touched regions to small
            # match_buffers and issues a vectorized LLVM intrinsic call.
            with T.block("compute_o"):
                T.reads(
                    output[n, oc_chunk, oh, ow, 0:32],
                    input_A[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 : ic_f_inner * 4 + 4],
                    input_B[oc_chunk, ic_outer, kh, kw, ic_f_inner, 0:32, 0:4],
                )
                T.writes(output[n, oc_chunk, oh, ow, 0:32])
                A = T.match_buffer(
                    input_A[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 : ic_f_inner * 4 + 4],
                    [4],
                    dtype="uint8",
                    offset_factor=1,
                )
                B = T.match_buffer(
                    input_B[oc_chunk, ic_outer, kh, kw, ic_f_inner, 0:32, 0:4],
                    [32, 4],
                    dtype="int8",
                    offset_factor=1,
                )
                C = T.match_buffer(
                    output[n, oc_chunk, oh, ow, 0:32], [32], dtype="int32", offset_factor=1
                )
                A_u8x4: T.uint8x4 = A[0:4]
                A_i32: T.int32 = T.reinterpret(A_u8x4, dtype="int32")
                B_i8x128 = B[0, 0:128]
                B_i32x32: T.int32x32 = T.reinterpret(B_i8x128, dtype="int32x32")
                C[0:32] = T.call_llvm_pure_intrin(
                    4217, T.uint32(3), C[0:32], T.broadcast(A_i32, 32), B_i32x32, dtype="int32x32"
                )
# Input fixture: row-wise max reduction whose reduction loop is bound to
# threadIdx.x.  Paired with lowered_reducer_max in test_reducer_max.
@T.prim_func
def reducer_max(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [128, 128], dtype="float32")
    B = T.match_buffer(b, [128], dtype="float32")
    for i in T.serial(0, 128):
        for k in T.thread_binding(0, 128, thread="threadIdx.x"):
            with T.block("B"):
                vi, vk = T.axis.remap("SR", [i, k])
                T.reads([A[vi, vk]])
                T.writes([B[vi]])
                with T.init():
                    B[vi] = T.min_value("float32")
                B[vi] = T.max(B[vi], A[vi, vk])
# Expected output for reducer_max: the whole reduction fits in one thread
# pass, so it lowers directly to a tvm_thread_allreduce plus a write-back.
@T.prim_func
def lowered_reducer_max(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [128, 128], dtype="float32")
    B = T.match_buffer(b, [128], dtype="float32")
    reduce_temp0 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
    for i in T.serial(0, 128):
        for k in T.thread_binding(0, 128, thread="threadIdx.x"):
            with T.block("B_cross_thread_reduction"):
                vi, vk = T.axis.remap("SR", [i, k])
                T.reads([A[vi, vk]])
                T.writes([reduce_temp0[0]])
                T.attr(
                    T.comm_reducer(lambda x, y: T.max(x, y), [T.min_value("float32")]),
                    "reduce_scope",
                    T.reinterpret(T.uint64(0), dtype="handle"),
                )
                T.evaluate(
                    T.tvm_thread_allreduce(
                        T.uint32(1), A[vi, vk], True, reduce_temp0[0], k, dtype="handle"
                    )
                )
            with T.block("B_write_back"):
                vi = T.axis.spatial(128, i)
                T.reads([reduce_temp0[0]])
                T.writes([B[vi]])
                B[vi] = reduce_temp0[0]
# Input fixture: sum reduction into a zero-rank (scalar) buffer B[()].
# Paired with lowered_zero_rank_buffer in test_zero_rank_buffer.
@T.prim_func
def zero_rank_buffer(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [128], dtype="float32")
    B = T.match_buffer(b, [], dtype="float32")
    for k in T.thread_binding(0, 128, thread="threadIdx.x"):
        with T.block("B"):
            vk = T.axis.reduce(128, k)
            T.reads([A[vk]])
            T.writes([B[()]])
            with T.init():
                B[()] = T.float32(0)
            B[()] = B[()] + A[vk]
# Expected output for zero_rank_buffer: allreduce into a local temp, then a
# write-back block that stores the scalar result into B[()].
@T.prim_func
def lowered_zero_rank_buffer(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [128], dtype="float32")
    B = T.match_buffer(b, [], dtype="float32")
    reduce_temp0 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
    for k in T.thread_binding(0, 128, thread="threadIdx.x"):
        with T.block("B_cross_thread_reduction"):
            vk = T.axis.reduce(128, k)
            T.reads([A[vk]])
            T.writes([reduce_temp0[0]])
            T.attr(
                T.comm_reducer(lambda x, y: x + y, [T.float32(0)]),
                "reduce_scope",
                T.reinterpret(T.uint64(0), dtype="handle"),
            )
            T.evaluate(
                T.tvm_thread_allreduce(T.uint32(1), A[vk], True, reduce_temp0[0], k, dtype="handle")
            )
        with T.block("B_write_back"):
            T.reads([reduce_temp0[0]])
            T.writes([B[()]])
            B[()] = reduce_temp0[0]
# Negative fixture (used via _check_fail): the reduction block performs two
# buffer stores (C[()] and B[vi]), which the pass is expected to reject.
@T.prim_func
def multiple_bufferstore(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [128, 128], dtype="float32")
    B = T.match_buffer(b, [128], dtype="float32")
    C = T.alloc_buffer([], dtype="float32")
    for i in T.serial(0, 128):
        for k in T.thread_binding(0, 128, thread="threadIdx.x"):
            with T.block("B"):
                vi, vk = T.axis.remap("SR", [i, k])
                T.reads([A[vi, vk], B[vi], C[()]])
                T.writes([B[vi], C[()]])
                with T.init():
                    B[vi] = T.float32(0)
                C[()] = A[vi, vk]
                B[vi] = B[vi] + C[()]
# Negative fixture (used via _check_fail): the thread-bound reduction loop is
# outside a serial loop, i.e. not the deepest loop — expected to be rejected.
@T.prim_func
def reduction_loop_not_deepest(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [128, 128], dtype="float32")
    B = T.match_buffer(b, [128], dtype="float32")
    for k in T.thread_binding(0, 128, thread="threadIdx.x"):
        for i in T.serial(0, 128):
            with T.block("B"):
                vi, vk = T.axis.remap("SR", [i, k])
                T.reads([A[vi, vk]])
                T.writes([B[vi]])
                with T.init():
                    B[vi] = T.float32(0)
                B[vi] = B[vi] + A[vi, vk]
# Negative fixture (used via _check_fail): the reduction loop is bound to
# blockIdx.x rather than a threadIdx — expected to be rejected.
@T.prim_func
def reduction_loop_bound_to_blockidx(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [128, 128], dtype="float32")
    B = T.match_buffer(b, [128], dtype="float32")
    for i in T.serial(0, 128):
        for k in T.thread_binding(0, 128, thread="blockIdx.x"):
            with T.block("B"):
                vi, vk = T.axis.remap("SR", [i, k])
                T.reads([A[vi, vk]])
                T.writes([B[vi]])
                with T.init():
                    B[vi] = T.float32(0)
                B[vi] = B[vi] + A[vi, vk]
# Negative fixture (used via _check_fail): the init store uses B[vj, vi]
# while the update uses B[vi, vj], so the write regions disagree — expected
# to be rejected.
@T.prim_func
def different_access_indices(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [128, 128, 128], dtype="float32")
    B = T.match_buffer(b, [128, 128], dtype="float32")
    for i, j in T.grid(128, 128):
        for k in T.thread_binding(0, 128, thread="threadIdx.x"):
            with T.block("B"):
                vi, vj, vk = T.axis.remap("SSR", [i, j, k])
                T.reads([A[vi, vj, vk]])
                T.writes(
                    [
                        B[
                            T.min(vj, vi) : T.min(vj, vi) + (T.max(vj, vi) + 1 - T.min(vj, vi)),
                            T.min(vi, vj) : T.min(vi, vj) + (T.max(vi, vj) + 1 - T.min(vi, vj)),
                        ]
                    ]
                )
                with T.init():
                    B[vj, vi] = T.float32(0)
                B[vi, vj] = B[vi, vj] + A[vi, vj, vk]
# Negative fixture (used via _check_fail): the update uses subtraction,
# which does not match any registered commutative reducer pattern.
@T.prim_func
def invalid_reducer(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [128, 128], dtype="float32")
    B = T.match_buffer(b, [128], dtype="float32")
    for i in T.serial(0, 128):
        for k in T.thread_binding(0, 128, thread="threadIdx.x"):
            with T.block("B"):
                vi, vk = T.axis.remap("SR", [i, k])
                T.reads([A[vi, vk]])
                T.writes([B[vi]])
                with T.init():
                    B[vi] = T.float32(0)
                B[vi] = B[vi] - A[vi, vk]
# Input fixture: full softmax kernel with the row axis bound to blockIdx.x
# and split (8 x 32) reductions bound to threadIdx.x.  Paired with
# lowered_softmax in test_softmax.
@T.prim_func
def softmax(var_A: T.handle, var_T_softmax_norm: T.handle) -> None:
    A = T.match_buffer(var_A, [256, 256], dtype="float32")
    T_softmax_norm = T.match_buffer(var_T_softmax_norm, [256, 256], dtype="float32")
    T_softmax_maxelem_shared = T.alloc_buffer([256], dtype="float32", scope="shared")
    T_softmax_expsum_shared = T.alloc_buffer([256], dtype="float32", scope="shared")
    for i0 in T.thread_binding(0, 256, thread="blockIdx.x"):
        # Row max.
        for ax0_0 in T.serial(0, 8):
            for ax0_1 in T.thread_binding(0, 32, thread="threadIdx.x"):
                with T.block("T_softmax_maxelem"):
                    i0_1 = T.axis.spatial(256, i0)
                    k = T.axis.reduce(256, ax0_0 * 32 + ax0_1)
                    T.reads([A[i0_1, k]])
                    T.writes([T_softmax_maxelem_shared[i0_1]])
                    with T.init():
                        T_softmax_maxelem_shared[i0_1] = T.min_value("float32")
                    T_softmax_maxelem_shared[i0_1] = T.max(
                        T_softmax_maxelem_shared[i0_1], A[i0_1, k]
                    )
        # Row exp-sum.
        for ax0_0 in T.serial(0, 8):
            for ax0_1 in T.thread_binding(0, 32, thread="threadIdx.x"):
                with T.block("T_softmax_expsum"):
                    i0_2 = T.axis.spatial(256, i0)
                    k = T.axis.reduce(256, ax0_0 * 32 + ax0_1)
                    T.reads(
                        [
                            A[i0_2, k],
                            T_softmax_maxelem_shared[i0_2],
                        ]
                    )
                    T.writes([T_softmax_expsum_shared[i0_2]])
                    with T.init():
                        T_softmax_expsum_shared[i0_2] = T.float32(0)
                    T_softmax_expsum_shared[i0_2] = T_softmax_expsum_shared[i0_2] + T.exp(
                        A[i0_2, k] - T_softmax_maxelem_shared[i0_2], dtype="float32"
                    )
        # Normalization.
        for i1_0 in T.serial(0, 8):
            for i1_1 in T.thread_binding(0, 32, thread="threadIdx.x"):
                with T.block("T_softmax_norm"):
                    i0_3 = T.axis.spatial(256, i0)
                    i1 = T.axis.spatial(256, i1_0 * 32 + i1_1)
                    T.reads(
                        [
                            A[i0_3, i1],
                            T_softmax_maxelem_shared[i0_3],
                            T_softmax_expsum_shared[i0_3],
                        ]
                    )
                    T.writes([T_softmax_norm[i0_3, i1]])
                    T.block_attr({"axis": 1})
                    T_softmax_norm[i0_3, i1] = (
                        T.exp(
                            A[i0_3, i1] - T_softmax_maxelem_shared[i0_3],
                            dtype="float32",
                        )
                        / T_softmax_expsum_shared[i0_3]
                    )
# Expected output for softmax: each split reduction becomes a normal
# (serial, per-thread) reduction into normal_reduce_temp*, followed by a
# tvm_thread_allreduce into reduce_temp* and a write-back to shared memory.
@T.prim_func
def lowered_softmax(var_A: T.handle, var_T_softmax_norm: T.handle) -> None:
    A = T.match_buffer(var_A, [256, 256], dtype="float32")
    T_softmax_norm = T.match_buffer(var_T_softmax_norm, [256, 256], dtype="float32")
    T_softmax_maxelem_shared = T.alloc_buffer([256], dtype="float32", scope="shared")
    T_softmax_expsum_shared = T.alloc_buffer([256], dtype="float32", scope="shared")
    reduce_temp0 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
    normal_reduce_temp0 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
    reduce_temp1 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
    normal_reduce_temp1 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
    for i0 in T.thread_binding(0, 256, thread="blockIdx.x"):
        # Lowered row-max reduction.
        for ax0_1 in T.thread_binding(0, 32, thread="threadIdx.x"):
            with T.block("T_softmax_maxelem_normal_reduction_init"):
                T.reads([])
                T.writes([normal_reduce_temp0[0]])
                normal_reduce_temp0[0] = T.min_value("float32")
            for ax0_0 in T.serial(0, 8):
                with T.block("T_softmax_maxelem_normal_reduction"):
                    i0_1 = T.axis.spatial(256, i0)
                    k = T.axis.reduce(256, ax0_0 * 32 + ax0_1)
                    T.reads([A[i0_1, k]])
                    T.writes([normal_reduce_temp0[0]])
                    normal_reduce_temp0[0] = T.max(normal_reduce_temp0[0], A[i0_1, k])
            with T.block("T_softmax_maxelem_cross_thread_reduction"):
                T.reads([normal_reduce_temp0[0]])
                T.writes([reduce_temp0[0]])
                T.attr(
                    T.comm_reducer(lambda x, y: T.max(x, y), [T.min_value("float32")]),
                    "reduce_scope",
                    T.reinterpret(T.uint64(0), dtype="handle"),
                )
                T.evaluate(
                    T.tvm_thread_allreduce(
                        T.uint32(1),
                        normal_reduce_temp0[0],
                        True,
                        reduce_temp0[0],
                        ax0_1,
                        dtype="handle",
                    )
                )
            with T.block("T_softmax_maxelem_write_back"):
                i0_2 = T.axis.spatial(256, i0)
                T.reads([reduce_temp0[0]])
                T.writes([T_softmax_maxelem_shared[i0_2]])
                T_softmax_maxelem_shared[i0_2] = reduce_temp0[0]
        # Lowered exp-sum reduction.
        for ax0_1 in T.thread_binding(0, 32, thread="threadIdx.x"):
            with T.block("T_softmax_expsum_normal_reduction_init"):
                T.reads([])
                T.writes([normal_reduce_temp1[0]])
                normal_reduce_temp1[0] = T.float32(0)
            for ax0_0 in T.serial(0, 8):
                with T.block("T_softmax_expsum_normal_reduction"):
                    i0_3 = T.axis.spatial(256, i0)
                    k = T.axis.reduce(256, ax0_0 * 32 + ax0_1)
                    T.reads(
                        [
                            A[i0_3, k],
                            T_softmax_maxelem_shared[i0_3],
                        ]
                    )
                    T.writes([normal_reduce_temp1[0]])
                    normal_reduce_temp1[0] = normal_reduce_temp1[0] + T.exp(
                        A[i0_3, k] - T_softmax_maxelem_shared[i0_3], dtype="float32"
                    )
            with T.block("T_softmax_expsum_cross_thread_reduction"):
                T.reads([normal_reduce_temp1[0]])
                T.writes([reduce_temp1[0]])
                T.attr(
                    T.comm_reducer(lambda x_1, y_1: x_1 + y_1, [T.float32(0)]),
                    "reduce_scope",
                    T.reinterpret(T.uint64(0), dtype="handle"),
                )
                T.evaluate(
                    T.tvm_thread_allreduce(
                        T.uint32(1),
                        normal_reduce_temp1[0],
                        True,
                        reduce_temp1[0],
                        ax0_1,
                        dtype="handle",
                    )
                )
            with T.block("T_softmax_expsum_write_back"):
                i0_4 = T.axis.spatial(256, i0)
                T.reads([reduce_temp1[0]])
                T.writes([T_softmax_expsum_shared[i0_4]])
                T_softmax_expsum_shared[i0_4] = reduce_temp1[0]
        # Normalization stage is untouched by the pass.
        for i1_0 in T.serial(0, 8):
            for i1_1 in T.thread_binding(0, 32, thread="threadIdx.x"):
                with T.block("T_softmax_norm"):
                    i0_5 = T.axis.spatial(256, i0)
                    i1 = T.axis.spatial(256, i1_0 * 32 + i1_1)
                    T.reads(
                        [
                            A[i0_5, i1],
                            T_softmax_maxelem_shared[i0_5],
                            T_softmax_expsum_shared[i0_5],
                        ]
                    )
                    T.writes([T_softmax_norm[i0_5, i1]])
                    T.block_attr({"axis": 1})
                    T_softmax_norm[i0_5, i1] = (
                        T.exp(
                            A[i0_5, i1] - T_softmax_maxelem_shared[i0_5],
                            dtype="float32",
                        )
                        / T_softmax_expsum_shared[i0_5]
                    )
# Input fixture: two-output argmax reduction (index + value) with the
# reduction axis split over 32 threads.  Paired with lowered_argmax_split.
@T.prim_func
def argmax_split(
    idx: T.Buffer[(128, 128), "int32"],
    val: T.Buffer[(128, 128), "float32"],
    argmax_v0: T.Buffer[(128,), "int32"],
    argmax_v1: T.Buffer[(128,), "float32"],
) -> None:
    for i0, i1_0 in T.grid(128, 4):
        for i1_1 in T.thread_binding(32, thread="threadIdx.x"):
            with T.block("argmax"):
                i = T.axis.spatial(128, i0)
                k = T.axis.reduce(128, i1_0 * 32 + i1_1)
                T.reads(idx[i, k], val[i, k])
                T.writes(argmax_v0[i], argmax_v1[i])
                with T.init():
                    argmax_v0[i] = -1
                    argmax_v1[i] = T.float32(-3.4028234663852886e38)
                # Keep the current (index, value) pair unless a larger value is seen.
                v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
                v_argmax_v1: T.float32 = T.Select(
                    argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k]
                )
                argmax_v0[i] = v_argmax_v0
                argmax_v1[i] = v_argmax_v1
# Expected output for argmax_split: a two-value reduction lowers to paired
# in-thread buffers, a 2-ary tvm_thread_allreduce with a tuple comm_reducer,
# and a write-back of both results.
@T.prim_func
def lowered_argmax_split(
    idx: T.Buffer[(128, 128), "int32"],
    val: T.Buffer[(128, 128), "float32"],
    argmax_v0: T.Buffer[(128,), "int32"],
    argmax_v1: T.Buffer[(128,), "float32"],
) -> None:
    cross_thread_argmax_v0 = T.alloc_buffer([1], dtype="int32", strides=[1], scope="local")
    cross_thread_argmax_v1 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
    in_thread_argmax_v0 = T.alloc_buffer([1], dtype="int32", strides=[1], scope="local")
    in_thread_argmax_v1 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
    for i0 in T.serial(128):
        for i1_1 in T.thread_binding(32, thread="threadIdx.x"):
            with T.block("argmax_in_thread_init"):
                T.reads()
                T.writes(in_thread_argmax_v0[0], in_thread_argmax_v1[0])
                in_thread_argmax_v0[0] = -1
                in_thread_argmax_v1[0] = T.float32(-3.4028234663852886e38)
            for i1_0 in T.serial(4):
                with T.block("argmax_in_thread"):
                    i = T.axis.spatial(128, i0)
                    k = T.axis.reduce(128, i1_0 * 32 + i1_1)
                    T.reads(idx[i, k], val[i, k])
                    T.writes(in_thread_argmax_v0[0], in_thread_argmax_v1[0])
                    v_argmax_v0: T.int32 = T.Select(
                        in_thread_argmax_v1[0] >= val[i, k], in_thread_argmax_v0[0], idx[i, k]
                    )
                    v_argmax_v1: T.float32 = T.Select(
                        in_thread_argmax_v1[0] >= val[i, k], in_thread_argmax_v1[0], val[i, k]
                    )
                    in_thread_argmax_v0[0] = v_argmax_v0
                    in_thread_argmax_v1[0] = v_argmax_v1
            with T.block("argmax_cross_thread"):
                T.reads(in_thread_argmax_v0[0], in_thread_argmax_v1[0])
                T.writes(cross_thread_argmax_v0[0], cross_thread_argmax_v1[0])
                T.attr(
                    T.comm_reducer(
                        lambda x0, x1, y0, y1: (
                            T.Select(x1 >= y1, x0, y0),
                            T.Select(x1 >= y1, x1, y1),
                        ),
                        [-1, T.float32(-3.4028234663852886e38)],
                    ),
                    "reduce_scope",
                    T.reinterpret(T.uint64(0), dtype="handle"),
                )
                T.evaluate(
                    T.tvm_thread_allreduce(
                        T.uint32(2),
                        in_thread_argmax_v0[0],
                        in_thread_argmax_v1[0],
                        True,
                        cross_thread_argmax_v0[0],
                        cross_thread_argmax_v1[0],
                        i1_1,
                        dtype="handle",
                    )
                )
            with T.block("argmax_write_back"):
                i = T.axis.spatial(128, i0)
                T.reads(cross_thread_argmax_v0[0], cross_thread_argmax_v1[0])
                T.writes(argmax_v0[i], argmax_v1[i])
                argmax_v0[i] = cross_thread_argmax_v0[0]
                argmax_v1[i] = cross_thread_argmax_v1[0]
# Input fixture: argmin variant of argmax_split whose init statements and
# update stores appear in swapped order (v1 before v0) — exercises that the
# pass matches the reducer regardless of statement ordering.
@T.prim_func
def argmin_split_init_update_reordered(
    idx: T.Buffer[(128, 128), "int32"],
    val: T.Buffer[(128, 128), "float32"],
    argmin_v0: T.Buffer[(128,), "int32"],
    argmin_v1: T.Buffer[(128,), "float32"],
) -> None:
    for i0, i1_0 in T.grid(128, 4):
        for i1_1 in T.thread_binding(32, thread="threadIdx.x"):
            with T.block("argmin"):
                i = T.axis.spatial(128, i0)
                k = T.axis.reduce(128, i1_0 * 32 + i1_1)
                T.reads(idx[i, k], val[i, k])
                T.writes(argmin_v0[i], argmin_v1[i])
                with T.init():
                    argmin_v1[i] = T.float32(3.4028234663852886e38)
                    argmin_v0[i] = -1
                v_argmin_v0: T.int32 = T.Select(argmin_v1[i] <= val[i, k], argmin_v0[i], idx[i, k])
                v_argmin_v1: T.float32 = T.Select(
                    argmin_v1[i] <= val[i, k], argmin_v1[i], val[i, k]
                )
                argmin_v1[i] = v_argmin_v1
                argmin_v0[i] = v_argmin_v0
# Expected output for argmin_split_init_update_reordered: mirrors
# lowered_argmax_split with <= comparisons and +FLT_MAX identity, and keeps
# the original reordered store sequence inside the in-thread block.
@T.prim_func
def lowered_argmin_split_init_update_reordered(
    idx: T.Buffer[(128, 128), "int32"],
    val: T.Buffer[(128, 128), "float32"],
    argmin_v0: T.Buffer[(128,), "int32"],
    argmin_v1: T.Buffer[(128,), "float32"],
) -> None:
    cross_thread_argmin_v0 = T.alloc_buffer([1], dtype="int32", strides=[1], scope="local")
    cross_thread_argmin_v1 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
    in_thread_argmin_v0 = T.alloc_buffer([1], dtype="int32", strides=[1], scope="local")
    in_thread_argmin_v1 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
    for i0 in T.serial(128):
        for i1_1 in T.thread_binding(32, thread="threadIdx.x"):
            with T.block("argmin_in_thread_init"):
                T.reads()
                T.writes(in_thread_argmin_v0[0], in_thread_argmin_v1[0])
                in_thread_argmin_v0[0] = -1
                in_thread_argmin_v1[0] = T.float32(3.4028234663852886e38)
            for i1_0 in T.serial(4):
                with T.block("argmin_in_thread"):
                    i = T.axis.spatial(128, i0)
                    k = T.axis.reduce(128, i1_0 * 32 + i1_1)
                    T.reads(idx[i, k], val[i, k])
                    T.writes(in_thread_argmin_v0[0], in_thread_argmin_v1[0])
                    v_argmin_v0: T.int32 = T.Select(
                        in_thread_argmin_v1[0] <= val[i, k], in_thread_argmin_v0[0], idx[i, k]
                    )
                    v_argmin_v1: T.float32 = T.Select(
                        in_thread_argmin_v1[0] <= val[i, k], in_thread_argmin_v1[0], val[i, k]
                    )
                    in_thread_argmin_v1[0] = v_argmin_v1
                    in_thread_argmin_v0[0] = v_argmin_v0
            with T.block("argmin_cross_thread"):
                T.reads(in_thread_argmin_v0[0], in_thread_argmin_v1[0])
                T.writes(cross_thread_argmin_v0[0], cross_thread_argmin_v1[0])
                T.attr(
                    T.comm_reducer(
                        lambda x0, x1, y0, y1: (
                            T.Select(x1 <= y1, x0, y0),
                            T.Select(x1 <= y1, x1, y1),
                        ),
                        [-1, T.float32(3.4028234663852886e38)],
                    ),
                    "reduce_scope",
                    T.reinterpret(T.uint64(0), dtype="handle"),
                )
                T.evaluate(
                    T.tvm_thread_allreduce(
                        T.uint32(2),
                        in_thread_argmin_v0[0],
                        in_thread_argmin_v1[0],
                        True,
                        cross_thread_argmin_v0[0],
                        cross_thread_argmin_v1[0],
                        i1_1,
                        dtype="handle",
                    )
                )
            with T.block("argmin_write_back"):
                i = T.axis.spatial(128, i0)
                T.reads(cross_thread_argmin_v0[0], cross_thread_argmin_v1[0])
                T.writes(argmin_v0[i], argmin_v1[i])
                argmin_v0[i] = cross_thread_argmin_v0[0]
                argmin_v1[i] = cross_thread_argmin_v1[0]
# Input fixture: layer-norm whose reduction computes a tuple (sum, sum of
# squares) in one block.  Paired with lowered_layer_norm_tuple_sum in
# test_layer_norm_tuple_sum.
@T.prim_func
def layer_norm_tuple_sum(
    data: T.Buffer[(128, 768), "float32"],
    gamma: T.Buffer[768, "float32"],
    bias: T.Buffer[768, "float32"],
    T_layer_norm: T.Buffer[(128, 768), "float32"],
) -> None:
    data_red_temp_v0 = T.alloc_buffer([128], dtype="float32")
    data_red_temp_v1 = T.alloc_buffer([128], dtype="float32")
    for i0_fused in T.thread_binding(128, thread="blockIdx.x"):
        for i1_0 in T.serial(24):
            for i1_1 in T.thread_binding(32, thread="threadIdx.x"):
                with T.block("data_red_temp"):
                    ax0 = T.axis.spatial(128, i0_fused)
                    k1 = T.axis.reduce(768, i1_0 * 32 + i1_1)
                    T.reads(data[ax0, k1])
                    T.writes(data_red_temp_v0[ax0], data_red_temp_v1[ax0])
                    with T.init():
                        data_red_temp_v0[ax0] = T.float32(0)
                        data_red_temp_v1[ax0] = T.float32(0)
                    v_data_red_temp_v0: T.float32 = data_red_temp_v0[ax0] + data[ax0, k1]
                    v_data_red_temp_v1: T.float32 = (
                        data_red_temp_v1[ax0] + data[ax0, k1] * data[ax0, k1]
                    )
                    data_red_temp_v0[ax0] = v_data_red_temp_v0
                    data_red_temp_v1[ax0] = v_data_red_temp_v1
    # Elementwise normalization using the two reduction results.
    for i0_i1_fused_0 in T.thread_binding(384, thread="blockIdx.x"):
        for i0_i1_fused_1 in T.thread_binding(256, thread="threadIdx.x"):
            with T.block("T_layer_norm"):
                ax0 = T.axis.spatial(128, (i0_i1_fused_0 * 256 + i0_i1_fused_1) // 768)
                ax1 = T.axis.spatial(768, (i0_i1_fused_0 * 256 + i0_i1_fused_1) % 768)
                T.reads(
                    data[ax0, ax1],
                    data_red_temp_v0[ax0],
                    data_red_temp_v1[ax0],
                    gamma[ax1],
                    bias[ax1],
                )
                T.writes(T_layer_norm[ax0, ax1])
                T_layer_norm[ax0, ax1] = (
                    data[ax0, ax1] - data_red_temp_v0[ax0] * T.float32(0.0013020833333333333)
                ) * T.rsqrt(
                    data_red_temp_v1[ax0] * T.float32(0.0013020833333333333)
                    - data_red_temp_v0[ax0]
                    * T.float32(0.0013020833333333333)
                    * (data_red_temp_v0[ax0] * T.float32(0.0013020833333333333))
                    + T.float32(1.0000000000000001e-05),
                    dtype="float32",
                ) * gamma[
                    ax1
                ] + bias[
                    ax1
                ]
# Expected output for layer_norm_tuple_sum: the tuple-sum reduction lowers
# to paired in-thread accumulators and a single 2-ary tvm_thread_allreduce
# with an elementwise-add tuple comm_reducer.
@T.prim_func
def lowered_layer_norm_tuple_sum(
    data: T.Buffer[(128, 768), "float32"],
    gamma: T.Buffer[768, "float32"],
    bias: T.Buffer[768, "float32"],
    T_layer_norm: T.Buffer[(128, 768), "float32"],
) -> None:
    # with T.block("root")
    data_red_temp_v0 = T.alloc_buffer([128], dtype="float32")
    data_red_temp_v1 = T.alloc_buffer([128], dtype="float32")
    cross_thread_data_red_temp_v0 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
    cross_thread_data_red_temp_v1 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
    in_thread_data_red_temp_v0 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
    in_thread_data_red_temp_v1 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
    for i0_fused in T.thread_binding(128, thread="blockIdx.x"):
        for i1_1 in T.thread_binding(32, thread="threadIdx.x"):
            with T.block("data_red_temp_in_thread_init"):
                T.reads()
                T.writes(in_thread_data_red_temp_v0[0], in_thread_data_red_temp_v1[0])
                in_thread_data_red_temp_v0[0] = T.float32(0)
                in_thread_data_red_temp_v1[0] = T.float32(0)
            for i1_0 in T.serial(24):
                with T.block("data_red_temp_in_thread"):
                    ax0 = T.axis.spatial(128, i0_fused)
                    k1 = T.axis.reduce(768, i1_0 * 32 + i1_1)
                    T.reads(data[ax0, k1])
                    T.writes(in_thread_data_red_temp_v0[0], in_thread_data_red_temp_v1[0])
                    v_data_red_temp_v0: T.float32 = in_thread_data_red_temp_v0[0] + data[ax0, k1]
                    v_data_red_temp_v1: T.float32 = (
                        in_thread_data_red_temp_v1[0] + data[ax0, k1] * data[ax0, k1]
                    )
                    in_thread_data_red_temp_v0[0] = v_data_red_temp_v0
                    in_thread_data_red_temp_v1[0] = v_data_red_temp_v1
            with T.block("data_red_temp_cross_thread"):
                T.reads(in_thread_data_red_temp_v0[0], in_thread_data_red_temp_v1[0])
                T.writes(cross_thread_data_red_temp_v0[0], cross_thread_data_red_temp_v1[0])
                T.attr(
                    T.comm_reducer(
                        lambda x0, x1, y0, y1: (x0 + y0, x1 + y1), [T.float32(0), T.float32(0)]
                    ),
                    "reduce_scope",
                    T.reinterpret(T.uint64(0), dtype="handle"),
                )
                T.evaluate(
                    T.tvm_thread_allreduce(
                        T.uint32(2),
                        in_thread_data_red_temp_v0[0],
                        in_thread_data_red_temp_v1[0],
                        True,
                        cross_thread_data_red_temp_v0[0],
                        cross_thread_data_red_temp_v1[0],
                        i1_1,
                        dtype="handle",
                    )
                )
            with T.block("data_red_temp_write_back"):
                ax0 = T.axis.spatial(128, i0_fused)
                T.reads(cross_thread_data_red_temp_v0[0], cross_thread_data_red_temp_v1[0])
                T.writes(data_red_temp_v0[ax0], data_red_temp_v1[ax0])
                data_red_temp_v0[ax0] = cross_thread_data_red_temp_v0[0]
                data_red_temp_v1[ax0] = cross_thread_data_red_temp_v1[0]
    # Normalization stage is untouched by the pass.
    for i0_i1_fused_0 in T.thread_binding(384, thread="blockIdx.x"):
        for i0_i1_fused_1 in T.thread_binding(256, thread="threadIdx.x"):
            with T.block("T_layer_norm"):
                ax0 = T.axis.spatial(128, (i0_i1_fused_0 * 256 + i0_i1_fused_1) // 768)
                ax1 = T.axis.spatial(768, (i0_i1_fused_0 * 256 + i0_i1_fused_1) % 768)
                T.reads(
                    data[ax0, ax1],
                    data_red_temp_v0[ax0],
                    data_red_temp_v1[ax0],
                    gamma[ax1],
                    bias[ax1],
                )
                T.writes(T_layer_norm[ax0, ax1])
                T_layer_norm[ax0, ax1] = (
                    data[ax0, ax1] - data_red_temp_v0[ax0] * T.float32(0.0013020833333333333)
                ) * T.rsqrt(
                    data_red_temp_v1[ax0] * T.float32(0.0013020833333333333)
                    - data_red_temp_v0[ax0]
                    * T.float32(0.0013020833333333333)
                    * (data_red_temp_v0[ax0] * T.float32(0.0013020833333333333))
                    + T.float32(1.0000000000000001e-05),
                    dtype="float32",
                ) * gamma[
                    ax1
                ] + bias[
                    ax1
                ]
# pylint: enable=no-member,invalid-name,unused-variable,unexpected-keyword-arg
# Positive cases: each input fixture must lower (via _check, defined earlier
# in this file) to its paired "lowered_*" expected IR.
def test_loop_split():
    _check(loop_split, lowered_loop_split)
def test_no_normal_reduction():
    _check(no_normal_reduction, lowered_no_normal_reduction)
def test_two_bound_loops():
    _check(two_bound_loops, lowered_two_bound_loops)
def test_multiple_blocks_under_reduction_loop():
    _check(multiple_blocks_under_reduction_loop, lowered_multiple_blocks_under_reduction_loop)
def test_with_block_predicate():
    _check(with_block_predicate, lowered_with_block_predicate)
def test_single_reduction_loop_with_block_predicate():
    _check(
        single_reduction_loop_with_block_predicate,
        lowered_single_reduction_loop_with_block_predicate,
    )
def test_single_reduction_loop_with_tensorize():
    # Tensorized reduction is expected to pass through unchanged.
    _check(
        single_reduction_loop_with_tensorize,
        single_reduction_loop_with_tensorize,
    )
def test_reducer_max():
    _check(reducer_max, lowered_reducer_max)
def test_zero_rank_buffer():
    _check(zero_rank_buffer, lowered_zero_rank_buffer)
# Negative cases: the pass must raise on these malformed fixtures.
def test_multiple_bufferstore():
    _check_fail(multiple_bufferstore)
def test_reduction_block_not_deepest():
    _check_fail(reduction_loop_not_deepest)
def test_reduction_loop_bound_to_blockidx():
    _check_fail(reduction_loop_bound_to_blockidx)
def test_different_access_indices():
    _check_fail(different_access_indices)
def test_invalid_reducer():
    _check_fail(invalid_reducer)
def test_softmax():
    _check(softmax, lowered_softmax)
def test_argmax_split():
    _check(argmax_split, lowered_argmax_split)
def test_argmin_split_init_update_reordered():
    _check(argmin_split_init_update_reordered, lowered_argmin_split_init_update_reordered)
def test_lower_te():
    """LowerCrossThreadReduction must be a no-op on a TE-built schedule."""
    data = te.placeholder((32, 2, 2))
    red_outer = te.reduce_axis((0, 2), "k1")
    red_inner = te.reduce_axis((0, 2), "k2")
    out = te.compute(
        (32,), lambda i: te.sum(data[i, red_outer, red_inner], axis=[red_outer, red_inner])
    )
    sch = te.create_schedule(out.op)
    sch[out].bind(red_outer, te.thread_axis("threadIdx.x"))
    sch[out].bind(red_inner, te.thread_axis("threadIdx.y"))
    before = tvm.driver.build_module.schedule_to_module(sch, [data, out])
    after = tvm.tir.transform.LowerCrossThreadReduction()(before)
    # LowerCrossThreadReduction should do nothing on TE
    tvm.ir.assert_structural_equal(after, before)
def test_layer_norm_tuple_sum():
    # Tuple (multi-value) reduction must lower correctly as well.
    _check(layer_norm_tuple_sum, lowered_layer_norm_tuple_sum)
# Allow running this file directly as a script.
if __name__ == "__main__":
    tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_transform_lower_init_block.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
from tvm.script import tir as T
# pylint: disable=no-self-argument
# Input fixture for LowerInitBlock: a reduction block using T.init().
# Compared structurally against WithBranch after the pass runs.
@tvm.script.ir_module
class WithInit:
    @T.prim_func
    def main(a: T.handle, b: T.handle) -> None:
        A = T.match_buffer(a, [64, 64, 64])
        B = T.match_buffer(b, [64])
        for i0, j0 in T.grid(64, 64):
            for k0 in T.serial(32, 64):
                with T.block():
                    i, j, k = T.axis.remap("SRR", [i0, j0, k0])
                    with T.init():
                        B[i] = T.float32(0)
                    B[i] += A[i, j, k]
# Expected output: T.init() replaced by an explicit branch that fires on the
# first reduction iteration (j == 0 and k == 32, the loop start values).
@tvm.script.ir_module
class WithBranch:
    @T.prim_func
    def main(a: T.handle, b: T.handle) -> None:
        A = T.match_buffer(a, [64, 64, 64])
        B = T.match_buffer(b, [64])
        for i0, j0 in T.grid(64, 64):
            for k0 in T.serial(32, 64):
                with T.block():
                    i, j, k = T.axis.remap("SRR", [i0, j0, k0])
                    T.reads(A[i, j, k])
                    T.writes(B[i])
                    if (j == 0) and (k == 32):
                        B[i] = T.float32(0)
                    B[i] += A[i, j, k]
# Input fixture: same reduction as WithInit but accessed through
# T.match_buffer views, to check the pass handles match buffers.
@tvm.script.ir_module
class InitWithMatchBuffer:
    @T.prim_func
    def main(a: T.handle, b: T.handle) -> None:
        A = T.match_buffer(a, [64, 64, 64])
        B = T.match_buffer(b, [64])
        for i0, j0 in T.grid(64, 64):
            for k0 in T.serial(32, 64):
                with T.block():
                    i, j, k = T.axis.remap("SRR", [i0, j0, k0])
                    BB = T.match_buffer(B[i], ())
                    AA = T.match_buffer(A[i, 0:64, 0:64], (64, 64))
                    with T.init():
                        BB[()] = T.float32(0)
                    BB[()] += AA[j, k]
# Expected output for InitWithMatchBuffer: init lowered to the first-iteration
# branch, with the match_buffer views preserved.
@tvm.script.ir_module
class BranchWithMatchBuffer:
    @T.prim_func
    def main(a: T.handle, b: T.handle) -> None:
        A = T.match_buffer(a, [64, 64, 64])
        B = T.match_buffer(b, [64])
        for i0, j0 in T.grid(64, 64):
            for k0 in T.serial(32, 64):
                with T.block():
                    i, j, k = T.axis.remap("SRR", [i0, j0, k0])
                    T.reads(A[i, j, k])
                    T.writes(B[i])
                    BB = T.match_buffer(B[i], ())
                    AA = T.match_buffer(A[i, 0:64, 0:64], (64, 64))
                    if (j == 0) and (k == 32):
                        BB[()] = T.float32(0)
                    BB[()] += AA[j, k]
def test_lower_reduction():
    """T.init() in WithInit must lower to the explicit branch in WithBranch."""
    lowered = tvm.tir.transform.LowerInitBlock()(WithInit)
    tvm.ir.assert_structural_equal(lowered, WithBranch, True)
def test_lower_match_buffer():
    """Init lowering must also work when stores go through match_buffer views."""
    lowered = tvm.tir.transform.LowerInitBlock()(InitWithMatchBuffer)
    tvm.ir.assert_structural_equal(lowered, BranchWithMatchBuffer, True)
def test_lower_te():
    """LowerInitBlock is a no-op on a TE-derived module (it has no init blocks)."""
    placeholder = te.placeholder((1,))
    out = te.compute((1,), lambda i: placeholder[i] + 2)
    sch = te.create_schedule(out.op)
    before = tvm.driver.build_module.schedule_to_module(sch, [placeholder, out])
    after = tvm.tir.transform.LowerInitBlock()(before)
    tvm.ir.assert_structural_equal(after, before)  # module must be unchanged
# Allow running this test file directly as a script.
if __name__ == "__main__":
    test_lower_reduction()
    test_lower_match_buffer()
    test_lower_te()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_transform_lower_intrin.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm import te
import numpy as np
def lower_intrin(params, stmt):
    """Run Simplify + LowerIntrin over *stmt* and return the lowered result.

    Accepts either a PrimExpr (wrapped in an Evaluate node and unwrapped on
    return) or a Stmt, with *params* as the PrimFunc parameters.
    """
    is_expr = isinstance(stmt, tvm.tir.PrimExpr)
    body = tvm.tir.Evaluate(stmt) if is_expr else stmt
    func = tvm.tir.PrimFunc(params, body).with_attr("target", tvm.target.Target("llvm"))
    passes = tvm.transform.Sequential(
        [tvm.tir.transform.Simplify(), tvm.tir.transform.LowerIntrin()]
    )
    lowered = passes(tvm.IRModule.from_expr(func))["main"].body
    return lowered.value if is_expr else lowered.body
def check_value(expr, vx, vy, data, fref):
    """Evaluate *expr* with (vx, vy) bound to each pair in *data* and compare
    the compiled result against the Python reference *fref*."""
    count = len(data)
    A = te.placeholder((count,), name="A", dtype=expr.dtype)
    B = te.placeholder((count,), name="B", dtype=expr.dtype)

    def bind(i):
        # Substitute A[i] for vx and B[i] for vy via nested Let bindings.
        return tvm.tir.Let(vy, B[i], tvm.tir.Let(vx, A[i], expr))

    C = te.compute((count,), bind)
    sch = te.create_schedule([C.op])
    func = tvm.build(sch, [A, B, C], "llvm")

    xs, ys = zip(*data)
    a = tvm.nd.array(np.array(xs, dtype=expr.dtype))
    b = tvm.nd.array(np.array(ys, dtype=expr.dtype))
    c = tvm.nd.array(np.zeros(count, dtype=expr.dtype))
    func(a, b, c)
    expected = np.array([fref(x, y) for x, y in data])
    np.testing.assert_equal(c.numpy(), expected)
def get_ref_data():
    """Return all (x, y) pairs with x, y in [-10, 10) and y != 0.

    y is used as a divisor by the callers, hence zero is excluded.
    """
    import itertools

    xs = range(-10, 10)
    ys = [v for v in range(-10, 10) if v != 0]
    return list(itertools.product(xs, ys))
@tvm.testing.requires_llvm
def test_lower_floordiv():
    """LowerIntrin's floordiv lowering matches Python's // across signs and dtypes,
    under several simplification constraints on the operands."""
    data = get_ref_data()
    for dtype in ["int32", "int64", "int16"]:
        x = te.var("x", dtype=dtype)
        y = te.var("y", dtype=dtype)
        zero = tvm.tir.const(0, dtype)
        # no constraints
        res = lower_intrin([x, y], tvm.te.floordiv(x, y))
        check_value(res, x, y, data, lambda a, b: a // b)
        # rhs >= 0: the Select guards the reference for b <= 0
        res = lower_intrin([x, y], tvm.tir.Select(y >= 0, tvm.te.floordiv(x, y), zero))
        check_value(res, x, y, data, lambda a, b: a // b if b > 0 else 0)
        # involves max
        res = lower_intrin(
            [x, y], tvm.tir.Select(y >= 0, tvm.te.max(tvm.te.floordiv(x, y), zero), zero)
        )
        check_value(res, x, y, data, lambda a, b: max(a // b, 0) if b > 0 else 0)
        # lhs >= 0
        res = lower_intrin(
            [x, y], tvm.tir.Select(tvm.tir.all(y >= 0, x >= 0), tvm.te.floordiv(x, y), zero)
        )
        check_value(res, x, y, data, lambda a, b: a // b if b > 0 and a >= 0 else 0)
        # const power of two: takes the shift-based lowering path
        res = lower_intrin([x, y], tvm.te.floordiv(x, tvm.tir.const(8, dtype=dtype)))
        check_value(res, x, y, [(a, b) for a, b in data if b == 8], lambda a, b: a // b)
@tvm.testing.requires_llvm
def test_lower_floormod():
    """LowerIntrin's floormod lowering matches Python's % across signs and dtypes,
    mirroring the floordiv cases above."""
    data = get_ref_data()
    for dtype in ["int32", "int64", "int16"]:
        x = te.var("x", dtype=dtype)
        y = te.var("y", dtype=dtype)
        zero = tvm.tir.const(0, dtype)
        # no constraints
        res = lower_intrin([x, y], tvm.te.floormod(x, y))
        check_value(res, x, y, data, lambda a, b: a % b)
        # rhs >= 0
        res = lower_intrin([x, y], tvm.tir.Select(y >= 0, tvm.te.floormod(x, y), zero))
        check_value(res, x, y, data, lambda a, b: a % b if b > 0 else 0)
        # lhs >= 0
        res = lower_intrin(
            [x, y], tvm.tir.Select(tvm.tir.all(y >= 0, x >= 0), tvm.te.floormod(x, y), zero)
        )
        check_value(res, x, y, data, lambda a, b: a % b if b > 0 and a >= 0 else 0)
        # const power of two: takes the mask-based lowering path
        res = lower_intrin([x, y], tvm.te.floormod(x, tvm.tir.const(8, dtype=dtype)))
        check_value(res, x, y, [(a, b) for a, b in data if b == 8], lambda a, b: a % b)
# Allow running this test file directly as a script.
if __name__ == "__main__":
    test_lower_floordiv()
    test_lower_floormod()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_transform_lower_opaque_block.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm import te
from tvm.script import tir as T
def _check(original, transformed):
    """Lower *original* with LowerOpaqueBlock + Simplify and assert the result
    is structurally equal to *transformed* (free vars mapped)."""
    mod = tvm.IRModule.from_expr(original)
    lowered = tvm.tir.transform.Simplify()(tvm.tir.transform.LowerOpaqueBlock()(mod))
    tvm.ir.assert_structural_equal(lowered["main"], transformed, True)
# Fixture: C = (A + 1) * 2 staged through a row-local buffer B, wrapped in
# opaque (unnamed, no-itervar) blocks. Input to LowerOpaqueBlock.
@T.prim_func
def compacted_elementwise_func(a: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, (16, 16), "float32")
    C = T.match_buffer(c, (16, 16), "float32")

    for i in range(0, 16):
        with T.block():
            T.reads(A[i, 0:16])
            T.writes(C[i, 0:16])
            B = T.alloc_buffer([1, 16], "float32", scope="global")
            for j in range(0, 16):
                with T.block():
                    T.reads(A[i, j])
                    T.writes(B[0, j])
                    B[0, j] = A[i, j] + 1.0
            for j in range(0, 16):
                with T.block():
                    T.reads(B[0, j])
                    T.writes(C[i, j])
                    C[i, j] = B[0, j] * 2.0
# Fixture: expected lowering of compacted_elementwise_func — blocks removed,
# the alloc_buffer turned into a plain decl_buffer.
@T.prim_func
def transformed_elementwise_func(a: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, (16, 16), "float32")
    C = T.match_buffer(c, (16, 16), "float32")

    for i in T.serial(0, 16):
        B_new = T.decl_buffer(shape=[1, 16], dtype="float32")
        for j in T.serial(0, 16):
            B_new[0, j] = A[i, j] + 1.0
        for j in T.serial(0, 16):
            C[i, j] = B_new[0, j] * 2.0
# Fixture: same elementwise workload under GPU thread bindings
# (blockIdx.x / threadIdx.x / vthread). Input to LowerOpaqueBlock.
@T.prim_func
def compacted_gpu_func(a: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, (16, 16), "float32")
    C = T.match_buffer(c, (16, 16), "float32")

    for i0 in T.thread_binding(0, 4, thread="blockIdx.x"):
        for i1 in T.thread_binding(0, 2, thread="threadIdx.x"):
            for i2 in T.thread_binding(0, 2, thread="vthread"):
                with T.block():
                    T.reads(A[i0 * 4 + i1 * 2 + i2, 0:16])
                    T.writes(C[i0 * 4 + i1 * 2 + i2, 0:16])
                    B = T.alloc_buffer([1, 16], "float32", scope="local")
                    for j in range(0, 16):
                        with T.block():
                            T.reads(A[i0 * 4 + i1 * 2 + i2, j])
                            T.writes(B[0, j])
                            B[0, j] = A[i0 * 4 + i1 * 2 + i2, j] + 1.0
                    for j in range(0, 16):
                        with T.block():
                            T.reads(B[0, j])
                            T.writes(C[i0 * 4 + i1 * 2 + i2, j])
                            C[i0 * 4 + i1 * 2 + i2, j] = B[0, j] * 2.0
# Fixture: expected lowering of compacted_gpu_func — thread-binding loops
# become env_thread / launch_thread pairs, blocks vanish.
@T.prim_func
def transformed_gpu_func(a: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, (16, 16), "float32")
    C = T.match_buffer(c, (16, 16), "float32")

    i0 = T.env_thread("blockIdx.x")
    i1 = T.env_thread("threadIdx.x")
    i2 = T.env_thread("vthread")

    T.launch_thread(i0, 4)
    T.launch_thread(i1, 2)
    T.launch_thread(i2, 2)
    B = T.decl_buffer(shape=[1, 16], dtype="float32", scope="local")
    for j in range(0, 16):
        B[0, j] = A[i0 * 4 + i1 * 2 + i2, j] + 1.0
    for j in range(0, 16):
        C[i0 * 4 + i1 * 2 + i2, j] = B[0, j] * 2.0
# Fixture: elementwise workload with symbolic (n, m) extents; the staging
# buffer B has a symbolic shape. Input to LowerOpaqueBlock.
@T.prim_func
def compacted_symbolic_func(a: T.handle, c: T.handle, n: T.int32, m: T.int32) -> None:
    A = T.match_buffer(a, (n, m), "float32")
    C = T.match_buffer(c, (n, m), "float32")

    for i in range(0, n):
        with T.block():
            # NOTE(review): reads/writes annotate the single point [i, m] here,
            # not the row [i, 0:m] — verify this is intentional in the fixture.
            T.reads(A[i, m])
            T.writes(C[i, m])
            B = T.alloc_buffer((m,), "float32", scope="global")
            for j in range(0, m):
                with T.block():
                    T.reads(A[i, j])
                    T.writes(B[j])
                    B[j] = A[i, j] + 1.0
            for j in range(0, m):
                with T.block():
                    T.reads(B[j])
                    T.writes(C[i, j])
                    C[i, j] = B[j] * 2.0
# Fixture: expected lowering of compacted_symbolic_func — symbolic-shaped
# staging buffer declared directly, blocks removed.
@T.prim_func
def transformed_symbolic_func(a: T.handle, c: T.handle, n: T.int32, m: T.int32) -> None:
    A = T.match_buffer(a, (n, m), "float32")
    C = T.match_buffer(c, (n, m), "float32")

    for i in range(0, n):
        B = T.decl_buffer(shape=[m], dtype="float32")
        for j in range(0, m):
            B[j] = A[i, j] + 1.0
        for j in range(0, m):
            C[i, j] = B[j] * 2.0
# Fixture: block with a T.where predicate (5*7 = 35 iterations over 32
# elements). Input to LowerOpaqueBlock.
@T.prim_func
def compacted_predicate_func(a: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, (32), "float32")
    C = T.match_buffer(c, (32), "float32")

    for i, j in T.grid(5, 7):
        with T.block():
            T.reads(A[i * 7 + j])
            T.writes(C[i * 7 + j])
            T.where(i * 7 + j < 32)
            C[i * 7 + j] = A[i * 7 + j] + 1.0
# Fixture: expected lowering of compacted_predicate_func — the T.where
# predicate becomes a plain if-guard.
@T.prim_func
def transformed_predicate_func(a: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, (32), "float32")
    C = T.match_buffer(c, (32), "float32")

    for i, j in T.grid(5, 7):
        if i * 7 + j < 32:
            C[i * 7 + j] = A[i * 7 + j] + 1.0
# Fixture: grid containing a unit-extent loop (y has extent 1).
# Input to LowerOpaqueBlock.
@T.prim_func
def compacted_unit_loop_func(a: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, (32), "float32")
    C = T.match_buffer(c, (32), "float32")

    for x, y, z in T.grid(4, 1, 8):
        with T.block():
            T.reads(A[x * 8 + y * 8 + z])
            T.writes(C[x * 8 + y * 8 + z])
            C[x * 8 + y * 8 + z] = A[x * 8 + y * 8 + z] + 1.0
# Fixture: expected lowering of compacted_unit_loop_func — the unit loop y
# is elided (its index simplifies to 0).
@T.prim_func
def transformed_unit_loop_func(a: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, (32), "float32")
    C = T.match_buffer(c, (32), "float32")

    for x, z in T.grid(4, 8):
        C[x * 8 + z] = A[x * 8 + z] + 1.0
# Fixture: one block allocating two buffers (B, C). Input to LowerOpaqueBlock.
@T.prim_func
def compacted_multi_alloc_func(a: T.handle, d: T.handle) -> None:
    A = T.match_buffer(a, (32), "float32")
    D = T.match_buffer(d, (32), "float32")

    for i in range(0, 32):
        with T.block():
            T.reads(A[i])
            T.writes(D[i])
            B = T.alloc_buffer((32,), scope="global")
            C = T.alloc_buffer((32,), scope="global")
            B[i] = A[i] + 1.0
            C[i] = A[i] + B[i]
            D[i] = C[i] * 2.0
# Fixture: expected lowering of compacted_multi_alloc_func — both allocations
# become decl_buffer declarations in order.
@T.prim_func
def transformed_multi_alloc_func(a: T.handle, d: T.handle) -> None:
    A = T.match_buffer(a, (32), "float32")
    D = T.match_buffer(d, (32), "float32")

    for i in range(0, 32):
        B = T.decl_buffer(shape=(32,), dtype="float32")
        C = T.decl_buffer(shape=(32,), dtype="float32")
        B[i] = A[i] + 1.0
        C[i] = A[i] + B[i]
        D[i] = C[i] * 2.0
# Fixture: staging buffer B with non-compact strides [17, 1] (padded rows).
# Input to LowerOpaqueBlock.
@T.prim_func
def compacted_strided_buffer_func(a: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, (16, 16), "float32")
    C = T.match_buffer(c, (16, 16), "float32")

    for i0 in range(0, 4):
        with T.block():
            T.reads(A[i0 * 4 : i0 * 4 + 4, 0:16])
            T.writes(C[i0 * 4 : i0 * 4 + 4, 0:16])
            B = T.alloc_buffer([4, 16], "float32", strides=[17, 1], scope="global")
            for i1 in range(0, 4):
                for j in range(0, 16):
                    with T.block():
                        T.reads(A[i0 * 4 + i1, j])
                        T.writes(B[i1, j])
                        B[i1, j] = A[i0 * 4 + i1, j] + 1.0
            for i1 in range(0, 4):
                for j in range(0, 16):
                    with T.block():
                        T.reads(B[i1, j])
                        T.writes(C[i0 * 4 + i1, j])
                        C[i0 * 4 + i1, j] = B[i1, j] * 2.0
# Fixture: expected lowering of compacted_strided_buffer_func — a raw
# allocation sized for the strides (4 x 17) plus a strided decl_buffer view.
@T.prim_func
def transformed_strided_buffer_func(
    A: T.Buffer[(16, 16), "float32"], C: T.Buffer[(16, 16), "float32"]
) -> None:
    # body
    for i0 in T.serial(4):
        B_data = T.allocate([4, 17], "float32", "global")
        B = T.decl_buffer(shape=[4, 16], dtype="float32", strides=[17, 1], data=B_data)
        for i1, j in T.grid(4, 16):
            B[i1, j] = A[i0 * 4 + i1, j] + T.float32(1)
        for i1, j in T.grid(4, 16):
            C[i0 * 4 + i1, j] = B[i1, j] * T.float32(2)
# Fixture: a loop carrying string / int / float annotations; LowerOpaqueBlock
# must turn them into pragma AttrStmt nodes (checked in test_annotated_loops).
@T.prim_func
def annotated_loops(a: T.handle) -> None:
    A = T.match_buffer(a, (16,), "float32")

    for i in range(0, 16, annotations={"pragma_1": "str_value", "pragma_2": 1, "pragma_3": 0.0}):
        A[i] = 0.0
# Fixture: bool-typed buffers copied through an opaque block.
# Input to LowerOpaqueBlock.
@T.prim_func
def boolean_handling_before(a: T.Buffer[10, "bool"], b: T.Buffer[10, "bool"]) -> None:
    for i0 in T.serial(10):
        with T.block("b"):
            T.reads(a[i0])
            T.writes(b[i0])
            b[i0] = a[i0]
# Fixture: expected lowering of boolean_handling_before — the bool copy
# survives unchanged once the block is removed.
@T.prim_func
def boolean_handling_after(a: T.Buffer[10, "bool"], b: T.Buffer[10, "bool"]) -> None:
    # body
    for i0 in T.serial(10):
        b[i0] = a[i0]
# Each test below pairs an input fixture with its expected lowering and
# delegates the whole check to _check (LowerOpaqueBlock + Simplify + compare).
def test_elementwise():
    """Opaque blocks around an elementwise workload are lowered away."""
    _check(compacted_elementwise_func, transformed_elementwise_func)


def test_gpu_workload():
    """Thread-binding loops lower to env_thread/launch_thread."""
    _check(compacted_gpu_func, transformed_gpu_func)


def test_symbolic_shape():
    """Symbolic-shaped staging buffers are preserved through lowering."""
    _check(compacted_symbolic_func, transformed_symbolic_func)


def test_predicate():
    """A T.where block predicate lowers to an if-guard."""
    _check(compacted_predicate_func, transformed_predicate_func)


def test_unit_loops():
    """Unit-extent loops are elided during lowering."""
    _check(compacted_unit_loop_func, transformed_unit_loop_func)


def test_multi_alloc():
    """Multiple allocations in one block each become a decl_buffer."""
    _check(compacted_multi_alloc_func, transformed_multi_alloc_func)


def test_strided_buffer():
    """Strided alloc_buffer lowers to a raw allocate plus a strided view."""
    _check(compacted_strided_buffer_func, transformed_strided_buffer_func)
def test_lower_te():
    """LowerOpaqueBlock is a no-op on a TE-derived module (no opaque blocks)."""
    placeholder = te.placeholder((1,))
    result = te.compute((1,), lambda i: placeholder[i] + 2)
    sch = te.create_schedule(result.op)
    before = tvm.driver.build_module.schedule_to_module(sch, [placeholder, result])
    after = tvm.tir.transform.LowerOpaqueBlock()(before)
    tvm.ir.assert_structural_equal(after, before)  # module must be unchanged
def test_annotated_loops():
    """Loop annotations survive lowering as three nested pragma AttrStmts."""
    mod = tvm.tir.transform.LowerOpaqueBlock()(tvm.IRModule.from_expr(annotated_loops))
    pragma_1 = mod["main"].body
    pragma_2 = pragma_1.body
    pragma_3 = pragma_2.body
    assert pragma_1.attr_key == "pragma_1" and pragma_1.value == "str_value"
    assert pragma_2.attr_key == "pragma_2"
    tvm.ir.assert_structural_equal(pragma_2.value, tvm.tir.IntImm("int32", 1))
    assert pragma_3.attr_key == "pragma_3"
    tvm.ir.assert_structural_equal(pragma_3.value, tvm.tir.FloatImm("float32", 0.0))
def test_annotated_block():
    """Block annotations (T.block_attr) lower to nested pragma AttrStmts,
    mirroring the loop-annotation behavior above."""

    @T.prim_func
    def annotated_block() -> None:
        with T.block():
            T.block_attr({"pragma_1": "str_value", "pragma_2": 1, "pragma_3": 0.0})
            T.evaluate(0)

    mod = tvm.IRModule.from_expr(annotated_block)
    mod = tvm.tir.transform.LowerOpaqueBlock()(mod)
    attr1 = mod["main"].body
    attr2 = attr1.body
    attr3 = attr2.body
    assert attr1.attr_key == "pragma_1" and attr1.value == "str_value"
    assert attr2.attr_key == "pragma_2"
    tvm.ir.assert_structural_equal(attr2.value, tvm.tir.IntImm("int32", 1))
    assert attr3.attr_key == "pragma_3"
    tvm.ir.assert_structural_equal(attr3.value, tvm.tir.FloatImm("float32", 0.0))
def test_preserved_annotations():
    """Loop annotations are kept through lowering while block annotations
    (here "k_3") are dropped along with the block itself."""

    @T.prim_func
    def before(A: T.Buffer[8, "float32"], B: T.Buffer[8, "float32"]):
        for i in T.serial(8, annotations={"k_0": 1, "k_1": [2, 3], "k_2": 3.14}):
            with T.block("block"):
                T.block_attr({"k_3": "oops"})
                B[i] = A[i] + 1.0

    @T.prim_func
    def after(A: T.Buffer[8, "float32"], B: T.Buffer[8, "float32"]):
        for i in T.serial(8, annotations={"k_0": 1, "k_1": [2, 3], "k_2": 3.14}):
            B[i] = A[i] + 1.0

    mod = tvm.IRModule.from_expr(before)
    mod = tvm.tir.transform.LowerOpaqueBlock()(mod)
    tvm.ir.assert_structural_equal(mod["main"], after)
def test_boolean_handling():
    """Boolean buffers pass through LowerOpaqueBlock unchanged."""
    _check(boolean_handling_before, boolean_handling_after)
# Allow running this test file directly as a script.
if __name__ == "__main__":
    tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_transform_lower_tvm_builtin.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
import numpy as np
from tvm import testing
@tvm.register_func("tvm.test_matmul")
def my_matmul(a, b, c):
    """Host-side reference matmul for the packed call: c <- dot(a, b)."""
    result = np.dot(a.numpy(), b.numpy())
    c.copyfrom(result)
def check_packed_func(target="llvm"):
    """Lower a tir.call_packed through MakePackedAPI + LowerTVMBuiltin and
    verify the four stack-allocation LetStmts it must produce, in order:
    arg_tcode(4), arg_value(4), array(3), shape(12)."""
    ib = tvm.tir.ir_builder.create()

    m = n = k = 16

    #
    # Prepare buffer for a, b and c:
    #
    a = te.placeholder((m, k), name="a", dtype="float64")
    b = te.placeholder((k, n), name="b", dtype="float64")
    k = te.reduce_axis((0, k), name="k")
    c = te.compute((m, n), lambda i, j: te.sum(a[i, k] * b[k, j], axis=k), name="c")

    a_buffer = tvm.tir.decl_buffer(
        a.shape, a.dtype, name="a_buffer", offset_factor=1, strides=[te.var("s1"), 1]
    )
    b_buffer = tvm.tir.decl_buffer(
        b.shape, b.dtype, name="b_buffer", offset_factor=1, strides=[te.var("s2"), 1]
    )
    c_buffer = tvm.tir.decl_buffer(
        c.shape, c.dtype, name="c_buffer", offset_factor=1, strides=[te.var("s3"), 1]
    )

    with ib.for_range(0, 10, "i", kind="parallel"):
        ib.emit(tvm.tir.call_packed("tvm.test_matmul", a_buffer, b_buffer, c_buffer))

    stmt = ib.get()

    # Construct a valid IRModule to be lowered:
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([a_buffer, b_buffer, c_buffer], stmt))

    target = tvm.target.Target(target)
    mod = tvm.tir.transform.Apply(lambda f: f.with_attr("target", target))(mod)
    mod = tvm.tir.transform.Apply(lambda f: f.with_attr("global_symbol", "main"))(mod)
    mod = tvm.tir.transform.MakePackedAPI()(mod)

    # Do the lowering:
    mod = tvm.tir.transform.LowerTVMBuiltin()(mod)

    # Get the PrimFunc from module:
    prim_func = mod.functions.items()[0][1]

    node = prim_func.body

    # Recursively visit PrimFunc until we meet the for-loop:
    while isinstance(node, (tvm.tir.AssertStmt, tvm.tir.LetStmt, tvm.tir.AttrStmt)):
        node = node.body

    # For-loop:
    assert isinstance(node, tvm.tir.stmt.For)

    #
    # let stack_tcode = tir.tvm_stack_alloca("arg_tcode", 4)
    #
    alloca_tcode = node.body
    assert isinstance(alloca_tcode, tvm.tir.LetStmt)

    expected_value = tvm.tir.call_intrin(
        "handle", tvm.ir.Op.get("tir.tvm_stack_alloca"), "arg_tcode", 4
    )
    expected_var = alloca_tcode.var
    expected_stmt = tvm.tir.LetStmt(expected_var, expected_value, alloca_tcode.body)

    tvm.ir.assert_structural_equal(alloca_tcode, expected_stmt, map_free_vars=True)

    #
    # let stack_value = tir.tvm_stack_alloca("arg_value", 4)
    #
    alloca_value = alloca_tcode.body
    assert isinstance(alloca_value, tvm.tir.LetStmt)

    expected_value = tvm.tir.call_intrin(
        "handle", tvm.ir.Op.get("tir.tvm_stack_alloca"), "arg_value", 4
    )
    expected_var = alloca_value.var
    expected_stmt = tvm.tir.LetStmt(expected_var, expected_value, alloca_value.body)

    tvm.ir.assert_structural_equal(alloca_value, expected_stmt, map_free_vars=True)

    #
    # let stack_array = tir.tvm_stack_alloca("array", 3)
    #
    alloca_array = alloca_value.body
    assert isinstance(alloca_array, tvm.tir.LetStmt)

    expected_value = tvm.tir.call_intrin(
        "handle", tvm.ir.Op.get("tir.tvm_stack_alloca"), "array", 3
    )
    expected_var = alloca_array.var
    expected_stmt = tvm.tir.LetStmt(expected_var, expected_value, alloca_array.body)

    tvm.ir.assert_structural_equal(alloca_array, expected_stmt, map_free_vars=True)

    #
    # let stack_shape = tir.tvm_stack_alloca("shape", 12)
    #
    alloca_shape = alloca_array.body
    assert isinstance(alloca_shape, tvm.tir.LetStmt)

    expected_value = tvm.tir.call_intrin(
        "handle", tvm.ir.Op.get("tir.tvm_stack_alloca"), "shape", 12
    )
    expected_var = alloca_shape.var
    expected_stmt = tvm.tir.LetStmt(expected_var, expected_value, alloca_shape.body)

    tvm.ir.assert_structural_equal(alloca_shape, expected_stmt, map_free_vars=True)
def test_lower_packed_func():
    """call_packed lowering produces the expected stack allocas on both targets."""
    for target in ("llvm", "stackvm"):
        check_packed_func(target)
@tvm.testing.requires_llvm
def test_call_packed_return_non_i32():
    """Packed calls returning non-i32 values (f32 and handle) lower and run
    correctly end to end through testing.echo."""
    # This call packed that return non i32 types
    expected_value = np.array([1.2, 1.4], dtype="float32")

    def packed_echo(value):
        # tvm_call_packed to "testing.echo", typed with the argument's dtype.
        return tvm.tir.call_intrin(
            value.dtype, tvm.ir.Op.get("tir.tvm_call_packed"), "testing.echo", value
        )

    def build_tir():
        # Build a PrimFunc over one 2-element f32 buffer exercising both returns.
        Ab = tvm.tir.decl_buffer((2,), "float32")
        ib = tvm.tir.ir_builder.create()
        Aptr = ib.buffer_ptr(Ab)
        # return f32
        # Aptr[0] = testing.echo(expected_value[0])
        Aptr[0] = packed_echo(tvm.tir.const(expected_value[0], "float32"))
        # return handle
        # let Aptr_var = testing.echo(Aptr) in Aptr_var[1] = expected_value[1]
        # NOTE(review): Aptr_var is bound but the store below goes through Aptr,
        # not Aptr_var; the let still forces the handle-returning call to be
        # emitted — confirm the store target is intentional.
        Aptr_var = ib.let("Aptr_dup", packed_echo(Aptr.asobject().data))
        ib.emit(tvm.tir.BufferStore(Aptr, tvm.tir.const(expected_value[1], "float32"), [1]))

        stmt = ib.get()
        return tvm.IRModule.from_expr(
            tvm.tir.PrimFunc([Ab], stmt).with_attr("global_symbol", "packed_test")
        )

    mod = build_tir()
    f = tvm.build(mod, None, "llvm")
    a = tvm.nd.array(np.zeros(2, dtype="float32"))
    f(a)
    tvm.testing.assert_allclose(a.numpy(), expected_value)
# Allow running this test file directly as a script.
if __name__ == "__main__":
    test_call_packed_return_non_i32()
    test_lower_packed_func()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_transform_lower_warp_memory.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
from tvm.contrib.nvcc import have_fp16
import numpy as np
import tvm.testing
import pytest
@tvm.testing.requires_cuda
def test_lower_warp_memory_local_scope():
    """LowerWarpMemory rewrites a "warp"-scope cache into a "local" allocation
    whose extent is the per-thread share (64 elements / 32 lanes = 2)."""
    m = 128
    A = te.placeholder((m,), name="A")
    B = te.compute((m,), lambda i: A[i] + 3, name="B")

    s = te.create_schedule(B.op)
    AA = s.cache_read(A, "warp", [B])
    xo, xi = s[B].split(B.op.axis[0], 64)
    xi0, xi1 = s[B].split(xi, factor=32)
    tx = te.thread_axis("threadIdx.x")
    s[B].bind(xi1, tx)
    s[B].bind(xo, te.thread_axis("blockIdx.x"))
    s[AA].compute_at(s[B], xo)
    xo, xi = s[AA].split(s[AA].op.axis[0], 32)
    s[AA].bind(xi, tx)

    cuda_target = tvm.target.Target("cuda")
    assert cuda_target.thread_warp_size == 32
    # lowering with the CSE pass disabled as otherwise it would do some commoning
    with tvm.transform.PassContext(opt_level=3, disabled_pass=["tir.CommonSubexprElimTIR"]):
        mod = tvm.lower(s, [A, B], name="f")
    mod = tvm.tir.transform.Apply(lambda f: f.with_attr("target", cuda_target))(mod)
    fdevice = tvm.tir.transform.SplitHostDevice()(mod)["f_kernel0"]
    mod = tvm.IRModule.from_expr(fdevice)
    fdevice = tvm.tir.transform.LowerWarpMemory()(mod)["f_kernel0"]
    allocate = fdevice.body.body
    # warp scope must have been rewritten to per-thread local storage
    assert allocate.buffer_var.type_annotation.storage_scope == "local"
    assert fdevice.body.body.extents[0].value == 2
@tvm.testing.requires_cuda
def test_lower_warp_memory_correct_indices():
    """The store into the warp cache must be indexed by threadIdx.x (lane id)
    but NOT by threadIdx.y (each warp owns its own A.warp buffer)."""
    n = 32
    A = te.placeholder((2, n, n), name="A", dtype="float32")
    C = te.compute((2, n, n), lambda x, i, j: A(x, i, (j + 1) % n), name="C")

    s = te.create_schedule(C.op)
    bk_x = te.thread_axis("blockIdx.x")
    th_y = te.thread_axis("threadIdx.y")
    th_x = te.thread_axis("threadIdx.x")
    B = s.cache_read(A, "warp", [C])
    cx, ci, cj = C.op.axis
    bx, bi, bj = B.op.axis
    s[C].bind(cj, th_x)
    s[C].bind(cx, bk_x)
    s[B].compute_at(s[C], cx)
    s[B].bind(bi, th_y)
    s[B].bind(bj, th_x)

    bounds = tvm.te.schedule.InferBound(s)
    ir = tvm.te.schedule.ScheduleOps(s, bounds)
    inner_func = ir.body.body.body
    store_A_warp = inner_func.seq[0].body.body
    indices = list(store_A_warp.indices)

    # A.warp is actually many buffers, one for each warp, although they are all called A.warp
    # 1. If we are accessing from different threads within a same warp (different
    #    threadIdx.x), we need to distinguish between each elements using threadIdx.x,
    #    so threadIdx.x is one of the indices.
    # 2. If we are accessing from different warps (different threadIdx.y), we are actually
    #    accessing different buffers, so there is no need to distinguish from elements,
    #    and therefore threadIdx.y is NOT an index.
    #
    # BUG FIX: the original built `idx_names` as a lazy `map` iterator; the first
    # membership test consumed it, so the second assertion ran against a
    # partially-exhausted iterator and could pass spuriously. Materialize first.
    idx_names = [idx.name for idx in indices if isinstance(idx, tvm.tir.expr.Var)]
    assert "threadIdx.x" in idx_names
    assert "threadIdx.y" not in idx_names
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_lower_warp_memory_cuda_end_to_end():
    """End-to-end: a warp-scope cache feeding a rotate-within-warp read pattern
    compiles and produces the expected shuffled output (f32 and f16)."""

    def check_cuda(dtype):
        if dtype == "float16" and not have_fp16(tvm.cuda(0).compute_version):
            print("Skip because gpu does not have fp16 support")
            return

        m = 128
        A = te.placeholder((m,), name="A", dtype=dtype)
        # each 32-element chunk is rotated left by one within its warp
        B = te.compute((m,), lambda i: A[i // 32 * 32 + (i + 1) % 32], name="B")

        cuda_target = tvm.target.Target("cuda")
        assert cuda_target.thread_warp_size == 32
        with cuda_target:
            s = te.create_schedule(B.op)
            AA = s.cache_read(A, "warp", [B])
            xo, xi = s[B].split(B.op.axis[0], 64)
            xi0, xi1 = s[B].split(xi, factor=32)
            tx = te.thread_axis("threadIdx.x")
            s[B].bind(xi1, tx)
            s[B].bind(xo, te.thread_axis("blockIdx.x"))
            s[AA].compute_at(s[B], xo)
            xo, xi = s[AA].split(s[AA].op.axis[0], 32)
            s[AA].bind(xi, tx)

            dev = tvm.cuda(0)
            # building with the CSE pass disabled as otherwise it would do some commoning
            with tvm.transform.PassContext(opt_level=3, disabled_pass=["tir.CommonSubexprElimTIR"]):
                func = tvm.build(s, [A, B], "cuda")
            A_np = np.array(list(range(m)), dtype=dtype)
            # expected output: each block of 32 rotated left by one
            B_np = np.array(
                list(range(1, 32))
                + [0]
                + list(range(33, 64))
                + [32]
                + list(range(65, 96))
                + [64]
                + list(range(97, 128))
                + [96],
                dtype=dtype,
            )
            A_nd = tvm.nd.array(A_np, dev)
            B_nd = tvm.nd.array(np.zeros(B_np.shape, dtype=B_np.dtype), dev)
            func(A_nd, B_nd)
            tvm.testing.assert_allclose(B_nd.numpy(), B_np, rtol=1e-3)

    check_cuda("float32")
    check_cuda("float16")
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_lower_warp_memory_cuda_half_a_warp():
    """Warp memory where each logical row uses only half a warp (16 of 32
    lanes, with threadIdx.y selecting the half) still lowers and runs."""

    def check_cuda(dtype):
        if dtype == "float16" and not have_fp16(tvm.cuda(0).compute_version):
            print("Skip because gpu does not have fp16 support")
            return

        n, m = 16, 16
        A = te.placeholder(
            (
                n,
                m,
            ),
            name="A",
            dtype=dtype,
        )
        # rotate each row left by one
        B = te.compute(
            (
                n,
                m,
            ),
            lambda j, i: A[j, (i + 1) % m],
            name="B",
        )

        cuda_target = tvm.target.Target("cuda")
        assert cuda_target.thread_warp_size == 2 * m
        with cuda_target:
            s = te.create_schedule(B.op)
            tx = te.thread_axis("threadIdx.x")
            ty = te.thread_axis("threadIdx.y")
            bx = te.thread_axis("blockIdx.x")

            AA = s.cache_read(A, "warp", [B])
            y, x = B.op.axis
            z, y = s[B].split(y, nparts=2)
            s[B].bind(x, tx)
            s[B].bind(y, ty)
            s[B].bind(z, bx)
            s[AA].compute_at(s[B], y)
            _, x = AA.op.axis
            s[AA].bind(x, tx)

            dev = tvm.cuda(0)
            # building with the CSE pass disabled as otherwise it would do some commoning
            with tvm.transform.PassContext(opt_level=3, disabled_pass=["tir.CommonSubexprElimTIR"]):
                func = tvm.build(s, [A, B], "cuda")
            A_np = np.array([list(range(i, m + i)) for i in range(n)], dtype=dtype)
            B_np = np.array([list(range(1 + i, m + i)) + [i] for i in range(n)], dtype=dtype)
            A_nd = tvm.nd.array(A_np, dev)
            B_nd = tvm.nd.array(np.zeros(B_np.shape, dtype=B_np.dtype), dev)
            func(A_nd, B_nd)
            tvm.testing.assert_allclose(B_nd.numpy(), B_np, rtol=1e-3)

    check_cuda("float32")
    check_cuda("float16")
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_lower_warp_memory_cuda_2_buffers():
    """Two independent warp-scope caches (for A and B) in one kernel both
    lower correctly and the rotated sum matches the reference."""

    def check_cuda(dtype):
        if dtype == "float16" and not have_fp16(tvm.cuda(0).compute_version):
            print("Skip because gpu does not have fp16 support")
            return

        m = 32
        A = te.placeholder((m,), name="A", dtype=dtype)
        B = te.placeholder((m,), name="B", dtype=dtype)
        C = te.compute((m,), lambda i: A[(i + 1) % m] + B[(i + 1) % m], name="C")

        cuda_target = tvm.target.Target("cuda")
        assert m <= cuda_target.thread_warp_size
        with cuda_target:
            s = te.create_schedule(C.op)
            tx = te.thread_axis("threadIdx.x")
            bx = te.thread_axis("blockIdx.x")

            AA = s.cache_read(A, "warp", [C])
            BB = s.cache_read(B, "warp", [C])
            xo, xi = s[C].split(C.op.axis[0], nparts=1)
            s[C].bind(xi, tx)
            s[C].bind(xo, bx)
            s[AA].compute_at(s[C], xo)
            s[BB].compute_at(s[C], xo)
            xo, xi = s[AA].split(s[AA].op.axis[0], nparts=1)
            s[AA].bind(xo, bx)
            s[AA].bind(xi, tx)
            xo, xi = s[BB].split(s[BB].op.axis[0], nparts=1)
            s[BB].bind(xo, bx)
            s[BB].bind(xi, tx)

            dev = tvm.cuda(0)
            # building with the CSE pass disabled as otherwise it would do some commoning
            with tvm.transform.PassContext(opt_level=3, disabled_pass=["tir.CommonSubexprElimTIR"]):
                func = tvm.build(s, [A, B, C], "cuda")
            AB_np = np.array(list(range(m)), dtype=dtype)
            # C[i] = A[(i+1)%m] + B[(i+1)%m] with identical inputs => rotated doubles
            C_np = np.array(list(range(1, m)) + [0], dtype=dtype) * 2
            A_nd = tvm.nd.array(AB_np, dev)
            B_nd = tvm.nd.array(AB_np, dev)
            C_nd = tvm.nd.array(np.zeros(C_np.shape, dtype=C_np.dtype), dev)
            func(A_nd, B_nd, C_nd)
            tvm.testing.assert_allclose(C_nd.numpy(), C_np, rtol=1e-3)

    check_cuda("float32")
    check_cuda("float16")
@tvm.testing.requires_gpu
def test_lower_warp_memory_roundup():
    """Warp-memory lowering handles sizes around warp multiples (31..65),
    i.e. extents that are not exact multiples of the warp size."""

    def check(device, m):
        A = te.placeholder((m,), name="A")
        B = te.compute((m,), lambda i: A[i] + 1, name="B")

        with tvm.target.Target(device):
            s = te.create_schedule(B.op)
            xo, xi = s[B].split(B.op.axis[0], factor=32)
            tx = te.thread_axis("threadIdx.x")
            s[B].bind(xo, te.thread_axis("blockIdx.x"))
            s[B].bind(xi, tx)

            AA = s.cache_read(A, "warp", [B])
            _, yi = s[AA].split(s[AA].op.axis[0], factor=32)
            s[AA].bind(yi, tx)
            s[AA].compute_at(s[B], xo)

            dev = tvm.device(device, 0)
            # building with the CSE pass disabled as otherwise it would do some commoning
            with tvm.transform.PassContext(opt_level=3, disabled_pass=["tir.CommonSubexprElimTIR"]):
                func = tvm.build(s, [A, B], device)
            A_np = np.random.uniform(size=(m,)).astype(A.dtype)
            B_np = np.zeros(shape=(m,)).astype(B.dtype)
            A_nd = tvm.nd.array(A_np, dev)
            B_nd = tvm.nd.array(B_np, dev)
            func(A_nd, B_nd)
            B_np = A_np + 1
            tvm.testing.assert_allclose(B_nd.numpy(), B_np)

    for device in ["cuda", "rocm"]:
        if not tvm.testing.device_enabled(device):
            print("skip because", device, "is not enabled..")
            continue
        # sizes straddling one and two warps
        check(device, m=31)
        check(device, m=32)
        check(device, m=33)
        check(device, m=63)
        check(device, m=64)
        check(device, m=65)
@tvm.testing.requires_cuda
def test_lower_warp_memory_same_thread():
    """When producer and consumer run on the same lane, LowerWarpMemory must
    not emit any warp shuffle intrinsic."""
    m = n = 128
    A = te.placeholder((m, n), name="A")
    k = te.reduce_axis((0, n), name="k")
    B = te.compute((m,), lambda i: te.sum(A[i, k], axis=[k]))

    s = te.create_schedule(B.op)
    BB = s.cache_write(B, "warp")
    tx = te.thread_axis("threadIdx.x")
    xo, xi = s[B].split(B.op.axis[0], factor=32)
    s[B].bind(xi, tx)
    s[B].bind(xo, te.thread_axis("blockIdx.x"))
    s[BB].compute_at(s[B], xo)
    xo, xi = s[BB].split(s[BB].op.axis[0], factor=32)
    s[BB].bind(xi, tx)

    cuda_target = tvm.target.Target("cuda")
    assert cuda_target.thread_warp_size == 32
    # lowering with the CSE pass disabled as otherwise it would do some commoning
    with tvm.transform.PassContext(opt_level=3, disabled_pass=["tir.CommonSubexprElimTIR"]):
        mod = tvm.lower(s, [A, B], name="f")
    mod = tvm.tir.transform.Apply(lambda f: f.with_attr("target", cuda_target))(mod)
    fdevice = tvm.tir.transform.SplitHostDevice()(mod)["f_kernel0"]
    mod = tvm.IRModule.from_expr(fdevice)
    fdevice = tvm.tir.transform.LowerWarpMemory()(mod)["f_kernel0"]
    # same-lane access needs no cross-lane communication
    assert "tvm_warp_shuffle" not in fdevice.astext()
@tvm.testing.requires_cuda
def test_lower_warp_memory_divide_by_factor():
    """A warp allocation whose size does not fit the warp layout must raise a
    "Divide by zero" TVMError from LowerWarpMemory instead of miscompiling."""
    ib = tvm.tir.ir_builder.IRBuilder()
    bx = te.thread_axis("blockIdx.x")
    tx = te.thread_axis("threadIdx.x")

    with ib.new_scope():
        ib.scope_attr(bx, "thread_extent", 32)
        ib.scope_attr(tx, "thread_extent", 32)
        t = ib.allocate("float32", 16, name="t", scope="warp")
        n = ib.allocate("float32", 16, name="n", scope="local")
        n[0] = t[0]

    stmt = ib.get()
    func = tvm.tir.PrimFunc([], stmt)
    func = func.with_attr("from_legacy_te_schedule", True)
    cuda_target = tvm.target.Target("cuda")
    # lowering with the CSE pass disabled as otherwise it would do some commoning
    with tvm.transform.PassContext(opt_level=3, disabled_pass=["tir.CommonSubexprElimTIR"]):
        mod = tvm.lower(func, name="f")
        mod = tvm.tir.transform.Apply(lambda f: f.with_attr("target", cuda_target))(mod)
        # cm is unused; only the raised error (and its message) matters here
        with pytest.raises(tvm.error.TVMError, match="Divide by zero") as cm:
            tvm.tir.transform.LowerWarpMemory()(mod)["f_kernel0"]
# Allow running this test file directly as a script.
if __name__ == "__main__":
    pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_transform_make_packed_api.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
from tvm.driver.build_module import schedule_to_module
def test_makeapi():
    """Not yet working, mock design"""
    size = te.size_var("n")
    a = te.placeholder((size,), name="A")
    b = te.placeholder((size,), name="B")
    c = te.compute(a.shape, lambda *idx: a(*idx) + b(*idx), name="C")
    sched = te.create_schedule(c.op)
    mod = schedule_to_module(sched, [size, a, b, c])
    mod = tvm.tir.transform.StorageFlatten(64)(mod)
    func_attrs = {
        "target": tvm.target.Target("llvm"),
        "global_symbol": "main",
    }
    mod = tvm.tir.transform.Apply(lambda f: f.with_attr(func_attrs))(mod)
    packed = tvm.tir.transform.MakePackedAPI()(mod)["main"]
    # The packed wrapper exposes the standard PackedFunc calling convention.
    assert len(packed.params) == 6
def _find_assignment(stmt, var_name):
    """Walk down .body links and return the first LetStmt binding *var_name*."""
    node = stmt
    while True:
        # Skip forward to the next LetStmt in the chain.
        while not isinstance(node, tvm.tir.LetStmt):
            node = node.body
        if node.var.name == var_name:
            return node
        node = node.body
def _find_next(stmt, type):
    """Walk down .body links and return the first node that is an instance of *type*."""
    if isinstance(stmt, type):
        return stmt
    return _find_next(stmt.body, type)
def test_variable_passed_from_args():
    """A plain handle argument must be unpacked from the packed args array and
    forwarded to the external call in place of the packed form."""
    ib = tvm.tir.ir_builder.create()
    input_buffer = tvm.tir.decl_buffer(name="input_buffer", shape=[1])
    not_device_context = tvm.tir.Var("not_device_context", dtype="handle")
    ib.emit(
        tvm.tir.call_extern("float32", "some_external_call", input_buffer.data, not_device_context),
    )
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([input_buffer, not_device_context], stmt))
    mod = tvm.tir.transform.Apply(lambda f: f.with_attr("target", tvm.target.Target("llvm")))(mod)
    mod = tvm.tir.transform.Apply(lambda f: f.with_attr("global_symbol", "main"))(mod)
    func = tvm.tir.transform.MakePackedAPI()(mod)["main"]
    num_args = func.params[2]
    # num_args assertion: both high-level arguments travel via the packed args.
    assert func.body.condition.a == num_args
    assert func.body.condition.b == 2
    # Arguments unpacking (assertions compare against the exact printed IR).
    assignment = _find_assignment(func.body, "arg.input_buffer")
    assert str(assignment.value) == "@tir.tvm_struct_get(args: handle, 0, 12, dtype=handle)"
    assignment = _find_assignment(func.body, "arg.not_device_context")
    assert str(assignment.value) == "@tir.tvm_struct_get(args: handle, 1, 12, dtype=handle)"
    assignment = _find_assignment(func.body, "input_buffer")
    assert (
        str(assignment.value) == "@tir.tvm_struct_get(arg.input_buffer: handle, 0, 1, dtype=handle)"
    )
    unpacked_input_buffer = assignment.var
    assignment = _find_assignment(func.body, "not_device_context")
    assert str(assignment.value) == "arg.not_device_context: handle"
    unpacked_not_device_context = assignment.var
    seq_stmt = _find_next(assignment, tvm.tir.SeqStmt)
    call = _find_next(seq_stmt[1], tvm.tir.Evaluate)
    call_extern = call.value
    # The external call must receive the unpacked values, not the packed args.
    assert call_extern.args[1] == unpacked_input_buffer
    assert call_extern.args[2] == unpacked_not_device_context
def test_device_api_context_implicit_resource_handle():
    """An argument named like a device-API context must be supplied via the
    implicit resource-handle parameter rather than the packed args array."""
    ib = tvm.tir.ir_builder.create()
    input_buffer = tvm.tir.decl_buffer(name="input_buffer", shape=[1])
    device_context = tvm.tir.Var("device_api_context", dtype="handle")
    ib.emit(
        tvm.tir.call_extern("float32", "some_external_call", input_buffer.data, device_context),
    )
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([input_buffer, device_context], stmt))
    mod = tvm.tir.transform.Apply(lambda f: f.with_attr("target", tvm.target.Target("llvm")))(mod)
    mod = tvm.tir.transform.Apply(lambda f: f.with_attr("global_symbol", "main"))(mod)
    func = tvm.tir.transform.MakePackedAPI()(mod)["main"]
    num_args = func.params[2]
    device_context_in_resource_handle = func.params[5]
    # num_args assertion: only ONE packed argument remains — the device
    # context is passed through the resource handle instead.
    assert func.body.condition.a == num_args
    assert func.body.condition.b == 1
    # Arguments unpacking (assertions compare against the exact printed IR).
    assignment = _find_assignment(func.body, "arg.input_buffer")
    assert str(assignment.value) == "@tir.tvm_struct_get(args: handle, 0, 12, dtype=handle)"
    assignment = _find_assignment(func.body, "input_buffer")
    assert (
        str(assignment.value) == "@tir.tvm_struct_get(arg.input_buffer: handle, 0, 1, dtype=handle)"
    )
    unpacked_input_buffer = assignment.var
    seq_stmt = _find_next(assignment, tvm.tir.SeqStmt)
    call = _find_next(seq_stmt[1], tvm.tir.Evaluate)
    call_extern = call.value
    assert call_extern.args[1] == unpacked_input_buffer
    assert call_extern.args[2] == device_context_in_resource_handle
if __name__ == "__main__":
    # Run every test in this file (the old guard only invoked test_makeapi),
    # matching the pytest.main convention used by the sibling test files.
    import pytest

    pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_transform_make_unpacked_api.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import tvm
from tvm import te
import numpy
@pytest.fixture
def mod_without_attrs():
    """IRModule wrapping an empty PrimFunc over one buffer, with no attrs set."""
    builder = tvm.tir.ir_builder.create()
    a_buf = tvm.tir.decl_buffer(name="A", shape=[1])
    body = builder.get()
    return tvm.IRModule.from_expr(tvm.tir.PrimFunc([a_buf], body))
@pytest.fixture
def mod(mod_without_attrs):
    """mod_without_attrs with "target" and "global_symbol" attributes attached."""

    def attach_attrs(func):
        # Attach both attributes in one Apply pass; the result is the same as
        # applying them one at a time.
        func = func.with_attr("target", tvm.target.Target("llvm"))
        return func.with_attr("global_symbol", "main")

    return tvm.tir.transform.Apply(attach_attrs)(mod_without_attrs)
def test_fails_if_not_global_symbol(mod_without_attrs):
    """MakeUnpackedAPI rejects a PrimFunc lacking the global_symbol attribute."""
    mod = tvm.tir.transform.Apply(lambda f: f.with_attr("target", tvm.target.Target("llvm")))(
        mod_without_attrs
    )
    with pytest.raises(tvm.TVMError, match="Expect PrimFunc to have the global_symbol attribute"):
        # The result was bound to an unused local before; the call is only
        # expected to raise.
        tvm.tir.transform.MakeUnpackedAPI()(mod)["main"]
def test_fails_if_no_target(mod_without_attrs):
    """MakeUnpackedAPI rejects a PrimFunc lacking the target attribute."""
    mod = tvm.tir.transform.Apply(lambda f: f.with_attr("global_symbol", "main"))(mod_without_attrs)
    with pytest.raises(tvm.TVMError, match="Require the target attribute"):
        # The result was bound to an unused local before; the call is only
        # expected to raise.
        tvm.tir.transform.MakeUnpackedAPI()(mod)["main"]
@tvm.testing.parametrize_targets("c", "llvm", "cuda")
def test_device_setup(mod, target, dev):
    """A function with buffer arguments is wrapped in device_id / device_type
    attribute nodes for the given target."""
    mod = tvm.tir.transform.Apply(lambda f: f.with_attr("target", tvm.target.Target(target)))(mod)
    func = tvm.tir.transform.MakeUnpackedAPI()(mod)["main"]
    assert len(func.params) == 1
    assert func.params[0].name == "A"
    # Outermost attr: device_id == 0.
    device_id_attr = func.body
    assert device_id_attr.node == "default"
    assert device_id_attr.attr_key == "device_id"
    assert device_id_attr.value == 0
    # Nested attr: device_type matches the requested device.
    device_type_attr = device_id_attr.body
    assert device_type_attr.node == "default"
    assert device_type_attr.attr_key == "device_type"
    assert device_type_attr.value == dev.device_type
def test_no_buffers_no_device_setup():
    """A function whose only argument is a bare pointer keeps its parameter."""
    builder = tvm.tir.ir_builder.create()
    ptr = builder.pointer("float32", name="A")
    body = builder.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([ptr], body))
    mod = tvm.tir.transform.Apply(lambda f: f.with_attr("target", tvm.target.Target("llvm")))(mod)
    mod = tvm.tir.transform.Apply(lambda f: f.with_attr("global_symbol", "main"))(mod)
    func = tvm.tir.transform.MakeUnpackedAPI()(mod)["main"]
    assert len(func.params) == 1
    assert func.params[0].name == "A"
def test_argument_mapping(mod):
    """A single buffer argument maps to a single low-level parameter."""
    func = tvm.tir.transform.MakeUnpackedAPI()(mod)["main"]
    assert len(func.params) == 1
    assert func.params[0].name == "A"
def test_argument_mapping_multiple():
    """Two distinct buffer arguments map to two low-level parameters."""
    builder = tvm.tir.ir_builder.create()
    buf_a = tvm.tir.decl_buffer(name="A", shape=[1])
    buf_b = tvm.tir.decl_buffer(name="B", shape=[1])
    body = builder.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([buf_a, buf_b], body))
    mod = tvm.tir.transform.Apply(lambda f: f.with_attr("target", tvm.target.Target("llvm")))(mod)
    mod = tvm.tir.transform.Apply(lambda f: f.with_attr("global_symbol", "main"))(mod)
    func = tvm.tir.transform.MakeUnpackedAPI()(mod)["main"]
    assert len(func.params) == 2
    assert func.params[0].name == "A"
    assert func.params[1].name == "B"
def test_argument_mapping_multiple_matching():
    """Using the same buffer for several parameters yields one low-level
    parameter per occurrence."""
    ib = tvm.tir.ir_builder.create()
    A = tvm.tir.decl_buffer(name="A", shape=[1])
    # NOTE: a second buffer "B" used to be declared here but was never used;
    # it has been removed.
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([A, A], stmt))
    mod = tvm.tir.transform.Apply(lambda f: f.with_attr("target", tvm.target.Target("llvm")))(mod)
    mod = tvm.tir.transform.Apply(lambda f: f.with_attr("global_symbol", "main"))(mod)
    f = tvm.tir.transform.MakeUnpackedAPI()(mod)["main"]
    assert len(f.params) == 2
    assert f.params[0].name == "A"
    assert f.params[1].name == "A"
def test_body():
    """Passing a buffer alongside its own data pointer keeps the buffer's name
    on both corresponding low-level parameters."""
    builder = tvm.tir.ir_builder.create()
    buf_a = tvm.tir.decl_buffer(name="A", shape=[1])
    buf_b = tvm.tir.decl_buffer(name="B", shape=[1])
    ptr_a = builder.buffer_ptr(buf_a)
    body = builder.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([buf_a, buf_b, ptr_a], body))
    mod = tvm.tir.transform.Apply(lambda f: f.with_attr("target", tvm.target.Target("llvm")))(mod)
    mod = tvm.tir.transform.Apply(lambda f: f.with_attr("global_symbol", "main"))(mod)
    func = tvm.tir.transform.MakeUnpackedAPI()(mod)["main"]
    assert len(func.params) == 3
    assert func.params[0].name == "A"
    assert func.params[1].name == "B"
    # The third parameter is A's underlying data var, hence also named "A".
    assert func.params[2].name == "A"
# Allow running this test file directly with `python <file>`.
if __name__ == "__main__":
    pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_transform_manifest_shared_memory_local_stage.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm.script import tir as T
# fmt: off
# pylint: disable=no-member,invalid-name,unused-variable,line-too-long,redefined-outer-name,unexpected-keyword-arg,too-many-nested-blocks
@tvm.script.ir_module
class MatmulBefore:
    # Input IR for ManifestSharedMemoryLocalStage: the A_shared/B_shared copy
    # blocks carry the "tir.manifest_shared_memory_local_stage" block
    # attribute, which asks the pass to stage each global->shared copy through
    # a local buffer (compare with MatmulAfter below).
    @T.prim_func
    def main(A: T.Buffer[(1024, 1024), "float32"], B: T.Buffer[(1024, 1024), "float32"], C: T.Buffer[(1024, 1024), "float32"]) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
        # body
        # with T.block("root")
        for blockIdx_y in T.thread_binding(32, thread="blockIdx.y"):
            for blockIdx_x in T.thread_binding(32, thread="blockIdx.x"):
                for threadIdx_y in T.thread_binding(2, thread="threadIdx.y"):
                    for threadIdx_x in T.thread_binding(2, thread="threadIdx.x"):
                        for k_0 in T.serial(32):
                            with T.block():
                                T.reads(A[blockIdx_y * 32 : blockIdx_y * 32 + 32, k_0 * 32 : k_0 * 32 + 32], B[k_0 * 32 : k_0 * 32 + 32, blockIdx_x * 32 : blockIdx_x * 32 + 32])
                                T.writes(C[blockIdx_y * 32 : blockIdx_y * 32 + 32, blockIdx_x * 32 : blockIdx_x * 32 + 32])
                                A_shared = T.alloc_buffer([1024, 1024], dtype="float32", scope="shared")
                                B_shared = T.alloc_buffer([1024, 1024], dtype="float32", scope="shared")
                                for ax0_ax1_fused_0 in T.serial(64):
                                    for ax0_ax1_fused_3 in T.vectorized(4):
                                        with T.block("A_shared"):
                                            T.reads(A[blockIdx_y * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) // 32, k_0 * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) % 32])
                                            T.writes(A_shared[blockIdx_y * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) // 32, k_0 * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) % 32])
                                            T.block_attr({"tir.manifest_shared_memory_local_stage":1})
                                            A_shared[blockIdx_y * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) // 32, k_0 * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) % 32] = A[blockIdx_y * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) // 32, k_0 * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) % 32]
                                for ax0_ax1_fused_0 in T.serial(64):
                                    for ax0_ax1_fused_3 in T.vectorized(4):
                                        with T.block("B_shared"):
                                            T.reads(B[k_0 * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) // 32, blockIdx_x * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) % 32])
                                            T.writes(B_shared[k_0 * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) // 32, blockIdx_x * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) % 32])
                                            T.block_attr({"tir.manifest_shared_memory_local_stage":1})
                                            B_shared[k_0 * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) // 32, blockIdx_x * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) % 32] = B[k_0 * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) // 32, blockIdx_x * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) % 32]
                                for k_1, i_2, j_2, k_2 in T.grid(2, 16, 16, 16):
                                    with T.block("C"):
                                        T.reads(A_shared[blockIdx_y * 32 + threadIdx_y * 16 + i_2, k_0 * 32 + k_1 * 16 + k_2], B_shared[k_0 * 32 + k_1 * 16 + k_2, blockIdx_x * 32 + threadIdx_x * 16 + j_2])
                                        T.writes(C[blockIdx_y * 32 + threadIdx_y * 16 + i_2, blockIdx_x * 32 + threadIdx_x * 16 + j_2])
                                        if k_0 * 32 + k_1 * 16 + k_2 == 0:
                                            C[blockIdx_y * 32 + threadIdx_y * 16 + i_2, blockIdx_x * 32 + threadIdx_x * 16 + j_2] = T.float32(0)
                                        C[blockIdx_y * 32 + threadIdx_y * 16 + i_2, blockIdx_x * 32 + threadIdx_x * 16 + j_2] = C[blockIdx_y * 32 + threadIdx_y * 16 + i_2, blockIdx_x * 32 + threadIdx_x * 16 + j_2] + A_shared[blockIdx_y * 32 + threadIdx_y * 16 + i_2, k_0 * 32 + k_1 * 16 + k_2] * B_shared[k_0 * 32 + k_1 * 16 + k_2, blockIdx_x * 32 + threadIdx_x * 16 + j_2]
@tvm.script.ir_module
class MatmulAfter:
    # Expected output of ManifestSharedMemoryLocalStage: each tagged copy in
    # MatmulBefore has been split into a global->local load followed by a
    # local->shared store, using the new A_shared_local/B_shared_local buffers.
    @T.prim_func
    def main(A: T.Buffer[(1024, 1024), "float32"], B: T.Buffer[(1024, 1024), "float32"], C: T.Buffer[(1024, 1024), "float32"]) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
        # body
        # with T.block("root")
        for blockIdx_y in T.thread_binding(32, thread="blockIdx.y"):
            for blockIdx_x in T.thread_binding(32, thread="blockIdx.x"):
                for threadIdx_y in T.thread_binding(2, thread="threadIdx.y"):
                    for threadIdx_x in T.thread_binding(2, thread="threadIdx.x"):
                        for k_0 in T.serial(32):
                            with T.block():
                                T.reads(A[blockIdx_y * 32 : blockIdx_y * 32 + 32, k_0 * 32 : k_0 * 32 + 32], B[k_0 * 32 : k_0 * 32 + 32, blockIdx_x * 32 : blockIdx_x * 32 + 32])
                                T.writes(C[blockIdx_y * 32 : blockIdx_y * 32 + 32, blockIdx_x * 32 : blockIdx_x * 32 + 32])
                                A_shared = T.alloc_buffer([1024, 1024], dtype="float32", scope="shared")
                                B_shared = T.alloc_buffer([1024, 1024], dtype="float32", scope="shared")
                                A_shared_local = T.alloc_buffer([64, 4], dtype="float32", scope="local")
                                B_shared_local = T.alloc_buffer([64, 4], dtype="float32", scope="local")
                                for ax0_ax1_fused_0 in T.serial(64):
                                    for ax0_ax1_fused_3 in T.vectorized(4):
                                        with T.block():
                                            T.reads(A[blockIdx_y * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) // 32, k_0 * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) % 32])
                                            T.writes(A_shared_local[ax0_ax1_fused_0, ax0_ax1_fused_3])
                                            A_shared_local[ax0_ax1_fused_0, ax0_ax1_fused_3] = A[blockIdx_y * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) // 32, k_0 * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) % 32]
                                for ax0_ax1_fused_0 in T.serial(64):
                                    for ax0_ax1_fused_3 in T.vectorized(4):
                                        with T.block("A_shared"):
                                            T.reads(A[blockIdx_y * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) // 32, k_0 * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) % 32])
                                            T.writes(A_shared[blockIdx_y * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) // 32, k_0 * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) % 32])
                                            A_shared[blockIdx_y * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) // 32, k_0 * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) % 32] = A_shared_local[ax0_ax1_fused_0, ax0_ax1_fused_3]
                                for ax0_ax1_fused_0 in T.serial(64):
                                    for ax0_ax1_fused_3 in T.vectorized(4):
                                        with T.block():
                                            T.reads(B[k_0 * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) // 32, blockIdx_x * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) % 32])
                                            T.writes(B_shared_local[ax0_ax1_fused_0, ax0_ax1_fused_3])
                                            B_shared_local[ax0_ax1_fused_0, ax0_ax1_fused_3] = B[k_0 * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) // 32, blockIdx_x * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) % 32]
                                for ax0_ax1_fused_0 in T.serial(64):
                                    for ax0_ax1_fused_3 in T.vectorized(4):
                                        with T.block("B_shared"):
                                            T.reads(B[k_0 * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) // 32, blockIdx_x * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) % 32])
                                            T.writes(B_shared[k_0 * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) // 32, blockIdx_x * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) % 32])
                                            B_shared[k_0 * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) // 32, blockIdx_x * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) % 32] = B_shared_local[ax0_ax1_fused_0, ax0_ax1_fused_3]
                                for k_1, i_2, j_2, k_2 in T.grid(2, 16, 16, 16):
                                    with T.block("C"):
                                        T.reads(A_shared[blockIdx_y * 32 + threadIdx_y * 16 + i_2, k_0 * 32 + k_1 * 16 + k_2], B_shared[k_0 * 32 + k_1 * 16 + k_2, blockIdx_x * 32 + threadIdx_x * 16 + j_2])
                                        T.writes(C[blockIdx_y * 32 + threadIdx_y * 16 + i_2, blockIdx_x * 32 + threadIdx_x * 16 + j_2])
                                        if k_0 * 32 + k_1 * 16 + k_2 == 0:
                                            C[blockIdx_y * 32 + threadIdx_y * 16 + i_2, blockIdx_x * 32 + threadIdx_x * 16 + j_2] = T.float32(0)
                                        C[blockIdx_y * 32 + threadIdx_y * 16 + i_2, blockIdx_x * 32 + threadIdx_x * 16 + j_2] = C[blockIdx_y * 32 + threadIdx_y * 16 + i_2, blockIdx_x * 32 + threadIdx_x * 16 + j_2] + A_shared[blockIdx_y * 32 + threadIdx_y * 16 + i_2, k_0 * 32 + k_1 * 16 + k_2] * B_shared[k_0 * 32 + k_1 * 16 + k_2, blockIdx_x * 32 + threadIdx_x * 16 + j_2]
# fmt: on
# pylint: enable=no-member,invalid-name,unused-variable,line-too-long,redefined-outer-name,unexpected-keyword-arg,too-many-nested-blocks
def _check(before, expected):
    """Run ManifestSharedMemoryLocalStage on *before* and require structural
    equality with *expected*."""
    actual = tvm.tir.transform.ManifestSharedMemoryLocalStage()(before)
    tvm.ir.assert_structural_equal(actual, expected)
def test_transform_matmul():
    """The tagged shared-memory copy blocks in MatmulBefore should be staged
    through local buffers, yielding MatmulAfter."""
    _check(MatmulBefore, MatmulAfter)
# Allow running this test file directly with `python <file>`.
if __name__ == "__main__":
    tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_transform_merge_dynamic_shared_memory_allocations.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
import tvm.testing
from tvm import te
from tvm.driver.build_module import schedule_to_module
from tvm.topi.math import cast
def run_passes(sch, args):
    """Lower a TE schedule and merge its dynamic shared memory allocations.

    Returns the IRModule after flattening, simplification, vectorization,
    storage rewrite and MergeDynamicSharedMemoryAllocations.
    """
    mod = schedule_to_module(sch, args)
    pipeline = [
        tvm.tir.transform.StorageFlatten(64),
        tvm.tir.transform.Simplify(),
        tvm.tir.transform.VectorizeLoop(),
        tvm.tir.transform.StorageRewrite(),
        tvm.tir.transform.MergeDynamicSharedMemoryAllocations(),
    ]
    return tvm.transform.Sequential(pipeline)(mod)
def verify_single_allocation(stmt, alloc_size=None):
    """Assert *stmt* contains exactly one "shared.dyn" Allocate node.

    If *alloc_size* is given, also assert the allocation's first extent equals
    it. Uses `is not None` so a (hypothetical) zero size would still be
    checked, unlike the previous truthiness test.
    """
    dyn_shared_allocs = []

    def visit(node):
        # Collect every allocation placed in the dynamic shared memory scope.
        if (
            isinstance(node, tvm.tir.Allocate)
            and node.buffer_var.type_annotation.storage_scope == "shared.dyn"
        ):
            dyn_shared_allocs.append(node)

    tvm.tir.stmt_functor.post_order_visit(stmt, visit)
    assert len(dyn_shared_allocs) == 1
    if alloc_size is not None:
        assert dyn_shared_allocs[0].extents[0] == alloc_size
@tvm.testing.requires_gpu
def test_matmul_dyn_shared():
    """Blocked matmul staging fp16 inputs and an fp32 result through
    "shared.dyn"; all three staging buffers must merge into one allocation."""
    n = 1024
    block = 16
    A = te.placeholder((n, n), name="A", dtype="float16")
    B = te.placeholder((n, n), name="B", dtype="float16")

    def syncthread():
        # Block-level barrier between the shared-memory write and read phases.
        return tvm.tir.Call(None, "tir.tvm_storage_sync", tvm.runtime.convert(["shared"]))

    def test_matmul_ir(A, B, C):
        ib = tvm.tir.ir_builder.create()
        tx = te.thread_axis("threadIdx.x")
        ty = te.thread_axis("threadIdx.y")
        bx = te.thread_axis("blockIdx.x")
        by = te.thread_axis("blockIdx.y")
        ib.scope_attr(tx, "thread_extent", block)
        ib.scope_attr(ty, "thread_extent", block)
        ib.scope_attr(bx, "thread_extent", n // block)
        ib.scope_attr(by, "thread_extent", n // block)
        A_sh = ib.allocate(A.dtype, (block, block), scope="shared.dyn", name="A_sh")  # fp16
        B_sh = ib.allocate(B.dtype, (block, block), scope="shared.dyn", name="B_sh")  # fp16
        # Create a dynamic shared memory for the accumulation.
        # This is for testing merging dynamic shared memory allocations with different data type.
        # In practice, there is no need to allocate a shared memory for C.
        C_local = ib.allocate(C.dtype, (1,), scope="local", name="C_local")
        C_sh = ib.allocate(C.dtype, (block, block), scope="shared.dyn", name="C_sh")  # fp32
        A_ptr = ib.buffer_ptr(A)
        B_ptr = ib.buffer_ptr(B)
        C_ptr = ib.buffer_ptr(C)
        C_local[0] = 0.0
        with ib.for_range(0, n // block, name="i") as i:
            A_sh[ty, tx] = A_ptr[by * block + ty, i * block + tx]
            B_sh[ty, tx] = B_ptr[i * block + ty, bx * block + tx]
            ib.emit(syncthread())
            with ib.for_range(0, block, name="k") as k:
                C_local[0] += cast(A_sh[ty, k] * B_sh[k, tx], "float32")
            ib.emit(syncthread())
        C_sh[ty, tx] = C_local[0]
        C_ptr[by * block + ty, bx * block + tx] = C_sh[ty, tx]
        return ib.get()

    C = te.extern(
        A.shape,
        [A, B],
        lambda ins, outs: test_matmul_ir(ins[0], ins[1], outs[0]),
        name="matmul",
        dtype="float32",
    )
    s = te.create_schedule(C.op)
    mod = run_passes(s, [A, B, C])
    # C can be allocated at the start of A, so we only need to allocate 2 block * block memory with dtype = float16
    # (two fp16 buffers of block*block elements = block * block * 4 bytes total).
    expected_alloc_size = block * block * 4
    verify_single_allocation(mod["main"].body, expected_alloc_size)

    def check_target(target):
        # Skip silently when the device is unavailable in this test session.
        if not tvm.testing.device_enabled(target):
            return
        fmatmul = tvm.build(s, [A, B, C], target)
        dev = tvm.device(target, 0)
        size = (n, n)
        a_np = np.random.uniform(size=size).astype(A.dtype)
        b_np = np.random.uniform(size=size).astype(B.dtype)
        a = tvm.nd.array(a_np, dev)
        b = tvm.nd.array(b_np, dev)
        c = tvm.nd.array(np.zeros(size, dtype=C.dtype), dev)
        fmatmul(a, b, c)
        np_ref = np.dot(a_np.astype("float32"), b_np.astype("float32"))
        tvm.testing.assert_allclose(c.numpy(), np_ref, 1e-4, 1e-4)

    for target in ["cuda", "nvptx"]:
        check_target(target)
@tvm.testing.requires_gpu
def test_dyn_shared_vectorized_store():
    """Test vectorized store into dynamic shared memory"""
    n = te.size_var("n")
    A = te.placeholder((n,), name="A", dtype="float16")
    B = te.placeholder((n,), name="B", dtype="float32")

    def test_device_ir(A, B, C):
        n = A.shape[0]
        ib = tvm.tir.ir_builder.create()
        values_per_thread = 4
        tx = te.thread_axis("threadIdx.x")
        ib.scope_attr(tx, "thread_extent", tvm.tir.indexdiv(n, values_per_thread))
        A_sh = ib.allocate(A.dtype, (n,), scope="shared.dyn")  # fp16
        B_sh = ib.allocate(B.dtype, (n,), scope="shared.dyn")  # fp32
        Aptr = ib.buffer_ptr(A)
        Bptr = ib.buffer_ptr(B)
        Cptr = ib.buffer_ptr(C)
        # Vectorized copy into the two dynamic shared buffers.
        with ib.for_range(0, values_per_thread, kind="vectorize") as i:
            A_sh[tx * values_per_thread + i] = Aptr[tx * values_per_thread + i]
            B_sh[tx * values_per_thread + i] = Bptr[tx * values_per_thread + i]
        with ib.for_range(0, values_per_thread) as i:
            Cptr[tx * values_per_thread + i] = (
                cast(A_sh[tx * values_per_thread + i], "float32") + B_sh[tx * values_per_thread + i]
            )
        return ib.get()

    C = te.extern(
        (n,),
        [A, B],
        lambda ins, outs: test_device_ir(ins[0], ins[1], outs[0]),
        name="vadd",
        dtype="float32",
    )
    s = te.create_schedule(C.op)
    mod = run_passes(s, [A, B, C])
    # The fp16 and fp32 shared buffers must be merged into one allocation.
    verify_single_allocation(mod["main"].body)

    def check_target(target):
        # Skip silently when the device is unavailable in this test session.
        if not tvm.testing.device_enabled(target):
            return
        fadd = tvm.build(s, [A, B, C], target)
        dev = tvm.device(target, 0)
        for n in [512, 1024]:
            a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)
            b = tvm.nd.array(np.random.uniform(size=n).astype(B.dtype), dev)
            c = tvm.nd.array(np.zeros((n,), dtype=C.dtype), dev)
            fadd(a, b, c)
            tvm.testing.assert_allclose(
                c.numpy(), a.numpy().astype("float32") + b.numpy(), 1e-4, 1e-4
            )

    for target in ["cuda", "nvptx"]:
        check_target(target)
@tvm.testing.requires_gpu
def test_dyn_shared_reuse_and_merge():
    """Statically sized dynamic shared buffers plus one dynamically sized
    buffer must still collapse into a single merged allocation."""
    n = 64
    A = te.placeholder((n,), name="A", dtype="float32")
    B = te.placeholder((n,), name="B", dtype="float32")
    C = te.placeholder((te.size_var("n_dyn"),), name="C", dtype="float32")

    def test_device_ir(A, B, C, D):
        ib = tvm.tir.ir_builder.create()
        tx = te.thread_axis("threadIdx.x")
        ib.scope_attr(tx, "thread_extent", n)
        A_sh = ib.allocate(A.dtype, (n,), scope="shared.dyn", name="A_sh")
        B_sh = ib.allocate(B.dtype, (n,), scope="shared.dyn", name="B_sh")
        C_sh = ib.allocate(C.dtype, (C.shape[0],), scope="shared.dyn", name="C_sh")
        Aptr = ib.buffer_ptr(A)
        Bptr = ib.buffer_ptr(B)
        Cptr = ib.buffer_ptr(C)
        Dptr = ib.buffer_ptr(D)
        A_sh[tx] = Aptr[tx]
        Dptr[tx] = A_sh[tx]
        B_sh[tx] = Bptr[tx]
        Dptr[tx] += B_sh[tx]
        C_sh[tx] = Cptr[tx]  # C cannot reuse other buffers since its size is dynamic
        Dptr[tx] += C_sh[tx]
        return ib.get()

    D = te.extern(
        (n,),
        [A, B, C],
        lambda ins, outs: test_device_ir(ins[0], ins[1], ins[2], outs[0]),
        name="vadd",
        dtype="float32",
    )
    s = te.create_schedule(D.op)
    mod = run_passes(s, [A, B, C, D])
    # merged allocation
    # allocate(buf_dyn_shmem: Pointer(shared.dyn uint8), uint8, [((n_dyn*4) + 256)]);
    verify_single_allocation(mod["main"].body)

    def check_target(target):
        # Skip silently when the device is unavailable in this test session.
        if not tvm.testing.device_enabled(target):
            return
        fadd = tvm.build(s, [A, B, C, D], target)
        dev = tvm.device(target, 0)
        a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)
        b = tvm.nd.array(np.random.uniform(size=n).astype(B.dtype), dev)
        c = tvm.nd.array(np.random.uniform(size=n).astype(C.dtype), dev)
        d = tvm.nd.array(np.zeros((n,), dtype=D.dtype), dev)
        fadd(a, b, c, d)
        tvm.testing.assert_allclose(d.numpy(), a.numpy() + b.numpy() + c.numpy(), 1e-4, 1e-4)

    for target in ["cuda", "nvptx"]:
        check_target(target)
def test_dyn_shared_more_dtype():
    """Merge dynamic shared memory allocations of int8, int16 and int32.

    (The previous docstring was copy-pasted from the vectorized-store test and
    did not describe this test.)
    """
    n = 512
    A = te.placeholder((n,), name="A", dtype="int8")
    B = te.placeholder((n,), name="B", dtype="int16")

    def test_device_ir(A, B, C):
        n = A.shape[0]
        ib = tvm.tir.ir_builder.create()
        tx = te.thread_axis("threadIdx.x")
        ib.scope_attr(tx, "thread_extent", n)
        A_sh = ib.allocate(A.dtype, (n,), scope="shared.dyn")  # i8
        B_sh = ib.allocate(B.dtype, (n,), scope="shared.dyn")  # i16
        C_sh = ib.allocate(C.dtype, (n,), scope="shared.dyn")  # i32
        Aptr = ib.buffer_ptr(A)
        Bptr = ib.buffer_ptr(B)
        Cptr = ib.buffer_ptr(C)
        A_sh[tx] = Aptr[tx]
        B_sh[tx] = Bptr[tx]
        C_sh[tx] = cast(A_sh[tx], "int32") + cast(B_sh[tx], "int32")
        Cptr[tx] = C_sh[tx]
        return ib.get()

    C = te.extern(
        (n,),
        [A, B],
        lambda ins, outs: test_device_ir(ins[0], ins[1], outs[0]),
        name="vadd",
        dtype="int32",
    )
    s = te.create_schedule(C.op)
    mod = run_passes(s, [A, B, C])
    # Expected merged allocation size: n * 4 bytes.
    verify_single_allocation(mod["main"].body, n * 4)

    def check_target(target):
        # Skip silently when the device is unavailable in this test session.
        if not tvm.testing.device_enabled(target):
            return
        fadd = tvm.build(s, [A, B, C], target)
        dev = tvm.device(target, 0)
        a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)
        b = tvm.nd.array(np.random.uniform(size=n).astype(B.dtype), dev)
        c = tvm.nd.array(np.zeros((n,), dtype=C.dtype), dev)
        fadd(a, b, c)
        tvm.testing.assert_allclose(c.numpy(), a.numpy().astype("float32") + b.numpy(), 1e-4, 1e-4)

    for target in ["cuda", "nvptx"]:
        check_target(target)
# Allow running this test file directly; each test is invoked explicitly.
if __name__ == "__main__":
    test_matmul_dyn_shared()
    test_dyn_shared_vectorized_store()
    test_dyn_shared_reuse_and_merge()
    test_dyn_shared_more_dtype()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_transform_narrow_datatype.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import relay, te
from tvm.driver.build_module import schedule_to_module
from tvm.script import tir as T
from tvm.tir import const
def lower_stmt(params, stmt, target_bits):
    """Wrap *stmt* in a PrimFunc, run NarrowDataType, and return the new body."""
    prim_func = tvm.tir.PrimFunc(params, stmt)
    narrowed = tvm.tir.transform.NarrowDataType(target_bits)(tvm.IRModule.from_expr(prim_func))
    return narrowed["main"].body
def lower_sch(sch, args, target_bits, extra_passes=None):
    """Lower a TE schedule, run optional extra passes, then NarrowDataType.

    Returns the body of the "main" PrimFunc after narrowing. Raises ValueError
    if any entry of *args* is not a te.Tensor (preserving the old validation).
    """
    # The old implementation also built a decl_buffer and a binds dict per
    # tensor, but never used them; only the type validation is kept.
    for arg in args:
        if not isinstance(arg, te.tensor.Tensor):
            raise ValueError("args must be Tensor, Buffer or Var")
    sch = sch.normalize()
    mod = schedule_to_module(sch, args)
    mod = tvm.tir.transform.StorageFlatten(64)(mod)
    if extra_passes:
        for p in extra_passes:
            mod = p(mod)
    return tvm.tir.transform.NarrowDataType(target_bits)(mod)["main"].body
def test_basic():
    """NarrowDataType should narrow nested loop variables to the smallest
    dtype (bounded by target_bits) that can hold the loop extents."""

    def check(m, n, target_bits, target_dtype):
        ib = tvm.tir.ir_builder.create()
        Ab = tvm.tir.decl_buffer([m * n], name="A")
        A = ib.buffer_ptr(Ab)
        Bb = tvm.tir.decl_buffer([m * n], name="B")
        B = ib.buffer_ptr(Bb)
        with ib.for_range(0, m, name="i") as i:
            with ib.for_range(0, n, name="j") as j:
                B[i * n + j] = A[i * n + j] + 1
        stmt = ib.get()
        stmt = lower_stmt([Ab, Bb], stmt, target_bits)
        # Both the outer and inner loop variables must get the expected dtype.
        assert stmt.loop_var.dtype == target_dtype
        assert stmt.body.loop_var.dtype == target_dtype

    # const shape
    # i32 -> i32
    check(2, 2, 32, "int32")
    # i64 -> i32
    check(const(2, dtype="int64"), const(2, dtype="int64"), 32, "int32")
    # 2**16 * 2**16 == 2**32 does not fit in int32, so int64 must be kept.
    check(const(2**16, dtype="int64"), const(2**16, dtype="int64"), 32, "int64")
    # i32 -> i16
    check(2, 2, 16, "int16")
    check(2**10, 2**10, 16, "int32")
    # symbolic shape
    check(te.size_var(name="m", dtype="int32"), te.size_var(name="n", dtype="int32"), 32, "int32")
    check(te.size_var(name="m", dtype="int64"), te.size_var(name="n", dtype="int64"), 32, "int64")
def test_thread_axis():
    """Thread-extent annotations should be narrowed just like loop variables."""

    def check(m, n, target_bits, target_dtype):
        ib = tvm.tir.ir_builder.create()
        Ab = tvm.tir.decl_buffer([m * n], name="A")
        A = ib.buffer_ptr(Ab)
        Bb = tvm.tir.decl_buffer([m * n], name="B")
        B = ib.buffer_ptr(Bb)
        bx = te.thread_axis("blockIdx.x")
        tx = te.thread_axis("threadIdx.x")
        ib.scope_attr(bx, "thread_extent", m)
        ib.scope_attr(tx, "thread_extent", n)
        B[bx * n + tx] = A[bx * n + tx] + 1
        stmt = ib.get()
        stmt = lower_stmt([Ab, Bb], stmt, target_bits)
        # Both thread-extent attribute vars must get the expected dtype.
        assert stmt.node.var.dtype == target_dtype
        assert stmt.body.node.var.dtype == target_dtype

    # i32 -> i32
    check(2, 32, target_bits=32, target_dtype="int32")
    # i64 -> i32
    check(const(2, dtype="int64"), const(32, dtype="int64"), target_bits=32, target_dtype="int32")
    # 2**30 * 32 == 2**35 does not fit in int32, so int64 must be kept.
    check(
        const(2**30, dtype="int64"),
        const(32, dtype="int64"),
        target_bits=32,
        target_dtype="int64",
    )
    # i32 -> i16
    check(2, 32, target_bits=16, target_dtype="int16")
    check(2**14, 32, target_bits=16, target_dtype="int32")
def test_thread_axis_2():
    """Regression test: lowering a module that mixes int64 thread-bound loops
    with an int32 cast in the block bindings must not crash NarrowDataType."""
    # fmt: off
    @tvm.script.ir_module
    class Before:
        @T.prim_func
        def main(T_reshape: T.Buffer[(1, 12, 384, 384), "float32"], placeholder_1: T.Buffer[(T.int64(1), T.int64(12), T.int64(384), 384), "bool"], T_where: T.Buffer[(T.int64(1), T.int64(12), T.int64(384), 384), "float32"]) -> None:
            # function attr dict
            T.func_attr({"global_symbol": "main", "tir.noalias": True})
            # body
            # with T.block("root")
            for i0_i1_i2_i3_fused_1 in T.thread_binding(T.int64(256), thread="blockIdx.x"):
                for i0_i1_i2_i3_fused_2 in T.thread_binding(T.int64(1024), thread="threadIdx.x"):
                    for i0_i1_i2_i3_fused_0 in T.serial(T.int64(7)):
                        with T.block("T_where"):
                            ax0 = T.axis.spatial(T.int64(1), T.int64(0))
                            ax1 = T.axis.spatial(T.int64(12), ((i0_i1_i2_i3_fused_0 * T.int64(256) + i0_i1_i2_i3_fused_1) * T.int64(1024) + i0_i1_i2_i3_fused_2) % T.int64(1769472) // T.int64(147456))
                            ax2 = T.axis.spatial(T.int64(384), ((i0_i1_i2_i3_fused_0 * T.int64(256) + i0_i1_i2_i3_fused_1) * T.int64(1024) + i0_i1_i2_i3_fused_2) % T.int64(147456) // T.int64(384))
                            # ax3 mixes int64 arithmetic with an explicit cast to int32.
                            ax3 = T.axis.spatial(384, T.cast(((i0_i1_i2_i3_fused_0 * T.int64(256) + i0_i1_i2_i3_fused_1) * T.int64(1024) + i0_i1_i2_i3_fused_2) % T.int64(384), "int32"))
                            T.where((i0_i1_i2_i3_fused_0 * T.int64(256) + i0_i1_i2_i3_fused_1) * T.int64(1024) + i0_i1_i2_i3_fused_2 < T.int64(1769472))
                            T.reads(placeholder_1[ax0, ax1, ax2, ax3], T_reshape[ax0, ax1, ax2, ax3])
                            T.writes(T_where[ax0, ax1, ax2, ax3])
                            T_where[ax0, ax1, ax2, ax3] = T.Select(T.cast(placeholder_1[ax0, ax1, ax2, ax3], "int32") != 0, T.float32(-1000000000), T_reshape[ax0, ax1, ax2, ax3])
    # fmt: on
    # TODO(@junrushao1994): make this test more "unit" after the new TVMScript printer/parser lands
    # Success criterion is simply that lowering completes without an error.
    tvm.lower(Before)
def test_multilanes():
    """NarrowDataType on vectorized (multi-lane) buffers: lanes do not
    affect narrowing of the scalar loop index."""

    def check(m, lanes, target_bits, target_dtype):
        ib = tvm.tir.ir_builder.create()
        # Buffers hold float32 vectors of `lanes` elements each.
        Ab = tvm.tir.decl_buffer((m,), dtype="float32x{}".format(lanes), name="A")
        A = ib.buffer_ptr(Ab)
        Bb = tvm.tir.decl_buffer((m,), dtype="float32x{}".format(lanes), name="B")
        B = ib.buffer_ptr(Bb)
        with ib.for_range(0, m, name="i", dtype=m.dtype) as i:
            B[i] = A[i] + 1
        A[0] = B[1]
        stmt = ib.get()
        stmt = lower_stmt([Ab, Bb], stmt, target_bits)
        # The body is a SeqStmt; the first element is the loop over i.
        assert stmt.seq[0].loop_var.dtype == target_dtype

    # i32 -> i32
    check(const(2**10, dtype="int32"), 2, target_bits=32, target_dtype="int32")
    # i64 -> i32
    check(const(2**10, dtype="int64"), 2, target_bits=32, target_dtype="int32")
    check(const(2**32, dtype="int64"), 2, target_bits=32, target_dtype="int64")
    # i32 -> i16
    check(const(2**10, dtype="int32"), 2, target_bits=16, target_dtype="int16")
    check(const(2**16, dtype="int32"), 2, target_bits=16, target_dtype="int32")
def test_reduce():
    """NarrowDataType on a reduction axis: the reduce loop variable narrows
    when the extent fits in target_bits."""

    def check(m, target_bits, target_dtype):
        A = te.placeholder((m,), name="A", dtype="float32")
        k = te.reduce_axis((0, m), "k")
        # Scalar output: reduce A over k.
        B = te.compute((), lambda *idx: te.sum(A[k], axis=k), name="B")
        s = te.create_schedule(B.op)
        stmt = lower_sch(s, [A, B], target_bits)
        # stmt[1] is the reduction loop (stmt[0] is the init).
        assert stmt[1].loop_var.dtype == target_dtype

    # i32 -> i32
    check(const(64, dtype="int32"), 32, "int32")
    # i64 -> i32
    check(const(64, dtype="int64"), 32, "int32")
    # i32 -> i16
    check(const(64, dtype="int32"), 16, "int16")
    check(const(2**16, dtype="int32"), 16, "int32")
    # symbolic
    check(te.var("n", dtype="int32"), 32, "int32")
    check(te.var("n", dtype="int64"), 32, "int64")
def test_slice():
    """NarrowDataType must consider every index expression: if the index
    into B (twice as large as A's) can overflow int32, both loops stay int64."""

    def check(m, n, target_bits, target_dtype):
        # The index may overflow in B, while not in A
        ib = tvm.tir.ir_builder.create()
        Ab = tvm.tir.decl_buffer([m * n], name="A")
        A = ib.buffer_ptr(Ab)
        Bb = tvm.tir.decl_buffer([m * n * 2], name="B")
        B = ib.buffer_ptr(Bb)
        with ib.for_range(0, m, name="i") as i:
            with ib.for_range(0, n, name="j") as j:
                # B is read at stride 2, doubling the maximum index value.
                A[i * n + j] = B[i * 2 * n + 2 * j] + 1
        stmt = ib.get()
        stmt = lower_stmt([Ab, Bb], stmt, target_bits)
        assert stmt.loop_var.dtype == target_dtype
        assert stmt.body.loop_var.dtype == target_dtype

    # The maximum index is (2**15 * 2**15 - 1) * 2 <= 2**31 - 1
    check(const(2**15, "int64"), const(2**15, "int64"), target_bits=32, target_dtype="int32")
    # The maximum index is (2**15 * 2**15 - 1 + 2**15) * 2 > 2**31 - 1
    check(
        const(2**15, "int64"), const((2**15 + 1), "int64"), target_bits=32, target_dtype="int64"
    )
def test_relay_basic():
    """End-to-end narrowing through the Relay TE compiler: a broadcast add
    lowered from Relay narrows its loop variables when the shapes fit."""
    engine = relay.backend.te_compiler.get()

    def check(shapex, shapey, target_bits, target_dtype):
        # Build a Relay module computing z = x + y (with broadcasting).
        x = relay.var("x", shape=shapex)
        y = relay.var("y", shape=shapey)
        z = relay.add(x, y)
        func = relay.Function([x, y], z)
        mod = tvm.IRModule.from_expr(func)
        mod = relay.transform.InferType()(mod)
        func = mod["main"]
        z = engine.lower(func, "llvm")
        stmt = lower_sch(z.schedule, tuple(z.inputs) + tuple(z.outputs), 32)
        # outer loop
        assert stmt.loop_var.dtype == target_dtype
        # inner loop
        if len(shapex) > 1 or len(shapey) > 1:
            assert stmt.body.loop_var.dtype == target_dtype

    # 2**16 * (2**15 + 1) overflows int32 -> loops stay int64.
    check(
        (const(2**16, "int64"), const(2**15 + 1, "int64")),
        (1, const(2**15 + 1, "int64")),
        target_bits=32,
        target_dtype="int64",
    )
    # 2**16 * 2**15 == 2**31; the index range [0, 2**31) fits -> int32.
    check(
        (const(2**16, "int64"), const(2**15, "int64")),
        (1, const(2**15, "int64")),
        target_bits=32,
        target_dtype="int32",
    )
    check(
        (const(2**31, "int64"),), (const(2**31, "int64"),), target_bits=32, target_dtype="int32"
    )
    # One past 2**31 elements no longer fits in int32.
    check(
        (const(2**31 + 1, "int64"),),
        (const(2**31 + 1, "int64"),),
        target_bits=32,
        target_dtype="int64",
    )
def test_relay_take():
    """Narrowing of gather indices: relay.take's index expression narrows
    to int32 only when the constant index value fits."""
    engine = relay.backend.te_compiler.get()

    def check(shape, index, target_bits, target_dtype):
        x = relay.var("x", shape=shape)
        y = relay.op.take(x, indices=index)
        func = relay.Function([x], y)
        mod = tvm.IRModule.from_expr(func)
        mod = relay.transform.InferType()(mod)
        func = mod["main"]
        z = engine.lower(func, "llvm")
        stmt = lower_sch(z.schedule, tuple(z.inputs) + tuple(z.outputs), 32)
        # Inspect the dtype of the index used in the generated load.
        assert stmt.value.indices[0].dtype == target_dtype

    # Index 0 fits in int32.
    check(
        (const(2**16, "int64"), const(2**15 + 1, "int64")),
        relay.const(0, dtype="int64"),
        target_bits=32,
        target_dtype="int32",
    )
    # Index 2**31 exceeds int32 range, so the index stays int64.
    check(
        (const(2**16, "int64"), const(2**15 + 1, "int64")),
        relay.const(2**31, dtype="int64"),
        target_bits=32,
        target_dtype="int64",
    )
def test_ramp_dtype_consistency():
    """
    for (i :int64, (int64)0, (int64)4) {
      A[ramp(i*(int64)2, (int64)1, 2)] = cast(int64, 2 ** 31 - 1) * i;
    }
    The infer result:
      base: int64 -> int64 (since i is involved in another int64 expr)
      stride: int64 -> int32

    Thus ramp should still use int64 for both stride and base after rewrite.
    """
    n = tvm.tir.IntImm("int64", 4)
    m = tvm.tir.IntImm("int64", 2)
    # The cast keeps i tied to an int64 expression, forcing the ramp base wide.
    A = te.compute((n, m), lambda i, j: tvm.tir.Cast("int64", 2**31 - 1) * i, name="A")
    s = te.create_schedule(A.op)
    s[A].vectorize(A.op.axis[1])
    # Success criterion: lowering with VectorizeLoop completes without error.
    lower_sch(s, [A], 32, extra_passes=[tvm.tir.transform.VectorizeLoop()])
def test_condition():
    """NarrowDataType rewrites int64 arithmetic inside conditions
    (if-guards and if_then_else) to int32 when the values fit."""

    # Input: all loop extents, bounds checks, and indices are int64.
    @T.prim_func
    def before(A: T.Buffer[(128,), "float32"], B: T.Buffer[(130,), "float32"]):
        for i, j in T.grid(T.int64(2), T.int64(65)):
            if i * T.int64(65) + j >= T.int64(0) and i * T.int64(65) + j < T.int64(128):
                A[i * T.int64(65) + j] = 0.0
        for i, j in T.grid(T.int64(2), T.int64(65)):
            B[i * T.int64(65) + j] = T.if_then_else(
                i * T.int64(65) + j >= T.int64(0) and i * T.int64(65) + j < T.int64(128),
                A[i * T.int64(65) + j],
                0.0,
                dtype="float32",
            )

    # Expected: identical structure with every int64 narrowed to int32.
    @T.prim_func
    def expected_after(A: T.Buffer[128, "float32"], B: T.Buffer[130, "float32"]):
        for i, j in T.grid(2, 65):
            if i * 65 + j >= 0 and i * 65 + j < 128:
                A[i * 65 + j] = T.float32(0)
        for i, j in T.grid(2, 65):
            B[i * 65 + j] = T.if_then_else(
                i * 65 + j >= 0 and i * 65 + j < 128, A[i * 65 + j], T.float32(0), dtype="float32"
            )

    after = tvm.tir.transform.NarrowDataType(32)(tvm.IRModule.from_expr(before))["main"]
    tvm.ir.assert_structural_equal(after, expected_after)
if __name__ == "__main__":
    # Run every test in this file when executed directly (outside pytest).
    test_basic()
    test_thread_axis()
    test_thread_axis_2()
    test_multilanes()
    test_reduce()
    test_slice()
    test_relay_basic()
    test_relay_take()
    test_ramp_dtype_consistency()
    test_condition()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_transform_plan_update_buffer_allocation_location.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm import te
from tvm.script import tir as T
def _check(original, transformed):
    """Apply PlanAndUpdateBufferAllocationLocation to *original* and assert
    the result is structurally equal to *transformed*."""
    module = tvm.IRModule.from_expr(original)
    rewritten = tvm.tir.transform.PlanAndUpdateBufferAllocationLocation()(module)
    tvm.ir.assert_structural_equal(rewritten["main"], transformed)
# Input: B is allocated at function scope but is only produced and consumed
# inside the i0 loop, so the pass should be able to sink its allocation.
@T.prim_func
def element_func(a: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, (16, 16))
    C = T.match_buffer(c, (16, 16))
    B = T.alloc_buffer((16, 16))
    for i0 in range(0, 16):
        for j0 in range(0, 16):
            with T.block():
                i, j = T.axis.remap("SS", [i0, j0])
                B[i, j] = A[i, j] + 1.0
        for j0 in range(0, 16):
            with T.block():
                i, j = T.axis.remap("SS", [i0, j0])
                C[i, j] = B[i, j] * 2.0
# Expected output for element_func: B's allocation has been sunk into a new
# per-i_0 opaque block that covers both the producer and consumer loops.
@T.prim_func
def transformed_element_func(a: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [16, 16])
    C = T.match_buffer(c, [16, 16])
    for i_0 in range(0, 16):
        with T.block():
            T.reads([A[i_0, 0:16]])
            T.writes([C[i_0, 0:16]])
            B = T.alloc_buffer([16, 16])
            for j_0 in T.serial(0, 16):
                with T.block():
                    i, j = T.axis.remap("SS", [i_0, j_0])
                    B[i, j] = A[i, j] + 1.0
            for j_0 in T.serial(0, 16):
                with T.block():
                    i, j = T.axis.remap("SS", [i_0, j_0])
                    C[i, j] = B[i, j] * 2.0
# Input: B, C and D are all allocated inside the (i0, j0, k0) reduction block,
# though C is used by both inner loop nests and D only by the second one.
@T.prim_func
def original_func() -> None:
    A = T.alloc_buffer((128, 128), "float32")
    for i0, j0 in T.grid(128, 128):
        with T.block():
            i, j = T.axis.remap("SS", [i0, j0])
            A[i, j] = T.float32(0)
    for i0, j0, k0 in T.grid(32, 32, 32):
        with T.block():
            i, j, k = T.axis.remap("SSR", [i0, j0, k0])
            B = T.alloc_buffer((128, 128), "float32")
            C = T.alloc_buffer((128, 128), "float32")
            D = T.alloc_buffer((128, 128), "float32")
            if k == 0:
                for ii, jj in T.grid(4, 4):
                    B[i * 4 + ii, j * 4 + jj] = A[i * 4 + ii, j * 4 + jj]
            for ii, jj in T.grid(4, 4):
                for kk in range(0, 4):
                    B[i * 4 + ii, j * 4 + jj] += C[i * 4 + ii, k * 4 + kk]
                for kk in range(0, 4):
                    B[i * 4 + ii, j * 4 + jj] += (
                        D[j * 4 + jj, k * 4 + kk] * C[i * 4 + ii, k * 4 + kk]
                    )
# Expected output for original_func: each of B, C, D is re-allocated in the
# innermost block that still covers all of its uses (B at the reduction block,
# C inside the (ii, jj) block, D inside the second kk block).
@T.prim_func
def transformed_func() -> None:
    A = T.alloc_buffer([128, 128])
    for i0, j0 in T.grid(128, 128):
        with T.block():
            i, j = T.axis.remap("SS", [i0, j0])
            A[i, j] = T.float32(0)
    for i0, j0, k0 in T.grid(32, 32, 32):
        with T.block():
            i, j, k = T.axis.remap("SSR", [i0, j0, k0])
            B = T.alloc_buffer([128, 128])
            if k == 0:
                for ii, jj in T.grid(4, 4):
                    B[i * 4 + ii, j * 4 + jj] = A[i * 4 + ii, j * 4 + jj]
            for ii, jj in T.grid(4, 4):
                with T.block(""):
                    T.reads([B[((i * 4) + ii), ((j * 4) + jj)]])
                    T.writes([B[((i * 4) + ii), ((j * 4) + jj)]])
                    C = T.alloc_buffer([128, 128])
                    for kk in T.serial(0, 4):
                        B[((i * 4) + ii), ((j * 4) + jj)] = (
                            B[((i * 4) + ii), ((j * 4) + jj)] + C[((i * 4) + ii), ((k * 4) + kk)]
                        )
                    for kk in T.serial(0, 4):
                        with T.block(""):
                            T.reads(
                                [
                                    B[((i * 4) + ii), ((j * 4) + jj)],
                                    C[((i * 4) + ii), ((k * 4) + kk)],
                                ]
                            )
                            T.writes([B[((i * 4) + ii), ((j * 4) + jj)]])
                            D = T.alloc_buffer([128, 128])
                            B[((i * 4) + ii), ((j * 4) + jj)] = B[
                                ((i * 4) + ii), ((j * 4) + jj)
                            ] + (
                                D[((j * 4) + jj), ((k * 4) + kk)]
                                * C[((i * 4) + ii), ((k * 4) + kk)]
                            )
# Input: C is allocated at function scope and accessed only through nested
# match_buffer views (C0 over a row, C1 over a scalar element).
@T.prim_func
def match_buffer_func() -> None:
    C = T.alloc_buffer((128, 128))
    for i in range(128):
        with T.block():
            vi = T.axis.S(128, i)
            C0 = T.match_buffer(C[vi, 0:128], (128))
            for j in range(128):
                with T.block():
                    jj = T.axis.S(128, j)
                    C1 = T.match_buffer(C0[jj], ())
                    C1[()] = 0
# Expected output for match_buffer_func: the allocation of C moves into the
# per-i block; the match_buffer views stay where they were.
@T.prim_func
def transformed_match_buffer_func() -> None:
    for i in range(0, 128):
        with T.block():
            vi = T.axis.S(128, i)
            C = T.alloc_buffer((128, 128))
            C0 = T.match_buffer(C[vi, 0:128], (128))
            for j in range(128):
                with T.block():
                    jj = T.axis.S(128, j)
                    C1 = T.match_buffer(C0[jj], ())
                    C1[()] = 0
# Input: A_cache is filled through an opaque call_extern (declared reads/writes
# on the enclosing block) and then read back by a normal loop.
@T.prim_func
def opaque_access(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [1024])
    B = T.match_buffer(b, [1024])
    A_cache = T.alloc_buffer([1024])
    for i in T.serial(0, 8):
        with T.block():
            vi = T.axis.S(8, i)
            with T.block():
                v = T.axis.S(8, vi)
                T.reads([A[(v * 128) : ((v * 128) + 128)]])
                T.writes([A_cache[(v * 128) : ((v * 128) + 128)]])
                T.evaluate(
                    T.call_extern(
                        "test",
                        A_cache.data,
                        (v * 128),
                        128,
                        A.data,
                        (v * 128),
                        128,
                        dtype="float32",
                    )
                )
            for j in T.serial(0, 128):
                with T.block():
                    v = T.axis.S(1024, vi * 128 + j)
                    T.reads([A_cache[v]])
                    T.writes([B[v]])
                    B[v] = A_cache[v]
# Expected output for opaque_access: A_cache's allocation moves into the
# per-i block, which covers both the opaque writer and the copy-out loop.
@T.prim_func
def transformed_opaque_access(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [1024])
    B = T.match_buffer(b, [1024])
    for i in T.serial(0, 8):
        with T.block():
            vi = T.axis.S(8, i)
            T.reads(A[vi * 128 : vi * 128 + 128])
            T.writes(B[vi * 128 : vi * 128 + 128])
            A_cache = T.alloc_buffer([1024])
            with T.block():
                v = T.axis.S(8, vi)
                T.reads([A[v * 128 : v * 128 + 128]])
                T.writes([A_cache[v * 128 : v * 128 + 128]])
                T.evaluate(
                    T.call_extern(
                        "test", A_cache.data, v * 128, 128, A.data, v * 128, 128, dtype="float32"
                    )
                )
            for j in T.serial(0, 128):
                with T.block():
                    v = T.axis.S(1024, vi * 128 + j)
                    T.reads([A_cache[v]])
                    T.writes([B[v]])
                    B[v] = A_cache[v]
def test_elementwise():
    """B's allocation should sink into the outer-loop block of element_func."""
    before, after = element_func, transformed_element_func
    _check(before, after)
def test_locate_buffer_allocation():
    """Each of B/C/D should land in the innermost block covering its uses."""
    before, after = original_func, transformed_func
    _check(before, after)
def test_match_buffer_allocation():
    """Allocation relocation must respect nested match_buffer views."""
    before, after = match_buffer_func, transformed_match_buffer_func
    _check(before, after)
def test_opaque_access():
    """Opaque call_extern accesses are covered when relocating A_cache."""
    before, after = opaque_access, transformed_opaque_access
    _check(before, after)
def test_lower_te():
    """The pass must be a no-op on modules lowered from TE schedules."""
    placeholder = te.placeholder((1,))
    computed = te.compute((1,), lambda i: placeholder[i] + 2)
    sched = te.create_schedule(computed.op)
    orig_mod = tvm.driver.build_module.schedule_to_module(sched, [placeholder, computed])
    transformed = tvm.tir.transform.PlanAndUpdateBufferAllocationLocation()(orig_mod)
    # PlanAndUpdateBufferAllocationLocation should do nothing on TE
    tvm.ir.assert_structural_equal(transformed, orig_mod)
def test_loop_carried_dependency():
    """The buffer allocation should be above opaque iter var's loop scopes
    such that buffer accesses with loop carried dependencies are covered."""

    # Input: block b1 reads C at vj and vj-1, where vj is an opaque binding of j.
    @T.prim_func
    def before(A: T.Buffer[(8, 8, 8), "int32"], B: T.Buffer[(8, 8, 8), "int32"]):
        C = T.alloc_buffer([8, 8, 8], dtype="int32")
        for i in T.serial(8):
            for j in T.serial(8):
                for k in T.serial(8):
                    with T.block("b0"):
                        vi, vj, vk = T.axis.remap("SSS", [i, j, k])
                        C[vi, vj, vk] = A[vi, vj, vk] + 1
                for k in T.serial(8):
                    with T.block("b1"):
                        vi, vk = T.axis.remap("SS", [i, k])
                        vj = T.axis.opaque(8, j)
                        B[vi, vj, vk] = C[vi, vj, vk] + T.if_then_else(
                            0 < vj, C[vi, vj - 1, vk], 0, dtype="int32"
                        )

    # Expected: C is allocated in the per-i block, above the opaque j loop,
    # so the cross-iteration read C[vi, vj - 1, vk] stays in scope.
    @T.prim_func
    def after(A: T.Buffer[(8, 8, 8), "int32"], B: T.Buffer[(8, 8, 8), "int32"]) -> None:
        for i in T.serial(8):
            with T.block():
                T.reads(A[i, 0:8, 0:8])
                T.writes(B[i, 0:8, 0:8])
                C = T.alloc_buffer([8, 8, 8], dtype="int32")
                for j in T.serial(8):
                    for k in T.serial(8):
                        with T.block("b0"):
                            vi, vj, vk = T.axis.remap("SSS", [i, j, k])
                            C[vi, vj, vk] = A[vi, vj, vk] + 1
                    for k in T.serial(8):
                        with T.block("b1"):
                            vi, vk = T.axis.remap("SS", [i, k])
                            vj = T.axis.opaque(8, j)
                            B[vi, vj, vk] = C[vi, vj, vk] + T.if_then_else(
                                0 < vj, C[vi, vj - 1, vk], 0, dtype="int32"
                            )

    _check(before, after)
def test_1D_cascade_op_rolling_buffer():
    """The intermediate buffer must be allocated above rolling buffer's rolling loop,
    which is marked as opaque in consumer block's iter mappings."""

    # Input: B is a 6-element rolling window indexed modulo 6; P2 binds i
    # opaquely, so B must not be sunk below the i loop.
    @T.prim_func
    def before(A: T.Buffer[(4, 16), "int32"], C: T.Buffer[(4, 8), "int32"]):
        B = T.alloc_buffer((4, 6), "int32")
        for c in T.serial(4):
            for i in T.serial(0, 2):
                for j in T.serial(0, 6):
                    for k in T.serial(3):
                        with T.block("P1"):
                            T.where(i < 1 or j >= 2)
                            cc, vi, vj, vk = T.axis.remap("SSSR", [c, i, j, k])
                            if vk == 0:
                                B[cc, T.floormod(vi * 4 + vj, 6)] = 0
                            B[cc, T.floormod(vi * 4 + vj, 6)] = (
                                B[cc, T.floormod(vi * 4 + vj, 6)] + A[cc, vi * 4 + vj + vk]
                            )
                for j in T.serial(0, 4):
                    for k in T.serial(3):
                        with T.block("P2"):
                            vi = T.axis.opaque(2, i)
                            cc, vj, vk = T.axis.remap("SSR", [c, j, k])
                            if vk == 0:
                                C[cc, vi * 4 + vj] = 0
                            C[cc, vi * 4 + vj] = (
                                C[cc, vi * 4 + vj] + B[cc, T.floormod(vi * 4 + vj + vk, 6)]
                            )

    # Expected: B is allocated in the per-c block (above the rolling i loop),
    # not inside the i loop where the opaque binding would break the window.
    @T.prim_func
    def after(A: T.Buffer[(4, 16), "int32"], C: T.Buffer[(4, 8), "int32"]):
        for c in T.serial(4):
            with T.block():
                T.reads(A[c, 0:12], C[c, 0:8])
                T.writes(C[c, 0:8])
                B = T.alloc_buffer([4, 6], dtype="int32")
                for i in T.serial(2):
                    for j, k in T.grid(6, 3):
                        with T.block("P1"):
                            T.where(i < 1 or j >= 2)
                            cc, vi, vj, vk = T.axis.remap("SSSR", [c, i, j, k])
                            if vk == 0:
                                B[cc, (vi * 4 + vj) % 6] = 0
                            B[cc, (vi * 4 + vj) % 6] = (
                                B[cc, (vi * 4 + vj) % 6] + A[cc, vi * 4 + vj + vk]
                            )
                    for j, k in T.grid(4, 3):
                        with T.block("P2"):
                            vi = T.axis.opaque(2, i)
                            cc, vj, vk = T.axis.remap("SSR", [c, j, k])
                            if vk == 0:
                                C[cc, vi * 4 + vj] = 0
                            C[cc, vi * 4 + vj] = C[cc, vi * 4 + vj] + B[cc, (vi * 4 + vj + vk) % 6]

    _check(before, after)
if __name__ == "__main__":
    # Delegate discovery and execution of the tests above to tvm.testing.
    tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_transform_prim_func_pass.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm import te
def test_prim_func_pass():
    """A prim_func_pass may replace the transformed function wholesale."""

    @tvm.tir.transform.prim_func_pass(opt_level=1)
    class TestReplaceFunc:
        """Simple test function to replace one argument to another."""

        def __init__(self, new_func):
            self.new_func = new_func

        def transform_function(self, func, mod, ctx):
            # Discard the incoming function and return the stored one.
            return self.new_func

    size = te.var("x")
    extra = te.var("y")
    buf = tvm.tir.decl_buffer((size,), "float32")
    body = tvm.tir.LetStmt(size, 10, tvm.tir.Evaluate(size + 1))
    original = tvm.tir.PrimFunc([size, extra, buf], body)
    replacement = tvm.tir.PrimFunc([size, extra, buf], tvm.tir.Evaluate(0))
    module = tvm.IRModule({"main": original})
    module = TestReplaceFunc(replacement)(module)
    # After the pass, "main" must carry the replacement body.
    assert tvm.ir.structural_equal(module["main"].body, replacement.body)
def test_cow_pass():
    """Copy-on-write: when the module is moved into the pass pipeline and each
    function has a unique reference, passes mutate in place, so object
    identities (hashes) are preserved."""

    def fapply(f):
        # Inside the pass the function must be uniquely owned (use count 1),
        # which is what makes in-place mutation legal.
        assert tvm.testing.object_use_count(f) == 1
        return f

    pidentity = tvm.tir.transform.Apply(fapply)
    x = te.var("x")
    func = tvm.tir.PrimFunc([x], tvm.tir.Evaluate(x)).with_attr("target_bits", 32)
    func_hash = func.__hash__()
    mod = tvm.IRModule({"main": func})
    # Drop the local reference so the module holds the only one.
    del func
    # copy on write
    mod_hash = mod.__hash__()
    # _move() relinquishes ownership, allowing the pipeline to reuse the objects.
    mod = tvm.transform.Sequential([pidentity, tvm.tir.transform.NarrowDataType(32)])(mod._move())
    assert mod_hash == mod.__hash__()
    assert func_hash == mod["main"].__hash__()
if __name__ == "__main__":
    # Run both tests when executed directly (outside pytest).
    test_cow_pass()
    test_prim_func_pass()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_transform_profiling_instr.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm import te
from tvm.ir.module import IRModule
from tvm.script import tir as T
import numpy
# Baseline PassContext config shared by all lightweight-profiling (LWP) tests:
# enable instrumentation, skip function-level profiling, reset intrinsic ids.
default_lwp_test_config = {
    "tir.instrument_lwp": True,
    "tir.lwp_disable_func_prof": True,
    "tir.reset_start_id": True,
}
# Input: two sibling (k, l) loop nests nested under a shared (i, j) grid.
@T.prim_func
def input1(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, (8, 8, 128), dtype="int32")
    B = T.match_buffer(b, (8, 8, 128), dtype="int32")
    C = T.match_buffer(c, (8, 8, 128), dtype="int32")
    for i, j in T.grid(8, 8):
        for k, l in T.grid(8, 16):
            with T.block("B"):
                vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
                B[vi, vj, vk * 16 + vl] = A[vi, vj, vk * 16 + vl] * 2
        for k, l in T.grid(8, 16):
            with T.block("C"):
                vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
                C[vi, vj, vk * 16 + vl] = B[vi, vj, vk * 16 + vl] * 2
# Input: two sibling j loops under i, each containing two sibling (k, l) nests.
@T.prim_func
def input2(a: T.handle, b: T.handle, c: T.handle, d: T.handle) -> None:
    A = T.match_buffer(a, (8, 8, 128), dtype="int32")
    B = T.match_buffer(b, (8, 8, 128), dtype="int32")
    C = T.match_buffer(c, (8, 8, 128), dtype="int32")
    D = T.match_buffer(d, (8, 8, 128), dtype="int32")
    for i in T.serial(0, 8):
        for j in T.serial(0, 8):
            for k, l in T.grid(8, 16):
                with T.block("B"):
                    vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
                    B[vi, vj, vk * 16 + vl] = A[vi, vj, vk * 16 + vl] * 2
            for k, l in T.grid(8, 16):
                with T.block("B"):
                    vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
                    B[vi, vj, vk * 16 + vl] = B[vi, vj, vk * 16 + vl] * D[vi, vj, vk * 16 + vl]
        for j in T.serial(0, 8):
            for k, l in T.grid(8, 16):
                with T.block("C"):
                    vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
                    C[vi, vj, vk * 16 + vl] = B[vi, vj, vk * 16 + vl] + 2
            for k, l in T.grid(8, 16):
                with T.block("B"):
                    vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
                    C[vi, vj, vk * 16 + vl] = C[vi, vj, vk * 16 + vl] * D[vi, vj, vk * 16 + vl]
# Input: same cascading structure as input2 but with some loops parallelized
# (the first j loop and both k loops of the second half).
@T.prim_func
def input3(a: T.handle, b: T.handle, c: T.handle, d: T.handle) -> None:
    A = T.match_buffer(a, (8, 8, 128), dtype="int32")
    B = T.match_buffer(b, (8, 8, 128), dtype="int32")
    C = T.match_buffer(c, (8, 8, 128), dtype="int32")
    D = T.match_buffer(d, (8, 8, 128), dtype="int32")
    for i in T.serial(0, 8):
        for j in T.parallel(0, 8):
            for k in T.serial(0, 8):
                for l in T.serial(0, 16):
                    with T.block("B"):
                        vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
                        B[vi, vj, vk * 16 + vl] = A[vi, vj, vk * 16 + vl] * 2
            for k in T.serial(0, 8):
                for l in T.serial(0, 16):
                    with T.block("B"):
                        vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
                        B[vi, vj, vk * 16 + vl] = B[vi, vj, vk * 16 + vl] * D[vi, vj, vk * 16 + vl]
        for j in T.serial(0, 8):
            for k in T.parallel(0, 8):
                for l in T.serial(0, 16):
                    with T.block("C"):
                        vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
                        C[vi, vj, vk * 16 + vl] = B[vi, vj, vk * 16 + vl] + 2
            for k in T.parallel(0, 8):
                for l in T.serial(0, 16):
                    with T.block("B"):
                        vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
                        C[vi, vj, vk * 16 + vl] = C[vi, vj, vk * 16 + vl] * D[vi, vj, vk * 16 + vl]
# Expected for input1 under the default config: only the sibling (k, l) nests
# are wrapped with start/end profile intrinsics (ids 3 and 5).
@T.prim_func
def test1_expected_output(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, (8, 8, 128), dtype="int32")
    B = T.match_buffer(b, (8, 8, 128), dtype="int32")
    C = T.match_buffer(c, (8, 8, 128), dtype="int32")
    for i, j in T.grid(8, 8):
        T.evaluate(T.start_profile_intrinsic(3, dtype="handle"))
        for k, l in T.grid(8, 16):
            with T.block("B"):
                vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
                B[vi, vj, vk * 16 + vl] = A[vi, vj, vk * 16 + vl] * 2
        T.evaluate(T.end_profile_intrinsic(3, dtype="handle"))
        T.evaluate(T.start_profile_intrinsic(5, dtype="handle"))
        for k, l in T.grid(8, 16):
            with T.block("C"):
                vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
                C[vi, vj, vk * 16 + vl] = B[vi, vj, vk * 16 + vl] * 2
        T.evaluate(T.end_profile_intrinsic(5, dtype="handle"))
# Expected output wrapping only the i loop (id 1) and j loop (id 2); the
# inner-most (k, l) nests are left uninstrumented.
@T.prim_func
def test2_expected_output(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, (8, 8, 128), dtype="int32")
    B = T.match_buffer(b, (8, 8, 128), dtype="int32")
    C = T.match_buffer(c, (8, 8, 128), dtype="int32")
    T.evaluate(T.start_profile_intrinsic(1, dtype="handle"))
    for i in T.serial(0, 8):
        T.evaluate(T.start_profile_intrinsic(2, dtype="handle"))
        for j in T.serial(0, 8):
            for k in T.serial(0, 8):
                for l in T.serial(0, 16):
                    with T.block("B"):
                        vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
                        B[vi, vj, vk * 16 + vl] = A[vi, vj, vk * 16 + vl] * 2
            for k in T.serial(0, 8):
                for l in T.serial(0, 16):
                    with T.block("C"):
                        vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
                        C[vi, vj, vk * 16 + vl] = B[vi, vj, vk * 16 + vl] * 2
        T.evaluate(T.end_profile_intrinsic(2, dtype="handle"))
    T.evaluate(T.end_profile_intrinsic(1, dtype="handle"))
# Expected for input1 with lwp_max_depth=3 and instr_siblings=False:
# loops are instrumented down to depth 3 (ids 1, 2, 3, 5), excluding the
# inner-most l loops.
@T.prim_func
def test3_expected_output(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, (8, 8, 128), dtype="int32")
    B = T.match_buffer(b, (8, 8, 128), dtype="int32")
    C = T.match_buffer(c, (8, 8, 128), dtype="int32")
    T.evaluate(T.start_profile_intrinsic(1, dtype="handle"))
    for i in T.serial(0, 8):
        T.evaluate(T.start_profile_intrinsic(2, dtype="handle"))
        for j in T.serial(0, 8):
            T.evaluate(T.start_profile_intrinsic(3, dtype="handle"))
            for k in T.serial(0, 8):
                for l in T.serial(0, 16):
                    with T.block("B"):
                        vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
                        B[vi, vj, vk * 16 + vl] = A[vi, vj, vk * 16 + vl] * 2
            T.evaluate(T.end_profile_intrinsic(3, dtype="handle"))
            T.evaluate(T.start_profile_intrinsic(5, dtype="handle"))
            for k in T.serial(0, 8):
                for l in T.serial(0, 16):
                    with T.block("C"):
                        vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
                        C[vi, vj, vk * 16 + vl] = B[vi, vj, vk * 16 + vl] * 2
            T.evaluate(T.end_profile_intrinsic(5, dtype="handle"))
        T.evaluate(T.end_profile_intrinsic(2, dtype="handle"))
    T.evaluate(T.end_profile_intrinsic(1, dtype="handle"))
# Expected for input2 under the default config: every loop that has a sibling
# (the two j loops and the four (k, l) nests) is wrapped with profile
# intrinsics; the loop ids are 2/3/5 and 7/8/10.
@T.prim_func
def test4_expected_output(a: T.handle, b: T.handle, c: T.handle, d: T.handle) -> None:
    A = T.match_buffer(a, (8, 8, 128), dtype="int32")
    B = T.match_buffer(b, (8, 8, 128), dtype="int32")
    C = T.match_buffer(c, (8, 8, 128), dtype="int32")
    D = T.match_buffer(d, (8, 8, 128), dtype="int32")
    for i in T.serial(0, 8):
        T.evaluate(T.start_profile_intrinsic(2, dtype="handle"))
        for j in T.serial(0, 8):
            T.evaluate(T.start_profile_intrinsic(3, dtype="handle"))
            for k, l in T.grid(8, 16):
                with T.block("B"):
                    vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
                    B[vi, vj, vk * 16 + vl] = A[vi, vj, vk * 16 + vl] * 2
            T.evaluate(T.end_profile_intrinsic(3, dtype="handle"))
            T.evaluate(T.start_profile_intrinsic(5, dtype="handle"))
            for k, l in T.grid(8, 16):
                with T.block("B"):
                    vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
                    B[vi, vj, vk * 16 + vl] = B[vi, vj, vk * 16 + vl] * D[vi, vj, vk * 16 + vl]
            T.evaluate(T.end_profile_intrinsic(5, dtype="handle"))
        T.evaluate(T.end_profile_intrinsic(2, dtype="handle"))
        T.evaluate(T.start_profile_intrinsic(7, dtype="handle"))
        for j in T.serial(0, 8):
            T.evaluate(T.start_profile_intrinsic(8, dtype="handle"))
            for k, l in T.grid(8, 16):
                with T.block("C"):
                    vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
                    C[vi, vj, vk * 16 + vl] = B[vi, vj, vk * 16 + vl] + 2
            T.evaluate(T.end_profile_intrinsic(8, dtype="handle"))
            T.evaluate(T.start_profile_intrinsic(10, dtype="handle"))
            for k, l in T.grid(8, 16):
                with T.block("B"):
                    vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
                    C[vi, vj, vk * 16 + vl] = C[vi, vj, vk * 16 + vl] * D[vi, vj, vk * 16 + vl]
            T.evaluate(T.end_profile_intrinsic(10, dtype="handle"))
        T.evaluate(T.end_profile_intrinsic(7, dtype="handle"))
# Expected for input1 with lwp_max_depth=3, lwp_min_height=2 and
# instr_siblings=False: only the i loop (id 1) and j loop (id 2) are wrapped.
@T.prim_func
def test5_expected_output(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, (8, 8, 128), dtype="int32")
    B = T.match_buffer(b, (8, 8, 128), dtype="int32")
    C = T.match_buffer(c, (8, 8, 128), dtype="int32")
    T.evaluate(T.start_profile_intrinsic(1, dtype="handle"))
    for i in T.serial(0, 8):
        T.evaluate(T.start_profile_intrinsic(2, dtype="handle"))
        for j in T.serial(0, 8):
            for k in T.serial(0, 8):
                for l in T.serial(0, 16):
                    with T.block("B"):
                        vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
                        B[vi, vj, vk * 16 + vl] = A[vi, vj, vk * 16 + vl] * 2
            for k in T.serial(0, 8):
                for l in T.serial(0, 16):
                    with T.block("C"):
                        vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
                        C[vi, vj, vk * 16 + vl] = B[vi, vj, vk * 16 + vl] * 2
        T.evaluate(T.end_profile_intrinsic(2, dtype="handle"))
    T.evaluate(T.end_profile_intrinsic(1, dtype="handle"))
# Expected for input3 under the default config: parallel loops are wrapped
# like serial ones — the parallel j loop gets id 2, the second-half j loop
# id 7, and the parallel k loops ids 8 and 10.
@T.prim_func
def test6_expected_output(a: T.handle, b: T.handle, c: T.handle, d: T.handle) -> None:
    A = T.match_buffer(a, (8, 8, 128), dtype="int32")
    B = T.match_buffer(b, (8, 8, 128), dtype="int32")
    C = T.match_buffer(c, (8, 8, 128), dtype="int32")
    D = T.match_buffer(d, (8, 8, 128), dtype="int32")
    for i in T.serial(0, 8):
        T.evaluate(T.start_profile_intrinsic(2, dtype="handle"))
        for j in T.parallel(0, 8):
            for k in T.serial(0, 8):
                for l in T.serial(0, 16):
                    with T.block("B"):
                        vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
                        B[vi, vj, vk * 16 + vl] = A[vi, vj, vk * 16 + vl] * 2
            for k in T.serial(0, 8):
                for l in T.serial(0, 16):
                    with T.block("B"):
                        vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
                        B[vi, vj, vk * 16 + vl] = B[vi, vj, vk * 16 + vl] * D[vi, vj, vk * 16 + vl]
        T.evaluate(T.end_profile_intrinsic(2, dtype="handle"))
        T.evaluate(T.start_profile_intrinsic(7, dtype="handle"))
        for j in T.serial(0, 8):
            T.evaluate(T.start_profile_intrinsic(8, dtype="handle"))
            for k in T.parallel(0, 8):
                for l in T.serial(0, 16):
                    with T.block("C"):
                        vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
                        C[vi, vj, vk * 16 + vl] = B[vi, vj, vk * 16 + vl] + 2
            T.evaluate(T.end_profile_intrinsic(8, dtype="handle"))
            T.evaluate(T.start_profile_intrinsic(10, dtype="handle"))
            for k in T.parallel(0, 8):
                for l in T.serial(0, 16):
                    with T.block("B"):
                        vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
                        C[vi, vj, vk * 16 + vl] = C[vi, vj, vk * 16 + vl] * D[vi, vj, vk * 16 + vl]
            T.evaluate(T.end_profile_intrinsic(10, dtype="handle"))
        T.evaluate(T.end_profile_intrinsic(7, dtype="handle"))
# By default, only loops with siblings are instrumented.
def test1():
    """Default config: only loops that have sibling loops are instrumented."""
    with tvm.transform.PassContext(config=default_lwp_test_config):
        instrumented = tvm.tir.transform.InstrumentProfileIntrinsics()(
            tvm.IRModule.from_expr(input1)
        )
    tvm.ir.assert_structural_equal(instrumented["main"], test1_expected_output)
# By default, only loops with siblings are instrumented. Here, 'lwp_max_depth'
# doesn't have any effect unless 'instr_siblings' is set to False (ex: test3).
def test2():
    """Setting 'tir.lwp_max_depth' alone changes nothing while sibling-only
    instrumentation remains enabled: output matches test1."""
    cfg = dict(default_lwp_test_config, **{"tir.lwp_max_depth": 3})
    with tvm.transform.PassContext(config=cfg):
        instrumented = tvm.tir.transform.InstrumentProfileIntrinsics()(
            tvm.IRModule.from_expr(input1)
        )
    tvm.ir.assert_structural_equal(instrumented["main"], test1_expected_output)
# test3: Use 'lwp_max_depth' to instrument loops upto a certain depth. This flag
# is effective only when 'instr_siblings' is disabled. Also, note that inner-most
# loops are always excluded from instrumentation unless overwritten using
# 'lwp_min_height' (ex: test5)
def test3():
    """With 'tir.instr_siblings' disabled, 'tir.lwp_max_depth' limits how deep
    instrumentation goes (inner-most loops still excluded)."""
    cfg = dict(default_lwp_test_config, **{"tir.lwp_max_depth": 3, "tir.instr_siblings": False})
    with tvm.transform.PassContext(config=cfg):
        instrumented = tvm.tir.transform.InstrumentProfileIntrinsics()(
            tvm.IRModule.from_expr(input1)
        )
    tvm.ir.assert_structural_equal(instrumented["main"], test3_expected_output)
# test4: Exercise the default configuration on input2's cascading loop nests.
# NOTE(review): this test runs with default_lwp_test_config and sets no
# 'lwp_min_height'; the original mention of that flag appears stale — confirm.
def test4():
    """Default config applied to input2's cascading sibling loop nests."""
    module = tvm.IRModule.from_expr(input2)
    with tvm.transform.PassContext(config=default_lwp_test_config):
        module = tvm.tir.transform.InstrumentProfileIntrinsics()(module)
    tvm.ir.assert_structural_equal(module["main"], test4_expected_output)
# test5: Use both 'lwp_min_height' and 'lwp_max_depth' (with sibling-only
# instrumentation disabled).
def test5():
    """Combine 'tir.lwp_max_depth' and 'tir.lwp_min_height' with sibling-only
    mode disabled."""
    cfg = dict(
        default_lwp_test_config,
        **{"tir.lwp_max_depth": 3, "tir.instr_siblings": False, "tir.lwp_min_height": 2},
    )
    with tvm.transform.PassContext(config=cfg):
        instrumented = tvm.tir.transform.InstrumentProfileIntrinsics()(
            tvm.IRModule.from_expr(input1)
        )
    tvm.ir.assert_structural_equal(instrumented["main"], test5_expected_output)
# test6: Tests instrumentation for the parallel loops
def test6():
    """Instrumentation of parallel loops (input3) under the default config."""
    with tvm.transform.PassContext(config=default_lwp_test_config):
        mod = tvm.IRModule.from_expr(input3)
        mod = tvm.tir.transform.InstrumentProfileIntrinsics()(mod)
    tvm.ir.assert_structural_equal(mod["main"], test6_expected_output)
if __name__ == "__main__":
    tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_transform_remove_assume.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm import TVMError
from tvm.script import tir as T
class BaseBeforeAfter(tvm.testing.CompareBeforeAfter):
    """Shared base: run RemoveAssume on `before` and compare against `expected`."""

    @tvm.testing.fixture
    def transform(self):
        # The pass under test for every subclass in this file.
        return tvm.tir.transform.RemoveAssume()
class TestRemoveAssume(BaseBeforeAfter):
    """Remove any instance of T.assume"""
    def before(A: T.Buffer[1, "int32"]):
        T.evaluate(T.assume(A[0] == 5))
        A[0] = 10
    def expected(A: T.Buffer[1, "int32"]):
        A[0] = 10
class TestRemoveAssumeLoop(BaseBeforeAfter):
    """Loops containing only T.assume should be removed"""
    def before(A: T.Buffer[16, "int32"]):
        # The first loop body consists solely of assumptions, so the whole
        # loop should disappear after the pass.
        for i in T.serial(16):
            T.evaluate(T.assume(A[i] == 0))
        for i in T.serial(16):
            A[i] = 10
    def expected(A: T.Buffer[16, "int32"]):
        for i in T.serial(16):
            A[i] = 10
if __name__ == "__main__":
    tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_transform_remove_no_op.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
from tvm.script import tir as T
import tvm.testing
def nop():
    """Return a no-op statement (`Evaluate(0)`) for building test IR."""
    return tvm.tir.Evaluate(0)
def test_remove_no_op():
    """RemoveNoOp collapses side-effect-free statements, no-op sequences,
    and zero-extent loops."""
    i = te.var("i")
    j = te.var("j")
    k = te.var("k")
    m = te.var("m")
    n = te.var("n")
    dtype = "int64"
    Ab = tvm.tir.decl_buffer((n,), dtype)
    # A triple-nested loop whose body only evaluates expressions: pure no-op.
    stmt = tvm.tir.For(
        i,
        0,
        4,
        tvm.tir.ForKind.SERIAL,
        tvm.tir.For(
            j,
            0,
            n,
            tvm.tir.ForKind.SERIAL,
            tvm.tir.For(
                k,
                0,
                m,
                tvm.tir.ForKind.SERIAL,
                tvm.tir.IfThenElse((i * m + j + k < n), tvm.tir.Evaluate(m), tvm.tir.Evaluate(n)),
            ),
        ),
    )
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([Ab], stmt))
    ret = tvm.tir.transform.RemoveNoOp()(mod)["main"].body
    assert isinstance(ret, tvm.tir.Evaluate)
    # A SeqStmt mixing no-ops and one real store: only the store survives.
    store = tvm.tir.BufferStore(Ab, tvm.tir.BufferLoad(Ab, [i]) + 1, [i + 1])
    stmt2 = tvm.tir.SeqStmt([nop(), tvm.tir.SeqStmt([store, nop()])])
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([Ab], stmt2))
    ret = tvm.tir.transform.RemoveNoOp()(mod)["main"].body
    assert ret == store
    # remove zero extent loop
    stmt3 = tvm.tir.For(i, 0, 0, tvm.tir.ForKind.SERIAL, store)
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([Ab], stmt3))
    ret = tvm.tir.transform.RemoveNoOp()(mod)["main"].body
    assert isinstance(ret, tvm.tir.Evaluate)
def test_remove_no_op_with_invalid_extent():
    """A loop whose extent is provably non-positive (i - 20 for i in [0,16))
    is removed entirely."""
    @T.prim_func
    def main(A: T.Buffer[(16), "int32"], B: T.Buffer[(16), "int32"]) -> None:
        for i in T.serial(16):
            for j in T.serial(i - 20):
                B[i] = A[i] + j
    mod = tvm.ir.module.IRModule.from_expr(main)
    ret = tvm.tir.transform.RemoveNoOp()(mod)["main"].body
    assert isinstance(ret, tvm.tir.Evaluate)
if __name__ == "__main__":
    tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_transform_remove_undef.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm.script import tir as T
from tvm import TVMError
class BaseBeforeAfter(tvm.testing.CompareBeforeAfter):
    """Shared base: run RemoveStoreUndef on `before` and compare to `expected`."""

    @tvm.testing.fixture
    def transform(self):
        # The pass under test for every subclass in this file.
        return tvm.tir.transform.RemoveStoreUndef()
class TestRemoveStoreUndef(BaseBeforeAfter):
    """Remove a store whose value is T.undef()"""
    def before(A: T.Buffer[1, "int32"]):
        A[0] = T.undef(dtype="int32")
    def expected(A: T.Buffer[1, "int32"]):
        T.evaluate(0)
class TestRemoveStoreUndefExpression(BaseBeforeAfter):
    """Expressions containing T.undef() are removed"""
    def before(A: T.Buffer[1, "int32"]):
        A[0] = 1 + T.undef(dtype="int32")
    def expected(A: T.Buffer[1, "int32"]):
        T.evaluate(0)
class TestKeepOtherCallNodes(BaseBeforeAfter):
    """Expressions containing other CallNodes are not removed"""
    def before(A: T.Buffer[1, "int32"], n: T.int32):
        A[0] = T.shift_left(n, 1, dtype="int32")
    # Unchanged: only T.undef() triggers removal, not arbitrary calls.
    expected = before
class TestRemoveLetUndef(BaseBeforeAfter):
    """Remove a store whose value is bound to T.undef()"""
    def before(A: T.Buffer[1, "int32"]):
        val = T.undef(dtype="int32")
        A[0] = val
    def expected(A: T.Buffer[1, "int32"]):
        T.evaluate(0)
class TestRaiseErrorForUndefAsStoreIndices(BaseBeforeAfter):
    """Use of T.undef() as buffer indices is an error"""
    def before(A: T.Buffer[1, "int32"]):
        val = T.undef(dtype="int32")
        A[val] = 5
    # The pass is expected to raise rather than produce output.
    expected = TVMError
class TestRaiseErrorForUndefAsLoadIndices(BaseBeforeAfter):
    """Use of T.undef() as buffer indices is an error
    Even though this occurs as part of the BufferStore's value, the
    T.undef() may not appear in a buffer's indices.
    """
    def before(A: T.Buffer[1, "int32"], B: T.Buffer[1, "int32"]):
        B[0] = A[T.undef(dtype="int32")]
    # The pass is expected to raise rather than produce output.
    expected = TVMError
if __name__ == "__main__":
    tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_transform_remove_weight_layout_rewrite_block.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import tvm
from tvm.ir.module import IRModule
from tvm.script import tir as T
from tvm.tir.function import PrimFunc
def _check(before, expect):
    """Apply RemoveWeightLayoutRewriteBlock to *before* and assert the
    result is structurally equal to *expect*.

    Either argument may be a bare PrimFunc (wrapped into an IRModule
    under the key "main") or an IRModule.
    """

    def _as_module(func_or_mod):
        # Normalize a bare PrimFunc into a module; pass modules through.
        if isinstance(func_or_mod, PrimFunc):
            return IRModule({"main": func_or_mod})
        return func_or_mod

    transformed = tvm.tir.transform.RemoveWeightLayoutRewriteBlock()(_as_module(before))
    tvm.ir.assert_structural_equal(transformed, _as_module(expect))
def test_matmul():
    """RemoveWeightLayoutRewriteBlock should hoist the layout rewrite of B
    out of the function: the "layout_rewrite" block becomes a no-op and the
    parameter B takes the rewritten (16, 4, 4) layout directly."""
    @T.prim_func
    def before(
        A: T.Buffer[(16, 16), "float32"],
        B: T.Buffer[(16, 16), "float32"],
        C: T.Buffer[(16, 16), "float32"],
    ) -> None:
        T.func_attr({"layout_free_buffers": [1]})
        B_ = T.alloc_buffer([16, 4, 4], dtype="float32")
        for i0_o, i1_o in T.grid(16, 16):
            with T.block("layout_rewrite"):
                i0, i1 = T.axis.remap("SS", [i0_o, i1_o])
                T.reads(B[i0, i1])
                T.writes(B_[i1, i0 // 4, i0 % 4])
                T.block_attr({"meta_schedule.layout_rewrite_preproc": True})
                B_[i1, i0 // 4, i0 % 4] = B[i0, i1]
        for i0, j, k0, i1, k1 in T.grid(4, 16, 4, 4, 4):
            with T.block("matmul"):
                vi = T.axis.spatial(16, i0 * 4 + i1)
                vj = T.axis.spatial(16, j)
                vk = T.axis.reduce(16, k0 * 4 + k1)
                T.reads(A[vi, vk], B_[vj, vk // 4, vk % 4])
                T.writes(C[vi, vj])
                with T.init():
                    C[vi, vj] = T.float32(0)
                C[vi, vj] = C[vi, vj] + A[vi, vk] * B_[vj, vk // 4, vk % 4]
    @T.prim_func
    def after(
        A: T.Buffer[(16, 16), "float32"],
        B: T.Buffer[(16, 4, 4), "float32"],
        C: T.Buffer[(16, 16), "float32"],
    ) -> None:
        T.func_attr({"layout_free_buffers": [1]})
        for i0_o, i1_o in T.grid(16, 16):
            with T.block("layout_rewrite"):
                i0, i1 = T.axis.remap("SS", [i0_o, i1_o])
                T.reads()
                T.writes()
                T.block_attr({"meta_schedule.layout_rewrite_preproc": True})
                T.evaluate(0)
        for i0, j, k0, i1, k1 in T.grid(4, 16, 4, 4, 4):
            with T.block("matmul"):
                vi = T.axis.spatial(16, i0 * 4 + i1)
                vj = T.axis.spatial(16, j)
                vk = T.axis.reduce(16, k0 * 4 + k1)
                T.reads(A[vi, vk], B[vj, vk // 4, vk % 4])
                T.writes(C[vi, vj])
                with T.init():
                    C[vi, vj] = T.float32(0)
                C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk // 4, vk % 4]
    _check(before, after)
if __name__ == "__main__":
    test_matmul()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_transform_renormalize_split_pattern.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm.script import tir as T
# fmt: off
# pylint: disable=no-member,invalid-name,unused-variable,line-too-long,redefined-outer-name,redundant-keyword-arg
# Input fixture: conv2d_transpose kernel whose index expressions use the
# "merged" split pattern that RenormalizeSplitPattern will rewrite.
# NOTE: compared with assert_structural_equal — the body must stay unchanged.
@tvm.script.ir_module
class Before:
    @T.prim_func
    def main(inputs: T.Buffer[(8192,), "float32"], weight: T.Buffer[(2097152,), "float32"], conv2d_transpose_nhwc: T.Buffer[(16384,), "float32"]) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        T.preflattened_buffer(inputs, [1, 4, 4, 512], dtype="float32", data=inputs.data)
        T.preflattened_buffer(weight, [4, 4, 512, 256], dtype="float32", data=weight.data)
        T.preflattened_buffer(conv2d_transpose_nhwc, [1, 8, 8, 256], dtype="float32", data=conv2d_transpose_nhwc.data)
        # var definition
        threadIdx_x = T.env_thread("threadIdx.x")
        blockIdx_x = T.env_thread("blockIdx.x")
        # body
        T.launch_thread(blockIdx_x, 64)
        conv2d_transpose_nhwc_local = T.decl_buffer([8], "float32", scope="local")
        PadInput_shared = T.decl_buffer([768], "float32", scope="shared")
        weight_shared = T.decl_buffer([4096], "float32", scope="shared")
        T.launch_thread(threadIdx_x, 32)
        for i2_3_init, i1_4_init, i2_4_init in T.grid(2, 2, 2):
            conv2d_transpose_nhwc_local[i1_4_init * 4 + i2_3_init * 2 + i2_4_init] = T.float32(0)
        for i6_0 in T.serial(16):
            for ax0_ax1_ax2_ax3_fused_0 in T.serial(24):
                PadInput_shared[ax0_ax1_ax2_ax3_fused_0 * 32 + threadIdx_x] = T.if_then_else(128 <= ax0_ax1_ax2_ax3_fused_0 * 32 + threadIdx_x and ax0_ax1_ax2_ax3_fused_0 * 32 + threadIdx_x < 640 and 1 <= blockIdx_x // 32 * 2 + (ax0_ax1_ax2_ax3_fused_0 * 32 + threadIdx_x) % 128 // 32 and blockIdx_x // 32 * 2 + (ax0_ax1_ax2_ax3_fused_0 * 32 + threadIdx_x) % 128 // 32 < 5, inputs[blockIdx_x // 32 * 1024 + ax0_ax1_ax2_ax3_fused_0 * 512 + i6_0 * 32 + threadIdx_x - 2560], T.float32(0), dtype="float32")
            for ax0_ax1_ax2_ax3_fused_0 in T.serial(32):
                weight_shared[T.ramp(ax0_ax1_ax2_ax3_fused_0 * 128 + threadIdx_x * 4, 1, 4)] = weight[T.ramp((ax0_ax1_ax2_ax3_fused_0 * 128 + threadIdx_x * 4) // 256 * 131072 + i6_0 * 8192 + (ax0_ax1_ax2_ax3_fused_0 * 128 + threadIdx_x * 4) % 256 // 8 * 256 + blockIdx_x % 32 * 8 + threadIdx_x % 2 * 4, 1, 4)]
            for i6_1, i2_3, i4_2, i5_2, i6_2, i1_4, i2_4 in T.grid(4, 2, 4, 4, 8, 2, 2):
                conv2d_transpose_nhwc_local[i1_4 * 4 + i2_3 * 2 + i2_4] = conv2d_transpose_nhwc_local[i1_4 * 4 + i2_3 * 2 + i2_4] + T.if_then_else((i1_4 + i4_2) % 2 == 0 and (i2_4 + i5_2) % 2 == 0, PadInput_shared[threadIdx_x // 8 * 128 + (i1_4 + i4_2) // 2 * 128 + (i2_4 + i5_2) // 2 * 32 + i2_3 * 32 + i6_1 * 8 + i6_2], T.float32(0), dtype="float32") * weight_shared[i6_1 * 64 + i6_2 * 8 + threadIdx_x % 8 + 3840 - i5_2 * 256 - i4_2 * 1024]
        for ax1, ax2 in T.grid(2, 4):
            conv2d_transpose_nhwc[threadIdx_x // 8 * 4096 + ax1 * 2048 + blockIdx_x // 32 * 1024 + ax2 * 256 + blockIdx_x % 32 * 8 + threadIdx_x % 8] = conv2d_transpose_nhwc_local[ax1 * 4 + ax2]
# Expected output of RenormalizeSplitPattern alone (before Simplify): the
# guard conditions and the weight index are re-expressed in renormalized
# split form.  Compared with assert_structural_equal — do not edit the body.
@tvm.script.ir_module
class After:
    @T.prim_func
    def main(inputs: T.Buffer[(8192,), "float32"], weight: T.Buffer[(2097152,), "float32"], conv2d_transpose_nhwc: T.Buffer[(16384,), "float32"]) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        T.preflattened_buffer(inputs, [1, 4, 4, 512], dtype="float32", data=inputs.data)
        T.preflattened_buffer(weight, [4, 4, 512, 256], dtype="float32", data=weight.data)
        T.preflattened_buffer(conv2d_transpose_nhwc, [1, 8, 8, 256], dtype="float32", data=conv2d_transpose_nhwc.data)
        # var definition
        threadIdx_x = T.env_thread("threadIdx.x")
        blockIdx_x = T.env_thread("blockIdx.x")
        # body
        T.launch_thread(blockIdx_x, 64)
        conv2d_transpose_nhwc_local = T.decl_buffer([8], "float32", scope="local")
        PadInput_shared = T.decl_buffer([768], "float32", scope="shared")
        weight_shared = T.decl_buffer([4096], "float32", scope="shared")
        T.launch_thread(threadIdx_x, 32)
        for i2_3_init, i1_4_init, i2_4_init in T.grid(2, 2, 2):
            conv2d_transpose_nhwc_local[i1_4_init * 4 + i2_3_init * 2 + i2_4_init] = T.float32(0)
        for i6_0 in T.serial(16):
            for ax0_ax1_ax2_ax3_fused_0 in T.serial(24):
                PadInput_shared[ax0_ax1_ax2_ax3_fused_0 * 32 + threadIdx_x] = T.if_then_else(1 <= (ax0_ax1_ax2_ax3_fused_0 + threadIdx_x // 32) // 4 and (ax0_ax1_ax2_ax3_fused_0 + threadIdx_x // 32) // 20 < 1 and 1 <= blockIdx_x // 32 * 2 + (ax0_ax1_ax2_ax3_fused_0 + threadIdx_x // 32) % 4 and (blockIdx_x // 32 * 2 + (ax0_ax1_ax2_ax3_fused_0 + threadIdx_x // 32) % 4) // 5 < 1, inputs[blockIdx_x // 32 * 1024 + ax0_ax1_ax2_ax3_fused_0 * 512 + i6_0 * 32 + threadIdx_x - 2560], T.float32(0), dtype="float32")
            for ax0_ax1_ax2_ax3_fused_0 in T.serial(32):
                weight_shared[T.ramp(ax0_ax1_ax2_ax3_fused_0 * 128 + threadIdx_x * 4, 1, 4)] = weight[T.ramp((ax0_ax1_ax2_ax3_fused_0 + threadIdx_x * 4 // 128) // 2 * 131072 + i6_0 * 8192 + (ax0_ax1_ax2_ax3_fused_0 * 16 + threadIdx_x * 4 // 8) % 32 * 256 + blockIdx_x % 32 * 8 + threadIdx_x % 2 * 4, 1, 4)]
            for i6_1, i2_3, i4_2, i5_2, i6_2, i1_4, i2_4 in T.grid(4, 2, 4, 4, 8, 2, 2):
                conv2d_transpose_nhwc_local[i1_4 * 4 + i2_3 * 2 + i2_4] = conv2d_transpose_nhwc_local[i1_4 * 4 + i2_3 * 2 + i2_4] + T.if_then_else((i1_4 + i4_2) % 2 == 0 and (i2_4 + i5_2) % 2 == 0, PadInput_shared[threadIdx_x // 8 * 128 + (i1_4 + i4_2) // 2 * 128 + (i2_4 + i5_2) // 2 * 32 + i2_3 * 32 + i6_1 * 8 + i6_2], T.float32(0), dtype="float32") * weight_shared[i6_1 * 64 + i6_2 * 8 + threadIdx_x % 8 + 3840 - i5_2 * 256 - i4_2 * 1024]
        for ax1, ax2 in T.grid(2, 4):
            conv2d_transpose_nhwc[threadIdx_x // 8 * 4096 + ax1 * 2048 + blockIdx_x // 32 * 1024 + ax2 * 256 + blockIdx_x % 32 * 8 + threadIdx_x % 8] = conv2d_transpose_nhwc_local[ax1 * 4 + ax2]
# Expected output after RenormalizeSplitPattern followed by Simplify: the
# renormalized guards and indices reduce to their simplest closed form.
# Compared with assert_structural_equal — do not edit the body.
@tvm.script.ir_module
class After_simplified:
    @T.prim_func
    def main(inputs: T.Buffer[(8192,), "float32"], weight: T.Buffer[(2097152,), "float32"], conv2d_transpose_nhwc: T.Buffer[(16384,), "float32"]) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        # var definition
        threadIdx_x = T.env_thread("threadIdx.x")
        blockIdx_x = T.env_thread("blockIdx.x")
        T.preflattened_buffer(inputs, [1, 4, 4, 512], dtype="float32", data=inputs.data)
        T.preflattened_buffer(weight, [4, 4, 512, 256], dtype="float32", data=weight.data)
        T.preflattened_buffer(conv2d_transpose_nhwc, [1, 8, 8, 256], dtype="float32", data=conv2d_transpose_nhwc.data)
        # body
        T.launch_thread(blockIdx_x, 64)
        conv2d_transpose_nhwc_local = T.decl_buffer([8], "float32", scope="local")
        PadInput_shared = T.decl_buffer([768], "float32", scope="shared")
        weight_shared = T.decl_buffer([4096], "float32", scope="shared")
        T.launch_thread(threadIdx_x, 32)
        for i2_3_init, i1_4_init, i2_4_init in T.grid(2, 2, 2):
            conv2d_transpose_nhwc_local[i1_4_init * 4 + i2_3_init * 2 + i2_4_init] = T.float32(0)
        for i6_0 in T.serial(16):
            for ax0_ax1_ax2_ax3_fused_0 in T.serial(24):
                PadInput_shared[ax0_ax1_ax2_ax3_fused_0 * 32 + threadIdx_x] = T.if_then_else(4 <= ax0_ax1_ax2_ax3_fused_0 and ax0_ax1_ax2_ax3_fused_0 < 20 and 1 <= blockIdx_x // 32 * 2 + ax0_ax1_ax2_ax3_fused_0 % 4 and blockIdx_x // 32 * 2 + ax0_ax1_ax2_ax3_fused_0 % 4 < 5, inputs[blockIdx_x // 32 * 1024 + ax0_ax1_ax2_ax3_fused_0 * 512 + i6_0 * 32 + threadIdx_x - 2560], T.float32(0), dtype="float32")
            for ax0_ax1_ax2_ax3_fused_0 in T.serial(32):
                weight_shared[T.ramp(ax0_ax1_ax2_ax3_fused_0 * 128 + threadIdx_x * 4, 1, 4)] = weight[T.ramp(ax0_ax1_ax2_ax3_fused_0 // 2 * 131072 + i6_0 * 8192 + ax0_ax1_ax2_ax3_fused_0 % 2 * 4096 + threadIdx_x // 2 * 256 + blockIdx_x % 32 * 8 + threadIdx_x % 2 * 4, 1, 4)]
            for i6_1, i2_3, i4_2, i5_2, i6_2, i1_4, i2_4 in T.grid(4, 2, 4, 4, 8, 2, 2):
                conv2d_transpose_nhwc_local[i1_4 * 4 + i2_3 * 2 + i2_4] = conv2d_transpose_nhwc_local[i1_4 * 4 + i2_3 * 2 + i2_4] + T.if_then_else((i1_4 + i4_2) % 2 == 0 and (i2_4 + i5_2) % 2 == 0, PadInput_shared[threadIdx_x // 8 * 128 + (i1_4 + i4_2) // 2 * 128 + (i2_4 + i5_2) // 2 * 32 + i2_3 * 32 + i6_1 * 8 + i6_2], T.float32(0), dtype="float32") * weight_shared[i6_1 * 64 + i6_2 * 8 + threadIdx_x % 8 + 3840 - i5_2 * 256 - i4_2 * 1024]
        for ax1, ax2 in T.grid(2, 4):
            conv2d_transpose_nhwc[threadIdx_x // 8 * 4096 + ax1 * 2048 + blockIdx_x // 32 * 1024 + ax2 * 256 + blockIdx_x % 32 * 8 + threadIdx_x % 8] = conv2d_transpose_nhwc_local[ax1 * 4 + ax2]
# pylint: enable=no-member,invalid-name,unused-variable,line-too-long,redefined-outer-name,redundant-keyword-arg
# fmt: on
def test_renormalize_split_pattern():
    """RenormalizeSplitPattern turns Before into After; a follow-up
    Simplify pass then reduces it to After_simplified."""
    renormalized = tvm.tir.transform.RenormalizeSplitPattern()(Before)
    tvm.ir.assert_structural_equal(renormalized, After)
    simplified = tvm.tir.transform.Simplify()(renormalized)
    tvm.ir.assert_structural_equal(simplified, After_simplified)
# Regression fixtures: prim_funcs whose conditionals are provably false.
@T.prim_func
def impossible_equality(n: T.int32):
    # Prior to bugfix, this conditional defined the expression "2" as
    # equal to zero within the then_case. [min_value=2, max_value=0]
    if 2 == 0:
        # Then this expression evaluates n/2, using the min/max values
        # of "2", which is caught as a divide by zero error.
        if n // 2 >= 16:
            T.evaluate(0)
@T.prim_func
def impossible_inequality(n: T.int32):
    # Prior to bugfix, this conditional set up a range of possible
    # values for the expression "-2" as [0, kPosInf].
    if -1 < -2:
        if n // (-2) >= 16:
            T.evaluate(0)
# Parametrize over both provably-false conditional shapes.
integer_condition = tvm.testing.parameter(
    impossible_equality,
    impossible_inequality,
)
def test_analyze_inside_integer_conditional(integer_condition):
    """Avoid crash occurring in ConstIntBoundAnalyzer.
    Crash occurred when simplifying some expressions with provably
    false integer expressions.  If the expressions were renormalized
    before calling Simplify, conditional statements could assign a
    range of possible values to integers, as if they were variables.
    This would result in divide by zero throwing an exception,
    followed by a second exception during stack unwinding causing the
    program to crash.
    """
    # Similar issue would occur in most transformations that subclass
    # IRMutatorWithAnalyzer.  tir.transform.Simplify() is an
    # exception, as it rewrites the integer conditionals first.  These
    # tests are written using RenormalizeSplitPattern as it is the
    # first case identified.
    transform = tvm.tir.transform.RenormalizeSplitPattern()
    # Issue would result in an error while applying the transformation;
    # success here is simply the absence of an exception.
    mod = tvm.IRModule.from_expr(integer_condition)
    transform(mod)
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_transform_rewrite_unsafe_select.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
def test_rewrite_Select():
    """RewriteUnsafeSelect rewrites Select nodes whose arms may have side
    effects (e.g. a buffer load) into tir.if_then_else calls, while a
    Select built from already-rewritten (safe) operands is left intact.
    """
    ib = tvm.tir.ir_builder.create()
    A = ib.allocate("float32", 100, name="A", scope="global")
    i = te.var("i")
    # Unsafe: the true-branch loads A[i - 1], which may be out of bounds.
    y = tvm.tir.Select(i > 1, A[i - 1], 1.0)
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([i], tvm.tir.Evaluate(y)))
    yy = tvm.tir.transform.RewriteUnsafeSelect()(mod)["main"].body.value
    # Unsafe nested Select: the condition itself contains an unsafe Select.
    z = tvm.tir.Select(tvm.tir.Select(i > 1, A[i - 1], 1.0) > 0.0, A[i], 0.1)
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([i], tvm.tir.Evaluate(z)))
    zz = tvm.tir.transform.RewriteUnsafeSelect()(mod)["main"].body.value
    # Safe: both arms (y and z) are plain expressions from the pass's view
    # once combined, so the outer Select should be preserved.
    a = tvm.tir.Select(tvm.tir.floordiv(i, 4) > 10, y, z)
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([i], tvm.tir.Evaluate(a)))
    aa = tvm.tir.transform.RewriteUnsafeSelect()(mod)["main"].body.value
    builtin_if_then_else = tvm.ir.Op.get("tir.if_then_else")
    assert yy.op.same_as(builtin_if_then_else)
    # Bug fix: previously this line duplicated the `yy` assertion, leaving
    # `zz` (the nested-Select result) unchecked.
    assert zz.op.same_as(builtin_if_then_else)
    assert isinstance(aa, tvm.tir.Select)
if __name__ == "__main__":
    test_rewrite_Select()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_transform_simplify.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm import te
from tvm.script import tir as T
def test_stmt_simplify():
    """With n bound to 10 via LetStmt, the `i < 12` guard cannot be reached
    with a false value inside `for i in range(n)`... the if is elided."""
    ib = tvm.tir.ir_builder.create()
    A = ib.pointer("float32", name="A")
    C = ib.pointer("float32", name="C")
    n = te.size_var("n")
    with ib.for_range(0, n, name="i") as i:
        with ib.if_scope(i < 12):
            A[i] = C[i]
    body = tvm.tir.LetStmt(n, 10, ib.get())
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([A, C, n], body))
    body = tvm.tir.transform.Simplify()(mod)["main"].body
    # The loop body is the bare store, with no IfThenElse wrapper left.
    assert isinstance(body.body, tvm.tir.BufferStore)
def test_thread_extent_simplify():
    """Simplify conditions using thread-extent annotations (ty extent is 1,
    so ty == 0 and `tx + ty < 12` can be reduced)."""
    ib = tvm.tir.ir_builder.create()
    A = ib.pointer("float32", name="A")
    C = ib.pointer("float32", name="C")
    n = te.size_var("n")
    tx = te.thread_axis("threadIdx.x")
    ty = te.thread_axis("threadIdx.y")
    # NOTE(review): the duplicated tx annotation appears deliberate — it
    # produces nested identical thread_extent attrs that the pass must handle.
    ib.scope_attr(tx, "thread_extent", n)
    ib.scope_attr(tx, "thread_extent", n)
    ib.scope_attr(ty, "thread_extent", 1)
    with ib.if_scope(tx + ty < 12):
        A[tx] = C[tx + ty]
    body = tvm.tir.LetStmt(n, 10, ib.get())
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([A, C, n], body))
    body = tvm.tir.transform.Simplify()(mod)["main"].body
    # Three nested AttrStmts, then the bare store (guard removed).
    assert isinstance(body.body.body.body, tvm.tir.BufferStore)
def test_if_likely():
    """Nested identical `likely` conditions collapse into a single guard."""
    ib = tvm.tir.ir_builder.create()
    A = ib.pointer("float32", name="A")
    C = ib.pointer("float32", name="C")
    n = te.size_var("n")
    tx = te.thread_axis("threadIdx.x")
    ty = te.thread_axis("threadIdx.y")
    ib.scope_attr(tx, "thread_extent", 32)
    ib.scope_attr(ty, "thread_extent", 32)
    with ib.if_scope(ib.likely(tx * 32 + ty < n)):
        with ib.if_scope(ib.likely(tx * 32 + ty < n)):
            A[tx] = C[tx * 32 + ty]
    body = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([A, C, n], body))
    body = tvm.tir.transform.Simplify()(mod)["main"].body
    # One IfThenElse remains; the redundant inner one is gone.
    assert isinstance(body.body.body, tvm.tir.IfThenElse)
    assert not isinstance(body.body.body.then_case, tvm.tir.IfThenElse)
def test_basic_likely_elimination():
    """Reduction over W[i]..W[i+1] ranges lowers without any `if` guards."""
    n = te.size_var("n")
    X = te.placeholder(shape=(n,), name="x")
    W = te.placeholder(shape=(n + 1,), dtype="int32", name="w")
    def f(i):
        # Sum X over the window [W[i], W[i+1]).
        start = W[i]
        extent = W[i + 1] - W[i]
        rv = te.reduce_axis((0, extent))
        return te.sum(X[rv + start], axis=rv)
    Y = te.compute(X.shape, f, name="y")
    s = te.create_schedule([Y.op])
    stmt = tvm.lower(s, [X, W, Y], simple_mode=True)
    assert "if" not in str(stmt)
def test_complex_likely_elimination():
    """A sparse-lengths-sum workload (scan + dynamic reductions + split/
    vectorize schedule) should lower with all likely-guards eliminated."""
    def cumsum(X):
        """
        Y[i] = sum(X[:i])
        """
        (m,) = X.shape
        s_state = te.placeholder((m + 1,), dtype="int32", name="state")
        s_init = te.compute((1,), lambda _: tvm.tir.const(0, "int32"))
        s_update = te.compute((m + 1,), lambda l: s_state[l - 1] + X[l - 1])
        return tvm.te.scan(s_init, s_update, s_state, inputs=[X], name="cumsum")
    def sparse_lengths_sum(data, indices, lengths):
        # For each output row n, sum the `lengths[n]` data rows selected by
        # `indices`, starting at the running offset of previous lengths.
        oshape = list(data.shape)
        oshape[0] = lengths.shape[0]
        length_offsets = cumsum(lengths)
        def sls(n, d):
            gg = te.reduce_axis((0, lengths[n]))
            indices_idx = length_offsets[n] + gg
            data_idx = indices[indices_idx]
            data_val = data[data_idx, d]
            return te.sum(data_val, axis=gg)
        return te.compute(oshape, sls)
    m, n, d, i, l = (
        te.size_var("m"),
        te.size_var("n"),
        te.size_var("d"),
        te.size_var("i"),
        te.size_var("l"),
    )
    data_ph = te.placeholder((m, d * 32), name="data")
    indices_ph = te.placeholder((i,), name="indices", dtype="int32")
    lengths_ph = te.placeholder((n,), name="lengths", dtype="int32")
    Y = sparse_lengths_sum(data_ph, indices_ph, lengths_ph)
    s = te.create_schedule([Y.op])
    (n, d) = s[Y].op.axis
    (do, di) = s[Y].split(d, factor=32)
    (gg,) = s[Y].op.reduce_axis
    s[Y].reorder(n, do, gg, di)
    s[Y].vectorize(di)
    stmt = tvm.lower(s, [data_ph, indices_ph, lengths_ph, Y], simple_mode=True)
    assert "if" not in str(stmt)
class BaseBeforeAfter(tvm.testing.CompareBeforeAfter):
    """Shared base for Simplify tests; subclasses override the class-level
    flags to toggle the corresponding tir.Simplify config options."""

    # Each flag maps 1:1 onto a key of the "tir.Simplify" pass config below.
    transitively_prove_inequalities = False
    convert_boolean_to_and_of_ors = False
    apply_constraints_to_boolean_branches = False
    def transform(self):
        # NOTE(review): unlike sibling files, this `transform` is not
        # decorated with @tvm.testing.fixture — presumably CompareBeforeAfter
        # accepts a plain method here; confirm against tvm.testing.
        def inner(mod):
            config = {
                "tir.Simplify": {
                    "transitively_prove_inequalities": self.transitively_prove_inequalities,
                    "convert_boolean_to_and_of_ors": self.convert_boolean_to_and_of_ors,
                    "apply_constraints_to_boolean_branches": self.apply_constraints_to_boolean_branches,
                }
            }
            with tvm.transform.PassContext(config=config):
                mod = tvm.tir.transform.Simplify()(mod)
            return mod
        return inner
class TestLoadStoreNoop(BaseBeforeAfter):
    """Store of a value that was just read from the same location is a no-op."""
    def before(A: T.Buffer[(1,), "float32"]):
        A[0] = A[0]
    def expected(A: T.Buffer[(1,), "float32"]):
        T.evaluate(0)
class TestLoadStoreNoopAfterSimplify(BaseBeforeAfter):
    """As test_load_store_noop, but requiring simplification to identify.
    Previously, a bug caused the self-assignment of a buffer to
    checked based on the pre-simplification assignment, not the
    post-simplification.  This test is to identify any similar
    regression.
    """
    def before(A: T.Buffer[(1,), "float32"]):
        # (5.0 - 5.0) simplifies to zero, exposing the self-assignment.
        A[0] = A[0] + (5.0 - 5.0)
    def expected(A: T.Buffer[(1,), "float32"]):
        T.evaluate(0)
class TestNestedCondition(BaseBeforeAfter):
    """Nested IfThenElse with the same condition can be simplified.
    Requires const_int_bound to narrow scope of i within the
    conditional, or for rewrite_simplify to recognize the literal
    constraint.
    """
    def before(A: T.Buffer[(16,), "float32"]):
        for i in T.serial(16):
            if i == 5:
                if i == 5:
                    A[i] = 0.0
    def expected(A: T.Buffer[(16,), "float32"]):
        for i in T.serial(16):
            if i == 5:
                A[i] = 0.0
class TestNestedProvableCondition(BaseBeforeAfter):
    """Simplify inner conditional using constraint from outer.
    Requires const_int_bound to narrow scope of i within the
    conditional.
    """
    def before(A: T.Buffer[(16,), "float32"]):
        for i in T.serial(16):
            if i == 5:
                # i == 5 implies i < 7, so the inner guard is redundant.
                if i < 7:
                    A[i] = 0.0
    def expected(A: T.Buffer[(16,), "float32"]):
        for i in T.serial(16):
            if i == 5:
                A[i] = 0.0
class TestNestedVarCondition(BaseBeforeAfter):
    """Simplify inner conditional using constraint from outer.
    Requires for rewrite_simplify to recognize the repeated
    constraint.
    """
    def before(A: T.Buffer[(16,), "float32"], n: T.int32):
        for i in T.serial(16):
            if i == n:
                if i == n:
                    A[i] = 0.0
    def expected(A: T.Buffer[(16,), "float32"], n: T.int32):
        for i in T.serial(16):
            if i == n:
                A[i] = 0.0
class TestAlteredBufferContents(BaseBeforeAfter):
    """No simplification of data-dependent conditionals.
    A literal constraint must not be propagated if the values
    referenced may change.  TIR requires single assignment of
    variables, so Var objects may be assumed constant, but BufferLoad
    may not.
    """
    def before(A: T.Buffer[(1,), "int32"], n: T.int32):
        if A[0] == n:
            # The store invalidates the A[0] == n constraint, so the
            # second conditional must be kept.
            A[0] = A[0] + 1
            if A[0] == n:
                A[0] = 0
    expected = before
class TestNegationOfCondition(BaseBeforeAfter):
    """Use negation of outer condition to simplify innner.
    Within the body of an if statement, the negation of the
    condition is known to be false.
    """
    def before(A: T.Buffer[(16,), "int32"]):
        for i in T.serial(16):
            if i == 5:
                # i != 5 is provably false here, so only the else arm survives.
                if i != 5:
                    A[i] = 0
                else:
                    A[i] = 1
    def expected(A: T.Buffer[(16,), "int32"]):
        for i in T.serial(16):
            if i == 5:
                A[i] = 1
class TestNegationOfNotEqual(BaseBeforeAfter):
    """As TestNegationOfVarCondition, but with a != outer condition.
    Because ConstIntBoundAnalyzer only tracks the min and max allowed
    values, the outer i!=5 condition does provide a constraint on the
    bounds.  This test relies on RewriteSimplifier to recognize
    ``i==5`` as the negation of a literal constraint.
    """
    def before(A: T.Buffer[(16,), "int32"]):
        for i in T.serial(16):
            if i != 5:
                if i == 5:
                    A[i] = 0
                else:
                    A[i] = 1
    def expected(A: T.Buffer[(16,), "int32"]):
        for i in T.serial(16):
            if i != 5:
                A[i] = 1
class TestNegationOfVarCondition(BaseBeforeAfter):
    """As TestNegationOfVarCondition, but with a dynamic condition.
    This simplification cannot be done with ConstIntBoundAnalyzer, and
    must rely on RewriteSimplifier recognizing the repeated literal.
    """
    def before(A: T.Buffer[(16,), "int32"], n: T.int32):
        for i in T.serial(16):
            if i == n:
                if i != n:
                    A[i] = 0
                else:
                    A[i] = 1
    def expected(A: T.Buffer[(16,), "int32"], n: T.int32):
        for i in T.serial(16):
            if i == n:
                A[i] = 1
class TestLiteralConstraintSplitBooleanAnd(BaseBeforeAfter):
    """Split a boolean AND into independent constraints
    A single if condition may impose multiple literal constraints.
    Each constraint that is ANDed together to form the condition
    should be treated as an independent constraint.  The use of n in
    the condition is to ensure we exercise RewriteSimplifier.
    """
    def before(A: T.Buffer[(16, 16), "int32"], n: T.int32):
        for i, j in T.grid(16, 16):
            if i == n and j == n:
                # i == n follows from the conjunction above.
                if i == n:
                    A[i, j] = 0
    def expected(A: T.Buffer[(16, 16), "int32"], n: T.int32):
        for i, j in T.grid(16, 16):
            if i == n and j == n:
                A[i, j] = 0
class TestLiteralConstraintSplitBooleanOr(BaseBeforeAfter):
    """Split a boolean OR into independent constraints.

    Similar to TestLiteralConstraintSplitBooleanAnd, but splitting a
    boolean OR into independent conditions.  This uses the
    simplification that ``!(x || y) == !x && !y``.

    The use of ``n`` in the condition is to ensure we exercise
    RewriteSimplifier.
    """

    def before(A: T.Buffer[(16, 16), "int32"], n: T.int32):
        for i, j in T.grid(16, 16):
            if i == n or j == n:
                A[i, j] = 0
            else:
                # In the else branch both `i != n` and `j != n` hold,
                # so the inner `i == n` branch is dead code.
                if i == n:
                    A[i, j] = 1
                else:
                    A[i, j] = 2

    def expected(A: T.Buffer[(16, 16), "int32"], n: T.int32):
        for i, j in T.grid(16, 16):
            if i == n or j == n:
                A[i, j] = 0
            else:
                A[i, j] = 2
class TestProveConditionUsingLet(BaseBeforeAfter):
    """Simplify conditions using non-inlined let bindings.

    Not all let bindings are inlined when they occur in later
    expressions.  However, even if they are not inlined, they may be
    used to prove the value of a condition.
    """

    @T.prim_func
    def before(A: T.Buffer[4, "bool"]):
        for i in T.serial(4):
            condition = i < 3
            # `condition or i >= 3` is exhaustive, so the branch always runs.
            if condition or i >= 3:
                A[i] = condition

    @T.prim_func
    def expected(A: T.Buffer[4, "bool"]):
        for i in T.serial(4):
            condition = i < 3
            A[i] = condition
class TestProveLetCondition(BaseBeforeAfter):
    """Simplify conditions using non-inlined let bindings.

    Not all let bindings are inlined when they occur in later
    expressions.  However, even if they are not inlined, they may be
    used to prove the value of a condition.
    """

    @T.prim_func
    def before(A: T.Buffer[4, "bool"]):
        for i in T.serial(4):
            condition = i < 3
            if i < 3:
                # `condition` is bound to `i < 3`, which the enclosing
                # scope already guarantees, so this check is redundant.
                if condition:
                    A[i] = condition

    @T.prim_func
    def expected(A: T.Buffer[4, "bool"]):
        for i in T.serial(4):
            condition = i < 3
            if i < 3:
                A[i] = condition
class TestProveRepeatedLetCondition(BaseBeforeAfter):
    """Simplify conditions using non-inlined let bindings.

    A variable may be used as a literal constraint, and be recognized
    as being True within the context of the constraint.
    """

    @T.prim_func
    def before(A: T.Buffer[4, "bool"]):
        for i in T.serial(4):
            condition = i < 3
            if condition:
                # Within this branch, `condition` is known to be True.
                if condition:
                    A[i] = condition

    @T.prim_func
    def expected(A: T.Buffer[4, "bool"]):
        for i in T.serial(4):
            condition = i < 3
            if condition:
                A[i] = True
class TestIfThenElseExpr(BaseBeforeAfter):
    """An if_then_else whose condition is implied by the enclosing
    scope simplifies to its then-value.
    """

    @T.prim_func
    def before(A: T.Buffer[16, "float32"]):
        for i in T.serial(16):
            if i < 12:
                # `i < 12` already holds, so the expression reduces to 1.0.
                A[i] = T.if_then_else(i < 12, 1.0, 2.0, dtype="float32")

    @T.prim_func
    def expected(A: T.Buffer[16, "float32"]):
        for i in T.serial(16):
            if i < 12:
                A[i] = 1.0
class TestCeilLog2Int(BaseBeforeAfter):
    """Simplify expressions resulting from topi.math.ceil_log2"""

    @T.prim_func
    def before(A: T.Buffer[1, "int32"]):
        # ceil(log2(14)) == ceil(3.807...) == 4
        A[0] = T.cast(
            T.ceil(T.log2(T.cast(14, "float64"), dtype="float64"), dtype="float64"), dtype="int32"
        )

    @T.prim_func
    def expected(A: T.Buffer[1, "int32"]):
        A[0] = 4
class TestLeftCeilLog2LowerBound(BaseBeforeAfter):
    """Integer bounds are propagated through topi.math.ceil_log2"""

    @T.prim_func
    def before(A: T.Buffer[16, "float32"]):
        for i in T.serial(16):
            # For i in [0, 15], i + 1025 is in [1025, 1040], so
            # ceil(log2(i + 1025)) is always 11 and the condition holds.
            x = T.cast(
                T.ceil(T.log2(T.cast(i + 1024 + 1, "float64"), dtype="float64"), dtype="float64"),
                dtype="int32",
            )
            if x == 11:
                A[i] = 0.0

    @T.prim_func
    def expected(A: T.Buffer[16, "float32"]):
        for i in T.serial(16):
            A[i] = 0.0
class TestLeftShiftLowerBound(BaseBeforeAfter):
    """Integer bounds are propagated through left shift.

    min(1 << i) = 1 << min(i)
                = 1 << 0
                = 1
    """

    @T.prim_func
    def before(A: T.Buffer[16, "float32"]):
        for i in T.serial(16):
            # Always true, given the lower bound derived above.
            if T.shift_left(1, i, dtype="int32") >= 1:
                A[i] = 0.0

    @T.prim_func
    def expected(A: T.Buffer[16, "float32"]):
        for i in T.serial(16):
            A[i] = 0.0
class TestLeftShiftUpperBound(BaseBeforeAfter):
    """Integer bounds are propagated through left shift.

    max(31 << i) = 31 << max(i)
                 = 31 << 15
                 = 1015808
    """

    @T.prim_func
    def before(A: T.Buffer[16, "float32"]):
        for i in T.serial(16):
            # Always true, given the upper bound derived above.
            if T.shift_left(31, i, dtype="int32") <= 1015808:
                A[i] = 0.0

    @T.prim_func
    def expected(A: T.Buffer[16, "float32"]):
        for i in T.serial(16):
            A[i] = 0.0
class TestLeftShiftOfNegativeValue(BaseBeforeAfter):
    """No const int bounds of left shift of negative value.

    This is target dependent, and does not currently have a specified
    behavior in TIR.  For example, in CodeGenC, this generates C code
    with undefined behavior.
    """

    @T.prim_func
    def before(A: T.Buffer[16, "float32"]):
        for i in T.serial(16):
            # Must NOT be simplified away: shifting a negative value is
            # unspecified, so no bounds can be assumed.
            if -64 <= T.shift_left(-i, 4, dtype="int32"):
                A[i] = 0.0

    expected = before
class TestLeftShiftByNegativeValue(BaseBeforeAfter):
    """No const int bounds of left shift by negative bit count.

    This is target dependent, and does not currently have a specified
    behavior in TIR.  For example, in CodeGenC, this generates C code
    with undefined behavior.
    """

    @T.prim_func
    def before(A: T.Buffer[16, "float32"]):
        for i in T.serial(16):
            # Must NOT be simplified away: shifting by a negative count is
            # unspecified, so no bounds can be assumed.
            if T.shift_left(16, -i, dtype="int32") <= 16:
                A[i] = 0.0

    expected = before
class TestRemoveTransitivelyProvableCondition(BaseBeforeAfter):
    """Remove comparisons that may be proven using multiple others.

    For example, the `0 < i` and `i <= j` conditions can be used to prove
    that `0 < j`.
    """

    transitively_prove_inequalities = True

    i, j, k = [tvm.tir.Var(name, "int32") for name in "ijk"]
    zero = tvm.tir.IntImm("int32", 0)

    # Each case is (priors, postulate, provable): given `priors`, can the
    # simplifier prove that `postulate` holds?
    test_case = tvm.testing.parameter(
        (tvm.tir.all(zero < i, i <= j), zero < j, True),
        # Transitive comparisons from LT
        (tvm.tir.all(i < j, j < k), i < k, True),
        (tvm.tir.all(i < j, j == k), i < k, True),
        (tvm.tir.all(i < j, j <= k), i < k, True),
        (tvm.tir.all(i < j, j > k), i < k, False),
        (tvm.tir.all(i < j, j >= k), i < k, False),
        (tvm.tir.all(i < j, j != k), i < k, False),
        # Transitive comparisons from LE
        (tvm.tir.all(i <= j, j < k), i < k, True),
        (tvm.tir.all(i <= j, j == k), i == k, False),
        (tvm.tir.all(i <= j, j == k), i <= k, True),
        (tvm.tir.all(i <= j, j <= k), i <= k, True),
        (tvm.tir.all(i <= j, j <= k), i < k, False),
        (tvm.tir.all(i <= j, j > k), i < k, False),
        (tvm.tir.all(i <= j, j >= k), i < k, False),
        (tvm.tir.all(i <= j, j != k), i < k, False),
        # Transitive comparisons from GT
        (tvm.tir.all(i > j, j > k), i > k, True),
        (tvm.tir.all(i > j, j == k), i > k, True),
        (tvm.tir.all(i > j, j >= k), i > k, True),
        (tvm.tir.all(i > j, j < k), i > k, False),
        (tvm.tir.all(i > j, j <= k), i > k, False),
        (tvm.tir.all(i > j, j != k), i > k, False),
        # Transitive comparisons from GE
        (tvm.tir.all(i >= j, j > k), i > k, True),
        (tvm.tir.all(i >= j, j == k), i == k, False),
        (tvm.tir.all(i >= j, j == k), i >= k, True),
        (tvm.tir.all(i >= j, j >= k), i >= k, True),
        (tvm.tir.all(i >= j, j >= k), i > k, False),
        (tvm.tir.all(i >= j, j < k), i > k, False),
        (tvm.tir.all(i >= j, j <= k), i > k, False),
        (tvm.tir.all(i >= j, j != k), i > k, False),
        # GT or LT may be used to prove NE
        (tvm.tir.all(i == j, j != k), i != k, True),
        (tvm.tir.all(i == j, j < k), i != k, True),
        (tvm.tir.all(i == j, j > k), i != k, True),
        (tvm.tir.all(i == j, j != k), i < k, False),
        (tvm.tir.all(i == j, j != k), i > k, False),
        # Because these are integers, x<y is equivalent to x <= y-1,
        # and may be used in equivalent simplifications.
        (tvm.tir.all(i <= j - 1, j < k), i < k, True),
        (tvm.tir.all(i <= j - 1, j == k), i < k, True),
        (tvm.tir.all(i <= j - 1, j <= k), i < k, True),
        (tvm.tir.all(i <= j - 1, j > k), i < k, False),
        (tvm.tir.all(i <= j - 1, j >= k), i < k, False),
        (tvm.tir.all(i <= j - 1, j != k), i < k, False),
        # Either or both inequalities may have an additive offset.
        (tvm.tir.all(i <= j + 5, j <= k + 7), i <= k + 12, True),
        (tvm.tir.all(i <= j + 5, j <= k + 7), i <= k + 11, False),
        # For floats, x < y + c1 and y < z + c2 implies that x < z + (c1 + c2).
        # Because this simplification applies to integers, transitive
        # application of LT or GT can give a tighter constraint.
        #
        # i < j + c1, j < k + c2
        # i <= j + c1 - 1, j <= k + c2 - 1
        # i + 1 - c1 <= j, j <= k + c2 - 1
        # i + 1 - c1 <= k + c2 - 1
        # i <= k + c1 + c2 - 2
        # i < k + (c1 + c2 - 1)
        #
        (tvm.tir.all(i < j + 5, j < k + 7), i < k + 11, True),
        (tvm.tir.all(i < j + 5, j < k + 7), i < k + 10, False),
    )

    @tvm.testing.fixture
    def before(self, test_case):
        priors, postulate, _ = test_case

        @T.prim_func
        def func(A: T.Buffer[1, "bool"]):
            if priors:
                A[0] = postulate

        return func

    @tvm.testing.fixture
    def expected(self, test_case):
        priors, postulate, provable = test_case

        analyzer = tvm.arith.Analyzer()
        priors = analyzer.canonical_simplify(priors)

        if provable:
            # A provable postulate simplifies to the literal True.
            @T.prim_func
            def func(A: T.Buffer[1, "bool"]):
                if priors:
                    A[0] = True

            return func
        else:
            # An unprovable postulate is only canonicalized, not removed.
            postulate = analyzer.canonical_simplify(postulate)

            @T.prim_func
            def func(A: T.Buffer[1, "bool"]):
                if priors:
                    A[0] = postulate

            return func
class TestSuppressTransitivelyProvableCondition(BaseBeforeAfter):
    """With the flag disabled, transitive proofs are not attempted,
    so `i < k` is left unsimplified even though `i < j < k` holds."""

    transitively_prove_inequalities = False

    def before(A: T.Buffer[1, "bool"], i: T.int32, j: T.int32, k: T.int32):
        if i < j and j < k:
            A[0] = i < k

    expected = before
class TestRewriteAsAndOfOrs(BaseBeforeAfter):
    """If enabled, rewrite boolean expressions into AND of OR
    (conjunctive normal form)."""

    convert_boolean_to_and_of_ors = True

    def before(A: T.Buffer[3, "bool"]):
        T.evaluate(A[0] or (A[1] and A[2]))

    def expected(A: T.Buffer[3, "bool"]):
        # Distribution of OR over AND.
        T.evaluate((A[0] or A[1]) and (A[0] or A[2]))
class TestSuppressRewriteAsAndOfOrs(BaseBeforeAfter):
    """Only rewrite into AND of OR when explicitly allowed."""

    convert_boolean_to_and_of_ors = False

    def before(A: T.Buffer[3, "bool"]):
        T.evaluate(A[0] or (A[1] and A[2]))

    expected = before
class TestRewriteAsAndOfOrsWithTopLevelAnd(BaseBeforeAfter):
    """The expression being rewritten may start with an AND.

    Like TestRewriteAsAndOfOrs, but with an AndNode as the outermost
    boolean operator.  Even though it is primarily OR nodes that are
    being rewritten, the call to SimplifyAsAndOfOrs should apply to
    the outermost AndNode or OrNode in order to enable better
    simplification.
    """

    convert_boolean_to_and_of_ors = True

    def before(A: T.Buffer[4, "bool"]):
        T.evaluate((A[0] or A[1]) and (A[1] or (A[0] and A[2] and A[3])))

    def expected(A: T.Buffer[4, "bool"]):
        # If the simplification is applied to the OrNode, then a
        # redundant `(A[1] or A[0])` wouldn't be canceled out.  When
        # applying SimplifyAsAndOfOrs to the top-level AndNode, the
        # internal representation is `[[0,1], [1,0], [1,2], [1,3]]`, and
        # the redundant `[1,0]` can be removed.
        #
        # If the simplification were only applied when encountering an
        # OrNode, the internal representation would be `[[0,1]]` during
        # the first call and `[[1,0], [1,2], [1,3]]` during the second
        # call.  As a result, the `[0,1]` and `[1,0]` representations
        # wouldn't occur within the same call, and the redundant `[1,0]`
        # wouldn't be removed.
        T.evaluate((A[0] or A[1]) and (A[1] or A[2]) and (A[1] or A[3]))
class TestRewriteAsAndOfOrsWithSimplificationBetweenGroups(BaseBeforeAfter):
    """Apply rewrite rules between OR groups that differ by a single element.

    The expression `(k==20 and k!=30)` could be rewritten into `(k==20)`.
    However, by default these two terms must appear as part of an explicit
    part of the simplified expression.  The AndOfOr simplification checks
    for rewrite patterns of the form `(A or B) and (A or C)`, where
    `(B and C)` can simplify to a single expression `D`.  These can be
    rewritten to `(A or D)`.
    """

    convert_boolean_to_and_of_ors = True

    def before(A: T.Buffer[1, "bool"], i: T.int32, j: T.int32, k: T.int32):
        # Here A = (i==0 or j==10), B = (k==20), C = (k!=30), D = (k==20).
        A[0] = (i == 0 or j == 10 or k == 20) and (i == 0 or j == 10 or k != 30)

    def expected(A: T.Buffer[1, "bool"], i: T.int32, j: T.int32, k: T.int32):
        A[0] = i == 0 or j == 10 or k == 20
class TestRewriteAsAndOfOrsWithSimplificationBetweenReorderedGroups(BaseBeforeAfter):
    """Rewrite rules between OR groups do not depend on order.

    Like TestRewriteAsAndOfOrsWithSimplificationBetweenGroups, but the
    groups are ordered differently.  If this removes a group entirely,
    the result is ordered according to the first group in the expression.
    """

    convert_boolean_to_and_of_ors = True

    def before(A: T.Buffer[1, "bool"], i: T.int32, j: T.int32, k: T.int32):
        A[0] = (i == 0 or j == 10 or k == 20) and (j == 10 or k != 30 or i == 0)

    def expected(A: T.Buffer[1, "bool"], i: T.int32, j: T.int32, k: T.int32):
        A[0] = i == 0 or j == 10 or k == 20
class TestRewriteAsAndOfOrUsingSimplificationAcrossAnd(BaseBeforeAfter):
    """Apply AndNode rewrites to non-adjacent expressions.

    The RewriteSimplifier rules only check for simplifications between
    left/right branches of an And/Or node.  Simplifications that would
    require rearranging components in a chain of And/Or nodes are not
    performed by it; the AndOfOrs form enables them.
    """

    convert_boolean_to_and_of_ors = True

    def before(A: T.Buffer[1, "bool"], i: T.int32, j: T.int32, k: T.int32):
        # `k == 20` and `k != 30` are separated by the middle clause.
        A[0] = (k == 20) and ((i == 0 or j == 10) and (k != 30))

    def expected(A: T.Buffer[1, "bool"], i: T.int32, j: T.int32, k: T.int32):
        A[0] = (k == 20) and (i == 0 or j == 10)
class TestRewriteAsAndOfOrUsingSimplificationWithinOr(BaseBeforeAfter):
    """Apply OR rewrites across an intervening clause.

    The RewriteSimplifier rules only check for simplifications between
    left/right branches of an And/Or node.  Simplifications that would
    require rearranging components in a chain of And/Or nodes are not
    performed.

    This test validates that `(i == 20) or (i != 30)` can be rewritten to
    `(i != 30)`, even when there's an intervening clause between the
    clauses being simplified.
    """

    convert_boolean_to_and_of_ors = True

    def before(A: T.Buffer[1, "bool"], i: T.int32, j: T.int32, k: T.int32):
        A[0] = (i == 20) or (j == 0) or (i != 30)

    def expected(A: T.Buffer[1, "bool"], i: T.int32, j: T.int32, k: T.int32):
        A[0] = (i != 30) or (j == 0)
class TestConditionalFloorMod(BaseBeforeAfter):
    """A regression test for negative floormod denominator.

    Previously, simplifying this function could throw an error.  First,
    the `canonical_simplify` would rewrite `floormod(0-i,2)` to the
    equivalent `floormod(i,-2)`.  Then, the rewrite_simplifier would
    enter a constrained context in which `floormod(i,-2)==1`.  Passing
    this expression to `ModularSet::EnterConstraint`, which previously
    did not support a negative value for the second argument, threw an
    error.

    The analogous failure mode never occurred for `truncmod`, because
    `truncmod(0-i,2)` would be canonicalized to `truncmod(i, -2) * -1`,
    and the pattern matching in `ModularSet` didn't recognize the
    constant factor.

    This failure mode was resolved by supporting negative arguments in
    `ModularSet`, using the same sign convention as is used by
    `canonical_simplify`.
    """

    def before(A: T.Buffer[1, "bool"], i: T.int32):
        if T.floormod(0 - i, 2) == 0:
            A[0] = T.floormod(i, 2) == 0

    def expected(A: T.Buffer[1, "bool"], i: T.int32):
        # Canonical form uses the negative denominator.
        if T.floormod(i, -2) == 0:
            A[0] = True
class TestSimplifyRHSOfBooleanAndUsingLHS(BaseBeforeAfter):
    """Boolean expressions can introduce contexts.

    In `A and B`, the result of `B` only matters when `A` is
    true, and can be simplified under that context.  This test
    simplifies `n < 10` under the assumption that `n < 5`.
    """

    apply_constraints_to_boolean_branches = True

    def before(A: T.Buffer[1, "bool"], n: T.int32):
        # `n < 5` implies `n < 10`, so the second conjunct is redundant.
        A[0] = n < 5 and n < 10

    def expected(A: T.Buffer[1, "bool"], n: T.int32):
        A[0] = n < 5
class TestSimplifyLHSOfBooleanAndUsingRHS(BaseBeforeAfter):
    """Boolean expressions can introduce contexts for their arguments.

    Like TestSimplifyRHSOfBooleanAndUsingLHS, but using the RHS to
    simplify the LHS.
    """

    apply_constraints_to_boolean_branches = True

    def before(A: T.Buffer[1, "bool"], n: T.int32):
        # `n < 5` implies `n < 10`, so the first conjunct is redundant.
        A[0] = n < 10 and n < 5

    def expected(A: T.Buffer[1, "bool"], n: T.int32):
        A[0] = n < 5
class TestSimplifyRHSOfBooleanOrUsingLHS(BaseBeforeAfter):
    """Boolean expressions can introduce contexts.

    In `A or B`, the result of `B` only matters when `A` is false, so
    `B` can be simplified under the assumption that `A` is false.
    This test simplifies `n < 5` under the assumption that `!(n < 10)`.
    """

    apply_constraints_to_boolean_branches = True

    def before(A: T.Buffer[1, "bool"], n: T.int32):
        # Under `n >= 10`, `n < 5` is always false, so it can be dropped.
        A[0] = n < 10 or n < 5

    def expected(A: T.Buffer[1, "bool"], n: T.int32):
        A[0] = n < 10
class TestSimplifyLHSOfBooleanOrUsingRHS(BaseBeforeAfter):
    """Boolean expressions can introduce contexts for their arguments.

    Like TestSimplifyRHSOfBooleanOrUsingLHS, but using the RHS to
    simplify the LHS.
    """

    apply_constraints_to_boolean_branches = True

    def before(A: T.Buffer[1, "bool"], n: T.int32):
        # Under `n >= 10`, `n < 5` is always false, so it can be dropped.
        A[0] = n < 5 or n < 10

    def expected(A: T.Buffer[1, "bool"], n: T.int32):
        A[0] = n < 10
class TestSimplifyRHSOfBooleanAndUsingLHSWithoutConst(BaseBeforeAfter):
    """Boolean expressions can introduce contexts.

    Like TestSimplifyRHSOfBooleanAndUsingLHS, but with variables in
    the conditions, preventing ConstIntBoundAnalyzer from handling it.
    This proof requires the extension to transitively prove
    inequalities.
    """

    apply_constraints_to_boolean_branches = True
    transitively_prove_inequalities = True

    def before(A: T.Buffer[1, "bool"], n: T.int32, m: T.int32):
        # `n < m + 5` implies `n < m + 10`.
        A[0] = n < m + 5 and n < m + 10

    def expected(A: T.Buffer[1, "bool"], n: T.int32, m: T.int32):
        A[0] = n < m + 5
class TestSimplifyLHSOfBooleanAndUsingRHSWithoutConst(BaseBeforeAfter):
    """Boolean expressions can introduce contexts for their arguments.

    Like TestSimplifyLHSOfBooleanAndUsingRHS, but with variables in
    the conditions, preventing ConstIntBoundAnalyzer from handling it.
    This proof requires the extension to transitively prove
    inequalities.
    """

    apply_constraints_to_boolean_branches = True
    transitively_prove_inequalities = True

    def before(A: T.Buffer[1, "bool"], n: T.int32, m: T.int32):
        # `n < m + 5` implies `n < m + 10`.
        A[0] = n < m + 10 and n < m + 5

    def expected(A: T.Buffer[1, "bool"], n: T.int32, m: T.int32):
        A[0] = n < m + 5
class TestSimplifyRHSOfBooleanOrUsingLHSWithoutConst(BaseBeforeAfter):
    """Boolean expressions can introduce contexts.

    Like TestSimplifyRHSOfBooleanOrUsingLHS, but with variables in the
    conditions, preventing ConstIntBoundAnalyzer from handling it.
    This proof requires the extension to transitively prove
    inequalities.
    """

    apply_constraints_to_boolean_branches = True
    transitively_prove_inequalities = True

    def before(A: T.Buffer[1, "bool"], n: T.int32, m: T.int32):
        # Under `n >= m + 10`, `n < m + 5` is always false.
        A[0] = n < m + 10 or n < m + 5

    def expected(A: T.Buffer[1, "bool"], n: T.int32, m: T.int32):
        A[0] = n < m + 10
class TestSimplifyLHSOfBooleanOrUsingRHSWithoutConst(BaseBeforeAfter):
    """Boolean expressions can introduce contexts for their arguments.

    Like TestSimplifyLHSOfBooleanOrUsingRHS, but with variables in the
    conditions, preventing ConstIntBoundAnalyzer from handling it.
    This proof requires the extension to transitively prove
    inequalities.
    """

    apply_constraints_to_boolean_branches = True
    transitively_prove_inequalities = True

    def before(A: T.Buffer[1, "bool"], n: T.int32, m: T.int32):
        # Under `n >= m + 10`, `n < m + 5` is always false.
        A[0] = n < m + 5 or n < m + 10

    def expected(A: T.Buffer[1, "bool"], n: T.int32, m: T.int32):
        A[0] = n < m + 10
class TestProvableConditionWithOffset(BaseBeforeAfter):
    """Use scoped-constraint to prove inequalities."""

    transitively_prove_inequalities = False

    def before(A: T.Buffer[1, "bool"], i: T.int32, j: T.int32):
        if i < j:
            # `i < j` implies `i < j + 1`, even without transitive proofs.
            A[0] = i < j + 1

    def expected(A: T.Buffer[1, "bool"], i: T.int32, j: T.int32):
        if i < j:
            A[0] = True
class TestMostRestrictiveConditional(BaseBeforeAfter):
    """Preferentially prove part of a compound conditional.

    Even if we cannot prove a conditional as true or false on its own,
    proving that a conditional must satisfy a stronger condition may
    allow for later rewrites.  For example, if it is known that `a <= b`,
    then `a >= b` cannot be proven, but can be reduced to `a == b`.
    """

    i, j, k = [tvm.tir.Var(name, "int32") for name in "ijk"]
    tir_int = tvm.tir.IntImm("int32", 0)

    # Each case is (prior constraint, original expression, reduced form).
    test_case = tvm.testing.parameter(
        (i <= tir_int, tir_int <= i, i == tir_int),
        (i <= tir_int, i != tir_int, i < tir_int),
        (i != tir_int, i <= tir_int, i < tir_int),
        (i != tir_int, tir_int <= i, tir_int < i),
        (i <= j, j <= i, j == i),
        (i <= j, i != j, i < j),
        (i != j, i <= j, i < j),
        (i != j, j <= i, j < i),
    )

    @tvm.testing.fixture
    def before(self, test_case):
        priors, expr_before, _ = test_case

        @T.prim_func
        def func(A: T.Buffer[1, "bool"]):
            if priors:
                A[0] = expr_before

        return func

    @tvm.testing.fixture
    def expected(self, test_case):
        priors, _, expr_after = test_case

        @T.prim_func
        def func(A: T.Buffer[1, "bool"]):
            if priors:
                A[0] = expr_after

        return func
if __name__ == "__main__":
    # Discover and run every test in this file via tvm.testing.
    tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_transform_split_host_device.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
import tvm.testing
@tvm.testing.requires_cuda
def test_split_host_device_func_attr():
    """SplitHostDevice gives the extracted device kernel its own global
    symbol, calling convention, target, and global-function flag."""
    m = te.size_var("m")
    l = te.size_var("l")
    A = te.placeholder((m, l), name="A")
    A1 = te.compute((m, l), lambda i, j: A[i, j], name="A1")
    A2 = te.compute((m, l), lambda i, j: A1[i, j] + 3, name="A2")
    s = te.create_schedule(A2.op)
    xo, xi = s[A2].split(A2.op.axis[0], factor=8)
    s[A2].bind(xo, te.thread_axis("blockIdx.x"))
    s[A1].compute_at(s[A2], xo)
    s[A1].set_scope("shared")
    mod = tvm.lower(s, [A, A2], name="f")
    cuda_target = tvm.target.Target("cuda")
    # Attach the attributes SplitHostDevice expects on the host function.
    mod = tvm.tir.transform.Apply(
        lambda f: f.with_attr({"global_symbol": "test", "target": cuda_target})
    )(mod)
    fdevice = tvm.tir.transform.SplitHostDevice()(mod)["test_kernel0"]
    assert fdevice.attrs["global_symbol"] == "test_kernel0"
    # NOTE(review): 2 presumably corresponds to the device-kernel-launch
    # calling convention — confirm against tvm::CallingConv.
    assert fdevice.attrs["calling_conv"].value == 2
    assert fdevice.attrs["target"] == cuda_target
    assert fdevice.attrs["tir.is_global_func"].value
if __name__ == "__main__":
    # Run the single test directly when invoked as a script.
    test_split_host_device_func_attr()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_transform_storage_flatten.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm import te
from tvm.driver.build_module import schedule_to_module
from tvm.script import tir as T
from tvm.relay import GlobalVar
def test_flatten2():
    """StorageFlatten handles a two-stage compute with explicit buffer binds."""
    rows = te.size_var("m")
    cols = te.size_var("l")
    src = te.placeholder((rows, cols), name="A")
    stage1 = te.compute((rows, cols), lambda i, j: src[i, j], name="A1")
    stage2 = te.compute((rows, cols), lambda i, j: stage1[i, j] + 3, name="A2")

    sched = te.create_schedule(stage2.op)
    outer, _inner = sched[stage2].split(stage2.op.axis[0], 8)
    sched[stage1].compute_at(sched[stage2], outer)

    # Bind explicit buffers for the input and output tensors.
    src_buf = tvm.tir.decl_buffer(src.shape, src.dtype, name="A")
    out_buf = tvm.tir.decl_buffer(stage2.shape, stage2.dtype, name="A2")
    mod = schedule_to_module(sched, [src_buf, out_buf], binds={src: src_buf, stage2: out_buf})
    mod = tvm.tir.transform.StorageFlatten(64)(mod)
def test_flatten_prefetch():
    """Prefetch statements survive StorageFlatten with 1-d (flattened) indices."""
    A = te.placeholder((25, 100, 4), name="A")
    _A = tvm.tir.decl_buffer(A.shape, A.dtype, name="A")
    i = te.size_var("i")
    j = te.size_var("j")
    # Prefetch region [i:i+2, j:j+8, 0:4] of A.
    # NOTE: the comprehension variable shadows the size_var ``i`` above.
    region = [tvm.ir.Range.from_min_extent(i[0], i[1]) for i in [(i, 2), (j, 8), (0, 4)]]
    stmt = tvm.tir.Prefetch(_A, region)
    func = tvm.te.schedule.SchedulePostProcToPrimFunc([_A], stmt, {A: _A})
    mod = tvm.IRModule.from_expr(func)
    mod = tvm.transform.Sequential(
        [tvm.tir.transform.StorageFlatten(64), tvm.tir.transform.Simplify()]
    )(mod)
    stmt = mod["main"].body
    # The prefetch lowers to nested loops; the two outer extents are 2.
    assert stmt.extent.value == 2
    assert isinstance(stmt.body, tvm.tir.For)
    assert stmt.body.extent.value == 2

    def assert_flat_loads(stmt):
        # After flattening, every load must use a single linearized index.
        if isinstance(stmt, tvm.tir.BufferLoad):
            assert len(stmt.indices) == 1, "All prefetch indices should be flattened"

    tvm.tir.stmt_functor.post_order_visit(stmt, assert_flat_loads)
def test_flatten_storage_align():
    """storage_align pads the flattened allocation's row stride."""
    m = 8
    l = 16
    A = te.placeholder((m, l), name="A")
    A1 = te.compute((m, l), lambda i, j: A[i, j], name="A1")
    A2 = te.compute((m, l), lambda i, j: A1[i, j] + 3, name="A2")

    s = te.create_schedule(A2.op)
    s[A1].storage_align(A1.op.axis[0], 2, 1)
    mod = schedule_to_module(s, [A, A2])
    mod = tvm.transform.Sequential(
        [tvm.tir.transform.StorageFlatten(64), tvm.tir.transform.Simplify()]
    )(mod)
    stmt = mod["main"].body
    # The aligned row stride becomes 17 (16 padded by 1), so the flat
    # allocation covers 17 * 8 elements.
    assert stmt.extents[0].value == 17 * 8
def test_flatten_double_buffer():
    """Double-buffered shared buffer is doubled in size and synchronized."""

    @tvm.script.ir_module
    class ModFromScript:
        @T.prim_func
        def main(A_param: T.handle, C_param: T.handle):
            A = T.match_buffer(A_param, (400,), "float32", strides=[1])
            C = T.match_buffer(C_param, (4,), "float32", strides=[1])
            T.func_attr({"from_legacy_te_schedule": True})
            threadIdx_x = T.env_thread("threadIdx.x")
            T.launch_thread(threadIdx_x, 1)
            for i in T.serial(0, 100):
                # B is the staging buffer marked for double buffering below.
                B = T.decl_buffer([4], "float32", scope="shared")
                with T.attr(B.data, "double_buffer_scope", 1):
                    for j in T.serial(0, 4):
                        B[j] = A[4 * i + j]
                for j in T.serial(0, 4):
                    C[j] = B[j] + 1.0

    mod = ModFromScript
    with tvm.transform.PassContext(config={"tir.InjectDoubleBuffer": {"split_loop": 2}}):
        mod = tvm.transform.Sequential(
            [
                tvm.tir.transform.StorageFlatten(64),
                tvm.tir.transform.InjectDoubleBuffer(),
                tvm.tir.transform.Simplify(),
            ]
        )(mod)
    stmt = mod["main"].body
    # Double buffering doubles the 4-element allocation to 8.
    assert isinstance(stmt.body, tvm.tir.Allocate)
    assert list(stmt.body.extents) == [8]
    mod = tvm.tir.transform.ThreadSync("shared")(mod)
    f = mod["main"]
    count = [0]

    def count_sync(op):
        # Count tvm_storage_sync intrinsic calls inserted by ThreadSync.
        if isinstance(op, tvm.tir.Call) and op.op.same_as(tvm.ir.Op.get("tir.tvm_storage_sync")):
            count[0] += 1

    tvm.tir.stmt_functor.post_order_visit(f.body, count_sync)
    assert count[0] == 4
def test_flatten_let_buffer():
    """StorageFlatten must accept a buffer whose data pointer is bound by a LetStmt."""

    @tvm.script.ir_module
    class module:
        @T.prim_func
        def main():
            T.func_attr({"from_legacy_te_schedule": True})
            # If a pointer defined using a LetStmt,
            A_data: T.Ptr[T.int32] = T.call_extern("dummy_extern_function", dtype="handle")
            # and a buffer is backed by that pointer,
            A = T.decl_buffer([1], dtype="float32", data=A_data)
            T.evaluate(A[0])

    # then the call to StorageFlatten would previously result in an
    # exception being thrown; this is a regression test for that case.
    tvm.tir.transform.StorageFlatten(64)(module)
# TIR-level (non-TE) function used as input for StorageFlatten tests:
# copies one element between two 2x2 buffers.
@T.prim_func
def tir_func(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [2, 2])
    # Bug fix: bind B to handle ``b``.  Previously both buffers were
    # matched against ``a``, leaving parameter ``b`` unused and aliasing
    # A and B to the same storage.
    B = T.match_buffer(b, [2, 2])
    A[0, 1] = B[1, 1]
def test_flatten_tir():
    """StorageFlatten must leave TIR-native (non-TE-schedule) functions unchanged."""
    orig_mod = tvm.IRModule({GlobalVar("main"): tir_func})
    mod = tvm.tir.transform.StorageFlatten(64)(orig_mod)
    tvm.ir.assert_structural_equal(
        orig_mod, mod
    )  # StorageFlatten should do nothing to TIR functions
if __name__ == "__main__":
    # Discover and run every test in this file via tvm.testing.
    tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_transform_storage_rewrite.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import pytest
import tvm
import tvm.testing
from tvm import te
from tvm.driver.build_module import schedule_to_module
from tvm.script import tir as T
def test_storage_share():
    """A chain of elementwise stages should be folded into one allocation."""
    rows = te.var("m")
    cols = te.var("l")
    src = te.placeholder((rows, cols), name="A")
    num_stage = 5
    cur = src
    for t in range(num_stage):
        cur = te.compute((rows, cols), lambda i, j: cur[i, j] + (t + 1), name="A%d" % t)

    sched = te.create_schedule(cur.op)
    mod = schedule_to_module(sched, [src, cur])
    for pass_fn in (
        tvm.tir.transform.StorageFlatten(64),
        tvm.tir.transform.Simplify(),
        tvm.tir.transform.StorageRewrite(),
    ):
        mod = pass_fn(mod)
    body = mod["main"].body

    # In-place folding should leave exactly one intermediate allocation.
    allocations = []

    def collect(node):
        if isinstance(node, tvm.tir.Allocate):
            allocations.append(node)

    tvm.tir.stmt_functor.post_order_visit(body, collect)
    assert len(allocations) == 1
def register_mem(scope_tb, max_bits):
    """Register a MemoryInfo provider for the given storage scope.

    Parameters
    ----------
    scope_tb : str
        Storage scope name to register, e.g. "local.L0A".
    max_bits : int
        Capacity reported for the scope, in bits.
    """

    # Register mem
    @tvm.register_func("tvm.info.mem.%s" % scope_tb)
    def mem_info_inp_buffer():
        return tvm.ir.make_node(
            "MemoryInfo", unit_bits=16, max_simd_bits=32, max_num_bits=max_bits, head_address=None
        )
def test_alloc_seq():
    """Two sequential allocations in the same scope merge into one."""
    scope_tb = "local.L0A"
    register_mem(scope_tb, 1024 * 1024 * 1024)

    builder = tvm.tir.ir_builder.create()
    n = te.var("n")
    with builder.for_range(0, n, name="i") as i:
        with builder.for_range(0, 10, name="j") as j:
            buf = builder.allocate("float32", 200, name="A", scope=scope_tb)
            buf[j] = 1.2
        with builder.for_range(0, 10, name="j") as j:
            buf = builder.allocate("float32", 200, name="B", scope=scope_tb)
            buf[j] = 1.3

    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([n], builder.get()))
    rewritten = tvm.tir.transform.StorageRewrite()(mod)["main"].body

    merged = []

    def collect(node):
        if isinstance(node, tvm.tir.Allocate):
            # The merged allocation keeps the original extent of 200.
            assert node.extents[0].value == 200
            merged.append(node)

    tvm.tir.stmt_functor.post_order_visit(rewritten, collect)
    assert len(merged) == 1
def test_alloc_different_dtypes():
    """Mixed-dtype allocations in one scope are merged; the merged extent
    is measured in elements of the first dtype in the list."""

    def stmt_generater(dtype_list, length):
        # Build a body that allocates one buffer per dtype, plus an int8
        # buffer that reads from all four.
        ib = tvm.tir.ir_builder.create()
        base_dtype = dtype_list[0]
        global_a = te.placeholder((length,), name="global_a", dtype=base_dtype)
        assert len(dtype_list) == 4
        with ib.for_range(0, length, name="j") as j:
            dtype = dtype_list[0]
            A = ib.allocate(dtype, length, name="A", scope="local.L0A")
            A[j] = tvm.tir.const(1, dtype=dtype)
        with ib.for_range(0, length, name="j") as j:
            dtype = dtype_list[1]
            B = ib.allocate(dtype, length, name="B", scope="local.L0A")
            B[j] = tvm.tir.const(1, dtype=dtype)
        with ib.for_range(0, length, name="j") as j:
            dtype = dtype_list[2]
            C = ib.allocate(dtype, length, name="C", scope="local.L0A")
            C[j] = tvm.tir.const(1, dtype=dtype)
        with ib.for_range(0, length, name="j") as j:
            dtype = dtype_list[3]
            D = ib.allocate(dtype, length, name="D", scope="local.L0A")
            D[j] = tvm.tir.const(1, dtype=dtype)
        with ib.for_range(0, length, name="j") as j:
            dtype = "int8"
            E = ib.allocate(dtype, length, name="E", scope="local.L0A")
            E[j] = A[j].astype(dtype) + B[j].astype(dtype) + C[j].astype(dtype) + D[j].astype(dtype)
        return ib.get()

    def dtype_bit_len(dtype):
        # Extract the bit width from a dtype string, e.g. "float32" -> 32.
        index = 0
        for i in dtype:
            if i.isdigit():
                break
            index += 1
        return int(dtype[index:])

    def offset_generater(dtype_list, length):
        # Total size of all buffers, in elements of the base (first) dtype.
        dtype_len_list = [dtype_bit_len(i) for i in dtype_list]
        base_len = dtype_len_list[0]
        return sum([i * length / base_len for i in dtype_len_list])

    def dtype_test(dtype_list, length):
        def verify(n):
            if isinstance(n, tvm.tir.Allocate):
                # The merged allocation must cover all buffers exactly.
                assert n.extents[0].value == offset

        body = stmt_generater(dtype_list, length)
        offset = offset_generater(dtype_list, length)
        mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], body))
        body = tvm.tir.transform.StorageRewrite()(mod)["main"].body
        tvm.tir.stmt_functor.post_order_visit(body, verify)

    length = 1024
    dtype_list = ["float16", "int32", "uint16", "int8"]
    dtype_test(dtype_list, length)
    dtype_list = ["float32", "int32", "uint16", "int8"]
    dtype_test(dtype_list, length)
    dtype_list = ["float64", "int32", "uint16", "int8"]
    dtype_test(dtype_list, length)
    dtype_list = ["int8", "int32", "uint16", "uint8"]
    dtype_test(dtype_list, length)
def test_inplace_rule():
    """In-place folding: the A0/A1/AA intermediates should collapse so that
    StorageRewrite leaves exactly two allocations."""
    m = 10
    A = te.placeholder((m,), name="A")
    A0 = te.compute((m,), lambda i: A[i], name="A0")
    A1 = te.compute((m,), lambda i: A[i] + 1, name="A1")
    # NOTE(review): AA also reads A1[0], presumably blocking one in-place
    # reuse — hence 2 allocations rather than 1. Confirm against the pass.
    AA = te.compute((m,), lambda i: A0[i] + A1[i] + A1[0], name="AA")
    B = te.compute((m,), lambda i: AA[i] + 1, name="B")
    s = te.create_schedule(B.op)
    mod = schedule_to_module(s, [A, B])
    mod = tvm.tir.transform.StorageFlatten(64)(mod)
    mod = tvm.tir.transform.Simplify()(mod)
    mod = tvm.tir.transform.StorageRewrite()(mod)
    stmt = mod["main"].body
    # verify only have one allocations.
    # verify inplace folding works
    num_alloc = [0]
    def verify(n):
        # Count every allocation surviving the rewrite.
        if isinstance(n, tvm.tir.Allocate):
            num_alloc[0] += 1
    tvm.tir.stmt_functor.post_order_visit(stmt, verify)
    assert num_alloc[0] == 2
def test_storage_combine():
    """Stages sharing the "global:tag" storage scope are combined into one
    allocation whose extent covers all of them (asserted as 16 below)."""
    n = 8
    A = te.placeholder((4,), name="A")
    num_stage = 5
    B = A
    stages = []
    for t in range(num_stage):
        B = te.compute((n,), lambda i: B[i] + B[0] + (t + 1), name="A%d" % t)
        stages.append(B)
    s = te.create_schedule(B.op)
    # All intermediate stages (not the output) share one tagged scope.
    for S in stages[:-1]:
        s[S].set_scope("global:tag")
    mod = schedule_to_module(s, [A, B])
    mod = tvm.tir.transform.StorageFlatten(64)(mod)
    mod = tvm.tir.transform.Simplify()(mod)
    mod = tvm.tir.transform.StorageRewrite()(mod)
    stmt = mod["main"].body
    num_alloc = [0]
    def verify(n):
        if isinstance(n, tvm.tir.Allocate):
            num_alloc[0] += 1
            # Single combined pool with the expected total extent.
            assert n.extents[0].value == 16
    tvm.tir.stmt_functor.post_order_visit(stmt, verify)
    assert num_alloc[0] == 1
def test_storage_combine_with_vectorization():
    """Combined tagged-scope storage must still keep vectorized (ramp) loads
    from the two cached inputs in non-overlapping regions."""
    n = 1024
    A = te.placeholder((n,), name="A")
    B = te.placeholder((n,), name="B")
    C = te.compute((n,), lambda i: A[i] + B[i], name="C")
    s = te.create_schedule(C.op)
    AA = s.cache_read(A, "global:tag", readers=[C])
    BB = s.cache_read(B, "global:tag", readers=[C])
    CC = s.cache_write(C, "global:tag")
    s[CC].vectorize(s[CC].op.axis[0])
    mod = schedule_to_module(s, [A, B, C])
    mod = tvm.tir.transform.StorageFlatten(64)(mod)
    mod = tvm.tir.transform.VectorizeLoop()(mod)
    mod = tvm.tir.transform.StorageRewrite()(mod)
    mod = tvm.tir.transform.Simplify()(mod)
    stmt = mod["main"].body
    num_alloc = [0]
    def verify(v):
        # find add op
        if (
            isinstance(v, tvm.tir.Add)
            and isinstance(v.a, tvm.tir.Load)
            and isinstance(v.b, tvm.tir.Load)
        ):
            lhs_ramp = v.a.index
            rhs_ramp = v.b.index
            # these two ramp load should not overlap
            assert lhs_ramp.lanes == n
            assert rhs_ramp.lanes == n
            # Disjoint regions: one ramp must start past the other's end.
            assert lhs_ramp.base >= rhs_ramp.base + n or rhs_ramp.base >= lhs_ramp.base + n
        elif isinstance(v, tvm.tir.Allocate):
            num_alloc[0] += 1
    tvm.tir.stmt_functor.post_order_visit(stmt, verify)
    assert num_alloc[0] == 1
def test_storage_share_gpu():
    """On a GPU-style schedule, global-scope intermediates are shared down to
    two allocations while each shared-scope stage keeps its own."""
    m = te.var("m")
    A = [te.placeholder((m), name="A")]
    num_stage = 5
    # Build a chain of paired stages: A{t}_s (to become shared) then A{t}.
    for t in range(num_stage):
        A.append(te.compute((m,), lambda i: A[-1][i] + (t + 1), name="A%d_s" % t))
        A.append(te.compute((m,), lambda i: A[-1][i], name="A%d" % t))
    s = te.create_schedule(A[-1].op)
    for t in range(num_stage):
        x = A[2 * t + 2].op.axis[0]
        bx, tx = s[A[2 * t + 2]].split(x, factor=32)
        s[A[2 * t + 2]].bind(bx, te.thread_axis("blockIdx.x"))
        s[A[2 * t + 2]].bind(tx, te.thread_axis("threadIdx.x"))
        s[A[2 * t + 1]].compute_at(s[A[2 * t + 2]], tx)
        s[A[2 * t + 1]].set_scope("shared")
    mod = schedule_to_module(s, [A[0], A[-1]])
    mod = tvm.tir.transform.StorageFlatten(64)(mod)
    mod = tvm.tir.transform.Simplify()(mod)
    mod = tvm.tir.transform.StorageRewrite()(mod)
    stmt = mod["main"].body
    alloc_stats = {"global": 0, "shared": 0}
    def verify(n):
        # Tally allocations by their storage scope.
        if isinstance(n, tvm.tir.Allocate):
            scope = n.buffer_var.type_annotation.storage_scope
            alloc_stats[scope] += 1
    tvm.tir.stmt_functor.post_order_visit(stmt, verify)
    assert alloc_stats["global"] == 2
    assert alloc_stats["shared"] == num_stage
def test_parallel_alloc():
    """Allocations inside a parallel loop must not be hoisted above it, and a
    parallel_launch_point pragma keeps the allocation under the pragma."""
    ib = tvm.tir.ir_builder.create()
    n = te.var("n")
    with ib.for_range(0, n, name="i", kind="parallel") as i:
        with ib.for_range(0, 10, name="j") as j:
            A = ib.allocate("float32", n, name="A", scope="global")
            A[j] = A[j] + 2
    body = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([n], body))
    body = tvm.tir.transform.StorageRewrite()(mod)["main"]
    # Allocate stays nested inside the parallel loop.
    assert isinstance(body.body.body, tvm.tir.Allocate)
    ib = tvm.tir.ir_builder.create()
    n = te.var("n")
    with ib.for_range(0, n, name="t") as i:
        ib.scope_attr(
            tvm.tir.const(1, "int32"), "pragma_scope", tvm.tir.StringImm("parallel_launch_point")
        )
        with ib.for_range(0, n, name="i", kind="parallel") as i:
            with ib.for_range(0, 10, name="j") as j:
                A = ib.allocate("float32", n, name="A", scope="global")
                A[j] = A[j] + 2
    body = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([n], body))
    body = tvm.tir.transform.StorageRewrite()(mod)["main"]
    # One level deeper: outer serial loop + pragma attr wrap the parallel loop.
    assert isinstance(body.body.body.body.body, tvm.tir.Allocate)
def test_while_alloc():
    """Allocations inside a while loop are hoisted out of the while body: to
    just inside a parallel `for`, or fully above a serial `for`."""
    def get_mod(kind="serial"):
        # Build: for i { alloc j; j[0]=0; while(j[0]<10) { alloc A; ... } }
        ib = tvm.tir.ir_builder.create()
        n = te.var("n")
        with ib.for_range(0, n, name="i", kind=kind) as i:
            j = ib.allocate("int32", 1, name="j", scope="global")
            j[0] = 0
            with ib.while_loop(j[0] < 10):
                A = ib.allocate("float32", n, name="A", scope="global")
                A[j[0]] = A[j[0]] + 2
                j[0] += j[0] + 1
        body = ib.get()
        return tvm.IRModule.from_expr(tvm.tir.PrimFunc([n], body))
    mod = get_mod(kind="parallel")
    # parallel (i, 0, n) {
    #   allocate j[int32 * 1]
    #   j[0] = 0
    #   while((j[0] < 10)){
    #     // attr [A] storage_scope = "global"
    #     allocate A[float32 * n]
    #     A[j[0]] = (A[j[0]] + 2f)
    #     j[0] = (j[0] + (j[0] + 1))
    #   }
    # }
    body = tvm.tir.transform.StorageRewrite()(mod)["main"]
    # parallel (i, 0, n) {
    #   allocate j[int32 * 1]
    #   allocate A[float32 * n]
    #   j[0] = 0
    #   while((j[0] < 10)){
    #     A[j[0]] = (A[j[0]] + 2f)
    #     j[0] = (j[0] + (j[0] + 1))
    #   }
    # }
    assert isinstance(body.body.body, tvm.tir.Allocate)  # j
    assert isinstance(body.body.body.body, tvm.tir.Allocate)  # A
    mod = get_mod(kind="serial")
    # for (i, 0, n) {
    #   allocate j[int32 * 1]
    #   j[0] = 0
    #   while((j[0] < 10)){
    #     // attr [A] storage_scope = "global"
    #     allocate A[float32 * n]
    #     A[j[0]] = (A[j[0]] + 2f)
    #     j[0] = (j[0] + (j[0] + 1))
    #   }
    # }
    body = tvm.tir.transform.StorageRewrite()(mod)["main"]
    # allocate j[int32 * 1]
    # allocate A[float32 * n]
    # for (i, 0, n) {
    #   j[0] = 0
    #   while((j[0] < 10)){
    #     A[j[0]] = (A[j[0]] + 2f)
    #     j[0] = (j[0] + (j[0] + 1))
    #   }
    # }
    body = tvm.tir.transform.StorageRewrite()(mod)["main"]
    assert isinstance(body.body, tvm.tir.Allocate)  # j
    assert isinstance(body.body.body, tvm.tir.Allocate)  # A
def test_inplace_rule2(scope_tb="local_TB2", max_bits=1024 * 1024 * 1024):
    """In-place folding with cache_read stages in a registered tagged scope;
    also reused by test_exceed_mem with a tiny max_bits to force failure."""
    # Test Buffer
    register_mem(scope_tb, max_bits)
    m = 10
    A = te.placeholder((m,), name="A")
    C = te.placeholder((m,), name="C")
    D = te.placeholder((m,), name="D")
    A0 = te.compute((m,), lambda i: A[i] + C[i], name="A0")
    A1 = te.compute((m,), lambda i: D[i] * D[i], name="A1")
    A2 = te.compute((m,), lambda i: A0[i] + A1[i], name="A2")
    B = te.compute((m,), lambda i: A2[i], name="B")
    s = te.create_schedule(B.op)
    A0L = s.cache_read(A0, scope_tb, [A2])
    A1L = s.cache_read(A1, scope_tb, [A2])
    A2L = s.cache_read(A2, scope_tb, [B])
    mod = schedule_to_module(s, [A, B, C, D])
    mod = tvm.tir.transform.StorageFlatten(64)(mod)
    mod = tvm.tir.transform.Simplify()(mod)
    mod = tvm.tir.transform.StorageRewrite()(mod)
    stmt = mod["main"].body
    # verify only have one allocations.
    # verify inplace folding works
    num_alloc = [0]
    def verify(n):
        if isinstance(n, tvm.tir.Allocate):
            num_alloc[0] += 1
    tvm.tir.stmt_functor.post_order_visit(stmt, verify)
    assert num_alloc[0] == 2
def test_exceed_mem():
    """A too-small registered memory bound must make StorageRewrite raise an
    'Allocation exceed bound of memory' error."""
    max_bits = 639
    # The critical max_num_bits is between 639 and 640
    loc = -1
    try:
        test_inplace_rule2("local_TEM", max_bits)
    except Exception as e:
        # Expected path: capture the message and look for the bound error.
        estr = str(e)
        loc = estr.find("Allocation exceed bound of memory")
    # loc stays -1 if no exception was raised or the message did not match.
    assert loc != -1
def test_inplace_rule3():
    """In-place folding across a cross-product-style compute graph: every
    remaining allocation in the tagged scope must have extent 70."""
    # Test Buffer
    scope_tb = "local_TB3"
    max_bits = 1024 * 1024 * 1024
    register_mem(scope_tb, max_bits)
    m = 10
    B0 = te.placeholder((m,), name="B0")
    B1 = te.placeholder((m,), name="B1")
    B2 = te.placeholder((m,), name="B2")
    B3 = te.placeholder((m,), name="B3")
    B4 = te.placeholder((m,), name="B4")
    B5 = te.placeholder((m,), name="B5")
    # Pairwise products / differences resembling a cross product per element.
    B6 = te.compute((m,), lambda i: B1[i] * B5[i], name="B6")
    B7 = te.compute((m,), lambda i: B2[i] * B4[i], name="B7")
    B8 = te.compute((m,), lambda i: B6[i] - B7[i], name="B8")
    B9 = te.compute((m,), lambda i: B2[i] * B3[i], name="B9")
    B10 = te.compute((m,), lambda i: B0[i] * B5[i], name="B10")
    B11 = te.compute((m,), lambda i: B9[i] - B10[i], name="B11")
    B12 = te.compute((m,), lambda i: B0[i] * B4[i], name="B12")
    B13 = te.compute((m,), lambda i: B1[i] * B3[i], name="B13")
    B14 = te.compute((m,), lambda i: B12[i] - B13[i], name="B14")
    B = te.compute((m,), lambda i: B8[i] * B11[i] + B14[i], name="B")
    s = te.create_schedule(B.op)
    B1L = s.cache_read(B1, scope_tb, [B6, B13])
    B5L = s.cache_read(B5, scope_tb, [B6, B10])
    B2L = s.cache_read(B2, scope_tb, [B7, B9])
    B4L = s.cache_read(B4, scope_tb, [B7, B12])
    B3L = s.cache_read(B3, scope_tb, [B9, B13])
    B0L = s.cache_read(B0, scope_tb, [B10, B12])
    B8L = s.cache_write(B8, scope_tb)
    B11L = s.cache_write(B11, scope_tb)
    B14L = s.cache_write(B14, scope_tb)
    B6L = s.cache_write(B6, scope_tb)
    B7L = s.cache_write(B7, scope_tb)
    B9L = s.cache_write(B9, scope_tb)
    B10L = s.cache_write(B10, scope_tb)
    B12L = s.cache_write(B12, scope_tb)
    B13L = s.cache_write(B13, scope_tb)
    s[B12].compute_inline()
    s[B13].compute_inline()
    s[B8].compute_inline()
    s[B11].compute_inline()
    s[B14].compute_inline()
    s[B6].compute_inline()
    s[B7].compute_inline()
    s[B9].compute_inline()
    s[B10].compute_inline()
    s = s.normalize()
    mod = schedule_to_module(s, [B0, B1, B2, B3, B4, B5, B])
    mod = tvm.tir.transform.StorageFlatten(64)(mod)
    mod = tvm.tir.transform.Simplify()(mod)
    mod = tvm.tir.transform.StorageRewrite()(mod)
    stmt = mod["main"].body
    # verify only have one allocations.
    # verify inplace folding works
    def verify(n):
        if isinstance(n, tvm.tir.Allocate):
            # 70 = expected folded pool extent for this graph.
            assert n.extents[0].value == 70
    tvm.tir.stmt_functor.post_order_visit(stmt, verify)
def test_alloc_seq_type():
    """Mixed float32/int16 allocations in the same scope are merged into one
    pool whose extent is 500 (in units of the pool's element type)."""
    ib = tvm.tir.ir_builder.create()
    n = te.var("n")
    with ib.for_range(0, n, name="i") as i:
        with ib.for_range(0, 10, name="j") as j:
            A = ib.allocate("float32", 200, name="A", scope="local.L0A")
            A1 = ib.allocate("float32", 200, name="A1", scope="local.L0A")
            A[j] = 1.2
            A1[j] = 1.3
            B = ib.allocate("int16", 200, name="B", scope="local.L0A")
            B[j] = tvm.tir.const(1, "int16")
            C = ib.allocate("int16", 200, name="C", scope="local.L0A")
            C[j] = tvm.tir.const(1, "int16")
            D = ib.allocate("int16", 200, name="D", scope="local.L0A")
            D[j] = B[j] + C[j]
            # A2 reuses float32 storage after the int16 buffers die.
            A2 = ib.allocate("float32", 200, name="A2", scope="local.L0A")
            A2[j] = A[j]
    body = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([n], body))
    body = tvm.tir.transform.StorageRewrite()(mod)["main"].body
    num_alloc = [0]
    def verify(n):
        if isinstance(n, tvm.tir.Allocate):
            num_alloc[0] += 1
            assert n.extents[0].value == 500
    tvm.tir.stmt_functor.post_order_visit(body, verify)
    assert num_alloc[0] == 1
def test_alloc_seq_type2():
    """Sequential, non-overlapping buffers of mixed dtypes in a registered
    scope collapse into a single 200-element allocation."""
    scope_tb = "local.L0A2"
    max_bits = 1024 * 1024 * 1024
    register_mem(scope_tb, max_bits)
    ib = tvm.tir.ir_builder.create()
    n = te.var("n")
    with ib.for_range(0, n, name="i") as i:
        # Three loops with disjoint lifetimes: A (float32), B (int16), C (float32).
        with ib.for_range(0, 10, name="j") as j:
            A = ib.allocate("float32", 200, name="A", scope=scope_tb)
            A[j] = 1.2
        with ib.for_range(0, 20, name="j") as j:
            B = ib.allocate("int16", 400, name="B", scope=scope_tb)
            B[j] = tvm.tir.const(1, "int16")
        with ib.for_range(0, 10, name="j") as j:
            C = ib.allocate("float32", 200, name="C", scope=scope_tb)
            C[j] = 1.2
    body = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([n], body))
    body = tvm.tir.transform.StorageRewrite()(mod)["main"].body
    num_alloc = [0]
    def verify(n):
        if isinstance(n, tvm.tir.Allocate):
            num_alloc[0] += 1
            assert n.extents[0].value == 200
    tvm.tir.stmt_functor.post_order_visit(body, verify)
    assert num_alloc[0] == 1
def test_reuse_small_buffer():
    """Smaller dead buffers can be reused as part of a later, larger buffer;
    everything merges into one 800-element int16 allocation."""
    ib = tvm.tir.ir_builder.create()
    n = te.var("n")
    with ib.for_range(0, n, name="i") as i:
        with ib.for_range(0, 10, name="j") as j:
            A = ib.allocate("int16", 200, name="A", scope="local.L0A")
            A[j] = tvm.tir.const(1, "int16")
            B = ib.allocate("int16", 200, name="B", scope="local.L0A")
            B[j] = tvm.tir.const(1, "int16")
            B1 = ib.allocate("int16", 200, name="B1", scope="local.L0A")
            B1[j] = A[j] + B[j]
            # Larger buffers allocated after A/B/B1 are no longer live.
            C = ib.allocate("int16", 400, name="C", scope="local.L0A")
            C[j] = tvm.tir.const(1, "int16")
            D = ib.allocate("int16", 400, name="D", scope="local.L0A")
            D[j] = tvm.tir.const(1, "int16")
            E = ib.allocate("int16", 400, name="E", scope="local.L0A")
            E[j] = C[j]
    body = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([n], body))
    body = tvm.tir.transform.StorageRewrite()(mod)["main"].body
    num_alloc = [0]
    def verify(n):
        if isinstance(n, tvm.tir.Allocate):
            num_alloc[0] += 1
            assert n.extents[0].value == 800
    tvm.tir.stmt_functor.post_order_visit(body, verify)
    assert num_alloc[0] == 1
def test_replace_dataflow():
    """A cache_read shared by several readers must keep the schedule dataflow
    consistent enough for InferBound to succeed."""
    shape = (255,)
    A = te.placeholder(shape, name="A")
    B = te.compute(shape, lambda i: A[i] + A[i], name="B")
    C = te.compute(shape, lambda i: A[i] + B[i], name="C")
    D = te.compute(shape, lambda i: A[i] + C[i], name="D")
    E = te.compute(shape, lambda i: A[i] + D[i], name="E")
    s = te.create_schedule(E.op)
    # One cached copy of A feeding every consumer stage.
    s.cache_read(A, "local", [B, C, D, E])
    bounds = tvm.te.schedule.InferBound(s)
    assert isinstance(bounds, tvm.container.Map)
def test_large_input():
    """Allocation extents for very large (16384 x 16384) tensors must not
    overflow: 268435456 == 16384 * 16384."""
    @te.hybrid.script
    def compute(a, b):
        n = 16384
        c = output_tensor((n, n), "int32")
        for i in range(n):
            for j in range(n):
                c[i, j] = a[i, j] - b[i, j]
        return c
    n = 16384
    shape = (n, n)
    a = te.placeholder(shape, name="a", dtype="int32")
    b = te.placeholder(shape, name="b", dtype="int32")
    c = te.compute(shape, lambda i, j: compute(a, b)[i, j])
    c = te.compute(shape, lambda i, j: 1 + c[i, j])
    s = te.create_schedule(c.op)
    stmt = tvm.lower(s, [a, b, c])["main"].body
    def verify(n):
        if isinstance(n, tvm.tir.Allocate):
            # Full 2-D extent flattened into one dimension.
            assert n.extents[0].value == 268435456
    tvm.tir.stmt_functor.post_order_visit(stmt, verify)
def test_access_in_let_value():
    """A buffer read inside a let-bound value is tracked correctly, and the
    loop-local allocation is hoisted above the loop by StorageRewrite."""
    @T.prim_func
    def func(A: T.Buffer[(8,), "float32"]):
        for i in range(8):
            B_data = T.allocate((1,), "float32", "global")
            B = T.buffer_decl(shape=[1], dtype="float32", data=B_data)
            B[0] = 3.14
            x: T.float32 = T.exp(B[0], dtype="float32")
            A[i] = (x + 1.0) / (x - 1.0)
    @T.prim_func
    def func_rewritten(A: T.Buffer[(8,), "float32"]) -> None:
        B_data = T.allocate((1,), "float32", "global")
        B = T.buffer_decl(shape=[1], dtype="float32", data=B_data)
        for i in range(8):
            B[0] = 3.14
            x: T.float32 = T.exp(B[0], dtype="float32")
            A[i] = (x + 1.0) / (x - 1.0)
    mod = tvm.tir.transform.StorageRewrite()(tvm.IRModule.from_expr(func))
    tvm.ir.assert_structural_equal(mod["main"], func_rewritten)
class BaseCompare(tvm.testing.CompareBeforeAfter):
    # Shared harness: each subclass's `before` is transformed by
    # StorageRewrite and compared against its `expected`.
    transform = tvm.tir.transform.StorageRewrite()
class TestLetBufferRewrite(BaseCompare):
    """StorageRewrite replaces the bound var of backing allocations
    If StorageRewrite replaces the backing variable of an array, such
    as when vectorizing the storage type, the variable must be
    replaced in the LetStmt that defines it. Currently, StmtMutator
    only visits usage of variables, and does not visit definitions of
    variables, so the definition in a LetStmt must be explicitly
    handled.
    """
    # NOTE: `before`/`expected` deliberately take no `self`; the
    # CompareBeforeAfter harness consumes them as TVMScript definitions.
    def before() -> None:
        A_data: T.Ptr[T.int32] = T.call_extern("dummy_func", dtype="handle")
        A = T.buffer_decl([8], "int32", data=A_data)
        A[0:8] = T.broadcast(42, 8)
    def expected() -> None:
        A_data: T.Ptr[T.int32x8] = T.call_extern("dummy_func", dtype="handle")
        A = T.buffer_decl([1], "int32x8", data=A_data)
        A[0] = T.broadcast(42, 8)
class TestRewriteInPlaceUseOfNonFlatBuffer(BaseCompare):
    """A non-flat buffer may be re-used for in-place operations"""
    def before(A: T.Buffer[(16, 16), "float32"], D: T.Buffer[(16, 16), "float32"]):
        B_data = T.allocate(
            [16, 16],
            dtype="float32",
            scope="global",
        )
        B = T.buffer_decl(
            [16, 16],
            dtype="float32",
            axis_separators=[1],
            data=B_data,
        )
        C_data = T.allocate(
            [16, 16],
            dtype="float32",
            scope="global",
        )
        C = T.buffer_decl(
            [16, 16],
            dtype="float32",
            axis_separators=[1],
            data=C_data,
        )
        for i, j in T.grid(16, 16):
            B[i, j] = A[i, j]
        for i, j in T.grid(16, 16):
            C[i, j] = 2.0 * B[i, j]
        for i, j in T.grid(16, 16):
            D[i, j] = C[i, j]
    def expected(A: T.Buffer[(16, 16), "float32"], D: T.Buffer[(16, 16), "float32"]):
        # After rewriting, C shares B's backing allocation (data=B.data)
        # because the shapes match and the use is in-place.
        B_data = T.allocate(
            [16, 16],
            dtype="float32",
            scope="global",
        )
        B = T.buffer_decl([16, 16], dtype="float32", axis_separators=[1], data=B_data)
        C = T.buffer_decl(
            [16, 16],
            dtype="float32",
            axis_separators=[1],
            data=B.data,
        )
        for i, j in T.grid(16, 16):
            B[i, j] = A[i, j]
        for i, j in T.grid(16, 16):
            C[i, j] = 2.0 * B[i, j]
        for i, j in T.grid(16, 16):
            D[i, j] = C[i, j]
class TestNoRewriteOfSharedNonFlatBuffer(BaseCompare):
    """In general, sharing of non-flat buffer isn't supported
    The current packing algorithms in StorageRewrite assume a flat
    memory space, and do not support packing of N-d buffers. For
    buffers with axis separators, normal buffer sharing should be
    disabled.
    Like TestRewriteInPlaceUseOfNonFlatBuffer, except that B and C do
    not have matching shapes.
    """
    def before(A: T.Buffer[(16, 16), "float32"], D: T.Buffer[(16, 16), "float32"]):
        B_data = T.allocate(
            [16, 16],
            dtype="float32",
            scope="global",
        )
        B = T.buffer_decl(
            [16, 16],
            dtype="float32",
            axis_separators=[1],
            data=B_data,
        )
        # C is deliberately larger (20x20) so its shape differs from B's.
        C_data = T.allocate(
            [20, 20],
            dtype="float32",
            scope="global",
        )
        C = T.buffer_decl(
            [20, 20],
            dtype="float32",
            axis_separators=[1],
            data=C_data,
        )
        for i, j in T.grid(16, 16):
            B[i, j] = A[i, j]
        for i, j in T.grid(16, 16):
            C[i, j] = 2.0 * B[i, j]
        for i, j in T.grid(16, 16):
            D[i, j] = C[i, j]
    # The pass must leave the function untouched.
    expected = before
if __name__ == "__main__":
    # Run all tests in this file through TVM's pytest wrapper.
    tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_transform_thread_sync.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm import te
from tvm.script import tir as T
def run_passes(func: tvm.tir.PrimFunc) -> tvm.IRModule:
    """Flatten `func`, split it into host/device for a CUDA target, and insert
    shared-memory synchronization; returns the transformed IRModule."""
    mod = tvm.IRModule.from_expr(func)
    mod = tvm.tir.transform.StorageFlatten(64)(mod)
    cuda_target = tvm.target.Target("cuda")
    # SplitHostDevice needs a global symbol and a target on the function.
    mod = tvm.tir.transform.Apply(
        lambda f: f.with_attr({"global_symbol": "test", "target": cuda_target})
    )(mod)
    mod = tvm.tir.transform.SplitHostDevice()(mod)
    return tvm.tir.transform.ThreadSync("shared")(mod)
@tvm.testing.requires_cuda
def test_thread_storage_sync():
    """A shared-scope producer consumed by a thread-bound stage must get a
    tir.tvm_storage_sync call inserted between producer and consumer."""
    m = te.size_var("m")
    l = te.size_var("l")
    A = te.placeholder((m, l), name="A")
    A1 = te.compute((m, l), lambda i, j: A[i, j], name="A1")
    A2 = te.compute((m, l), lambda i, j: A1[i, j] + 3, name="A2")
    s = te.create_schedule(A2.op)
    xo, xi = s[A2].split(A2.op.axis[0], factor=8)
    s[A2].bind(xo, te.thread_axis("blockIdx.x"))
    s[A1].compute_at(s[A2], xo)
    s[A1].set_scope("shared")
    bounds = tvm.te.schedule.InferBound(s)
    assert isinstance(bounds, tvm.container.Map)
    stmt = tvm.te.schedule.ScheduleOps(s, bounds)
    func = tvm.te.schedule.SchedulePostProcToPrimFunc([A, A2], stmt, None)
    mod = run_passes(func)
    f = mod["test_kernel0"]
    # The sync call sits between the producer and consumer statements.
    body_list = tvm.tir.stmt_list(f.body.body.body)
    assert body_list[1].value.op.same_as(tvm.ir.Op.get("tir.tvm_storage_sync"))
@tvm.testing.requires_cuda
def test_sync_else_branch():
    """Shared-memory writes that occur only in an else branch must still be
    detected so a storage sync is inserted."""
    def ir(A, B):
        # Build: first loop writes local or shared per-element; second loop
        # reads the same buffers back into B.
        ib = tvm.tir.ir_builder.create()
        Aptr = ib.buffer_ptr(A)
        Bptr = ib.buffer_ptr(B)
        tx = te.thread_axis("threadIdx.x")
        ib.scope_attr(tx, "thread_extent", 1)
        local = ib.allocate(A.dtype, (8,), name="buf_local", scope="local")
        shared = ib.allocate(A.dtype, (8,), name="buf_shared", scope="shared")
        with ib.for_range(0, 8) as i:
            with ib.if_scope(Aptr[i] < 0):
                local[i] = Aptr[i]
            with ib.else_scope():
                shared[i] = Aptr[i]
        with ib.for_range(0, 8) as i:
            with ib.if_scope(Aptr[i] < 0):
                Bptr[i] = local[i]
            with ib.else_scope():
                Bptr[i] = shared[i]
        return ib.get()
    A = tvm.tir.decl_buffer((8,), "float32")
    B = tvm.tir.decl_buffer((8,), "float32")
    stmt = ir(A, B)
    func = tvm.te.schedule.SchedulePostProcToPrimFunc([A, B], stmt, None)
    mod = run_passes(func)
    assert "@tir.tvm_storage_sync" in str(mod)
@tvm.testing.requires_cuda
def test_sync_read_thread_id_independent_location():
    """Shared memory written under a thread guard but read at a location
    independent of the thread id still requires a sync."""
    @T.prim_func
    def func(p0: T.Buffer[2, "float32"], p1: T.Buffer[2, "float32"]) -> None:
        threadIdx_x = T.env_thread("threadIdx.x")
        blockIdx_x = T.env_thread("blockIdx.x")
        T.preflattened_buffer(p0, [1, 2, 1, 1], dtype="float32", data=p0.data)
        result_local = T.alloc_buffer([1], dtype="float32", scope="local")
        temp_shared = T.alloc_buffer([1], dtype="float32", scope="shared")
        T.launch_thread(blockIdx_x, 8)
        T.launch_thread(threadIdx_x, 4)
        result_local[0] = T.float32(0)
        if threadIdx_x < 1:
            temp_shared[0] = p0[0]
        result_local[0] = result_local[0] + temp_shared[0] * p1[0]
        if threadIdx_x < 1:
            temp_shared[0] = p0[1]
        result_local[0] = result_local[0] + temp_shared[0] * p1[1]
    mod = run_passes(func)
    assert "@tir.tvm_storage_sync" in str(mod)
if __name__ == "__main__":
    # Route through tvm.testing.main() (as the sibling test files do) so the
    # @tvm.testing.requires_cuda markers are honored instead of the tests
    # being invoked unconditionally on machines without CUDA.
    tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_transform_unify_thread_binding.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import sys
import tvm
import tvm.testing
from tvm import te
from tvm.script import tir as T
def _check(original, transformed):
    """Run UnifyThreadBinding + Simplify over `original` and require the
    result to be structurally equal to `transformed` (mapping free vars)."""
    unified = tvm.tir.transform.UnifyThreadBinding()(tvm.IRModule.from_expr(original))
    simplified = tvm.tir.transform.Simplify()(unified)
    tvm.ir.assert_structural_equal(simplified["main"], transformed, True)
def _check_fail(original):
    """UnifyThreadBinding is expected to reject `original` with a ValueError."""
    mod = tvm.IRModule.from_expr(original)
    with pytest.raises(ValueError):
        tvm.tir.transform.UnifyThreadBinding()(mod)
# Before-case: two separate threadIdx.x loops under one blockIdx.x loop.
@T.prim_func
def element_wise_thread_x(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [128, 128])
    B = T.match_buffer(b, [128, 128])
    C = T.match_buffer(c, [128, 128])
    for i in T.thread_binding(0, 128, "blockIdx.x"):
        for j0_0 in T.thread_binding(0, 4, "threadIdx.x"):
            for j0_1 in T.serial(0, 32):
                with T.block(""):
                    B[i, j0_0 * 32 + j0_1] = A[i, j0_0 * 32 + j0_1] * 2.0
        for j1_0 in T.thread_binding(0, 4, "threadIdx.x"):
            for j1_1 in T.serial(0, 32):
                with T.block(""):
                    C[i, j1_0 * 32 + j1_1] = B[i, j1_0 * 32 + j1_1] + 1.0
# Expected result for element_wise_thread_x: one merged threadIdx.x binding.
@T.prim_func
def unified_element_wise_thread_x(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [128, 128])
    B = T.match_buffer(b, [128, 128])
    C = T.match_buffer(c, [128, 128])
    for blockIdx_x in T.thread_binding(0, 128, "blockIdx.x"):
        for threadIdx_x in T.thread_binding(0, 4, "threadIdx.x"):
            for j0_1 in T.serial(0, 32):
                with T.block(""):
                    B[blockIdx_x, threadIdx_x * 32 + j0_1] = (
                        A[blockIdx_x, threadIdx_x * 32 + j0_1] * 2.0
                    )
            for j1_1 in T.serial(0, 32):
                with T.block(""):
                    C[blockIdx_x, threadIdx_x * 32 + j1_1] = (
                        B[blockIdx_x, threadIdx_x * 32 + j1_1] + 1.0
                    )
# Before-case: the two threadIdx.x loops use int32 vs int64 extents.
@T.prim_func
def element_wise_thread_x_different_dtype(
    A: T.Buffer[(128, 128), "float32"],
    B: T.Buffer[(128, 128), "float32"],
    C: T.Buffer[(128, 128), "float32"],
) -> None:
    for i in T.thread_binding(128, "blockIdx.x"):
        for j0_0 in T.thread_binding(4, "threadIdx.x"):
            for j0_1 in T.serial(0, 32):
                with T.block(""):
                    B[i, j0_0 * 32 + j0_1] = A[i, j0_0 * 32 + j0_1] * 2.0
        for j1_0 in T.thread_binding(T.int64(4), "threadIdx.x"):
            for j1_1 in T.serial(T.int64(32)):
                with T.block(""):
                    C[i, j1_0 * T.int64(32) + j1_1] = B[i, j1_0 * T.int64(32) + j1_1] + 1.0
# Expected result: unified int32 binding, with casts inserted in the int64 body.
@T.prim_func
def unified_element_wise_thread_x_different_dtype(
    A: T.Buffer[(128, 128), "float32"],
    B: T.Buffer[(128, 128), "float32"],
    C: T.Buffer[(128, 128), "float32"],
) -> None:
    for blockIdx_x in T.thread_binding(128, "blockIdx.x"):
        for threadIdx_x in T.thread_binding(4, "threadIdx.x"):
            for j0_1 in T.serial(0, 32):
                with T.block(""):
                    B[blockIdx_x, threadIdx_x * 32 + j0_1] = (
                        A[blockIdx_x, threadIdx_x * 32 + j0_1] * 2.0
                    )
            for j1_1 in T.serial(T.int64(32)):
                with T.block(""):
                    C[blockIdx_x, T.cast(threadIdx_x, "int64") * T.int64(32) + j1_1] = (
                        B[blockIdx_x, T.cast(threadIdx_x, "int64") * T.int64(32) + j1_1] + 1.0
                    )
# Before-case: env_thread/launch_thread style bindings with a repeated
# threadIdx.x launch.
@T.prim_func
def element_wise_env_thread_x(a: T.handle, b: T.handle, c: T.handle) -> None:
    j1_0 = T.env_thread("threadIdx.x")
    j0_0 = T.env_thread("threadIdx.x")
    i = T.env_thread("blockIdx.x")
    A = T.match_buffer(a, [128, 128])
    B = T.match_buffer(b, [128, 128])
    C = T.match_buffer(c, [128, 128])
    T.launch_thread(i, 128)
    T.launch_thread(j0_0, 4)
    T.launch_thread(j1_0, 4)
    for j0_1 in T.serial(0, 32):
        with T.block(""):
            B[i, j0_0 * 32 + j0_1] = A[i, j0_0 * 32 + j0_1] * 2.0
    for j1_1 in T.serial(0, 32):
        with T.block(""):
            C[i, j1_0 * 32 + j1_1] = B[i, j1_0 * 32 + j1_1] + 1.0
# Expected result for element_wise_env_thread_x: thread_binding loops.
@T.prim_func
def unified_element_wise_env_thread_x(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [128, 128])
    B = T.match_buffer(b, [128, 128])
    C = T.match_buffer(c, [128, 128])
    for blockIdx_x in T.thread_binding(0, 128, "blockIdx.x"):
        for threadIdx_x in T.thread_binding(0, 4, "threadIdx.x"):
            for j0_1 in T.serial(0, 32):
                with T.block(""):
                    B[blockIdx_x, threadIdx_x * 32 + j0_1] = (
                        A[blockIdx_x, threadIdx_x * 32 + j0_1] * 2.0
                    )
            for j1_1 in T.serial(0, 32):
                with T.block(""):
                    C[blockIdx_x, threadIdx_x * 32 + j1_1] = (
                        B[blockIdx_x, threadIdx_x * 32 + j1_1] + 1.0
                    )
# Before-case: two vthread.x bindings interleaved with a threadIdx.x binding.
@T.prim_func
def element_wise_vthread_x(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [128, 128])
    B = T.match_buffer(b, [128, 128])
    for i_0 in T.thread_binding(0, 2, "vthread.x"):
        for i_1 in T.thread_binding(0, 64, "threadIdx.x"):
            for j_0 in T.thread_binding(0, 2, "vthread.x"):
                for j_1 in T.serial(0, 64):
                    with T.block(""):
                        B[i_0 * 64 + i_1, j_0 * 64 + j_1] = A[i_0 * 64 + i_1, j_0 * 64 + j_1] * 2.0
# Expected result: the two vthread.x loops collapse into a single binding.
@T.prim_func
def unified_element_wise_vthread_x(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [128, 128])
    B = T.match_buffer(b, [128, 128])
    for vthread_x in T.thread_binding(0, 2, "vthread.x"):
        for threadIdx_x in T.thread_binding(0, 64, "threadIdx.x"):
            for j_1 in T.serial(0, 64):
                with T.block(""):
                    B[vthread_x * 64 + threadIdx_x, vthread_x * 64 + j_1] = (
                        A[vthread_x * 64 + threadIdx_x, vthread_x * 64 + j_1] * 2.0
                    )
# Failure case: two threadIdx.x loops in the same kernel with different
# extents (128 vs 64) cannot be unified.
@T.prim_func
def element_wise_two_thread_x_in_same_kernel_not_equal(
    a: T.handle, b: T.handle, c: T.handle
) -> None:
    A = T.match_buffer(a, [128, 128])
    B = T.match_buffer(b, [128, 128])
    C = T.match_buffer(c, [128, 64])
    for i in T.thread_binding(0, 128, "blockIdx.x"):
        for j0 in T.thread_binding(0, 128, "threadIdx.x"):
            B[i, j0] = A[i, j0] * 2.0
        for j1 in T.thread_binding(0, 64, "threadIdx.x"):
            C[i, j1] = A[i, j1] + 1.0
# Before-case: two independent kernels with different thread extents.
@T.prim_func
def element_wise_kernels_with_different_size(
    a: T.handle, b: T.handle, c: T.handle, d: T.handle
) -> None:
    A = T.match_buffer(a, [128, 128])
    B = T.match_buffer(b, [128, 128])
    C = T.match_buffer(c, [256, 256])
    D = T.match_buffer(d, [256, 256])
    for i0 in T.thread_binding(0, 128, "blockIdx.x"):
        for j0 in T.thread_binding(0, 128, "threadIdx.x"):
            B[i0, j0] = A[i0, j0] * 2.0
    for i1 in T.thread_binding(0, 256, "blockIdx.x"):
        for j1 in T.thread_binding(0, 256, "threadIdx.x"):
            D[i1, j1] = C[i1, j1] + 1.0
# Expected result: unification happens per kernel, so extents may differ
# between the two kernels.
@T.prim_func
def unified_element_wise_kernels_with_different_size(
    a: T.handle, b: T.handle, c: T.handle, d: T.handle
) -> None:
    A = T.match_buffer(a, [128, 128])
    B = T.match_buffer(b, [128, 128])
    C = T.match_buffer(c, [256, 256])
    D = T.match_buffer(d, [256, 256])
    for blockIdx_x in T.thread_binding(0, 128, "blockIdx.x"):
        for threadIdx_x in T.thread_binding(0, 128, "threadIdx.x"):
            B[blockIdx_x, threadIdx_x] = A[blockIdx_x, threadIdx_x] * 2.0
    for blockIdx_x in T.thread_binding(0, 256, "blockIdx.x"):
        for threadIdx_x in T.thread_binding(0, 256, "threadIdx.x"):
            D[blockIdx_x, threadIdx_x] = C[blockIdx_x, threadIdx_x] + 1.0
# Before-case: threadIdx.y acts as the implicit outer "block" level.
@T.prim_func
def element_wise_implicit_block(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [128, 128])
    B = T.match_buffer(b, [128, 128])
    C = T.match_buffer(c, [128, 128])
    for i in T.thread_binding(0, 128, "threadIdx.y"):
        for j0_0 in T.thread_binding(0, 4, "threadIdx.x"):
            for j0_1 in T.serial(0, 32):
                with T.block(""):
                    B[i, j0_0 * 32 + j0_1] = A[i, j0_0 * 32 + j0_1] * 2.0
        for j1_0 in T.thread_binding(0, 4, "threadIdx.x"):
            for j1_1 in T.serial(0, 32):
                with T.block(""):
                    C[i, j1_0 * 32 + j1_1] = B[i, j1_0 * 32 + j1_1] + 1.0
# Expected result for element_wise_implicit_block. (The outer loop var is
# named blockIdx_x even though it is bound to threadIdx.y — the name only
# reflects the unified variable, not the thread tag.)
@T.prim_func
def unified_element_wise_implicit_block(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [128, 128])
    B = T.match_buffer(b, [128, 128])
    C = T.match_buffer(c, [128, 128])
    for blockIdx_x in T.thread_binding(0, 128, "threadIdx.y"):
        for threadIdx_x in T.thread_binding(0, 4, "threadIdx.x"):
            for j0_1 in T.serial(0, 32):
                with T.block(""):
                    B[blockIdx_x, threadIdx_x * 32 + j0_1] = (
                        A[blockIdx_x, threadIdx_x * 32 + j0_1] * 2.0
                    )
            for j1_1 in T.serial(0, 32):
                with T.block(""):
                    C[blockIdx_x, threadIdx_x * 32 + j1_1] = (
                        B[blockIdx_x, threadIdx_x * 32 + j1_1] + 1.0
                    )
def test_thread_x():
    # Equal-extent threadIdx.x loops merge into one binding.
    _check(element_wise_thread_x, unified_element_wise_thread_x)
def test_thread_x_different_dtype():
    # int32 and int64 extents unify, with casts inserted where needed.
    _check(element_wise_thread_x_different_dtype, unified_element_wise_thread_x_different_dtype)
def test_env_thread_x():
    # env_thread/launch_thread form unifies the same way as thread_binding.
    _check(element_wise_env_thread_x, unified_element_wise_env_thread_x)
def test_vthread_x():
    # vthread.x bindings are unified as well.
    _check(element_wise_vthread_x, unified_element_wise_vthread_x)
def test_two_thread_x_in_same_kernel_not_equal():
    # Mismatched extents for the same thread tag in one kernel must fail.
    _check_fail(element_wise_two_thread_x_in_same_kernel_not_equal)
def test_kernels_with_different_size():
    # Unification is per-kernel, so separate kernels may use different extents.
    _check(
        element_wise_kernels_with_different_size, unified_element_wise_kernels_with_different_size
    )
def test_implicit_block():
    # threadIdx.y as the outermost binding is handled like a block level.
    _check(element_wise_implicit_block, unified_element_wise_implicit_block)
def test_lower_te():
    """UnifyThreadBinding must be a no-op on TE-derived schedules, even when
    two axes are bound to the same thread tag."""
    a = te.placeholder((32, 2, 2))
    b = te.compute((32, 2, 2), lambda i, j, k: a[i, j, k] * 2.0)
    s = te.create_schedule(b.op)
    s[b].bind(b.op.axis[1], te.thread_axis("threadIdx.x"))
    s[b].bind(b.op.axis[2], te.thread_axis("threadIdx.x"))
    orig_mod = tvm.driver.build_module.schedule_to_module(s, [a, b])
    mod = tvm.tir.transform.UnifyThreadBinding()(orig_mod)
    tvm.ir.assert_structural_equal(mod, orig_mod)  # UnifyThreadBinding should do nothing on TE
if __name__ == "__main__":
    # Run all tests in this file through TVM's pytest wrapper.
    tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_transform_unroll_loop.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
from tvm.script import tir as T
import os
def test_unroll_loop():
    """Exercise UnrollLoop's auto_max_step, explicit_unroll and pragma-driven modes."""
    ib = tvm.tir.ir_builder.create()
    dtype = "int64"
    n = te.size_var("n")
    Ab = tvm.tir.decl_buffer((n,), dtype)
    Aptr = ib.buffer_ptr(Ab)
    # Outer loop runs over [n, n+2); the 8-iteration inner loop is marked "unroll".
    # NOTE(review): the inner loop reuses name="i" while binding to j — looks like a
    # copy-paste slip, but it is harmless for these assertions.
    with ib.for_range(n, n + 2, name="i") as i:
        with ib.for_range(0, 8, name="i", kind="unroll") as j:
            Aptr[j + 1] = Aptr[i] + 1
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([Ab], stmt))
    assert isinstance(stmt, tvm.tir.For)
    # 8 unrolled iterations fit in a step budget of 16: the loop disappears.
    with tvm.transform.PassContext(config={"tir.UnrollLoop": {"auto_max_step": 16}}):
        ret = tvm.tir.transform.UnrollLoop()(mod)["main"].body
        assert not isinstance(ret, tvm.tir.For)
    # A budget of 15 is too small, so the loop survives.
    with tvm.transform.PassContext(config={"tir.UnrollLoop": {"auto_max_step": 15}}):
        ret = tvm.tir.transform.UnrollLoop()(mod)["main"].body
        assert isinstance(ret, tvm.tir.For)
    # explicit_unroll=False only tags the loop kind instead of expanding it.
    with tvm.transform.PassContext(
        config={"tir.UnrollLoop": {"auto_max_step": 16, "explicit_unroll": False}}
    ):
        ret = tvm.tir.transform.UnrollLoop()(mod)["main"].body
        assert isinstance(ret, tvm.tir.For)
        assert ret.kind == tvm.tir.ForKind.UNROLLED
    ib = tvm.tir.ir_builder.create()
    ib.scope_attr(tvm.tir.const(0, "int32"), "pragma_auto_unroll_max_step", 16)
    ib.emit(stmt)
    wrapped = ib.get()
    wrapped = tvm.tir.SeqStmt([wrapped, stmt])
    assert isinstance(ret, tvm.tir.For)
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([Ab], wrapped))
    # The pragma scope covers only the first copy of stmt; the second stays serial.
    with tvm.transform.PassContext(
        config={"tir.UnrollLoop": {"auto_max_depth": 8, "explicit_unroll": False}}
    ):
        ret = tvm.tir.transform.UnrollLoop()(mod)["main"].body
        assert isinstance(ret[0], tvm.tir.For)
        assert ret[0].kind == tvm.tir.ForKind.UNROLLED
        assert isinstance(ret[1], tvm.tir.For)
        assert ret[1].kind != tvm.tir.ForKind.UNROLLED
def test_unroll_fake_loop():
    """A single-iteration ("fake") loop should be eliminated by substitution."""
    ib = tvm.tir.ir_builder.create()
    dtype = "int32"
    n = te.size_var("n")
    Ab = tvm.tir.decl_buffer((n,), dtype)
    Aptr = ib.buffer_ptr(Ab)
    # Outer loop has extent 1 (i is always 0), so it qualifies for removal
    # under auto_max_extent=1.
    with ib.for_range(0, 1, name="i") as i:
        Aptr[i * 2] = 3
        with ib.for_range(0, 10, name="j") as j:
            Aptr[j + 1] = Aptr[i] + 1
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([Ab], stmt))
    with tvm.transform.PassContext(
        config={
            "tir.UnrollLoop": {"auto_max_depth": 8, "auto_max_extent": 1, "explicit_unroll": False}
        }
    ):
        ret = tvm.tir.transform.UnrollLoop()(mod)["main"].body
        # With the loop gone, the body's first statement is the store itself.
        assert isinstance(ret[0], tvm.tir.BufferStore)
def test_unroll_single_count_loops():
    """A symbolic-extent loop must be left untouched even with a small step budget."""
    n = te.size_var("n")
    A = te.placeholder((n,), name="A")
    B = te.compute((n,), lambda *i: A(*i), name="B")
    s = te.create_schedule(B.op)
    s = s.normalize()
    dom_map = tvm.te.schedule.InferBound(s)
    stmt = tvm.te.schedule.ScheduleOps(s, dom_map)
    # All UnrollLoop parameters are defaults except auto_max_step, set to 1
    # (default: 0); the loop extent is symbolic, so nothing should change.
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    with tvm.transform.PassContext(config={"tir.UnrollLoop": {"auto_max_step": 1}}):
        ret = tvm.tir.transform.UnrollLoop()(mod)["main"].body
        assert ret == stmt
def test_unroll_allocations():
    """Unrolling must duplicate allocations scoped inside the unrolled loop."""
    @tvm.script.ir_module
    class before:
        @T.prim_func
        def main():
            for i in T.unroll(2):
                with T.decl_buffer([16], "float32") as buf:
                    buf[0] = 0.0
    @tvm.script.ir_module
    class expected:
        @T.prim_func
        def main():
            # Each unrolled iteration gets its own private buffer declaration.
            with T.decl_buffer([16], "float32") as buf1:
                buf1[0] = 0.0
            with T.decl_buffer([16], "float32") as buf2:
                buf2[0] = 0.0
    after = tvm.tir.transform.UnrollLoop()(before)
    tvm.ir.assert_structural_equal(after, expected)
if __name__ == "__main__":
    # Run each test directly when invoked as a script.
    test_unroll_loop()
    test_unroll_fake_loop()
    test_unroll_single_count_loops()
    test_unroll_allocations()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_transform_vectorize.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
def test_vectorize_loop():
    """VectorizeLoop rewrites a `vectorize` loop into Ramp/Broadcast accesses."""
    # (removed an unused local `dtype = "int64"` — the buffer is float32)
    n = te.var("n")
    ib = tvm.tir.ir_builder.create()
    A = ib.pointer("float32", name="A")
    with ib.for_range(0, n) as i:
        with ib.for_range(0, 4, kind="vectorize") as j:
            A[j] = tvm.tir.const(1, A.dtype)
    stmt = ib.get()
    assert isinstance(stmt.body, tvm.tir.For)
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([A, n], stmt))
    stmt = tvm.tir.transform.VectorizeLoop()(mod)["main"].body
    # The outer serial loop survives; the inner vectorized loop is gone.
    assert isinstance(stmt, tvm.tir.For)
    assert not isinstance(stmt.body, tvm.tir.For)
    # The store index becomes a Ramp and the stored value a Broadcast.
    assert len(stmt.body.indices) == 1
    assert isinstance(stmt.body.indices[0], tvm.tir.Ramp)
    assert isinstance(stmt.body.value, tvm.tir.Broadcast)
def test_vectorize_vector():
    """Vectorizing a loop over an already-vector (float32x4) buffer works too."""
    # (removed an unused local `dtype = "int64"` — the buffer dtype is float32x4)
    n = te.var("n")
    ib = tvm.tir.ir_builder.create()
    A = ib.pointer("float32x4", name="A")
    with ib.for_range(0, n) as i:
        with ib.for_range(0, 4, kind="vectorize") as j:
            A[j] = tvm.tir.const(1, A.dtype)
    stmt = ib.get()
    assert isinstance(stmt.body, tvm.tir.For)
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([A, n], stmt))
    stmt = tvm.tir.transform.VectorizeLoop()(mod)["main"].body
    # The inner vectorized loop is gone; index is a Ramp, value a Broadcast.
    assert isinstance(stmt, tvm.tir.For)
    assert not isinstance(stmt.body, tvm.tir.For)
    assert len(stmt.body.indices) == 1
    assert isinstance(stmt.body.indices[0], tvm.tir.Ramp)
    assert isinstance(stmt.body.value, tvm.tir.Broadcast)
def test_vectorize_with_if():
    """Loop-invariant conditions are hoisted; lane-dependent ones keep a scalar loop."""
    n = te.var("n")
    x = te.var("x")
    builder = tvm.tir.ir_builder.create()
    A = builder.pointer("float32", name="A")
    with builder.for_range(0, 4, kind="vectorize") as lane:
        with builder.if_scope(x < n):
            A[lane] = A[lane] + 1
        with builder.else_scope():
            with builder.if_scope(lane < n):
                A[lane] = 2.0
    body = builder.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([A, n, x], body))
    transformed = tvm.tir.transform.VectorizeLoop()(mod)["main"].body
    # `x < n` does not depend on the lane, so the If is hoisted above the vector store.
    assert isinstance(transformed, tvm.tir.IfThenElse)
    then_store = transformed.then_case
    assert len(then_store.indices) == 1
    assert isinstance(then_store.indices[0], tvm.tir.Ramp)
    assert isinstance(then_store.value, tvm.tir.Add)
    assert then_store.value.dtype == "float32x4"
    # `lane < n` depends on the vectorized axis: that branch stays a serial For.
    assert isinstance(transformed.else_case, tvm.tir.For)
def test_vectorize_with_if_cond_int64():
    """if_then_else over an int64 axis must still build for llvm after vectorization."""
    m = te.size_var("m", dtype="int64")
    data = te.placeholder((m,), name="A", dtype="float32")
    out = te.compute((m,), lambda i: te.if_then_else(i < 2, data[i], data[i] * 2), name="B")
    sched = te.create_schedule(out.op)
    outer, inner = sched[out].split(out.op.axis[0], factor=4)
    sched[out].vectorize(inner)
    # Building is the assertion: it must not raise.
    tvm.build(sched, [data, out], "llvm")
def test_vectorize_let():
    """A scalar let-binding inside a vectorized loop widens to a vector LetStmt."""
    v = tvm.tir.Var("v", "float32")
    builder = tvm.tir.ir_builder.create()
    A = builder.pointer("float32", name="A")
    with builder.for_range(0, 4, kind="vectorize") as lane:
        # Wrap the remainder of the scope in LetStmt(v, A[lane] + 1, <rest>).
        builder.emit(lambda rest: tvm.tir.LetStmt(v, A[lane] + 1, rest))
        A[lane] = v + 2
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([A], builder.get()))
    result = tvm.tir.transform.VectorizeLoop()(mod)["main"].body
    assert isinstance(result, tvm.tir.LetStmt)
    assert result.value.dtype == "float32x4"
def test_vectorize_with_le_cond():
    """A lane-dependent `<=` guard prevents vectorization; the loop stays serial."""
    n = te.var("n")
    builder = tvm.tir.ir_builder.create()
    A = builder.pointer("float32", name="A")
    with builder.for_range(0, 4, kind="vectorize") as lane:
        with builder.if_scope(lane <= n):
            A[lane] = A[lane] + 1
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([A, n], builder.get()))
    result = tvm.tir.transform.VectorizeLoop()(mod)["main"].body
    # The guard depends on the vectorized axis, so a For must remain.
    assert isinstance(result, tvm.tir.For)
def test_vectorize_with_ge_cond():
    """A lane-dependent `>=` guard prevents vectorization; the loop stays serial."""
    n = te.var("n")
    builder = tvm.tir.ir_builder.create()
    A = builder.pointer("float32", name="A")
    with builder.for_range(0, 4, kind="vectorize") as lane:
        with builder.if_scope(lane >= n):
            A[lane] = A[lane] + 1
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([A, n], builder.get()))
    result = tvm.tir.transform.VectorizeLoop()(mod)["main"].body
    # The guard depends on the vectorized axis, so a For must remain.
    assert isinstance(result, tvm.tir.For)
def test_vectorize_if_then_else():
    """tir.if_then_else: lane-dependent conditions block vectorization; invariant ones don't."""
    n = te.var("n")
    x = te.var("x")
    ib = tvm.tir.ir_builder.create()
    A = ib.pointer("float32", name="A")
    with ib.for_range(0, 4, kind="vectorize") as i:
        # Condition depends on the vectorized axis i -> cannot vectorize.
        A[i] = tvm.tir.call_intrin("float32", "tir.if_then_else", i > 0, A[i] + 1, A[i])
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([A, n, x], stmt))
    stmt = tvm.tir.transform.VectorizeLoop()(mod)["main"].body
    assert isinstance(stmt, tvm.tir.For)
    ib = tvm.tir.ir_builder.create()
    A = ib.pointer("float32", name="A")
    with ib.for_range(0, n) as k:
        with ib.for_range(0, 4, kind="vectorize") as i:
            # Condition k > 0 is invariant across lanes -> vectorizable.
            A[k * 4 + i] = tvm.tir.call_intrin(
                "float32", "tir.if_then_else", k > 0, A[k * 4 + i], 0
            )
    stmt = ib.get()
    assert isinstance(stmt.body, tvm.tir.For)
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([A, n], stmt))
    stmt = tvm.tir.transform.VectorizeLoop()(mod)["main"].body
    assert not isinstance(stmt.body, tvm.tir.For)
    # The scalar else-value 0 was broadcast to a vector operand.
    assert isinstance(stmt.body.value.args[2], tvm.tir.Broadcast)
def test_vectorize_while_fail():
    """A while loop inside a vectorized loop should fail."""
    n = 64
    num_iter = 10
    def test_ir(A, B, C):
        # Build an extern compute body whose vectorized loop contains a While node.
        ib = tvm.tir.ir_builder.create()
        n = C.shape[0]
        A = ib.buffer_ptr(A)
        B = ib.buffer_ptr(B)
        C = ib.buffer_ptr(C)
        i = ib.allocate("int32", (1,), name="i", scope="local")
        i[0] = 0
        with ib.for_range(0, n) as j:
            C[j] = 0.0
        with ib.for_range(0, n, kind="vectorize") as j:
            with ib.while_loop(i[0] < num_iter):
                C[j] += A[j] + B[j]
                i[0] += 1
        return ib.get()
    dtype = "float32"
    A = te.placeholder((n,), name="A", dtype=dtype)
    B = te.placeholder((n,), name="B", dtype=dtype)
    C = te.extern(
        (n,),
        [A, B],
        lambda ins, outs: test_ir(ins[0], ins[1], outs[0]),
        name="while_vectorize",
        dtype=dtype,
    )
    s = te.create_schedule(C.op)
    try:
        tvm.lower(s, [A, B, C], "llvm")
        # Lowering must have raised; reaching here is a failure.
        assert False
    except tvm.error.TVMError as e:
        # Only the final line of the (possibly multi-line) error carries the message.
        error_msg = str(e).split("\n")[-1]
        expected = "A while loop inside a vectorized loop not supported"
        assert expected in error_msg
def test_vectorize_dtype_mismatch():
    """Vectorizing int64 index arithmetic near the int32 boundary must lower cleanly."""
    extent = tvm.tir.IntImm("int64", 4)
    A = te.compute((extent,), lambda i: tvm.tir.IntImm("int64", 2**31 - 1) + i, name="A")
    sched = te.create_schedule(A.op)
    sched[A].vectorize(A.op.axis[0])
    # Lowering is the assertion: no dtype-mismatch error may be raised.
    tvm.lower(sched, [A], "llvm", simple_mode=True)
if __name__ == "__main__":
    # Keep this list in sync with the test functions defined above.
    test_vectorize_vector()
    test_vectorize_with_if()
    test_vectorize_with_if_cond_int64()  # was missing from the manual call list
    test_vectorize_loop()
    test_vectorize_if_then_else()
    test_vectorize_with_le_cond()
    test_vectorize_with_ge_cond()
    test_vectorize_let()
    test_vectorize_while_fail()
    test_vectorize_dtype_mismatch()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_usmp_algo.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import tvm
from tvm import tir, script
from tvm.script import tir as T
from tvm.tir import stmt_functor
from tvm.tir.usmp import utils as usmp_utils
from tvm.target import Target
from tvm import WorkspacePoolInfo, PoolInfoProperties
def _replace_stmt_with_buf_var_names(buffer_info_map):
    """Re-key *buffer_info_map* by each value's buffer-variable name."""
    return {info.buffer_var.name: stmt for stmt, info in buffer_info_map.items()}
def _verify_conflicts(main_buf_name, conflicting_buf_names, buffer_info_map):
    """Assert every recorded conflict of *main_buf_name* appears in the expected names."""
    recorded_conflicts = buffer_info_map[main_buf_name].conflicts
    for conflicting_buf in recorded_conflicts:
        assert conflicting_buf.name_hint in conflicting_buf_names
def _get_allocates(primfunc):
    """Collect every tir.Allocate in *primfunc*, keyed by its buffer-variable name."""
    found = dict()

    def _collect(node):
        # post_order_visit hands us every node; keep only Allocate statements.
        if isinstance(node, tvm.tir.Allocate):
            found[str(node.buffer_var.name)] = node

    stmt_functor.post_order_visit(primfunc.body, _collect)
    return found
def _assign_poolinfos_to_allocates_in_primfunc(primfunc, pool_infos):
    """helper to assign poolinfos to allocate nodes in a tir.PrimFunc"""
    def set_poolinfos(stmt):
        # Rebuild each Allocate with the candidate-memory-pool annotation attached.
        # NOTE(review): any pre-existing annotations on the Allocate are replaced,
        # not merged — acceptable for these synthetic test modules.
        if isinstance(stmt, tvm.tir.Allocate):
            return tvm.tir.Allocate(
                buffer_var=stmt.buffer_var,
                dtype=stmt.dtype,
                extents=stmt.extents,
                condition=stmt.condition,
                body=stmt.body,
                annotations={tvm.tir.usmp.utils.CANDIDATE_MEMORY_POOL_ATTR: pool_infos},
            )
    return primfunc.with_body(stmt_functor.ir_transform(primfunc.body, None, set_poolinfos))
def _assign_poolinfos_to_allocates_in_irmodule(mod, pool_infos):
    """Attach *pool_infos* to every Allocate in each PrimFunc of *mod*.

    Only PrimFuncs are copied into the returned module.
    """
    updated = tvm.IRModule()
    for gvar, func in mod.functions.items():
        if isinstance(func, tvm.tir.PrimFunc):
            updated[gvar] = _assign_poolinfos_to_allocates_in_primfunc(func, pool_infos)
    return updated
def _assign_targets_to_primfuncs_irmodule(mod, target):
    """Stamp the "target" attribute onto every PrimFunc of *mod*.

    Only PrimFuncs are copied into the returned module.
    """
    updated = tvm.IRModule()
    for gvar, func in mod.functions.items():
        if isinstance(func, tvm.tir.PrimFunc):
            updated[gvar] = func.with_attr("target", target)
    return updated
def _check_max_workspace_size(buffer_pool_allocations, pool_info, size):
max_workspace_size = 0
for buffer_info, pool_allocation in buffer_pool_allocations.items():
if pool_allocation.pool_info == pool_info:
size_candidate = pool_allocation.byte_offset + buffer_info.size_bytes
if size_candidate > max_workspace_size:
max_workspace_size = size_candidate
assert max_workspace_size == size
def test_no_pool_error():
    """When every candidate pool is too small, the allocator must raise."""
    target = Target("c")
    tiny_workspace_pool = WorkspacePoolInfo(
        "tiny_workspace",
        [target],
        PoolInfoProperties(size_hint_bytes=10),
    )
    # Three mutually-conflicting 10-byte buffers cannot fit a 10-byte pool.
    bi_a = usmp_utils.BufferInfo(
        name_hint="bi_a", size_bytes=10, pool_candidates=[tiny_workspace_pool]
    )
    bi_b = usmp_utils.BufferInfo(
        name_hint="bi_b", size_bytes=10, pool_candidates=[tiny_workspace_pool]
    )
    bi_c = usmp_utils.BufferInfo(
        name_hint="bi_c", size_bytes=10, pool_candidates=[tiny_workspace_pool]
    )
    bi_a.set_conflicts([bi_b])
    bi_b.set_conflicts([bi_c])
    bi_c.set_conflicts([bi_a])
    buffer_info_arr = [bi_a, bi_b, bi_c]
    # Plain string (was an f-string with no placeholder).
    fusmp_algo = tvm.get_global_func("tir.usmp.algo.greedy_by_size")
    with pytest.raises(
        tvm.TVMError, match="TVM USMP Error: the space available in the provided pools exceeded"
    ):
        # Result intentionally discarded; only the raised error matters.
        fusmp_algo(buffer_info_arr, 0)
@pytest.mark.parametrize("algorithm", ["greedy_by_size", "greedy_by_conflicts", "hill_climb"])
def test_name_based_ordering(algorithm):
    """Equal sizes and identical conflict sets must yield a stable, name-ordered result."""

    def _run_once():
        target = Target("c")
        pool = WorkspacePoolInfo(
            "global_workspace",
            [target],
        )
        bi_a = usmp_utils.BufferInfo(name_hint="bi_a", size_bytes=10, pool_candidates=[pool])
        bi_b = usmp_utils.BufferInfo(name_hint="bi_b", size_bytes=10, pool_candidates=[pool])
        bi_c = usmp_utils.BufferInfo(name_hint="bi_c", size_bytes=10, pool_candidates=[pool])
        # Every buffer conflicts with the other two.
        bi_a.set_conflicts([bi_b, bi_c])
        bi_b.set_conflicts([bi_c, bi_a])
        bi_c.set_conflicts([bi_a, bi_b])
        allocator = tvm.get_global_func(f"tir.usmp.algo.{algorithm}")
        allocations = allocator([bi_a, bi_b, bi_c], 0)
        # With all sizes/conflicts equal, offsets are decided by name order.
        assert allocations[bi_a].byte_offset == 20
        assert allocations[bi_b].byte_offset == 10
        assert allocations[bi_c].byte_offset == 0

    # Repeat several times to check the ordering is stable across runs.
    for _ in range(0, 10):
        _run_once()
@pytest.mark.parametrize(
    ["algorithm", "workspace_size"],
    [("greedy_by_size", 140), ("greedy_by_conflicts", 140), ("hill_climb", 140)],
)
def test_linear(algorithm, workspace_size):
    """
    The test case here represent BufferInfo objects
    that could get generated for a linear sequence
    such as :
    (Op A)
    |
    bi_a
    |
    (Op B)
    |
    bi_b
    |
    .
    .
    .
    (Op F)
    |
    bi_f
    """
    target = Target("c")
    global_workspace_pool = WorkspacePoolInfo(
        "global_workspace",
        [target],
    )
    bi_a = usmp_utils.BufferInfo(
        name_hint="bi_a", size_bytes=10, pool_candidates=[global_workspace_pool]
    )
    bi_b = usmp_utils.BufferInfo(
        name_hint="bi_b", size_bytes=20, pool_candidates=[global_workspace_pool]
    )
    bi_c = usmp_utils.BufferInfo(
        name_hint="bi_c", size_bytes=100, pool_candidates=[global_workspace_pool]
    )
    bi_d = usmp_utils.BufferInfo(
        name_hint="bi_d", size_bytes=40, pool_candidates=[global_workspace_pool]
    )
    bi_e = usmp_utils.BufferInfo(
        name_hint="bi_e", size_bytes=50, pool_candidates=[global_workspace_pool]
    )
    bi_f = usmp_utils.BufferInfo(
        name_hint="bi_f", size_bytes=50, pool_candidates=[global_workspace_pool]
    )
    # Creating conflicts for a linear graph
    # (only chain-adjacent buffers are ever simultaneously live).
    bi_a.set_conflicts([bi_b])
    bi_b.set_conflicts([bi_a, bi_c])
    bi_c.set_conflicts([bi_b, bi_d])
    bi_d.set_conflicts([bi_c, bi_e])
    bi_e.set_conflicts([bi_d, bi_f])
    bi_f.set_conflicts([bi_e])
    buffer_info_arr = [bi_a, bi_b, bi_c, bi_d, bi_e, bi_f]
    fusmp_algo = tvm.get_global_func(f"tir.usmp.algo.{algorithm}")
    buffer_pool_allocations = fusmp_algo(buffer_info_arr, 0)
    # All three algorithms should reach the same 140-byte footprint here.
    _check_max_workspace_size(buffer_pool_allocations, global_workspace_pool, workspace_size)
@pytest.mark.parametrize(
    ["algorithm", "workspace_size"],
    [("greedy_by_size", 190), ("greedy_by_conflicts", 320), ("hill_climb", 190)],
)
def test_fanout(algorithm, workspace_size):
    """
    The test case here represent BufferInfo objects
    that could get generated for a fanout topology
    such as :
    (Op A)
    |
    bi_a ---------
    |            |
    (Op B)     (Op C)
    |            |
    bi_b        bi_c
    |            |
    (Op D)     (Op E)
    |            |
    bi_d        bi_e
    |            |
    (Op F) ------
    |
    bi_f
    |
    (Op G)
    |
    bi_g
    """
    target = Target("c")
    global_workspace_pool = WorkspacePoolInfo(
        "global_workspace",
        targets=[target],
    )
    bi_a = usmp_utils.BufferInfo(
        name_hint="bi_a", size_bytes=10, pool_candidates=[global_workspace_pool]
    )
    bi_b = usmp_utils.BufferInfo(
        name_hint="bi_b", size_bytes=20, pool_candidates=[global_workspace_pool]
    )
    bi_c = usmp_utils.BufferInfo(
        name_hint="bi_c", size_bytes=100, pool_candidates=[global_workspace_pool]
    )
    bi_d = usmp_utils.BufferInfo(
        name_hint="bi_d", size_bytes=40, pool_candidates=[global_workspace_pool]
    )
    bi_e = usmp_utils.BufferInfo(
        name_hint="bi_e", size_bytes=50, pool_candidates=[global_workspace_pool]
    )
    bi_f = usmp_utils.BufferInfo(
        name_hint="bi_f", size_bytes=60, pool_candidates=[global_workspace_pool]
    )
    bi_g = usmp_utils.BufferInfo(
        name_hint="bi_g", size_bytes=70, pool_candidates=[global_workspace_pool]
    )
    # Creating conflicts for the fanout graph sketched in the docstring.
    # NOTE(review): bi_f lists itself in its own conflict set — looks like a
    # copy-paste slip, but the expected workspace sizes above are derived from
    # exactly this input, so it is preserved.
    bi_a.set_conflicts([bi_b, bi_c])
    bi_b.set_conflicts([bi_a, bi_c, bi_e])
    bi_c.set_conflicts([bi_e, bi_a, bi_b, bi_d])
    bi_d.set_conflicts([bi_b, bi_f, bi_c, bi_e])
    bi_e.set_conflicts([bi_c, bi_f, bi_b, bi_d])
    bi_f.set_conflicts([bi_d, bi_e, bi_f])
    bi_g.set_conflicts([bi_f])
    buffer_info_arr = [bi_a, bi_b, bi_c, bi_d, bi_e, bi_f, bi_g]
    fusmp_algo = tvm.get_global_func(f"tir.usmp.algo.{algorithm}")
    buffer_pool_allocations = fusmp_algo(buffer_info_arr, 0)
    _check_max_workspace_size(buffer_pool_allocations, global_workspace_pool, workspace_size)
# fmt: off
@tvm.script.ir_module
class MobilenetStructure:
    # TIR extracted from a MobileNet prefix (cast/subtract -> conv2d block ->
    # max_pool2d), driven by run_model. Used below to exercise USMP buffer-info
    # extraction and pool planning.
    @T.prim_func
    def tvmgen_default_fused_cast_subtract(placeholder_2: T.handle, placeholder_3: T.handle, T_subtract: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_cast_subtract", "tir.noalias": True})
        placeholder_4 = T.match_buffer(placeholder_2, [150528], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        placeholder_5 = T.match_buffer(placeholder_3, [1], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        T_subtract_1 = T.match_buffer(T_subtract, [150528], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        # body
        for ax0_ax1_fused_1 in T.serial(0, 224):
            for ax2_1, ax3_inner_1 in T.grid(224, 3):
                T_subtract_1[(((ax0_ax1_fused_1*672) + (ax2_1*3)) + ax3_inner_1)] = (T.cast(placeholder_4[(((ax0_ax1_fused_1*672) + (ax2_1*3)) + ax3_inner_1)], "int16") - placeholder_5[0])
    @T.prim_func
    def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast(placeholder_62: T.handle, placeholder_63: T.handle, placeholder_64: T.handle, T_cast_20: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast", "tir.noalias": True})
        placeholder_65 = T.match_buffer(placeholder_62, [150528], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        placeholder_66 = T.match_buffer(placeholder_63, [9408], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        placeholder_67 = T.match_buffer(placeholder_64, [64], dtype="int32", elem_offset=0, align=64, offset_factor=1)
        T_cast_21 = T.match_buffer(T_cast_20, [802816], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        # body
        # PaddedInput_7 / Conv2dOutput_7 are the intermediates USMP must plan for.
        PaddedInput_7 = T.decl_buffer([157323], "int16")
        for i0_i1_fused_7 in T.serial(0, 229):
            for i2_7, i3_7 in T.grid(229, 3):
                PaddedInput_7[(((i0_i1_fused_7*687) + (i2_7*3)) + i3_7)] = T.if_then_else(((((2 <= i0_i1_fused_7) and (i0_i1_fused_7 < 226)) and (2 <= i2_7)) and (i2_7 < 226)), placeholder_65[((((i0_i1_fused_7*672) + (i2_7*3)) + i3_7) - 1350)], T.int16(0), dtype="int16")
        for ax0_ax1_fused_ax2_fused_7 in T.serial(0, 12544):
            Conv2dOutput_7 = T.decl_buffer([64], "int32")
            for ff_3 in T.serial(0, 64):
                Conv2dOutput_7[ff_3] = 0
                for ry_2, rx_2, rc_7 in T.grid(7, 7, 3):
                    Conv2dOutput_7[ff_3] = (Conv2dOutput_7[ff_3] + (T.cast(PaddedInput_7[(((((T.floordiv(ax0_ax1_fused_ax2_fused_7, 112)*1374) + (ry_2*687)) + (T.floormod(ax0_ax1_fused_ax2_fused_7, 112)*6)) + (rx_2*3)) + rc_7)], "int32")*T.cast(placeholder_66[((((ry_2*1344) + (rx_2*192)) + (rc_7*64)) + ff_3)], "int32")))
            for ax3_inner_7 in T.serial(0, 64):
                T_cast_21[((ax0_ax1_fused_ax2_fused_7*64) + ax3_inner_7)] = T.cast(T.max(T.min(T.q_multiply_shift((Conv2dOutput_7[ax3_inner_7] + placeholder_67[ax3_inner_7]), 1939887962, 31, -9, dtype="int32"), 255), 0), "uint8")
    @T.prim_func
    def tvmgen_default_fused_nn_max_pool2d_cast(placeholder_28: T.handle, T_cast_6: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_nn_max_pool2d_cast", "tir.noalias": True})
        placeholder_29 = T.match_buffer(placeholder_28, [802816], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        T_cast_7 = T.match_buffer(T_cast_6, [200704], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        # body
        tensor_2 = T.decl_buffer([200704], "uint8")
        for ax0_ax1_fused_4 in T.serial(0, 56):
            for ax2_4 in T.serial(0, 56):
                for ax3_init in T.serial(0, 64):
                    tensor_2[(((ax0_ax1_fused_4*3584) + (ax2_4*64)) + ax3_init)] = T.uint8(0)
                for rv0_rv1_fused_1, ax3_2 in T.grid(9, 64):
                    tensor_2[(((ax0_ax1_fused_4*3584) + (ax2_4*64)) + ax3_2)] = T.max(tensor_2[(((ax0_ax1_fused_4*3584) + (ax2_4*64)) + ax3_2)], T.if_then_else(((((ax0_ax1_fused_4*2) + T.floordiv(rv0_rv1_fused_1, 3)) < 112) and (((ax2_4*2) + T.floormod(rv0_rv1_fused_1, 3)) < 112)), placeholder_29[(((((ax0_ax1_fused_4*14336) + (T.floordiv(rv0_rv1_fused_1, 3)*7168)) + (ax2_4*128)) + (T.floormod(rv0_rv1_fused_1, 3)*64)) + ax3_2)], T.uint8(0), dtype="uint8"))
        for ax0_ax1_fused_5 in T.serial(0, 56):
            for ax2_5, ax3_3 in T.grid(56, 64):
                T_cast_7[(((ax0_ax1_fused_5*3584) + (ax2_5*64)) + ax3_3)] = T.cast(tensor_2[(((ax0_ax1_fused_5*3584) + (ax2_5*64)) + ax3_3)], "int16")
    @T.prim_func
    def run_model(input: T.handle, output: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_run_model", "runner_function": True})
        # body
        # sid_9 and sid_8 are the inter-operator workspaces that USMP plans.
        T.attr("default", "device_id", 0)
        T.attr("default", "device_type", 1)
        sid_9 = T.allocate([301056], "int8", "global")
        sid_8 = T.allocate([802816], "int8", "global")
        T.evaluate(T.call_extern("tvmgen_default_fused_cast_subtract", input, T.lookup_param("p0", dtype="handle"), sid_9, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast", sid_9, T.lookup_param("p1", dtype="handle"), T.lookup_param("p2", dtype="handle"), sid_8, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_max_pool2d_cast", sid_8, output, dtype="int32"))
__tvm_meta__ = None
# fmt: on
@pytest.mark.parametrize(
    ["algorithm", "fast_memory_size", "slow_memory_size"],
    [
        ("greedy_by_size", 200704, 1418528),
        ("greedy_by_conflicts", 200704, 1418528),
        ("hill_climb", 200704, 1117462),
    ],
)
def test_mobilenet_subgraph(algorithm, fast_memory_size, slow_memory_size):
    """End-to-end USMP planning over the MobilenetStructure module."""
    target = Target("c")
    # A small, size-hinted fast pool plus an unbounded slow fallback pool.
    fast_memory_pool = WorkspacePoolInfo(
        "fast_memory",
        [target],
        PoolInfoProperties(size_hint_bytes=200704),
    )
    slow_memory_pool = WorkspacePoolInfo(
        "slow_memory",
        [target],
    )
    tir_mod = MobilenetStructure
    tir_mod = _assign_targets_to_primfuncs_irmodule(tir_mod, target)
    tir_mod = _assign_poolinfos_to_allocates_in_irmodule(
        tir_mod, [fast_memory_pool, slow_memory_pool]
    )
    main_func = tir_mod["run_model"]
    buffer_info_analysis = tvm.tir.usmp.analysis.extract_buffer_info(main_func, tir_mod)
    assert buffer_info_analysis.memory_pressure == 1117718
    fcreate_array_bi = tvm.get_global_func("tir.usmp.CreateArrayBufferInfo")
    buffer_info_arr = fcreate_array_bi(buffer_info_analysis.buffer_info_stmts)
    fusmp_algo = tvm.get_global_func(f"tir.usmp.algo.{algorithm}")
    buffer_pool_allocations = fusmp_algo(buffer_info_arr, buffer_info_analysis.memory_pressure)
    buffer_info_map_names = dict()
    for buf_info in buffer_info_arr:
        buffer_info_map_names[buf_info.name_hint] = buf_info
    # check conflicts
    _verify_conflicts("PaddedInput_7", ["sid_9", "sid_8", "Conv2dOutput_7"], buffer_info_map_names)
    _verify_conflicts("tensor_2", ["sid_8"], buffer_info_map_names)
    _verify_conflicts("sid_9", ["PaddedInput_7"], buffer_info_map_names)
    _verify_conflicts(
        "sid_8", ["PaddedInput_7", "Conv2dOutput_7", "tensor_2"], buffer_info_map_names
    )
    _verify_conflicts("Conv2dOutput_7", ["sid_8", "PaddedInput_7"], buffer_info_map_names)
    # Footprints depend on the algorithm (see the parametrize table above).
    _check_max_workspace_size(buffer_pool_allocations, slow_memory_pool, slow_memory_size)
    _check_max_workspace_size(buffer_pool_allocations, fast_memory_pool, fast_memory_size)
# fmt: off
@tvm.script.ir_module
class ResnetStructure:
@T.prim_func
def tvmgen_default_fused_cast_subtract_fixed_point_multiply_add_clip_cast_cast(placeholder: T.handle, placeholder_1: T.handle, T_cast: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "tvmgen_default_fused_cast_subtract_fixed_point_multiply_add_clip_cast_cast", "tir.noalias": True})
placeholder_2 = T.match_buffer(placeholder, [360000], dtype="uint8")
placeholder_3 = T.match_buffer(placeholder_1, [64], dtype="int32")
T_cast_1 = T.match_buffer(T_cast, [360000], dtype="int16")
# body
for ax0_ax1_fused, ax2, ax3_outer, ax3_inner in T.grid(75, 75, 4, 16):
T_cast_1[ax0_ax1_fused * 4800 + ax2 * 64 + ax3_outer * 16 + ax3_inner] = T.cast(T.cast(T.max(T.min(T.q_multiply_shift(T.cast(placeholder_2[ax0_ax1_fused * 4800 + ax2 * 64 + ax3_outer * 16 + ax3_inner], "int32") - 94, 1843157232, 31, 1, dtype="int32") + placeholder_3[ax3_outer * 16 + ax3_inner], 255), 0), "uint8"), "int16")
@T.prim_func
def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_1(placeholder_10: T.handle, placeholder_11: T.handle, placeholder_12: T.handle, T_cast_4: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_1", "tir.noalias": True})
placeholder_13 = T.match_buffer(placeholder_10, [360000], dtype="int16")
placeholder_14 = T.match_buffer(placeholder_11, [36864], dtype="int16")
placeholder_15 = T.match_buffer(placeholder_12, [64], dtype="int32")
T_cast_5 = T.match_buffer(T_cast_4, [360000], dtype="int16")
# body
PaddedInput_1 = T.decl_buffer([379456], "int16")
for i0_i1_fused_1, i2_1, i3_1 in T.grid(77, 77, 64):
PaddedInput_1[i0_i1_fused_1 * 4928 + i2_1 * 64 + i3_1] = T.if_then_else(1 <= i0_i1_fused_1 and i0_i1_fused_1 < 76 and 1 <= i2_1 and i2_1 < 76, placeholder_13[i0_i1_fused_1 * 4800 + i2_1 * 64 + i3_1 - 4864], T.int16(0), dtype="int16")
for ax0_ax1_fused_ax2_fused_1 in T.serial(0, 5625):
Conv2dOutput_1 = T.decl_buffer([64], "int32")
for ff_1 in T.serial(0, 64):
Conv2dOutput_1[ff_1] = 0
for ry, rx, rc_1 in T.grid(3, 3, 64):
Conv2dOutput_1[ff_1] = Conv2dOutput_1[ff_1] + T.cast(PaddedInput_1[T.floordiv(ax0_ax1_fused_ax2_fused_1, 75) * 4928 + ry * 4928 + rx * 64 + T.floormod(ax0_ax1_fused_ax2_fused_1, 75) * 64 + rc_1], "int32") * T.cast(placeholder_14[ry * 12288 + rx * 4096 + rc_1 * 64 + ff_1], "int32")
for ax3_inner_2 in T.serial(0, 64):
T_cast_5[ax0_ax1_fused_ax2_fused_1 * 64 + ax3_inner_2] = T.cast(T.cast(T.max(T.min(T.q_multiply_shift(Conv2dOutput_1[ax3_inner_2] + placeholder_15[ax3_inner_2], 1608879842, 31, -7, dtype="int32"), 255), 0), "uint8"), "int16")
@T.prim_func
def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_add_clip_cast_cast_subtract_fixed_point_15934180698220515269_(placeholder_16: T.handle, placeholder_17: T.handle, placeholder_18: T.handle, T_add: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_add_clip_cast_cast_subtract_fixed_point_15934180698220515269_", "tir.noalias": True})
placeholder_19 = T.match_buffer(placeholder_16, [360000], dtype="int16")
placeholder_20 = T.match_buffer(placeholder_17, [16384], dtype="int16")
placeholder_21 = T.match_buffer(placeholder_18, [256], dtype="int32")
T_add_1 = T.match_buffer(T_add, [1440000], dtype="int32")
# body
PaddedInput_2 = T.decl_buffer([360000], "int16")
for i0_i1_fused_2, i2_2, i3_2 in T.grid(75, 75, 64):
PaddedInput_2[i0_i1_fused_2 * 4800 + i2_2 * 64 + i3_2] = placeholder_19[i0_i1_fused_2 * 4800 + i2_2 * 64 + i3_2]
for ax0_ax1_fused_ax2_fused_2 in T.serial(0, 5625):
Conv2dOutput_2 = T.decl_buffer([64], "int32")
for ax3_outer_1 in T.serial(0, 4):
for ff_2 in T.serial(0, 64):
Conv2dOutput_2[ff_2] = 0
for rc_2 in T.serial(0, 64):
Conv2dOutput_2[ff_2] = Conv2dOutput_2[ff_2] + T.cast(PaddedInput_2[ax0_ax1_fused_ax2_fused_2 * 64 + rc_2], "int32") * T.cast(placeholder_20[rc_2 * 256 + ax3_outer_1 * 64 + ff_2], "int32")
for ax3_inner_3 in T.serial(0, 64):
T_add_1[ax0_ax1_fused_ax2_fused_2 * 256 + ax3_outer_1 * 64 + ax3_inner_3] = T.q_multiply_shift(T.cast(T.cast(T.max(T.min(T.q_multiply_shift(Conv2dOutput_2[ax3_inner_3] + placeholder_21[ax3_outer_1 * 64 + ax3_inner_3], 1711626602, 31, -8, dtype="int32") + 132, 255), 0), "uint8"), "int32") - 132, 2094289803, 31, -2, dtype="int32") + 136
@T.prim_func
def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_add_clip_cast_cast_subtract_fixed_point_4200876283395191415_(placeholder_22: T.handle, placeholder_23: T.handle, placeholder_24: T.handle, placeholder_25: T.handle, T_cast_6: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_add_clip_cast_cast_subtract_fixed_point_4200876283395191415_", "tir.noalias": True})
placeholder_29 = T.match_buffer(placeholder_22, [360000], dtype="int16")
placeholder_27 = T.match_buffer(placeholder_23, [16384], dtype="int16")
placeholder_26 = T.match_buffer(placeholder_24, [256], dtype="int32")
placeholder_28 = T.match_buffer(placeholder_25, [1440000], dtype="int32")
T_cast_7 = T.match_buffer(T_cast_6, [1440000], dtype="uint8")
# body
PaddedInput_3 = T.decl_buffer([360000], "int16")
for i0_i1_fused_3, i2_3, i3_3 in T.grid(75, 75, 64):
PaddedInput_3[i0_i1_fused_3 * 4800 + i2_3 * 64 + i3_3] = placeholder_29[i0_i1_fused_3 * 4800 + i2_3 * 64 + i3_3]
for ax0_ax1_fused_ax2_fused_3 in T.serial(0, 5625):
Conv2dOutput_3 = T.decl_buffer([64], "int32")
for ax3_outer_2 in T.serial(0, 4):
for ff_3 in T.serial(0, 64):
Conv2dOutput_3[ff_3] = 0
for rc_3 in T.serial(0, 64):
Conv2dOutput_3[ff_3] = Conv2dOutput_3[ff_3] + T.cast(PaddedInput_3[ax0_ax1_fused_ax2_fused_3 * 64 + rc_3], "int32") * T.cast(placeholder_27[rc_3 * 256 + ax3_outer_2 * 64 + ff_3], "int32")
for ax3_inner_4 in T.serial(0, 64):
T_cast_7[ax0_ax1_fused_ax2_fused_3 * 256 + ax3_outer_2 * 64 + ax3_inner_4] = T.cast(T.max(T.min(T.q_multiply_shift(T.cast(T.cast(T.max(T.min(T.q_multiply_shift(Conv2dOutput_3[ax3_inner_4] + placeholder_26[ax3_outer_2 * 64 + ax3_inner_4], 1343014664, 31, -8, dtype="int32") + 136, 255), 0), "uint8"), "int32") - 136, 1073903788, 31, 1, dtype="int32") + placeholder_28[ax0_ax1_fused_ax2_fused_3 * 256 + ax3_outer_2 * 64 + ax3_inner_4], 255), 0), "uint8")
@T.prim_func
def tvmgen_default_run_model(input: T.handle, output: T.handle) -> None:
    # Top-level runner fixture: chains the fused operators through the
    # intermediate workspace buffers (sid_*) whose liveness USMP analyses.
    # function attr dict
    T.func_attr({"global_symbol": "tvmgen_default_run_model", "runner_function": True})
    # body
    T.attr("default", "device_id", 0)
    T.attr("default", "device_type", 1)
    sid_2 = T.allocate([720000], "int8", "global")
    sid_6 = T.allocate([5760000], "int8", "global")
    sid_7 = T.allocate([720000], "int8", "global")
    sid_8 = T.allocate([720000], "int8", "global")
    T.evaluate(T.call_extern("tvmgen_default_fused_cast_subtract_fixed_point_multiply_add_clip_cast_cast", input, T.lookup_param("p0", dtype="handle"), sid_2, dtype="int32"))
    T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast", sid_2, T.lookup_param("p3", dtype="handle"), T.lookup_param("p4", dtype="handle"), sid_8, dtype="int32"))
    T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_1", sid_8, T.lookup_param("p5", dtype="handle"), T.lookup_param("p6", dtype="handle"), sid_7, dtype="int32"))
    T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_add_clip_cast_cast_subtract_fixed_point_15934180698220515269_", sid_7, T.lookup_param("p7", dtype="handle"), T.lookup_param("p8", dtype="handle"), sid_6, dtype="int32"))
    T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_add_clip_cast_cast_subtract_fixed_point_4200876283395191415_", sid_2, T.lookup_param("p1", dtype="handle"), T.lookup_param("p2", dtype="handle"), sid_6, output, dtype="int32"))
@T.prim_func
def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast(placeholder_4: T.handle, placeholder_5: T.handle, placeholder_6: T.handle, T_cast_2: T.handle) -> None:
    # TVMScript test fixture: quantized 1x1 conv2d (64 -> 64 channels over a
    # 75x75 spatial grid) fused with bias-add, requantize, clip and casts.
    # function attr dict
    T.func_attr({"global_symbol": "tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast", "tir.noalias": True})
    placeholder_7 = T.match_buffer(placeholder_4, [360000], dtype="int16")
    placeholder_8 = T.match_buffer(placeholder_5, [4096], dtype="int16")
    placeholder_9 = T.match_buffer(placeholder_6, [64], dtype="int32")
    T_cast_3 = T.match_buffer(T_cast_2, [360000], dtype="int16")
    # body
    # Scratch copy of the input; no actual padding is applied here.
    PaddedInput = T.decl_buffer([360000], "int16")
    for i0_i1_fused, i2, i3 in T.grid(75, 75, 64):
        PaddedInput[i0_i1_fused * 4800 + i2 * 64 + i3] = placeholder_7[i0_i1_fused * 4800 + i2 * 64 + i3]
    for ax0_ax1_fused_ax2_fused in T.serial(0, 5625):
        # Per-pixel 64-channel accumulator.
        Conv2dOutput = T.decl_buffer([64], "int32")
        for ff in T.serial(0, 64):
            Conv2dOutput[ff] = 0
            for rc in T.serial(0, 64):
                Conv2dOutput[ff] = Conv2dOutput[ff] + T.cast(PaddedInput[ax0_ax1_fused_ax2_fused * 64 + rc], "int32") * T.cast(placeholder_8[rc * 64 + ff], "int32")
        for ax3_inner_1 in T.serial(0, 64):
            T_cast_3[ax0_ax1_fused_ax2_fused * 64 + ax3_inner_1] = T.cast(T.cast(T.max(T.min(T.q_multiply_shift(Conv2dOutput[ax3_inner_1] + placeholder_9[ax3_inner_1], 1843106743, 31, -6, dtype="int32"), 255), 0), "uint8"), "int16")
__tvm_meta__ = None
# fmt: on
@pytest.mark.parametrize(
    ["algorithm", "workspace_size"],
    [("greedy_by_size", 7920256), ("greedy_by_conflicts", 7200256), ("hill_climb", 7200256)],
)
def test_resnet_subgraph(algorithm, workspace_size):
    """Plan the ResnetStructure fixture with each USMP algorithm and verify
    the liveness conflicts and the resulting peak workspace size."""
    target = Target("c")
    global_workspace_pool = WorkspacePoolInfo(
        "global_workspace",
        [target],
    )
    tir_mod = _assign_poolinfos_to_allocates_in_irmodule(
        _assign_targets_to_primfuncs_irmodule(ResnetStructure, target),
        [global_workspace_pool],
    )
    main_func = tir_mod["tvmgen_default_run_model"]
    buffer_info_analysis = tvm.tir.usmp.analysis.extract_buffer_info(main_func, tir_mod)
    assert buffer_info_analysis.memory_pressure == 7200256

    fcreate_array_bi = tvm.get_global_func("tir.usmp.CreateArrayBufferInfo")
    buffer_info_arr = fcreate_array_bi(buffer_info_analysis.buffer_info_stmts)
    fusmp_algo = tvm.get_global_func(f"tir.usmp.algo.{algorithm}")
    buffer_pool_allocations = fusmp_algo(buffer_info_arr, buffer_info_analysis.memory_pressure)

    buffer_info_map_names = {info.name_hint: info for info in buffer_info_arr}

    # Expected liveness conflicts, keyed by buffer name.
    expected_conflicts = {
        "sid_7": ["PaddedInput_1", "sid_2", "Conv2dOutput_1", "PaddedInput_2"],
        "Conv2dOutput_3": ["PaddedInput_3", "sid_6"],
        "sid_6": ["Conv2dOutput_2", "PaddedInput_2", "sid_2", "PaddedInput_3", "Conv2dOutput_3"],
        "Conv2dOutput": ["sid_8", "sid_2", "PaddedInput"],
        "PaddedInput_3": ["sid_6", "sid_2", "Conv2dOutput_3"],
        "Conv2dOutput_2": ["PaddedInput_2", "sid_2", "sid_6"],
        "PaddedInput_1": ["sid_8", "sid_2", "sid_7", "Conv2dOutput_1"],
        "Conv2dOutput_1": ["sid_7", "PaddedInput_1", "sid_2"],
        "PaddedInput": ["sid_2", "sid_8", "Conv2dOutput"],
        "sid_8": ["PaddedInput", "sid_2", "Conv2dOutput", "PaddedInput_1"],
        "sid_2": [
            "PaddedInput",
            "sid_8",
            "Conv2dOutput",
            "PaddedInput_1",
            "sid_7",
            "Conv2dOutput_1",
            "PaddedInput_2",
            "Conv2dOutput_2",
            "sid_6",
            "PaddedInput_3",
        ],
        "PaddedInput_2": ["sid_7", "sid_2", "Conv2dOutput_2", "sid_6"],
    }
    for buf_name, conflict_names in expected_conflicts.items():
        _verify_conflicts(buf_name, conflict_names, buffer_info_map_names)

    _check_max_workspace_size(buffer_pool_allocations, global_workspace_pool, workspace_size)
def test_custom_algo():
    """A user-registered USMP algorithm must run only when selected via the
    PassContext config, and an unknown name must raise a TVMError."""
    target = Target("c")
    global_workspace_pool = WorkspacePoolInfo(
        "global_workspace",
        [target],
    )
    tir_mod = _assign_targets_to_primfuncs_irmodule(ResnetStructure, target)
    tir_mod = _assign_poolinfos_to_allocates_in_irmodule(tir_mod, [global_workspace_pool])
    tir_mod = tir_mod.with_attr("executor", tvm.relay.backend.Executor("aot"))
    tir_mod = tir_mod.with_attr("runtime", tvm.relay.backend.Runtime("crt"))
    tir_mod["__tvm_main__"] = tir_mod[
        "tvmgen_default_fused_cast_subtract_fixed_point_multiply_add_clip_cast_cast"
    ]

    algo_called = False

    @tvm.register_func("tir.usmp.algo.trivial")
    def _trivial_algo(buf_infos, mem_pressure):
        # Places every buffer back-to-back and records that it was invoked.
        nonlocal algo_called
        algo_called = True
        layout = {}
        offset = 0
        for info in buf_infos:
            layout[info] = usmp_utils.PoolAllocation(info.pool_candidates[0], offset)
            offset += info.size_bytes
        return layout

    usmp_pass = tvm.get_global_func("tir.transform.UnifiedStaticMemoryPlanner")

    # Default configuration: the custom algorithm must not be used.
    usmp_pass()(tir_mod)
    assert not algo_called

    # Explicitly selected: it must run.
    with tvm.transform.PassContext(config={"tir.usmp.custom_algorithm": "trivial"}):
        usmp_pass()(tir_mod)
    assert algo_called

    # Unknown algorithm name: the planner must fail loudly.
    with pytest.raises(
        tvm.TVMError, match="The selected custom USMP algorithm : invalid is not defined"
    ):
        with tvm.transform.PassContext(config={"tir.usmp.custom_algorithm": "invalid"}):
            usmp_pass()(tir_mod)
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_usmp_algo_hill_climb.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import pytest
import random
import tvm
import tvm.testing
from tvm.tir.usmp.utils import BufferInfo
from tvm import WorkspacePoolInfo, PoolInfoProperties
def _check_max_workspace_size(buffer_pool_allocations, pool_info, size, tolerance=0):
"""Helper to check maximum allocated memory size"""
max_workspace_size = 0
for buffer_info, pool_allocation in buffer_pool_allocations.items():
if pool_allocation.pool_info == pool_info:
size_candidate = pool_allocation.byte_offset + buffer_info.size_bytes
if size_candidate > max_workspace_size:
max_workspace_size = size_candidate
_diff = max_workspace_size.value - size
return (
(max_workspace_size.value == size if tolerance == 0 else tolerance > 100 * _diff / size),
"'{}': expected {} got {}, diff {:0.2f}% ({} bytes)".format(
pool_info.pool_name, size, max_workspace_size, 100 * _diff / size, _diff
),
)
def _verify_conflicts(buffer_info, pool_allocation, buffer_info_map):
"""Helper to check expected liveness conflicts"""
for conflict in buffer_info.conflicts:
conflict_pool_allocation = buffer_info_map[conflict]
if conflict_pool_allocation.pool_info == pool_allocation.pool_info:
assert conflict_pool_allocation.byte_offset != pool_allocation.byte_offset
l2 = max(
conflict_pool_allocation.byte_offset + conflict.size_bytes,
pool_allocation.byte_offset + buffer_info.size_bytes,
) - min(conflict_pool_allocation.byte_offset, pool_allocation.byte_offset)
assert (
conflict.size_bytes + buffer_info.size_bytes <= l2
), 'Conflicting: \n"{} @{}"\n"{} @{}"'.format(
conflict, conflict_pool_allocation, buffer_info, pool_allocation
)
def _verify_all_conflicts(buffer_pool_allocations):
    """Run the per-buffer conflict check for every entry in the allocation map."""
    for info, allocation in buffer_pool_allocations.items():
        _verify_conflicts(info, allocation, buffer_pool_allocations)
def test_bounded(random_len=150, pools=None):
    """Tests two pools, one bounded to 65535 bytes and one unlimited.

    The pool list is built inside the function: the previous signature used a
    mutable default argument, which is shared across calls and — worse —
    invoked the TVM pool constructors at import time of this module.
    """
    if pools is None:
        pools = [
            WorkspacePoolInfo("default", [], PoolInfoProperties(65535)),
            WorkspacePoolInfo("slow", []),
        ]
    random.seed(0)
    mem_range = [BufferInfo(str(i), random.randrange(1, 65535), pools) for i in range(random_len)]
    # Give every buffer at least one random, distinct conflict (symmetric).
    for mr in mem_range:
        pr = random.choice(mem_range)
        while pr in (*mr.conflicts, mr):
            pr = random.choice(mem_range)
        mr.set_conflicts([*mr.conflicts, pr])
        pr.set_conflicts([*pr.conflicts, mr])
    fusmp_algo = tvm.get_global_func("tir.usmp.algo.hill_climb")
    result_map = fusmp_algo(mem_range, 0)
    _verify_all_conflicts(result_map)
def __test_data_alloc_max():
    """Small fixture of (start, end, size) liveness intervals."""
    return [
        (0, 159, 2048),
        (0, 13, 7904),
        (4, 35, 16),
        (12, 17, 32768),
        (16, 21, 32768),
    ]
def __test_data_deep_speech():
    """Test data: (start, end, size) liveness intervals.

    The name suggests these were extracted from a DeepSpeech-like workload —
    TODO confirm; treat them as an opaque stress fixture.
    """
    intervals = [
        (0, 159, 2048),
        (0, 151, 2048),
        (0, 13, 7904),
        (2, 49, 16),
        (4, 35, 16),
        (6, 21, 16),
        (12, 17, 32768),
        (16, 21, 32768),
        (20, 27, 32768),
        (26, 31, 32768),
        (30, 35, 32768),
        (34, 41, 32768),
        (40, 45, 32768),
        (44, 49, 32768),
        (48, 145, 32768),
        (54, 59, 2048),
        (58, 483, 4096),
        (60, 65, 2048),
        (64, 461, 4096),
        (66, 71, 2048),
        (70, 439, 4096),
        (72, 77, 2048),
        (76, 417, 4096),
        (78, 83, 2048),
        (82, 395, 4096),
        (84, 89, 2048),
        (88, 373, 4096),
        (90, 95, 2048),
        (94, 351, 4096),
        (96, 101, 2048),
        (100, 329, 4096),
        (102, 107, 2048),
        (106, 307, 4096),
        (108, 113, 2048),
        (112, 285, 4096),
        (114, 119, 2048),
        (118, 263, 4096),
        (120, 125, 2048),
        (124, 241, 4096),
        (126, 131, 2048),
        (130, 219, 4096),
        (132, 137, 2048),
        (136, 197, 4096),
        (138, 143, 2048),
        (142, 175, 4096),
        (144, 149, 2048),
        (148, 153, 4096),
        (152, 163, 8192),
        (154, 171, 2048),
        (156, 181, 2048),
        (160, 167, 2048),
        (162, 165, 2048),
        (168, 171, 2048),
        (170, 509, 2048),
        (174, 185, 8192),
        (176, 193, 2048),
        (178, 203, 2048),
        (182, 189, 2048),
        (184, 187, 2048),
        (190, 193, 2048),
        (192, 511, 2048),
        (196, 207, 8192),
        (198, 215, 2048),
        (200, 225, 2048),
        (204, 211, 2048),
        (206, 209, 2048),
        (212, 215, 2048),
        (214, 513, 2048),
        (218, 229, 8192),
        (220, 237, 2048),
        (222, 247, 2048),
        (226, 233, 2048),
        (228, 231, 2048),
        (234, 237, 2048),
        (236, 515, 2048),
        (240, 251, 8192),
        (242, 259, 2048),
        (244, 269, 2048),
        (248, 255, 2048),
        (250, 253, 2048),
        (256, 259, 2048),
        (258, 517, 2048),
        (262, 273, 8192),
        (264, 281, 2048),
        (266, 291, 2048),
        (270, 277, 2048),
        (272, 275, 2048),
        (278, 281, 2048),
        (280, 519, 2048),
        (284, 295, 8192),
        (286, 303, 2048),
        (288, 313, 2048),
        (292, 299, 2048),
        (294, 297, 2048),
        (300, 303, 2048),
        (302, 521, 2048),
        (306, 317, 8192),
        (308, 325, 2048),
        (310, 335, 2048),
        (314, 321, 2048),
        (316, 319, 2048),
        (322, 325, 2048),
        (324, 523, 2048),
        (328, 339, 8192),
        (330, 347, 2048),
        (332, 357, 2048),
        (336, 343, 2048),
        (338, 341, 2048),
        (344, 347, 2048),
        (346, 525, 2048),
        (350, 361, 8192),
        (352, 369, 2048),
        (354, 379, 2048),
        (358, 365, 2048),
        (360, 363, 2048),
        (366, 369, 2048),
        (368, 527, 2048),
        (372, 383, 8192),
        (374, 391, 2048),
        (376, 401, 2048),
        (380, 387, 2048),
        (382, 385, 2048),
        (388, 391, 2048),
        (390, 529, 2048),
        (394, 405, 8192),
        (396, 413, 2048),
        (398, 423, 2048),
        (402, 409, 2048),
        (404, 407, 2048),
        (410, 413, 2048),
        (412, 531, 2048),
        (416, 427, 8192),
        (418, 435, 2048),
        (420, 445, 2048),
        (424, 431, 2048),
        (426, 429, 2048),
        (432, 435, 2048),
        (434, 533, 2048),
        (438, 449, 8192),
        (440, 457, 2048),
        (442, 467, 2048),
        (446, 453, 2048),
        (448, 451, 2048),
        (454, 457, 2048),
        (456, 535, 2048),
        (460, 471, 8192),
        (462, 479, 2048),
        (464, 489, 2048),
        (468, 475, 2048),
        (470, 473, 2048),
        (476, 479, 2048),
        (478, 537, 2048),
        (482, 493, 8192),
        (484, 501, 2048),
        (486, 497, 2048),
        (490, 497, 2048),
        (492, 495, 2048),
        (496, 626, 2048),
        (498, 501, 2048),
        (500, 626, 2048),
        (504, 549, 16),
        (508, 543, 32768),
        (542, 549, 32768),
        (548, 555, 32768),
        (554, 563, 464),
        (560, 563, 256),
        (562, 617, 2048),
        (564, 567, 1856),
        (566, 573, 1024),
        (568, 619, 1024),
        (570, 573, 1024),
        (572, 577, 1024),
        (576, 579, 1024),
        (578, 605, 1024),
        (580, 593, 1024),
        (584, 587, 1024),
        (586, 603, 1024),
        (594, 597, 1024),
        (596, 613, 1024),
        (604, 607, 1024),
        (606, 617, 1024),
        (616, 621, 2048),
        (618, 621, 1024),
        (620, 626, 464),
    ]
    return intervals
def __test_data_five():
    """Five heavily-overlapping (start, end, size) intervals."""
    data = [
        (4, 5, 95),
        (1, 4, 52135),
        (3, 4, 12136),
        (3, 5, 62099),
        (4, 5, 50458),
    ]
    return data
def __test_data_simple():
    """Ten (start, end, size) intervals: pairs of short-lived buffers under a
    couple of long-lived ones."""
    data = [
        (0, 23, 131072),  # interval 0
        (4, 5, 65568),    # interval 1
        (4, 9, 8192),     # interval 2
        (8, 30, 15360),   # interval 3
        (10, 11, 65568),  # interval 4
        (10, 15, 4096),   # interval 5
        (16, 17, 65552),  # interval 6
        (16, 21, 2048),   # interval 7
        (22, 23, 32784),  # interval 8
        (22, 27, 1024),   # interval 9
    ]
    return data
def find_maximum_from_intervals(intervals):
    """Return the peak simultaneous memory over (start, end, size) intervals.

    The running total only increases at interval start points, so the maximum
    is always attained at one of them; evaluating just those points replaces
    the previous scan over every time step in [min(start), max(end)].
    Returns 0 for an empty list (the old version raised IndexError).
    """
    if not intervals:
        return 0
    return max(
        sum(size for start, end, size in intervals if start <= t <= end)
        for t, _, _ in intervals
    )
@pytest.mark.parametrize(
    "intervals",
    [__test_data_alloc_max(), __test_data_simple(), __test_data_deep_speech(), __test_data_five()],
)
def test_intervals(intervals):
    """Plans each fixture and expects hill-climb to hit the analytic optimum
    (within the 5% tolerance passed to run_intervals)."""
    random.seed(0)
    outcome = run_intervals(intervals, 5)
    assert outcome["tir.usmp.algo.hill_climb"] == True, f" {outcome}"
def generate_range(sz, max_segment_sz=65535):
    """Yield *sz* random (start, stop, size) triples with stop > start and
    1 <= size < max_segment_sz."""
    for idx in range(sz):
        begin = random.randrange(idx, sz)
        end = random.randrange(begin + 1, begin + 2 + ((sz - begin) // 2))
        assert end - begin > 0
        yield (begin, end, random.randrange(1, max_segment_sz))
def test_random_intervals(interval_len=16):
    """Checks the planner on a randomly generated set of *interval_len* intervals."""
    random.seed(0)
    generated = list(generate_range(interval_len))
    return run_intervals(generated)
def run_intervals(intervals, tolerance=0):
    """Plan the given (start, end, size) intervals with each USMP algorithm.

    Returns a dict mapping algorithm name to whether the measured peak
    workspace matched the analytic expectation (within *tolerance* percent).
    """
    expected_mem = find_maximum_from_intervals(intervals)
    pools = [WorkspacePoolInfo("default", [])]

    # One BufferInfo per interval, all candidates in the default pool.
    buffers = [BufferInfo(str(idx), sz, pools) for idx, (_, _, sz) in enumerate(intervals)]

    # Record liveness conflicts: two inclusive intervals overlap exactly when
    # their combined span is shorter than the sum of their durations.
    for i, (i_start, i_stop, _) in enumerate(intervals):
        overlapping = set()
        for j, (j_start, j_stop, _) in enumerate(intervals):
            if i == j:
                continue
            span = max(i_stop, j_stop) - min(i_start, j_start) + 1
            if span < (i_stop - i_start + 1) + (j_stop - j_start + 1):
                overlapping.add(buffers[j])
        buffers[i].set_conflicts(sorted(overlapping, key=lambda b: b.name_hint))

    result = {}
    for alg, params in [
        ("tir.usmp.algo.hill_climb", (expected_mem,)),
        ("tir.usmp.algo.greedy_by_size", (expected_mem,)),
    ]:
        fusmp_algo = tvm.get_global_func(alg)
        print("\n", "started", alg)
        allocations = fusmp_algo(buffers, *params)
        print()
        _verify_all_conflicts(allocations)
        result[alg], msg = _check_max_workspace_size(
            allocations, pools[0], expected_mem, tolerance
        )
        if not result[alg]:
            print(alg, msg)
    return result
# Allow running this test file directly; delegates to TVM's pytest-based
# test entry point.
if __name__ == "__main__":
    tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_usmp_analysis_extract_bufferinfo.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import sys
import tvm
from tvm import tir, script
from tvm.ir import Range
from tvm.script import tir as T
from tvm.tir import stmt_functor
from tvm.tir import PrimFunc
from tvm.tir.usmp import utils as usmp_utils
from tvm.target import Target
from tvm import WorkspacePoolInfo, ConstantPoolInfo
def _replace_stmt_with_buf_var_names(buffer_info_map):
"""helper to replace tir.allocates with buffer names"""
new_buffer_info_map = dict()
for k, v in buffer_info_map.items():
new_buffer_info_map[k.name_hint] = k
return new_buffer_info_map
def _verify_conflicts(main_buf_name, conflicting_buf_names, buffer_info_map):
    """Assert every conflict recorded for *main_buf_name* is an expected name."""
    main_info = buffer_info_map[main_buf_name]
    for conflict in main_info.conflicts:
        assert conflict.name_hint in conflicting_buf_names
def _get_allocates(primfunc):
    """Collect every tir.Allocate in *primfunc*, keyed by its buffer-var name."""
    found = dict()

    def _collect(node):
        if isinstance(node, tvm.tir.Allocate):
            found[str(node.buffer_var.name)] = node

    stmt_functor.post_order_visit(primfunc.body, _collect)
    return found
def _assign_poolinfos_to_allocates_in_primfunc(primfunc, pool_infos, constant_pool_infos):
    """Helper to assign poolinfos to allocate nodes in a tir.PrimFunc.

    Workspace Allocates receive *pool_infos* and AllocateConst nodes receive
    *constant_pool_infos* as their candidate-memory-pool annotation.
    """

    def set_poolinfos(stmt):
        # ir_transform replaces a statement when this returns a new node and
        # leaves it untouched when this returns None (all other kinds).
        if isinstance(stmt, tvm.tir.Allocate):
            return tvm.tir.Allocate(
                buffer_var=stmt.buffer_var,
                dtype=stmt.dtype,
                extents=stmt.extents,
                condition=stmt.condition,
                body=stmt.body,
                annotations={tvm.tir.usmp.utils.CANDIDATE_MEMORY_POOL_ATTR: pool_infos},
            )
        elif isinstance(stmt, tvm.tir.AllocateConst):
            return tvm.tir.AllocateConst(
                buffer_var=stmt.buffer_var,
                dtype=stmt.dtype,
                extents=stmt.extents,
                data_or_idx=stmt.data,
                body=stmt.body,
                annotations={tvm.tir.usmp.utils.CANDIDATE_MEMORY_POOL_ATTR: constant_pool_infos},
            )

    return primfunc.with_body(stmt_functor.ir_transform(primfunc.body, None, set_poolinfos))
def _assign_poolinfos_to_allocates_in_irmodule(mod, pool_infos, constant_pool_infos=None):
    """Attach pool-candidate annotations to the allocates of every PrimFunc in *mod*.

    Non-PrimFunc members are dropped from the returned IRModule (matching the
    behavior relied on by the tests in this file).
    """
    ret = tvm.IRModule()
    for gvar, func in mod.functions.items():
        if isinstance(func, tvm.tir.PrimFunc):
            ret[gvar] = _assign_poolinfos_to_allocates_in_primfunc(
                func, pool_infos, constant_pool_infos
            )
    return ret
def _assign_targets_to_primfuncs_irmodule(mod, target):
    """Return a copy of *mod* whose PrimFuncs carry the given "target" attribute."""
    ret = tvm.IRModule()
    for gvar, func in mod.functions.items():
        if isinstance(func, tvm.tir.PrimFunc):
            ret[gvar] = func.with_attr("target", target)
    return ret
# These are test IRModules containing varied topologies of operator graphs,
# each with a main TIR function that calls into those operators.
# fmt: off
@tvm.script.ir_module
class LinearStructure:
    # Linear three-operator graph used as a USMP fixture:
    # cast/subtract -> conv2d(+requantize) -> max_pool2d, wired together by
    # run_model through the sid_* intermediate buffers.
    @T.prim_func
    def tvmgen_default_fused_cast_subtract(placeholder_2: T.handle, placeholder_3: T.handle, T_subtract: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_cast_subtract", "tir.noalias": True})
        placeholder_4 = T.match_buffer(placeholder_2, [150528], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        placeholder_5 = T.match_buffer(placeholder_3, [1], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        T_subtract_1 = T.match_buffer(T_subtract, [452], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        # body
        for ax0_ax1_fused_1 in T.serial(0, 224):
            for ax2_1, ax3_inner_1 in T.grid(224, 3):
                T_subtract_1[(((ax0_ax1_fused_1*672) + (ax2_1*3)) + ax3_inner_1)] = (T.cast(placeholder_4[(((ax0_ax1_fused_1*672) + (ax2_1*3)) + ax3_inner_1)], "int16") - placeholder_5[0])

    @T.prim_func
    def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast(placeholder_62: T.handle, placeholder_63: T.handle, placeholder_64: T.handle, T_cast_20: T.handle) -> None:
        # Quantized 7x7 conv2d (stride 2, padding 2/3) fused with bias-add,
        # requantize, clip and cast.
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast", "tir.noalias": True})
        placeholder_65 = T.match_buffer(placeholder_62, [150528], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        placeholder_66 = T.match_buffer(placeholder_63, [9408], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        placeholder_67 = T.match_buffer(placeholder_64, [64], dtype="int32", elem_offset=0, align=64, offset_factor=1)
        T_cast_21 = T.match_buffer(T_cast_20, [289], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        # body
        PaddedInput_7 = T.decl_buffer([157323], "int16")
        for i0_i1_fused_7 in T.serial(0, 229):
            for i2_7, i3_7 in T.grid(229, 3):
                PaddedInput_7[(((i0_i1_fused_7*687) + (i2_7*3)) + i3_7)] = T.if_then_else(((((2 <= i0_i1_fused_7) and (i0_i1_fused_7 < 226)) and (2 <= i2_7)) and (i2_7 < 226)), placeholder_65[((((i0_i1_fused_7*672) + (i2_7*3)) + i3_7) - 1350)], T.int16(0), dtype="int16")
        for ax0_ax1_fused_ax2_fused_7 in T.serial(0, 12544):
            Conv2dOutput_7 = T.decl_buffer([64], "int32")
            for ff_3 in T.serial(0, 64):
                Conv2dOutput_7[ff_3] = 0
                for ry_2, rx_2, rc_7 in T.grid(7, 7, 3):
                    Conv2dOutput_7[ff_3] = (Conv2dOutput_7[ff_3] + (T.cast(PaddedInput_7[(((((T.floordiv(ax0_ax1_fused_ax2_fused_7, 112)*1374) + (ry_2*687)) + (T.floormod(ax0_ax1_fused_ax2_fused_7, 112)*6)) + (rx_2*3)) + rc_7)], "int32")*T.cast(placeholder_66[((((ry_2*1344) + (rx_2*192)) + (rc_7*64)) + ff_3)], "int32")))
            for ax3_inner_7 in T.serial(0, 64):
                T_cast_21[((ax0_ax1_fused_ax2_fused_7*64) + ax3_inner_7)] = T.cast(T.max(T.min(T.q_multiply_shift((Conv2dOutput_7[ax3_inner_7] + placeholder_67[ax3_inner_7]), 1939887962, 31, -9, dtype="int32"), 255), 0), "uint8")

    @T.prim_func
    def tvmgen_default_fused_nn_max_pool2d_cast(placeholder_28: T.handle, T_cast_6: T.handle) -> None:
        # 3x3 stride-2 max-pool followed by a cast to int16.
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_nn_max_pool2d_cast", "tir.noalias": True})
        placeholder_29 = T.match_buffer(placeholder_28, [802816], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        T_cast_7 = T.match_buffer(T_cast_6, [177], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        # body
        tensor_2 = T.decl_buffer([200704], "uint8")
        for ax0_ax1_fused_4 in T.serial(0, 56):
            for ax2_4 in T.serial(0, 56):
                for ax3_init in T.serial(0, 64):
                    tensor_2[(((ax0_ax1_fused_4*3584) + (ax2_4*64)) + ax3_init)] = T.uint8(0)
                for rv0_rv1_fused_1, ax3_2 in T.grid(9, 64):
                    tensor_2[(((ax0_ax1_fused_4*3584) + (ax2_4*64)) + ax3_2)] = T.max(tensor_2[(((ax0_ax1_fused_4*3584) + (ax2_4*64)) + ax3_2)], T.if_then_else(((((ax0_ax1_fused_4*2) + T.floordiv(rv0_rv1_fused_1, 3)) < 112) and (((ax2_4*2) + T.floormod(rv0_rv1_fused_1, 3)) < 112)), placeholder_29[(((((ax0_ax1_fused_4*14336) + (T.floordiv(rv0_rv1_fused_1, 3)*7168)) + (ax2_4*128)) + (T.floormod(rv0_rv1_fused_1, 3)*64)) + ax3_2)], T.uint8(0), dtype="uint8"))
        for ax0_ax1_fused_5 in T.serial(0, 56):
            for ax2_5, ax3_3 in T.grid(56, 64):
                T_cast_7[(((ax0_ax1_fused_5*3584) + (ax2_5*64)) + ax3_3)] = T.cast(tensor_2[(((ax0_ax1_fused_5*3584) + (ax2_5*64)) + ax3_3)], "int16")

    @T.prim_func
    def run_model(input: T.handle, output: T.handle) -> None:
        # Runner: chains the three operators through sid_9 and sid_8.
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_run_model", "runner_function": True})
        # body
        T.attr("default", "device_id", 0)
        T.attr("default", "device_type", 1)
        sid_9 = T.allocate([301056], "int8", "global")
        sid_8 = T.allocate([802816], "int8", "global")
        T.evaluate(T.call_extern("tvmgen_default_fused_cast_subtract", input, T.lookup_param("p0", dtype="handle"), sid_9, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast", sid_9, T.lookup_param("p1", dtype="handle"), T.lookup_param("p2", dtype="handle"), sid_8, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_max_pool2d_cast", sid_8, output, dtype="int32"))
    __tvm_meta__ = None
# fmt: on
def test_linear():
    """Buffer-info extraction on the linear three-op graph: memory pressure,
    liveness conflicts, buffer sizes and pool candidates."""
    target = Target("c")
    fast_memory_pool = WorkspacePoolInfo(pool_name="fast_memory", targets=[target])
    slow_memory_pool = WorkspacePoolInfo(pool_name="slow_memory", targets=[target])
    tir_mod = _assign_targets_to_primfuncs_irmodule(LinearStructure, target)
    tir_mod = _assign_poolinfos_to_allocates_in_irmodule(
        tir_mod, [fast_memory_pool, slow_memory_pool]
    )
    buffer_info_analysis = tvm.tir.usmp.analysis.extract_buffer_info(tir_mod["run_model"], tir_mod)
    assert buffer_info_analysis.memory_pressure == 1117718
    buffer_info_map = _replace_stmt_with_buf_var_names(buffer_info_analysis.buffer_info_stmts)

    # Expected liveness conflicts, keyed by buffer name.
    expected_conflicts = {
        "PaddedInput_7": ["sid_9", "sid_8", "Conv2dOutput_7"],
        "tensor_2": ["sid_8"],
        "sid_9": ["PaddedInput_7"],
        "sid_8": ["PaddedInput_7", "Conv2dOutput_7", "tensor_2"],
        "Conv2dOutput_7": ["sid_8", "PaddedInput_7"],
    }
    for buf_name, conflict_names in expected_conflicts.items():
        _verify_conflicts(buf_name, conflict_names, buffer_info_map)

    # Expected workspace sizes in bytes.
    expected_sizes = {
        "sid_8": 802816,
        "Conv2dOutput_7": 256,
        "PaddedInput_7": 314646,
        "tensor_2": 200704,
        "sid_9": 301056,
    }
    for buf_name, expected in expected_sizes.items():
        assert buffer_info_map[buf_name].size_bytes == expected

    # Both pools must be offered as placement candidates.
    assert [
        pool_info.pool_name for pool_info in list(buffer_info_map["sid_8"].pool_candidates)
    ] == ["fast_memory", "slow_memory"]
# fmt: off
@tvm.script.ir_module
class ParallelSerialMixedForLoops:
    # Same conv2d fixture as AllSerialForLoops except the outer spatial loop
    # is T.parallel — used to check that USMP skips allocates ("dummy_allocate")
    # living inside parallel loops.
    @T.prim_func
    def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_1(placeholder_68: T.handle, placeholder_69: T.handle, placeholder_70: T.handle, T_cast_22: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_1", "tir.noalias": True})
        placeholder_71 = T.match_buffer(placeholder_68, [200704], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        placeholder_72 = T.match_buffer(placeholder_69, [110592], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        placeholder_73 = T.match_buffer(placeholder_70, [192], dtype="int32", elem_offset=0, align=64, offset_factor=1)
        T_cast_23 = T.match_buffer(T_cast_22, [305], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        # body
        PaddedInput_8 = T.decl_buffer([215296], "int16")
        for i0_i1_fused_8 in T.serial(0, 58):
            for i2_8, i3_8 in T.grid(58, 64):
                PaddedInput_8[(((i0_i1_fused_8*3712) + (i2_8*64)) + i3_8)] = T.if_then_else(((((1 <= i0_i1_fused_8) and (i0_i1_fused_8 < 57)) and (1 <= i2_8)) and (i2_8 < 57)), placeholder_71[((((i0_i1_fused_8*3584) + (i2_8*64)) + i3_8) - 3648)], T.int16(0), dtype="int16")
        # NOTE: parallel loop — allocations below are per-thread and are not
        # (yet) handled by USMP.
        for ax0_ax1_fused_ax2_fused_8 in T.parallel(0, 3136):
            dummy_allocate = T.decl_buffer([1], "int32")
            for ax3_outer_4 in T.serial(0, 3):
                Conv2dOutput_8 = T.decl_buffer([64], "int32")
                for ff_4 in T.serial(0, 64):
                    Conv2dOutput_8[ff_4] = 0
                    for ry_3, rx_3, rc_8 in T.grid(3, 3, 64):
                        Conv2dOutput_8[ff_4] = (Conv2dOutput_8[ff_4] + (T.cast(PaddedInput_8[(((((T.floordiv(ax0_ax1_fused_ax2_fused_8, 56)*3712) + (ry_3*3712)) + (rx_3*64)) + (T.floormod(ax0_ax1_fused_ax2_fused_8, 56)*64)) + rc_8)], "int32")*T.cast(placeholder_72[(((((ry_3*36864) + (rx_3*12288)) + (rc_8*192)) + (ax3_outer_4*64)) + ff_4)], "int32")))
                for ax3_inner_8 in T.serial(0, 64):
                    T_cast_23[(((ax0_ax1_fused_ax2_fused_8*192) + (ax3_outer_4*64)) + ax3_inner_8)] = T.cast(T.max(T.min(T.q_multiply_shift((Conv2dOutput_8[ax3_inner_8] + placeholder_73[((ax3_outer_4*64) + ax3_inner_8)]), 1139793473, 31, -6, dtype="int32"), 255), 0), "uint8")

    @T.prim_func
    def run_model(input: T.handle, output: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_run_model", "runner_function": True})
        # body
        T.attr("default", "device_id", 0)
        T.attr("default", "device_type", 1)
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_1", input, T.lookup_param("p5", dtype="handle"), T.lookup_param("p6", dtype="handle"), output, dtype="int32"))
    __tvm_meta__ = None
# fmt: on
# fmt: off
@tvm.script.ir_module
class AllSerialForLoops:
    # Identical to ParallelSerialMixedForLoops except every loop is T.serial,
    # so all allocates (including "dummy_allocate") are visible to USMP.
    @T.prim_func
    def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_1(placeholder_68: T.handle, placeholder_69: T.handle, placeholder_70: T.handle, T_cast_22: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_1", "tir.noalias": True})
        placeholder_71 = T.match_buffer(placeholder_68, [200704], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        placeholder_72 = T.match_buffer(placeholder_69, [110592], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        placeholder_73 = T.match_buffer(placeholder_70, [192], dtype="int32", elem_offset=0, align=64, offset_factor=1)
        T_cast_23 = T.match_buffer(T_cast_22, [305], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        # body
        PaddedInput_8 = T.decl_buffer([215296], "int16")
        for i0_i1_fused_8 in T.serial(0, 58):
            for i2_8, i3_8 in T.grid(58, 64):
                PaddedInput_8[(((i0_i1_fused_8*3712) + (i2_8*64)) + i3_8)] = T.if_then_else(((((1 <= i0_i1_fused_8) and (i0_i1_fused_8 < 57)) and (1 <= i2_8)) and (i2_8 < 57)), placeholder_71[((((i0_i1_fused_8*3584) + (i2_8*64)) + i3_8) - 3648)], T.int16(0), dtype="int16")
        for ax0_ax1_fused_ax2_fused_8 in T.serial(0, 3136):
            dummy_allocate = T.decl_buffer([1], "int32")
            for ax3_outer_4 in T.serial(0, 3):
                Conv2dOutput_8 = T.decl_buffer([64], "int32")
                for ff_4 in T.serial(0, 64):
                    Conv2dOutput_8[ff_4] = 0
                    for ry_3, rx_3, rc_8 in T.grid(3, 3, 64):
                        Conv2dOutput_8[ff_4] = (Conv2dOutput_8[ff_4] + (T.cast(PaddedInput_8[(((((T.floordiv(ax0_ax1_fused_ax2_fused_8, 56)*3712) + (ry_3*3712)) + (rx_3*64)) + (T.floormod(ax0_ax1_fused_ax2_fused_8, 56)*64)) + rc_8)], "int32")*T.cast(placeholder_72[(((((ry_3*36864) + (rx_3*12288)) + (rc_8*192)) + (ax3_outer_4*64)) + ff_4)], "int32")))
                for ax3_inner_8 in T.serial(0, 64):
                    T_cast_23[(((ax0_ax1_fused_ax2_fused_8*192) + (ax3_outer_4*64)) + ax3_inner_8)] = T.cast(T.max(T.min(T.q_multiply_shift((Conv2dOutput_8[ax3_inner_8] + placeholder_73[((ax3_outer_4*64) + ax3_inner_8)]), 1139793473, 31, -6, dtype="int32"), 255), 0), "uint8")

    @T.prim_func
    def run_model(input: T.handle, output: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_run_model", "runner_function": True})
        # body
        T.attr("default", "device_id", 0)
        T.attr("default", "device_type", 1)
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_1", input, T.lookup_param("p5", dtype="handle"), T.lookup_param("p6", dtype="handle"), output, dtype="int32"))
    __tvm_meta__ = None
# fmt: on
def test_parallel_serial_mixed_for_loops():
    """USMP buffer-info extraction skips allocates inside parallel loops.

    Runs ``extract_buffer_info`` on two variants of the same workload:

    * ``AllSerialForLoops`` -- every loop is serial, so all three allocates
      (including ``dummy_allocate``) are visible to USMP.
    * ``ParallelSerialMixedForLoops`` -- ``dummy_allocate`` sits inside a
      parallel for loop and must be ignored by the analysis.
    """
    target = Target("c")
    global_ws_pool = WorkspacePoolInfo(
        pool_name="global_workspace",
        targets=[target],
    )

    def _extract(tir_mod):
        # Attach targets and pool infos, then run USMP buffer-info extraction
        # on the module's "run_model" entry point.  Factored out because the
        # identical setup is needed for both module variants below.
        tir_mod = _assign_targets_to_primfuncs_irmodule(tir_mod, target)
        tir_mod = _assign_poolinfos_to_allocates_in_irmodule(tir_mod, [global_ws_pool])
        return tvm.tir.usmp.analysis.extract_buffer_info(tir_mod["run_model"], tir_mod)

    buffer_info_analysis = _extract(AllSerialForLoops)
    assert buffer_info_analysis.memory_pressure == 430848
    buffer_info_map = _replace_stmt_with_buf_var_names(buffer_info_analysis.buffer_info_stmts)
    # When all loops are serial all allocates are touched by USMP
    assert len(buffer_info_map) == 3
    for name in buffer_info_map:
        assert name in {"dummy_allocate", "Conv2dOutput_8", "PaddedInput_8"}

    buffer_info_analysis = _extract(ParallelSerialMixedForLoops)
    assert buffer_info_analysis.memory_pressure == 430848
    buffer_info_map = _replace_stmt_with_buf_var_names(buffer_info_analysis.buffer_info_stmts)
    # USMP will not touch (yet) the allocates inside parallel for loops
    assert len(buffer_info_map) == 2
    for name in buffer_info_map:
        assert name in {"Conv2dOutput_8", "PaddedInput_8"}
# fmt: off
@tvm.script.ir_module
class InceptionStructure:
    """Inception-style TIR module used as a fixture for USMP buffer-info
    extraction tests: cast/conv2d/max-pool operators feeding four branches
    that are concatenated, all driven by the ``run_model`` entry point."""
    # 3x3 max-pool over a 28x28x192 uint8 activation.
    @T.prim_func
    def tvmgen_default_fused_nn_max_pool2d(placeholder: T.handle, tensor: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_nn_max_pool2d", "tir.noalias": True})
        placeholder_1 = T.match_buffer(placeholder, [602112], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        tensor_1 = T.match_buffer(tensor, [249], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        # body
        for ax0_ax1_fused in T.serial(0, 28):
            for ax2 in T.serial(0, 28):
                for ax3_outer_init, ax3_inner_init in T.grid(3, 64):
                    tensor_1[((((ax0_ax1_fused*5376) + (ax2*192)) + (ax3_outer_init*64)) + ax3_inner_init)] = T.uint8(0)
                for rv0_rv1_fused, ax3_outer, ax3_inner in T.grid(9, 3, 64):
                    tensor_1[((((ax0_ax1_fused*5376) + (ax2*192)) + (ax3_outer*64)) + ax3_inner)] = T.max(tensor_1[((((ax0_ax1_fused*5376) + (ax2*192)) + (ax3_outer*64)) + ax3_inner)], T.if_then_else(((((ax0_ax1_fused*2) + T.floordiv(rv0_rv1_fused, 3)) < 56) and (((ax2*2) + T.floormod(rv0_rv1_fused, 3)) < 56)), placeholder_1[((((((ax0_ax1_fused*21504) + (T.floordiv(rv0_rv1_fused, 3)*10752)) + (ax2*384)) + (T.floormod(rv0_rv1_fused, 3)*192)) + (ax3_outer*64)) + ax3_inner)], T.uint8(0), dtype="uint8"))
    # uint8 -> int16 cast with mean subtraction (input preprocessing).
    @T.prim_func
    def tvmgen_default_fused_cast_subtract(placeholder_2: T.handle, placeholder_3: T.handle, T_subtract: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_cast_subtract", "tir.noalias": True})
        placeholder_4 = T.match_buffer(placeholder_2, [150528], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        placeholder_5 = T.match_buffer(placeholder_3, [1], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        T_subtract_1 = T.match_buffer(T_subtract, [452], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        # body
        for ax0_ax1_fused_1 in T.serial(0, 224):
            for ax2_1, ax3_inner_1 in T.grid(224, 3):
                T_subtract_1[(((ax0_ax1_fused_1*672) + (ax2_1*3)) + ax3_inner_1)] = (T.cast(placeholder_4[(((ax0_ax1_fused_1*672) + (ax2_1*3)) + ax3_inner_1)], "int16") - placeholder_5[0])
    # Plain uint8 -> int16 cast.
    @T.prim_func
    def tvmgen_default_fused_cast(placeholder_6: T.handle, T_cast: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_cast", "tir.noalias": True})
        placeholder_7 = T.match_buffer(placeholder_6, [150528], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        T_cast_1 = T.match_buffer(T_cast, [249], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        # body
        for ax0_ax1_fused_2 in T.serial(0, 28):
            for ax2_2, ax3_outer_1, ax3_inner_2 in T.grid(28, 12, 16):
                T_cast_1[((((ax0_ax1_fused_2*5376) + (ax2_2*192)) + (ax3_outer_1*16)) + ax3_inner_2)] = T.cast(placeholder_7[((((ax0_ax1_fused_2*5376) + (ax2_2*192)) + (ax3_outer_1*16)) + ax3_inner_2)], "int16")
    # Concatenation of the four inception branches along the channel axis.
    @T.prim_func
    def tvmgen_default_fused_concatenate(placeholder_8: T.handle, placeholder_9: T.handle, placeholder_10: T.handle, placeholder_11: T.handle, T_concat: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_concatenate", "tir.noalias": True})
        placeholder_12 = T.match_buffer(placeholder_8, [50176], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        T_concat_1 = T.match_buffer(T_concat, [313], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        placeholder_13 = T.match_buffer(placeholder_9, [100352], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        placeholder_14 = T.match_buffer(placeholder_11, [25088], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        placeholder_15 = T.match_buffer(placeholder_10, [25088], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        # body
        for ax0_ax1_fused_3 in T.serial(0, 28):
            for ax2_3, ax3 in T.grid(28, 256):
                T_concat_1[(((ax0_ax1_fused_3*7168) + (ax2_3*256)) + ax3)] = T.if_then_else((224 <= ax3), placeholder_14[((((ax0_ax1_fused_3*896) + (ax2_3*32)) + ax3) - 224)], T.if_then_else((192 <= ax3), placeholder_15[((((ax0_ax1_fused_3*896) + (ax2_3*32)) + ax3) - 192)], T.if_then_else((64 <= ax3), placeholder_13[((((ax0_ax1_fused_3*3584) + (ax2_3*128)) + ax3) - 64)], placeholder_12[(((ax0_ax1_fused_3*1792) + (ax2_3*64)) + ax3)], dtype="uint8"), dtype="uint8"), dtype="uint8")
    # 1x1 conv2d + bias + requantize; scratch buffers PaddedInput/Conv2dOutput.
    @T.prim_func
    def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast(placeholder_16: T.handle, placeholder_17: T.handle, placeholder_18: T.handle, T_cast_2: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast", "tir.noalias": True})
        placeholder_19 = T.match_buffer(placeholder_16, [200704], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        placeholder_20 = T.match_buffer(placeholder_17, [4096], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        placeholder_21 = T.match_buffer(placeholder_18, [64], dtype="int32", elem_offset=0, align=64, offset_factor=1)
        T_cast_3 = T.match_buffer(T_cast_2, [177], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        # body
        PaddedInput = T.decl_buffer([200704], "int16")
        for i0_i1_fused in T.serial(0, 56):
            for i2, i3 in T.grid(56, 64):
                PaddedInput[(((i0_i1_fused*3584) + (i2*64)) + i3)] = placeholder_19[(((i0_i1_fused*3584) + (i2*64)) + i3)]
        for ax0_ax1_fused_ax2_fused in T.serial(0, 3136):
            Conv2dOutput = T.decl_buffer([64], "int32")
            for ff in T.serial(0, 64):
                Conv2dOutput[ff] = 0
                for rc in T.serial(0, 64):
                    Conv2dOutput[ff] = (Conv2dOutput[ff] + (T.cast(PaddedInput[((ax0_ax1_fused_ax2_fused*64) + rc)], "int32")*T.cast(placeholder_20[((rc*64) + ff)], "int32")))
            for ax3_inner_3 in T.serial(0, 64):
                T_cast_3[((ax0_ax1_fused_ax2_fused*64) + ax3_inner_3)] = T.cast(T.cast(T.max(T.min(T.q_multiply_shift((Conv2dOutput[ax3_inner_3] + placeholder_21[ax3_inner_3]), 1191576922, 31, -4, dtype="int32"), 255), 0), "uint8"), "int16")
    # 1x1 conv2d variant with a single-element accumulator buffer.
    @T.prim_func
    def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_1(placeholder_22: T.handle, placeholder_23: T.handle, placeholder_24: T.handle, T_cast_4: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_1", "tir.noalias": True})
        placeholder_25 = T.match_buffer(placeholder_22, [150528], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        placeholder_26 = T.match_buffer(placeholder_23, [18432], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        placeholder_27 = T.match_buffer(placeholder_24, [96], dtype="int32", elem_offset=0, align=64, offset_factor=1)
        T_cast_5 = T.match_buffer(T_cast_4, [153], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        # body
        PaddedInput_1 = T.decl_buffer([150528], "int16")
        for i0_i1_fused_1 in T.serial(0, 28):
            for i2_1, i3_1 in T.grid(28, 192):
                PaddedInput_1[(((i0_i1_fused_1*5376) + (i2_1*192)) + i3_1)] = placeholder_25[(((i0_i1_fused_1*5376) + (i2_1*192)) + i3_1)]
        for ax0_ax1_fused_ax2_fused_1 in T.serial(0, 784):
            Conv2dOutput_1 = T.decl_buffer([1], "int32")
            for ax3_1 in T.serial(0, 96):
                Conv2dOutput_1[0] = 0
                for rc_1 in T.serial(0, 192):
                    Conv2dOutput_1[0] = (Conv2dOutput_1[0] + (T.cast(PaddedInput_1[((ax0_ax1_fused_ax2_fused_1*192) + rc_1)], "int32")*T.cast(placeholder_26[((rc_1*96) + ax3_1)], "int32")))
                T_cast_5[((ax0_ax1_fused_ax2_fused_1*96) + ax3_1)] = T.cast(T.cast(T.max(T.min(T.q_multiply_shift((Conv2dOutput_1[0] + placeholder_27[ax3_1]), 1201322342, 31, -6, dtype="int32"), 255), 0), "uint8"), "int16")
    # Max-pool followed by uint8 -> int16 cast; scratch buffer tensor_2.
    @T.prim_func
    def tvmgen_default_fused_nn_max_pool2d_cast(placeholder_28: T.handle, T_cast_6: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_nn_max_pool2d_cast", "tir.noalias": True})
        placeholder_29 = T.match_buffer(placeholder_28, [802816], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        T_cast_7 = T.match_buffer(T_cast_6, [177], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        # body
        tensor_2 = T.decl_buffer([200704], "uint8")
        for ax0_ax1_fused_4 in T.serial(0, 56):
            for ax2_4 in T.serial(0, 56):
                for ax3_init in T.serial(0, 64):
                    tensor_2[(((ax0_ax1_fused_4*3584) + (ax2_4*64)) + ax3_init)] = T.uint8(0)
                for rv0_rv1_fused_1, ax3_2 in T.grid(9, 64):
                    tensor_2[(((ax0_ax1_fused_4*3584) + (ax2_4*64)) + ax3_2)] = T.max(tensor_2[(((ax0_ax1_fused_4*3584) + (ax2_4*64)) + ax3_2)], T.if_then_else(((((ax0_ax1_fused_4*2) + T.floordiv(rv0_rv1_fused_1, 3)) < 112) and (((ax2_4*2) + T.floormod(rv0_rv1_fused_1, 3)) < 112)), placeholder_29[(((((ax0_ax1_fused_4*14336) + (T.floordiv(rv0_rv1_fused_1, 3)*7168)) + (ax2_4*128)) + (T.floormod(rv0_rv1_fused_1, 3)*64)) + ax3_2)], T.uint8(0), dtype="uint8"))
        for ax0_ax1_fused_5 in T.serial(0, 56):
            for ax2_5, ax3_3 in T.grid(56, 64):
                T_cast_7[(((ax0_ax1_fused_5*3584) + (ax2_5*64)) + ax3_3)] = T.cast(tensor_2[(((ax0_ax1_fused_5*3584) + (ax2_5*64)) + ax3_3)], "int16")
    # 1x1 conv2d + requantize producing uint8 (branch 1 of the concat).
    @T.prim_func
    def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_2(placeholder_30: T.handle, placeholder_31: T.handle, placeholder_32: T.handle, T_cast_8: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_2", "tir.noalias": True})
        placeholder_33 = T.match_buffer(placeholder_30, [150528], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        placeholder_34 = T.match_buffer(placeholder_31, [12288], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        placeholder_35 = T.match_buffer(placeholder_32, [64], dtype="int32", elem_offset=0, align=64, offset_factor=1)
        T_cast_9 = T.match_buffer(T_cast_8, [121], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        # body
        PaddedInput_2 = T.decl_buffer([150528], "int16")
        for i0_i1_fused_2 in T.serial(0, 28):
            for i2_2, i3_2 in T.grid(28, 192):
                PaddedInput_2[(((i0_i1_fused_2*5376) + (i2_2*192)) + i3_2)] = placeholder_33[(((i0_i1_fused_2*5376) + (i2_2*192)) + i3_2)]
        for ax0_ax1_fused_ax2_fused_2 in T.serial(0, 784):
            Conv2dOutput_2 = T.decl_buffer([64], "int32")
            for ff_1 in T.serial(0, 64):
                Conv2dOutput_2[ff_1] = 0
                for rc_2 in T.serial(0, 192):
                    Conv2dOutput_2[ff_1] = (Conv2dOutput_2[ff_1] + (T.cast(PaddedInput_2[((ax0_ax1_fused_ax2_fused_2*192) + rc_2)], "int32")*T.cast(placeholder_34[((rc_2*64) + ff_1)], "int32")))
            for ax3_inner_4 in T.serial(0, 64):
                T_cast_9[((ax0_ax1_fused_ax2_fused_2*64) + ax3_inner_4)] = T.cast(T.max(T.min(T.q_multiply_shift((Conv2dOutput_2[ax3_inner_4] + placeholder_35[ax3_inner_4]), 1663316467, 31, -7, dtype="int32"), 255), 0), "uint8")
    # Padded 3x3 max-pool + cast; scratch buffer tensor_3.
    @T.prim_func
    def tvmgen_default_fused_nn_max_pool2d_cast_1(placeholder_36: T.handle, T_cast_10: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_nn_max_pool2d_cast_1", "tir.noalias": True})
        placeholder_37 = T.match_buffer(placeholder_36, [150528], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        T_cast_11 = T.match_buffer(T_cast_10, [249], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        # body
        tensor_3 = T.decl_buffer([150528], "uint8")
        for ax0_ax1_fused_6 in T.serial(0, 28):
            for ax2_6 in T.serial(0, 28):
                for ax3_outer_init_1, ax3_inner_init_1 in T.grid(3, 64):
                    tensor_3[((((ax0_ax1_fused_6*5376) + (ax2_6*192)) + (ax3_outer_init_1*64)) + ax3_inner_init_1)] = T.uint8(0)
                for rv0_rv1_fused_2, ax3_outer_2, ax3_inner_5 in T.grid(9, 3, 64):
                    tensor_3[((((ax0_ax1_fused_6*5376) + (ax2_6*192)) + (ax3_outer_2*64)) + ax3_inner_5)] = T.max(tensor_3[((((ax0_ax1_fused_6*5376) + (ax2_6*192)) + (ax3_outer_2*64)) + ax3_inner_5)], T.if_then_else(((((1 <= (T.floordiv(rv0_rv1_fused_2, 3) + ax0_ax1_fused_6)) and ((T.floordiv(rv0_rv1_fused_2, 3) + ax0_ax1_fused_6) < 29)) and (1 <= (ax2_6 + T.floormod(rv0_rv1_fused_2, 3)))) and ((ax2_6 + T.floormod(rv0_rv1_fused_2, 3)) < 29)), placeholder_37[(((((((T.floordiv(rv0_rv1_fused_2, 3)*5376) + (ax0_ax1_fused_6*5376)) + (ax2_6*192)) + (T.floormod(rv0_rv1_fused_2, 3)*192)) + (ax3_outer_2*64)) + ax3_inner_5) - 5568)], T.uint8(0), dtype="uint8"))
        for ax0_ax1_fused_7 in T.serial(0, 28):
            for ax2_7, ax3_4 in T.grid(28, 192):
                T_cast_11[(((ax0_ax1_fused_7*5376) + (ax2_7*192)) + ax3_4)] = T.cast(tensor_3[(((ax0_ax1_fused_7*5376) + (ax2_7*192)) + ax3_4)], "int16")
    # 1x1 conv2d with double requantize (branch 4 of the concat).
    @T.prim_func
    def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_fixed_point_multiply_cli_4464294615199028320__2(placeholder_38: T.handle, placeholder_39: T.handle, placeholder_40: T.handle, T_cast_12: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_fixed_point_multiply_cli_4464294615199028320__2", "tir.noalias": True})
        placeholder_41 = T.match_buffer(placeholder_38, [150528], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        placeholder_42 = T.match_buffer(placeholder_39, [6144], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        placeholder_43 = T.match_buffer(placeholder_40, [32], dtype="int32", elem_offset=0, align=64, offset_factor=1)
        T_cast_13 = T.match_buffer(T_cast_12, [89], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        # body
        PaddedInput_3 = T.decl_buffer([150528], "int16")
        for i0_i1_fused_3 in T.serial(0, 28):
            for i2_3, i3_3 in T.grid(28, 192):
                PaddedInput_3[(((i0_i1_fused_3*5376) + (i2_3*192)) + i3_3)] = placeholder_41[(((i0_i1_fused_3*5376) + (i2_3*192)) + i3_3)]
        for ax0_ax1_fused_ax2_fused_3 in T.serial(0, 784):
            Conv2dOutput_3 = T.decl_buffer([1], "int32")
            for ax3_5 in T.serial(0, 32):
                Conv2dOutput_3[0] = 0
                for rc_3 in T.serial(0, 192):
                    Conv2dOutput_3[0] = (Conv2dOutput_3[0] + (T.cast(PaddedInput_3[((ax0_ax1_fused_ax2_fused_3*192) + rc_3)], "int32")*T.cast(placeholder_42[((rc_3*32) + ax3_5)], "int32")))
                T_cast_13[((ax0_ax1_fused_ax2_fused_3*32) + ax3_5)] = T.cast(T.max(T.min(T.q_multiply_shift(T.cast(T.cast(T.max(T.min(T.q_multiply_shift((Conv2dOutput_3[0] + placeholder_43[ax3_5]), 1811141736, 31, -6, dtype="int32"), 255), 0), "uint8"), "int32"), 1136333842, 31, 0, dtype="int32"), 255), 0), "uint8")
    # 1x1 conv2d reducing 192 -> 16 channels (reduction before 3x3 conv).
    @T.prim_func
    def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_2(placeholder_44: T.handle, placeholder_45: T.handle, placeholder_46: T.handle, T_cast_14: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_2", "tir.noalias": True})
        placeholder_47 = T.match_buffer(placeholder_44, [150528], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        placeholder_48 = T.match_buffer(placeholder_45, [3072], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        placeholder_49 = T.match_buffer(placeholder_46, [16], dtype="int32", elem_offset=0, align=64, offset_factor=1)
        T_cast_15 = T.match_buffer(T_cast_14, [73], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        # body
        PaddedInput_4 = T.decl_buffer([150528], "int16")
        for i0_i1_fused_4 in T.serial(0, 28):
            for i2_4, i3_4 in T.grid(28, 192):
                PaddedInput_4[(((i0_i1_fused_4*5376) + (i2_4*192)) + i3_4)] = placeholder_47[(((i0_i1_fused_4*5376) + (i2_4*192)) + i3_4)]
        for ax0_ax1_fused_ax2_fused_4 in T.serial(0, 784):
            Conv2dOutput_4 = T.decl_buffer([1], "int32")
            for ax3_6 in T.serial(0, 16):
                Conv2dOutput_4[0] = 0
                for rc_4 in T.serial(0, 192):
                    Conv2dOutput_4[0] = (Conv2dOutput_4[0] + (T.cast(PaddedInput_4[((ax0_ax1_fused_ax2_fused_4*192) + rc_4)], "int32")*T.cast(placeholder_48[((rc_4*16) + ax3_6)], "int32")))
                T_cast_15[((ax0_ax1_fused_ax2_fused_4*16) + ax3_6)] = T.cast(T.cast(T.max(T.min(T.q_multiply_shift((Conv2dOutput_4[0] + placeholder_49[ax3_6]), 1764006585, 31, -7, dtype="int32"), 255), 0), "uint8"), "int16")
    # 3x3 conv2d (16 -> 32 channels) with padding and double requantize.
    @T.prim_func
    def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_fixed_point_multiply_cli_4464294615199028320__1(placeholder_50: T.handle, placeholder_51: T.handle, placeholder_52: T.handle, T_cast_16: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_fixed_point_multiply_cli_4464294615199028320__1", "tir.noalias": True})
        placeholder_53 = T.match_buffer(placeholder_50, [12544], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        placeholder_54 = T.match_buffer(placeholder_51, [4608], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        placeholder_55 = T.match_buffer(placeholder_52, [32], dtype="int32", elem_offset=0, align=64, offset_factor=1)
        T_cast_17 = T.match_buffer(T_cast_16, [89], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        # body
        PaddedInput_5 = T.decl_buffer([14400], "int16")
        for i0_i1_fused_5 in T.serial(0, 30):
            for i2_5, i3_5 in T.grid(30, 16):
                PaddedInput_5[(((i0_i1_fused_5*480) + (i2_5*16)) + i3_5)] = T.if_then_else(((((1 <= i0_i1_fused_5) and (i0_i1_fused_5 < 29)) and (1 <= i2_5)) and (i2_5 < 29)), placeholder_53[((((i0_i1_fused_5*448) + (i2_5*16)) + i3_5) - 464)], T.int16(0), dtype="int16")
        for ax0_ax1_fused_ax2_fused_5 in T.serial(0, 784):
            Conv2dOutput_5 = T.decl_buffer([1], "int32")
            for ax3_7 in T.serial(0, 32):
                Conv2dOutput_5[0] = 0
                for ry, rx, rc_5 in T.grid(3, 3, 16):
                    Conv2dOutput_5[0] = (Conv2dOutput_5[0] + (T.cast(PaddedInput_5[(((((T.floordiv(ax0_ax1_fused_ax2_fused_5, 28)*480) + (ry*480)) + (rx*16)) + (T.floormod(ax0_ax1_fused_ax2_fused_5, 28)*16)) + rc_5)], "int32")*T.cast(placeholder_54[((((ry*1536) + (rx*512)) + (rc_5*32)) + ax3_7)], "int32")))
                T_cast_17[((ax0_ax1_fused_ax2_fused_5*32) + ax3_7)] = T.cast(T.max(T.min(T.q_multiply_shift(T.cast(T.cast(T.max(T.min(T.q_multiply_shift((Conv2dOutput_5[0] + placeholder_55[ax3_7]), 1131968888, 31, -6, dtype="int32"), 255), 0), "uint8"), "int32"), 1900719667, 31, 0, dtype="int32"), 255), 0), "uint8")
    # 3x3 conv2d (96 -> 128 channels) with padding and double requantize.
    @T.prim_func
    def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_fixed_point_multiply_cli_4464294615199028320_(placeholder_56: T.handle, placeholder_57: T.handle, placeholder_58: T.handle, T_cast_18: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_fixed_point_multiply_cli_4464294615199028320_", "tir.noalias": True})
        placeholder_59 = T.match_buffer(placeholder_56, [75264], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        placeholder_60 = T.match_buffer(placeholder_57, [110592], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        placeholder_61 = T.match_buffer(placeholder_58, [128], dtype="int32", elem_offset=0, align=64, offset_factor=1)
        T_cast_19 = T.match_buffer(T_cast_18, [185], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        # body
        PaddedInput_6 = T.decl_buffer([86400], "int16")
        for i0_i1_fused_6 in T.serial(0, 30):
            for i2_6, i3_6 in T.grid(30, 96):
                PaddedInput_6[(((i0_i1_fused_6*2880) + (i2_6*96)) + i3_6)] = T.if_then_else(((((1 <= i0_i1_fused_6) and (i0_i1_fused_6 < 29)) and (1 <= i2_6)) and (i2_6 < 29)), placeholder_59[((((i0_i1_fused_6*2688) + (i2_6*96)) + i3_6) - 2784)], T.int16(0), dtype="int16")
        for ax0_ax1_fused_ax2_fused_6 in T.serial(0, 784):
            Conv2dOutput_6 = T.decl_buffer([64], "int32")
            for ax3_outer_3 in T.serial(0, 2):
                for ff_2 in T.serial(0, 64):
                    Conv2dOutput_6[ff_2] = 0
                    for ry_1, rx_1, rc_6 in T.grid(3, 3, 96):
                        Conv2dOutput_6[ff_2] = (Conv2dOutput_6[ff_2] + (T.cast(PaddedInput_6[(((((T.floordiv(ax0_ax1_fused_ax2_fused_6, 28)*2880) + (ry_1*2880)) + (rx_1*96)) + (T.floormod(ax0_ax1_fused_ax2_fused_6, 28)*96)) + rc_6)], "int32")*T.cast(placeholder_60[(((((ry_1*36864) + (rx_1*12288)) + (rc_6*128)) + (ax3_outer_3*64)) + ff_2)], "int32")))
                for ax3_inner_6 in T.serial(0, 64):
                    T_cast_19[(((ax0_ax1_fused_ax2_fused_6*128) + (ax3_outer_3*64)) + ax3_inner_6)] = T.cast(T.max(T.min(T.q_multiply_shift(T.cast(T.cast(T.max(T.min(T.q_multiply_shift((Conv2dOutput_6[ax3_inner_6] + placeholder_61[((ax3_outer_3*64) + ax3_inner_6)]), 1374050734, 31, -7, dtype="int32"), 255), 0), "uint8"), "int32"), 1544713713, 31, 0, dtype="int32"), 255), 0), "uint8")
    # 7x7 stride-2 stem conv2d over the padded 224x224x3 input.
    @T.prim_func
    def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast(placeholder_62: T.handle, placeholder_63: T.handle, placeholder_64: T.handle, T_cast_20: T.handle) -> None:
        # function attr dict
        # NOTE(review): the "T.noalias" key below differs from the "tir.noalias"
        # key used by every other prim_func in this module -- looks like a typo;
        # confirm before relying on the noalias attribute for this function.
        T.func_attr({"global_symbol": "tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast", "T.noalias": True})
        placeholder_65 = T.match_buffer(placeholder_62, [150528], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        placeholder_66 = T.match_buffer(placeholder_63, [9408], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        placeholder_67 = T.match_buffer(placeholder_64, [64], dtype="int32", elem_offset=0, align=64, offset_factor=1)
        T_cast_21 = T.match_buffer(T_cast_20, [289], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        # body
        PaddedInput_7 = T.decl_buffer([157323], "int16")
        for i0_i1_fused_7 in T.serial(0, 229):
            for i2_7, i3_7 in T.grid(229, 3):
                PaddedInput_7[(((i0_i1_fused_7*687) + (i2_7*3)) + i3_7)] = T.if_then_else(((((2 <= i0_i1_fused_7) and (i0_i1_fused_7 < 226)) and (2 <= i2_7)) and (i2_7 < 226)), placeholder_65[((((i0_i1_fused_7*672) + (i2_7*3)) + i3_7) - 1350)], T.int16(0), dtype="int16")
        for ax0_ax1_fused_ax2_fused_7 in T.serial(0, 12544):
            Conv2dOutput_7 = T.decl_buffer([64], "int32")
            for ff_3 in T.serial(0, 64):
                Conv2dOutput_7[ff_3] = 0
                for ry_2, rx_2, rc_7 in T.grid(7, 7, 3):
                    Conv2dOutput_7[ff_3] = (Conv2dOutput_7[ff_3] + (T.cast(PaddedInput_7[(((((T.floordiv(ax0_ax1_fused_ax2_fused_7, 112)*1374) + (ry_2*687)) + (T.floormod(ax0_ax1_fused_ax2_fused_7, 112)*6)) + (rx_2*3)) + rc_7)], "int32")*T.cast(placeholder_66[((((ry_2*1344) + (rx_2*192)) + (rc_7*64)) + ff_3)], "int32")))
            for ax3_inner_7 in T.serial(0, 64):
                T_cast_21[((ax0_ax1_fused_ax2_fused_7*64) + ax3_inner_7)] = T.cast(T.max(T.min(T.q_multiply_shift((Conv2dOutput_7[ax3_inner_7] + placeholder_67[ax3_inner_7]), 1939887962, 31, -9, dtype="int32"), 255), 0), "uint8")
    # Padded 3x3 conv2d (64 -> 192 channels) with requantize.
    @T.prim_func
    def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_1(placeholder_68: T.handle, placeholder_69: T.handle, placeholder_70: T.handle, T_cast_22: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_1", "tir.noalias": True})
        placeholder_71 = T.match_buffer(placeholder_68, [200704], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        placeholder_72 = T.match_buffer(placeholder_69, [110592], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        placeholder_73 = T.match_buffer(placeholder_70, [192], dtype="int32", elem_offset=0, align=64, offset_factor=1)
        T_cast_23 = T.match_buffer(T_cast_22, [305], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        # body
        PaddedInput_8 = T.decl_buffer([215296], "int16")
        for i0_i1_fused_8 in T.serial(0, 58):
            for i2_8, i3_8 in T.grid(58, 64):
                PaddedInput_8[(((i0_i1_fused_8*3712) + (i2_8*64)) + i3_8)] = T.if_then_else(((((1 <= i0_i1_fused_8) and (i0_i1_fused_8 < 57)) and (1 <= i2_8)) and (i2_8 < 57)), placeholder_71[((((i0_i1_fused_8*3584) + (i2_8*64)) + i3_8) - 3648)], T.int16(0), dtype="int16")
        for ax0_ax1_fused_ax2_fused_8 in T.serial(0, 3136):
            Conv2dOutput_8 = T.decl_buffer([64], "int32")
            for ax3_outer_4 in T.serial(0, 3):
                for ff_4 in T.serial(0, 64):
                    Conv2dOutput_8[ff_4] = 0
                    for ry_3, rx_3, rc_8 in T.grid(3, 3, 64):
                        Conv2dOutput_8[ff_4] = (Conv2dOutput_8[ff_4] + (T.cast(PaddedInput_8[(((((T.floordiv(ax0_ax1_fused_ax2_fused_8, 56)*3712) + (ry_3*3712)) + (rx_3*64)) + (T.floormod(ax0_ax1_fused_ax2_fused_8, 56)*64)) + rc_8)], "int32")*T.cast(placeholder_72[(((((ry_3*36864) + (rx_3*12288)) + (rc_8*192)) + (ax3_outer_4*64)) + ff_4)], "int32")))
                for ax3_inner_8 in T.serial(0, 64):
                    T_cast_23[(((ax0_ax1_fused_ax2_fused_8*192) + (ax3_outer_4*64)) + ax3_inner_8)] = T.cast(T.max(T.min(T.q_multiply_shift((Conv2dOutput_8[ax3_inner_8] + placeholder_73[((ax3_outer_4*64) + ax3_inner_8)]), 1139793473, 31, -6, dtype="int32"), 255), 0), "uint8")
    # Entry point: allocates all inter-operator buffers (sid_*) and chains
    # the operator calls; this is the function the USMP tests analyze.
    @T.prim_func
    def run_model(input: T.handle, output: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_run_model", "runner_function": True})
        # body
        T.attr("default", "device_id", 0)
        T.attr("default", "device_type", 1)
        # Intermediate activation buffers; sizes in bytes (int8 elements).
        sid_32 = T.allocate([301056], "int8", "global")
        sid_20 = T.allocate([150528], "int8", "global")
        sid_6 = T.allocate([401408], "int8", "global")
        sid_9 = T.allocate([301056], "int8", "global")
        sid_7 = T.allocate([401408], "int8", "global")
        sid_8 = T.allocate([802816], "int8", "global")
        sid_2 = T.allocate([50176], "int8", "global")
        sid_3 = T.allocate([301056], "int8", "global")
        sid_19 = T.allocate([100352], "int8", "global")
        sid_4 = T.allocate([150528], "int8", "global")
        sid_5 = T.allocate([602112], "int8", "global")
        sid_25 = T.allocate([25088], "int8", "global")
        sid_26 = T.allocate([25088], "int8", "global")
        sid_31 = T.allocate([25088], "int8", "global")
        # Stem: preprocess, 7x7 conv, pools and casts down to the 28x28x192
        # activation (sid_3 / sid_4) consumed by the four branches.
        T.evaluate(T.call_extern("tvmgen_default_fused_cast_subtract", input, T.lookup_param("p0", dtype="handle"), sid_9, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast", sid_9, T.lookup_param("p1", dtype="handle"), T.lookup_param("p2", dtype="handle"), sid_8, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_max_pool2d_cast", sid_8, sid_7, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast", sid_7, T.lookup_param("p3", dtype="handle"), T.lookup_param("p4", dtype="handle"), sid_6, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_1", sid_6, T.lookup_param("p5", dtype="handle"), T.lookup_param("p6", dtype="handle"), sid_5, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_max_pool2d", sid_5, sid_4, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_cast", sid_4, sid_3, dtype="int32"))
        # Four inception branches producing sid_2, sid_19, sid_25, sid_31.
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_2", sid_3, T.lookup_param("p7", dtype="handle"), T.lookup_param("p8", dtype="handle"), sid_2, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_1", sid_3, T.lookup_param("p9", dtype="handle"), T.lookup_param("p10", dtype="handle"), sid_20, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_fixed_point_multiply_cli_4464294615199028320_", sid_20, T.lookup_param("p11", dtype="handle"), T.lookup_param("p12", dtype="handle"), sid_19, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_2", sid_3, T.lookup_param("p13", dtype="handle"), T.lookup_param("p14", dtype="handle"), sid_26, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_fixed_point_multiply_cli_4464294615199028320__1", sid_26, T.lookup_param("p15", dtype="handle"), T.lookup_param("p16", dtype="handle"), sid_25, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_max_pool2d_cast_1", sid_4, sid_32, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_fixed_point_multiply_cli_4464294615199028320__2", sid_32, T.lookup_param("p17", dtype="handle"), T.lookup_param("p18", dtype="handle"), sid_31, dtype="int32"))
        # Concatenate the four branch outputs into the final result.
        T.evaluate(T.call_extern("tvmgen_default_fused_concatenate", sid_2, sid_19, sid_25, sid_31, output, dtype="int32"))
    __tvm_meta__ = None
# fmt: on
def test_inception_structure():
target = Target("c")
global_ws_pool = WorkspacePoolInfo(
pool_name="global_workspace",
targets=[target],
)
tir_mod = InceptionStructure
tir_mod = _assign_targets_to_primfuncs_irmodule(tir_mod, target)
tir_mod = _assign_poolinfos_to_allocates_in_irmodule(tir_mod, [global_ws_pool])
main_func = tir_mod["run_model"]
buffer_info_analysis = tvm.tir.usmp.analysis.extract_buffer_info(main_func, tir_mod)
assert buffer_info_analysis.memory_pressure == 1117718
buffer_info_map = _replace_stmt_with_buf_var_names(buffer_info_analysis.buffer_info_stmts)
# check conflicts
_verify_conflicts(
"sid_3",
[
"sid_4",
"PaddedInput_2",
"sid_2",
"Conv2dOutput_2",
"PaddedInput_1",
"Conv2dOutput_1",
"sid_20",
"PaddedInput_6",
"Conv2dOutput_6",
"sid_19",
"PaddedInput_4",
],
buffer_info_map,
)
_verify_conflicts(
"Conv2dOutput",
[
"sid_6",
"PaddedInput",
],
buffer_info_map,
)
_verify_conflicts(
"Conv2dOutput_7",
[
"PaddedInput_7",
"sid_8",
],
buffer_info_map,
)
_verify_conflicts(
"sid_4",
[
"sid_5",
"sid_3",
"PaddedInput_2",
"sid_2",
"Conv2dOutput_2",
"PaddedInput_1",
"Conv2dOutput_1",
"sid_20",
"PaddedInput_6",
"Conv2dOutput_6",
"sid_19",
"PaddedInput_4",
"Conv2dOutput_4",
"sid_26",
"PaddedInput_5",
"Conv2dOutput_5",
"sid_25",
"tensor_3",
],
buffer_info_map,
)
_verify_conflicts(
"sid_2",
[
"PaddedInput_2",
"sid_3",
"sid_4",
"Conv2dOutput_2",
"PaddedInput_1",
"Conv2dOutput_1",
"sid_20",
"PaddedInput_6",
"Conv2dOutput_6",
"sid_19",
"PaddedInput_4",
"Conv2dOutput_4",
"sid_26",
"PaddedInput_5",
"Conv2dOutput_5",
"sid_25",
"tensor_3",
"sid_32",
"PaddedInput_3",
"Conv2dOutput_3",
"sid_31",
],
buffer_info_map,
)
_verify_conflicts(
"sid_19",
[
"Conv2dOutput_6",
"sid_2",
"PaddedInput_6",
"sid_3",
"sid_4",
"PaddedInput_4",
"Conv2dOutput_4",
"sid_26",
"PaddedInput_5",
"Conv2dOutput_5",
"sid_25",
"tensor_3",
"sid_32",
"PaddedInput_3",
"Conv2dOutput_3",
"sid_31",
],
buffer_info_map,
)
_verify_conflicts(
"PaddedInput_2",
[
"sid_3",
"sid_4",
"sid_2",
"Conv2dOutput_2",
],
buffer_info_map,
)
_verify_conflicts(
"Conv2dOutput_6",
[
"sid_2",
"PaddedInput_6",
"sid_3",
"sid_4",
"sid_19",
],
buffer_info_map,
)
_verify_conflicts(
"sid_9",
[
"PaddedInput_7",
],
buffer_info_map,
)
_verify_conflicts(
"sid_7",
[
"tensor_2",
"PaddedInput",
],
buffer_info_map,
)
_verify_conflicts(
"PaddedInput_4",
[
"sid_2",
"sid_19",
"sid_3",
"sid_4",
"Conv2dOutput_4",
"sid_26",
],
buffer_info_map,
)
_verify_conflicts(
"PaddedInput_3",
[
"sid_2",
"sid_32",
"sid_25",
"sid_19",
"Conv2dOutput_3",
"sid_31",
],
buffer_info_map,
)
_verify_conflicts(
"sid_5",
[
"PaddedInput_8",
"Conv2dOutput_8",
"sid_4",
],
buffer_info_map,
)
_verify_conflicts(
"sid_31",
[
"Conv2dOutput_3",
"PaddedInput_3",
"sid_2",
"sid_25",
"sid_19",
],
buffer_info_map,
)
_verify_conflicts(
"PaddedInput",
[
"sid_7",
"sid_6",
"Conv2dOutput",
],
buffer_info_map,
)
_verify_conflicts(
"Conv2dOutput_2",
[
"sid_2",
"PaddedInput_2",
"sid_3",
"sid_4",
],
buffer_info_map,
)
_verify_conflicts(
"sid_32",
[
"tensor_3",
"sid_2",
"sid_25",
"sid_19",
"PaddedInput_3",
],
buffer_info_map,
)
_verify_conflicts(
"tensor_2",
[
"sid_8",
"sid_7",
],
buffer_info_map,
)
_verify_conflicts(
"sid_26",
[
"Conv2dOutput_4",
"PaddedInput_4",
"sid_2",
"sid_19",
"sid_4",
"PaddedInput_5",
],
buffer_info_map,
)
_verify_conflicts(
"Conv2dOutput_3",
[
"PaddedInput_3",
"sid_2",
"sid_25",
"sid_19",
"sid_31",
],
buffer_info_map,
)
_verify_conflicts(
"PaddedInput_6",
[
"sid_2",
"sid_3",
"sid_20",
"sid_4",
"Conv2dOutput_6",
"sid_19",
],
buffer_info_map,
)
_verify_conflicts(
"sid_6",
[
"PaddedInput",
"Conv2dOutput",
"PaddedInput_8",
],
buffer_info_map,
)
_verify_conflicts(
"PaddedInput_8",
[
"sid_6",
"sid_5",
"Conv2dOutput_8",
],
buffer_info_map,
)
_verify_conflicts(
"Conv2dOutput_5",
[
"PaddedInput_5",
"sid_2",
"sid_19",
"sid_4",
"sid_25",
],
buffer_info_map,
)
_verify_conflicts(
"Conv2dOutput_1",
[
"PaddedInput_1",
"sid_2",
"sid_3",
"sid_4",
"sid_20",
],
buffer_info_map,
)
_verify_conflicts(
"tensor_3",
[
"sid_2",
"sid_25",
"sid_19",
"sid_4",
"sid_32",
],
buffer_info_map,
)
_verify_conflicts(
"sid_8",
[
"Conv2dOutput_7",
"PaddedInput_7",
"tensor_2",
],
buffer_info_map,
)
_verify_conflicts(
"sid_20",
[
"Conv2dOutput_1",
"PaddedInput_1",
"sid_2",
"sid_3",
"sid_4",
"PaddedInput_6",
],
buffer_info_map,
)
_verify_conflicts(
"Conv2dOutput_8",
[
"sid_5",
"PaddedInput_8",
],
buffer_info_map,
)
_verify_conflicts(
"PaddedInput_1",
[
"sid_2",
"sid_3",
"sid_4",
"Conv2dOutput_1",
"sid_20",
],
buffer_info_map,
)
_verify_conflicts(
"Conv2dOutput_4",
[
"PaddedInput_4",
"sid_2",
"sid_19",
"sid_4",
"sid_26",
],
buffer_info_map,
)
_verify_conflicts(
"sid_25",
[
"PaddedInput_5",
"Conv2dOutput_5",
"sid_2",
"sid_19",
"sid_4",
"tensor_3",
"sid_32",
"PaddedInput_3",
"Conv2dOutput_3",
"sid_31",
],
buffer_info_map,
)
_verify_conflicts(
"PaddedInput_7",
[
"sid_9",
"Conv2dOutput_7",
"sid_8",
],
buffer_info_map,
)
_verify_conflicts(
"PaddedInput_5",
[
"sid_2",
"sid_19",
"sid_26",
"sid_4",
"Conv2dOutput_5",
"sid_25",
],
buffer_info_map,
)
# check sizes
assert buffer_info_map["sid_20"].size_bytes == 150528
assert buffer_info_map["tensor_2"].size_bytes == 200704
assert buffer_info_map["sid_5"].size_bytes == 602112
assert buffer_info_map["sid_9"].size_bytes == 301056
assert buffer_info_map["Conv2dOutput_3"].size_bytes == 4
assert buffer_info_map["sid_26"].size_bytes == 25088
assert buffer_info_map["Conv2dOutput_2"].size_bytes == 256
assert buffer_info_map["PaddedInput_5"].size_bytes == 28800
assert buffer_info_map["sid_8"].size_bytes == 802816
assert buffer_info_map["Conv2dOutput_5"].size_bytes == 4
assert buffer_info_map["sid_3"].size_bytes == 301056
assert buffer_info_map["Conv2dOutput"].size_bytes == 256
assert buffer_info_map["PaddedInput_3"].size_bytes == 301056
assert buffer_info_map["sid_32"].size_bytes == 301056
assert buffer_info_map["PaddedInput_8"].size_bytes == 430592
assert buffer_info_map["sid_4"].size_bytes == 150528
assert buffer_info_map["PaddedInput_7"].size_bytes == 314646
assert buffer_info_map["sid_6"].size_bytes == 401408
assert buffer_info_map["Conv2dOutput_8"].size_bytes == 256
assert buffer_info_map["sid_25"].size_bytes == 25088
assert buffer_info_map["PaddedInput"].size_bytes == 401408
assert buffer_info_map["sid_7"].size_bytes == 401408
assert buffer_info_map["Conv2dOutput_1"].size_bytes == 4
assert buffer_info_map["Conv2dOutput_4"].size_bytes == 4
assert buffer_info_map["PaddedInput_2"].size_bytes == 301056
assert buffer_info_map["sid_31"].size_bytes == 25088
assert buffer_info_map["PaddedInput_1"].size_bytes == 301056
assert buffer_info_map["Conv2dOutput_6"].size_bytes == 256
assert buffer_info_map["PaddedInput_4"].size_bytes == 301056
assert buffer_info_map["sid_2"].size_bytes == 50176
assert buffer_info_map["tensor_3"].size_bytes == 150528
assert buffer_info_map["Conv2dOutput_7"].size_bytes == 256
assert buffer_info_map["sid_19"].size_bytes == 100352
assert buffer_info_map["PaddedInput_6"].size_bytes == 172800
# fmt: off
@tvm.script.ir_module
class MultipleCallsToSamePrimFuncModule:
    # TVMScript fixture for USMP buffer-info extraction: run_model below invokes
    # several of these PrimFuncs twice, so intra-operator workspace buffers must
    # be tracked once per call site rather than once per PrimFunc.
    # Stride-permuting copy of 864 floats (24 x 12 x 3 layout transform).
    @T.prim_func
    def tvmgen_default_fused_layout_transform_1(placeholder: T.handle, T_layout_trans: T.handle) -> None:
        # function attr dict
        T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "tvmgen_default_fused_layout_transform_1", "tir.noalias": True})
        placeholder_1 = T.match_buffer(placeholder, [864], dtype="float32")
        T_layout_trans_1 = T.match_buffer(T_layout_trans, [41], dtype="float32")
        # body
        for ax0_ax1_fused_ax2_fused, ax3, ax4_inner in T.grid(24, 12, 3):
            T_layout_trans_1[ax0_ax1_fused_ax2_fused * 36 + ax3 * 3 + ax4_inner] = placeholder_1[ax4_inner * 288 + ax0_ax1_fused_ax2_fused * 12 + ax3]
    # Convolution: zero-pads the input into data_pad (via if_then_else), then
    # accumulates a 3x3x3 reduction into conv2d_NCHWc_global per output row
    # before writing back.  data_pad and conv2d_NCHWc_global are the workspace
    # buffers this fixture exercises.
    @T.prim_func
    def tvmgen_default_fused_nn_contrib_conv2d_NCHWc(placeholder_2: T.handle, placeholder_3: T.handle, conv2d_NCHWc: T.handle) -> None:
        # function attr dict
        T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "tvmgen_default_fused_nn_contrib_conv2d_NCHWc", "tir.noalias": True})
        placeholder_4 = T.match_buffer(placeholder_2, [864], dtype="float32")
        placeholder_5 = T.match_buffer(placeholder_3, [81], dtype="float32")
        conv2d_NCHWc_1 = T.match_buffer(conv2d_NCHWc, [41], dtype="float32")
        # body
        data_pad = T.decl_buffer([1092], "float32")
        for i0_i1_fused_i2_fused, i3, i4 in T.grid(26, 14, 3):
            data_pad[i0_i1_fused_i2_fused * 42 + i3 * 3 + i4] = T.if_then_else(1 <= i0_i1_fused_i2_fused and i0_i1_fused_i2_fused < 25 and 1 <= i3 and i3 < 13, placeholder_4[i0_i1_fused_i2_fused * 36 + i3 * 3 + i4 - 39], T.float32(0), dtype="float32")
        for n_oc_chunk_fused_oh_fused in T.serial(0, 24):
            conv2d_NCHWc_global = T.decl_buffer([36], "float32")
            for oc_block_c_init in T.serial(0, 3):
                conv2d_NCHWc_global[oc_block_c_init] = T.float32(0)
            for oc_block_c_init in T.serial(0, 3):
                conv2d_NCHWc_global[oc_block_c_init + 3] = T.float32(0)
            for oc_block_c_init in T.serial(0, 3):
                conv2d_NCHWc_global[oc_block_c_init + 6] = T.float32(0)
            for oc_block_c_init in T.serial(0, 3):
                conv2d_NCHWc_global[oc_block_c_init + 9] = T.float32(0)
            for oc_block_c_init in T.serial(0, 3):
                conv2d_NCHWc_global[oc_block_c_init + 12] = T.float32(0)
            for oc_block_c_init in T.serial(0, 3):
                conv2d_NCHWc_global[oc_block_c_init + 15] = T.float32(0)
            for oc_block_c_init in T.serial(0, 3):
                conv2d_NCHWc_global[oc_block_c_init + 18] = T.float32(0)
            for oc_block_c_init in T.serial(0, 3):
                conv2d_NCHWc_global[oc_block_c_init + 21] = T.float32(0)
            for oc_block_c_init in T.serial(0, 3):
                conv2d_NCHWc_global[oc_block_c_init + 24] = T.float32(0)
            for oc_block_c_init in T.serial(0, 3):
                conv2d_NCHWc_global[oc_block_c_init + 27] = T.float32(0)
            for oc_block_c_init in T.serial(0, 3):
                conv2d_NCHWc_global[oc_block_c_init + 30] = T.float32(0)
            for oc_block_c_init in T.serial(0, 3):
                conv2d_NCHWc_global[oc_block_c_init + 33] = T.float32(0)
            for kh, kw, ic_inner in T.grid(3, 3, 3):
                for oc_block_c in T.serial(0, 3):
                    conv2d_NCHWc_global[oc_block_c] = conv2d_NCHWc_global[oc_block_c] + data_pad[kh * 42 + n_oc_chunk_fused_oh_fused * 42 + kw * 3 + ic_inner] * placeholder_5[kh * 27 + kw * 9 + ic_inner * 3 + oc_block_c]
                for oc_block_c in T.serial(0, 3):
                    conv2d_NCHWc_global[oc_block_c + 3] = conv2d_NCHWc_global[oc_block_c + 3] + data_pad[kh * 42 + n_oc_chunk_fused_oh_fused * 42 + kw * 3 + ic_inner + 3] * placeholder_5[kh * 27 + kw * 9 + ic_inner * 3 + oc_block_c]
                for oc_block_c in T.serial(0, 3):
                    conv2d_NCHWc_global[oc_block_c + 6] = conv2d_NCHWc_global[oc_block_c + 6] + data_pad[kh * 42 + n_oc_chunk_fused_oh_fused * 42 + kw * 3 + ic_inner + 6] * placeholder_5[kh * 27 + kw * 9 + ic_inner * 3 + oc_block_c]
                for oc_block_c in T.serial(0, 3):
                    conv2d_NCHWc_global[oc_block_c + 9] = conv2d_NCHWc_global[oc_block_c + 9] + data_pad[kh * 42 + n_oc_chunk_fused_oh_fused * 42 + kw * 3 + ic_inner + 9] * placeholder_5[kh * 27 + kw * 9 + ic_inner * 3 + oc_block_c]
                for oc_block_c in T.serial(0, 3):
                    conv2d_NCHWc_global[oc_block_c + 12] = conv2d_NCHWc_global[oc_block_c + 12] + data_pad[kh * 42 + n_oc_chunk_fused_oh_fused * 42 + kw * 3 + ic_inner + 12] * placeholder_5[kh * 27 + kw * 9 + ic_inner * 3 + oc_block_c]
                for oc_block_c in T.serial(0, 3):
                    conv2d_NCHWc_global[oc_block_c + 15] = conv2d_NCHWc_global[oc_block_c + 15] + data_pad[kh * 42 + n_oc_chunk_fused_oh_fused * 42 + kw * 3 + ic_inner + 15] * placeholder_5[kh * 27 + kw * 9 + ic_inner * 3 + oc_block_c]
                for oc_block_c in T.serial(0, 3):
                    conv2d_NCHWc_global[oc_block_c + 18] = conv2d_NCHWc_global[oc_block_c + 18] + data_pad[kh * 42 + n_oc_chunk_fused_oh_fused * 42 + kw * 3 + ic_inner + 18] * placeholder_5[kh * 27 + kw * 9 + ic_inner * 3 + oc_block_c]
                for oc_block_c in T.serial(0, 3):
                    conv2d_NCHWc_global[oc_block_c + 21] = conv2d_NCHWc_global[oc_block_c + 21] + data_pad[kh * 42 + n_oc_chunk_fused_oh_fused * 42 + kw * 3 + ic_inner + 21] * placeholder_5[kh * 27 + kw * 9 + ic_inner * 3 + oc_block_c]
                for oc_block_c in T.serial(0, 3):
                    conv2d_NCHWc_global[oc_block_c + 24] = conv2d_NCHWc_global[oc_block_c + 24] + data_pad[kh * 42 + n_oc_chunk_fused_oh_fused * 42 + kw * 3 + ic_inner + 24] * placeholder_5[kh * 27 + kw * 9 + ic_inner * 3 + oc_block_c]
                for oc_block_c in T.serial(0, 3):
                    conv2d_NCHWc_global[oc_block_c + 27] = conv2d_NCHWc_global[oc_block_c + 27] + data_pad[kh * 42 + n_oc_chunk_fused_oh_fused * 42 + kw * 3 + ic_inner + 27] * placeholder_5[kh * 27 + kw * 9 + ic_inner * 3 + oc_block_c]
                for oc_block_c in T.serial(0, 3):
                    conv2d_NCHWc_global[oc_block_c + 30] = conv2d_NCHWc_global[oc_block_c + 30] + data_pad[kh * 42 + n_oc_chunk_fused_oh_fused * 42 + kw * 3 + ic_inner + 30] * placeholder_5[kh * 27 + kw * 9 + ic_inner * 3 + oc_block_c]
                for oc_block_c in T.serial(0, 3):
                    conv2d_NCHWc_global[oc_block_c + 33] = conv2d_NCHWc_global[oc_block_c + 33] + data_pad[kh * 42 + n_oc_chunk_fused_oh_fused * 42 + kw * 3 + ic_inner + 33] * placeholder_5[kh * 27 + kw * 9 + ic_inner * 3 + oc_block_c]
            for ow_inner, oc_block in T.grid(12, 3):
                conv2d_NCHWc_1[n_oc_chunk_fused_oh_fused * 36 + ow_inner * 3 + oc_block] = conv2d_NCHWc_global[ow_inner * 3 + oc_block]
    # Row softmax (max / exp / sum / normalize over 12-wide rows) followed by an
    # add / multiply / add epilogue; the T_softmax_* decl_buffers are workspaces.
    @T.prim_func
    def tvmgen_default_fused_nn_softmax_add_add_multiply_add(placeholder_6: T.handle, placeholder_7: T.handle, placeholder_8: T.handle, placeholder_9: T.handle, placeholder_10: T.handle, T_add: T.handle) -> None:
        # function attr dict
        T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "tvmgen_default_fused_nn_softmax_add_add_multiply_add", "tir.noalias": True})
        placeholder_11 = T.match_buffer(placeholder_6, [864], dtype="float32")
        placeholder_12 = T.match_buffer(placeholder_7, [864], dtype="float32")
        placeholder_13 = T.match_buffer(placeholder_8, [3], dtype="float32")
        placeholder_14 = T.match_buffer(placeholder_9, [3], dtype="float32")
        placeholder_15 = T.match_buffer(placeholder_10, [3], dtype="float32")
        T_add_1 = T.match_buffer(T_add, [864], dtype="float32")
        # body
        for ax0_ax1_fused_ax2_fused in T.serial(0, 72):
            T_softmax_norm = T.decl_buffer([12], "float32")
            with T.decl_buffer([1], "float32") as T_softmax_maxelem:
                T_softmax_maxelem[0] = T.float32(-3.4028234663852886e+38)
                for k in T.serial(0, 12):
                    T_softmax_maxelem[0] = T.max(T_softmax_maxelem[0], placeholder_11[ax0_ax1_fused_ax2_fused * 12 + k])
                T_softmax_exp = T.decl_buffer([12], "float32")
                for i3 in T.serial(0, 12):
                    T_softmax_exp[i3] = T.exp(placeholder_11[ax0_ax1_fused_ax2_fused * 12 + i3] - T_softmax_maxelem[0], dtype="float32")
                T_softmax_expsum = T.decl_buffer([1], "float32")
                T_softmax_expsum[0] = T.float32(0)
                for k in T.serial(0, 12):
                    T_softmax_expsum[0] = T_softmax_expsum[0] + T_softmax_exp[k]
                for i3 in T.serial(0, 12):
                    T_softmax_norm[i3] = T_softmax_exp[i3] / T_softmax_expsum[0]
            for ax3 in T.serial(0, 12):
                T_add_1[ax0_ax1_fused_ax2_fused * 12 + ax3] = (placeholder_12[ax0_ax1_fused_ax2_fused * 12 + ax3] + T_softmax_norm[ax3] + placeholder_13[T.floordiv(ax0_ax1_fused_ax2_fused, 24)]) * placeholder_14[T.floordiv(ax0_ax1_fused_ax2_fused, 24)] + placeholder_15[T.floordiv(ax0_ax1_fused_ax2_fused, 24)]
    # Packed dense: accumulates into compute_global, copies into compute, then
    # applies ReLU (T.max with 0) on write-back.
    @T.prim_func
    def tvmgen_default_fused_nn_contrib_dense_pack_nn_relu(placeholder_16: T.handle, placeholder_17: T.handle, T_relu: T.handle) -> None:
        # function attr dict
        T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "tvmgen_default_fused_nn_contrib_dense_pack_nn_relu", "tir.noalias": True})
        placeholder_18 = T.match_buffer(placeholder_16, [864], dtype="float32")
        placeholder_19 = T.match_buffer(placeholder_17, [144], dtype="float32")
        T_relu_1 = T.match_buffer(T_relu, [864], dtype="float32")
        # body
        for ax1_outer_ax0_outer_fused in T.serial(0, 18):
            compute = T.decl_buffer([48], "float32")
            with T.decl_buffer([48], "float32") as compute_global:
                for x_c_init in T.serial(0, 6):
                    compute_global[x_c_init] = T.float32(0)
                for x_c_init in T.serial(0, 6):
                    compute_global[x_c_init + 6] = T.float32(0)
                for x_c_init in T.serial(0, 6):
                    compute_global[x_c_init + 12] = T.float32(0)
                for x_c_init in T.serial(0, 6):
                    compute_global[x_c_init + 18] = T.float32(0)
                for x_c_init in T.serial(0, 6):
                    compute_global[x_c_init + 24] = T.float32(0)
                for x_c_init in T.serial(0, 6):
                    compute_global[x_c_init + 30] = T.float32(0)
                for x_c_init in T.serial(0, 6):
                    compute_global[x_c_init + 36] = T.float32(0)
                for x_c_init in T.serial(0, 6):
                    compute_global[x_c_init + 42] = T.float32(0)
                for k_outer in T.serial(0, 12):
                    for x_c in T.serial(0, 6):
                        compute_global[x_c] = compute_global[x_c] + placeholder_18[T.floormod(ax1_outer_ax0_outer_fused, 9) * 96 + k_outer] * placeholder_19[T.floordiv(ax1_outer_ax0_outer_fused, 9) * 72 + k_outer * 6 + x_c]
                    for x_c in T.serial(0, 6):
                        compute_global[x_c + 6] = compute_global[x_c + 6] + placeholder_18[T.floormod(ax1_outer_ax0_outer_fused, 9) * 96 + k_outer + 12] * placeholder_19[T.floordiv(ax1_outer_ax0_outer_fused, 9) * 72 + k_outer * 6 + x_c]
                    for x_c in T.serial(0, 6):
                        compute_global[x_c + 12] = compute_global[x_c + 12] + placeholder_18[T.floormod(ax1_outer_ax0_outer_fused, 9) * 96 + k_outer + 24] * placeholder_19[T.floordiv(ax1_outer_ax0_outer_fused, 9) * 72 + k_outer * 6 + x_c]
                    for x_c in T.serial(0, 6):
                        compute_global[x_c + 18] = compute_global[x_c + 18] + placeholder_18[T.floormod(ax1_outer_ax0_outer_fused, 9) * 96 + k_outer + 36] * placeholder_19[T.floordiv(ax1_outer_ax0_outer_fused, 9) * 72 + k_outer * 6 + x_c]
                    for x_c in T.serial(0, 6):
                        compute_global[x_c + 24] = compute_global[x_c + 24] + placeholder_18[T.floormod(ax1_outer_ax0_outer_fused, 9) * 96 + k_outer + 48] * placeholder_19[T.floordiv(ax1_outer_ax0_outer_fused, 9) * 72 + k_outer * 6 + x_c]
                    for x_c in T.serial(0, 6):
                        compute_global[x_c + 30] = compute_global[x_c + 30] + placeholder_18[T.floormod(ax1_outer_ax0_outer_fused, 9) * 96 + k_outer + 60] * placeholder_19[T.floordiv(ax1_outer_ax0_outer_fused, 9) * 72 + k_outer * 6 + x_c]
                    for x_c in T.serial(0, 6):
                        compute_global[x_c + 36] = compute_global[x_c + 36] + placeholder_18[T.floormod(ax1_outer_ax0_outer_fused, 9) * 96 + k_outer + 72] * placeholder_19[T.floordiv(ax1_outer_ax0_outer_fused, 9) * 72 + k_outer * 6 + x_c]
                    for x_c in T.serial(0, 6):
                        compute_global[x_c + 42] = compute_global[x_c + 42] + placeholder_18[T.floormod(ax1_outer_ax0_outer_fused, 9) * 96 + k_outer + 84] * placeholder_19[T.floordiv(ax1_outer_ax0_outer_fused, 9) * 72 + k_outer * 6 + x_c]
                for x_inner_inner in T.serial(0, 6):
                    compute[x_inner_inner] = compute_global[x_inner_inner]
                for x_inner_inner in T.serial(0, 6):
                    compute[x_inner_inner + 6] = compute_global[x_inner_inner + 6]
                for x_inner_inner in T.serial(0, 6):
                    compute[x_inner_inner + 12] = compute_global[x_inner_inner + 12]
                for x_inner_inner in T.serial(0, 6):
                    compute[x_inner_inner + 18] = compute_global[x_inner_inner + 18]
                for x_inner_inner in T.serial(0, 6):
                    compute[x_inner_inner + 24] = compute_global[x_inner_inner + 24]
                for x_inner_inner in T.serial(0, 6):
                    compute[x_inner_inner + 30] = compute_global[x_inner_inner + 30]
                for x_inner_inner in T.serial(0, 6):
                    compute[x_inner_inner + 36] = compute_global[x_inner_inner + 36]
                for x_inner_inner in T.serial(0, 6):
                    compute[x_inner_inner + 42] = compute_global[x_inner_inner + 42]
            for ax0_inner_inner, ax1_inner_inner in T.grid(8, 6):
                T_relu_1[T.floormod(ax1_outer_ax0_outer_fused, 9) * 96 + ax0_inner_inner * 12 + T.floordiv(ax1_outer_ax0_outer_fused, 9) * 6 + ax1_inner_inner] = T.max(compute[ax0_inner_inner * 6 + ax1_inner_inner], T.float32(0))
    # Straight element-for-element copy of 864 floats (reshape, same layout).
    @T.prim_func
    def tvmgen_default_fused_reshape_1(placeholder_20: T.handle, T_reshape: T.handle) -> None:
        # function attr dict
        T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "tvmgen_default_fused_reshape_1", "tir.noalias": True})
        placeholder_21 = T.match_buffer(placeholder_20, [864], dtype="float32")
        T_reshape_1 = T.match_buffer(T_reshape, [864], dtype="float32")
        # body
        for ax0, ax1_inner in T.grid(72, 12):
            T_reshape_1[ax0 * 12 + ax1_inner] = placeholder_21[ax0 * 12 + ax1_inner]
    # Stride-permuting copy of 864 floats (3 x 24 x 12 layout transform).
    @T.prim_func
    def tvmgen_default_fused_layout_transform(placeholder_22: T.handle, T_layout_trans_2: T.handle) -> None:
        # function attr dict
        T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "tvmgen_default_fused_layout_transform", "tir.noalias": True})
        placeholder_23 = T.match_buffer(placeholder_22, [864], dtype="float32")
        T_layout_trans_3 = T.match_buffer(T_layout_trans_2, [864], dtype="float32")
        # body
        for ax0_ax1_fused, ax2, ax3_inner in T.grid(3, 24, 12):
            T_layout_trans_3[ax0_ax1_fused * 288 + ax2 * 12 + ax3_inner] = placeholder_23[ax2 * 36 + ax3_inner * 3 + ax0_ax1_fused]
    # Identity copy of 864 floats (reshape with identical flat index).
    @T.prim_func
    def tvmgen_default_fused_reshape(placeholder_24: T.handle, T_reshape_2: T.handle) -> None:
        # function attr dict
        T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "tvmgen_default_fused_reshape", "tir.noalias": True})
        placeholder_25 = T.match_buffer(placeholder_24, [864], dtype="float32")
        T_reshape_3 = T.match_buffer(T_reshape_2, [864], dtype="float32")
        # body
        for ax0_ax1_fused, ax2, ax3_inner in T.grid(3, 24, 12):
            T_reshape_3[ax0_ax1_fused * 288 + ax2 * 12 + ax3_inner] = placeholder_25[ax0_ax1_fused * 288 + ax2 * 12 + ax3_inner]
    # Row softmax followed by an elementwise add; workspaces get a "2" suffix in
    # the expected conflict maps because this is a second softmax in the model.
    @T.prim_func
    def tvmgen_default_fused_nn_softmax_add(placeholder_26: T.handle, placeholder_27: T.handle, T_add_2: T.handle) -> None:
        # function attr dict
        T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "tvmgen_default_fused_nn_softmax_add", "tir.noalias": True})
        placeholder_28 = T.match_buffer(placeholder_26, [864], dtype="float32")
        placeholder_29 = T.match_buffer(placeholder_27, [864], dtype="float32")
        T_add_3 = T.match_buffer(T_add_2, [864], dtype="float32")
        # body
        for ax0_ax1_fused_ax2_fused in T.serial(0, 72):
            T_softmax_norm = T.decl_buffer([12], "float32")
            with T.decl_buffer([1], "float32") as T_softmax_maxelem:
                T_softmax_maxelem[0] = T.float32(-3.4028234663852886e+38)
                for k in T.serial(0, 12):
                    T_softmax_maxelem[0] = T.max(T_softmax_maxelem[0], placeholder_28[ax0_ax1_fused_ax2_fused * 12 + k])
                T_softmax_exp= T.decl_buffer([12], "float32")
                for i3 in T.serial(0, 12):
                    T_softmax_exp[i3] = T.exp(placeholder_28[ax0_ax1_fused_ax2_fused * 12 + i3] - T_softmax_maxelem[0], dtype="float32")
                T_softmax_expsum = T.decl_buffer([1], "float32")
                T_softmax_expsum[0] = T.float32(0)
                for k in T.serial(0, 12):
                    T_softmax_expsum[0] = T_softmax_expsum[0] + T_softmax_exp[k]
                for i3 in T.serial(0, 12):
                    T_softmax_norm[i3] = T_softmax_exp[i3] / T_softmax_expsum[0]
            for ax3 in T.serial(0, 12):
                T_add_3[ax0_ax1_fused_ax2_fused * 12 + ax3] = placeholder_29[ax0_ax1_fused_ax2_fused * 12 + ax3] + T_softmax_norm[ax3]
    # Runner: chains the PrimFuncs above.  layout_transform_1, conv2d_NCHWc,
    # layout_transform, reshape_1, dense_pack_nn_relu and reshape are each
    # called twice with different sid_* workspace buffers, which is the
    # "multiple calls to same PrimFunc" scenario under test.
    @T.prim_func
    def run_model(data: T.handle, output: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_run_model", "runner_function": True})
        data_buffer = T.match_buffer(data, [864], dtype="float32", align=16)
        output_buffer = T.match_buffer(output, [864], dtype="float32", align=16)
        # body
        sid_11 = T.allocate([3456], "int8", "global.workspace")
        sid_5 = T.allocate([3456], "int8", "global.workspace")
        sid_10 = T.allocate([3456], "int8", "global.workspace")
        sid_6 = T.allocate([3456], "int8", "global.workspace")
        sid_8 = T.allocate([3456], "int8", "global.workspace")
        sid_2 = T.allocate([3456], "int8", "global.workspace")
        sid_7 = T.allocate([3456], "int8", "global.workspace")
        sid_3 = T.allocate([3456], "int8", "global.workspace")
        sid_12 = T.allocate([3456], "int8", "global.workspace")
        sid_4 = T.allocate([3456], "int8", "global.workspace")
        sid_18 = T.allocate([3456], "int8", "global.workspace")
        sid_19 = T.allocate([3456], "int8", "global.workspace")
        sid_20 = T.allocate([3456], "int8", "global.workspace")
        # Constant allocations (allocate_const) exercise the constant-pool path
        # alongside the workspace allocations above.
        sid_21 = T.allocate_const([0,1,2,3,4,5,6,7,8,9], "int8", [10])
        sid_22 = T.allocate_const([1], "int8", [1])
        sid_23 = T.allocate_const([2,1], "int8", [3456])
        T.evaluate(T.tvm_call_cpacked("tvmgen_default_fused_layout_transform_1", data_buffer.data, sid_23, dtype="int32"))
        T.evaluate(T.tvm_call_cpacked("tvmgen_default_fused_nn_contrib_conv2d_NCHWc", sid_8, T.cast(T.lookup_param("p0", dtype="handle"), "handle"), sid_7, dtype="int32"))
        T.evaluate(T.tvm_call_cpacked("tvmgen_default_fused_layout_transform", sid_7, sid_6, dtype="int32"))
        T.evaluate(T.tvm_call_cpacked("tvmgen_default_fused_reshape_1", data_buffer.data, sid_12, dtype="int32"))
        T.evaluate(T.tvm_call_cpacked("tvmgen_default_fused_nn_contrib_dense_pack_nn_relu", sid_12, T.cast(T.lookup_param("p1", dtype="handle"), "handle"), sid_11, dtype="int32"))
        T.evaluate(T.tvm_call_cpacked("tvmgen_default_fused_reshape", sid_11, sid_10, dtype="int32"))
        T.evaluate(T.tvm_call_cpacked("tvmgen_default_fused_nn_softmax_add_add_multiply_add", sid_6, sid_10, T.cast(T.lookup_param("p2", dtype="handle"), "handle"), T.cast(T.lookup_param("p3", dtype="handle"), "handle"), T.cast(T.lookup_param("p4", dtype="handle"), "handle"), sid_5, dtype="int32"))
        T.evaluate(T.tvm_call_cpacked("tvmgen_default_fused_layout_transform_1", sid_5, sid_4, dtype="int32"))
        T.evaluate(T.tvm_call_cpacked("tvmgen_default_fused_nn_contrib_conv2d_NCHWc", sid_4, T.cast(T.lookup_param("p5", dtype="handle"), "handle"), sid_3, dtype="int32"))
        T.evaluate(T.tvm_call_cpacked("tvmgen_default_fused_layout_transform", sid_3, sid_2, dtype="int32"))
        T.evaluate(T.tvm_call_cpacked("tvmgen_default_fused_reshape_1", sid_5, sid_20, dtype="int32"))
        T.evaluate(T.tvm_call_cpacked("tvmgen_default_fused_nn_contrib_dense_pack_nn_relu", sid_20, T.cast(T.lookup_param("p6", dtype="handle"), "handle"), sid_19, dtype="int32"))
        T.evaluate(T.tvm_call_cpacked("tvmgen_default_fused_reshape", sid_19, sid_18, dtype="int32"))
        T.evaluate(T.tvm_call_cpacked("tvmgen_default_fused_nn_softmax_add", sid_2, sid_18, output_buffer.data, dtype="int32"))
# fmt: on
def test_multiple_calls_to_same_primfunc():
    """Check buffer-info extraction when one PrimFunc is invoked multiple times.

    run_model in the fixture module calls several PrimFuncs twice, so the USMP
    analysis must report per-call-site liveness conflicts for the internal
    workspace buffers (compute_global, data_pad, T_softmax_* etc.).
    """
    target = Target("c")
    global_ws_pool = WorkspacePoolInfo(pool_name="global_workspace", targets=[target])
    global_const_pool = ConstantPoolInfo(pool_name="global_constants", targets=[target])

    tir_mod = _assign_targets_to_primfuncs_irmodule(MultipleCallsToSamePrimFuncModule, target)
    tir_mod = _assign_poolinfos_to_allocates_in_irmodule(
        tir_mod, [global_ws_pool], [global_const_pool]
    )

    buffer_info_analysis = tvm.tir.usmp.analysis.extract_buffer_info(
        tir_mod["run_model"], tir_mod
    )
    assert buffer_info_analysis.memory_pressure == 11424
    buffer_info_map = _replace_stmt_with_buf_var_names(buffer_info_analysis.buffer_info_stmts)

    # check conflicts: every buffer is verified against its full expected
    # conflict list, in the same order as the original hand-written sequence
    # (dicts preserve insertion order).
    expected_conflicts = {
        "sid_23": ["sid_22", "sid_21"],
        "sid_6": [
            "sid_7",
            "sid_12",
            "compute",
            "compute_global",
            "sid_11",
            "sid_10",
            "T_softmax_exp",
            "T_softmax_maxelem",
            "sid_5",
            "T_softmax_norm",
            "T_softmax_expsum",
        ],
        "T_softmax_exp": [
            "sid_10",
            "sid_6",
            "T_softmax_maxelem",
            "sid_5",
            "T_softmax_norm",
            "T_softmax_expsum",
        ],
        "T_softmax_expsum2": [
            "T_softmax_exp2",
            "T_softmax_norm2",
            "sid_18",
            "T_softmax_maxelem2",
            "sid_2",
        ],
        "compute": [
            "sid_12",
            "sid_6",
            "compute_global",
            "sid_11",
            "sid_19",
            "sid_20",
            "sid_2",
            "compute_global",
        ],
        "compute_global": [
            "compute",
            "sid_12",
            "sid_6",
            "sid_11",
            "compute",
            "sid_19",
            "sid_20",
            "sid_2",
        ],
        "sid_10": [
            "sid_11",
            "sid_6",
            "T_softmax_exp",
            "T_softmax_maxelem",
            "sid_5",
            "T_softmax_norm",
            "T_softmax_expsum",
        ],
        "sid_2": [
            "sid_3",
            "sid_5",
            "sid_20",
            "sid_19",
            "compute",
            "compute_global",
            "sid_18",
            "T_softmax_norm2",
            "T_softmax_exp2",
            "T_softmax_maxelem2",
            "T_softmax_expsum2",
        ],
        "sid_5": [
            "T_softmax_maxelem",
            "sid_10",
            "T_softmax_exp",
            "sid_6",
            "T_softmax_norm",
            "T_softmax_expsum",
            "sid_4",
            "data_pad",
            "sid_3",
            "conv2d_NCHWc_global",
            "sid_2",
            "sid_20",
        ],
        "T_softmax_norm2": [
            "sid_18",
            "sid_2",
            "T_softmax_exp2",
            "T_softmax_maxelem2",
            "T_softmax_expsum2",
        ],
        "sid_20": [
            "sid_2",
            "sid_5",
            "sid_19",
            "compute",
            "compute_global",
        ],
        "T_softmax_expsum": [
            "sid_5",
            "T_softmax_norm",
            "T_softmax_maxelem",
            "sid_10",
            "T_softmax_exp",
            "sid_6",
        ],
        "data_pad": [
            "sid_8",
            "conv2d_NCHWc_global",
            "sid_7",
            "sid_4",
            "sid_5",
            "sid_3",
            "conv2d_NCHWc_global",
        ],
        "sid_19": [
            "sid_20",
            "sid_2",
            "compute",
            "compute_global",
            "sid_18",
        ],
        "conv2d_NCHWc_global": [
            "data_pad",
            "sid_7",
            "sid_3",
            "data_pad",
            "sid_5",
        ],
        "sid_18": [
            "sid_19",
            "sid_2",
            "T_softmax_norm2",
            "T_softmax_exp2",
            "T_softmax_maxelem2",
            "T_softmax_expsum2",
        ],
        "sid_7": [
            "conv2d_NCHWc_global",
            "data_pad",
            "sid_6",
        ],
        "T_softmax_exp2": [
            "T_softmax_norm2",
            "sid_18",
            "sid_2",
            "T_softmax_maxelem2",
            "T_softmax_expsum2",
        ],
        "sid_4": [
            "sid_5",
            "data_pad",
        ],
        "T_softmax_maxelem": [
            "sid_10",
            "T_softmax_exp",
            "sid_6",
            "sid_5",
            "T_softmax_norm",
            "T_softmax_expsum",
        ],
        "T_softmax_maxelem2": [
            "T_softmax_exp2",
            "T_softmax_norm2",
            "sid_18",
            "sid_2",
            "T_softmax_expsum2",
        ],
        "sid_11": [
            "compute",
            "sid_12",
            "compute_global",
            "sid_6",
            "sid_10",
        ],
        "sid_12": [
            "sid_6",
            "compute",
            "compute_global",
            "sid_11",
        ],
        "T_softmax_norm": [
            "sid_5",
            "T_softmax_maxelem",
            "sid_10",
            "T_softmax_exp",
            "sid_6",
            "T_softmax_expsum",
        ],
        "sid_8": [
            "data_pad",
        ],
    }
    for buf_name, conflicting in expected_conflicts.items():
        _verify_conflicts(buf_name, conflicting, buffer_info_map)
if __name__ == "__main__":
    # Allow running this test file directly; extra CLI args are forwarded to pytest.
    pytest.main([__file__] + sys.argv[1:])
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_usmp_transform_convert_pool_allocations_to_offsets.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import sys
import tvm
from tvm.script import tir as T
from tvm.tir import stmt_functor
from tvm.tir.usmp import utils as usmp_utils
from tvm.target import Target
from tvm import WorkspacePoolInfo, PoolInfoProperties
def _get_primfuncs_from_module(module):
primfuncs = list()
for gv, primfunc in module.functions.items():
primfuncs.append(primfunc)
return primfuncs
def assign_poolinfos_to_allocates_in_primfunc(primfunc, pool_infos):
    """Helper to assign poolinfos to allocate nodes in a tir.PrimFunc"""

    def _attach_pool_candidates(stmt):
        # Returning None from an ir_transform callback leaves the node
        # unchanged, so everything except Allocate passes through untouched.
        if not isinstance(stmt, tvm.tir.Allocate):
            return None
        # Rebuild the Allocate with the candidate-pool annotation attached;
        # all other fields are carried over verbatim.
        return tvm.tir.Allocate(
            buffer_var=stmt.buffer_var,
            dtype=stmt.dtype,
            extents=stmt.extents,
            condition=stmt.condition,
            body=stmt.body,
            annotations={tvm.tir.usmp.utils.CANDIDATE_MEMORY_POOL_ATTR: pool_infos},
        )

    new_body = stmt_functor.ir_transform(primfunc.body, None, _attach_pool_candidates)
    return primfunc.with_body(new_body)
def assign_poolinfos_to_allocates_in_irmodule(mod, pool_infos):
    """Helper to assign poolinfos to allocate nodes in a IRModule"""
    updated = tvm.IRModule()
    for gvar, func in mod.functions.items():
        # Only PrimFuncs carry Allocate nodes; any other function kind is not
        # copied into the result, matching the original helper's behavior.
        if not isinstance(func, tvm.tir.PrimFunc):
            continue
        updated[gvar] = assign_poolinfos_to_allocates_in_primfunc(func, pool_infos)
    return updated
def _assign_targets_to_primfuncs_irmodule(mod, target):
    """Helper to assign target for PrimFunc in a IRModule"""
    updated = tvm.IRModule()
    for gvar, func in mod.functions.items():
        # Non-PrimFunc entries are dropped from the result, as in the
        # original helper; PrimFuncs get the "target" attribute attached.
        if not isinstance(func, tvm.tir.PrimFunc):
            continue
        updated[gvar] = func.with_attr("target", target)
    return updated
# fmt: off
@tvm.script.ir_module
class LinearStructure:
@T.prim_func
def tvmgen_default_fused_cast_subtract(placeholder_2: T.handle, placeholder_3: T.handle, T_subtract: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "tvmgen_default_fused_cast_subtract", "tir.noalias": True})
placeholder_4 = T.match_buffer(placeholder_2, [150528], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
T.preflattened_buffer(placeholder_4, [150528], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
placeholder_5 = T.match_buffer(placeholder_3, [1], dtype="int16", elem_offset=0, align=64, offset_factor=1)
T.preflattened_buffer(placeholder_5, [1], dtype="int16", elem_offset=0, align=64, offset_factor=1)
T_subtract_1 = T.match_buffer(T_subtract, [452], dtype="int16", elem_offset=0, align=64, offset_factor=1)
T.preflattened_buffer(T_subtract_1, [452], dtype="int16", elem_offset=0, align=64, offset_factor=1)
# body
for ax0_ax1_fused_1 in T.serial(0, 224):
for ax2_1, ax3_inner_1 in T.grid(224, 3):
T_subtract_1[(((ax0_ax1_fused_1*672) + (ax2_1*3)) + ax3_inner_1)] = (T.cast(placeholder_4[(((ax0_ax1_fused_1*672) + (ax2_1*3)) + ax3_inner_1)], "int16") - placeholder_5[0])
@T.prim_func
def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast(placeholder_62: T.handle, placeholder_63: T.handle, placeholder_64: T.handle, T_cast_20: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast", "tir.noalias": True})
placeholder_65 = T.match_buffer(placeholder_62, [150528], dtype="int16", elem_offset=0, align=64, offset_factor=1)
T.preflattened_buffer(placeholder_65, [150528], dtype="int16", elem_offset=0, align=64, offset_factor=1)
placeholder_66 = T.match_buffer(placeholder_63, [9408], dtype="int16", elem_offset=0, align=64, offset_factor=1)
T.preflattened_buffer(placeholder_66, [9408], dtype="int16", elem_offset=0, align=64, offset_factor=1)
placeholder_67 = T.match_buffer(placeholder_64, [64], dtype="int32", elem_offset=0, align=64, offset_factor=1)
T.preflattened_buffer(placeholder_67, [64], dtype="int32", elem_offset=0, align=64, offset_factor=1)
T_cast_21 = T.match_buffer(T_cast_20, [289], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
T.preflattened_buffer(T_cast_21, [289], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
# body
PaddedInput_7_data = T.allocate([157323], "int16", "global")
PaddedInput_7 = T.buffer_decl(shape=[157323], dtype="int16", data=PaddedInput_7_data)
for i0_i1_fused_7 in T.serial(0, 229):
for i2_7, i3_7 in T.grid(229, 3):
PaddedInput_7[(((i0_i1_fused_7*687) + (i2_7*3)) + i3_7)] = T.if_then_else(((((2 <= i0_i1_fused_7) and (i0_i1_fused_7 < 226)) and (2 <= i2_7)) and (i2_7 < 226)), placeholder_65[((((i0_i1_fused_7*672) + (i2_7*3)) + i3_7) - 1350)], T.int16(0), dtype="int16")
for ax0_ax1_fused_ax2_fused_7 in T.serial(0, 12544):
Conv2dOutput_7_data = T.allocate([64], "int32", "global")
Conv2dOutput_7 = T.buffer_decl(shape=[64], dtype="int32", data=Conv2dOutput_7_data)
for ff_3 in T.serial(0, 64):
Conv2dOutput_7[ff_3] = 0
for ry_2, rx_2, rc_7 in T.grid(7, 7, 3):
Conv2dOutput_7[ff_3] = (Conv2dOutput_7[ff_3] + (T.cast(PaddedInput_7[(((((T.floordiv(ax0_ax1_fused_ax2_fused_7, 112)*1374) + (ry_2*687)) + (T.floormod(ax0_ax1_fused_ax2_fused_7, 112)*6)) + (rx_2*3)) + rc_7)], "int32")*T.cast(placeholder_66[((((ry_2*1344) + (rx_2*192)) + (rc_7*64)) + ff_3)], "int32")))
for ax3_inner_7 in T.serial(0, 64):
T_cast_21[((ax0_ax1_fused_ax2_fused_7*64) + ax3_inner_7)] = T.cast(T.max(T.min(T.q_multiply_shift((Conv2dOutput_7[ax3_inner_7] + placeholder_67[ax3_inner_7]), 1939887962, 31, -9, dtype="int32"), 255), 0), "uint8")
    @T.prim_func
    def tvmgen_default_fused_nn_max_pool2d_cast(placeholder_28: T.handle, T_cast_6: T.handle) -> None:
        # Golden pre-USMP TIR: max-pool over a uint8 feature map (window of 9
        # elements per output, presumably 3x3 with stride 2 given the *2 index
        # scaling and the <112 bound guards), then cast the result to int16.
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_nn_max_pool2d_cast", "tir.noalias": True})
        placeholder_29 = T.match_buffer(placeholder_28, [802816], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        T.preflattened_buffer(placeholder_29, [802816], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        T_cast_7 = T.match_buffer(T_cast_6, [177], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        T.preflattened_buffer(T_cast_7, [177], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        # body
        # Scratch tensor holding the pooled uint8 result; USMP later rewrites
        # this allocate into an offset inside a workspace pool.
        tensor_2_data = T.allocate([200704], "uint8", "global")
        tensor_2 = T.buffer_decl(shape=[200704], dtype="uint8", data=tensor_2_data)
        for ax0_ax1_fused_4 in T.serial(0, 56):
            for ax2_4 in T.serial(0, 56):
                # Initialize the 64-channel output vector, then reduce the
                # 9-element window with T.max; out-of-bounds taps contribute 0.
                for ax3_init in T.serial(0, 64):
                    tensor_2[(((ax0_ax1_fused_4*3584) + (ax2_4*64)) + ax3_init)] = T.uint8(0)
                for rv0_rv1_fused_1, ax3_2 in T.grid(9, 64):
                    tensor_2[(((ax0_ax1_fused_4*3584) + (ax2_4*64)) + ax3_2)] = T.max(tensor_2[(((ax0_ax1_fused_4*3584) + (ax2_4*64)) + ax3_2)], T.if_then_else(((((ax0_ax1_fused_4*2) + T.floordiv(rv0_rv1_fused_1, 3)) < 112) and (((ax2_4*2) + T.floormod(rv0_rv1_fused_1, 3)) < 112)), placeholder_29[(((((ax0_ax1_fused_4*14336) + (T.floordiv(rv0_rv1_fused_1, 3)*7168)) + (ax2_4*128)) + (T.floormod(rv0_rv1_fused_1, 3)*64)) + ax3_2)], T.uint8(0), dtype="uint8"))
        # Second pass: widen the pooled uint8 values to int16.
        for ax0_ax1_fused_5 in T.serial(0, 56):
            for ax2_5, ax3_3 in T.grid(56, 64):
                T_cast_7[(((ax0_ax1_fused_5*3584) + (ax2_5*64)) + ax3_3)] = T.cast(tensor_2[(((ax0_ax1_fused_5*3584) + (ax2_5*64)) + ax3_3)], "int16")
    @T.prim_func
    def __tvm_main__(input: T.handle, output: T.handle) -> None:
        # Golden pre-USMP entry point: chains cast_subtract -> conv2d -> max_pool
        # through two intermediate "sid" workspace allocates that the planner
        # will later place into memory pools.
        # function attr dict
        T.func_attr({"global_symbol": "__tvm_main__", "runner_function": True})
        # body
        T.attr("default", "device_id", 0)
        T.attr("default", "device_type", 1)
        sid_9 = T.allocate([301056], "int8", "global")
        sid_8 = T.allocate([802816], "int8", "global")
        T.evaluate(T.call_extern("tvmgen_default_fused_cast_subtract", input, T.lookup_param("p0", dtype="handle"), sid_9, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast", sid_9, T.lookup_param("p1", dtype="handle"), T.lookup_param("p2", dtype="handle"), sid_8, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_max_pool2d_cast", sid_8, output, dtype="int32"))
# fmt: on
# fmt: off
@tvm.script.ir_module
class LinearStructurePlanned:
    # Golden (expected) module after USMP convert_pool_allocations_to_offsets:
    # each PrimFunc gains fast/slow workspace-pool pointer parameters, and
    # every intermediate allocate is replaced by a fixed offset into one of
    # the pools (expressed via T.address_of + T.let / let-bound pointers).
    @T.prim_func
    def __tvm_main__(input: T.handle, fast_memory_0_var: T.Ptr[T.uint8], slow_memory_1_var: T.Ptr[T.uint8], output: T.handle) -> None:
        fast_memory_0_buffer_var = T.match_buffer(fast_memory_0_var, [200704], dtype="uint8", strides=[1], elem_offset=0, align=16)
        slow_memory_1_buffer_var = T.match_buffer(slow_memory_1_var, [1418528], dtype="uint8", strides=[1], elem_offset=0, align=16)
        # body
        T.attr("default", "device_id", 0)
        T.attr("default", "device_type", 1)
        # Former sid_9/sid_8 allocates are now offsets into the slow pool.
        sid_9_let: T.Ptr[T.int8] = T.address_of(slow_memory_1_buffer_var[1117472], dtype="handle")
        sid_8_let: T.Ptr[T.int8] = T.address_of(slow_memory_1_buffer_var[0], dtype="handle")
        # Both pool base pointers are threaded through every operator call.
        T.evaluate(T.call_extern("tvmgen_default_fused_cast_subtract", input, T.lookup_param("p0", dtype="handle"), sid_9_let, fast_memory_0_buffer_var.data, slow_memory_1_buffer_var.data, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast", sid_9_let, T.lookup_param("p1", dtype="handle"), T.lookup_param("p2", dtype="handle"), sid_8_let, fast_memory_0_buffer_var.data, slow_memory_1_buffer_var.data, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_max_pool2d_cast", sid_8_let, output, fast_memory_0_buffer_var.data, slow_memory_1_buffer_var.data, dtype="int32"))
    @T.prim_func
    def tvmgen_default_fused_nn_max_pool2d_cast(placeholder_28: T.handle, T_cast_6: T.handle, fast_memory_6_var: T.Ptr[T.uint8], slow_memory_7_var: T.Ptr[T.uint8]) -> None:
        placeholder_29 = T.match_buffer(placeholder_28, [802816], dtype="uint8")
        T.preflattened_buffer(placeholder_29, [802816], dtype="uint8")
        T_cast_7 = T.match_buffer(T_cast_6, [177], dtype="int16")
        T.preflattened_buffer(T_cast_7, [177], dtype="int16")
        fast_memory_6_buffer_var = T.match_buffer(fast_memory_6_var, [200704], dtype="uint8", strides=[1], elem_offset=0, align=16)
        T.preflattened_buffer(fast_memory_6_buffer_var, [200704], dtype="uint8", strides=[1], elem_offset=0, align=16)
        slow_memory_7_buffer_var = T.match_buffer(slow_memory_7_var, [1418528], dtype="uint8", strides=[1], elem_offset=0, align=16)
        T.preflattened_buffer(slow_memory_7_buffer_var, [1418528], dtype="uint8", strides=[1], elem_offset=0, align=16)
        # body
        # Pooling scratch tensor now aliases the start of the fast pool.
        tensor_2_let = T.buffer_decl([200704], dtype="uint8")
        with T.let(tensor_2_let.data, T.address_of(fast_memory_6_buffer_var[0], dtype="handle")):
            for ax0_ax1_fused_4, ax2_4 in T.grid(56, 56):
                for ax3_init in T.serial(0, 64):
                    tensor_2_let[ax0_ax1_fused_4 * 3584 + ax2_4 * 64 + ax3_init] = T.uint8(0)
                for rv0_rv1_fused_1, ax3_2 in T.grid(9, 64):
                    tensor_2_let[ax0_ax1_fused_4 * 3584 + ax2_4 * 64 + ax3_2] = T.max(tensor_2_let[ax0_ax1_fused_4 * 3584 + ax2_4 * 64 + ax3_2], T.if_then_else(ax0_ax1_fused_4 * 2 + rv0_rv1_fused_1 // 3 < 112 and ax2_4 * 2 + rv0_rv1_fused_1 % 3 < 112, placeholder_29[ax0_ax1_fused_4 * 14336 + rv0_rv1_fused_1 // 3 * 7168 + ax2_4 * 128 + rv0_rv1_fused_1 % 3 * 64 + ax3_2], T.uint8(0), dtype="uint8"))
            for ax0_ax1_fused_5, ax2_5, ax3_3 in T.grid(56, 56, 64):
                T_cast_7[ax0_ax1_fused_5 * 3584 + ax2_5 * 64 + ax3_3] = T.cast(tensor_2_let[ax0_ax1_fused_5 * 3584 + ax2_5 * 64 + ax3_3], "int16")
    @T.prim_func
    def tvmgen_default_fused_cast_subtract(placeholder_2: T.handle, placeholder_3: T.handle, T_subtract: T.handle, fast_memory_2_var: T.Ptr[T.uint8], slow_memory_3_var: T.Ptr[T.uint8]) -> None:
        placeholder_4 = T.match_buffer(placeholder_2, [150528], dtype="uint8")
        T.preflattened_buffer(placeholder_4, [150528], dtype="uint8")
        placeholder_5 = T.match_buffer(placeholder_3, [1], dtype="int16")
        T.preflattened_buffer(placeholder_5, [1], dtype="int16")
        T_subtract_1 = T.match_buffer(T_subtract, [452], dtype="int16")
        T.preflattened_buffer(T_subtract_1, [452], dtype="int16")
        fast_memory_2_buffer_var = T.match_buffer(fast_memory_2_var, [200704], dtype="uint8", strides=[1], elem_offset=0, align=16)
        T.preflattened_buffer(fast_memory_2_buffer_var, [200704], dtype="uint8", strides=[1], elem_offset=0, align=16)
        slow_memory_3_buffer_var = T.match_buffer(slow_memory_3_var, [1418528], dtype="uint8", strides=[1], elem_offset=0, align=16)
        T.preflattened_buffer(slow_memory_3_buffer_var, [1418528], dtype="uint8", strides=[1], elem_offset=0, align=16)
        # body
        # No local allocates here, so only the new pool parameters differ
        # from the unplanned version.
        for ax0_ax1_fused_1, ax2_1, ax3_inner_1 in T.grid(224, 224, 3):
            T_subtract_1[ax0_ax1_fused_1 * 672 + ax2_1 * 3 + ax3_inner_1] = T.cast(placeholder_4[ax0_ax1_fused_1 * 672 + ax2_1 * 3 + ax3_inner_1], "int16") - placeholder_5[0]
    @T.prim_func
    def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast(placeholder_62: T.handle, placeholder_63: T.handle, placeholder_64: T.handle, T_cast_20: T.handle, fast_memory_4_var: T.Ptr[T.uint8], slow_memory_5_var: T.Ptr[T.uint8]) -> None:
        placeholder_65 = T.match_buffer(placeholder_62, [150528], dtype="int16")
        T.preflattened_buffer(placeholder_65, [150528], dtype="int16")
        placeholder_66 = T.match_buffer(placeholder_63, [9408], dtype="int16")
        T.preflattened_buffer(placeholder_66, [9408], dtype="int16")
        placeholder_67 = T.match_buffer(placeholder_64, [64], dtype="int32")
        T.preflattened_buffer(placeholder_67, [64], dtype="int32")
        T_cast_21 = T.match_buffer(T_cast_20, [289], dtype="uint8")
        T.preflattened_buffer(T_cast_21, [289], dtype="uint8")
        fast_memory_4_buffer_var = T.match_buffer(fast_memory_4_var, [200704], dtype="uint8", strides=[1], elem_offset=0, align=16)
        T.preflattened_buffer(fast_memory_4_buffer_var, [200704], dtype="uint8", strides=[1], elem_offset=0, align=16)
        slow_memory_5_buffer_var = T.match_buffer(slow_memory_5_var, [1418528], dtype="uint8", strides=[1], elem_offset=0, align=16)
        T.preflattened_buffer(slow_memory_5_buffer_var, [1418528], dtype="uint8", strides=[1], elem_offset=0, align=16)
        # body
        # Padded input lives in the slow pool; the per-pixel accumulator lives
        # in the fast pool.
        PaddedInput_7_let = T.buffer_decl([157323], "int16")
        with T.let(PaddedInput_7_let.data, T.address_of(slow_memory_5_buffer_var[802816], dtype="handle")):
            for i0_i1_fused_7, i2_7, i3_7 in T.grid(229, 229, 3):
                PaddedInput_7_let[i0_i1_fused_7 * 687 + i2_7 * 3 + i3_7] = T.if_then_else(2 <= i0_i1_fused_7 and i0_i1_fused_7 < 226 and 2 <= i2_7 and i2_7 < 226, placeholder_65[i0_i1_fused_7 * 672 + i2_7 * 3 + i3_7 - 1350], T.int16(0), dtype="int16")
            for ax0_ax1_fused_ax2_fused_7 in T.serial(0, 12544):
                Conv2dOutput_7_let = T.buffer_decl([64], "int32")
                with T.let(Conv2dOutput_7_let.data, T.address_of(fast_memory_4_buffer_var[0], dtype="handle")):
                    for ff_3 in T.serial(0, 64):
                        Conv2dOutput_7_let[ff_3] = 0
                        for ry_2, rx_2, rc_7 in T.grid(7, 7, 3):
                            Conv2dOutput_7_let[ff_3] = Conv2dOutput_7_let[ff_3] + T.cast(PaddedInput_7_let[ax0_ax1_fused_ax2_fused_7 // 112 * 1374 + ry_2 * 687 + ax0_ax1_fused_ax2_fused_7 % 112 * 6 + rx_2 * 3 + rc_7], "int32") * T.cast(placeholder_66[ry_2 * 1344 + rx_2 * 192 + rc_7 * 64 + ff_3], "int32")
                    for ax3_inner_7 in T.serial(0, 64):
                        T_cast_21[ax0_ax1_fused_ax2_fused_7 * 64 + ax3_inner_7] = T.cast(T.max(T.min(T.q_multiply_shift(Conv2dOutput_7_let[ax3_inner_7] + placeholder_67[ax3_inner_7], 1939887962, 31, -9, dtype="int32"), 255), 0), "uint8")
# fmt: on
def test_mobilenet_subgraph():
    """Plan the mobilenet subgraph with USMP greedy-by-size and check the
    resulting module against the golden ``LinearStructurePlanned``."""
    target = Target("c")
    pools = [
        WorkspacePoolInfo(
            "fast_memory",
            [target],
            PoolInfoProperties(size_hint_bytes=200704),
        ),
        WorkspacePoolInfo(
            "slow_memory",
            [target],
        ),
    ]
    # Attach targets and pool candidates to every allocate in the module.
    mod = _assign_targets_to_primfuncs_irmodule(LinearStructure, target)
    mod = assign_poolinfos_to_allocates_in_irmodule(mod, pools)

    # Extract buffer liveness info from the main function.
    analysis = tvm.tir.usmp.analysis.extract_buffer_info(mod["__tvm_main__"], mod)
    info_map = analysis.buffer_info_stmts
    info_array = tvm.get_global_func("tir.usmp.CreateArrayBufferInfo")(info_map)

    # Run the greedy-by-size allocator and map its result back onto statements.
    allocations = tvm.get_global_func("tir.usmp.algo.greedy_by_size")(
        info_array, analysis.memory_pressure
    )
    pool_allocations = tvm.get_global_func("tir.usmp.AssignStmtPoolAllocations")(
        info_map, allocations
    )

    # Rewrite allocates into pool offsets and compare function-by-function.
    planned_mod = tvm.tir.usmp.transform.convert_pool_allocations_to_offsets(
        pool_allocations, emit_tvmscript_printable=True
    )(mod)
    for gv, expected_func in LinearStructurePlanned.functions.items():
        tvm.ir.assert_structural_equal(planned_mod[gv.name_hint], expected_func)
# fmt: off
@tvm.script.ir_module
class ResnetStructure:
    # Golden pre-USMP module for a resnet-style subgraph: a cast/subtract
    # producer feeding a chain of quantized conv2d PrimFuncs, each using
    # "global" allocates that the planner will later place into pools.
    @T.prim_func
    def tvmgen_default_fused_cast_subtract_fixed_point_multiply_add_clip_cast_cast(placeholder: T.handle, placeholder_1: T.handle, T_cast: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_cast_subtract_fixed_point_multiply_add_clip_cast_cast", "tir.noalias": True})
        placeholder_2 = T.match_buffer(placeholder, [360000], dtype="uint8")
        T.preflattened_buffer(placeholder_2, [360000], dtype="uint8")
        placeholder_3 = T.match_buffer(placeholder_1, [64], dtype="int32")
        T.preflattened_buffer(placeholder_3, [64], dtype="int32")
        T_cast_1 = T.match_buffer(T_cast, [215], dtype="int16")
        T.preflattened_buffer(T_cast_1, [215], dtype="int16")
        # body
        # Pure elementwise requantization: no intermediate allocates needed.
        for ax0_ax1_fused, ax2, ax3_outer, ax3_inner in T.grid(75, 75, 4, 16):
            T_cast_1[ax0_ax1_fused * 4800 + ax2 * 64 + ax3_outer * 16 + ax3_inner] = T.cast(T.cast(T.max(T.min(T.q_multiply_shift(T.cast(placeholder_2[ax0_ax1_fused * 4800 + ax2 * 64 + ax3_outer * 16 + ax3_inner], "int32") - 94, 1843157232, 31, 1, dtype="int32") + placeholder_3[ax3_outer * 16 + ax3_inner], 255), 0), "uint8"), "int16")
    @T.prim_func
    def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_1(placeholder_10: T.handle, placeholder_11: T.handle, placeholder_12: T.handle, T_cast_4: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_1", "tir.noalias": True})
        placeholder_13 = T.match_buffer(placeholder_10, [360000], dtype="int16")
        T.preflattened_buffer(placeholder_13, [360000], dtype="int16")
        placeholder_14 = T.match_buffer(placeholder_11, [36864], dtype="int16")
        T.preflattened_buffer(placeholder_14, [36864], dtype="int16")
        placeholder_15 = T.match_buffer(placeholder_12, [64], dtype="int32")
        T.preflattened_buffer(placeholder_15, [64], dtype="int32")
        T_cast_5 = T.match_buffer(T_cast_4, [215], dtype="int16")
        T.preflattened_buffer(T_cast_5, [215], dtype="int16")
        # body
        # 3x3 conv with explicit zero padding into a scratch buffer.
        PaddedInput_1_data = T.allocate([379456], "int16", "global")
        PaddedInput_1 = T.buffer_decl(shape=[379456], dtype="int16", data=PaddedInput_1_data)
        for i0_i1_fused_1, i2_1, i3_1 in T.grid(77, 77, 64):
            PaddedInput_1[i0_i1_fused_1 * 4928 + i2_1 * 64 + i3_1] = T.if_then_else(1 <= i0_i1_fused_1 and i0_i1_fused_1 < 76 and 1 <= i2_1 and i2_1 < 76, placeholder_13[i0_i1_fused_1 * 4800 + i2_1 * 64 + i3_1 - 4864], T.int16(0), dtype="int16")
        for ax0_ax1_fused_ax2_fused_1 in T.serial(0, 5625):
            Conv2dOutput_1_data = T.allocate([64], "int32", "global")
            Conv2dOutput_1 = T.buffer_decl(shape=[64], dtype="int32", data=Conv2dOutput_1_data)
            for ff_1 in T.serial(0, 64):
                Conv2dOutput_1[ff_1] = 0
                for ry, rx, rc_1 in T.grid(3, 3, 64):
                    Conv2dOutput_1[ff_1] = Conv2dOutput_1[ff_1] + T.cast(PaddedInput_1[T.floordiv(ax0_ax1_fused_ax2_fused_1, 75) * 4928 + ry * 4928 + rx * 64 + T.floormod(ax0_ax1_fused_ax2_fused_1, 75) * 64 + rc_1], "int32") * T.cast(placeholder_14[ry * 12288 + rx * 4096 + rc_1 * 64 + ff_1], "int32")
            for ax3_inner_2 in T.serial(0, 64):
                T_cast_5[ax0_ax1_fused_ax2_fused_1 * 64 + ax3_inner_2] = T.cast(T.cast(T.max(T.min(T.q_multiply_shift(Conv2dOutput_1[ax3_inner_2] + placeholder_15[ax3_inner_2], 1608879842, 31, -7, dtype="int32"), 255), 0), "uint8"), "int16")
    @T.prim_func
    def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_add_clip_cast_cast_subtract_fixed_point_15934180698220515269_(placeholder_16: T.handle, placeholder_17: T.handle, placeholder_18: T.handle, T_add: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_add_clip_cast_cast_subtract_fixed_point_15934180698220515269_", "tir.noalias": True})
        placeholder_19 = T.match_buffer(placeholder_16, [360000], dtype="int16")
        T.preflattened_buffer(placeholder_19, [360000], dtype="int16")
        placeholder_20 = T.match_buffer(placeholder_17, [16384], dtype="int16")
        T.preflattened_buffer(placeholder_20, [16384], dtype="int16")
        placeholder_21 = T.match_buffer(placeholder_18, [256], dtype="int32")
        T.preflattened_buffer(placeholder_21, [256], dtype="int32")
        T_add_1 = T.match_buffer(T_add, [407], dtype="int32")
        T.preflattened_buffer(T_add_1, [407], dtype="int32")
        # body
        # 1x1 conv: "padding" is a straight copy of the input.
        PaddedInput_2_data = T.allocate([360000], "int16", "global")
        PaddedInput_2 = T.buffer_decl(shape=[360000], dtype="int16", data=PaddedInput_2_data)
        for i0_i1_fused_2, i2_2, i3_2 in T.grid(75, 75, 64):
            PaddedInput_2[i0_i1_fused_2 * 4800 + i2_2 * 64 + i3_2] = placeholder_19[i0_i1_fused_2 * 4800 + i2_2 * 64 + i3_2]
        for ax0_ax1_fused_ax2_fused_2 in T.serial(0, 5625):
            Conv2dOutput_2_data = T.allocate([64], "int32", "global")
            Conv2dOutput_2 = T.buffer_decl(shape=[64], dtype="int32", data=Conv2dOutput_2_data)
            for ax3_outer_1 in T.serial(0, 4):
                for ff_2 in T.serial(0, 64):
                    Conv2dOutput_2[ff_2] = 0
                    for rc_2 in T.serial(0, 64):
                        Conv2dOutput_2[ff_2] = Conv2dOutput_2[ff_2] + T.cast(PaddedInput_2[ax0_ax1_fused_ax2_fused_2 * 64 + rc_2], "int32") * T.cast(placeholder_20[rc_2 * 256 + ax3_outer_1 * 64 + ff_2], "int32")
                for ax3_inner_3 in T.serial(0, 64):
                    T_add_1[ax0_ax1_fused_ax2_fused_2 * 256 + ax3_outer_1 * 64 + ax3_inner_3] = T.q_multiply_shift(T.cast(T.cast(T.max(T.min(T.q_multiply_shift(Conv2dOutput_2[ax3_inner_3] + placeholder_21[ax3_outer_1 * 64 + ax3_inner_3], 1711626602, 31, -8, dtype="int32") + 132, 255), 0), "uint8"), "int32") - 132, 2094289803, 31, -2, dtype="int32") + 136
    @T.prim_func
    def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_add_clip_cast_cast_subtract_fixed_point_4200876283395191415_(placeholder_22: T.handle, placeholder_23: T.handle, placeholder_24: T.handle, placeholder_25: T.handle, T_cast_6: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_add_clip_cast_cast_subtract_fixed_point_4200876283395191415_", "tir.noalias": True})
        placeholder_29 = T.match_buffer(placeholder_22, [360000], dtype="int16")
        T.preflattened_buffer(placeholder_29, [360000], dtype="int16")
        placeholder_27 = T.match_buffer(placeholder_23, [16384], dtype="int16")
        T.preflattened_buffer(placeholder_27, [16384], dtype="int16")
        placeholder_26 = T.match_buffer(placeholder_24, [256], dtype="int32")
        T.preflattened_buffer(placeholder_26, [256], dtype="int32")
        placeholder_28 = T.match_buffer(placeholder_25, [1440000], dtype="int32")
        T.preflattened_buffer(placeholder_28, [1440000], dtype="int32")
        T_cast_7 = T.match_buffer(T_cast_6, [407], dtype="uint8")
        T.preflattened_buffer(T_cast_7, [407], dtype="uint8")
        # body
        # 1x1 conv plus residual add (placeholder_28) and final requantize.
        PaddedInput_3_data = T.allocate([360000], "int16", "global")
        PaddedInput_3 = T.buffer_decl(shape=[360000], dtype="int16", data=PaddedInput_3_data)
        for i0_i1_fused_3, i2_3, i3_3 in T.grid(75, 75, 64):
            PaddedInput_3[i0_i1_fused_3 * 4800 + i2_3 * 64 + i3_3] = placeholder_29[i0_i1_fused_3 * 4800 + i2_3 * 64 + i3_3]
        for ax0_ax1_fused_ax2_fused_3 in T.serial(0, 5625):
            Conv2dOutput_3_data = T.allocate([64], "int32", "global")
            Conv2dOutput_3 = T.buffer_decl(shape=[64], dtype="int32", data=Conv2dOutput_3_data)
            for ax3_outer_2 in T.serial(0, 4):
                for ff_3 in T.serial(0, 64):
                    Conv2dOutput_3[ff_3] = 0
                    for rc_3 in T.serial(0, 64):
                        Conv2dOutput_3[ff_3] = Conv2dOutput_3[ff_3] + T.cast(PaddedInput_3[ax0_ax1_fused_ax2_fused_3 * 64 + rc_3], "int32") * T.cast(placeholder_27[rc_3 * 256 + ax3_outer_2 * 64 + ff_3], "int32")
                for ax3_inner_4 in T.serial(0, 64):
                    T_cast_7[ax0_ax1_fused_ax2_fused_3 * 256 + ax3_outer_2 * 64 + ax3_inner_4] = T.cast(T.max(T.min(T.q_multiply_shift(T.cast(T.cast(T.max(T.min(T.q_multiply_shift(Conv2dOutput_3[ax3_inner_4] + placeholder_26[ax3_outer_2 * 64 + ax3_inner_4], 1343014664, 31, -8, dtype="int32") + 136, 255), 0), "uint8"), "int32") - 136, 1073903788, 31, 1, dtype="int32") + placeholder_28[ax0_ax1_fused_ax2_fused_3 * 256 + ax3_outer_2 * 64 + ax3_inner_4], 255), 0), "uint8")
    @T.prim_func
    def __tvm_main__(input: T.handle, output: T.handle) -> None:
        # Entry point wiring the operators through four "sid" workspace
        # allocates; note sid_2 is consumed by both the first conv and the
        # final residual-add conv (a resnet skip connection).
        # function attr dict
        T.func_attr({"global_symbol": "__tvm_main__", "runner_function": True})
        # body
        T.attr("default", "device_id", 0)
        T.attr("default", "device_type", 1)
        sid_2 = T.allocate([720000], "int8", "global")
        sid_6 = T.allocate([5760000], "int8", "global")
        sid_7 = T.allocate([720000], "int8", "global")
        sid_8 = T.allocate([720000], "int8", "global")
        T.evaluate(T.call_extern("tvmgen_default_fused_cast_subtract_fixed_point_multiply_add_clip_cast_cast", input, T.lookup_param("p0", dtype="handle"), sid_2, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast", sid_2, T.lookup_param("p3", dtype="handle"), T.lookup_param("p4", dtype="handle"), sid_8, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_1", sid_8, T.lookup_param("p5", dtype="handle"), T.lookup_param("p6", dtype="handle"), sid_7, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_add_clip_cast_cast_subtract_fixed_point_15934180698220515269_", sid_7, T.lookup_param("p7", dtype="handle"), T.lookup_param("p8", dtype="handle"), sid_6, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_add_clip_cast_cast_subtract_fixed_point_4200876283395191415_", sid_2, T.lookup_param("p1", dtype="handle"), T.lookup_param("p2", dtype="handle"), sid_6, output, dtype="int32"))
    @T.prim_func
    def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast(placeholder_4: T.handle, placeholder_5: T.handle, placeholder_6: T.handle, T_cast_2: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast", "tir.noalias": True})
        placeholder_7 = T.match_buffer(placeholder_4, [360000], dtype="int16")
        T.preflattened_buffer(placeholder_7, [360000], dtype="int16")
        placeholder_8 = T.match_buffer(placeholder_5, [4096], dtype="int16")
        T.preflattened_buffer(placeholder_8, [4096], dtype="int16")
        placeholder_9 = T.match_buffer(placeholder_6, [64], dtype="int32")
        T.preflattened_buffer(placeholder_9, [64], dtype="int32")
        T_cast_3 = T.match_buffer(T_cast_2, [215], dtype="int16")
        T.preflattened_buffer(T_cast_3, [215], dtype="int16")
        # body
        # 1x1 conv variant: input copied unpadded, 64->64 channels.
        PaddedInput_data = T.allocate([360000], "int16", "global")
        PaddedInput = T.buffer_decl([360000], "int16", data=PaddedInput_data)
        for i0_i1_fused, i2, i3 in T.grid(75, 75, 64):
            PaddedInput[i0_i1_fused * 4800 + i2 * 64 + i3] = placeholder_7[i0_i1_fused * 4800 + i2 * 64 + i3]
        for ax0_ax1_fused_ax2_fused in T.serial(0, 5625):
            Conv2dOutput_data = T.allocate([64], "int32", "global")
            Conv2dOutput = T.buffer_decl([64], "int32", data=Conv2dOutput_data)
            for ff in T.serial(0, 64):
                Conv2dOutput[ff] = 0
                for rc in T.serial(0, 64):
                    Conv2dOutput[ff] = Conv2dOutput[ff] + T.cast(PaddedInput[ax0_ax1_fused_ax2_fused * 64 + rc], "int32") * T.cast(placeholder_8[rc * 64 + ff], "int32")
            for ax3_inner_1 in T.serial(0, 64):
                T_cast_3[ax0_ax1_fused_ax2_fused * 64 + ax3_inner_1] = T.cast(T.cast(T.max(T.min(T.q_multiply_shift(Conv2dOutput[ax3_inner_1] + placeholder_9[ax3_inner_1], 1843106743, 31, -6, dtype="int32"), 255), 0), "uint8"), "int16")
# fmt: on
# fmt: off
@tvm.script.ir_module
class ResnetStructurePlanned:
    @T.prim_func
    def tvmgen_default_fused_cast_subtract_fixed_point_multiply_add_clip_cast_cast(placeholder: T.handle, placeholder_1: T.handle, T_cast: T.handle, global_workspace_1_var: T.Ptr[T.uint8]) -> None:
        # Planned version: identical compute to the unplanned func, with only
        # the global workspace pool pointer added to the signature (this func
        # has no local allocates to rewrite).
        placeholder_2 = T.match_buffer(placeholder, [360000], dtype="uint8")
        T.preflattened_buffer(placeholder_2, [360000], dtype="uint8")
        placeholder_3 = T.match_buffer(placeholder_1, [64], dtype="int32")
        T.preflattened_buffer(placeholder_3, [64], dtype="int32")
        T_cast_1 = T.match_buffer(T_cast, [215], dtype="int16")
        T.preflattened_buffer(T_cast_1, [215], dtype="int16")
        global_workspace_1_buffer_var = T.match_buffer(global_workspace_1_var, [7920256], dtype="uint8", strides=[1], elem_offset=0, align=16)
        T.preflattened_buffer(global_workspace_1_buffer_var, [7920256], dtype="uint8", strides=[1], elem_offset=0, align=16)
        # body
        for ax0_ax1_fused, ax2, ax3_outer, ax3_inner in T.grid(75, 75, 4, 16):
            T_cast_1[ax0_ax1_fused * 4800 + ax2 * 64 + ax3_outer * 16 + ax3_inner] = T.cast(T.cast(T.max(T.min(T.q_multiply_shift(T.cast(placeholder_2[ax0_ax1_fused * 4800 + ax2 * 64 + ax3_outer * 16 + ax3_inner], "int32") - 94, 1843157232, 31, 1, dtype="int32") + placeholder_3[ax3_outer * 16 + ax3_inner], 255), 0), "uint8"), "int16")
    @T.prim_func
    def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_add_clip_cast_cast_subtract_fixed_point_4200876283395191415_(placeholder_22: T.handle, placeholder_23: T.handle, placeholder_24: T.handle, placeholder_25: T.handle, T_cast_6: T.handle, global_workspace_5_var: T.Ptr[T.uint8]) -> None:
        # Planned residual-add conv: scratch buffers are let-bound offsets
        # (6480000 and 7200000) into the single global workspace pool.
        placeholder_29 = T.match_buffer(placeholder_22, [360000], dtype="int16")
        T.preflattened_buffer(placeholder_29, [360000], dtype="int16")
        placeholder_27 = T.match_buffer(placeholder_23, [16384], dtype="int16")
        T.preflattened_buffer(placeholder_27, [16384], dtype="int16")
        placeholder_26 = T.match_buffer(placeholder_24, [256], dtype="int32")
        T.preflattened_buffer(placeholder_26, [256], dtype="int32")
        placeholder_28 = T.match_buffer(placeholder_25, [1440000], dtype="int32")
        T.preflattened_buffer(placeholder_28, [1440000], dtype="int32")
        T_cast_7 = T.match_buffer(T_cast_6, [407], dtype="uint8")
        T.preflattened_buffer(T_cast_7, [407], dtype="uint8")
        global_workspace_5_buffer_var = T.match_buffer(global_workspace_5_var, [7920256], dtype="uint8", strides=[1], elem_offset=0, align=16)
        T.preflattened_buffer(global_workspace_5_buffer_var, [7920256], dtype="uint8", strides=[1], elem_offset=0, align=16)
        # body
        PaddedInput_3_let = T.buffer_decl([360000], 'int16')
        with T.let(PaddedInput_3_let.data, T.address_of(global_workspace_5_buffer_var[6480000], dtype="handle")):
            for i0_i1_fused_3, i2_3, i3_3 in T.grid(75, 75, 64):
                PaddedInput_3_let[i0_i1_fused_3 * 4800 + i2_3 * 64 + i3_3] = placeholder_29[i0_i1_fused_3 * 4800 + i2_3 * 64 + i3_3]
            for ax0_ax1_fused_ax2_fused_3 in T.serial(0, 5625):
                Conv2dOutput_3_let = T.buffer_decl([64], 'int32')
                with T.let(Conv2dOutput_3_let.data, T.address_of(global_workspace_5_buffer_var[7200000], dtype="handle")):
                    for ax3_outer_2 in T.serial(0, 4):
                        for ff_3 in T.serial(0, 64):
                            Conv2dOutput_3_let[ff_3] = 0
                            for rc_3 in T.serial(0, 64):
                                Conv2dOutput_3_let[ff_3] = Conv2dOutput_3_let[ff_3] + T.cast(PaddedInput_3_let[ax0_ax1_fused_ax2_fused_3 * 64 + rc_3], "int32") * T.cast(placeholder_27[rc_3 * 256 + ax3_outer_2 * 64 + ff_3], "int32")
                        for ax3_inner_4 in T.serial(0, 64):
                            T_cast_7[ax0_ax1_fused_ax2_fused_3 * 256 + ax3_outer_2 * 64 + ax3_inner_4] = T.cast(T.max(T.min(T.q_multiply_shift(T.cast(T.cast(T.max(T.min(T.q_multiply_shift(Conv2dOutput_3_let[ax3_inner_4] + placeholder_26[ax3_outer_2 * 64 + ax3_inner_4], 1343014664, 31, -8, dtype="int32") + 136, 255), 0), "uint8"), "int32") - 136, 1073903788, 31, 1, dtype="int32") + placeholder_28[ax0_ax1_fused_ax2_fused_3 * 256 + ax3_outer_2 * 64 + ax3_inner_4], 255), 0), "uint8")
    @T.prim_func
    def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_add_clip_cast_cast_subtract_fixed_point_15934180698220515269_(placeholder_16: T.handle, placeholder_17: T.handle, placeholder_18: T.handle, T_add: T.handle, global_workspace_4_var: T.Ptr[T.uint8]) -> None:
        # Planned 1x1 conv: PaddedInput_2/Conv2dOutput_2 become let-bound
        # offsets (7200000 and 7920000) into the global workspace pool.
        placeholder_19 = T.match_buffer(placeholder_16, [360000], dtype="int16")
        T.preflattened_buffer(placeholder_19, [360000], dtype="int16")
        placeholder_20 = T.match_buffer(placeholder_17, [16384], dtype="int16")
        T.preflattened_buffer(placeholder_20, [16384], dtype="int16")
        placeholder_21 = T.match_buffer(placeholder_18, [256], dtype="int32")
        T.preflattened_buffer(placeholder_21, [256], dtype="int32")
        T_add_1 = T.match_buffer(T_add, [407], dtype="int32")
        T.preflattened_buffer(T_add_1, [407], dtype="int32")
        global_workspace_4_buffer_var = T.match_buffer(global_workspace_4_var, [7920256], dtype="uint8", strides=[1], elem_offset=0, align=16)
        T.preflattened_buffer(global_workspace_4_buffer_var, [7920256], dtype="uint8", strides=[1], elem_offset=0, align=16)
        # body
        PaddedInput_2_let = T.buffer_decl([360000], "int16")
        with T.let(PaddedInput_2_let.data, T.address_of(global_workspace_4_buffer_var[7200000], dtype="handle")):
            for i0_i1_fused_2, i2_2, i3_2 in T.grid(75, 75, 64):
                PaddedInput_2_let[i0_i1_fused_2 * 4800 + i2_2 * 64 + i3_2] = placeholder_19[i0_i1_fused_2 * 4800 + i2_2 * 64 + i3_2]
            for ax0_ax1_fused_ax2_fused_2 in T.serial(0, 5625):
                Conv2dOutput_2_let = T.buffer_decl([64], 'int32')
                with T.let(Conv2dOutput_2_let.data, T.address_of(global_workspace_4_buffer_var[7920000], dtype="handle")):
                    for ax3_outer_1 in T.serial(0, 4):
                        for ff_2 in T.serial(0, 64):
                            Conv2dOutput_2_let[ff_2] = 0
                            for rc_2 in T.serial(0, 64):
                                Conv2dOutput_2_let[ff_2] = Conv2dOutput_2_let[ff_2] + T.cast(PaddedInput_2_let[ax0_ax1_fused_ax2_fused_2 * 64 + rc_2], "int32") * T.cast(placeholder_20[rc_2 * 256 + ax3_outer_1 * 64 + ff_2], "int32")
                        for ax3_inner_3 in T.serial(0, 64):
                            T_add_1[ax0_ax1_fused_ax2_fused_2 * 256 + ax3_outer_1 * 64 + ax3_inner_3] = T.q_multiply_shift(T.cast(T.cast(T.max(T.min(T.q_multiply_shift(Conv2dOutput_2_let[ax3_inner_3] + placeholder_21[ax3_outer_1 * 64 + ax3_inner_3], 1711626602, 31, -8, dtype="int32") + 132, 255), 0), "uint8"), "int32") - 132, 2094289803, 31, -2, dtype="int32") + 136
    @T.prim_func
    def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast(placeholder_4: T.handle, placeholder_5: T.handle, placeholder_6: T.handle, T_cast_2: T.handle, global_workspace_2_var: T.Ptr[T.uint8]) -> None:
        # Planned 1x1 conv (64->64 channels): scratch buffers mapped to pool
        # offsets 7200000 (padded input) and 7920000 (accumulator).
        placeholder_7 = T.match_buffer(placeholder_4, [360000], dtype="int16")
        T.preflattened_buffer(placeholder_7, [360000], dtype="int16")
        placeholder_8 = T.match_buffer(placeholder_5, [4096], dtype="int16")
        T.preflattened_buffer(placeholder_8, [4096], dtype="int16")
        placeholder_9 = T.match_buffer(placeholder_6, [64], dtype="int32")
        T.preflattened_buffer(placeholder_9, [64], dtype="int32")
        T_cast_3 = T.match_buffer(T_cast_2, [215], dtype="int16")
        T.preflattened_buffer(T_cast_3, [215], dtype="int16")
        global_workspace_2_buffer_var = T.match_buffer(global_workspace_2_var, [7920256], dtype="uint8", strides=[1], elem_offset=0, align=16)
        T.preflattened_buffer(global_workspace_2_buffer_var, [7920256], dtype="uint8", strides=[1], elem_offset=0, align=16)
        # body
        PaddedInput_let = T.buffer_decl([360000], "int16")
        with T.let(PaddedInput_let.data, T.address_of(global_workspace_2_buffer_var[7200000], dtype="handle")):
            for i0_i1_fused, i2, i3 in T.grid(75, 75, 64):
                PaddedInput_let[i0_i1_fused * 4800 + i2 * 64 + i3] = placeholder_7[i0_i1_fused * 4800 + i2 * 64 + i3]
            for ax0_ax1_fused_ax2_fused in T.serial(0, 5625):
                Conv2dOutput_let = T.buffer_decl([64], "int32")
                with T.let(Conv2dOutput_let.data, T.address_of(global_workspace_2_buffer_var[7920000], dtype="handle")):
                    for ff in T.serial(0, 64):
                        Conv2dOutput_let[ff] = 0
                        for rc in T.serial(0, 64):
                            Conv2dOutput_let[ff] = Conv2dOutput_let[ff] + T.cast(PaddedInput_let[ax0_ax1_fused_ax2_fused * 64 + rc], "int32") * T.cast(placeholder_8[rc * 64 + ff], "int32")
                    for ax3_inner_1 in T.serial(0, 64):
                        T_cast_3[ax0_ax1_fused_ax2_fused * 64 + ax3_inner_1] = T.cast(T.cast(T.max(T.min(T.q_multiply_shift(Conv2dOutput_let[ax3_inner_1] + placeholder_9[ax3_inner_1], 1843106743, 31, -6, dtype="int32"), 255), 0), "uint8"), "int16")
    @T.prim_func
    def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_1(placeholder_10: T.handle, placeholder_11: T.handle, placeholder_12: T.handle, T_cast_4: T.handle, global_workspace_3_var: T.Ptr[T.uint8]) -> None:
        # Planned 3x3 conv: the large padded input sits at pool offset 0 and
        # the per-pixel accumulator at offset 7200000.
        placeholder_13 = T.match_buffer(placeholder_10, [360000], dtype="int16")
        T.preflattened_buffer(placeholder_13, [360000], dtype="int16")
        placeholder_14 = T.match_buffer(placeholder_11, [36864], dtype="int16")
        T.preflattened_buffer(placeholder_14, [36864], dtype="int16")
        placeholder_15 = T.match_buffer(placeholder_12, [64], dtype="int32")
        T.preflattened_buffer(placeholder_15, [64], dtype="int32")
        T_cast_5 = T.match_buffer(T_cast_4, [215], dtype="int16")
        T.preflattened_buffer(T_cast_5, [215], dtype="int16")
        global_workspace_3_buffer_var = T.match_buffer(global_workspace_3_var, [7920256], dtype="uint8", strides=[1], elem_offset=0, align=16)
        T.preflattened_buffer(global_workspace_3_buffer_var, [7920256], dtype="uint8", strides=[1], elem_offset=0, align=16)
        # body
        PaddedInput_1_let = T.buffer_decl([379456], "int16")
        with T.let(PaddedInput_1_let.data, T.address_of(global_workspace_3_buffer_var[0], dtype="handle")):
            for i0_i1_fused_1, i2_1, i3_1 in T.grid(77, 77, 64):
                PaddedInput_1_let[i0_i1_fused_1 * 4928 + i2_1 * 64 + i3_1] = T.if_then_else(1 <= i0_i1_fused_1 and i0_i1_fused_1 < 76 and 1 <= i2_1 and i2_1 < 76, placeholder_13[i0_i1_fused_1 * 4800 + i2_1 * 64 + i3_1 - 4864], T.int16(0), dtype="int16")
            for ax0_ax1_fused_ax2_fused_1 in T.serial(0, 5625):
                Conv2dOutput_1_let = T.buffer_decl([64], "int32")
                with T.let(Conv2dOutput_1_let.data, T.address_of(global_workspace_3_buffer_var[7200000], dtype="handle")):
                    for ff_1 in T.serial(0, 64):
                        Conv2dOutput_1_let[ff_1] = 0
                        for ry, rx, rc_1 in T.grid(3, 3, 64):
                            Conv2dOutput_1_let[ff_1] = Conv2dOutput_1_let[ff_1] + T.cast(PaddedInput_1_let[ax0_ax1_fused_ax2_fused_1 // 75 * 4928 + ry * 4928 + rx * 64 + ax0_ax1_fused_ax2_fused_1 % 75 * 64 + rc_1], "int32") * T.cast(placeholder_14[ry * 12288 + rx * 4096 + rc_1 * 64 + ff_1], "int32")
                    for ax3_inner_2 in T.serial(0, 64):
                        T_cast_5[ax0_ax1_fused_ax2_fused_1 * 64 + ax3_inner_2] = T.cast(T.cast(T.max(T.min(T.q_multiply_shift(Conv2dOutput_1_let[ax3_inner_2] + placeholder_15[ax3_inner_2], 1608879842, 31, -7, dtype="int32"), 255), 0), "uint8"), "int16")
    @T.prim_func
    def __tvm_main__(input: T.handle, global_workspace_0_var: T.Ptr[T.uint8], output: T.handle) -> None:
        # Planned entry point: intermediates live at fixed byte offsets inside a
        # single 7920256-byte workspace buffer passed in as an extra parameter.
        global_workspace_0_buffer_var = T.match_buffer(global_workspace_0_var, [7920256], dtype="uint8", strides=[1], elem_offset=0, align=16)
        # body
        T.attr("default", "device_id", 0)
        T.attr("default", "device_type", 1)
        # Former allocates, now resolved to addresses inside the workspace pool.
        sid_2_let: T.Ptr[T.int8] = T.address_of(global_workspace_0_buffer_var[5760000], dtype="handle")
        sid_6_let: T.Ptr[T.int8] = T.address_of(global_workspace_0_buffer_var[0], dtype="handle")
        sid_7_let: T.Ptr[T.int8] = T.address_of(global_workspace_0_buffer_var[6480000], dtype="handle")
        sid_8_let: T.Ptr[T.int8] = T.address_of(global_workspace_0_buffer_var[6480000], dtype="handle")
        # Each operator call also receives the workspace base pointer as an argument.
        T.evaluate(T.call_extern("tvmgen_default_fused_cast_subtract_fixed_point_multiply_add_clip_cast_cast", input, T.lookup_param("p0", dtype="handle"), sid_2_let, global_workspace_0_buffer_var.data, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast", sid_2_let, T.lookup_param("p3", dtype="handle"), T.lookup_param("p4", dtype="handle"), sid_8_let, global_workspace_0_buffer_var.data, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_1", sid_8_let, T.lookup_param("p5", dtype="handle"), T.lookup_param("p6", dtype="handle"), sid_7_let, global_workspace_0_buffer_var.data, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_add_clip_cast_cast_subtract_fixed_point_15934180698220515269_", sid_7_let, T.lookup_param("p7", dtype="handle"), T.lookup_param("p8", dtype="handle"), sid_6_let, global_workspace_0_buffer_var.data, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_add_clip_cast_cast_subtract_fixed_point_4200876283395191415_", sid_2_let, T.lookup_param("p1", dtype="handle"), T.lookup_param("p2", dtype="handle"), sid_6_let, output, global_workspace_0_buffer_var.data, dtype="int32"))
    __tvm_meta__ = None
# fmt: on
def test_resnet_subgraph():
    """End-to-end USMP offset conversion on the resnet subgraph fixture.

    Assigns targets and pool candidates, runs buffer-info extraction and the
    greedy-by-size algorithm, converts pool allocations to offsets, and checks
    the result against ResnetStructurePlanned.
    """
    target = Target("c")
    workspace_pool = WorkspacePoolInfo(
        "global_workspace",
        [target],
    )

    # Attach targets and pool candidates to the unplanned module.
    mod = _assign_targets_to_primfuncs_irmodule(ResnetStructure, target)
    mod = assign_poolinfos_to_allocates_in_irmodule(mod, [workspace_pool])

    # Analyse buffer liveness and plan offsets with greedy-by-size.
    analysis = tvm.tir.usmp.analysis.extract_buffer_info(mod["__tvm_main__"], mod)
    info_map = analysis.buffer_info_stmts
    info_array = tvm.get_global_func("tir.usmp.CreateArrayBufferInfo")(info_map)
    planned = tvm.get_global_func("tir.usmp.algo.greedy_by_size")(
        info_array, analysis.memory_pressure
    )
    pool_allocations = tvm.get_global_func("tir.usmp.AssignStmtPoolAllocations")(
        info_map, planned
    )

    # Rewrite pool-backed allocates into workspace offsets.
    mod_with_offsets = tvm.tir.usmp.transform.convert_pool_allocations_to_offsets(
        pool_allocations, emit_tvmscript_printable=True
    )(mod)

    for gvar, expected_func in ResnetStructurePlanned.functions.items():
        tvm.ir.assert_structural_equal(mod_with_offsets[gvar.name_hint], expected_func)
@tvm.script.ir_module
class TensorIntrinStructure:
    # Unplanned fixture: the allocated buffer is handed to an extern intrinsic
    # through tvm_access_ptr and also read/written directly.
    @T.prim_func
    def tensor_intrin_primfunc() -> None:
        dense_data = T.allocate([10], "int32", "global")
        T.evaluate(
            T.call_extern(
                "intrin_function",
                T.tvm_access_ptr(
                    T.type_annotation(dtype="int32"), dense_data, 0, 1, 2, dtype="handle"
                ),
                dtype="int32",
            )
        )
        # Direct access through a buffer view over the same allocation.
        dense = T.buffer_decl([10], "int32", data=dense_data)
        dense[0] = T.q_multiply_shift(dense[0], 1608879842, 31, -7, dtype="int32")

    @T.prim_func
    def __tvm_main__(input: T.handle, output: T.handle) -> None:
        T.evaluate(T.call_extern("tensor_intrin_primfunc", dtype="int32"))
@tvm.script.ir_module
class TensorIntrinStructurePlanned:
    # Expected result after USMP: the allocate becomes an offset into a 40-byte
    # workspace pool threaded through both functions.
    @T.prim_func
    def tensor_intrin_primfunc(global_workspace_1_var: T.Ptr[T.uint8]) -> None:
        global_workspace_1_buffer_var = T.match_buffer(
            global_workspace_1_var, [40], dtype="uint8", strides=[1], elem_offset=0, align=16
        )
        T.preflattened_buffer(
            global_workspace_1_buffer_var, [40], dtype="uint8", strides=[1], elem_offset=0, align=16
        )
        dense_let = T.buffer_decl([10], "int32")
        # The former allocation now aliases offset 0 of the workspace buffer.
        with T.let(dense_let.data, T.address_of(global_workspace_1_buffer_var[0], dtype="handle")):
            T.evaluate(
                T.call_extern(
                    "intrin_function",
                    T.tvm_access_ptr(
                        T.type_annotation(dtype="int32"), dense_let.data, 0, 1, 2, dtype="handle"
                    ),
                    dtype="int32",
                )
            )
            dense_let[0] = T.q_multiply_shift(dense_let[0], 1608879842, 31, -7, dtype="int32")

    @T.prim_func
    def __tvm_main__(
        input: T.handle, global_workspace_1_var: T.Ptr[T.uint8], output: T.handle
    ) -> None:
        global_workspace_1_buffer_var = T.match_buffer(
            global_workspace_1_var, [40], dtype="uint8", strides=[1], elem_offset=0, align=16
        )
        T.evaluate(
            T.call_extern(
                "tensor_intrin_primfunc", global_workspace_1_buffer_var.data, dtype="int32"
            )
        )
def test_tensor_intrin():
    """USMP offset conversion must also rewrite tvm_access_ptr users.

    Runs the same plan-and-convert pipeline as the other tests on the
    TensorIntrinStructure fixture and compares with the planned reference.
    """
    target = Target("c")
    workspace_pool = WorkspacePoolInfo(
        "global_workspace",
        [target],
    )

    # Attach targets and pool candidates to the unplanned module.
    mod = _assign_targets_to_primfuncs_irmodule(TensorIntrinStructure, target)
    mod = assign_poolinfos_to_allocates_in_irmodule(mod, [workspace_pool])

    # Analyse buffer liveness and plan offsets with greedy-by-size.
    analysis = tvm.tir.usmp.analysis.extract_buffer_info(mod["__tvm_main__"], mod)
    info_map = analysis.buffer_info_stmts
    info_array = tvm.get_global_func("tir.usmp.CreateArrayBufferInfo")(info_map)
    planned = tvm.get_global_func("tir.usmp.algo.greedy_by_size")(
        info_array, analysis.memory_pressure
    )
    pool_allocations = tvm.get_global_func("tir.usmp.AssignStmtPoolAllocations")(
        info_map, planned
    )

    # Rewrite pool-backed allocates into workspace offsets.
    mod_with_offsets = tvm.tir.usmp.transform.convert_pool_allocations_to_offsets(
        pool_allocations, emit_tvmscript_printable=True
    )(mod)

    for gvar, expected_func in TensorIntrinStructurePlanned.functions.items():
        tvm.ir.assert_structural_equal(mod_with_offsets[gvar.name_hint], expected_func)
if __name__ == "__main__":
    # Allow running this test file directly; extra CLI args are forwarded to pytest.
    pytest.main([__file__] + sys.argv[1:])
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_usmp_transform_create_io_allocates.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from typing import NamedTuple, List
import tvm
from tvm.script import tir as T
# fmt: off
@tvm.script.ir_module
class SingleInputSingleOutput:
    # Fixture: __tvm_main__ with exactly one input tensor and one output tensor.
    @T.prim_func
    def tvmgen_default_fused_cast_subtract(placeholder_2: T.handle, placeholder_3: T.handle, T_subtract: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_cast_subtract", "tir.noalias": True})
        placeholder_4 = T.match_buffer(placeholder_2, [150528], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        placeholder_5 = T.match_buffer(placeholder_3, [1], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        T_subtract_1 = T.match_buffer(T_subtract, [452], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        # body
        for ax0_ax1_fused_1 in T.serial(0, 224):
            for ax2_1, ax3_inner_1 in T.grid(224, 3):
                T_subtract_1[(((ax0_ax1_fused_1*672) + (ax2_1*3)) + ax3_inner_1)] = (T.cast(placeholder_4[(((ax0_ax1_fused_1*672) + (ax2_1*3)) + ax3_inner_1)], "int16") - placeholder_5[0])

    @T.prim_func
    def __tvm_main__(input: T.handle, output: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "__tvm_main__", "runner_function": True})
        input_buffer_var = T.match_buffer(input, [150528], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        output_buffer_var = T.match_buffer(output, [452], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        # body
        T.evaluate(T.call_extern("tvmgen_default_fused_cast_subtract", input_buffer_var.data, T.lookup_param("p0", dtype="handle"), output_buffer_var.data, dtype="int32"))
# fmt: off
@tvm.script.ir_module
class TwoInputSingleOutput:
    # Fixture: __tvm_main__ with two input tensors and one output tensor.
    @T.prim_func
    def tvmgen_default_fused_cast_subtract(placeholder_2: T.handle, placeholder_3: T.handle, T_subtract: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_cast_subtract", "tir.noalias": True})
        placeholder_4 = T.match_buffer(placeholder_2, [150528], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        placeholder_5 = T.match_buffer(placeholder_3, [1], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        T_subtract_1 = T.match_buffer(T_subtract, [452], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        # body
        for ax0_ax1_fused_1 in T.serial(0, 224):
            for ax2_1, ax3_inner_1 in T.grid(224, 3):
                T_subtract_1[(((ax0_ax1_fused_1*672) + (ax2_1*3)) + ax3_inner_1)] = (T.cast(placeholder_4[(((ax0_ax1_fused_1*672) + (ax2_1*3)) + ax3_inner_1)], "int16") - placeholder_5[0])

    @T.prim_func
    def __tvm_main__(input1: T.handle, input2: T.handle, output: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "__tvm_main__", "runner_function": True})
        input1_buffer_var = T.match_buffer(input1, [150528], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        input2_buffer_var = T.match_buffer(input2, [1], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        output_buffer_var = T.match_buffer(output, [452], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        # body
        T.evaluate(T.call_extern("tvmgen_default_fused_cast_subtract", input1_buffer_var.data, input2_buffer_var.data, output_buffer_var.data, dtype="int32"))
# fmt: off
@tvm.script.ir_module
class TwoInputTwoOutput:
    # Fixture: __tvm_main__ with two input tensors and two output tensors.
    @T.prim_func
    def tvmgen_default_fused_cast_subtract(placeholder_2: T.handle, placeholder_3: T.handle, T_subtract: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_cast_subtract", "tir.noalias": True})
        placeholder_4 = T.match_buffer(placeholder_2, [150528], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        placeholder_5 = T.match_buffer(placeholder_3, [1], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        T_subtract_1 = T.match_buffer(T_subtract, [452], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        # body
        for ax0_ax1_fused_1 in T.serial(0, 224):
            for ax2_1, ax3_inner_1 in T.grid(224, 3):
                T_subtract_1[(((ax0_ax1_fused_1*672) + (ax2_1*3)) + ax3_inner_1)] = (T.cast(placeholder_4[(((ax0_ax1_fused_1*672) + (ax2_1*3)) + ax3_inner_1)], "int16") - placeholder_5[0])

    @T.prim_func
    def __tvm_main__(input1: T.handle, input2: T.handle, output1: T.handle, output2: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "__tvm_main__", "runner_function": True})
        input1_buffer_var = T.match_buffer(input1, [150528], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        input2_buffer_var = T.match_buffer(input2, [150528], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        output1_buffer_var = T.match_buffer(output1, [452], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        output2_buffer_var = T.match_buffer(output2, [452], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        # body
        T.evaluate(T.call_extern("tvmgen_default_fused_cast_subtract", input1_buffer_var.data, T.lookup_param("p0", dtype="handle"), output1_buffer_var.data, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_cast_subtract", input2_buffer_var.data, T.lookup_param("p1", dtype="handle"), output2_buffer_var.data, dtype="int32"))
# fmt: off
@tvm.script.ir_module
class SingleInputTwoOutput:
    # Fixture: __tvm_main__ with one input tensor feeding two output tensors.
    @T.prim_func
    def tvmgen_default_fused_cast_subtract(placeholder_2: T.handle, placeholder_3: T.handle, T_subtract: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_cast_subtract", "tir.noalias": True})
        placeholder_4 = T.match_buffer(placeholder_2, [150528], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        placeholder_5 = T.match_buffer(placeholder_3, [1], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        T_subtract_1 = T.match_buffer(T_subtract, [452], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        # body
        for ax0_ax1_fused_1 in T.serial(0, 224):
            for ax2_1, ax3_inner_1 in T.grid(224, 3):
                T_subtract_1[(((ax0_ax1_fused_1*672) + (ax2_1*3)) + ax3_inner_1)] = (T.cast(placeholder_4[(((ax0_ax1_fused_1*672) + (ax2_1*3)) + ax3_inner_1)], "int16") - placeholder_5[0])

    @T.prim_func
    def __tvm_main__(input: T.handle, output1: T.handle, output2: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "__tvm_main__", "runner_function": True})
        input_buffer_var = T.match_buffer(input, [150528], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        output1_buffer_var = T.match_buffer(output1, [452], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        output2_buffer_var = T.match_buffer(output2, [452], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        # body
        T.evaluate(T.call_extern("tvmgen_default_fused_cast_subtract", input_buffer_var.data, T.lookup_param("p0", dtype="handle"), output1_buffer_var.data, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_cast_subtract", input_buffer_var.data, T.lookup_param("p1", dtype="handle"), output2_buffer_var.data, dtype="int32"))
class IOInfo(NamedTuple):
    """A data structure to hold test outputs per I/O tensor"""

    # Tensor name, as carried by the Allocate node's annotation.
    name: str
    # Expected Allocate extents for the tensor.
    shape: list
    # Expected element dtype of the Allocate.
    dtype: str
def check_io_allocations(mod: tvm.IRModule, inputs: List[IOInfo], outputs: List[IOInfo]):
    """This function checks whether outer most allocates correspond to I/O tensors"""
    found_non_io_allocate_node = False

    # Index the expected I/O descriptions by tensor name; entries are removed
    # as they are matched so the final asserts can detect anything missing.
    input_name_to_info = {}
    for input in inputs:
        input_name_to_info[input.name] = input
    output_name_to_info = {}
    for output in outputs:
        output_name_to_info[output.name] = output

    def _visit(stmt):
        nonlocal found_non_io_allocate_node
        # Only Allocates seen before the first non-Allocate node are candidate
        # I/O allocates; after that point the walk stops checking.
        if isinstance(stmt, tvm.tir.Allocate) and not found_non_io_allocate_node:
            allocate = stmt
            if dict(allocate.annotations).get("input_tensor"):
                input_tensor_name = str(dict(allocate.annotations).get("input_tensor"))
                assert input_tensor_name in input_name_to_info.keys()
                assert input_name_to_info[input_tensor_name].shape == list(allocate.extents)
                assert input_name_to_info[input_tensor_name].dtype == str(allocate.dtype)
                del input_name_to_info[input_tensor_name]
            if dict(allocate.annotations).get("output_tensor"):
                output_tensor_name = str(dict(allocate.annotations).get("output_tensor"))
                assert output_tensor_name in output_name_to_info.keys()
                assert output_name_to_info[output_tensor_name].shape == list(allocate.extents)
                assert output_name_to_info[output_tensor_name].dtype == str(allocate.dtype)
                del output_name_to_info[output_tensor_name]
        else:
            # NOTE(review): with indentation lost in this copy the `else` could
            # bind to either enclosing `if`; binding it to the outer
            # isinstance-check is the only reading under which the final
            # asserts can hold for the multi-input fixtures — confirm upstream.
            found_non_io_allocate_node = True

    main = mod["__tvm_main__"]
    tvm.tir.stmt_functor.ir_transform(main.body, _visit, None, ["tir.Allocate", "tir.Call"])
    # Every expected input and output must have been matched exactly once.
    assert len(input_name_to_info) == 0
    assert len(output_name_to_info) == 0
@pytest.mark.parametrize(
    "test_mod, input_names, output_names",
    [
        (
            SingleInputSingleOutput,
            [IOInfo("input", [150528], "uint8")],
            [IOInfo("output", [452], "int16")],
        ),
        (
            SingleInputTwoOutput,
            [IOInfo("input", [150528], "uint8")],
            [IOInfo("output1", [452], "int16"), IOInfo("output2", [452], "int16")],
        ),
        (
            TwoInputSingleOutput,
            [IOInfo("input1", [150528], "uint8"), IOInfo("input2", [1], "int16")],
            [IOInfo("output", [452], "int16")],
        ),
        (
            TwoInputTwoOutput,
            [IOInfo("input1", [150528], "uint8"), IOInfo("input2", [150528], "uint8")],
            [IOInfo("output1", [452], "int16"), IOInfo("output2", [452], "int16")],
        ),
    ],
)
def test_mobilenet_subgraph(test_mod, input_names, output_names):
    """CreateAllocatesForIO must wrap main's body in allocates for each I/O tensor."""
    create_io_allocates = tvm.get_global_func("tir.usmp.transform.CreateAllocatesForIO")
    transformed_mod = create_io_allocates()(test_mod)
    check_io_allocations(transformed_mod, input_names, output_names)
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_usmp_utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import sys
import tvm
from tvm.script import tir as T
from tvm.tir import stmt_functor
from tvm.tir.usmp import utils as usmp_utils
from tvm.target import Target
from tvm import WorkspacePoolInfo, PoolInfoProperties
# fmt: off
@tvm.script.ir_module
class LinearStructure:
    # Linear three-operator model (cast/subtract -> conv2d -> max_pool2d/cast)
    # used as the fixture for USMP buffer-info extraction tests.
    @T.prim_func
    def tvmgen_default_fused_cast_subtract(placeholder_2: T.handle, placeholder_3: T.handle, T_subtract: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_cast_subtract", "tir.noalias": True})
        placeholder_4 = T.match_buffer(placeholder_2, [150528], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        placeholder_5 = T.match_buffer(placeholder_3, [1], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        T_subtract_1 = T.match_buffer(T_subtract, [150528], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        # body
        for ax0_ax1_fused_1 in T.serial(0, 224):
            for ax2_1, ax3_inner_1 in T.grid(224, 3):
                T_subtract_1[(((ax0_ax1_fused_1*672) + (ax2_1*3)) + ax3_inner_1)] = (T.cast(placeholder_4[(((ax0_ax1_fused_1*672) + (ax2_1*3)) + ax3_inner_1)], "int16") - placeholder_5[0])

    @T.prim_func
    def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast(placeholder_62: T.handle, placeholder_63: T.handle, placeholder_64: T.handle, T_cast_20: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast", "tir.noalias": True})
        placeholder_65 = T.match_buffer(placeholder_62, [150528], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        placeholder_66 = T.match_buffer(placeholder_63, [9408], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        placeholder_67 = T.match_buffer(placeholder_64, [64], dtype="int32", elem_offset=0, align=64, offset_factor=1)
        T_cast_21 = T.match_buffer(T_cast_20, [289], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        # body
        # Local scratch buffers: these are the allocations USMP must plan.
        PaddedInput_7 = T.decl_buffer([157323], "int16")
        for i0_i1_fused_7 in T.serial(0, 229):
            for i2_7, i3_7 in T.grid(229, 3):
                PaddedInput_7[(((i0_i1_fused_7*687) + (i2_7*3)) + i3_7)] = T.if_then_else(((((2 <= i0_i1_fused_7) and (i0_i1_fused_7 < 226)) and (2 <= i2_7)) and (i2_7 < 226)), placeholder_65[((((i0_i1_fused_7*672) + (i2_7*3)) + i3_7) - 1350)], T.int16(0), dtype="int16")
        for ax0_ax1_fused_ax2_fused_7 in T.serial(0, 12544):
            Conv2dOutput_7 = T.decl_buffer([64], "int32")
            for ff_3 in T.serial(0, 64):
                Conv2dOutput_7[ff_3] = 0
                for ry_2, rx_2, rc_7 in T.grid(7, 7, 3):
                    Conv2dOutput_7[ff_3] = (Conv2dOutput_7[ff_3] + (T.cast(PaddedInput_7[(((((T.floordiv(ax0_ax1_fused_ax2_fused_7, 112)*1374) + (ry_2*687)) + (T.floormod(ax0_ax1_fused_ax2_fused_7, 112)*6)) + (rx_2*3)) + rc_7)], "int32")*T.cast(placeholder_66[((((ry_2*1344) + (rx_2*192)) + (rc_7*64)) + ff_3)], "int32")))
            for ax3_inner_7 in T.serial(0, 64):
                T_cast_21[((ax0_ax1_fused_ax2_fused_7*64) + ax3_inner_7)] = T.cast(T.max(T.min(T.q_multiply_shift((Conv2dOutput_7[ax3_inner_7] + placeholder_67[ax3_inner_7]), 1939887962, 31, -9, dtype="int32"), 255), 0), "uint8")

    @T.prim_func
    def tvmgen_default_fused_nn_max_pool2d_cast(placeholder_28: T.handle, T_cast_6: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_nn_max_pool2d_cast", "tir.noalias": True})
        placeholder_29 = T.match_buffer(placeholder_28, [802816], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        T_cast_7 = T.match_buffer(T_cast_6, [177], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        # body
        tensor_2 = T.decl_buffer([200704], "uint8")
        for ax0_ax1_fused_4 in T.serial(0, 56):
            for ax2_4 in T.serial(0, 56):
                for ax3_init in T.serial(0, 64):
                    tensor_2[(((ax0_ax1_fused_4*3584) + (ax2_4*64)) + ax3_init)] = T.uint8(0)
                for rv0_rv1_fused_1, ax3_2 in T.grid(9, 64):
                    tensor_2[(((ax0_ax1_fused_4*3584) + (ax2_4*64)) + ax3_2)] = T.max(tensor_2[(((ax0_ax1_fused_4*3584) + (ax2_4*64)) + ax3_2)], T.if_then_else(((((ax0_ax1_fused_4*2) + T.floordiv(rv0_rv1_fused_1, 3)) < 112) and (((ax2_4*2) + T.floormod(rv0_rv1_fused_1, 3)) < 112)), placeholder_29[(((((ax0_ax1_fused_4*14336) + (T.floordiv(rv0_rv1_fused_1, 3)*7168)) + (ax2_4*128)) + (T.floormod(rv0_rv1_fused_1, 3)*64)) + ax3_2)], T.uint8(0), dtype="uint8"))
        for ax0_ax1_fused_5 in T.serial(0, 56):
            for ax2_5, ax3_3 in T.grid(56, 64):
                T_cast_7[(((ax0_ax1_fused_5*3584) + (ax2_5*64)) + ax3_3)] = T.cast(tensor_2[(((ax0_ax1_fused_5*3584) + (ax2_5*64)) + ax3_3)], "int16")

    @T.prim_func
    def tvmgen_default_run_model(input: T.handle, output: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_run_model", "runner_function": True})
        # body
        T.attr("default", "device_id", 0)
        T.attr("default", "device_type", 1)
        # Intermediate tensors between the three operator calls; the USMP
        # analysis tracks the liveness of these two allocations.
        sid_9 = T.allocate([301056], "int8", "global")
        sid_8 = T.allocate([802816], "int8", "global")
        T.evaluate(T.call_extern("tvmgen_default_fused_cast_subtract", input, T.lookup_param("p0", dtype="handle"), sid_9, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast", sid_9, T.lookup_param("p1", dtype="handle"), T.lookup_param("p2", dtype="handle"), sid_8, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_max_pool2d_cast", sid_8, output, dtype="int32"))
    __tvm_meta__ = None
# fmt: on
def test_create_pool_info():
    """WorkspacePoolInfo exposes its name and size hint (default -1)."""
    target = Target("c")

    default_pool = WorkspacePoolInfo(
        "foo_workspace",
        [target],
    )
    assert default_pool.pool_name == "foo_workspace"
    # -1 is the default, meaning no pool size constraint.
    assert default_pool.size_hint_bytes == -1

    sized_pool = WorkspacePoolInfo(
        "bar_workspace",
        [target],
        PoolInfoProperties(size_hint_bytes=1425),
    )
    assert sized_pool.pool_name == "bar_workspace"
    assert sized_pool.size_hint_bytes == 1425
def test_create_buffer_info():
    """BufferInfo carries name, size, pool candidates and alignment."""
    pool = WorkspacePoolInfo(
        "global_workspace",
        [Target("c")],
    )

    # Keyword construction; alignment defaults to 1.
    info = tvm.tir.usmp.BufferInfo(name_hint="buf1", size_bytes=256, pool_candidates=[pool])
    assert info.name_hint == "buf1"
    assert info.size_bytes == 256
    assert list(info.pool_candidates) == [pool]
    assert info.alignment == 1

    # Positional construction with an explicit alignment.
    info = tvm.tir.usmp.BufferInfo("buf2", 512, [pool], 8)
    assert info.name_hint == "buf2"
    assert info.size_bytes == 512
    assert list(info.pool_candidates) == [pool]
    assert info.alignment == 8
def test_create_pool_allocation():
    """PoolAllocation pairs a pool with a byte offset into it."""
    pool = WorkspacePoolInfo(
        "foo_workspace",
        [Target("c")],
    )
    allocation = usmp_utils.PoolAllocation(pool_info=pool, byte_offset=64)
    assert allocation.pool_info == pool
    assert allocation.byte_offset == 64
def _assign_poolinfos_to_allocates_in_primfunc(primfunc, pool_infos):
    """Helper to assign poolinfos to allocate nodes in a tir.PrimFunc."""

    def set_poolinfos(stmt):
        # Only Allocate nodes are rewritten; returning None keeps other nodes.
        if not isinstance(stmt, tvm.tir.Allocate):
            return None
        return tvm.tir.Allocate(
            buffer_var=stmt.buffer_var,
            dtype=stmt.dtype,
            extents=stmt.extents,
            condition=stmt.condition,
            body=stmt.body,
            annotations={tvm.tir.usmp.utils.CANDIDATE_MEMORY_POOL_ATTR: pool_infos},
        )

    return primfunc.with_body(stmt_functor.ir_transform(primfunc.body, None, set_poolinfos))
def _assign_poolinfos_to_allocates_in_irmodule(mod, pool_infos):
    """Helper to assign poolinfos to allocate nodes in an IRModule."""
    result = tvm.IRModule()
    for gvar, func in mod.functions.items():
        # Only PrimFuncs are carried over into the result module.
        if isinstance(func, tvm.tir.PrimFunc):
            result[gvar] = _assign_poolinfos_to_allocates_in_primfunc(func, pool_infos)
    return result
def _assign_targets_to_primfuncs_irmodule(mod, target):
    """Helper to assign target for PrimFunc in an IRModule."""
    result = tvm.IRModule()
    for gvar, func in mod.functions.items():
        # Only PrimFuncs are carried over, each tagged with the target.
        if isinstance(func, tvm.tir.PrimFunc):
            result[gvar] = func.with_attr("target", target)
    return result
def test_create_array_buffer_info():
    """CreateArrayBufferInfo flattens the analysis map into an array of the same infos."""
    target = Target("c")
    pool = WorkspacePoolInfo(
        "global_workspace",
        [target],
    )

    # Prepare the fixture module with targets and pool candidates attached.
    mod = _assign_targets_to_primfuncs_irmodule(LinearStructure, target)
    mod = _assign_poolinfos_to_allocates_in_irmodule(mod, [pool])

    analysis = tvm.tir.usmp.analysis.extract_buffer_info(mod["tvmgen_default_run_model"], mod)
    to_array = tvm.get_global_func("tir.usmp.CreateArrayBufferInfo")
    for info in to_array(analysis.buffer_info_stmts):
        # Every array entry must originate from the analysis map.
        assert info in analysis.buffer_info_stmts.keys()
if __name__ == "__main__":
    # Allow running this test file directly; extra CLI args are forwarded to pytest.
    pytest.main([__file__] + sys.argv[1:])
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_transform_layout.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import functools
import sys
import pytest
import numpy as np
import tvm
import tvm.testing
from tvm import te
from tvm.tir.stmt_functor import post_order_visit
from tvm.driver.build_module import schedule_to_module
# Parametrized element dtype shared by the tests in this file.
dtype = tvm.testing.parameter("int32")
def flatten_all_indices(preflatten_shape):
    """Return a mapping that collapses an N-d index into one row-major flat index.

    The returned callable takes one index per dimension of *preflatten_shape*
    and yields a single-element list containing the flattened index.
    """

    def mapping(*indices):
        flat = functools.reduce(
            lambda acc, pair: acc * pair[1] + pair[0],
            zip(indices, preflatten_shape),
            0,
        )
        return [flat]

    return mapping
def unpack_flattened_indices(preflatten_shape):
    """Return a mapping that splits a row-major flat index into N-d indices.

    Inverse of flatten_all_indices. Uses `%` and `//` (not divmod) so the
    mapping also works on symbolic TIR expressions.
    """

    def mapping(i):
        coords = []
        for dim in reversed(preflatten_shape):
            coords.append(i % dim)
            i = i // dim
        return list(reversed(coords))

    return mapping
def traverse(s, op, callback):
    """Invoke *callback* on *op* and its transitive input ops, post-order, once each.

    *s* (the schedule) is accepted for interface compatibility but not used.
    """
    seen = set()

    def _walk(current):
        if current in seen:
            return
        seen.add(current)
        # Recurse into producers first so the callback runs post-order.
        for tensor in current.input_tensors:
            _walk(tensor.op)
        callback(current)

    _walk(op)
class TestCompareAgainstExplicitReshape:
    """Checks that transform_layout is equivalent to an explicit reshape stage.

    Parametrized so each of A (input) and B (output) is expressed either as an
    explicit te.compute reshape or via s[op].transform_layout.
    """

    A_definition_style = tvm.testing.parameter(
        "explicit_reshape",
        "transform_layout",
    )
    B_definition_style = tvm.testing.parameter(
        "explicit_reshape",
        "transform_layout",
    )

    # Logical 3-d shape that the flat buffer is reshaped to/from.
    reordered_shape = tvm.testing.parameter((2, 3, 4))

    @tvm.testing.fixture
    def n_items(self, reordered_shape):
        # Total number of elements in the flat layout.
        return functools.reduce(lambda x, y: x * y, reordered_shape, 1)

    @tvm.testing.fixture
    def fphysical_layout(self, reordered_shape):
        return unpack_flattened_indices(reordered_shape)

    @tvm.testing.fixture
    def fcompute(self, A_definition_style, B_definition_style, reordered_shape, n_items, dtype):
        """Build a factory returning (A_input, B_output) per the two styles."""
        assert A_definition_style in ["explicit_reshape", "transform_layout"]
        assert B_definition_style in ["explicit_reshape", "transform_layout"]

        def func():
            if A_definition_style == "explicit_reshape":
                # 3-d placeholder explicitly flattened into a 1-d tensor A.
                A_input = te.placeholder(shape=reordered_shape, name="A_input", dtype=dtype)
                A = te.compute(
                    shape=(n_items,),
                    fcompute=lambda i: A_input[
                        i // (reordered_shape[1] * reordered_shape[2]),
                        (i // reordered_shape[2]) % reordered_shape[1],
                        i % reordered_shape[2],
                    ],
                    name="A",
                )
            elif A_definition_style == "transform_layout":
                # Flat placeholder; the layout transform is applied in fschedule.
                A = te.placeholder(shape=(n_items,), name="A", dtype=dtype)
                A_input = A

            # The copy stage under test.
            B = te.compute(shape=A.shape, fcompute=lambda i: A[i], name="B")

            if B_definition_style == "explicit_reshape":
                # 1-d result explicitly unflattened into the 3-d output.
                B_output = te.compute(
                    shape=reordered_shape,
                    fcompute=lambda i, j, k: B[
                        i * reordered_shape[1] * reordered_shape[2] + j * reordered_shape[2] + k
                    ],
                    name="B_output",
                )
            elif B_definition_style == "transform_layout":
                B_output = B

            return A_input, B_output

        return func

    @tvm.testing.fixture
    def fschedule(self, A_definition_style, B_definition_style, fphysical_layout):
        """Build a schedule factory applying transform_layout where selected."""

        def func(outs):
            outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
            s = te.create_schedule([x.op for x in outs])

            def callback(op):
                if (op.name == "A" and A_definition_style == "transform_layout") or (
                    op.name == "B" and B_definition_style == "transform_layout"
                ):
                    s[op].transform_layout(fphysical_layout)

            traverse(s, outs[0].op, callback)
            return s

        return func

    @tvm.testing.parametrize_targets("llvm")
    def test_external_reshape(
        self, target, dev, fcompute, fschedule, n_items, reordered_shape, dtype
    ):
        """Build and run the copy; output must equal the identity reshape."""
        A, B = fcompute()
        s = fschedule(B)
        func = tvm.build(s, [A, B], target=target, name="copy_reshape")

        a_np = np.arange(n_items).reshape(reordered_shape).astype(dtype)
        b_np = np.arange(n_items).reshape(reordered_shape).astype(dtype)
        a = tvm.nd.array(a_np, dev)
        b = tvm.nd.empty(b_np.shape, dtype=dtype, device=dev)

        func(a, b)

        tvm.testing.assert_allclose(b.numpy(), b_np)

    @tvm.testing.parametrize_targets("llvm")
    def test_internal_reshape(self, target, dev, n_items, reordered_shape, dtype, fphysical_layout):
        # The reshaping of the buffer gets flattened away in
        # StorageFlatten. Therefore, testing the behavior by running only
        # ApplyLayoutTransforms.
        logical_shape = (n_items,)
        A = te.placeholder(logical_shape, name="A", dtype=dtype)
        B = te.compute(shape=logical_shape, fcompute=lambda i: A[i], name="B")
        C = te.compute(shape=logical_shape, fcompute=lambda i: B[i], name="C")
        s = te.create_schedule(C.op)
        s[B].transform_layout(fphysical_layout)

        mod = schedule_to_module(s, [A, C])
        body = mod["main"].body

        def walk_buffer_interactions(stmt, callback):
            # Visit every load/store/realize node that touches buffer "B".
            buffer_classes = [
                tvm.tir.BufferLoad,
                tvm.tir.BufferStore,
                tvm.tir.BufferRealize,
            ]

            def inner(node):
                if (type(node) in buffer_classes) and node.buffer.name == "B":
                    callback(node)

            post_order_visit(stmt, inner)

        # All references to the buffer are the same object
        def check_references():
            buffer_object = None

            def inner(node):
                nonlocal buffer_object
                if buffer_object is None:
                    buffer_object = node.buffer
                else:
                    assert node.buffer.same_as(buffer_object)

            return inner

        # The buffer has the expected shape.
        def check_shape(expected_shape):
            def inner(node):
                assert tuple(node.buffer.shape) == expected_shape

            return inner

        # Before the transform, the buffer should be in the logical shape.
        walk_buffer_interactions(body, check_references())
        walk_buffer_interactions(body, check_shape(logical_shape))

        mod = tvm.tir.transform.ApplyLayoutTransforms()(mod)
        body = mod["main"].body

        # After the transform, the buffer should be in the physical shape.
        walk_buffer_interactions(body, check_references())
        walk_buffer_interactions(body, check_shape(reordered_shape))
class Test2DPhysicalLayout:
    """Check buffer indices after 1-d/2-d/3-d physical layout transforms."""

    transform_A = tvm.testing.parameter(
        "1d_A",
        "2d_A",
        "2d_rev_A",
        "3d_A",
    )
    transform_B = tvm.testing.parameter(
        "1d_B",
        "2d_B",
        "2d_rev_B",
        "3d_B",
    )

    @staticmethod
    def extract_logical_indices(stmt):
        """Return loop variables of stmt, ordered by iteration extent.

        Since the for loops can be reordered by the layout
        transformation, identify the loop corresponding to each
        pre-transformation axis based on the iteration extent.
        """
        extent_of = {}

        def record(node):
            if isinstance(node, tvm.tir.For):
                extent_of[node.loop_var] = node.extent.value

        post_order_visit(stmt, record)
        return sorted(extent_of, key=extent_of.get)

    def get_transform(self, name):
        """Map a parameter name ("2d_A", ...) to a transform_layout callable."""
        key = name[:-2]  # strip the trailing "_A"/"_B"
        known = {
            "1d": None,
            "2d": lambda i, j, k: [i, j, te.AXIS_SEPARATOR, k],
            "2d_rev": lambda i, j, k: [k, j, te.AXIS_SEPARATOR, i],
            "3d": lambda i, j, k: [i, te.AXIS_SEPARATOR, j, te.AXIS_SEPARATOR, k],
        }
        if key not in known:
            raise ValueError(f"Unknown transformation: {key}")
        return known[key]

    def transform_indices(self, name, logical_shape, logical_index_vars):
        """Expected post-transform buffer indices, given the logical loop vars."""
        key = name[:-2]  # strip the trailing "_A"/"_B"
        i, j, k = logical_index_vars
        _, dim1, dim2 = logical_shape

        if key == "1d":
            return [i * (dim1 * dim2) + j * dim2 + k]
        if key == "2d":
            return [i * dim1 + j, k]
        if key == "2d_rev":
            return [k * dim1 + j, i]
        if key == "3d":
            return [i, j, k]
        raise ValueError(f"Unknown transformation: {key}")

    def test_2d_physical(self, dtype, transform_A, transform_B):
        logical_shape = (2, 3, 4)
        A = te.placeholder(shape=logical_shape, dtype=dtype, name="A")
        B = te.compute(shape=A.shape, fcompute=lambda i, j, k: A[i, j, k], name="B")

        s = te.create_schedule(B.op)

        for tensor, transform_name in [(A, transform_A), (B, transform_B)]:
            func = self.get_transform(transform_name)
            if func:
                s[tensor].transform_layout(func)

        # If the two buffers are accessed with the same indices, CSE
        # will replace them with a Let binding.  Since this makes it
        # harder to test what the transformed indices are, disabling
        # the CSE pass for this test.
        with tvm.transform.PassContext(disabled_pass=["tir.CommonSubexprElimTIR"]):
            mod = tvm.lower(s, [A, B])

        logical_index_vars = self.extract_logical_indices(mod["main"].body)
        expected_indices = {
            "A": self.transform_indices(transform_A, logical_shape, logical_index_vars),
            "B": self.transform_indices(transform_B, logical_shape, logical_index_vars),
        }

        def callback(node):
            if type(node) in [tvm.tir.BufferLoad, tvm.tir.BufferStore]:
                name = node.buffer.name
                if name not in expected_indices:
                    raise RuntimeError(f"Unexpected buffer: {name}")
                tvm.ir.assert_structural_equal(expected_indices[name], node.indices)

        post_order_visit(mod["main"].body, callback)
class TestTransformedSchedules:
    """Check loop structure after te.Stage.transform_layout.

    Applies a layout transformation to the output buffer B (and
    optionally the input A), optionally followed by a further schedule
    primitive (reorder/split/fuse) on the iteration variables returned
    by transform_layout, then verifies the loop extents both in the TE
    schedule and in the lowered TIR.
    """

    logical_shape = tvm.testing.parameter((4, 6, 40))

    transform_names = [
        None,
        "reverse",
        "flatten_all",
        "factor_last_by_4",
    ]

    transform_A = tvm.testing.parameter(by_dict={f"A_{t}": t for t in transform_names})
    transform_B = tvm.testing.parameter(
        by_dict={f"B_{t}": t for t in transform_names if t is not None}
    )

    # Overridden via @pytest.mark.parametrize in test_use_transformed_axes.
    after_transform = tvm.testing.parameter(None)

    def make_transform(self, logical_shape, transform_name):
        """Return the index-mapping callable for the named transformation."""
        if transform_name is None:
            return lambda *indices: indices
        elif transform_name == "reverse":
            return lambda *indices: indices[::-1]
        elif transform_name == "flatten_all":
            return flatten_all_indices(logical_shape)
        elif transform_name == "factor_last_by_4":
            return lambda *indices, n: [*indices, n // 4, n % 4]
        else:
            raise NotImplementedError(f"Unknown transformation {transform_name}")

    def make_transformed_shape(self, logical_shape, transform_name):
        """Return the buffer shape produced by the named transformation."""
        if transform_name is None:
            return logical_shape
        elif transform_name == "reverse":
            return logical_shape[::-1]
        elif transform_name == "flatten_all":
            num_elements = functools.reduce(lambda x, y: x * y, logical_shape, 1)
            return [num_elements]
        elif transform_name == "factor_last_by_4":
            *indices, n = logical_shape
            return [*indices, n // 4, 4]
        else:
            raise NotImplementedError(f"Unknown transformation {transform_name}")

    @tvm.testing.fixture
    def expected_loop_order(self, logical_shape, transform_B, after_transform):
        """Loop extents expected after transform_B plus after_transform."""
        shape = self.make_transformed_shape(logical_shape, transform_B)

        if after_transform == "reorder":
            shape = shape[::-1]
        elif after_transform == "split":
            shape = [
                *shape[:-1],
                2,
                shape[-1] // 2,
            ]
        elif after_transform == "fuse":
            # flatten_all leaves a single loop, so "fusing" keeps its extent.
            fused_size = shape[0] if transform_B == "flatten_all" else shape[0] * shape[1]
            shape = [fused_size, *shape[2:]]

        return shape

    @tvm.testing.fixture
    def schedule(self, logical_shape, dtype, transform_A, transform_B, after_transform):
        """Build the schedule under test and return it with its tensors."""
        A = te.placeholder(shape=logical_shape, dtype=dtype, name="A")
        B = te.compute(shape=A.shape, fcompute=lambda i, j, k: A[i, j, k], name="B")
        s = te.create_schedule(B.op)

        if transform_A:
            s[A].transform_layout(self.make_transform(logical_shape, transform_A))

        iter_vars = s[B].transform_layout(self.make_transform(logical_shape, transform_B))
        iter_vars = list(iter_vars)

        # Each follow-up primitive consumes the iteration variables that
        # transform_layout returned.
        if after_transform == "reorder":
            s[B].reorder(*iter_vars[::-1])
        elif after_transform == "split":
            s[B].split(iter_vars[-1], nparts=2)
        elif after_transform == "fuse":
            # Bugfix: removed the unused local `to_fuse = iter_vars[:2]`.
            s[B].fuse(*iter_vars[:2])

        return {
            "schedule": s,
            "tensors": [A, B],
            "iter_vars": iter_vars,
        }

    def compare_tir_loop_order(self, stmt, expected_loop_order):
        """Assert the outermost-to-innermost For extents in stmt."""

        def collect_loops(node):
            output = []

            def callback(node):
                if isinstance(node, tvm.tir.For):
                    output.append(node)

            post_order_visit(node, callback)
            # post_order_visit reaches inner loops first; reverse for
            # outermost-to-innermost order.
            return output[::-1]

        loops = collect_loops(stmt)
        loop_order = [loop.extent for loop in loops]

        np.testing.assert_array_equal(loop_order, expected_loop_order)

    def test_tir_loop_order(self, schedule, expected_loop_order):
        func = tvm.lower(schedule["schedule"], schedule["tensors"])["main"]
        self.compare_tir_loop_order(func.body, expected_loop_order)

    def test_te_loop_order(self, schedule, expected_loop_order):
        s = schedule["schedule"]
        A, B = schedule["tensors"]
        iter_vars = schedule["iter_vars"]

        # No reduction axis, so all leaf_iter_vars are over the data
        # array, and should have the new iteration variables.
        extents = [int(iter_var.dom.extent) for iter_var in s[B].leaf_iter_vars]
        np.testing.assert_array_equal(extents, expected_loop_order)

        # layout_transform should return the new iteration variables.
        extents = [int(iter_var.dom.extent) for iter_var in iter_vars]
        np.testing.assert_array_equal(extents, expected_loop_order)

    @pytest.mark.parametrize("after_transform", ["reorder", "split", "fuse"])
    def test_use_transformed_axes(
        self, schedule, expected_loop_order, transform_A, transform_B, after_transform
    ):
        s = schedule["schedule"]
        A, B = schedule["tensors"]

        func = tvm.lower(s, [A, B])["main"]
        self.compare_tir_loop_order(func.body, expected_loop_order)
class TestTransformCache:
    """Layout transforms compose with cache_read/cache_write stages."""

    A_size = tvm.testing.parameter(16)
    transform_A = tvm.testing.parameter(by_dict={"transformA": True, "": False})
    transform_B = tvm.testing.parameter(by_dict={"transformB": True, "": False})
    cache_A = tvm.testing.parameter(by_dict={"cacheA": True, "": False})
    cache_B = tvm.testing.parameter(by_dict={"cacheB": True, "": False})

    @tvm.testing.fixture
    def schedule_args(self, target, A_size, transform_A, transform_B, cache_A, cache_B, dtype):
        """Build a 1-d copy schedule with optional caches and layout transforms."""
        A = te.placeholder(shape=[A_size], dtype=dtype, name="A")
        B = te.compute(A.shape, lambda i: A[i], name="B")
        s = te.create_schedule(B.op)

        # GPU targets require stages to be bound to thread axes.
        requires_thread_bind = "gpu" in tvm.target.Target(target).keys
        thread_x = te.thread_axis("threadIdx.x")
        thread_y = te.thread_axis("threadIdx.y")
        thread_z = te.thread_axis("threadIdx.z")

        if cache_A:
            AA = s.cache_read(A, "shared", [B])
            if requires_thread_bind:
                s[AA].bind(AA.op.axis[0], thread_x)

        if cache_B:
            BB = s.cache_write(B, "shared")
            if requires_thread_bind:
                s[BB].bind(BB.op.axis[0], thread_y)

        # Transforms are applied after the cache stages exist, splitting
        # each 1-d buffer into an (N/4, 4) layout.
        if transform_A:
            A_axis = s[A].transform_layout(lambda i: [i // 4, i % 4])

        if transform_B:
            B_axis = s[B].transform_layout(lambda i: [i // 4, i % 4])
        else:
            B_axis = B.op.axis

        if requires_thread_bind:
            s[B].bind(B_axis[0], thread_z)

        return [s, [A, B]]

    @tvm.testing.fixture
    def ref_data(self, A_size, dtype, transform_A, transform_B):
        """Reference input/output arrays, reshaped to match any transforms."""
        a_np = (100 * np.random.uniform(size=A_size)).astype(dtype)
        b_np = a_np
        if transform_A:
            a_np = a_np.reshape((-1, 4))
        if transform_B:
            b_np = b_np.reshape((-1, 4))

        return a_np, b_np

    def test_lower(self, schedule_args):
        # Lowering alone must succeed for every parameter combination.
        tvm.lower(*schedule_args)

    def test_execute(self, target, dev, schedule_args, ref_data, dtype):
        """Build and run the schedule, comparing against the reference data."""
        func = tvm.build(*schedule_args, target=target)

        a_np, b_np = ref_data
        a = tvm.nd.array(a_np, dev)
        b = tvm.nd.empty(b_np.shape, dtype=dtype, device=dev)
        func(a, b)

        # Integer outputs must match exactly; floats use allclose.
        if "int" in dtype:
            np.testing.assert_equal(b.numpy(), b_np)
        else:
            tvm.testing.assert_allclose(b.numpy(), b_np)
def test_transform_with_reduction():
    """Lowering a reduction whose output has a layout transform must not crash.

    When present, the failure occurred during tvm.lower, during the
    call to `tvm::te::PassDownBitMaskOr`.
    """
    # To trigger this failure mode, the computation must use a
    # reduction axis.
    A = te.placeholder([16, 32, 64], dtype="float32", name="A")
    k = te.reduce_axis((0, A.shape[-1]), name="k")
    B = te.compute(A.shape[:-1], lambda i, j: te.sum(A[i, j, k], axis=[k]))

    sched = te.create_schedule(B.op)

    # And the output of the computation must have a layout
    # transformation applied.
    sched[B].transform_layout(lambda i, j: [j, i])

    tvm.lower(sched, [A, B])
# Module-level parametrization for test_size_one_buffer: each
# (shape, transform) pair applies a layout transform to a buffer in
# which at least one dimension has extent 1.
shape, transform = tvm.testing.parameters(
    ([1, 8], lambda n, i: [i, n]),
    ([1, 1, 8], lambda i, j, k: [j, te.AXIS_SEPARATOR, i, k]),
    ([1, 1, 8], lambda i, j, k: [i, te.AXIS_SEPARATOR, j, k]),
)
def test_size_one_buffer(shape, transform):
    """Layout transforms still work when a buffer dimension has extent 1.

    This test is to catch a failure mode that occurred if a
    transformation were applied to a te.compute buffer, and one of the
    dimensions of the buffer was 1.  Prior to bugfix,
    arith::DetectIterMap would fold the variable as a constant, causing
    an error when attempting to solve for the variable using
    arith::InverseAffineIterMap.
    """
    dtype = "int8"
    A = te.placeholder(shape, dtype, name="A")
    B = te.compute(
        shape=A.shape,
        fcompute=lambda *indices: A[indices].astype(dtype),
        name="B",
    )
    sched = te.create_schedule(B.op)

    # If layout transformation is on the output buffer, and any
    # dimension of the output buffer is 1, failure occurs in
    # CheckFusePattern.
    sched[B].transform_layout(transform)
def test_non_divisible_transform_raises_error():
    """A padding-introducing transform is rejected by TE schedules."""
    A = te.placeholder([1, 3, 8, 8])
    B = te.compute(A.shape, lambda *indices: A[indices])
    sched = te.create_schedule(B.op)

    # Error occurs here, because the transformation would introduce
    # padding.  Padded transforms are supported in TIR-based
    # schedules.
    with pytest.raises(tvm.TVMError):
        sched[B].transform_layout(lambda n, c, h, w: [n, c // 4, h, w, c % 4])
if __name__ == "__main__":
    # Run through tvm.testing.main so fixtures/parametrization apply.
    tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tvm_testing_before_after.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm.script import tir as T, ir_module
class BaseBeforeAfter(tvm.testing.CompareBeforeAfter):
    """Shared base whose transform is the identity, so before == expected."""

    def transform(self):
        def identity(mod):
            return mod

        return identity
class TestBeforeAfterPrimFunc(BaseBeforeAfter):
    # `before` written directly as a decorated T.prim_func.
    @T.prim_func
    def before():
        T.evaluate(0)

    expected = before
class TestBeforeAfterMethod(BaseBeforeAfter):
    # `before` written as a method returning a T.prim_func.
    def before(self):
        @T.prim_func
        def func():
            T.evaluate(0)

        return func

    expected = before
class TestBeforeAfterFixture(BaseBeforeAfter):
    # `before` written as an explicit tvm.testing.fixture.
    @tvm.testing.fixture
    def before(self):
        @T.prim_func
        def func():
            T.evaluate(0)

        return func

    expected = before
class TestBeforeAfterDelayedPrimFunc(BaseBeforeAfter):
    # Undecorated: CompareBeforeAfter applies @T.prim_func at test time.
    def before():
        T.evaluate(0)

    expected = before
class TestBeforeAfterParametrizedFixture(BaseBeforeAfter):
    # The fixture form is required when `before` depends on a parameter.
    n = tvm.testing.parameter(1, 8, 16)

    @tvm.testing.fixture
    def before(self, n):
        @T.prim_func
        def func(A: T.Buffer[n, "float32"]):
            for i in T.serial(n):
                A[i] = 0.0

        return func

    expected = before
class TestBeforeAfterIRModule(BaseBeforeAfter):
    """The preferred form for writing TIR unit tests

    All evaluation is done at test-time, with the minimal amount of
    additional lines.  The `@tvm.testing.fixture`, `@ir_module`, and
    `@T.prim_func` annotations are handled by
    `tvm.testing.CompareBeforeAfter`.
    """

    # Undecorated nested class: converted to an @ir_module of
    # @T.prim_funcs by CompareBeforeAfter at test time.
    class before:
        def func_A(A: T.Buffer[16, "float32"]):
            for i in T.serial(16):
                A[i] = 0.0

        def func_B(A: T.Buffer[16, "int32"]):
            for i in T.serial(16):
                A[i] = 42

    expected = before
class TestBeforeAfterIRModuleExplicitFixture(BaseBeforeAfter):
    """Like TestBeforeAfterIRModule, but with an explicit fixture

    If the IRModule depends on additional fixtures, this form can be
    used.
    """

    @tvm.testing.fixture
    def before(self):
        # All decorators are written out explicitly in this form.
        @ir_module
        class mod:
            @T.prim_func
            def func_A(A: T.Buffer[16, "float32"]):
                for i in T.serial(16):
                    A[i] = 0.0

            @T.prim_func
            def func_B(A: T.Buffer[16, "int32"]):
                for i in T.serial(16):
                    A[i] = 42

        return mod

    expected = before
if __name__ == "__main__":
    # Run through tvm.testing.main so fixtures/parametrization apply.
    tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tvm_testing_features.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import sys
import pytest
import tvm.testing
# This file tests features in tvm.testing, such as verifying that
# cached fixtures are run an appropriate number of times. As a
# result, the order of the tests is important. Use of --last-failed
# or --failed-first while debugging this file is not advised. If
# these tests are distributed/parallelized using pytest-xdist or
# similar, all tests in this file should run sequentially on the same
# node. (See https://stackoverflow.com/a/59504228)
class TestTargetAutoParametrization:
    """Checks automatic parametrization over enabled targets/devices.

    The list attributes below are class-level accumulators filled in by
    the parametrized tests and checked by the follow-up tests, so test
    order within this class matters (see the module-level comment).
    """

    targets_used = []   # filled by test_target_parametrization
    devices_used = []   # filled by test_device_parametrization
    enabled_targets = [target for target, dev in tvm.testing.enabled_targets()]
    enabled_devices = [dev for target, dev in tvm.testing.enabled_targets()]

    def test_target_parametrization(self, target):
        assert target in self.enabled_targets
        self.targets_used.append(target)

    def test_device_parametrization(self, dev):
        assert dev in self.enabled_devices
        self.devices_used.append(dev)

    def test_all_targets_used(self):
        assert sorted(self.targets_used) == sorted(self.enabled_targets)

    def test_all_devices_used(self):
        # Devices are not directly orderable; sort by (type, id).
        sort_key = lambda dev: (dev.device_type, dev.device_id)
        assert sorted(self.devices_used, key=sort_key) == sorted(self.enabled_devices, key=sort_key)

    targets_with_explicit_list = []

    @tvm.testing.parametrize_targets("llvm")
    def test_explicit_list(self, target):
        assert target == "llvm"
        self.targets_with_explicit_list.append(target)

    def test_no_repeats_in_explicit_list(self):
        # The explicitly listed target runs exactly once (or not at all
        # when llvm is disabled in this build).
        if tvm.testing.device_enabled("llvm"):
            assert self.targets_with_explicit_list == ["llvm"]
        else:
            assert self.targets_with_explicit_list == []

    targets_with_exclusion = []

    @tvm.testing.exclude_targets("llvm")
    def test_exclude_target(self, target):
        assert "llvm" not in target
        self.targets_with_exclusion.append(target)

    def test_all_nonexcluded_targets_ran(self):
        assert sorted(self.targets_with_exclusion) == sorted(
            [target for target in self.enabled_targets if not target.startswith("llvm")]
        )

    run_targets_with_known_failure = []

    @tvm.testing.known_failing_targets("llvm")
    def test_known_failing_target(self, target):
        # This test runs for all targets, but intentionally fails for
        # llvm.  The behavior is working correctly if this test shows
        # up as an expected failure, xfail.
        self.run_targets_with_known_failure.append(target)
        assert "llvm" not in target

    def test_all_targets_ran(self):
        assert sorted(self.run_targets_with_known_failure) == sorted(self.enabled_targets)

    @tvm.testing.known_failing_targets("llvm")
    @tvm.testing.parametrize_targets("llvm")
    def test_known_failing_explicit_list(self, target):
        assert target != "llvm"
class TestJointParameter:
    """Independent parameters produce a cross product; joint (zipped)
    parameters produce one test per tuple."""

    param1_vals = [1, 2, 3]
    param2_vals = ["a", "b", "c"]

    # Class-level counters incremented by the parametrized tests and
    # checked by the follow-up tests; test order within this class matters.
    independent_usages = 0
    param1 = tvm.testing.parameter(*param1_vals)
    param2 = tvm.testing.parameter(*param2_vals)

    joint_usages = 0
    joint_param_vals = list(zip(param1_vals, param2_vals))
    joint_param_ids = ["apple", "pear", "banana"]
    joint_param1, joint_param2 = tvm.testing.parameters(*joint_param_vals, ids=joint_param_ids)

    def test_using_independent(self, param1, param2):
        type(self).independent_usages += 1

    def test_independent(self):
        # Independent parameters: full cross product.
        assert self.independent_usages == len(self.param1_vals) * len(self.param2_vals)

    def test_using_joint(self, joint_param1, joint_param2):
        type(self).joint_usages += 1
        assert (joint_param1, joint_param2) in self.joint_param_vals

    def test_joint(self):
        # Joint parameters: zipped, one run per tuple.
        assert self.joint_usages == len(self.joint_param_vals)

    def test_joint_test_id(self, joint_param1, joint_param2, request):
        # Strip the test name and brackets to recover the pytest param id.
        param_string = (
            request.node.name.replace(request.node.originalname, "")
            .replace("[", "")
            .replace("]", "")
        )
        assert param_string in self.joint_param_ids
class TestFixtureCaching:
    """Cached fixtures are evaluated once per distinct parameter value."""

    param1_vals = [1, 2, 3]
    param2_vals = ["a", "b", "c"]

    param1 = tvm.testing.parameter(*param1_vals)
    param2 = tvm.testing.parameter(*param2_vals)

    # Fixture-evaluation counters, checked by the *_count tests below.
    uncached_calls = 0
    cached_calls = 0

    @tvm.testing.fixture
    def uncached_fixture(self, param1):
        type(self).uncached_calls += 1
        return 2 * param1

    def test_use_uncached(self, param1, param2, uncached_fixture):
        assert 2 * param1 == uncached_fixture

    def test_uncached_count(self):
        # Uncached: re-evaluated for every (param1, param2) combination,
        # even though the fixture only depends on param1.
        assert self.uncached_calls == len(self.param1_vals) * len(self.param2_vals)

    @tvm.testing.fixture(cache_return_value=True)
    def cached_fixture(self, param1):
        type(self).cached_calls += 1
        return 3 * param1

    def test_use_cached(self, param1, param2, cached_fixture):
        assert 3 * param1 == cached_fixture

    def test_cached_count(self):
        cache_disabled = bool(int(os.environ.get("TVM_TEST_DISABLE_CACHE", "0")))
        if cache_disabled:
            # With caching disabled, behaves like the uncached fixture.
            assert self.cached_calls == len(self.param1_vals) * len(self.param2_vals)
        else:
            # Cached: evaluated once per distinct param1 value.
            assert self.cached_calls == len(self.param1_vals)
class TestCachedFixtureIsCopy:
    """Each test receives a copy of a cached fixture value, not the original."""

    param = tvm.testing.parameter(1, 2, 3, 4)

    @tvm.testing.fixture(cache_return_value=True)
    def cached_mutable_fixture(self):
        return {"val": 0}

    def test_modifies_fixture(self, param, cached_mutable_fixture):
        assert cached_mutable_fixture["val"] == 0

        # The tests should receive a copy of the fixture value.  If
        # the test receives the original and not a copy, then this
        # will cause the next parametrization to fail.
        cached_mutable_fixture["val"] = param
class TestBrokenFixture:
    """Tests that use a fixture that throws an exception fail, and are
    marked as setup failures.  The tests themselves are never run.
    This behavior should be the same whether or not the fixture
    results are cached.
    """

    # Counters that must remain zero, since the test bodies below
    # should never execute.
    num_uses_broken_uncached_fixture = 0
    num_uses_broken_cached_fixture = 0

    @tvm.testing.fixture
    def broken_uncached_fixture(self):
        raise RuntimeError("Intentionally broken fixture")

    @pytest.mark.xfail(True, reason="Broken fixtures should result in a failing setup", strict=True)
    def test_uses_broken_uncached_fixture(self, broken_uncached_fixture):
        # Bugfix: previously incremented the nonexistent attribute
        # `num_uses_broken_fixture`; the counter checked in
        # test_num_uses_uncached is `num_uses_broken_uncached_fixture`.
        type(self).num_uses_broken_uncached_fixture += 1

    def test_num_uses_uncached(self):
        assert self.num_uses_broken_uncached_fixture == 0

    @tvm.testing.fixture(cache_return_value=True)
    def broken_cached_fixture(self):
        raise RuntimeError("Intentionally broken fixture")

    @pytest.mark.xfail(True, reason="Broken fixtures should result in a failing setup", strict=True)
    def test_uses_broken_cached_fixture(self, broken_cached_fixture):
        type(self).num_uses_broken_cached_fixture += 1

    def test_num_uses_cached(self):
        assert self.num_uses_broken_cached_fixture == 0
class TestAutomaticMarks:
    """Target-requirement marks are applied regardless of how the
    target parameter is declared."""

    @staticmethod
    def check_marks(request, target):
        # Every mark required by the target must appear on the test node.
        decorators = tvm.testing.plugin._target_to_requirement(target)
        required_marks = [decorator.mark for decorator in decorators]
        applied_marks = list(request.node.iter_markers())

        for required_mark in required_marks:
            assert required_mark in applied_marks

    def test_automatic_fixture(self, request, target):
        self.check_marks(request, target)

    @tvm.testing.parametrize_targets
    def test_bare_parametrize(self, request, target):
        self.check_marks(request, target)

    @tvm.testing.parametrize_targets("llvm", "cuda", "vulkan")
    def test_explicit_parametrize(self, request, target):
        self.check_marks(request, target)

    @pytest.mark.parametrize("target", ["llvm", "cuda", "vulkan"])
    def test_pytest_mark(self, request, target):
        self.check_marks(request, target)

    @pytest.mark.parametrize("target,other_param", [("llvm", 0), ("cuda", 1), ("vulkan", 2)])
    def test_pytest_mark_covariant(self, request, target, other_param):
        self.check_marks(request, target)
@pytest.mark.skipif(
    bool(int(os.environ.get("TVM_TEST_DISABLE_CACHE", "0"))),
    reason="Cannot test cache behavior while caching is disabled",
)
class TestCacheableTypes:
    """Cached fixture values must be copyable via __reduce__ or __deepcopy__."""

    class EmptyClass:
        pass

    @tvm.testing.fixture(cache_return_value=True)
    def uncacheable_fixture(self):
        # EmptyClass defines neither __reduce__ nor __deepcopy__, so
        # using the cached fixture must raise TypeError (checked below).
        return self.EmptyClass()

    def test_uses_uncacheable(self, request):
        # Normally the num_tests_use_this_fixture would be set before
        # anything runs.  For this test case only, because we are
        # delaying the use of the fixture, we need to manually
        # increment it.
        self.uncacheable_fixture.num_tests_use_this_fixture[0] += 1

        with pytest.raises(TypeError):
            request.getfixturevalue("uncacheable_fixture")

    class ImplementsReduce:
        def __reduce__(self):
            return super().__reduce__()

    @tvm.testing.fixture(cache_return_value=True)
    def fixture_with_reduce(self):
        return self.ImplementsReduce()

    def test_uses_reduce(self, fixture_with_reduce):
        pass

    class ImplementsDeepcopy:
        def __deepcopy__(self, memo):
            return type(self)()

    @tvm.testing.fixture(cache_return_value=True)
    def fixture_with_deepcopy(self):
        return self.ImplementsDeepcopy()

    def test_uses_deepcopy(self, fixture_with_deepcopy):
        pass
if __name__ == "__main__":
    # Run through tvm.testing.main so fixtures/parametrization apply.
    tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tvmscript_complete.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm.ir import Range
from tvm.script import tir as T
# 128x128 matmul with reduction over vk (B accessed as B[vj, vk]),
# written without explicit T.reads/T.writes annotations;
# test_complete_matmul checks the regions inferred for "update".
@T.prim_func
def matmul(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [128, 128])
    B = T.match_buffer(b, [128, 128])
    C = T.match_buffer(c, [128, 128])
    for i, j, k in T.grid(128, 128, 128):
        with T.block("update"):
            vi, vj, vk = T.axis.remap("SSR", [i, j, k])
            with T.init():
                C[vi, vj] = T.float32(0)
            C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
# Same matmul, tiled into 4x4 sub-blocks with separate "init" and
# "update" blocks; test_complete_matmul_original checks the inferred
# regions of both blocks.
@T.prim_func
def matmul_original(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [128, 128])
    B = T.match_buffer(b, [128, 128])
    C = T.match_buffer(c, [128, 128])
    for i, j in T.grid(32, 32):
        with T.block("init"):
            vi, vj = T.axis.remap("SS", [i, j])
            for ii, jj in T.grid(4, 4):
                C[vi * 4 + ii, vj * 4 + jj] = T.float32(0)
        for k in range(0, 32):
            with T.block("update"):
                vi, vj, vk = T.axis.remap("SSR", [i, j, k])
                for ii, jj, kk in T.grid(4, 4, 4):
                    C[vi * 4 + ii, vj * 4 + jj] = (
                        C[vi * 4 + ii, vj * 4 + jj]
                        + A[vi * 4 + ii, vk * 4 + kk] * B[vj * 4 + jj, vk * 4 + kk]
                    )
# Two elementwise stages (B = A + 1, then C = B + 1) under an explicit
# root block; checked via _check_elementwise in test_complete_with_root.
@T.prim_func
def elementwise_with_root(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [128, 128])
    B = T.match_buffer(b, [128, 128])
    C = T.match_buffer(c, [128, 128])
    with T.block():
        for i, j in T.grid(128, 128):
            with T.block():
                vi, vj = T.axis.remap("SS", [i, j])
                B[vi, vj] = A[vi, vj] + T.float32(1)
        for i, j in T.grid(128, 128):
            with T.block():
                vi, vj = T.axis.remap("SS", [i, j])
                C[vi, vj] = B[vi, vj] + T.float32(1)
# NOTE(review): unlike its siblings, this function is not decorated with
# @T.prim_func, and no test in this file references it -- presumably the
# decorator was dropped or the function is intentionally unused; confirm
# before relying on it.  Its first inner block is opaque (no iter vars).
def func_with_opaque_block(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [128, 128])
    B = T.match_buffer(b, [128, 128])
    C = T.match_buffer(c, [128, 128])
    with T.block():
        with T.block():
            B[0, 0] = A[0, 0] + T.float32(1)
        for i, j in T.grid(128, 128):
            with T.block():
                vi, vj = T.axis.remap("SS", [i, j])
                C[vi, vj] = B[vi, vj] + T.float32(1)
# Like elementwise_with_root, but each inner block annotates only one of
# its access regions (T.reads or T.writes); completion must fill in the
# other.  Checked via _check_elementwise in test_complete_part_region.
@T.prim_func
def func_with_part_access_region(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [128, 128])
    B = T.match_buffer(b, [128, 128])
    C = T.match_buffer(c, [128, 128])
    with T.block():
        for i, j in T.grid(128, 128):
            with T.block():
                vi, vj = T.axis.remap("SS", [i, j])
                T.reads(A[vi, vj])
                B[vi, vj] = A[vi, vj] + T.float32(1)
        for i, j in T.grid(128, 128):
            with T.block():
                vi, vj = T.axis.remap("SS", [i, j])
                T.writes(C[vi, vj])
                C[vi, vj] = B[vi, vj] + T.float32(1)
def test_complete_matmul():
    """Completion infers single-element read/write regions for matmul."""
    func = matmul
    A, B, C = [func.buffer_map[x] for x in func.params]

    block = func.body.block.body.body.body.body.block
    assert isinstance(block, tvm.tir.Block)
    vi, vj, vk = [x.var for x in block.iter_vars]

    def point_region(buf, *indices):
        # A single-element access region at the given indices.
        return tvm.tir.BufferRegion(buf, [Range.from_min_extent(idx, 1) for idx in indices])

    tvm.ir.assert_structural_equal(
        block.reads, [point_region(A, vi, vk), point_region(B, vj, vk)]
    )
    tvm.ir.assert_structural_equal(block.writes, [point_region(C, vi, vj)])
def test_complete_matmul_original():
    """Completion infers 4x4 tile regions for both init and update blocks."""
    func = matmul_original
    A, B, C = [func.buffer_map[x] for x in func.params]

    # "init" block: writes one 4x4 tile of C, reads nothing.
    block1 = func.body.block.body.body.body[0].block
    assert isinstance(block1, tvm.tir.Block)
    vi, vj = [x.var for x in block1.iter_vars]
    access_C = tvm.tir.BufferRegion(
        C, [Range.from_min_extent(vi * 4, 4), Range.from_min_extent(vj * 4, 4)]
    )
    tvm.ir.assert_structural_equal(block1.reads, [])
    tvm.ir.assert_structural_equal(block1.writes, [access_C])

    # "update" block: reads 4x4 tiles of C, A, B and writes the C tile.
    block2 = func.body.block.body.body.body[1].body.block
    assert isinstance(block2, tvm.tir.Block)
    vi, vj, vk = [x.var for x in block2.iter_vars]
    access_A = tvm.tir.BufferRegion(
        A, [Range.from_min_extent(vi * 4, 4), Range.from_min_extent(vk * 4, 4)]
    )
    access_B = tvm.tir.BufferRegion(
        B, [Range.from_min_extent(vj * 4, 4), Range.from_min_extent(vk * 4, 4)]
    )
    access_C = tvm.tir.BufferRegion(
        C, [Range.from_min_extent(vi * 4, 4), Range.from_min_extent(vj * 4, 4)]
    )
    tvm.ir.assert_structural_equal(block2.reads, [access_C, access_A, access_B])
    tvm.ir.assert_structural_equal(block2.writes, [access_C])
def _check_elementwise(func):
    """Verify completed read/write regions of a two-stage elementwise func."""
    A, B, C = [func.buffer_map[x] for x in func.params]

    def check_block(block, read_buf, write_buf):
        assert isinstance(block, tvm.tir.Block)
        vi, vj = [x.var for x in block.iter_vars]

        def region(buf):
            # Single-element region at (vi, vj).
            return [
                tvm.tir.BufferRegion(
                    buf, [Range.from_min_extent(vi, 1), Range.from_min_extent(vj, 1)]
                )
            ]

        tvm.ir.assert_structural_equal(block.reads, region(read_buf))
        tvm.ir.assert_structural_equal(block.writes, region(write_buf))

    # First stage reads A and writes B; second reads B and writes C.
    check_block(func.body.block.body[0].body.body.block, A, B)
    check_block(func.body.block.body[1].body.body.block, B, C)
def test_complete_with_root():
    # elementwise_with_root already carries an explicit root block.
    _check_elementwise(elementwise_with_root)
def test_complete_part_region():
    # Only one of T.reads/T.writes is annotated per block; completion
    # must fill in the missing region.
    _check_elementwise(func_with_part_access_region)
# Indexes data_buf with a value loaded from another buffer
# (index_buf[0]); completion must add index_buf to the read region.
@T.prim_func
def func_with_bufferslice_indices(data: T.handle, index: T.handle) -> None:
    data_buf = T.match_buffer(data, (16, 16), "float32")
    index_buf = T.match_buffer(index, (1,), "int32")
    out_buf = T.alloc_buffer((16, 16), "float32")
    for i, j in T.grid(16, 16):
        with T.block():
            vi, vj = T.axis.remap("SS", [i, j])
            out_buf[vi, vj] = data_buf[vi, index_buf[0]]
# Fully annotated form that func_with_bufferslice_indices should
# complete to (root block, explicit reads/writes including index_buf).
@T.prim_func
def expected_bufferslice_indices(data: T.handle, index: T.handle) -> None:
    index_buf = T.match_buffer(index, [1], dtype="int32", elem_offset=0, align=64, offset_factor=1)
    data_buf = T.match_buffer(data, [16, 16], elem_offset=0, align=64, offset_factor=1)
    with T.block("root"):
        T.reads([])
        T.writes([])
        out_buf = T.alloc_buffer([16, 16], elem_offset=0, align=64, offset_factor=1)
        for i0, i1 in T.grid(16, 16):
            with T.block():
                vi, vj = T.axis.remap("SS", [i0, i1])
                T.reads([data_buf[vi, index_buf[0]], index_buf[0]])
                T.writes([out_buf[vi, vj]])
                out_buf[vi, vj] = data_buf[vi, index_buf[0]]
# Like func_with_bufferslice_indices, but the index buffer is itself
# indexed by one of its own loaded values (index_buf[index_buf[0]]).
@T.prim_func
def func_with_recursive_bufferslice_indices(data: T.handle, index: T.handle) -> None:
    data_buf = T.match_buffer(data, (16, 16), "float32")
    index_buf = T.match_buffer(index, (1,), "int32")
    out_buf = T.alloc_buffer((16, 16), "float32")
    for i, j in T.grid(16, 16):
        with T.block():
            vi, vj = T.axis.remap("SS", [i, j])
            out_buf[vi, vj] = data_buf[index_buf[index_buf[0]], index_buf[0]]
# Fully annotated form for the recursive case: the read region of
# index_buf is the min/max bound over the recursively loaded index.
@T.prim_func
def expected_recursive_bufferslice_indices(data: T.handle, index: T.handle) -> None:
    index_buf = T.match_buffer(index, [1], dtype="int32", elem_offset=0, align=64, offset_factor=1)
    data_buf = T.match_buffer(data, [16, 16], elem_offset=0, align=64, offset_factor=1)
    with T.block("root"):
        T.reads([])
        T.writes([])
        out_buf = T.alloc_buffer([16, 16], elem_offset=0, align=64, offset_factor=1)
        for i0, i1 in T.grid(16, 16):
            with T.block():
                vi, vj = T.axis.remap("SS", [i0, i1])
                T.reads(
                    [
                        data_buf[index_buf[index_buf[0]], index_buf[0]],
                        index_buf[T.min(index_buf[0], 0) : T.max(index_buf[0], 0) + 1],
                    ]
                )
                T.writes([out_buf[vi, vj]])
                out_buf[vi, vj] = data_buf[index_buf[index_buf[0]], index_buf[0]]
def test_complete_buffer_indices():
    """Round-tripping through TVMScript completes buffer-slice index regions."""
    cases = [
        (func_with_bufferslice_indices, expected_bufferslice_indices),
        (func_with_recursive_bufferslice_indices, expected_recursive_bufferslice_indices),
    ]
    for original, expected in cases:
        roundtripped = tvm.script.from_source(original.script())
        tvm.ir.assert_structural_equal(roundtripped, expected)
# Nested T.match_buffer on sub-regions of A; completion must annotate
# each enclosing block with the regions the matched buffers cover.
@T.prim_func
def match_buffer_func(a: T.handle) -> None:
    A = T.match_buffer(a, (16, 16))
    for i in range(0, 16):
        with T.block():
            A0 = T.match_buffer(A[i, 0:16], (16))
            with T.block():
                for j in range(0, 16):
                    with T.block():
                        A1 = T.match_buffer(A0[j], ())
                        A1[()] = 1.0
# Fully annotated form that match_buffer_func should complete to:
# each block's T.writes covers the region matched inside it.
@T.prim_func
def expected_match_buffer_func(a: T.handle) -> None:
    A = T.match_buffer(a, (16, 16))
    for i in range(0, 16):
        with T.block():
            T.reads([])
            T.writes(A[i, 0:16])
            A0 = T.match_buffer(A[i, 0:16], (16))
            with T.block():
                T.reads([])
                T.writes(A0[0:16])
                for j in range(0, 16):
                    with T.block():
                        T.reads([])
                        T.writes(A0[j])
                        A1 = T.match_buffer(A0[j], ())
                        A1[()] = 1.0
def test_complete_match_buffer():
    # The parsed (completed) function should equal the annotated form.
    tvm.ir.assert_structural_equal(match_buffer_func, expected_match_buffer_func)
# Block-free body using T.alloc_buffer; completion must wrap it in a
# root block that owns the allocation.
@T.prim_func
def alloc_buffer_func(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [2, 2], dtype="float32")
    B = T.match_buffer(b, [2, 2], dtype="float32")
    C = T.alloc_buffer([2, 2], dtype="float32")
    A[(0, 0)] = T.float32(2)
    C[(0, 0)] = A[(0, 0)] + B[(0, 0)]
    B[(0, 0)] = C[(0, 0)]
# Fully annotated form that alloc_buffer_func should complete to: the
# allocation lives inside an explicit root block.
@T.prim_func
def expect_alloc_buffer_func(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [2, 2], dtype="float32", elem_offset=0, align=64, offset_factor=1)
    B = T.match_buffer(b, [2, 2], dtype="float32", elem_offset=0, align=64, offset_factor=1)
    with T.block("root"):
        T.reads([])
        T.writes([])
        C = T.alloc_buffer([2, 2], dtype="float32", elem_offset=0, align=64, offset_factor=1)
        A[(0, 0)] = T.float32(2)
        C[(0, 0)] = A[(0, 0)] + B[(0, 0)]
        B[(0, 0)] = C[(0, 0)]
def test_complete_alloc_buffer():
    """Completion must wrap a top-level alloc_buffer in an explicit root block."""
    # NOTE(review): `rt_func` (the show_meta round-trip) is parsed but never
    # compared; presumably only the successful re-parse is being exercised.
    # Confirm whether the second assert was meant to use `rt_func` instead.
    rt_func = tvm.script.from_source(alloc_buffer_func.script(show_meta=True))
    tvm.ir.assert_structural_equal(alloc_buffer_func, expect_alloc_buffer_func)
if __name__ == "__main__":
test_complete_matmul()
test_complete_matmul_original()
test_complete_with_root()
test_complete_part_region()
test_complete_buffer_indices()
test_complete_match_buffer()
test_complete_alloc_buffer()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tvmscript_error_report.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import inspect
import re
import pytest
import tvm
import tvm.testing
from tvm import tir
from tvm.ir.diagnostics import override_renderer
from tvm.script import from_source
from tvm.script import tir as T
def check_error(func, rel_lineno):
    """Parse *func* as TVMScript and assert exactly one diagnostic is raised.

    Parameters
    ----------
    func : function
        An undecorated function whose source is re-parsed as a
        ``@T.prim_func``; it is expected to contain a deliberate error.
    rel_lineno : int or None
        Line number of the expected error, relative to the start of the
        function's own source.  ``None`` skips the location check.

    If the erroneous line carries a ``# check_error: <msg>`` comment, the
    diagnostic message must equal ``<msg>`` exactly.
    """
    check_error_re = re.compile(r"^.*# check_error: (.+)$")
    # Override the default renderer to accumulate errors instead of printing.
    errors = []

    def render(e):
        for d in e.diagnostics:
            errors.append(d)

    override_renderer(render)
    # The diagnostic context throws an exception when it gets an error;
    # swallow it here so we can inspect the accumulated diagnostics.
    try:
        source_code = inspect.getsource(func)
        # De-indent so the source parses at module level, then re-apply the
        # @T.prim_func decorator that `func` was deliberately defined without.
        indent = len(re.match(r"^\s*", source_code).group(0))
        source_code = "@T.prim_func\n" + "\n".join(
            line[indent:] for line in source_code.splitlines()
        )
        from_source(source_code)
    except tvm.error.DiagnosticError:
        pass
    assert len(errors) == 1, errors
    if rel_lineno is None:
        return
    error = errors[0]
    # The span may land on either side of the injected decorator line.
    assert (
        error.span.line - 1 == rel_lineno or error.span.line == rel_lineno
    ), f"Expected error to be on line {rel_lineno}, but it was on {error.span.line - 1}"
    error_line = source_code.split("\n")[rel_lineno]
    m = check_error_re.match(error_line)
    if m:
        expected_error_text = m.group(1)
        actual_message = error.message
        assert (
            expected_error_text == actual_message
        ), f'check_error expects "{expected_error_text}" in str(errors): {actual_message}'
# NOTE: `check_error` introspects each *inner* function's source and checks
# the diagnostic line number relative to it — do not add or remove lines
# inside the inner functions.
def test_buffer_bind():
    """match_buffer without the backing handle argument is rejected."""
    def buffer_bind_missing_args(a: T.handle) -> None:
        A = T.match_buffer((16, 16), "float32")  # error
    check_error(buffer_bind_missing_args, 2)


def test_undefined_buffer():
    """Realizing an undefined buffer name is rejected."""
    def undefined_buffer(a: T.handle) -> None:
        A = T.match_buffer(a, (16, 16), "float32")
        T.attr(A, "realize_scope", "")
        T.realize(C[0:16, 0:16], "")  # error
        for i in T.serial(16):
            for j in T.serial(0, 16):
                A[i, j] = 0.0
    check_error(undefined_buffer, 5)


def test_unsupported_stmt():
    """Arbitrary Python statements (print) inside TVMScript are rejected."""
    def unsupported_stmt(a: T.int32) -> None:
        if a > 0:
            print("I love tvm")  # error
    check_error(unsupported_stmt, 3)


def test_unsupported_function_call():
    """Unknown loop constructors (T.const_range) are rejected."""
    def unsupported_function_call(a: T.handle) -> None:
        A = T.match_buffer(a, (16, 16), "float32")
        T.attr(A, "realize_scope", "")
        T.realize(A[0:16, 0:16], "")
        for i in T.const_range(16):  # error
            for j in T.serial(0, 16):
                A[i, j] = 0.0
    check_error(unsupported_function_call, 6)


def test_missing_type_annotation():
    """Parameters without a type annotation are rejected."""
    def missing_type_annotation(a) -> None:  # error
        T.evaluate(0.0)
    check_error(missing_type_annotation, 1)


def test_invalid_for_function():
    """`for` over a non-iterable scope handler is rejected."""
    def invalid_for_function(a: T.handle) -> None:
        A = T.match_buffer(a, (16, 16), "float32")
        for i in T.evaluate(0.0):  # error
            for j in T.serial(0, 16):
                A[i, j] = 0.0
    check_error(invalid_for_function, 4)


def test_invalid_block_function():
    """`with` on a non-context special statement is rejected."""
    def invalid_block_function(a: T.handle) -> None:
        A = T.match_buffer(a, (16, 16), "float32")
        with T.evaluate(0.0):  # error
            T.evaluate(1.0)
    check_error(invalid_block_function, 4)


def test_return_not_allowed():
    """`return` statements are not allowed in TVMScript."""
    def return_not_allowed(a: T.handle) -> None:
        return T.evaluate(0)  # error
    check_error(return_not_allowed, 2)


def test_no_body():
    """A scope statement with no body is rejected."""
    def no_body(a: T.handle) -> None:
        A = T.match_buffer(a, (16, 16), "float32")
        T.realize(A, "")  # error
    check_error(no_body, 3)


def test_allocate_with_buffers():
    """`with T.allocate(...) as [A, B]` (multiple targets) is rejected."""
    def allocate_with_buffers() -> None:
        with T.allocate([1], "float32", "") as [A, B]:  # error
            T.evaluate(1.0)
    check_error(allocate_with_buffers, 2)
# NOTE: `check_error` checks line numbers relative to each inner function's
# own source — do not add or remove lines inside the inner functions.
def test_inconsistent_binding():
    """axis.remap with mismatched counts of targets, types, and loop vars."""
    def inconsistent_binding_value() -> None:
        for i, j in T.grid(16, 16):
            vi, vj = T.axis.remap("SS", [i])  # error
            T.evaluate(1.0)
    def inconsistent_binding_type() -> None:
        for i, j in T.grid(16, 16):
            vi, vj = T.axis.remap("S", [i, j])  # error
            T.evaluate(1.0)
    check_error(inconsistent_binding_value, 3)
    check_error(inconsistent_binding_type, 3)


def test_error_remap_args():
    """axis.remap with an invalid type string or non-var binding values."""
    def error_remap_type() -> None:
        for i, j in T.grid(16, 16):
            with T.block():
                vi, vj = T.axis.remap("TT", [i, j])  # error
                T.evaluate(1.0)
    def error_remap_value() -> None:
        for i, j in T.grid(16, 16):
            with T.block():
                vi, vj = T.axis.remap("SS", [i + j, j])  # error
                T.evaluate(1.0)
    check_error(error_remap_type, 4)
    check_error(error_remap_value, 4)


def test_invalid_block_axes():
    """axis.S with a buffer (non-PrimExpr) as the binding value is rejected."""
    def invalid_block_axes(a: T.handle) -> None:
        A = T.match_buffer(a, (16, 16), "float32")
        for i, j in T.grid(16, 16):
            with T.block():
                vi = T.axis.S(i, A)  # error
                T.evaluate(1.0)
    check_error(invalid_block_axes, 5)


def test_duplicate_block_axes():
    """Re-binding the same block-var name, via axis.S or axis.remap."""
    def duplicate_block_axes() -> None:
        for i, j in T.grid(16, 16):
            with T.block():
                vi = T.axis.S(16, i)
                vi = T.axis.S(16, j)  # error
                T.evaluate(1.0)
    def duplicate_block_axes_remap() -> None:
        for i, j in T.grid(16, 16):
            with T.block():
                vi, vi = T.axis.remap("SS", [i, j])  # error
                T.evaluate(1.0)
    check_error(duplicate_block_axes, 5)
    check_error(duplicate_block_axes_remap, 4)


def test_miss_block_bind():
    """axis.S without a binding value is rejected."""
    def miss_block_bind_value() -> None:
        for i, j in T.grid(128, 128):
            with T.block():
                vi = T.axis.S(i)  # error
                T.evaluate(1.0)
    check_error(miss_block_bind_value, 4)


def test_invalid_loop_var():
    """Multiple loop targets over a plain range() are rejected."""
    def invalid_loop_var() -> None:
        for i, j in range(0, 16):  # error
            T.evaluate(1.0)
    check_error(invalid_loop_var, 2)
# NOTE: `check_error` checks line numbers relative to each inner function's
# own source — do not add or remove lines inside the inner functions.
def test_inconsistent_grid():
    """A single loop target against a multi-extent T.grid is rejected."""
    def inconsistent_grid() -> None:
        for i in T.grid(16, 16):  # error
            T.evaluate(1.0)
    check_error(inconsistent_grid, 2)


def test_invalid_match_buffer_region():
    """match_buffer on a block var (not a buffer region) is rejected."""
    def invalid_match_buffer_region() -> None:
        for i, j in T.grid(128, 128):
            with T.block():
                vi, vj = T.axis.remap("SS", [i, j])
                A = T.match_buffer(vi)  # error
                T.evaluate(1.0)
    check_error(invalid_match_buffer_region, 5)


def test_duplicate_buffer():
    """Re-binding the same name to a second alloc_buffer is rejected."""
    def duplicate_buffer() -> None:
        A = T.alloc_buffer((128, 128), "float32")
        A = T.alloc_buffer((128, 128), "float32")  # error
    check_error(duplicate_buffer, 3)


def test_duplicate_block_signature():
    """Each block signature item (reads/writes/where/attrs/init/axes) may
    appear at most once per block."""
    def duplicate_reads() -> None:
        A = T.alloc_buffer((128, 128), "float32")
        for i, j in T.grid(128, 128):
            with T.block():
                vi, vj = T.axis.remap("SS", [i, j])
                T.reads(A[0:8, 0:8])
                T.reads(A[0:16, 0:16])  # error
                T.evaluate(1.0)
    def duplicate_writes() -> None:
        A = T.alloc_buffer((128, 128), "float32")
        for i, j in T.grid(128, 128):
            with T.block():
                vi, vj = T.axis.remap("SS", [i, j])
                T.writes(A[0:8, 0:8])
                T.writes(A[0:16, 0:16])  # error
                T.evaluate(1.0)
    def duplicate_predicate() -> None:
        for i, j in T.grid(16, 16):
            with T.block():
                vi, vj = T.axis.remap("SS", [i, j])
                T.where(1)
                T.where(0)  # error
    def duplicate_annotations() -> None:
        for i, j in T.grid(16, 16):
            with T.block():
                vi, vj = T.axis.remap("SS", [i, j])
                T.block_attr({})
                T.block_attr({})  # error
    def duplicate_init() -> None:
        for i, j in T.grid(16, 16):
            with T.block():
                vi, vj = T.axis.remap("SS", [i, j])
                with T.init():
                    T.evaluate(1.0)
                with T.init():  # error
                    T.evaluate(1.0)
    def duplicate_axes() -> None:
        for i, j in T.grid(16, 16):
            with T.block():
                vi, vj = T.axis.remap("SS", [i, j])
                vi = T.axis.S(i, 16)  # error
                T.evaluate(1.0)
    check_error(duplicate_reads, 7)
    check_error(duplicate_writes, 7)
    check_error(duplicate_predicate, 6)
    check_error(duplicate_annotations, 6)
    check_error(duplicate_init, 7)
    check_error(duplicate_axes, 5)
# NOTE: `check_error` checks line numbers relative to each inner function's
# own source — do not add or remove lines inside the inner functions.
def test_opaque_access_during_complete():
    """Opaque access (raw A.data) during completion raises, with no fixed
    line attributed — hence rel_lineno=None."""
    def opaque_access_during_complete(a: T.handle) -> None:  # error
        A = T.match_buffer(a, (16, 16), "float32")
        for i, j in T.grid(16, 16):
            with T.block():
                T.evaluate(T.call_extern("dummy_extern_function", A.data, dtype="int32"))
    check_error(opaque_access_during_complete, None)


def test_convert_slice_to_bufferload():
    """A buffer slice on the RHS of an arithmetic expression is rejected."""
    def convert_slice_to_bufferload() -> None:
        A = T.alloc_buffer((128, 128), "float32")
        for i, j in T.grid(128, 128):
            with T.block():
                vi, vj = T.axis.remap("SS", [i, j])
                A[vi, vj] = A[vi : vi + 2, vj] + 1  # error
    check_error(convert_slice_to_bufferload, 6)


def test_tvm_exception_catch():
    """Errors raised inside TVM itself surface as diagnostics at the
    offending line, for each statement category."""
    def special_stmt_except() -> None:
        A = T.alloc_buffer("(128, 128)", "float32")  # error
        T.evaluate(1.0)
    def scope_handler_except() -> None:
        for i in T.serial("1", "1"):  # error
            T.evaluate(1)
    def intrin_except_unassign(a: T.handle) -> None:
        A = T.match_buffer(a, (16, 16), "float32")
        T.evaluate(A)  # error
    def intrin_except_assign(a: T.handle) -> None:
        A = T.match_buffer(a, (16, 16), "float32")
        A[0, 0] = A[A]  # error
    check_error(special_stmt_except, 2)
    check_error(scope_handler_except, 2)
    check_error(intrin_except_unassign, 3)
    check_error(intrin_except_assign, 3)
# NOTE: `check_error` checks line numbers relative to each inner function's
# own source — do not add or remove lines inside the inner functions.
def test_match_buffer_shape_mismatch():
    """match_buffer with a shape that disagrees with the source region."""
    def buffer_shape_mismatch(a: T.handle) -> None:
        A = T.match_buffer(a, (8, 8))
        for i, j in T.grid(8, 2):
            with T.block():
                T.reads([])
                T.writes([A[i, j * 4 : j * 4 + 4]])
                sub_A = T.match_buffer(
                    A[i, j * 4 : j * 4 + 4], (5)
                )  # error: shape mismatched between 4 and 5
                for jj in range(0, 4):
                    sub_A[i, j * 4 + jj] = 1
    check_error(buffer_shape_mismatch, 7)


def test_high_dim_store():
    """Storing through a raw allocation with more than one index is rejected."""
    def high_dim_store() -> None:
        with T.block("root"):
            B = T.allocate([256], "float32", "global")
            for i, j in T.grid(16, 16):
                B[i, j] = 1.0  # error: Store is only allowed with one index
    check_error(high_dim_store, 5)


def test_block_has_option_vars():
    """`with T.block(...) as x` is rejected — blocks take no target vars."""
    def block_has_option_vars() -> None:
        with T.block("root") as x:  # error: block does not support option_vars
            T.evaluate(0.0)
    check_error(block_has_option_vars, 2)


def test_implicit_root_has_attrs():
    """The implicit root block may not carry reads/writes/attrs/where/axes."""
    def implicit_root_has_read():
        T.reads([])  # error: implicit root does not support reads
        T.evaluate(0.0)
    def implicit_root_has_write():
        T.writes([])  # error: implicit root does not support writes
        T.evaluate(0.0)
    def implicit_root_has_attrs():
        T.block_attr({})  # error: implicit root does not support block_attr
        T.evaluate(0.0)
    def implicit_root_has_predicate():
        T.where(True)  # error: implicit root does not support predicate
        T.evaluate(0.0)
    def implicit_root_has_axes():
        v = T.axis.S(0, 0)  # error: implicit root does not support axis define
        T.evaluate(0.0)
    check_error(implicit_root_has_read, 2)
    check_error(implicit_root_has_write, 2)
    check_error(implicit_root_has_attrs, 2)
    check_error(implicit_root_has_predicate, 2)
    check_error(implicit_root_has_axes, 2)
# Fixture: block "B" binds vl = l * 16, a non-trivial (not plain loop var)
# binding — used by the schedule-error tests below.
@T.prim_func
def elementwise_not_affine(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, (128, 128, 128, 128))
    B = T.match_buffer(b, (128, 128, 128, 128))
    for i, j, k, l in T.grid(128, 128, 128, 8):
        with T.block("B"):
            vi, vj, vk = T.axis.remap("SSS", [i, j, k])
            vl = T.axis.S(128, l * 16)
            B[vi, vj, vk, vl] = A[vi, vj, vk, vl] * 2.0
# Fixture: two sibling k-loops (blocks "C" and "B") under the shared i, j
# loops — the i/j loop nest is not a single-branch chain, which the
# reorder/fuse error tests below rely on.
@T.prim_func
def elementwise_non_single_branch(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, (128, 128, 128))
    C = T.alloc_buffer((128, 128, 128))
    B = T.match_buffer(b, (128, 128, 128))
    for i, j in T.grid(128, 128):
        for k in T.serial(0, 128):
            with T.block("C"):
                vi, vj, vk = T.axis.remap("SSS", [i, j, k])
                C[vi, vj, vk] = A[vi, vj, vk] * 2.0
        for k in T.serial(0, 128):
            with T.block("B"):
                vi, vj, vk = T.axis.remap("SSS", [i, j, k])
                B[vi, vj, vk] = C[vi, vj, vk] * 2.0
def test_reorder_fail_block():
    """reorder() across block "B"'s non-trivial binding fails, and the error
    rendering points at the offending block."""
    sch = tir.Schedule(elementwise_not_affine, debug_mask="all")
    loops = sch.get_loops(sch.get_block("B"))
    with pytest.raises(tvm.tir.ScheduleError) as execinfo:
        sch.reorder(loops[3], loops[0])
    expected_sub_error_message = (
        " # tir.Block#0\n"
        ' with T.block("B"):\n'
        " ^^^^^^^^^^^^^^^^^^\n"
    )
    assert expected_sub_error_message in str(execinfo.value)
def test_reorder_fail_nested_loop_inner():
    """reorder() under the non-single-branch nest fails, and the rendering
    highlights the inner `for j` loop."""
    sch = tir.Schedule(elementwise_non_single_branch, debug_mask="all")
    outer, _, inner = sch.get_loops(sch.get_block("B"))
    with pytest.raises(tvm.tir.ScheduleError) as execinfo:
        sch.reorder(inner, outer)
    expected_sub_error_message = (
        " for i in T.serial(128):\n"
        " # tir.For#0\n"
        " for j in T.serial(128):\n"
        " ^^^^^^^^^^^^^^^^^^^^^^^\n"
    )
    assert expected_sub_error_message in str(execinfo.value)
def test_fuse_fail_nested_loop_outer():
    """fuse() of non-adjacent loops fails, and the rendering highlights the
    outer `for i` loop."""
    sch = tir.Schedule(elementwise_non_single_branch, debug_mask="all")
    outer, _, inner = sch.get_loops(sch.get_block("B"))
    with pytest.raises(tvm.tir.ScheduleError) as execinfo:
        sch.fuse(inner, outer)
    expected_sub_error_message = (
        " # tir.For#1\n"
        " for i in T.serial(128):\n"
        " ^^^^^^^^^^^^^^^^^^^^^^^\n"
        " for j in T.serial(128):\n"
    )
    assert expected_sub_error_message in str(execinfo.value)
def test_report_error_root_block():
    """compute_inline() on the root block fails, and the error rendering
    points at the root block itself."""
    sch = tir.Schedule(elementwise_non_single_branch, debug_mask="all")
    root_block = sch.get_block("root")
    with pytest.raises(tvm.tir.ScheduleError) as execinfo:
        sch.compute_inline(root_block)
    expected_sub_error_message = (
        " # tir.Block#0\n"
        ' with T.block("root"):\n'
        " ^^^^^^^^^^^^^^^^^^^^^\n"
    )
    assert expected_sub_error_message in str(execinfo.value)
# NOTE: `check_error` checks line numbers relative to each inner function's
# own source — do not add or remove lines inside the inner functions.
def test_load_var():
    """Loading a scalar var with multiple indices is rejected."""
    def load_var_multiple() -> None:
        d = T.var("float32")
        d[2] = d[2, 1]  # error cannot provide two indices to load
    check_error(load_var_multiple, 3)


def test_store_var():
    """Storing to a scalar var with multiple indices is rejected."""
    def store_var_multiple() -> None:
        d = T.var("float32")
        d[2, 1] = d[1]  # error cannot provide two indices to store
    check_error(store_var_multiple, 3)


def test_load_handle():
    """Loading through a raw handle (not a matched buffer) is rejected."""
    def load_handle(h: T.handle) -> None:
        h_ = T.match_buffer(h, [1])
        h_[0] = h[0]  # error cannot load from handle
    check_error(load_handle, 3)


def test_store_handle():
    """Storing through a raw handle (not a matched buffer) is rejected."""
    def store_handle(h: T.handle) -> None:
        h_ = T.match_buffer(h, [1])
        h[0] = h_[0]  # error cannot store to handle
    check_error(store_handle, 3)


def test_binop_bad_ast_type():
    """Binary op with a non-PrimExpr operand (a list) is rejected."""
    def binop_bad_ast_type(h: T.handle):
        h_ = T.match_buffer(h, [1])
        h_[0] = h + [2]  # error rhs should be a primexpr
    check_error(binop_bad_ast_type, 3)


def test_binop_bad_type():
    """Binary op mixing a handle with an integer is rejected."""
    def binop_bad_type(h: T.handle):
        h_ = T.match_buffer(h, [1])
        h_[0] = h + 2  # error lhs and rhs should be the same type
    check_error(binop_bad_type, 3)


def test_non_integer_typed_block_iter():
    """Block iter vars must have an integer dtype."""
    def non_integer_typed_block_iter():
        with T.block():
            i = T.axis.S(0.1, 0.1)  # error IterVar requires an integer dtype
    check_error(non_integer_typed_block_iter, 3)


def test_preflattened_buffer_map_align():
    """preflattened_buffer rejects a non-integer `align` argument."""
    def preflattened_buffer_map_align_nonint(foo: T.handle):
        foo_1 = T.match_buffer(foo, [1])
        T.preflattened_buffer(
            foo_1, [1], align="bar"
        )  # check_error: align: want int or IntImm, got 'bar'
    check_error(preflattened_buffer_map_align_nonint, 3)


def test_preflattened_buffer_map_offset_factor():
    """preflattened_buffer rejects a non-integer `offset_factor` argument."""
    def preflattened_buffer_map_offset_factor_nonint(foo: T.handle):
        foo_1 = T.match_buffer(foo, [1])
        T.preflattened_buffer(
            foo_1, [1], offset_factor="bar"
        )  # check_error: offset_factor: want int or IntImm, got 'bar'
    check_error(preflattened_buffer_map_offset_factor_nonint, 3)
# NOTE: `check_error` checks line numbers relative to each inner function's
# own source — do not add or remove lines inside the inner functions.
def test_illegal_buffer_slice():
    """Strided, reversed, or non-constant-length buffer slices are rejected."""
    def strided_buffer_region(A: T.handle):
        # do not allow stride in buffer region
        A = T.match_buffer((128, 128), "int32")
        with T.block():
            T.reads([])
            T.writes([A[0:128:2, 0:128:3]])  # error
            T.evaluate(T.call_extern("strided_compute", dtype=""))
    def access_reversed_slice(A: T.handle):
        # do not allow reversed slice step
        A = T.match_buffer((128,), "int32")
        A[0:128:-1] = T.broadcast(1, 128)  # error
    def access_non_const_slice_length(A: T.handle):
        # do not allow non-constant slice length
        A = T.match_buffer((128,), "int32")
        for i in range(4):
            T.evaluate(A[0:i:1])  # error
    check_error(strided_buffer_region, 3)
    check_error(access_reversed_slice, 3)
    check_error(access_non_const_slice_length, 3)


def test_syntax_sugar_fail():
    """thread_binding sugar with a non-string thread argument is rejected."""
    def loop_syntax_sugar_fail(a: T.handle) -> None:
        A = T.match_buffer(a, (128,))
        for i in T.thread_binding(128, 128):
            A[i] = A[i] * 2.0
    check_error(loop_syntax_sugar_fail, 3)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tvmscript_ir_builder_base.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unittests for tvm.script.ir_builder.base"""
import pytest
from tvm.script.ir_builder import IRBuilder
def test_ir_builder_scope():
    """Entering an IRBuilder context makes it the current builder."""
    with IRBuilder() as builder:
        assert IRBuilder.current() == builder
def test_ir_builder_multi_scope():
    """Nested builders stack: the innermost is current while its scope is
    open, and the outer builder is restored when it exits."""
    with IRBuilder() as outer:
        with IRBuilder() as inner:
            assert IRBuilder.current() == inner
        assert IRBuilder.current() == outer
def test_ir_builder_no_scope():
    """Querying the current builder outside any scope raises ValueError."""
    with pytest.raises(ValueError):
        _ = IRBuilder.current()
if __name__ == "__main__":
test_ir_builder_scope()
test_ir_builder_multi_scope()
test_ir_builder_no_scope()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tvmscript_ir_builder_irmodule.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unittests for tvm.script.ir_builder.ir"""
import pytest
import tvm.testing
from tvm.script.ir_builder import IRBuilder
from tvm.script.ir_builder import ir as I
from tvm import ir
from tvm.ir.base import assert_structural_equal
def test_ir_builder_irmodule():
    """An empty I.ir_module() scope builds an empty ir.IRModule."""
    with IRBuilder() as builder:
        with I.ir_module():
            pass
    built_module = builder.get()
    # Hand-construct the empty module for comparison.
    empty_module = ir.IRModule(None, None)
    assert_structural_equal(built_module, empty_module, map_free_vars=True)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tvmscript_ir_builder_tir.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, missing-docstring
"""Unittests for tvm.script.ir_builder.tir"""
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm import tir
from tvm.ir.base import assert_structural_equal
from tvm.runtime import ndarray
from tvm.script.ir_builder import IRBuilder
from tvm.script.ir_builder import tir as T
def test_ir_builder_tir_primfunc_base():
    """A bare T.prim_func() with one evaluate builds the minimal PrimFunc."""
    with IRBuilder() as builder:
        with T.prim_func():
            T.evaluate(0)
    built = builder.get()
    # Hand-construct the minimal PrimFunc for comparison.
    reference = tir.PrimFunc(
        params=[],
        body=tir.Evaluate(0),
        ret_type=None,
        buffer_map=None,
        preflattened_buffer_map=None,
        attrs=None,
    )
    assert_structural_equal(built, reference, map_free_vars=True)
def test_ir_builder_tir_primfunc_complete():
    """Exercise the full prim_func surface: handle/int64/buffer parameters,
    func_attr, an explicit return type, match_buffer and preflattened_buffer."""
    with IRBuilder() as ib:
        with T.prim_func():
            T.arg("a", T.handle())
            T.arg("b", T.var("int64"))
            T.arg("c", T.buffer_decl((128, 128), "float32"))
            d = T.arg("d", T.handle())
            e = T.arg("e", T.buffer_decl((1024,), "int8"))
            T.func_attr({"key": "value"})
            T.func_ret(tvm.ir.PrimType("int64"))
            # match_buffer is called for its buffer_map side effect (see the
            # expected buffer_map below); the returned handle is unused.
            buffer_d = T.match_buffer(d, (64, 64), "int64")
            T.preflattened_buffer(e, (32, 32), "int8", data=e.data)
            T.evaluate(0)
    # the prim_func generated by IRBuilder
    prim_func_actual = ib.get()
    # the expected prim_func
    c_handle, c_buffer = tir.Var("c_handle", "handle"), tir.decl_buffer(
        (128, 128), "float32", name="c"
    )
    d_handle, d_buffer = tir.Var("d", "handle"), tir.decl_buffer((64, 64), "int64", name="d")
    e_handle, e_buffer = tir.Var("e_handle", "handle"), tir.decl_buffer((1024,), "int8", name="e")
    prim_func_expected = tir.PrimFunc(
        params=[
            tir.Var("a", "handle"),
            tir.Var("b", "int64"),
            c_handle,
            d_handle,
            e_handle,
        ],
        body=tir.Evaluate(0),
        ret_type=tvm.ir.PrimType("int64"),
        buffer_map={c_handle: c_buffer, d_handle: d_buffer, e_handle: e_buffer},
        preflattened_buffer_map={
            e_handle: tir.decl_buffer((32, 32), "int8", name="e_preflatten", data=e_buffer.data)
        },
        attrs=tvm.ir.make_node("DictAttrs", key="value"),
    )
    # Check if the generated ir is expected
    assert_structural_equal(prim_func_actual, prim_func_expected, map_free_vars=True)
def test_ir_builder_tir_block_base():
    """A bare T.block("block") builds a BlockRealize over an empty block
    carrying only the access-detection annotation."""
    with IRBuilder() as ib:
        with T.block("block"):
            T.evaluate(0)
    # the block generated by IRBuilder
    block_realize_actual = ib.get()
    # the expected block
    block_expected = tir.Block(
        iter_vars=[],
        reads=[],
        writes=[],
        name_hint="block",
        body=tir.Evaluate(0),
        alloc_buffers=None,
        match_buffers=None,
        annotations={"tir.script_parsing_detect_access": tir.IntImm("int64", 3)},
    )
    block_realize_expected = tir.BlockRealize(
        iter_values=[],
        predicate=True,
        block=block_expected,
    )
    # Check if the generated ir is expected
    assert_structural_equal(block_realize_actual, block_realize_expected, map_free_vars=True)
def test_ir_builder_tir_block_complete():
    """Build a block with every optional component (where/reads/writes/attrs/
    alloc_buffer/match_buffer/axis) and compare against a hand-constructed
    tir.Block wrapped in a tir.BlockRealize."""
    with IRBuilder() as ib:
        a = T.var("int64", "a")
        b = T.buffer_decl((128, 128), "float32")
        c = T.buffer_decl((128, 128), "float32")
        d = T.var("int32", "d")
        e = T.buffer_decl((128, 128), "float32")
        f = T.var("int32", "f")
        with T.block("block"):
            T.where(a > 1)
            T.reads(b[0:16, 0:16])
            T.writes(c[d:128, d:128])
            T.block_attr({"key": "value"})
            T.alloc_buffer((128, 128), "float32")
            T.match_buffer(e[0:32, 0:32], (32, 32), "float32")
            T.axis.spatial(128, f)
            T.evaluate(0)
    # the block generated by IRBuilder
    block_realize_actual = ib.get()
    # the expected block
    var_a = tir.Var("a", "int64")
    buffer_b = tir.decl_buffer((128, 128), "float32", name="b")
    buffer_c = tir.decl_buffer((128, 128), "float32", name="c")
    var_d = tir.Var("d", "int32")
    # Fixed copy-paste: this buffer mirrors builder-side `e`, so name it "e"
    # (structural equality ignores names, but "c" was misleading).
    buffer_e = tir.decl_buffer((128, 128), "float32", name="e")
    var_f = tir.Var("f", "int32")
    block_expected = tir.Block(
        iter_vars=[tir.IterVar((0, 128), tir.Var("", "int32"), iter_type=tir.IterVar.DataPar)],
        reads=[buffer_b[0:16, 0:16]],
        writes=[buffer_c[var_d:128, var_d:128]],
        name_hint="block",
        body=tir.Evaluate(0),
        alloc_buffers=[tir.decl_buffer((128, 128), "float32")],
        match_buffers=[
            tir.MatchBufferRegion(tir.decl_buffer((32, 32), "float32"), buffer_e[0:32, 0:32])
        ],
        annotations={"key": "value"},
    )
    block_realize_expected = tir.BlockRealize(
        iter_values=[var_f],
        predicate=var_a > 1,
        block=block_expected,
    )
    # Check if the generated ir is expected
    assert_structural_equal(block_realize_actual, block_realize_expected, map_free_vars=True)
def test_ir_builder_tir_axis():
    """Each T.axis constructor maps to its IterVar iter_type:
    spatial->DataPar, reduce->CommReduce, scan->Ordered, opaque->DimInfo."""
    with IRBuilder() as ib:
        a = T.var("int32", "a")
        b = T.var("int32", "b")
        c = T.var("int32", "c")
        d = T.var("int32", "d")
        with T.block("block"):
            T.axis.spatial(8, a)
            T.axis.reduce(16, b)
            T.axis.scan(32, c)
            T.axis.opaque(64, d)
            T.evaluate(0)
    # the block generated by IRBuilder
    block_realize_actual = ib.get()
    # the expected block
    var_a = tir.Var("a", "int32")
    var_b = tir.Var("b", "int32")
    var_c = tir.Var("c", "int32")
    var_d = tir.Var("d", "int32")
    block_expected = tir.Block(
        iter_vars=[
            tir.IterVar((0, 8), tir.Var("", "int32"), iter_type=tir.IterVar.DataPar),
            tir.IterVar((0, 16), tir.Var("", "int32"), iter_type=tir.IterVar.CommReduce),
            tir.IterVar((0, 32), tir.Var("", "int32"), iter_type=tir.IterVar.Ordered),
            tir.IterVar((0, 64), tir.Var("", "int32"), iter_type=tir.IterVar.DimInfo),
        ],
        reads=[],
        writes=[],
        name_hint="block",
        body=tir.Evaluate(0),
        annotations={"tir.script_parsing_detect_access": tir.IntImm("int64", 3)},
    )
    block_realize_expected = tir.BlockRealize(
        iter_values=[var_a, var_b, var_c, var_d],
        predicate=True,
        block=block_expected,
    )
    # Check if the generated ir is expected
    assert_structural_equal(block_realize_actual, block_realize_expected, map_free_vars=True)
def test_ir_builder_tir_for():
    """Nest one loop of every ForKind (serial > parallel > vectorized >
    unrolled > thread_binding) and compare with hand-built tir.For nodes,
    constructed innermost-first."""
    with IRBuilder() as ib:
        with T.serial(128) as a:
            with T.parallel(64) as b:
                with T.vectorized(32) as c:
                    with T.unroll(16) as d:
                        with T.thread_binding(8, thread="threadIdx.x") as e:
                            T.evaluate(0)
    # the for generated by IRBuilder
    for_actual = ib.get()
    # the expected for, built from the innermost loop outwards
    thread_binding_expected = tir.For(
        loop_var=tir.Var("", "int32"),
        min_val=0,
        extent=8,
        kind=tir.ForKind.THREAD_BINDING,
        body=tir.Evaluate(0),
        thread_binding=tir.IterVar(
            None, tir.Var("", "int32"), tir.IterVar.ThreadIndex, "threadIdx.x"
        ),
    )
    unroll_expected = tir.For(
        loop_var=tir.Var("", "int32"),
        min_val=0,
        extent=16,
        kind=tir.ForKind.UNROLLED,
        body=thread_binding_expected,
    )
    vectorized_expected = tir.For(
        loop_var=tir.Var("", "int32"),
        min_val=0,
        extent=32,
        kind=tir.ForKind.VECTORIZED,
        body=unroll_expected,
    )
    parallel_expected = tir.For(
        loop_var=tir.Var("", "int32"),
        min_val=0,
        extent=64,
        kind=tir.ForKind.PARALLEL,
        body=vectorized_expected,
    )
    for_expected = tir.For(
        loop_var=tir.Var("", "int32"),
        min_val=0,
        extent=128,
        kind=tir.ForKind.SERIAL,
        body=parallel_expected,
    )
    # Check if the generated ir is expected
    assert_structural_equal(for_actual, for_expected, map_free_vars=True)
def test_ir_builder_tir_assert():
    """T.Assert builds a tir.AssertStmt with condition, message, and body."""
    with IRBuilder() as ib:
        with T.Assert(T.var("int32", name="a") == 0, message="a is 0"):
            T.evaluate(0)
    # the assert generated by IRBuilder
    assert_actual = ib.get()
    # the expected assert statement
    assert_expected = tir.AssertStmt(
        T.var("int32", name="a") == 0, tir.StringImm("a is 0"), tir.Evaluate(0)
    )
    # Check if the generated ir is expected
    assert_structural_equal(assert_actual, assert_expected, map_free_vars=True)


def test_ir_builder_tir_let():
    """T.let builds a tir.LetStmt binding the var over its body."""
    with IRBuilder() as ib:
        with T.let(T.var("int32", name="a"), tir.IntImm("int32", 2)):
            T.evaluate(0)
    # the let binding generated by IRBuilder
    let_actual = ib.get()
    # the expected Let statement
    let_expected = tir.LetStmt(T.var("int32", name="a"), tir.IntImm("int32", 2), tir.Evaluate(0))
    # Check if the generated ir is expected
    assert_structural_equal(let_actual, let_expected, map_free_vars=True)
def test_ir_builder_tir_realize():
    """T.realize builds a BufferRealize wrapped in a realize_scope AttrStmt."""
    buffer_a = T.buffer_decl((128, 128), "float32")
    with IRBuilder() as ib:
        with T.realize(buffer_a[0:128, 0:128], "test_storage_scope", True):
            T.evaluate(0)
    # the buffer realization generated by IRBuilder
    realize_actual = ib.get()
    # the expected buffer realization
    buffer_realize = tir.BufferRealize(
        buffer_a, [tvm.ir.Range(0, 128), tvm.ir.Range(0, 128)], True, tir.Evaluate(0)
    )
    expected_realize = tir.AttrStmt(
        buffer_a, "realize_scope", tir.StringImm("test_storage_scope"), buffer_realize
    )
    # Check if the generated ir is expected
    assert_structural_equal(realize_actual, expected_realize, map_free_vars=True)


def test_ir_builder_tir_thread():
    """T.env_thread + T.launch_thread build a thread_extent AttrStmt over an
    IterVar tagged with the thread name."""
    with IRBuilder() as ib:
        with T.prim_func():
            brow = T.env_thread("blockIdx.y")
            with T.launch_thread(brow, 1):
                T.evaluate(0)
    # the prim_func generated by IRBuilder
    ir_actual = ib.get()
    # the expected prim_func
    iter_var = tir.IterVar((0, 1), "v", iter_type=1, thread_tag="blockIdx.y")
    attr_stmt = tir.AttrStmt(iter_var, "thread_extent", 1, tir.Evaluate(0))
    func = tir.PrimFunc([], attr_stmt)
    # Check if the generated ir is expected
    assert_structural_equal(ir_actual, func, map_free_vars=True)
def test_ir_builder_tir_allocate():
    """T.allocate builds a tir.Allocate whose buffer var carries the scope
    in its pointer type."""
    with IRBuilder() as ib:
        with T.allocate([10], "float32", scope="local"):
            T.evaluate(1)
    # the allocate generated by IRBuilder
    ir_actual = ib.get()
    # the expected allocate
    buffer_var = tir.Var("v", tvm.ir.PointerType(tvm.ir.PrimType("float32"), "local"))
    ir_expected = tir.Allocate(
        buffer_var, "float32", [10], tvm.tir.const(1, "uint1"), tir.Evaluate(1)
    )
    # Check if the generated ir is expected
    assert_structural_equal(ir_actual, ir_expected, map_free_vars=True)


def test_ir_builder_tir_allocate_const():
    """T.allocate_const builds a tir.AllocateConst backed by an NDArray of
    the given data."""
    data = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
    with IRBuilder() as ib:
        with T.allocate_const(data, "int32", [10]):
            T.evaluate(1)
    # the allocate const generated by IRBuilder
    ir_actual = ib.get()
    # the expected allocate const
    buffer_var = tir.Var("v", tvm.ir.PointerType(tvm.ir.PrimType("int32")))
    ir_expected = tir.AllocateConst(
        buffer_var,
        "int32",
        [10],
        ndarray.array(np.asarray(data, "int32")),
        tir.Evaluate(1),
        annotations={},
    )
    # Check if the generated ir is expected
    assert_structural_equal(ir_actual, ir_expected, map_free_vars=True)
def test_ir_builder_tir_while():
    """T.While builds a tir.While with the given condition and body."""
    with IRBuilder() as builder:
        with T.While(T.var("int32", "x") > 0):
            T.evaluate(0)
    built = builder.get()
    # Hand-construct the expected while loop for comparison.
    reference = tir.While(tir.Var("x", "int32") > 0, tir.Evaluate(0))
    assert_structural_equal(built, reference, map_free_vars=True)
def test_ir_builder_tir_if_then_else():
    """T.If with T.Then/T.Else scopes builds a tir.IfThenElse."""
    with IRBuilder() as ib:
        with T.If(T.var("int32", "c") < 12):
            with T.Then():
                T.evaluate(T.int32(0))
            with T.Else():
                T.evaluate(T.int32(1))
    # the if_then_else generated by IRBuilder
    ir_actual = ib.get()
    # the expected if_then_else
    ir_expected = tir.IfThenElse(
        tir.Var("c", "int32") < 12,
        tir.Evaluate(tir.IntImm("int32", 0)),
        tir.Evaluate(tir.IntImm("int32", 1)),
    )
    # Check if the generated ir is expected
    assert_structural_equal(ir_actual, ir_expected, map_free_vars=True)
def test_ir_builder_tir_buffer_store():
    """T.buffer_store builds a tir.BufferStore with the same buffer, value,
    and indices."""
    target = T.buffer_decl((10, 10), "float32")
    idx = T.var("int32", "x")
    with IRBuilder() as builder:
        T.buffer_store(target, 0.1, [0, idx])
    built = builder.get()
    # Hand-construct the expected store, reusing the same buffer and index var.
    reference = tir.BufferStore(target, 0.1, [0, idx])
    assert_structural_equal(built, reference, map_free_vars=True)
def test_ir_builder_tir_prefetch():
    """T.prefetch builds a tir.Prefetch on the same buffer."""
    with IRBuilder() as builder:
        target = T.buffer_decl((128, 128), "float32")
        T.prefetch(target, [])
    built = builder.get()
    reference = tir.Prefetch(target, [])
    assert_structural_equal(built, reference, map_free_vars=True)
def test_ir_builder_tir_evaluate():
    """IRBuilder-built Evaluate must match a directly constructed tir.Evaluate."""
    with IRBuilder() as ib:
        T.evaluate(0)
    built = ib.get()
    expected = tir.Evaluate(0)
    assert_structural_equal(built, expected, map_free_vars=True)
def test_ir_builder_tir_decl_buffer():
    """IRBuilder's decl_buffer must lower to an Allocate wrapping a DeclBuffer."""
    with IRBuilder() as ib:
        with T.decl_buffer([128, 128], "float32"):
            T.evaluate(0)
    built = ib.get()
    # Expected IR: allocate the buffer's backing storage, then declare the
    # buffer over it and evaluate 0 in the body.
    buf = T.buffer_decl((128, 128), "float32")
    expected = tir.Allocate(
        buf.data,
        "float32",
        (128, 128),
        tir.IntImm("bool", True),
        tir.DeclBuffer(buf, tir.Evaluate(0)),
    )
    assert_structural_equal(built, expected, map_free_vars=True)
def test_ir_builder_tir_inline():
    """meta_var values are inlined into the generated IR as a constant."""
    with IRBuilder() as ib:
        m, n = T.meta_var(1), T.meta_var(2)
        a, b = T.meta_var([3, 4])
        T.evaluate(m.value + n.value + a.value + b.value)
    built = ib.get()
    # 1 + 2 + 3 + 4 folds to the constant 10
    expected = tir.Evaluate(10)
    assert_structural_equal(built, expected, map_free_vars=True)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tvmscript_meta_programming.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm.script import tir as T
def matmul_generator(M: int, N: int, K: int, dtype: str):
    """Return a TVMScript matmul PrimFunc specialized to the given shapes/dtype.

    Demonstrates meta-programming: M, N, K and dtype are captured from the
    enclosing Python scope when the TVMScript body is parsed.
    """

    @T.prim_func
    def matmul(a: T.handle, b: T.handle, c: T.handle) -> None:
        A = T.match_buffer(a, [M, K], dtype=dtype)
        B = T.match_buffer(b, [N, K], dtype=dtype)
        C = T.match_buffer(c, [M, N], dtype=dtype)
        for i, j, k in T.grid(M, N, K):
            with T.block():
                vi, vj, vk = T.axis.remap("SSR", [i, j, k])
                with T.init():
                    C[vi, vj] = T.float32(0)
                # note: B is indexed as [vj, vk], i.e. this computes C = A @ B.T
                C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]

    return matmul
# Reference function: what matmul_generator(128, 128, 128, "float16") should
# produce (checked by test_meta_programming_matmul via structural equality).
@T.prim_func
def matmul_128_128_128_fp16(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [128, 128], dtype="float16")
    B = T.match_buffer(b, [128, 128], dtype="float16")
    C = T.match_buffer(c, [128, 128], dtype="float16")
    for i, j, k in T.grid(128, 128, 128):
        with T.block():
            vi, vj, vk = T.axis.remap("SSR", [i, j, k])
            with T.init():
                C[vi, vj] = T.float32(0)
            C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
def test_meta_programming_matmul():
    """The generated 128x128x128 fp16 matmul equals the hand-written reference."""
    generated = matmul_generator(128, 128, 128, "float16")
    tvm.ir.assert_structural_equal(generated, matmul_128_128_128_fp16)
if __name__ == "__main__":
test_meta_programming_matmul()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tvmscript_ops.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm.script import tir as T
import numpy as np
import tvm.testing
@T.prim_func
def get_valid_counts(
    data: T.handle,
    valid_count: T.handle,
    out: T.handle,
    out_indices: T.handle,
    score_threshold: T.float32,
    id_index: T.int32,
    score_index: T.int32,
) -> None:
    # Compacts boxes whose score exceeds score_threshold (and, when
    # id_index >= 0, whose class id is non-negative) to the front of `out`,
    # recording the kept count in `valid_count` and the boxes' original
    # positions in `out_indices`. Remaining slots are padded with -1.
    data_buf = T.match_buffer(data, (1, 2500, 6), "float32")
    valid_count_buf = T.match_buffer(valid_count, (1,), "int32")
    out_buf = T.match_buffer(out, (1, 2500, 6), "float32")
    out_indices_buf = T.match_buffer(out_indices, (1, 2500), "int32")
    with T.block("init"):
        vi = T.axis.S(1, 0)
        valid_count_buf[vi] = T.int32(0)
        for j in range(2500):
            with T.block("update"):
                vj = T.axis.S(2500, j)
                T.reads([data_buf[vi, vj, 6]])
                T.writes([valid_count_buf[vi], out_indices_buf[vi, vj], out_buf[vi, vj, 6]])
                if (data_buf[vi, vj, score_index] > score_threshold) and (
                    (id_index < 0) or (data_buf[vi, vj, id_index] >= T.float32(0))
                ):
                    # keep this box: copy it to the next free slot and bump the count
                    for k in T.serial(0, 6):
                        out_buf[vi, valid_count_buf[vi], k] = data_buf[vi, vj, k]
                    out_indices_buf[vi, valid_count_buf[vi]] = vj
                    valid_count_buf[vi] = valid_count_buf[vi] + 1
                if vj >= valid_count_buf[vi]:
                    # positions at or past the kept count are padded with -1
                    for k in T.serial(0, 6):
                        out_buf[vi, vj, k] = T.float32(-1)
                    out_indices_buf[vi, vj] = T.int32(-1)
def _check_get_valid_counts_with_numpy(f, dshape, score_threshold, id_index, score_index):
    """Run built func `f` and compare against a straightforward numpy reference."""
    dtype = "float32"
    ctx = tvm.cpu()
    batch_size, num_anchor, elem_length = dshape
    np_data = np.random.uniform(low=-2, high=2, size=dshape).astype(dtype)
    np_out1 = np.zeros(shape=(batch_size,), dtype="int32")
    np_out2 = np.zeros(shape=dshape).astype(dtype)
    np_out3 = np.zeros(shape=(batch_size, num_anchor), dtype="int32")
    for i in range(batch_size):
        np_out1[i] = 0
        inter_idx = 0  # next free slot in the compacted output
        for j in range(num_anchor):
            score = np_data[i, j, score_index]
            if score > score_threshold and (id_index < 0 or np_data[i, j, id_index] >= 0):
                for k in range(elem_length):
                    np_out2[i, inter_idx, k] = np_data[i, j, k]
                np_out1[i] += 1
                np_out3[i, inter_idx] = j
                inter_idx += 1
            if j >= np_out1[i]:
                # pad positions at or past the kept count with -1
                for k in range(elem_length):
                    np_out2[i, j, k] = -1.0
                np_out3[i, j] = -1
    in_data = tvm.nd.array(np_data, ctx)
    out1 = tvm.nd.array(np_out1, ctx)
    out2 = tvm.nd.array(np_out2, ctx)
    out3 = tvm.nd.array(np_out3, ctx)
    f(in_data, out1, out2, out3, score_threshold, id_index, score_index)
    tvm.testing.assert_allclose(out1.numpy(), np_out1, rtol=1e-5)
    tvm.testing.assert_allclose(out2.numpy(), np_out2, rtol=1e-5)
    tvm.testing.assert_allclose(out3.numpy(), np_out3, rtol=1e-5)
    print("test get_valid_counts end")
def test_get_valid_counts_script_func():
    """get_valid_counts prints back as script, builds on LLVM, and matches numpy."""
    target = "llvm"
    # check that the prim func and the wrapping module can be re-printed
    print(get_valid_counts.script())
    mod = tvm.ir.IRModule({"get_valid_counts": get_valid_counts})
    print(mod.script())
    # check building and numerical behavior
    built = tvm.build(mod["get_valid_counts"], target=target)
    _check_get_valid_counts_with_numpy(built, (1, 2500, 6), 0.0, 0, 1)
# Zero-rank (scalar) buffer allocation written without an explicit root block.
@T.prim_func
def alloc_zero_dim_buffer(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [], dtype="float32")
    B = T.match_buffer(b, [], dtype="float32")
    # body
    # tir.with block("root")
    C = T.alloc_buffer([], dtype="float32")
    A[()] = T.float32(2)
    C[()] = A[()] + B[()]
    B[()] = C[()]
# Same computation as alloc_zero_dim_buffer, but with an explicit "root" block;
# test_alloc_zero_dim_buffer_round_trip asserts the two are structurally equal.
@T.prim_func
def alloc_zero_dim_buffer_block(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, (), "float32")
    B = T.match_buffer(b, (), "float32")
    with T.block("root"):
        T.reads([])
        T.writes([])
        C = T.alloc_buffer((), "float32")
        A[()] = T.float32(2)
        C[()] = A[()] + B[()]
        B[()] = C[()]
def _check_alloc_zero_dim_buffer(f):
    """Check a built zero-dim-buffer func against the equivalent numpy computation."""
    dtype = "float32"
    ctx = tvm.cpu()
    np_data = np.zeros(shape=()).astype(dtype)
    np_out = np.zeros(shape=()).astype(dtype)
    tvm_data = tvm.nd.array(np_data, ctx)
    tvm_out = tvm.nd.array(np_out, ctx)
    # np func execution
    np_inter = np.array(1)
    np_data[()] = 2.0
    np_inter[()] = np_data[()] + np_out[()]
    np_out[()] = np_inter[()]
    # tvm func execution
    f(tvm_data, tvm_out)
    tvm.testing.assert_allclose(tvm_out.numpy(), np_out, rtol=1e-5)
def test_alloc_zero_dim_buffer_round_trip():
    """Zero-dim buffer funcs survive a script round-trip and build identically."""
    plain = alloc_zero_dim_buffer
    blocked = alloc_zero_dim_buffer_block
    rt_plain = tvm.script.from_source(plain.script(show_meta=True))
    rt_blocked = tvm.script.from_source(blocked.script(show_meta=True))
    mod_plain = tvm.build(rt_plain, "llvm")
    mod_blocked = tvm.build(rt_blocked, "llvm")
    tvm.ir.assert_structural_equal(plain, blocked)
    tvm.ir.assert_structural_equal(rt_plain, rt_blocked)
    _check_alloc_zero_dim_buffer(mod_plain)
    _check_alloc_zero_dim_buffer(mod_blocked)
# In-place ceiling division of each element by 4 via the T.ceildiv intrinsic.
@T.prim_func
def ceildiv_test(A: T.Buffer[16, "int32"]):
    for i in range(16):
        A[i] = T.ceildiv(A[i], 4)
@tvm.testing.requires_llvm
def test_ceildiv():
    """T.ceildiv matches ceiling division computed in numpy."""
    func = tvm.build(ceildiv_test, "llvm")
    arr = tvm.nd.array(np.arange(16).astype("int32"))
    func(arr)
    expected = (np.arange(16) + 3) // 4  # ceil(x / 4) for non-negative x
    tvm.testing.assert_allclose(arr.numpy(), expected)
if __name__ == "__main__":
test_get_valid_counts_script_func()
test_alloc_zero_dim_buffer_round_trip()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tvmscript_parser_evaluator.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unittests for tvm.script.parser.evaluator"""
import pytest
import tvm.testing
from tvm.script.parser.core.diagnostics import Source
from tvm.script.parser.core.evaluator import ExprEvaluator
def _calc(expr, extra_vars=None):
    """Parse `expr` as Python source and evaluate its first expression statement."""
    if extra_vars is None:
        extra_vars = {}
    module_ast = Source(expr).as_ast()
    first_stmt = module_ast.body[0]
    return ExprEvaluator.eval(None, extra_vars, first_stmt.value)
def test_evaluator_basic():
    """Python literals evaluate to themselves."""
    assert _calc("1, 3.14, True, 'str'") == (1, 3.14, True, "str")
def test_evaluator_op():
    """Arithmetic operators are evaluated."""
    assert _calc("1 + 2, 1 - 2, 1 * 2, 1 / 2") == (3, -1, 2, 0.5)
def test_evaluator_value_table():
    """Names are resolved from the supplied variable table."""
    x, y = 1, 2
    result = _calc("a + b, a - b, a * b, a / b", {"a": x, "b": y})
    assert result == (x + y, x - y, x * y, x / y)
def test_evaluator_func_call():
    """Function calls dispatch to the callable in the variable table."""

    def func(a, b):
        return a + b, a - b, a * b, a / b

    assert _calc("func(1, 2)", {"func": func}) == func(1, 2)
def test_evaluator_slice():
    """Subscript and slice expressions are evaluated."""
    seq = [1, 2, 3, 4, 5, 6]
    result = _calc("a, a[1:], a[:5], a[1: 5], a[1: 5: 2]", {"a": seq})
    assert result == (seq, seq[1:], seq[:5], seq[1:5], seq[1:5:2])
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tvmscript_parser_ir.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unittests for tvm.script.parser.ir"""
import pytest
import inspect
import tvm.testing
from tvm.script.parser import ir_module
from tvm.ir import IRModule
def test_ir_base():
    """@ir_module on an empty class yields an empty IRModule."""

    @ir_module
    class BlankIRModule:
        pass

    assert isinstance(BlankIRModule, IRModule)
    assert len(BlankIRModule.functions.items()) == 0
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tvmscript_parser_source.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unittests for tvm.script.parser.core"""
import pytest
import inspect
import tvm.testing
from tvm.script.parser.core.diagnostics import Source
from tvm.script.parser.core import doc_core as doc
from tvm.script import tir as T
def matmul(a: T.handle, b: T.handle, c: T.handle) -> None:
    # NOTE: the *source text* of this function is the fixture for the tests
    # below (compared against inspect.getsource and its parsed AST shape), so
    # do not add a docstring or reorder/rename anything: the tests require the
    # body to stay exactly [Assign A, Assign B, Assign C, For].
    A = T.match_buffer(a, [128, 128])
    B = T.match_buffer(b, [128, 128])
    C = T.match_buffer(c, [128, 128])
    for i, j, k in T.grid(128, 128, 128):
        with T.block("update"):
            vi, vj, vk = T.axis.remap("SSR", [i, j, k])
            C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
def test_source_base():
    """Source captures name, position and text consistent with inspect."""
    source = Source(matmul)
    assert source.source_name == inspect.getsourcefile(matmul)
    assert source.start_line is not None
    assert source.start_column == 0
    assert source.source == inspect.getsource(matmul)
    assert source.full_source == inspect.getsource(inspect.getmodule(matmul))
def test_source_ast():
    """Source.as_ast returns a doc AST mirroring matmul's structure."""
    mod = Source(matmul).as_ast()
    assert isinstance(mod, doc.Module)

    func_def = mod.body[0]
    assert isinstance(func_def, doc.FunctionDef)
    assert func_def.name == "matmul"

    # three handle parameters: a, b, c
    params = func_def.args.args
    assert len(params) == 3
    assert [p.arg for p in params] == ["a", "b", "c"]

    # body: three buffer-matching assignments followed by the grid loop
    body = func_def.body
    assert len(body) == 4
    for stmt, target in zip(body[:3], ["A", "B", "C"]):
        assert isinstance(stmt, doc.Assign)
        assert stmt.targets[0].id == target

    loop = body[3]
    assert len(loop.target.elts) == 3
    assert [elt.id for elt in loop.target.elts] == ["i", "j", "k"]

    loop_body = loop.body
    assert len(loop_body) == 1
    with_block = loop_body[0]
    assert isinstance(with_block, doc.With)
    assert len(with_block.body) == 2
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tvmscript_parser_tir.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unittests for tvm.script.parser.tir"""
import pytest
import inspect
import tvm.testing
from tvm.script.parser import tir as T
from tvm import ir, tir
def test_tir_buffer_proxy():
    """T.Buffer works both as a call and via subscript syntax."""
    called = T.Buffer((128, 128), "float32")
    assert isinstance(called, tir.Buffer)
    assert list(called.shape) == [128, 128]
    assert called.dtype == "float32"

    subscripted = T.Buffer[(64, 64, 64), "int32"]
    assert isinstance(subscripted, tir.Buffer)
    assert list(subscripted.shape) == [64, 64, 64]
    assert subscripted.dtype == "int32"
def test_tir_ptr_proxy():
    """T.Ptr works both as a call and via subscript syntax."""
    called = T.Ptr("int32", "global")
    assert isinstance(called, tir.Var)
    assert called.dtype == "handle"
    annotation = called.type_annotation
    assert isinstance(annotation, ir.PointerType)
    assert annotation.element_type == ir.PrimType("int32")
    assert annotation.storage_scope == "global"

    subscripted = T.Ptr["float32", "shared"]
    assert isinstance(subscripted, tir.Var)
    assert subscripted.dtype == "handle"
    annotation = subscripted.type_annotation
    assert isinstance(annotation, ir.PointerType)
    assert annotation.element_type == ir.PrimType("float32")
    assert annotation.storage_scope == "shared"
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tvmscript_printer_doc.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
In this test file, we want to make sure the Python code can construct
Doc objects, then access and modify their attributes correctly.
"""
import pytest
import tvm
from tvm.runtime import ObjectPath
from tvm.script.printer.doc import (
AssertDoc,
AssignDoc,
AttrAccessDoc,
CallDoc,
ClassDoc,
DictDoc,
ExprStmtDoc,
ForDoc,
FunctionDoc,
IdDoc,
IfDoc,
IndexDoc,
LambdaDoc,
ListDoc,
LiteralDoc,
OperationDoc,
OperationKind,
ReturnDoc,
ScopeDoc,
SliceDoc,
StmtBlockDoc,
TupleDoc,
WhileDoc,
)
@pytest.mark.parametrize(
    "value",
    [None, "test", 0, 1, -2, 0.0, 1.5, -1.3, True, False],
)
def test_literal_doc_construction(value):
    """LiteralDoc stores the Python literal it was constructed with."""
    result = LiteralDoc(value)
    if isinstance(value, float):
        # FloatImm cannot be compared with Python's float directly
        assert float(result.value) == pytest.approx(value)
    else:
        assert result.value == value
def test_id_doc():
    """IdDoc stores the identifier name."""
    assert IdDoc("name").name == "name"
def test_attr_access_doc():
    """AttrAccessDoc stores the target expression and attribute name."""
    base = IdDoc("x")
    result = AttrAccessDoc(base, "attribute")
    assert result.value == base
    assert result.name == "attribute"
@pytest.mark.parametrize(
    "indices",
    [
        [],
        [LiteralDoc(1)],
        [LiteralDoc(2), IdDoc("x")],
        [SliceDoc(LiteralDoc(1), LiteralDoc(2))],
        [SliceDoc(LiteralDoc(1)), IdDoc("y")],
    ],
)
def test_index_doc(indices):
    """IndexDoc stores the indexed value and its index expressions."""
    base = IdDoc("x")
    result = IndexDoc(base, indices)
    assert result.value == base
    assert list(result.indices) == indices
@pytest.mark.parametrize(
    "args, kwargs",
    [
        ([], {}),
        ([LiteralDoc("arg")], {}),
        ([LiteralDoc("arg"), IdDoc("x")], {}),
        ([], {"x": LiteralDoc("x")}),
        ([], {"x": LiteralDoc("x"), "y": LiteralDoc("y")}),
        ([LiteralDoc("arg")], {"x": LiteralDoc("x"), "y": LiteralDoc("y")}),
        ([LiteralDoc("arg"), IdDoc("x")], {"x": LiteralDoc("x"), "y": LiteralDoc("y")}),
    ],
)
def test_call_doc(args, kwargs):
    """CallDoc stores callee plus positional and keyword arguments."""
    callee = IdDoc("x")
    result = CallDoc(callee, *args, **kwargs)
    assert result.callee == callee
    assert list(result.args) == args
    assert dict(zip(result.kwargs_keys, result.kwargs_values)) == kwargs
@pytest.mark.parametrize(
    "operands",
    [
        [],
        [LiteralDoc(1)],
        [LiteralDoc(2), IdDoc("x")],
        [LiteralDoc(2), IdDoc("x"), LiteralDoc("y")],
    ],
)
def test_operation_doc(operands):
    """OperationDoc stores its operator kind and operand list."""
    # Only the constructor and attribute visitor are under test here,
    # so the particular choice of OperationKind does not matter.
    operator = OperationKind.Add
    result = OperationDoc(operator, operands)
    assert result.kind == operator
    assert list(result.operands) == operands
@pytest.mark.parametrize(
    "args",
    [
        [],
        [IdDoc("x")],
        [IdDoc("x"), IdDoc("y")],
    ],
)
def test_lambda_doc(args):
    """LambdaDoc stores its parameter list and body expression."""
    body = LiteralDoc(1)
    result = LambdaDoc(args, body)
    assert result.body == body
    assert list(result.args) == args
@pytest.mark.parametrize(
    "elements",
    [
        [],
        [IdDoc("x")],
        [IdDoc("x"), IdDoc("y")],
    ],
)
def test_tuple_doc(elements):
    """TupleDoc stores its element expressions."""
    assert list(TupleDoc(elements).elements) == elements
@pytest.mark.parametrize(
    "elements",
    [
        [],
        [IdDoc("x")],
        [IdDoc("x"), IdDoc("y")],
    ],
)
def test_list_doc(elements):
    """ListDoc stores its element expressions."""
    assert list(ListDoc(elements).elements) == elements
@pytest.mark.parametrize(
    "content",
    [
        {},
        {LiteralDoc("k"): IdDoc("v")},
        {LiteralDoc("k"): IdDoc("v"), LiteralDoc("k2"): IdDoc("v2")},
    ],
)
def test_dict_doc(content):
    """DictDoc stores parallel key and value lists matching the input dict."""
    result = DictDoc(content)
    assert dict(zip(result.keys, result.values)) == content
@pytest.mark.parametrize("start", [LiteralDoc(1), None])
@pytest.mark.parametrize("stop", [LiteralDoc(2), None])
@pytest.mark.parametrize("step", [LiteralDoc(3), None])
def test_slice_doc(start, stop, step):
    """SliceDoc stores start, stop and step (each possibly None).

    Bug fix: `step` was parametrized but never passed to the constructor and
    never asserted, so the third SliceDoc field was effectively untested.
    """
    doc = SliceDoc(start, stop, step)
    assert doc.start == start
    assert doc.stop == stop
    assert doc.step == step
def test_expr_doc_attr_access():
    """ExprDoc.attr builds an AttrAccessDoc on the receiver."""
    base = IdDoc("x")
    result = base.attr("test")
    assert result.value == base
    assert result.name == "test"
@pytest.mark.parametrize(
    "indices",
    [
        (),
        LiteralDoc(1),
        SliceDoc(LiteralDoc(1), LiteralDoc(2)),
        (LiteralDoc(1),),
        (LiteralDoc(2), IdDoc("x")),
        (SliceDoc(LiteralDoc(1), LiteralDoc(2)),),
        (SliceDoc(LiteralDoc(1)), IdDoc("y")),
    ],
)
def test_expr_doc_get_item(indices):
    """ExprDoc.__getitem__ builds an IndexDoc; scalar indices become 1-tuples."""
    base = IdDoc("x")
    result = base[indices]
    assert result.value == base
    expected = indices if isinstance(indices, tuple) else (indices,)
    assert tuple(result.indices) == expected
@pytest.mark.parametrize(
    "args, kwargs",
    [
        ([], {}),
        ([LiteralDoc("arg")], {}),
        ([LiteralDoc("arg"), IdDoc("x")], {}),
        ([], {"x": LiteralDoc("x")}),
        ([], {"x": LiteralDoc("x"), "y": LiteralDoc("y")}),
        ([LiteralDoc("arg")], {"x": LiteralDoc("x"), "y": LiteralDoc("y")}),
        ([LiteralDoc("arg"), IdDoc("x")], {"x": LiteralDoc("x"), "y": LiteralDoc("y")}),
    ],
)
def test_expr_doc_call_with(args, kwargs):
    """ExprDoc.call builds a CallDoc with the receiver as callee."""
    callee = IdDoc("x")
    result = callee.call(*args, **kwargs)
    assert result.callee == callee
    assert list(result.args) == args
    assert dict(zip(result.kwargs_keys, result.kwargs_values)) == kwargs
@pytest.mark.parametrize(
    "stmts",
    [
        [],
        [ExprStmtDoc(IdDoc("x"))],
        [ExprStmtDoc(IdDoc("x")), ExprStmtDoc(IdDoc("y"))],
    ],
)
def test_stmt_block_doc(stmts):
    """StmtBlockDoc stores its statement list."""
    assert list(StmtBlockDoc(stmts).stmts) == stmts
@pytest.mark.parametrize(
    "lhs, rhs, annotation",
    [
        (IdDoc("x"), IdDoc("y"), None),
        (IdDoc("x"), None, IdDoc("int")),
        (IdDoc("x"), IdDoc("y"), IdDoc("int")),
    ],
)
def test_assign_doc(lhs, rhs, annotation):
    """AssignDoc stores lhs, rhs and the optional type annotation."""
    result = AssignDoc(lhs, rhs, annotation)
    assert result.lhs == lhs
    assert result.rhs == rhs
    assert result.annotation == annotation
@pytest.mark.parametrize(
    "lhs, rhs, annotation",
    [
        (IdDoc("x"), None, None),
        (TupleDoc([IdDoc("x"), IdDoc("y")]), None, IdDoc("int")),
        (TupleDoc([IdDoc("x"), IdDoc("y")]), IdDoc("u"), IdDoc("int")),
    ],
)
def test_invalid_assign_doc(lhs, rhs, annotation):
    """Invalid lhs/rhs/annotation combinations raise ValueError mentioning AssignDoc."""
    with pytest.raises(ValueError) as exc_info:
        AssignDoc(lhs, rhs, annotation)
    assert "AssignDoc" in str(exc_info.value)
@pytest.mark.parametrize(
    "else_branch",
    [
        [],
        [ExprStmtDoc(IdDoc("x"))],
        [ExprStmtDoc(IdDoc("x")), ExprStmtDoc(IdDoc("y"))],
    ],
)
@pytest.mark.parametrize(
    "then_branch",
    [
        [],
        [ExprStmtDoc(IdDoc("x"))],
        [ExprStmtDoc(IdDoc("x")), ExprStmtDoc(IdDoc("y"))],
    ],
)
def test_if_doc(then_branch, else_branch):
    """IfDoc stores predicate and branches; both branches empty is an error."""
    predicate = IdDoc("x")
    if not then_branch and not else_branch:
        with pytest.raises(ValueError) as exc_info:
            IfDoc(predicate, then_branch, else_branch)
        assert "IfDoc" in str(exc_info.value)
        return
    result = IfDoc(predicate, then_branch, else_branch)
    assert result.predicate == predicate
    assert list(result.then_branch) == then_branch
    assert list(result.else_branch) == else_branch
@pytest.mark.parametrize(
    "body",
    [
        [],
        [ExprStmtDoc(IdDoc("x"))],
        [ExprStmtDoc(IdDoc("x")), ExprStmtDoc(IdDoc("y"))],
    ],
)
def test_while_doc(body):
    """WhileDoc stores its predicate and body."""
    predicate = IdDoc("x")
    result = WhileDoc(predicate, body)
    assert result.predicate == predicate
    assert list(result.body) == body
@pytest.mark.parametrize(
    "body",
    [
        [],
        [ExprStmtDoc(IdDoc("x"))],
        [ExprStmtDoc(IdDoc("x")), ExprStmtDoc(IdDoc("y"))],
    ],
)
def test_for_doc(body):
    """ForDoc stores loop target (lhs), iterable (rhs) and body."""
    target, iterable = IdDoc("x"), IdDoc("y")
    result = ForDoc(target, iterable, body)
    assert result.lhs == target
    assert result.rhs == iterable
    assert list(result.body) == body
@pytest.mark.parametrize(
    "lhs",
    [
        None,
        IdDoc("x"),
    ],
)
@pytest.mark.parametrize(
    "body",
    [
        [],
        [ExprStmtDoc(IdDoc("x"))],
        [ExprStmtDoc(IdDoc("x")), ExprStmtDoc(IdDoc("y"))],
    ],
)
def test_scope_doc(lhs, body):
    """ScopeDoc stores the optional 'as' target, context expression and body."""
    context = IdDoc("y")
    result = ScopeDoc(lhs, context, body)
    assert result.lhs == lhs
    assert result.rhs == context
    assert list(result.body) == body
def test_expr_stmt_doc():
    """ExprStmtDoc wraps a bare expression."""
    expr = IdDoc("x")
    assert ExprStmtDoc(expr).expr == expr
@pytest.mark.parametrize(
    "msg",
    [
        None,
        LiteralDoc("msg"),
    ],
)
def test_assert_doc(msg):
    """AssertDoc stores the tested expression and optional message."""
    condition = IdDoc("x")
    result = AssertDoc(condition, msg)
    assert result.test == condition
    assert result.msg == msg
def test_return_doc():
    """ReturnDoc stores the returned expression."""
    value = IdDoc("x")
    assert ReturnDoc(value).value == value
@pytest.mark.parametrize(
    "args",
    [
        [],
        [AssignDoc(IdDoc("x"), None, IdDoc("int"))],
        [
            AssignDoc(IdDoc("x"), None, IdDoc("int")),
            AssignDoc(IdDoc("y"), LiteralDoc(1), IdDoc("int")),
        ],
    ],
)
@pytest.mark.parametrize(
    "decorators",
    [
        [],
        [IdDoc("test")],
        [IdDoc("test"), IdDoc("test2")],
    ],
)
@pytest.mark.parametrize(
    "return_type",
    [
        None,
        LiteralDoc(None),
    ],
)
@pytest.mark.parametrize(
    "body",
    [
        [],
        [ExprStmtDoc(IdDoc("x"))],
        [ExprStmtDoc(IdDoc("x")), ExprStmtDoc(IdDoc("y"))],
    ],
)
def test_function_doc(args, decorators, return_type, body):
    """FunctionDoc stores name, signature, decorators, return type and body."""
    func_name = IdDoc("name")
    result = FunctionDoc(func_name, args, decorators, return_type, body)
    assert result.name == func_name
    assert list(result.args) == args
    assert list(result.decorators) == decorators
    assert result.return_type == return_type
    assert list(result.body) == body
@pytest.mark.parametrize(
    "decorators",
    [
        [],
        [IdDoc("test")],
        [IdDoc("test"), IdDoc("test2")],
    ],
)
@pytest.mark.parametrize(
    "body",
    [
        [],
        [ExprStmtDoc(IdDoc("x"))],
        [ExprStmtDoc(IdDoc("x")), ExprStmtDoc(IdDoc("y"))],
    ],
)
def test_class_doc(decorators, body):
    """ClassDoc stores name, decorators and body."""
    class_name = IdDoc("name")
    result = ClassDoc(class_name, decorators, body)
    assert result.name == class_name
    assert list(result.decorators) == decorators
    assert list(result.body) == body
def test_stmt_doc_comment():
    """StmtDoc.comment round-trips through the node's attribute, not __dict__."""
    stmt = ExprStmtDoc(IdDoc("x"))
    assert stmt.comment is None
    text = "test comment"
    stmt.comment = text
    # The setter must not store the value as an ordinary Python instance
    # attribute on the object.
    assert "comment" not in stmt.__dict__
    assert stmt.comment == text
def test_doc_source_paths():
    """Doc.source_paths is stored as a TVM array and can be reassigned."""
    node = IdDoc("x")
    assert len(node.source_paths) == 0
    paths = [ObjectPath.root(), ObjectPath.root().attr("x")]
    node.source_paths = paths
    # Reading back goes through __getattr__ and yields a tvm.ir.container.Array
    assert not isinstance(node.source_paths, list)
    assert list(node.source_paths) == paths
    node.source_paths = []
    assert len(node.source_paths) == 0
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tvmscript_printer_entry_point.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from tvm.error import TVMError
from tvm.script.printer import script
from tvm.tir import FloatImm
def test_as_script_unknown_ir():
    """Requesting an unregistered dispatch token raises with the token in the message."""
    node = FloatImm("float32", 1.0)
    with pytest.raises(TVMError) as exc_info:
        script(node, "test_xyz", {})
    assert "test_xyz" in str(exc_info.value)
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tvmscript_printer_frame.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from tvm.script.printer.frame import MetadataFrame
def test_frame_add_callback():
    """Exit callbacks registered on a frame run when the frame's scope exits."""
    frame = MetadataFrame()
    flag = 0

    def callback1():
        nonlocal flag
        flag += 1

    def callback2():
        nonlocal flag
        flag += 5

    frame.add_exit_callback(callback1)
    with frame:
        frame.add_exit_callback(callback2)
        # nothing has fired while the frame is still open
        assert flag == 0
    # both callbacks fired on exit: 1 + 5
    assert flag == 6
def test_frame_clear_callbacks_after_exit():
    """Exit callbacks are consumed by the first exit and do not fire again."""
    frame = MetadataFrame()
    counter = 0

    def bump():
        nonlocal counter
        counter += 1

    frame.add_exit_callback(bump)
    with frame:
        pass
    assert counter == 1

    # a second enter/exit cycle must not re-run the consumed callback
    with frame:
        pass
    assert counter == 1
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tvmscript_printer_highlight.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import tvm
from tvm.script import tir as T
def test_highlight_script():
    """Smoke test: .show() renders an IRModule/PrimFunc in each highlight style."""

    @tvm.script.ir_module
    class Module:
        @T.prim_func
        def main(  # type: ignore
            a: T.handle,
            b: T.handle,
            c: T.handle,
        ) -> None:  # pylint: disable=no-self-argument
            T.func_attr({"global_symbol": "main", "tir.noalias": True})
            A = T.match_buffer(a, [16, 128, 128])
            B = T.match_buffer(b, [16, 128, 128])
            C = T.match_buffer(c, [16, 128, 128])
            for n, i, j, k in T.grid(16, 128, 128, 128):
                with T.block("matmul"):
                    vn, vi, vj, vk = T.axis.remap("SSSR", [n, i, j, k])
                    with T.init():
                        C[vn, vi, vj] = 0.0  # type: ignore
                    C[vn, vi, vj] = C[vn, vi, vj] + A[vn, vi, vk] * B[vn, vj, vk]

    # each call only needs to run without raising
    Module.show()
    Module["main"].show()
    Module["main"].show(style="light")
    Module["main"].show(style="dark")
    Module["main"].show(style="ansi")
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tvmscript_printer_irdocsifier.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from tvm.runtime import ObjectPath
from tvm.script.printer.doc import IdDoc
from tvm.script.printer.frame import MetadataFrame, VarDefFrame
from tvm.script.printer.ir_docsifier import IRDocsifier, RootNodeContainer
from tvm.tir import Var
@pytest.fixture
def ir_docsifier():
    """Yield an IRDocsifier whose active dispatch token is this file's path."""
    docsifier = IRDocsifier({})
    token = f"{__file__}"
    with docsifier.dispatch_token(token):
        yield docsifier
def _get_id_doc_printer(id_name):
    """Return a dispatch function that always prints ``IdDoc(id_name)``."""

    def _print(obj, object_path, ir_docsifier):  # pylint: disable=unused-argument
        return IdDoc(id_name)

    return _print
def _root_dispatch_function(obj, ir_docsifier):
    """Docsify ``obj`` at the root path, tagging the result for later checks."""
    root = ObjectPath.root()
    result = ir_docsifier.as_doc(obj, root)
    result.source_paths = [root.attr("irdocsifier_test")]
    return result
# NOTE: the IRDocsifier dispatch table is global (process-wide), so each test
# module must register its dispatch functions under a unique dispatch token —
# here, this file's own path — to avoid clobbering entries used by other tests.
IRDocsifier.set_dispatch(Var, _get_id_doc_printer("x"), f"{__file__}")
IRDocsifier.set_root_dispatch(f"{__file__}", _root_dispatch_function)
def test_set_dispatch(ir_docsifier):
    """A dispatch registered under another token must not leak into this one."""
    alt_token = f"{__file__}-2"
    IRDocsifier.set_dispatch(Var, _get_id_doc_printer("x2"), alt_token)

    with ir_docsifier.dispatch_token(alt_token):
        inner_doc = ir_docsifier.as_doc(Var("x", dtype="int8"), ObjectPath.root())
        assert inner_doc.name == "x2"

    # Back under the fixture's token, the original printer ("x") applies.
    outer_doc = ir_docsifier.as_doc(Var("x", dtype="int8"), ObjectPath.root())
    assert outer_doc.name == "x"
def test_set_root_dispatch(ir_docsifier):
    """The registered root dispatch function tags the doc with a marker path."""
    container = RootNodeContainer(Var("x", dtype="int8"))
    doc = ir_docsifier.as_doc(container, ObjectPath.root())
    assert ObjectPath.root().attr("irdocsifier_test") in doc.source_paths
def test_as_doc(ir_docsifier):
    """``as_doc`` produces a doc that carries the object path it was given.

    Fix: the original bound ``object_path`` but then rebuilt ``ObjectPath.root()``
    for the ``as_doc`` call; use the same object in both places so the
    source-path assertion checks the path actually passed in.
    """
    object_path = ObjectPath.root()
    doc = ir_docsifier.as_doc(Var("x", "int8"), object_path)
    assert doc.name == "x"
    assert list(doc.source_paths) == [object_path]
def test_with_dispatch_token(ir_docsifier):
    """``dispatch_token`` pushes a token on entry and pops it on exit."""
    tokens_before = len(ir_docsifier.dispatch_tokens)
    with ir_docsifier.dispatch_token("tir"):
        assert len(ir_docsifier.dispatch_tokens) == tokens_before + 1
    assert len(ir_docsifier.dispatch_tokens) == tokens_before
def test_with_frame(ir_docsifier):
    """``frame()`` pushes the frame and fires its exit callbacks on scope exit."""
    frames_before = len(ir_docsifier.frames)
    frame = VarDefFrame()
    callback_fired = False

    def _on_exit():
        nonlocal callback_fired
        callback_fired = True

    frame.add_exit_callback(_on_exit)
    with ir_docsifier.frame(frame):
        assert len(ir_docsifier.frames) == frames_before + 1
        # Callbacks must not run while the frame is still open.
        assert not callback_fired
    assert len(ir_docsifier.frames) == frames_before
    assert callback_fired
def test_get_frame(ir_docsifier):
    """``get_frame`` returns the innermost *live* frame of the requested type.

    The frame stack is searched from the top down, so the most recently entered
    frame of a given type shadows outer ones, and frames that have exited are
    no longer returned.
    """
    with ir_docsifier.frame(VarDefFrame()) as frame_a:
        assert ir_docsifier.get_frame(MetadataFrame) is None
        assert ir_docsifier.get_frame(VarDefFrame) == frame_a
        with ir_docsifier.frame(VarDefFrame()) as frame_b:
            # frame_b (inner) shadows frame_a for VarDefFrame lookups.
            assert ir_docsifier.get_frame(MetadataFrame) is None
            assert ir_docsifier.get_frame(VarDefFrame) == frame_b
            with ir_docsifier.frame(MetadataFrame()) as frame_c:
                # A MetadataFrame does not affect VarDefFrame lookups.
                assert ir_docsifier.get_frame(MetadataFrame) == frame_c
                assert ir_docsifier.get_frame(VarDefFrame) == frame_b
            # frame_c exited: MetadataFrame lookups miss again.
            assert ir_docsifier.get_frame(MetadataFrame) is None
            assert ir_docsifier.get_frame(VarDefFrame) == frame_b
        # frame_b exited: frame_a is the innermost VarDefFrame once more.
        assert ir_docsifier.get_frame(MetadataFrame) is None
        assert ir_docsifier.get_frame(VarDefFrame) == frame_a
    assert ir_docsifier.get_frame(MetadataFrame) is None
    assert ir_docsifier.get_frame(VarDefFrame) == frame_a
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tvmscript_printer_python_doc_printer.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import itertools
import pytest
import tvm
from tvm.script.printer.doc import (
AssertDoc,
AssignDoc,
CallDoc,
ClassDoc,
DictDoc,
ExprStmtDoc,
ForDoc,
FunctionDoc,
IdDoc,
IfDoc,
LambdaDoc,
ListDoc,
LiteralDoc,
OperationDoc,
OperationKind,
ReturnDoc,
ScopeDoc,
SliceDoc,
StmtBlockDoc,
TupleDoc,
WhileDoc,
)
from tvm.script.printer.doc_printer import to_python_script
def format_script(s: str) -> str:
    """Strip surrounding blank lines and shift the minimum indentation to 0.

    Lines that are empty or whitespace-only are ignored when computing the
    common margin. The result always ends with exactly one trailing newline;
    input with no visible content normalizes to just ``"\\n"``.
    """
    s = s.strip("\n")
    content_lines = [ln for ln in s.splitlines() if ln and not ln.isspace()]
    if not content_lines:
        # Nothing but blank lines: canonical empty script.
        return "\n"
    margin = min(len(ln) - len(ln.lstrip(" ")) for ln in content_lines)
    result = "\n".join(ln[margin:] for ln in s.splitlines())
    return result if result.endswith("\n") else result + "\n"
@pytest.mark.parametrize(
"doc,expected",
[
(LiteralDoc(None), "None"),
(LiteralDoc(True), "True"),
(LiteralDoc(False), "False"),
(LiteralDoc("test"), '"test"'),
(LiteralDoc(""), '""'),
(LiteralDoc('""'), r'"\"\""'),
(LiteralDoc("\n\t\\test\r"), r'"\n\t\\test\r"'),
# TODO: fix the roundatrippable problem caused by utf8
pytest.param(LiteralDoc("\x88"), r'"\x88"', marks=pytest.mark.xfail),
(LiteralDoc(0), "0"),
(LiteralDoc(-1), "-1"),
(LiteralDoc(3.25), "3.25"),
(LiteralDoc(-0.5), "-0.5"),
],
ids=itertools.count(),
)
def test_print_literal_doc(doc, expected):
assert to_python_script(doc) == format_script(expected)
@pytest.mark.parametrize(
"name",
[
"test",
"_test",
"TestCase",
"test_case",
"test123",
],
ids=itertools.count(),
)
def test_print_id_doc(name):
doc = IdDoc(name)
assert to_python_script(doc) == format_script(name)
@pytest.mark.parametrize(
"attr",
[
"attr",
"_attr",
"Attr",
"attr_1",
],
ids=itertools.count(),
)
def test_print_attr_doc(attr):
doc = IdDoc("x").attr(attr)
assert to_python_script(doc) == format_script(f"x.{attr}")
@pytest.mark.parametrize(
"indices, expected",
[
(
(),
"[()]",
),
(
(LiteralDoc(1),),
"[1]",
),
(
(LiteralDoc(2), IdDoc("x")),
"[2, x]",
),
(
(SliceDoc(LiteralDoc(1), LiteralDoc(2)),),
"[1:2]",
),
(
(SliceDoc(LiteralDoc(1)), IdDoc("y")),
"[1:, y]",
),
(
(SliceDoc(), IdDoc("y")),
"[:, y]",
),
(
(IdDoc("x"), IdDoc("y"), IdDoc("z")),
"[x, y, z]",
),
],
ids=itertools.count(),
)
def test_print_index_doc(indices, expected):
doc = IdDoc("x")[indices]
assert to_python_script(doc) == format_script(f"x{expected}")
UNARY_OP_TOKENS = {
OperationKind.USub: "-",
OperationKind.Invert: "~",
OperationKind.Not: "not ",
}
@pytest.mark.parametrize(
"op_kind, expected_token",
list(UNARY_OP_TOKENS.items()),
ids=UNARY_OP_TOKENS.keys(),
)
def test_print_unary_operation_doc(op_kind, expected_token):
doc = OperationDoc(op_kind, [IdDoc("x")])
assert to_python_script(doc) == format_script(f"{expected_token}x")
BINARY_OP_TOKENS = {
OperationKind.Add: "+",
OperationKind.Sub: "-",
OperationKind.Mult: "*",
OperationKind.Div: "/",
OperationKind.FloorDiv: "//",
OperationKind.Mod: "%",
OperationKind.Pow: "**",
OperationKind.LShift: "<<",
OperationKind.RShift: ">>",
OperationKind.BitAnd: "&",
OperationKind.BitOr: "|",
OperationKind.BitXor: "^",
OperationKind.Lt: "<",
OperationKind.LtE: "<=",
OperationKind.Eq: "==",
OperationKind.NotEq: "!=",
OperationKind.Gt: ">",
OperationKind.GtE: ">=",
OperationKind.And: "and",
OperationKind.Or: "or",
}
@pytest.mark.parametrize(
"op_kind, expected_token",
list(BINARY_OP_TOKENS.items()),
ids=BINARY_OP_TOKENS.keys(),
)
def test_print_binary_operation_doc(op_kind, expected_token):
doc = OperationDoc(op_kind, [IdDoc("x"), IdDoc("y")])
assert to_python_script(doc) == format_script(f"x {expected_token} y")
SPECIAL_OP_CASES = [
(
OperationKind.IfThenElse,
[LiteralDoc(True), LiteralDoc("true"), LiteralDoc("false")],
'"true" if True else "false"',
),
(
OperationKind.IfThenElse,
[IdDoc("x"), LiteralDoc(None), LiteralDoc(1)],
"None if x else 1",
),
]
@pytest.mark.parametrize(
"op_kind, operands, expected", SPECIAL_OP_CASES, ids=[kind for (kind, *_) in SPECIAL_OP_CASES]
)
def test_print_special_operation_doc(op_kind, operands, expected):
doc = OperationDoc(op_kind, operands)
assert to_python_script(doc) == format_script(expected)
def test_operation_doc_test_exhaustive():
    """Meta-test: every ``OperationKind`` must appear in one of the tables above.

    Fails with a pointer to the right table (UNARY_OP_TOKENS, BINARY_OP_TOKENS,
    or SPECIAL_OP_CASES) whenever a new OperationKind is added without a
    corresponding printer test case.
    """
    special_op_covered = {k for k, *_ in SPECIAL_OP_CASES}
    for op_kind in OperationKind:
        # _UnaryStart/_UnaryEnd and _BinaryStart/_BinaryEnd are sentinel
        # members delimiting the unary and binary ranges of the enum.
        if OperationKind._UnaryStart < op_kind < OperationKind._UnaryEnd:
            assert op_kind in UNARY_OP_TOKENS, (
                f"{op_kind.name} not covered in test_print_unary_operation_doc. "
                f"Please add the expected token to UNARY_OP_TOKENS"
            )
        elif OperationKind._BinaryStart < op_kind < OperationKind._BinaryEnd:
            assert op_kind in BINARY_OP_TOKENS, (
                f"{op_kind.name} not covered in test_print_binary_operation_doc. "
                f"Please add the expected token to BINARY_OP_TOKENS"
            )
        elif not op_kind.name.startswith("_"):
            # Anything outside the unary/binary ranges that is not a sentinel
            # is a special op (e.g. IfThenElse).
            assert op_kind in special_op_covered, (
                f"{op_kind.name} not covered in test_print_special_operation_doc. "
                f"Please add the test cases for it to SPECIAL_OP_CASES"
            )
@pytest.mark.parametrize(
"args, kwargs, expected",
[
(
(),
{},
"()",
),
(
(),
{"key0": IdDoc("u")},
"(key0=u)",
),
(
(),
{"key0": IdDoc("u"), "key1": IdDoc("v")},
"(key0=u, key1=v)",
),
(
(IdDoc("x"),),
{},
"(x)",
),
(
(IdDoc("x"),),
{"key0": IdDoc("u")},
"(x, key0=u)",
),
(
(IdDoc("x"),),
{"key0": IdDoc("u"), "key1": IdDoc("v")},
"(x, key0=u, key1=v)",
),
(
(IdDoc("x"), (IdDoc("y"))),
{},
"(x, y)",
),
(
(IdDoc("x"), (IdDoc("y"))),
{"key0": IdDoc("u")},
"(x, y, key0=u)",
),
(
(IdDoc("x"), (IdDoc("y"))),
{"key0": IdDoc("u"), "key1": IdDoc("v")},
"(x, y, key0=u, key1=v)",
),
],
ids=itertools.count(),
)
def test_print_call_doc(args, kwargs, expected):
doc = CallDoc(IdDoc("f"), *args, **kwargs)
assert to_python_script(doc) == format_script(f"f{expected}")
@pytest.mark.parametrize(
"args, expected",
[
(
(),
"lambda : 0",
),
(
(IdDoc("x"),),
"lambda x: 0",
),
(
(IdDoc("x"), IdDoc("y")),
"lambda x, y: 0",
),
(
(IdDoc("x"), IdDoc("y"), IdDoc("z")),
"lambda x, y, z: 0",
),
],
ids=itertools.count(),
)
def test_print_lambda_doc(args, expected):
doc = LambdaDoc(args, body=LiteralDoc(0))
assert to_python_script(doc) == format_script(expected)
@pytest.mark.parametrize(
"elements, expected",
[
(
(),
"[]",
),
(
[IdDoc("x")],
"[x]",
),
(
[IdDoc("x"), IdDoc("y")],
"[x, y]",
),
(
[IdDoc("x"), IdDoc("y"), IdDoc("z")],
"[x, y, z]",
),
],
ids=itertools.count(),
)
def test_print_list_doc(elements, expected):
doc = ListDoc(elements)
assert to_python_script(doc) == format_script(expected)
@pytest.mark.parametrize(
"elements, expected",
[
(
(),
"()",
),
(
[IdDoc("x")],
"(x,)",
),
(
[IdDoc("x"), IdDoc("y")],
"(x, y)",
),
(
[IdDoc("x"), IdDoc("y"), IdDoc("z")],
"(x, y, z)",
),
],
ids=itertools.count(),
)
def test_print_tuple_doc(elements, expected):
doc = TupleDoc(elements)
assert to_python_script(doc) == format_script(expected)
@pytest.mark.parametrize(
"content, expected",
[
(
{},
"{}",
),
(
{LiteralDoc("key_x"): IdDoc("x")},
'{"key_x": x}',
),
(
{LiteralDoc("key_x"): IdDoc("x"), LiteralDoc("key_y"): IdDoc("y")},
'{"key_x": x, "key_y": y}',
),
(
{
LiteralDoc("key_x"): IdDoc("x"),
LiteralDoc("key_y"): IdDoc("y"),
LiteralDoc("key_z"): IdDoc("z"),
},
'{"key_x": x, "key_y": y, "key_z": z}',
),
],
ids=itertools.count(),
)
def test_print_dict_doc(content, expected):
doc = DictDoc(content)
assert to_python_script(doc) == format_script(expected)
@pytest.mark.parametrize(
"slice_doc, expected",
[
(
SliceDoc(),
":",
),
(
SliceDoc(LiteralDoc(1)),
"1:",
),
(
SliceDoc(None, LiteralDoc(2)),
":2",
),
(
SliceDoc(LiteralDoc(1), LiteralDoc(2)),
"1:2",
),
(
SliceDoc(None, None, LiteralDoc(3)),
"::3",
),
(
SliceDoc(LiteralDoc(1), None, LiteralDoc(3)),
"1::3",
),
(
SliceDoc(None, LiteralDoc(2), LiteralDoc(3)),
":2:3",
),
(
SliceDoc(LiteralDoc(1), LiteralDoc(2), LiteralDoc(3)),
"1:2:3",
),
],
ids=itertools.count(),
)
def test_print_slice_doc(slice_doc, expected):
doc = IdDoc("x")[slice_doc]
assert to_python_script(doc) == format_script(f"x[{expected}]")
@pytest.mark.parametrize(
"stmts, expected",
[
(
[],
"",
),
(
[ExprStmtDoc(IdDoc("x"))],
"x",
),
(
[ExprStmtDoc(IdDoc("x")), ExprStmtDoc(IdDoc("y"))],
"""
x
y
""",
),
],
ids=itertools.count(),
)
def test_print_stmt_block_doc(stmts, expected):
doc = StmtBlockDoc(stmts)
assert to_python_script(doc).strip() == format_script(expected).strip()
@pytest.mark.parametrize(
"doc, expected",
[
(
AssignDoc(IdDoc("x"), IdDoc("y"), None),
"x = y",
),
(
AssignDoc(IdDoc("x"), IdDoc("y"), IdDoc("int")),
"x: int = y",
),
(
AssignDoc(IdDoc("x"), None, IdDoc("int")),
"x: int",
),
(
AssignDoc(TupleDoc([IdDoc("x"), IdDoc("y")]), IdDoc("z"), None),
"x, y = z",
),
(
AssignDoc(TupleDoc([IdDoc("x"), TupleDoc([IdDoc("y"), IdDoc("z")])]), IdDoc("z"), None),
"x, (y, z) = z",
),
],
ids=itertools.count(),
)
def test_print_assign_doc(doc, expected):
assert to_python_script(doc) == format_script(expected)
@pytest.mark.parametrize(
"then_branch, else_branch, expected",
[
(
[ExprStmtDoc(IdDoc("x"))],
[],
"""
if pred:
x
""",
),
(
[],
[ExprStmtDoc(IdDoc("y"))],
"""
if pred:
pass
else:
y
""",
),
(
[ExprStmtDoc(IdDoc("x"))],
[ExprStmtDoc(IdDoc("y"))],
"""
if pred:
x
else:
y
""",
),
],
ids=itertools.count(),
)
def test_print_if_doc(then_branch, else_branch, expected):
doc = IfDoc(IdDoc("pred"), then_branch, else_branch)
assert to_python_script(doc) == format_script(expected)
@pytest.mark.parametrize(
"body, expected",
[
(
[ExprStmtDoc(IdDoc("x"))],
"""
while pred:
x
""",
),
(
[],
"""
while pred:
pass
""",
),
],
ids=itertools.count(),
)
def test_print_while_doc(body, expected):
doc = WhileDoc(IdDoc("pred"), body)
assert to_python_script(doc) == format_script(expected)
@pytest.mark.parametrize(
"body, expected",
[
(
[ExprStmtDoc(IdDoc("x"))],
"""
for x in y:
x
""",
),
(
[],
"""
for x in y:
pass
""",
),
],
ids=itertools.count(),
)
def test_print_for_doc(body, expected):
doc = ForDoc(IdDoc("x"), IdDoc("y"), body)
assert to_python_script(doc) == format_script(expected)
@pytest.mark.parametrize(
"lhs, body, expected",
[
(
IdDoc("c"),
[ExprStmtDoc(IdDoc("x"))],
"""
with context() as c:
x
""",
),
(
IdDoc("c"),
[],
"""
with context() as c:
pass
""",
),
(
None,
[],
"""
with context():
pass
""",
),
(
None,
[ExprStmtDoc(IdDoc("x"))],
"""
with context():
x
""",
),
],
ids=itertools.count(),
)
def test_print_scope_doc(lhs, body, expected):
doc = ScopeDoc(lhs, CallDoc(IdDoc("context")), body)
assert to_python_script(doc) == format_script(expected)
def test_print_expr_stmt_doc():
doc = ExprStmtDoc(CallDoc(IdDoc("f"), IdDoc("x")))
assert to_python_script(doc) == format_script("f(x)")
@pytest.mark.parametrize(
"msg, expected",
[
(
None,
"""
assert True
""",
),
(
LiteralDoc("test message"),
"""
assert True, "test message"
""",
),
],
ids=itertools.count(),
)
def test_print_assert_doc(msg, expected):
test = LiteralDoc(True)
doc = AssertDoc(test, msg)
assert to_python_script(doc) == format_script(expected)
@pytest.mark.parametrize(
"value, expected",
[
(
LiteralDoc(None),
"""
return None
""",
),
(
IdDoc("x"),
"""
return x
""",
),
],
ids=itertools.count(),
)
def test_print_return_doc(value, expected):
doc = ReturnDoc(value)
assert to_python_script(doc) == format_script(expected)
@pytest.mark.parametrize(
"args, decorators, return_type, body, expected",
[
(
[],
[],
None,
[],
"""
def func():
pass
""",
),
(
[AssignDoc(IdDoc("x"), rhs=None, annotation=IdDoc("int"))],
[],
IdDoc("int"),
[],
"""
def func(x: int) -> int:
pass
""",
),
(
[AssignDoc(IdDoc("x"), rhs=LiteralDoc(1), annotation=IdDoc("int"))],
[],
LiteralDoc(None),
[],
"""
def func(x: int = 1) -> None:
pass
""",
),
(
[],
[IdDoc("wrap")],
LiteralDoc(None),
[],
"""
@wrap
def func() -> None:
pass
""",
),
(
[],
[IdDoc("wrap_outter"), IdDoc("wrap_inner")],
LiteralDoc(None),
[],
"""
@wrap_outter
@wrap_inner
def func() -> None:
pass
""",
),
(
[
AssignDoc(IdDoc("x"), rhs=None, annotation=IdDoc("int")),
AssignDoc(IdDoc("y"), rhs=LiteralDoc(1), annotation=IdDoc("int")),
],
[IdDoc("wrap")],
LiteralDoc(None),
[],
"""
@wrap
def func(x: int, y: int = 1) -> None:
pass
""",
),
(
[
AssignDoc(IdDoc("x"), rhs=None, annotation=IdDoc("int")),
AssignDoc(IdDoc("y"), rhs=LiteralDoc(1), annotation=IdDoc("int")),
],
[IdDoc("wrap")],
LiteralDoc(None),
[
AssignDoc(IdDoc("y"), OperationDoc(OperationKind.Add, [IdDoc("x"), LiteralDoc(1)])),
AssignDoc(IdDoc("y"), OperationDoc(OperationKind.Sub, [IdDoc("y"), LiteralDoc(1)])),
],
"""
@wrap
def func(x: int, y: int = 1) -> None:
y = x + 1
y = y - 1
""",
),
],
ids=itertools.count(),
)
def test_print_function_doc(args, decorators, return_type, body, expected):
    """FunctionDoc renders decorators, signature, return annotation, and body.

    Fix: the parameter order now matches the parametrize id string
    ``"args, decorators, return_type, body, expected"`` — pytest binds by
    name so the old swapped order worked, but it was misleading to readers.
    Also dropped a stray ``# test`` comment.
    """
    doc = FunctionDoc(IdDoc("func"), args, decorators, return_type, body)
    assert to_python_script(doc) == format_script(expected)
def get_func_doc_for_class(name):
    """Build a small decorated ``FunctionDoc`` fixture named ``name``.

    The function has signature ``(x: int, y: int = 1) -> None``, a ``@wrap``
    decorator, and a two-statement body mutating ``y``.
    """
    params = [
        AssignDoc(IdDoc("x"), rhs=None, annotation=IdDoc("int")),
        AssignDoc(IdDoc("y"), rhs=LiteralDoc(1), annotation=IdDoc("int")),
    ]
    statements = [
        AssignDoc(IdDoc("y"), OperationDoc(OperationKind.Add, [IdDoc("x"), LiteralDoc(1)])),
        AssignDoc(IdDoc("y"), OperationDoc(OperationKind.Sub, [IdDoc("y"), LiteralDoc(1)])),
    ]
    return FunctionDoc(
        name=IdDoc(name),
        args=params,
        decorators=[IdDoc("wrap")],
        return_type=LiteralDoc(None),
        body=statements,
    )
@pytest.mark.parametrize(
"decorators, body, expected",
[
(
[],
[],
"""
class TestClass:
pass
""",
),
(
[IdDoc("wrap")],
[],
"""
@wrap
class TestClass:
pass
""",
),
(
[IdDoc("wrap_outter"), IdDoc("wrap_inner")],
[],
"""
@wrap_outter
@wrap_inner
class TestClass:
pass
""",
),
(
[IdDoc("wrap")],
[get_func_doc_for_class("f1")],
"""
@wrap
class TestClass:
@wrap
def f1(x: int, y: int = 1) -> None:
y = x + 1
y = y - 1
""",
),
(
[IdDoc("wrap")],
[get_func_doc_for_class("f1"), get_func_doc_for_class("f2")],
"""
@wrap
class TestClass:
@wrap
def f1(x: int, y: int = 1) -> None:
y = x + 1
y = y - 1
@wrap
def f2(x: int, y: int = 1) -> None:
y = x + 1
y = y - 1
""",
),
],
ids=itertools.count(),
)
def test_print_class_doc(decorators, body, expected):
doc = ClassDoc(IdDoc("TestClass"), decorators, body)
assert to_python_script(doc) == format_script(expected)
@pytest.mark.parametrize(
"doc, comment, expected",
[
(
AssignDoc(IdDoc("x"), IdDoc("y"), IdDoc("int")),
"comment",
"""
x: int = y # comment
""",
),
(
IfDoc(IdDoc("x"), [ExprStmtDoc(IdDoc("y"))], [ExprStmtDoc(IdDoc("z"))]),
"comment",
"""
# comment
if x:
y
else:
z
""",
),
(
IfDoc(IdDoc("x"), [ExprStmtDoc(IdDoc("y"))], [ExprStmtDoc(IdDoc("z"))]),
"comment line 1\ncomment line 2",
"""
# comment line 1
# comment line 2
if x:
y
else:
z
""",
),
(
WhileDoc(
LiteralDoc(True),
[
AssignDoc(IdDoc("x"), IdDoc("y")),
],
),
"comment",
"""
# comment
while True:
x = y
""",
),
(
ForDoc(IdDoc("x"), IdDoc("y"), []),
"comment",
"""
# comment
for x in y:
pass
""",
),
(
ScopeDoc(IdDoc("x"), IdDoc("y"), []),
"comment",
"""
# comment
with y as x:
pass
""",
),
(
ExprStmtDoc(IdDoc("x")),
"comment",
"""
x # comment
""",
),
(
AssertDoc(LiteralDoc(True)),
"comment",
"""
assert True # comment
""",
),
(
ReturnDoc(LiteralDoc(1)),
"comment",
"""
return 1 # comment
""",
),
(
get_func_doc_for_class("f"),
"comment",
'''
@wrap
def f(x: int, y: int = 1) -> None:
"""
comment
"""
y = x + 1
y = y - 1
''',
),
(
get_func_doc_for_class("f"),
"comment line 1\n\ncomment line 3",
'''
@wrap
def f(x: int, y: int = 1) -> None:
"""
comment line 1
comment line 3
"""
y = x + 1
y = y - 1
''',
),
(
ClassDoc(IdDoc("TestClass"), decorators=[IdDoc("wrap")], body=[]),
"comment",
'''
@wrap
class TestClass:
"""
comment
"""
pass
''',
),
(
ClassDoc(IdDoc("TestClass"), decorators=[IdDoc("wrap")], body=[]),
"comment line 1\n\ncomment line 3",
'''
@wrap
class TestClass:
"""
comment line 1
comment line 3
"""
pass
''',
),
],
ids=itertools.count(),
)
def test_print_doc_comment(doc, comment, expected):
doc.comment = comment
assert to_python_script(doc) == format_script(expected)
@pytest.mark.parametrize(
"doc",
[
AssignDoc(IdDoc("x"), IdDoc("y"), IdDoc("int")),
ExprStmtDoc(IdDoc("x")),
AssertDoc(IdDoc("x")),
ReturnDoc(IdDoc("x")),
],
)
def test_print_invalid_multiline_doc_comment(doc):
doc.comment = "1\n2"
with pytest.raises(ValueError) as e:
to_python_script(doc)
assert "cannot have newline" in str(e.value)
def generate_expr_precedence_test_cases():
x = IdDoc("x")
y = IdDoc("y")
z = IdDoc("z")
def negative(a):
return OperationDoc(OperationKind.USub, [a])
def invert(a):
return OperationDoc(OperationKind.Invert, [a])
def not_(a):
return OperationDoc(OperationKind.Not, [a])
def add(a, b):
return OperationDoc(OperationKind.Add, [a, b])
def sub(a, b):
return OperationDoc(OperationKind.Sub, [a, b])
def mult(a, b):
return OperationDoc(OperationKind.Mult, [a, b])
def div(a, b):
return OperationDoc(OperationKind.Div, [a, b])
def mod(a, b):
return OperationDoc(OperationKind.Mod, [a, b])
def pow(a, b):
return OperationDoc(OperationKind.Pow, [a, b])
def lshift(a, b):
return OperationDoc(OperationKind.LShift, [a, b])
def bit_and(a, b):
return OperationDoc(OperationKind.BitAnd, [a, b])
def bit_or(a, b):
return OperationDoc(OperationKind.BitOr, [a, b])
def bit_xor(a, b):
return OperationDoc(OperationKind.BitXor, [a, b])
def lt(a, b):
return OperationDoc(OperationKind.Lt, [a, b])
def eq(a, b):
return OperationDoc(OperationKind.Eq, [a, b])
def not_eq(a, b):
return OperationDoc(OperationKind.NotEq, [a, b])
def and_(a, b):
return OperationDoc(OperationKind.And, [a, b])
def or_(a, b):
return OperationDoc(OperationKind.Or, [a, b])
def if_then_else(a, b, c):
return OperationDoc(OperationKind.IfThenElse, [a, b, c])
test_cases = {
"attr-call-index": [
(
add(x, y).attr("test"),
"(x + y).test",
),
(
add(x, y.attr("test")),
"x + y.test",
),
(
x[z].call(y),
"x[z](y)",
),
(
x.call(y)[z],
"x(y)[z]",
),
(
x.call(y).call(z),
"x(y)(z)",
),
(
x.call(y).attr("test"),
"x(y).test",
),
(
x.attr("test").call(y),
"x.test(y)",
),
(
x.attr("test").attr("test2"),
"x.test.test2",
),
(
LambdaDoc([x], x).call(y),
"(lambda x: x)(y)",
),
(
add(x, y)[z][add(z, z)].attr("name"),
"(x + y)[z][z + z].name",
),
],
"power": [
(
pow(pow(x, y), z),
"(x ** y) ** z",
),
(
pow(x, pow(y, z)),
"x ** y ** z",
),
(
pow(negative(x), negative(y)),
"(-x) ** -y",
),
(
pow(add(x, y), add(y, z)),
"(x + y) ** (y + z)",
),
],
"unary": [
(
invert(negative(y)),
"~-y",
),
(
negative(y).attr("test"),
"(-y).test",
),
(
negative(y.attr("test")),
"-y.test",
),
(
mult(negative(x), negative(y)),
"-x * -y",
),
(
negative(add(invert(x), negative(y))),
"-(~x + -y)",
),
],
"add-mult": [
(
mult(x, mult(y, z)),
"x * (y * z)",
),
(
mult(mult(x, y), z),
"x * y * z",
),
(
mult(x, add(y, z)),
"x * (y + z)",
),
(
mult(add(y, z), x),
"(y + z) * x",
),
(
add(x, mod(y, z)),
"x + y % z",
),
(
add(mult(y, z), x),
"y * z + x",
),
(
add(add(x, y), add(y, z)),
"x + y + (y + z)",
),
(
div(add(x, y), add(y, z)),
"(x + y) / (y + z)",
),
],
"shift": [
(
div(x, lshift(y, z)),
"x / (y << z)",
),
(
mult(lshift(y, z), x),
"(y << z) * x",
),
(
lshift(x, mult(y, z)),
"x << y * z",
),
(
lshift(mult(x, y), z),
"x * y << z",
),
(
lshift(mult(x, y), z),
"x * y << z",
),
(
lshift(lshift(x, y), z),
"x << y << z",
),
(
lshift(x, lshift(y, z)),
"x << (y << z)",
),
],
"bitwise": [
(
add(bit_or(x, y), bit_or(y, z)),
"(x | y) + (y | z)",
),
(
bit_and(bit_or(x, y), bit_or(y, z)),
"(x | y) & (y | z)",
),
(
bit_or(bit_and(x, y), bit_and(y, z)),
"x & y | y & z",
),
(
bit_and(bit_xor(x, bit_or(y, z)), z),
"(x ^ (y | z)) & z",
),
],
"comparison": [
(
not_eq(add(x, y), z),
"x + y != z",
),
(
eq(pow(x, y), z),
"x ** y == z",
),
(
lt(x, div(y, z)),
"x < y / z",
),
(
lt(x, if_then_else(y, y, y)),
"x < (y if y else y)",
),
],
"boolean": [
(
not_(and_(x, y)),
"not (x and y)",
),
(
and_(not_(x), y),
"not x and y",
),
(
and_(or_(x, y), z),
"(x or y) and z",
),
(
or_(x, or_(y, z)),
"x or (y or z)",
),
(
or_(or_(x, y), z),
"x or y or z",
),
(
or_(and_(x, y), z),
# Maybe we should consider adding parentheses here
# for readability, even though it's not necessary.
"x and y or z",
),
(
and_(or_(not_(x), y), z),
"(not x or y) and z",
),
(
and_(lt(x, y), lt(y, z)),
"x < y and y < z",
),
(
or_(not_(eq(x, y)), lt(y, z)),
# Same as the previous one, the code here is not
# readable without parentheses.
"not x == y or y < z",
),
(
and_(if_then_else(x, y, z), x),
"(y if x else z) and x",
),
(
not_(if_then_else(x, y, z)),
"not (y if x else z)",
),
],
"if-then-else": [
(
if_then_else(x, if_then_else(y, y, y), z),
"y if y else y if x else z",
),
(
if_then_else(if_then_else(x, x, x), y, z),
"y if (x if x else x) else z",
),
(
if_then_else(x, y, if_then_else(z, z, z)),
"y if x else (z if z else z)",
),
(
if_then_else(lt(x, x), add(y, y), mult(z, z)),
"y + y if x < x else z * z",
),
(
if_then_else(LambdaDoc([x], x), LambdaDoc([y], y), LambdaDoc([z], z)),
"(lambda y: y) if (lambda x: x) else (lambda z: z)",
),
],
"lambda": [
(
LambdaDoc([x, y], add(z, z)),
"lambda x, y: z + z",
),
(
add(LambdaDoc([x, y], z), z),
"(lambda x, y: z) + z",
),
(
LambdaDoc([x, y], add(z, z)).call(x, y),
"(lambda x, y: z + z)(x, y)",
),
(
LambdaDoc([x], LambdaDoc([y], z)),
"lambda x: lambda y: z",
),
],
}
return [
pytest.param(*args, id=f"{group_name}-{i}")
for group_name, cases in test_cases.items()
for i, args in enumerate(cases)
]
@pytest.mark.parametrize("doc, expected", generate_expr_precedence_test_cases())
def test_expr_precedence(doc, expected):
assert to_python_script(doc) == format_script(expected)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tvmscript_printer_underlining.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Optional
import pytest
from tvm.runtime import ObjectPath
from tvm.script.printer.doc import (
StmtBlockDoc,
ExprStmtDoc,
IdDoc,
OperationDoc,
OperationKind,
)
from tvm.script.printer.doc_printer import to_python_script
def make_path(name: str) -> ObjectPath:
    """Shorthand for the ``root.<name>`` object paths used throughout the tests."""
    root = ObjectPath.root()
    return root.attr(name)
def make_id_doc(name: str, path_name: Optional[str] = None) -> IdDoc:
    """Create an ``IdDoc`` whose source path is ``root.<path_name>``.

    ``path_name`` defaults to ``name``; passing a different value lets several
    docs share one underline target path.
    """
    doc = IdDoc(name)
    doc.source_paths = [make_path(name if path_name is None else path_name)]
    return doc
def format_script(s: str) -> str:
    """Strip leading/trailing blank lines and dedent so the minimum indent is 0.

    Whitespace-only lines are ignored when computing the common margin.
    The result always ends with exactly one trailing newline; input with no
    visible content normalizes to just ``"\\n"``.
    """
    s = s.strip("\n")
    all_lines = s.splitlines()
    indents = [
        len(line) - len(line.lstrip(" "))
        for line in all_lines
        if line and not line.isspace()
    ]
    if not indents:
        # Nothing but blank lines: canonical empty script.
        return "\n"
    cut = min(indents)
    out = "\n".join(line[cut:] for line in all_lines)
    if not out.endswith("\n"):
        out += "\n"
    return out
def test_underline_basic():
doc = StmtBlockDoc(
[
ExprStmtDoc(make_id_doc("foo")),
ExprStmtDoc(OperationDoc(OperationKind.Add, [make_id_doc("bar"), make_id_doc("baz")])),
ExprStmtDoc(make_id_doc("qux")),
]
)
assert to_python_script(doc, path_to_underline=make_path("baz")) == format_script(
"""
foo
bar + baz
^^^
qux
"""
)
def test_underline_multiple_spans():
doc = StmtBlockDoc(
[
ExprStmtDoc(make_id_doc("foo")),
ExprStmtDoc(make_id_doc("bar")),
ExprStmtDoc(OperationDoc(OperationKind.Add, [make_id_doc("foo"), make_id_doc("foo")])),
]
)
assert to_python_script(doc, path_to_underline=make_path("foo")) == format_script(
"""
foo
^^^
bar
foo + foo
^^^ ^^^
"""
)
def test_underline_multiple_spans_with_line_numbers():
doc = StmtBlockDoc(
[
ExprStmtDoc(make_id_doc("foo")),
ExprStmtDoc(make_id_doc("bar")),
ExprStmtDoc(OperationDoc(OperationKind.Add, [make_id_doc("foo"), make_id_doc("foo")])),
]
)
assert to_python_script(
doc, print_line_numbers=True, path_to_underline=make_path("foo")
) == format_script(
"""
1 foo
^^^
2 bar
3 foo + foo
^^^ ^^^
"""
)
def test_underline_multiline():
doc = StmtBlockDoc(
[
ExprStmtDoc(IdDoc("foo")),
ExprStmtDoc(IdDoc("bar")),
]
)
doc.source_paths = [make_path("whole_doc")]
assert to_python_script(doc, path_to_underline=make_path("whole_doc")) == format_script(
"""
foo
^^^
bar
^^^
"""
)
@pytest.mark.parametrize(
"to_underline, expected_text",
[
(
[0],
"""
x0
^^
x1
x2
(... 7 lines skipped ...)
""",
),
(
[1],
"""
x0
x1
^^
x2
x3
(... 6 lines skipped ...)
""",
),
(
[3],
"""
x0
x1
x2
x3
^^
x4
x5
(... 4 lines skipped ...)
""",
),
(
[4],
"""
(... 2 lines skipped ...)
x2
x3
x4
^^
x5
x6
(... 3 lines skipped ...)
""",
),
(
[6],
"""
(... 4 lines skipped ...)
x4
x5
x6
^^
x7
x8
x9
""",
),
(
[9],
"""
(... 7 lines skipped ...)
x7
x8
x9
^^
""",
),
(
[0, 9],
"""
x0
^^
x1
x2
(... 4 lines skipped ...)
x7
x8
x9
^^
""",
),
(
[0, 3, 9],
"""
x0
^^
x1
x2
x3
^^
x4
x5
x6
x7
x8
x9
^^
""",
),
(
[0, 6, 9],
"""
x0
^^
x1
x2
x3
x4
x5
x6
^^
x7
x8
x9
^^
""",
),
(
[33],
"""
x0
x1
x2
x3
x4
x5
x6
x7
x8
x9
""",
),
],
)
def test_print_two_context_lines(to_underline, expected_text):
    """With num_context_lines=2, lines far from any underline are elided."""
    stmts = [
        ExprStmtDoc(make_id_doc(f"x{i}", "yes" if i in to_underline else "no"))
        for i in range(10)
    ]
    result = to_python_script(
        StmtBlockDoc(stmts), num_context_lines=2, path_to_underline=make_path("yes")
    )
    assert result == format_script(expected_text)
def test_underline_and_print_line_numbers():
doc = StmtBlockDoc([ExprStmtDoc(make_id_doc(f"line{i + 1}")) for i in range(12)])
result = to_python_script(doc, print_line_numbers=True, path_to_underline=make_path("line6"))
assert result == format_script(
"""
1 line1
2 line2
3 line3
4 line4
5 line5
6 line6
^^^^^
7 line7
8 line8
9 line9
10 line10
11 line11
12 line12
"""
)
def test_underline_and_print_line_numbers_with_context():
doc = StmtBlockDoc([ExprStmtDoc(make_id_doc(f"line{i + 1}")) for i in range(12)])
result = to_python_script(
doc, print_line_numbers=True, num_context_lines=2, path_to_underline=make_path("line8")
)
assert result == format_script(
"""
(... 5 lines skipped ...)
6 line6
7 line7
8 line8
^^^^^
9 line9
10 line10
(... 2 lines skipped ...)
"""
)
def test_underline_based_on_path_prefix():
doc = StmtBlockDoc([ExprStmtDoc(make_id_doc("foo")), ExprStmtDoc(make_id_doc("bar"))])
result = to_python_script(doc, path_to_underline=make_path("foo").attr("x").attr("y"))
# There is no document that matches the desired path exactly,
# but path of "foo" is a prefix of the desired path, and thus should be underlined.
assert result == format_script(
"""
foo
^^^
bar
"""
)
def test_longer_prefix_must_win():
foo_x = IdDoc("foo_x")
foo_x.source_paths = [make_path("foo").attr("x")]
doc = StmtBlockDoc(
[ExprStmtDoc(make_id_doc("foo")), ExprStmtDoc(make_id_doc("bar")), ExprStmtDoc(foo_x)]
)
result = to_python_script(doc, path_to_underline=make_path("foo").attr("x").attr("y"))
# "foo" should not be underlined because there is a document with a more specific path prefix
assert result == format_script(
"""
foo
bar
foo_x
^^^^^
"""
)
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tvmscript_printer_var_table.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This file tests the FFI binding of script.printer.VarTable.
These only make sure parameter can be passed to the C++ functions
correctly. The test for the functionality of VarTable is in C++.
"""
from tvm.runtime import ObjectPath
from tvm.script.printer.doc import LiteralDoc
from tvm.script.printer.frame import VarDefFrame
from tvm.script.printer.var_table import VarTable
from tvm.tir import Var
def test_define():
    """The IdDoc returned by define() and by a later lookup carries name + source path."""
    table = VarTable()
    name = "a"
    var = Var(name, dtype="int32")
    path = ObjectPath.root().attr("a")
    frame = VarDefFrame()

    defined_doc = table.define(var, name, path, frame)
    assert defined_doc.name == "a"
    assert list(defined_doc.source_paths) == [path]

    # Looking the same variable up again yields an equivalent doc.
    looked_up = table.get_var_doc(var, path)
    assert looked_up.name == "a"
    assert list(looked_up.source_paths) == [path]
def test_define_by_doc():
    """define_by_doc() registers a doc factory; lookup returns its doc with paths attached."""
    table = VarTable()
    name = "a"
    var = Var(name, dtype="int32")
    path = ObjectPath.root().attr("a")
    frame = VarDefFrame()

    table.define_by_doc(var, lambda: LiteralDoc(name), frame)

    doc = table.get_var_doc(var, path)
    assert isinstance(doc, LiteralDoc)
    assert doc.value == name
    assert list(doc.source_paths) == [path]
def test_is_var_defined():
    """Both is_var_defined() and the `in` operator report a defined variable."""
    table = VarTable()
    var = Var("a", dtype="int32")
    frame = VarDefFrame()
    table.define(var, "a", ObjectPath.root().attr("a"), frame)

    assert table.is_var_defined(var)
    assert var in table
def test_var_out_of_scope():
    """A variable defined under a frame stops being visible once the frame exits."""
    table = VarTable()
    name = "a"
    var = Var(name, dtype="int32")
    path = ObjectPath.root().attr("a")
    frame = VarDefFrame()
    table.define(var, name, path, frame)

    with frame:
        # Inside the frame the definition is visible.
        assert var in table

    # After the frame exits, the variable is gone and lookups return None.
    assert var not in table
    assert table.get_var_doc(var, path) is None
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tvmscript_regression.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy
import tvm
from tvm.script import tir as T
# A multi-element numpy array kept at module (outermost) scope.  The round-trip
# test below must still parse the printed script correctly in the presence of
# such globals alongside the `tvm.script.tir` submodule.
np_array = numpy.array([0, 1, 2, 3])
@T.prim_func
def matmul(a: T.handle, b: T.handle, c: T.handle) -> None:
    # 128x128x128 float32 matmul; note B is indexed [vj, vk], i.e. B is
    # accessed in transposed order relative to a textbook C = A @ B.
    A = T.match_buffer(a, [128, 128])
    B = T.match_buffer(b, [128, 128])
    C = T.match_buffer(c, [128, 128])
    for i, j, k in T.grid(128, 128, 128):
        with T.block("update"):
            # S = spatial axis, R = reduction axis
            vi, vj, vk = T.axis.remap("SSR", [i, j, k])
            with T.init():
                C[vi, vj] = T.float32(0)
            C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
def test_multi_element_array_in_outmost_namespace():
    """Round-trip `matmul` through TVMScript text while a multi-element numpy
    array (np_array above) lives in the module's global namespace."""
    original = matmul
    reparsed = tvm.script.from_source(original.script(show_meta=True))
    tvm.ir.assert_structural_equal(original, reparsed)
# Allow running this test file directly during development.
if __name__ == "__main__":
    test_multi_element_array_in_outmost_namespace()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tvmscript_roundtrip.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import pytest
import tvm
import tvm.testing
from tvm import tir
from tvm.script import tir as T
import numpy as np
def opt_gemm_normalize():
    """Return an IRModule with the schedule-normalized (pre-lowering) form of a
    1024x1024 cache-blocked GEMM; used as a TVMScript round-trip fixture."""

    @tvm.script.ir_module
    class Module:
        @T.prim_func
        def mmult(A: T.handle, B: T.handle, C: T.handle) -> None:
            # function attr dict
            T.func_attr({"global_symbol": "mmult", "tir.noalias": True})
            # buffer definition
            C_global = T.buffer_decl([1024, 1024], elem_offset=0, align=64, offset_factor=1)
            packedB = T.buffer_decl([32, 1024, 32], elem_offset=0, align=64, offset_factor=1)
            A_1 = T.match_buffer(A, [1024, 1024], elem_offset=0, align=64, offset_factor=1)
            B_1 = T.match_buffer(B, [1024, 1024], elem_offset=0, align=64, offset_factor=1)
            C_1 = T.match_buffer(C, [1024, 1024], elem_offset=0, align=64, offset_factor=1)
            # body
            # Pack B into [32, 1024, 32] so the innermost 32-wide dim vectorizes.
            T.realize(packedB[0:32, 0:1024, 0:32], "")
            for x in T.parallel(0, 32):
                for y in T.serial(0, 1024):
                    for z in T.vectorized(0, 32):
                        packedB[x, y, z] = B_1[y, ((x * 32) + z)]
            T.realize(C_1[0:1024, 0:1024], "")
            for x_outer in T.parallel(0, 32):
                for y_outer in T.serial(0, 32):
                    # Accumulate each 32x32 output tile in C_global, then copy back.
                    T.realize(
                        C_global[
                            (x_outer * 32) : ((x_outer * 32) + 32),
                            (y_outer * 32) : ((y_outer * 32) + 32),
                        ],
                        "global",
                    )
                    for x_c_init in T.serial(0, 32):
                        for y_c_init in T.vectorized(0, 32):
                            C_global[
                                (x_c_init + (x_outer * 32)), (y_c_init + (y_outer * 32))
                            ] = T.float32(0)
                    # Reduction: k split 256x4, inner 4 unrolled, y vectorized.
                    for k_outer in T.serial(0, 256):
                        for x_c in T.serial(0, 32):
                            for k_inner in T.unroll(0, 4):
                                for y_c in T.vectorized(0, 32):
                                    C_global[
                                        (x_c + (x_outer * 32)), (y_c + (y_outer * 32))
                                    ] = C_global[(x_c + (x_outer * 32)), (y_c + (y_outer * 32))] + (
                                        A_1[(x_c + (x_outer * 32)), (k_inner + (k_outer * 4))]
                                        * packedB[
                                            T.floordiv((y_c + (y_outer * 32)), 32),
                                            (k_inner + (k_outer * 4)),
                                            T.floormod((y_c + (y_outer * 32)), 32),
                                        ]
                                    )
                    for x_inner in T.serial(0, 32):
                        for y_inner in T.serial(0, 32):
                            C_1[(x_inner + (x_outer * 32)), (y_inner + (y_outer * 32))] = C_global[
                                (x_inner + (x_outer * 32)), (y_inner + (y_outer * 32))
                            ]

    return Module
def opt_gemm_lower():
    """Return an IRModule with the lowered (flat-memory, ramp/vectorized) form of
    the blocked GEMM from opt_gemm_normalize; TVMScript round-trip fixture."""

    @tvm.script.ir_module
    class Module:
        @T.prim_func
        def mmult(A: T.handle, B: T.handle, C: T.handle) -> None:
            # function attr dict
            T.func_attr({"global_symbol": "mmult", "tir.noalias": True})
            # A and C are already flattened to 1-D (128*128=16384 elements).
            A_1 = T.match_buffer(A, [16384], elem_offset=0, align=64, offset_factor=1)
            B_1 = T.match_buffer(B, [1024, 1024], elem_offset=0, align=64, offset_factor=1)
            C_1 = T.match_buffer(C, [16384], elem_offset=0, align=64, offset_factor=1)
            # body
            packedB_data = T.allocate([32768], "float32", "global")
            packedB = T.buffer_decl(
                shape=[32768], dtype="float32", scope="global", data=packedB_data
            )
            for x in T.parallel(0, 32):
                for y in T.serial(0, 1024):
                    packedB[T.ramp(((x * 32768) + (y * 32)), 1, 32)] = B_1[y, T.ramp(x * 32, 1, 32)]
            for x_outer in T.parallel(0, 32):
                # Per-thread accumulation tile (32x32 floats).
                C_global_data = T.allocate([1024], "float32", "global")
                C_global = T.buffer_decl(
                    shape=[1024], dtype="float32", scope="global", data=C_global_data
                )
                for y_outer in T.serial(0, 32):
                    for x_c_init in T.serial(0, 32):
                        C_global[T.ramp((x_c_init * 32), 1, 32)] = T.broadcast(T.float32(0), 32)
                    for k_outer in T.serial(0, 256):
                        for x_c in T.serial(0, 32):
                            # k_inner fully unrolled: 4 copies with A offsets +0..+3
                            # and packedB offsets +0/+32/+64/+96.
                            C_global[T.ramp((x_c * 32), 1, 32)] = C_global[
                                T.ramp((x_c * 32), 1, 32)
                            ] + (
                                T.broadcast(
                                    A_1[(((x_outer * 32768) + (x_c * 1024)) + (k_outer * 4)),],
                                    32,
                                )
                                * packedB[T.ramp(((y_outer * 32768) + (k_outer * 128)), 1, 32)]
                            )
                            C_global[T.ramp((x_c * 32), 1, 32)] = C_global[
                                T.ramp((x_c * 32), 1, 32)
                            ] + (
                                T.broadcast(
                                    A_1[((((x_outer * 32768) + (x_c * 1024)) + (k_outer * 4)) + 1),],
                                    32,
                                )
                                * packedB[T.ramp((((y_outer * 32768) + (k_outer * 128)) + 32), 1, 32)]
                            )
                            C_global[T.ramp((x_c * 32), 1, 32)] = C_global[
                                T.ramp((x_c * 32), 1, 32)
                            ] + (
                                T.broadcast(
                                    A_1[((((x_outer * 32768) + (x_c * 1024)) + (k_outer * 4)) + 2),],
                                    32,
                                )
                                * packedB[T.ramp((((y_outer * 32768) + (k_outer * 128)) + 64), 1, 32)]
                            )
                            C_global[T.ramp((x_c * 32), 1, 32)] = C_global[
                                T.ramp((x_c * 32), 1, 32)
                            ] + (
                                T.broadcast(
                                    A_1[((((x_outer * 32768) + (x_c * 1024)) + (k_outer * 4)) + 3),],
                                    32,
                                )
                                * packedB[T.ramp((((y_outer * 32768) + (k_outer * 128)) + 96), 1, 32)]
                            )
                    for x_inner in T.serial(0, 32):
                        for y_inner in T.serial(0, 32):
                            C_1[
                                ((((x_outer * 32768) + (x_inner * 1024)) + (y_outer * 32)) + y_inner)
                            ] = C_global[((x_inner * 32) + y_inner)]

    return Module
def opt_gemm_mod_host():
    """Return an IRModule with the host-side (PackedFunc ABI) form of the blocked
    GEMM: argument unpacking, DLTensor validation, and workspace management.
    TVMScript round-trip fixture."""

    @tvm.script.ir_module
    class Module:
        @T.prim_func
        def mmult(
            args: T.handle,
            arg_type_ids: T.handle,
            num_args: T.int32,
            out_ret_value: T.handle,
            out_ret_tcode: T.handle,
        ) -> T.int32:
            # function attr dict
            T.func_attr(
                {
                    "tir.noalias": True,
                    "global_symbol": "mmult",
                    "tir.is_entry_func": True,
                    "calling_conv": 1,
                }
            )
            # buffer definition
            buf_type_ids = T.match_buffer(arg_type_ids, [3], dtype="int32")
            packedB = T.buffer_decl([32768], dtype="float32")
            C_global = T.buffer_decl([1024], dtype="float32")
            # var definition
            # C_global = T.buffer_var("float32", "global")
            # packedB = T.buffer_var("float32", "global")
            # body
            # --- unpack the three PackedFunc arguments (A, B, C DLTensors) ---
            assert num_args == 3, "mmult: num_args should be 3"
            arg0: T.handle = T.tvm_struct_get(args, 0, 12, dtype="handle")
            arg0_code: T.int32 = buf_type_ids[0]
            arg1: T.handle = T.tvm_struct_get(args, 1, 12, dtype="handle")
            arg1_code: T.int32 = buf_type_ids[1]
            arg2: T.handle = T.tvm_struct_get(args, 2, 12, dtype="handle")
            arg2_code: T.int32 = buf_type_ids[2]
            A_data: T.Ptr[T.int32] = T.tvm_struct_get(arg0, 0, 1, dtype="handle")
            T.attr(A_data, "storage_alignment", 128)
            A = T.buffer_decl([1024 * 1024], dtype="int32", data=A_data)
            buf0_shape_data: T.Ptr[T.int32] = T.tvm_struct_get(arg0, 0, 2, dtype="handle")
            buf0_shape = T.buffer_decl([2], dtype="int32", data=buf0_shape_data)
            buf0_strides_data: T.Ptr[T.int32] = T.tvm_struct_get(arg0, 0, 3, dtype="handle")
            buf0_strides = T.buffer_decl([2], dtype="int32", data=buf0_strides_data)
            dev_id: T.int32 = T.tvm_struct_get(arg0, 0, 9, dtype="int32")
            B_data: T.Ptr[T.int32] = T.tvm_struct_get(arg1, 0, 1, dtype="handle")
            T.attr(B_data, "storage_alignment", 128)
            B = T.buffer_decl([1024 * 1024], dtype="int32", data=B_data)
            buf1_shape_data: T.Ptr[T.int32] = T.tvm_struct_get(arg1, 0, 2, dtype="handle")
            buf1_shape = T.buffer_decl([2], dtype="int32", data=buf1_shape_data)
            buf1_strides_data: T.Ptr[T.int32] = T.tvm_struct_get(arg1, 0, 3, dtype="handle")
            buf1_strides = T.buffer_decl([2], dtype="int32", data=buf1_strides_data)
            C_data: T.Ptr[T.int32] = T.tvm_struct_get(arg2, 0, 1, dtype="handle")
            T.attr(C_data, "storage_alignment", 128)
            C = T.buffer_decl([1024 * 1024], dtype="int32", data=C_data)
            buf2_shape_data: T.Ptr[T.int32] = T.tvm_struct_get(arg2, 0, 2, dtype="handle")
            buf2_shape = T.buffer_decl([2], dtype="int32", data=buf2_shape_data)
            buf2_strides_data: T.Ptr[T.int32] = T.tvm_struct_get(arg2, 0, 3, dtype="handle")
            buf2_strides = T.buffer_decl([2], dtype="int32", data=buf2_strides_data)
            # --- validate type codes, ndim, dtype, shape, strides, offset, device ---
            assert (((arg0_code == 3) or (arg0_code == 13)) or (arg0_code == 7)) or (
                arg0_code == 4
            ), "mmult: Expect arg[0] to be pointer"
            assert (((arg1_code == 3) or (arg1_code == 13)) or (arg1_code == 7)) or (
                arg1_code == 4
            ), "mmult: Expect arg[1] to be pointer"
            assert (((arg2_code == 3) or (arg2_code == 13)) or (arg2_code == 7)) or (
                arg2_code == 4
            ), "mmult: Expect arg[2] to be pointer"
            assert 2 == T.tvm_struct_get(
                arg0, 0, 4, dtype="int32"
            ), "arg0.ndim is expected to equal 2"
            assert 2 == T.tvm_struct_get(
                arg0, 0, 4, dtype="int32"
            ), "arg0.ndim is expected to equal 2"
            assert (
                (T.tvm_struct_get(arg0, 0, 5, dtype="uint8") == T.uint8(2))
                and (T.tvm_struct_get(arg0, 0, 6, dtype="uint8") == T.uint8(32))
            ) and (
                T.tvm_struct_get(arg0, 0, 7, dtype="uint16") == T.uint16(1)
            ), "arg0.dtype is expected to be float32"
            assert 1024 == T.cast(
                buf0_shape[0], "int32"
            ), "Argument arg0.shape[0] has an unsatisfied constraint"
            assert 1024 == T.cast(
                buf0_shape[1], "int32"
            ), "Argument arg0.shape[1] has an unsatisfied constraint"
            # NULL strides means "compact"; otherwise strides must describe a
            # compact row-major layout.
            if not (T.isnullptr(buf0_strides.data, dtype="bool")):
                assert (1 == T.cast(buf0_strides[1], "int32")) and (
                    1024 == T.cast(buf0_strides[0], "int32")
                ), "arg0.strides: expected to be compact array"
                T.evaluate(0)
            assert T.uint64(0) == T.tvm_struct_get(
                arg0, 0, 8, dtype="uint64"
            ), "Argument arg0.byte_offset has an unsatisfied constraint"
            assert 1 == T.tvm_struct_get(
                arg0, 0, 10, dtype="int32"
            ), "Argument arg0.device_type has an unsatisfied constraint"
            assert 2 == T.tvm_struct_get(
                arg1, 0, 4, dtype="int32"
            ), "arg1.ndim is expected to equal 2"
            assert 2 == T.tvm_struct_get(
                arg1, 0, 4, dtype="int32"
            ), "arg1.ndim is expected to equal 2"
            assert (
                (T.tvm_struct_get(arg1, 0, 5, dtype="uint8") == T.uint8(2))
                and (T.tvm_struct_get(arg1, 0, 6, dtype="uint8") == T.uint8(32))
            ) and (
                T.tvm_struct_get(arg1, 0, 7, dtype="uint16") == T.uint16(1)
            ), "arg1.dtype is expected to be float32"
            assert 1024 == T.cast(
                buf1_shape[0], "int32"
            ), "Argument arg1.shape[0] has an unsatisfied constraint"
            assert 1024 == T.cast(
                buf1_shape[1], "int32"
            ), "Argument arg1.shape[1] has an unsatisfied constraint"
            if not (T.isnullptr(buf1_strides.data, dtype="bool")):
                assert (1 == T.cast(buf1_strides[1], "int32")) and (
                    1024 == T.cast(buf1_strides[0], "int32")
                ), "arg1.strides: expected to be compact array"
                T.evaluate(0)
            assert T.uint64(0) == T.tvm_struct_get(
                arg1, 0, 8, dtype="uint64"
            ), "Argument arg1.byte_offset has an unsatisfied constraint"
            assert 1 == T.tvm_struct_get(
                arg1, 0, 10, dtype="int32"
            ), "Argument arg1.device_type has an unsatisfied constraint"
            assert dev_id == T.tvm_struct_get(
                arg1, 0, 9, dtype="int32"
            ), "Argument arg1.device_id has an unsatisfied constraint"
            assert 2 == T.tvm_struct_get(
                arg2, 0, 4, dtype="int32"
            ), "arg2.ndim is expected to equal 2"
            assert 2 == T.tvm_struct_get(
                arg2, 0, 4, dtype="int32"
            ), "arg2.ndim is expected to equal 2"
            assert (
                (T.tvm_struct_get(arg2, 0, 5, dtype="uint8") == T.uint8(2))
                and (T.tvm_struct_get(arg2, 0, 6, dtype="uint8") == T.uint8(32))
            ) and (
                T.tvm_struct_get(arg2, 0, 7, dtype="uint16") == T.uint16(1)
            ), "arg2.dtype is expected to be float32"
            assert 1024 == T.cast(
                buf2_shape[0], "int32"
            ), "Argument arg2.shape[0] has an unsatisfied constraint"
            assert 1024 == T.cast(
                buf2_shape[1], "int32"
            ), "Argument arg2.shape[1] has an unsatisfied constraint"
            if not (T.isnullptr(buf2_strides.data, dtype="bool")):
                assert (1 == T.cast(buf2_strides[1], "int32")) and (
                    1024 == T.cast(buf2_strides[0], "int32")
                ), "arg2.strides: expected to be compact array"
                T.evaluate(0)
            assert T.uint64(0) == T.tvm_struct_get(
                arg2, 0, 8, dtype="uint64"
            ), "Argument arg2.byte_offset has an unsatisfied constraint"
            assert 1 == T.tvm_struct_get(
                arg2, 0, 10, dtype="int32"
            ), "Argument arg2.device_type has an unsatisfied constraint"
            assert dev_id == T.tvm_struct_get(
                arg2, 0, 9, dtype="int32"
            ), "Argument arg2.device_id has an unsatisfied constraint"
            # --- compute: allocate workspaces, run the blocked GEMM, free them ---
            T.attr(0, "compute_scope", "mmult_compute_")
            T.attr(packedB.data, "storage_scope", "global")
            T.attr(packedB.data, "storage_alignment", 128)
            with T.let(
                packedB.data,
                T.TVMBackendAllocWorkspace(1, dev_id, T.uint64(4194304), 2, 32, dtype="handle"),
            ):
                if T.isnullptr(packedB.data, dtype="bool"):
                    T.evaluate(T.tvm_throw_last_error(dtype="int32"))
                for x in T.parallel(0, 32):
                    for y in T.serial(0, 1024):
                        packedB[T.ramp(((x * 32768) + (y * 32)), 1, 32)] = B[
                            T.ramp(((y * 1024) + (x * 32)), 1, 32)
                        ]
                for x_outer in T.parallel(0, 32):
                    T.attr(C_global.data, "storage_scope", "global")
                    T.attr(C_global.data, "storage_alignment", 128)
                    with T.let(
                        C_global.data,
                        T.TVMBackendAllocWorkspace(
                            1, dev_id, T.uint64(4096), 2, 32, dtype="handle"
                        ),
                    ):
                        if T.isnullptr(C_global.data, dtype="bool"):
                            T.evaluate(T.tvm_throw_last_error(dtype="int32"))
                        for y_outer in T.serial(0, 32):
                            for x_c_init in T.serial(0, 32):
                                C_global[T.ramp((x_c_init * 32), 1, 32)] = T.broadcast(
                                    T.float32(0), 32
                                )
                            for k_outer in T.serial(0, 256):
                                for x_c in T.serial(0, 32):
                                    # llvm intrin id 97 with 3 args over 32 lanes —
                                    # presumably a fused multiply-add; TODO confirm
                                    # against the target's intrinsic table.
                                    # k_inner unrolled x4 (A +0..+3, packedB +0/+32/+64/+96).
                                    C_global[T.ramp((x_c * 32), 1, 32)] = T.call_llvm_pure_intrin(
                                        T.uint32(97),
                                        T.uint32(3),
                                        T.broadcast(
                                            A[(((x_outer * 32768) + (x_c * 1024)) + (k_outer * 4)),],
                                            32,
                                        ),
                                        packedB[
                                            T.ramp(((y_outer * 32768) + (k_outer * 128)), 1, 32)
                                        ],
                                        C_global[T.ramp((x_c * 32), 1, 32)],
                                        dtype="float32x32",
                                    )
                                    C_global[T.ramp((x_c * 32), 1, 32)] = T.call_llvm_pure_intrin(
                                        T.uint32(97),
                                        T.uint32(3),
                                        T.broadcast(
                                            A[
                                                ((((x_outer * 32768) + (x_c * 1024)) + (k_outer * 4)) + 1),
                                            ],
                                            32,
                                        ),
                                        packedB[
                                            T.ramp((((y_outer * 32768) + (k_outer * 128)) + 32), 1, 32)
                                        ],
                                        C_global[T.ramp((x_c * 32), 1, 32)],
                                        dtype="float32x32",
                                    )
                                    C_global[T.ramp((x_c * 32), 1, 32)] = T.call_llvm_pure_intrin(
                                        T.uint32(97),
                                        T.uint32(3),
                                        T.broadcast(
                                            A[
                                                ((((x_outer * 32768) + (x_c * 1024)) + (k_outer * 4)) + 2),
                                            ],
                                            32,
                                        ),
                                        packedB[
                                            T.ramp((((y_outer * 32768) + (k_outer * 128)) + 64), 1, 32)
                                        ],
                                        C_global[T.ramp((x_c * 32), 1, 32)],
                                        dtype="float32x32",
                                    )
                                    C_global[T.ramp((x_c * 32), 1, 32)] = T.call_llvm_pure_intrin(
                                        T.uint32(97),
                                        T.uint32(3),
                                        T.broadcast(
                                            A[
                                                ((((x_outer * 32768) + (x_c * 1024)) + (k_outer * 4)) + 3),
                                            ],
                                            32,
                                        ),
                                        packedB[
                                            T.ramp((((y_outer * 32768) + (k_outer * 128)) + 96), 1, 32)
                                        ],
                                        C_global[T.ramp((x_c * 32), 1, 32)],
                                        dtype="float32x32",
                                    )
                            for x_inner in T.serial(0, 32):
                                for y_inner in T.serial(0, 32):
                                    C[
                                        (
                                            (((x_outer * 32768) + (x_inner * 1024)) + (y_outer * 32))
                                            + y_inner
                                        )
                                    ] = C_global[((x_inner * 32) + y_inner)]
                    if T.TVMBackendFreeWorkspace(1, dev_id, C_global.data, dtype="int32") != 0:
                        T.evaluate(T.tvm_throw_last_error(dtype="int32"))
                if T.TVMBackendFreeWorkspace(1, dev_id, packedB.data, dtype="int32") != 0:
                    T.evaluate(T.tvm_throw_last_error(dtype="int32"))

    return Module
def opt_conv_tensorcore_normalize():
    """Return the schedule-normalized (pre-lowering) form of a tensor-core conv2d
    (wmma load / mma_sync / store pattern); TVMScript round-trip fixture."""

    @T.prim_func
    def func(A: T.handle, W: T.handle, Conv: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
        # var definition
        bx = T.env_thread("blockIdx.x")
        by = T.env_thread("blockIdx.y")
        bz = T.env_thread("blockIdx.z")
        tx = T.env_thread("threadIdx.x")
        ty = T.env_thread("threadIdx.y")
        tz = T.env_thread("threadIdx.z")
        # buffer definition
        Apad_shared = T.buffer_decl(
            [16, 16, 16, 16, 16, 16], dtype="float16", elem_offset=0, align=64, offset_factor=1
        )
        Apad_shared_wmma_matrix_a = T.buffer_decl(
            [16, 16, 16, 16, 16, 16], dtype="float16", elem_offset=0, align=64, offset_factor=1
        )
        BA = T.buffer_decl(
            [16, 16], dtype="float16", scope="wmma.matrix_a", align=32, offset_factor=256
        )
        BB = T.buffer_decl(
            [16, 16], dtype="float16", scope="wmma.matrix_b", align=32, offset_factor=256
        )
        BC = T.buffer_decl([16, 16], scope="wmma.accumulator", align=32, offset_factor=256)
        Conv_wmma_accumulator = T.buffer_decl(
            [16, 14, 14, 32, 16, 16], elem_offset=0, align=64, offset_factor=1
        )
        W_shared = T.buffer_decl(
            [3, 3, 16, 32, 16, 16], dtype="float16", elem_offset=0, align=64, offset_factor=1
        )
        W_shared_wmma_matrix_b = T.buffer_decl(
            [3, 3, 16, 32, 16, 16], dtype="float16", elem_offset=0, align=64, offset_factor=1
        )
        buffer = T.buffer_decl(
            [16, 16], dtype="float16", scope="shared", align=32, offset_factor=256
        )
        buffer_1 = T.buffer_decl(
            [16, 16], dtype="float16", scope="wmma.matrix_a", align=32, offset_factor=256
        )
        buffer_2 = T.buffer_decl(
            [16, 16], dtype="float16", scope="shared", align=32, offset_factor=256
        )
        buffer_3 = T.buffer_decl(
            [16, 16], dtype="float16", scope="wmma.matrix_b", align=32, offset_factor=256
        )
        buffer_4 = T.buffer_decl([16, 16], scope="wmma.accumulator", align=32, offset_factor=256)
        buffer_5 = T.buffer_decl([16, 16], align=32, offset_factor=256)
        A_1 = T.match_buffer(
            A, [16, 14, 14, 16, 16, 16], dtype="float16", elem_offset=0, align=64, offset_factor=1
        )
        W_1 = T.match_buffer(
            W, [3, 3, 16, 32, 16, 16], dtype="float16", elem_offset=0, align=64, offset_factor=1
        )
        Conv_1 = T.match_buffer(
            Conv, [16, 14, 14, 32, 16, 16], elem_offset=0, align=64, offset_factor=1
        )
        # body
        T.realize(Conv_1[0:16, 0:14, 0:14, 0:32, 0:16, 0:16], "")
        T.launch_thread(bz, 196)
        T.launch_thread(bx, 2)
        T.launch_thread(by, 4)
        T.launch_thread(ty, 4)
        T.launch_thread(tz, 2)
        T.realize(
            Conv_wmma_accumulator[
                ((bx * 8) + (ty * 2)) : (((bx * 8) + (ty * 2)) + 2),
                T.floordiv(bz, 14) : (T.floordiv(bz, 14) + 1),
                T.floormod(bz, 14) : (T.floormod(bz, 14) + 1),
                ((by * 8) + (tz * 4)) : (((by * 8) + (tz * 4)) + 4),
                0:16,
                0:16,
            ],
            "wmma.accumulator",
        )
        # Zero-initialize the 2x4 grid of accumulator fragments per thread.
        for n_c_init in T.serial(0, 2):
            for o_c_init in T.serial(0, 4):
                T.attr(
                    [BC, Conv_wmma_accumulator],
                    "buffer_bind_scope",
                    T.tvm_tuple(
                        (n_c_init + ((bx * 8) + (ty * 2))),
                        1,
                        T.floordiv(bz, 14),
                        1,
                        T.floormod(bz, 14),
                        1,
                        (o_c_init + ((by * 8) + (tz * 4))),
                        1,
                        0,
                        16,
                        0,
                        16,
                        dtype="handle",
                    ),
                )
                T.evaluate(
                    T.tvm_fill_fragment(
                        BC.data,
                        16,
                        16,
                        16,
                        T.floordiv(BC.elem_offset, 256),
                        T.float32(0),
                        dtype="handle",
                    )
                )
        # Main reduction over input-channel chunks and kernel height.
        for ic_outer in T.serial(0, 8):
            for kh in T.serial(0, 3):
                # Stage the (padded) input tile into shared memory.
                T.realize(
                    Apad_shared[
                        (bx * 8) : ((bx * 8) + 8),
                        (T.floordiv(bz, 14) + kh) : ((T.floordiv(bz, 14) + kh) + 1),
                        T.floormod(bz, 14) : (T.floormod(bz, 14) + 3),
                        (ic_outer * 2) : ((ic_outer * 2) + 2),
                        0:16,
                        0:16,
                    ],
                    "shared",
                )
                for ax2 in T.serial(0, 3):
                    for ax3 in T.serial(0, 2):
                        for ax4_ax5_fused_outer in T.serial(0, 8):
                            T.launch_thread(tx, 32)
                            # Out-of-bounds (padding) positions read as float16(0).
                            Apad_shared[
                                ((tz + (ty * 2)) + (bx * 8)),
                                (T.floordiv(bz, 14) + kh),
                                (ax2 + T.floormod(bz, 14)),
                                (ax3 + (ic_outer * 2)),
                                T.floordiv((tx + (ax4_ax5_fused_outer * 32)), 16),
                                T.floormod((tx + (ax4_ax5_fused_outer * 32)), 16),
                            ] = T.if_then_else(
                                (
                                    (
                                        (
                                            ((T.floordiv(bz, 14) + kh) >= 1)
                                            and (((T.floordiv(bz, 14) + kh) - 1) < 14)
                                        )
                                        and ((ax2 + T.floormod(bz, 14)) >= 1)
                                    )
                                    and (((ax2 + T.floormod(bz, 14)) - 1) < 14)
                                ),
                                A_1[
                                    ((tz + (ty * 2)) + (bx * 8)),
                                    ((T.floordiv(bz, 14) + kh) - 1),
                                    ((ax2 + T.floormod(bz, 14)) - 1),
                                    (ax3 + (ic_outer * 2)),
                                    T.floordiv((tx + (ax4_ax5_fused_outer * 32)), 16),
                                    T.floormod((tx + (ax4_ax5_fused_outer * 32)), 16),
                                ],
                                T.float16(0),
                                dtype="float16",
                            )
                # Stage the weight tile into shared memory.
                T.realize(
                    W_shared[
                        kh : (kh + 1),
                        0:3,
                        (ic_outer * 2) : ((ic_outer * 2) + 2),
                        (by * 8) : ((by * 8) + 8),
                        0:16,
                        0:16,
                    ],
                    "shared",
                )
                for ax1 in T.serial(0, 3):
                    for ax2_1 in T.serial(0, 2):
                        T.launch_thread(tx, 32)
                        for ax4_ax5_fused_inner in T.vectorized(0, 8):
                            W_shared[
                                kh,
                                ax1,
                                (ax2_1 + (ic_outer * 2)),
                                ((tz + (ty * 2)) + (by * 8)),
                                T.floordiv((ax4_ax5_fused_inner + (tx * 8)), 16),
                                T.floormod((ax4_ax5_fused_inner + (tx * 8)), 16),
                            ] = W_1[
                                kh,
                                ax1,
                                (ax2_1 + (ic_outer * 2)),
                                ((tz + (ty * 2)) + (by * 8)),
                                T.floordiv((ax4_ax5_fused_inner + (tx * 8)), 16),
                                T.floormod((ax4_ax5_fused_inner + (tx * 8)), 16),
                            ]
                for ic_inner in T.serial(0, 2):
                    for kw in T.serial(0, 3):
                        # Load A fragments from shared memory into wmma.matrix_a.
                        T.realize(
                            Apad_shared_wmma_matrix_a[
                                ((bx * 8) + (ty * 2)) : (((bx * 8) + (ty * 2)) + 2),
                                (T.floordiv(bz, 14) + kh) : ((T.floordiv(bz, 14) + kh) + 1),
                                (kw + T.floormod(bz, 14)) : ((kw + T.floormod(bz, 14)) + 1),
                                ((ic_outer * 2) + ic_inner) : (((ic_outer * 2) + ic_inner) + 1),
                                0:16,
                                0:16,
                            ],
                            "wmma.matrix_a",
                        )
                        for ax0 in T.serial(0, 2):
                            T.attr(
                                [buffer, Apad_shared],
                                "buffer_bind_scope",
                                T.tvm_tuple(
                                    (ax0 + ((bx * 8) + (ty * 2))),
                                    1,
                                    (T.floordiv(bz, 14) + kh),
                                    1,
                                    (kw + T.floormod(bz, 14)),
                                    1,
                                    ((ic_outer * 2) + ic_inner),
                                    1,
                                    0,
                                    16,
                                    0,
                                    16,
                                    dtype="handle",
                                ),
                            )
                            T.attr(
                                [buffer_1, Apad_shared_wmma_matrix_a],
                                "buffer_bind_scope",
                                T.tvm_tuple(
                                    (ax0 + ((bx * 8) + (ty * 2))),
                                    1,
                                    (T.floordiv(bz, 14) + kh),
                                    1,
                                    (kw + T.floormod(bz, 14)),
                                    1,
                                    ((ic_outer * 2) + ic_inner),
                                    1,
                                    0,
                                    16,
                                    0,
                                    16,
                                    dtype="handle",
                                ),
                            )
                            T.evaluate(
                                T.tvm_load_matrix_sync(
                                    buffer_1.data,
                                    16,
                                    16,
                                    16,
                                    T.floordiv(buffer_1.elem_offset, 256),
                                    T.tvm_access_ptr(
                                        T.type_annotation(dtype="float16"),
                                        buffer.data,
                                        buffer.elem_offset,
                                        256,
                                        1,
                                        dtype="handle",
                                    ),
                                    16,
                                    "row_major",
                                    dtype="handle",
                                )
                            )
                        # Load W fragments from shared memory into wmma.matrix_b.
                        T.realize(
                            W_shared_wmma_matrix_b[
                                kh : (kh + 1),
                                kw : (kw + 1),
                                ((ic_outer * 2) + ic_inner) : (((ic_outer * 2) + ic_inner) + 1),
                                ((by * 8) + (tz * 4)) : (((by * 8) + (tz * 4)) + 4),
                                0:16,
                                0:16,
                            ],
                            "wmma.matrix_b",
                        )
                        for ax3_1 in T.serial(0, 4):
                            T.attr(
                                [buffer_2, W_shared],
                                "buffer_bind_scope",
                                T.tvm_tuple(
                                    kh,
                                    1,
                                    kw,
                                    1,
                                    ((ic_outer * 2) + ic_inner),
                                    1,
                                    (ax3_1 + ((by * 8) + (tz * 4))),
                                    1,
                                    0,
                                    16,
                                    0,
                                    16,
                                    dtype="handle",
                                ),
                            )
                            T.attr(
                                [buffer_3, W_shared_wmma_matrix_b],
                                "buffer_bind_scope",
                                T.tvm_tuple(
                                    kh,
                                    1,
                                    kw,
                                    1,
                                    ((ic_outer * 2) + ic_inner),
                                    1,
                                    (ax3_1 + ((by * 8) + (tz * 4))),
                                    1,
                                    0,
                                    16,
                                    0,
                                    16,
                                    dtype="handle",
                                ),
                            )
                            T.evaluate(
                                T.tvm_load_matrix_sync(
                                    buffer_3.data,
                                    16,
                                    16,
                                    16,
                                    T.floordiv(buffer_3.elem_offset, 256),
                                    T.tvm_access_ptr(
                                        T.type_annotation(dtype="float16"),
                                        buffer_2.data,
                                        buffer_2.elem_offset,
                                        256,
                                        1,
                                        dtype="handle",
                                    ),
                                    16,
                                    "row_major",
                                    dtype="handle",
                                )
                            )
                        # MMA: accumulate each (n_c, o_c) fragment pair.
                        for n_c in T.serial(0, 2):
                            for o_c in T.serial(0, 4):
                                T.attr(
                                    [BA, Apad_shared_wmma_matrix_a],
                                    "buffer_bind_scope",
                                    T.tvm_tuple(
                                        (n_c + ((bx * 8) + (ty * 2))),
                                        1,
                                        (T.floordiv(bz, 14) + kh),
                                        1,
                                        (T.floormod(bz, 14) + kw),
                                        1,
                                        ((ic_outer * 2) + ic_inner),
                                        1,
                                        0,
                                        16,
                                        0,
                                        16,
                                        dtype="handle",
                                    ),
                                )
                                T.attr(
                                    [BB, W_shared_wmma_matrix_b],
                                    "buffer_bind_scope",
                                    T.tvm_tuple(
                                        kh,
                                        1,
                                        kw,
                                        1,
                                        ((ic_outer * 2) + ic_inner),
                                        1,
                                        (o_c + ((by * 8) + (tz * 4))),
                                        1,
                                        0,
                                        16,
                                        0,
                                        16,
                                        dtype="handle",
                                    ),
                                )
                                T.attr(
                                    [BC, Conv_wmma_accumulator],
                                    "buffer_bind_scope",
                                    T.tvm_tuple(
                                        (n_c + ((bx * 8) + (ty * 2))),
                                        1,
                                        T.floordiv(bz, 14),
                                        1,
                                        T.floormod(bz, 14),
                                        1,
                                        (o_c + ((by * 8) + (tz * 4))),
                                        1,
                                        0,
                                        16,
                                        0,
                                        16,
                                        dtype="handle",
                                    ),
                                )
                                T.evaluate(
                                    T.tvm_mma_sync(
                                        BC.data,
                                        T.floordiv(BC.elem_offset, 256),
                                        BA.data,
                                        T.floordiv(BA.elem_offset, 256),
                                        BB.data,
                                        T.floordiv(BB.elem_offset, 256),
                                        BC.data,
                                        T.floordiv(BC.elem_offset, 256),
                                        dtype="handle",
                                    )
                                )
        # Epilogue: store accumulator fragments back to the Conv output buffer.
        for n_inner in T.serial(0, 2):
            for o_inner in T.serial(0, 4):
                T.attr(
                    [buffer_4, Conv_wmma_accumulator],
                    "buffer_bind_scope",
                    T.tvm_tuple(
                        ((((bx * 4) + ty) * 2) + n_inner),
                        1,
                        T.floordiv(bz, 14),
                        1,
                        T.floormod(bz, 14),
                        1,
                        ((((by * 2) + tz) * 4) + o_inner),
                        1,
                        0,
                        16,
                        0,
                        16,
                        dtype="handle",
                    ),
                )
                T.attr(
                    [buffer_5, Conv_1],
                    "buffer_bind_scope",
                    T.tvm_tuple(
                        ((((bx * 4) + ty) * 2) + n_inner),
                        1,
                        T.floordiv(bz, 14),
                        1,
                        T.floormod(bz, 14),
                        1,
                        ((((by * 2) + tz) * 4) + o_inner),
                        1,
                        0,
                        16,
                        0,
                        16,
                        dtype="handle",
                    ),
                )
                T.evaluate(
                    T.tvm_store_matrix_sync(
                        buffer_4.data,
                        16,
                        16,
                        16,
                        T.floordiv(buffer_4.elem_offset, 256),
                        T.tvm_access_ptr(
                            T.type_annotation(dtype="float32"),
                            buffer_5.data,
                            buffer_5.elem_offset,
                            256,
                            2,
                            dtype="handle",
                        ),
                        16,
                        "row_major",
                        dtype="handle",
                    )
                )

    return func
def opt_conv_tensorcore_lower():
@T.prim_func
def func(
A: T.Buffer[(16, 14, 14, 16, 16, 16), "float16"],
W: T.Buffer[(3, 3, 16, 32, 16, 16), "float16"],
Conv: T.Buffer[(16, 14, 14, 32, 16, 16), "float32"],
) -> None:
# function attr dict
T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
# body
A_1 = T.buffer_decl([12845056], dtype="float16", data=A.data)
W_1 = T.buffer_decl([1179648], dtype="float16", data=W.data)
Conv_1 = T.buffer_decl([25690112], data=Conv.data)
bx = T.env_thread("blockIdx.x")
by = T.env_thread("blockIdx.y")
bz = T.env_thread("blockIdx.z")
tx = T.env_thread("threadIdx.x")
ty = T.env_thread("threadIdx.y")
tz = T.env_thread("threadIdx.z")
T.launch_thread(bz, 196)
Conv_wmma_accumulator_data = T.allocate([2048], "float32", "wmma.accumulator")
Conv_wmma_accumulator = T.buffer_decl(
shape=[2048], dtype="float32", scope="wmma.accumulator", data=Conv_wmma_accumulator_data
)
Apad_shared_data = T.allocate([12288], "float16", "shared")
Apad_shared = T.buffer_decl(
shape=[12288], dtype="float16", scope="shared", data=Apad_shared_data
)
W_shared_data = T.allocate([12288], "float16", "shared")
W_shared = T.buffer_decl(shape=[12288], dtype="float16", scope="shared", data=W_shared_data)
Apad_shared_wmma_matrix_a_data = T.allocate([512], "float16", "wmma.matrix_a")
Apad_shared_wmma_matrix_a = T.buffer_decl(
shape=[512], dtype="float16", scope="wmma.matrix_a", data=Apad_shared_wmma_matrix_a_data
)
W_shared_wmma_matrix_b_data = T.allocate([1024], "float16", "wmma.matrix_b")
W_shared_wmma_matrix_b = T.buffer_decl(
shape=[1024], dtype="float16", scope="wmma.matrix_b", data=W_shared_wmma_matrix_b_data
)
T.launch_thread(bx, 2)
T.launch_thread(by, 4)
T.launch_thread(ty, 4)
T.launch_thread(tz, 2)
T.evaluate(
T.tvm_fill_fragment(
Conv_wmma_accumulator.data, 16, 16, 16, 0, T.float32(0), dtype="handle"
)
)
T.evaluate(
T.tvm_fill_fragment(
Conv_wmma_accumulator.data, 16, 16, 16, 1, T.float32(0), dtype="handle"
)
)
T.evaluate(
T.tvm_fill_fragment(
Conv_wmma_accumulator.data, 16, 16, 16, 2, T.float32(0), dtype="handle"
)
)
T.evaluate(
T.tvm_fill_fragment(
Conv_wmma_accumulator.data, 16, 16, 16, 3, T.float32(0), dtype="handle"
)
)
T.evaluate(
T.tvm_fill_fragment(
Conv_wmma_accumulator.data, 16, 16, 16, 4, T.float32(0), dtype="handle"
)
)
T.evaluate(
T.tvm_fill_fragment(
Conv_wmma_accumulator.data, 16, 16, 16, 5, T.float32(0), dtype="handle"
)
)
T.evaluate(
T.tvm_fill_fragment(
Conv_wmma_accumulator.data, 16, 16, 16, 6, T.float32(0), dtype="handle"
)
)
T.evaluate(
T.tvm_fill_fragment(
Conv_wmma_accumulator.data, 16, 16, 16, 7, T.float32(0), dtype="handle"
)
)
for ic_outer in T.serial(0, 8):
for kh in T.serial(0, 3):
for ax2 in T.serial(0, 3):
with T.launch_thread(tx, 32):
Apad_shared[
((((ty * 3072) + (tz * 1536)) + (ax2 * 512)) + tx)
] = T.if_then_else(
(
(
(
(1 <= (T.floordiv(bz, 14) + kh))
and ((T.floordiv(bz, 14) + kh) < 15)
)
and (1 <= (ax2 + T.floormod(bz, 14)))
)
and ((ax2 + T.floormod(bz, 14)) < 15)
),
A_1[
(
(
(
(
(
(
(
((bx * 6422528) + (ty * 1605632))
+ (tz * 802816)
)
+ (kh * 57344)
)
+ (bz * 4096)
)
+ (ax2 * 4096)
)
+ (ic_outer * 512)
)
+ tx
)
- 61440
),
],
T.float16(0),
dtype="float16",
)
with T.launch_thread(tx, 32):
Apad_shared[
(((((ty * 3072) + (tz * 1536)) + (ax2 * 512)) + tx) + 32)
] = T.if_then_else(
(
(
(
(1 <= (T.floordiv(bz, 14) + kh))
and ((T.floordiv(bz, 14) + kh) < 15)
)
and (1 <= (ax2 + T.floormod(bz, 14)))
)
and ((ax2 + T.floormod(bz, 14)) < 15)
),
A_1[
(
(
(
(
(
(
(
((bx * 6422528) + (ty * 1605632))
+ (tz * 802816)
)
+ (kh * 57344)
)
+ (bz * 4096)
)
+ (ax2 * 4096)
)
+ (ic_outer * 512)
)
+ tx
)
- 61408
),
],
T.float16(0),
dtype="float16",
)
with T.launch_thread(tx, 32):
Apad_shared[
(((((ty * 3072) + (tz * 1536)) + (ax2 * 512)) + tx) + 64)
] = T.if_then_else(
(
(
(
(1 <= (T.floordiv(bz, 14) + kh))
and ((T.floordiv(bz, 14) + kh) < 15)
)
and (1 <= (ax2 + T.floormod(bz, 14)))
)
and ((ax2 + T.floormod(bz, 14)) < 15)
),
A_1[
(
(
(
(
(
(
(
((bx * 6422528) + (ty * 1605632))
+ (tz * 802816)
)
+ (kh * 57344)
)
+ (bz * 4096)
)
+ (ax2 * 4096)
)
+ (ic_outer * 512)
)
+ tx
)
- 61376
),
],
T.float16(0),
dtype="float16",
)
with T.launch_thread(tx, 32):
Apad_shared[
(((((ty * 3072) + (tz * 1536)) + (ax2 * 512)) + tx) + 96)
] = T.if_then_else(
(
(
(
(1 <= (T.floordiv(bz, 14) + kh))
and ((T.floordiv(bz, 14) + kh) < 15)
)
and (1 <= (ax2 + T.floormod(bz, 14)))
)
and ((ax2 + T.floormod(bz, 14)) < 15)
),
A_1[
(
(
(
(
(
(
(
((bx * 6422528) + (ty * 1605632))
+ (tz * 802816)
)
+ (kh * 57344)
)
+ (bz * 4096)
)
+ (ax2 * 4096)
)
+ (ic_outer * 512)
)
+ tx
)
- 61344
),
],
T.float16(0),
dtype="float16",
)
with T.launch_thread(tx, 32):
Apad_shared[
(((((ty * 3072) + (tz * 1536)) + (ax2 * 512)) + tx) + 128)
] = T.if_then_else(
(
(
(
(1 <= (T.floordiv(bz, 14) + kh))
and ((T.floordiv(bz, 14) + kh) < 15)
)
and (1 <= (ax2 + T.floormod(bz, 14)))
)
and ((ax2 + T.floormod(bz, 14)) < 15)
),
A_1[
(
(
(
(
(
(
(
((bx * 6422528) + (ty * 1605632))
+ (tz * 802816)
)
+ (kh * 57344)
)
+ (bz * 4096)
)
+ (ax2 * 4096)
)
+ (ic_outer * 512)
)
+ tx
)
- 61312
),
],
T.float16(0),
dtype="float16",
)
with T.launch_thread(tx, 32):
Apad_shared[
(((((ty * 3072) + (tz * 1536)) + (ax2 * 512)) + tx) + 160)
] = T.if_then_else(
(
(
(
(1 <= (T.floordiv(bz, 14) + kh))
and ((T.floordiv(bz, 14) + kh) < 15)
)
and (1 <= (ax2 + T.floormod(bz, 14)))
)
and ((ax2 + T.floormod(bz, 14)) < 15)
),
A_1[
(
(
(
(
(
(
(
((bx * 6422528) + (ty * 1605632))
+ (tz * 802816)
)
+ (kh * 57344)
)
+ (bz * 4096)
)
+ (ax2 * 4096)
)
+ (ic_outer * 512)
)
+ tx
)
- 61280
),
],
T.float16(0),
dtype="float16",
)
with T.launch_thread(tx, 32):
Apad_shared[
(((((ty * 3072) + (tz * 1536)) + (ax2 * 512)) + tx) + 192)
] = T.if_then_else(
(
(
(
(1 <= (T.floordiv(bz, 14) + kh))
and ((T.floordiv(bz, 14) + kh) < 15)
)
and (1 <= (ax2 + T.floormod(bz, 14)))
)
and ((ax2 + T.floormod(bz, 14)) < 15)
),
A_1[
(
(
(
(
(
(
(
((bx * 6422528) + (ty * 1605632))
+ (tz * 802816)
)
+ (kh * 57344)
)
+ (bz * 4096)
)
+ (ax2 * 4096)
)
+ (ic_outer * 512)
)
+ tx
)
- 61248
),
],
T.float16(0),
dtype="float16",
)
with T.launch_thread(tx, 32):
Apad_shared[
(((((ty * 3072) + (tz * 1536)) + (ax2 * 512)) + tx) + 224)
] = T.if_then_else(
(
(
(
(1 <= (T.floordiv(bz, 14) + kh))
and ((T.floordiv(bz, 14) + kh) < 15)
)
and (1 <= (ax2 + T.floormod(bz, 14)))
)
and ((ax2 + T.floormod(bz, 14)) < 15)
),
A_1[
(
(
(
(
(
(
(
((bx * 6422528) + (ty * 1605632))
+ (tz * 802816)
)
+ (kh * 57344)
)
+ (bz * 4096)
)
+ (ax2 * 4096)
)
+ (ic_outer * 512)
)
+ tx
)
- 61216
),
],
T.float16(0),
dtype="float16",
)
with T.launch_thread(tx, 32):
Apad_shared[
(((((ty * 3072) + (tz * 1536)) + (ax2 * 512)) + tx) + 256)
] = T.if_then_else(
(
(
(
(1 <= (T.floordiv(bz, 14) + kh))
and ((T.floordiv(bz, 14) + kh) < 15)
)
and (1 <= (ax2 + T.floormod(bz, 14)))
)
and ((ax2 + T.floormod(bz, 14)) < 15)
),
A_1[
(
(
(
(
(
(
(
((bx * 6422528) + (ty * 1605632))
+ (tz * 802816)
)
+ (kh * 57344)
)
+ (bz * 4096)
)
+ (ax2 * 4096)
)
+ (ic_outer * 512)
)
+ tx
)
- 61184
),
],
T.float16(0),
dtype="float16",
)
with T.launch_thread(tx, 32):
Apad_shared[
(((((ty * 3072) + (tz * 1536)) + (ax2 * 512)) + tx) + 288)
] = T.if_then_else(
(
(
(
(1 <= (T.floordiv(bz, 14) + kh))
and ((T.floordiv(bz, 14) + kh) < 15)
)
and (1 <= (ax2 + T.floormod(bz, 14)))
)
and ((ax2 + T.floormod(bz, 14)) < 15)
),
A_1[
(
(
(
(
(
(
(
((bx * 6422528) + (ty * 1605632))
+ (tz * 802816)
)
+ (kh * 57344)
)
+ (bz * 4096)
)
+ (ax2 * 4096)
)
+ (ic_outer * 512)
)
+ tx
)
- 61152
),
],
T.float16(0),
dtype="float16",
)
with T.launch_thread(tx, 32):
Apad_shared[
(((((ty * 3072) + (tz * 1536)) + (ax2 * 512)) + tx) + 320)
] = T.if_then_else(
(
(
(
(1 <= (T.floordiv(bz, 14) + kh))
and ((T.floordiv(bz, 14) + kh) < 15)
)
and (1 <= (ax2 + T.floormod(bz, 14)))
)
and ((ax2 + T.floormod(bz, 14)) < 15)
),
A_1[
(
(
(
(
(
(
(
((bx * 6422528) + (ty * 1605632))
+ (tz * 802816)
)
+ (kh * 57344)
)
+ (bz * 4096)
)
+ (ax2 * 4096)
)
+ (ic_outer * 512)
)
+ tx
)
- 61120
),
],
T.float16(0),
dtype="float16",
)
with T.launch_thread(tx, 32):
Apad_shared[
(((((ty * 3072) + (tz * 1536)) + (ax2 * 512)) + tx) + 352)
] = T.if_then_else(
(
(
(
(1 <= (T.floordiv(bz, 14) + kh))
and ((T.floordiv(bz, 14) + kh) < 15)
)
and (1 <= (ax2 + T.floormod(bz, 14)))
)
and ((ax2 + T.floormod(bz, 14)) < 15)
),
A_1[
(
(
(
(
(
(
(
((bx * 6422528) + (ty * 1605632))
+ (tz * 802816)
)
+ (kh * 57344)
)
+ (bz * 4096)
)
+ (ax2 * 4096)
)
+ (ic_outer * 512)
)
+ tx
)
- 61088
),
],
T.float16(0),
dtype="float16",
)
with T.launch_thread(tx, 32):
Apad_shared[
(((((ty * 3072) + (tz * 1536)) + (ax2 * 512)) + tx) + 384)
] = T.if_then_else(
(
(
(
(1 <= (T.floordiv(bz, 14) + kh))
and ((T.floordiv(bz, 14) + kh) < 15)
)
and (1 <= (ax2 + T.floormod(bz, 14)))
)
and ((ax2 + T.floormod(bz, 14)) < 15)
),
A_1[
(
(
(
(
(
(
(
((bx * 6422528) + (ty * 1605632))
+ (tz * 802816)
)
+ (kh * 57344)
)
+ (bz * 4096)
)
+ (ax2 * 4096)
)
+ (ic_outer * 512)
)
+ tx
)
- 61056
),
],
T.float16(0),
dtype="float16",
)
with T.launch_thread(tx, 32):
Apad_shared[
(((((ty * 3072) + (tz * 1536)) + (ax2 * 512)) + tx) + 416)
] = T.if_then_else(
(
(
(
(1 <= (T.floordiv(bz, 14) + kh))
and ((T.floordiv(bz, 14) + kh) < 15)
)
and (1 <= (ax2 + T.floormod(bz, 14)))
)
and ((ax2 + T.floormod(bz, 14)) < 15)
),
A_1[
(
(
(
(
(
(
(
((bx * 6422528) + (ty * 1605632))
+ (tz * 802816)
)
+ (kh * 57344)
)
+ (bz * 4096)
)
+ (ax2 * 4096)
)
+ (ic_outer * 512)
)
+ tx
)
- 61024
),
],
T.float16(0),
dtype="float16",
)
with T.launch_thread(tx, 32):
Apad_shared[
(((((ty * 3072) + (tz * 1536)) + (ax2 * 512)) + tx) + 448)
] = T.if_then_else(
(
(
(
(1 <= (T.floordiv(bz, 14) + kh))
and ((T.floordiv(bz, 14) + kh) < 15)
)
and (1 <= (ax2 + T.floormod(bz, 14)))
)
and ((ax2 + T.floormod(bz, 14)) < 15)
),
A_1[
(
(
(
(
(
(
(
((bx * 6422528) + (ty * 1605632))
+ (tz * 802816)
)
+ (kh * 57344)
)
+ (bz * 4096)
)
+ (ax2 * 4096)
)
+ (ic_outer * 512)
)
+ tx
)
- 60992
),
],
T.float16(0),
dtype="float16",
)
T.launch_thread(tx, 32)
Apad_shared[
(((((ty * 3072) + (tz * 1536)) + (ax2 * 512)) + tx) + 480)
] = T.if_then_else(
(
(
(
(1 <= (T.floordiv(bz, 14) + kh))
and ((T.floordiv(bz, 14) + kh) < 15)
)
and (1 <= (ax2 + T.floormod(bz, 14)))
)
and ((ax2 + T.floormod(bz, 14)) < 15)
),
A_1[
(
(
(
(
(
(
(
((bx * 6422528) + (ty * 1605632))
+ (tz * 802816)
)
+ (kh * 57344)
)
+ (bz * 4096)
)
+ (ax2 * 4096)
)
+ (ic_outer * 512)
)
+ tx
)
- 60960
),
],
T.float16(0),
dtype="float16",
)
with T.launch_thread(tx, 32):
W_shared[T.ramp((((ty * 512) + (tz * 256)) + (tx * 8)), 1, 8)] = W_1[
T.ramp(
(
(
(
(((kh * 393216) + (ic_outer * 16384)) + (by * 2048))
+ (ty * 512)
)
+ (tz * 256)
)
+ (tx * 8)
),
1,
8,
)
]
with T.launch_thread(tx, 32):
W_shared[T.ramp(((((ty * 512) + (tz * 256)) + (tx * 8)) + 2048), 1, 8)] = W_1[
T.ramp(
(
(
(
(
(((kh * 393216) + (ic_outer * 16384)) + (by * 2048))
+ (ty * 512)
)
+ (tz * 256)
)
+ (tx * 8)
)
+ 8192
),
1,
8,
)
]
with T.launch_thread(tx, 32):
W_shared[T.ramp(((((ty * 512) + (tz * 256)) + (tx * 8)) + 4096), 1, 8)] = W_1[
T.ramp(
(
(
(
(
(((kh * 393216) + (ic_outer * 16384)) + (by * 2048))
+ (ty * 512)
)
+ (tz * 256)
)
+ (tx * 8)
)
+ 131072
),
1,
8,
)
]
with T.launch_thread(tx, 32):
W_shared[T.ramp(((((ty * 512) + (tz * 256)) + (tx * 8)) + 6144), 1, 8)] = W_1[
T.ramp(
(
(
(
(
(((kh * 393216) + (ic_outer * 16384)) + (by * 2048))
+ (ty * 512)
)
+ (tz * 256)
)
+ (tx * 8)
)
+ 139264
),
1,
8,
)
]
with T.launch_thread(tx, 32):
W_shared[T.ramp(((((ty * 512) + (tz * 256)) + (tx * 8)) + 8192), 1, 8)] = W_1[
T.ramp(
(
(
(
(
(((kh * 393216) + (ic_outer * 16384)) + (by * 2048))
+ (ty * 512)
)
+ (tz * 256)
)
+ (tx * 8)
)
+ 262144
),
1,
8,
)
]
with T.launch_thread(tx, 32):
W_shared[T.ramp(((((ty * 512) + (tz * 256)) + (tx * 8)) + 10240), 1, 8)] = W_1[
T.ramp(
(
(
(
(
(((kh * 393216) + (ic_outer * 16384)) + (by * 2048))
+ (ty * 512)
)
+ (tz * 256)
)
+ (tx * 8)
)
+ 270336
),
1,
8,
)
]
for ic_inner in T.serial(0, 2):
for kw in T.serial(0, 3):
T.evaluate(
T.tvm_load_matrix_sync(
Apad_shared_wmma_matrix_a.data,
16,
16,
16,
0,
T.tvm_access_ptr(
T.type_annotation(dtype="float16"),
Apad_shared.data,
(((ty * 3072) + (kw * 512)) + (ic_inner * 256)),
256,
1,
dtype="handle",
),
16,
"row_major",
dtype="handle",
)
)
T.evaluate(
T.tvm_load_matrix_sync(
Apad_shared_wmma_matrix_a.data,
16,
16,
16,
1,
T.tvm_access_ptr(
T.type_annotation(dtype="float16"),
Apad_shared.data,
((((ty * 3072) + (kw * 512)) + (ic_inner * 256)) + 1536),
256,
1,
dtype="handle",
),
16,
"row_major",
dtype="handle",
)
)
T.evaluate(
T.tvm_load_matrix_sync(
W_shared_wmma_matrix_b.data,
16,
16,
16,
0,
T.tvm_access_ptr(
T.type_annotation(dtype="float16"),
W_shared.data,
(((kw * 4096) + (ic_inner * 2048)) + (tz * 1024)),
256,
1,
dtype="handle",
),
16,
"row_major",
dtype="handle",
)
)
T.evaluate(
T.tvm_load_matrix_sync(
W_shared_wmma_matrix_b.data,
16,
16,
16,
1,
T.tvm_access_ptr(
T.type_annotation(dtype="float16"),
W_shared.data,
((((kw * 4096) + (ic_inner * 2048)) + (tz * 1024)) + 256),
256,
1,
dtype="handle",
),
16,
"row_major",
dtype="handle",
)
)
T.evaluate(
T.tvm_load_matrix_sync(
W_shared_wmma_matrix_b.data,
16,
16,
16,
2,
T.tvm_access_ptr(
T.type_annotation(dtype="float16"),
W_shared.data,
((((kw * 4096) + (ic_inner * 2048)) + (tz * 1024)) + 512),
256,
1,
dtype="handle",
),
16,
"row_major",
dtype="handle",
)
)
T.evaluate(
T.tvm_load_matrix_sync(
W_shared_wmma_matrix_b.data,
16,
16,
16,
3,
T.tvm_access_ptr(
T.type_annotation(dtype="float16"),
W_shared.data,
((((kw * 4096) + (ic_inner * 2048)) + (tz * 1024)) + 768),
256,
1,
dtype="handle",
),
16,
"row_major",
dtype="handle",
)
)
T.evaluate(
T.tvm_mma_sync(
Conv_wmma_accumulator.data,
0,
Apad_shared_wmma_matrix_a.data,
0,
W_shared_wmma_matrix_b.data,
0,
Conv_wmma_accumulator.data,
0,
dtype="handle",
)
)
T.evaluate(
T.tvm_mma_sync(
Conv_wmma_accumulator.data,
1,
Apad_shared_wmma_matrix_a.data,
0,
W_shared_wmma_matrix_b.data,
1,
Conv_wmma_accumulator.data,
1,
dtype="handle",
)
)
T.evaluate(
T.tvm_mma_sync(
Conv_wmma_accumulator.data,
2,
Apad_shared_wmma_matrix_a.data,
0,
W_shared_wmma_matrix_b.data,
2,
Conv_wmma_accumulator.data,
2,
dtype="handle",
)
)
T.evaluate(
T.tvm_mma_sync(
Conv_wmma_accumulator.data,
3,
Apad_shared_wmma_matrix_a.data,
0,
W_shared_wmma_matrix_b.data,
3,
Conv_wmma_accumulator.data,
3,
dtype="handle",
)
)
T.evaluate(
T.tvm_mma_sync(
Conv_wmma_accumulator.data,
4,
Apad_shared_wmma_matrix_a.data,
1,
W_shared_wmma_matrix_b.data,
0,
Conv_wmma_accumulator.data,
4,
dtype="handle",
)
)
T.evaluate(
T.tvm_mma_sync(
Conv_wmma_accumulator.data,
5,
Apad_shared_wmma_matrix_a.data,
1,
W_shared_wmma_matrix_b.data,
1,
Conv_wmma_accumulator.data,
5,
dtype="handle",
)
)
T.evaluate(
T.tvm_mma_sync(
Conv_wmma_accumulator.data,
6,
Apad_shared_wmma_matrix_a.data,
1,
W_shared_wmma_matrix_b.data,
2,
Conv_wmma_accumulator.data,
6,
dtype="handle",
)
)
T.evaluate(
T.tvm_mma_sync(
Conv_wmma_accumulator.data,
7,
Apad_shared_wmma_matrix_a.data,
1,
W_shared_wmma_matrix_b.data,
3,
Conv_wmma_accumulator.data,
7,
dtype="handle",
)
)
T.evaluate(
T.tvm_store_matrix_sync(
Conv_wmma_accumulator.data,
16,
16,
16,
0,
T.tvm_access_ptr(
T.type_annotation(dtype="float32"),
Conv_1.data,
(
((((bx * 12845056) + (ty * 3211264)) + (bz * 8192)) + (by * 2048))
+ (tz * 1024)
),
256,
2,
dtype="handle",
),
16,
"row_major",
dtype="handle",
)
)
T.evaluate(
T.tvm_store_matrix_sync(
Conv_wmma_accumulator.data,
16,
16,
16,
1,
T.tvm_access_ptr(
T.type_annotation(dtype="float32"),
Conv_1.data,
(
(
((((bx * 12845056) + (ty * 3211264)) + (bz * 8192)) + (by * 2048))
+ (tz * 1024)
)
+ 256
),
256,
2,
dtype="handle",
),
16,
"row_major",
dtype="handle",
)
)
T.evaluate(
T.tvm_store_matrix_sync(
Conv_wmma_accumulator.data,
16,
16,
16,
2,
T.tvm_access_ptr(
T.type_annotation(dtype="float32"),
Conv_1.data,
(
(
((((bx * 12845056) + (ty * 3211264)) + (bz * 8192)) + (by * 2048))
+ (tz * 1024)
)
+ 512
),
256,
2,
dtype="handle",
),
16,
"row_major",
dtype="handle",
)
)
T.evaluate(
T.tvm_store_matrix_sync(
Conv_wmma_accumulator.data,
16,
16,
16,
3,
T.tvm_access_ptr(
T.type_annotation(dtype="float32"),
Conv_1.data,
(
(
((((bx * 12845056) + (ty * 3211264)) + (bz * 8192)) + (by * 2048))
+ (tz * 1024)
)
+ 768
),
256,
2,
dtype="handle",
),
16,
"row_major",
dtype="handle",
)
)
T.evaluate(
T.tvm_store_matrix_sync(
Conv_wmma_accumulator.data,
16,
16,
16,
4,
T.tvm_access_ptr(
T.type_annotation(dtype="float32"),
Conv_1.data,
(
(
((((bx * 12845056) + (ty * 3211264)) + (bz * 8192)) + (by * 2048))
+ (tz * 1024)
)
+ 1605632
),
256,
2,
dtype="handle",
),
16,
"row_major",
dtype="handle",
)
)
T.evaluate(
T.tvm_store_matrix_sync(
Conv_wmma_accumulator.data,
16,
16,
16,
5,
T.tvm_access_ptr(
T.type_annotation(dtype="float32"),
Conv_1.data,
(
(
((((bx * 12845056) + (ty * 3211264)) + (bz * 8192)) + (by * 2048))
+ (tz * 1024)
)
+ 1605888
),
256,
2,
dtype="handle",
),
16,
"row_major",
dtype="handle",
)
)
T.evaluate(
T.tvm_store_matrix_sync(
Conv_wmma_accumulator.data,
16,
16,
16,
6,
T.tvm_access_ptr(
T.type_annotation(dtype="float32"),
Conv_1.data,
(
(
((((bx * 12845056) + (ty * 3211264)) + (bz * 8192)) + (by * 2048))
+ (tz * 1024)
)
+ 1606144
),
256,
2,
dtype="handle",
),
16,
"row_major",
dtype="handle",
)
)
T.evaluate(
T.tvm_store_matrix_sync(
Conv_wmma_accumulator.data,
16,
16,
16,
7,
T.tvm_access_ptr(
T.type_annotation(dtype="float32"),
Conv_1.data,
(
(
((((bx * 12845056) + (ty * 3211264)) + (bz * 8192)) + (by * 2048))
+ (tz * 1024)
)
+ 1606400
),
256,
2,
dtype="handle",
),
16,
"row_major",
dtype="handle",
)
)
return func
def opt_conv_tensorcore_mod_host():
    """Return the lowered host-side entry function for the tensor-core conv2d.

    The body is machine-generated lowering output: it unpacks the packed-call
    arguments, validates the three DLTensor arguments (arg0/A: float16 input,
    arg1/W: float16 weights, arg2/Conv: float32 output — see the assert
    messages), selects the device, and launches ``default_function_kernel0``
    via the packed calling convention. Kept verbatim as a round-trip fixture.
    """
    @T.prim_func
    def opt_conv_tensorcore_mod_host(
        args: T.handle,
        arg_type_ids: T.Buffer[(3,), "int32"],
        num_args: T.int32,
        out_ret_value: T.handle,
        out_ret_tcode: T.handle,
        resource_handle: T.handle,
    ) -> T.int32:
        # function attr dict
        T.func_attr(
            {
                "tir.noalias": True,
                "global_symbol": "default_function",
                "tir.is_entry_func": True,
                "calling_conv": 1,
            }
        )
        # body
        # Scratch stacks used to marshal arguments for nested packed-func calls.
        stack_tcode_data: T.Ptr[T.int32] = T.tvm_stack_alloca("arg_tcode", 10, dtype="handle")
        stack_tcode = T.buffer_decl([9], "int32", data=stack_tcode_data)
        stack_value: T.handle = T.tvm_stack_alloca("arg_value", 10, dtype="handle")
        assert num_args == 3, "default_function: num_args should be 3"
        # Unpack the three raw arguments and their runtime type codes.
        arg0: T.handle = T.tvm_struct_get(args, 0, 12, dtype="handle")
        arg0_code: T.int32 = arg_type_ids[0]
        arg1: T.handle = T.tvm_struct_get(args, 1, 12, dtype="handle")
        arg1_code: T.int32 = arg_type_ids[1]
        arg2: T.handle = T.tvm_struct_get(args, 2, 12, dtype="handle")
        arg2_code: T.int32 = arg_type_ids[2]
        # arg0: data pointer A plus its shape/strides arrays and device id.
        A: T.handle = T.tvm_struct_get(arg0, 0, 1, dtype="handle")
        T.attr(A, "storage_alignment", 128)
        arg0_shape_data: T.Ptr[T.int64] = T.tvm_struct_get(arg0, 0, 2, dtype="handle")
        arg0_shape = T.buffer_decl([6], "int64", data=arg0_shape_data)
        arg0_strides_data: T.Ptr[T.int64] = T.tvm_struct_get(arg0, 0, 3, dtype="handle")
        arg0_strides = T.buffer_decl([6], "int64", data=arg0_strides_data)
        dev_id: T.int32 = T.tvm_struct_get(arg0, 0, 9, dtype="int32")
        # arg1: weight tensor W.
        W: T.handle = T.tvm_struct_get(arg1, 0, 1, dtype="handle")
        T.attr(W, "storage_alignment", 128)
        arg1_shape_data: T.Ptr[T.int64] = T.tvm_struct_get(arg1, 0, 2, dtype="handle")
        arg1_shape = T.buffer_decl([6], "int64", data=arg1_shape_data)
        arg1_strides_data: T.Ptr[T.int64] = T.tvm_struct_get(arg1, 0, 3, dtype="handle")
        arg1_strides = T.buffer_decl([6], "int64", data=arg1_strides_data)
        # arg2: output tensor Conv.
        Conv: T.handle = T.tvm_struct_get(arg2, 0, 1, dtype="handle")
        T.attr(Conv, "storage_alignment", 128)
        arg2_shape_data: T.Ptr[T.int64] = T.tvm_struct_get(arg2, 0, 2, dtype="handle")
        arg2_shape = T.buffer_decl([6], "int64", data=arg2_shape_data)
        arg2_strides_data: T.Ptr[T.int64] = T.tvm_struct_get(arg2, 0, 3, dtype="handle")
        arg2_strides = T.buffer_decl([6], "int64", data=arg2_strides_data)
        # Every argument must have been passed with a pointer-compatible code.
        assert (((arg0_code == 3) or (arg0_code == 13)) or (arg0_code == 7)) or (
            arg0_code == 4
        ), "default_function: Expect arg[0] to be pointer"
        assert (((arg1_code == 3) or (arg1_code == 13)) or (arg1_code == 7)) or (
            arg1_code == 4
        ), "default_function: Expect arg[1] to be pointer"
        assert (((arg2_code == 3) or (arg2_code == 13)) or (arg2_code == 7)) or (
            arg2_code == 4
        ), "default_function: Expect arg[2] to be pointer"
        # Validate arg0: 6-D float16 tensor of shape (16, 14, 14, 16, 16, 16).
        # (The generated code repeats the ndim check.)
        assert 6 == T.tvm_struct_get(arg0, 0, 4, dtype="int32"), "arg0.ndim is expected to equal 6"
        assert 6 == T.tvm_struct_get(arg0, 0, 4, dtype="int32"), "arg0.ndim is expected to equal 6"
        assert (
            (T.tvm_struct_get(arg0, 0, 5, dtype="uint8") == T.uint8(2))
            and (T.tvm_struct_get(arg0, 0, 6, dtype="uint8") == T.uint8(16))
        ) and (
            T.tvm_struct_get(arg0, 0, 7, dtype="uint16") == T.uint16(1)
        ), "arg0.dtype is expected to be float16"
        assert 16 == T.cast(
            arg0_shape[0], "int32"
        ), "Argument arg0.shape[0] has an unsatisfied constraint"
        assert 14 == T.cast(
            arg0_shape[1], "int32"
        ), "Argument arg0.shape[1] has an unsatisfied constraint"
        assert 14 == T.cast(
            arg0_shape[2], "int32"
        ), "Argument arg0.shape[2] has an unsatisfied constraint"
        assert 16 == T.cast(
            arg0_shape[3], "int32"
        ), "Argument arg0.shape[3] has an unsatisfied constraint"
        assert 16 == T.cast(
            arg0_shape[4], "int32"
        ), "Argument arg0.shape[4] has an unsatisfied constraint"
        assert 16 == T.cast(
            arg0_shape[5], "int32"
        ), "Argument arg0.shape[5] has an unsatisfied constraint"
        # When strides are provided they must describe a compact layout.
        if not (T.isnullptr(arg0_strides.data, dtype="bool")):
            assert (
                (
                    (
                        (
                            (1 == T.cast(arg0_strides[5], "int32"))
                            and (16 == T.cast(arg0_strides[4], "int32"))
                        )
                        and (256 == T.cast(arg0_strides[3], "int32"))
                    )
                    and (4096 == T.cast(arg0_strides[2], "int32"))
                )
                and (57344 == T.cast(arg0_strides[1], "int32"))
            ) and (
                802816 == T.cast(arg0_strides[0], "int32")
            ), "arg0.strides: expected to be compact array"
            T.evaluate(0)
        assert T.uint64(0) == T.tvm_struct_get(
            arg0, 0, 8, dtype="uint64"
        ), "Argument arg0.byte_offset has an unsatisfied constraint"
        assert 2 == T.tvm_struct_get(
            arg0, 0, 10, dtype="int32"
        ), "Argument arg0.device_type has an unsatisfied constraint"
        # Validate arg1: 6-D float16 tensor of shape (3, 3, 16, 32, 16, 16).
        assert 6 == T.tvm_struct_get(arg1, 0, 4, dtype="int32"), "arg1.ndim is expected to equal 6"
        assert 6 == T.tvm_struct_get(arg1, 0, 4, dtype="int32"), "arg1.ndim is expected to equal 6"
        assert (
            (T.tvm_struct_get(arg1, 0, 5, dtype="uint8") == T.uint8(2))
            and (T.tvm_struct_get(arg1, 0, 6, dtype="uint8") == T.uint8(16))
        ) and (
            T.tvm_struct_get(arg1, 0, 7, dtype="uint16") == T.uint16(1)
        ), "arg1.dtype is expected to be float16"
        assert 3 == T.cast(
            arg1_shape[0], "int32"
        ), "Argument arg1.shape[0] has an unsatisfied constraint"
        assert 3 == T.cast(
            arg1_shape[1], "int32"
        ), "Argument arg1.shape[1] has an unsatisfied constraint"
        assert 16 == T.cast(
            arg1_shape[2], "int32"
        ), "Argument arg1.shape[2] has an unsatisfied constraint"
        assert 32 == T.cast(
            arg1_shape[3], "int32"
        ), "Argument arg1.shape[3] has an unsatisfied constraint"
        assert 16 == T.cast(
            arg1_shape[4], "int32"
        ), "Argument arg1.shape[4] has an unsatisfied constraint"
        assert 16 == T.cast(
            arg1_shape[5], "int32"
        ), "Argument arg1.shape[5] has an unsatisfied constraint"
        if not (T.isnullptr(arg1_strides.data, dtype="bool")):
            assert (
                (
                    (
                        (
                            (1 == T.cast(arg1_strides[5], "int32"))
                            and (16 == T.cast(arg1_strides[4], "int32"))
                        )
                        and (256 == T.cast(arg1_strides[3], "int32"))
                    )
                    and (8192 == T.cast(arg1_strides[2], "int32"))
                )
                and (131072 == T.cast(arg1_strides[1], "int32"))
            ) and (
                393216 == T.cast(arg1_strides[0], "int32")
            ), "arg1.strides: expected to be compact array"
            T.evaluate(0)
        assert T.uint64(0) == T.tvm_struct_get(
            arg1, 0, 8, dtype="uint64"
        ), "Argument arg1.byte_offset has an unsatisfied constraint"
        assert 2 == T.tvm_struct_get(
            arg1, 0, 10, dtype="int32"
        ), "Argument arg1.device_type has an unsatisfied constraint"
        # All tensors must live on the same device as arg0.
        assert dev_id == T.tvm_struct_get(
            arg1, 0, 9, dtype="int32"
        ), "Argument arg1.device_id has an unsatisfied constraint"
        # Validate arg2: 6-D float32 tensor of shape (16, 14, 14, 32, 16, 16).
        assert 6 == T.tvm_struct_get(arg2, 0, 4, dtype="int32"), "arg2.ndim is expected to equal 6"
        assert 6 == T.tvm_struct_get(arg2, 0, 4, dtype="int32"), "arg2.ndim is expected to equal 6"
        assert (
            (T.tvm_struct_get(arg2, 0, 5, dtype="uint8") == T.uint8(2))
            and (T.tvm_struct_get(arg2, 0, 6, dtype="uint8") == T.uint8(32))
        ) and (
            T.tvm_struct_get(arg2, 0, 7, dtype="uint16") == T.uint16(1)
        ), "arg2.dtype is expected to be float32"
        assert 16 == T.cast(
            arg2_shape[0], "int32"
        ), "Argument arg2.shape[0] has an unsatisfied constraint"
        assert 14 == T.cast(
            arg2_shape[1], "int32"
        ), "Argument arg2.shape[1] has an unsatisfied constraint"
        assert 14 == T.cast(
            arg2_shape[2], "int32"
        ), "Argument arg2.shape[2] has an unsatisfied constraint"
        assert 32 == T.cast(
            arg2_shape[3], "int32"
        ), "Argument arg2.shape[3] has an unsatisfied constraint"
        assert 16 == T.cast(
            arg2_shape[4], "int32"
        ), "Argument arg2.shape[4] has an unsatisfied constraint"
        assert 16 == T.cast(
            arg2_shape[5], "int32"
        ), "Argument arg2.shape[5] has an unsatisfied constraint"
        if not (T.isnullptr(arg2_strides.data, dtype="bool")):
            assert (
                (
                    (
                        (
                            (1 == T.cast(arg2_strides[5], "int32"))
                            and (16 == T.cast(arg2_strides[4], "int32"))
                        )
                        and (256 == T.cast(arg2_strides[3], "int32"))
                    )
                    and (8192 == T.cast(arg2_strides[2], "int32"))
                )
                and (114688 == T.cast(arg2_strides[1], "int32"))
            ) and (
                1605632 == T.cast(arg2_strides[0], "int32")
            ), "arg2.strides: expected to be compact array"
            T.evaluate(0)
        assert T.uint64(0) == T.tvm_struct_get(
            arg2, 0, 8, dtype="uint64"
        ), "Argument arg2.byte_offset has an unsatisfied constraint"
        assert 2 == T.tvm_struct_get(
            arg2, 0, 10, dtype="int32"
        ), "Argument arg2.device_type has an unsatisfied constraint"
        assert dev_id == T.tvm_struct_get(
            arg2, 0, 9, dtype="int32"
        ), "Argument arg2.device_id has an unsatisfied constraint"
        # Select the target device (device_type 2, matching the checks above).
        T.evaluate(T.tvm_struct_set(stack_value, 0, 12, T.cast(2, "int64"), dtype="int32"))
        stack_tcode[0] = 0
        T.evaluate(T.tvm_struct_set(stack_value, 1, 12, T.cast(dev_id, "int64"), dtype="int32"))
        stack_tcode[1] = 0
        T.evaluate(
            T.tvm_call_packed_lowered(
                "__tvm_set_device", stack_value, stack_tcode.data, 0, 2, dtype="int32"
            )
        )
        T.attr(0, "compute_scope", "default_function_compute_")
        # Push the 9 kernel arguments (3 tensor handles + 6 integer scalars)
        # and invoke the device kernel through the packed-function ABI.
        T.evaluate(T.tvm_struct_set(stack_value, 0, 12, A, dtype="int32"))
        stack_tcode[0] = 3
        T.evaluate(T.tvm_struct_set(stack_value, 1, 12, W, dtype="int32"))
        stack_tcode[1] = 3
        T.evaluate(T.tvm_struct_set(stack_value, 2, 12, Conv, dtype="int32"))
        stack_tcode[2] = 3
        T.evaluate(T.tvm_struct_set(stack_value, 3, 12, T.cast(196, "int64"), dtype="int32"))
        stack_tcode[3] = 0
        T.evaluate(T.tvm_struct_set(stack_value, 4, 12, T.cast(2, "int64"), dtype="int32"))
        stack_tcode[4] = 0
        T.evaluate(T.tvm_struct_set(stack_value, 5, 12, T.cast(4, "int64"), dtype="int32"))
        stack_tcode[5] = 0
        T.evaluate(T.tvm_struct_set(stack_value, 6, 12, T.cast(4, "int64"), dtype="int32"))
        stack_tcode[6] = 0
        T.evaluate(T.tvm_struct_set(stack_value, 7, 12, T.cast(2, "int64"), dtype="int32"))
        stack_tcode[7] = 0
        T.evaluate(T.tvm_struct_set(stack_value, 8, 12, T.cast(32, "int64"), dtype="int32"))
        stack_tcode[8] = 0
        T.evaluate(
            T.tvm_call_packed_lowered(
                "default_function_kernel0", stack_value, stack_tcode.data, 0, 9, dtype="int32"
            )
        )
    return opt_conv_tensorcore_mod_host
def vthread_func():
    """Return a round-trip fixture exercising env threads, incl. a vthread.

    The kernel computes ``C = (A + 1) * 2`` through a thread-local staging
    buffer ``B``, with the 256 elements partitioned across blockIdx.x,
    threadIdx.x and a virtual "vthread" axis.
    """
    @T.prim_func
    def vthread_func(a: T.handle, c: T.handle) -> None:
        A = T.match_buffer(a, [256], "float32")
        C = T.match_buffer(c, [256], "float32")
        # Environment threads: two hardware axes plus one virtual thread.
        i0 = T.env_thread("blockIdx.x")
        i1 = T.env_thread("threadIdx.x")
        i2 = T.env_thread("vthread")
        T.launch_thread(i0, 4)
        T.launch_thread(i1, 2)
        T.launch_thread(i2, 2)
        # Thread-local staging buffer for this thread's 16-element slice.
        B_data = T.allocate([16], "float32", "local")
        B = T.buffer_decl(shape=[16], dtype="float32", scope="local", data=B_data)
        for j in range(16):
            B[j] = A[i0 * 64 + i1 * 32 + i2 * 16 + j] + T.float32(1)
        for j in range(16):
            C[i0 * 64 + i1 * 32 + i2 * 16 + j] = B[j] * T.float32(2)
    return vthread_func
def matmul():
    """Return a 128x128x128 matmul fixture whose init lives in ``T.init()``."""
    @T.prim_func
    def matmul(a: T.handle, b: T.handle, c: T.handle) -> None:
        A = T.match_buffer(a, [128, 128])
        B = T.match_buffer(b, [128, 128])
        C = T.match_buffer(c, [128, 128])
        for i, j, k in T.grid(128, 128, 128):
            with T.block("update"):
                vi, vj, vk = T.axis.remap("SSR", [i, j, k])
                # T.init() zero-fills C on the first reduction iteration.
                with T.init():
                    C[vi, vj] = T.float32(0)
                C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
    return matmul
def matmul_original():
    """Return a matmul fixture with a separate explicit "init" block."""
    @T.prim_func
    def matmul_original(a: T.handle, b: T.handle, c: T.handle) -> None:
        A = T.match_buffer(a, [128, 128])
        B = T.match_buffer(b, [128, 128])
        C = T.match_buffer(c, [128, 128])
        for i, j in T.grid(128, 128):
            # Zero-fill C[vi, vj] before the reduction loop below.
            with T.block("init"):
                vi, vj = T.axis.remap("SS", [i, j])
                C[vi, vj] = T.float32(0)
            for k in range(128):
                with T.block("update"):
                    vi, vj, vk = T.axis.remap("SSR", [i, j, k])
                    C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
    return matmul_original
def element_wise():
    """Return a two-stage elementwise fixture: B = A * 2, then C = B + 1."""
    @T.prim_func
    def element_wise(a: T.handle, c: T.handle) -> None:
        A = T.match_buffer(a, (128, 128), "float32")
        C = T.match_buffer(c, (128, 128), "float32")
        # Intermediate stage buffer between the two loop nests.
        B = T.alloc_buffer((128, 128), "float32")
        for i, j in T.grid(128, 128):
            with T.block("B"):
                vi, vj = T.axis.remap("SS", [i, j])
                B[vi, vj] = A[vi, vj] * T.float32(2)
        for i, j in T.grid(128, 128):
            with T.block("C"):
                vi, vj = T.axis.remap("SS", [i, j])
                C[vi, vj] = B[vi, vj] + T.float32(1)
    return element_wise
def predicate():
    """Return a fixture whose split inner loop is guarded by ``T.where``."""
    @T.prim_func
    def predicate(b: T.handle, c: T.handle) -> None:
        B = T.match_buffer(b, (16, 16), "float32")
        C = T.match_buffer(c, (16, 16), "float32")
        # j is split as jo*4 + ji with ji in [0, 5), so it can reach 19;
        # the T.where predicate masks the out-of-range tail.
        for i, jo, ji in T.grid(16, 4, 5):
            with T.block("update"):
                vi = T.axis.S(16, i)
                vj = T.axis.S(16, jo * 4 + ji)
                T.where(jo * 4 + ji < 16)
                C[vi, vj] = B[vi, vj] + T.float32(1)
    return predicate
def test_module_define():
    """Funcs round-tripped through single-entry IRModules and regrouped must
    be structurally equal to a module built from the factories directly."""
    factories = {"func1": matmul, "func2": element_wise, "func3": predicate}
    # First wrap each func in its own IRModule keyed by the factory name.
    roundtripped = {}
    for slot, make in factories.items():
        key = make.__name__
        roundtripped[slot] = tvm.ir.IRModule({key: make()})[key]
    mod1 = tvm.ir.IRModule(roundtripped)
    mod2 = tvm.ir.IRModule({slot: make() for slot, make in factories.items()})
    tvm.ir.assert_structural_equal(mod1, mod2)
def test_matmul_original():
    """Round-trip matmul_original through TVMScript and check its structure."""
    func = matmul_original()
    rt_func = tvm.script.from_source(func.script(show_meta=True))
    tvm.ir.assert_structural_equal(func, rt_func)
    root = rt_func.body.block
    assert isinstance(root, tir.stmt.Block)
    loop_i = root.body
    assert isinstance(loop_i, tir.stmt.For)
    loop_j = loop_i.body
    assert isinstance(loop_j, tir.stmt.For)
    seq = loop_j.body
    assert isinstance(seq, tir.stmt.SeqStmt)
    # The init block precedes the reduction loop over k.
    assert isinstance(seq[0].block, tir.stmt.Block)
    assert isinstance(seq[1], tir.stmt.For)
    assert isinstance(seq[1].body.block, tir.stmt.Block)
def test_element_wise():
    """Round-trip element_wise through TVMScript and check its structure."""
    func = element_wise()
    rt_func = tvm.script.from_source(func.script(show_meta=True))
    tvm.ir.assert_structural_equal(func, rt_func)
    root = rt_func.body.block
    assert isinstance(root, tir.stmt.Block)
    seq = root.body
    assert isinstance(seq, tir.stmt.SeqStmt)
    # Two identical-shaped stages: a 2-level loop nest over a leaf block.
    for stage in (seq[0], seq[1]):
        assert isinstance(stage, tir.stmt.For)
        assert isinstance(stage.body, tir.stmt.For)
        assert isinstance(stage.body.body.block, tir.stmt.Block)
def test_predicate():
    """Round-trip predicate through TVMScript and check its loop nest."""
    func = predicate()
    rt_func = tvm.script.from_source(func.script(show_meta=True))
    tvm.ir.assert_structural_equal(func, rt_func)
    root = rt_func.body.block
    assert isinstance(root, tir.stmt.Block)
    # Three nested loops (i, jo, ji) around the guarded update block.
    node = root.body
    for _ in range(3):
        assert isinstance(node, tir.stmt.For)
        node = node.body
    assert isinstance(node.block, tir.stmt.Block)
def for_thread_binding():
    """Return a fixture with ``T.thread_binding`` loops, one annotated."""
    @T.prim_func
    def for_thread_binding(a: T.handle, b: T.handle) -> None:
        A = T.match_buffer(a, (16, 16), "float32")
        B = T.match_buffer(b, (16, 16), "float32")
        for i in T.thread_binding(0, 16, thread="threadIdx.x"):
            # Inner binding carries a loop annotation the printer must keep.
            for j in T.thread_binding(
                0, 16, thread="threadIdx.y", annotations={"attr_key": "attr_value"}
            ):
                A[i, j] = B[i, j] + T.float32(1)
    return for_thread_binding
def test_for_thread_binding():
    """Round-trip for_thread_binding; both loops stay thread-bound."""
    func = for_thread_binding()
    rt_func = tvm.script.from_source(func.script(show_meta=True))
    tvm.ir.assert_structural_equal(func, rt_func)
    outer = rt_func.body
    assert isinstance(outer, tir.stmt.For)
    assert outer.kind == 4  # ForKind thread-binding
    assert outer.thread_binding.thread_tag == "threadIdx.x"
    inner = outer.body
    assert isinstance(inner, tir.stmt.For)
    assert inner.kind == 4
    assert inner.thread_binding.thread_tag == "threadIdx.y"
    assert inner.annotations["attr_key"] == "attr_value"
def match_buffer_region():
    """Return a fixture with nested ``T.match_buffer`` views over regions of A."""
    @T.prim_func
    def match_buffer_region(a: T.handle, b: T.handle) -> None:
        A = T.match_buffer(a, (16, 16, 16), "float32")
        B = T.match_buffer(b, (1), "float32")
        for i, j in T.grid(16, 4):
            with T.block():
                vi, vj = T.axis.remap("SS", [i, j])
                # Outer view: a (16, 1, 4) slice of A.
                C = T.match_buffer(A[0:16, vi, vj * 4 : vj * 4 + 4], (16, 1, 4))
                for ii in range(4):
                    with T.block():
                        vii = T.axis.S(4, ii)
                        # Inner view: a (4, 1, 4) slice of the outer view C.
                        D = T.match_buffer(C[vii * 4 : vii * 4 + 4, 0, 0:4], (4, 1, 4))
                        for i, j in T.grid(4, 4):
                            B[0] += D[i, 0, j]
    return match_buffer_region
def test_match_buffer_region():
    """Round-trip match_buffer_region and verify matched-buffer shapes."""
    func = match_buffer_region()
    rt_func = tvm.script.from_source(func.script(show_meta=True))
    tvm.ir.assert_structural_equal(func, rt_func)
    assert isinstance(rt_func.body, tir.stmt.BlockRealize)
    root_block = rt_func.body.block
    assert isinstance(root_block.body, tir.stmt.For)
    assert isinstance(root_block.body.body, tir.stmt.For)
    realize = root_block.body.body.body
    assert isinstance(realize, tir.stmt.BlockRealize)
    outer = realize.block
    # Outer block matches a (16, 1, 4) view of A.
    assert len(outer.match_buffers) == 1
    tvm.ir.assert_structural_equal(outer.match_buffers[0].buffer.shape, [16, 1, 4])
    assert isinstance(outer.body, tir.stmt.For)
    assert isinstance(outer.body.body, tir.stmt.BlockRealize)
    inner = outer.body.body.block
    # Inner block matches a (4, 1, 4) view of that view.
    assert len(inner.match_buffers) == 1
    tvm.ir.assert_structural_equal(inner.match_buffers[0].buffer.shape, [4, 1, 4])
def block_elements():
    """Return a fixture packing every optional block element into one block.

    The "update" block carries a predicate (T.where), explicit reads/writes,
    a block annotation, an allocation, a matched sub-buffer, and an init.
    """
    @T.prim_func
    def block_elements(a: T.handle, b: T.handle) -> None:
        A = T.match_buffer(a, (16, 16), "float32")
        B = T.match_buffer(b, (1, 1), "float32")
        with T.block("update"):
            vi = T.axis.S(1, 0)
            T.where(True)
            T.reads(A[0:16, 0:16])
            T.writes(B[0, 0])
            T.block_attr({"attr_key": "attr_value"})
            C = T.alloc_buffer((4, 4), dtype="float32")
            D = T.match_buffer(A[0:4, 0], (4, 1))
            with T.init():
                B[0, 0] = T.float32(0)
            B[0, 0] = A[0, 0] + B[0, 0] + C[1, 1] + D[2, 0]
    return block_elements
def test_block_elements():
    """Round-trip block_elements and inspect the inner block's members."""
    func = block_elements()
    rt_func = tvm.script.from_source(func.script(show_meta=True))
    tvm.ir.assert_structural_equal(func, rt_func)
    root = rt_func.body.block
    assert isinstance(root, tir.stmt.Block)
    realize = root.body
    assert isinstance(realize, tir.stmt.BlockRealize)
    inner = realize.block
    assert isinstance(inner, tir.stmt.Block)
    assert isinstance(inner.body, tir.stmt.BufferStore)
    assert isinstance(inner.init, tir.stmt.BufferStore)
    assert len(inner.annotations) == 1
    assert inner.annotations["attr_key"] == "attr_value"
def opaque_block():
    """Return a fixture of opaque blocks (blocks with no iter vars)."""
    @T.prim_func
    def opaque_block(a: T.handle, b: T.handle) -> None:
        A = T.match_buffer(a, (16, 16), "float32")
        B = T.match_buffer(b, (16, 16), "float32")
        for i in range(16):
            for j in range(16):
                # Opaque block: explicit reads/writes, no iter vars.
                with T.block():
                    T.reads([])
                    T.writes(A[i, j])
                    A[i, j] = T.float32(0)
            with T.block():
                T.reads([A[i, 0:16]])
                T.writes([B[i, 0:16]])
                for j in range(16):
                    B[i, j] = A[i, j]
    return opaque_block
def test_opaque_block():
    """Round-trip opaque_block; both inner blocks must stay iter-var free."""
    func = opaque_block()
    rt_func = tvm.script.from_source(func.script(show_meta=True))
    tvm.ir.assert_structural_equal(func, rt_func)
    root_block = rt_func.body.block
    assert isinstance(root_block, tir.stmt.Block)
    assert isinstance(root_block.body, tir.stmt.For)
    first, second = root_block.body.body[0], root_block.body.body[1]
    # First element: the inner j-loop wrapping an opaque block.
    assert isinstance(first, tir.stmt.For)
    assert isinstance(first.body, tir.stmt.BlockRealize)
    assert isinstance(first.body.block, tir.stmt.Block)
    assert len(first.body.block.iter_vars) == 0
    # Second element: the copy block, also opaque.
    assert isinstance(second, tir.stmt.BlockRealize)
    assert isinstance(second.block, tir.stmt.Block)
    assert len(second.block.iter_vars) == 0
def module_const():
    """Return an IRModule fixture whose func uses ``T.allocate_const`` twice.

    A second func "A" is deliberately kept commented out; see the note inside
    about the dict -> Map -> dict key-ordering issue.
    """
    @tvm.script.ir_module
    class Module4:
        # There is an ongoing (python)dict->(c++)Map->(python)dict issue which potentially
        # changes order of the items in dict after roundtrip due to map not support order
        # of insertion while dict does. Hence func 'def A(a: T.handle, c: T.handle) -> None'
        # is commented
        #
        # test:
        # d = {"B": 1, "A": 2}
        # m = tvm.runtime.convert(d)
        # assert d.keys() == m.keys(), f"Order changed from {list(d.keys())} to {list(m.keys())}"
        """
        @T.prim_func
        def A(a: T.handle, c: T.handle) -> None:
            A = T.match_buffer(a, (10), "int32")
            C = T.match_buffer(c, (10), "int32")
            B = T.alloc_buffer((10), "int32")
            K1 = T.allocate_const([1, 1, 1, 1, 1, 1, 1, 1, 1, 1], "int32", [10])
            for x in T.serial(0, 10):
                B[x] = A[x] + T.load("int32", K1, x)
            for x in T.serial(0, 10):
                C[x] = B[x]
        """
        @T.prim_func
        def B(a: T.handle, c: T.handle) -> None:
            A = T.match_buffer(a, (10), "int32")
            C = T.match_buffer(c, (10), "int32")
            B = T.alloc_buffer((10), "int32")
            # First compile-time constant array of ten ones.
            K1_data = T.allocate_const([1, 1, 1, 1, 1, 1, 1, 1, 1, 1], "int32", [10])
            K1 = T.buffer_decl(shape=[10], dtype="int32", data=K1_data)
            for x in T.serial(0, 10):
                B[x] = A[x] + K1[x]
            # Second constant, exercising multiple allocate_const in one func.
            K2_data = T.allocate_const([1, 1, 1, 1, 1, 1, 1, 1, 1, 1], "int32", [10])
            K2 = T.buffer_decl(shape=[10], dtype="int32", data=K2_data)
            for x in T.serial(0, 10):
                B[x] = B[x] + K2[x]
            for x in T.serial(0, 10):
                C[x] = B[x]
    return Module4
def constant():
    """Return a fixture reading from a ``T.allocate_const`` constant array."""
    @T.prim_func
    def constant(a: T.handle, c: T.handle) -> None:
        A = T.match_buffer(a, (10), "int32")
        C = T.match_buffer(c, (10), "int32")
        B = T.alloc_buffer((10), "int32")
        # Compile-time constant array of ten ones, viewed through buffer K.
        K_data = T.allocate_const([1, 1, 1, 1, 1, 1, 1, 1, 1, 1], "int32", [10])
        K = T.buffer_decl(shape=[10], dtype="int32", data=K_data)
        for x in T.serial(0, 10):
            B[x] = A[x] + K[x]
        for x in T.serial(0, 10):
            C[x] = B[x]
    return constant
def rank0():
    """Return a fixture operating on rank-0 (scalar) buffers indexed with ()."""
    @T.prim_func
    def rank0(a: T.handle) -> None:
        A = T.match_buffer(a, (), "float32")
        B = T.alloc_buffer((), "float32")
        A[()] = 2
        B[()] = A[()]
    return rank0
def rank0_block():
    """Return a fixture whose block reads/writes are rank-0 regions."""
    @T.prim_func
    def rank0_block(a: T.handle) -> None:
        A = T.match_buffer(a, (), "float32")
        B = T.alloc_buffer((), "float32")
        B[()] = A[()]
        with T.block("update"):
            # Scalar regions in the access annotations.
            T.reads([A[()]])
            T.writes([B[()]])
            for i in range(1):
                B[()] = A[()]
    return rank0_block
def select():
    # Builder: prim_func exercising the T.Select intrinsic.
    @T.prim_func
    def select(a: T.handle) -> None:
        A = T.match_buffer(a, (), "float32")
        A[()] = T.Select(True, 1, 2)

    return select


def minmax():
    # Builder: prim_func exercising the T.min / T.max intrinsics.
    @T.prim_func
    def minmax(a: T.handle) -> None:
        A = T.match_buffer(a, (), "float32")
        A[()] = T.min(1, 2)
        A[()] = T.max(1, 2)

    return minmax


def abs():
    # Builder: prim_func exercising T.abs inside a block.
    # NOTE(review): this outer name shadows the builtin `abs`; kept as-is
    # because the ir_generator parameter list below references it by name.
    @T.prim_func
    def abs(a: T.handle) -> None:
        A = T.match_buffer(a, (128, 128), "float32")
        for i, j in T.grid(128, 128):
            with T.block("A"):
                vi, vj = T.axis.remap("SS", [i, j])
                A[vi, vj] = T.abs(A[vi, vj])

    return abs
def constant_folding():
    # Builder: float min/max expressions with mixed literal forms, which the
    # printer/parser may constant-fold; round-trip must stay structurally equal.
    @T.prim_func
    def constant_folding(a: T.handle) -> None:
        A = T.match_buffer(a, (), "float32")
        A[()] = T.min(2.2, 5.2)
        A[()] = T.max(T.float32(2.2), T.float32(T.float32(5.2)))
        A[()] = T.min(2.2, 5.0)

    return constant_folding


def simplify_bracket():
    # Builder: expression where parentheses are semantically required; the
    # printed script must keep the bracketed grouping.
    @T.prim_func
    def simplify_bracket() -> None:
        a = T.var("int32")
        b = T.var("int32")
        c = T.var("int32")
        d = T.var("int32")
        T.evaluate(a + b * (c + d))

    return simplify_bracket
def var_with_same_name():
    # Builder: two independent loop nests deliberately reusing the same loop
    # and block-var names (i, j, vi, vj); the printer must not rename them.
    @T.prim_func
    def var_with_same_name(a: T.handle) -> None:
        A = T.match_buffer(a, (16, 16), "float32")
        for i, j in T.grid(16, 16):
            with T.block():
                vi, vj = T.axis.remap("SS", [i, j])
                A[vi, vj] = 0
        for i, j in T.grid(16, 16):
            with T.block():
                vi, vj = T.axis.remap("SS", [i, j])
                A[vi, vj] = 0

    return var_with_same_name
def test_same_name_var():
    """Round-trip a func that reuses var names; the printer must not uniquify them."""
    func = var_with_same_name()
    out_str = func.script(tir_prefix="T", show_meta=True)
    rt_func = tvm.script.from_source(out_str)
    tvm.ir.assert_structural_equal(func, rt_func)

    # Block vars appear twice verbatim, never as renamed vi_0 / vj_0 forms.
    assert out_str.count('vi, vj = T.axis.remap("SS", [i, j])') == 2
    assert out_str.find("vi_") == -1
    assert out_str.find("vj_") == -1

    # Loop vars likewise appear twice verbatim.
    assert out_str.count("for i, j in T.grid(16, 16)") == 2
    assert out_str.find("i_") == -1
    # Fixed: this assertion previously duplicated the "i_" check; the second
    # loop variable is j, so the renamed form to rule out is "j_".
    assert out_str.find("j_") == -1
def while_loop():
    # Builder: mixes a blockized for loop with a While statement whose
    # condition reads a rank-0 local buffer.
    @T.prim_func
    def while_loop(a: T.handle, b: T.handle) -> None:
        A = T.match_buffer(a, (16,), "float32")
        B = T.match_buffer(b, (16,), "float32")
        i = T.alloc_buffer((), "int32", scope="local")
        for ii in range(16):
            with T.block():
                vi = T.axis.S(16, ii)
                B[vi] = 0
        while i[()] < 10:
            for j in range(16):
                B[j] += A[j]

    return while_loop
# fmt: off
def primfunc_with_allocate_annotations():
    # Builder: T.allocate carrying an `annotations` dict, which must survive
    # the script round-trip (max_pool2d + cast workload).
    @T.prim_func
    def primfunc_with_allocate_annotations(placeholder_28: T.handle, T_cast_6: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_nn_max_pool2d_cast", "tir.noalias": True})
        placeholder_29 = T.match_buffer(placeholder_28, [802816], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        T_cast_7 = T.match_buffer(T_cast_6, [200704], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        # body
        tensor_2_data = T.allocate([200704], "uint8", "global", annotations={"attr1_key": "attr1_value"})
        tensor_2 = T.buffer_decl(shape=[200704], dtype="uint8", scope="global", data=tensor_2_data)
        for ax0_ax1_fused_4 in T.serial(0, 56):
            for ax2_4 in T.serial(0, 56):
                for ax3_init in T.serial(0, 64):
                    tensor_2[(((ax0_ax1_fused_4*3584) + (ax2_4*64)) + ax3_init)] = T.uint8(0)
                for rv0_rv1_fused_1, ax3_2 in T.grid(9, 64):
                    tensor_2[(((ax0_ax1_fused_4*3584) + (ax2_4*64)) + ax3_2)] = T.max(tensor_2[(((ax0_ax1_fused_4*3584) + (ax2_4*64)) + ax3_2)], T.if_then_else(((((ax0_ax1_fused_4*2) + T.floordiv(rv0_rv1_fused_1, 3)) < 112) and (((ax2_4*2) + T.floormod(rv0_rv1_fused_1, 3)) < 112)), placeholder_29[(((((ax0_ax1_fused_4*14336) + (T.floordiv(rv0_rv1_fused_1, 3)*7168)) + (ax2_4*128)) + (T.floormod(rv0_rv1_fused_1, 3)*64)) + ax3_2)], T.uint8(0), dtype="uint8"))
        for ax0_ax1_fused_5 in T.serial(0, 56):
            for ax2_5, ax3_3 in T.grid(56, 64):
                T_cast_7[(((ax0_ax1_fused_5*3584) + (ax2_5*64)) + ax3_3)] = T.cast(tensor_2[(((ax0_ax1_fused_5*3584) + (ax2_5*64)) + ax3_3)], "int16")

    return primfunc_with_allocate_annotations
# fmt: on
# fmt: off
def comm_reducer_single_reduce_group():
    # Builder: tvm_thread_allreduce guarded by a single-value comm_reducer
    # attribute (sum reduction across threadIdx.x).
    @T.prim_func
    def comm_reducer_single_reduce_group(a: T.handle, b: T.handle) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        threadIdx_x = T.env_thread("threadIdx.x")
        A = T.match_buffer(a, [16384], dtype="float32")
        for i in T.serial(0, 128):
            T.launch_thread(threadIdx_x, 128)
            reduce_temp0_data = T.allocate([1], "float32", "local")
            reduce_temp0 = T.buffer_decl(shape=[1], dtype="float32", scope="local", data=reduce_temp0_data)
            with T.attr(T.comm_reducer(lambda x, y: x + y, [T.float32(0)]), "reduce_scope", T.reinterpret(T.uint64(0), dtype="handle")):
                T.evaluate(T.tvm_thread_allreduce(T.uint32(1), A[i * 128 + threadIdx_x], True, reduce_temp0.data, threadIdx_x, dtype="handle"))

    return comm_reducer_single_reduce_group


def comm_reducer_multiple_reduce_groups():
    # Builder: comm_reducer with a two-value (argmax-style) lambda and two
    # identity elements.
    @T.prim_func
    def comm_reducer_multiple_reduce_groups(a: T.handle, b: T.handle) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        threadIdx_x = T.env_thread("threadIdx.x")
        A = T.match_buffer(a, [16384], dtype="float32")
        for i in T.serial(0, 128):
            T.launch_thread(threadIdx_x, 128)
            reduce_temp0_data = T.allocate([1], "float32", "local")
            reduce_temp0 = T.buffer_decl(shape=[1], dtype="float32", scope="local", data=reduce_temp0_data)
            with T.attr(T.comm_reducer(lambda x0, x1, y0, y1: (T.Select((x1 >= y1), x0, y0), T.Select((x1 >= y1), x1, y1)), [T.int32(-1), T.min_value("float32")]), "reduce_scope", T.reinterpret(T.uint64(0), dtype="handle")):
                T.evaluate(T.tvm_thread_allreduce(T.uint32(1), A[i * 128 + threadIdx_x], True, reduce_temp0.data, threadIdx_x, dtype="handle"))

    return comm_reducer_multiple_reduce_groups


def multiple_commreducer():
    # Builder: two distinct comm_reducers (max and sum) in one prim_func, as
    # produced by cross-thread softmax reductions.
    @T.prim_func
    def multiple_commreducer() -> None:
        normal_reduce_temp0 = T.buffer_decl([1], dtype="float32", strides=[1], scope="local")
        normal_reduce_temp1 = T.buffer_decl([1], dtype="float32", strides=[1], scope="local")
        reduce_temp0 = T.buffer_decl([1], dtype="float32", strides=[1], scope="local")
        reduce_temp1 = T.buffer_decl([1], dtype="float32", strides=[1], scope="local")
        for ax0_1 in T.thread_binding(0, 32, thread="threadIdx.x"):
            with T.block("T_softmax_maxelem_cross_thread_reduction"):
                T.attr(T.comm_reducer(lambda x, y: T.max(x, y), [T.min_value("float32")]), "reduce_scope", T.reinterpret(T.uint64(0), dtype="handle"))
                T.evaluate(T.tvm_thread_allreduce(T.uint32(1), normal_reduce_temp0[0], True, reduce_temp0.data, ax0_1, dtype="handle"))
        for ax0_1 in T.thread_binding(0, 32, thread="threadIdx.x"):
            with T.block("T_softmax_expsum_cross_thread_reduction"):
                T.attr(T.comm_reducer(lambda x, y: x + y, [T.float32(0)]), "reduce_scope", T.reinterpret(T.uint64(0), dtype="handle"))
                T.evaluate(T.tvm_thread_allreduce(T.uint32(1), normal_reduce_temp1[0], True, reduce_temp1.data, ax0_1, dtype="handle"))

    return multiple_commreducer
# fmt: on
def func_div_mod():
    # Builder: Python // and % must parse to floordiv/floormod, while
    # T.truncmod parses to the truncated Mod node.
    @T.prim_func
    def func_div_mod():
        a = T.var("int32")
        b = T.var("int32")
        T.evaluate(a // b)
        T.evaluate(a % b)
        T.evaluate(T.truncmod(a, b))

    return func_div_mod


def test_div_mod():
    # Round-trip, then check each expression parsed to the expected TIR node.
    func = func_div_mod()
    rt_func = tvm.script.from_source(func.script())
    tvm.ir.assert_structural_equal(func, rt_func, True)

    assert isinstance(func.body[0].value, tvm.tir.FloorDiv)
    assert isinstance(func.body[1].value, tvm.tir.FloorMod)
    assert isinstance(func.body[2].value, tvm.tir.Mod)
def loop_extent_dependent():
    # Builder: inner loop whose extent depends on the outer loop variable.
    @T.prim_func
    def loop_extent_dependent(a: T.handle) -> None:
        A = T.match_buffer(a, [], dtype="int32")
        for i in T.serial(0, 128):
            for j in T.serial(0, i):
                A[()] = A[()] + j

    return loop_extent_dependent


def nontrivial_range_axis():
    # Builder: block axis whose domain is (1, 11) rather than starting at 0.
    @T.prim_func
    def nontrivial_range_axis(a: T.handle) -> None:
        A = T.match_buffer(a, (10), "float32")
        for i in range(10):
            with T.block("block"):
                vi = T.axis.spatial((1, 11), i + 1)
                A[vi - 1] = A[vi - 1] + 1.0

    return nontrivial_range_axis
def func_with_target_spec_by_config():
    # Builder: target attribute supplied as a full config dict, including a
    # nested host target.
    @T.prim_func
    def func_with_target_spec_by_config() -> None:
        T.func_attr(
            {
                "kTarget": T.target(
                    {
                        "max_num_threads": 1024,
                        "arch": "sm_70",
                        "thread_warp_size": 32,
                        "kind": "cuda",
                        "tag": "",
                        "keys": ["cuda", "gpu"],
                        "host": T.target({"kind": "llvm", "tag": "", "keys": ["cpu"]}),
                    }
                )
            }
        )
        T.evaluate(0)

    return func_with_target_spec_by_config


def func_with_target_spec_by_str():
    # Builder: target attribute supplied as a target tag string.
    @T.prim_func
    def func_with_target_spec_by_str() -> None:
        T.func_attr({"kTarget": T.target("nvidia/nvidia-a100")})
        T.evaluate(0)

    return func_with_target_spec_by_str
def func_root_attr():
    # Builder: explicit root block carrying a block_attr annotation.
    @T.prim_func
    def func_root_attr():
        with T.block("root"):
            T.block_attr({"a": "0"})
            T.evaluate(0)

    return func_root_attr


def func_trivial_root_block():
    # Builder: a trivial explicit root block wrapping a single store.
    @T.prim_func
    def func(A: T.Buffer[1, "int32"]):
        with T.block("root"):
            A[0] = 0

    return func


def func_nested_root_block():
    # Builder: a named block nested directly under an explicit root block.
    @T.prim_func
    def func(A: T.Buffer[1, "int32"]):
        with T.block("root"):
            with T.block("block"):
                A[0] = 0

    return func
def func_T_ptr_let_statement():
    # Builder: T.Ptr parameters and T.Ptr-annotated lets used as buffer data
    # pointers; checks buffer_decl placement relative to its data definition.
    @T.prim_func
    def func_T_ptr_let_statement(
        args: T.handle, arg_type_ids_handle: T.Ptr[T.int32], num_args: T.int32
    ) -> None:
        # The T.Ptr declaration in the parameter list should parse
        # correctly, and should be usable as the data pointer in a buffer.
        arg_type_ids = T.buffer_decl([2], dtype="int32", data=arg_type_ids_handle)

        arg0: T.handle = T.tvm_struct_get(args, 0, 12, dtype="handle")
        arg1: T.handle = T.tvm_struct_get(args, 1, 12, dtype="handle")

        # Functions that return a "handle" can be assigned to a T.Ptr
        # variable. A variable annotated with T.Ptr still has dtype of
        # T.handle, but has type annotation as a pointer type.
        A_data: T.Ptr[T.float32] = T.tvm_struct_get(arg0, 0, 1, dtype="handle")

        # The buffer declaration has a data pointer defined earlier in
        # this function. It should only be defined after the data pointer
        # has been defined, and should not be hoisted into the header of
        # the function as other buffer_decl statements can be.
        A = T.buffer_decl([1024], dtype="float32", data=A_data)
        B_data: T.Ptr[T.float32] = T.tvm_struct_get(arg1, 0, 1, dtype="handle")
        B = T.buffer_decl([1024], dtype="float32", data=B_data)

        B[0] = A[0]

    return func_T_ptr_let_statement


def func_T_ptr_allocate():
    # Builder: T.allocate returning a data pointer consumed by buffer_decl.
    @T.prim_func
    def func_T_ptr_allocate() -> None:
        A_data = T.allocate([1024], "float32", "global")
        A = T.buffer_decl(shape=[1024], dtype="float32", scope="global", data=A_data)
        A[0] = 0.0

    return func_T_ptr_allocate
def llvm_intrin_call():
    # Builder: call_llvm_pure_intrin with an llvm_lookup_intrinsic_id argument
    # (llvm.ctpop.i8) must round-trip.
    @T.prim_func
    def ctpop(A: T.Buffer[(16,), "uint8"], B: T.Buffer[(16,), "uint8"]) -> None:
        for i in range(0, 16):
            with T.block("A"):
                vi = T.axis.remap(
                    "S",
                    [
                        i,
                    ],
                )
                B[vi] = T.call_llvm_pure_intrin(
                    T.llvm_lookup_intrinsic_id("llvm.ctpop.i8"),
                    T.uint32(1),
                    A[vi],
                    dtype="uint8",
                )

    return ctpop
def parse_bufferslice_as_range_bound():
    # Builder: loop bounds and read regions given by BufferLoads (indptr-style
    # segmented sum), exercising BufferSlice parsing in range positions.
    @T.prim_func
    def segment_sum(
        A_ptr: T.handle, B_ptr: T.handle, indptr_ptr: T.handle, n: T.int32, m: T.int32
    ) -> None:
        A = T.match_buffer(A_ptr, [m], dtype="float32")
        B = T.match_buffer(B_ptr, [n], dtype="float32")
        indptr = T.match_buffer(indptr_ptr, [n + 1], dtype="int32")
        for i in T.serial(n):
            with T.block("outer"):
                vi = T.axis.spatial(n, i)
                T.reads(indptr[i : i + 2], B[vi], A[indptr[i] : indptr[i + 1]])
                T.writes(B[vi])
                for j in T.serial(indptr[i], indptr[i + 1]):
                    with T.block("inner"):
                        vj = T.axis.reduce(m, j)
                        T.reads(B[vi], A[vj])
                        T.writes(B[vi])
                        with T.init():
                            B[vi] = T.float32(0)
                        B[vi] = B[vi] + A[vj]

    return segment_sum
def int64_support():
    # Builder: int64 buffer shapes/loop extents alongside int32 loops.
    @T.prim_func
    def elementwise_shape_int64(a: T.handle, c: T.handle) -> None:
        A = T.match_buffer(a, (T.int64(128), T.int64(128)), dtype="float32")
        B = T.alloc_buffer((T.int64(128), T.int64(128)), dtype="float32")
        C = T.match_buffer(c, (T.int64(128), T.int64(128)), dtype="float32")
        for i, j in T.grid(128, 128):
            with T.block("B"):
                vi, vj = T.axis.remap("SS", [i, j])
                B[vi, vj] = A[vi, vj] * 2.0
        for i, j in T.grid(T.int64(128), T.int64(128)):
            with T.block("C"):
                vi, vj = T.axis.remap("SS", [i, j])
                C[vi, vj] = B[vi, vj] + 1.0

    return elementwise_shape_int64
def string_annotation_escaping():
    # Builder: special characters (quotes, tabs, CR, multi-line text) in
    # string annotations must be escaped correctly when printed.
    @T.prim_func
    def string_annotation_of_special_chars():
        T.func_attr(
            {
                "key1": '"\'hello\t\r"',
                # NOTE(review): the leading whitespace inside this multi-line
                # string was stripped by formatting upstream of this file;
                # confirm against the original before relying on exact bytes.
                "key2": """
 %1 = add i32 %0, %0
 %2 = add i32 %0, %1
 %3 = add i32 %1, %2
 """,
            }
        )
        T.evaluate(0)

    return string_annotation_of_special_chars


def pointer_type():
    # Builder: T.Ptr annotations with and without an explicit storage scope.
    @T.prim_func
    def func_with_ptr_type_annotations(x: T.Ptr[T.int32], y: T.Ptr[T.int32, "shared"]):
        xx_data = T.allocate([16], "int32", "global")
        xx = T.buffer_decl(shape=[16], dtype="int32", scope="global", data=xx_data)
        yy_data = T.allocate([16], "int32", "shared")
        yy = T.buffer_decl(shape=[16], dtype="int32", scope="shared", data=yy_data)
        a: T.Ptr[T.int32] = T.address_of(xx[0], dtype="handle")
        b: T.Ptr[T.int32, "shared"] = T.address_of(yy[0], dtype="handle")
        T.evaluate(T.call_extern("copy", a, b, dtype=""))

    return func_with_ptr_type_annotations
def buffer_axis_separator():
    # Builder: buffers declared with axis_separators (non-flat memory layout).
    @T.prim_func
    def element_wise(a: T.handle, c: T.handle) -> None:
        A = T.match_buffer(a, (128, 128), "float32", axis_separators=[1])
        C = T.match_buffer(c, (128, 128), "float32")
        B = T.alloc_buffer((128, 128), "float32", axis_separators=[1])
        for i, j in T.grid(128, 128):
            with T.block("B"):
                vi, vj = T.axis.remap("SS", [i, j])
                B[vi, vj] = A[vi, vj] * T.float32(2)
        for i, j in T.grid(128, 128):
            with T.block("C"):
                vi, vj = T.axis.remap("SS", [i, j])
                C[vi, vj] = B[vi, vj] + T.float32(1)

    return element_wise


def buffer_ramp_access_as_slice_index():
    # Builder: vectorized (Ramp) buffer accesses written as slice indices,
    # with unit and non-unit strides.
    @T.prim_func
    def buffer_ramp_access(a: T.handle, b: T.handle, c: T.handle) -> None:
        A = T.match_buffer(a, (128,), "float32")
        B = T.match_buffer(b, (128,), "float32")
        C = T.match_buffer(c, (128,), "float32")
        for i in range(128):
            A[i : i + 1 : 1] = i
        for i in range(4):
            B[i * 32 : i * 32 + 32] = A[i * 32 : i * 32 + 32 : 1] + T.broadcast(1.0, 32)
        for i in range(4):
            C[i : i + 128 : 4] = B[i : i + 128 : 4] + T.broadcast(1.0, 32)

    return buffer_ramp_access
def let_expression():
    # Builder: a Let *expression* (T.let) as opposed to a LetStmt.
    @T.prim_func
    def func():
        x = T.var("int32")
        T.evaluate(T.let(x, 1, x + 1))

    return func


def void_ptr():
    # Builder: a T.Ptr[T.void] parameter round-trips.
    @T.prim_func
    def func(out_ret_value: T.Ptr[T.void]):
        T.evaluate(out_ret_value)

    return func


def decl_buffer():
    # Builder: T.decl_buffer aliasing existing buffer data pointers.
    @T.prim_func
    def func(A: T.Buffer[(16, 16), "float32"], B: T.Buffer[(16, 16), "float32"]) -> None:
        A_flattened = T.decl_buffer(data=A.data, shape=(256,), dtype="float32")
        B_flattened = T.decl_buffer(data=B.data, shape=(256,), dtype="float32")
        C_alias = T.decl_buffer(data=A_flattened.data, shape=(256,), dtype="float32")
        for i in range(256):
            B_flattened[i] = A_flattened[i] + C_alias[i] + T.float32(1.0)

    return func
def allocate_and_decl_buffer():
    # Builder: both statement-form and with-form T.allocate, each paired with
    # a T.decl_buffer over the allocated data.
    @T.prim_func
    def func(A: T.Buffer[(16,), "float32"], B: T.Buffer[(16,), "float32"]) -> None:
        D_data = T.allocate((16,), "float32", "global")
        D = T.decl_buffer((16,), "float32", data=D_data)
        for i in range(4):
            with T.allocate((4,), "float32", "global") as C_data:
                C = T.decl_buffer((4,), "float32", data=C_data)
                for j in range(4):
                    C[j] = A[i * 4 + j] + T.float32(1.0)
                for j in range(4):
                    D[j] = C[j]
                for j in range(4):
                    B[i * 4 + j] = D[j]

    return func
def float_infinity():
    # Builder: float "inf" literal via T.float32("inf") combined with isnan.
    @T.prim_func
    def func(
        placeholder: T.Buffer[(1, 512, 768), "float32"], T_isinf: T.Buffer[(1, 512, 768), "bool"]
    ) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        # body
        # with T.block("root")
        for i0, i1, i2 in T.grid(1, 512, 768):
            with T.block("T_isinf"):
                ax0, ax1, ax2 = T.axis.remap("SSS", [i0, i1, i2])
                T.reads(placeholder[ax0, ax1, ax2])
                T.writes(T_isinf[ax0, ax1, ax2])
                T_isinf[ax0, ax1, ax2] = T.fabs(
                    placeholder[ax0, ax1, ax2], dtype="float32"
                ) == T.float32("inf") and not (T.isnan(placeholder[ax0, ax1, ax2], dtype="bool"))

    return func


def minimal_i32_literal():
    # Builder: INT32_MIN must print/parse without overflowing int32.
    @T.prim_func
    def func() -> None:
        T.evaluate(T.int32(-2147483648))
        T.evaluate(-T.int64(2147483648))

    return func
def boolean_argument():
    # Builder: parameter annotated with T.boolean.
    @T.prim_func
    def func(a: T.boolean) -> None:
        T.evaluate(a)

    return func


def bool_argument():
    # Builder: parameter annotated with the T.bool alias.
    @T.prim_func
    def func(a: T.bool) -> None:
        T.evaluate(a)

    return func


def bool_variable_annotation():
    # Builder: a let-bound variable annotated as T.bool.
    @T.prim_func
    def func() -> None:
        a: T.bool = T.call_extern("dummy", dtype="bool")
        T.evaluate(0)

    return func


def return_none():
    # Builder: a prim_func with no return annotation at all
    # (see test_return_none_no_trailing_type).
    @T.prim_func
    def func():
        T.evaluate(0)

    return func


def bool_primitive():
    # Builder: the T.bool(...) literal constructor.
    @T.prim_func
    def func() -> None:
        T.evaluate(T.bool(True))

    return func


def bool_cast():
    # Builder: casting an int32 expression to bool via T.bool.
    @T.prim_func
    def func() -> None:
        T.evaluate(T.bool(T.int32(0)))

    return func
# Parameterized fixture: each entry is a zero-arg builder returning a
# PrimFunc/IRModule; test_roundtrip runs once per entry.
ir_generator = tvm.testing.parameter(
    opt_gemm_normalize,
    opt_gemm_lower,
    opt_gemm_mod_host,
    opt_conv_tensorcore_normalize,
    opt_conv_tensorcore_lower,
    opt_conv_tensorcore_mod_host,
    vthread_func,
    matmul,
    module_const,
    constant,
    rank0,
    rank0_block,
    select,
    minmax,
    abs,
    constant_folding,
    simplify_bracket,
    while_loop,
    primfunc_with_allocate_annotations,
    comm_reducer_single_reduce_group,
    comm_reducer_multiple_reduce_groups,
    multiple_commreducer,
    loop_extent_dependent,
    nontrivial_range_axis,
    func_with_target_spec_by_config,
    func_with_target_spec_by_str,
    func_root_attr,
    func_trivial_root_block,
    func_nested_root_block,
    func_T_ptr_let_statement,
    func_T_ptr_allocate,
    llvm_intrin_call,
    parse_bufferslice_as_range_bound,
    int64_support,
    string_annotation_escaping,
    pointer_type,
    buffer_axis_separator,
    buffer_ramp_access_as_slice_index,
    let_expression,
    void_ptr,
    decl_buffer,
    allocate_and_decl_buffer,
    float_infinity,
    minimal_i32_literal,
    boolean_argument,
    bool_argument,
    bool_variable_annotation,
    bool_primitive,
    bool_cast,
    return_none,
)
def test_roundtrip(ir_generator):
    # Print the generated IR and re-parse it; the result must be
    # structurally equal to the original.
    original = ir_generator()
    after_roundtrip = tvm.script.from_source(original.script(show_meta=True))
    tvm.ir.assert_structural_equal(original, after_roundtrip, True)


def test_return_none_no_trailing_type():
    # A func defined without a return annotation must not print "-> None".
    func = return_none()
    script = func.script()
    assert "-> None" not in script


if __name__ == "__main__":
    tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tvmscript_spans.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from tvm.script.parser_v1 import tir as T
@T.prim_func
def loops() -> None:
    for i in T.parallel(0, 2):
        for j in T.serial(0, 1):
            for z in T.vectorized(3, 4):
                T.evaluate(0)


def test_loops():
    # NOTE(review): start_line hard-codes the line on which `loops` is defined
    # in the original file; any edit above `loops` shifts every span below.
    start_line = 23
    parsed = loops

    assert parsed.span.line == start_line

    assert parsed.body.span.line == start_line + 1
    assert parsed.body.min.span.column == 25
    assert parsed.body.extent.span.column == 28
    assert parsed.body.extent.span.line == start_line + 1

    assert parsed.body.body.span.line == start_line + 2
    assert parsed.body.body.loop_var.span.line == start_line + 2
    assert parsed.body.body.loop_var.span.column == 13

    assert parsed.body.body.body.span.line == start_line + 3
    assert parsed.body.body.body.span.column == 22

    assert parsed.body.body.body.body.span.line == start_line + 4
    assert parsed.body.body.body.body.span.column == 17
# Two-statement body used by test_statements to check per-statement spans.
@T.prim_func
def statements() -> None:
    T.evaluate(1)
    T.evaluate("test")
def test_statements():
    # NOTE(review): start_line hard-codes the line on which `statements` is
    # defined in the original file; edits above it shift these spans.
    start_line = 53
    parsed = statements
    assert parsed.body.span.line == start_line + 1

    assert parsed.body[0].span.line == start_line + 1
    assert parsed.body[0].span.column == 5

    # Fixed: these two assertions previously re-checked body[0]; the second
    # statement of the SeqStmt sits one line further down at the same column.
    assert parsed.body[1].span.line == start_line + 2
    assert parsed.body[1].span.column == 5


if __name__ == "__main__":
    test_loops()
    test_statements()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tvmscript_syntax_sugar.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring,invalid-name,pointless-string-statement
import sys
import pytest
import tvm.testing
from tvm.ir import assert_structural_equal
from tvm.script import from_source
from tvm.script import tir as T
# T.reads/T.writes given explicit lists (no sugar)...
@T.prim_func
def transformed_matmul_no_syntax_sugar(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [128, 128])
    B = T.match_buffer(b, [128, 128])
    C = T.match_buffer(c, [128, 128])
    for i0, i1, i2_outer, i2_inner_outer, i2_inner_inner in T.grid(128, 128, 4, 8, 4):
        with T.block("update"):
            vi, vj = T.axis.remap("SS", [i0, i1])
            vk = T.axis.R(128, i2_outer * 32 + i2_inner_outer * 4 + i2_inner_inner)
            T.reads([C[vi, vj], A[vi, vk], B[vj, vk]])
            T.writes([C[vi, vj], A[vi, vk]])
            with T.init():
                C[vi, vj] = 0.0
            A[vi, vk] = A[vi, vk] + B[vj, vk]
            C[vi, vj] = C[vi, vj] + (A[vi, vk] * B[vj, vk])


# ...versus varargs (sugar); both must parse to the same IR.
@T.prim_func
def transformed_matmul_syntax_sugar(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [128, 128])
    B = T.match_buffer(b, [128, 128])
    C = T.match_buffer(c, [128, 128])
    for i0, i1, i2_outer, i2_inner_outer, i2_inner_inner in T.grid(128, 128, 4, 8, 4):
        with T.block("update"):
            vi, vj = T.axis.remap("SS", [i0, i1])
            vk = T.axis.R(128, i2_outer * 32 + i2_inner_outer * 4 + i2_inner_inner)
            T.reads(C[vi, vj], A[vi, vk], B[vj, vk])
            T.writes(C[vi, vj], A[vi, vk])
            with T.init():
                C[vi, vj] = 0.0
            A[vi, vk] = A[vi, vk] + B[vj, vk]
            C[vi, vj] = C[vi, vj] + (A[vi, vk] * B[vj, vk])


def test_reads_writes_syntax_sugar():
    assert_structural_equal(transformed_matmul_no_syntax_sugar, transformed_matmul_syntax_sugar)
# Loop constructors with explicit (start, stop) arguments...
@T.prim_func
def loop_no_syntax_sugar(a: T.handle) -> None:
    A = T.match_buffer(a, (128, 128, 128, 128))
    for i in T.serial(0, 128):
        for j in T.parallel(0, 128):
            for k in T.vectorized(0, 128):
                for x in T.unroll(0, 128):
                    for y in T.thread_binding(0, 128, thread="threadIdx.x"):
                        for z in T.thread_binding(0, 128, thread="threadIdx.x"):
                            A[i, j, k, x] = A[i, j, k, x] * 2.0


# ...versus the single-extent sugar; both must parse to the same IR.
@T.prim_func
def loop_syntax_sugar(a: T.handle) -> None:
    A = T.match_buffer(a, (128, 128, 128, 128))
    for i in T.serial(128):
        for j in T.parallel(128):
            for k in T.vectorized(128):
                for x in T.unroll(128):
                    for y in T.thread_binding(128, "threadIdx.x"):
                        for z in T.thread_binding(128, thread="threadIdx.x"):
                            A[i, j, k, x] = A[i, j, k, x] * 2.0


def test_loop_syntax_sugar():
    assert_structural_equal(loop_no_syntax_sugar, loop_syntax_sugar)
# match buffer - use kwargs
@T.prim_func
def elementwise_handle(
    a: T.handle,
    b: T.handle,
) -> None:
    A = T.match_buffer(a, (128, 128, 128, 128))
    B = T.match_buffer(b, (128, 128, 128, 128))
    for i, j, k, l in T.grid(128, 128, 128, 128):
        with T.block("B"):
            vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
            B[vi, vj, vk, vl] = A[vi, vj, vk, vl] * 2.0


# match buffer - use buffer with kwargs
@T.prim_func
def elementwise_buffer_kwargs(
    a: T.Buffer(shape=(128, 128, 128, 128), dtype="float32"),
    b: T.Buffer(shape=(128, 128, 128, 128), dtype="float32"),
) -> None:
    for i, j, k, l in T.grid(128, 128, 128, 128):
        with T.block("B"):
            vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
            b[vi, vj, vk, vl] = a[vi, vj, vk, vl] * 2.0


# match buffer - use buffer without kwargs
@T.prim_func
def elementwise_buffer_no_kwargs(
    a: T.Buffer[(128, 128, 128, 128), "float32"],
    b: T.Buffer[(128, 128, 128, 128), "float32"],
) -> None:
    for i, j, k, l in T.grid(128, 128, 128, 128):
        with T.block("B"):
            vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
            b[vi, vj, vk, vl] = a[vi, vj, vk, vl] * 2.0


def test_match_buffer_syntax_sugar():
    # with kwargs
    assert_structural_equal(elementwise_handle, elementwise_buffer_kwargs)
    # without kwargs
    assert_structural_equal(elementwise_handle, elementwise_buffer_no_kwargs)
def test_match_buffer_1d():
    # A 1-D T.Buffer annotation may use a bare int shape instead of a tuple.
    @T.prim_func
    def func_no_sugar(a: T.handle):
        A = T.match_buffer(a, shape=(16,))
        for i in T.serial(16):
            A[i] = 0.0

    @T.prim_func
    def func_with_sugar(A: T.Buffer[16, "float32"]):
        for i in T.serial(16):
            A[i] = 0.0

    assert_structural_equal(func_no_sugar, func_with_sugar)


# match buffer failed case
def test_match_buffer_no_kwargs_failed():
    # T.Buffer subscript without a dtype must be rejected at parse time.
    with pytest.raises(ValueError) as e:

        @T.prim_func
        def elementwise_buffer_no_kwargs_failed(
            a: T.Buffer[(128, 128, 128, 128)],
            b: T.Buffer[(128, 128, 128, 128)],
        ) -> None:
            pass
# dynamic shape gemm
@T.prim_func
def gemm_dyn_shape(a: T.handle, b: T.handle, c: T.handle):
    # Shapes are symbolic vars (N, M, K) rather than constants.
    N = T.var("int32")
    M = T.var("int32")
    K = T.var("int32")
    A = T.match_buffer(a, (N, K), "float32")
    B = T.match_buffer(b, (K, M), "float32")
    C = T.match_buffer(c, (N, M), "float32")
    for i, j, k in T.grid(N, M, K):
        with T.block("gemm"):
            vi, vj, vk = T.axis.remap("SSR", [i, j, k])
            with T.init():
                C[vi, vj] = 0.0
            C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj]


def test_dynamic_shape_gemm():
    gemm_dyn_shape_roundtrip = from_source(gemm_dyn_shape.script())
    assert_structural_equal(gemm_dyn_shape, gemm_dyn_shape_roundtrip)
@T.prim_func
def preflattened_buffer_map(A: T.handle, B: T.handle):
    A_1 = T.match_buffer(A, [1])
    T.preflattened_buffer(A_1, [1], align=1, offset_factor=2)
    B_1 = T.match_buffer(B, [1])
    T.preflattened_buffer(B_1, [1])
    B_1[0] = A_1[0]


def test_preflattened_buffer_map():
    # The align/offset_factor kwargs of T.preflattened_buffer must land on the
    # corresponding entry of the func's preflattened_buffer_map.
    A_var = [
        k for k, _ in preflattened_buffer_map.preflattened_buffer_map.items() if k.name == "A"
    ][0]
    assert preflattened_buffer_map.preflattened_buffer_map[A_var].data_alignment == 1
    assert preflattened_buffer_map.preflattened_buffer_map[A_var].offset_factor == 2
# int64 shapes spelled via T.match_buffer on handles...
@T.prim_func
def match_buffer_int64(a: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, (T.int64(128), T.int64(128)), dtype="float32")
    B = T.alloc_buffer((T.int64(128), T.int64(128)), dtype="float32")
    C = T.match_buffer(c, (T.int64(128), T.int64(128)), dtype="float32")
    for i, j in T.grid(128, 128):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            B[vi, vj] = A[vi, vj] * 2.0
    for i, j in T.grid(T.int64(128), T.int64(128)):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = B[vi, vj] + 1.0


# ...versus the T.Buffer parameter sugar the printer emits after a round-trip.
@T.prim_func
def match_buffer_int64_after_roundtrip(
    A: T.Buffer[(T.int64(128), T.int64(128)), "float32"],
    C: T.Buffer[(T.int64(128), T.int64(128)), "float32"],
) -> None:
    B = T.alloc_buffer((T.int64(128), T.int64(128)), dtype="float32")
    for i, j in T.grid(128, 128):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            B[vi, vj] = A[vi, vj] * 2.0
    for i, j in T.grid(T.int64(128), T.int64(128)):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = B[vi, vj] + 1.0


def test_match_buffer_int64():
    original = match_buffer_int64
    after_roundtrip = match_buffer_int64_after_roundtrip
    assert_structural_equal(original, after_roundtrip, True)
def test_match_buffer_region_has_implicit_shape_dtype():
    # When matching a buffer region, shape/dtype may be inferred from the region.
    @T.prim_func
    def explicit_shape_dtype(A: T.Buffer[(16, 64), "int32"]):
        with T.block():
            B = T.match_buffer(A[8:16, 32:64], shape=(8, 32), dtype="int32")
            T.evaluate(0)

    @T.prim_func
    def implicit_shape_dtype(A: T.Buffer[(16, 64), "int32"]):
        with T.block():
            B = T.match_buffer(A[8:16, 32:64])
            T.evaluate(0)

    assert_structural_equal(explicit_shape_dtype, implicit_shape_dtype)


def test_match_buffer_input_requires_shape_arg():
    # Matching a raw handle without a shape must be a parse-time diagnostic.
    with pytest.raises(tvm.error.DiagnosticError):

        @T.prim_func
        def func(a: T.handle):
            A = T.match_buffer(a, dtype="int32")
            T.evaluate(0)
def test_letstmt_bufferload_without_type_annotation():
    # Variable assignment of PrimExpr types uses the dtype of the
    # PrimExpr to determine the variable's dtype. Parsing of
    # buf[indices] is done by generating a BufferSlice object, which
    # handles both store and load cases. BufferSlice is not a
    # PrimExpr, and implements BufferSlice.dtype explicitly.

    # Failure occurred during parsing of the tvmscript.
    @T.prim_func
    def func_without_type_annotation(A: T.Buffer[(1,), "int32"]):
        x = A[0]
        T.evaluate(x)


def test_letstmt_bind_with_constant():
    # Bare constant assignments bind like T.meta_var-wrapped immediates.
    @T.prim_func
    def constant_binds():
        x = 1
        y = 42.0
        T.evaluate(T.cast(x, "float32") + y)

    @T.prim_func
    def constant_binds_wrapped():
        x = T.meta_var(T.int32(1))
        y = T.meta_var(T.float32(42.0))
        T.evaluate(T.cast(x, "float32") + y)

    assert_structural_equal(constant_binds, constant_binds_wrapped)
def test_func_call():
    # Parse-time helper: returns a meta_var tuple so it can be tuple-unpacked
    # inside TVMScript; calling it must expand to the inlined index arithmetic.
    def shared_16x16_to_ldmatrix_32x8_layout(i, j):
        thread_id = (i % 8) * 4 + (j % 8) // 2
        return T.meta_var((thread_id, (j // 8) * 4 + (i // 8) * 2 + (j % 2)))

    @T.prim_func
    def mma_sync_m16n16k16_desc(a: T.handle, b: T.handle, c: T.handle) -> None:
        A = T.match_buffer(a, (32, 8), "float16", align=64, offset_factor=16, scope="warp")
        B = T.match_buffer(b, (32, 8), "float16", align=64, offset_factor=16, scope="warp")
        C = T.match_buffer(c, (32, 8), "float16", align=64, offset_factor=16, scope="warp")

        with T.block("root"):
            T.reads(C[0:32, 0:8], A[0:32, 0:8], B[0:32, 0:8])
            T.writes(C[0:32, 0:8])
            for i, j, k in T.grid(16, 16, 16):
                with T.block("C"):
                    i, j, k = T.axis.remap("SSR", [i, j, k])
                    thread_id_C, local_id_C = shared_16x16_to_ldmatrix_32x8_layout(i, j)
                    thread_id_A, local_id_A = shared_16x16_to_ldmatrix_32x8_layout(i, k)
                    thread_id_B, local_id_B = shared_16x16_to_ldmatrix_32x8_layout(k, j)

                    T.reads(
                        C[thread_id_C, local_id_C],
                        A[thread_id_A, local_id_A],
                        B[thread_id_B, local_id_B],
                    )
                    T.writes(C[thread_id_C, local_id_C])

                    C[thread_id_C, local_id_C] += (
                        A[thread_id_A, local_id_A] * B[thread_id_B, local_id_B]
                    )

    # Same kernel with the helper's arithmetic written out by hand.
    @T.prim_func
    def mma_sync_m16n16k16_desc_manual(a: T.handle, b: T.handle, c: T.handle) -> None:
        A = T.match_buffer(a, (32, 8), "float16", align=64, offset_factor=16, scope="warp")
        B = T.match_buffer(b, (32, 8), "float16", align=64, offset_factor=16, scope="warp")
        C = T.match_buffer(c, (32, 8), "float16", align=64, offset_factor=16, scope="warp")

        with T.block("root"):
            T.reads(C[0:32, 0:8], A[0:32, 0:8], B[0:32, 0:8])
            T.writes(C[0:32, 0:8])
            for i, j, k in T.grid(16, 16, 16):
                with T.block("C"):
                    i, j, k = T.axis.remap("SSR", [i, j, k])
                    T.reads(
                        C[i % 8 * 4 + j % 8 // 2, j // 8 * 4 + i // 8 * 2 + j % 2],
                        A[i % 8 * 4 + k % 8 // 2, k // 8 * 4 + i // 8 * 2 + k % 2],
                        B[k % 8 * 4 + j % 8 // 2, j // 8 * 4 + k // 8 * 2 + j % 2],
                    )
                    T.writes(C[i % 8 * 4 + j % 8 // 2, j // 8 * 4 + i // 8 * 2 + j % 2])
                    C[i % 8 * 4 + j % 8 // 2, j // 8 * 4 + i // 8 * 2 + j % 2] = (
                        C[i % 8 * 4 + j % 8 // 2, j // 8 * 4 + i // 8 * 2 + j % 2]
                        + A[i % 8 * 4 + k % 8 // 2, k // 8 * 4 + i // 8 * 2 + k % 2]
                        * B[k % 8 * 4 + j % 8 // 2, j // 8 * 4 + k // 8 * 2 + j % 2]
                    )

    assert_structural_equal(mma_sync_m16n16k16_desc, mma_sync_m16n16k16_desc_manual)
# The following is an example of an error message from calling an invalid function
# error: Error occurred when invoking the function sqrt:
# loop of ufunc does not support argument 0 of type Var which has no callable sqrt method
# --> test_tvmscript_syntax_sugar.py:334:19
# |
# 334 | ind = sqrt(i)
# | ^^^^^^^
# note: run with `TVM_BACKTRACE=1` environment variable to display a backtrace.
# Uncomment to see the error above.
# def sqrt(x):
# import numpy as np
# return np.sqrt(x)
# @T.prim_func
# def loop(a: T.handle) -> None:
# A = T.match_buffer(a, (128,))
# for i in T.serial(128):
# ind = sqrt(i)
# A[i] = A[ind]
def test_int64_loop():
    """T.grid over int64 extents must expand to nested serial loops with
    int64 loop variables and block bindings."""

    # Sugared form: a single T.grid with int64 extents.
    @T.prim_func
    def int64_grid(
        A: T.Buffer[(T.int64(128), T.int64(128)), "float32"],
        B: T.Buffer[(T.int64(128), T.int64(128)), "float32"],
    ) -> None:
        for i, j in T.grid(T.int64(128), T.int64(128)):
            with T.block("C"):
                vi, vj = T.axis.remap("SS", [i, j])
                B[vi, vj] = A[vi, vj] + 1.0

    # Expanded form: explicit nested ranges and per-axis spatial bindings,
    # everything spelled out in int64.
    @T.prim_func
    def int64_grid_expanded(
        A: T.Buffer[(T.int64(128), T.int64(128)), "float32"],
        B: T.Buffer[(T.int64(128), T.int64(128)), "float32"],
    ) -> None:
        for i in range(T.int64(0), T.int64(128)):
            for j in range(T.int64(0), T.int64(128)):
                with T.block("C"):
                    vi = T.axis.spatial(T.int64(128), i)
                    vj = T.axis.spatial(T.int64(128), j)
                    B[vi, vj] = A[vi, vj] + 1.0

    assert_structural_equal(int64_grid, int64_grid_expanded)
if __name__ == "__main__":
    # Delegate to tvm.testing's pytest-based test entry point.
    tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tvmscript_type.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring,invalid-name,pointless-string-statement
from tvm.script import tir as T
"""
This prim func include necessary buffer types that need to be checked
e.g. reads/writes, match_buffer/alloc_buffer, serial/block etc.
"""
@T.prim_func
def element_wise_storage_align(a: T.handle, c: T.handle) -> None:
C = T.match_buffer(c, [128, 128], elem_offset=0, align=64, offset_factor=1)
A = T.match_buffer(a, [128, 128], elem_offset=0, align=64, offset_factor=1)
# body
with T.block("root"):
T.reads([])
T.writes([])
B = T.alloc_buffer([128, 128], elem_offset=0, align=64, offset_factor=1)
for i0 in T.serial(0, 128):
for ax1 in T.serial(0, 128):
with T.block("B"):
vi = T.axis.S(128, i0)
vj = T.axis.S(128, ax1)
T.reads([A[vi, vj]])
T.writes([B[vi, vj]])
T.block_attr({"buffer_dim_align": [[0, 0, 128, 127]]})
B[vi, vj] = A[vi, vj] * T.float32(2)
for i1 in T.serial(0, 128):
with T.block("C"):
vi_1, vj_1 = T.axis.remap("SS", [i0, i1])
T.reads([B[vi_1, vj_1]])
T.writes([C[vi_1, vj_1]])
C[vi_1, vj_1] = B[vi_1, vj_1] + T.float32(1)
"""
This prim func include necessary thread types that need to be checked
e.g. env_thread, launch_thread, thread_binding etc.
"""
@T.prim_func
def element_wise_env_thread_x(a: T.handle, b: T.handle, c: T.handle) -> None:
j1_0 = T.env_thread("threadIdx.x")
j0_0 = T.env_thread("threadIdx.x")
i = T.env_thread("blockIdx.x")
A = T.match_buffer(a, [128, 128])
B = T.match_buffer(b, [128, 128])
C = T.match_buffer(c, [128, 128])
T.launch_thread(i, 128)
T.launch_thread(j0_0, 4)
T.launch_thread(j1_0, 4)
for blockIdx_x in T.thread_binding(0, 128, "blockIdx.x"):
for threadIdx_x in T.thread_binding(0, 4, "threadIdx.x"):
for j0_1 in T.serial(0, 32):
with T.block(""):
B[blockIdx_x, threadIdx_x * 32 + j0_1] = (
A[blockIdx_x, threadIdx_x * 32 + j0_1] * 2.0
)
for j1_1 in T.serial(0, 32):
with T.block(""):
C[blockIdx_x, threadIdx_x * 32 + j1_1] = (
B[blockIdx_x, threadIdx_x * 32 + j1_1] + 1.0
)
"""
This test case is added to test T.grid
"""
@T.prim_func
def loop_split(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [128, 128], dtype="float32")
B = T.match_buffer(b, [128], dtype="float32")
for i, ko in T.grid(128, 4):
for ki in T.thread_binding(0, 32, thread="threadIdx.x"):
with T.block("B"):
vi = T.axis.S(128, i)
vk = T.axis.R(128, ko * 32 + ki)
T.reads([B[vi], A[vi, vk]])
T.writes([B[vi]])
with T.init():
B[vi] = T.float32(0)
B[vi] = B[vi] + A[vi, vk]
"""
This test case is added to test T.comm_reducer, T.reinterpret, T.tvm_thread_allreduce
"""
@T.prim_func
def lowered_loop_split(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [128, 128], dtype="float32")
B = T.match_buffer(b, [128], dtype="float32")
reduce_temp0 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
normal_reduce_temp0 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
for i in T.serial(0, 128):
for ki in T.thread_binding(0, 32, thread="threadIdx.x"):
normal_reduce_temp0[0] = T.float32(0)
for ko in T.serial(0, 4):
with T.block("B_normal_reduction"):
vi = T.axis.S(128, i)
vk = T.axis.R(128, ko * 32 + ki)
T.reads([A[vi, vk], normal_reduce_temp0[0]])
T.writes([normal_reduce_temp0[0]])
normal_reduce_temp0[0] = normal_reduce_temp0[0] + A[vi, vk]
with T.block("B_cross_thread_reduction"):
T.reads([normal_reduce_temp0[0]])
T.writes([reduce_temp0[0]])
T.attr(
T.comm_reducer(lambda x, y: x + y, [T.float32(0)]),
"reduce_scope",
T.reinterpret(T.uint64(0), dtype="handle"),
)
T.evaluate(
T.tvm_thread_allreduce(
T.uint32(1),
normal_reduce_temp0[0],
True,
reduce_temp0.data,
ki,
dtype="handle",
)
)
with T.block("B_write_back"):
vi = T.axis.S(128, i)
T.reads([reduce_temp0[0]])
T.writes([B[vi]])
B[vi] = reduce_temp0[0]
"""
This test case is added to test T.Buffer with slice as argument and T.exp
"""
@T.prim_func
def different_access_indices(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [128, 128, 128], dtype="float32")
B = T.match_buffer(b, [128, 128], dtype="float32")
for i, j in T.grid(128, 128):
for k in T.thread_binding(0, 128, thread="threadIdx.x"):
with T.block("B"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
T.reads([B[vi, vj], A[vi, vj, vk]])
T.writes(
[
B[
T.min(vj, vi) : T.min(vj, vi) # type: ignore[misc]
+ (T.max(vj, vi) + 1 - T.min(vj, vi)),
T.min(vi, vj) : T.min(vi, vj) # type: ignore[misc]
+ (T.max(vi, vj) + 1 - T.min(vi, vj)),
]
]
)
with T.init():
B[vj, vi] = T.exp(B[vj, vi], dtype="float32")
B[vi, vj] = B[vi, vj] + A[vi, vj, vk]
# Not running any test as we only want to type-check here
if __name__ == "__main__":
    pass
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_type_annotation_checker.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test type checker based on python's type annotations"""
import sys
from typing import Dict, List, Tuple, Union, Callable
import pytest
import _pytest
from tvm.tir.schedule._type_checker import type_checked
def int_func(x: int) -> int:
    """Sample ``int -> int`` callable used as a positive ``Callable`` case."""
    return x + x
def str_func(x: str) -> str:
    """Sample ``str -> str`` callable used as a positive ``Callable`` case."""
    return x + x
# Table of annotations with inputs that must pass / fail the type checker.
# Each entry: the annotation under test, values it should accept, and
# values it should reject.
test_cases = [
    {
        "type_annotation": int,
        "positive_cases": [5],
        "negative_cases": ["5"],
    },
    {
        "type_annotation": List[int],
        "positive_cases": [
            [5],
            [],
            # Tuples are allowed to be used as lists, because both are
            # represented in FFI as tvm::runtime::Array.
            (1, 2, 3),
        ],
        "negative_cases": [
            None,
            5,
            ["5"],
        ],
    },
    {
        "type_annotation": Dict[str, int],
        "positive_cases": [
            {"key1": 0, "key2": 1, "key3": -1},
        ],
        "negative_cases": [None, [1], {1: "1"}],
    },
    {
        "type_annotation": Tuple[int],
        "positive_cases": [
            (5,),
        ],
        "negative_cases": [
            None,
            (1, 2, 3),
            [1],
            5,
            ["5"],
        ],
    },
    {
        "type_annotation": Tuple[str, int],
        "positive_cases": [
            ("x", 5),
        ],
        "negative_cases": [
            42,
            ("x", 5, 6),
            ("x", 5, "y"),
            ("x", 5.0),
            (None, 5),
        ],
    },
    {
        "type_annotation": Union[str, int],
        "positive_cases": [
            "x",
            5,
        ],
        "negative_cases": [
            5.0,
            ("x", 5, 6),
            None,
        ],
    },
    {
        "type_annotation": Callable,
        "positive_cases": [str_func, int_func],
        "negative_cases": [
            None,
            "x",
            42,
        ],
    },
    {
        "type_annotation": Callable[[int], int],
        "positive_cases": [int_func],
        "negative_cases": [
            None,
            "x",
            42,
            # Known limitation: argument/return signatures of Callable are
            # not yet validated, so this wrong-signature case is xfail.
            pytest.param(
                str_func,
                marks=pytest.mark.xfail(
                    reason="Signature of Callable arguments not currently checked"
                ),
            ),
        ],
    },
]
def make_parametrization(type_annotation, case):
    """Wrap one (annotation, case) pair into a pytest.param with a readable id."""
    marks = []
    if isinstance(case, _pytest.mark.structures.ParameterSet):
        # The case was already declared via pytest.param(...): unwrap the
        # single value and keep its marks (e.g. xfail).
        marks = case.marks
        (case,) = case.values

    # Prefer the plain class name; typing constructs have no __name__, so
    # fall back to their repr with the "typing." prefix stripped.
    try:
        annotation_name = type_annotation.__name__
    except AttributeError:
        annotation_name = str(type_annotation).replace("typing.", "")

    case_name = case.__name__ if hasattr(case, "__name__") else str(case)
    return pytest.param(
        type_annotation, case, marks=marks, id=f"{annotation_name}, {case_name}"
    )
# Flatten the table into two parametrization lists, one per expected outcome.
positive_cases = [
    make_parametrization(config["type_annotation"], case)
    for config in test_cases
    for case in config["positive_cases"]
]
negative_cases = [
    make_parametrization(config["type_annotation"], case)
    for config in test_cases
    for case in config["negative_cases"]
]
@pytest.mark.parametrize(
    ["type_annotation", "case"],
    positive_cases,
)
def test_matches_type(type_annotation, case):
    """A value matching the annotation must pass through @type_checked."""

    @type_checked
    def func(_: type_annotation):
        pass

    func(case)
@pytest.mark.parametrize(
    ["type_annotation", "case"],
    negative_cases,
)
def test_not_matches(type_annotation, case):
    """A value violating the annotation must raise TypeError via @type_checked."""

    @type_checked
    def func(_: type_annotation):
        pass

    with pytest.raises(TypeError):
        func(case)
if __name__ == "__main__":
    # Run this file's tests directly through pytest, forwarding CLI args.
    sys.exit(pytest.main(sys.argv))
| https://github.com/zk-ml/tachikoma |
tests/scripts/ci.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import multiprocessing
import os
import getpass
import inspect
import argparse
import json
import shutil
import grp
import string
import random
import subprocess
import platform
import textwrap
import typing
from pathlib import Path
from typing import List, Dict, Any, Optional, Tuple, Callable, Union
# Repository root (this file lives at tests/scripts/ci.py, three levels down).
REPO_ROOT = Path(__file__).resolve().parent.parent.parent
# Where generated per-run shell scripts are written before being mounted
# into the Docker container.
SCRIPT_DIR = REPO_ROOT / ".ci-py-scripts"
NPROC = multiprocessing.cpu_count()
class col:
    """ANSI terminal escape codes used for colorized CLI output."""

    BLUE = "\033[94m"
    CYAN = "\033[96m"
    GREEN = "\033[92m"
    YELLOW = "\033[93m"
    RED = "\033[91m"
    RESET = "\033[0m"
    BOLD = "\033[1m"
    UNDERLINE = "\033[4m"
def print_color(color: str, msg: str, bold: bool, **kwargs: Any) -> None:
    """Print ``msg``, adding ANSI color/bold codes only when the destination
    stream is an interactive terminal.

    color -- one of the ``col`` escape codes
    msg -- text to print
    bold -- also apply the bold escape code
    kwargs -- forwarded to ``print`` (notably ``file=``)
    """
    # Fix: consult the actual destination stream rather than always
    # sys.stdout. Callers such as clean_exit() pass file=sys.stderr, and the
    # old check emitted escape codes into pipes (or plain text onto a TTY)
    # whenever stdout's and the target's TTY-ness disagreed.
    stream = kwargs.get("file", sys.stdout)
    if hasattr(stream, "isatty") and stream.isatty():
        bold_code = col.BOLD if bold else ""
        print(bold_code + color + msg + col.RESET, **kwargs)
    else:
        print(msg, **kwargs)
# Non-fatal notes accumulated during checks; echoed by clean_exit() on failure.
warnings: List[str] = []
def clean_exit(msg: str) -> None:
    """Print an error plus any accumulated warnings to stderr, then exit(1)."""
    print_color(col.RED, msg, bold=True, file=sys.stderr)
    for pending in warnings:
        print_color(col.YELLOW, pending, bold=False, file=sys.stderr)
    exit(1)
def cmd(commands: List[Any], **kwargs: Any):
    """Echo and run a subprocess, raising RuntimeError on a non-zero exit.

    Returns the CompletedProcess on success; kwargs go to subprocess.run.
    """
    argv = [str(part) for part in commands]
    pretty = " ".join(argv)
    print_color(col.BLUE, pretty, bold=True)
    proc = subprocess.run(argv, **kwargs)
    if proc.returncode == 0:
        return proc
    raise RuntimeError(f"Command failed: '{pretty}'")
def get_build_dir(name: str) -> str:
    """Return the repo-relative build directory for config ``name`` (e.g. 'build-cpu')."""
    return str((REPO_ROOT / f"build-{name}").relative_to(REPO_ROOT))
def check_docker():
    """Exit with a helpful message if 'docker' is missing; warn (non-fatally)
    when the current Linux user is not in the 'docker' group."""
    executable = shutil.which("docker")
    if executable is None:
        clean_exit("'docker' executable not found, install it first (e.g. 'apt install docker.io')")

    if sys.platform == "linux":
        # Check that the user is in the docker group before running
        try:
            group = grp.getgrnam("docker")
            if getpass.getuser() not in group.gr_mem:
                warnings.append(
                    f"Note: User '{getpass.getuser()}' is not in the 'docker' group, either:\n"
                    " * run with 'sudo'\n"
                    " * add user to 'docker': sudo usermod -aG docker $(whoami), then log out and back in",
                )
        except KeyError:
            # No 'docker' group on this system; only warn, since rootless
            # setups can still work.
            warnings.append("Note: 'docker' group does not exist")
def check_gpu():
    """Best-effort check that an NVIDIA GPU is visible via 'lshw'.

    Appends a warning (never exits) when no NVIDIA device is found; silently
    returns on non-Linux platforms, missing 'lshw', or any probe failure.
    """
    if not (sys.platform == "linux" and shutil.which("lshw")):
        # Can't check GPU on non-Linux platforms
        return

    # See if we can check if a GPU is present in case of later failures,
    # but don't block on execution since this isn't critical
    try:
        proc = cmd(
            ["lshw", "-json", "-C", "display"],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            encoding="utf-8",
        )
        stdout = proc.stdout.strip().strip(",")
        stdout = json.loads(stdout)
    except (RuntimeError, subprocess.CalledProcessError, json.decoder.JSONDecodeError):
        # Fix: cmd() raises RuntimeError on a non-zero exit (it never raises
        # CalledProcessError since it doesn't use check=True), so a failing
        # 'lshw' used to crash this "non-critical" probe. Swallow it too.
        return

    if isinstance(stdout, dict):
        # Sometimes lshw outputs a single item as a dict instead of a list of
        # dicts, so wrap it up if necessary
        stdout = [stdout]

    if not isinstance(stdout, list):
        return

    vendors = [s.get("vendor", "").lower() for s in stdout]
    if not any("nvidia" in vendor for vendor in vendors):
        warnings.append(
            "nvidia GPU not found in 'lshw', maybe use --cpu flag when running 'docs' command?"
        )
def gen_name(s: str) -> str:
    """Return ``s`` plus a random 5-letter lowercase suffix ("{s}-xxxxx"),
    used to keep Docker container names unique per run."""
    suffix = "".join(random.choice(string.ascii_lowercase) for _ in range(5))
    return f"{s}-{suffix}"
def docker(
    name: str,
    image: str,
    scripts: List[str],
    env: Dict[str, str],
    interactive: bool,
    additional_flags: Optional[Dict[str, str]] = None,
):
    """
    Invoke a set of bash scripts through docker/bash.sh

    name: container name
    image: docker image name
    scripts: list of bash commands to run
    env: environment to set
    interactive: drop into an interactive bash shell after the scripts run
    additional_flags: extra flag/value pairs passed straight to docker/bash.sh
    """
    check_docker()

    # As sccache is added to these images these can be uncommented
    sccache_images = {
        # "ci_lint",
        "ci_gpu",
        "ci_cpu",
        # "ci_wasm",
        # "ci_i386",
        "ci_cortexm",
        "ci_arm",
        "ci_hexagon",
        "ci_riscv",
        "ci_adreno",
    }

    if image in sccache_images and os.getenv("USE_SCCACHE", "1") == "1":
        scripts = [
            "sccache --start-server",
        ] + scripts
        # Set the C/C++ compiler so CMake picks them up in the build
        env["CC"] = "/opt/sccache/cc"
        env["CXX"] = "/opt/sccache/c++"
        env["SCCACHE_CACHE_SIZE"] = os.getenv("SCCACHE_CACHE_SIZE", "50G")

    docker_bash = REPO_ROOT / "docker" / "bash.sh"

    command = [docker_bash]
    if sys.stdout.isatty():
        command.append("-t")

    command.append("--name")
    command.append(name)
    if interactive:
        command.append("-i")
        # Trap on exit so a failing script still drops into a shell.
        scripts = ["interact() {", "  bash", "}", "trap interact 0", ""] + scripts

    for key, value in env.items():
        command.append("--env")
        command.append(f"{key}={value}")

    if additional_flags is not None:
        for key, value in additional_flags.items():
            command.append(key)
            command.append(value)

    SCRIPT_DIR.mkdir(exist_ok=True)

    # Write the commands into a temp script that the container executes.
    script_file = SCRIPT_DIR / f"{name}.sh"
    with open(script_file, "w") as f:
        f.write("set -eux\n\n")
        f.write("\n".join(scripts))
        f.write("\n")

    command += [image, "bash", str(script_file.relative_to(REPO_ROOT))]

    try:
        cmd(command)
    except RuntimeError as e:
        clean_exit(f"Error invoking Docker: {e}")
    except KeyboardInterrupt:
        # Ctrl-C: stop the container promptly instead of leaving it running.
        cmd(["docker", "stop", "--time", "1", name])
    finally:
        # Keep the generated script around only when debugging.
        if os.getenv("DEBUG", "0") != "1":
            script_file.unlink()
def docs(
    tutorial_pattern: Optional[str] = None,
    full: bool = False,
    interactive: bool = False,
    skip_build: bool = False,
    docker_image: Optional[str] = None,
) -> None:
    # NOTE: this docstring is parsed by add_subparser() to build the CLI
    # help, so its "arguments:" section format must be preserved.
    """
    Build the documentation from gallery/ and docs/. By default this builds only
    the Python docs without any tutorials.

    arguments:
    full -- Build all language docs, not just Python (this will use the 'ci_gpu' Docker image)
    tutorial-pattern -- Regex for which tutorials to execute when building docs (this will use the 'ci_gpu' Docker image)
    skip_build -- skip build and setup scripts
    interactive -- start a shell after running build / test scripts
    docker-image -- manually specify the docker image to use
    """
    build_dir = get_build_dir("gpu")
    extra_setup = []
    image = "ci_gpu" if docker_image is None else docker_image
    if not full and tutorial_pattern is None:
        # TODO: Change this to tlcpack/docs once that is uploaded
        image = "ci_cpu" if docker_image is None else docker_image
        build_dir = get_build_dir("cpu")
        config_script = " && ".join(
            [
                f"mkdir -p {build_dir}",
                f"pushd {build_dir}",
                "cp ../cmake/config.cmake .",
                # The docs import tvm.micro, so it has to be enabled in the build.
                # Fix: raw string — "\(" is an invalid Python escape sequence
                # (SyntaxWarning on recent CPython); the raw form emits the
                # exact same bytes for the shell.
                r"echo set\(USE_MICRO ON\) >> config.cmake",
                "popd",
            ]
        )

        # These are taken from the ci-gpu image via pip freeze, consult that
        # if there are any changes: https://github.com/apache/tvm/tree/main/docs#native
        requirements = [
            "Sphinx==4.2.0",
            "tlcpack-sphinx-addon==0.2.1",
            "synr==0.5.0",
            "image==1.5.33",
            # Temporary git link until a release is published
            "git+https://github.com/sphinx-gallery/sphinx-gallery.git@6142f1791151849b5bec4bf3959f75697ba226cd",
            "sphinx-rtd-theme==1.0.0",
            "matplotlib==3.3.4",
            "commonmark==0.9.1",
            "Pillow==8.3.2",
            "autodocsumm==0.2.7",
            "docutils==0.16",
        ]

        extra_setup = [
            "python3 -m pip install --user " + " ".join(requirements),
        ]
    else:
        check_gpu()
        config_script = f"./tests/scripts/task_config_build_gpu.sh {build_dir}"

    scripts = extra_setup + [
        config_script,
        f"./tests/scripts/task_build.py --build-dir {build_dir}",
    ]

    if skip_build:
        scripts = []

    scripts.append("./tests/scripts/task_python_docs.sh")

    if tutorial_pattern is None:
        tutorial_pattern = os.getenv("TVM_TUTORIAL_EXEC_PATTERN", ".py" if full else "none")

    env = {
        "TVM_TUTORIAL_EXEC_PATTERN": tutorial_pattern,
        "PYTHON_DOCS_ONLY": "0" if full else "1",
        "IS_LOCAL": "1",
        "TVM_LIBRARY_PATH": str(REPO_ROOT / build_dir),
    }
    docker(name=gen_name("docs"), image=image, scripts=scripts, env=env, interactive=interactive)
def serve_docs(directory: str = "_docs") -> None:
    # NOTE: this docstring is parsed by add_subparser() for CLI help, so the
    # "arguments:" section format is preserved verbatim.
    """
    Serve the docs using Python's http server

    arguments:
    directory -- Directory to serve from
    """
    docs_dir = Path(directory)
    if not docs_dir.exists():
        clean_exit("Docs have not been built, run 'ci.py docs' first")
    cmd([sys.executable, "-m", "http.server"], cwd=docs_dir)
def lint(interactive: bool = False, fix: bool = False, docker_image: Optional[str] = None) -> None:
    # NOTE: this docstring is parsed by add_subparser() for CLI help, so the
    # "arguments:" section format must be preserved.
    """
    Run CI's Sanity Check step

    arguments:
    interactive -- start a shell after running build / test scripts
    fix -- where possible (currently black and clang-format) edit files in place with formatting fixes
    docker-image -- manually specify the docker image to use
    """
    env = {}
    if fix:
        # Signal the lint scripts to rewrite files rather than just report.
        env["IS_LOCAL"] = "true"
        env["INPLACE_FORMAT"] = "true"

    docker(
        # Fix: was f"ci-lint" — an f-string with no placeholders.
        name=gen_name("ci-lint"),
        image="ci_lint" if docker_image is None else docker_image,
        scripts=["./tests/scripts/task_lint.sh"],
        env=env,
        interactive=interactive,
    )
# A named test-suite option: (help text, list of scripts to run).
Option = Tuple[str, List[str]]


def generate_command(
    name: str,
    options: Dict[str, Option],
    help: str,
    precheck: Optional[Callable[[], None]] = None,
    post_build: Optional[List[str]] = None,
    additional_flags: Optional[Dict[str, str]] = None,
):
    """
    Helper to generate CLIs that:
    1. Build a with a config matching a specific CI Docker image (e.g. 'cpu')
    2. Run tests (either a pre-defined set from scripts or manually via invoking
       pytest)
    3. (optional) Drop down into a terminal into the Docker container

    Returns (fn, options, help) consumed by add_subparser()/main().
    """

    def fn(
        tests: Optional[List[str]],
        skip_build: bool = False,
        interactive: bool = False,
        docker_image: Optional[str] = None,
        verbose: bool = False,
        **kwargs,
    ) -> None:
        # NOTE: this docstring is parsed by add_subparser() for CLI help.
        """
        arguments:
        tests -- pytest test IDs (e.g. tests/python or tests/python/a_file.py::a_test[param=1])
        skip_build -- skip build and setup scripts
        interactive -- start a shell after running build / test scripts
        docker-image -- manually specify the docker image to use
        verbose -- run verbose build
        """
        if precheck is not None:
            precheck()

        build_dir = get_build_dir(name)

        if skip_build:
            scripts = []
        else:
            scripts = [
                f"./tests/scripts/task_config_build_{name}.sh {build_dir}",
                f"./tests/scripts/task_build.py --build-dir {build_dir}",
            ]
        if post_build is not None:
            scripts += post_build

        # Check that a test suite was not used alongside specific test names
        if any(v for v in kwargs.values()) and tests is not None:
            option_flags = ", ".join([f"--{k}" for k in options.keys()])
            clean_exit(f"{option_flags} cannot be used with --tests")

        if tests is not None:
            scripts.append(f"python3 -m pytest {' '.join(tests)}")

        # Add named test suites
        for option_name, (_, extra_scripts) in options.items():
            if kwargs.get(option_name, False):
                scripts.extend(script.format(build_dir=build_dir) for script in extra_scripts)

        docker(
            name=gen_name(f"ci-{name}"),
            image=f"ci_{name}" if docker_image is None else docker_image,
            scripts=scripts,
            env={
                # Need to specify the library path manually or else TVM can't
                # determine which build directory to use (i.e. if there are
                # multiple copies of libtvm.so laying around)
                "TVM_LIBRARY_PATH": str(REPO_ROOT / get_build_dir(name)),
                "VERBOSE": "true" if verbose else "false",
            },
            interactive=interactive,
            additional_flags=additional_flags,
        )

    # Rename the closure so add_subparser() derives the right subcommand name.
    fn.__name__ = name

    return fn, options, help
def check_arm_qemu() -> None:
    """
    Check if a machine is ready to run an ARM Docker image
    """
    machine = platform.machine().lower()
    if "arm" in machine or "aarch64" in machine:
        # No need to check anything if the machine runs ARM
        return

    # On x86, ARM containers need binfmt_misc + QEMU registrations; exit with
    # setup instructions when they are absent.
    binfmt = Path("/proc/sys/fs/binfmt_misc")
    if not binfmt.exists() or len(list(binfmt.glob("qemu-*"))) == 0:
        clean_exit(
            textwrap.dedent(
                """
            You must run a one-time setup to use ARM containers on x86 via QEMU:

                sudo apt install -y qemu binfmt-support qemu-user-static
                docker run --rm --privileged multiarch/qemu-user-static --reset -p yes

            See https://www.stereolabs.com/docs/docker/building-arm-container-on-x86/ for details""".strip(
                    "\n"
                )
            )
        )
def cli_name(s: str) -> str:
    """Turn a python identifier into its CLI spelling (underscores -> dashes)."""
    return "-".join(s.split("_"))
def typing_get_origin(annotation):
    """typing.get_origin(), with the pre-3.8 ``__origin__`` fallback."""
    if sys.version_info < (3, 8):
        return annotation.__origin__
    return typing.get_origin(annotation)
def typing_get_args(annotation):
    """typing.get_args(), with the pre-3.8 ``__args__`` fallback."""
    if sys.version_info < (3, 8):
        return annotation.__args__
    return typing.get_args(annotation)
def is_optional_type(annotation):
    """True when ``annotation`` is Optional[...] — i.e. a Union allowing None."""
    if not hasattr(annotation, "__origin__"):
        return False
    if typing_get_origin(annotation) != typing.Union:
        return False
    return type(None) in typing_get_args(annotation)
def add_subparser(
    func: Callable,
    subparsers: Any,
    options: Optional[Dict[str, Option]] = None,
    help: Optional[str] = None,
) -> Any:
    """
    Utility function to make it so subparser commands can be defined locally
    as a function rather than directly via argparse and manually dispatched
    out.

    The command's help text and per-argument help are extracted from
    ``func.__doc__`` (PEP 257 style, "name -- text" lines after "arguments:").
    """

    # Each function is intended follow the example for arguments in PEP257, so
    # split apart the function documentation from the arguments
    split = [s.strip() for s in func.__doc__.split("arguments:\n")]
    if len(split) == 1:
        args_help = None
        command_help = split[0]
    else:
        command_help, args_help = split
    if help is not None:
        command_help = help

    # Parse out the help text for each argument if present
    arg_help_texts = {}
    if args_help is not None:
        for line in args_help.split("\n"):
            line = line.strip()
            name, help_text = [t.strip() for t in line.split(" -- ")]
            arg_help_texts[cli_name(name)] = help_text

    subparser = subparsers.add_parser(cli_name(func.__name__), help=command_help)

    # Track which single-letter short flags are taken so only the first
    # argument with a given initial gets one.
    seen_prefixes = set()

    # Add each parameter to the subparser
    signature = inspect.signature(func)
    for name, value in signature.parameters.items():
        if name == "kwargs":
            continue

        arg_cli_name = cli_name(name)
        kwargs: Dict[str, Union[str, bool]] = {"help": arg_help_texts[arg_cli_name]}

        is_optional = is_optional_type(value.annotation)
        if is_optional:
            # Unwrap Optional[X] to X for the flag-type decision below.
            arg_type = typing_get_args(value.annotation)[0]
        else:
            arg_type = value.annotation

        # Grab the default value if present
        has_default = False
        if value.default is not value.empty:
            kwargs["default"] = value.default
            has_default = True

        # Check if it should be a flag
        if arg_type is bool:
            kwargs["action"] = "store_true"
        else:
            kwargs["required"] = not is_optional and not has_default

        if str(arg_type).startswith("typing.List"):
            # List parameters become repeatable flags.
            kwargs["action"] = "append"

        if arg_cli_name[0] not in seen_prefixes:
            subparser.add_argument(f"-{arg_cli_name[0]}", f"--{arg_cli_name}", **kwargs)
            seen_prefixes.add(arg_cli_name[0])
        else:
            subparser.add_argument(f"--{arg_cli_name}", **kwargs)

    if options is not None:
        # Named test-suite toggles (e.g. --cpp, --unittest) from generate_command.
        for option_name, (help, _) in options.items():
            option_cli_name = cli_name(option_name)
            if option_cli_name[0] not in seen_prefixes:
                subparser.add_argument(
                    f"-{option_cli_name[0]}", f"--{option_cli_name}", action="store_true", help=help
                )
                seen_prefixes.add(option_cli_name[0])
            else:
                subparser.add_argument(f"--{option_cli_name}", action="store_true", help=help)

    return subparser
# Shared option: the C++ unit-test suite, reused by most generated commands.
CPP_UNITTEST = ("run c++ unitests", ["./tests/scripts/task_cpp_unittest.sh {build_dir}"])

# One entry per CI image: (fn, options, help) tuples produced by
# generate_command() and registered as subcommands in main().
generated = [
    generate_command(
        name="gpu",
        help="Run GPU build and test(s)",
        options={
            "cpp": CPP_UNITTEST,
            "topi": ("run topi tests", ["./tests/scripts/task_python_topi.sh"]),
            "unittest": (
                "run unit tests",
                [
                    "./tests/scripts/task_java_unittest.sh",
                    "./tests/scripts/task_python_unittest_gpuonly.sh",
                    "./tests/scripts/task_python_integration_gpuonly.sh",
                ],
            ),
            "frontend": ("run frontend tests", ["./tests/scripts/task_python_frontend.sh"]),
        },
    ),
    generate_command(
        name="cpu",
        help="Run CPU build and test(s)",
        options={
            "cpp": CPP_UNITTEST,
            "integration": (
                "run integration tests",
                ["./tests/scripts/task_python_integration.sh"],
            ),
            "unittest": (
                "run unit tests",
                [
                    "./tests/scripts/task_python_unittest.sh",
                    "./tests/scripts/task_python_vta_fsim.sh",
                    "./tests/scripts/task_python_vta_tsim.sh",
                ],
            ),
            "frontend": ("run frontend tests", ["./tests/scripts/task_python_frontend_cpu.sh"]),
        },
    ),
    generate_command(
        name="minimal",
        help="Run minimal CPU build and test(s)",
        options={
            "cpp": CPP_UNITTEST,
            "unittest": (
                "run unit tests",
                [
                    "./tests/scripts/task_python_unittest.sh",
                ],
            ),
        },
    ),
    generate_command(
        name="i386",
        help="Run i386 build and test(s)",
        options={
            "cpp": CPP_UNITTEST,
            "integration": (
                "run integration tests",
                [
                    "./tests/scripts/task_python_unittest.sh",
                    "./tests/scripts/task_python_integration_i386only.sh",
                ],
            ),
        },
    ),
    generate_command(
        name="wasm",
        help="Run WASM build and test(s)",
        options={
            "cpp": CPP_UNITTEST,
            "test": ("run WASM tests", ["./tests/scripts/task_web_wasm.sh"]),
        },
    ),
    generate_command(
        name="cortexm",
        help="Run Cortex-M build and test(s)",
        options={
            "cpp": CPP_UNITTEST,
            "test": (
                "run microTVM tests",
                [
                    "./tests/scripts/task_python_microtvm.sh",
                    "./tests/scripts/task_demo_microtvm.sh",
                ],
            ),
        },
    ),
    generate_command(
        name="hexagon",
        help="Run Hexagon build and test(s)",
        post_build=["./tests/scripts/task_build_hexagon_api.sh --output build-hexagon"],
        options={
            "cpp": CPP_UNITTEST,
            "test": (
                "run Hexagon API/Python tests",
                [
                    "./tests/scripts/task_python_hexagon.sh",
                ],
            ),
        },
    ),
    generate_command(
        name="arm",
        help="Run ARM build and test(s) (native or via QEMU on x86)",
        # ARM containers on x86 need QEMU/binfmt set up first.
        precheck=check_arm_qemu,
        options={
            "cpp": CPP_UNITTEST,
            "python": (
                "run full Python tests",
                [
                    "./tests/scripts/task_python_unittest.sh",
                    "./tests/scripts/task_python_arm_compute_library.sh",
                ],
            ),
        },
    ),
    generate_command(
        name="riscv",
        help="Run RISC-V build and test(s)",
        options={
            "cpp": CPP_UNITTEST,
            "python": (
                "run full Python tests",
                [
                    "./tests/scripts/task_riscv_microtvm.sh",
                ],
            ),
        },
    ),
    generate_command(
        name="adreno",
        help="Run Adreno build and test(s)",
        post_build=["./tests/scripts/task_build_adreno_bins.sh"],
        # Mount the host's Adreno OpenCL SDK into the container.
        additional_flags={
            "--volume": os.environ.get("ADRENO_OPENCL", "") + ":/adreno-opencl",
            "--env": "ADRENO_OPENCL=/adreno-opencl",
            "--net": "host",
        },
        options={
            "test": (
                "run Adreno API/Python tests",
                [
                    "./tests/scripts/task_python_adreno.sh " + os.environ.get("ANDROID_SERIAL", ""),
                ],
            ),
        },
    ),
]
def main():
    """Build the argparse CLI from manual and generated commands, then
    dispatch to the selected command function."""
    description = """
    Run CI jobs locally via Docker. This facilitates reproducing CI failures for
    fast iteration. Note that many of the Docker images required are large (the
    CPU and GPU images are both over 25GB) and may take some time to download on first use.
    """
    parser = argparse.ArgumentParser(description=description)
    subparsers = parser.add_subparsers(dest="command")

    commands = {}

    # Add manually defined commands
    for func in [docs, serve_docs, lint]:
        add_subparser(func, subparsers)
        commands[cli_name(func.__name__)] = func

    # Add generated commands
    for func, options, help in generated:
        add_subparser(func, subparsers, options, help)
        commands[cli_name(func.__name__)] = func

    args = parser.parse_args()

    if args.command is None:
        # Command not found in list, error out
        parser.print_help()
        exit(1)

    func = commands[args.command]

    # Extract out the parsed args and invoke the relevant function
    kwargs = {k: getattr(args, k) for k in dir(args) if not k.startswith("_") and k != "command"}
    func(**kwargs)
if __name__ == "__main__":
    # Script entry point.
    main()
| https://github.com/zk-ml/tachikoma |
tests/scripts/release/gather_prs.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import os
import pickle
from pathlib import Path
import csv
import sys
from typing import Callable, Dict, List, Any
REPO_ROOT = Path(__file__).resolve().parent.parent.parent.parent
sys.path.append(str(REPO_ROOT / "ci" / "scripts"))
from git_utils import git, GitHubRepo
from github_tag_teams import tags_from_title
GITHUB_TOKEN = os.environ["GITHUB_TOKEN"]
PRS_QUERY = """
query ($owner: String!, $name: String!, $after: String, $pageSize: Int!) {
repository(owner: $owner, name: $name) {
defaultBranchRef {
name
target {
... on Commit {
oid
history(after: $after, first: $pageSize) {
pageInfo {
hasNextPage
endCursor
}
nodes {
oid
committedDate
associatedPullRequests(first: 1) {
nodes {
number
additions
changedFiles
deletions
author {
login
}
title
body
}
}
}
}
}
}
}
}
}
"""
def append_and_save(items, file):
    """Append ``items`` to the pickled list stored at ``file``, creating the
    file with an empty list if it does not exist yet."""
    if file.exists():
        with open(file, "rb") as handle:
            data = pickle.load(handle)
    else:
        data = []

    data.extend(items)
    with open(file, "wb") as handle:
        pickle.dump(data, handle)
def fetch_pr_data(args, cache):
    """Page through commits between --from-commit and --to-commit (newest
    first) and append each page's nodes to the pickle ``cache``.

    NOTE(review): reads the module-level globals ``user`` and ``repo`` that
    are only assigned in the __main__ block — this function cannot be called
    before those are set; consider passing them as parameters.
    """
    github = GitHubRepo(user=user, repo=repo, token=GITHUB_TOKEN)

    if args.from_commit is None or args.to_commit is None:
        print("--from-commit and --to-commit must be specified if --skip-query is not used")
        exit(1)

    i = 0
    page_size = 80
    # GitHub's commit-history cursor format: "<oid> <offset>".
    cursor = f"{args.from_commit} {i}"

    while True:
        r = github.graphql(
            query=PRS_QUERY,
            variables={
                "owner": user,
                "name": repo,
                "after": cursor,
                "pageSize": page_size,
            },
        )
        data = r["data"]["repository"]["defaultBranchRef"]["target"]["history"]
        if not data["pageInfo"]["hasNextPage"]:
            break
        cursor = data["pageInfo"]["endCursor"]
        results = data["nodes"]

        to_add = []
        stop = False
        for r in results:
            if r["oid"] == args.to_commit:
                # Reached the end commit; keep what came before it.
                print(f"Found {r['oid']}, stopping")
                stop = True
                break
            else:
                to_add.append(r)

        oids = [r["oid"] for r in to_add]
        print(oids)
        # Persist incrementally so an interrupted run keeps its progress.
        append_and_save(to_add, cache)
        if stop:
            break
        print(i)
        i += page_size
def write_csv(
    filename: str, data: List[Dict[str, Any]], filter: Callable[[Dict[str, Any]], bool]
) -> None:
    """Write the PRs from *data* that pass *filter* to *filename* as a
    release-notes triage CSV (category/description left mostly blank for
    manual editing)."""
    header = (
        "category",
        "description",
        "date",
        "number",
        "author",
        "tags",
        "title",
        "additions",
        "deletions",
        "changed files",
    )
    with open(filename, "w", newline="") as csvfile:
        writer = csv.writer(csvfile, quotechar='"')
        writer.writerow(header)
        for item in data:
            pr = item["associatedPullRequests"]["nodes"][0]
            if not filter(pr):
                continue
            # Flatten comma-separated tag groups from the PR title and
            # normalize each tag to lowercase.
            tags = [
                piece.strip().lower()
                for group in tags_from_title(pr["title"])
                for piece in group.split(",")
            ]
            # Pre-fill the category only when the title carried exactly one tag.
            category = tags[0] if len(tags) == 1 else ""
            writer.writerow(
                (
                    category,
                    "",
                    item["committedDate"],
                    f'https://github.com/apache/tvm/pull/{pr["number"]}',
                    pr["author"]["login"],
                    ", ".join(tags),
                    pr["title"],
                    pr["additions"],
                    pr["deletions"],
                    pr["changedFiles"],
                )
            )
# Script entry point: fetch commit/PR data since a commit (unless cached) and
# emit two CSVs splitting PRs into "large" and "small" by diff size.
if __name__ == "__main__":
    help = "List out commits with attached PRs since a certain commit"
    parser = argparse.ArgumentParser(description=help)
    parser.add_argument("--from-commit", help="commit to start checking PRs from")
    parser.add_argument("--to-commit", help="commit to stop checking PRs from")
    parser.add_argument(
        "--threshold", default=150, help="sum of additions + deletions to consider large"
    )
    parser.add_argument(
        "--skip-query", action="store_true", help="don't query GitHub and instead use cache file"
    )
    args = parser.parse_args()
    # NOTE(review): `user` and `repo` are module-level globals read by
    # fetch_pr_data(), not locals of this block.
    user = "apache"
    repo = "tvm"
    threshold = int(args.threshold)
    # Pickle cache written by fetch_pr_data / consumed by make_notes.py.
    cache = Path("out.pkl")

    if not args.skip_query:
        fetch_pr_data(args, cache)

    with open(cache, "rb") as f:
        data = pickle.load(f)
    print(f"Found {len(data)} PRs")

    # Large PRs (diff bigger than --threshold) go to one file, the rest to another.
    write_csv(
        filename="out-large.csv",
        data=data,
        filter=lambda pr: pr["additions"] + pr["deletions"] > threshold,
    )
    write_csv(
        filename="out-small.csv",
        data=data,
        filter=lambda pr: pr["additions"] + pr["deletions"] <= threshold,
    )
| https://github.com/zk-ml/tachikoma |
tests/scripts/release/list_rfcs.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import subprocess
import sys
LINK_BASE = "https://github.com/apache/tvm-rfcs/blob/main/"
COMMIT_BASE = "https://github.com/apache/tvm-rfcs/commit/"
def sprint(*values):
    """Log *values* to stderr, keeping stdout reserved for the generated list."""
    print(*values, file=sys.stderr)
# Script entry point: sync the local tvm-rfcs checkout to origin/main, then
# print a markdown bullet list of RFCs merged since --since-commit.
if __name__ == "__main__":
    help = "List out RFCs since a commit"
    parser = argparse.ArgumentParser(description=help)
    parser.add_argument("--since-commit", required=True, help="last commit to include")
    parser.add_argument("--rfcs-repo", required=True, help="path to checkout of apache/tvm-rfcs")
    args = parser.parse_args()
    user = "apache"
    repo = "tvm"
    rfc_repo = args.rfcs_repo
    # Force the checkout to exactly match origin/main (discards local changes).
    subprocess.run("git fetch origin main", cwd=rfc_repo, shell=True)
    subprocess.run("git checkout main", cwd=rfc_repo, shell=True)
    subprocess.run("git reset --hard origin/main", cwd=rfc_repo, shell=True)
    # One "<sha> <subject>" line per commit since --since-commit.
    r = subprocess.run(
        f"git log {args.since_commit}..HEAD --format='%H %s'",
        cwd=rfc_repo,
        shell=True,
        stdout=subprocess.PIPE,
        encoding="utf-8",
    )
    commits = r.stdout.strip().split("\n")
    for commit in commits:
        parts = commit.split()
        commit = parts[0]
        subject = " ".join(parts[1:])
        # List the files touched by this commit to find its RFC document.
        r2 = subprocess.run(
            f"git diff-tree --no-commit-id --name-only -r {commit}",
            cwd=rfc_repo,
            shell=True,
            stdout=subprocess.PIPE,
            encoding="utf-8",
        )
        files = r2.stdout.strip().split("\n")
        rfc_file = None
        for file in files:
            if file.startswith("rfcs/") and file.endswith(".md"):
                if rfc_file is not None:
                    # More than one RFC markdown file in a single commit:
                    # warn, and keep the last one found.
                    sprint(f"error on {commit} {subject}")
                rfc_file = file
        if rfc_file is None:
            # Commit touched no rfcs/*.md file; warn and skip it.
            sprint(f"error on {commit} {subject}")
            continue
        print(f" * [{subject}]({LINK_BASE + rfc_file}) ([`{commit[:7]}`]({COMMIT_BASE + commit}))")
| https://github.com/zk-ml/tachikoma |
tests/scripts/release/make_notes.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import os
import pickle
from pathlib import Path
import csv
import sys
from collections import defaultdict
from typing import Callable, Dict, List, Any
REPO_ROOT = Path(__file__).resolve().parent.parent.parent.parent
sys.path.append(str(REPO_ROOT / "tests" / "scripts"))
def strip_header(title: str, header: str) -> str:
    """Remove the first case-insensitive occurrence of *header* from *title*.

    The text before the match is kept verbatim; the text after the match has
    surrounding whitespace stripped. If *header* does not occur, *title* is
    returned unchanged.
    """
    idx = title.lower().find(header.lower())
    if idx == -1:
        return title
    prefix = title[:idx]
    suffix = title[idx + len(header):]
    return prefix + suffix.strip()
def sprint(*items):
    """Write diagnostic *items* to stderr so stdout carries only the notes."""
    print(*items, file=sys.stderr)
# Script entry point: read the manually-categorized release-notes CSV plus the
# out.pkl PR cache (from gather_prs.py) and print grouped markdown notes.
if __name__ == "__main__":
    help = "List out commits with attached PRs since a certain commit"
    parser = argparse.ArgumentParser(description=help)
    parser.add_argument("--notes-csv", required=True, help="csv file of categorized PRs in order")
    args = parser.parse_args()
    user = "apache"
    repo = "tvm"
    cache = Path("out.pkl")
    if not cache.exists():
        sprint("run gather_prs.py first to generate out.pkl")
        exit(1)
    with open(cache, "rb") as f:
        data = pickle.load(f)
    # Debug: show one sample record on stderr.
    sprint(data[1])
    # Map PR number -> PR record, skipping commits without exactly one PR.
    reverse = {}
    for item in data:
        prs = item["associatedPullRequests"]["nodes"]
        if len(prs) != 1:
            continue
        pr = prs[0]
        reverse[pr["number"]] = pr

    def pr_title(number, heading):
        # Look up the PR's title and strip the redundant "[heading]" prefix.
        title = reverse[int(number)]["title"]
        title = strip_header(title, heading)
        return title

    # category -> subject -> [pr numbers]
    headings = defaultdict(lambda: defaultdict(list))
    output = ""

    sprint("Opening CSV")
    with open(args.notes_csv) as f:
        # Skip header stuff
        f.readline()
        f.readline()
        f.readline()
        input_file = csv.DictReader(f)
        i = 0
        for row in input_file:
            category = row["category"].strip()
            subject = row["subject"].strip()
            # PR number is the last path component of the GitHub URL.
            pr_number = row["url"].split("/")[-1]
            if category == "" or subject == "":
                sprint(f"Skipping {pr_number}")
                continue
            headings[category][subject].append(pr_number)
            i += 1
            # Row counter kept for the debug early-exit below.
            # if i > 30:
            #     break

    def sorter(x):
        # Sort key that pushes the "Misc" category to the end.
        if x == "Misc":
            return 10
        return 0

    # Alphabetical order first, then stable re-sort moving "Misc" last.
    keys = list(headings.keys())
    keys = list(sorted(keys))
    keys = list(sorted(keys, key=sorter))

    for key in keys:
        value = headings[key]
        if key == "DO NOT INCLUDE":
            continue
        value = dict(value)
        output += f"### {key}\n"
        # Uncategorized subjects are listed individually by PR title.
        misc = []
        misc += value.get("n/a", [])
        misc += value.get("Misc", [])
        for pr_number in misc:
            output += f" * #{pr_number} - {pr_title(pr_number, '[' + key + ']')}\n"
        # Named subjects get one line each with all their PR numbers.
        for subheading, pr_numbers in value.items():
            if subheading == "DO NOT INCLUDE":
                continue
            if subheading == "n/a" or subheading == "Misc":
                continue
            else:
                output += f" * {subheading} - " + ", ".join([f"#{n}" for n in pr_numbers]) + "\n"
        # print(value)
        output += "\n"
    print(output)
| https://github.com/zk-ml/tachikoma |
tests/scripts/request_hook/request_hook.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import urllib.request
import logging
LOGGER = None
# To update this list, run the workflow <HERE> with the URL to download and the SHA512 of the file
BASE = "https://tvm-ci-resources.s3.us-west-2.amazonaws.com"
URL_MAP = {
"http://data.mxnet.io.s3-website-us-west-1.amazonaws.com/data/val_256_q90.rec": f"{BASE}/mxnet-val_256_q90.rec",
"http://dl.caffe.berkeleyvision.org/bvlc_alexnet.caffemodel": f"{BASE}/bvlc_alexnet.caffemodel",
"http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel": f"{BASE}/bvlc_googlenet.caffemodel",
"http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224.tgz": f"{BASE}/tf-mobilenet_v1_1.0_224.tgz",
"http://images.cocodataset.org/zips/val2017.zip": f"{BASE}/cocodataset-val2017.zip",
"https://bj.bcebos.com/x2paddle/models/paddle_resnet50.tar": f"{BASE}/bcebos-paddle_resnet50.tar",
"https://data.deepai.org/stanfordcars.zip": f"{BASE}/deepai-stanfordcars.zip",
"https://docs-assets.developer.apple.com/coreml/models/MobileNet.mlmodel": f"{BASE}/2022-10-05/MobileNet.mlmodel",
"https://download.pytorch.org/models/maskrcnn_resnet50_fpn_coco-bf2d0c1e.pth": f"{BASE}/2022-10-05/maskrcnn_resnet50_fpn_coco-bf2d0c1e.pth",
"https://download.pytorch.org/models/mobilenet_v2-b0353104.pth": f"{BASE}/2022-10-05/mobilenet_v2-b0353104.pth",
"https://download.pytorch.org/models/resnet18-f37072fd.pth": f"{BASE}/2022-10-05/resnet18-f37072fd.pth",
"https://gist.github.com/zhreshold/bcda4716699ac97ea44f791c24310193/raw/93672b029103648953c4e5ad3ac3aadf346a4cdc/super_resolution_0.2.onnx": f"{BASE}/2022-10-05/super_resolution_0.2.onnx",
"https://gist.githubusercontent.com/zhreshold/4d0b62f3d01426887599d4f7ede23ee5/raw/596b27d23537e5a1b5751d2b0481ef172f58b539/imagenet1000_clsid_to_human.txt": f"{BASE}/2022-10-05/imagenet1000_clsid_to_human.txt",
"https://github.com/dmlc/web-data/blob/main/darknet/data/dog.jpg": f"{BASE}/dog.jpg",
"https://github.com/dmlc/web-data/blob/main/gluoncv/detection/street_small.jpg?raw=true": f"{BASE}/2022-10-05/small_street_raw.jpg",
"https://github.com/dmlc/web-data/raw/main/gluoncv/detection/street_small.jpg": f"{BASE}/2022-10-05/gluon-small-stree.jpg",
"https://github.com/JonathanCMitchell/mobilenet_v2_keras/releases/download/v1.1/mobilenet_v2_weights_tf_dim_ordering_tf_kernels_0.5_224.h5": f"{BASE}/2022-10-05/mobilenet_v2_weights_tf_dim_ordering_tf_kernels_0.5_224.h5",
"https://github.com/onnx/models/raw/bd206494e8b6a27b25e5cf7199dbcdbfe9d05d1c/vision/classification/mnist/model/mnist-1.onnx": f"{BASE}/onnx/mnist-1.onnx",
"https://github.com/onnx/models/raw/main/vision/classification/resnet/model/resnet50-v2-7.onnx": f"{BASE}/2022-10-05/resnet50-v2-7.onnx",
"https://github.com/pjreddie/darknet/blob/master/cfg/yolov3-tiny.cfg?raw=true": f"{BASE}/2022-10-05/yolov3-tiny-raw.cfg",
"https://github.com/uwsampl/web-data/raw/main/vta/models/synset.txt": f"{BASE}/2022-10-05/synset.txt",
"https://homes.cs.washington.edu/~cyulin/media/gnn_model/gcn_cora.torch": f"{BASE}/gcn_cora.torch",
"https://homes.cs.washington.edu/~moreau/media/vta/cat.jpg": f"{BASE}/vta_cat.jpg",
"https://objects.githubusercontent.com/github-production-release-asset-2e65be/130932608/4b196a8a-4e2d-11e8-9a11-be3c41846711?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIWNJYAX4CSVEH53A%2F20221004%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20221004T170456Z&X-Amz-Expires=300&X-Amz-Signature=0602b68e8864b9b01c9142eee22aed3543fe98a5482686eec33d98e2617a2295&X-Amz-SignedHeaders=host&actor_id=0&key_id=0&repo_id=130932608&response-content-disposition=attachment%3B%20filename%3Dmobilenet_v2_weights_tf_dim_ordering_tf_kernels_0.5_224.h5&response-content-type=application%2Foctet-stream": f"{BASE}/2022-10-05/aws-mobilenet_v2_weights_tf_dim_ordering_tf_kernels_0.5_224.h5",
"https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/classification/ResNet/resnet18.zip": f"{BASE}/oneflow/resnet18.zip",
"https://people.linaro.org/~tom.gall/sine_model.tflite": f"{BASE}/sine_model.tflite",
"https://pjreddie.com/media/files/yolov3-tiny.weights?raw=true": f"{BASE}/yolov3-tiny.weights",
"https://pjreddie.com/media/files/yolov3.weights": f"{BASE}/yolov3.weights",
"https://raw.githubusercontent.com/Cadene/pretrained-models.pytorch/master/data/imagenet_classes.txt": f"{BASE}/2022-10-05/imagenet_classes.txt",
"https://raw.githubusercontent.com/Cadene/pretrained-models.pytorch/master/data/imagenet_synsets.txt": f"{BASE}/2022-10-05/imagenet_synsets.txt",
"https://raw.githubusercontent.com/dmlc/web-data/main/gluoncv/detection/street_small.jpg": f"{BASE}/2022-10-05/small_street.jpg",
"https://raw.githubusercontent.com/dmlc/web-data/master/gluoncv/detection/street_small.jpg": f"{BASE}/2022-10-05/street_small.jpg",
"https://raw.githubusercontent.com/tensorflow/tensorflow/master/tensorflow/lite/java/demo/app/src/main/assets/labels_mobilenet_quant_v1_224.txt": f"{BASE}/2022-10-05/labels_mobilenet_quant_v1_224.txt",
"https://raw.githubusercontent.com/tlc-pack/tophub/main/tophub/mali_v0.06.log": f"{BASE}/2022-10-05/mali_v0.06.log",
"https://s3.amazonaws.com/model-server/inputs/kitten.jpg": f"{BASE}/2022-10-05/kitten.jpg",
"https://s3.amazonaws.com/onnx-model-zoo/synset.txt": f"{BASE}/2022-10-05/synset-s3.txt",
"https://storage.googleapis.com/download.tensorflow.org/models/tflite_11_05_08/mobilenet_v2_1.0_224_quant.tgz": f"{BASE}/2022-10-05/mobilenet_v2_1.0_224_quant.tgz",
"https://storage.googleapis.com/tensorflow/keras-applications/mobilenet/mobilenet_2_5_128_tf.h5": f"{BASE}/2022-10-05/mobilenet_2_5_128_tf.h5",
"https://storage.googleapis.com/tensorflow/keras-applications/resnet/resnet50_weights_tf_dim_ordering_tf_kernels.h5": f"{BASE}/2022-10-05/resnet50_weights_tf_dim_ordering_tf_kernels.h5",
}
class TvmRequestHook(urllib.request.Request):
    """urllib Request subclass that rewrites known external URLs to the CI
    mirror listed in URL_MAP before the request is issued. Installed globally
    by init()."""

    def __init__(self, url, *args, **kwargs):
        # Guard: LOGGER is only configured by init(). Previously a request
        # constructed before init() ran crashed with AttributeError on the
        # None LOGGER; now logging is simply skipped in that window.
        if LOGGER is not None:
            LOGGER.info(f"Caught access to {url}")
        if url in URL_MAP:
            new_url = URL_MAP[url]
            if LOGGER is not None:
                LOGGER.info(f"Mapped URL {url} to {new_url}")
        else:
            # Unknown URLs pass through untouched.
            new_url = url
        super().__init__(new_url, *args, **kwargs)
def init():
    """Configure the redirect logger and install TvmRequestHook as the global
    urllib request class.

    The logger is set up *before* urllib.request.Request is patched; the
    original order patched first, leaving a window in which a constructed
    request would hit the still-None LOGGER.
    """
    global LOGGER
    LOGGER = logging.getLogger("tvm_request_hook")
    LOGGER.setLevel(logging.DEBUG)
    # Record every redirected URL to a file for post-run inspection.
    fh = logging.FileHandler("redirected_urls.log")
    fh.setLevel(logging.DEBUG)
    LOGGER.addHandler(fh)
    # Patch last so the hook never runs without a configured logger.
    urllib.request.Request = TvmRequestHook
| https://github.com/zk-ml/tachikoma |
tests/scripts/task_build.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import shutil
import os
import logging
import sys
import multiprocessing
from pathlib import Path
# Hackery to enable importing of utils from ci/scripts
REPO_ROOT = Path(__file__).resolve().parent.parent.parent
sys.path.append(str(REPO_ROOT / "ci" / "scripts"))
from cmd_utils import Sh, init_log, REPO_ROOT
# Script entry point: configure sccache (when available), run the CMake
# configure step, and build TVM with Ninja using a fair share of the CPUs.
if __name__ == "__main__":
    init_log()
    # NOTE(review): description looks copy-pasted from another script — this
    # tool builds TVM, it does not list pytest nodeids.
    parser = argparse.ArgumentParser(description="List pytest nodeids for a folder")
    parser.add_argument("--sccache-bucket", required=False, help="sccache bucket name")
    parser.add_argument("--build-dir", default="build", help="build folder")
    parser.add_argument("--cmake-target", help="optional build target")
    args = parser.parse_args()
    env = {"VTA_HW_PATH": str(Path(os.getcwd()) / "3rdparty" / "vta-hw")}
    sccache_exe = shutil.which("sccache")
    use_sccache = sccache_exe is not None
    build_dir = Path(os.getcwd()) / args.build_dir
    build_dir = build_dir.relative_to(REPO_ROOT)
    if use_sccache:
        if args.sccache_bucket:
            env["SCCACHE_BUCKET"] = args.sccache_bucket
            logging.info(f"Using sccache bucket: {args.sccache_bucket}")
        else:
            logging.info(f"No sccache bucket set, using local cache")
        # Route compiler invocations through the sccache wrappers.
        env["CXX"] = "/opt/sccache/c++"
        env["CC"] = "/opt/sccache/cc"
    else:
        # use_sccache is False only when sccache_exe is None, so the
        # "<unknown>" branch below is effectively unreachable.
        if sccache_exe is None:
            reason = "'sccache' executable not found"
        else:
            reason = "<unknown>"
        logging.info(f"Not using sccache, reason: {reason}")
    sh = Sh(env)
    if use_sccache:
        sh.run("sccache --start-server", check=False)
        logging.info("===== sccache stats =====")
        sh.run("sccache --show-stats")
    # Divide the machine's cores across concurrent CI executors (min 1).
    executors = int(os.environ.get("CI_NUM_EXECUTORS", 1))
    nproc = multiprocessing.cpu_count()
    available_cpus = nproc // executors
    num_cpus = max(available_cpus, 1)
    sh.run("cmake -GNinja -DCMAKE_BUILD_TYPE=RelWithDebInfo ..", cwd=build_dir)
    target = ""
    if args.cmake_target:
        target = args.cmake_target
    # Verbose ninja output unless VERBOSE is explicitly disabled.
    verbose = os.environ.get("VERBOSE", "true").lower() in {"1", "true", "yes"}
    ninja_args = [target, f"-j{num_cpus}"]
    if verbose:
        ninja_args.append("-v")
    sh.run(f"cmake --build . -- " + " ".join(ninja_args), cwd=build_dir)
    if use_sccache:
        logging.info("===== sccache stats =====")
        sh.run("sccache --show-stats")
| https://github.com/zk-ml/tachikoma |
version.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This is the global script that set the version information of TVM.
This script runs and update all the locations that related to versions
List of affected files:
- tvm-root/python/tvm/_ffi/libinfo.py
- tvm-root/include/tvm/runtime/c_runtime_api.h
- tvm-root/conda/recipe/meta.yaml
- tvm-root/web/package.json
"""
import os
import re
import argparse
import logging
import subprocess
# Modify the following value during release
# ---------------------------------------------------
# Current version:
# We use the version of the incoming release for code
# that is under development.
#
# It is also fallback version to be used when --git-describe
# is not invoked, or when the repository does not present the
# git tags in a format that this script can use.
#
# Two tag formats are supported:
# - vMAJ.MIN.PATCH (e.g. v0.8.0) or
# - vMAJ.MIN.devN (e.g. v0.8.dev0)
__version__ = "0.11.dev0"
# ---------------------------------------------------
PROJ_ROOT = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
def py_str(cstr):
    """Decode a UTF-8 byte string (e.g. subprocess output) into a str."""
    return str(cstr, "utf-8")
def git_describe_version():
    """Get PEP-440 compatible public and local version using git describe.

    Returns
    -------
    pub_ver: str
        Public version.

    local_ver: str
        Local version (with additional label appended to pub_ver).

    Notes
    -----
    - We follow PEP 440's convention of public version
      and local versions.

    - Only tags conforming to vMAJOR.MINOR.REV (e.g. "v0.7.0")
      are considered in order to generate the version string.
      See the use of `--match` in the `git` command below.

    Here are some examples:

    - pub_ver = '0.7.0', local_ver = '0.7.0':
      We are at the 0.7.0 release.
    - pub_ver = '0.8.dev94', local_ver = '0.8.dev94+g0d07a329e':
      We are at the 0.8 development cycle.
      The current source contains 94 additional commits
      after the most recent tag(v0.7.0),
      the git short hash tag of the current commit is 0d07a329e.
    """
    cmd = [
        "git",
        "describe",
        "--tags",
        "--match",
        "v[0-9]*.[0-9]*.[0-9]*",
        "--match",
        "v[0-9]*.[0-9]*.dev[0-9]*",
    ]
    # stderr is merged into stdout so the error text can be inspected below.
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=PROJ_ROOT)
    (out, _) = proc.communicate()

    if proc.returncode != 0:
        msg = py_str(out)
        if msg.find("not a git repository") != -1:
            # Source tarball / non-git checkout: silently fall back.
            return __version__, __version__
        logging.warning("git describe: %s, use %s", msg, __version__)
        return __version__, __version__

    # git describe output is "<tag>-<ncommits>-g<shorthash>" (or just "<tag>").
    describe = py_str(out).strip()
    arr_info = describe.split("-")

    # Remove the v prefix, mainly to be robust
    # to the case where v is not presented as well.
    if arr_info[0].startswith("v"):
        arr_info[0] = arr_info[0][1:]

    # hit the exact tag
    if len(arr_info) == 1:
        return arr_info[0], arr_info[0]

    if len(arr_info) != 3:
        logging.warning("Invalid output from git describe %s", describe)
        return __version__, __version__

    dev_pos = arr_info[0].find(".dev")

    # Development versions:
    # The code will reach this point in case it can't match a full release version, such as v0.7.0.
    #
    # 1. in case the last known label looks like vMAJ.MIN.devN e.g. v0.8.dev0, we use
    # the current behaviour of just using vMAJ.MIN.devNNNN+gGIT_REV
    if dev_pos != -1:
        dev_version = arr_info[0][: arr_info[0].find(".dev")]
    # 2. in case the last known label looks like vMAJ.MIN.PATCH e.g. v0.8.0
    # then we just carry on with a similar version to what git describe provides, which is
    # vMAJ.MIN.PATCH.devNNNN+gGIT_REV
    else:
        dev_version = arr_info[0]

    pub_ver = "%s.dev%s" % (dev_version, arr_info[1])
    local_ver = "%s+%s" % (pub_ver, arr_info[2])
    return pub_ver, local_ver
# Implementations
def update(file_name, pattern, repl, dry_run=False):
    """Rewrite the single line of *file_name* whose text matches *pattern*
    so the match becomes *repl*.

    Exactly one line must match, otherwise RuntimeError is raised. The file
    is rewritten only when the value actually changes and *dry_run* is False;
    progress is reported on stdout either way.
    """
    new_lines = []
    match_count = 0
    changed = False
    with open(file_name) as src:
        for line in src:
            found = re.findall(pattern, line)
            if found:
                assert len(found) == 1
                match_count += 1
                if found[0] == repl:
                    print("%s: version is already %s" % (file_name, repl))
                else:
                    print("%s: %s -> %s" % (file_name, found[0], repl))
                    line = re.sub(pattern, repl, line)
                    changed = True
            new_lines.append(line)
    if match_count != 1:
        raise RuntimeError("Cannot find version in %s" % file_name)
    if changed and not dry_run:
        with open(file_name, "w") as dst:
            dst.writelines(new_lines)
def sync_version(pub_ver, local_ver, dry_run):
    """Synchronize version.

    Propagates the detected version into every file that embeds it:
    python libinfo (local version), the C runtime header and conda recipe
    (public version), and web/package.json (npm pre-release form).

    Parameters
    ----------
    pub_ver : str
        PEP-440 public version string.
    local_ver : str
        PEP-440 local version string (pub_ver plus local label).
    dry_run : bool
        When True, report what would change without writing files.
    """
    # python uses the PEP-440: local version
    update(
        os.path.join(PROJ_ROOT, "python", "tvm", "_ffi", "libinfo.py"),
        r"(?<=__version__ = \")[.0-9a-z\+]+",
        local_ver,
        dry_run,
    )
    # Use public version for other parts for now
    # Note that full git hash is already available in libtvm
    # C++ header
    update(
        os.path.join(PROJ_ROOT, "include", "tvm", "runtime", "c_runtime_api.h"),
        r'(?<=TVM_VERSION ")[.0-9a-z\+]+',
        pub_ver,
        dry_run,
    )
    # conda
    update(
        os.path.join(PROJ_ROOT, "conda", "recipe", "meta.yaml"),
        r"(?<=version = ')[.0-9a-z\+]+",
        pub_ver,
        dry_run,
    )
    # web
    # change to pre-release convention by npm
    # e.g. "0.8.dev94" becomes "0.8.0-dev94" for package.json.
    dev_pos = pub_ver.find(".dev")
    npm_ver = pub_ver if dev_pos == -1 else "%s.0-%s" % (pub_ver[:dev_pos], pub_ver[dev_pos + 1 :])
    update(
        os.path.join(PROJ_ROOT, "web", "package.json"),
        r'(?<="version": ")[.0-9a-z\-\+]+',
        npm_ver,
        dry_run,
    )
def main():
    """Parse command-line options and either print the detected version or
    synchronize it into all version-bearing files."""
    logging.basicConfig(level=logging.INFO)
    parser = argparse.ArgumentParser(description="Detect and synchronize version.")
    parser.add_argument(
        "--print-version",
        action="store_true",
        help="Print version to the command line. No changes is applied to files.",
    )
    parser.add_argument(
        "--git-describe",
        action="store_true",
        help="Use git describe to generate development version.",
    )
    parser.add_argument("--dry-run", action="store_true")
    options = parser.parse_args()

    # Fall back to the hard-coded __version__ unless git describe is requested.
    if options.git_describe:
        pub_ver, local_ver = git_describe_version()
    else:
        pub_ver, local_ver = __version__, __version__

    if options.print_version:
        print(local_ver)
    else:
        sync_version(pub_ver, local_ver, options.dry_run)
# Script entry point: run only when executed directly, not when imported.
if __name__ == "__main__":
    main()
| https://github.com/zk-ml/tachikoma |
vta/python/vta/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""VTA Package is a TVM backend extension to support VTA hardware.
Besides the compiler toolchain, it also includes utility functions to
configure the hardware environment and access remote device through RPC.
"""
import sys
import tvm._ffi.base
from .autotvm import module_loader
from .bitstream import get_bitstream_path, download_bitstream
from .environment import get_env, Environment
from .rpc_client import reconfig_runtime, program_fpga
__version__ = "0.1.0"
# do not from tvm import topi when running vta.exec.rpc_server
# in lib tvm runtime only mode
if not tvm._ffi.base._RUNTIME_ONLY:
from . import top
from .build_module import build_config, lower, build
| https://github.com/zk-ml/tachikoma |
vta/python/vta/autotvm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Defines AutoTVM components used with VTA."""
from tvm.autotvm.measure import default_module_loader
from . import rpc_client
def module_loader(bitstream=None):
    """Construct a ModuleLoader implementation specialized for VTA.

    Parameters
    ----------
    bitstream : Optional[str]
        Path to the bitstream to write prior to uploading code.

    Returns
    -------
    ModuleLoader :
        The ModuleLoader instance.
    """

    def _reprogram(remote, _build_result):
        """Callback passed to default_module_loader: flash the FPGA with the
        configured bitstream and reconfigure the runtime on the remote.

        Parameters
        ----------
        remote : tvm.rpc.RPCSession
            RPC session established to the remote device.
        _build_result : tvm.autotvm.measure.measure_methods.BuildResult
            Artifact from the build phase, unused here.
        """
        rpc_client.program_fpga(remote, bitstream)
        rpc_client.reconfig_runtime(remote)

    return default_module_loader(_reprogram)
| https://github.com/zk-ml/tachikoma |
vta/python/vta/bitstream.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""VTA specific bitstream management library."""
from __future__ import absolute_import as _abs
import os
import sys
from tvm.contrib.download import download
from .environment import get_env
if sys.version_info >= (3,):
import urllib.error as urllib2
else:
import urllib2
# bitstream repo
BITSTREAM_URL = "https://github.com/uwsampl/vta-distro/raw/master/bitstreams/"
def get_bitstream_path():
    """Returns the path to the cached bitstream corresponding to the current config

    Returns
    -------
    bit_path: str
        Corresponding to the filepath of the bitstream
    """
    env = get_env()
    # Cache layout: <VTA_CACHE_PATH or ~/.vta_cache>/<target>/<hw_ver>/<bitstream>.bit
    default_root = os.path.join(os.getenv("HOME"), ".vta_cache/")
    cache_dir = os.path.join(
        os.getenv("VTA_CACHE_PATH", default_root),
        env.TARGET,
        env.HW_VER.replace(".", "_"),
    )
    # Create the directory if it didn't exist
    if not os.path.exists(cache_dir):
        os.makedirs(cache_dir)
    return os.path.join(cache_dir, env.BITSTREAM) + ".bit"
def download_bitstream():
    """Downloads a cached bitstream corresponding to the current config.

    Returns
    -------
    success : bool
        True when the download completed (a RuntimeError is raised on any
        failure, so False is never actually returned).

    Raises
    ------
    RuntimeError
        If the bitstream is not hosted (HTTP 404) or the URL is unreachable.
    """
    env = get_env()
    bit = get_bitstream_path()
    url = os.path.join(BITSTREAM_URL, env.TARGET)
    url = os.path.join(url, env.HW_VER)
    url = os.path.join(url, env.BITSTREAM + ".bit")

    try:
        download(url, bit)
    except urllib2.HTTPError as err:
        if err.code == 404:
            raise RuntimeError(
                # Raise error - the solution when this happens it to build your
                # own bitstream and add it to your $VTA_CACHE_PATH
                "{} is not available. It appears that this configuration \
bistream has not been cached. Please compile your own bitstream (see hardware \
compilation guide to get Xilinx toolchains setup) and add it to your \
$VTA_CACHE_PATH. Alternatively edit your config.json back to its default \
settings. You can see the list of available bitstreams under {}".format(
                    url, BITSTREAM_URL
                )
            )
        raise RuntimeError(
            # This could happen when trying to access the URL behind a proxy
            "Something went wrong when trying to access {}. Check your \
internet connection or proxy settings.".format(
                url
            )
        )

    # BUG FIX: `success` used to be initialized to False and never set, so
    # this function always reported failure even after a successful download.
    return True
| https://github.com/zk-ml/tachikoma |
vta/python/vta/build_module.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument, invalid-name
"""VTA specific buildin for runtime."""
import tvm
from tvm.ir import register_intrin_lowering
from . import transform
from .environment import get_env, Environment
def EarlyRewrite():
    """Try to do storage rewrite in early pass."""

    def _rewrite_or_passthrough(mod, ctx):
        # StorageRewrite can fail on modules with VTA-specific constructs;
        # in that case return the module untouched instead of aborting.
        try:
            return tvm.tir.transform.StorageRewrite()(mod)
        except tvm.error.TVMError:
            return mod

    return tvm.transform.module_pass(
        _rewrite_or_passthrough, opt_level=0, name="tir.vta.EarlyRewrite"
    )
def build_config(debug_flag=0, **kwargs):
    """Build a build config for VTA.

    Parameters
    ----------
    debug_flag : int
        The debug flag to be passed.

    kwargs : dict
        Additional configurations.

    Returns
    -------
    build_config: tvm.transform.PassContext
        The build config that can be used in TVM.

    Example
    --------
    .. code-block:: python

      # build a vta module.
      with vta.build_config():
          vta_module = tvm.build(s, ...)
    """
    env = get_env()

    @tvm.tir.transform.prim_func_pass(opt_level=0)
    def add_debug(f, *_):
        # Prepend a VTASetDebugMode call so the device runs in debug mode.
        debug = tvm.tir.call_extern("int32", "VTASetDebugMode", env.dev.command_handle, debug_flag)
        return f.with_body(tvm.tir.stmt_seq(debug, f.body))

    # (phase, pass) pairs injected into TVM's lowering pipeline.
    pass_list = [
        (0, transform.InjectConv2DTransposeSkip()),
        (1, transform.InjectDMAIntrin()),
        (1, transform.InjectSkipCopy()),
        (1, transform.AnnotateALUCoProcScope()),
        (1, tvm.tir.transform.LiftAttrScope("coproc_uop_scope")),
        (1, transform.LiftAllocToScopeBegin()),
        (1, tvm.tir.transform.LiftAttrScope("coproc_scope")),
        (1, transform.InjectCoProcSync()),
        (1, EarlyRewrite()),
    ]
    if debug_flag:
        pass_list.append((1, add_debug))
    pass_list.append((2, transform.InjectALUIntrin()))
    pass_list.append((3, tvm.tir.transform.LowerDeviceStorageAccessInfo()))
    pass_list.append((3, transform.FoldUopLoop()))
    pass_list.append((3, transform.CPUAccessRewrite()))
    config = {"tir.add_lower_pass": pass_list}
    if kwargs.get("config"):
        # BUG FIX: previously this indexed kwargs with the `config` dict
        # itself (kwargs[config]) instead of the "config" key, which raised
        # TypeError whenever a caller passed config=....
        config.update(kwargs["config"])
        del kwargs["config"]
    return tvm.transform.PassContext(config=config, **kwargs)
def lower(*args, **kwargs):
    """Thin wrapper of tvm.lower

    This wrapper automatically applies VTA's build_config
    if there is no user specified build_config in context.

    See Also
    --------
    tvm.lower : The original TVM's lower function
    """
    pass_ctx = tvm.transform.PassContext.current()
    # BUG FIX: use the same config key that build_config() sets and that
    # build() below checks ("tir.add_lower_pass"). The old key
    # "add_lower_pass" never matched, so an existing user config was
    # ignored and the default VTA config was applied on top of it.
    if not pass_ctx.config.get("tir.add_lower_pass"):
        with build_config():
            return tvm.lower(*args, **kwargs)
    return tvm.lower(*args, **kwargs)
def build(*args, **kwargs):
    """Thin wrapper of tvm.build

    This wrapper automatically applies VTA's build_config
    if there is no user specified build_config in context.

    See Also
    --------
    tvm.build : The original TVM's build function
    """
    current_ctx = tvm.transform.PassContext.current()
    # A user-provided lower-pass list takes precedence; only fall back
    # to the default VTA configuration when none is present.
    if current_ctx.config.get("tir.add_lower_pass"):
        return tvm.build(*args, **kwargs)
    with build_config():
        return tvm.build(*args, **kwargs)
# Register key ops
# Mark the VTA coprocessor intrinsics as Opaque so the compiler does not
# reorder or eliminate the calls, and bind the extern C symbols
# (VTAUopPush / VTATLSCommandHandle) emitted during codegen.
tvm.ir.register_op_attr("tir.vta.coproc_sync", "TCallEffectKind", tvm.tir.CallEffectKind.Opaque)
tvm.ir.register_op_attr("tir.vta.coproc_dep_push", "TCallEffectKind", tvm.tir.CallEffectKind.Opaque)
tvm.ir.register_op_attr("tir.vta.coproc_dep_pop", "TCallEffectKind", tvm.tir.CallEffectKind.Opaque)
tvm.ir.register_op_attr("tir.vta.uop_push", "TCallEffectKind", tvm.tir.CallEffectKind.Opaque)
tvm.ir.register_op_attr("tir.vta.uop_push", "TGlobalSymbol", "VTAUopPush")
tvm.ir.register_op_attr("tir.vta.command_handle", "TGlobalSymbol", "VTATLSCommandHandle")
tvm.ir.register_op_attr("tir.vta.command_handle", "TCallEffectKind", tvm.tir.CallEffectKind.Opaque)
# The memory information for the compiler
@tvm.register_func("tvm.info.mem.%s" % Environment.inp_scope)
def mem_info_inp_buffer():
    """Memory info for the on-chip input buffer, derived from the env."""
    env = get_env()
    return tvm.ir.make_node(
        "MemoryInfo",
        unit_bits=env.INP_ELEM_BITS,
        max_simd_bits=env.INP_ELEM_BITS,
        max_num_bits=env.INP_BUFF_SIZE * 8,
        head_address=None,
    )
@tvm.register_func("tvm.info.mem.%s" % Environment.wgt_scope)
def mem_info_wgt_buffer():
    """Memory info for the on-chip weight buffer, derived from the env."""
    env = get_env()
    return tvm.ir.make_node(
        "MemoryInfo",
        unit_bits=env.WGT_ELEM_BITS,
        max_simd_bits=env.WGT_ELEM_BITS,
        max_num_bits=env.WGT_BUFF_SIZE * 8,
        head_address=None,
    )
@tvm.register_func("tvm.info.mem.%s" % Environment.acc_scope)
def mem_info_acc_buffer():
    """Memory info for the on-chip accumulator buffer, derived from the env."""
    env = get_env()
    return tvm.ir.make_node(
        "MemoryInfo",
        unit_bits=env.ACC_ELEM_BITS,
        max_simd_bits=env.ACC_ELEM_BITS,
        max_num_bits=env.ACC_BUFF_SIZE * 8,
        head_address=None,
    )
# TVM Op related registration
@register_intrin_lowering("tir.vta.coproc_sync", "default")
def coproc_sync(op):
    """Lower tir.vta.coproc_sync into a VTASynchronize extern call."""
    del op  # the sync intrinsic carries no arguments from the call site
    wait_cycles = tvm.runtime.const(1 << 31, dtype="uint32")
    return tvm.tir.call_extern(
        "int32",
        "VTASynchronize",
        get_env().dev.command_handle,
        wait_cycles,
    )
@register_intrin_lowering("tir.vta.coproc_dep_push", "default")
def coproc_dep_push(op):
    """Lower tir.vta.coproc_dep_push into a VTADepPush extern call."""
    from_qid, to_qid = op.args[0], op.args[1]
    return tvm.tir.call_extern(
        "int32", "VTADepPush", get_env().dev.command_handle, from_qid, to_qid
    )
@register_intrin_lowering("tir.vta.coproc_dep_pop", "default")
def coproc_dep_pop(op):
    """Lower tir.vta.coproc_dep_pop into a VTADepPop extern call."""
    from_qid, to_qid = op.args[0], op.args[1]
    return tvm.tir.call_extern(
        "int32", "VTADepPop", get_env().dev.command_handle, from_qid, to_qid
    )
| https://github.com/zk-ml/tachikoma |
vta/python/vta/environment.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Configurable VTA Hareware Environment scope."""
# pylint: disable=invalid-name, exec-used
from __future__ import absolute_import as _abs
import os
import json
import copy
import tvm
from tvm import te
from . import intrin
def get_vta_hw_path():
    """Get the VTA HW path."""
    # Default to the in-tree 3rdparty checkout; the VTA_HW_PATH
    # environment variable overrides it.
    here = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
    default_path = os.path.abspath(os.path.join(here, "../../../3rdparty/vta-hw"))
    return os.path.abspath(os.getenv("VTA_HW_PATH", default_path))
def pkg_config(cfg):
    """Returns PkgConfig pkg config object."""
    # The PkgConfig class lives in the vta-hw repo rather than in this
    # package, so it is loaded by exec-ing config/pkg_config.py from the
    # resolved VTA HW path.
    # NOTE(review): exec of file contents runs arbitrary code; this is
    # only safe because the path comes from the local install — do not
    # point VTA_HW_PATH at untrusted locations.
    pkg_config_py = os.path.join(get_vta_hw_path(), "config/pkg_config.py")
    libpkg = {"__file__": pkg_config_py}
    exec(compile(open(pkg_config_py, "rb").read(), pkg_config_py, "exec"), libpkg, libpkg)
    PkgConfig = libpkg["PkgConfig"]
    return PkgConfig(cfg)
class DevContext(object):
    """Internal development context

    This contains all the non-user facing compiler
    internal context that is hold by the Environment.

    Parameters
    ----------
    env : Environment
        The environment hosting the DevContext

    Note
    ----
    This class is introduced so we have a clear separation
    of developer related, and user facing attributes.
    """

    # Memory id for DMA
    MEM_ID_UOP = 0
    MEM_ID_WGT = 1
    MEM_ID_INP = 2
    MEM_ID_ACC = 3
    MEM_ID_OUT = 4
    MEM_ID_ACC_8BIT = 5
    # VTA ALU Opcodes
    ALU_OPCODE_MIN = 0
    ALU_OPCODE_MAX = 1
    ALU_OPCODE_ADD = 2
    ALU_OPCODE_SHR = 3
    ALU_OPCODE_MUL = 4
    # Task queue id (pipeline stage)
    QID_LOAD_INP = 1
    QID_LOAD_WGT = 1
    QID_LOAD_OUT = 2
    QID_STORE_OUT = 3
    QID_COMPUTE = 2

    def __init__(self, env):
        # Virtual thread axis used to mark coprocessor scope in TIR.
        self.vta_axis = te.thread_axis("vta")
        self.vta_push_uop = tvm.tir.StringImm("VTAPushGEMMOp")
        # Wrap the command handle in tvm_thread_context so each thread
        # obtains its own handle at runtime.
        ctx = tvm.tir.call_intrin("handle", "tir.vta.command_handle")
        self.command_handle = tvm.tir.Call("handle", "tir.tvm_thread_context", [ctx])
        # When True, get_task_qid collapses all queues to 1 (no pipelining).
        self.DEBUG_NO_SYNC = False
        env._dev_ctx = self
        self.gemm = intrin.gemm(env, env.mock_mode)

    def get_task_qid(self, qid):
        """Get transformed queue index."""
        return 1 if self.DEBUG_NO_SYNC else qid
class Environment(object):
    """Hardware configuration object.

    This object contains all the information
    needed for compiling to a specific VTA backend.

    Parameters
    ----------
    cfg : dict of str to value.
        The configuration parameters.

    Example
    --------
    .. code-block:: python

      # the following code reconfigures the environment
      # temporarily to attributes specified in new_cfg.json
      new_cfg = json.load(open("new_cfg.json"))
      with vta.Environment(new_cfg):
          # env works on the new environment
          env = vta.get_env()
    """

    # Currently active environment (class-level; managed by __enter__/__exit__).
    current = None
    # constants
    MAX_XFER = 1 << 22
    # debug flags
    DEBUG_DUMP_INSN = 1 << 1
    DEBUG_DUMP_UOP = 1 << 2
    DEBUG_SKIP_READ_BARRIER = 1 << 3
    DEBUG_SKIP_WRITE_BARRIER = 1 << 4
    # memory scopes
    inp_scope = "local.inp_buffer"
    wgt_scope = "local.wgt_buffer"
    acc_scope = "local.acc_buffer"

    # initialization function
    def __init__(self, cfg):
        # Produce the derived parameters and update dict.
        # Note: cfg_dict keys (LOG_* etc.) become attributes of self here.
        self.pkg = pkg_config(cfg)
        self.__dict__.update(self.pkg.cfg_dict)
        # data type width
        self.INP_WIDTH = 1 << self.LOG_INP_WIDTH
        self.WGT_WIDTH = 1 << self.LOG_WGT_WIDTH
        self.ACC_WIDTH = 1 << self.LOG_ACC_WIDTH
        self.OUT_WIDTH = 1 << self.LOG_OUT_WIDTH
        # tensor intrinsic shape
        self.BATCH = 1 << self.LOG_BATCH
        self.BLOCK_IN = 1 << self.LOG_BLOCK_IN
        self.BLOCK_OUT = 1 << self.LOG_BLOCK_OUT
        # buffer size
        self.UOP_BUFF_SIZE = 1 << self.LOG_UOP_BUFF_SIZE
        self.INP_BUFF_SIZE = 1 << self.LOG_INP_BUFF_SIZE
        self.WGT_BUFF_SIZE = 1 << self.LOG_WGT_BUFF_SIZE
        self.ACC_BUFF_SIZE = 1 << self.LOG_ACC_BUFF_SIZE
        self.OUT_BUFF_SIZE = 1 << self.LOG_OUT_BUFF_SIZE
        # bytes per buffer
        self.INP_ELEM_BITS = self.BATCH * self.BLOCK_IN * self.INP_WIDTH
        self.WGT_ELEM_BITS = self.BLOCK_OUT * self.BLOCK_IN * self.WGT_WIDTH
        self.ACC_ELEM_BITS = self.BATCH * self.BLOCK_OUT * self.ACC_WIDTH
        self.OUT_ELEM_BITS = self.BATCH * self.BLOCK_OUT * self.OUT_WIDTH
        self.INP_ELEM_BYTES = self.INP_ELEM_BITS // 8
        self.WGT_ELEM_BYTES = self.WGT_ELEM_BITS // 8
        self.ACC_ELEM_BYTES = self.ACC_ELEM_BITS // 8
        self.OUT_ELEM_BYTES = self.OUT_ELEM_BITS // 8
        # dtypes
        self.acc_dtype = "int%d" % self.ACC_WIDTH
        self.inp_dtype = "int%d" % self.INP_WIDTH
        self.wgt_dtype = "int%d" % self.WGT_WIDTH
        self.out_dtype = "int%d" % self.OUT_WIDTH
        # bitstream name
        self.BITSTREAM = self.pkg.bitstream
        # model string
        self.MODEL = self.TARGET + "_" + self.BITSTREAM
        # lazy cached members
        self.mock_mode = False
        self._mock_env = None
        self._dev_ctx = None
        self._last_env = None

    def __enter__(self):
        # Save the previously active environment so nesting restores it.
        self._last_env = Environment.current
        Environment.current = self
        return self

    def __exit__(self, ptype, value, trace):
        Environment.current = self._last_env

    @property
    def cfg_dict(self):
        return self.pkg.cfg_dict

    @property
    def dev(self):
        """Developer context"""
        # Lazily constructed; DevContext.__init__ also stores itself
        # back into self._dev_ctx.
        if self._dev_ctx is None:
            self._dev_ctx = DevContext(self)
        return self._dev_ctx

    @property
    def mock(self):
        """A mock version of the Environment

        The ALU, dma_copy and intrinsics will be
        mocked to be nop.
        """
        if self.mock_mode:
            return self
        if self._mock_env is None:
            # Shallow copy sharing the pkg config but with its own
            # (mocked) dev context.
            self._mock_env = copy.copy(self)
            self._mock_env._dev_ctx = None
            self._mock_env.mock_mode = True
        return self._mock_env

    @property
    def dma_copy(self):
        """DMA copy pragma"""
        return "dma_copy" if not self.mock_mode else "skip_dma_copy"

    @property
    def alu(self):
        """ALU pragma"""
        return "alu" if not self.mock_mode else "skip_alu"

    @property
    def gemm(self):
        """GEMM intrinsic"""
        return self.dev.gemm

    @property
    def target(self):
        return tvm.target.vta(model=self.MODEL)

    @property
    def target_host(self):
        """The target host"""
        if self.TARGET in ["pynq", "de10nano"]:
            return "llvm -mtriple=armv7-none-linux-gnueabihf"
        if self.TARGET == "ultra96":
            return "llvm -mtriple=aarch64-linux-gnu"
        if self.TARGET in ["sim", "tsim", "intelfocl"]:
            return "llvm"
        raise ValueError("Unknown target %s" % self.TARGET)

    @property
    def target_vta_cpu(self):
        return tvm.target.arm_cpu(model=self.TARGET)
def get_env():
    """Get the current VTA Environment.

    Returns
    -------
    env : Environment
        The current environment.
    """
    # Either the module default set at import time or one installed via
    # `with Environment(cfg):`.
    return Environment.current
def _init_env():
    """Initialize the default global env"""
    cfg_file = os.path.join(get_vta_hw_path(), "config/vta_config.json")
    if not os.path.exists(cfg_file):
        raise RuntimeError("Cannot find config in %s" % str(cfg_file))
    with open(cfg_file) as f:
        return Environment(json.load(f))
Environment.current = _init_env()
| https://github.com/zk-ml/tachikoma |
vta/python/vta/exec/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""VTA Command line utils."""
| https://github.com/zk-ml/tachikoma |
vta/python/vta/exec/rpc_server.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""VTA customized TVM RPC Server
Provides additional runtime function and library loading.
"""
from __future__ import absolute_import
import logging
import argparse
import os
import ctypes
import json
import tvm
from tvm import rpc
from tvm.contrib import cc
from vta import program_bitstream
from ..environment import get_env, pkg_config
from ..libinfo import find_libvta
def server_start():
    """VTA RPC server extension.

    Registers VTA-specific global functions (module loading, FPGA
    programming, runtime reconfiguration, shutdown) on top of the stock
    TVM RPC server. The loaded runtime DLL is kept in the closed-over
    ``runtime_dll`` list so all callbacks share one handle.
    """
    # pylint: disable=unused-variable
    curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
    proj_root = os.path.abspath(os.path.join(curr_path, "../../../../"))
    dll_path = find_libvta("libvta")[0]
    cfg_path = os.path.abspath(os.path.join(proj_root, "3rdparty/vta-hw/config/vta_config.json"))
    # Single-element list used as a mutable cell: empty means "not yet loaded".
    runtime_dll = []
    # Keep a reference to the stock loader before we override it below.
    _load_module = tvm.get_global_func("tvm.rpc.server.load_module")

    def load_vta_dll():
        """Try to load vta dll"""
        if not runtime_dll:
            runtime_dll.append(ctypes.CDLL(dll_path, ctypes.RTLD_GLOBAL))
        logging.info("Loading VTA library: %s", dll_path)
        return runtime_dll[0]

    @tvm.register_func("tvm.rpc.server.load_module", override=True)
    def load_module(file_name):
        # Ensure the VTA runtime symbols are resolvable before loading
        # any uploaded module that links against them.
        load_vta_dll()
        return _load_module(file_name)

    @tvm.register_func("device_api.ext_dev")
    def ext_dev_callback():
        load_vta_dll()
        return tvm.get_global_func("device_api.ext_dev")()

    @tvm.register_func("tvm.contrib.vta.init", override=True)
    def program_fpga(file_name):
        """Program the FPGA with an uploaded bitstream file."""
        # pylint: disable=import-outside-toplevel
        env = get_env()
        if env.TARGET == "pynq":
            from pynq import xlnk

            # Reset xilinx driver
            xlnk.Xlnk().xlnk_reset()
        elif env.TARGET == "de10nano":
            # Load the de10nano program function.
            load_vta_dll()
        path = tvm.get_global_func("tvm.rpc.server.workpath")(file_name)
        program_bitstream.bitstream_program(env.TARGET, path)
        logging.info("Program FPGA with %s ", file_name)

    @tvm.register_func("tvm.rpc.server.shutdown", override=True)
    def server_shutdown():
        if runtime_dll:
            runtime_dll[0].VTARuntimeShutdown()
            runtime_dll.pop()

    @tvm.register_func("tvm.contrib.vta.reconfig_runtime", override=True)
    def reconfig_runtime(cfg_json):
        """Rebuild and reload runtime with new configuration.

        Parameters
        ----------
        cfg_json : str
            JSON string used for configurations.
        """
        env = get_env()
        # Once the DLL is loaded it cannot be safely rebuilt in-process.
        if runtime_dll:
            if env.TARGET == "de10nano":
                print("Please reconfigure the runtime AFTER programming a bitstream.")
            raise RuntimeError("Can only reconfig in the beginning of session...")
        cfg = json.loads(cfg_json)
        cfg["TARGET"] = env.TARGET
        pkg = pkg_config(cfg)
        # check if the configuration is already the same
        if os.path.isfile(cfg_path):
            old_cfg = json.loads(open(cfg_path, "r").read())
            if pkg.same_config(old_cfg):
                logging.info("Skip reconfig_runtime due to same config.")
                return
        cflags = ["-O2", "-std=c++17"]
        cflags += pkg.cflags
        ldflags = pkg.ldflags
        lib_name = dll_path
        source = pkg.lib_source
        logging.info(
            "Rebuild runtime:\n output=%s,\n cflags=%s,\n source=%s,\n ldflags=%s",
            dll_path,
            "\n\t".join(cflags),
            "\n\t".join(source),
            "\n\t".join(ldflags),
        )
        cc.create_shared(lib_name, source, cflags + ldflags)
        with open(cfg_path, "w") as outputfile:
            outputfile.write(pkg.cfg_json)
def main():
    """Main function: parse CLI args and launch the VTA RPC server."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--host", type=str, default="0.0.0.0", help="The host IP address the server binds to"
    )
    parser.add_argument("--port", type=int, default=9091, help="The port of the RPC")
    parser.add_argument("--port-end", type=int, default=9199, help="The end search port of the RPC")
    parser.add_argument(
        "--key", type=str, default="", help="RPC key used to identify the connection type."
    )
    parser.add_argument("--tracker", type=str, default="", help="Report to RPC tracker")
    args = parser.parse_args()
    logging.basicConfig(level=logging.INFO)
    if args.tracker:
        # Tracker address is given as "host:port".
        url, port = args.tracker.split(":")
        port = int(port)
        tracker_addr = (url, port)
        if not args.key:
            raise RuntimeError("Need key to present type of resource when tracker is available")
    else:
        tracker_addr = None

    # register the initialization callback
    def server_init_callback():
        # pylint: disable=redefined-outer-name, reimported, import-outside-toplevel, import-self
        # Re-import inside the server process so the registration happens
        # in the worker, not just the parent.
        import tvm
        import vta.exec.rpc_server

        tvm.register_func("tvm.rpc.server.start", vta.exec.rpc_server.server_start, override=True)

    server = rpc.Server(
        args.host,
        args.port,
        args.port_end,
        key=args.key,
        tracker_addr=tracker_addr,
        server_init_callback=server_init_callback,
    )
    server.proc.join()
# Script entry point: launch the VTA RPC server.
if __name__ == "__main__":
    main()
| https://github.com/zk-ml/tachikoma |
vta/python/vta/intrin.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""VTA related intrinsics"""
from __future__ import absolute_import as _abs
import tvm
from tvm import te
def gemm(env, mock=False):
    """Matrix-matrix multiply intrinsic

    Parameters
    ----------
    env : Environment
        The Environment

    mock : bool
        Whether create a mock version.
    """
    # Sanity-check that the per-element lane counts derived from the
    # environment agree with the intrinsic shapes:
    #   input  (BATCH, BLOCK_IN), weight (BLOCK_OUT, BLOCK_IN),
    #   output (BATCH, BLOCK_OUT).
    wgt_lanes = env.WGT_ELEM_BITS // env.WGT_WIDTH
    assert wgt_lanes == env.BLOCK_OUT * env.BLOCK_IN
    wgt_shape = (env.BLOCK_OUT, env.BLOCK_IN)
    assert wgt_shape[0] * wgt_shape[1] == wgt_lanes
    inp_lanes = env.INP_ELEM_BITS // env.INP_WIDTH
    assert inp_lanes == env.BATCH * env.BLOCK_IN
    inp_shape = (env.BATCH, env.BLOCK_IN)
    assert inp_shape[0] * inp_shape[1] == inp_lanes
    out_lanes = env.ACC_ELEM_BITS // env.ACC_WIDTH
    assert out_lanes == env.BATCH * env.BLOCK_OUT
    out_shape = (env.BATCH, env.BLOCK_OUT)
    assert out_shape[0] * out_shape[1] == out_lanes
    wgt = te.placeholder(
        (wgt_shape[0], wgt_shape[1]), dtype="int%d" % env.WGT_WIDTH, name=env.wgt_scope
    )
    inp = te.placeholder(
        (inp_shape[0], inp_shape[1]), dtype="int%d" % env.INP_WIDTH, name=env.inp_scope
    )
    k = te.reduce_axis((0, wgt_shape[1]), name="k")
    out_dtype = "int%d" % env.ACC_WIDTH
    # Reference computation the intrinsic implements: out[i, j] =
    # sum_k inp[i, k] * wgt[j, k], accumulated in the wider acc dtype.
    out = te.compute(
        (out_shape[0], out_shape[1]),
        lambda i, j: te.sum(inp[i, k].astype(out_dtype) * wgt[j, k].astype(out_dtype), axis=[k]),
        name="out",
    )
    # Buffer declarations pin the on-chip scopes and alignment so the
    # intrinsic can be matched during tensorization.
    wgt_layout = tvm.tir.decl_buffer(
        wgt.shape,
        wgt.dtype,
        env.wgt_scope,
        scope=env.wgt_scope,
        offset_factor=wgt_lanes,
        data_alignment=wgt_lanes,
    )
    inp_layout = tvm.tir.decl_buffer(
        inp.shape,
        inp.dtype,
        env.inp_scope,
        scope=env.inp_scope,
        offset_factor=inp_lanes,
        data_alignment=inp_lanes,
    )
    out_layout = tvm.tir.decl_buffer(
        out.shape,
        out.dtype,
        env.acc_scope,
        scope=env.acc_scope,
        offset_factor=out_lanes,
        data_alignment=out_lanes,
    )

    def intrin_func(ins, outs):
        """Matrix-matrix multiply intrinsic function"""
        dinp, dwgt = ins
        dout = outs[0]

        def instr(index):
            """Generate matrix-matrix multiply VTA instruction"""
            irb = tvm.tir.ir_builder.create()
            dev = env.dev
            irb.scope_attr(dev.vta_axis, "coproc_scope", dev.get_task_qid(dev.QID_COMPUTE))
            irb.scope_attr(dev.vta_axis, "coproc_uop_scope", dev.vta_push_uop)
            # index 0/2 (normal-set / update) push a uop reading input and
            # weight; index 1 (reset) pushes the form with no operands.
            if index in (0, 2):
                irb.emit(
                    tvm.tir.call_intrin(
                        "int32",
                        "tir.vta.uop_push",
                        0,
                        0,
                        dout.access_ptr("rw", "int32"),
                        dinp.access_ptr("r", "int32"),
                        dwgt.access_ptr("r", "int32"),
                        0,
                        0,
                        0,
                    )
                )
            else:
                irb.emit(
                    tvm.tir.call_intrin(
                        "int32",
                        "tir.vta.uop_push",
                        0,
                        1,
                        dout.access_ptr("rw", "int32"),
                        0,
                        0,
                        0,
                        0,
                        0,
                    )
                )
            return irb.get()

        # return a triple of normal-set, reset, update
        nop = tvm.tir.Evaluate(0)
        if mock:
            return (nop, nop, nop)
        return (instr(0), instr(1), instr(2))

    return te.decl_tensor_intrin(
        out.op, intrin_func, name="GEMM", binds={inp: inp_layout, wgt: wgt_layout, out: out_layout}
    )
| https://github.com/zk-ml/tachikoma |
vta/python/vta/libinfo.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Library information."""
from __future__ import absolute_import
import sys
import os
from .environment import get_vta_hw_path
def _get_lib_name(lib_name):
"""Get lib name with extension
Returns
-------
lib_name_ext : str
Name of VTA shared library with extension
Parameters
------------
lib_name : str
Name of VTA shared library
"""
if sys.platform.startswith("win32"):
return lib_name + ".dll"
if sys.platform.startswith("darwin"):
return lib_name + ".dylib"
return lib_name + ".so"
def find_libvta(lib_vta, optional=False):
    """Find VTA Chisel-based library

    Returns
    -------
    lib_found : str
        Library path

    Parameters
    ------------
    lib_vta : str
        Name of VTA shared library

    optional : bool
        Enable error check
    """
    curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
    # Search TVM_LIBRARY_PATH (or the default in-tree build dir) first,
    # then the vta-hw build directory.
    tvm_library_path = os.environ.get("TVM_LIBRARY_PATH", None)
    if tvm_library_path is None:
        tvm_library_path = os.path.join(
            curr_path,
            os.pardir,
            os.pardir,
            os.pardir,
            "build",
        )
    search_dirs = [tvm_library_path, os.path.join(get_vta_hw_path(), "build")]
    lib_name = _get_lib_name(lib_vta)
    candidates = [os.path.join(d, lib_name) for d in search_dirs]
    found = [c for c in candidates if os.path.exists(c)]
    if not found and not optional:
        raise RuntimeError(
            "Cannot find the files.\n" + "List of candidates:\n" + str("\n".join(candidates))
        )
    return found
| https://github.com/zk-ml/tachikoma |
vta/python/vta/program_bitstream.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""VTA specific bitstream program library."""
import os
import argparse
def main():
    """Main function: parse CLI args and program the given bitstream.

    Raises
    ------
    RuntimeError
        If the target is unknown or the bitstream file cannot be found.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("target", type=str, default="", help="target")
    parser.add_argument("bitstream", type=str, default="", help="bitstream path")
    args = parser.parse_args()

    # "intelfocl" added for consistency with bitstream_program(), which
    # already supports it.
    if args.target not in ("pynq", "ultra96", "de10nano", "sim", "tsim", "intelfocl"):
        raise RuntimeError("Unknown target {}".format(args.target))

    curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
    # BUG FIX: the first candidate was os.path.join(curr_path, "/<name>");
    # a component starting with "/" makes os.path.join discard everything
    # before it, so curr_path was never actually searched.
    path_list = [
        os.path.join(curr_path, "{}".format(args.bitstream)),
        os.path.join("./", "{}".format(args.bitstream)),
    ]
    ok_path_list = [p for p in path_list if os.path.exists(p)]
    if not ok_path_list:
        raise RuntimeError("Cannot find bitstream file in %s" % str(path_list))
    bitstream_program(args.target, args.bitstream)
def pynq_bitstream_program(bitstream_path):
    """Program a Pynq-family FPGA with the given bitstream file."""
    # pylint: disable=import-outside-toplevel
    from pynq import Bitstream

    Bitstream(bitstream_path).download()
def de10nano_bitstream_program(bitstream_path):
    """Program a DE10-Nano FPGA via the registered TVM global function."""
    # pylint: disable=import-outside-toplevel
    from tvm import get_global_func

    get_global_func("vta.de10nano.program")(bitstream_path)
def intelfocl_bitstream_program(bitstream_path, mem_size=4 * 1024 * 1024 * 1024):
    """Program an Intel OpenCL FPGA, passing mem_size bytes to the driver."""
    # pylint: disable=import-outside-toplevel
    from tvm import get_global_func

    get_global_func("vta.oclfpga.program")(bitstream_path, mem_size)
def bitstream_program(target, bitstream, *args):
    """Dispatch bitstream programming to the handler for ``target``.

    Simulation targets are a no-op; unknown targets raise RuntimeError.
    """
    if target in ("sim", "tsim"):
        # In simulation, bit stream programming is a no-op.
        return
    if target in ("pynq", "ultra96"):
        pynq_bitstream_program(bitstream)
    elif target == "de10nano":
        de10nano_bitstream_program(bitstream)
    elif target == "intelfocl":
        intelfocl_bitstream_program(bitstream, *args)
    else:
        raise RuntimeError("Unknown target {}".format(target))
# Script entry point: program a bitstream from the command line.
if __name__ == "__main__":
    main()
| https://github.com/zk-ml/tachikoma |
vta/python/vta/rpc_client.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""VTA RPC client function"""
import os
from tvm import rpc
from vta import program_bitstream
from .environment import get_env
from .bitstream import download_bitstream, get_bitstream_path
def reconfig_runtime(remote):
    """Reconfigure remote runtime based on current hardware spec.

    Parameters
    ----------
    remote : RPCSession
        The TVM RPC session
    """
    remote_reconfig = remote.get_function("tvm.contrib.vta.reconfig_runtime")
    remote_reconfig(get_env().pkg.cfg_json)
def program_fpga(remote, bitstream=None):
    """Upload and program a bitstream.

    Parameters
    ----------
    remote : RPCSession
        The TVM RPC session

    bitstream : str, optional
        Path to a local bitstream file. If unset, tries to download from
        the cache server.
    """
    env = get_env()
    if bitstream:
        assert os.path.isfile(bitstream)
    else:
        bitstream = get_bitstream_path()
        if not os.path.isfile(bitstream):
            # de10nano bitstreams are not served by the cache server.
            if env.TARGET == "de10nano":
                return
            download_bitstream()

    if isinstance(remote, rpc.LocalSession):
        # Local device: program directly.
        program_bitstream.bitstream_program(env.TARGET, bitstream)
    else:
        # Remote device: upload first, then program via the RPC hook.
        fprogram = remote.get_function("tvm.contrib.vta.init")
        remote.upload(bitstream)
        fprogram(os.path.basename(bitstream))
| https://github.com/zk-ml/tachikoma |
vta/python/vta/testing/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Testing utilities, this namespace is not imported by default."""
from .utils import run
| https://github.com/zk-ml/tachikoma |
vta/python/vta/testing/simulator.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Utilities to start simulator."""
import ctypes
import json
import warnings
import tvm
from ..environment import get_env
from ..libinfo import find_libvta
def _load_sw():
    """Load hardware library for simulator.

    Returns the list of loaded ctypes/TVM library handles (kept alive by
    the caller), or an empty list when no driver is available and the
    target does not require one.
    """
    env = get_env()
    # Driver selection by TARGET: tsim -> libvta_tsim,
    # intelfocl -> libvta, everything else -> libvta_fsim.
    lib_driver_name = (
        "libvta_tsim"
        if env.TARGET == "tsim"
        else "libvta"
        if env.TARGET == "intelfocl"
        else "libvta_fsim"
    )
    # Only the simulator targets hard-require the driver to be present.
    require_sim = env.TARGET in ("sim", "tsim")
    libs = []

    # Load driver library
    lib_driver = find_libvta(lib_driver_name, optional=(not require_sim))

    if not lib_driver:
        return []

    try:
        libs = [ctypes.CDLL(lib_driver[0], ctypes.RTLD_GLOBAL)]
    except OSError as err:
        # Best-effort for non-simulator targets; fatal when simulation
        # was explicitly requested.
        if require_sim:
            raise err
        warnings.warn("Error when loading VTA driver {}: {}".format(lib_driver[0], err))
        return []

    if env.TARGET == "tsim":
        lib_hw = find_libvta("libvta_hw", optional=True)
        assert lib_hw  # make sure to make in ${VTA_HW_PATH}/hardware/chisel
        f = tvm.get_global_func("vta.tsim.init")
        m = tvm.runtime.load_module(lib_hw[0], "vta-tsim")
        f(m)
        return lib_hw

    return libs
def enabled():
    """Check if simulator is enabled."""
    # The profiler hook only exists when the simulator driver is loaded.
    return tvm.get_global_func("vta.simulator.profiler_clear", True) is not None
def clear_stats():
    """Clear profiler statistics."""
    func_name = (
        "vta.simulator.profiler_clear"
        if get_env().TARGET == "sim"
        else "vta.tsim.profiler_clear"
    )
    clear_fn = tvm.get_global_func(func_name, True)
    if clear_fn:
        clear_fn()
def stats():
    """Get profiler statistics

    Returns
    -------
    stats : dict
        Current profiler statistics
    """
    func_name = (
        "vta.simulator.profiler_status"
        if get_env().TARGET == "sim"
        else "vta.tsim.profiler_status"
    )
    return json.loads(tvm.get_global_func(func_name)())
# debug flag to skip execution.
# NOTE(review): presumably passed to debug_mode() to make the simulator
# skip instruction execution — confirm against the simulator driver.
DEBUG_SKIP_EXEC = 1
def debug_mode(flag):
    """Set debug mode

    Parameters
    ----------
    flag : int
        The debug flag, 0 means clear all flags.
    """
    tvm.get_global_func("vta.simulator.profiler_debug_mode")(flag)
LIBS = _load_sw()
| https://github.com/zk-ml/tachikoma |
vta/python/vta/testing/utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test Utilities"""
from __future__ import absolute_import as _abs
import os
from tvm import rpc, autotvm
from ..environment import get_env
from . import simulator
def run(run_func):
    """Run test function on all available env.

    Parameters
    ----------
    run_func : function(env, remote)
    """
    env = get_env()

    if env.TARGET in ["sim", "tsim", "intelfocl"]:
        # Talk to local RPC if necessary to debug RPC server.
        # Compile vta on your host with make at the root.
        # Make sure TARGET is set to "sim" in the config.json file.
        # Then launch the RPC server on the host machine
        # with ./apps/vta_rpc/start_rpc_server.sh
        # Set your VTA_LOCAL_SIM_RPC environment variable to
        # the port it's listening to, e.g. 9090
        local_rpc = int(os.environ.get("VTA_LOCAL_SIM_RPC", "0"))
        if local_rpc:
            run_func(env, rpc.connect("127.0.0.1", local_rpc))
            return
        # Make sure simulation library exists
        # If this fails, build vta on host (make)
        # with TARGET="sim" in the json.config file.
        if env.TARGET == "sim":
            assert simulator.enabled()
        run_func(env, rpc.LocalSession())
        return

    if env.TARGET not in ["pynq", "ultra96", "de10nano"]:
        raise RuntimeError("Unknown target %s" % env.TARGET)

    # The environment variables below should be set if we are using
    # a tracker to obtain a remote for a test device
    tracker_host = os.environ.get("TVM_TRACKER_HOST", None)
    tracker_port = os.environ.get("TVM_TRACKER_PORT", None)
    # Otherwise, we can set the variables below to directly
    # obtain a remote from a test device
    pynq_host = os.environ.get("VTA_RPC_HOST", None)
    pynq_port = os.environ.get("VTA_RPC_PORT", None)

    if tracker_host and tracker_port:
        # Run device from fleet node if env variables are defined
        remote = autotvm.measure.request_remote(
            env.TARGET, tracker_host, int(tracker_port), timeout=10000
        )
        run_func(env, remote)
    elif pynq_host and pynq_port:
        # Next, run on PYNQ if env variables are defined
        run_func(env, rpc.connect(pynq_host, int(pynq_port)))
    else:
        raise RuntimeError(
            "Please set the VTA_RPC_HOST and VTA_RPC_PORT environment variables"
        )
| https://github.com/zk-ml/tachikoma |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.