    std::vector<float> result_buf(prod(resShape), -1.f);

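    // Compare NNC's output with the ATen reference, first through the LLVM
    // backend when it is compiled in.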
#ifdef TORCH_ENABLE_LLVM
    LLVMCodeGen llvm_codegen(l.root_stmt(), {A, B, Result});
    llvm_codegen.call({a_buf, b_buf, result_buf});
    nnc_result =
        at::from_blob(result_buf.data(), c10::IntArrayRef(resShape), options);
    ASSERT_TRUE(at::allclose(nnc_result, ref));
#endif

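    // ...and then through the IR interpreter, which needs no LLVM.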
    SimpleIREvaluator ir_eval(l.root_stmt(), {A, B, Result});
    ir_eval.call({a_buf, b_buf, result_buf});
    nnc_result =
        at::from_blob(result_buf.data(), c10::IntArrayRef(resShape), options);
    ASSERT_TRUE(at::allclose(nnc_result, ref));
  }
}

TEST(ExternalCall, UnaryFloat) {
  using TensorFunc = std::function<at::Tensor(at::Tensor)>;
  auto toExprHandleVec = [](std::vector<int64_t> v) {
    auto intV = std::vector<int>(v.begin(), v.end());
    return std::vector<ExprHandle>(intV.begin(), intV.end());
  };
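  // Each test case bundles: input shape, result shape, the reference ATen
  // function, the external call's registered name, and its extra arguments.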
  using Test = std::tuple<
      std::vector<int64_t>,
      std::vector<int64_t>,
      TensorFunc,
      std::string,
      std::vector<ExprHandle>>;
  std::vector<Test> tests = {};
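  // Case 1: adaptive_avg_pool2d pools a {1, 64, 8, 9} input to {1, 64, 5, 7}.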
  tests.push_back(Test{// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
                       {1, 64, 8, 9},
                       {1, 64, 5, 7},
                       [](at::Tensor x) {
                         return at::adaptive_avg_pool2d(x, {5, 7});
                       },
                       "nnc_aten_adaptive_avg_pool2d",
                       toExprHandleVec({5, 7})});
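  // Case 2: mean over dim 1 with keepdim=false reduces {100, 200} to {100}.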
  tests.push_back(Test{// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
                       {100, 200},
                       {100},
                       [](at::Tensor x) { return at::mean(x, {1}); },
                       "nnc_aten_mean",
                       toExprHandleVec({1, /*keepdim=*/0})});
  for (auto curTest : tests) {
    std::vector<int64_t> aShape, resShape;
    TensorFunc torchFunc;
    std::string externCallName;
    std::vector<ExprHandle> externCallArgs;
    std::tie(aShape, resShape, torchFunc, externCallName, externCallArgs) =
        curTest;
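    // Build the NNC kernel: Result is defined by an ExternalCall node that
    // invokes the registered external function instead of a computed loop nest.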
    BufHandle A("A", toExprHandleVec(aShape), kFloat);
    BufHandle ResultBuf("Result", toExprHandleVec(resShape), kFloat);
    Tensor Result = Tensor(
        ResultBuf.node(),
        ExternalCall::make(ResultBuf, externCallName, {A}, externCallArgs));
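    // Lower the tensor expression to a stmt and simplify it before codegen.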
    LoopNest l({Result});
    l.prepareForCodegen();
    l.simplify();

    auto options = at::TensorOptions()
                       .dtype(at::kFloat)
                       .layout(at::kStrided)
                       .device(at::kCPU)
                       .requires_grad(false);
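    // Compute the reference result with ATen on an input filled with 5.0.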
    at::Tensor a = at::ones(c10::IntArrayRef(aShape), options) * 5.f;
    at::Tensor ref = torchFunc(a);

    auto prod = [](std::vector<int64_t> v) {
      // NOLINTNEXTLINE(modernize-use-transparent-functors)
      return std::accumulate(v.begin(), v.end(), 1, std::multiplies<int64_t>());
    };

    at::Tensor nnc_result;
    std::vector<float> a_buf(prod(aShape), 5.f);
    std::vector<float> result_buf(prod(resShape), -1.f);

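    // Check NNC against the reference, first through the LLVM backend when it
    // is compiled in.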
#ifdef TORCH_ENABLE_LLVM
    LLVMCodeGen llvm_codegen(l.root_stmt(), {A, Result});
    llvm_codegen.call({a_buf, result_buf});
    nnc_result =
        at::from_blob(result_buf.data(), c10::IntArrayRef(resShape), options);
    ASSERT_TRUE(at::allclose(nnc_result, ref));
#endif

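    // ...and then through the IR interpreter, which is always available.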
    SimpleIREvaluator ir_eval(l.root_stmt(), {A, Result});
    ir_eval.call({a_buf, result_buf});
    nnc_result =
        at::from_blob(result_buf.data(), c10::IntArrayRef(resShape), options);
    ASSERT_TRUE(at::allclose(nnc_result, ref));
  }
}

TEST(ExternalCall, ComputeInterop) {
  // This test verifies that Tensors using external calls can be used by, and
  // can use, Tensors built with the Compute API.