std::vector<int64_t>,
std::vector<int64_t>,
int64_t,
const c10::optional<at::Scalar>&,
const c10::optional<at::Scalar>&)>();
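  // Prepack weight/bias through XNNPACK's conv2d_clamp prepack op; the
  // resulting OpContext is what the NNC external call consumes at run time.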
auto prepacked = conv2d_clamp_prepack_op.call(
weight,
bias,
{stride, stride},
{pad, pad},
{dilation, dilation},
groups,
c10::optional<at::Scalar>(),
c10::optional<at::Scalar>());
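  // The prepacked OpContext is opaque to the IR, so a one-element dummy buffer
  // stands in for it here; the real pointer is supplied when the kernel runs.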
BufHandle DummyPrepacked("DummyPrepacked", {1}, kFloat);
Tensor Result = Tensor(
ResultBuf.node(),
ExternalCall::make(
ResultBuf,
"nnc_prepacked_conv2d_clamp_run",
{Input, DummyPrepacked},
{}));
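  // Lower the external call into a runnable statement and simplify the IR.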
LoopNest l({Result});
l.prepareForCodegen();
l.simplify();
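  // Flatten the 1x3x224x224 input and preallocate the 1x16x112x112 output,
  // filled with a -1 sentinel so untouched elements are easy to spot.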
at::Tensor nnc_result;
std::vector<float> input_buf(
input.data_ptr<float>(), input.data_ptr<float>() + 1 * 3 * 224 * 224);
std::vector<float> result_buf(1 * 16 * 112 * 112, -1.f);
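  // With the LLVM backend enabled, compile the kernel and check it against the
  // eager-mode reference.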
#ifdef TORCH_ENABLE_LLVM
LLVMCodeGen llvm_codegen(l.root_stmt(), {Input, DummyPrepacked, Result});
llvm_codegen.call({input_buf, prepacked.get(), result_buf});
nnc_result = at::from_blob(result_buf.data(), {1, 16, 112, 112}, options);
ASSERT_TRUE(at::allclose(nnc_result, ref, 1e-03, 1e-03));
#endif
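  // The interpreter-based evaluator is checked unconditionally.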
SimpleIREvaluator ir_eval(l.root_stmt(), {Input, DummyPrepacked, Result});
ir_eval.call({input_buf, prepacked.get(), result_buf});
nnc_result = at::from_blob(result_buf.data(), {1, 16, 112, 112}, options);
ASSERT_TRUE(at::allclose(nnc_result, ref, 1e-03, 1e-03));
}
#endif // USE_XNNPACK
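// Checks that two-operand aten ops (matmul, mv, mm) lower to the matching
// nnc_aten_* external calls and agree with their eager-mode counterparts.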
TEST(ExternalCall, BinaryFloat) {
using TensorFunc = std::function<at::Tensor(at::Tensor, at::Tensor)>;
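  // A test case is (lhs shape, rhs shape, result shape, eager reference
  // function, NNC external-call name).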
using Test = std::tuple<
std::vector<int64_t>,
std::vector<int64_t>,
std::vector<int64_t>,
TensorFunc,
std::string>;
std::vector<Test> tests = {};
tests.push_back(
Test{{100, 200}, {200, 300}, {100, 300}, at::matmul, "nnc_aten_matmul"});
tests.push_back(Test{{100, 300}, {300}, {100}, at::mv, "nnc_aten_mv"});
tests.push_back(
Test{{100, 200}, {200, 300}, {100, 300}, at::mm, "nnc_aten_mm"});
  for (const auto& curTest : tests) {
std::vector<int64_t> aShape, bShape, resShape;
TensorFunc torchFunc;
std::string externCallName;
std::tie(aShape, bShape, resShape, torchFunc, externCallName) = curTest;
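    // Buffer dims are ExprHandles; going through int is safe for these small
    // test shapes.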
auto toExprHandleVec = [](std::vector<int64_t> v) {
auto intV = std::vector<int>(v.begin(), v.end());
return std::vector<ExprHandle>(intV.begin(), intV.end());
};
BufHandle A("A", toExprHandleVec(aShape), kFloat);
BufHandle B("B", toExprHandleVec(bShape), kFloat);
BufHandle ResultBuf("Result", toExprHandleVec(resShape), kFloat);
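    // Route the computation through an ExternalCall that dispatches to the
    // registered NNC wrapper for the aten op.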
Tensor Result = Tensor(
ResultBuf.node(),
ExternalCall::make(ResultBuf, externCallName, {A, B}, {}));
LoopNest l({Result});
l.prepareForCodegen();
l.simplify();
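    // Build the eager-mode reference from constant-filled inputs.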
auto options = at::TensorOptions()
.dtype(at::kFloat)
.layout(at::kStrided)
.device(at::kCPU)
.requires_grad(false);
at::Tensor a = at::ones(c10::IntArrayRef(aShape), options) * 5.f;
at::Tensor b = at::ones(c10::IntArrayRef(bShape), options) * 6.f;
at::Tensor ref = torchFunc(a, b);
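    // Flat element counts for sizing the raw input/output buffers below.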
    auto prod = [](const std::vector<int64_t>& v) {
      // Seed with int64_t so the accumulated product isn't truncated to int.
      // NOLINTNEXTLINE(modernize-use-transparent-functors)
      return std::accumulate(
          v.begin(), v.end(), int64_t{1}, std::multiplies<int64_t>());
    };
at::Tensor nnc_result;
std::vector<float> a_buf(prod(aShape), 5.f);
std::vector<float> b_buf(prod(bShape), 6.f);