TEST(ExternalCall, Conv2d_float) {
  BufHandle Input("Input", {1, 3, 224, 224}, kFloat);
  BufHandle Weight("Weight", {16, 3, 3, 3}, kFloat);
  BufHandle Bias("Bias", {16}, kFloat);
  BufHandle ResultBuf("Result", {1, 16, 112, 112}, kFloat);
  int64_t stride = 2;
  int64_t pad = 1;
  int64_t dilation = 1;
  int64_t groups = 1;
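  // Bind Result to an external call that dispatches to ATen's conv2d at
  // runtime; the scalar args are packed as
  // {strideH, strideW, padH, padW, dilationH, dilationW, groups}.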
  Tensor Result = Tensor(
      ResultBuf.node(),
      ExternalCall::make(
          ResultBuf,
          "nnc_aten_conv2d",
          {Input, Weight, Bias},
          {stride, stride, pad, pad, dilation, dilation, groups}));
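  // Lower the loop nest into a form the codegen backends can consume.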
  LoopNest l({Result});
  l.prepareForCodegen();
  l.simplify();
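  // Build matching inputs with ATen and compute a reference output.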
  auto options = at::TensorOptions()
                     .dtype(at::kFloat)
                     .layout(at::kStrided)
                     .device(at::kCPU)
                     .requires_grad(false);
  at::Tensor input = at::ones({1, 3, 224, 224}, options) * 5.f;
  at::Tensor weight = at::ones({16, 3, 3, 3}, options) * 6.f;
  at::Tensor bias = at::ones({16}, options) * 11.f;
  at::Tensor ref = at::conv2d(
      input,
      weight,
      bias,
      {stride, stride},
      {pad, pad},
      {dilation, dilation},
      groups);
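  // Flat buffers for the NNC kernel; result_buf is pre-filled with -1 so
  // any output the kernel failed to write would stand out.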
  at::Tensor nnc_result;
  std::vector<float> input_buf(1 * 3 * 224 * 224, 5.f);
  std::vector<float> weight_buf(16 * 3 * 3 * 3, 6.f);
  std::vector<float> bias_buf(16, 11.f);
  std::vector<float> result_buf(1 * 16 * 112 * 112, -1.f);
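  // When PyTorch is built with LLVM, check the LLVM backend against ATen.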
#ifdef TORCH_ENABLE_LLVM
  LLVMCodeGen llvm_codegen(l.root_stmt(), {Input, Weight, Bias, Result});
  llvm_codegen.call({input_buf, weight_buf, bias_buf, result_buf});
  nnc_result = at::from_blob(result_buf.data(), {1, 16, 112, 112}, options);
  ASSERT_TRUE(at::allclose(nnc_result, ref));
#endif
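  // The IR interpreter path must match the ATen reference as well.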
  SimpleIREvaluator ir_eval(l.root_stmt(), {Input, Weight, Bias, Result});
  ir_eval.call({input_buf, weight_buf, bias_buf, result_buf});
  nnc_result = at::from_blob(result_buf.data(), {1, 16, 112, 112}, options);
  ASSERT_TRUE(at::allclose(nnc_result, ref));
}
TEST(ExternalCall, Conv2d_int) {
  // A similar test, but now using kInt tensors
  BufHandle Input("Input", {1, 3, 224, 224}, kInt);
  BufHandle Weight("Weight", {16, 3, 3, 3}, kInt);
  BufHandle Bias("Bias", {16}, kInt);
  BufHandle ResultBuf("Result", {1, 16, 112, 112}, kInt);
  int64_t stride = 2;
  int64_t pad = 1;
  int64_t dilation = 1;
  int64_t groups = 1;
  Tensor Result = Tensor(
      ResultBuf.node(),
      ExternalCall::make(
          ResultBuf,
          "nnc_aten_conv2d",
          {Input, Weight, Bias},
          {stride, stride, pad, pad, dilation, dilation, groups}));
  LoopNest l({Result});
  l.prepareForCodegen();
  l.simplify();
  auto options = at::TensorOptions()
                     .dtype(at::kInt)
                     .layout(at::kStrided)
                     .device(at::kCPU)
                     .requires_grad(false);
  at::Tensor input = at::ones({1, 3, 224, 224}, options) * 5;
  at::Tensor weight = at::ones({16, 3, 3, 3}, options) * 6;
  at::Tensor bias = at::ones({16}, options) * 11;
  at::Tensor ref = at::conv2d(
      input,
      weight,
      bias,
      {stride, stride},
      {pad, pad},
      {dilation, dilation},
      groups);
  at::Tensor nnc_result;
  std::vector<int32_t> input_buf(1 * 3 * 224 * 224, 5);
  std::vector<int32_t> weight_buf(16 * 3 * 3 * 3, 6);
  std::vector<int32_t> bias_buf(16, 11);
  std::vector<int32_t> result_buf(1 * 16 * 112 * 112, -1);