// NOTE(review): removed dataset-extraction residue ("text", "stringlengths",
// "0", "2.2M") that is not valid C++ and does not belong in this source file.
#ifdef TORCH_ENABLE_LLVM
LLVMCodeGen llvm_codegen(l.root_stmt(), {Input, Weight, Bias, Result});
llvm_codegen.call({input_buf, weight_buf, bias_buf, result_buf});
nnc_result = at::from_blob(result_buf.data(), {1, 16, 112, 112}, options);
ASSERT_TRUE(at::allclose(nnc_result, ref));
#endif
SimpleIREvaluator ir_eval(l.root_stmt(), {Input, Weight, Bias, Result});
ir_eval.call({input_buf, weight_buf, bias_buf, result_buf});
nnc_result = at::from_blob(result_buf.data(), {1, 16, 112, 112}, options);
ASSERT_TRUE(at::allclose(nnc_result, ref));
}
TEST(ExternalCall, Conv2d_nobias_noargs) {
  // Lower a conv2d as an external call ("nnc_aten_conv2d") with no bias
  // tensor and an empty extra-arg list, then check that both the LLVM
  // codegen (when available) and the IR interpreter reproduce the ATen
  // reference result.
  BufHandle InputArg("Input", {1, 16, 112, 112}, kFloat);
  BufHandle WeightArg("Weight", {16, 16, 1, 1}, kFloat);
  BufHandle OutBuf("Result", {1, 16, 112, 112}, kFloat);

  Tensor Result = Tensor(
      OutBuf.node(),
      ExternalCall::make(OutBuf, "nnc_aten_conv2d", {InputArg, WeightArg}, {}));

  LoopNest nest({Result});
  nest.prepareForCodegen();
  nest.simplify();

  // Reference computed through regular ATen on CPU.
  auto opts = at::TensorOptions()
                  .dtype(at::kFloat)
                  .layout(at::kStrided)
                  .device(at::kCPU)
                  .requires_grad(false);
  at::Tensor inp = at::ones({1, 16, 112, 112}, opts) * 5.f;
  at::Tensor wgt = at::ones({16, 16, 1, 1}, opts) * 6.f;
  at::Tensor ref = at::conv2d(inp, wgt);

  // Raw buffers mirroring the constant fill values above; the output buffer
  // starts poisoned with -1 so a no-op kernel would be caught.
  at::Tensor nnc_result;
  std::vector<float> inp_data(1 * 16 * 112 * 112, 5.f);
  std::vector<float> wgt_data(16 * 16 * 1 * 1, 6.f);
  std::vector<float> out_data(1 * 16 * 112 * 112, -1.f);

#ifdef TORCH_ENABLE_LLVM
  LLVMCodeGen llvm_codegen(nest.root_stmt(), {InputArg, WeightArg, Result});
  llvm_codegen.call({inp_data, wgt_data, out_data});
  nnc_result = at::from_blob(out_data.data(), {1, 16, 112, 112}, opts);
  ASSERT_TRUE(at::allclose(nnc_result, ref));
#endif

  SimpleIREvaluator ir_eval(nest.root_stmt(), {InputArg, WeightArg, Result});
  ir_eval.call({inp_data, wgt_data, out_data});
  nnc_result = at::from_blob(out_data.data(), {1, 16, 112, 112}, opts);
  ASSERT_TRUE(at::allclose(nnc_result, ref));
}
TEST(ExternalCall, Addmm_float) {
BufHandle Input("Input", {100, 300}, kFloat);
BufHandle Mat1("Mat1", {100, 200}, kFloat);
BufHandle Mat2("Mat2", {200, 300}, kFloat);
BufHandle ResultBuf("Result", {100, 300}, kFloat);
int64_t beta = 2;
int64_t alpha = 2;
Tensor Result = Tensor(
ResultBuf.node(),
ExternalCall::make(
ResultBuf, "nnc_aten_addmm", {Input, Mat1, Mat2}, {beta, alpha}));
LoopNest l({Result});
l.prepareForCodegen();
l.simplify();
auto options = at::TensorOptions()
.dtype(at::kFloat)
.layout(at::kStrided)
.device(at::kCPU)
.requires_grad(false);
at::Tensor input = at::ones({100, 300}, options) * 5.f;
at::Tensor mat1 = at::ones({100, 200}, options) * 6.f;
at::Tensor mat2 = at::ones({200, 300}, options) * 11.f;
at::Tensor ref = at::addmm(input, mat1, mat2, beta, alpha);
at::Tensor nnc_result;
std::vector<float> input_buf(100 * 300, 5.f);
std::vector<float> mat1_buf(100 * 200, 6.f);
std::vector<float> mat2_buf(200 * 300, 11.f);
std::vector<float> result_buf(100 * 300, -1.f);
#ifdef TORCH_ENABLE_LLVM
LLVMCodeGen llvm_codegen(l.root_stmt(), {Input, Mat1, Mat2, Result});
llvm_codegen.call({input_buf, mat1_buf, mat2_buf, result_buf});
nnc_result = at::from_blob(result_buf.data(), {100, 300}, options);
ASSERT_TRUE(at::allclose(nnc_result, ref));
#endif
SimpleIREvaluator ir_eval(l.root_stmt(), {Input, Mat1, Mat2, Result});
ir_eval.call({input_buf, mat1_buf, mat2_buf, result_buf});