text
stringlengths
0
2.2M
});
Tensor MatmulResult = Tensor(
MatmulResultBuf.node(),
ExternalCall::make(
MatmulResultBuf,
"nnc_aten_matmul",
{BufHandle(A.buf()), BufHandle(B.buf())},
{}));
Tensor Result =
Compute("Result", {8, 8}, [&](const VarHandle& i, const VarHandle& j) {
return MatmulResult.load(i, j) + FloatImm::make(3.0f);
});
StmtPtr root_stmt = alloc<Block>(std::vector<StmtPtr>(
{A.stmt(), B.stmt(), MatmulResult.stmt(), Result.stmt()}));
LoopNest l(root_stmt, {Result.buf()});
// Inlining should not inline anything here since all Bufs are either
// defined or used in ExternalCalls
l.inlineIntermediateBufs(false);
l.prepareForCodegen();
l.simplify();
auto options = at::TensorOptions()
.dtype(at::kFloat)
.layout(at::kStrided)
.device(at::kCPU)
.requires_grad(false);
at::Tensor a = at::ones({8, 8}, options) * 5.f;
at::Tensor b = at::ones({8, 8}, options) * 4.f;
at::Tensor t = at::matmul(a, b);
at::Tensor ref = t + 3.f;
at::Tensor nnc_result;
std::vector<float> result_buf(8 * 8);
#ifdef TORCH_ENABLE_LLVM
LLVMCodeGen llvm_codegen(l.root_stmt(), {Result});
llvm_codegen.call({result_buf});
nnc_result = at::from_blob(result_buf.data(), {8, 8}, options);
ASSERT_TRUE(at::allclose(nnc_result, ref));
#endif
SimpleIREvaluator ir_eval(l.root_stmt(), {Result});
ir_eval.call({result_buf});
nnc_result = at::from_blob(result_buf.data(), {8, 8}, options);
ASSERT_TRUE(at::allclose(nnc_result, ref));
}
} // namespace jit
} // namespace torch
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include "flashlight/lib/sequence/criterion/cpu/ForceAlignmentCriterion.h"
#include <algorithm>
#include <cmath>
#include "flashlight/lib/sequence/criterion/Workspace.h"
#include "flashlight/lib/sequence/criterion/cpu/CriterionUtils.h"
namespace {
template <class Float>
// Carves a single caller-allocated workspace buffer into the named scratch
// arrays used by the CPU force-alignment criterion. Each ws.request(...) call
// reserves the next sub-span of `workspace` for the given pointer, so the
// ORDER of the request calls defines the memory layout — do not reorder them.
// Dimensions (presumed from usage — confirm against the criterion kernels):
//   B: batch size, T: number of time frames, N: number of tokens
//   (transBatchGrad is requested as B x N x N), L: target/label length.
struct WorkspacePtrs {
  WorkspacePtrs(void* workspace, int B, int T, int N, int L) {
    fl::lib::seq::Workspace<> ws(workspace);
    ws.request(&scale, B);
    // alpha / alphaGrad are requested as B x T x L spans; they are double*
    // (unlike the Float* members) — presumably to keep accumulation precision
    // even when Float is a narrower type. TODO(review): confirm intent.
    ws.request(&alpha, B, T, L);
    ws.request(&alphaGrad, B, T, L);
    ws.request(&transBatchGrad, B, N, N);
    ws.request(&transBuf1, B, L);
    ws.request(&transBuf2, B, L);
    ws.request(&transBufGrad1, B, L);
    ws.request(&transBufGrad2, B, L);
    // Total number of bytes the caller must allocate for `workspace`;
    // valid after all request() calls above have been issued.
    requiredSize = ws.requiredSize();
  }
  Float* scale;          // per-batch scale factors (B)
  double* alpha;         // B x T x L buffer (see precision note above)
  double* alphaGrad;     // B x T x L buffer (see precision note above)
  Float* transBatchGrad; // B x N x N transition-gradient buffer
  Float* transBuf1;      // B x L scratch
  Float* transBuf2;      // B x L scratch
  Float* transBufGrad1;  // B x L scratch
  Float* transBufGrad2;  // B x L scratch
  size_t requiredSize;   // total workspace bytes required
};
} // namespace