content += "<script type=\"text/javascript\" src=\"_afc_Interfaces.js\"></script>";
content += "<script type=\"text/javascript\" src=\""+fviewmap[file]+".js\"></script>" + en;
}
else
{
if(ht!=(int)std::string::npos)
{
std::string st = content.substr(0,ht+6);
std::string en = content.substr(ht+6);
content = st + "<script type=\"text/javascript\" src=\"json2.js\"></script>";
content += "<script type=\"text/javascript\" src=\"prototype.js\"></script>";
content += "<script type=\"text/javascript\" src=\"afc.js\"></script>";
content += "<script type=\"text/javascript\" src=\"_afc_Objects.js\"></script>";
content += "<script type=\"text/javascript\" src=\"_afc_Interfaces.js\"></script>";
content += "<script type=\"text/javascript\" src=\""+fviewmap[file]+".js\"></script>" + en;
}
}
res->addHeader(HttpResponse::ContentType, ContentTypes::CONTENT_TYPE_TEXT_HTML);
res->setHTTPResponseStatus(HTTPResponseStatus::Ok);
res->setContent(content);
infile.close();
return true;
}
return false;
}
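
// The branch above splices a set of <script> tags into the page right after the
// opening <head> tag. Below is a minimal, self-contained sketch of that splice
// technique only; injectAfterHead is a hypothetical helper written for
// illustration and is not part of the framework code above.
#include <iostream>
#include <string>

// Insert the given markup immediately after the opening <head> tag.
// Returns the page unchanged if no <head> tag is found.
static std::string injectAfterHead(const std::string& page, const std::string& scripts) {
	std::string::size_type ht = page.find("<head>");
	if (ht == std::string::npos) {
		return page;
	}
	// Split just past "<head>" (6 characters) and splice the scripts in between.
	return page.substr(0, ht + 6) + scripts + page.substr(ht + 6);
}

int main() {
	std::string page = "<html><head><title>t</title></head><body></body></html>";
	std::string scripts = "<script type=\"text/javascript\" src=\"json2.js\"></script>";
	std::cout << injectAfterHead(page, scripts) << std::endl;
	return 0;
}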
#include <gtest/gtest.h>
#include <test/cpp/tensorexpr/test_base.h>
#include <test/cpp/tensorexpr/test_utils.h>
#include <torch/csrc/jit/tensorexpr/eval.h>
#include <torch/csrc/jit/tensorexpr/ir.h>
#include <torch/csrc/jit/tensorexpr/ir_printer.h>
#include <torch/csrc/jit/tensorexpr/ir_simplifier.h>
#include <torch/csrc/jit/tensorexpr/llvm_codegen.h>
#include <torch/csrc/jit/tensorexpr/loopnest.h>
#include <torch/csrc/jit/tensorexpr/tensor.h>
#include <ATen/NativeFunctions.h>
#include <ATen/core/dispatch/Dispatcher.h>
#include <ATen/native/xnnpack/OpContext.h>
namespace torch {
namespace jit {
using namespace torch::jit::tensorexpr;
TEST(ExternalCall, Conv1d_float) {
  // Depthwise conv1d: 100 input channels, groups == 100, kernel size 7, padding 3,
  // stride 1, so the output length is (115 + 2*3 - 7) / 1 + 1 = 115 and the result
  // buffer reuses the input shape.
  BufHandle Input("Input", {1, 100, 115}, kFloat);
  BufHandle Weight("Weight", {100, 1, 7}, kFloat);
  BufHandle Bias("Bias", {100}, kFloat);
  BufHandle ResultBuf("Result", {1, 100, 115}, kFloat);
  int64_t stride = 1;
  int64_t pad = 3;
  int64_t dilation = 1;
  int64_t groups = 100;

  // Lower the convolution to an external call into ATen.
  Tensor Result = Tensor(
      ResultBuf.node(),
      ExternalCall::make(
          ResultBuf,
          "nnc_aten_conv1d",
          {Input, Weight, Bias},
          {stride, pad, dilation, groups}));
  LoopNest l({Result});
  l.prepareForCodegen();
  l.simplify();

  auto options = at::TensorOptions()
                     .dtype(at::kFloat)
                     .layout(at::kStrided)
                     .device(at::kCPU)
                     .requires_grad(false);
  at::Tensor input = at::ones({1, 100, 115}, options) * 5.f;
  at::Tensor weight = at::ones({100, 1, 7}, options) * 6.f;
  at::Tensor bias = at::ones({100}, options) * 11.f;
  // Reference result computed directly with ATen.
  at::Tensor ref =
      at::conv1d(input, weight, bias, {stride}, {pad}, {dilation}, groups);

  at::Tensor nnc_result;
  std::vector<float> input_buf(1 * 100 * 115, 5.f);
  std::vector<float> weight_buf(100 * 1 * 7, 6.f);
  std::vector<float> bias_buf(100, 11.f);
  std::vector<float> result_buf(1 * 100 * 115, -1.f);

#ifdef TORCH_ENABLE_LLVM
  // Compile and run through the LLVM backend when it is available.
  LLVMCodeGen llvm_codegen(l.root_stmt(), {Input, Weight, Bias, Result});
  llvm_codegen.call({input_buf, weight_buf, bias_buf, result_buf});
  nnc_result = at::from_blob(result_buf.data(), {1, 100, 115}, options);
  ASSERT_TRUE(at::allclose(nnc_result, ref));
#endif

  // Always check the interpreter path as well.
  SimpleIREvaluator ir_eval(l.root_stmt(), {Input, Weight, Bias, Result});
  ir_eval.call({input_buf, weight_buf, bias_buf, result_buf});
  nnc_result = at::from_blob(result_buf.data(), {1, 100, 115}, options);
  ASSERT_TRUE(at::allclose(nnc_result, ref));
}
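
// Usage note (not part of the original file): with a standard PyTorch C++ test
// build, this test typically ends up in the test_tensorexpr gtest binary, so it
// can be run in isolation with --gtest_filter=ExternalCall.Conv1d_float.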