        LOOP_ASSERT( *it2, 42 == *it2);
        LOOP2_ASSERT(*it1, *it2, *it1 == *it2);

        if (verbose) printf("Test length of array\n");
        {
            int x[5];
            ASSERT(5 == bsl::size(x));
            ASSERT(5 == bsl::ssize(x));
        }
      } break;
      default: {
        fprintf(stderr, "WARNING: CASE `%d' NOT FOUND.\n", test);
        testStatus = -1;
      }
    }

    if (testStatus > 0) {
        fprintf(stderr, "Error, non-zero test status = %d.\n", testStatus);
    }

    return testStatus;
}

// ----------------------------------------------------------------------------
// Copyright 2013 Bloomberg Finance L.P.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ----------------------------- END-OF-FILE ----------------------------------
#include <torch/csrc/jit/runtime/static/init.h>

#include <torch/csrc/jit/passes/freeze_module.h>
#include <torch/csrc/jit/runtime/static/fusion.h>
#include <torch/csrc/jit/runtime/static/impl.h>

// This number is a heuristic determined with pytorch/benchmark
#define DEFAULT_FUSION_SIZE 4

namespace torch {
namespace jit {
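
// Registers the StaticModule Python binding and its nested IndividualMetrics
// profiling struct (per-node timing and counts gathered by StaticRuntime)
// on the given extension module via pybind11.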
void initStaticModuleBindings(PyObject* module) {
  auto m = py::handle(module).cast<py::module>();
  py::class_<StaticModule> static_module(m, "StaticModule");
  py::class_<StaticRuntime::IndividualMetrics>(
      static_module, "IndividualMetrics")
      .def_readonly("setup_time", &StaticRuntime::IndividualMetrics::setup_time)
      .def_readonly(
          "memory_alloc_time",
          &StaticRuntime::IndividualMetrics::memory_alloc_time)
      .def_readonly(
          "memory_dealloc_time",
          &StaticRuntime::IndividualMetrics::memory_dealloc_time)
      .def_readonly(
          "output_dealloc_time",
          &StaticRuntime::IndividualMetrics::output_dealloc_time)
      .def_readonly(
          "first_iter_time", &StaticRuntime::IndividualMetrics::first_iter_time)
      .def_readonly("total_time", &StaticRuntime::IndividualMetrics::total_time)
      .def_readonly(
          "out_nodes_count", &StaticRuntime::IndividualMetrics::out_nodes_count)
      .def_readonly(
          "total_nodes_count",
          &StaticRuntime::IndividualMetrics::total_nodes_count)
      .def_readonly(
          "time_per_node", &StaticRuntime::IndividualMetrics::time_per_node)
      .def_readonly(
          "time_per_node_type",
          &StaticRuntime::IndividualMetrics::time_per_node_type)
      .def_readonly(
          "percent_per_node_type",
          &StaticRuntime::IndividualMetrics::percent_per_node_type)
      .def_readonly(
          "instances_per_node_type",
          &StaticRuntime::IndividualMetrics::instances_per_node_type)
      .def_readonly("out_nodes", &StaticRuntime::IndividualMetrics::out_nodes);
  static_module
      .def(
          "__call__",
          [](StaticModule& self,
             const py::args& args,
             const py::kwargs& kwargs) {
            std::vector<c10::IValue> arg_ivalues;
            std::unordered_map<std::string, c10::IValue> kwarg_ivalues;
            for (size_t i = 0; i < args.size(); ++i) {
              auto ivalue = torch::jit::toIValue(args[i], c10::AnyType::get());
              arg_ivalues.push_back(ivalue);
            }