Dataset columns (length/class statistics as reported by the dataset viewer):

| Column | Type |
|---|---|
| ID | string (length 36) |
| Language | string (1 class) |
| Repository Name | string (13 classes) |
| File Name | string (length 2–48) |
| File Path in Repository | string (length 11–111) |
| File Path for Unit Test | string (length 13–116) |
| Code | string (length 0–278k) |
| Unit Test - (Ground Truth) | string (length 78–663k) |
| Code Url | string (length 91–198) |
| Test Code Url | string (length 93–203) |
| Commit Hash | string (13 classes) |
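For orientation, below is a minimal sketch of how one might iterate rows with these column names. It assumes the data is published as a Hugging Face `datasets` dataset; the dataset path used in the snippet is a hypothetical placeholder, not the real identifier.

```python
# Minimal sketch, assuming a Hugging Face `datasets` layout.
# "org/code-to-unit-test" is a hypothetical placeholder path.
from datasets import load_dataset

ds = load_dataset("org/code-to-unit-test", split="train")
for row in ds.select(range(3)):
    print(row["ID"], row["Language"], row["Repository Name"])
    print("source file:", row["File Path in Repository"])
    print("test file:", row["File Path for Unit Test"])
    print("code length:", len(row["Code"]),
          "| test length:", len(row["Unit Test - (Ground Truth)"]))
```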
Example row:

- ID: f058373b-e476-464a-856c-33de8c061930
- Language: cpp
- Repository Name: tensorflow/tensorflow
- File Name: activations
- File Path in Repository: tensorflow/lite/kernels/activations.cc
- File Path for Unit Test: tensorflow/lite/delegates/hexagon/builders/tests/activations_test.cc

Code:

#include <stddef.h>
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <functional>
#include <limits>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/cppmath.h"
#include "tensorflow/lite/kernels/internal/optimized/integer_ops/leaky_relu.h"
#include "tensorflow/lite/kernels/internal/optimized/integer_ops/lut.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/quantization_util.h"
#include "tensorflow/lite/kernels/internal/reference/binary_function.h"
#include "tensorflow/lite/kernels/internal/reference/gelu.h"
#include "tensorflow/lite/kernels/internal/reference/integer_ops/log_softmax.h"
#include "tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h"
#include "tensorflow/lite/kernels/internal/reference/integer_ops/lut.h"
#include "tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h"
#include "tensorflow/lite/kernels/internal/reference/logistic.h"
#include "tensorflow/lite/kernels/internal/reference/prelu.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/reference/softmax.h"
#include "tensorflow/lite/kernels/internal/reference/tanh.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace activations {
enum KernelType {
kReference,
kGenericOptimized,
kFixedPointOptimized,
};
struct OpData {
int32_t input_multiplier = 0;
int input_left_shift = 0;
int32_t input_range_radius = 0;
int diff_min = 0;
union {
uint8_t lut_uint8[LUTSize<uint8_t>()];
int8_t lut_int8[LUTSize<int8_t>()];
int16_t lut_int16[LUTSize<int16_t>()];
};
};
struct SoftmaxOpData {
struct SoftmaxParams params = {};
float table[256];
#ifdef TFLITE_SOFTMAX_USE_UINT16_LUT
uint8_t uint8_table1[256];
uint8_t uint8_table2[256];
#endif
static constexpr int kInt16LUTArraySize = LUTSize<int16_t>();
int16_t exp_lut[kInt16LUTArraySize];
int16_t one_over_one_plus_x_lut[kInt16LUTArraySize];
};
struct LogSoftmaxOpData : public OpData {
int32_t reverse_scaling_divisor = 0;
int32_t reverse_scaling_right_shift = 0;
struct SoftmaxParams params = {};
float f_table[256];
};
struct LeakyReluOpData : public OpData {
int32_t output_multiplier_alpha = 0;
int32_t output_shift_alpha = 0;
int32_t output_multiplier_identity = 0;
int32_t output_shift_identity = 0;
};
struct PreluOpData : public OpData {
int32_t output_multiplier_1 = 0;
int32_t output_shift_1 = 0;
int32_t output_multiplier_2 = 0;
int32_t output_shift_2 = 0;
bool requires_broadcast;
};
struct HardSwishData {
HardSwishParams params;
};
struct ReluOpData : public OpData {
int32_t output_multiplier = 0;
int output_shift = 0;
};
namespace {
template <typename T>
void QuantizedReluX(float act_min, float act_max, const TfLiteTensor* input,
TfLiteTensor* output, const ReluOpData* data) {
ReluParams params;
params.quantized_activation_min =
std::max(static_cast<int32_t>(std::numeric_limits<T>::min()),
output->params.zero_point +
static_cast<int32>(roundf(act_min / output->params.scale)));
params.quantized_activation_max =
act_max == std::numeric_limits<float>::infinity()
? static_cast<int32_t>(std::numeric_limits<T>::max())
: std::min(
static_cast<int32_t>(std::numeric_limits<T>::max()),
output->params.zero_point +
static_cast<int32>(roundf(act_max / output->params.scale)));
params.input_offset = input->params.zero_point;
params.output_offset = output->params.zero_point;
params.output_multiplier = data->output_multiplier;
params.output_shift = data->output_shift;
optimized_ops::ReluX(params, GetTensorShape(input), GetTensorData<T>(input),
GetTensorShape(output), GetTensorData<T>(output));
}
}
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
return new OpData;
}
void* SoftmaxInit(TfLiteContext* context, const char* buffer, size_t length) {
return new SoftmaxOpData;
}
void SoftmaxFree(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<SoftmaxOpData*>(buffer);
}
void* LogSoftmaxInit(TfLiteContext* context, const char* buffer,
size_t length) {
return new LogSoftmaxOpData;
}
void* PreluInit(TfLiteContext* context, const char* buffer, size_t length) {
return new PreluOpData;
}
void Free(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<OpData*>(buffer);
}
void LogSoftmaxFree(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<LogSoftmaxOpData*>(buffer);
}
void PreluFree(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<PreluOpData*>(buffer);
}
void* HardSwishInit(TfLiteContext* context, const char* buffer, size_t length) {
return new HardSwishData;
}
TfLiteStatus GenericPrepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
return context->ResizeTensor(context, output,
TfLiteIntArrayCopy(input->dims));
}
void* ReluInit(TfLiteContext* context, const char* buffer, size_t length) {
return new ReluOpData;
}
void ReluFree(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<ReluOpData*>(buffer);
}
TfLiteStatus ReluPrepare(TfLiteContext* context, TfLiteNode* node) {
ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data);
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
if (input->type == kTfLiteInt8 || input->type == kTfLiteUInt8 ||
input->type == kTfLiteInt16) {
double real_multiplier = input->params.scale / output->params.scale;
QuantizeMultiplier(real_multiplier, &data->output_multiplier,
&data->output_shift);
}
if (input->type == kTfLiteInt16) {
TF_LITE_ENSURE_EQ(context, input->params.zero_point, 0);
TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0);
}
return context->ResizeTensor(context, output,
TfLiteIntArrayCopy(input->dims));
}
void* LeakyReluInit(TfLiteContext* context, const char* buffer, size_t length) {
return new LeakyReluOpData;
}
void LeakyReluFree(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<LeakyReluOpData*>(buffer);
}
void HardSwishFree(TfLiteContext* context, void* buffer) {
delete static_cast<HardSwishData*>(buffer);
}
TfLiteStatus HardSwishPrepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_STATUS(GenericPrepare(context, node));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8) {
HardSwishData* data = static_cast<HardSwishData*>(node->user_data);
HardSwishParams* params = &data->params;
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
params->input_zero_point = input->params.zero_point;
params->output_zero_point = output->params.zero_point;
const float input_scale = input->params.scale;
const float hires_input_scale = (1.0f / 128.0f) * input_scale;
const float reluish_scale = 3.0f / 32768.0f;
const float output_scale = output->params.scale;
const float output_multiplier = hires_input_scale / output_scale;
int32_t output_multiplier_fixedpoint_int32;
QuantizeMultiplier(output_multiplier, &output_multiplier_fixedpoint_int32,
&params->output_multiplier_exponent);
DownScaleInt32ToInt16Multiplier(
output_multiplier_fixedpoint_int32,
&params->output_multiplier_fixedpoint_int16);
TF_LITE_ENSURE(context, params->output_multiplier_exponent <= 0);
const float reluish_multiplier = hires_input_scale / reluish_scale;
int32_t reluish_multiplier_fixedpoint_int32;
QuantizeMultiplier(reluish_multiplier, &reluish_multiplier_fixedpoint_int32,
&params->reluish_multiplier_exponent);
DownScaleInt32ToInt16Multiplier(
reluish_multiplier_fixedpoint_int32,
&params->reluish_multiplier_fixedpoint_int16);
}
return kTfLiteOk;
}
TfLiteStatus LeakyReluPrepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
LeakyReluOpData* data = reinterpret_cast<LeakyReluOpData*>(node->user_data);
if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 ||
output->type == kTfLiteInt16) {
const auto* params =
reinterpret_cast<TfLiteLeakyReluParams*>(node->builtin_data);
double alpha_multiplier =
input->params.scale * params->alpha / output->params.scale;
QuantizeMultiplier(alpha_multiplier, &data->output_multiplier_alpha,
&data->output_shift_alpha);
double identity_multiplier = input->params.scale / output->params.scale;
QuantizeMultiplier(identity_multiplier, &data->output_multiplier_identity,
&data->output_shift_identity);
}
if (input->type == kTfLiteInt16 && output->type == kTfLiteInt16) {
TF_LITE_ENSURE_EQ(context, input->params.zero_point, 0);
TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0);
}
return context->ResizeTensor(context, output,
TfLiteIntArrayCopy(input->dims));
}
template <KernelType kernel_type>
TfLiteStatus TanhPrepare(TfLiteContext* context, TfLiteNode* node) {
OpData* data = reinterpret_cast<OpData*>(node->user_data);
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
if (kernel_type == kFixedPointOptimized) {
if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) {
static constexpr int kInputIntegerBits = 4;
const double input_real_multiplier =
input->params.scale *
static_cast<double>(1 << (15 - kInputIntegerBits));
const double q =
std::frexp(input_real_multiplier, &data->input_left_shift);
auto q_fixed = static_cast<int32_t>(TfLiteRound(q * (1LL << 15)));
data->input_multiplier = static_cast<int16_t>(q_fixed);
int16_t input_range_radius =
CalculateInputRadius(kInputIntegerBits, data->input_left_shift, 15);
data->input_range_radius = input_range_radius;
}
}
if (kernel_type == kGenericOptimized || kernel_type == kReference) {
if (input->type == kTfLiteUInt8) {
LUTPopulate<uint8_t>(
input->params.scale, input->params.zero_point, output->params.scale,
output->params.zero_point,
[](float value) { return std::tanh(value); }, data->lut_uint8);
} else if (input->type == kTfLiteInt8) {
LUTPopulate<int8_t>(
input->params.scale, input->params.zero_point, output->params.scale,
output->params.zero_point,
[](float value) { return std::tanh(value); }, data->lut_int8);
}
}
if (input->type == kTfLiteInt16) {
static constexpr int kInputIntegerBits = 3;
static constexpr int kOutputFractionalBits = 15;
TF_LITE_ENSURE_EQ(context, input->params.zero_point, 0);
TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0);
int input_scale_log2_rounded;
bool param_scale_pot =
CheckedLog2(input->params.scale, &input_scale_log2_rounded);
data->input_left_shift =
(15 - kInputIntegerBits) + input_scale_log2_rounded;
param_scale_pot &=
(data->input_left_shift == 0 || data->input_left_shift == 1);
if (!param_scale_pot) {
double multiplier = input->params.scale * 4096.0 * 3.0;
data->input_left_shift = 0;
while (multiplier <= 32767.0 / 2.0 && data->input_left_shift <= 30) {
data->input_left_shift++;
multiplier = multiplier * 2.0;
}
data->input_multiplier = static_cast<int32_t>(multiplier);
}
int output_scale_log2_rounded;
TF_LITE_ENSURE(
context, CheckedLog2(output->params.scale, &output_scale_log2_rounded));
TF_LITE_ENSURE_EQ(context, output_scale_log2_rounded,
-kOutputFractionalBits);
}
return context->ResizeTensor(context, output,
TfLiteIntArrayCopy(input->dims));
}
template <KernelType kernel_type>
TfLiteStatus SigmoidPrepare(TfLiteContext* context, TfLiteNode* node) {
OpData* data = reinterpret_cast<OpData*>(node->user_data);
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
if (kernel_type == kFixedPointOptimized) {
if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) {
if (input->type == kTfLiteUInt8) {
TF_LITE_ENSURE_EQ(context, output->params.zero_point,
std::numeric_limits<uint8_t>::min());
}
if (input->type == kTfLiteInt8) {
TF_LITE_ENSURE_EQ(context, output->params.zero_point,
std::numeric_limits<int8_t>::min());
}
TF_LITE_ENSURE(context, output->params.scale == 1. / 256);
static constexpr int kInputIntegerBits = 4;
const double input_real_multiplier =
input->params.scale *
static_cast<double>(1 << (15 - kInputIntegerBits));
const double q =
std::frexp(input_real_multiplier, &data->input_left_shift);
auto q_fixed = static_cast<int32_t>(TfLiteRound(q * (1LL << 15)));
data->input_multiplier = static_cast<int16_t>(q_fixed);
int16_t input_range_radius =
CalculateInputRadius(kInputIntegerBits, data->input_left_shift, 15);
data->input_range_radius = input_range_radius;
}
}
if (kernel_type == kGenericOptimized || kernel_type == kReference) {
if (input->type == kTfLiteUInt8) {
TF_LITE_ENSURE(context, output->params.scale == 1. / 256);
LUTPopulate<uint8_t>(
input->params.scale, input->params.zero_point, output->params.scale,
output->params.zero_point,
[](float value) { return 1.0f / (1.0f + std::exp(-value)); },
data->lut_uint8);
} else if (input->type == kTfLiteInt8) {
TF_LITE_ENSURE(context, output->params.scale == 1. / 256);
LUTPopulate<int8_t>(
input->params.scale, input->params.zero_point, output->params.scale,
output->params.zero_point,
[](float value) { return 1.0f / (1.0f + std::exp(-value)); },
data->lut_int8);
} else if (input->type == kTfLiteInt16) {
TF_LITE_ENSURE(context, output->params.scale == 1. / 32768);
TF_LITE_ENSURE(context, output->params.zero_point == 0);
}
}
if (input->type == kTfLiteInt16) {
static constexpr int kInputIntegerBits = 3;
static constexpr int kOutputFractionalBits = 15;
TF_LITE_ENSURE_EQ(context, input->params.zero_point, 0);
TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0);
int input_scale_log2_rounded;
bool param_scale_pot =
CheckedLog2(input->params.scale, &input_scale_log2_rounded);
data->input_left_shift =
(15 - kInputIntegerBits) + input_scale_log2_rounded;
param_scale_pot &= (data->input_left_shift == 0);
if (!param_scale_pot) {
double multiplier = input->params.scale * 4096.0 * 3.0;
data->input_left_shift = 0;
while (multiplier <= 32767.0 / 2.0 && data->input_left_shift <= 30) {
data->input_left_shift++;
multiplier = multiplier * 2.0;
}
data->input_multiplier = static_cast<int32_t>(multiplier);
}
int output_scale_log2_rounded;
TF_LITE_ENSURE(
context, CheckedLog2(output->params.scale, &output_scale_log2_rounded));
TF_LITE_ENSURE_EQ(context, output_scale_log2_rounded,
-kOutputFractionalBits);
}
return context->ResizeTensor(context, output,
TfLiteIntArrayCopy(input->dims));
}
template <KernelType kernel_type>
TfLiteStatus SoftmaxPrepare(TfLiteContext* context, TfLiteNode* node) {
auto* params = reinterpret_cast<TfLiteSoftmaxParams*>(node->builtin_data);
SoftmaxOpData* data = reinterpret_cast<SoftmaxOpData*>(node->user_data);
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
TF_LITE_ENSURE(context, NumDimensions(input) >= 1);
if (input->type == kTfLiteInt8 && output->type == kTfLiteInt8) {
TF_LITE_ENSURE_EQ(context, output->params.zero_point, -128);
TF_LITE_ENSURE_NEAR(context, output->params.scale, 1.f / 256,
(0.001f * 1.f / 256));
} else if (input->type == kTfLiteInt16 && output->type == kTfLiteInt16) {
TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0);
TF_LITE_ENSURE_NEAR(context, output->params.scale, 1.f / 32768,
(0.001f * 1.f / 32768));
}
if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) {
if (kernel_type == kReference) {
const int kScaledDiffIntegerBits = 5;
int input_left_shift;
tflite::PreprocessSoftmaxScaling(
static_cast<double>(params->beta),
static_cast<double>(input->params.scale), kScaledDiffIntegerBits,
&data->params.input_multiplier, &input_left_shift);
data->params.input_left_shift = input_left_shift;
data->params.diff_min =
-1.0 * tflite::CalculateInputRadius(kScaledDiffIntegerBits,
input_left_shift);
} else {
switch (output->type) {
case kTfLiteUInt8:
case kTfLiteInt8:
#ifdef TFLITE_SOFTMAX_USE_UINT16_LUT
data->params.uint8_table1 = data->uint8_table1;
data->params.uint8_table2 = data->uint8_table2;
optimized_ops::PopulateSoftmaxUInt8LookupTable(
&data->params, input->params.scale, params->beta);
break;
#endif
case kTfLiteInt16:
default:
data->params.table = data->table;
optimized_ops::PopulateSoftmaxLookupTable(
&data->params, input->params.scale, params->beta);
}
data->params.zero_point = output->params.zero_point;
data->params.scale = output->params.scale;
}
} else if (input->type == kTfLiteInt16) {
TF_LITE_ENSURE_EQ(context, input->params.zero_point, 0);
TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0);
const int32_t range = std::numeric_limits<int16_t>::max() -
std::numeric_limits<int16_t>::min();
data->params.exp_lut = data->exp_lut;
LUTPopulate<int16_t>(
10.0 / range, std::numeric_limits<int16_t>::max(), 2.0 / range, 0,
[](double value) { return std::exp(value); }, data->params.exp_lut);
data->params.one_over_one_plus_x_lut = data->one_over_one_plus_x_lut;
LUTPopulate<int16_t>(
1.0 / range, std::numeric_limits<int16_t>::min(), 2.0 / range, 0,
[](double value) { return 1.0 / (1.0 + value); },
data->params.one_over_one_plus_x_lut);
data->params.zero_point = output->params.zero_point;
data->params.scale = output->params.scale;
double input_scale_beta_rescale =
input->params.scale * params->beta /
(10.0 / 65535.0);
QuantizeMultiplier(input_scale_beta_rescale, &data->params.input_multiplier,
&data->params.input_left_shift);
}
return context->ResizeTensor(context, output,
TfLiteIntArrayCopy(input->dims));
}
template <KernelType kernel_type>
TfLiteStatus LogSoftmaxPrepare(TfLiteContext* context, TfLiteNode* node) {
LogSoftmaxOpData* data = reinterpret_cast<LogSoftmaxOpData*>(node->user_data);
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) {
TF_LITE_ENSURE_EQ(context, output->params.scale, 16.0 / 256);
static const double kBeta = 1.0;
if (input->type == kTfLiteUInt8) {
TF_LITE_ENSURE_EQ(context, output->params.zero_point, 255);
}
if (input->type == kTfLiteInt8) {
TF_LITE_ENSURE_EQ(context, output->params.zero_point, 127);
}
if (kernel_type == kReference) {
const int kScaledDiffIntegerBits = 5;
int input_left_shift;
int reverse_scaling_right_shift;
tflite::PreprocessLogSoftmaxScalingExp(
kBeta, static_cast<double>(input->params.scale),
kScaledDiffIntegerBits, &data->params.input_multiplier,
&input_left_shift, &data->params.reverse_scaling_divisor,
&reverse_scaling_right_shift);
reverse_scaling_right_shift *= -1;
data->params.input_left_shift = input_left_shift;
data->params.reverse_scaling_right_shift = reverse_scaling_right_shift;
data->params.diff_min = -tflite::CalculateInputRadius(
kScaledDiffIntegerBits, input_left_shift);
} else {
data->params.table = data->f_table;
optimized_ops::PopulateSoftmaxLookupTable(&data->params,
input->params.scale, kBeta);
data->params.zero_point = output->params.zero_point;
data->params.scale = output->params.scale;
}
}
return context->ResizeTensor(context, output,
TfLiteIntArrayCopy(input->dims));
}
TfLiteStatus PreluPrepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
const TfLiteTensor* alpha;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &alpha));
PreluOpData* data = reinterpret_cast<PreluOpData*>(node->user_data);
TF_LITE_ENSURE_TYPES_EQ(context, input->type, alpha->type);
output->type = input->type;
if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8) {
double real_multiplier_1 = input->params.scale / output->params.scale;
double real_multiplier_2 =
input->params.scale * alpha->params.scale / output->params.scale;
QuantizeMultiplier(real_multiplier_1, &data->output_multiplier_1,
&data->output_shift_1);
QuantizeMultiplier(real_multiplier_2, &data->output_multiplier_2,
&data->output_shift_2);
}
data->requires_broadcast = !HaveSameShapes(input, alpha);
TfLiteIntArray* output_size = nullptr;
TF_LITE_ENSURE_OK(
context, CalculateShapeForBroadcast(context, input, alpha, &output_size));
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, output, output_size));
TF_LITE_ENSURE(context, HaveSameShapes(input, output));
return kTfLiteOk;
}
TfLiteStatus ReluEval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
const ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data);
switch (input->type) {
case kTfLiteFloat32: {
optimized_ops::Relu(GetTensorShape(input), GetTensorData<float>(input),
GetTensorShape(output), GetTensorData<float>(output));
} break;
case kTfLiteUInt8: {
QuantizedReluX<uint8_t>(0.0f, std::numeric_limits<float>::infinity(),
input, output, data);
} break;
case kTfLiteInt8: {
QuantizedReluX<int8_t>(0.0f, std::numeric_limits<float>::infinity(),
input, output, data);
} break;
case kTfLiteInt16: {
QuantizedReluX<int16_t>(0.0f, std::numeric_limits<float>::infinity(),
input, output, data);
} break;
default:
TF_LITE_KERNEL_LOG(context,
"Only float32, uint8, int8 and int16 are supported "
"currently, got %s.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
return kTfLiteOk;
}
TfLiteStatus Relu1Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
const ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data);
switch (input->type) {
case kTfLiteFloat32: {
optimized_ops::Relu1(GetTensorShape(input), GetTensorData<float>(input),
GetTensorShape(output),
GetTensorData<float>(output));
return kTfLiteOk;
}
case kTfLiteUInt8: {
QuantizedReluX<uint8_t>(-1.0f, 1.0f, input, output, data);
return kTfLiteOk;
}
case kTfLiteInt8: {
QuantizedReluX<int8_t>(-1, 1, input, output, data);
return kTfLiteOk;
}
default:
TF_LITE_KERNEL_LOG(context,
"Only float32, uint8, int8 supported "
"currently, got %s.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
}
template <KernelType kernel_type>
TfLiteStatus HardSwishEval(TfLiteContext* context, TfLiteNode* node) {
HardSwishData* data = static_cast<HardSwishData*>(node->user_data);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
switch (input->type) {
case kTfLiteFloat32: {
if (kernel_type == kReference) {
reference_ops::HardSwish(
GetTensorShape(input), GetTensorData<float>(input),
GetTensorShape(output), GetTensorData<float>(output));
} else {
optimized_ops::HardSwish(
GetTensorShape(input), GetTensorData<float>(input),
GetTensorShape(output), GetTensorData<float>(output));
}
return kTfLiteOk;
} break;
case kTfLiteUInt8: {
HardSwishParams& params = data->params;
if (kernel_type == kReference) {
reference_ops::HardSwish(
params, GetTensorShape(input), GetTensorData<uint8_t>(input),
GetTensorShape(output), GetTensorData<uint8_t>(output));
} else {
optimized_ops::HardSwish(
params, GetTensorShape(input), GetTensorData<uint8_t>(input),
GetTensorShape(output), GetTensorData<uint8_t>(output));
}
return kTfLiteOk;
} break;
case kTfLiteInt8: {
HardSwishParams& params = data->params;
if (kernel_type == kReference) {
reference_ops::HardSwish(
params, GetTensorShape(input), GetTensorData<int8_t>(input),
GetTensorShape(output), GetTensorData<int8_t>(output));
} else {
optimized_ops::HardSwish(
params, GetTensorShape(input), GetTensorData<int8_t>(input),
GetTensorShape(output), GetTensorData<int8_t>(output));
}
return kTfLiteOk;
} break;
default:
TF_LITE_KERNEL_LOG(
context,
"Only float32, uint8 and int8 are supported currently, got %s.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
}
TfLiteStatus Relu0to1Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
const ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data);
switch (input->type) {
case kTfLiteFloat32: {
optimized_ops::Relu0To1(
GetTensorShape(input), GetTensorData<float>(input),
GetTensorShape(output), GetTensorData<float>(output));
return kTfLiteOk;
}
case kTfLiteUInt8: {
QuantizedReluX<uint8_t>(0.0f, 1.0f, input, output, data);
return kTfLiteOk;
}
case kTfLiteInt8: {
QuantizedReluX<int8_t>(0, 1, input, output, data);
return kTfLiteOk;
}
default:
TF_LITE_KERNEL_LOG(context,
"Only float32, uint8, int8 supported "
"currently, got %s.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
}
TfLiteStatus Relu6Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data);
switch (input->type) {
case kTfLiteFloat32: {
size_t elements = input->bytes / sizeof(float);
const float* in = GetTensorData<float>(input);
const float* in_end = in + elements;
float* out = GetTensorData<float>(output);
for (; in < in_end; in++, out++) *out = std::min(std::max(0.f, *in), 6.f);
return kTfLiteOk;
}
case kTfLiteUInt8:
QuantizedReluX<uint8_t>(0.0f, 6.0f, input, output, data);
return kTfLiteOk;
case kTfLiteInt8: {
QuantizedReluX<int8_t>(0.0f, 6.0f, input, output, data);
return kTfLiteOk;
}
case kTfLiteInt16: {
QuantizedReluX<int16_t>(0.0f, 6.0f, input, output, data);
return kTfLiteOk;
}
default:
TF_LITE_KERNEL_LOG(context,
"Only float32, uint8, int8 and int16 are supported "
"currently, got %s.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
}
template <KernelType kernel_type>
TfLiteStatus TanhEval(TfLiteContext* context, TfLiteNode* node) {
OpData* data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
switch (input->type) {
case kTfLiteFloat32: {
if (kernel_type == kReference) {
reference_ops::Tanh(GetTensorShape(input), GetTensorData<float>(input),
GetTensorShape(output),
GetTensorData<float>(output));
} else {
optimized_ops::Tanh(GetTensorShape(input), GetTensorData<float>(input),
GetTensorShape(output),
GetTensorData<float>(output));
}
return kTfLiteOk;
} break;
case kTfLiteInt16: {
TanhParams params;
params.input_left_shift = data->input_left_shift;
if (kernel_type == kReference || (data->input_multiplier > 0)) {
reference_integer_ops::Tanh(
data->input_multiplier, data->input_left_shift,
GetTensorShape(input), GetTensorData<int16_t>(input),
GetTensorShape(output), GetTensorData<int16_t>(output));
} else {
optimized_ops::Tanh(
params, GetTensorShape(input), GetTensorData<int16_t>(input),
GetTensorShape(output), GetTensorData<int16_t>(output));
}
return kTfLiteOk;
} break;
case kTfLiteUInt8: {
if (kernel_type == kFixedPointOptimized) {
TanhParams params;
params.input_zero_point = input->params.zero_point;
params.input_range_radius = data->input_range_radius;
params.input_multiplier = data->input_multiplier;
params.input_left_shift = data->input_left_shift;
optimized_ops::Tanh16bitPrecision(
params, GetTensorShape(input), GetTensorData<uint8_t>(input),
GetTensorShape(output), GetTensorData<uint8_t>(output));
} else {
optimized_integer_ops::LookupTable(
GetTensorData<uint8_t>(input),
MatchingFlatSize(GetTensorShape(input), GetTensorShape(output)),
data->lut_uint8, GetTensorData<uint8_t>(output));
}
return kTfLiteOk;
} break;
case kTfLiteInt8: {
if (kernel_type == kFixedPointOptimized) {
TanhParams params;
params.input_zero_point = input->params.zero_point;
params.input_range_radius = data->input_range_radius;
params.input_multiplier = data->input_multiplier;
params.input_left_shift = data->input_left_shift;
optimized_ops::Tanh16bitPrecision(
params, GetTensorShape(input), GetTensorData<int8_t>(input),
GetTensorShape(output), GetTensorData<int8_t>(output));
} else {
optimized_integer_ops::LookupTable(
GetTensorData<int8_t>(input),
MatchingFlatSize(GetTensorShape(input), GetTensorShape(output)),
data->lut_int8, GetTensorData<int8_t>(output));
}
return kTfLiteOk;
} break;
default:
TF_LITE_KERNEL_LOG(context,
"Only float32, uint8, int16 and int8 are supported "
"currently, got %s.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
}
template <KernelType kernel_type>
TfLiteStatus SigmoidEval(TfLiteContext* context, TfLiteNode* node) {
OpData* data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
switch (input->type) {
case kTfLiteFloat32: {
if (kernel_type == kReference) {
reference_ops::Logistic(
GetTensorShape(input), GetTensorData<float>(input),
GetTensorShape(output), GetTensorData<float>(output));
} else {
optimized_ops::Logistic(
GetTensorShape(input), GetTensorData<float>(input),
GetTensorShape(output), GetTensorData<float>(output));
}
break;
}
case kTfLiteInt16: {
LogisticParams params;
if (kernel_type == kReference || (data->input_multiplier > 0)) {
const int size =
MatchingFlatSize(GetTensorShape(input), GetTensorShape(output));
reference_integer_ops::Logistic(
data->input_multiplier, data->input_left_shift, size,
GetTensorData<int16_t>(input), GetTensorData<int16_t>(output));
} else {
optimized_ops::Logistic(
params, GetTensorShape(input), GetTensorData<int16_t>(input),
GetTensorShape(output), GetTensorData<int16_t>(output));
}
break;
}
case kTfLiteUInt8: {
if (kernel_type == kFixedPointOptimized) {
LogisticParams params;
params.input_zero_point = input->params.zero_point;
params.input_range_radius = data->input_range_radius;
params.input_multiplier = data->input_multiplier;
params.input_left_shift = data->input_left_shift;
optimized_ops::Logistic16bitPrecision(
params, GetTensorShape(input), GetTensorData<uint8_t>(input),
GetTensorShape(output), GetTensorData<uint8_t>(output));
} else {
optimized_integer_ops::LookupTable(
GetTensorData<uint8_t>(input),
MatchingFlatSize(GetTensorShape(input), GetTensorShape(output)),
data->lut_uint8, GetTensorData<uint8_t>(output));
}
break;
}
case kTfLiteInt8: {
if (kernel_type == kFixedPointOptimized) {
LogisticParams params;
params.input_zero_point = input->params.zero_point;
params.input_range_radius = data->input_range_radius;
params.input_multiplier = data->input_multiplier;
params.input_left_shift = data->input_left_shift;
optimized_ops::Logistic16bitPrecision(
params, GetTensorShape(input), GetTensorData<int8_t>(input),
GetTensorShape(output), GetTensorData<int8_t>(output));
} else {
optimized_integer_ops::LookupTable(
GetTensorData<int8_t>(input),
MatchingFlatSize(GetTensorShape(input), GetTensorShape(output)),
data->lut_int8, GetTensorData<int8_t>(output));
}
break;
}
default:
TF_LITE_KERNEL_LOG(context,
"Only float32, uint8, int16 and int8 are supported "
"currently, got %s.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
return kTfLiteOk;
}
TfLiteStatus SoftmaxFloat(TfLiteContext* context, const TfLiteTensor* input,
TfLiteTensor* output, TfLiteSoftmaxParams* params,
KernelType kernel_type = kGenericOptimized) {
SoftmaxParams op_params;
op_params.beta = params->beta;
if (kernel_type == kReference) {
reference_ops::Softmax(op_params, GetTensorShape(input),
GetTensorData<float>(input), GetTensorShape(output),
GetTensorData<float>(output));
} else {
optimized_ops::Softmax(op_params, GetTensorShape(input),
GetTensorData<float>(input), GetTensorShape(output),
GetTensorData<float>(output),
CpuBackendContext::GetFromContext(context));
}
return kTfLiteOk;
}
template <typename In, typename Out>
TfLiteStatus SoftmaxQuantized(TfLiteContext* context, const TfLiteTensor* input,
TfLiteTensor* output, SoftmaxOpData* data,
KernelType kernel_type = kGenericOptimized) {
if (kernel_type == kReference) {
reference_ops::Softmax(data->params, GetTensorShape(input),
GetTensorData<In>(input), GetTensorShape(output),
GetTensorData<Out>(output));
} else {
optimized_ops::Softmax(data->params, GetTensorShape(input),
GetTensorData<In>(input), GetTensorShape(output),
GetTensorData<Out>(output));
}
return kTfLiteOk;
}
template <>
TfLiteStatus SoftmaxQuantized<int8_t, int8_t>(TfLiteContext* context,
const TfLiteTensor* input,
TfLiteTensor* output,
SoftmaxOpData* data,
KernelType kernel_type) {
if (kernel_type == kReference) {
reference_ops::Softmax(data->params, GetTensorShape(input),
GetTensorData<int8_t>(input), GetTensorShape(output),
GetTensorData<int8_t>(output));
} else {
#ifdef TFLITE_SOFTMAX_USE_UINT16_LUT
optimized_ops::SoftmaxInt8LUT(
data->params, GetTensorShape(input), GetTensorData<int8_t>(input),
GetTensorShape(output), GetTensorData<int8_t>(output));
#else
optimized_ops::Softmax(data->params, GetTensorShape(input),
GetTensorData<int8_t>(input), GetTensorShape(output),
GetTensorData<int8_t>(output));
#endif
}
return kTfLiteOk;
}
template <>
TfLiteStatus SoftmaxQuantized<uint8_t, uint8_t>(TfLiteContext* context,
const TfLiteTensor* input,
TfLiteTensor* output,
SoftmaxOpData* data,
KernelType kernel_type) {
if (kernel_type == kReference) {
reference_ops::Softmax(
data->params, GetTensorShape(input), GetTensorData<uint8_t>(input),
GetTensorShape(output), GetTensorData<uint8_t>(output));
} else {
#ifdef TFLITE_SOFTMAX_USE_UINT16_LUT
optimized_ops::SoftmaxInt8LUT(
data->params, GetTensorShape(input), GetTensorData<uint8_t>(input),
GetTensorShape(output), GetTensorData<uint8_t>(output));
#else
optimized_ops::Softmax(
data->params, GetTensorShape(input), GetTensorData<uint8_t>(input),
GetTensorShape(output), GetTensorData<uint8_t>(output));
#endif
}
return kTfLiteOk;
}
template <>
TfLiteStatus SoftmaxQuantized<int16, int16>(TfLiteContext* context,
const TfLiteTensor* input,
TfLiteTensor* output,
SoftmaxOpData* data,
KernelType kernel_type) {
if (NumDimensions(input) >= 1 && NumDimensions(input) <= 4) {
reference_ops::SoftmaxInt16(
data->params, GetTensorShape(input), GetTensorData<int16_t>(input),
GetTensorShape(output), GetTensorData<int16_t>(output));
return kTfLiteOk;
} else {
TF_LITE_KERNEL_LOG(context,
"Only 1D, 2D, 3D and 4D tensors supported for int16 "
"input with int16 output, got %dD.",
NumDimensions(input));
return kTfLiteError;
}
}
template <KernelType kernel_type>
TfLiteStatus SoftmaxEval(TfLiteContext* context, TfLiteNode* node) {
auto* params = reinterpret_cast<TfLiteSoftmaxParams*>(node->builtin_data);
SoftmaxOpData* data = reinterpret_cast<SoftmaxOpData*>(node->user_data);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
switch (input->type) {
case kTfLiteFloat32: {
return SoftmaxFloat(context, input, output, params, kernel_type);
}
case kTfLiteUInt8: {
switch (output->type) {
case kTfLiteUInt8:
return SoftmaxQuantized<uint8_t, uint8_t>(context, input, output,
data, kernel_type);
case kTfLiteInt16:
return SoftmaxQuantized<uint8_t, int16_t>(context, input, output,
data, kernel_type);
default:
TF_LITE_KERNEL_LOG(context,
"Only uint8_t and int16_t outputs are supported "
"with uint8_t inputs currently, got %s.",
TfLiteTypeGetName(output->type));
return kTfLiteError;
}
}
case kTfLiteInt8: {
switch (output->type) {
case kTfLiteInt8:
return SoftmaxQuantized<int8_t, int8_t>(context, input, output, data,
kernel_type);
case kTfLiteInt16:
return SoftmaxQuantized<int8_t, int16_t>(context, input, output, data,
kernel_type);
default:
TF_LITE_KERNEL_LOG(context,
"Only int8_t and int16_t outputs are supported "
"with int8_t inputs currently, got %s.",
TfLiteTypeGetName(output->type));
return kTfLiteError;
}
}
case kTfLiteInt16: {
return SoftmaxQuantized<int16_t, int16_t>(context, input, output, data,
kernel_type);
}
default:
TF_LITE_KERNEL_LOG(context,
"Only float32, uint8_t, Int8_t, Int16_t are supported "
"currently, got %s.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
}
template <KernelType kernel_type>
TfLiteStatus LogSoftmaxEval(TfLiteContext* context, TfLiteNode* node) {
const LogSoftmaxOpData* data =
reinterpret_cast<LogSoftmaxOpData*>(node->user_data);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
switch (input->type) {
case kTfLiteFloat32: {
SoftmaxParams op_params;
if (kernel_type == kGenericOptimized) {
optimized_ops::LogSoftmax(
op_params, GetTensorShape(input), GetTensorData<float>(input),
GetTensorShape(output), GetTensorData<float>(output));
} else {
reference_ops::LogSoftmax(
op_params, GetTensorShape(input), GetTensorData<float>(input),
GetTensorShape(output), GetTensorData<float>(output));
}
return kTfLiteOk;
}
case kTfLiteUInt8: {
const SoftmaxParams& op_params = data->params;
if (kernel_type == kGenericOptimized) {
optimized_ops::LogSoftmax(
op_params, input->params.scale, GetTensorShape(input),
GetTensorData<uint8_t>(input), GetTensorShape(output),
GetTensorData<uint8_t>(output));
} else {
reference_ops::LogSoftmax(
op_params, GetTensorShape(input), GetTensorData<uint8_t>(input),
GetTensorShape(output), GetTensorData<uint8_t>(output));
}
return kTfLiteOk;
}
case kTfLiteInt8: {
const SoftmaxParams& op_params = data->params;
if (kernel_type == kGenericOptimized) {
optimized_ops::LogSoftmax(
op_params, input->params.scale, GetTensorShape(input),
GetTensorData<int8_t>(input), GetTensorShape(output),
GetTensorData<int8_t>(output));
} else {
const auto input_shape = GetTensorShape(input);
const auto output_shape = GetTensorShape(output);
const int trailing_dim = input_shape.DimensionsCount() - 1;
const int outer_size =
MatchingFlatSizeSkipDim(input_shape, trailing_dim, output_shape);
const int depth =
MatchingDim(input_shape, trailing_dim, output_shape, trailing_dim);
reference_integer_ops::LogSoftmax(
op_params.input_multiplier, op_params.input_left_shift,
op_params.reverse_scaling_divisor,
op_params.reverse_scaling_right_shift, op_params.diff_min,
outer_size, depth, GetTensorData<int8_t>(input),
GetTensorData<int8_t>(output));
}
return kTfLiteOk;
}
default:
TF_LITE_KERNEL_LOG(
context,
"Only float32, uint8 and int8 are supported currently, got %s.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
}
template <typename T>
T ApplyPrelu(T input, T alpha) {
return input >= 0.0 ? input : input * alpha;
}
template <KernelType kernel_type>
TfLiteStatus PreluEval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
const TfLiteTensor* alpha;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &alpha));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
const PreluOpData* data = reinterpret_cast<PreluOpData*>(node->user_data);
switch (input->type) {
case kTfLiteFloat32: {
if (kernel_type == kGenericOptimized) {
tflite::ArithmeticParams op_params;
bool need_broadcast = optimized_ops::ProcessBroadcastShapes(
GetTensorShape(input), GetTensorShape(alpha), &op_params);
if (need_broadcast) {
optimized_ops::BroadcastPReluDispatch(
op_params, GetTensorShape(input), GetTensorData<float>(input),
GetTensorShape(alpha), GetTensorData<float>(alpha),
GetTensorShape(output), GetTensorData<float>(output),
ApplyPrelu<float>);
} else {
const int flat_size =
MatchingElementsSize(GetTensorShape(input), GetTensorShape(alpha),
GetTensorShape(output));
optimized_ops::PReluElementWise(
flat_size, op_params, GetTensorData<float>(alpha),
GetTensorData<float>(input), GetTensorData<float>(output));
}
} else {
if (data->requires_broadcast) {
reference_ops::BroadcastBinaryFunction4DSlow<float, float, float>(
GetTensorShape(input), GetTensorData<float>(input),
GetTensorShape(alpha), GetTensorData<float>(alpha),
GetTensorShape(output), GetTensorData<float>(output),
ApplyPrelu<float>);
} else {
reference_ops::BinaryFunction<float, float, float>(
GetTensorShape(input), GetTensorData<float>(input),
GetTensorShape(alpha), GetTensorData<float>(alpha),
GetTensorShape(output), GetTensorData<float>(output),
ApplyPrelu<float>);
}
}
return kTfLiteOk;
}
case kTfLiteUInt8: {
PreluParams op_params;
op_params.input_offset = -input->params.zero_point;
op_params.alpha_offset = -alpha->params.zero_point;
op_params.output_offset = output->params.zero_point;
op_params.output_multiplier_1 = data->output_multiplier_1;
op_params.output_shift_1 = data->output_shift_1;
op_params.output_multiplier_2 = data->output_multiplier_2;
op_params.output_shift_2 = data->output_shift_2;
if (data->requires_broadcast) {
reference_ops::BroadcastPrelu4DSlow(
op_params, GetTensorShape(input), GetTensorData<uint8_t>(input),
GetTensorShape(alpha), GetTensorData<uint8_t>(alpha),
GetTensorShape(output), GetTensorData<uint8_t>(output));
} else {
reference_ops::Prelu(
op_params, GetTensorShape(input), GetTensorData<uint8_t>(input),
GetTensorShape(alpha), GetTensorData<uint8_t>(alpha),
GetTensorShape(output), GetTensorData<uint8_t>(output));
}
return kTfLiteOk;
}
case kTfLiteInt8: {
PreluParams op_params;
op_params.input_offset = -input->params.zero_point;
op_params.alpha_offset = -alpha->params.zero_point;
op_params.output_offset = output->params.zero_point;
op_params.output_multiplier_1 = data->output_multiplier_1;
op_params.output_shift_1 = data->output_shift_1;
op_params.output_multiplier_2 = data->output_multiplier_2;
op_params.output_shift_2 = data->output_shift_2;
if (data->requires_broadcast) {
reference_ops::BroadcastPrelu4DSlow(
op_params, GetTensorShape(input), GetTensorData<int8_t>(input),
GetTensorShape(alpha), GetTensorData<int8_t>(alpha),
GetTensorShape(output), GetTensorData<int8_t>(output));
} else {
reference_ops::Prelu(
op_params, GetTensorShape(input), GetTensorData<int8_t>(input),
GetTensorShape(alpha), GetTensorData<int8_t>(alpha),
GetTensorShape(output), GetTensorData<int8_t>(output));
}
return kTfLiteOk;
}
default:
TF_LITE_KERNEL_LOG(
context,
"Only float32 and uint8 and int8 are supported currently, got %s.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
}
template <KernelType kernel_type, typename T>
void QuantizeLeakyRelu(const TfLiteTensor* input, TfLiteTensor* output,
const LeakyReluOpData* data) {
LeakyReluParams op_params;
op_params.input_offset = input->params.zero_point;
op_params.output_offset = output->params.zero_point;
op_params.output_multiplier_alpha = data->output_multiplier_alpha;
op_params.output_shift_alpha = data->output_shift_alpha;
op_params.output_multiplier_identity = data->output_multiplier_identity;
op_params.output_shift_identity = data->output_shift_identity;
if (kernel_type != KernelType::kReference && input->type == kTfLiteInt16) {
optimized_integer_ops::QuantizeLeakyRelu(
op_params, GetTensorShape(input), GetTensorData<int16>(input),
GetTensorShape(output), GetTensorData<int16>(output));
} else {
reference_ops::QuantizeLeakyRelu(
op_params, GetTensorShape(input), GetTensorData<T>(input),
GetTensorShape(output), GetTensorData<T>(output));
}
}
template <KernelType kernel_type>
TfLiteStatus LeakyReluEval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
const auto* params =
reinterpret_cast<TfLiteLeakyReluParams*>(node->builtin_data);
const LeakyReluOpData* data =
reinterpret_cast<LeakyReluOpData*>(node->user_data);
LeakyReluParams op_params;
switch (input->type) {
case kTfLiteFloat32: {
op_params.alpha = params->alpha;
optimized_ops::LeakyRelu(
op_params, GetTensorShape(input), GetTensorData<float>(input),
GetTensorShape(output), GetTensorData<float>(output));
return kTfLiteOk;
}
case kTfLiteUInt8: {
QuantizeLeakyRelu<kernel_type, uint8_t>(input, output, data);
return kTfLiteOk;
}
case kTfLiteInt8: {
QuantizeLeakyRelu<kernel_type, int8_t>(input, output, data);
return kTfLiteOk;
}
case kTfLiteInt16: {
QuantizeLeakyRelu<kernel_type, int16_t>(input, output, data);
return kTfLiteOk;
}
default:
TF_LITE_KERNEL_LOG(
context,
"Only float32, int8, int16 and uint8 is supported currently, got %s.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
}
TfLiteStatus EluPrepare(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
OpData* data = reinterpret_cast<OpData*>(node->user_data);
if (input->type == kTfLiteInt8) {
LUTPopulate<int8_t>(
input->params.scale, input->params.zero_point, output->params.scale,
output->params.zero_point,
[](float value) { return value < 0.0f ? std::expm1(value) : value; },
data->lut_int8);
}
return GenericPrepare(context, node);
}
TfLiteStatus EluEval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
switch (input->type) {
case kTfLiteFloat32: {
optimized_ops::Elu(GetTensorShape(input), GetTensorData<float>(input),
GetTensorShape(output), GetTensorData<float>(output));
return kTfLiteOk;
}
case kTfLiteInt8: {
OpData* data = reinterpret_cast<OpData*>(node->user_data);
optimized_integer_ops::LookupTable(
GetTensorData<int8_t>(input),
MatchingFlatSize(GetTensorShape(input), GetTensorShape(output)),
data->lut_int8, GetTensorData<int8_t>(output));
return kTfLiteOk;
}
default:
TF_LITE_KERNEL_LOG(
context, "Only float32 and int8 is supported currently, got %s.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
}
TfLiteStatus GeluPrepare(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
OpData* data = reinterpret_cast<OpData*>(node->user_data);
auto* params = reinterpret_cast<TfLiteGeluParams*>(node->builtin_data);
if (input->type == kTfLiteInt8) {
LUTPopulate<int8_t>(input->params.scale, input->params.zero_point,
output->params.scale, output->params.zero_point,
params->approximate
? reference_ops::GeluTransformApproximate
: reference_ops::GeluTransform,
data->lut_int8);
} else if (input->type == kTfLiteUInt8) {
LUTPopulate<uint8_t>(input->params.scale, input->params.zero_point,
output->params.scale, output->params.zero_point,
params->approximate
? reference_ops::GeluTransformApproximate
: reference_ops::GeluTransform,
data->lut_uint8);
}
return GenericPrepare(context, node);
}
TfLiteStatus GeluEval(TfLiteContext* context, TfLiteNode* node) {
OpData* data = reinterpret_cast<OpData*>(node->user_data);
auto* params = reinterpret_cast<TfLiteGeluParams*>(node->builtin_data);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
switch (input->type) {
case kTfLiteFloat32:
reference_ops::Gelu(GetTensorShape(input), GetTensorData<float>(input),
params->approximate, GetTensorShape(output),
GetTensorData<float>(output));
return kTfLiteOk;
case kTfLiteUInt8:
optimized_integer_ops::LookupTable(
GetTensorData<uint8_t>(input),
MatchingFlatSize(GetTensorShape(input), GetTensorShape(output)),
data->lut_uint8, GetTensorData<uint8_t>(output));
return kTfLiteOk;
case kTfLiteInt8:
optimized_integer_ops::LookupTable(
GetTensorData<int8_t>(input),
MatchingFlatSize(GetTensorShape(input), GetTensorShape(output)),
data->lut_int8, GetTensorData<int8_t>(output));
return kTfLiteOk;
default:
TF_LITE_KERNEL_LOG(
context, "Only float32, int8 and uint8 supported currently, got %s.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
}
}
TfLiteRegistration* Register_ELU() {
static TfLiteRegistration r = {activations::Init, activations::Free,
activations::EluPrepare, activations::EluEval};
return &r;
}
TfLiteRegistration* Register_RELU() {
static TfLiteRegistration r = {activations::ReluInit, activations::ReluFree,
activations::ReluPrepare,
activations::ReluEval};
return &r;
}
TfLiteRegistration* Register_RELU_N1_TO_1() {
static TfLiteRegistration r = {activations::ReluInit, activations::ReluFree,
activations::ReluPrepare,
activations::Relu1Eval};
return &r;
}
TfLiteRegistration* Register_RELU6() {
static TfLiteRegistration r = {activations::ReluInit, activations::ReluFree,
activations::ReluPrepare,
activations::Relu6Eval};
return &r;
}
TfLiteRegistration* Register_RELU_0_TO_1() {
static TfLiteRegistration r = {activations::ReluInit, activations::ReluFree,
activations::ReluPrepare,
activations::Relu0to1Eval};
return &r;
}
TfLiteRegistration* Register_TANH_REF() {
static TfLiteRegistration r = {
activations::Init, activations::Free,
activations::TanhPrepare<activations::kReference>,
activations::TanhEval<activations::kReference>};
return &r;
}
TfLiteRegistration* Register_TANH_GENERIC_OPT() {
static TfLiteRegistration r = {
activations::Init, activations::Free,
activations::TanhPrepare<activations::kGenericOptimized>,
activations::TanhEval<activations::kGenericOptimized>};
return &r;
}
TfLiteRegistration* Register_TANH_FIXED_POINT_OPT() {
static TfLiteRegistration r = {
activations::Init, activations::Free,
activations::TanhPrepare<activations::kFixedPointOptimized>,
activations::TanhEval<activations::kFixedPointOptimized>};
return &r;
}
TfLiteRegistration* Register_TANH() {
return Register_TANH_GENERIC_OPT();
}
TfLiteRegistration* Register_LOGISTIC_REF() {
static TfLiteRegistration r = {
activations::Init, activations::Free,
activations::SigmoidPrepare<activations::kReference>,
activations::SigmoidEval<activations::kReference>};
return &r;
}
TfLiteRegistration* Register_LOGISTIC_GENERIC_OPT() {
static TfLiteRegistration r = {
activations::Init, activations::Free,
activations::SigmoidPrepare<activations::kGenericOptimized>,
activations::SigmoidEval<activations::kGenericOptimized>};
return &r;
}
TfLiteRegistration* Register_LOGISTIC_FIXED_POINT_OPT() {
static TfLiteRegistration r = {
activations::Init, activations::Free,
activations::SigmoidPrepare<activations::kFixedPointOptimized>,
activations::SigmoidEval<activations::kFixedPointOptimized>};
return &r;
}
TfLiteRegistration* Register_LOGISTIC() {
return Register_LOGISTIC_GENERIC_OPT();
}
TfLiteRegistration* Register_SOFTMAX_REF() {
static TfLiteRegistration r = {
activations::SoftmaxInit,
activations::SoftmaxFree,
activations::SoftmaxPrepare<activations::kReference>,
activations::SoftmaxEval<activations::kReference>,
nullptr,
0,
nullptr,
0,
nullptr,
nullptr,
kTfLiteInplaceOpInput0Shared};
return &r;
}
TfLiteRegistration* Register_SOFTMAX() {
static TfLiteRegistration r = {
activations::SoftmaxInit,
activations::SoftmaxFree,
activations::SoftmaxPrepare<activations::kGenericOptimized>,
activations::SoftmaxEval<activations::kGenericOptimized>,
nullptr,
0,
nullptr,
0,
nullptr,
nullptr,
kTfLiteInplaceOpInput0Shared};
return &r;
}
TfLiteRegistration* Register_LOG_SOFTMAX_REF() {
static TfLiteRegistration r = {
activations::LogSoftmaxInit, activations::LogSoftmaxFree,
activations::LogSoftmaxPrepare<activations::kReference>,
activations::LogSoftmaxEval<activations::kReference>};
return &r;
}
TfLiteRegistration* Register_LOG_SOFTMAX() {
static TfLiteRegistration r = {
activations::LogSoftmaxInit, activations::LogSoftmaxFree,
activations::LogSoftmaxPrepare<activations::kGenericOptimized>,
activations::LogSoftmaxEval<activations::kGenericOptimized>};
return &r;
}
TfLiteRegistration* Register_PRELU_REF() {
static TfLiteRegistration r = {
activations::PreluInit, activations::PreluFree, activations::PreluPrepare,
activations::PreluEval<activations::kReference>};
return &r;
}
TfLiteRegistration* Register_PRELU() {
static TfLiteRegistration r = {
activations::PreluInit, activations::PreluFree, activations::PreluPrepare,
activations::PreluEval<activations::kGenericOptimized>};
return &r;
}
TfLiteRegistration* Register_LEAKY_RELU_REF() {
static TfLiteRegistration r = {
activations::LeakyReluInit, activations::LeakyReluFree,
activations::LeakyReluPrepare,
activations::LeakyReluEval<activations::kReference>};
return &r;
}
TfLiteRegistration* Register_LEAKY_RELU() {
static TfLiteRegistration r = {
activations::LeakyReluInit, activations::LeakyReluFree,
activations::LeakyReluPrepare,
activations::LeakyReluEval<activations::kGenericOptimized>};
return &r;
}
TfLiteRegistration* Register_HARD_SWISH() {
static TfLiteRegistration r = {
activations::HardSwishInit, activations::HardSwishFree,
activations::HardSwishPrepare,
activations::HardSwishEval<activations::kGenericOptimized>};
return &r;
}
TfLiteRegistration* Register_HARD_SWISH_REF() {
static TfLiteRegistration r = {
activations::HardSwishInit, activations::HardSwishFree,
activations::HardSwishPrepare,
activations::HardSwishEval<activations::kReference>};
return &r;
}
TfLiteRegistration* Register_GELU() {
static TfLiteRegistration r = {activations::Init, activations::Free,
activations::GeluPrepare,
activations::GeluEval};
return &r;
}
}
}
}

Unit Test - (Ground Truth):

#include <algorithm>
#include <cstdarg>
#include <cstdint>
#include <limits>
#include <random>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/hexagon/builders/tests/hexagon_delegate_op_model.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
using testing::ElementsAreArray;
namespace {
void GenerateUniformRandomVector(int size, float min, float max,
std::minstd_rand* random_engine,
std::vector<float>* result) {
result->resize(size);
for (int i = 0; i < size; i++) {
float random_value_scaled_0_1 =
(*random_engine)() *
(1.0f / static_cast<float>(std::minstd_rand::modulus));
(*result)[i] = min + (max - min) * random_value_scaled_0_1;
}
}
}
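// Single-op model that runs one activation op (RELU, RELU6, TANH, LOGISTIC or
// HARD_SWISH) through the Hexagon delegate test harness.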
class ActivationOpModel : public SingleOpModelWithHexagon {
public:
explicit ActivationOpModel(BuiltinOperator type, const TensorData& input,
const TensorData& output) {
input_ = AddInput(input);
output_ = AddOutput(output);
SetBuiltinOp(type, BuiltinOptions_NONE, 0);
BuildInterpreter({GetShape(input_)});
}
template <typename T>
void SetInput(const std::vector<float>& data) {
QuantizeAndPopulate<T>(input_, data);
}
template <typename T>
std::vector<float> GetDequantizedOutput() {
return Dequantize<T>(ExtractVector<T>(output_), GetScale(output_),
GetZeroPoint(output_));
}
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
protected:
BuiltinOperator op_code_;
int input_;
int output_;
};
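// RELU over a [-6, 6] quantized range: the input value 7 saturates at the
// representable maximum of 6.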
template <typename integer_type, TensorType tensor_dtype>
void ReluTestImpl() {
const float kMin = -6;
const float kMax = 6;
ActivationOpModel model(BuiltinOperator_RELU,
{tensor_dtype, {1, 3}, kMin, kMax},
{tensor_dtype, {1, 3}, kMin, kMax});
model.SetInput<integer_type>({1, 5, 7});
model.ApplyDelegateAndInvoke();
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 3}));
EXPECT_THAT(model.GetDequantizedOutput<integer_type>(),
ElementsAreArray(
ArrayFloatNear({1.0, 5.0, 6.0}, 0.03)));
}
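// RELU6 clamps to [0, 6]: -1 maps to 0 and 8 maps to 6.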
template <typename integer_type, TensorType tensor_dtype>
void Relu6TestImpl() {
const float kMin = -8;
const float kMax = 8;
ActivationOpModel model(BuiltinOperator_RELU6,
{tensor_dtype, {1, 3}, kMin, kMax},
{tensor_dtype, {1, 3}, kMin, kMax});
model.SetInput<integer_type>({4, -1.0, 8});
model.ApplyDelegateAndInvoke();
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 3}));
EXPECT_THAT(model.GetDequantizedOutput<integer_type>(),
ElementsAreArray(
ArrayFloatNear({4.0, 0.0, 6.0}, 0.03)));
}
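// TANH: the output range is fixed to [-1, 127/128] while the input range is
// eight times wider.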
template <typename integer_type, TensorType tensor_dtype>
void TanhTestImpl() {
const float kMin = -1;
const float kMax = 127.f / 128.f;
ActivationOpModel model(BuiltinOperator_TANH,
{tensor_dtype, {1, 3}, 8 * kMin, 8 * kMax},
{tensor_dtype, {1, 3}, kMin, kMax});
model.SetInput<integer_type>({4, -1.0, 8});
model.ApplyDelegateAndInvoke();
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 3}));
EXPECT_THAT(model.GetDequantizedOutput<integer_type>(),
ElementsAreArray(ArrayFloatNear({1.00392, -0.752941, 1.00392},
0.03)));
}
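// LOGISTIC: the output is quantized with a fixed scale of 1/256 and a zero
// point of 0 (uint8) or -128 (int8).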
template <typename integer_type, TensorType tensor_dtype>
void SigmoidTestImpl() {
const float kMin = -8;
const float kMax = 8;
TensorData output;
if (tensor_dtype == TensorType_UINT8) {
output = {tensor_dtype, {}, 0, 0, 1. / 256};
} else if (tensor_dtype == TensorType_INT8) {
output = {tensor_dtype, {}, 0, 0, 1. / 256, -128};
}
ActivationOpModel model(BuiltinOperator_LOGISTIC,
{tensor_dtype, {1, 3}, kMin, kMax},
output);
model.SetInput<integer_type>({4, -1.0, 8});
model.ApplyDelegateAndInvoke();
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 3}));
EXPECT_THAT(model.GetDequantizedOutput<integer_type>(),
ElementsAreArray(ArrayFloatNear({0.977, 0.266, 0.996},
0.03)));
}
TEST(ActivationOpModel, ReluOutput_UInt8) {
ReluTestImpl<uint8_t, TensorType_UINT8>();
}
TEST(ActivationOpModel, ReluOutput_Int8) {
ReluTestImpl<int8_t, TensorType_INT8>();
}
TEST(ActivationOpModel, Relu6Output_UInt8) {
Relu6TestImpl<uint8_t, TensorType_UINT8>();
}
TEST(ActivationOpModel, Relu6Output_Int8) {
Relu6TestImpl<int8_t, TensorType_INT8>();
}
TEST(ActivationOpModel, SigmoidOutput_UInt8) {
SigmoidTestImpl<uint8_t, TensorType_UINT8>();
}
TEST(ActivationOpModel, SigmoidOutput_Int8) {
SigmoidTestImpl<int8_t, TensorType_INT8>();
}
TEST(ActivationOpModel, TanhOutput_UInt8) {
TanhTestImpl<uint8_t, TensorType_UINT8>();
}
TEST(ActivationOpModel, TanhOutput_Int8) {
TanhTestImpl<int8_t, TensorType_INT8>();
}
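// Float reference for hard-swish: x * relu6(x + 3) / 6.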
void EvalTestReferenceHardSwish(int size, const std::vector<float>& input,
std::vector<float>* result) {
result->resize(size);
for (int i = 0; i < size; i++) {
const float in = input[i];
(*result)[i] = in * std::min(6.0f, std::max(0.0f, in + 3)) * (1.0f / 6.0f);
}
}
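// Runs the delegate on uniformly random inputs and compares against the float
// reference, with a tolerance derived from the coarser of the input/output
// quantization ranges.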
template <TensorType Tensor_Type, typename input_type>
void TestQuantizedHardSwish(int size, float input_min, float input_max,
float output_min, float output_max,
std::minstd_rand* random_engine) {
std::vector<float> float_input_values;
GenerateUniformRandomVector(size, input_min, input_max, random_engine,
&float_input_values);
std::vector<float> float_ref_output_values;
EvalTestReferenceHardSwish(size, float_input_values,
&float_ref_output_values);
for (float& val : float_ref_output_values) {
val = std::min(output_max, std::max(output_min, val));
}
ActivationOpModel m(
BuiltinOperator_HARD_SWISH,
{Tensor_Type, {1, 1, 1, size}, input_min, input_max},
{Tensor_Type, {1, 1, 1, size}, output_min, output_max});
m.SetInput<input_type>(float_input_values);
m.ApplyDelegateAndInvoke();
const std::vector<float> dequantized_output =
m.GetDequantizedOutput<input_type>();
const float quant_recommended_tolerance =
std::max(input_max - input_min, output_max - output_min) * (1.5f / 256.f);
const float kTolerance = std::max(0.03f, quant_recommended_tolerance);
EXPECT_THAT(dequantized_output, ElementsAreArray(ArrayFloatNear(
float_ref_output_values, kTolerance)));
}
template <TensorType Tensor_Type, typename input_type>
void HardSwishTestImpl() {
std::minstd_rand random_engine;
std::vector<std::pair<float, float>> minmax_pairs{{0.f, 1.f}, {-5.f, 10.f}};
for (const auto& input_minmax : minmax_pairs) {
for (const auto& output_minmax : minmax_pairs) {
float input_min = input_minmax.first;
float input_max = input_minmax.second;
float output_min = output_minmax.first;
float output_max = output_minmax.second;
for (int size : {1, 3, 40}) {
TestQuantizedHardSwish<Tensor_Type, input_type>(
size, input_min, input_max, output_min, output_max, &random_engine);
}
}
}
}
TEST(ActivationOpModel, HardSwishTestUInt8) {
HardSwishTestImpl<TensorType_UINT8, uint8_t>();
}
TEST(ActivationOpModel, HardSwishTestInt8) {
HardSwishTestImpl<TensorType_INT8, int8_t>();
}
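// Sweeps inputs across [-3, 3], where hard-swish is nonlinear, and checks that
// the mean error normalized by the larger quantization scale stays below
// tolerated_bias.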
template <TensorType Tensor_Type, typename input_type>
void HardSwishBiasTestImpl() {
float input_min = -11.654928f;
float input_max = 25.036512f;
float output_min = -0.3905796f;
float output_max = 24.50887f;
float tolerated_bias = 0.035;
const float quantized_type_range =
static_cast<float>(std::numeric_limits<int8_t>::max()) -
static_cast<float>(std::numeric_limits<int8_t>::min());
const float input_scale = (input_max - input_min) / quantized_type_range;
const float output_scale = (output_max - output_min) / quantized_type_range;
const float max_scale = std::max(output_scale, input_scale);
ASSERT_LE(input_min, -3.0f);
ASSERT_GE(input_max, 3.0f);
const int quantized_input_negative_three =
std::round(std::numeric_limits<input_type>::min() +
(-3.0f - input_min) / input_scale);
const int quantized_input_positive_three =
std::round(std::numeric_limits<input_type>::min() +
(3.0f - input_min) / input_scale);
std::vector<float> float_input_values;
for (int i = quantized_input_negative_three;
i <= quantized_input_positive_three; i++) {
float_input_values.push_back(
input_min + (i - std::numeric_limits<int8_t>::min()) * input_scale);
}
const int size = float_input_values.size();
std::vector<float> float_ref_output_values;
EvalTestReferenceHardSwish(size, float_input_values,
&float_ref_output_values);
for (float& val : float_ref_output_values) {
val = std::min(output_max, std::max(output_min, val));
}
ActivationOpModel m(
BuiltinOperator_HARD_SWISH,
{Tensor_Type, {1, 1, 1, size}, input_min, input_max},
{Tensor_Type, {1, 1, 1, size}, output_min, output_max});
m.SetInput<input_type>(float_input_values);
m.ApplyDelegateAndInvoke();
const std::vector<float> dequantized_output =
m.GetDequantizedOutput<input_type>();
float sum_diff = 0;
for (int i = 0; i < size; i++) {
sum_diff += dequantized_output[i] - float_ref_output_values[i];
}
const float bias = sum_diff / (size * max_scale);
EXPECT_LE(std::abs(bias), tolerated_bias);
}
TEST(ActivationOpModel, HardSwishBiasTest) {
HardSwishBiasTestImpl<TensorType_UINT8, uint8_t>();
}
TEST(ActivationOpModel, HardSwishBiasTestInt8) {
HardSwishBiasTestImpl<TensorType_INT8, int8_t>();
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/activations.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/hexagon/builders/tests/activations_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b383377e-f904-4454-a793-afa88b9c8c84 | cpp | tensorflow/tensorflow | reverse | tensorflow/lite/kernels/reverse.cc | tensorflow/lite/kernels/reverse_test.cc | #include <stdint.h>
#include <algorithm>
#include <array>
#include <cstring>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace reverse {
namespace {
constexpr int kInputTensor = 0;
constexpr int kAxisTensor = 1;
constexpr int kOutputTensor = 0;
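// Validates ranks and types: the axis tensor must be 1-D int32, the input may
// have at most 8 dimensions and at least as many dimensions as the axis has
// elements; the output is resized to match the input.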
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const TfLiteTensor* axis;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kAxisTensor, &axis));
TF_LITE_ENSURE_EQ(context, NumDimensions(axis), 1);
TF_LITE_ENSURE(context, NumDimensions(input) <= 8);
TF_LITE_ENSURE(context, NumDimensions(input) >= NumElements(axis));
if (input->type != kTfLiteInt32 && input->type != kTfLiteFloat32 &&
input->type != kTfLiteUInt8 && input->type != kTfLiteInt8 &&
input->type != kTfLiteInt16 && input->type != kTfLiteInt64 &&
input->type != kTfLiteBool) {
TF_LITE_KERNEL_LOG(context, "Type '%s' is not supported by reverse.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
if (axis->type != kTfLiteInt32) {
TF_LITE_KERNEL_LOG(context, "Axis Type '%s' is not supported by reverse.",
TfLiteTypeGetName(axis->type));
return kTfLiteError;
}
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TfLiteIntArray* output_shape = TfLiteIntArrayCopy(input->dims);
TF_LITE_ENSURE_TYPES_EQ(context, output->type, input->type);
return context->ResizeTensor(context, output, output_shape);
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const TfLiteTensor* axis_tensor;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kAxisTensor, &axis_tensor));
TF_LITE_ENSURE_EQ(context, axis_tensor->type, kTfLiteInt32);
const int num_axes = NumElements(axis_tensor);
TF_LITE_ENSURE(context, num_axes <= 8);
std::array<int32_t, 8> axes;
memcpy(axes.data(), GetTensorData<int32_t>(axis_tensor),
num_axes * sizeof(int32_t));
const int rank = NumDimensions(input);
for (int i = 0; i < num_axes; ++i) {
if (axes[i] < 0) {
axes[i] += rank;
}
TF_LITE_ENSURE(context, axes[i] >= 0 && axes[i] < rank);
}
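  // Only a contiguous ascending run of axes is supported: sort the axes and
  // then verify adjacency.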
std::sort(axes.begin(), axes.begin() + num_axes);
bool is_contiguous = true;
for (int i = 1; i < num_axes; ++i) {
if (axes[i - 1] + 1 != axes[i]) {
is_contiguous = false;
break;
}
}
if (!is_contiguous) {
TF_LITE_KERNEL_LOG(context, "Non-contiguous `axes` not supported");
return kTfLiteError;
}
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
switch (output->type) {
case kTfLiteFloat32: {
reference_ops::Reverse<float>(axes, num_axes, GetTensorShape(input),
GetTensorData<float>(input),
GetTensorData<float>(output));
break;
}
case kTfLiteUInt8:
case kTfLiteInt8: {
reference_ops::Reverse<uint8_t>(axes, num_axes, GetTensorShape(input),
GetTensorData<uint8_t>(input),
GetTensorData<uint8_t>(output));
break;
}
case kTfLiteInt16: {
reference_ops::Reverse<int16_t>(axes, num_axes, GetTensorShape(input),
GetTensorData<int16_t>(input),
GetTensorData<int16_t>(output));
break;
}
case kTfLiteInt32: {
reference_ops::Reverse<int32_t>(axes, num_axes, GetTensorShape(input),
GetTensorData<int32_t>(input),
GetTensorData<int32_t>(output));
break;
}
case kTfLiteInt64: {
reference_ops::Reverse<int64_t>(axes, num_axes, GetTensorShape(input),
GetTensorData<int64_t>(input),
GetTensorData<int64_t>(output));
break;
}
case kTfLiteBool: {
reference_ops::Reverse<bool>(axes, num_axes, GetTensorShape(input),
GetTensorData<bool>(input),
GetTensorData<bool>(output));
break;
}
default: {
TF_LITE_KERNEL_LOG(context, "Type '%s' is not supported by reverse.",
TfLiteTypeGetName(output->type));
return kTfLiteError;
}
}
return kTfLiteOk;
}
}  // namespace
}  // namespace reverse
TfLiteRegistration* Register_REVERSE_V2() {
static TfLiteRegistration r = {nullptr, nullptr, reverse::Prepare,
reverse::Eval};
return &r;
}
}  // namespace builtin
}  // namespace ops
} | #include <stdint.h>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
template <typename T>
class ReverseOpModel : public SingleOpModel {
public:
ReverseOpModel(const TensorData& input, const TensorData& axis) {
input_ = AddInput(input);
axis_ = AddInput(axis);
output_ = AddOutput({input.type, {}});
SetBuiltinOp(BuiltinOperator_REVERSE_V2, BuiltinOptions_ReverseV2Options,
CreateReverseV2Options(builder_).Union());
BuildInterpreter({GetShape(input_), GetShape(axis_)});
}
int input() { return input_; }
int axis() { return axis_; }
std::vector<T> GetOutput() { return ExtractVector<T>(output_); }
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
private:
int input_;
int axis_;
int output_;
};
TEST(ReverseOpTest, FloatOneDimension) {
ReverseOpModel<float> model({TensorType_FLOAT32, {4}},
{TensorType_INT32, {1}});
model.PopulateTensor<float>(model.input(), {1, 2, 3, 4});
model.PopulateTensor<int32_t>(model.axis(), {0});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(4));
EXPECT_THAT(model.GetOutput(), ElementsAreArray({4, 3, 2, 1}));
}
TEST(ReverseOpTest, FloatMultiDimensions) {
ReverseOpModel<float> model({TensorType_FLOAT32, {4, 3, 2}},
{TensorType_INT32, {1}});
model.PopulateTensor<float>(model.input(),
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24});
model.PopulateTensor<int32_t>(model.axis(), {1});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(4, 3, 2));
EXPECT_THAT(
model.GetOutput(),
ElementsAreArray({5, 6, 3, 4, 1, 2, 11, 12, 9, 10, 7, 8,
17, 18, 15, 16, 13, 14, 23, 24, 21, 22, 19, 20}));
}
TEST(ReverseOpTest, Int32OneDimension) {
ReverseOpModel<int32_t> model({TensorType_INT32, {4}},
{TensorType_INT32, {1}});
model.PopulateTensor<int32_t>(model.input(), {1, 2, 3, 4});
model.PopulateTensor<int32_t>(model.axis(), {0});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(4));
EXPECT_THAT(model.GetOutput(), ElementsAreArray({4, 3, 2, 1}));
}
TEST(ReverseOpTest, Int32MultiDimensions) {
ReverseOpModel<int32_t> model({TensorType_INT32, {4, 3, 2}},
{TensorType_INT32, {1}});
model.PopulateTensor<int32_t>(
model.input(), {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24});
model.PopulateTensor<int32_t>(model.axis(), {1});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(4, 3, 2));
EXPECT_THAT(
model.GetOutput(),
ElementsAreArray({5, 6, 3, 4, 1, 2, 11, 12, 9, 10, 7, 8,
17, 18, 15, 16, 13, 14, 23, 24, 21, 22, 19, 20}));
}
TEST(ReverseOpTest, Int32MultiDimensionsFirst) {
ReverseOpModel<int32_t> model({TensorType_INT32, {3, 3, 3}},
{TensorType_INT32, {1}});
model.PopulateTensor<int32_t>(
model.input(), {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27});
model.PopulateTensor<int32_t>(model.axis(), {0});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(3, 3, 3));
EXPECT_THAT(
model.GetOutput(),
ElementsAreArray({19, 20, 21, 22, 23, 24, 25, 26, 27, 10, 11, 12, 13, 14,
15, 16, 17, 18, 1, 2, 3, 4, 5, 6, 7, 8, 9}));
}
TEST(ReverseOpTest, Int32MultiDimensionsSecond) {
ReverseOpModel<int32_t> model({TensorType_INT32, {3, 3, 3}},
{TensorType_INT32, {1}});
model.PopulateTensor<int32_t>(
model.input(), {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27});
model.PopulateTensor<int32_t>(model.axis(), {1});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(3, 3, 3));
EXPECT_THAT(
model.GetOutput(),
ElementsAreArray({7, 8, 9, 4, 5, 6, 1, 2, 3, 16, 17, 18, 13, 14,
15, 10, 11, 12, 25, 26, 27, 22, 23, 24, 19, 20, 21}));
}
TEST(ReverseOpTest, Int32MultiDimensionsThird) {
ReverseOpModel<int32_t> model({TensorType_INT32, {3, 3, 3}},
{TensorType_INT32, {1}});
model.PopulateTensor<int32_t>(
model.input(), {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27});
model.PopulateTensor<int32_t>(model.axis(), {2});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(3, 3, 3));
EXPECT_THAT(
model.GetOutput(),
ElementsAreArray({3, 2, 1, 6, 5, 4, 9, 8, 7, 12, 11, 10, 15, 14,
13, 18, 17, 16, 21, 20, 19, 24, 23, 22, 27, 26, 25}));
}
TEST(ReverseOpTest, Int32MultiDimensionsFirstSecond) {
ReverseOpModel<int32_t> model({TensorType_INT32, {3, 3, 3}},
{TensorType_INT32, {2}});
model.PopulateTensor<int32_t>(
model.input(), {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27});
model.PopulateTensor<int32_t>(model.axis(), {0, 1});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(3, 3, 3));
EXPECT_THAT(
model.GetOutput(),
ElementsAreArray({25, 26, 27, 22, 23, 24, 19, 20, 21, 16, 17, 18, 13, 14,
15, 10, 11, 12, 7, 8, 9, 4, 5, 6, 1, 2, 3}));
}
TEST(ReverseOpTest, Int32MultiDimensionsSecondThird) {
ReverseOpModel<int32_t> model({TensorType_INT32, {3, 3, 3}},
{TensorType_INT32, {2}});
model.PopulateTensor<int32_t>(
model.input(), {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27});
model.PopulateTensor<int32_t>(model.axis(), {1, 2});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(3, 3, 3));
EXPECT_THAT(
model.GetOutput(),
ElementsAreArray({9, 8, 7, 6, 5, 4, 3, 2, 1, 18, 17, 16, 15, 14,
13, 12, 11, 10, 27, 26, 25, 24, 23, 22, 21, 20, 19}));
}
TEST(ReverseOpTest, Int32MultiDimensionsSecondFirst) {
ReverseOpModel<int32_t> model({TensorType_INT32, {3, 3, 3}},
{TensorType_INT32, {2}});
model.PopulateTensor<int32_t>(
model.input(), {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27});
model.PopulateTensor<int32_t>(model.axis(), {1, 0});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(3, 3, 3));
EXPECT_THAT(
model.GetOutput(),
ElementsAreArray({25, 26, 27, 22, 23, 24, 19, 20, 21, 16, 17, 18, 13, 14,
15, 10, 11, 12, 7, 8, 9, 4, 5, 6, 1, 2, 3}));
}
TEST(ReverseOpTest, Int32MultiDimensionsAll) {
ReverseOpModel<int32_t> model({TensorType_INT32, {3, 3, 3}},
{TensorType_INT32, {3}});
model.PopulateTensor<int32_t>(
model.input(), {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27});
model.PopulateTensor<int32_t>(model.axis(), {0, 1, 2});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(3, 3, 3));
EXPECT_THAT(
model.GetOutput(),
ElementsAreArray({27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14,
13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1}));
}
TEST(ReverseOpTest, Int32MultiDimensions8D) {
ReverseOpModel<int32_t> model({TensorType_INT32, {1, 1, 1, 1, 1, 1, 1, 3}},
{TensorType_INT32, {8}});
model.PopulateTensor<int32_t>(model.input(), {1, 2, 3});
model.PopulateTensor<int32_t>(model.axis(), {7, 6, 5, 4, 3, 2, 1, 0});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 1, 1, 1, 1, 3));
EXPECT_THAT(model.GetOutput(), ElementsAreArray({3, 2, 1}));
}
#if GTEST_HAS_DEATH_TEST
TEST(ReverseOpTest, Int32MultiDimensions9D) {
EXPECT_DEATH(
ReverseOpModel<int32_t>({TensorType_INT32, {1, 1, 1, 1, 1, 1, 1, 1, 3}},
{TensorType_INT32, {9}}),
"Cannot allocate tensors");
}
#endif
TEST(ReverseOpTest, Int64OneDimension) {
ReverseOpModel<int64_t> model({TensorType_INT64, {4}},
{TensorType_INT32, {1}});
model.PopulateTensor<int64_t>(model.input(), {1, 2, 3, 4});
model.PopulateTensor<int32_t>(model.axis(), {0});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(4));
EXPECT_THAT(model.GetOutput(), ElementsAreArray({4, 3, 2, 1}));
}
TEST(ReverseOpTest, Int64MultiDimensions) {
ReverseOpModel<int64_t> model({TensorType_INT64, {4, 3, 2}},
{TensorType_INT32, {1}});
model.PopulateTensor<int64_t>(
model.input(), {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24});
model.PopulateTensor<int32_t>(model.axis(), {1});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(4, 3, 2));
EXPECT_THAT(
model.GetOutput(),
ElementsAreArray({5, 6, 3, 4, 1, 2, 11, 12, 9, 10, 7, 8,
17, 18, 15, 16, 13, 14, 23, 24, 21, 22, 19, 20}));
}
TEST(ReverseOpTest, Uint8OneDimension) {
ReverseOpModel<uint8_t> model({TensorType_UINT8, {4}},
{TensorType_INT32, {1}});
model.PopulateTensor<uint8_t>(model.input(), {1, 2, 3, 4});
model.PopulateTensor<int32_t>(model.axis(), {0});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(4));
EXPECT_THAT(model.GetOutput(), ElementsAreArray({4, 3, 2, 1}));
}
TEST(ReverseOpTest, Uint8MultiDimensions) {
ReverseOpModel<uint8_t> model({TensorType_UINT8, {4, 3, 2}},
{TensorType_INT32, {1}});
model.PopulateTensor<uint8_t>(
model.input(), {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24});
model.PopulateTensor<int32_t>(model.axis(), {1});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(4, 3, 2));
EXPECT_THAT(
model.GetOutput(),
ElementsAreArray({5, 6, 3, 4, 1, 2, 11, 12, 9, 10, 7, 8,
17, 18, 15, 16, 13, 14, 23, 24, 21, 22, 19, 20}));
}
TEST(ReverseOpTest, Int8OneDimension) {
ReverseOpModel<int8_t> model({TensorType_INT8, {4}}, {TensorType_INT32, {1}});
model.PopulateTensor<int8_t>(model.input(), {1, 2, -1, -2});
model.PopulateTensor<int32_t>(model.axis(), {0});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(4));
EXPECT_THAT(model.GetOutput(), ElementsAreArray({-2, -1, 2, 1}));
}
TEST(ReverseOpTest, Int8MultiDimensions) {
ReverseOpModel<int8_t> model({TensorType_INT8, {4, 3, 2}},
{TensorType_INT32, {1}});
model.PopulateTensor<int8_t>(
model.input(), {-1, -2, -3, -4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, -21, -22, -23, -24});
model.PopulateTensor<int32_t>(model.axis(), {1});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(4, 3, 2));
EXPECT_THAT(
model.GetOutput(),
ElementsAreArray({5, 6, -3, -4, -1, -2, 11, 12, 9, 10, 7, 8,
17, 18, 15, 16, 13, 14, -23, -24, -21, -22, 19, 20}));
}
TEST(ReverseOpTest, Int16OneDimension) {
ReverseOpModel<int16_t> model({TensorType_INT16, {4}},
{TensorType_INT32, {1}});
model.PopulateTensor<int16_t>(model.input(), {1, 2, 3, 4});
model.PopulateTensor<int32_t>(model.axis(), {0});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(4));
EXPECT_THAT(model.GetOutput(), ElementsAreArray({4, 3, 2, 1}));
}
TEST(ReverseOpTest, Int16MultiDimensions) {
ReverseOpModel<int16_t> model({TensorType_INT16, {4, 3, 2}},
{TensorType_INT32, {1}});
model.PopulateTensor<int16_t>(
model.input(), {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24});
model.PopulateTensor<int32_t>(model.axis(), {1});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(4, 3, 2));
EXPECT_THAT(
model.GetOutput(),
ElementsAreArray({5, 6, 3, 4, 1, 2, 11, 12, 9, 10, 7, 8,
17, 18, 15, 16, 13, 14, 23, 24, 21, 22, 19, 20}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/reverse.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/reverse_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
93ad1401-b7f7-499b-be17-eb2c1f1b45de | cpp | tensorflow/tensorflow | detection_postprocess | tensorflow/lite/kernels/detection_postprocess.cc | tensorflow/lite/kernels/detection_postprocess_test.cc | #include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <algorithm>
#include <initializer_list>
#include <numeric>
#include <vector>
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace custom {
namespace detection_postprocess {
constexpr int kInputTensorBoxEncodings = 0;
constexpr int kInputTensorClassPredictions = 1;
constexpr int kInputTensorAnchors = 2;
constexpr int kOutputTensorDetectionBoxes = 0;
constexpr int kOutputTensorDetectionClasses = 1;
constexpr int kOutputTensorDetectionScores = 2;
constexpr int kOutputTensorNumDetections = 3;
constexpr int kNumCoordBox = 4;
constexpr int kBatchSize = 1;
constexpr int kNumDetectionsPerClass = 100;
struct BoxCornerEncoding {
float ymin;
float xmin;
float ymax;
float xmax;
};
struct CenterSizeEncoding {
float y;
float x;
float h;
float w;
};
static_assert(sizeof(BoxCornerEncoding) == sizeof(float) * kNumCoordBox,
"Size of BoxCornerEncoding is 4 float values");
static_assert(sizeof(CenterSizeEncoding) == sizeof(float) * kNumCoordBox,
"Size of CenterSizeEncoding is 4 float values");
struct OpData {
int max_detections;
int max_classes_per_detection;
int detections_per_class;
float non_max_suppression_score_threshold;
float intersection_over_union_threshold;
int num_classes;
bool use_regular_non_max_suppression;
CenterSizeEncoding scale_values;
int decoded_boxes_index;
int scores_index;
};
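// Parses the FlexBuffer custom options and reserves two scratch tensors: one
// for the decoded boxes and one for dequantized class scores.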
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
auto* op_data = new OpData;
const uint8_t* buffer_t = reinterpret_cast<const uint8_t*>(buffer);
const flexbuffers::Map& m = flexbuffers::GetRoot(buffer_t, length).AsMap();
op_data->max_detections = m["max_detections"].AsInt32();
op_data->max_classes_per_detection = m["max_classes_per_detection"].AsInt32();
if (m["detections_per_class"].IsNull())
op_data->detections_per_class = kNumDetectionsPerClass;
else
op_data->detections_per_class = m["detections_per_class"].AsInt32();
if (m["use_regular_nms"].IsNull())
op_data->use_regular_non_max_suppression = false;
else
op_data->use_regular_non_max_suppression = m["use_regular_nms"].AsBool();
op_data->non_max_suppression_score_threshold =
m["nms_score_threshold"].AsFloat();
op_data->intersection_over_union_threshold = m["nms_iou_threshold"].AsFloat();
op_data->num_classes = m["num_classes"].AsInt32();
op_data->scale_values.y = m["y_scale"].AsFloat();
op_data->scale_values.x = m["x_scale"].AsFloat();
op_data->scale_values.h = m["h_scale"].AsFloat();
op_data->scale_values.w = m["w_scale"].AsFloat();
context->AddTensors(context, 1, &op_data->decoded_boxes_index);
context->AddTensors(context, 1, &op_data->scores_index);
return op_data;
}
void Free(TfLiteContext* context, void* buffer) {
delete static_cast<OpData*>(buffer);
}
TfLiteStatus SetTensorSizes(TfLiteContext* context, TfLiteTensor* tensor,
std::initializer_list<int> values) {
TfLiteIntArray* size = TfLiteIntArrayCreate(values.size());
int index = 0;
for (const auto& v : values) {
size->data[index] = v;
++index;
}
return context->ResizeTensor(context, tensor, size);
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
auto* op_data = static_cast<OpData*>(node->user_data);
TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);
const TfLiteTensor* input_box_encodings;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensorBoxEncodings,
&input_box_encodings));
const TfLiteTensor* input_class_predictions;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensorClassPredictions,
&input_class_predictions));
const TfLiteTensor* input_anchors;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensorAnchors,
&input_anchors));
TF_LITE_ENSURE_EQ(context, NumDimensions(input_box_encodings), 3);
TF_LITE_ENSURE_EQ(context, NumDimensions(input_class_predictions), 3);
TF_LITE_ENSURE_EQ(context, NumDimensions(input_anchors), 2);
const int num_detected_boxes =
op_data->max_detections * op_data->max_classes_per_detection;
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 4);
TfLiteTensor* detection_boxes;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensorDetectionBoxes,
&detection_boxes));
detection_boxes->type = kTfLiteFloat32;
SetTensorSizes(context, detection_boxes,
{kBatchSize, num_detected_boxes, kNumCoordBox});
TfLiteTensor* detection_classes;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensorDetectionClasses,
&detection_classes));
detection_classes->type = kTfLiteFloat32;
SetTensorSizes(context, detection_classes, {kBatchSize, num_detected_boxes});
TfLiteTensor* detection_scores;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensorDetectionScores,
&detection_scores));
detection_scores->type = kTfLiteFloat32;
SetTensorSizes(context, detection_scores, {kBatchSize, num_detected_boxes});
TfLiteTensor* num_detections;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensorNumDetections,
&num_detections));
num_detections->type = kTfLiteFloat32;
SetTensorSizes(context, num_detections, {1});
TfLiteIntArrayFree(node->temporaries);
node->temporaries = TfLiteIntArrayCreate(2);
node->temporaries->data[0] = op_data->decoded_boxes_index;
node->temporaries->data[1] = op_data->scores_index;
TfLiteTensor* decoded_boxes = &context->tensors[op_data->decoded_boxes_index];
decoded_boxes->type = kTfLiteFloat32;
decoded_boxes->allocation_type = kTfLiteArenaRw;
SetTensorSizes(context, decoded_boxes,
{input_box_encodings->dims->data[1], kNumCoordBox});
TfLiteTensor* scores = &context->tensors[op_data->scores_index];
scores->type = kTfLiteFloat32;
scores->allocation_type = kTfLiteArenaRw;
SetTensorSizes(context, scores,
{input_class_predictions->dims->data[1],
input_class_predictions->dims->data[2]});
return kTfLiteOk;
}
class Dequantizer {
public:
Dequantizer(int zero_point, float scale)
: zero_point_(zero_point), scale_(scale) {}
float operator()(uint8 x) {
return (static_cast<float>(x) - zero_point_) * scale_;
}
private:
int zero_point_;
float scale_;
};
void DequantizeBoxEncodings(const TfLiteTensor* input_box_encodings, int idx,
float quant_zero_point, float quant_scale,
int length_box_encoding,
CenterSizeEncoding* box_centersize) {
const uint8* boxes =
GetTensorData<uint8>(input_box_encodings) + length_box_encoding * idx;
Dequantizer dequantize(quant_zero_point, quant_scale);
box_centersize->y = dequantize(boxes[0]);
box_centersize->x = dequantize(boxes[1]);
box_centersize->h = dequantize(boxes[2]);
box_centersize->w = dequantize(boxes[3]);
}
template <class T>
T ReInterpretTensor(const TfLiteTensor* tensor) {
const float* tensor_base = GetTensorData<float>(tensor);
return reinterpret_cast<T>(tensor_base);
}
template <class T>
T ReInterpretTensor(TfLiteTensor* tensor) {
float* tensor_base = GetTensorData<float>(tensor);
return reinterpret_cast<T>(tensor_base);
}
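// Decodes each center-size box encoding against its anchor into corner form
// (ymin, xmin, ymax, xmax), dequantizing uint8 inputs on the fly, and writes
// the result into the decoded_boxes scratch tensor.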
TfLiteStatus DecodeCenterSizeBoxes(TfLiteContext* context, TfLiteNode* node,
OpData* op_data) {
const TfLiteTensor* input_box_encodings;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensorBoxEncodings,
&input_box_encodings));
TF_LITE_ENSURE_EQ(context, input_box_encodings->dims->data[0], kBatchSize);
const int num_boxes = input_box_encodings->dims->data[1];
TF_LITE_ENSURE(context, input_box_encodings->dims->data[2] >= kNumCoordBox);
const TfLiteTensor* input_anchors;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensorAnchors,
&input_anchors));
CenterSizeEncoding box_centersize;
CenterSizeEncoding scale_values = op_data->scale_values;
CenterSizeEncoding anchor;
for (int idx = 0; idx < num_boxes; ++idx) {
switch (input_box_encodings->type) {
case kTfLiteUInt8:
DequantizeBoxEncodings(
input_box_encodings, idx,
static_cast<float>(input_box_encodings->params.zero_point),
static_cast<float>(input_box_encodings->params.scale),
input_box_encodings->dims->data[2], &box_centersize);
DequantizeBoxEncodings(
input_anchors, idx,
static_cast<float>(input_anchors->params.zero_point),
static_cast<float>(input_anchors->params.scale), kNumCoordBox,
&anchor);
break;
case kTfLiteFloat32: {
const int box_encoding_idx = idx * input_box_encodings->dims->data[2];
const float* boxes =
&(GetTensorData<float>(input_box_encodings)[box_encoding_idx]);
box_centersize = *reinterpret_cast<const CenterSizeEncoding*>(boxes);
TF_LITE_ENSURE_EQ(context, input_anchors->type, kTfLiteFloat32);
anchor =
ReInterpretTensor<const CenterSizeEncoding*>(input_anchors)[idx];
break;
}
default:
return kTfLiteError;
}
float ycenter = static_cast<float>(static_cast<double>(box_centersize.y) /
static_cast<double>(scale_values.y) *
static_cast<double>(anchor.h) +
static_cast<double>(anchor.y));
float xcenter = static_cast<float>(static_cast<double>(box_centersize.x) /
static_cast<double>(scale_values.x) *
static_cast<double>(anchor.w) +
static_cast<double>(anchor.x));
float half_h =
static_cast<float>(0.5 *
(std::exp(static_cast<double>(box_centersize.h) /
static_cast<double>(scale_values.h))) *
static_cast<double>(anchor.h));
float half_w =
static_cast<float>(0.5 *
(std::exp(static_cast<double>(box_centersize.w) /
static_cast<double>(scale_values.w))) *
static_cast<double>(anchor.w));
TfLiteTensor* decoded_boxes =
&context->tensors[op_data->decoded_boxes_index];
TF_LITE_ENSURE_EQ(context, decoded_boxes->type, kTfLiteFloat32);
auto& box = ReInterpretTensor<BoxCornerEncoding*>(decoded_boxes)[idx];
box.ymin = ycenter - half_h;
box.xmin = xcenter - half_w;
box.ymax = ycenter + half_h;
box.xmax = xcenter + half_w;
}
return kTfLiteOk;
}
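// Partial argsort: on return the first num_to_sort entries of `indices` point
// at the largest values in descending order.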
void DecreasingPartialArgSort(const float* values, int num_values,
int num_to_sort, int* indices) {
if (num_to_sort == 1) {
indices[0] = optimized_ops::ArgMaxVector(values, num_values);
} else {
std::iota(indices, indices + num_values, 0);
std::partial_sort(
indices, indices + num_to_sort, indices + num_values,
[&values](const int i, const int j) { return values[i] > values[j]; });
}
}
void DecreasingArgSort(const float* values, int num_values, int* indices) {
std::iota(indices, indices + num_values, 0);
std::stable_sort(
indices, indices + num_values,
[&values](const int i, const int j) { return values[i] > values[j]; });
}
void SelectDetectionsAboveScoreThreshold(const std::vector<float>& values,
const float threshold,
std::vector<float>* keep_values,
std::vector<int>* keep_indices) {
for (int i = 0; i < values.size(); i++) {
if (values[i] >= threshold) {
keep_values->emplace_back(values[i]);
keep_indices->emplace_back(i);
}
}
}
bool ValidateBoxes(const TfLiteTensor* decoded_boxes, const int num_boxes) {
for (int i = 0; i < num_boxes; ++i) {
auto& box = ReInterpretTensor<const BoxCornerEncoding*>(decoded_boxes)[i];
if (box.ymin > box.ymax || box.xmin > box.xmax) {
return false;
}
}
return true;
}
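// Intersection-over-union of two decoded boxes; degenerate (zero-area) boxes
// yield 0.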
float ComputeIntersectionOverUnion(const TfLiteTensor* decoded_boxes,
const int i, const int j) {
auto& box_i = ReInterpretTensor<const BoxCornerEncoding*>(decoded_boxes)[i];
auto& box_j = ReInterpretTensor<const BoxCornerEncoding*>(decoded_boxes)[j];
const float area_i = (box_i.ymax - box_i.ymin) * (box_i.xmax - box_i.xmin);
const float area_j = (box_j.ymax - box_j.ymin) * (box_j.xmax - box_j.xmin);
if (area_i <= 0 || area_j <= 0) return 0.0;
const float intersection_ymin = std::max<float>(box_i.ymin, box_j.ymin);
const float intersection_xmin = std::max<float>(box_i.xmin, box_j.xmin);
const float intersection_ymax = std::min<float>(box_i.ymax, box_j.ymax);
const float intersection_xmax = std::min<float>(box_i.xmax, box_j.xmax);
const float intersection_area =
std::max<float>(intersection_ymax - intersection_ymin, 0.0) *
std::max<float>(intersection_xmax - intersection_xmin, 0.0);
return intersection_area / (area_i + area_j - intersection_area);
}
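// Greedy single-class NMS: drop boxes below the score threshold, visit the
// rest in decreasing score order, and suppress any later box whose IoU with an
// already selected box exceeds the threshold.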
TfLiteStatus NonMaxSuppressionSingleClassHelper(
TfLiteContext* context, TfLiteNode* node, OpData* op_data,
const std::vector<float>& scores, int max_detections,
std::vector<int>* selected) {
const TfLiteTensor* input_box_encodings;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensorBoxEncodings,
&input_box_encodings));
const TfLiteTensor* decoded_boxes =
&context->tensors[op_data->decoded_boxes_index];
const int num_boxes = input_box_encodings->dims->data[1];
const float non_max_suppression_score_threshold =
op_data->non_max_suppression_score_threshold;
const float intersection_over_union_threshold =
op_data->intersection_over_union_threshold;
TF_LITE_ENSURE(context, (max_detections >= 0));
TF_LITE_ENSURE(context, (intersection_over_union_threshold > 0.0f) &&
(intersection_over_union_threshold <= 1.0f));
TF_LITE_ENSURE_EQ(context, decoded_boxes->type, kTfLiteFloat32);
TF_LITE_ENSURE(context, ValidateBoxes(decoded_boxes, num_boxes));
std::vector<int> keep_indices;
std::vector<float> keep_scores;
SelectDetectionsAboveScoreThreshold(
scores, non_max_suppression_score_threshold, &keep_scores, &keep_indices);
int num_scores_kept = keep_scores.size();
std::vector<int> sorted_indices;
sorted_indices.resize(num_scores_kept);
DecreasingArgSort(keep_scores.data(), num_scores_kept, sorted_indices.data());
const int num_boxes_kept = num_scores_kept;
const int output_size = std::min(num_boxes_kept, max_detections);
selected->clear();
int num_active_candidate = num_boxes_kept;
std::vector<uint8_t> active_box_candidate(num_boxes_kept, 1);
for (int i = 0; i < num_boxes_kept; ++i) {
if (num_active_candidate == 0 || selected->size() >= output_size) break;
if (active_box_candidate[i] == 1) {
selected->push_back(keep_indices[sorted_indices[i]]);
active_box_candidate[i] = 0;
num_active_candidate--;
} else {
continue;
}
for (int j = i + 1; j < num_boxes_kept; ++j) {
if (active_box_candidate[j] == 1) {
TF_LITE_ENSURE_EQ(context, decoded_boxes->type, kTfLiteFloat32);
float intersection_over_union = ComputeIntersectionOverUnion(
decoded_boxes, keep_indices[sorted_indices[i]],
keep_indices[sorted_indices[j]]);
if (intersection_over_union > intersection_over_union_threshold) {
active_box_candidate[j] = 0;
num_active_candidate--;
}
}
}
}
return kTfLiteOk;
}
struct BoxInfo {
int index;
float score;
};
struct NMSTaskParam {
TfLiteContext* context;
TfLiteNode* node;
OpData* op_data;
const float* scores;
int num_classes;
int num_boxes;
int label_offset;
int num_classes_with_background;
int num_detections_per_class;
int max_detections;
std::vector<int>& num_selected;
};
void InplaceMergeBoxInfo(std::vector<BoxInfo>& boxes, int mid_index,
int end_index) {
std::inplace_merge(
boxes.begin(), boxes.begin() + mid_index, boxes.begin() + end_index,
[](const BoxInfo& a, const BoxInfo& b) { return a.score >= b.score; });
}
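// Runs single-class NMS for every class in [col_begin, col_end] and merges the
// survivors into a score-sorted list capped at max_detections.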
TfLiteStatus ComputeNMSResult(const NMSTaskParam& nms_task_param, int col_begin,
int col_end, int& sorted_indices_size,
std::vector<BoxInfo>& resulted_sorted_box_info) {
std::vector<float> class_scores(nms_task_param.num_boxes);
std::vector<int> selected;
selected.reserve(nms_task_param.num_detections_per_class);
for (int col = col_begin; col <= col_end; ++col) {
const float* scores_base =
nms_task_param.scores + col + nms_task_param.label_offset;
for (int row = 0; row < nms_task_param.num_boxes; row++) {
class_scores[row] = *scores_base;
scores_base += nms_task_param.num_classes_with_background;
}
selected.clear();
TF_LITE_ENSURE_OK(
nms_task_param.context,
NonMaxSuppressionSingleClassHelper(
nms_task_param.context, nms_task_param.node, nms_task_param.op_data,
class_scores, nms_task_param.num_detections_per_class, &selected));
if (selected.empty()) {
continue;
}
for (int i = 0; i < selected.size(); ++i) {
resulted_sorted_box_info[sorted_indices_size + i].score =
class_scores[selected[i]];
resulted_sorted_box_info[sorted_indices_size + i].index =
(selected[i] * nms_task_param.num_classes_with_background + col +
nms_task_param.label_offset);
}
InplaceMergeBoxInfo(resulted_sorted_box_info, sorted_indices_size,
sorted_indices_size + selected.size());
sorted_indices_size =
std::min(sorted_indices_size + static_cast<int>(selected.size()),
nms_task_param.max_detections);
}
return kTfLiteOk;
}
struct NonMaxSuppressionWorkerTask : cpu_backend_threadpool::Task {
NonMaxSuppressionWorkerTask(NMSTaskParam& nms_task_param,
std::atomic<int>& next_col, int col_begin)
: nms_task_param(nms_task_param),
next_col(next_col),
col_begin(col_begin),
sorted_indices_size(0) {}
void Run() override {
sorted_box_info.resize(nms_task_param.num_detections_per_class +
nms_task_param.max_detections);
for (int col = col_begin; col < nms_task_param.num_classes;
col = (++next_col)) {
if (ComputeNMSResult(nms_task_param, col, col, sorted_indices_size,
sorted_box_info) != kTfLiteOk) {
break;
}
}
}
NMSTaskParam& nms_task_param;
std::atomic<int>& next_col;
const int col_begin;
int sorted_indices_size;
std::vector<BoxInfo> sorted_box_info;
};
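// "Regular" multi-class NMS: each class is suppressed independently (possibly
// across worker threads) and the per-class survivors are merged by score into
// the four output tensors.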
TfLiteStatus NonMaxSuppressionMultiClassRegularHelper(TfLiteContext* context,
TfLiteNode* node,
OpData* op_data,
const float* scores) {
const TfLiteTensor* input_box_encodings;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensorBoxEncodings,
&input_box_encodings));
const TfLiteTensor* input_class_predictions;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensorClassPredictions,
&input_class_predictions));
const TfLiteTensor* decoded_boxes =
&context->tensors[op_data->decoded_boxes_index];
TfLiteTensor* detection_boxes;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensorDetectionBoxes,
&detection_boxes));
TfLiteTensor* detection_classes;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensorDetectionClasses,
&detection_classes));
TfLiteTensor* detection_scores;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensorDetectionScores,
&detection_scores));
TfLiteTensor* num_detections;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensorNumDetections,
&num_detections));
const int num_boxes = input_box_encodings->dims->data[1];
const int num_classes = op_data->num_classes;
const int num_detections_per_class =
std::min(op_data->detections_per_class, op_data->max_detections);
const int max_detections = op_data->max_detections;
const int num_classes_with_background =
input_class_predictions->dims->data[2];
int label_offset = num_classes_with_background - num_classes;
TF_LITE_ENSURE(context, num_detections_per_class > 0);
int sorted_indices_size = 0;
std::vector<BoxInfo> box_info_after_regular_non_max_suppression(
max_detections + num_detections_per_class);
std::vector<int> num_selected(num_classes);
NMSTaskParam nms_task_param{context,
node,
op_data,
scores,
num_classes,
num_boxes,
label_offset,
num_classes_with_background,
num_detections_per_class,
max_detections,
num_selected};
int num_threads =
CpuBackendContext::GetFromContext(context)->max_num_threads();
if (num_threads == 1) {
TF_LITE_ENSURE_OK(
context, ComputeNMSResult(nms_task_param, 0,
num_classes - 1, sorted_indices_size,
box_info_after_regular_non_max_suppression));
} else {
std::atomic<int> next_col(num_threads);
std::vector<NonMaxSuppressionWorkerTask> tasks;
tasks.reserve(num_threads);
for (int i = 0; i < num_threads; ++i) {
tasks.emplace_back(
NonMaxSuppressionWorkerTask(nms_task_param, next_col, i));
}
cpu_backend_threadpool::Execute(tasks.size(), tasks.data(),
CpuBackendContext::GetFromContext(context));
for (int j = 0; j < tasks.size(); ++j) {
if (tasks[j].sorted_indices_size == 0) {
continue;
}
memcpy(&box_info_after_regular_non_max_suppression[sorted_indices_size],
&tasks[j].sorted_box_info[0],
sizeof(BoxInfo) * tasks[j].sorted_indices_size);
InplaceMergeBoxInfo(box_info_after_regular_non_max_suppression,
sorted_indices_size,
sorted_indices_size + tasks[j].sorted_indices_size);
sorted_indices_size = std::min(
sorted_indices_size + tasks[j].sorted_indices_size, max_detections);
}
}
for (int output_box_index = 0; output_box_index < max_detections;
output_box_index++) {
if (output_box_index < sorted_indices_size) {
const int anchor_index = floor(
box_info_after_regular_non_max_suppression[output_box_index].index /
num_classes_with_background);
const int class_index =
box_info_after_regular_non_max_suppression[output_box_index].index -
anchor_index * num_classes_with_background - label_offset;
const float selected_score =
box_info_after_regular_non_max_suppression[output_box_index].score;
TF_LITE_ENSURE_EQ(context, detection_boxes->type, kTfLiteFloat32);
TF_LITE_ENSURE_EQ(context, decoded_boxes->type, kTfLiteFloat32);
ReInterpretTensor<BoxCornerEncoding*>(detection_boxes)[output_box_index] =
ReInterpretTensor<const BoxCornerEncoding*>(
decoded_boxes)[anchor_index];
GetTensorData<float>(detection_classes)[output_box_index] = class_index;
GetTensorData<float>(detection_scores)[output_box_index] = selected_score;
} else {
TF_LITE_ENSURE_EQ(context, detection_boxes->type, kTfLiteFloat32);
ReInterpretTensor<BoxCornerEncoding*>(
detection_boxes)[output_box_index] = {0.0f, 0.0f, 0.0f, 0.0f};
GetTensorData<float>(detection_classes)[output_box_index] = 0.0f;
GetTensorData<float>(detection_scores)[output_box_index] = 0.0f;
}
}
GetTensorData<float>(num_detections)[0] = sorted_indices_size;
box_info_after_regular_non_max_suppression.clear();
return kTfLiteOk;
}
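// "Fast" multi-class NMS: take the best class score per anchor, run a single
// class-agnostic NMS pass over those maxima, then emit the top classes for
// each surviving anchor.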
TfLiteStatus NonMaxSuppressionMultiClassFastHelper(TfLiteContext* context,
TfLiteNode* node,
OpData* op_data,
const float* scores) {
const TfLiteTensor* input_box_encodings;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensorBoxEncodings,
&input_box_encodings));
const TfLiteTensor* input_class_predictions;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensorClassPredictions,
&input_class_predictions));
const TfLiteTensor* decoded_boxes =
&context->tensors[op_data->decoded_boxes_index];
TfLiteTensor* detection_boxes;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensorDetectionBoxes,
&detection_boxes));
TfLiteTensor* detection_classes;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensorDetectionClasses,
&detection_classes));
TfLiteTensor* detection_scores;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensorDetectionScores,
&detection_scores));
TfLiteTensor* num_detections;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensorNumDetections,
&num_detections));
const int num_boxes = input_box_encodings->dims->data[1];
const int num_classes = op_data->num_classes;
const int max_categories_per_anchor = op_data->max_classes_per_detection;
const int num_classes_with_background =
input_class_predictions->dims->data[2];
int label_offset = num_classes_with_background - num_classes;
TF_LITE_ENSURE(context, (max_categories_per_anchor > 0));
const int num_categories_per_anchor =
std::min(max_categories_per_anchor, num_classes);
std::vector<float> max_scores;
max_scores.resize(num_boxes);
std::vector<int> sorted_class_indices;
sorted_class_indices.resize(num_boxes * num_classes);
for (int row = 0; row < num_boxes; row++) {
const float* box_scores =
scores + row * num_classes_with_background + label_offset;
int* class_indices = sorted_class_indices.data() + row * num_classes;
DecreasingPartialArgSort(box_scores, num_classes, num_categories_per_anchor,
class_indices);
max_scores[row] = box_scores[class_indices[0]];
}
std::vector<int> selected;
TF_LITE_ENSURE_STATUS(NonMaxSuppressionSingleClassHelper(
context, node, op_data, max_scores, op_data->max_detections, &selected));
int output_box_index = 0;
for (const auto& selected_index : selected) {
const float* box_scores =
scores + selected_index * num_classes_with_background + label_offset;
const int* class_indices =
sorted_class_indices.data() + selected_index * num_classes;
for (int col = 0; col < num_categories_per_anchor; ++col) {
int box_offset = max_categories_per_anchor * output_box_index + col;
TF_LITE_ENSURE_EQ(context, detection_boxes->type, kTfLiteFloat32);
TF_LITE_ENSURE_EQ(context, decoded_boxes->type, kTfLiteFloat32);
ReInterpretTensor<BoxCornerEncoding*>(detection_boxes)[box_offset] =
ReInterpretTensor<const BoxCornerEncoding*>(
decoded_boxes)[selected_index];
GetTensorData<float>(detection_classes)[box_offset] = class_indices[col];
GetTensorData<float>(detection_scores)[box_offset] =
box_scores[class_indices[col]];
}
output_box_index++;
}
GetTensorData<float>(num_detections)[0] = output_box_index;
return kTfLiteOk;
}
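// Dequantizes uint8 class predictions into the float scores scratch tensor.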
void DequantizeClassPredictions(const TfLiteTensor* input_class_predictions,
const int num_boxes,
const int num_classes_with_background,
TfLiteTensor* scores) {
float quant_zero_point =
static_cast<float>(input_class_predictions->params.zero_point);
float quant_scale = static_cast<float>(input_class_predictions->params.scale);
tflite::DequantizationParams op_params;
op_params.zero_point = quant_zero_point;
op_params.scale = quant_scale;
const auto shape = RuntimeShape(1, num_boxes * num_classes_with_background);
optimized_ops::Dequantize(op_params, shape,
GetTensorData<uint8>(input_class_predictions),
shape, GetTensorData<float>(scores));
}
TfLiteStatus NonMaxSuppressionMultiClass(TfLiteContext* context,
TfLiteNode* node, OpData* op_data) {
const TfLiteTensor* input_box_encodings;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensorBoxEncodings,
&input_box_encodings));
const TfLiteTensor* input_class_predictions;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensorClassPredictions,
&input_class_predictions));
const int num_boxes = input_box_encodings->dims->data[1];
const int num_classes = op_data->num_classes;
TF_LITE_ENSURE_EQ(context, input_class_predictions->dims->data[0],
kBatchSize);
TF_LITE_ENSURE_EQ(context, input_class_predictions->dims->data[1], num_boxes);
const int num_classes_with_background =
input_class_predictions->dims->data[2];
TF_LITE_ENSURE(context, (num_classes_with_background - num_classes <= 1));
TF_LITE_ENSURE(context, (num_classes_with_background >= num_classes));
const TfLiteTensor* scores;
switch (input_class_predictions->type) {
case kTfLiteUInt8: {
TfLiteTensor* temporary_scores = &context->tensors[op_data->scores_index];
DequantizeClassPredictions(input_class_predictions, num_boxes,
num_classes_with_background, temporary_scores);
scores = temporary_scores;
} break;
case kTfLiteFloat32:
scores = input_class_predictions;
break;
default:
return kTfLiteError;
}
if (op_data->use_regular_non_max_suppression)
TF_LITE_ENSURE_STATUS(NonMaxSuppressionMultiClassRegularHelper(
context, node, op_data, GetTensorData<float>(scores)));
else
TF_LITE_ENSURE_STATUS(NonMaxSuppressionMultiClassFastHelper(
context, node, op_data, GetTensorData<float>(scores)));
return kTfLiteOk;
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE(context, (kBatchSize == 1));
auto* op_data = static_cast<OpData*>(node->user_data);
TF_LITE_ENSURE_STATUS(DecodeCenterSizeBoxes(context, node, op_data));
TF_LITE_ENSURE_STATUS(NonMaxSuppressionMultiClass(context, node, op_data));
return kTfLiteOk;
}
}  // namespace detection_postprocess
TfLiteRegistration* Register_DETECTION_POSTPROCESS() {
static TfLiteRegistration r = {
detection_postprocess::Init, detection_postprocess::Free,
detection_postprocess::Prepare, detection_postprocess::Eval};
return &r;
}
TfLiteRegistration* Register_TFLITE_DETECTION_POST_PROCESS() {
return Register_DETECTION_POSTPROCESS();
}
}  // namespace custom
}  // namespace ops
} | #include <stdint.h>
#include <initializer_list>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace ops {
namespace custom {
TfLiteRegistration* Register_DETECTION_POSTPROCESS();
namespace {
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
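// Builds the custom TFLite_Detection_PostProcess op with max_detections=3,
// num_classes=2, an IoU threshold of 0.5 and (y, x, h, w) scales of
// (10, 10, 5, 5).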
class BaseDetectionPostprocessOpModel : public SingleOpModel {
public:
BaseDetectionPostprocessOpModel(
const TensorData& input1, const TensorData& input2,
const TensorData& input3, const TensorData& output1,
const TensorData& output2, const TensorData& output3,
const TensorData& output4, int max_classes_per_detection = 1) {
input1_ = AddInput(input1);
input2_ = AddInput(input2);
input3_ = AddInput(input3);
output1_ = AddOutput(output1);
output2_ = AddOutput(output2);
output3_ = AddOutput(output3);
output4_ = AddOutput(output4);
flexbuffers::Builder fbb;
fbb.Map([&]() {
fbb.Int("max_detections", 3);
fbb.Int("max_classes_per_detection", max_classes_per_detection);
fbb.Float("nms_score_threshold", 0.0);
fbb.Float("nms_iou_threshold", 0.5);
fbb.Int("num_classes", 2);
fbb.Float("y_scale", 10.0);
fbb.Float("x_scale", 10.0);
fbb.Float("h_scale", 5.0);
fbb.Float("w_scale", 5.0);
});
fbb.Finish();
SetCustomOp("TFLite_Detection_PostProcess", fbb.GetBuffer(),
Register_DETECTION_POSTPROCESS);
BuildInterpreter({GetShape(input1_), GetShape(input2_), GetShape(input3_)});
}
int input1() { return input1_; }
int input2() { return input2_; }
int input3() { return input3_; }
template <class T>
void SetInput1(std::initializer_list<T> data) {
PopulateTensor<T>(input1_, data);
}
template <class T>
void SetInput2(std::initializer_list<T> data) {
PopulateTensor<T>(input2_, data);
}
template <class T>
void SetInput3(std::initializer_list<T> data) {
PopulateTensor<T>(input3_, data);
}
template <class T>
std::vector<T> GetOutput1() {
return ExtractVector<T>(output1_);
}
template <class T>
std::vector<T> GetOutput2() {
return ExtractVector<T>(output2_);
}
template <class T>
std::vector<T> GetOutput3() {
return ExtractVector<T>(output3_);
}
template <class T>
std::vector<T> GetOutput4() {
return ExtractVector<T>(output4_);
}
std::vector<int> GetOutputShape1() { return GetTensorShape(output1_); }
std::vector<int> GetOutputShape2() { return GetTensorShape(output2_); }
std::vector<int> GetOutputShape3() { return GetTensorShape(output3_); }
std::vector<int> GetOutputShape4() { return GetTensorShape(output4_); }
protected:
int input1_;
int input2_;
int input3_;
int output1_;
int output2_;
int output3_;
int output4_;
};
TEST(DetectionPostprocessOpTest, FloatTest) {
BaseDetectionPostprocessOpModel m(
{TensorType_FLOAT32, {1, 6, 4}}, {TensorType_FLOAT32, {1, 6, 3}},
{TensorType_FLOAT32, {6, 4}}, {TensorType_FLOAT32, {}},
{TensorType_FLOAT32, {}}, {TensorType_FLOAT32, {}},
{TensorType_FLOAT32, {}});
m.SetInput1<float>({
0.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, -1.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0
});
m.SetInput2<float>({0., .9, .8, 0., .75, .72, 0., .6, .5, 0., .93, .95, 0.,
.5, .4, 0., .3, .2});
m.SetInput3<float>({
0.5, 0.5, 1.0, 1.0,
0.5, 0.5, 1.0, 1.0,
0.5, 0.5, 1.0, 1.0,
0.5, 10.5, 1.0, 1.0,
0.5, 10.5, 1.0, 1.0,
0.5, 100.5, 1.0, 1.0
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
std::vector<int> output_shape1 = m.GetOutputShape1();
EXPECT_THAT(output_shape1, ElementsAre(1, 3, 4));
EXPECT_THAT(
m.GetOutput1<float>(),
ElementsAreArray(ArrayFloatNear(
{0.0, 10.0, 1.0, 11.0, 0.0, 0.0, 1.0, 1.0, 0.0, 100.0, 1.0, 101.0},
1e-1)));
std::vector<int> output_shape2 = m.GetOutputShape2();
EXPECT_THAT(output_shape2, ElementsAre(1, 3));
EXPECT_THAT(m.GetOutput2<float>(),
ElementsAreArray(ArrayFloatNear({1, 0, 0}, 1e-4)));
std::vector<int> output_shape3 = m.GetOutputShape3();
EXPECT_THAT(output_shape3, ElementsAre(1, 3));
EXPECT_THAT(m.GetOutput3<float>(),
ElementsAreArray(ArrayFloatNear({0.95, 0.9, 0.3}, 1e-4)));
std::vector<int> output_shape4 = m.GetOutputShape4();
EXPECT_THAT(output_shape4, ElementsAre(1));
EXPECT_THAT(m.GetOutput4<float>(),
ElementsAreArray(ArrayFloatNear({3.0}, 1e-4)));
}
TEST(DetectionPostprocessOpTest, FloatTestWithDegeneratedBox) {
BaseDetectionPostprocessOpModel m(
{TensorType_FLOAT32, {1, 2, 4}}, {TensorType_FLOAT32, {1, 2, 3}},
{TensorType_FLOAT32, {2, 4}}, {TensorType_FLOAT32, {}},
{TensorType_FLOAT32, {}}, {TensorType_FLOAT32, {}},
{TensorType_FLOAT32, {}});
m.SetInput1<float>({
0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0,
});
m.SetInput2<float>({
0., .9, .8,
0., .2, .7
});
m.SetInput3<float>({
0.5, 0.5, 1.0, 1.0,
0.5, 0.5, 0.0, 0.0
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
std::vector<int> output_shape4 = m.GetOutputShape4();
EXPECT_THAT(output_shape4, ElementsAre(1));
const int num_detections = static_cast<int>(m.GetOutput4<float>()[0]);
EXPECT_EQ(num_detections, 2);
std::vector<int> output_shape1 = m.GetOutputShape1();
EXPECT_THAT(output_shape1, ElementsAre(1, 3, 4));
std::vector<float> detection_boxes = m.GetOutput1<float>();
detection_boxes.resize(num_detections * 4);
EXPECT_THAT(detection_boxes,
ElementsAreArray(ArrayFloatNear({0.0, 0.0, 1.0, 1.0,
0.5, 0.5, 0.5, 0.5},
1e-1)));
std::vector<int> output_shape2 = m.GetOutputShape2();
EXPECT_THAT(output_shape2, ElementsAre(1, 3));
std::vector<float> detection_classes = m.GetOutput2<float>();
detection_classes.resize(num_detections);
EXPECT_THAT(detection_classes,
ElementsAreArray(ArrayFloatNear({0, 1}, 1e-4)));
std::vector<int> output_shape3 = m.GetOutputShape3();
EXPECT_THAT(output_shape3, ElementsAre(1, 3));
std::vector<float> detection_scores = m.GetOutput3<float>();
detection_scores.resize(num_detections);
EXPECT_THAT(detection_scores,
ElementsAreArray(ArrayFloatNear({0.9, 0.7}, 1e-4)));
}
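// Same scenario as FloatTest with uint8-quantized inputs; tolerances are
// relaxed (3e-1 for boxes, 1e-1 elsewhere) to absorb quantization error.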
TEST(DetectionPostprocessOpTest, QuantizedTest) {
BaseDetectionPostprocessOpModel m(
{TensorType_UINT8, {1, 6, 4}, -1.0, 1.0},
{TensorType_UINT8, {1, 6, 3}, 0.0, 1.0},
{TensorType_UINT8, {6, 4}, 0.0, 100.5}, {TensorType_FLOAT32, {}},
{TensorType_FLOAT32, {}}, {TensorType_FLOAT32, {}},
{TensorType_FLOAT32, {}});
std::vector<std::vector<float>> inputs1 = {{
0.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, -1.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0
}};
m.QuantizeAndPopulate<uint8_t>(m.input1(), inputs1[0]);
std::vector<std::vector<float>> inputs2 = {{0., .9, .8, 0., .75, .72, 0., .6,
.5, 0., .93, .95, 0., .5, .4, 0.,
.3, .2}};
m.QuantizeAndPopulate<uint8_t>(m.input2(), inputs2[0]);
std::vector<std::vector<float>> inputs3 = {{
0.5, 0.5, 1.0, 1.0,
0.5, 0.5, 1.0, 1.0,
0.5, 0.5, 1.0, 1.0,
0.5, 10.5, 1.0, 1.0,
0.5, 10.5, 1.0, 1.0,
0.5, 100.5, 1.0, 1.0
}};
m.QuantizeAndPopulate<uint8_t>(m.input3(), inputs3[0]);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
std::vector<int> output_shape1 = m.GetOutputShape1();
EXPECT_THAT(output_shape1, ElementsAre(1, 3, 4));
EXPECT_THAT(
m.GetOutput1<float>(),
ElementsAreArray(ArrayFloatNear(
{0.0, 10.0, 1.0, 11.0, 0.0, 0.0, 1.0, 1.0, 0.0, 100.0, 1.0, 101.0},
3e-1)));
std::vector<int> output_shape2 = m.GetOutputShape2();
EXPECT_THAT(output_shape2, ElementsAre(1, 3));
EXPECT_THAT(m.GetOutput2<float>(),
ElementsAreArray(ArrayFloatNear({1, 0, 0}, 1e-1)));
std::vector<int> output_shape3 = m.GetOutputShape3();
EXPECT_THAT(output_shape3, ElementsAre(1, 3));
EXPECT_THAT(m.GetOutput3<float>(),
ElementsAreArray(ArrayFloatNear({0.95, 0.9, 0.3}, 1e-1)));
std::vector<int> output_shape4 = m.GetOutputShape4();
EXPECT_THAT(output_shape4, ElementsAre(1));
EXPECT_THAT(m.GetOutput4<float>(),
ElementsAreArray(ArrayFloatNear({3.0}, 1e-1)));
}
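// With max_classes_per_detection = 2, each of the three detections reports its
// two best classes, giving six (box, class, score) entries.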
TEST(DetectionPostprocessOpTest, MaxClass2Test) {
BaseDetectionPostprocessOpModel m(
{TensorType_FLOAT32, {1, 6, 4}}, {TensorType_FLOAT32, {1, 6, 3}},
{TensorType_FLOAT32, {6, 4}}, {TensorType_FLOAT32, {}},
{TensorType_FLOAT32, {}}, {TensorType_FLOAT32, {}},
{TensorType_FLOAT32, {}}, 2);
m.SetInput1<float>({
0.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, -1.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0
});
m.SetInput2<float>({0., .9, .8, 0., .75, .72, 0., .6, .5, 0., .93, .95, 0.,
.5, .4, 0., .3, .2});
m.SetInput3<float>({
0.5, 0.5, 1.0, 1.0,
0.5, 0.5, 1.0, 1.0,
0.5, 0.5, 1.0, 1.0,
0.5, 10.5, 1.0, 1.0,
0.5, 10.5, 1.0, 1.0,
0.5, 100.5, 1.0, 1.0
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
std::vector<int> output_shape1 = m.GetOutputShape1();
EXPECT_THAT(output_shape1, ElementsAre(1, 6, 4));
EXPECT_THAT(m.GetOutput1<float>(),
ElementsAreArray(ArrayFloatNear(
{0.0, 10.0, 1.0, 11.0, 0.0, 10.0, 1.0, 11.0,
0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0,
0.0, 100.0, 1.0, 101.0, 0.0, 100.0, 1.0, 101.0},
1e-1)));
std::vector<int> output_shape2 = m.GetOutputShape2();
EXPECT_THAT(output_shape2, ElementsAre(1, 6));
EXPECT_THAT(m.GetOutput2<float>(),
ElementsAreArray(ArrayFloatNear({1, 0, 0, 1, 0, 1}, 1e-4)));
std::vector<int> output_shape3 = m.GetOutputShape3();
EXPECT_THAT(output_shape3, ElementsAre(1, 6));
EXPECT_THAT(
m.GetOutput3<float>(),
ElementsAreArray(ArrayFloatNear({0.95, .93, 0.9, 0.8, 0.3, 0.2}, 1e-4)));
std::vector<int> output_shape4 = m.GetOutputShape4();
EXPECT_THAT(output_shape4, ElementsAre(1));
EXPECT_THAT(m.GetOutput4<float>(),
ElementsAreArray(ArrayFloatNear({3.0}, 1e-4)));
}
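// Variant of the base test model that also sets detections_per_class = 1 and
// exposes the use_regular_nms attribute and the interpreter thread count.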
class DetectionPostprocessOpModelwithRegularNMS : public SingleOpModel {
public:
DetectionPostprocessOpModelwithRegularNMS(
const TensorData& input1, const TensorData& input2,
const TensorData& input3, const TensorData& output1,
const TensorData& output2, const TensorData& output3,
const TensorData& output4, bool use_regular_nms, int num_threads = 1) {
input1_ = AddInput(input1);
input2_ = AddInput(input2);
input3_ = AddInput(input3);
output1_ = AddOutput(output1);
output2_ = AddOutput(output2);
output3_ = AddOutput(output3);
output4_ = AddOutput(output4);
flexbuffers::Builder fbb;
fbb.Map([&]() {
fbb.Int("max_detections", 3);
fbb.Int("max_classes_per_detection", 1);
fbb.Int("detections_per_class", 1);
fbb.Bool("use_regular_nms", use_regular_nms);
fbb.Float("nms_score_threshold", 0.0);
fbb.Float("nms_iou_threshold", 0.5);
fbb.Int("num_classes", 2);
fbb.Float("y_scale", 10.0);
fbb.Float("x_scale", 10.0);
fbb.Float("h_scale", 5.0);
fbb.Float("w_scale", 5.0);
});
fbb.Finish();
SetCustomOp("TFLite_Detection_PostProcess", fbb.GetBuffer(),
Register_DETECTION_POSTPROCESS);
BuildInterpreter({GetShape(input1_), GetShape(input2_), GetShape(input3_)},
num_threads,
false,
true);
}
int input1() { return input1_; }
int input2() { return input2_; }
int input3() { return input3_; }
template <class T>
void SetInput1(std::initializer_list<T> data) {
PopulateTensor<T>(input1_, data);
}
template <class T>
void SetInput2(std::initializer_list<T> data) {
PopulateTensor<T>(input2_, data);
}
template <class T>
void SetInput3(std::initializer_list<T> data) {
PopulateTensor<T>(input3_, data);
}
template <class T>
std::vector<T> GetOutput1() {
return ExtractVector<T>(output1_);
}
template <class T>
std::vector<T> GetOutput2() {
return ExtractVector<T>(output2_);
}
template <class T>
std::vector<T> GetOutput3() {
return ExtractVector<T>(output3_);
}
template <class T>
std::vector<T> GetOutput4() {
return ExtractVector<T>(output4_);
}
std::vector<int> GetOutputShape1() { return GetTensorShape(output1_); }
std::vector<int> GetOutputShape2() { return GetTensorShape(output2_); }
std::vector<int> GetOutputShape3() { return GetTensorShape(output3_); }
std::vector<int> GetOutputShape4() { return GetTensorShape(output4_); }
protected:
int input1_;
int input2_;
int input3_;
int output1_;
int output2_;
int output3_;
int output4_;
};
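// use_regular_nms = false selects fast NMS; expectations match FloatTest above.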
TEST(DetectionPostprocessOpTest, FloatTestFastNMS) {
DetectionPostprocessOpModelwithRegularNMS m(
{TensorType_FLOAT32, {1, 6, 4}}, {TensorType_FLOAT32, {1, 6, 3}},
{TensorType_FLOAT32, {6, 4}}, {TensorType_FLOAT32, {}},
{TensorType_FLOAT32, {}}, {TensorType_FLOAT32, {}},
{TensorType_FLOAT32, {}}, false);
m.SetInput1<float>({
0.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, -1.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0
});
m.SetInput2<float>({0., .9, .8, 0., .75, .72, 0., .6, .5, 0., .93, .95, 0.,
.5, .4, 0., .3, .2});
m.SetInput3<float>({
0.5, 0.5, 1.0, 1.0,
0.5, 0.5, 1.0, 1.0,
0.5, 0.5, 1.0, 1.0,
0.5, 10.5, 1.0, 1.0,
0.5, 10.5, 1.0, 1.0,
0.5, 100.5, 1.0, 1.0
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
std::vector<int> output_shape1 = m.GetOutputShape1();
EXPECT_THAT(output_shape1, ElementsAre(1, 3, 4));
EXPECT_THAT(
m.GetOutput1<float>(),
ElementsAreArray(ArrayFloatNear(
{0.0, 10.0, 1.0, 11.0, 0.0, 0.0, 1.0, 1.0, 0.0, 100.0, 1.0, 101.0},
1e-1)));
std::vector<int> output_shape2 = m.GetOutputShape2();
EXPECT_THAT(output_shape2, ElementsAre(1, 3));
EXPECT_THAT(m.GetOutput2<float>(),
ElementsAreArray(ArrayFloatNear({1, 0, 0}, 1e-4)));
std::vector<int> output_shape3 = m.GetOutputShape3();
EXPECT_THAT(output_shape3, ElementsAre(1, 3));
EXPECT_THAT(m.GetOutput3<float>(),
ElementsAreArray(ArrayFloatNear({0.95, 0.9, 0.3}, 1e-4)));
std::vector<int> output_shape4 = m.GetOutputShape4();
EXPECT_THAT(output_shape4, ElementsAre(1));
EXPECT_THAT(m.GetOutput4<float>(),
ElementsAreArray(ArrayFloatNear({3.0}, 1e-4)));
}
TEST(DetectionPostprocessOpTest, QuantizedTestFastNMS) {
DetectionPostprocessOpModelwithRegularNMS m(
{TensorType_UINT8, {1, 6, 4}, -1.0, 1.0},
{TensorType_UINT8, {1, 6, 3}, 0.0, 1.0},
{TensorType_UINT8, {6, 4}, 0.0, 100.5}, {TensorType_FLOAT32, {}},
{TensorType_FLOAT32, {}}, {TensorType_FLOAT32, {}},
{TensorType_FLOAT32, {}}, false);
std::vector<std::vector<float>> inputs1 = {{
0.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, -1.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0
}};
m.QuantizeAndPopulate<uint8_t>(m.input1(), inputs1[0]);
std::vector<std::vector<float>> inputs2 = {{0., .9, .8, 0., .75, .72, 0., .6,
.5, 0., .93, .95, 0., .5, .4, 0.,
.3, .2}};
m.QuantizeAndPopulate<uint8_t>(m.input2(), inputs2[0]);
std::vector<std::vector<float>> inputs3 = {{
0.5, 0.5, 1.0, 1.0,
0.5, 0.5, 1.0, 1.0,
0.5, 0.5, 1.0, 1.0,
0.5, 10.5, 1.0, 1.0,
0.5, 10.5, 1.0, 1.0,
0.5, 100.5, 1.0, 1.0
}};
m.QuantizeAndPopulate<uint8_t>(m.input3(), inputs3[0]);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
std::vector<int> output_shape1 = m.GetOutputShape1();
EXPECT_THAT(output_shape1, ElementsAre(1, 3, 4));
EXPECT_THAT(
m.GetOutput1<float>(),
ElementsAreArray(ArrayFloatNear(
{0.0, 10.0, 1.0, 11.0, 0.0, 0.0, 1.0, 1.0, 0.0, 100.0, 1.0, 101.0},
3e-1)));
std::vector<int> output_shape2 = m.GetOutputShape2();
EXPECT_THAT(output_shape2, ElementsAre(1, 3));
EXPECT_THAT(m.GetOutput2<float>(),
ElementsAreArray(ArrayFloatNear({1, 0, 0}, 1e-1)));
std::vector<int> output_shape3 = m.GetOutputShape3();
EXPECT_THAT(output_shape3, ElementsAre(1, 3));
EXPECT_THAT(m.GetOutput3<float>(),
ElementsAreArray(ArrayFloatNear({0.95, 0.9, 0.3}, 1e-1)));
std::vector<int> output_shape4 = m.GetOutputShape4();
EXPECT_THAT(output_shape4, ElementsAre(1));
EXPECT_THAT(m.GetOutput4<float>(),
ElementsAreArray(ArrayFloatNear({3.0}, 1e-1)));
}
class DetectionPostprocessOpRegularTest
: public ::testing::TestWithParam<::testing::tuple<TensorType, int>> {
protected:
DetectionPostprocessOpRegularTest()
: tensor_type_(::testing::get<0>(GetParam())),
num_threads_(::testing::get<1>(GetParam())) {}
TensorType tensor_type_;
int num_threads_;
};
INSTANTIATE_TEST_SUITE_P(
DetectionPostprocessOpRegularTest, DetectionPostprocessOpRegularTest,
::testing::Combine(::testing::Values(TensorType_FLOAT32, TensorType_UINT8),
::testing::Values(1, 2)));
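// Regular (per-class) NMS with detections_per_class = 1, exercised for float
// and uint8 inputs on one or two threads.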
TEST_P(DetectionPostprocessOpRegularTest, RegularNMS) {
TensorData input1, input2, input3;
if (tensor_type_ == TensorType_UINT8) {
input1 = {tensor_type_, {1, 6, 4}, -1.0, 1.0};
input2 = {tensor_type_, {1, 6, 3}, 0.0, 1.0};
input3 = {tensor_type_, {6, 4}, 0.0, 100.5};
} else {
input1 = {tensor_type_, {1, 6, 4}};
input2 = {tensor_type_, {1, 6, 3}};
input3 = {tensor_type_, {6, 4}};
}
DetectionPostprocessOpModelwithRegularNMS m(
input1, input2, input3, {TensorType_FLOAT32, {}},
{TensorType_FLOAT32, {}}, {TensorType_FLOAT32, {}},
{TensorType_FLOAT32, {}}, true, num_threads_);
auto inputs1 = {
0.0f, 0.0f, 0.0f, 0.0f,
0.0f, 1.0f, 0.0f, 0.0f,
0.0f, -1.0f, 0.0f, 0.0f,
0.0f, 0.0f, 0.0f, 0.0f,
0.0f, 1.0f, 0.0f, 0.0f,
0.0f, 0.0f, 0.0f, 0.0f
};
if (tensor_type_ == TensorType_UINT8) {
m.QuantizeAndPopulate<uint8_t>(m.input1(), std::vector<float>{inputs1});
} else {
m.SetInput1<float>(inputs1);
}
auto inputs2 = {0.f, .9f, .8f, 0.f, .75f, .72f, 0.f, .6f, .5f,
0.f, .93f, .95f, 0.f, .5f, .4f, 0.f, .3f, .2f};
if (tensor_type_ == TensorType_UINT8) {
m.QuantizeAndPopulate<uint8_t>(m.input2(), std::vector<float>{inputs2});
} else {
m.SetInput2<float>(inputs2);
}
auto inputs3 = {
0.5f, 0.5f, 1.0f, 1.0f,
0.5f, 0.5f, 1.0f, 1.0f,
0.5f, 0.5f, 1.0f, 1.0f,
0.5f, 10.5f, 1.0f, 1.0f,
0.5f, 10.5f, 1.0f, 1.0f,
0.5f, 100.5f, 1.0f, 1.0f
};
if (tensor_type_ == TensorType_UINT8) {
m.QuantizeAndPopulate<uint8_t>(m.input3(), std::vector<float>{inputs3});
} else {
m.SetInput3<float>(inputs3);
}
ASSERT_EQ(m.Invoke(), kTfLiteOk);
std::vector<int> output_shape1 = m.GetOutputShape1();
EXPECT_THAT(output_shape1, ElementsAre(1, 3, 4));
if (tensor_type_ == TensorType_UINT8) {
EXPECT_THAT(
m.GetOutput1<float>(),
ElementsAreArray(ArrayFloatNear(
{0.0, 10.0, 1.0, 11.0, 0.0, 10.0, 1.0, 11.0, 0.0, 0.0, 0.0, 0.0},
3e-1)));
} else {
EXPECT_THAT(
m.GetOutput1<float>(),
ElementsAreArray(ArrayFloatNear(
{0.0, 10.0, 1.0, 11.0, 0.0, 10.0, 1.0, 11.0, 0.0, 0.0, 0.0, 0.0},
3e-4)));
}
std::vector<int> output_shape2 = m.GetOutputShape2();
EXPECT_THAT(output_shape2, ElementsAre(1, 3));
if (tensor_type_ == TensorType_UINT8) {
EXPECT_THAT(m.GetOutput2<float>(),
ElementsAreArray(ArrayFloatNear({1, 0, 0}, 1e-1)));
} else {
EXPECT_THAT(m.GetOutput2<float>(),
ElementsAreArray(ArrayFloatNear({1, 0, 0}, 1e-4)));
}
std::vector<int> output_shape3 = m.GetOutputShape3();
EXPECT_THAT(output_shape3, ElementsAre(1, 3));
if (tensor_type_ == TensorType_UINT8) {
EXPECT_THAT(m.GetOutput3<float>(),
ElementsAreArray(ArrayFloatNear({0.95, 0.9, 0.0}, 1e-1)));
} else {
EXPECT_THAT(m.GetOutput3<float>(),
ElementsAreArray(ArrayFloatNear({0.95, 0.93, 0.0}, 1e-4)));
}
std::vector<int> output_shape4 = m.GetOutputShape4();
EXPECT_THAT(output_shape4, ElementsAre(1));
if (tensor_type_ == TensorType_UINT8) {
EXPECT_THAT(m.GetOutput4<float>(),
ElementsAreArray(ArrayFloatNear({2.0}, 1e-1)));
} else {
EXPECT_THAT(m.GetOutput4<float>(),
ElementsAreArray(ArrayFloatNear({2.0}, 1e-4)));
}
}
TEST(DetectionPostprocessOpTest, FloatTestwithNoBackgroundClassAndNoKeypoints) {
DetectionPostprocessOpModelwithRegularNMS m(
{TensorType_FLOAT32, {1, 6, 4}}, {TensorType_FLOAT32, {1, 6, 2}},
{TensorType_FLOAT32, {6, 4}}, {TensorType_FLOAT32, {}},
{TensorType_FLOAT32, {}}, {TensorType_FLOAT32, {}},
{TensorType_FLOAT32, {}}, false);
m.SetInput1<float>({
0.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, -1.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0
});
m.SetInput2<float>({.9, .8, .75, .72, .6, .5, .93, .95, .5, .4, .3, .2});
m.SetInput3<float>({
0.5, 0.5, 1.0, 1.0,
0.5, 0.5, 1.0, 1.0,
0.5, 0.5, 1.0, 1.0,
0.5, 10.5, 1.0, 1.0,
0.5, 10.5, 1.0, 1.0,
0.5, 100.5, 1.0, 1.0
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
std::vector<int> output_shape1 = m.GetOutputShape1();
EXPECT_THAT(output_shape1, ElementsAre(1, 3, 4));
EXPECT_THAT(
m.GetOutput1<float>(),
ElementsAreArray(ArrayFloatNear(
{0.0, 10.0, 1.0, 11.0, 0.0, 0.0, 1.0, 1.0, 0.0, 100.0, 1.0, 101.0},
1e-1)));
std::vector<int> output_shape2 = m.GetOutputShape2();
EXPECT_THAT(output_shape2, ElementsAre(1, 3));
EXPECT_THAT(m.GetOutput2<float>(),
ElementsAreArray(ArrayFloatNear({1, 0, 0}, 1e-1)));
std::vector<int> output_shape3 = m.GetOutputShape3();
EXPECT_THAT(output_shape3, ElementsAre(1, 3));
EXPECT_THAT(m.GetOutput3<float>(),
ElementsAreArray(ArrayFloatNear({0.95, 0.9, 0.3}, 1e-1)));
std::vector<int> output_shape4 = m.GetOutputShape4();
EXPECT_THAT(output_shape4, ElementsAre(1));
EXPECT_THAT(m.GetOutput4<float>(),
ElementsAreArray(ArrayFloatNear({3.0}, 1e-1)));
}
TEST(DetectionPostprocessOpTest, FloatTestwithBackgroundClassAndKeypoints) {
DetectionPostprocessOpModelwithRegularNMS m(
{TensorType_FLOAT32, {1, 6, 5}}, {TensorType_FLOAT32, {1, 6, 3}},
{TensorType_FLOAT32, {6, 4}}, {TensorType_FLOAT32, {}},
{TensorType_FLOAT32, {}}, {TensorType_FLOAT32, {}},
{TensorType_FLOAT32, {}}, false);
m.SetInput1<float>({
0.0, 0.0, 0.0, 0.0, 1.0,
0.0, 1.0, 0.0, 0.0, 1.0,
0.0, -1.0, 0.0, 0.0, 1.0,
0.0, 0.0, 0.0, 0.0, 1.0,
0.0, 1.0, 0.0, 0.0, 1.0,
0.0, 0.0, 0.0, 0.0, 1.0,
});
m.SetInput2<float>({0., .9, .8, 0., .75, .72, 0., .6, .5, 0., .93, .95, 0.,
.5, .4, 0., .3, .2});
m.SetInput3<float>({
0.5, 0.5, 1.0, 1.0,
0.5, 0.5, 1.0, 1.0,
0.5, 0.5, 1.0, 1.0,
0.5, 10.5, 1.0, 1.0,
0.5, 10.5, 1.0, 1.0,
0.5, 100.5, 1.0, 1.0
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
std::vector<int> output_shape1 = m.GetOutputShape1();
EXPECT_THAT(output_shape1, ElementsAre(1, 3, 4));
EXPECT_THAT(
m.GetOutput1<float>(),
ElementsAreArray(ArrayFloatNear(
{0.0, 10.0, 1.0, 11.0, 0.0, 0.0, 1.0, 1.0, 0.0, 100.0, 1.0, 101.0},
1e-1)));
std::vector<int> output_shape2 = m.GetOutputShape2();
EXPECT_THAT(output_shape2, ElementsAre(1, 3));
EXPECT_THAT(m.GetOutput2<float>(),
ElementsAreArray(ArrayFloatNear({1, 0, 0}, 1e-4)));
std::vector<int> output_shape3 = m.GetOutputShape3();
EXPECT_THAT(output_shape3, ElementsAre(1, 3));
EXPECT_THAT(m.GetOutput3<float>(),
ElementsAreArray(ArrayFloatNear({0.95, 0.9, 0.3}, 1e-4)));
std::vector<int> output_shape4 = m.GetOutputShape4();
EXPECT_THAT(output_shape4, ElementsAre(1));
EXPECT_THAT(m.GetOutput4<float>(),
ElementsAreArray(ArrayFloatNear({3.0}, 1e-4)));
}
TEST(DetectionPostprocessOpTest,
QuantizedTestwithNoBackgroundClassAndKeypoints) {
DetectionPostprocessOpModelwithRegularNMS m(
{TensorType_UINT8, {1, 6, 5}, -1.0, 1.0},
{TensorType_UINT8, {1, 6, 2}, 0.0, 1.0},
{TensorType_UINT8, {6, 4}, 0.0, 100.5}, {TensorType_FLOAT32, {}},
{TensorType_FLOAT32, {}}, {TensorType_FLOAT32, {}},
{TensorType_FLOAT32, {}}, false);
std::vector<std::vector<float>> inputs1 = {{
0.0, 0.0, 0.0, 0.0, 1.0,
0.0, 1.0, 0.0, 0.0, 1.0,
0.0, -1.0, 0.0, 0.0, 1.0,
0.0, 0.0, 0.0, 0.0, 1.0,
0.0, 1.0, 0.0, 0.0, 1.0,
0.0, 0.0, 0.0, 0.0, 1.0
}};
m.QuantizeAndPopulate<uint8_t>(m.input1(), inputs1[0]);
std::vector<std::vector<float>> inputs2 = {
{.9, .8, .75, .72, .6, .5, .93, .95, .5, .4, .3, .2}};
m.QuantizeAndPopulate<uint8_t>(m.input2(), inputs2[0]);
std::vector<std::vector<float>> inputs3 = {{
0.5, 0.5, 1.0, 1.0,
0.5, 0.5, 1.0, 1.0,
0.5, 0.5, 1.0, 1.0,
0.5, 10.5, 1.0, 1.0,
0.5, 10.5, 1.0, 1.0,
0.5, 100.5, 1.0, 1.0
}};
m.QuantizeAndPopulate<uint8_t>(m.input3(), inputs3[0]);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
std::vector<int> output_shape1 = m.GetOutputShape1();
EXPECT_THAT(output_shape1, ElementsAre(1, 3, 4));
EXPECT_THAT(
m.GetOutput1<float>(),
ElementsAreArray(ArrayFloatNear(
{0.0, 10.0, 1.0, 11.0, 0.0, 0.0, 1.0, 1.0, 0.0, 100.0, 1.0, 101.0},
3e-1)));
std::vector<int> output_shape2 = m.GetOutputShape2();
EXPECT_THAT(output_shape2, ElementsAre(1, 3));
EXPECT_THAT(m.GetOutput2<float>(),
ElementsAreArray(ArrayFloatNear({1, 0, 0}, 1e-1)));
std::vector<int> output_shape3 = m.GetOutputShape3();
EXPECT_THAT(output_shape3, ElementsAre(1, 3));
EXPECT_THAT(m.GetOutput3<float>(),
ElementsAreArray(ArrayFloatNear({0.95, 0.9, 0.3}, 1e-1)));
std::vector<int> output_shape4 = m.GetOutputShape4();
EXPECT_THAT(output_shape4, ElementsAre(1));
EXPECT_THAT(m.GetOutput4<float>(),
ElementsAreArray(ArrayFloatNear({3.0}, 1e-1)));
}
TEST(DetectionPostprocessOpTest, FloatTestwithNoBackgroundClassAndKeypoints) {
DetectionPostprocessOpModelwithRegularNMS m(
{TensorType_FLOAT32, {1, 6, 5}}, {TensorType_FLOAT32, {1, 6, 2}},
{TensorType_FLOAT32, {6, 4}}, {TensorType_FLOAT32, {}},
{TensorType_FLOAT32, {}}, {TensorType_FLOAT32, {}},
{TensorType_FLOAT32, {}}, false);
m.SetInput1<float>({
0.0, 0.0, 0.0, 0.0, 1.0,
0.0, 1.0, 0.0, 0.0, 1.0,
0.0, -1.0, 0.0, 0.0, 1.0,
0.0, 0.0, 0.0, 0.0, 1.0,
0.0, 1.0, 0.0, 0.0, 1.0,
0.0, 0.0, 0.0, 0.0, 1.0,
});
m.SetInput2<float>({.9, .8, .75, .72, .6, .5, .93, .95, .5, .4, .3, .2});
m.SetInput3<float>({
0.5, 0.5, 1.0, 1.0,
0.5, 0.5, 1.0, 1.0,
0.5, 0.5, 1.0, 1.0,
0.5, 10.5, 1.0, 1.0,
0.5, 10.5, 1.0, 1.0,
0.5, 100.5, 1.0, 1.0
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
std::vector<int> output_shape1 = m.GetOutputShape1();
EXPECT_THAT(output_shape1, ElementsAre(1, 3, 4));
EXPECT_THAT(
m.GetOutput1<float>(),
ElementsAreArray(ArrayFloatNear(
{0.0, 10.0, 1.0, 11.0, 0.0, 0.0, 1.0, 1.0, 0.0, 100.0, 1.0, 101.0},
1e-1)));
std::vector<int> output_shape2 = m.GetOutputShape2();
EXPECT_THAT(output_shape2, ElementsAre(1, 3));
EXPECT_THAT(m.GetOutput2<float>(),
ElementsAreArray(ArrayFloatNear({1, 0, 0}, 1e-4)));
std::vector<int> output_shape3 = m.GetOutputShape3();
EXPECT_THAT(output_shape3, ElementsAre(1, 3));
EXPECT_THAT(m.GetOutput3<float>(),
ElementsAreArray(ArrayFloatNear({0.95, 0.9, 0.3}, 1e-4)));
std::vector<int> output_shape4 = m.GetOutputShape4();
EXPECT_THAT(output_shape4, ElementsAre(1));
EXPECT_THAT(m.GetOutput4<float>(),
ElementsAreArray(ArrayFloatNear({3.0}, 1e-4)));
}
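// Tiny, near-equal scores collapse to the same uint8 buckets after
// quantization; per the test name, this exercises stable ordering of ties.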
TEST(DetectionPostprocessOpTest,
QuantizedTestwithNoBackgroundClassAndKeypointsStableSort) {
DetectionPostprocessOpModelwithRegularNMS m(
{TensorType_UINT8, {1, 6, 5}, -1.0, 1.0},
{TensorType_UINT8, {1, 6, 2}, 0.0, 1.0},
{TensorType_UINT8, {6, 4}, 0.0, 100.5}, {TensorType_FLOAT32, {}},
{TensorType_FLOAT32, {}}, {TensorType_FLOAT32, {}},
{TensorType_FLOAT32, {}}, false);
std::vector<std::vector<float>> inputs1 = {{
0.0, 0.0, 0.0, 0.0, 1.0,
0.0, 1.0, 0.0, 0.0, 1.0,
0.0, -1.0, 0.0, 0.0, 1.0,
0.0, 0.0, 0.0, 0.0, 1.0,
0.0, 1.0, 0.0, 0.0, 1.0,
0.0, 0.0, 0.0, 0.0, 1.0
}};
m.QuantizeAndPopulate<uint8_t>(m.input1(), inputs1[0]);
std::vector<std::vector<float>> inputs2 = {
{0.015625, 0.007812, 0.003906, 0.015625, 0.015625, 0.007812, 0.019531,
0.019531, 0.007812, 0.003906, 0.003906, 0.003906}};
m.QuantizeAndPopulate<uint8_t>(m.input2(), inputs2[0]);
std::vector<std::vector<float>> inputs3 = {{
0.5, 0.5, 1.0, 1.0,
0.5, 0.5, 1.0, 1.0,
0.5, 0.5, 1.0, 1.0,
0.5, 10.5, 1.0, 1.0,
0.5, 10.5, 1.0, 1.0,
0.5, 100.5, 1.0, 1.0
}};
m.QuantizeAndPopulate<uint8_t>(m.input3(), inputs3[0]);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
std::vector<int> output_shape1 = m.GetOutputShape1();
EXPECT_THAT(output_shape1, ElementsAre(1, 3, 4));
EXPECT_THAT(
m.GetOutput1<float>(),
ElementsAreArray(ArrayFloatNear(
{0.0, 10.0, 1.0, 11.0, 0.0, 0.0, 1.0, 1.0, 0.0, 100.0, 1.0, 101.0},
3e-1)));
std::vector<int> output_shape2 = m.GetOutputShape2();
EXPECT_THAT(output_shape2, ElementsAre(1, 3));
EXPECT_THAT(m.GetOutput2<float>(),
ElementsAreArray(ArrayFloatNear({0, 0, 0}, 1e-1)));
std::vector<int> output_shape3 = m.GetOutputShape3();
EXPECT_THAT(output_shape3, ElementsAre(1, 3));
EXPECT_THAT(m.GetOutput3<float>(),
ElementsAreArray(
ArrayFloatNear({0.0196078, 0.0156863, 0.00392157}, 1e-7)));
std::vector<int> output_shape4 = m.GetOutputShape4();
EXPECT_THAT(output_shape4, ElementsAre(1));
EXPECT_THAT(m.GetOutput4<float>(),
ElementsAreArray(ArrayFloatNear({3.0}, 1e-1)));
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/detection_postprocess.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/detection_postprocess_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
bb9051f2-6655-44b1-ac5c-e4c26785b4a3 | cpp | tensorflow/tensorflow | call_once | tensorflow/lite/kernels/call_once.cc | tensorflow/lite/kernels/call_once_test.cc | #include <stddef.h>
#include <cstring>
#include <memory>
#include <vector>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/experimental/resource/initialization_status.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace call_once_kernel {
struct OpData {
int init_subgraph_index;
};
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
auto* op_data = new OpData;
const auto* params = reinterpret_cast<const TfLiteCallOnceParams*>(buffer);
op_data->init_subgraph_index = params->init_subgraph_index;
return op_data;
}
void Free(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<OpData*>(buffer);
}
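// CALL_ONCE takes no inputs and produces no outputs, and the referenced
// initialization subgraph must be input/output free as well. If initialization
// already ran, preparation is a no-op.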
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
const OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
Subgraph* this_subgraph = reinterpret_cast<Subgraph*>(context->impl_);
resource::InitializationStatusMap* map =
&this_subgraph->initialization_status_map();
resource::InitializationStatus* status =
resource::GetInitializationStatus(map, op_data->init_subgraph_index);
if (status->IsInitialized()) return kTfLiteOk;
auto* subgraphs = this_subgraph->GetSubgraphs();
TF_LITE_ENSURE_EQ(context, node->inputs->size, 0);
TF_LITE_ENSURE_EQ(context, node->outputs->size, 0);
TF_LITE_ENSURE(context, op_data->init_subgraph_index < subgraphs->size());
Subgraph* init_subgraph = (*subgraphs)[op_data->init_subgraph_index].get();
TF_LITE_ENSURE_EQ(context, init_subgraph->inputs().size(), 0);
TF_LITE_ENSURE_EQ(context, init_subgraph->outputs().size(), 0);
return kTfLiteOk;
}
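// Runs the initialization subgraph only once: after a successful invocation its
// non-persistent memory is released and the shared status map records
// completion, so later calls (including from other entry points) return
// immediately.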
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
Subgraph* this_subgraph = reinterpret_cast<Subgraph*>(context->impl_);
resource::InitializationStatusMap* map =
&this_subgraph->initialization_status_map();
resource::InitializationStatus* status =
resource::GetInitializationStatus(map, op_data->init_subgraph_index);
if (status->IsInitialized()) return kTfLiteOk;
auto* subgraphs = this_subgraph->GetSubgraphs();
Subgraph& init_subgraph = *(*subgraphs)[op_data->init_subgraph_index];
TF_LITE_ENSURE_OK(context, init_subgraph.AllocateTensors());
TF_LITE_ENSURE_OK(context, init_subgraph.Invoke());
TF_LITE_ENSURE_OK(context, init_subgraph.ReleaseNonPersistentMemory());
status->MarkInitializationIsDone();
return kTfLiteOk;
}
}
TfLiteRegistration* Register_CALL_ONCE() {
static TfLiteRegistration r = {call_once_kernel::Init, call_once_kernel::Free,
call_once_kernel::Prepare,
call_once_kernel::Eval};
return &r;
}
}
}
} | #include <stdint.h>
#include <memory>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/subgraph_test_util.h"
namespace tflite {
using subgraph_test_util::ControlFlowOpTest;
namespace {
class CallOnceTest : public ControlFlowOpTest {
protected:
void SetUp() override {
AddSubgraphs(2);
builder_->BuildCallOnceAndReadVariableSubgraph(
&interpreter_->primary_subgraph());
builder_->BuildAssignRandomValueToVariableSubgraph(
interpreter_->subgraph(1));
builder_->BuildCallOnceAndReadVariablePlusOneSubgraph(
interpreter_->subgraph(2));
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
ASSERT_EQ(interpreter_->subgraph(2)->AllocateTensors(), kTfLiteOk);
}
};
TEST_F(CallOnceTest, TestSimple) {
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output = interpreter_->tensor(interpreter_->outputs()[0]);
ASSERT_EQ(output->dims->size, 1);
ASSERT_EQ(output->dims->data[0], 1);
ASSERT_EQ(output->type, kTfLiteInt32);
ASSERT_EQ(NumElements(output), 1);
EXPECT_GT(output->data.i32[0], 0);
}
TEST_F(CallOnceTest, TestInvokeMultipleTimes) {
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output = interpreter_->tensor(interpreter_->outputs()[0]);
ASSERT_EQ(output->dims->size, 1);
ASSERT_EQ(output->dims->data[0], 1);
ASSERT_EQ(output->type, kTfLiteInt32);
ASSERT_EQ(NumElements(output), 1);
int value = output->data.i32[0];
EXPECT_GT(value, 0);
for (int i = 0; i < 3; ++i) {
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output = interpreter_->tensor(interpreter_->outputs()[0]);
ASSERT_EQ(output->dims->size, 1);
ASSERT_EQ(output->dims->data[0], 1);
ASSERT_EQ(output->type, kTfLiteInt32);
ASSERT_EQ(NumElements(output), 1);
ASSERT_EQ(output->data.i32[0], value);
}
}
TEST_F(CallOnceTest, TestInvokeOnceAcrossMultipleEntryPoints) {
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output = interpreter_->tensor(interpreter_->outputs()[0]);
ASSERT_EQ(output->dims->size, 1);
ASSERT_EQ(output->dims->data[0], 1);
ASSERT_EQ(output->type, kTfLiteInt32);
ASSERT_EQ(NumElements(output), 1);
int value = output->data.i32[0];
EXPECT_GT(value, 0);
ASSERT_EQ(interpreter_->subgraph(2)->Invoke(), kTfLiteOk);
output = interpreter_->subgraph(2)->tensor(
interpreter_->subgraph(2)->outputs()[0]);
ASSERT_EQ(output->dims->size, 1);
ASSERT_EQ(output->dims->data[0], 1);
ASSERT_EQ(output->type, kTfLiteInt32);
ASSERT_EQ(NumElements(output), 1);
ASSERT_EQ(output->data.i32[0], value + 1);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/call_once.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/call_once_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
51178850-f375-45ed-a8e6-f2e8892fe6b4 | cpp | tensorflow/tensorflow | multinomial | tensorflow/lite/kernels/multinomial.cc | tensorflow/lite/kernels/multinomial_test.cc | #include <algorithm>
#include <cmath>
#include <cstdint>
#include <limits>
#include <random>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace custom {
namespace multinomial {
struct MultinomialParams {
std::default_random_engine rng;
};
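// Inverse-CDF sampling over unnormalized probabilities: builds cumulative sums
// of exp(logit - max_logit) (the max is subtracted for numerical stability),
// draws a uniform value in [0, total), and maps it to a class via lower_bound.
// E.g. logits {0, ln 3} select class 1 with probability 3/4.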
template <typename FloatType, typename IntegralType>
TfLiteStatus MultinomialSample(std::default_random_engine& rng,
const FloatType* logits, int logits_size,
IntegralType* outputs, int output_size) {
std::vector<double> cumulative_odds;
cumulative_odds.reserve(logits_size);
double last_odds = 0.0;
FloatType max_logit = std::numeric_limits<FloatType>::lowest();
for (int i = 0; i < logits_size; i++) {
max_logit = std::max(max_logit, logits[i]);
}
for (int i = 0; i < logits_size; i++) {
FloatType odds = std::exp(logits[i] - max_logit) + last_odds;
cumulative_odds.push_back(odds);
last_odds = odds;
}
std::uniform_real_distribution<double> distribution{0.0,
cumulative_odds.back()};
for (int i = 0; i < output_size; i++) {
double sample = distribution(rng);
auto it = std::lower_bound(cumulative_odds.begin(), cumulative_odds.end(),
sample);
if (it == cumulative_odds.end()) {
return kTfLiteError;
}
*outputs++ = static_cast<IntegralType>(it - cumulative_odds.begin());
}
return kTfLiteOk;
}
template <typename FloatType>
TfLiteStatus MultinomialSample(TfLiteContext* context,
std::default_random_engine& rng,
const FloatType* logits, int logits_size,
TfLiteTensor* output, int outputs_offset,
int output_size) {
switch (output->type) {
case kTfLiteInt32:
return MultinomialSample<FloatType, int32_t>(
rng, logits, logits_size,
GetTensorData<int32_t>(output) + outputs_offset, output_size);
break;
case kTfLiteInt64:
return MultinomialSample<FloatType, int64_t>(
rng, logits, logits_size,
GetTensorData<int64_t>(output) + outputs_offset, output_size);
break;
default:
TF_LITE_KERNEL_LOG(context,
"Unsupported datatype for multinomial output: %s",
TfLiteTypeGetName(output->type));
return kTfLiteError;
}
}
TfLiteStatus MultinomialSample(TfLiteContext* context,
std::default_random_engine& rng,
const TfLiteTensor* logits, int logits_offset,
int logits_size, TfLiteTensor* output,
int outputs_offset, int output_size) {
switch (logits->type) {
case kTfLiteFloat16:
TF_LITE_KERNEL_LOG(context, "TfLiteFloat16 is currently not supported.");
return kTfLiteError;
break;
case kTfLiteFloat32:
TF_LITE_ENSURE_OK(
context,
MultinomialSample<float>(
context, rng, GetTensorData<float>(logits) + logits_offset,
logits_size, output, outputs_offset, output_size));
break;
case kTfLiteFloat64:
TF_LITE_ENSURE_OK(
context,
MultinomialSample<double>(
context, rng, GetTensorData<double>(logits) + logits_offset,
logits_size, output, outputs_offset, output_size));
break;
default:
TF_LITE_KERNEL_LOG(context,
"Unsupported datatype for multinomial logit input: %s",
                         TfLiteTypeGetName(logits->type));
return kTfLiteError;
}
return kTfLiteOk;
}
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
return new MultinomialParams();
}
void Free(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<MultinomialParams*>(buffer);
}
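// num_samples must be a constant int32 scalar; the output is resized to
// [batch_size, num_samples].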
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, tflite::NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, tflite::NumOutputs(node), 1);
const TfLiteTensor* logits_input = tflite::GetInput(context, node, 0);
TF_LITE_ENSURE_EQ(context, tflite::NumDimensions(logits_input), 2);
int batch_size = tflite::SizeOfDimension(logits_input, 0);
const TfLiteTensor* num_samples_input = tflite::GetInput(context, node, 1);
TF_LITE_ENSURE_EQ(context, tflite::NumDimensions(num_samples_input), 0);
TF_LITE_ENSURE_EQ(context, num_samples_input->type, kTfLiteInt32);
TF_LITE_ENSURE(context, IsConstantTensor(num_samples_input));
int num_samples = *num_samples_input->data.i32;
TfLiteIntArray* output_shape = TfLiteIntArrayCreate(2);
output_shape->data[0] = batch_size;
output_shape->data[1] = num_samples;
TfLiteTensor* output = tflite::GetOutput(context, node, 0);
return context->ResizeTensor(context, output, output_shape);
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
MultinomialParams* params =
reinterpret_cast<MultinomialParams*>(node->user_data);
TF_LITE_ENSURE(context, params != nullptr);
const TfLiteTensor* logits = tflite::GetInput(context, node, 0);
int batch_size = tflite::SizeOfDimension(logits, 0);
int logits_size = tflite::SizeOfDimension(logits, 1);
const TfLiteTensor* num_samples_input = tflite::GetInput(context, node, 1);
int output_size = *num_samples_input->data.i32;
TfLiteTensor* output = tflite::GetOutput(context, node, 0);
for (int batch = 0; batch < batch_size; ++batch) {
int logits_offset = logits_size * batch;
int output_offset = output_size * batch;
TF_LITE_ENSURE_OK(
context,
MultinomialSample(context, params->rng, logits, logits_offset,
logits_size, output, output_offset, output_size));
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_MULTINOMIAL() {
static TfLiteRegistration r = {multinomial::Init, multinomial::Free,
multinomial::Prepare, multinomial::Eval};
return &r;
}
}
}
} | #include <algorithm>
#include <cmath>
#include <cstddef>
#include <limits>
#include <random>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/custom_ops_register.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/testing/util.h"
namespace tflite {
namespace {
template <typename T>
tflite::TensorType GetTTEnum();
template <>
tflite::TensorType GetTTEnum<float>() {
return tflite::TensorType_FLOAT32;
}
template <>
tflite::TensorType GetTTEnum<double>() {
return tflite::TensorType_FLOAT64;
}
template <>
tflite::TensorType GetTTEnum<int>() {
return tflite::TensorType_INT32;
}
template <>
tflite::TensorType GetTTEnum<int64_t>() {
return tflite::TensorType_INT64;
}
class MultinomialOpModel : public tflite::SingleOpModel {
public:
MultinomialOpModel(tflite::TensorData logits, int num_samples,
tflite::TensorData output) {
logits_ = AddInput(logits);
num_samples_ = AddConstInput(tflite::TensorType_INT32, {num_samples}, {});
output_ = AddOutput(output);
SetCustomOp("Multinomial", {}, ops::custom::Register_MULTINOMIAL);
BuildInterpreter({GetShape(logits_), GetShape(num_samples_)});
}
int logits_;
int num_samples_;
int output_;
int logits() { return logits_; }
int num_samples() { return num_samples_; }
int output() { return output_; }
template <typename T>
std::vector<T> GetOutput() {
return ExtractVector<T>(output_);
}
};
}
}
template <typename Type1, typename Type2>
struct TypePair {
using T1 = Type1;
using T2 = Type2;
};
template <typename TypePair>
class MultinomialTest : public ::testing::Test {
public:
using FloatType = typename TypePair::T1;
using IntegralType = typename TypePair::T2;
};
using TestTypes =
::testing::Types<TypePair<float, int>, TypePair<double, int>,
TypePair<float, int64_t>, TypePair<double, int64_t> >;
TYPED_TEST_SUITE(MultinomialTest, TestTypes);
TYPED_TEST(MultinomialTest, TestMultiBatch) {
const int kNumSamples = 1000;
using Float = typename TestFixture::FloatType;
using Int = typename TestFixture::IntegralType;
tflite::MultinomialOpModel m({tflite::GetTTEnum<Float>(), {3, 3}},
kNumSamples, {tflite::GetTTEnum<Int>(), {}});
m.PopulateTensor<Float>(m.logits(),
std::vector<Float>(9, static_cast<Float>(0.0f)));
ASSERT_EQ(m.Invoke(), kTfLiteOk);
auto output = m.GetOutput<Int>();
EXPECT_EQ(output.size(), kNumSamples * 3);
int c0 = std::count(output.begin(), output.end(), 0);
int c1 = std::count(output.begin(), output.end(), 1);
int c2 = std::count(output.begin(), output.end(), 2);
EXPECT_EQ(c0 + c1 + c2, 3 * kNumSamples);
EXPECT_GT(c0, 750);
EXPECT_GT(c1, 750);
EXPECT_GT(c2, 750);
EXPECT_LT(c0, 1250);
EXPECT_LT(c1, 1250);
EXPECT_LT(c2, 1250);
}
TYPED_TEST(MultinomialTest, TestSampleHighLogOdds) {
const int kNumSamples = 1000;
using Float = typename TestFixture::FloatType;
using Int = typename TestFixture::IntegralType;
tflite::MultinomialOpModel m({tflite::GetTTEnum<Float>(), {1, 3}},
kNumSamples, {tflite::GetTTEnum<Int>(), {}});
m.PopulateTensor<Float>(m.logits(),
{static_cast<Float>(0.0f), static_cast<Float>(1.0f),
static_cast<Float>(0.0f)});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
auto output = m.GetOutput<Int>();
EXPECT_EQ(output.size(), kNumSamples);
int c0 = std::count(output.begin(), output.end(), 0);
int c1 = std::count(output.begin(), output.end(), 1);
int c2 = std::count(output.begin(), output.end(), 2);
EXPECT_EQ(c0 + c1 + c2, kNumSamples);
EXPECT_GT(c1, c0);
EXPECT_GT(c1, c2);
}
TYPED_TEST(MultinomialTest, TestVeryLowLogOdds) {
const int kNumSamples = 1000;
using Float = typename TestFixture::FloatType;
using Int = typename TestFixture::IntegralType;
tflite::MultinomialOpModel m({tflite::GetTTEnum<Float>(), {1, 3}},
kNumSamples, {tflite::GetTTEnum<Int>(), {}});
m.PopulateTensor<Float>(
m.logits(), {static_cast<Float>(-1000.0f), static_cast<Float>(-1000.0f),
static_cast<Float>(0.0f)});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
auto output = m.GetOutput<Int>();
EXPECT_EQ(output.size(), kNumSamples);
int c0 = std::count(output.begin(), output.end(), 0);
int c1 = std::count(output.begin(), output.end(), 1);
int c2 = std::count(output.begin(), output.end(), 2);
EXPECT_EQ(c0, 0);
EXPECT_EQ(c1, 0);
EXPECT_EQ(c2, kNumSamples);
}
TYPED_TEST(MultinomialTest, TestSamplesDifferent) {
using Float = typename TestFixture::FloatType;
using Int = typename TestFixture::IntegralType;
const int kNumSamples = 5;
const int kNumLogits = 1000;
tflite::MultinomialOpModel m({tflite::GetTTEnum<Float>(), {1, kNumLogits}},
kNumSamples, {tflite::GetTTEnum<Int>(), {}});
std::vector<Float> logits(kNumLogits, static_cast<Float>(0.0f));
m.PopulateTensor<Float>(m.logits(), logits);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
auto output1 = m.GetOutput<Int>();
ASSERT_EQ(m.Invoke(), kTfLiteOk);
auto output2 = m.GetOutput<Int>();
bool successive_samples_are_different = false;
for (int i = 0; i < kNumSamples; ++i) {
if (output1[i] == output2[i]) continue;
successive_samples_are_different = true;
break;
}
EXPECT_TRUE(successive_samples_are_different);
}
TYPED_TEST(MultinomialTest, TestSamplesPrecise) {
using Float = typename TestFixture::FloatType;
using Int = typename TestFixture::IntegralType;
const int kNumSamples = 100000;
const int kNumLogits = 2;
tflite::MultinomialOpModel m({tflite::GetTTEnum<Float>(), {1, kNumLogits}},
kNumSamples, {tflite::GetTTEnum<Int>(), {}});
std::vector<Float> logits(
      {static_cast<Float>(1000.0), static_cast<Float>(1001.0)});
m.PopulateTensor<Float>(m.logits(), logits);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
auto output = m.GetOutput<Int>();
int c0 = std::count(output.begin(), output.end(), 0);
int c1 = std::count(output.begin(), output.end(), 1);
double p0 = static_cast<double>(c0) / (c0 + c1);
EXPECT_LT(std::abs(p0 - 0.26894142137), 0.01);
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/multinomial.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/multinomial_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
84e73c58-4c71-4603-8fd2-133a99b2e99a | cpp | tensorflow/tensorflow | squared_difference | tensorflow/lite/delegates/hexagon/builders/squared_difference.cc | tensorflow/lite/delegates/xnnpack/squared_difference_test.cc | #include "hexagon/hexagon_nn_ops.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/delegates/hexagon/builders/op_builder.h"
namespace tflite {
namespace delegates {
namespace hexagon {
class SquaredDifferenceOpBuilder : public OpBuilder {
public:
explicit SquaredDifferenceOpBuilder(GraphBuilder* graph_builder, int op_type)
: OpBuilder(graph_builder, op_type) {}
TfLiteStatus PopulateSubGraph(const TfLiteIntArray* inputs,
const TfLiteIntArray* outputs,
TfLiteContext* context) override;
TfLiteStatus RegisterOutputs(const TfLiteIntArray* outputs,
TfLiteContext* context) override;
private:
TensorID node_output_;
};
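// Emits (a - b)^2 as two Hexagon nodes: the node built here (with the op type
// registered by the delegate for this builder, presumably a quantized subtract)
// produces the element-wise difference, and a QuantizedMul_8x8to8 node
// multiplies that difference by itself, requantized to the output's min/max.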
TfLiteStatus SquaredDifferenceOpBuilder::PopulateSubGraph(
const TfLiteIntArray* inputs, const TfLiteIntArray* outputs,
TfLiteContext* context) {
const int tensor_a_index = inputs->data[0];
const int tensor_b_index = inputs->data[1];
const auto& tensor_a = context->tensors[tensor_a_index];
const auto& tensor_b = context->tensors[tensor_b_index];
AddInput(graph_builder_->GetHexagonTensorId(tensor_a_index));
AddInput(graph_builder_->GetHexagonTensorId(tensor_b_index));
TF_LITE_ENSURE_STATUS(ComputeAndAddMinAndMax(context, tensor_a));
TF_LITE_ENSURE_STATUS(ComputeAndAddMinAndMax(context, tensor_b));
float output_min = -1, output_max = -1;
TF_LITE_ENSURE_STATUS(ComputeMinAndMaxQuantValues(
context->tensors[outputs->data[0]], &output_min, &output_max));
auto* output_min_const = graph_builder_->AddConstNodeWithData(
kScalarShape, reinterpret_cast<char*>(&output_min), sizeof(output_min));
auto* output_max_const = graph_builder_->AddConstNodeWithData(
kScalarShape, reinterpret_cast<char*>(&output_max), sizeof(output_max));
int output_batch_size, output_height_size, output_width_size,
output_depth_size;
GetDims(&output_batch_size, &output_height_size, &output_width_size,
&output_depth_size, context->tensors[outputs->data[0]].dims);
auto sub_out = AddOutput(sizeof(uint8_t), 4,
{output_batch_size, output_height_size,
output_width_size, output_depth_size});
auto sub_min = AddOutput(sizeof(float), 4, kScalarShape);
auto sub_max = AddOutput(sizeof(float), 4, kScalarShape);
auto* mul_op = graph_builder_->AddNode(GetTFLiteNodeID());
mul_op->SetOpType(OP_QuantizedMul_8x8to8);
mul_op->AddInput(sub_out);
mul_op->AddInput(sub_out);
mul_op->AddInput(sub_min);
mul_op->AddInput(sub_max);
mul_op->AddInput(sub_min);
mul_op->AddInput(sub_max);
mul_op->AddInput(TensorID(output_min_const->GetID(), 0));
mul_op->AddInput(TensorID(output_max_const->GetID(), 0));
node_output_ = mul_op->AddOutput(sizeof(uint8_t), 4,
{output_batch_size, output_height_size,
output_width_size, output_depth_size});
mul_op->AddOutput(sizeof(float), 4, kScalarShape);
mul_op->AddOutput(sizeof(float), 4, kScalarShape);
return kTfLiteOk;
}
TfLiteStatus SquaredDifferenceOpBuilder::RegisterOutputs(
const TfLiteIntArray* outputs, TfLiteContext* context) {
graph_builder_->AddTensorWithID(outputs->data[0], node_output_.first,
node_output_.second);
return kTfLiteOk;
}
OpBuilder* CreateSquaredDifferenceOpBuilder(GraphBuilder* graph_builder,
int op_type) {
return new SquaredDifferenceOpBuilder(graph_builder, op_type);
}
}
}
} | #include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/xnnpack/binary_elementwise_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
namespace tflite {
namespace xnnpack {
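// Each test draws random shapes and uses BinaryElementwiseTester to check the
// XNNPACK delegate's SQUARED_DIFFERENCE results against the default CPU path.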
TEST(SquaredDifference, 4DBy4D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
}
TEST(SquaredDifference, 4DBy4DBroadcastChannels) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, 1, 1, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, 1, 1, channels})
.Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
}
TEST(SquaredDifference, 4DBy4DBroadcastWidth) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, 1, width, 1})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, 1, width, 1})
.Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
}
TEST(SquaredDifference, 4DBy4DBroadcastHeight) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, height, 1, 1})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, height, 1, 1})
.Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
}
TEST(SquaredDifference, 4DBy4DBroadcastBatch) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, 1, 1, 1})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, 1, 1, 1})
.Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
}
TEST(SquaredDifference, 4DBy4DBroadcastHeightWidthChannels) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, height, width, channels})
.Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
}
TEST(SquaredDifference, 4DBy3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({height, width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({height, width, channels})
.Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
}
TEST(SquaredDifference, 4DBy2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({width, channels})
.Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
}
TEST(SquaredDifference, 4DBy1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({channels})
.Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
}
TEST(SquaredDifference, 4DBy0D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({})
.Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
}
TEST(SquaredDifference, 2DBy2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({batch, channels})
.Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
}
TEST(SquaredDifference, 2DBy1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({channels})
.Input2Shape({batch, channels})
.Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({channels})
.Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
}
TEST(SquaredDifference, 2DBy0D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({})
.Input2Shape({batch, channels})
.Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({})
.Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
}
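// The *Static variants mark one of the two inputs as a constant (static) tensor.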
TEST(SquaredDifference, 4DByStatic4D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
}
TEST(SquaredDifference, 4DByStatic4DBroadcastChannels) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, 1, 1, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, 1, 1, channels})
.Input2Static(true)
.Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
}
TEST(SquaredDifference, 4DByStatic4DBroadcastWidth) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, 1, width, 1})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, 1, width, 1})
.Input2Static(true)
.Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
}
TEST(SquaredDifference, 4DByStatic4DBroadcastHeight) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, height, 1, 1})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, height, 1, 1})
.Input2Static(true)
.Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
}
TEST(SquaredDifference, 4DByStatic4DBroadcastBatch) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, 1, 1, 1})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, 1, 1, 1})
.Input2Static(true)
.Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
}
TEST(SquaredDifference, 4DByStatic4DBroadcastHeightWidthChannels) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, height, width, channels})
.Input2Static(true)
.Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
}
TEST(SquaredDifference, 4DByStatic3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({height, width, channels})
.Input2Static(true)
.Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
}
TEST(SquaredDifference, 4DByStatic2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({width, channels})
.Input2Static(true)
.Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
}
TEST(SquaredDifference, 4DByStatic1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({channels})
.Input2Static(true)
.Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
}
TEST(SquaredDifference, 4DByStatic0D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({})
.Input2Static(true)
.Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
}
TEST(SquaredDifference, 2DByStatic2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({batch, channels})
.Input1Static(true)
.Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({batch, channels})
.Input2Static(true)
.Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
}
TEST(SquaredDifference, 2DByStatic1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({channels})
.Input2Shape({batch, channels})
.Input1Static(true)
.Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({channels})
.Input2Static(true)
.Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
}
TEST(SquaredDifference, 2DByStatic0D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({})
.Input2Shape({batch, channels})
.Input1Static(true)
.Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({})
.Input2Static(true)
.Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
}
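// The static-weight variants below mark one of the two inputs as a constant
// tensor and exercise FP16, INT8, channel-wise INT8, and sparse weight
// handling in the delegate.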
TEST(SquaredDifference, FP16Weights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.FP16Weights()
.Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.FP16Weights()
.Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
}
TEST(SquaredDifference, INT8Weights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.INT8Weights()
.Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.INT8Weights()
.Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
}
TEST(SquaredDifference, INT8ChannelWiseWeights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.INT8ChannelWiseWeights()
.Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.INT8ChannelWiseWeights()
.Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
}
TEST(SquaredDifference, SparseWeights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.SparseWeights()
.Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.SparseWeights()
.Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
}
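// Exercises the same binary op with an XNNPACK delegate configured to use a
// two-thread threadpool.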
TEST(SquaredDifference, MultiThreading) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
delegate_options.num_threads = 2;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/hexagon/builders/squared_difference.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/squared_difference_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7f3aca48-7578-4d48-bd73-bdc90e971678 | cpp | tensorflow/tensorflow | conv3d_transpose | tensorflow/lite/kernels/conv3d_transpose.cc | tensorflow/lite/kernels/conv3d_transpose_test.cc | #include "tensorflow/lite/kernels/internal/reference/conv3d_transpose.h"
#include <cstddef>
#include <cstdint>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/padding.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace conv3d_transpose {
enum KernelType {
kReference,
kGenericOptimized,
};
const int kTensorNotAllocated = -1;
struct OpData {
Padding3DValues padding;
int col2im_id = kTensorNotAllocated;
int col2im_index;
bool need_col2im = false;
};
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
auto* opdata = new OpData;
return opdata;
}
void Free(TfLiteContext* context, void* buffer) {
delete static_cast<OpData*>(buffer);
}
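// Requests a col2im temporary tensor when the optimized kernel is selected;
// the reference kernel needs no temporaries.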
static TfLiteStatus AllocateTemporaryTensorsIfRequired(TfLiteContext* context,
TfLiteNode* node,
KernelType kernel_type) {
OpData* data = reinterpret_cast<OpData*>(node->user_data);
int temporaries_count = 0;
if (kernel_type == kGenericOptimized) {
if (data->col2im_id == kTensorNotAllocated) {
context->AddTensors(context, 1, &data->col2im_id);
}
data->col2im_index = temporaries_count++;
data->need_col2im = true;
}
TfLiteIntArrayFree(node->temporaries);
node->temporaries = TfLiteIntArrayCreate(temporaries_count);
return kTfLiteOk;
}
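// Validates the requested output shape against the input and filter, resizes
// the output tensor, and (for the optimized kernel) sizes the col2im buffer
// as {input D*H*W, filter volume * output channels}.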
TfLiteStatus ResizeOutputAndTemporaryTensors(
TfLiteContext* context, OpData* opdata, TfLiteConv3DTransposeParams* params,
const TfLiteTensor* shape_tensor, const TfLiteTensor* filter,
const TfLiteTensor* input, TfLiteTensor* col2im, TfLiteTensor* output) {
auto shape_data = GetTensorData<int32_t>(shape_tensor);
TF_LITE_ENSURE_EQ(context, shape_data[0], SizeOfDimension(input, 0));
TF_LITE_ENSURE_EQ(context, shape_data[4] % SizeOfDimension(filter, 3), 0);
const RuntimeShape& filter_shape = GetTensorShape(filter);
const int depth = shape_data[1];
const int height = shape_data[2];
const int width = shape_data[3];
const int filter_depth = filter_shape.Dims(0);
const int filter_height = filter_shape.Dims(1);
const int filter_width = filter_shape.Dims(2);
int unused_out_width, unused_out_height, unused_out_depth;
opdata->padding = ComputePadding3DValues(
params->stride_height, params->stride_width, params->stride_depth,
params->dilation_height_factor, params->dilation_width_factor,
params->dilation_depth_factor, height, width, depth, filter_height,
filter_width, filter_depth, params->padding, &unused_out_height,
&unused_out_width, &unused_out_depth);
TF_LITE_ENSURE_EQ(context, unused_out_depth, SizeOfDimension(input, 1));
TF_LITE_ENSURE_EQ(context, unused_out_height, SizeOfDimension(input, 2));
TF_LITE_ENSURE_EQ(context, unused_out_width, SizeOfDimension(input, 3));
TfLiteIntArray* output_shape =
TfLiteIntArrayCreate(NumElements(shape_tensor));
for (int i = 0; i < output_shape->size; ++i) {
output_shape->data[i] = GetTensorData<int32_t>(shape_tensor)[i];
}
TF_LITE_ENSURE_STATUS(context->ResizeTensor(context, output, output_shape));
if (opdata->need_col2im) {
TfLiteIntArray* col2im_shape_array = TfLiteIntArrayCreate(2);
const RuntimeShape& input_shape = GetTensorShape(input);
col2im_shape_array->data[0] =
input_shape.Dims(1) * input_shape.Dims(2) * input_shape.Dims(3);
col2im_shape_array->data[1] =
filter_depth * filter_height * filter_width * filter_shape.Dims(3);
col2im->type = kTfLiteFloat32;
col2im->allocation_type = kTfLiteDynamic;
return context->ResizeTensor(context, col2im, col2im_shape_array);
}
return kTfLiteOk;
}
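// Checks ranks and dtypes, allocates temporaries, and resizes the output now
// if the output-shape tensor is constant; otherwise resizing is deferred to
// Eval via dynamic tensors.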
TfLiteStatus Prepare(KernelType kernel_type, TfLiteContext* context,
TfLiteNode* node) {
auto* params =
reinterpret_cast<TfLiteConv3DTransposeParams*>(node->builtin_data);
OpData* opdata = reinterpret_cast<OpData*>(node->user_data);
TF_LITE_ENSURE(context, node->inputs->size == 3 || node->inputs->size == 4);
TF_LITE_ENSURE_EQ(context, node->outputs->size, 1);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
const TfLiteTensor* output_shape;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &output_shape));
const TfLiteTensor* filter;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &filter));
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 2, &input));
TF_LITE_ENSURE_EQ(context, output_shape->dims->size, 1);
TF_LITE_ENSURE_EQ(context, NumElements(output_shape), 5);
TF_LITE_ENSURE_EQ(context, input->dims->size, 5);
TF_LITE_ENSURE_EQ(context, filter->dims->size, 5);
TF_LITE_ENSURE_EQ(context, SizeOfDimension(input, 4),
SizeOfDimension(filter, 4));
TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32);
TF_LITE_ENSURE_TYPES_EQ(context, filter->type, kTfLiteFloat32);
TF_LITE_ENSURE_TYPES_EQ(context, output->type, input->type);
TF_LITE_ENSURE_TYPES_EQ(context, output_shape->type, kTfLiteInt32);
const TfLiteTensor* bias = GetInput(context, node, 3);
if (bias) {
TF_LITE_ENSURE_TYPES_EQ(context, bias->type, input->type);
TF_LITE_ENSURE_EQ(context, NumElements(bias), SizeOfDimension(filter, 3));
}
if (params->dilation_depth_factor > 1 || params->dilation_height_factor > 1 ||
params->dilation_width_factor > 1) {
kernel_type = kReference;
}
TF_LITE_ENSURE_STATUS(
AllocateTemporaryTensorsIfRequired(context, node, kernel_type));
TfLiteTensor* col2im = nullptr;
if (opdata->need_col2im) {
node->temporaries->data[opdata->col2im_index] = opdata->col2im_id;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node,
opdata->col2im_index, &col2im));
}
if (!IsConstantOrPersistentTensor(output_shape)) {
SetTensorToDynamic(output);
if (opdata->need_col2im) {
SetTensorToDynamic(col2im);
}
} else {
TF_LITE_ENSURE_STATUS(ResizeOutputAndTemporaryTensors(
context, opdata, params, output_shape, filter, input, col2im, output));
}
return kTfLiteOk;
}
template <KernelType kernel_type>
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
return Prepare(kernel_type, context, node);
}
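// Float32 evaluation: forwards strides, dilations, padding, and the fused
// activation range to the reference or optimized Conv3DTranspose kernel.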
void EvalFloat(KernelType kernel_type, TfLiteContext* context, TfLiteNode* node,
TfLiteConv3DTransposeParams* params, OpData* opdata,
const TfLiteTensor* input, const TfLiteTensor* filter,
const TfLiteTensor* bias, TfLiteTensor* col2im,
TfLiteTensor* output) {
float output_activation_min, output_activation_max;
CalculateActivationRange(params->activation, &output_activation_min,
&output_activation_max);
Conv3DTransposeParams runtime_params;
runtime_params.padding_values = opdata->padding;
runtime_params.stride_depth = params->stride_depth;
runtime_params.stride_height = params->stride_height;
runtime_params.stride_width = params->stride_width;
runtime_params.dilation_depth = params->dilation_depth_factor;
runtime_params.dilation_height = params->dilation_height_factor;
runtime_params.dilation_width = params->dilation_width_factor;
runtime_params.float_activation_min = output_activation_min;
runtime_params.float_activation_max = output_activation_max;
switch (kernel_type) {
case kReference: {
reference_ops::Conv3DTranspose(
runtime_params, GetTensorShape(input), GetTensorData<float>(input),
GetTensorShape(filter), GetTensorData<float>(filter),
GetTensorShape(bias), GetTensorData<float>(bias),
GetTensorShape(output), GetTensorData<float>(output));
break;
}
case kGenericOptimized: {
optimized_ops::Conv3DTranspose(
runtime_params, GetTensorShape(input), GetTensorData<float>(input),
GetTensorShape(filter), GetTensorData<float>(filter),
GetTensorShape(bias), GetTensorData<float>(bias),
GetTensorShape(output), GetTensorData<float>(output),
GetTensorShape(col2im), GetTensorData<float>(col2im),
CpuBackendContext::GetFromContext(context));
} break;
}
}
TfLiteStatus Eval(KernelType kernel_type, TfLiteContext* context,
TfLiteNode* node) {
auto* params =
reinterpret_cast<TfLiteConv3DTransposeParams*>(node->builtin_data);
OpData* opdata = reinterpret_cast<OpData*>(node->user_data);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
const TfLiteTensor* output_shape;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &output_shape));
const TfLiteTensor* filter;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &filter));
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 2, &input));
const TfLiteTensor* bias = GetInput(context, node, 3);
TfLiteTensor* col2im = opdata->need_col2im
? GetTemporary(context, node, opdata->col2im_index)
: nullptr;
if (IsDynamicTensor(output)) {
TF_LITE_ENSURE_OK(context, ResizeOutputAndTemporaryTensors(
context, opdata, params, output_shape,
filter, input, col2im, output));
}
if (params->dilation_depth_factor > 1 || params->dilation_height_factor > 1 ||
params->dilation_width_factor > 1) {
kernel_type = kReference;
}
switch (input->type) {
case kTfLiteFloat32:
EvalFloat(kernel_type, context, node, params, opdata, input, filter, bias,
col2im, output);
break;
default:
TF_LITE_KERNEL_LOG(context, "Type %s currently not supported.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
return kTfLiteOk;
}
template <KernelType kernel_type>
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
return Eval(kernel_type, context, node);
}
}
TfLiteRegistration* Register_CONV_3D_TRANSPOSE_REF() {
static TfLiteRegistration r = {
conv3d_transpose::Init, conv3d_transpose::Free,
conv3d_transpose::Prepare<conv3d_transpose::kReference>,
conv3d_transpose::Eval<conv3d_transpose::kReference>};
return &r;
}
TfLiteRegistration* Register_CONV_3D_TRANSPOSE_GENERIC_OPT() {
static TfLiteRegistration r = {
conv3d_transpose::Init, conv3d_transpose::Free,
conv3d_transpose::Prepare<conv3d_transpose::kGenericOptimized>,
conv3d_transpose::Eval<conv3d_transpose::kGenericOptimized>};
return &r;
}
TfLiteRegistration* Register_CONV_3D_TRANSPOSE() {
return Register_CONV_3D_TRANSPOSE_GENERIC_OPT();
}
}
}
} | #include <cstdint>
#include <initializer_list>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
enum class TestType {
kConst = 0,
kDynamic = 1,
};
class Conv3dTransposeOpModel : public SingleOpModel {
public:
Conv3dTransposeOpModel(
std::initializer_list<int> output_shape_data, const TensorData& filter,
const TensorData& input, const TensorData& bias, const TensorData& output,
TestType test_type, Padding padding = Padding_VALID,
int32_t stride_depth = 1, int32_t stride_width = 1,
int32_t stride_height = 1,
ActivationFunctionType activation = ActivationFunctionType_NONE,
int32_t dilation_depth = 1, int32_t dilation_width = 1,
int32_t dilation_height = 1) {
if (test_type == TestType::kDynamic) {
output_shape_ = AddInput({TensorType_INT32, {5}});
} else {
output_shape_ = AddConstInput(TensorType_INT32, output_shape_data, {5});
}
filter_ = AddInput(filter);
input_ = AddInput(input);
bias_ = AddInput(bias);
output_ = AddOutput(output);
SetBuiltinOp(
BuiltinOperator_CONV_3D_TRANSPOSE, BuiltinOptions_Conv3DOptions,
CreateConv3DOptions(builder_, padding, stride_depth, stride_width,
stride_height, activation, dilation_depth,
dilation_width, dilation_height)
.Union());
BuildInterpreter({GetShape(output_shape_), GetShape(filter_),
GetShape(input_), GetShape(bias_)});
if (test_type == TestType::kDynamic) {
PopulateTensor(output_shape_, output_shape_data);
}
}
Conv3dTransposeOpModel(
std::initializer_list<int> output_shape_data, const TensorData& filter,
const TensorData& input, const TensorData& output, TestType test_type,
Padding padding = Padding_VALID, int32_t stride_depth = 1,
int32_t stride_width = 1, int32_t stride_height = 1,
ActivationFunctionType activation = ActivationFunctionType_NONE,
int32_t dilation_depth = 1, int32_t dilation_width = 1,
int32_t dilation_height = 1) {
if (test_type == TestType::kDynamic) {
output_shape_ = AddInput({TensorType_INT32, {5}});
} else {
output_shape_ = AddConstInput(TensorType_INT32, output_shape_data, {5});
}
filter_ = AddInput(filter);
input_ = AddInput(input);
output_ = AddOutput(output);
SetBuiltinOp(
BuiltinOperator_CONV_3D_TRANSPOSE, BuiltinOptions_Conv3DOptions,
CreateConv3DOptions(builder_, padding, stride_depth, stride_width,
stride_height, activation, dilation_depth,
dilation_width, dilation_height)
.Union());
BuildInterpreter(
{GetShape(output_shape_), GetShape(filter_), GetShape(input_)});
if (test_type == TestType::kDynamic) {
PopulateTensor(output_shape_, output_shape_data);
}
}
void SetFilter(std::vector<float> f) { PopulateTensor(filter_, f); }
void SetBias(std::initializer_list<float> f) { PopulateTensor(bias_, f); }
void SetInput(std::vector<float> data) { PopulateTensor(input_, data); }
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
private:
int output_shape_;
int input_;
int filter_;
int bias_;
int output_;
};
template <typename T>
std::vector<T> CreateRangeVector(int N) {
std::vector<T> result;
for (int i = 0; i < N; ++i) result.push_back(i);
return result;
}
class Conv3dTransposeOpTest : public ::testing::TestWithParam<TestType> {};
TEST_P(Conv3dTransposeOpTest, InvalidInputDimsTest) {
EXPECT_DEATH_IF_SUPPORTED(
Conv3dTransposeOpModel m(
{1, 2, 3, 4, 5}, {TensorType_FLOAT32, {2, 2, 4, 1}},
{TensorType_FLOAT32, {3, 2, 2, 1}}, {TensorType_FLOAT32, {}},
Conv3dTransposeOpTest::GetParam()),
"input->dims->size != 5");
}
TEST_P(Conv3dTransposeOpTest, InvalidFilterDimsTest) {
EXPECT_DEATH_IF_SUPPORTED(
Conv3dTransposeOpModel m(
{1, 2, 3, 4, 5}, {TensorType_FLOAT32, {2, 2, 4, 1}},
{TensorType_FLOAT32, {1, 3, 2, 2, 1}}, {TensorType_FLOAT32, {}},
Conv3dTransposeOpTest::GetParam()),
"filter->dims->size != 5");
}
TEST_P(Conv3dTransposeOpTest, MismatchChannelSizeTest) {
EXPECT_DEATH_IF_SUPPORTED(
Conv3dTransposeOpModel m(
{1, 2, 3, 4, 5}, {TensorType_FLOAT32, {1, 2, 2, 4, 1}},
{TensorType_FLOAT32, {1, 3, 2, 2, 2}}, {TensorType_FLOAT32, {}},
Conv3dTransposeOpTest::GetParam()),
"SizeOfDimension.input, 4. != SizeOfDimension.filter, 4.");
}
TEST_P(Conv3dTransposeOpTest, MismatchBiasSizeTest) {
EXPECT_DEATH_IF_SUPPORTED(
Conv3dTransposeOpModel m(
{1, 2, 3, 4, 5}, {TensorType_FLOAT32, {1, 3, 2, 2, 2}},
{TensorType_FLOAT32, {1, 2, 2, 4, 2}}, {TensorType_FLOAT32, {3}},
{TensorType_FLOAT32, {}}, Conv3dTransposeOpTest::GetParam()),
"NumElements.bias. != SizeOfDimension.filter, 3.");
}
TEST_P(Conv3dTransposeOpTest, SimpleFloat32Test) {
Conv3dTransposeOpModel m(
{1, 3, 3, 5, 2}, {TensorType_FLOAT32, {2, 2, 2, 2, 2}},
{TensorType_FLOAT32, {1, 2, 2, 4, 2}}, {TensorType_FLOAT32, {}},
Conv3dTransposeOpTest::GetParam());
m.SetInput(CreateRangeVector<float>(32));
m.SetFilter({-1, -1, -1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1, 1, -1, -1,
1, -1, 1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, 1, -1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAre(1, 3, 3, 5, 2));
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(
{-1, -1, -4, -4, -8, -8, -12, -12, 1, 1, -16, -16, -18,
-16, -18, -20, -18, -24, 14, -12, 1, 17, 18, 4, 22, 4,
26, 4, 29, -29, -34, -32, -36, -30, -36, -30, -36, -30, 14,
2, -50, 2, -8, -26, -8, -26, -8, -26, 74, -44, -16, 50,
28, 4, 28, 4, 28, 4, 60, -62, -1, 33, 32, 38, 36,
42, 40, 46, 45, 1, -34, 50, 10, 54, 10, 58, 10, 62,
60, 0, -49, 1, -54, 0, -58, 0, -62, 0, -1, -1}));
}
TEST_P(Conv3dTransposeOpTest, PaddingValidTest) {
Conv3dTransposeOpModel m(
{1, 4, 5, 6, 2}, {TensorType_FLOAT32, {2, 2, 2, 2, 2}},
{TensorType_FLOAT32, {1, 3, 4, 5, 2}}, {TensorType_FLOAT32, {}},
Conv3dTransposeOpTest::GetParam());
m.SetInput(CreateRangeVector<float>(120));
m.SetFilter({-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1,
1, 1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, 1, 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAre(1, 4, 5, 6, 2));
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(
{-1, -1, -6, -6, -14, -14, -22, -22, -30, -30, -17,
-17, -22, -20, -50, -46, -58, -58, -66, -70, -74, -82,
-20, -54, -62, -40, -90, -106, -98, -118, -106, -130, -114,
-142, -20, -94, -102, -60, -130, -166, -138, -178, -146, -190,
-154, -202, -20, -134, -61, 1, -4, -60, -4, -64, -4,
-68, -4, -72, 77, -77, -80, -80, -160, -164, -164, -172,
-168, -180, -172, -188, -96, -96, -162, -98, -188, -282, -196,
-290, -204, -298, -212, -306, -18, -196, -202, -118, -228, -322,
-236, -330, -244, -338, -252, -346, -18, -216, -242, -138, -268,
-362, -276, -370, -284, -378, -292, -386, -18, -236, -202, 2,
-68, -78, -72, -78, -76, -78, -80, -78, 158, -80, -80,
-160, -240, -324, -244, -332, -248, -340, -252, -348, -176, -176,
-322, -178, -348, -442, -356, -450, -364, -458, -372, -466, -18,
-276, -362, -198, -388, -482, -396, -490, -404, -498, -412, -506,
-18, -296, -402, -218, -428, -522, -436, -530, -444, -538, -452,
-546, -18, -316, -362, 2, -148, -78, -152, -78, -156, -78,
-160, -78, 238, -80, 161, 1, 166, 2, 170, 2, 174,
2, 178, 2, 1, 1, 20, 2, 22, 164, 22, 168,
22, 172, 22, 176, 2, 178, 20, 2, 22, 184, 22,
188, 22, 192, 22, 196, 2, 198, 20, 2, 22, 204,
22, 208, 22, 212, 22, 216, 2, 218, -221, 1, -224,
222, -228, 226, -232, 230, -236, 234, 1, 237}));
}
TEST_P(Conv3dTransposeOpTest, PaddingSameTest) {
Conv3dTransposeOpModel m(
{1, 3, 4, 5, 2}, {TensorType_FLOAT32, {2, 2, 2, 2, 2}},
{TensorType_FLOAT32, {1, 3, 4, 5, 2}}, {TensorType_FLOAT32, {}},
Conv3dTransposeOpTest::GetParam(), Padding_SAME);
m.SetInput(CreateRangeVector<float>(120));
m.SetFilter({1, -1, 1, -1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1,
-1, 1, -1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1, 1, -1, 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAre(1, 3, 4, 5, 2));
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(
{-1, -1, -2, 0, -2, 0, -2, 0, -2, 0, -2, 0, -4, 2,
-4, 2, -4, 2, -4, 2, -2, 0, -4, 2, -4, 2, -4, 2,
-4, 2, -2, 0, -4, 2, -4, 2, -4, 2, -4, 2, 0, 0,
-2, 2, -6, 2, -10, 2, -14, 2, 0, 2, -18, 10, -18, 14,
-18, 18, -18, 22, 20, 22, -18, 30, -18, 34, -18, 38, -18, 42,
40, 42, -18, 50, -18, 54, -18, 58, -18, 62, 0, 0, -82, 2,
-86, 2, -90, 2, -94, 2, 80, 82, -18, 90, -18, 94, -18, 98,
-18, 102, 100, 102, -18, 110, -18, 114, -18, 118, -18, 122, 120, 122,
-18, 130, -18, 134, -18, 138, -18, 142}));
}
TEST_P(Conv3dTransposeOpTest, PaddingValidComplexTest) {
Conv3dTransposeOpModel m(
{2, 4, 3, 2, 2}, {TensorType_FLOAT32, {2, 2, 2, 2, 2}},
{TensorType_FLOAT32, {2, 3, 2, 1, 2}}, {TensorType_FLOAT32, {}},
Conv3dTransposeOpTest::GetParam(), Padding_VALID);
m.SetInput(CreateRangeVector<float>(24));
m.SetFilter({1, -1, 1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, 1,
1, -1, 1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAre(2, 4, 3, 2, 2));
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(
{-1, 1, 1, -1, -2, 4, 2, 0, -1, -5, 1, 5, -2, 10, 2, -2,
-4, 8, 4, 8, -2, -18, 2, 18, -2, 26, 2, -2, -4, 8, 4, 24,
-2, -34, 2, 34, -1, 17, 1, -1, -2, 4, 2, 16, -1, -21, 1, 21,
-1, 25, 1, -1, -2, 4, 2, 24, -1, -29, 1, 29, -2, 58, 2, -2,
-4, 8, 4, 56, -2, -66, 2, 66, -2, 74, 2, -2, -4, 8, 4, 72,
-2, -82, 2, 82, -1, 41, 1, -1, -2, 4, 2, 40, -1, -45, 1, 45}));
}
TEST_P(Conv3dTransposeOpTest, StrideTest) {
Conv3dTransposeOpModel m(
{2, 4, 3, 2, 2}, {TensorType_FLOAT32, {2, 2, 2, 2, 2}},
{TensorType_FLOAT32, {2, 2, 2, 1, 2}}, {TensorType_FLOAT32, {}},
      Conv3dTransposeOpTest::GetParam(), Padding_VALID,
      /*stride_depth=*/2,
      /*stride_width=*/1, /*stride_height=*/1);
m.SetInput(CreateRangeVector<float>(16));
m.SetFilter({1, -1, 1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, 1,
1, -1, 1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAre(2, 4, 3, 2, 2));
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(
{-1, 1, 1, -1, -2, 4, 2, 0, -1, -5, 1, 5, -1, 1, 1, -1,
-2, 4, 2, 0, -1, -5, 1, 5, -1, 9, 1, -1, -2, 4, 2, 8,
-1, -13, 1, 13, -1, 9, 1, -1, -2, 4, 2, 8, -1, -13, 1, 13,
-1, 17, 1, -1, -2, 4, 2, 16, -1, -21, 1, 21, -1, 17, 1, -1,
-2, 4, 2, 16, -1, -21, 1, 21, -1, 25, 1, -1, -2, 4, 2, 24,
-1, -29, 1, 29, -1, 25, 1, -1, -2, 4, 2, 24, -1, -29, 1, 29}));
}
TEST_P(Conv3dTransposeOpTest, StrideAndPaddingSameTest) {
Conv3dTransposeOpModel m(
{2, 4, 2, 1, 2}, {TensorType_FLOAT32, {2, 2, 2, 2, 2}},
{TensorType_FLOAT32, {2, 2, 2, 1, 2}}, {TensorType_FLOAT32, {}},
      Conv3dTransposeOpTest::GetParam(), Padding_SAME,
      /*stride_depth=*/2,
      /*stride_width=*/1, /*stride_height=*/1);
m.SetInput(CreateRangeVector<float>(16));
m.SetFilter({1, -1, 1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, 1,
1, -1, 1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAre(2, 4, 2, 1, 2));
EXPECT_THAT(m.GetOutput(),
ElementsAreArray({-1, 1, -2, 4, -1, 1, -2, 4, -1, 9, -2,
4, -1, 9, -2, 4, -1, 17, -2, 4, -1, 17,
-2, 4, -1, 25, -2, 4, -1, 25, -2, 4}));
}
TEST_P(Conv3dTransposeOpTest, DilationTest) {
Conv3dTransposeOpModel m(
{1, 3, 3, 2, 2}, {TensorType_FLOAT32, {1, 2, 2, 2, 1}},
{TensorType_FLOAT32, {1, 3, 1, 1, 1}}, {TensorType_FLOAT32, {}},
      Conv3dTransposeOpTest::GetParam(), Padding_VALID,
      /*stride_depth=*/1,
      /*stride_width=*/1, /*stride_height=*/1,
      ActivationFunctionType_NONE,
      /*dilation_depth=*/1, /*dilation_width=*/1,
      /*dilation_height=*/2);
m.SetInput(CreateRangeVector<float>(3));
m.SetFilter({1, -1, 1, 1, -1, 1, 1, -1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAre(1, 3, 3, 2, 2));
EXPECT_THAT(m.GetOutput(),
ElementsAreArray({0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, -1, 1, 1, 0, 0, 0, 0, -1, 1, 1, -1,
2, -2, 2, 2, 0, 0, 0, 0, -2, 2, 2, -2}));
}
TEST_P(Conv3dTransposeOpTest, BiasTest) {
Conv3dTransposeOpModel m({2, 4, 3, 2, 2},
{TensorType_FLOAT32, {2, 2, 2, 2, 2}},
{TensorType_FLOAT32, {2, 3, 2, 1, 2}},
{TensorType_FLOAT32, {2}}, {TensorType_FLOAT32, {}},
Conv3dTransposeOpTest::GetParam(), Padding_VALID);
m.SetInput(CreateRangeVector<float>(24));
m.SetFilter({1, -1, 1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, 1,
1, -1, 1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, 1});
m.SetBias({1, 2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAre(2, 4, 3, 2, 2));
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(
{0, 3, 2, 1, -1, 6, 3, 2, 0, -3, 2, 7, -1, 12, 3, 0,
-3, 10, 5, 10, -1, -16, 3, 20, -1, 28, 3, 0, -3, 10, 5, 26,
-1, -32, 3, 36, 0, 19, 2, 1, -1, 6, 3, 18, 0, -19, 2, 23,
0, 27, 2, 1, -1, 6, 3, 26, 0, -27, 2, 31, -1, 60, 3, 0,
-3, 10, 5, 58, -1, -64, 3, 68, -1, 76, 3, 0, -3, 10, 5, 74,
-1, -80, 3, 84, 0, 43, 2, 1, -1, 6, 3, 42, 0, -43, 2, 47}));
}
INSTANTIATE_TEST_SUITE_P(Conv3dTransposeOpTest, Conv3dTransposeOpTest,
::testing::Values(TestType::kConst,
TestType::kDynamic));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/conv3d_transpose.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/conv3d_transpose_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ead10a6f-65f0-4f43-9abf-756f28550ec5 | cpp | tensorflow/tensorflow | ceil | tensorflow/lite/experimental/shlo/ops/ceil.cc | tensorflow/lite/delegates/xnnpack/ceil_test.cc | #include "tensorflow/lite/experimental/shlo/ops/ceil.h"
#include <cmath>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/bf16.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/f16.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
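// Element-wise ceiling functor; F16 and BF16 are computed in float and
// narrowed back to the half-precision type.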
struct Ceil {
template <class T>
T operator()(T v) const {
return std::ceil(v);
}
};
template <>
F16 Ceil::operator()<F16>(F16 val) const {
return F16(operator()(static_cast<float>(val)));
}
template <>
BF16 Ceil::operator()<BF16>(BF16 val) const {
return BF16(operator()(static_cast<float>(val)));
}
CeilOp Create(CeilOp::Attributes) { return {}; }
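// Propagates the input shape to the output and restricts ceil to float and
// per-tensor quantized tensors whose baseline types match.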
absl::Status Prepare(CeilOp& op, const Tensor& input, Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(input.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(CheckSupportedTypes(
CheckCtx("ceil"), input, IsFloatTensor, IsQuantizedPerTensorTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("ceil"), input, output));
return absl::OkStatus();
}
absl::Status Evaluate(CeilOp& op, const Tensor& input, Tensor& output) {
Ceil ceil;
if (input.IsPerTensorQuantized()) {
DISPATCH_QUANTIZED(
detail::DequantizeOpQuantizePerTensor,
input.quantized_per_tensor_element_type().StorageType(),
input.quantized_per_tensor_element_type().ExpressedType(), ceil, input,
output)
} else if (IsFloatTensor(input)) {
DISPATCH_FLOAT(detail::EvaluateNoQuantization, input.tensor_element_type(),
ceil, input, output);
}
return absl::FailedPreconditionError("Unsupported tensor type.");
}
}; | #include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/delegates/xnnpack/unary_elementwise_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace xnnpack {
TEST(Ceil, 4D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, height, width, channels})
.Test(BuiltinOperator_CEIL, xnnpack_delegate.get());
}
TEST(Ceil, 3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, width, channels})
.Test(BuiltinOperator_CEIL, xnnpack_delegate.get());
}
TEST(Ceil, 2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, channels})
.Test(BuiltinOperator_CEIL, xnnpack_delegate.get());
}
TEST(Ceil, 1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
UnaryElementwiseTester().Shape({batch}).Test(BuiltinOperator_CEIL,
xnnpack_delegate.get());
}
TEST(Ceil, MultiThreading) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
delegate_options.num_threads = 2;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, height, width, channels})
.Test(BuiltinOperator_CEIL, xnnpack_delegate.get());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/ceil.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/ceil_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c6b69f61-0f2e-4cea-9629-0495940f2c47 | cpp | tensorflow/tensorflow | add_n | tensorflow/lite/kernels/add_n.cc | tensorflow/lite/kernels/add_n_test.cc | #include <stdint.h>
#include <algorithm>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/cpu_backend_threadpool.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace add_n {
constexpr int kInputTensor1 = 0;
constexpr int kOutputTensor = 0;
struct OpData {
int scratch_tensor_index;
};
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
auto* op_data = new OpData();
context->AddTensors(context, 1, &op_data->scratch_tensor_index);
return op_data;
}
void Free(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<OpData*>(buffer);
}
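// Requires at least two identically shaped inputs of the same type and sizes
// a scratch tensor with one input-sized slab per worker thread for the
// multithreaded AddN kernel.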
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
int num_inputs = NumInputs(node);
TF_LITE_ENSURE(context, num_inputs >= 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input1;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor1, &input1));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
output->type = input1->type;
OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
TfLiteIntArrayFree(node->temporaries);
node->temporaries = TfLiteIntArrayCreate(1);
node->temporaries->data[0] = op_data->scratch_tensor_index;
TfLiteTensor* scratch_tensor;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, 0, &scratch_tensor));
scratch_tensor->type = input1->type;
scratch_tensor->allocation_type = kTfLiteArenaRw;
CpuBackendContext* cpu_backend_context =
CpuBackendContext::GetFromContext(context);
const int thread_count =
std::min(std::max(1, static_cast<int>(num_inputs) / 2),
cpu_backend_context->max_num_threads());
TfLiteIntArray* scratch_shape = TfLiteIntArrayCreate(1);
scratch_shape->data[0] = thread_count * NumElements(input1);
TF_LITE_ENSURE_OK(
context, context->ResizeTensor(context, scratch_tensor, scratch_shape));
for (int i = kInputTensor1 + 1; i < num_inputs; ++i) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, i, &input));
TF_LITE_ENSURE(context, HaveSameShapes(input1, input));
TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input->type);
}
TfLiteIntArray* input1_dims = input1->dims;
TfLiteIntArray* output_dims = TfLiteIntArrayCopy(input1_dims);
return context->ResizeTensor(context, output, output_dims);
}
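// Sums all inputs element-wise with the optimized AddN, using the scratch
// tensor as per-thread accumulation space.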
template <typename T>
TfLiteStatus EvalAddN(TfLiteContext* context, TfLiteNode* node) {
VectorOfTensors<T> all_inputs(*context, *node->inputs);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
int num_inputs = NumInputs(node);
const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
CpuBackendContext* cpu_backend_context =
CpuBackendContext::GetFromContext(context);
TfLiteTensor* scratch_tensor;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, 0, &scratch_tensor));
optimized_ops::AddN<T>(GetTensorShape(input1), num_inputs, all_inputs.data(),
GetTensorData<T>(output),
GetTensorData<T>(scratch_tensor), cpu_backend_context);
return kTfLiteOk;
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input1;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor1, &input1));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
if (output->type == kTfLiteFloat32) {
TF_LITE_ENSURE_OK(context, EvalAddN<float>(context, node));
} else if (output->type == kTfLiteInt32) {
TF_LITE_ENSURE_OK(context, EvalAddN<int32_t>(context, node));
} else {
TF_LITE_KERNEL_LOG(context, "AddN only supports FLOAT32|INT32 now, got %s.",
TfLiteTypeGetName(output->type));
return kTfLiteError;
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_ADD_N() {
static TfLiteRegistration r = {add_n::Init, add_n::Free, add_n::Prepare,
add_n::Eval};
return &r;
}
}
}
} | #include <stdint.h>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/kernels/add_n_test_common.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
TEST(FloatAddNOpModel, AddMultipleTensors) {
FloatAddNOpModel m({{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}}},
{TensorType_FLOAT32, {}});
m.PopulateTensor<float>(m.input(0), {-2.0, 0.2, 0.7, 0.8});
m.PopulateTensor<float>(m.input(1), {0.1, 0.2, 0.3, 0.5});
m.PopulateTensor<float>(m.input(2), {0.5, 0.1, 0.1, 0.2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
Pointwise(FloatingPointEq(), {-1.4, 0.5, 1.1, 1.5}));
}
TEST(FloatAddNOpModel, Add2Tensors) {
FloatAddNOpModel m(
{{TensorType_FLOAT32, {1, 2, 2, 1}}, {TensorType_FLOAT32, {1, 2, 2, 1}}},
{TensorType_FLOAT32, {}});
m.PopulateTensor<float>(m.input(0), {-2.0, 0.2, 0.7, 0.8});
m.PopulateTensor<float>(m.input(1), {0.1, 0.2, 0.3, 0.5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
Pointwise(FloatingPointEq(), {-1.9, 0.4, 1.0, 1.3}));
}
TEST(IntegerAddNOpModel, AddMultipleTensors) {
IntegerAddNOpModel m({{TensorType_INT32, {1, 2, 2, 1}},
{TensorType_INT32, {1, 2, 2, 1}},
{TensorType_INT32, {1, 2, 2, 1}}},
{TensorType_INT32, {}});
m.PopulateTensor<int32_t>(m.input(0), {-20, 2, 7, 8});
m.PopulateTensor<int32_t>(m.input(1), {1, 2, 3, 5});
m.PopulateTensor<int32_t>(m.input(2), {10, -5, 1, -2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({-9, -1, 11, 11}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/add_n.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/add_n_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1f1605fc-b75a-439b-9e49-349ab0cdafed | cpp | tensorflow/tensorflow | rng_util | tensorflow/lite/kernels/rng_util.cc | tensorflow/lite/kernels/rng_util_test.cc | #include "tensorflow/lite/kernels/rng_util.h"
#include <array>
#include <cstdint>
namespace tflite {
namespace rng {
static constexpr uint32_t kThreefryParity = 0x1BD11BDA;
static constexpr uint64_t kPhiloxM4x32A = 0xD2511F53;
static constexpr uint64_t kPhiloxM4x32B = 0xCD9E8D57;
static constexpr uint32_t kPhiloxW32A = 0x9E3779B9;
static constexpr uint32_t kPhiloxW32B = 0xBB67AE85;
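// Threefry-2x32 block function with 20 mixing rounds (five groups of four
// rotations) and key injection after each group.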
std::array<uint32_t, 2> Threefry2x32(uint32_t key_0, uint32_t key_1,
std::array<uint32_t, 2> ctr) {
constexpr std::array<std::array<int, 4>, 2> rotations{
std::array<int, 4>{13, 15, 26, 6}, std::array<int, 4>{17, 29, 16, 24}};
uint32_t key_2 = key_0 ^ key_1 ^ kThreefryParity;
ctr[0] += key_0;
ctr[1] += key_1;
auto apply_round = [&](int r, uint32_t ks0, uint32_t ks1, int b) {
for (int rot : rotations[r]) {
ctr[0] += ctr[1];
ctr[1] = (ctr[1] << rot) | (ctr[1] >> (32 - rot));
ctr[1] ^= ctr[0];
}
ctr[0] += ks0;
ctr[1] += ks1 + b;
};
apply_round(0, key_1, key_2, 1);
apply_round(1, key_2, key_0, 2);
apply_round(0, key_0, key_1, 3);
apply_round(1, key_1, key_2, 4);
apply_round(0, key_2, key_0, 5);
return ctr;
}
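// Philox-4x32 with 10 rounds; each round mixes the counter with two 64-bit
// multiplies and advances the key by the Weyl-sequence constants.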
std::array<uint32_t, 4> Philox4x32(uint32_t key_0, uint32_t key_1,
std::array<uint32_t, 4> ctr) {
struct u32pair {
uint32_t low;
uint32_t high;
};
union prod {
u32pair hilo;
uint64_t prod;
};
for (int i = 0; i < 10; ++i) {
prod p0, p1;
p0.prod = kPhiloxM4x32A * static_cast<uint64_t>(ctr[0]);
p1.prod = kPhiloxM4x32B * static_cast<uint64_t>(ctr[2]);
ctr = {{p1.hilo.high ^ ctr[1] ^ key_0, p1.hilo.low,
p0.hilo.high ^ ctr[3] ^ key_1, p0.hilo.low}};
key_0 += kPhiloxW32A;
key_1 += kPhiloxW32B;
}
return ctr;
}
}
} | #include "tensorflow/lite/kernels/rng_util.h"
#include <array>
#include <cstdint>
#include <limits>
#include <gtest/gtest.h>
namespace tflite {
namespace {
using tflite::rng::Philox4x32;
using tflite::rng::Threefry2x32;
TEST(RngUtilTest, Threefry2x32Test) {
std::array<uint32_t, 2> results = Threefry2x32(0, 0, {0, 0});
std::array<uint32_t, 2> expected = {0x6B200159u, 0x99BA4EFEu};
ASSERT_EQ(results, expected);
uint32_t u32_max = std::numeric_limits<uint32_t>::max();
results = Threefry2x32(u32_max, u32_max, {u32_max, u32_max});
expected = {0x1CB996FCu, 0xBB002BE7u};
ASSERT_EQ(results, expected);
results = Threefry2x32(0x13198A2Eu, 0x03707344u, {0x243F6A88u, 0x85A308D3u});
expected = {0xC4923A9Cu, 0x483DF7A0u};
ASSERT_EQ(results, expected);
}
TEST(RngUtilTest, Philox4x32Test) {
std::array<uint32_t, 4> results = Philox4x32(0, 0, {0, 0, 0, 0});
std::array<uint32_t, 4> expected = {0x6627E8D5u, 0xE169C58Du, 0xBC57AC4Cu,
0x9B00DBD8u};
ASSERT_EQ(results, expected);
uint32_t u32_max = std::numeric_limits<uint32_t>::max();
results = Philox4x32(u32_max, u32_max, {u32_max, u32_max, u32_max, u32_max});
expected = {0x408F276Du, 0x41C83B0Eu, 0xA20BC7C6u, 0x6D5451FDu};
ASSERT_EQ(results, expected);
results = Philox4x32(0xA4093822u, 0x299F31D0u,
{0x243F6A88u, 0x85A308D3u, 0x13198A2Eu, 0x03707344u});
expected = {0xD16CFE09u, 0x94FDCCEBu, 0x5001E420u, 0x24126EA1u};
ASSERT_EQ(results, expected);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/rng_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/rng_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
18f9c3b8-6cfb-4ca6-8922-7f83ac1b5560 | cpp | tensorflow/tensorflow | matrix_set_diag | tensorflow/lite/kernels/matrix_set_diag.cc | tensorflow/lite/kernels/matrix_set_diag_test.cc | #include <stdint.h>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace matrix_set_diag {
constexpr int kInputTensor = 0;
constexpr int kDiagonalTensor = 1;
constexpr int kOutputTensor = 0;
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteIntArray* input_dims = input->dims;
int input_dims_size = input_dims->size;
TF_LITE_ENSURE(context, input_dims_size >= 2);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TfLiteIntArray* output_shape = TfLiteIntArrayCreate(input_dims_size);
for (int i = 0; i < input_dims_size; i++) {
output_shape->data[i] = input_dims->data[i];
}
output->type = input->type;
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, output, output_shape));
return kTfLiteOk;
}
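// Copies the input into the output and overwrites the main diagonal of each
// batched matrix with consecutive values taken from the diagonal tensor.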
template <typename T>
void FillDiagImpl(const T* in, const T* diag, T* out, const int batch_size,
const int row_size, const int col_size) {
int idx = 0;
for (int b = 0; b < batch_size; b++) {
for (int i = 0; i < row_size; i++) {
for (int j = 0; j < col_size; ++j) {
if (i == j) {
out[i * col_size + j] = diag[idx];
idx++;
} else {
out[i * col_size + j] = in[i * col_size + j];
}
}
}
out += row_size * col_size;
in += row_size * col_size;
}
}
template <typename T>
void FillDiag(const TfLiteTensor* input, const TfLiteTensor* diag,
TfLiteTensor* output, const int batch_size, const int row_size,
const int col_size) {
FillDiagImpl<T>(GetTensorData<T>(input), GetTensorData<T>(diag),
GetTensorData<T>(output), batch_size, row_size, col_size);
}
void FillDiagHelper(const TfLiteTensor* input, const TfLiteTensor* diag,
TfLiteTensor* output) {
const int num_output_dims = output->dims->size;
int batch_size = 1;
for (int i = 0; i < num_output_dims - 2; ++i) {
batch_size *= output->dims->data[i];
}
const int row_size = output->dims->data[num_output_dims - 2];
const int col_size = output->dims->data[num_output_dims - 1];
switch (output->type) {
case kTfLiteInt64: {
return FillDiag<int64_t>(input, diag, output, batch_size, row_size,
col_size);
}
case kTfLiteInt32: {
return FillDiag<int32_t>(input, diag, output, batch_size, row_size,
col_size);
}
case kTfLiteInt16: {
return FillDiag<int16_t>(input, diag, output, batch_size, row_size,
col_size);
}
case kTfLiteInt8: {
return FillDiag<int8_t>(input, diag, output, batch_size, row_size,
col_size);
}
case kTfLiteUInt8: {
return FillDiag<uint8_t>(input, diag, output, batch_size, row_size,
col_size);
}
default:
return FillDiag<float>(input, diag, output, batch_size, row_size,
col_size);
}
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const TfLiteTensor* diag;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kDiagonalTensor, &diag));
FillDiagHelper(input, diag, output);
return kTfLiteOk;
}
}
TfLiteRegistration* Register_MATRIX_SET_DIAG() {
static TfLiteRegistration r = {nullptr, nullptr, matrix_set_diag::Prepare,
matrix_set_diag::Eval};
return &r;
}
}
}
} | #include <stdint.h>
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
template <typename T>
class MatrixSetDiagOpModel : public SingleOpModel {
public:
explicit MatrixSetDiagOpModel(const TensorData& input,
const TensorData& diag) {
input_ = AddInput(input);
diag_ = AddInput(diag);
output_ = AddOutput({input.type, {}});
SetBuiltinOp(BuiltinOperator_MATRIX_SET_DIAG,
BuiltinOptions_MatrixSetDiagOptions,
CreateMatrixSetDiagOptions(builder_).Union());
BuildInterpreter({GetShape(input_), GetShape(diag_)});
}
int input() { return input_; }
int diag() { return diag_; }
std::vector<T> GetOutput() { return ExtractVector<T>(output_); }
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
TfLiteType GetOutputType() {
TfLiteTensor* t = interpreter_->tensor(output_);
return t->type;
}
private:
int input_;
int diag_;
int output_;
};
template <typename T>
class MatrixSetDiagOpTest : public ::testing::Test {};
using TypesUnderTest =
::testing::Types<TypeUnion<int32_t>, TypeUnion<float>, TypeUnion<int16_t>,
TypeUnion<int8_t>, TypeUnion<uint8_t>>;
TYPED_TEST_SUITE(MatrixSetDiagOpTest, TypesUnderTest);
TYPED_TEST(MatrixSetDiagOpTest, ThreeByThreeDiagScatter) {
MatrixSetDiagOpModel<typename TypeParam::ScalarType> model(
{TypeParam::tensor_type, {3, 3}}, {TypeParam::tensor_type, {3}});
model.template PopulateTensor<typename TypeParam::ScalarType>(model.input(),
{7, 1, 2,
3, 8, 4,
5, 6, 9});
model.template PopulateTensor<typename TypeParam::ScalarType>(model.diag(),
{0, 4, 2});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(3, 3));
EXPECT_THAT(model.GetOutput(), ElementsAreArray({0, 1, 2,
3, 4, 4,
5, 6, 2}));
EXPECT_THAT(model.GetOutputType(), TypeParam::tflite_type);
}
TEST(MatrixSetDiagTest, Int32TestMoreColumnsThanRows) {
MatrixSetDiagOpModel<int32_t> model({TensorType_INT32, {2, 3}},
{TensorType_INT32, {2}});
model.PopulateTensor<int32_t>(model.input(), {0, 0, 0,
9, 9, 9});
model.PopulateTensor<int32_t>(model.diag(), {1, 1});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(2, 3));
EXPECT_THAT(model.GetOutput(), ElementsAreArray({1, 0, 0,
9, 1, 9}));
EXPECT_THAT(model.GetOutputType(), TfLiteType::kTfLiteInt32);
}
TEST(MatrixSetDiagTest, Int32TestTwoDimDiag) {
MatrixSetDiagOpModel<int32_t> model({TensorType_INT32, {2, 4, 4}},
{TensorType_INT32, {2, 4}});
model.PopulateTensor<int32_t>(model.input(), {5, 5, 5, 5,
5, 5, 5, 5,
5, 5, 5, 5,
5, 5, 5, 5,
1, 1, 1, 1,
1, 1, 1, 1,
1, 1, 1, 1,
1, 1, 1, 1});
model.PopulateTensor<int32_t>(model.diag(), {1, 2, 3, 4, 5, 6, 7, 8});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(2, 4, 4));
EXPECT_THAT(model.GetOutput(), ElementsAreArray({1, 5, 5, 5,
5, 2, 5, 5,
5, 5, 3, 5,
5, 5, 5, 4,
5, 1, 1, 1,
1, 6, 1, 1,
1, 1, 7, 1,
1, 1, 1, 8}));
EXPECT_THAT(model.GetOutputType(), TfLiteType::kTfLiteInt32);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/matrix_set_diag.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/matrix_set_diag_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8e5fb475-639c-46df-a5e4-ac0be1209b8a | cpp | tensorflow/tensorflow | sign_custom | tensorflow/lite/kernels/sign_custom.cc | tensorflow/lite/kernels/sign_custom_test.cc | #include <cmath>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/custom_ops_register.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace custom {
namespace sign {
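// Prepare: expects exactly one input, requires the output type to match the
// input type, and resizes the output to the input's shape.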
TfLiteStatus PointwiseUnaryOpPrepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, tflite::NumInputs(node), 1);
const TfLiteTensor* input = tflite::GetInput(context, node, 0);
TfLiteTensor* output = tflite::GetOutput(context, node, 0);
TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
TfLiteIntArray* output_shape = TfLiteIntArrayCopy(input->dims);
return context->ResizeTensor(context, output, output_shape);
}
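// Applies Op::Eval element-wise over the flattened input and writes the
// results into the output tensor.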
template <typename Op, typename T>
TfLiteStatus PointwiseUnaryOpDoEval(
TfLiteContext* context,
const TfLiteTensor* input,
TfLiteTensor* output) {
const T* data = tflite::GetTensorData<T>(input);
T* data_output = tflite::GetTensorData<T>(output);
const int64_t num_elements = NumElements(input);
for (int64_t i = 0; i < num_elements; ++i) {
data_output[i] = Op::template Eval<T>(data[i]);
}
return TfLiteStatus::kTfLiteOk;
}
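// Dispatches the element-wise evaluation on the output type; only float32 and
// float64 are supported.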
template <typename Op>
TfLiteStatus PointwiseUnaryOpEval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input = tflite::GetInput(context, node, 0);
TfLiteTensor* output = tflite::GetOutput(context, node, 0);
switch (output->type) {
case kTfLiteFloat32:
TF_LITE_ENSURE_OK(
context,
(PointwiseUnaryOpDoEval<Op, float>(context, input, output)));
break;
case kTfLiteFloat64:
TF_LITE_ENSURE_OK(
context,
(PointwiseUnaryOpDoEval<Op, double>(context, input, output)));
break;
default: {
TF_LITE_KERNEL_LOG(context, "Unsupported datatype for sign output: %s",
TfLiteTypeGetName(output->type));
return TfLiteStatus::kTfLiteError;
}
}
return TfLiteStatus::kTfLiteOk;
}
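// Sign functor: returns +1 for positive values, -1 for negative values and 0
// otherwise.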
struct Sign {
template <typename T>
static T Eval(T x) {
if (x > 0) {
return 1;
}
if (x < 0) {
return -1;
}
return 0;
}
};
}
TfLiteRegistration* Register_SIGN() {
static TfLiteRegistration r = {nullptr, nullptr,
sign::PointwiseUnaryOpPrepare,
sign::PointwiseUnaryOpEval<sign::Sign>};
return &r;
}
}
}
} | #include <cmath>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/custom_ops_register.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/testing/util.h"
namespace tflite {
namespace {
template <typename T>
tflite::TensorType GetTTEnum();
template <>
tflite::TensorType GetTTEnum<float>() {
return tflite::TensorType_FLOAT32;
}
template <>
tflite::TensorType GetTTEnum<double>() {
return tflite::TensorType_FLOAT64;
}
class SignModel : public tflite::SingleOpModel {
public:
SignModel(tflite::TensorData x,
tflite::TensorData output) {
x_ = AddInput(x);
output_ = AddOutput(output);
SetCustomOp("Sign", {}, ops::custom::Register_SIGN);
BuildInterpreter({GetShape(x_)});
}
int x_;
int output_;
template <typename T>
std::vector<T> GetOutput(const std::vector<T>& x) {
PopulateTensor<T>(x_, x);
Invoke();
return ExtractVector<T>(output_);
}
};
template <typename Float>
class SignCustomTest : public ::testing::Test {
public:
using FloatType = Float;
};
using TestTypes = ::testing::Types<float, double>;
TYPED_TEST_SUITE(SignCustomTest, TestTypes);
TYPED_TEST(SignCustomTest, TestScalar) {
using Float = typename TestFixture::FloatType;
tflite::TensorData x = {GetTTEnum<Float>(), {}};
tflite::TensorData output = {GetTTEnum<Float>(), {}};
SignModel m(x, output);
auto got = m.GetOutput<Float>({0.0});
ASSERT_EQ(got.size(), 1);
EXPECT_FLOAT_EQ(got[0], 0.0);
ASSERT_FLOAT_EQ(m.GetOutput<Float>({5.0})[0], 1.0);
ASSERT_FLOAT_EQ(m.GetOutput<Float>({-3.0})[0], -1.0);
}
TYPED_TEST(SignCustomTest, TestBatch) {
using Float = typename TestFixture::FloatType;
tflite::TensorData x = {GetTTEnum<Float>(), {4, 2, 1}};
tflite::TensorData output = {GetTTEnum<Float>(), {4, 2, 1}};
SignModel m(x, output);
std::vector<Float> x_data = {0.8, -0.7, 0.6, -0.5, 0.4, -0.3, 0.2, 0.0};
auto got = m.GetOutput<Float>(x_data);
EXPECT_EQ(got, std::vector<Float>(
{1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, 0.0}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/sign_custom.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/sign_custom_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
44a5ad0c-6a54-4da0-90bf-7244d1e5c275 | cpp | tensorflow/tensorflow | audio_spectrogram | tensorflow/lite/kernels/audio_spectrogram.cc | tensorflow/lite/kernels/audio_spectrogram_test.cc | #include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <vector>
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/spectrogram.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace custom {
namespace audio_spectrogram {
constexpr int kInputTensor = 0;
constexpr int kOutputTensor = 0;
enum KernelType {
kReference,
};
typedef struct {
int window_size;
int stride;
bool magnitude_squared;
int output_height;
internal::Spectrogram* spectrogram;
} TfLiteAudioSpectrogramParams;
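// Init: reads window_size, stride and magnitude_squared from the flexbuffer
// custom options and allocates the Spectrogram helper.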
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
auto* data = new TfLiteAudioSpectrogramParams;
const uint8_t* buffer_t = reinterpret_cast<const uint8_t*>(buffer);
const flexbuffers::Map& m = flexbuffers::GetRoot(buffer_t, length).AsMap();
data->window_size = m["window_size"].AsInt64();
data->stride = m["stride"].AsInt64();
data->magnitude_squared = m["magnitude_squared"].AsBool();
data->spectrogram = new internal::Spectrogram;
return data;
}
void Free(TfLiteContext* context, void* buffer) {
auto* params = reinterpret_cast<TfLiteAudioSpectrogramParams*>(buffer);
delete params->spectrogram;
delete params;
}
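// Prepare: validates a rank-2 float input, initializes the spectrogram with
// the requested window and stride, and sizes the output as
// [channels, output_height, output_frequency_channels].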
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
auto* params =
reinterpret_cast<TfLiteAudioSpectrogramParams*>(node->user_data);
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TF_LITE_ENSURE_EQ(context, NumDimensions(input), 2);
TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteFloat32);
TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
TF_LITE_ENSURE(context, params->spectrogram->Initialize(params->window_size,
params->stride));
const int64_t sample_count = input->dims->data[0];
const int64_t length_minus_window = (sample_count - params->window_size);
if (length_minus_window < 0) {
params->output_height = 0;
} else {
params->output_height = 1 + (length_minus_window / params->stride);
}
TfLiteIntArray* output_size = TfLiteIntArrayCreate(3);
output_size->data[0] = input->dims->data[1];
output_size->data[1] = params->output_height;
output_size->data[2] = params->spectrogram->output_frequency_channels();
return context->ResizeTensor(context, output, output_size);
}
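// Eval: de-interleaves each channel, computes its squared-magnitude
// spectrogram, and takes the per-bin square root unless magnitude_squared is
// set.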
template <KernelType kernel_type>
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
auto* params =
reinterpret_cast<TfLiteAudioSpectrogramParams*>(node->user_data);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
const float* input_data = GetTensorData<float>(input);
const int64_t sample_count = input->dims->data[0];
const int64_t channel_count = input->dims->data[1];
const int64_t output_width = params->spectrogram->output_frequency_channels();
float* output_flat = GetTensorData<float>(output);
std::vector<float> input_for_channel(sample_count);
for (int64_t channel = 0; channel < channel_count; ++channel) {
float* output_slice =
output_flat + (channel * params->output_height * output_width);
for (int i = 0; i < sample_count; ++i) {
input_for_channel[i] = input_data[i * channel_count + channel];
}
std::vector<std::vector<float>> spectrogram_output;
TF_LITE_ENSURE(context, params->spectrogram->Initialize(params->window_size,
params->stride));
TF_LITE_ENSURE(context,
params->spectrogram->ComputeSquaredMagnitudeSpectrogram(
input_for_channel, &spectrogram_output));
TF_LITE_ENSURE_EQ(context, spectrogram_output.size(),
params->output_height);
TF_LITE_ENSURE(context, spectrogram_output.empty() ||
(spectrogram_output[0].size() == output_width));
for (int row_index = 0; row_index < params->output_height; ++row_index) {
const std::vector<float>& spectrogram_row = spectrogram_output[row_index];
TF_LITE_ENSURE_EQ(context, spectrogram_row.size(), output_width);
float* output_row = output_slice + (row_index * output_width);
if (params->magnitude_squared) {
for (int i = 0; i < output_width; ++i) {
output_row[i] = spectrogram_row[i];
}
} else {
for (int i = 0; i < output_width; ++i) {
output_row[i] = sqrtf(spectrogram_row[i]);
}
}
}
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_AUDIO_SPECTROGRAM() {
static TfLiteRegistration r = {
audio_spectrogram::Init, audio_spectrogram::Free,
audio_spectrogram::Prepare,
audio_spectrogram::Eval<audio_spectrogram::kReference>};
return &r;
}
}
}
} | #include <vector>
#include <gtest/gtest.h>
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace ops {
namespace custom {
TfLiteRegistration* Register_AUDIO_SPECTROGRAM();
namespace {
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
class BaseAudioSpectrogramOpModel : public SingleOpModel {
public:
BaseAudioSpectrogramOpModel(const TensorData& input1,
const TensorData& output, int window_size,
int stride, bool magnitude_squared) {
input1_ = AddInput(input1);
output_ = AddOutput(output);
flexbuffers::Builder fbb;
fbb.Map([&]() {
fbb.Int("window_size", window_size);
fbb.Int("stride", stride);
fbb.Bool("magnitude_squared", magnitude_squared);
});
fbb.Finish();
SetCustomOp("AudioSpectrogram", fbb.GetBuffer(),
Register_AUDIO_SPECTROGRAM);
BuildInterpreter({GetShape(input1_)});
}
int input1() { return input1_; }
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
protected:
int input1_;
int output_;
};
TEST(BaseAudioSpectrogramOpModel, NonSquaredTest) {
BaseAudioSpectrogramOpModel m({TensorType_FLOAT32, {8, 1}},
{TensorType_FLOAT32, {}}, 8, 1, false);
m.PopulateTensor<float>(m.input1(),
{-1.0f, 0.0f, 1.0f, 0.0f, -1.0f, 0.0f, 1.0f, 0.0f});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
std::vector<int> output_shape = m.GetOutputShape();
EXPECT_EQ(3, output_shape.size());
EXPECT_THAT(output_shape, ElementsAre(1, 1, 5));
EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear(
{0.0f, 1.0f, 2.0f, 1.0f, 0.0f}, 1e-3)));
}
TEST(SpectrogramOpTest, SquaredTest) {
BaseAudioSpectrogramOpModel m({TensorType_FLOAT32, {8, 1}},
{TensorType_FLOAT32, {}}, 8, 1, true);
m.PopulateTensor<float>(m.input1(),
{-1.0f, 0.0f, 1.0f, 0.0f, -1.0f, 0.0f, 1.0f, 0.0f});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
std::vector<int> output_shape = m.GetOutputShape();
EXPECT_EQ(3, output_shape.size());
EXPECT_THAT(output_shape, ElementsAre(1, 1, 5));
EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear(
{0.f, 1.f, 4.f, 1.f, 0.f}, 1e-3)));
}
TEST(SpectrogramOpTest, StrideTest) {
BaseAudioSpectrogramOpModel m({TensorType_FLOAT32, {10, 1}},
{TensorType_FLOAT32, {}}, 8, 2, true);
m.PopulateTensor<float>(m.input1(), {-1.0f, 0.0f, 1.0f, 0.0f, -1.0f, 0.0f,
1.0f, 0.0f, 1.0f, 0.0f});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
std::vector<int> output_shape = m.GetOutputShape();
EXPECT_THAT(output_shape, ElementsAre(1, 2, 5));
EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear(
{0, 1, 4, 1, 0, 1, 2, 1, 2, 1}, 1e-3)));
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/audio_spectrogram.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/audio_spectrogram_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8c30b0c1-e421-433a-8973-3103f5f3c291 | cpp | tensorflow/tensorflow | complex_support | tensorflow/lite/kernels/complex_support.cc | tensorflow/lite/kernels/complex_support_test.cc | #include <complex>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace complex {
static const int kInputTensor = 0;
static const int kOutputTensor = 0;
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TF_LITE_ENSURE(context, input->type == kTfLiteComplex64 ||
input->type == kTfLiteComplex128);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
if (input->type == kTfLiteComplex64) {
TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteFloat32);
} else {
TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteFloat64);
}
TfLiteIntArray* output_shape = TfLiteIntArrayCopy(input->dims);
return context->ResizeTensor(context, output, output_shape);
}
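// Copies one scalar component of each complex element (selected by
// extract_func) into the output tensor.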
template <typename T, typename ExtractF>
void ExtractData(const TfLiteTensor* input, ExtractF extract_func,
TfLiteTensor* output) {
const std::complex<T>* input_data = GetTensorData<std::complex<T>>(input);
T* output_data = GetTensorData<T>(output);
const int input_size = NumElements(input);
for (int i = 0; i < input_size; ++i) {
*output_data++ = extract_func(*input_data++);
}
}
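// Real, Imag and ComplexAbs share the same structure: dispatch on complex64
// vs. complex128 and extract the component with std::real, std::imag or
// std::abs respectively.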
TfLiteStatus EvalReal(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
switch (input->type) {
case kTfLiteComplex64: {
ExtractData<float>(
input,
static_cast<float (*)(const std::complex<float>&)>(std::real<float>),
output);
break;
}
case kTfLiteComplex128: {
ExtractData<double>(input,
static_cast<double (*)(const std::complex<double>&)>(
std::real<double>),
output);
break;
}
default: {
TF_LITE_KERNEL_LOG(context,
"Unsupported input type, Real op only supports "
"complex input, but got: %s",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
}
return kTfLiteOk;
}
TfLiteStatus EvalImag(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
switch (input->type) {
case kTfLiteComplex64: {
ExtractData<float>(
input,
static_cast<float (*)(const std::complex<float>&)>(std::imag<float>),
output);
break;
}
case kTfLiteComplex128: {
ExtractData<double>(input,
static_cast<double (*)(const std::complex<double>&)>(
std::imag<double>),
output);
break;
}
default: {
TF_LITE_KERNEL_LOG(context,
"Unsupported input type, Imag op only supports "
"complex input, but got: %s",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
}
return kTfLiteOk;
}
TfLiteStatus EvalAbs(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
switch (input->type) {
case kTfLiteComplex64: {
ExtractData<float>(
input,
static_cast<float (*)(const std::complex<float>&)>(std::abs<float>),
output);
break;
}
case kTfLiteComplex128: {
ExtractData<double>(input,
static_cast<double (*)(const std::complex<double>&)>(
std::abs<double>),
output);
break;
}
default: {
TF_LITE_KERNEL_LOG(context,
"Unsupported input type, ComplexAbs op only supports "
"complex input, but got: %s",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_REAL() {
static TfLiteRegistration r = {nullptr, nullptr,
complex::Prepare, complex::EvalReal};
return &r;
}
TfLiteRegistration* Register_IMAG() {
static TfLiteRegistration r = {nullptr, nullptr,
complex::Prepare, complex::EvalImag};
return &r;
}
TfLiteRegistration* Register_COMPLEX_ABS() {
static TfLiteRegistration r = {nullptr, nullptr,
complex::Prepare, complex::EvalAbs};
return &r;
}
}
}
} | #include <complex>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/testing/util.h"
namespace tflite {
namespace {
template <typename T>
class RealOpModel : public SingleOpModel {
public:
RealOpModel(const TensorData& input, const TensorData& output) {
input_ = AddInput(input);
output_ = AddOutput(output);
const std::vector<uint8_t> custom_option;
SetBuiltinOp(BuiltinOperator_REAL, BuiltinOptions_NONE, 0);
BuildInterpreter({GetShape(input_)});
}
int input() { return input_; }
std::vector<T> GetOutput() { return ExtractVector<T>(output_); }
private:
int input_;
int output_;
};
TEST(RealOpTest, SimpleFloatTest) {
RealOpModel<float> m({TensorType_COMPLEX64, {2, 4}},
{TensorType_FLOAT32, {}});
m.PopulateTensor<std::complex<float>>(m.input(), {{75, 0},
{-6, -1},
{9, 0},
{-10, 5},
{-3, 2},
{-6, 11},
{0, 0},
{22.1, 33.3}});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), testing::ElementsAreArray(ArrayFloatNear(
{75, -6, 9, -10, -3, -6, 0, 22.1f})));
}
TEST(RealOpTest, SimpleDoubleTest) {
RealOpModel<double> m({TensorType_COMPLEX128, {2, 4}},
{TensorType_FLOAT64, {}});
m.PopulateTensor<std::complex<double>>(m.input(), {{75, 0},
{-6, -1},
{9, 0},
{-10, 5},
{-3, 2},
{-6, 11},
{0, 0},
{22.1, 33.3}});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), testing::ElementsAreArray(ArrayFloatNear(
{75, -6, 9, -10, -3, -6, 0, 22.1f})));
}
template <typename T>
class ImagOpModel : public SingleOpModel {
public:
ImagOpModel(const TensorData& input, const TensorData& output) {
input_ = AddInput(input);
output_ = AddOutput(output);
const std::vector<uint8_t> custom_option;
SetBuiltinOp(BuiltinOperator_IMAG, BuiltinOptions_NONE, 0);
BuildInterpreter({GetShape(input_)});
}
int input() { return input_; }
std::vector<T> GetOutput() { return ExtractVector<T>(output_); }
private:
int input_;
int output_;
};
TEST(ImagOpTest, SimpleFloatTest) {
ImagOpModel<float> m({TensorType_COMPLEX64, {2, 4}},
{TensorType_FLOAT32, {}});
m.PopulateTensor<std::complex<float>>(m.input(), {{75, 7},
{-6, -1},
{9, 3.5},
{-10, 5},
{-3, 2},
{-6, 11},
{0, 0},
{22.1, 33.3}});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), testing::ElementsAreArray(ArrayFloatNear(
{7, -1, 3.5f, 5, 2, 11, 0, 33.3f})));
}
TEST(ImagOpTest, SimpleDoubleTest) {
ImagOpModel<double> m({TensorType_COMPLEX128, {2, 4}},
{TensorType_FLOAT64, {}});
m.PopulateTensor<std::complex<double>>(m.input(), {{75, 7},
{-6, -1},
{9, 3.5},
{-10, 5},
{-3, 2},
{-6, 11},
{0, 0},
{22.1, 33.3}});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), testing::ElementsAreArray(ArrayFloatNear(
{7, -1, 3.5f, 5, 2, 11, 0, 33.3f})));
}
template <typename T>
class ComplexAbsOpModel : public SingleOpModel {
public:
ComplexAbsOpModel(const TensorData& input, const TensorData& output) {
input_ = AddInput(input);
output_ = AddOutput(output);
const std::vector<uint8_t> custom_option;
SetBuiltinOp(BuiltinOperator_COMPLEX_ABS, BuiltinOptions_NONE, 0);
BuildInterpreter({GetShape(input_)});
}
int input() { return input_; }
std::vector<T> GetOutput() { return ExtractVector<T>(output_); }
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
private:
int input_;
int output_;
};
TEST(ComplexAbsOpTest, IncompatibleType64Test) {
EXPECT_DEATH_IF_SUPPORTED(
ComplexAbsOpModel<float> m({TensorType_COMPLEX64, {2, 4}},
{TensorType_FLOAT64, {}}),
"output->type != kTfLiteFloat32");
}
TEST(ComplexAbsOpTest, IncompatibleType128Test) {
EXPECT_DEATH_IF_SUPPORTED(
ComplexAbsOpModel<float> m({TensorType_COMPLEX128, {2, 4}},
{TensorType_FLOAT32, {}}),
"output->type != kTfLiteFloat64");
}
TEST(ComplexAbsOpTest, SimpleFloatTest) {
ComplexAbsOpModel<float> m({TensorType_COMPLEX64, {2, 4}},
{TensorType_FLOAT32, {}});
m.PopulateTensor<std::complex<float>>(m.input(), {{75, 7},
{-6, -1},
{9, 3.5},
{-10, 5},
{-3, 2},
{-6, 11},
{0, 0},
{22.1, 33.3}});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), testing::ElementsAre(2, 4));
EXPECT_THAT(m.GetOutput(), testing::ElementsAreArray(ArrayFloatNear(
{75.32596f, 6.0827627f, 9.656604f, 11.18034f,
3.6055512f, 12.529964f, 0.f, 39.966236f})));
}
TEST(ComplexAbsOpTest, SimpleDoubleTest) {
ComplexAbsOpModel<double> m({TensorType_COMPLEX128, {2, 4}},
{TensorType_FLOAT64, {}});
m.PopulateTensor<std::complex<double>>(m.input(), {{75, 7},
{-6, -1},
{9, 3.5},
{-10, 5},
{-3, 2},
{-6, 11},
{0, 0},
{22.1, 33.3}});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), testing::ElementsAre(2, 4));
EXPECT_THAT(m.GetOutput(), testing::ElementsAreArray(ArrayFloatNear(
{75.32596f, 6.0827627f, 9.656604f, 11.18034f,
3.6055512f, 12.529964f, 0.f, 39.966236f})));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/complex_support.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/complex_support_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7f97eb36-a7ec-4230-9a14-09f091bafb63 | cpp | tensorflow/tensorflow | lstm_eval | tensorflow/lite/kernels/lstm_eval.cc | tensorflow/lite/kernels/lstm_eval_test.cc | #include "tensorflow/lite/kernels/lstm_eval.h"
#include <math.h>
#include <string.h>
#include <algorithm>
#include <cstdint>
#include <memory>
#include <vector>
#include "ruy/matrix.h"
#include "ruy/mul_params.h"
#include "ruy/profiler/instrumentation.h"
#include "ruy/ruy.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/kernel_utils.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/tensor_utils.h"
#include "tensorflow/lite/kernels/op_macros.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace lstm_eval {
namespace {
void MatrixBatchVectorMultiplyAccumulate(
const float* matrix, const float* vector, const float* result,
float* output, int m_rows, int m_cols, int n_batch,
CpuBackendContext* cpu_backend_context) {
tflite::FullyConnectedParams float_fc_params;
float_fc_params.float_activation_min = std::numeric_limits<float>::lowest();
float_fc_params.float_activation_max = std::numeric_limits<float>::max();
float_fc_params.lhs_cacheable = true;
float_fc_params.rhs_cacheable = false;
tflite::RuntimeShape weight_shape({m_rows, m_cols});
tflite::RuntimeShape input_shape({n_batch, m_cols});
tflite::RuntimeShape output_shape({n_batch, m_rows});
if (n_batch == 1) {
tflite::optimized_ops::FullyConnected(
float_fc_params, input_shape, vector, weight_shape, matrix,
output_shape, result, output_shape, output, cpu_backend_context);
} else {
tflite::optimized_ops::FullyConnected(
float_fc_params, input_shape, vector, weight_shape, matrix,
output_shape, nullptr, output_shape, output, cpu_backend_context);
for (int i = 0; i < m_rows * n_batch; ++i) {
output[i] += result[i];
}
}
}
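// Precomputes per-row sums of the quantized weight matrices. The sums are
// needed to compensate for non-zero input zero points in the asymmetric
// hybrid path; diagonal recurrent weights are skipped.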
void ComputeRowSums(
int32_t* input_to_input_row_sums, int32_t* input_to_forget_row_sums,
int32_t* input_to_cell_row_sums, int32_t* input_to_output_row_sums,
int32_t* aux_input_to_input_row_sums, int32_t* aux_input_to_forget_row_sums,
int32_t* aux_input_to_cell_row_sums, int32_t* aux_input_to_output_row_sums,
int32_t* recurrent_to_input_row_sums, int32_t* recurrent_to_forget_row_sums,
int32_t* recurrent_to_cell_row_sums, int32_t* recurrent_to_output_row_sums,
int32_t* projection_weights_row_sums, int32_t* row_sums, int n_cell,
int n_input, int n_aux_input, int n_output,
const int8_t* input_to_input_weights_ptr,
const int8_t* input_to_forget_weights_ptr,
const int8_t* input_to_cell_weights_ptr,
const int8_t* input_to_output_weights_ptr,
const int8_t* aux_input_to_input_weights_ptr,
const int8_t* aux_input_to_forget_weights_ptr,
const int8_t* aux_input_to_cell_weights_ptr,
const int8_t* aux_input_to_output_weights_ptr,
const int8_t* recurrent_to_input_weights_ptr,
const int8_t* recurrent_to_forget_weights_ptr,
const int8_t* recurrent_to_cell_weights_ptr,
const int8_t* recurrent_to_output_weights_ptr,
const int8_t* projection_weights_ptr, bool use_cifg,
const float* aux_input_ptr, bool recurrent_to_input_is_diag = false,
bool recurrent_to_forget_is_diag = false,
bool recurrent_to_cell_is_diag = false,
bool recurrent_to_output_is_diag = false) {
if (!use_cifg) {
tensor_utils::ReductionSumVector(input_to_input_weights_ptr,
input_to_input_row_sums, n_cell, n_input);
}
tensor_utils::ReductionSumVector(input_to_forget_weights_ptr,
input_to_forget_row_sums, n_cell, n_input);
tensor_utils::ReductionSumVector(input_to_cell_weights_ptr,
input_to_cell_row_sums, n_cell, n_input);
tensor_utils::ReductionSumVector(input_to_output_weights_ptr,
input_to_output_row_sums, n_cell, n_input);
if (aux_input_ptr) {
if (!use_cifg) {
tensor_utils::ReductionSumVector(aux_input_to_input_weights_ptr,
aux_input_to_input_row_sums, n_cell,
n_aux_input);
}
tensor_utils::ReductionSumVector(aux_input_to_forget_weights_ptr,
aux_input_to_forget_row_sums, n_cell,
n_aux_input);
tensor_utils::ReductionSumVector(aux_input_to_cell_weights_ptr,
aux_input_to_cell_row_sums, n_cell,
n_aux_input);
tensor_utils::ReductionSumVector(aux_input_to_output_weights_ptr,
aux_input_to_output_row_sums, n_cell,
n_aux_input);
}
if (!use_cifg) {
if (!recurrent_to_input_is_diag) {
tensor_utils::ReductionSumVector(recurrent_to_input_weights_ptr,
recurrent_to_input_row_sums, n_cell,
n_output);
}
}
if (!recurrent_to_forget_is_diag) {
tensor_utils::ReductionSumVector(recurrent_to_forget_weights_ptr,
recurrent_to_forget_row_sums, n_cell,
n_output);
}
if (!recurrent_to_cell_is_diag) {
tensor_utils::ReductionSumVector(recurrent_to_cell_weights_ptr,
recurrent_to_cell_row_sums, n_cell,
n_output);
}
if (!recurrent_to_output_is_diag) {
tensor_utils::ReductionSumVector(recurrent_to_output_weights_ptr,
recurrent_to_output_row_sums, n_cell,
n_output);
}
if (projection_weights_ptr != nullptr) {
tensor_utils::ReductionSumVector(
projection_weights_ptr, projection_weights_row_sums, n_output, n_cell);
}
}
inline float GetTensorScale(const TfLiteTensor* tensor) {
return tensor == nullptr ? 1.0f : tensor->params.scale;
}
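// Computes one LSTM gate in float: start from the gate bias (or zeros when
// layer norm is used), accumulate the input, auxiliary-input and recurrent
// contributions (ping-ponging between `gate` and the extra scratch buffer),
// add the optional peephole term, apply layer normalization if present, and
// finish with the gate activation.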
inline void CalculateLstmGateFloat(
const float* input, const float* input_to_gate_weights,
const float* aux_input, const float* aux_input_to_gate_weights,
const float* output_state, const float* recurrent_to_gate_weights,
const float* cell_state, const float* cell_to_gate_weights,
const float* layer_norm_coefficients, const float* gate_bias,
const int n_batch, const int n_input, const int n_aux_input,
const int n_output, const int n_cell,
const TfLiteFusedActivation activation, float* gate,
const bool is_input_all_zeros, const bool is_aux_input_all_zeros,
float* output, bool recurrent_is_diag, CpuBackendContext* context) {
const bool use_peephole = (cell_to_gate_weights != nullptr);
const bool use_layer_norm = (layer_norm_coefficients != nullptr);
if (use_layer_norm) {
std::fill_n(gate, n_cell * n_batch, 0.0f);
} else {
tensor_utils::VectorBatchVectorAssign(gate_bias, n_cell, n_batch, gate);
}
float* accumulation_buffer = gate;
if (!is_input_all_zeros) {
MatrixBatchVectorMultiplyAccumulate(input_to_gate_weights, input,
accumulation_buffer, output, n_cell,
n_input, n_batch, context);
std::swap(accumulation_buffer, output);
}
if (!is_aux_input_all_zeros) {
MatrixBatchVectorMultiplyAccumulate(aux_input_to_gate_weights, aux_input,
accumulation_buffer, output, n_cell,
n_aux_input, n_batch, context);
std::swap(accumulation_buffer, output);
}
if (recurrent_is_diag) {
tflite::tensor_utils::VectorBatchVectorCwiseProductAccumulate(
recurrent_to_gate_weights, n_cell, output_state, n_batch,
accumulation_buffer);
std::swap(accumulation_buffer, output);
} else {
MatrixBatchVectorMultiplyAccumulate(recurrent_to_gate_weights, output_state,
accumulation_buffer, output, n_cell,
n_output, n_batch, context);
}
if (use_peephole) {
tensor_utils::VectorBatchVectorCwiseProductAccumulate(
cell_to_gate_weights, n_cell, cell_state, n_batch, output);
}
if (use_layer_norm) {
tensor_utils::MeanStddevNormalization(output, output, n_cell, n_batch);
tensor_utils::VectorBatchVectorCwiseProduct(layer_norm_coefficients, n_cell,
output, n_batch, output);
tensor_utils::VectorBatchVectorAdd(gate_bias, n_cell, n_batch, output);
}
tensor_utils::ApplyActivationToVector(output, n_batch * n_cell, activation,
gate);
}
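// Updates the cell state: cell_state *= forget_gate, then accumulates
// cell_gate scaled by the input gate (or by 1 - forget_gate when CIFG is
// used, reusing the forget-gate buffer as scratch), with optional clipping.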
void UpdateLstmCellFloat(int n_batch, int n_cell, float* cell_state,
const float* input_gate, float* forget_gate,
const float* cell_gate, bool use_cifg, float clip) {
tensor_utils::VectorVectorCwiseProduct(forget_gate, cell_state,
n_batch * n_cell, cell_state);
if (use_cifg) {
float* scratch = forget_gate;
tensor_utils::Sub1Vector(forget_gate, n_batch * n_cell, scratch);
tensor_utils::VectorVectorCwiseProductAccumulate(
cell_gate, scratch, n_batch * n_cell, cell_state);
} else {
tensor_utils::VectorVectorCwiseProductAccumulate(
cell_gate, input_gate, n_batch * n_cell, cell_state);
}
if (clip > 0.0f) {
tensor_utils::CwiseClipping(cell_state, n_batch * n_cell, clip);
}
}
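// Computes the output (hidden) state: activation(cell_state) * output_gate,
// optionally passed through the projection layer and clipped.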
void CalculateLstmOutputFloat(int n_batch, int n_cell, int n_output,
const float* cell_state, const float* output_gate,
TfLiteFusedActivation activation,
const float* projection_weights,
const float* projection_bias,
const float proj_clip, float* output_state,
float* scratch, float* projection_bias_scratch,
CpuBackendContext* context) {
tensor_utils::ApplyActivationToVector(cell_state, n_batch * n_cell,
activation, scratch);
tensor_utils::VectorVectorCwiseProduct(output_gate, scratch, n_batch * n_cell,
scratch);
const bool use_projection = (projection_weights != nullptr);
const bool use_projection_bias = (projection_bias != nullptr);
if (use_projection) {
if (use_projection_bias) {
tensor_utils::VectorBatchVectorAssign(projection_bias, n_output, n_batch,
projection_bias_scratch);
} else {
std::fill_n(projection_bias_scratch, n_batch * n_output, 0.0f);
}
MatrixBatchVectorMultiplyAccumulate(projection_weights, scratch,
projection_bias_scratch, output_state,
n_output, n_cell, n_batch, context);
if (proj_clip > 0.0f) {
tensor_utils::CwiseClipping(output_state, n_batch * n_output, proj_clip);
}
} else {
std::copy_n(scratch, n_batch * n_output, output_state);
}
}
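// Hybrid-quantized gate calculation: int8 weights (optionally sparse via a
// ledger), inputs and output state arrive pre-quantized with per-batch scale
// factors, and matmuls are skipped when the corresponding inputs are all
// zero.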
void CalculateLstmGateHybrid(
const int8_t* input, const float* input_sf, const int32_t* input_zp,
const int8_t* input_to_gate_weights,
const uint8_t* input_to_gate_weights_ledger,
const float input_to_gate_weights_scale, int32_t* input_to_gate_row_sums,
const int8_t* aux_input, const float* aux_input_sf,
const int32_t* aux_input_zp, const int8_t* aux_input_to_gate_weights,
const float aux_input_to_gate_weights_scale,
int32_t* aux_input_to_gate_row_sums,
const int8_t* output_state, const float* output_state_float,
const float* output_state_sf, const int32_t* output_state_zp,
const int8_t* recurrent_to_gate_weights,
const float* recurrent_to_gate_diag,
const uint8_t* recurrent_to_gate_weights_ledger,
const float recurrent_to_gate_weights_scale,
int32_t* recurrent_to_gate_row_sums,
const float* cell_state, const int8_t* cell_to_gate_weights,
const float cell_to_gate_weights_scale,
const float* layer_norm_coefficients, const float* gate_bias,
const int n_batch, const int n_input, const int n_aux_input,
const int n_output, const int n_cell,
const TfLiteFusedActivation activation,
float* gate,
const bool is_input_all_zeros, const bool is_aux_input_all_zeros,
const bool is_output_state_all_zeros, bool* compute_row_sums,
CpuBackendContext* context,
float* scratch0,
float* scratch1,
int32_t* accum_scratch,
bool recurrent_is_diag) {
const bool use_peephole = (cell_to_gate_weights != nullptr);
const bool use_layer_norm = (layer_norm_coefficients != nullptr);
if (use_layer_norm) {
std::fill_n(gate, n_cell * n_batch, 0.0f);
} else {
tensor_utils::VectorBatchVectorAssign(gate_bias, n_cell, n_batch, gate);
}
if (!is_input_all_zeros) {
if (input_to_gate_weights_ledger != nullptr) {
std::vector<float> scales(n_batch);
for (int i = 0; i < n_batch; i++) {
scales[i] = input_to_gate_weights_scale * input_sf[i];
}
tensor_utils::SparseMatrixBatchVectorMultiplyAccumulate(
input_to_gate_weights, input_to_gate_weights_ledger, n_cell, n_input,
input, scales.data(), n_batch, gate);
} else {
tensor_utils::MatrixBatchVectorMultiplyAccumulate(
input_to_gate_weights, n_cell, n_input, input,
input_to_gate_weights_scale, input_sf, n_batch, gate,
nullptr, input_zp, accum_scratch,
input_to_gate_row_sums, compute_row_sums, scratch0, context);
}
}
if (!is_aux_input_all_zeros) {
tensor_utils::MatrixBatchVectorMultiplyAccumulate(
aux_input_to_gate_weights, n_cell, n_aux_input, aux_input,
aux_input_to_gate_weights_scale, aux_input_sf, n_batch, gate,
nullptr, aux_input_zp, accum_scratch,
aux_input_to_gate_row_sums, compute_row_sums, scratch0, context);
}
if (!is_output_state_all_zeros) {
if (recurrent_to_gate_weights_ledger != nullptr) {
std::vector<float> scales(n_batch);
for (int i = 0; i < n_batch; i++) {
scales[i] = recurrent_to_gate_weights_scale * input_sf[i];
}
tensor_utils::SparseMatrixBatchVectorMultiplyAccumulate(
recurrent_to_gate_weights, recurrent_to_gate_weights_ledger, n_cell,
n_output, output_state, scales.data(), n_batch, gate);
} else {
if (recurrent_is_diag) {
tflite::tensor_utils::VectorBatchVectorCwiseProductAccumulate(
recurrent_to_gate_diag, n_cell, output_state_float, n_batch, gate);
} else {
tensor_utils::MatrixBatchVectorMultiplyAccumulate(
recurrent_to_gate_weights, n_cell, n_output, output_state,
recurrent_to_gate_weights_scale, output_state_sf, n_batch, gate,
nullptr, output_state_zp, accum_scratch,
recurrent_to_gate_row_sums, compute_row_sums, scratch0, context);
}
}
}
if (use_peephole) {
float* recovered_cell_weights = scratch1;
tensor_utils::VectorScalarMultiply(cell_to_gate_weights, n_cell,
cell_to_gate_weights_scale,
recovered_cell_weights);
tensor_utils::VectorBatchVectorCwiseProductAccumulate(
recovered_cell_weights, n_cell, cell_state, n_batch, gate);
}
if (use_layer_norm) {
tensor_utils::MeanStddevNormalization(gate, gate, n_cell, n_batch);
tensor_utils::VectorBatchVectorCwiseProduct(layer_norm_coefficients, n_cell,
gate, n_batch, gate);
tensor_utils::VectorBatchVectorAdd(gate_bias, n_cell, n_batch, gate);
}
tensor_utils::ApplyActivationToVector(gate, n_cell * n_batch, activation,
gate);
}
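// Hybrid output calculation: the activated cell state times the output gate
// is re-quantized and fed through the int8 projection weights when a
// projection layer is present, otherwise copied to the output state directly.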
void CalculateLstmOutputHybrid(
int n_batch, int n_cell, int n_output, const float* cell_state,
const float* output_gate, TfLiteFusedActivation activation,
const int8_t* projection_weights, const uint8_t* projection_weights_ledger,
float projection_weights_scale, const float* projection_bias,
const float proj_clip, float* output_state, bool asymmetric_quantize_inputs,
int32_t* projection_weights_row_sums, bool* compute_row_sums,
CpuBackendContext* context, float* scratch0, int8_t* scratch1,
float* scratch2, int32_t* scratch3, int32_t* scratch4) {
tensor_utils::ApplyActivationToVector(cell_state, n_batch * n_cell,
activation, scratch0);
tensor_utils::VectorVectorCwiseProduct(output_gate, scratch0,
n_batch * n_cell, scratch0);
const bool use_projection = (projection_weights != nullptr);
const bool use_projection_bias = (projection_bias != nullptr);
if (use_projection) {
if (use_projection_bias) {
tensor_utils::VectorBatchVectorAssign(projection_bias, n_output, n_batch,
output_state);
} else {
std::fill_n(output_state, n_batch * n_output, 0.0f);
}
if (!tensor_utils::IsZeroVector(scratch0, n_batch * n_cell)) {
tensor_utils::BatchQuantizeFloats(scratch0, n_batch, n_cell, scratch1,
scratch2, scratch3,
asymmetric_quantize_inputs);
if (projection_weights_ledger != nullptr) {
std::vector<float> scales(n_batch);
for (int i = 0; i < n_batch; i++) {
scales[i] = projection_weights_scale * scratch2[i];
}
tensor_utils::SparseMatrixBatchVectorMultiplyAccumulate(
projection_weights, projection_weights_ledger, n_output, n_cell,
scratch1, scales.data(), n_batch, output_state);
} else {
tensor_utils::MatrixBatchVectorMultiplyAccumulate(
projection_weights, n_output, n_cell, scratch1,
projection_weights_scale, scratch2, n_batch, output_state,
nullptr, scratch3, scratch4,
projection_weights_row_sums, compute_row_sums, scratch2, context);
}
}
if (proj_clip > 0.0f) {
tensor_utils::CwiseClipping(output_state, n_batch * n_output, proj_clip);
}
} else {
std::copy_n(scratch0, n_batch * n_output, output_state);
}
}
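// Fully integer gate calculation for the 8x8->16 kernel: accumulates the
// input and recurrent matmuls into 16-bit gate values, adds the optional
// peephole and integer layer norm, then applies sigmoid or tanh.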
void CalculateLstmGateInteger8x8_16(
const int8_t* input, const int8_t* input_to_gate_weights,
const int32_t* input_to_gate_bias, const int32_t input_to_gate_scale_a,
const int32_t input_to_gate_scale_b,
const int8_t* output_state, const int8_t* recurrent_to_gate_weights,
const int32_t* recurrent_to_gate_bias,
const int32_t recurrent_to_gate_scale_a,
const int32_t recurrent_to_gate_scale_b,
const int16_t* cell_state, const int16_t* cell_to_gate_weights,
const int32_t cell_to_gate_scale_a, const int32_t cell_to_gate_scale_b,
const int16_t* layer_norm_coefficients, const int32_t* layer_norm_bias,
const int32_t layer_norm_input_scale_a,
const int32_t layer_norm_input_scale_b,
const int32_t layer_norm_variance_guard,
const int n_batch, const int n_input, const int n_output, const int n_cell,
const TfLiteFusedActivation activation,
int16_t* gate,
CpuBackendContext* context,
int32_t* scratch5) {
const bool use_peephole = (cell_to_gate_weights != nullptr);
const bool use_layer_norm = (layer_norm_coefficients != nullptr);
std::fill_n(gate, n_batch * n_cell, 0);
tensor_utils::MatrixBatchVectorMultiplyAccumulate(
input, input_to_gate_bias, input_to_gate_weights, input_to_gate_scale_a,
input_to_gate_scale_b, n_batch, n_input, n_cell, 0, scratch5, gate,
context);
tensor_utils::MatrixBatchVectorMultiplyAccumulate(
output_state, recurrent_to_gate_bias, recurrent_to_gate_weights,
recurrent_to_gate_scale_a, recurrent_to_gate_scale_b, n_batch, n_output,
n_cell, 0, scratch5, gate, context);
if (use_peephole) {
tensor_utils::VectorBatchVectorCwiseProductAccumulate(
cell_to_gate_weights, n_output, cell_state, n_batch,
cell_to_gate_scale_a, cell_to_gate_scale_b, gate);
}
if (use_layer_norm) {
tensor_utils::ApplyLayerNorm(
gate, layer_norm_coefficients, layer_norm_bias,
layer_norm_input_scale_a, layer_norm_input_scale_b,
layer_norm_variance_guard, n_batch, n_cell, gate);
}
switch (activation) {
case kTfLiteActSigmoid:
tensor_utils::ApplySigmoid(gate, n_batch, n_cell, gate);
break;
case kTfLiteActTanh:
tensor_utils::ApplyTanh(3, gate, n_batch, n_cell, gate);
break;
default:
TFLITE_ASSERT_FALSE;
}
}
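// Integer cell-state update mirroring UpdateLstmCellFloat, using fixed-point
// multiplies scaled by the cell-state scale, with optional clipping.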
void UpdateLstmCellInteger(int n_batch, int n_cell, int16_t* cell_state,
int32_t cell_state_scale, const int16_t* input_gate,
int16_t* forget_gate, const int16_t* cell_gate,
bool use_cifg, int16_t clip) {
int16_t* scratch = forget_gate;
tensor_utils::CwiseMul(forget_gate, cell_state, n_batch, n_cell, 15,
cell_state);
if (use_cifg) {
tensor_utils::Sub1Vector(forget_gate, n_batch * n_cell, scratch);
tensor_utils::CwiseMul(scratch, cell_gate, n_batch, n_cell,
30 + cell_state_scale, scratch);
} else {
tensor_utils::CwiseMul(input_gate, cell_gate, n_batch, n_cell,
30 + cell_state_scale, scratch);
}
tensor_utils::CwiseAdd(cell_state, scratch, n_batch, n_cell, cell_state);
if (clip > 0) {
tensor_utils::CwiseClipping(cell_state, n_batch * n_cell, clip);
}
}
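// Integer output calculation for the 8x8->16 kernel: tanh of the cell state
// times the output gate; with a projection layer the result is re-quantized
// to int8, projected and optionally clipped.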
void CalculateLstmOutputInteger8x8_16(
int n_batch, int n_cell, int n_output, const int16_t* cell_state,
int32_t cell_state_scale, const int16_t* output_gate,
int32_t hidden_scale_a, int32_t hidden_scale_b, int32_t hidden_zp,
const int8_t* projection_weights, int32_t proj_scale_a,
int32_t proj_scale_b, const int32_t* projection_bias,
int32_t output_state_zp, int8_t quantized_proj_clip, int8_t* output_state,
CpuBackendContext* context, int16_t* scratch0, int8_t* scratch1,
int32_t* scratch2) {
tensor_utils::ApplyTanh(15 + cell_state_scale, cell_state, n_batch, n_cell,
scratch0);
const bool use_projection = (projection_weights != nullptr);
if (use_projection) {
tensor_utils::CwiseMul(output_gate, scratch0, hidden_scale_a,
hidden_scale_b, n_batch, n_cell, -hidden_zp,
scratch1);
std::fill_n(output_state, n_batch * n_output, 0);
tensor_utils::MatrixBatchVectorMultiplyAccumulate(
scratch1, projection_bias, projection_weights, proj_scale_a,
proj_scale_b, n_batch, n_cell, n_output, output_state_zp, scratch2,
output_state, context);
if (quantized_proj_clip > 0) {
tensor_utils::CwiseClipping(output_state, n_batch * n_output,
quantized_proj_clip);
}
} else {
tensor_utils::CwiseMul(output_gate, scratch0, hidden_scale_a,
hidden_scale_b, n_batch, n_cell, hidden_zp,
output_state);
}
}
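// Gate calculation for the 8x8->8 variant: separate int8 matmuls for the
// input and recurrent paths combined with a saturating add, followed by layer
// normalization and the gate activation computed with the float-emulation
// helpers.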
void CalculateLstmGateInteger8x8_8(
const int8_t* input, int32_t input_zp, const int8_t* input_to_gate_weight,
const int32_t input_to_gate_scale_a, const int32_t input_to_gate_scale_b,
const int32_t input_times_weights_scale_a,
const int32_t input_times_weights_scale_b,
const int32_t input_times_weights_zp,
const int8_t* output_state, const int32_t output_state_zp,
const int8_t* recurrent_to_gate_weight,
const int32_t recurrent_to_gate_scale_a,
const int32_t recurrent_to_gate_scale_b,
const int32_t output_state_times_weights_scale_a,
const int32_t output_state_times_weights_scale_b,
const int32_t output_state_times_weights_zp,
const int16_t* layer_norm_gate_weight,
const int32_t layer_norm_gate_scale_a,
const int32_t layer_norm_gate_scale_b, const int32_t* gate_bias,
const int n_batch, const int n_input, const int n_output, const int n_cell,
const TfLiteFusedActivation activation,
int16_t* gate,
int8_t* scratch0, int8_t* scratch1) {
tensor_utils::MatrixBatchVectorMultiply(
input, input_zp, input_to_gate_weight, input_to_gate_scale_a,
input_to_gate_scale_b, n_batch, n_input, n_cell, scratch0,
input_times_weights_zp);
tensor_utils::MatrixBatchVectorMultiply(
output_state, output_state_zp, recurrent_to_gate_weight,
recurrent_to_gate_scale_a, recurrent_to_gate_scale_b, n_batch, n_output,
n_cell, scratch1, output_state_times_weights_zp);
tensor_utils::TwoGateSaturatingAdd(
scratch0, input_times_weights_zp, scratch1, output_state_times_weights_zp,
input_times_weights_scale_a, input_times_weights_scale_b,
output_state_times_weights_scale_a, output_state_times_weights_scale_b,
n_batch, n_cell, gate);
tensor_utils::ApplyLayerNormFloat(
gate, layer_norm_gate_weight, layer_norm_gate_scale_a,
layer_norm_gate_scale_b, gate_bias, n_batch, n_cell, gate);
switch (activation) {
case kTfLiteActSigmoid:
tensor_utils::ApplySigmoidFloat(gate, n_batch, n_cell, gate);
break;
case kTfLiteActTanh:
tensor_utils::ApplyTanhFloat(gate, n_batch, n_cell, -12, gate);
break;
default:
TFLITE_ASSERT_FALSE;
}
}
void CalculateLstmOutputInteger8x8_8(
int n_batch, int n_cell, int n_output, const int16_t* cell_state,
const int16_t* output_gate, const int8_t* projection_weights,
int32_t proj_scale_a, int32_t proj_scale_b, const int32_t* projection_bias,
int32_t output_state_zp, int32_t quantized_proj_clip, int8_t* output_state,
int16_t* scratch) {
tensor_utils::ApplyTanhFloat(cell_state, n_batch, n_cell, -15, scratch);
tensor_utils::CwiseMul(output_gate, scratch, n_batch, n_cell, 15 + 15 - 15,
scratch);
tensor_utils::MatrixBatchVectorMultiply(
scratch, projection_weights, proj_scale_a, proj_scale_b, projection_bias,
n_batch, n_cell, n_output, output_state_zp, output_state);
if (quantized_proj_clip > 0) {
tensor_utils::CwiseClipping(output_state, n_batch * n_output,
quantized_proj_clip);
}
}
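// Performs one float LSTM time step for a whole batch: computes the four
// gates (three when CIFG is used), updates the cell state, computes the
// (optionally projected) output state, and copies it into the output buffer
// using output_batch_leading_dim as the stride.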
inline void LstmStepFloat(
const float* input_ptr, const float* input_to_input_weights_ptr,
const float* input_to_forget_weights_ptr,
const float* input_to_cell_weights_ptr,
const float* input_to_output_weights_ptr, const float* aux_input_ptr,
const float* aux_input_to_input_weights_ptr,
const float* aux_input_to_forget_weights_ptr,
const float* aux_input_to_cell_weights_ptr,
const float* aux_input_to_output_weights_ptr,
const float* recurrent_to_input_weights_ptr,
const float* recurrent_to_forget_weights_ptr,
const float* recurrent_to_cell_weights_ptr,
const float* recurrent_to_output_weights_ptr,
const float* cell_to_input_weights_ptr,
const float* cell_to_forget_weights_ptr,
const float* cell_to_output_weights_ptr,
const float* input_layer_norm_coefficients_ptr,
const float* forget_layer_norm_coefficients_ptr,
const float* cell_layer_norm_coefficients_ptr,
const float* output_layer_norm_coefficients_ptr,
const float* input_gate_bias_ptr, const float* forget_gate_bias_ptr,
const float* cell_gate_bias_ptr, const float* output_gate_bias_ptr,
const float* projection_weights_ptr, const float* projection_bias_ptr,
const TfLiteLSTMParams* params, int n_batch, int n_cell, int n_input,
int n_aux_input, int n_output, int output_batch_leading_dim,
float* output_state_ptr, float* cell_state_ptr, float* scratch0,
float* scratch1, float* scratch2, float* scratch3, float* scratch4,
float* output_ptr, bool recurrent_to_input_is_diag,
bool recurrent_to_forget_is_diag, bool recurrent_to_cell_is_diag,
bool recurrent_to_output_is_diag, CpuBackendContext* context) {
ruy::profiler::ScopeLabel label("LstmStepFloat");
const bool use_cifg = (input_to_input_weights_ptr == nullptr);
float* input_gate_scratch = scratch0;
float* forget_gate_scratch = scratch1;
float* cell_gate_scratch = scratch2;
float* output_gate_scratch = scratch3;
float* accumulation_scratch_buffer = scratch4;
const bool is_input_all_zeros =
tensor_utils::IsZeroVector(input_ptr, n_batch * n_input);
const bool is_aux_input_all_zeros =
(aux_input_ptr == nullptr ||
tensor_utils::IsZeroVector(aux_input_ptr, n_batch * n_aux_input));
if (!use_cifg) {
CalculateLstmGateFloat(
input_ptr, input_to_input_weights_ptr, aux_input_ptr,
aux_input_to_input_weights_ptr, output_state_ptr,
recurrent_to_input_weights_ptr,
cell_state_ptr, cell_to_input_weights_ptr,
input_layer_norm_coefficients_ptr, input_gate_bias_ptr, n_batch,
n_input, n_aux_input, n_output, n_cell,
kTfLiteActSigmoid, input_gate_scratch,
is_input_all_zeros, is_aux_input_all_zeros, accumulation_scratch_buffer,
recurrent_to_input_is_diag, context);
}
CalculateLstmGateFloat(
input_ptr, input_to_forget_weights_ptr, aux_input_ptr,
aux_input_to_forget_weights_ptr, output_state_ptr,
recurrent_to_forget_weights_ptr,
cell_state_ptr, cell_to_forget_weights_ptr,
forget_layer_norm_coefficients_ptr, forget_gate_bias_ptr, n_batch,
n_input, n_aux_input, n_output, n_cell,
kTfLiteActSigmoid, forget_gate_scratch, is_input_all_zeros,
is_aux_input_all_zeros, accumulation_scratch_buffer,
recurrent_to_forget_is_diag, context);
CalculateLstmGateFloat(
input_ptr, input_to_cell_weights_ptr, aux_input_ptr,
aux_input_to_cell_weights_ptr, output_state_ptr,
recurrent_to_cell_weights_ptr,
nullptr,
nullptr, cell_layer_norm_coefficients_ptr,
cell_gate_bias_ptr, n_batch, n_input, n_aux_input, n_output, n_cell,
params->activation, cell_gate_scratch, is_input_all_zeros,
is_aux_input_all_zeros, accumulation_scratch_buffer,
recurrent_to_cell_is_diag, context);
UpdateLstmCellFloat(n_batch, n_cell, cell_state_ptr, input_gate_scratch,
forget_gate_scratch, cell_gate_scratch, use_cifg,
params->cell_clip);
CalculateLstmGateFloat(
input_ptr, input_to_output_weights_ptr, aux_input_ptr,
aux_input_to_output_weights_ptr, output_state_ptr,
recurrent_to_output_weights_ptr,
cell_state_ptr, cell_to_output_weights_ptr,
output_layer_norm_coefficients_ptr, output_gate_bias_ptr, n_batch,
n_input, n_aux_input, n_output, n_cell,
kTfLiteActSigmoid, output_gate_scratch, is_input_all_zeros,
is_aux_input_all_zeros, accumulation_scratch_buffer,
recurrent_to_output_is_diag, context);
CalculateLstmOutputFloat(n_batch, n_cell, n_output, cell_state_ptr,
output_gate_scratch, params->activation,
projection_weights_ptr, projection_bias_ptr,
params->proj_clip, output_state_ptr, scratch2,
accumulation_scratch_buffer, context);
for (int b = 0; b < n_batch; b++) {
std::copy_n(output_state_ptr + b * n_output, n_output,
output_ptr + b * output_batch_leading_dim);
}
}
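// Hybrid counterpart of LstmStepFloat: quantizes the float input, auxiliary
// input and output state per batch, lazily computes weight row sums for
// asymmetric quantization, then evaluates each gate with the hybrid kernels
// above.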
inline void LstmStepHybrid(
const float* input_ptr, const int8_t* input_to_input_weights_ptr,
const uint8_t* input_to_input_weights_ledger_ptr,
float input_to_input_weights_scale,
const int8_t* input_to_forget_weights_ptr,
const uint8_t* input_to_forget_weights_ledger_ptr,
float input_to_forget_weights_scale,
const int8_t* input_to_cell_weights_ptr,
const uint8_t* input_to_cell_weights_ledger_ptr,
float input_to_cell_weights_scale,
const int8_t* input_to_output_weights_ptr,
const uint8_t* input_to_output_weights_ledger_ptr,
float input_to_output_weights_scale, const float* aux_input_ptr,
const int8_t* aux_input_to_input_weights_ptr,
float aux_input_to_input_weights_scale,
const int8_t* aux_input_to_forget_weights_ptr,
float aux_input_to_forget_weights_scale,
const int8_t* aux_input_to_cell_weights_ptr,
float aux_input_to_cell_weights_scale,
const int8_t* aux_input_to_output_weights_ptr,
float aux_input_to_output_weights_scale,
const int8_t* recurrent_to_input_weights_ptr,
const float* recurrent_to_input_diag,
const uint8_t* recurrent_to_input_weights_ledger_ptr,
float recurrent_to_input_weights_scale,
const int8_t* recurrent_to_forget_weights_ptr,
const float* recurrent_to_forget_diag,
const uint8_t* recurrent_to_forget_weights_ledger_ptr,
float recurrent_to_forget_weights_scale,
const int8_t* recurrent_to_cell_weights_ptr,
const float* recurrent_to_cell_diag,
const uint8_t* recurrent_to_cell_weights_ledger_ptr,
float recurrent_to_cell_weights_scale,
const int8_t* recurrent_to_output_weights_ptr,
const float* recurrent_to_output_diag,
const uint8_t* recurrent_to_output_weights_ledger_ptr,
float recurrent_to_output_weights_scale,
const int8_t* cell_to_input_weights_ptr, float cell_to_input_weights_scale,
const int8_t* cell_to_forget_weights_ptr,
float cell_to_forget_weights_scale,
const int8_t* cell_to_output_weights_ptr,
float cell_to_output_weights_scale,
const float* input_layer_norm_coefficients_ptr,
const float* forget_layer_norm_coefficients_ptr,
const float* cell_layer_norm_coefficients_ptr,
const float* output_layer_norm_coefficients_ptr,
const float* input_gate_bias_ptr, const float* forget_gate_bias_ptr,
const float* cell_gate_bias_ptr, const float* output_gate_bias_ptr,
const int8_t* projection_weights_ptr,
const uint8_t* projection_weights_ledger_ptr,
float projection_weights_scale, const float* projection_bias_ptr,
const TfLiteLSTMParams* params, int n_batch, int n_cell, int n_input,
int n_aux_input, int n_output, int output_batch_leading_dim,
float* scratch0, float* scratch1, float* scratch2, float* scratch3,
float* input_sf, float* aux_input_sf, float* output_state_sf,
float* scaling_factors_scratch, float* recovered_cell_weights,
int8_t* quantized_input_ptr, int8_t* quantized_aux_input_ptr,
int8_t* quantized_output_state_ptr, int8_t* quantized_output_scratch,
float* output_state_ptr, float* cell_state_ptr, int32_t* accum_scratch_ptr,
float* output_ptr, int32_t* input_zp, int32_t* aux_input_zp,
int32_t* output_state_zp, int32_t* row_sums, int row_sums_size,
bool* compute_row_sums, bool asymmetric_quantize_inputs,
bool recurrent_to_input_is_diag, bool recurrent_to_forget_is_diag,
bool recurrent_to_cell_is_diag, bool recurrent_to_output_is_diag,
CpuBackendContext* context) {
ruy::profiler::ScopeLabel label("LstmStepHybrid");
const bool use_cifg = (input_to_input_weights_ptr == nullptr);
float* input_gate_scratch = scratch0;
float* forget_gate_scratch = scratch1;
float* cell_gate_scratch = scratch2;
float* output_gate_scratch = scratch3;
int32_t* input_to_input_row_sums = nullptr;
int32_t* input_to_forget_row_sums = nullptr;
int32_t* input_to_cell_row_sums = nullptr;
int32_t* input_to_output_row_sums = nullptr;
int32_t* aux_input_to_input_row_sums = nullptr;
int32_t* aux_input_to_forget_row_sums = nullptr;
int32_t* aux_input_to_cell_row_sums = nullptr;
int32_t* aux_input_to_output_row_sums = nullptr;
int32_t* recurrent_to_input_row_sums = nullptr;
int32_t* recurrent_to_forget_row_sums = nullptr;
int32_t* recurrent_to_cell_row_sums = nullptr;
int32_t* recurrent_to_output_row_sums = nullptr;
int32_t* projection_weights_row_sums = nullptr;
if (asymmetric_quantize_inputs) {
int num_row_sums = use_cifg ? 6 : 8;
if (aux_input_ptr != nullptr) {
num_row_sums += use_cifg ? 3 : 4;
}
if (projection_weights_ptr != nullptr) {
num_row_sums += ceil(static_cast<float>(n_output) / n_cell);
}
TF_LITE_ASSERT(row_sums_size == num_row_sums);
input_to_input_row_sums = row_sums;
input_to_forget_row_sums =
use_cifg ? input_to_input_row_sums : input_to_input_row_sums + n_cell;
input_to_cell_row_sums = input_to_forget_row_sums + n_cell;
input_to_output_row_sums = input_to_cell_row_sums + n_cell;
if (aux_input_ptr != nullptr) {
aux_input_to_input_row_sums = input_to_output_row_sums + n_cell;
aux_input_to_forget_row_sums = use_cifg
? aux_input_to_input_row_sums
: aux_input_to_input_row_sums + n_cell;
aux_input_to_cell_row_sums = aux_input_to_forget_row_sums + n_cell;
aux_input_to_output_row_sums = aux_input_to_cell_row_sums + n_cell;
}
recurrent_to_input_row_sums = aux_input_ptr
? aux_input_to_output_row_sums + n_cell
: input_to_output_row_sums + n_cell;
recurrent_to_forget_row_sums = use_cifg
? recurrent_to_input_row_sums
: recurrent_to_input_row_sums + n_cell;
recurrent_to_cell_row_sums = recurrent_to_forget_row_sums + n_cell;
recurrent_to_output_row_sums = recurrent_to_cell_row_sums + n_cell;
if (projection_weights_ptr != nullptr) {
projection_weights_row_sums = recurrent_to_output_row_sums + n_cell;
}
if (*compute_row_sums) {
ComputeRowSums(
input_to_input_row_sums, input_to_forget_row_sums,
input_to_cell_row_sums, input_to_output_row_sums,
aux_input_to_input_row_sums, aux_input_to_forget_row_sums,
aux_input_to_cell_row_sums, aux_input_to_output_row_sums,
recurrent_to_input_row_sums, recurrent_to_forget_row_sums,
recurrent_to_cell_row_sums, recurrent_to_output_row_sums,
projection_weights_row_sums, row_sums, n_cell, n_input, n_aux_input,
n_output, input_to_input_weights_ptr, input_to_forget_weights_ptr,
input_to_cell_weights_ptr, input_to_output_weights_ptr,
aux_input_to_input_weights_ptr, aux_input_to_forget_weights_ptr,
aux_input_to_cell_weights_ptr, aux_input_to_output_weights_ptr,
recurrent_to_input_weights_ptr, recurrent_to_forget_weights_ptr,
recurrent_to_cell_weights_ptr, recurrent_to_output_weights_ptr,
projection_weights_ptr, use_cifg, aux_input_ptr,
recurrent_to_input_is_diag, recurrent_to_forget_is_diag,
recurrent_to_cell_is_diag, recurrent_to_output_is_diag);
*compute_row_sums = false;
}
}
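  // Skip quantization for operands that are entirely zero.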
const bool is_input_all_zeros =
tensor_utils::IsZeroVector(input_ptr, n_batch * n_input);
const bool is_aux_input_all_zeros =
(aux_input_ptr == nullptr ||
tensor_utils::IsZeroVector(aux_input_ptr, n_batch * n_aux_input));
const bool is_output_state_all_zeros =
tensor_utils::IsZeroVector(output_state_ptr, n_batch * n_output);
if (!is_input_all_zeros) {
tensor_utils::BatchQuantizeFloats(input_ptr, n_batch, n_input,
quantized_input_ptr, input_sf, input_zp,
asymmetric_quantize_inputs);
}
if (!is_aux_input_all_zeros) {
tensor_utils::BatchQuantizeFloats(aux_input_ptr, n_batch, n_aux_input,
quantized_aux_input_ptr, aux_input_sf,
aux_input_zp, asymmetric_quantize_inputs);
}
if (!is_output_state_all_zeros) {
tensor_utils::BatchQuantizeFloats(
output_state_ptr, n_batch, n_output, quantized_output_state_ptr,
output_state_sf, output_state_zp, asymmetric_quantize_inputs);
}
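  // Input gate, skipped when CIFG is used.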
if (!use_cifg) {
CalculateLstmGateHybrid(
quantized_input_ptr, input_sf, input_zp, input_to_input_weights_ptr,
input_to_input_weights_ledger_ptr, input_to_input_weights_scale,
input_to_input_row_sums, quantized_aux_input_ptr, aux_input_sf,
aux_input_zp, aux_input_to_input_weights_ptr,
aux_input_to_input_weights_scale, aux_input_to_input_row_sums,
quantized_output_state_ptr, output_state_ptr, output_state_sf,
output_state_zp, recurrent_to_input_weights_ptr,
recurrent_to_input_diag, recurrent_to_input_weights_ledger_ptr,
recurrent_to_input_weights_scale, recurrent_to_input_row_sums,
cell_state_ptr, cell_to_input_weights_ptr, cell_to_input_weights_scale,
input_layer_norm_coefficients_ptr, input_gate_bias_ptr, n_batch,
n_input, n_aux_input, n_output, n_cell, kTfLiteActSigmoid,
input_gate_scratch, is_input_all_zeros, is_aux_input_all_zeros,
is_output_state_all_zeros, compute_row_sums, context,
scaling_factors_scratch, recovered_cell_weights, accum_scratch_ptr,
recurrent_to_input_is_diag);
}
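  // Forget gate.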
CalculateLstmGateHybrid(
quantized_input_ptr, input_sf, input_zp, input_to_forget_weights_ptr,
input_to_forget_weights_ledger_ptr, input_to_forget_weights_scale,
input_to_forget_row_sums, quantized_aux_input_ptr, aux_input_sf,
aux_input_zp, aux_input_to_forget_weights_ptr,
aux_input_to_forget_weights_scale, aux_input_to_forget_row_sums,
quantized_output_state_ptr, output_state_ptr, output_state_sf,
output_state_zp, recurrent_to_forget_weights_ptr,
recurrent_to_forget_diag, recurrent_to_forget_weights_ledger_ptr,
recurrent_to_forget_weights_scale, recurrent_to_forget_row_sums,
cell_state_ptr, cell_to_forget_weights_ptr, cell_to_forget_weights_scale,
forget_layer_norm_coefficients_ptr, forget_gate_bias_ptr, n_batch,
n_input, n_aux_input, n_output, n_cell, kTfLiteActSigmoid,
forget_gate_scratch, is_input_all_zeros, is_aux_input_all_zeros,
is_output_state_all_zeros, compute_row_sums, context,
scaling_factors_scratch, recovered_cell_weights, accum_scratch_ptr,
recurrent_to_forget_is_diag);
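  // Cell gate.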
CalculateLstmGateHybrid(
quantized_input_ptr, input_sf, input_zp, input_to_cell_weights_ptr,
input_to_cell_weights_ledger_ptr, input_to_cell_weights_scale,
input_to_cell_row_sums, quantized_aux_input_ptr, aux_input_sf,
aux_input_zp, aux_input_to_cell_weights_ptr,
aux_input_to_cell_weights_scale, aux_input_to_cell_row_sums,
quantized_output_state_ptr, output_state_ptr, output_state_sf,
output_state_zp, recurrent_to_cell_weights_ptr, recurrent_to_cell_diag,
recurrent_to_cell_weights_ledger_ptr, recurrent_to_cell_weights_scale,
recurrent_to_cell_row_sums,
      nullptr, nullptr,  // the cell gate has no cell-state (peephole) inputs
      0.0f, cell_layer_norm_coefficients_ptr,
cell_gate_bias_ptr, n_batch, n_input, n_aux_input, n_output, n_cell,
params->activation, cell_gate_scratch, is_input_all_zeros,
is_aux_input_all_zeros, is_output_state_all_zeros, compute_row_sums,
context, scaling_factors_scratch, recovered_cell_weights,
accum_scratch_ptr, recurrent_to_cell_is_diag);
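  // Update the cell state with the gate outputs.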
UpdateLstmCellFloat(n_batch, n_cell, cell_state_ptr, input_gate_scratch,
forget_gate_scratch, cell_gate_scratch, use_cifg,
params->cell_clip);
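  // Output gate.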
CalculateLstmGateHybrid(
quantized_input_ptr, input_sf, input_zp, input_to_output_weights_ptr,
input_to_output_weights_ledger_ptr, input_to_output_weights_scale,
input_to_output_row_sums, quantized_aux_input_ptr, aux_input_sf,
aux_input_zp, aux_input_to_output_weights_ptr,
aux_input_to_output_weights_scale, aux_input_to_output_row_sums,
quantized_output_state_ptr, output_state_ptr, output_state_sf,
output_state_zp, recurrent_to_output_weights_ptr,
recurrent_to_output_diag, recurrent_to_output_weights_ledger_ptr,
recurrent_to_output_weights_scale, recurrent_to_output_row_sums,
cell_state_ptr, cell_to_output_weights_ptr, cell_to_output_weights_scale,
output_layer_norm_coefficients_ptr, output_gate_bias_ptr, n_batch,
n_input, n_aux_input, n_output, n_cell, kTfLiteActSigmoid,
output_gate_scratch, is_input_all_zeros, is_aux_input_all_zeros,
is_output_state_all_zeros, compute_row_sums, context,
scaling_factors_scratch, recovered_cell_weights, accum_scratch_ptr,
recurrent_to_output_is_diag);
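  // Compute the output (hidden) state, applying the projection if present.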
CalculateLstmOutputHybrid(
n_batch, n_cell, n_output, cell_state_ptr, output_gate_scratch,
params->activation, projection_weights_ptr, projection_weights_ledger_ptr,
projection_weights_scale, projection_bias_ptr, params->proj_clip,
output_state_ptr, asymmetric_quantize_inputs, projection_weights_row_sums,
compute_row_sums, context, scratch2, quantized_output_scratch, input_sf,
input_zp, accum_scratch_ptr);
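  // Copy the new output state into the output tensor for each batch.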
for (int b = 0; b < n_batch; b++) {
std::copy_n(output_state_ptr + b * n_output, n_output,
output_ptr + b * output_batch_leading_dim);
}
}
inline void LstmStepInteger8x8_16(
const int8_t* input_ptr, const int8_t* input_to_input_weight_ptr,
int32_t effective_input_to_input_scale_a,
int32_t effective_input_to_input_scale_b,
const int8_t* input_to_forget_weight_ptr,
int32_t effective_input_to_forget_scale_a,
int32_t effective_input_to_forget_scale_b,
const int8_t* input_to_cell_weight_ptr,
int32_t effective_input_to_cell_scale_a,
int32_t effective_input_to_cell_scale_b,
const int8_t* input_to_output_weight_ptr,
int32_t effective_input_to_output_scale_a,
int32_t effective_input_to_output_scale_b,
const int8_t* recurrent_to_input_weight_ptr,
int32_t effective_recurrent_to_input_scale_a,
int32_t effective_recurrent_to_input_scale_b,
const int8_t* recurrent_to_forget_weight_ptr,
int32_t effective_recurrent_to_forget_scale_a,
int32_t effective_recurrent_to_forget_scale_b,
const int8_t* recurrent_to_cell_weight_ptr,
int32_t effective_recurrent_to_cell_scale_a,
int32_t effective_recurrent_to_cell_scale_b,
const int8_t* recurrent_to_output_weight_ptr,
int32_t effective_recurrent_to_output_scale_a,
int32_t effective_recurrent_to_output_scale_b,
const int16_t* cell_to_input_weight_ptr,
int32_t effective_cell_to_input_scale_a,
int32_t effective_cell_to_input_scale_b,
const int16_t* cell_to_forget_weight_ptr,
int32_t effective_cell_to_forget_scale_a,
int32_t effective_cell_to_forget_scale_b,
const int16_t* cell_to_output_weight_ptr,
int32_t effective_cell_to_output_scale_a,
int32_t effective_cell_to_output_scale_b,
const int8_t* projection_weight_ptr, int32_t effective_proj_scale_a,
int32_t effective_proj_scale_b, int32_t hidden_zp,
int32_t effective_hidden_scale_a, int32_t effective_hidden_scale_b,
const int16_t* layer_norm_input_weight_ptr,
int32_t layer_norm_input_scale_a, int32_t layer_norm_input_scale_b,
const int16_t* layer_norm_forget_weight_ptr,
int32_t layer_norm_forget_scale_a, int32_t layer_norm_forget_scale_b,
const int16_t* layer_norm_cell_weight_ptr, int32_t layer_norm_cell_scale_a,
int32_t layer_norm_cell_scale_b,
const int16_t* layer_norm_output_weight_ptr,
int32_t layer_norm_output_scale_a, int32_t layer_norm_output_scale_b,
const int32_t* input_gate_bias_ptr, const int32_t* forget_gate_bias_ptr,
const int32_t* cell_gate_bias_ptr, const int32_t* output_gate_bias_ptr,
int16_t quantized_cell_clip, int8_t quantized_proj_clip,
int32_t cell_state_scale, int32_t input_variance_guard,
int32_t forget_variance_guard, int32_t cell_variance_guard,
int32_t output_variance_guard,
const int32_t* input_to_forget_effective_bias,
const int32_t* recurrent_to_forget_effective_bias,
const int32_t* input_to_cell_effective_bias,
const int32_t* recurrent_to_cell_effective_bias,
const int32_t* input_to_output_effective_bias,
const int32_t* recurrent_to_output_effective_bias,
const int32_t* input_to_input_effective_bias,
const int32_t* recurrent_to_input_effective_bias,
const int32_t* projection_effective_bias, int n_batch, int n_cell,
int n_input, int n_output, int8_t* output_state_ptr,
int32_t output_state_zp, int16_t* cell_state_ptr, int8_t* output_ptr,
int16_t* scratch0, int16_t* scratch1, int16_t* scratch2, int16_t* scratch3,
int8_t* scratch4, int32_t* scratch5, CpuBackendContext* context) {
ruy::profiler::ScopeLabel label("LstmStepInteger8x8_16");
int16_t* input_gate_scratch = scratch0;
int16_t* forget_gate_scratch = scratch1;
int16_t* cell_gate_scratch = scratch2;
int16_t* output_gate_scratch = scratch3;
const bool use_cifg = (input_to_input_weight_ptr == nullptr);
TFLITE_DCHECK(input_to_forget_effective_bias);
TFLITE_DCHECK(recurrent_to_forget_effective_bias);
TFLITE_DCHECK(input_to_cell_effective_bias);
TFLITE_DCHECK(recurrent_to_cell_effective_bias);
TFLITE_DCHECK(input_to_output_effective_bias);
TFLITE_DCHECK(recurrent_to_output_effective_bias);
if (!use_cifg) {
TFLITE_DCHECK(input_to_input_effective_bias);
TFLITE_DCHECK(recurrent_to_input_effective_bias);
}
const bool use_projection = (projection_weight_ptr != nullptr);
if (use_projection) {
TFLITE_DCHECK(projection_effective_bias);
}
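  // Input gate, skipped when CIFG is used.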
if (!use_cifg) {
CalculateLstmGateInteger8x8_16(
input_ptr, input_to_input_weight_ptr, input_to_input_effective_bias,
effective_input_to_input_scale_a, effective_input_to_input_scale_b,
output_state_ptr, recurrent_to_input_weight_ptr,
recurrent_to_input_effective_bias, effective_recurrent_to_input_scale_a,
effective_recurrent_to_input_scale_b, cell_state_ptr,
cell_to_input_weight_ptr, effective_cell_to_input_scale_a,
effective_cell_to_input_scale_b, layer_norm_input_weight_ptr,
input_gate_bias_ptr, layer_norm_input_scale_a, layer_norm_input_scale_b,
input_variance_guard, n_batch, n_input, n_output, n_cell,
kTfLiteActSigmoid, input_gate_scratch, context, scratch5);
}
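  // Forget gate.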
CalculateLstmGateInteger8x8_16(
input_ptr, input_to_forget_weight_ptr, input_to_forget_effective_bias,
effective_input_to_forget_scale_a, effective_input_to_forget_scale_b,
output_state_ptr, recurrent_to_forget_weight_ptr,
recurrent_to_forget_effective_bias, effective_recurrent_to_forget_scale_a,
effective_recurrent_to_forget_scale_b, cell_state_ptr,
cell_to_forget_weight_ptr, effective_cell_to_forget_scale_a,
effective_cell_to_forget_scale_b, layer_norm_forget_weight_ptr,
forget_gate_bias_ptr, layer_norm_forget_scale_a,
layer_norm_forget_scale_b, forget_variance_guard, n_batch, n_input,
n_output, n_cell, kTfLiteActSigmoid, forget_gate_scratch, context,
scratch5);
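  // Cell gate.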
CalculateLstmGateInteger8x8_16(
input_ptr, input_to_cell_weight_ptr, input_to_cell_effective_bias,
effective_input_to_cell_scale_a, effective_input_to_cell_scale_b,
output_state_ptr, recurrent_to_cell_weight_ptr,
recurrent_to_cell_effective_bias, effective_recurrent_to_cell_scale_a,
effective_recurrent_to_cell_scale_b, cell_state_ptr,
      nullptr, 0,  // no cell-to-gate (peephole) weights for the cell gate
      0, layer_norm_cell_weight_ptr,
cell_gate_bias_ptr, layer_norm_cell_scale_a, layer_norm_cell_scale_b,
cell_variance_guard, n_batch, n_input, n_output, n_cell, kTfLiteActTanh,
cell_gate_scratch, context, scratch5);
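  // Update the cell state.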
UpdateLstmCellInteger(n_batch, n_cell, cell_state_ptr, cell_state_scale,
input_gate_scratch, forget_gate_scratch,
cell_gate_scratch, use_cifg, quantized_cell_clip);
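  // Output gate.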
CalculateLstmGateInteger8x8_16(
input_ptr, input_to_output_weight_ptr, input_to_output_effective_bias,
effective_input_to_output_scale_a, effective_input_to_output_scale_b,
output_state_ptr, recurrent_to_output_weight_ptr,
recurrent_to_output_effective_bias, effective_recurrent_to_output_scale_a,
effective_recurrent_to_output_scale_b, cell_state_ptr,
cell_to_output_weight_ptr, effective_cell_to_output_scale_a,
effective_cell_to_output_scale_b, layer_norm_output_weight_ptr,
output_gate_bias_ptr, layer_norm_output_scale_a,
layer_norm_output_scale_b, output_variance_guard, n_batch, n_input,
n_output, n_cell, kTfLiteActSigmoid, output_gate_scratch, context,
scratch5);
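  // Compute the output (hidden) state, applying the projection if present.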
CalculateLstmOutputInteger8x8_16(
n_batch, n_cell, n_output, cell_state_ptr, cell_state_scale,
output_gate_scratch, effective_hidden_scale_a, effective_hidden_scale_b,
hidden_zp, projection_weight_ptr, effective_proj_scale_a,
effective_proj_scale_b, projection_effective_bias, output_state_zp,
quantized_proj_clip, output_state_ptr, context, scratch0, scratch4,
scratch5);
std::copy_n(output_state_ptr, n_batch * n_output, output_ptr);
}
inline void LstmStepInteger8x8_8(
const int8_t* input_ptr, int32_t input_zp,
const int8_t* input_to_input_weight_ptr,
int32_t effective_input_to_input_scale_a,
int32_t effective_input_to_input_scale_b,
const int8_t* input_to_forget_weight_ptr,
int32_t effective_input_to_forget_scale_a,
int32_t effective_input_to_forget_scale_b,
const int8_t* input_to_cell_weight_ptr,
int32_t effective_input_to_cell_scale_a,
int32_t effective_input_to_cell_scale_b,
const int8_t* input_to_output_weight_ptr,
int32_t effective_input_to_output_scale_a,
int32_t effective_input_to_output_scale_b,
const int8_t* recurrent_to_input_weight_ptr,
int32_t effective_recurrent_to_input_scale_a,
int32_t effective_recurrent_to_input_scale_b,
const int8_t* recurrent_to_forget_weight_ptr,
int32_t effective_recurrent_to_forget_scale_a,
int32_t effective_recurrent_to_forget_scale_b,
const int8_t* recurrent_to_cell_weight_ptr,
int32_t effective_recurrent_to_cell_scale_a,
int32_t effective_recurrent_to_cell_scale_b,
const int8_t* recurrent_to_output_weight_ptr,
int32_t effective_recurrent_to_output_scale_a,
int32_t effective_recurrent_to_output_scale_b,
const int8_t* cell_to_input_weight_ptr,
int32_t effective_cell_to_input_scale_a,
int32_t effective_cell_to_input_scale_b,
const int8_t* cell_to_forget_weight_ptr,
int32_t effective_cell_to_forget_scale_a,
int32_t effective_cell_to_forget_scale_b,
const int8_t* cell_to_output_weight_ptr,
int32_t effective_cell_to_output_scale_a,
int32_t effective_cell_to_output_scale_b,
const int8_t* projection_weight_ptr, int32_t effective_proj_scale_a,
int32_t effective_proj_scale_b, const int16_t* layer_norm_input_weight_ptr,
int32_t layer_norm_input_scale_a, int32_t layer_norm_input_scale_b,
const int16_t* layer_norm_forget_weight_ptr,
int32_t layer_norm_forget_scale_a, int32_t layer_norm_forget_scale_b,
const int16_t* layer_norm_cell_weight_ptr, int32_t layer_norm_cell_scale_a,
int32_t layer_norm_cell_scale_b,
const int16_t* layer_norm_output_weight_ptr,
int32_t layer_norm_output_scale_a, int32_t layer_norm_output_scale_b,
const int32_t* input_gate_bias_ptr, const int32_t* forget_gate_bias_ptr,
const int32_t* cell_gate_bias_ptr, const int32_t* output_gate_bias_ptr,
const int32_t* projection_bias_ptr, const TfLiteLSTMParams* params,
const int32_t* intermediate_scale_a, const int32_t* intermediate_scale_b,
const int32_t* intermediate_zp, int16_t quantized_cell_clip,
int8_t quantized_proj_clip, int n_batch, int n_cell, int n_input,
int n_output, int output_batch_leading_dim, int8_t* output_state_ptr,
int32_t output_state_zp, int16_t* cell_state_ptr, int8_t* output_ptr,
int8_t* scratch0, int8_t* scratch1, int16_t* scratch2, int16_t* scratch3,
int16_t* scratch4, int16_t* scratch5, int16_t* scratch6,
int16_t* scratch7) {
ruy::profiler::ScopeLabel label("LstmStepInteger8x8_8");
int16_t* forget_gate_scratch = scratch2;
int16_t* cell_gate_scratch = scratch3;
int16_t* output_gate_scratch = scratch4;
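  // Forget gate.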
CalculateLstmGateInteger8x8_8(
input_ptr, input_zp, input_to_forget_weight_ptr,
effective_input_to_forget_scale_a, effective_input_to_forget_scale_b,
intermediate_scale_a[2], intermediate_scale_b[2], intermediate_zp[4],
output_state_ptr, output_state_zp, recurrent_to_forget_weight_ptr,
effective_recurrent_to_forget_scale_a,
effective_recurrent_to_forget_scale_b, intermediate_scale_a[3],
intermediate_scale_b[3], intermediate_zp[5], layer_norm_forget_weight_ptr,
layer_norm_forget_scale_a, layer_norm_forget_scale_b,
forget_gate_bias_ptr, n_batch, n_input, n_output, n_cell,
kTfLiteActSigmoid, forget_gate_scratch, scratch0, scratch1);
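  // Cell gate.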
CalculateLstmGateInteger8x8_8(
input_ptr, input_zp, input_to_cell_weight_ptr,
effective_input_to_cell_scale_a, effective_input_to_cell_scale_b,
intermediate_scale_a[4], intermediate_scale_b[4], intermediate_zp[7],
output_state_ptr, output_state_zp, recurrent_to_cell_weight_ptr,
effective_recurrent_to_cell_scale_a, effective_recurrent_to_cell_scale_b,
intermediate_scale_a[5], intermediate_scale_b[5], intermediate_zp[8],
layer_norm_cell_weight_ptr, layer_norm_cell_scale_a,
layer_norm_cell_scale_b, cell_gate_bias_ptr, n_batch, n_input, n_output,
n_cell, kTfLiteActTanh, cell_gate_scratch, scratch0, scratch1);
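  // Update the cell state; the input gate is coupled to the forget gate here.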
  UpdateLstmCellInteger(n_batch, n_cell, cell_state_ptr,
                        /*cell_state_scale=*/-15, /*input_gate_scratch=*/nullptr,
                        forget_gate_scratch, cell_gate_scratch,
                        /*use_cifg=*/true, quantized_cell_clip);
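  // Output gate.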
CalculateLstmGateInteger8x8_8(
input_ptr, input_zp, input_to_output_weight_ptr,
effective_input_to_output_scale_a, effective_input_to_output_scale_b,
intermediate_scale_a[6], intermediate_scale_b[6], intermediate_zp[10],
output_state_ptr, output_state_zp, recurrent_to_output_weight_ptr,
effective_recurrent_to_output_scale_a,
effective_recurrent_to_output_scale_b, intermediate_scale_a[11],
intermediate_scale_b[7], intermediate_zp[7], layer_norm_output_weight_ptr,
layer_norm_output_scale_a, layer_norm_output_scale_b,
output_gate_bias_ptr, n_batch, n_input, n_output, n_cell,
kTfLiteActSigmoid, output_gate_scratch, scratch0, scratch1);
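  // Compute the output (hidden) state.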
CalculateLstmOutputInteger8x8_8(
n_batch, n_cell, n_output, cell_state_ptr, output_gate_scratch,
projection_weight_ptr, effective_proj_scale_a, effective_proj_scale_b,
projection_bias_ptr, output_state_zp, quantized_proj_clip,
output_state_ptr, scratch2);
std::copy_n(output_state_ptr, n_batch * n_output, output_ptr);
}
}  // namespace
TfLiteStatus EvalFloat(
const TfLiteTensor* input, const TfLiteTensor* input_to_input_weights,
const TfLiteTensor* input_to_forget_weights,
const TfLiteTensor* input_to_cell_weights,
const TfLiteTensor* input_to_output_weights,
const TfLiteTensor* recurrent_to_input_weights,
const TfLiteTensor* recurrent_to_forget_weights,
const TfLiteTensor* recurrent_to_cell_weights,
const TfLiteTensor* recurrent_to_output_weights,
const TfLiteTensor* cell_to_input_weights,
const TfLiteTensor* cell_to_forget_weights,
const TfLiteTensor* cell_to_output_weights,
const TfLiteTensor* input_layer_norm_coefficients,
const TfLiteTensor* forget_layer_norm_coefficients,
const TfLiteTensor* cell_layer_norm_coefficients,
const TfLiteTensor* output_layer_norm_coefficients,
const TfLiteTensor* aux_input,
const TfLiteTensor* aux_input_to_input_weights,
const TfLiteTensor* aux_input_to_forget_weights,
const TfLiteTensor* aux_input_to_cell_weights,
const TfLiteTensor* aux_input_to_output_weights,
const TfLiteTensor* input_gate_bias, const TfLiteTensor* forget_gate_bias,
const TfLiteTensor* cell_gate_bias, const TfLiteTensor* output_gate_bias,
const TfLiteTensor* projection_weights, const TfLiteTensor* projection_bias,
const TfLiteLSTMParams* params, bool forward_sequence, bool time_major,
int output_offset, TfLiteTensor* scratch_buffer, TfLiteTensor* output_state,
TfLiteTensor* cell_state, TfLiteTensor* output,
bool recurrent_to_input_is_diag, bool recurrent_to_forget_is_diag,
bool recurrent_to_cell_is_diag, bool recurrent_to_output_is_diag,
CpuBackendContext* context) {
TF_LITE_ASSERT(input->dims->size >= 2 && input->dims->size <= 3);
int max_time, n_batch;
if (input->dims->size == 3) {
max_time = (time_major) ? input->dims->data[0] : input->dims->data[1];
n_batch = (time_major) ? input->dims->data[1] : input->dims->data[0];
} else {
max_time = 1;
n_batch = input->dims->data[0];
}
const int n_input = input->dims->data[input->dims->size - 1];
const int aux_input_size =
(aux_input) ? aux_input->dims->data[aux_input->dims->size - 1] : 0;
const int n_cell = input_to_output_weights->dims->data[0];
const int n_output = recurrent_to_output_is_diag
? recurrent_to_output_weights->dims->data[0]
: recurrent_to_output_weights->dims->data[1];
const bool use_cifg = (input_to_input_weights == nullptr);
float* scratch_buffer_ptr = GetTensorData<float>(scratch_buffer);
float* input_gate_scratch = nullptr;
float* cell_gate_scratch = nullptr;
float* forget_gate_scratch = nullptr;
float* output_gate_scratch = nullptr;
float* accumulation_scratch_buffer = nullptr;
if (use_cifg) {
cell_gate_scratch = scratch_buffer_ptr;
forget_gate_scratch = scratch_buffer_ptr + n_cell * n_batch;
output_gate_scratch = scratch_buffer_ptr + 2 * n_cell * n_batch;
accumulation_scratch_buffer = scratch_buffer_ptr + 3 * n_cell * n_batch;
} else {
input_gate_scratch = scratch_buffer_ptr;
cell_gate_scratch = scratch_buffer_ptr + n_cell * n_batch;
forget_gate_scratch = scratch_buffer_ptr + 2 * n_cell * n_batch;
output_gate_scratch = scratch_buffer_ptr + 3 * n_cell * n_batch;
accumulation_scratch_buffer = scratch_buffer_ptr + 4 * n_cell * n_batch;
}
const int output_batch_leading_dim =
output->dims->data[output->dims->size - 1];
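  // Run one LSTM step per timestep; the loop order depends on whether the
  // input is time-major.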
if (time_major) {
const int input_step = n_batch * n_input;
const int output_step = n_batch * output_batch_leading_dim;
for (int t = 0; t < max_time; t++) {
const int t_rel = forward_sequence ? t : max_time - t - 1;
const float* input_ptr = GetTensorData<float>(input) + t_rel * input_step;
const float* aux_input_ptr = nullptr;
if (aux_input) {
aux_input_ptr = GetTensorData<float>(aux_input) + t_rel * input_step;
}
float* output_ptr =
GetTensorData<float>(output) + t_rel * output_step + output_offset;
LstmStepFloat(
input_ptr, GetTensorData<float>(input_to_input_weights),
GetTensorData<float>(input_to_forget_weights),
GetTensorData<float>(input_to_cell_weights),
GetTensorData<float>(input_to_output_weights), aux_input_ptr,
GetTensorData<float>(aux_input_to_input_weights),
GetTensorData<float>(aux_input_to_forget_weights),
GetTensorData<float>(aux_input_to_cell_weights),
GetTensorData<float>(aux_input_to_output_weights),
GetTensorData<float>(recurrent_to_input_weights),
GetTensorData<float>(recurrent_to_forget_weights),
GetTensorData<float>(recurrent_to_cell_weights),
GetTensorData<float>(recurrent_to_output_weights),
GetTensorData<float>(cell_to_input_weights),
GetTensorData<float>(cell_to_forget_weights),
GetTensorData<float>(cell_to_output_weights),
GetTensorData<float>(input_layer_norm_coefficients),
GetTensorData<float>(forget_layer_norm_coefficients),
GetTensorData<float>(cell_layer_norm_coefficients),
GetTensorData<float>(output_layer_norm_coefficients),
GetTensorData<float>(input_gate_bias),
GetTensorData<float>(forget_gate_bias),
GetTensorData<float>(cell_gate_bias),
GetTensorData<float>(output_gate_bias),
GetTensorData<float>(projection_weights),
GetTensorData<float>(projection_bias), params, n_batch, n_cell,
n_input, aux_input_size, n_output, output_batch_leading_dim,
GetTensorData<float>(output_state), GetTensorData<float>(cell_state),
input_gate_scratch, forget_gate_scratch, cell_gate_scratch,
output_gate_scratch, accumulation_scratch_buffer, output_ptr,
recurrent_to_input_is_diag, recurrent_to_forget_is_diag,
recurrent_to_cell_is_diag, recurrent_to_output_is_diag, context);
}
} else {
for (int b = 0; b < n_batch; b++) {
const int input_step = n_input;
const int output_step = output_batch_leading_dim;
for (int t = 0; t < max_time; t++) {
const int t_rel = forward_sequence ? t : max_time - t - 1;
const int time_offset = b * max_time + t_rel;
const float* input_ptr =
GetTensorData<float>(input) + time_offset * input_step;
const float* aux_input_ptr = nullptr;
if (aux_input) {
aux_input_ptr =
GetTensorData<float>(aux_input) + time_offset * input_step;
}
float* output_ptr = GetTensorData<float>(output) +
time_offset * output_step + output_offset;
float* output_state_ptr =
GetTensorData<float>(output_state) + b * output_batch_leading_dim;
float* cell_state_ptr = GetTensorData<float>(cell_state) + b * n_cell;
float* input_gate_scratch_ptr =
input_gate_scratch ? input_gate_scratch + b * n_cell : nullptr;
float* forget_gate_scratch_ptr = forget_gate_scratch + b * n_cell;
float* cell_gate_scratch_ptr = cell_gate_scratch + b * n_cell;
float* output_gate_scratch_ptr = output_gate_scratch + b * n_cell;
LstmStepFloat(
input_ptr, GetTensorData<float>(input_to_input_weights),
GetTensorData<float>(input_to_forget_weights),
GetTensorData<float>(input_to_cell_weights),
GetTensorData<float>(input_to_output_weights), aux_input_ptr,
GetTensorData<float>(aux_input_to_input_weights),
GetTensorData<float>(aux_input_to_forget_weights),
GetTensorData<float>(aux_input_to_cell_weights),
GetTensorData<float>(aux_input_to_output_weights),
GetTensorData<float>(recurrent_to_input_weights),
GetTensorData<float>(recurrent_to_forget_weights),
GetTensorData<float>(recurrent_to_cell_weights),
GetTensorData<float>(recurrent_to_output_weights),
GetTensorData<float>(cell_to_input_weights),
GetTensorData<float>(cell_to_forget_weights),
GetTensorData<float>(cell_to_output_weights),
GetTensorData<float>(input_layer_norm_coefficients),
GetTensorData<float>(forget_layer_norm_coefficients),
GetTensorData<float>(cell_layer_norm_coefficients),
GetTensorData<float>(output_layer_norm_coefficients),
GetTensorData<float>(input_gate_bias),
GetTensorData<float>(forget_gate_bias),
GetTensorData<float>(cell_gate_bias),
GetTensorData<float>(output_gate_bias),
GetTensorData<float>(projection_weights),
            GetTensorData<float>(projection_bias), params, /*n_batch=*/1,
n_cell, n_input, aux_input_size, n_output, output_batch_leading_dim,
output_state_ptr, cell_state_ptr, input_gate_scratch_ptr,
forget_gate_scratch_ptr, cell_gate_scratch_ptr,
output_gate_scratch_ptr, accumulation_scratch_buffer, output_ptr,
recurrent_to_input_is_diag, recurrent_to_forget_is_diag,
recurrent_to_cell_is_diag, recurrent_to_output_is_diag, context);
}
}
}
return kTfLiteOk;
}
TfLiteStatus EvalHybrid(
const TfLiteTensor* input, const TfLiteTensor* input_to_input_weights,
const TfLiteTensor* input_to_input_weights_ledger,
const TfLiteTensor* input_to_forget_weights,
const TfLiteTensor* input_to_forget_weights_ledger,
const TfLiteTensor* input_to_cell_weights,
const TfLiteTensor* input_to_cell_weights_ledger,
const TfLiteTensor* input_to_output_weights,
const TfLiteTensor* input_to_output_weights_ledger,
const TfLiteTensor* recurrent_to_input_weights,
const TfLiteTensor* recurrent_to_input_weights_ledger,
const TfLiteTensor* recurrent_to_forget_weights,
const TfLiteTensor* recurrent_to_forget_weights_ledger,
const TfLiteTensor* recurrent_to_cell_weights,
const TfLiteTensor* recurrent_to_cell_weights_ledger,
const TfLiteTensor* recurrent_to_output_weights,
const TfLiteTensor* recurrent_to_output_weights_ledger,
const TfLiteTensor* cell_to_input_weights,
const TfLiteTensor* cell_to_forget_weights,
const TfLiteTensor* cell_to_output_weights,
const TfLiteTensor* input_layer_norm_coefficients,
const TfLiteTensor* forget_layer_norm_coefficients,
const TfLiteTensor* cell_layer_norm_coefficients,
const TfLiteTensor* output_layer_norm_coefficients,
const TfLiteTensor* aux_input,
const TfLiteTensor* aux_input_to_input_weights,
const TfLiteTensor* aux_input_to_forget_weights,
const TfLiteTensor* aux_input_to_cell_weights,
const TfLiteTensor* aux_input_to_output_weights,
const TfLiteTensor* input_gate_bias, const TfLiteTensor* forget_gate_bias,
const TfLiteTensor* cell_gate_bias, const TfLiteTensor* output_gate_bias,
const TfLiteTensor* projection_weights,
const TfLiteTensor* projection_weights_ledger,
const TfLiteTensor* projection_bias, const TfLiteLSTMParams* params,
bool forward_sequence, bool time_major, int output_offset,
TfLiteTensor* scratch_buffer, TfLiteTensor* input_sf,
TfLiteTensor* aux_input_sf, TfLiteTensor* output_state_sf,
TfLiteTensor* prod_scaling_factors, TfLiteTensor* recovered_cell_weights,
TfLiteTensor* input_quantized, TfLiteTensor* aux_input_quantized,
TfLiteTensor* output_state_quantized, TfLiteTensor* cell_state_quantized,
TfLiteTensor* output_state, TfLiteTensor* cell_state,
TfLiteTensor* output_scratch_buffer, TfLiteTensor* output,
TfLiteTensor* input_zp, TfLiteTensor* aux_input_zp,
TfLiteTensor* output_state_zp, TfLiteTensor* row_sums, int row_sums_size,
bool* compute_row_sums, bool recurrent_to_input_is_diag,
bool recurrent_to_forget_is_diag, bool recurrent_to_cell_is_diag,
bool recurrent_to_output_is_diag, CpuBackendContext* context) {
TF_LITE_ASSERT(input->dims->size >= 2 && input->dims->size <= 3);
const int n_input = input->dims->data[input->dims->size - 1];
int max_time, n_batch;
if (input->dims->size == 2) {
max_time = 1;
n_batch = input->dims->data[0];
} else {
max_time = (time_major) ? input->dims->data[0] : input->dims->data[1];
n_batch = (time_major) ? input->dims->data[1] : input->dims->data[0];
}
const int aux_input_size =
(aux_input) ? aux_input->dims->data[aux_input->dims->size - 1] : 0;
const int n_cell = input_to_output_weights->dims->data[0];
const int n_output = recurrent_to_output_is_diag
? recurrent_to_output_weights->dims->data[0]
: recurrent_to_output_weights->dims->data[1];
const bool use_cifg = (input_to_input_weights == nullptr);
float* scratch_buffer_ptr = GetTensorData<float>(scratch_buffer);
float* input_gate_scratch = nullptr;
float* cell_gate_scratch = nullptr;
float* forget_gate_scratch = nullptr;
float* output_gate_scratch = nullptr;
if (use_cifg) {
cell_gate_scratch = scratch_buffer_ptr;
forget_gate_scratch = scratch_buffer_ptr + n_cell * n_batch;
output_gate_scratch = scratch_buffer_ptr + 2 * n_cell * n_batch;
} else {
input_gate_scratch = scratch_buffer_ptr;
cell_gate_scratch = scratch_buffer_ptr + n_cell * n_batch;
forget_gate_scratch = scratch_buffer_ptr + 2 * n_cell * n_batch;
output_gate_scratch = scratch_buffer_ptr + 3 * n_cell * n_batch;
}
const int output_batch_leading_dim =
output->dims->data[output->dims->size - 1];
int32_t* input_zp_ptr = nullptr;
int32_t* aux_input_zp_ptr = nullptr;
int32_t* output_state_zp_ptr = nullptr;
int32_t* row_sums_ptr = nullptr;
if (params->asymmetric_quantize_inputs) {
input_zp_ptr = GetTensorData<int32_t>(input_zp);
aux_input_zp_ptr = GetTensorData<int32_t>(aux_input_zp);
output_state_zp_ptr = GetTensorData<int32_t>(output_state_zp);
row_sums_ptr = GetTensorData<int32_t>(row_sums);
}
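  // Run one LSTM step per timestep; the loop order depends on whether the
  // input is time-major.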
if (time_major) {
const int input_step = n_batch * n_input;
const int output_step = n_batch * output_batch_leading_dim;
for (int t = 0; t < max_time; t++) {
const int t_rel = forward_sequence ? t : max_time - t - 1;
const float* input_ptr = GetTensorData<float>(input) + t_rel * input_step;
const float* aux_input_ptr = nullptr;
if (aux_input) {
aux_input_ptr = GetTensorData<float>(aux_input) + t_rel * input_step;
}
float* output_ptr =
GetTensorData<float>(output) + t_rel * output_step + output_offset;
LstmStepHybrid(
input_ptr, GetTensorData<int8_t>(input_to_input_weights),
GetTensorData<uint8_t>(input_to_input_weights_ledger),
GetTensorScale(input_to_input_weights),
GetTensorData<int8_t>(input_to_forget_weights),
GetTensorData<uint8_t>(input_to_forget_weights_ledger),
GetTensorScale(input_to_forget_weights),
GetTensorData<int8_t>(input_to_cell_weights),
GetTensorData<uint8_t>(input_to_cell_weights_ledger),
GetTensorScale(input_to_cell_weights),
GetTensorData<int8_t>(input_to_output_weights),
GetTensorData<uint8_t>(input_to_output_weights_ledger),
GetTensorScale(input_to_output_weights), aux_input_ptr,
GetTensorData<int8_t>(aux_input_to_input_weights),
GetTensorScale(aux_input_to_input_weights),
GetTensorData<int8_t>(aux_input_to_forget_weights),
GetTensorScale(aux_input_to_forget_weights),
GetTensorData<int8_t>(aux_input_to_cell_weights),
GetTensorScale(aux_input_to_cell_weights),
GetTensorData<int8_t>(aux_input_to_output_weights),
GetTensorScale(aux_input_to_output_weights),
GetTensorData<int8_t>(recurrent_to_input_weights),
GetTensorData<float>(recurrent_to_input_weights),
GetTensorData<uint8_t>(recurrent_to_input_weights_ledger),
GetTensorScale(recurrent_to_input_weights),
GetTensorData<int8_t>(recurrent_to_forget_weights),
GetTensorData<float>(recurrent_to_forget_weights),
GetTensorData<uint8_t>(recurrent_to_forget_weights_ledger),
GetTensorScale(recurrent_to_forget_weights),
GetTensorData<int8_t>(recurrent_to_cell_weights),
GetTensorData<float>(recurrent_to_cell_weights),
GetTensorData<uint8_t>(recurrent_to_cell_weights_ledger),
GetTensorScale(recurrent_to_cell_weights),
GetTensorData<int8_t>(recurrent_to_output_weights),
GetTensorData<float>(recurrent_to_output_weights),
GetTensorData<uint8_t>(recurrent_to_output_weights_ledger),
GetTensorScale(recurrent_to_output_weights),
GetTensorData<int8_t>(cell_to_input_weights),
GetTensorScale(cell_to_input_weights),
GetTensorData<int8_t>(cell_to_forget_weights),
GetTensorScale(cell_to_forget_weights),
GetTensorData<int8_t>(cell_to_output_weights),
GetTensorScale(cell_to_output_weights),
GetTensorData<float>(input_layer_norm_coefficients),
GetTensorData<float>(forget_layer_norm_coefficients),
GetTensorData<float>(cell_layer_norm_coefficients),
GetTensorData<float>(output_layer_norm_coefficients),
GetTensorData<float>(input_gate_bias),
GetTensorData<float>(forget_gate_bias),
GetTensorData<float>(cell_gate_bias),
GetTensorData<float>(output_gate_bias),
GetTensorData<int8_t>(projection_weights),
GetTensorData<uint8_t>(projection_weights_ledger),
GetTensorScale(projection_weights),
GetTensorData<float>(projection_bias), params, n_batch, n_cell,
n_input, aux_input_size, n_output, output_batch_leading_dim,
input_gate_scratch, forget_gate_scratch, cell_gate_scratch,
output_gate_scratch, GetTensorData<float>(input_sf),
GetTensorData<float>(aux_input_sf),
GetTensorData<float>(output_state_sf),
GetTensorData<float>(prod_scaling_factors),
GetTensorData<float>(recovered_cell_weights),
GetTensorData<int8_t>(input_quantized),
GetTensorData<int8_t>(aux_input_quantized),
GetTensorData<int8_t>(output_state_quantized),
GetTensorData<int8_t>(cell_state_quantized),
GetTensorData<float>(output_state), GetTensorData<float>(cell_state),
GetTensorData<int32_t>(output_scratch_buffer), output_ptr,
input_zp_ptr, aux_input_zp_ptr, output_state_zp_ptr, row_sums_ptr,
row_sums_size, compute_row_sums, params->asymmetric_quantize_inputs,
recurrent_to_input_is_diag, recurrent_to_forget_is_diag,
recurrent_to_cell_is_diag, recurrent_to_output_is_diag, context);
}
} else {
for (int b = 0; b < n_batch; b++) {
const int input_step = n_input;
const int output_step = output_batch_leading_dim;
for (int t = 0; t < max_time; t++) {
const int t_rel = forward_sequence ? t : max_time - t - 1;
const int time_offset = b * max_time + t_rel;
const float* input_ptr =
GetTensorData<float>(input) + time_offset * input_step;
const float* aux_input_ptr = nullptr;
if (aux_input) {
aux_input_ptr =
GetTensorData<float>(aux_input) + time_offset * input_step;
}
float* output_ptr = GetTensorData<float>(output) +
time_offset * output_step + output_offset;
float* output_state_ptr =
GetTensorData<float>(output_state) + b * output_batch_leading_dim;
float* cell_state_ptr = GetTensorData<float>(cell_state) + b * n_cell;
float* input_gate_scratch_ptr =
input_gate_scratch ? input_gate_scratch + b * n_cell : nullptr;
float* forget_gate_scratch_ptr = forget_gate_scratch + b * n_cell;
float* cell_gate_scratch_ptr = cell_gate_scratch + b * n_cell;
float* output_gate_scratch_ptr = output_gate_scratch + b * n_cell;
LstmStepHybrid(
input_ptr, GetTensorData<int8_t>(input_to_input_weights),
GetTensorData<uint8_t>(input_to_input_weights_ledger),
GetTensorScale(input_to_input_weights),
GetTensorData<int8_t>(input_to_forget_weights),
GetTensorData<uint8_t>(input_to_forget_weights_ledger),
GetTensorScale(input_to_forget_weights),
GetTensorData<int8_t>(input_to_cell_weights),
GetTensorData<uint8_t>(input_to_cell_weights_ledger),
GetTensorScale(input_to_cell_weights),
GetTensorData<int8_t>(input_to_output_weights),
GetTensorData<uint8_t>(input_to_output_weights_ledger),
GetTensorScale(input_to_output_weights), aux_input_ptr,
GetTensorData<int8_t>(aux_input_to_input_weights),
GetTensorScale(aux_input_to_input_weights),
GetTensorData<int8_t>(aux_input_to_forget_weights),
GetTensorScale(aux_input_to_forget_weights),
GetTensorData<int8_t>(aux_input_to_cell_weights),
GetTensorScale(aux_input_to_cell_weights),
GetTensorData<int8_t>(aux_input_to_output_weights),
GetTensorScale(aux_input_to_output_weights),
GetTensorData<int8_t>(recurrent_to_input_weights),
GetTensorData<float>(recurrent_to_input_weights),
GetTensorData<uint8_t>(recurrent_to_input_weights_ledger),
GetTensorScale(recurrent_to_input_weights),
GetTensorData<int8_t>(recurrent_to_forget_weights),
GetTensorData<float>(recurrent_to_forget_weights),
GetTensorData<uint8_t>(recurrent_to_forget_weights_ledger),
GetTensorScale(recurrent_to_forget_weights),
GetTensorData<int8_t>(recurrent_to_cell_weights),
GetTensorData<float>(recurrent_to_cell_weights),
GetTensorData<uint8_t>(recurrent_to_cell_weights_ledger),
GetTensorScale(recurrent_to_cell_weights),
GetTensorData<int8_t>(recurrent_to_output_weights),
GetTensorData<float>(recurrent_to_output_weights),
GetTensorData<uint8_t>(recurrent_to_output_weights_ledger),
GetTensorScale(recurrent_to_output_weights),
GetTensorData<int8_t>(cell_to_input_weights),
GetTensorScale(cell_to_input_weights),
GetTensorData<int8_t>(cell_to_forget_weights),
GetTensorScale(cell_to_forget_weights),
GetTensorData<int8_t>(cell_to_output_weights),
GetTensorScale(cell_to_output_weights),
GetTensorData<float>(input_layer_norm_coefficients),
GetTensorData<float>(forget_layer_norm_coefficients),
GetTensorData<float>(cell_layer_norm_coefficients),
GetTensorData<float>(output_layer_norm_coefficients),
GetTensorData<float>(input_gate_bias),
GetTensorData<float>(forget_gate_bias),
GetTensorData<float>(cell_gate_bias),
GetTensorData<float>(output_gate_bias),
GetTensorData<int8_t>(projection_weights),
GetTensorData<uint8_t>(projection_weights_ledger),
GetTensorScale(projection_weights),
GetTensorData<float>(projection_bias), params,
            /*n_batch=*/1, n_cell, n_input, aux_input_size, n_output,
output_batch_leading_dim, input_gate_scratch_ptr,
forget_gate_scratch_ptr, cell_gate_scratch_ptr,
output_gate_scratch_ptr, GetTensorData<float>(input_sf),
GetTensorData<float>(aux_input_sf),
GetTensorData<float>(output_state_sf),
GetTensorData<float>(prod_scaling_factors),
GetTensorData<float>(recovered_cell_weights),
GetTensorData<int8_t>(input_quantized),
GetTensorData<int8_t>(aux_input_quantized),
GetTensorData<int8_t>(output_state_quantized),
GetTensorData<int8_t>(cell_state_quantized), output_state_ptr,
cell_state_ptr, GetTensorData<int32_t>(output_scratch_buffer),
output_ptr, input_zp_ptr, aux_input_zp_ptr, output_state_zp_ptr,
row_sums_ptr, row_sums_size, compute_row_sums,
params->asymmetric_quantize_inputs, recurrent_to_input_is_diag,
recurrent_to_forget_is_diag, recurrent_to_cell_is_diag,
recurrent_to_output_is_diag, context);
}
}
}
return kTfLiteOk;
}
TfLiteStatus EvalInteger8x8_16(
const TfLiteTensor* input, const TfLiteTensor* input_to_input_weights,
const TfLiteTensor* input_to_forget_weights,
const TfLiteTensor* input_to_cell_weights,
const TfLiteTensor* input_to_output_weights,
const TfLiteTensor* recurrent_to_input_weights,
const TfLiteTensor* recurrent_to_forget_weights,
const TfLiteTensor* recurrent_to_cell_weights,
const TfLiteTensor* recurrent_to_output_weights,
const TfLiteTensor* cell_to_input_weights,
const TfLiteTensor* cell_to_forget_weights,
const TfLiteTensor* cell_to_output_weights,
const TfLiteTensor* input_layer_norm_coefficients,
const TfLiteTensor* forget_layer_norm_coefficients,
const TfLiteTensor* cell_layer_norm_coefficients,
const TfLiteTensor* output_layer_norm_coefficients,
const TfLiteTensor* input_gate_bias, const TfLiteTensor* forget_gate_bias,
const TfLiteTensor* cell_gate_bias, const TfLiteTensor* output_gate_bias,
const TfLiteTensor* projection_weights, const TfLiteTensor* projection_bias,
const TfLiteLSTMParams* params, bool forward_sequence, bool time_major,
const lstm_eval::IntegerLstmParameter* integer_lstm_param,
TfLiteTensor* output_state, TfLiteTensor* cell_state, TfLiteTensor* output,
TfLiteTensor* scratch0, TfLiteTensor* scratch1, TfLiteTensor* scratch2,
TfLiteTensor* scratch3, TfLiteTensor* scratch4, TfLiteTensor* scratch5,
CpuBackendContext* context) {
TF_LITE_ASSERT(input->dims->size >= 2 && input->dims->size <= 3);
const int n_input = input->dims->data[input->dims->size - 1];
int max_time, n_batch;
if (input->dims->size == 2) {
max_time = 1;
n_batch = input->dims->data[0];
} else {
max_time = (time_major) ? input->dims->data[0] : input->dims->data[1];
n_batch = (time_major) ? input->dims->data[1] : input->dims->data[0];
}
const int n_cell = input_to_output_weights->dims->data[0];
const int n_output = recurrent_to_output_weights->dims->data[1];
int output_state_zp = output_state->params.zero_point;
const int output_batch_leading_dim =
output->dims->data[output->dims->size - 1];
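  // Run one LSTM step per timestep; the loop order depends on whether the
  // input is time-major.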
if (time_major) {
const int input_step = n_batch * n_input;
const int output_step = n_batch * output_batch_leading_dim;
for (int t = 0; t < max_time; t++) {
const int t_rel = t;
int8_t* output_ptr = GetTensorData<int8_t>(output) + t_rel * output_step;
const int8_t* input_ptr =
GetTensorData<int8_t>(input) + t_rel * input_step;
LstmStepInteger8x8_16(
input_ptr, GetTensorData<int8_t>(input_to_input_weights),
integer_lstm_param->effective_input_to_input_scale_a,
integer_lstm_param->effective_input_to_input_scale_b,
GetTensorData<int8_t>(input_to_forget_weights),
integer_lstm_param->effective_input_to_forget_scale_a,
integer_lstm_param->effective_input_to_forget_scale_b,
GetTensorData<int8_t>(input_to_cell_weights),
integer_lstm_param->effective_input_to_cell_scale_a,
integer_lstm_param->effective_input_to_cell_scale_b,
GetTensorData<int8_t>(input_to_output_weights),
integer_lstm_param->effective_input_to_output_scale_a,
integer_lstm_param->effective_input_to_output_scale_b,
GetTensorData<int8_t>(recurrent_to_input_weights),
integer_lstm_param->effective_recurrent_to_input_scale_a,
integer_lstm_param->effective_recurrent_to_input_scale_b,
GetTensorData<int8_t>(recurrent_to_forget_weights),
integer_lstm_param->effective_recurrent_to_forget_scale_a,
integer_lstm_param->effective_recurrent_to_forget_scale_b,
GetTensorData<int8_t>(recurrent_to_cell_weights),
integer_lstm_param->effective_recurrent_to_cell_scale_a,
integer_lstm_param->effective_recurrent_to_cell_scale_b,
GetTensorData<int8_t>(recurrent_to_output_weights),
integer_lstm_param->effective_recurrent_to_output_scale_a,
integer_lstm_param->effective_recurrent_to_output_scale_b,
GetTensorData<int16_t>(cell_to_input_weights),
integer_lstm_param->effective_cell_to_input_scale_a,
integer_lstm_param->effective_cell_to_input_scale_b,
GetTensorData<int16_t>(cell_to_forget_weights),
integer_lstm_param->effective_cell_to_forget_scale_a,
integer_lstm_param->effective_cell_to_forget_scale_b,
GetTensorData<int16_t>(cell_to_output_weights),
integer_lstm_param->effective_cell_to_output_scale_a,
integer_lstm_param->effective_cell_to_output_scale_b,
GetTensorData<int8_t>(projection_weights),
integer_lstm_param->effective_proj_scale_a,
integer_lstm_param->effective_proj_scale_b,
integer_lstm_param->hidden_zp,
integer_lstm_param->effective_hidden_scale_a,
integer_lstm_param->effective_hidden_scale_b,
GetTensorData<int16_t>(input_layer_norm_coefficients),
integer_lstm_param->layer_norm_input_scale_a,
integer_lstm_param->layer_norm_input_scale_b,
GetTensorData<int16_t>(forget_layer_norm_coefficients),
integer_lstm_param->layer_norm_forget_scale_a,
integer_lstm_param->layer_norm_forget_scale_b,
GetTensorData<int16_t>(cell_layer_norm_coefficients),
integer_lstm_param->layer_norm_cell_scale_a,
integer_lstm_param->layer_norm_cell_scale_b,
GetTensorData<int16_t>(output_layer_norm_coefficients),
integer_lstm_param->layer_norm_output_scale_a,
integer_lstm_param->layer_norm_output_scale_b,
GetTensorData<int32_t>(input_gate_bias),
GetTensorData<int32_t>(forget_gate_bias),
GetTensorData<int32_t>(cell_gate_bias),
GetTensorData<int32_t>(output_gate_bias),
integer_lstm_param->quantized_cell_clip,
integer_lstm_param->quantized_proj_clip,
integer_lstm_param->cell_scale,
integer_lstm_param->input_variance_guard,
integer_lstm_param->forget_variance_guard,
integer_lstm_param->cell_variance_guard,
integer_lstm_param->output_variance_guard,
integer_lstm_param->input_to_forget_effective_bias.get(),
integer_lstm_param->recurrent_to_forget_effective_bias.get(),
integer_lstm_param->input_to_cell_effective_bias.get(),
integer_lstm_param->recurrent_to_cell_effective_bias.get(),
integer_lstm_param->input_to_output_effective_bias.get(),
integer_lstm_param->recurrent_to_output_effective_bias.get(),
integer_lstm_param->input_to_input_effective_bias.get(),
integer_lstm_param->recurrent_to_input_effective_bias.get(),
integer_lstm_param->projection_effective_bias.get(), n_batch, n_cell,
n_input, n_output, GetTensorData<int8_t>(output_state),
output_state_zp, GetTensorData<int16_t>(cell_state), output_ptr,
GetTensorData<int16_t>(scratch0), GetTensorData<int16_t>(scratch1),
GetTensorData<int16_t>(scratch2), GetTensorData<int16_t>(scratch3),
GetTensorData<int8_t>(scratch4), GetTensorData<int32_t>(scratch5),
context);
}
} else {
for (int b = 0; b < n_batch; b++) {
const int input_step = n_input;
const int output_step = output_batch_leading_dim;
for (int t = 0; t < max_time; t++) {
const int t_rel = forward_sequence ? t : max_time - t - 1;
const int time_offset = b * max_time + t_rel;
const int8_t* input_ptr =
GetTensorData<int8_t>(input) + time_offset * input_step;
int8_t* output_ptr =
GetTensorData<int8_t>(output) + time_offset * output_step;
int8_t* output_state_ptr =
GetTensorData<int8_t>(output_state) + b * output_batch_leading_dim;
int16_t* cell_state_ptr =
GetTensorData<int16_t>(cell_state) + b * n_cell;
LstmStepInteger8x8_16(
input_ptr, GetTensorData<int8_t>(input_to_input_weights),
integer_lstm_param->effective_input_to_input_scale_a,
integer_lstm_param->effective_input_to_input_scale_b,
GetTensorData<int8_t>(input_to_forget_weights),
integer_lstm_param->effective_input_to_forget_scale_a,
integer_lstm_param->effective_input_to_forget_scale_b,
GetTensorData<int8_t>(input_to_cell_weights),
integer_lstm_param->effective_input_to_cell_scale_a,
integer_lstm_param->effective_input_to_cell_scale_b,
GetTensorData<int8_t>(input_to_output_weights),
integer_lstm_param->effective_input_to_output_scale_a,
integer_lstm_param->effective_input_to_output_scale_b,
GetTensorData<int8_t>(recurrent_to_input_weights),
integer_lstm_param->effective_recurrent_to_input_scale_a,
integer_lstm_param->effective_recurrent_to_input_scale_b,
GetTensorData<int8_t>(recurrent_to_forget_weights),
integer_lstm_param->effective_recurrent_to_forget_scale_a,
integer_lstm_param->effective_recurrent_to_forget_scale_b,
GetTensorData<int8_t>(recurrent_to_cell_weights),
integer_lstm_param->effective_recurrent_to_cell_scale_a,
integer_lstm_param->effective_recurrent_to_cell_scale_b,
GetTensorData<int8_t>(recurrent_to_output_weights),
integer_lstm_param->effective_recurrent_to_output_scale_a,
integer_lstm_param->effective_recurrent_to_output_scale_b,
GetTensorData<int16_t>(cell_to_input_weights),
integer_lstm_param->effective_cell_to_input_scale_a,
integer_lstm_param->effective_cell_to_input_scale_b,
GetTensorData<int16_t>(cell_to_forget_weights),
integer_lstm_param->effective_cell_to_forget_scale_a,
integer_lstm_param->effective_cell_to_forget_scale_b,
GetTensorData<int16_t>(cell_to_output_weights),
integer_lstm_param->effective_cell_to_output_scale_a,
integer_lstm_param->effective_cell_to_output_scale_b,
GetTensorData<int8_t>(projection_weights),
integer_lstm_param->effective_proj_scale_a,
integer_lstm_param->effective_proj_scale_b,
integer_lstm_param->hidden_zp,
integer_lstm_param->effective_hidden_scale_a,
integer_lstm_param->effective_hidden_scale_b,
GetTensorData<int16_t>(input_layer_norm_coefficients),
integer_lstm_param->layer_norm_input_scale_a,
integer_lstm_param->layer_norm_input_scale_b,
GetTensorData<int16_t>(forget_layer_norm_coefficients),
integer_lstm_param->layer_norm_forget_scale_a,
integer_lstm_param->layer_norm_forget_scale_b,
GetTensorData<int16_t>(cell_layer_norm_coefficients),
integer_lstm_param->layer_norm_cell_scale_a,
integer_lstm_param->layer_norm_cell_scale_b,
GetTensorData<int16_t>(output_layer_norm_coefficients),
integer_lstm_param->layer_norm_output_scale_a,
integer_lstm_param->layer_norm_output_scale_b,
GetTensorData<int32_t>(input_gate_bias),
GetTensorData<int32_t>(forget_gate_bias),
GetTensorData<int32_t>(cell_gate_bias),
GetTensorData<int32_t>(output_gate_bias),
integer_lstm_param->quantized_cell_clip,
integer_lstm_param->quantized_proj_clip,
integer_lstm_param->cell_scale,
integer_lstm_param->input_variance_guard,
integer_lstm_param->forget_variance_guard,
integer_lstm_param->cell_variance_guard,
integer_lstm_param->output_variance_guard,
integer_lstm_param->input_to_forget_effective_bias.get(),
integer_lstm_param->recurrent_to_forget_effective_bias.get(),
integer_lstm_param->input_to_cell_effective_bias.get(),
integer_lstm_param->recurrent_to_cell_effective_bias.get(),
integer_lstm_param->input_to_output_effective_bias.get(),
integer_lstm_param->recurrent_to_output_effective_bias.get(),
integer_lstm_param->input_to_input_effective_bias.get(),
integer_lstm_param->recurrent_to_input_effective_bias.get(),
            integer_lstm_param->projection_effective_bias.get(), /*n_batch=*/1,
n_cell, n_input, n_output, output_state_ptr, output_state_zp,
cell_state_ptr, output_ptr, GetTensorData<int16_t>(scratch0),
GetTensorData<int16_t>(scratch1), GetTensorData<int16_t>(scratch2),
GetTensorData<int16_t>(scratch3), GetTensorData<int8_t>(scratch4),
GetTensorData<int32_t>(scratch5), context);
}
}
}
return kTfLiteOk;
}
TfLiteStatus EvalInteger8x8_8(
const TfLiteTensor* input, const TfLiteTensor* input_to_input_weights,
const TfLiteTensor* input_to_forget_weights,
const TfLiteTensor* input_to_cell_weights,
const TfLiteTensor* input_to_output_weights,
const TfLiteTensor* recurrent_to_input_weights,
const TfLiteTensor* recurrent_to_forget_weights,
const TfLiteTensor* recurrent_to_cell_weights,
const TfLiteTensor* recurrent_to_output_weights,
const TfLiteTensor* cell_to_input_weights,
const TfLiteTensor* cell_to_forget_weights,
const TfLiteTensor* cell_to_output_weights,
const TfLiteTensor* input_layer_norm_coefficients,
const TfLiteTensor* forget_layer_norm_coefficients,
const TfLiteTensor* cell_layer_norm_coefficients,
const TfLiteTensor* output_layer_norm_coefficients,
const TfLiteTensor* input_gate_bias, const TfLiteTensor* forget_gate_bias,
const TfLiteTensor* cell_gate_bias, const TfLiteTensor* output_gate_bias,
const TfLiteTensor* projection_weights, const TfLiteTensor* projection_bias,
const TfLiteLSTMParams* params, TfLiteTensor* output_state,
TfLiteTensor* cell_state, TfLiteTensor* output,
const lstm_eval::IntegerLstmParameter* integer_lstm_param,
TfLiteTensor* scratch0, TfLiteTensor* scratch1, TfLiteTensor* scratch2,
TfLiteTensor* scratch3, TfLiteTensor* scratch4, TfLiteTensor* scratch5,
TfLiteTensor* scratch6, TfLiteTensor* scratch7) {
TF_LITE_ASSERT(input->dims->size >= 2 && input->dims->size <= 3);
const int n_input = input->dims->data[input->dims->size - 1];
int max_time, n_batch;
if (input->dims->size == 2) {
max_time = 1;
n_batch = input->dims->data[0];
} else {
max_time = input->dims->data[0];
n_batch = input->dims->data[1];
}
const int n_cell = input_to_output_weights->dims->data[0];
const int n_output = recurrent_to_output_weights->dims->data[1];
const int32_t input_zp = input->params.zero_point;
const int32_t output_state_zp = output_state->params.zero_point;
const int output_batch_leading_dim =
output->dims->data[output->dims->size - 1];
const int input_step = n_batch * n_input;
const int output_step = n_batch * output_batch_leading_dim;
for (int t = 0; t < max_time; t++) {
const int t_rel = t;
int8_t* output_ptr = GetTensorData<int8_t>(output) + t_rel * output_step;
const int8_t* input_ptr = GetTensorData<int8_t>(input) + t_rel * input_step;
lstm_eval::LstmStepInteger8x8_8(
input_ptr, input_zp,
GetTensorData<int8_t>(input_to_input_weights),
integer_lstm_param->effective_input_to_input_scale_a,
integer_lstm_param->effective_input_to_input_scale_b,
GetTensorData<int8_t>(input_to_forget_weights),
integer_lstm_param->effective_input_to_forget_scale_a,
integer_lstm_param->effective_input_to_forget_scale_b,
GetTensorData<int8_t>(input_to_cell_weights),
integer_lstm_param->effective_input_to_cell_scale_a,
integer_lstm_param->effective_input_to_cell_scale_b,
GetTensorData<int8_t>(input_to_output_weights),
integer_lstm_param->effective_input_to_output_scale_a,
integer_lstm_param->effective_input_to_output_scale_b,
GetTensorData<int8_t>(recurrent_to_input_weights),
integer_lstm_param->effective_recurrent_to_input_scale_a,
integer_lstm_param->effective_recurrent_to_input_scale_b,
GetTensorData<int8_t>(recurrent_to_forget_weights),
integer_lstm_param->effective_recurrent_to_forget_scale_a,
integer_lstm_param->effective_recurrent_to_forget_scale_b,
GetTensorData<int8_t>(recurrent_to_cell_weights),
integer_lstm_param->effective_recurrent_to_cell_scale_a,
integer_lstm_param->effective_recurrent_to_cell_scale_b,
GetTensorData<int8_t>(recurrent_to_output_weights),
integer_lstm_param->effective_recurrent_to_output_scale_a,
integer_lstm_param->effective_recurrent_to_output_scale_b,
GetTensorData<int8_t>(cell_to_input_weights),
integer_lstm_param->effective_cell_to_input_scale_a,
integer_lstm_param->effective_cell_to_input_scale_b,
GetTensorData<int8_t>(cell_to_forget_weights),
integer_lstm_param->effective_cell_to_forget_scale_a,
integer_lstm_param->effective_cell_to_forget_scale_b,
GetTensorData<int8_t>(cell_to_output_weights),
integer_lstm_param->effective_cell_to_output_scale_a,
integer_lstm_param->effective_cell_to_output_scale_b,
GetTensorData<int8_t>(projection_weights),
integer_lstm_param->effective_proj_scale_a,
integer_lstm_param->effective_proj_scale_b,
GetTensorData<int16_t>(input_layer_norm_coefficients),
integer_lstm_param->layer_norm_input_scale_a,
integer_lstm_param->layer_norm_input_scale_b,
GetTensorData<int16_t>(forget_layer_norm_coefficients),
integer_lstm_param->layer_norm_forget_scale_a,
integer_lstm_param->layer_norm_forget_scale_b,
GetTensorData<int16_t>(cell_layer_norm_coefficients),
integer_lstm_param->layer_norm_cell_scale_a,
integer_lstm_param->layer_norm_cell_scale_b,
GetTensorData<int16_t>(output_layer_norm_coefficients),
integer_lstm_param->layer_norm_output_scale_a,
integer_lstm_param->layer_norm_output_scale_b,
GetTensorData<int32_t>(input_gate_bias),
GetTensorData<int32_t>(forget_gate_bias),
GetTensorData<int32_t>(cell_gate_bias),
GetTensorData<int32_t>(output_gate_bias),
GetTensorData<int32_t>(projection_bias),
params, integer_lstm_param->intermediate_scale_a,
integer_lstm_param->intermediate_scale_b,
integer_lstm_param->intermediate_zp,
integer_lstm_param->quantized_cell_clip,
integer_lstm_param->quantized_proj_clip, n_batch, n_cell, n_input,
n_output, output_batch_leading_dim, GetTensorData<int8_t>(output_state),
output_state_zp, GetTensorData<int16_t>(cell_state), output_ptr,
GetTensorData<int8_t>(scratch0), GetTensorData<int8_t>(scratch1),
GetTensorData<int16_t>(scratch2), GetTensorData<int16_t>(scratch3),
GetTensorData<int16_t>(scratch4), GetTensorData<int16_t>(scratch5),
GetTensorData<int16_t>(scratch6), GetTensorData<int16_t>(scratch7));
}
return kTfLiteOk;
}
}
}
}
} | #include "tensorflow/lite/kernels/lstm_eval.h"
#include <stdint.h>
#include <stdlib.h>
#include <algorithm>
#include <memory>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
namespace tflite {
namespace {
template <typename T>
bool ArrayEq(const T* result, const T* expected_result, int size) {
for (int i = 0; i < size; ++i) {
if (result[i] != expected_result[i]) {
return false;
}
}
return true;
}
template <typename T>
bool ArrayFloatNear(const T* result, const T* expected_result, int size,
double threshold) {
for (int i = 0; i < size; ++i) {
if (std::abs(result[i] - expected_result[i]) > threshold) {
return false;
}
}
return true;
}
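// Provides the quantized weight tensors shared by the lstm_eval tests below.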
class BaseLstmParam {
public:
TfLiteTensor* Geti2i() {
PackWeightToTensor(&i2i_tensor_, i2i_, i2i_size_);
i2i_tensor_.data.int8 = i2i_.data();
return &i2i_tensor_;
}
TfLiteTensor* Geti2f() {
PackWeightToTensor(&i2f_tensor_, i2f_, i2f_size_);
i2f_tensor_.data.int8 = i2f_.data();
return &i2f_tensor_;
}
TfLiteTensor* Geti2c() {
PackWeightToTensor(&i2c_tensor_, i2c_, i2c_size_);
i2c_tensor_.data.int8 = i2c_.data();
return &i2c_tensor_;
}
TfLiteTensor* Geti2o() {
PackWeightToTensor(&i2o_tensor_, i2o_, i2o_size_);
i2o_tensor_.data.int8 = i2o_.data();
return &i2o_tensor_;
}
TfLiteTensor* Getr2i() {
PackWeightToTensor(&r2i_tensor_, r2i_, r2i_size_);
r2i_tensor_.data.int8 = r2i_.data();
return &r2i_tensor_;
}
TfLiteTensor* Getr2f() {
PackWeightToTensor(&r2f_tensor_, r2f_, r2f_size_);
r2f_tensor_.data.int8 = r2f_.data();
return &r2f_tensor_;
}
TfLiteTensor* Getr2c() {
PackWeightToTensor(&r2c_tensor_, r2c_, r2c_size_);
r2c_tensor_.data.int8 = r2c_.data();
return &r2c_tensor_;
}
TfLiteTensor* Getr2o() {
PackWeightToTensor(&r2o_tensor_, r2o_, r2o_size_);
r2o_tensor_.data.int8 = r2o_.data();
return &r2o_tensor_;
}
TfLiteTensor* GetProjection() {
PackWeightToTensor(&projection_tensor_, projection_, projection_size_);
projection_tensor_.data.int8 = projection_.data();
return &projection_tensor_;
}
~BaseLstmParam() {
TfLiteIntArrayFree(input_tensor_.dims);
TfLiteIntArrayFree(i2i_tensor_.dims);
TfLiteIntArrayFree(i2f_tensor_.dims);
TfLiteIntArrayFree(i2c_tensor_.dims);
TfLiteIntArrayFree(i2o_tensor_.dims);
TfLiteIntArrayFree(r2i_tensor_.dims);
TfLiteIntArrayFree(r2f_tensor_.dims);
TfLiteIntArrayFree(r2c_tensor_.dims);
TfLiteIntArrayFree(r2o_tensor_.dims);
TfLiteIntArrayFree(layer_norm_input_tensor_.dims);
TfLiteIntArrayFree(layer_norm_forget_tensor_.dims);
TfLiteIntArrayFree(layer_norm_cell_tensor_.dims);
TfLiteIntArrayFree(layer_norm_output_tensor_.dims);
TfLiteIntArrayFree(input_gate_bias_tensor_.dims);
TfLiteIntArrayFree(forget_gate_bias_tensor_.dims);
TfLiteIntArrayFree(cell_gate_bias_tensor_.dims);
TfLiteIntArrayFree(output_gate_bias_tensor_.dims);
TfLiteIntArrayFree(projection_tensor_.dims);
TfLiteIntArrayFree(projection_bias_tensor_.dims);
TfLiteIntArrayFree(activation_tensor_.dims);
TfLiteIntArrayFree(cell_tensor_.dims);
TfLiteIntArrayFree(output_tensor_.dims);
}
protected:
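  // Zero-fills `data` to the product of `dims` when it is empty and attaches a
  // freshly allocated TfLiteIntArray describing `dims` to the tensor; callers
  // still point tensor->data at the vector themselves.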
template <typename T>
void PackWeightToTensor(TfLiteTensor* tensor, std::vector<T>& data,
std::vector<int32_t> dims) {
if (data.empty()) {
int total = 1;
for (int i = 0; i < dims.size(); ++i) {
total *= dims[i];
}
for (int i = 0; i < total; ++i) {
data.push_back(0);
}
}
tensor->dims = TfLiteIntArrayCreate(dims.size());
for (int i = 0; i < dims.size(); ++i) {
tensor->dims->data[i] = dims[i];
}
}
const int n_batch_ = 2;
const int n_input_ = 18;
const int n_cell_ = 10;
const int n_output_ = 6;
std::vector<int32_t> input_size_ = {n_batch_, n_input_};
TfLiteTensor input_tensor_;
std::vector<int8_t> i2i_ = {
18, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6,
1, 2, 3, 4, 5, 6, 5, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 0,
8, 2, 3, 4, 3, 6, 1, -2, 3, 4, 5, 6, 1, 2, 3, -4, 5, 6,
1, 2, 3, 4, 5, 6, 1, 2, 3, 4, -5, 6, 1, 7, 3, 4, -5, 6,
8, 2, 3, 4, 5, 6, 3, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6,
1, -2, 2, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2, 3, 8, 5, -6,
8, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6,
1, 2, 3, 4, 3, 6, 1, 2, 6, 4, 5, 6, 1, 2, 3, 4, -5, 6,
8, 2, 3, 4, 5, 6, 7, 2, 3, 4, 5, 6, 1, 2, 3, 14, 5, 6,
1, 2, 3, -4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6,
};
std::vector<int32_t> i2i_size_ = {n_cell_, n_input_};
TfLiteTensor i2i_tensor_;
std::vector<int8_t> i2f_ = {
1, 2, 3, 4, 5, 6, 5, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 0,
8, 2, 3, 4, 3, 6, 1, -2, 3, 4, 5, 6, 1, 2, 3, -4, 5, 6,
1, 2, 3, 4, 5, 6, 1, 2, 3, 4, -5, 6, 1, 7, 3, 4, -5, 6,
8, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6,
1, 2, 3, 4, 3, 6, 1, 2, 6, 4, 5, 6, 11, 2, 3, 4, -5, 6,
8, 2, 3, 4, 5, 6, 7, 2, 3, 4, 5, -6, 1, 2, 3, 14, 5, 6,
1, 2, 3, -4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6,
18, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6,
8, 2, 3, 4, 5, 6, 3, 2, 3, 4, 5, 6, 13, 2, 3, 4, 5, 6,
1, -2, 2, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2, 3, 8, 5, -6,
};
std::vector<int32_t> i2f_size_ = {n_cell_, n_input_};
TfLiteTensor i2f_tensor_;
std::vector<int8_t> i2c_ = {
1, 2, 3, 4, 5, 6, 5, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 0,
1, 2, 3, 4, 3, 6, 1, 2, 6, 4, 5, 6, 1, 2, 3, 4, -5, 6,
8, 2, 3, 4, 5, 6, 7, 2, 3, 4, 5, 16, 1, 2, 3, 14, 5, 6,
1, 2, 3, -4, 5, 6, 1, 2, 3, 4, 5, 6, 7, 2, 3, 4, 5, 6,
18, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6,
8, 2, 3, 4, 5, 6, 3, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6,
1, -2, 2, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2, 3, 8, 5, -6,
8, 2, 3, 4, 3, 6, 1, -2, 3, 4, 5, 6, 1, 2, 3, -4, 5, 6,
1, 2, 3, 4, 5, 6, 1, 2, 3, 4, -5, 6, 1, 7, 3, 4, -5, 6,
8, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6,
};
std::vector<int32_t> i2c_size_ = {n_cell_, n_input_};
TfLiteTensor i2c_tensor_;
std::vector<int8_t> i2o_ = {
1, 2, 3, 4, 5, 6, 1, 2, 3, 4, -5, 6, 1, 7, 3, 4, -5, 6,
8, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6, -1, 2, 3, 4, 5, 6,
1, 2, 3, 4, 3, 6, 1, 2, 6, 4, 5, 6, 1, 2, 3, 4, -5, 6,
8, 2, 3, 4, 5, 6, 7, 2, 3, 4, 5, 6, 1, 2, 3, 14, 5, 6,
18, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, -6, 1, 2, 3, 4, 5, 6,
8, 2, 3, 4, 5, 6, 3, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6,
1, 2, 3, 4, 5, 6, 5, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 0,
8, 2, 3, 4, 3, 6, 1, -2, 3, 4, 5, 6, 1, 2, 3, -4, 5, 6,
1, 2, 3, -4, 5, 6, 1, 2, 3, 4, 5, 6, -1, 2, 3, 4, 5, 6,
1, -2, 2, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2, 3, 8, 5, -6,
};
std::vector<int32_t> i2o_size_ = {n_cell_, n_input_};
TfLiteTensor i2o_tensor_;
std::vector<int8_t> r2i_ = {
1, 2, 3, 4, 7, 3, 4, -5, 6, 3,
8, 2, 3, 4, 5, 6, 1, 2, 3, 4,
1, 2, 3, 4, 7, 3, 4, -5, 6, 3,
8, 2, 3, 4, 5, 6, 1, 2, 3, 4,
6, 4, 5, 6, 1, 2, 3, 4, -5, 6,
6, 4, 5, 6, 1, 2, 3, 4, -5, 6,
};
std::vector<int32_t> r2i_size_ = {n_cell_, n_output_};
TfLiteTensor r2i_tensor_;
std::vector<int8_t> r2f_ = {
1, 2, 3, 4, 7, 3, 4, -5, 6, 3,
8, 2, 3, 4, 5, 6, 1, 2, 3, 4,
1, 2, 3, 4, 7, 3, 4, -5, 6, 3,
8, 2, 3, 4, 5, 6, 1, 2, 3, 4,
6, 4, 5, 6, 1, 2, 3, 4, -5, 6,
6, 4, 5, 6, 1, 2, 3, 4, -5, 6,
};
std::vector<int32_t> r2f_size_ = {n_cell_, n_output_};
TfLiteTensor r2f_tensor_;
std::vector<int8_t> r2c_ = {
1, 2, 3, 4, 7, 3, 4, -5, 6, 3,
8, 2, 3, 4, 5, 6, 1, 2, 3, 4,
1, 2, 3, 4, 7, 3, 4, -5, 6, 3,
8, 2, 3, 4, 5, 6, 1, 2, 3, 4,
6, 4, 5, 6, 1, 2, 3, 4, -5, 6,
6, 4, 5, 6, 1, 2, 3, 4, -5, 6,
};
std::vector<int32_t> r2c_size_ = {n_cell_, n_output_};
TfLiteTensor r2c_tensor_;
std::vector<int8_t> r2o_ = {
1, 2, 3, 4, 7, 3, 4, -5, 6, 3,
8, 2, 3, 4, 5, 6, 1, 2, 3, 4,
6, 4, 5, 6, 1, 2, 3, 4, -5, 6,
1, 2, 3, 4, 7, 3, 4, -5, 6, 3,
8, 2, 3, 4, 5, 6, 1, 2, 3, 4,
6, 4, 5, 6, 1, 2, 3, 4, -5, 6,
};
std::vector<int32_t> r2o_size_ = {n_cell_, n_output_};
TfLiteTensor r2o_tensor_;
std::vector<int32_t> layer_norm_input_size_ = {n_cell_};
TfLiteTensor layer_norm_input_tensor_;
TfLiteTensor layer_norm_forget_tensor_;
std::vector<int32_t> layer_norm_forget_size_ = {n_cell_};
std::vector<int32_t> layer_norm_cell_size_ = {n_cell_};
TfLiteTensor layer_norm_cell_tensor_;
std::vector<int32_t> layer_norm_output_size_ = {n_cell_};
TfLiteTensor layer_norm_output_tensor_;
std::vector<int32_t> input_gate_bias_size_ = {n_cell_};
TfLiteTensor input_gate_bias_tensor_;
std::vector<int32_t> forget_gate_bias_size_ = {n_cell_};
TfLiteTensor forget_gate_bias_tensor_;
std::vector<int32_t> cell_gate_bias_size_ = {n_cell_};
TfLiteTensor cell_gate_bias_tensor_;
std::vector<int32_t> output_gate_bias_size_ = {n_cell_};
TfLiteTensor output_gate_bias_tensor_;
std::vector<int8_t> projection_ = {
8, 2, 3, 4, 5, 6, 1, 2, 3, 4,
6, 4, 5, 6, 1, 2, 3, 4, -5, 6,
1, 2, 3, 4, 7, 3, 4, -5, 6, 3,
8, 2, 3, 4, 5, 6, 1, 2, 3, 4,
6, 4, 5, 6, 1, 2, 3, 4, -5, 6,
1, 2, 3, 4, 7, 3, 4, -5, 6, 3,
};
std::vector<int32_t> projection_size_ = {n_cell_, n_output_};
TfLiteTensor projection_tensor_;
std::vector<int32_t> projection_bias_ = {
16, 4, 5, 6, 1, 1
};
std::vector<int32_t> projection_bias_size_ = {n_output_};
TfLiteTensor projection_bias_tensor_;
std::vector<int32_t> activation_size_ = {n_batch_, n_output_};
TfLiteTensor activation_tensor_;
std::vector<int32_t> cell_size_ = {n_batch_, n_cell_};
TfLiteTensor cell_tensor_;
std::vector<int32_t> output_size_ = {n_batch_, n_output_};
TfLiteTensor output_tensor_;
};
class QuantizedLstmParam : public BaseLstmParam {
public:
TfLiteTensor* GetInput() {
PackWeightToTensor(&input_tensor_, input_, input_size_);
input_tensor_.data.int8 = input_.data();
return &input_tensor_;
}
TfLiteTensor* GetInputLayerNorm() {
PackWeightToTensor(&layer_norm_input_tensor_, layer_norm_input_,
layer_norm_input_size_);
layer_norm_input_tensor_.data.i16 = layer_norm_input_.data();
return &layer_norm_input_tensor_;
}
TfLiteTensor* GetForgetLayerNorm() {
PackWeightToTensor(&layer_norm_forget_tensor_, layer_norm_forget_,
layer_norm_forget_size_);
layer_norm_forget_tensor_.data.i16 = layer_norm_forget_.data();
return &layer_norm_forget_tensor_;
}
TfLiteTensor* GetCellLayerNorm() {
PackWeightToTensor(&layer_norm_cell_tensor_, layer_norm_cell_,
layer_norm_cell_size_);
layer_norm_cell_tensor_.data.i16 = layer_norm_cell_.data();
return &layer_norm_cell_tensor_;
}
TfLiteTensor* GetOutputLayerNorm() {
PackWeightToTensor(&layer_norm_output_tensor_, layer_norm_output_,
layer_norm_output_size_);
layer_norm_output_tensor_.data.i16 = layer_norm_output_.data();
return &layer_norm_output_tensor_;
}
TfLiteTensor* GetInputBias() {
PackWeightToTensor(&input_gate_bias_tensor_, input_gate_bias_,
input_gate_bias_size_);
input_gate_bias_tensor_.data.i32 = input_gate_bias_.data();
return &input_gate_bias_tensor_;
}
TfLiteTensor* GetForgetBias() {
PackWeightToTensor(&forget_gate_bias_tensor_, forget_gate_bias_,
forget_gate_bias_size_);
forget_gate_bias_tensor_.data.i32 = forget_gate_bias_.data();
return &forget_gate_bias_tensor_;
}
TfLiteTensor* GetCellBias() {
PackWeightToTensor(&cell_gate_bias_tensor_, cell_gate_bias_,
cell_gate_bias_size_);
cell_gate_bias_tensor_.data.i32 = cell_gate_bias_.data();
return &cell_gate_bias_tensor_;
}
TfLiteTensor* GetOutputBias() {
PackWeightToTensor(&output_gate_bias_tensor_, output_gate_bias_,
output_gate_bias_size_);
output_gate_bias_tensor_.data.i32 = output_gate_bias_.data();
return &output_gate_bias_tensor_;
}
TfLiteTensor* GetProjectionBias() {
PackWeightToTensor(&projection_bias_tensor_, projection_bias_,
projection_bias_size_);
projection_bias_tensor_.data.i32 = projection_bias_.data();
return &projection_bias_tensor_;
}
ops::builtin::lstm_eval::IntegerLstmParameter* GetQuantParam() {
integer_lstm_param_.effective_input_to_input_scale_a = 1808677632;
integer_lstm_param_.effective_input_to_input_scale_b = -1;
integer_lstm_param_.effective_recurrent_to_input_scale_a = 1078887680;
integer_lstm_param_.effective_recurrent_to_input_scale_b = -1;
integer_lstm_param_.effective_cell_to_input_scale_a = 1073741824;
integer_lstm_param_.effective_cell_to_input_scale_b = 1;
integer_lstm_param_.effective_input_to_forget_scale_a = 1845996800;
integer_lstm_param_.effective_input_to_forget_scale_b = -3;
integer_lstm_param_.effective_recurrent_to_forget_scale_a = 1477412736;
integer_lstm_param_.effective_recurrent_to_forget_scale_b = -2;
integer_lstm_param_.effective_cell_to_forget_scale_a = 1073741824;
integer_lstm_param_.effective_cell_to_forget_scale_b = 1;
integer_lstm_param_.effective_input_to_cell_scale_a = 1648385408;
integer_lstm_param_.effective_input_to_cell_scale_b = -2;
    integer_lstm_param_.effective_recurrent_to_cell_scale_a = 1185544192;
integer_lstm_param_.effective_recurrent_to_cell_scale_b = -1;
integer_lstm_param_.effective_input_to_output_scale_a = 1328153600;
integer_lstm_param_.effective_input_to_output_scale_b = -1;
integer_lstm_param_.effective_recurrent_to_output_scale_a = 1479582592;
integer_lstm_param_.effective_recurrent_to_output_scale_b = -1;
    integer_lstm_param_.effective_cell_to_output_scale_a = 1073741824;
integer_lstm_param_.effective_cell_to_output_scale_b = 1;
integer_lstm_param_.effective_proj_scale_a = 1105682560;
integer_lstm_param_.effective_proj_scale_b = -8;
integer_lstm_param_.effective_hidden_scale_a = 0;
integer_lstm_param_.effective_hidden_scale_b = 0;
integer_lstm_param_.layer_norm_input_scale_a = 2011617664;
integer_lstm_param_.layer_norm_input_scale_b = -11;
integer_lstm_param_.layer_norm_forget_scale_a = 1968024960;
integer_lstm_param_.layer_norm_forget_scale_b = -13;
    integer_lstm_param_.layer_norm_cell_scale_a = 1097334528;
integer_lstm_param_.layer_norm_cell_scale_b = -12;
integer_lstm_param_.layer_norm_output_scale_a = 1837163008;
integer_lstm_param_.layer_norm_output_scale_b = -12;
integer_lstm_param_.quantized_cell_clip = 20480;
integer_lstm_param_.quantized_proj_clip = 0;
integer_lstm_param_.cell_scale = -11;
integer_lstm_param_.input_variance_guard = 1;
integer_lstm_param_.forget_variance_guard = 2;
integer_lstm_param_.cell_variance_guard = 2;
integer_lstm_param_.output_variance_guard = 1;
integer_lstm_param_.hidden_zp = 0;
integer_lstm_param_.input_to_forget_effective_bias.reset(
new int32_t[n_cell_]);
integer_lstm_param_.recurrent_to_forget_effective_bias.reset(
new int32_t[n_cell_]);
integer_lstm_param_.input_to_cell_effective_bias.reset(
new int32_t[n_cell_]);
integer_lstm_param_.recurrent_to_cell_effective_bias.reset(
new int32_t[n_cell_]);
integer_lstm_param_.input_to_output_effective_bias.reset(
new int32_t[n_cell_]);
integer_lstm_param_.recurrent_to_output_effective_bias.reset(
new int32_t[n_cell_]);
integer_lstm_param_.input_to_input_effective_bias.reset(
new int32_t[n_cell_]);
integer_lstm_param_.recurrent_to_input_effective_bias.reset(
new int32_t[n_cell_]);
integer_lstm_param_.projection_effective_bias.reset(new int32_t[n_output_]);
std::fill_n(integer_lstm_param_.input_to_forget_effective_bias.get(),
n_cell_, 152);
std::fill_n(integer_lstm_param_.recurrent_to_forget_effective_bias.get(),
n_cell_, 315);
std::fill_n(integer_lstm_param_.input_to_cell_effective_bias.get(), n_cell_,
165);
std::fill_n(integer_lstm_param_.recurrent_to_cell_effective_bias.get(),
n_cell_, 1165);
std::fill_n(integer_lstm_param_.input_to_output_effective_bias.get(),
n_cell_, 159);
std::fill_n(integer_lstm_param_.recurrent_to_output_effective_bias.get(),
n_cell_, 915);
std::fill_n(integer_lstm_param_.input_to_input_effective_bias.get(),
n_cell_, -15);
std::fill_n(integer_lstm_param_.recurrent_to_input_effective_bias.get(),
n_cell_, 315);
std::fill_n(integer_lstm_param_.projection_effective_bias.get(), n_output_,
115);
return &integer_lstm_param_;
}
TfLiteTensor* GetScratch0() {
PackWeightToTensor(&scratch0_tensor_, scratch0_, scratch0_size_);
scratch0_tensor_.data.i16 = scratch0_.data();
return &scratch0_tensor_;
}
TfLiteTensor* GetScratch1() {
PackWeightToTensor(&scratch1_tensor_, scratch1_, scratch1_size_);
scratch1_tensor_.data.i16 = scratch1_.data();
return &scratch1_tensor_;
}
TfLiteTensor* GetScratch2() {
PackWeightToTensor(&scratch2_tensor_, scratch2_, scratch2_size_);
scratch2_tensor_.data.i16 = scratch2_.data();
return &scratch2_tensor_;
}
TfLiteTensor* GetScratch3() {
PackWeightToTensor(&scratch3_tensor_, scratch3_, scratch3_size_);
scratch3_tensor_.data.i16 = scratch3_.data();
return &scratch3_tensor_;
}
TfLiteTensor* GetScratch4() {
PackWeightToTensor(&scratch4_tensor_, scratch4_, scratch4_size_);
scratch4_tensor_.data.int8 = scratch4_.data();
return &scratch4_tensor_;
}
TfLiteTensor* GetScratch5() {
PackWeightToTensor(&scratch5_tensor_, scratch5_, scratch5_size_);
scratch5_tensor_.data.i32 = scratch5_.data();
return &scratch5_tensor_;
}
TfLiteTensor* GetActivation() {
PackWeightToTensor(&activation_tensor_, activation_, activation_size_);
activation_tensor_.data.int8 = activation_.data();
activation_tensor_.params.zero_point = 50;
return &activation_tensor_;
}
TfLiteTensor* GetOutput() {
PackWeightToTensor(&output_tensor_, output_, output_size_);
output_tensor_.data.int8 = output_.data();
return &output_tensor_;
}
TfLiteTensor* GetCell() {
PackWeightToTensor(&cell_tensor_, cell_, cell_size_);
cell_tensor_.data.i16 = cell_.data();
return &cell_tensor_;
}
~QuantizedLstmParam() {
TfLiteIntArrayFree(scratch0_tensor_.dims);
TfLiteIntArrayFree(scratch1_tensor_.dims);
TfLiteIntArrayFree(scratch2_tensor_.dims);
TfLiteIntArrayFree(scratch3_tensor_.dims);
TfLiteIntArrayFree(scratch4_tensor_.dims);
TfLiteIntArrayFree(scratch5_tensor_.dims);
}
private:
std::vector<int8_t> input_ = {
8, 2, 3, 4, 5, 6, 1, -2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6,
1, 2, -3, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6,
};
std::vector<int16_t> layer_norm_input_ = {8, 2, 3, 4, 5, 6, 1, 2, 3, 4};
std::vector<int16_t> layer_norm_forget_ = {
1, 2, 3, 4, 7, 3, 4, -5, 6, 3,
};
std::vector<int16_t> layer_norm_cell_ = {
6, 4, 5, 6, 1, 2, 3, 4, -5, 6,
};
std::vector<int16_t> layer_norm_output_ = {
16, 4, 5, 6, 1, 1, 3, 4, -5, 6,
};
std::vector<int32_t> input_gate_bias_ = {
16, 4, 5, 6, 1, 1, 3, 4, -5, 6,
};
std::vector<int32_t> forget_gate_bias_ = {
16, 4, 5, 6, 1, 1, 3, 4, -5, 6,
};
std::vector<int32_t> cell_gate_bias_ = {
16, 4, 5, 6, 1, 1, 3, 4, -5, 6,
};
std::vector<int32_t> output_gate_bias_ = {
16, 4, 5, 6, 1, 1, 3, 4, -5, 6,
};
std::vector<int8_t> activation_;
std::vector<int16_t> cell_ = {
16, 4, 5, 6, 1, 1, 3, 4, -5, 6,
1, 14, 5, 6, 1, 1, 3, 4, -5, 6,
};
std::vector<int8_t> output_ = {
1, 1, 3, 4, -5, 6,
1, 4, 3, 4, -5, 6,
};
ops::builtin::lstm_eval::IntegerLstmParameter integer_lstm_param_;
std::vector<int16_t> scratch0_;
std::vector<int32_t> scratch0_size_ = {n_batch_, n_cell_};
TfLiteTensor scratch0_tensor_;
std::vector<int16_t> scratch1_;
std::vector<int32_t> scratch1_size_ = {n_batch_, n_cell_};
TfLiteTensor scratch1_tensor_;
std::vector<int16_t> scratch2_;
std::vector<int32_t> scratch2_size_ = {n_batch_, n_cell_};
TfLiteTensor scratch2_tensor_;
std::vector<int16_t> scratch3_;
std::vector<int32_t> scratch3_size_ = {n_batch_, n_cell_};
TfLiteTensor scratch3_tensor_;
std::vector<int8_t> scratch4_;
std::vector<int32_t> scratch4_size_ = {n_batch_, n_cell_};
TfLiteTensor scratch4_tensor_;
std::vector<int32_t> scratch5_;
std::vector<int32_t> scratch5_size_ = {n_batch_, n_cell_};
TfLiteTensor scratch5_tensor_;
};
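// Runs a single step of the fully integer 8x8_16 LSTM kernel on the fixture
// above and compares the updated cell state and activation against golden
// values; the expected activations are all 50, i.e. the zero point configured
// on the activation tensor.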
void TestOneFullyQuantizedLSTM() {
CpuBackendContext context;
QuantizedLstmParam one_parameter;
auto activation = one_parameter.GetActivation();
auto output = one_parameter.GetOutput();
auto cell = one_parameter.GetCell();
auto param = one_parameter.GetQuantParam();
ops::builtin::lstm_eval::EvalInteger8x8_16(
one_parameter.GetInput(), one_parameter.Geti2i(), one_parameter.Geti2f(),
one_parameter.Geti2c(), one_parameter.Geti2o(), one_parameter.Getr2i(),
one_parameter.Getr2f(), one_parameter.Getr2c(), one_parameter.Getr2o(),
nullptr, nullptr, nullptr, one_parameter.GetInputLayerNorm(),
one_parameter.GetForgetLayerNorm(), one_parameter.GetCellLayerNorm(),
one_parameter.GetOutputLayerNorm(), one_parameter.GetInputBias(),
one_parameter.GetForgetBias(), one_parameter.GetCellBias(),
one_parameter.GetOutputBias(), one_parameter.GetProjection(),
one_parameter.GetProjectionBias(), nullptr, true,
true, param, activation, cell, output,
one_parameter.GetScratch0(), one_parameter.GetScratch1(),
one_parameter.GetScratch2(), one_parameter.GetScratch3(),
one_parameter.GetScratch4(), one_parameter.GetScratch5(), &context);
const std::vector<int16_t> expected_cell = {
7, 1, 3, 2, 0, 1, 0, 2, -2, 4, 1, 6, 4, 3, 0, 1, 0, 2, -2, 4,
};
const std::vector<int8_t> expected_activation = {
50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50,
};
EXPECT_TRUE(ArrayEq(cell->data.i16, expected_cell.data(), 20));
EXPECT_TRUE(ArrayEq(activation->data.int8, expected_activation.data(), 12));
EXPECT_TRUE(ArrayEq(output->data.int8, expected_activation.data(), 12));
}
TEST(TestOneFullyQuantizedLSTM, TestOneFullyQuantizedLSTM) {
TestOneFullyQuantizedLSTM();
}
class HybridLstmParam : public BaseLstmParam {
public:
TfLiteTensor* GetFloatOutput() {
PackWeightToTensor(&output_tensor_, output_float_, output_size_);
output_tensor_.data.f = output_float_.data();
return &output_tensor_;
}
const TfLiteLSTMParams GetLSTMParam() {
return {kTfLiteActRelu, 0, 0, kTfLiteLSTMFullKernel, true};
}
TfLiteTensor* GetScratchBuffer() {
PackWeightToTensor(&scratch_buffer_tensor_, scratch_buffer_,
scratch_buffer_size_);
scratch_buffer_tensor_.data.f = scratch_buffer_.data();
return &scratch_buffer_tensor_;
}
TfLiteTensor* GetInputScalingFactors() {
PackWeightToTensor(&input_sf_tensor_, input_sf_,
quantization_extra_scratch_buffer_sizes_);
input_sf_tensor_.data.f = input_sf_.data();
return &input_sf_tensor_;
}
TfLiteTensor* GetAuxInputScalingFactors() {
PackWeightToTensor(&aux_input_sf_tensor_, aux_input_sf_,
quantization_extra_scratch_buffer_sizes_);
aux_input_sf_tensor_.data.f = aux_input_sf_.data();
return &aux_input_sf_tensor_;
}
TfLiteTensor* GetOutputStateScalingFactors() {
PackWeightToTensor(&output_state_sf_tensor_, output_state_sf_,
quantization_extra_scratch_buffer_sizes_);
output_state_sf_tensor_.data.f = output_state_sf_.data();
return &output_state_sf_tensor_;
}
TfLiteTensor* GetProdScalingFactors() {
PackWeightToTensor(&prod_scaling_factors_tensor_, prod_scaling_factors_,
quantization_extra_scratch_buffer_sizes_);
prod_scaling_factors_tensor_.data.f = prod_scaling_factors_.data();
return &prod_scaling_factors_tensor_;
}
TfLiteTensor* GetInputQuantized() {
PackWeightToTensor(&input_quantized_tensor_, input_quantized_, input_size_);
input_quantized_tensor_.data.int8 = input_quantized_.data();
return &input_quantized_tensor_;
}
TfLiteTensor* GetActivationStateQuantized() {
PackWeightToTensor(&activation_quantized_tensor_, activation_quantized_,
activation_size_);
activation_quantized_tensor_.data.int8 = activation_quantized_.data();
return &activation_quantized_tensor_;
}
TfLiteTensor* GetCellStateQuantized() {
PackWeightToTensor(&cell_quantized_tensor_, cell_quantized_, cell_size_);
cell_quantized_tensor_.data.int8 = cell_quantized_.data();
return &cell_quantized_tensor_;
}
TfLiteTensor* GetInputZeroPoints() {
PackWeightToTensor(&input_zp_tensor_, input_zp_,
quantization_extra_scratch_buffer_sizes_);
input_zp_tensor_.data.i32 = input_zp_.data();
return &input_zp_tensor_;
}
TfLiteTensor* GetAuxInputZeroPoints() {
PackWeightToTensor(&aux_input_zp_tensor_, aux_input_zp_,
quantization_extra_scratch_buffer_sizes_);
aux_input_zp_tensor_.data.i32 = aux_input_zp_.data();
return &aux_input_zp_tensor_;
}
TfLiteTensor* GetOutputStateZeroPoints() {
PackWeightToTensor(&output_state_zp_tensor_, output_state_zp_,
quantization_extra_scratch_buffer_sizes_);
output_state_zp_tensor_.data.i32 = output_state_zp_.data();
return &output_state_zp_tensor_;
}
TfLiteTensor* GetRowSums() {
PackWeightToTensor(&row_sums_tensor_, row_sums_, row_sums_size_);
row_sums_tensor_.data.i32 = row_sums_.data();
return &row_sums_tensor_;
}
TfLiteTensor* GetFloatInput() {
PackWeightToTensor(&input_tensor_, input_float_, input_size_);
input_tensor_.data.f = input_float_.data();
return &input_tensor_;
}
TfLiteTensor* GetActivation() {
PackWeightToTensor(&activation_tensor_, activation_state_,
activation_size_);
activation_tensor_.data.f = activation_state_.data();
return &activation_tensor_;
}
TfLiteTensor* GetCell() {
PackWeightToTensor(&cell_tensor_, cell_state_, cell_size_);
cell_tensor_.data.f = cell_state_.data();
return &cell_tensor_;
}
TfLiteTensor* GetAccumScratchBuffer() {
PackWeightToTensor(&accum_scratch_tensor_, accum_scratch_,
accum_scratch_size_);
accum_scratch_tensor_.data.i32 = accum_scratch_.data();
return &accum_scratch_tensor_;
}
TfLiteTensor* GetInputBias() {
PackWeightToTensor(&input_gate_bias_tensor_, input_float_bias_,
input_gate_bias_size_);
input_gate_bias_tensor_.data.f = input_float_bias_.data();
return &input_gate_bias_tensor_;
}
TfLiteTensor* GetForgetBias() {
PackWeightToTensor(&forget_gate_bias_tensor_, forget_float_bias_,
forget_gate_bias_size_);
forget_gate_bias_tensor_.data.f = forget_float_bias_.data();
return &forget_gate_bias_tensor_;
}
TfLiteTensor* GetCellBias() {
PackWeightToTensor(&cell_gate_bias_tensor_, cell_float_bias_,
cell_gate_bias_size_);
cell_gate_bias_tensor_.data.f = cell_float_bias_.data();
return &cell_gate_bias_tensor_;
}
TfLiteTensor* GetOutputBias() {
PackWeightToTensor(&output_gate_bias_tensor_, output_float_bias_,
output_gate_bias_size_);
output_gate_bias_tensor_.data.f = output_float_bias_.data();
return &output_gate_bias_tensor_;
}
TfLiteTensor* GetProjectionBias() {
PackWeightToTensor(&projection_bias_tensor_, projection_float_bias_,
projection_bias_size_);
projection_bias_tensor_.data.f = projection_float_bias_.data();
return &projection_bias_tensor_;
}
int GetNumRowSums() { return n_row_sums_; }
TfLiteTensor* GetInputLayerNorm() {
PackWeightToTensor(&layer_norm_input_tensor_, layer_norm_float_input_,
layer_norm_input_size_);
layer_norm_input_tensor_.data.f = layer_norm_float_input_.data();
return &layer_norm_input_tensor_;
}
TfLiteTensor* GetForgetLayerNorm() {
PackWeightToTensor(&layer_norm_forget_tensor_, layer_norm_float_forget_,
layer_norm_forget_size_);
layer_norm_forget_tensor_.data.f = layer_norm_float_forget_.data();
return &layer_norm_forget_tensor_;
}
TfLiteTensor* GetCellLayerNorm() {
PackWeightToTensor(&layer_norm_cell_tensor_, layer_norm_float_cell_,
layer_norm_cell_size_);
layer_norm_cell_tensor_.data.f = layer_norm_float_cell_.data();
return &layer_norm_cell_tensor_;
}
TfLiteTensor* GetOutputLayerNorm() {
PackWeightToTensor(&layer_norm_output_tensor_, layer_norm_float_output_,
layer_norm_output_size_);
layer_norm_output_tensor_.data.f = layer_norm_float_output_.data();
return &layer_norm_output_tensor_;
}
static TfLiteTensor* addScale(TfLiteTensor* t, float scale) {
t->params.scale = scale;
return t;
}
~HybridLstmParam() {
TfLiteIntArrayFree(scratch_buffer_tensor_.dims);
TfLiteIntArrayFree(accum_scratch_tensor_.dims);
TfLiteIntArrayFree(input_sf_tensor_.dims);
TfLiteIntArrayFree(aux_input_sf_tensor_.dims);
TfLiteIntArrayFree(output_state_sf_tensor_.dims);
TfLiteIntArrayFree(prod_scaling_factors_tensor_.dims);
TfLiteIntArrayFree(input_quantized_tensor_.dims);
TfLiteIntArrayFree(activation_quantized_tensor_.dims);
TfLiteIntArrayFree(cell_quantized_tensor_.dims);
TfLiteIntArrayFree(input_zp_tensor_.dims);
TfLiteIntArrayFree(aux_input_zp_tensor_.dims);
TfLiteIntArrayFree(output_state_zp_tensor_.dims);
TfLiteIntArrayFree(row_sums_tensor_.dims);
}
private:
const int n_row_sums_ = 9;
std::vector<float> scratch_buffer_;
std::vector<int32_t> scratch_buffer_size_ = {n_batch_, n_cell_ * 4};
TfLiteTensor scratch_buffer_tensor_;
std::vector<int32_t> quantization_extra_scratch_buffer_sizes_ = {n_batch_};
std::vector<float> input_sf_;
TfLiteTensor input_sf_tensor_;
std::vector<float> aux_input_sf_;
TfLiteTensor aux_input_sf_tensor_;
std::vector<float> output_state_sf_;
TfLiteTensor output_state_sf_tensor_;
std::vector<float> prod_scaling_factors_;
TfLiteTensor prod_scaling_factors_tensor_;
std::vector<int32_t> input_zp_;
TfLiteTensor input_zp_tensor_;
std::vector<int32_t> aux_input_zp_;
TfLiteTensor aux_input_zp_tensor_;
std::vector<int32_t> output_state_zp_;
TfLiteTensor output_state_zp_tensor_;
std::vector<int8_t> input_quantized_;
TfLiteTensor input_quantized_tensor_;
std::vector<int8_t> activation_quantized_;
TfLiteTensor activation_quantized_tensor_;
std::vector<int8_t> cell_quantized_;
TfLiteTensor cell_quantized_tensor_;
std::vector<float> cell_state_ = {
16, 4, 5, 6, 1, 1, 3, 4, -5, 6, 1, 14, 5, 6, 1, 1, 3, 4, -5, 6,
};
std::vector<int32_t> row_sums_;
std::vector<int32_t> row_sums_size_ = {n_row_sums_, n_cell_};
TfLiteTensor row_sums_tensor_;
std::vector<float> activation_state_;
std::vector<int32_t> accum_scratch_;
std::vector<int32_t> accum_scratch_size_ = {n_cell_, n_batch_};
TfLiteTensor accum_scratch_tensor_;
std::vector<float> output_float_ = {
1, 1, 3, 4, -5, 6,
1, 4, 3, 4, -5, 6,
};
std::vector<float> input_float_ = {
6.06, 7.66, 7.10, 9.32, 3.85, 0.33, 7.15, 1.56, 9.54,
5.30, 4.53, 0.19, 1.83, 4.60, 0.84, 5.08, 4.37, 9.92,
4.08, 3.79, 1.17, 8.99, 0.14, 9.22, 3.18, 2.97, 7.53,
0.59, 9.89, 9.13, 7.68, 0.63, 2.15, 4.31, 7.20, 4.09,
};
std::vector<float> input_float_bias_ = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
std::vector<float> forget_float_bias_ = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
std::vector<float> cell_float_bias_ = {
-11, -7, -4, -5, -1, -1, -2, -3.5, -3, -4,
};
std::vector<float> output_float_bias_ = {0.16, 0.4, 0.5, 0.6, 0.1,
0.1, 0.3, 0.4, -0.5, 0.6};
std::vector<float> projection_float_bias_ = {0, 0, 0, 0, 0, 0};
std::vector<float> layer_norm_float_input_ = {8, 2, 3, 4, 5, 6, 1, -2, 3, 4};
std::vector<float> layer_norm_float_forget_ = {
0.1, 0.2, 0.3, 0.4, 0.7, 0.3, 0.4, -0.5, 0.6, 0.3,
};
std::vector<float> layer_norm_float_cell_ = {
0.6, 0.4, 0.5, 0.6, 0.1, 0.2, 0.3, 0.4, -0.5, 0.6,
};
std::vector<float> layer_norm_float_output_ = {
0.6, 0.4, 0.5, 0.6, 0.1, 0.2, 0.3, 0.4, -0.5, 0.6,
};
};
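// Runs a single step of the hybrid LSTM kernel (float math over int8 weights,
// with the gate weights tagged scale 18.0 via addScale and the projection
// scale 1.0) and checks cell state and activations against golden values
// within loose float tolerances.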
void TestOneHybridAsymmLSTM() {
CpuBackendContext context;
HybridLstmParam one_parameter;
auto activation = one_parameter.GetActivation();
auto output = one_parameter.GetFloatOutput();
auto cell = one_parameter.GetCell();
auto param = one_parameter.GetLSTMParam();
bool compute_row_sums = true;
constexpr float kDefaultScale = 18.0;
ops::builtin::lstm_eval::EvalHybrid(
one_parameter.GetFloatInput(),
HybridLstmParam::addScale(one_parameter.Geti2i(), kDefaultScale), nullptr,
HybridLstmParam::addScale(one_parameter.Geti2f(), kDefaultScale), nullptr,
HybridLstmParam::addScale(one_parameter.Geti2c(), kDefaultScale), nullptr,
HybridLstmParam::addScale(one_parameter.Geti2o(), kDefaultScale), nullptr,
HybridLstmParam::addScale(one_parameter.Getr2i(), kDefaultScale), nullptr,
HybridLstmParam::addScale(one_parameter.Getr2f(), kDefaultScale), nullptr,
HybridLstmParam::addScale(one_parameter.Getr2c(), kDefaultScale), nullptr,
HybridLstmParam::addScale(one_parameter.Getr2o(), kDefaultScale), nullptr,
nullptr,
nullptr,
nullptr, one_parameter.GetInputLayerNorm(),
one_parameter.GetForgetLayerNorm(), one_parameter.GetCellLayerNorm(),
one_parameter.GetOutputLayerNorm(),
nullptr,
nullptr,
nullptr,
nullptr,
nullptr, one_parameter.GetInputBias(),
one_parameter.GetForgetBias(), one_parameter.GetCellBias(),
one_parameter.GetOutputBias(),
HybridLstmParam::addScale(one_parameter.GetProjection(), 1.0), nullptr,
one_parameter.GetProjectionBias(), ¶m,
true,
true,
0, one_parameter.GetScratchBuffer(),
one_parameter.GetInputScalingFactors(),
one_parameter.GetAuxInputScalingFactors(),
one_parameter.GetOutputStateScalingFactors(),
one_parameter.GetProdScalingFactors(),
nullptr, one_parameter.GetInputQuantized(),
nullptr,
one_parameter.GetActivationStateQuantized(),
one_parameter.GetCellStateQuantized(), activation, cell,
one_parameter.GetAccumScratchBuffer(), output,
one_parameter.GetInputZeroPoints(), one_parameter.GetAuxInputZeroPoints(),
one_parameter.GetOutputStateZeroPoints(), one_parameter.GetRowSums(),
one_parameter.GetNumRowSums(), &compute_row_sums,
false,
false,
false,
false, &context);
const std::vector<float> expected_cell = {
7.83134, 1.96158, 2.18285, 3.28739, 0.483214,
0.618206, 1.21539, 1.4052, -3.17735, 2.24296,
0.498944, 6.91104, 1.74126, 3.28993, 0.580477,
0.489936, 1.2527, 1.50157, -3.71849, 2.76743,
};
const std::vector<float> expected_activation = {
53.0403, 59.3623, 24.8493, 53.0403, 59.3623, 24.8493,
36.7559, 57.5202, 29.7217, 36.7559, 57.5202, 29.7217,
};
EXPECT_TRUE(ArrayFloatNear(cell->data.f, expected_cell.data(), 20, 1e-2));
EXPECT_TRUE(
ArrayFloatNear(activation->data.f, expected_activation.data(), 12, 2e-4));
EXPECT_TRUE(
ArrayFloatNear(output->data.f, expected_activation.data(), 12, 2e-4));
}
TEST(TestOneHybridAsymmLSTM, TestOneHybridAsymmLSTM) {
TestOneHybridAsymmLSTM();
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/lstm_eval.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/lstm_eval_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
fadd1566-bcfe-4089-be0e-cc0ef7650b6e | cpp | tensorflow/tensorflow | elementwise | tensorflow/lite/delegates/gpu/gl/kernels/elementwise.cc | tensorflow/lite/delegates/gpu/cl/kernels/elementwise_test.cc | #include "tensorflow/lite/delegates/gpu/gl/kernels/elementwise.h"
#include <any>
#include <memory>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/strings/substitute.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
#include "tensorflow/lite/delegates/gpu/gl/node_shader.h"
#include "tensorflow/lite/delegates/gpu/gl/object.h"
#include "tensorflow/lite/delegates/gpu/gl/variable.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
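// Generates the GLSL body for one-input elementwise ops; each case rewrites
// value_0 in place (e.g. "value_0 = abs(value_0);") and relies on
// IOStructure::AUTO for the input/output plumbing.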
class ElementwiseOneArgument : public NodeShader {
public:
explicit ElementwiseOneArgument(OperationType operation_type)
: operation_type_(operation_type) {}
absl::Status GenerateCode(const GenerationContext& ctx,
GeneratedCode* generated_code) const final {
std::string source;
switch (operation_type_) {
case OperationType::ABS:
source = "value_0 = abs(value_0);";
break;
case OperationType::COS:
source = "value_0 = cos(value_0);";
break;
case OperationType::COPY:
source = "value_0 = value_0;";
break;
case OperationType::ELU:
source = R"(
value_0.x = value_0.x < 0.0 ? exp(value_0.x) - 1.0 : value_0.x;
value_0.y = value_0.y < 0.0 ? exp(value_0.y) - 1.0 : value_0.y;
value_0.z = value_0.z < 0.0 ? exp(value_0.z) - 1.0 : value_0.z;
value_0.w = value_0.w < 0.0 ? exp(value_0.w) - 1.0 : value_0.w;
)";
break;
case OperationType::EXP:
source = "value_0 = exp(value_0);";
break;
case tflite::gpu::OperationType::FLOOR:
source = "value_0 = floor(value_0);";
break;
case tflite::gpu::OperationType::GELU:
source =
"value_0 = 0.5 * value_0 * (1.0 + tanh(0.7978845608 * (value_0 + "
"0.044715 * value_0 * value_0 * value_0)));";
break;
case OperationType::HARD_SWISH:
source =
"value_0 *= clamp(value_0 / 6.0 + vec4(0.5), vec4(0.0), "
"vec4(1.0));";
break;
case OperationType::LOG:
source = R"(
const float nan = normalize(vec4(0, 0, 0, 0)).x;
value_0.x = value_0.x > 0.0 ? log(value_0.x) : nan;
value_0.y = value_0.y > 0.0 ? log(value_0.y) : nan;
value_0.z = value_0.z > 0.0 ? log(value_0.z) : nan;
value_0.w = value_0.w > 0.0 ? log(value_0.w) : nan;
)";
break;
case OperationType::NEG:
source = "value_0 = -(value_0);";
break;
case OperationType::RSQRT:
source = R"(
const float nan = normalize(vec4(0, 0, 0, 0)).x;
value_0.x = value_0.x > 0.0 ? 1.0 / sqrt(value_0.x) : nan;
value_0.y = value_0.y > 0.0 ? 1.0 / sqrt(value_0.y) : nan;
value_0.z = value_0.z > 0.0 ? 1.0 / sqrt(value_0.z) : nan;
value_0.w = value_0.w > 0.0 ? 1.0 / sqrt(value_0.w) : nan;
)";
break;
case OperationType::SIGMOID:
source = "value_0 = 1.0 / (1.0 + exp(-1.0 * value_0));";
break;
case OperationType::SIN:
source = "value_0 = sin(value_0);";
break;
case OperationType::SQRT:
source = R"(
const float nan = normalize(vec4(0, 0, 0, 0)).x;
value_0.x = value_0.x >= 0.0 ? sqrt(value_0.x) : nan;
value_0.y = value_0.y >= 0.0 ? sqrt(value_0.y) : nan;
value_0.z = value_0.z >= 0.0 ? sqrt(value_0.z) : nan;
value_0.w = value_0.w >= 0.0 ? sqrt(value_0.w) : nan;
)";
break;
case OperationType::SQUARE:
source = "value_0 = value_0 * value_0;";
break;
case OperationType::TANH:
source = "value_0 = tanh(value_0);";
break;
default:
return absl::InvalidArgumentError(
"Incorrect elementwise operation type.");
}
*generated_code = {
{},
{},
{},
uint3(),
uint3(),
source,
IOStructure::AUTO,
IOStructure::AUTO,
};
return absl::OkStatus();
}
private:
OperationType operation_type_;
};
class ElementwiseTwoArguments : public NodeShader {
public:
explicit ElementwiseTwoArguments(OperationType operation_type)
: operation_type_(operation_type) {}
inline bool IsElementwiseSupported(const GenerationContext& ctx) const {
return ctx.input_shapes.size() == 2 &&
ctx.input_shapes[0] == ctx.input_shapes[1];
}
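  // Broadcasting the second input is only supported when its height and width
  // are 1 and its channel count matches the first input.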
inline bool IsBroadcastSupported(const GenerationContext& ctx) const {
return ctx.input_shapes.size() == 2 && ctx.input_shapes[1][1] == 1 &&
ctx.input_shapes[1][2] == 1 &&
ctx.input_shapes[0][3] == ctx.input_shapes[1][3];
}
absl::Status GenerateCode(const GenerationContext& ctx,
GeneratedCode* generated_code) const final {
std::vector<Variable> parameters;
std::vector<std::pair<std::string, Object>> objects;
std::string argument0, argument1;
if (IsElementwiseSupported(ctx)) {
argument0 = "value_0";
argument1 = "value_1";
} else if (IsBroadcastSupported(ctx)) {
argument0 = "$input_data_0[gid.x, gid.y, gid.z]$";
argument1 = "$input_data_1[0, 0, gid.z]$";
} else {
const auto& attr =
std::any_cast<const ElementwiseAttributes&>(ctx.op_attr);
const auto* tensor =
std::get_if<Tensor<Linear, DataType::FLOAT32>>(&attr.param);
const auto* scalar = std::get_if<float>(&attr.param);
if (!tensor && !scalar) {
return absl::InvalidArgumentError(
"Couldn't read scalar of const vector data from the attributes.");
}
argument0 = "value_0";
if (tensor) {
argument1 = "$const_data[gid.z]$";
objects.push_back({"const_data", MakeReadonlyObject(tensor->data)});
} else {
argument1 = "vec4($const_data$)";
parameters.push_back({"const_data", *scalar});
}
if (attr.runtime_tensor_is_second) {
argument0 = argument1;
argument1 = "value_0";
}
}
std::string source;
switch (operation_type_) {
case OperationType::DIV: {
source = "value_0 = $0/$1;";
break;
}
case tflite::gpu::OperationType::FLOOR_DIV:
source = "value_0 = floor($0 / $1);";
break;
case tflite::gpu::OperationType::FLOOR_MOD:
source = "value_0 = $0 - floor($0 / $1) * $1;";
break;
case OperationType::MAXIMUM: {
source = "value_0 = max($0, $1);";
break;
}
case OperationType::MINIMUM: {
source = "value_0 = min($0, $1);";
break;
}
case OperationType::SQUARED_DIFF: {
source = "value_0 = ($0 - $1) * ($0 - $1);";
break;
}
case OperationType::SUB: {
source = "value_0 = $0 - $1;";
break;
}
case OperationType::POW: {
source = "value_0 = pow($0, $1);";
break;
}
default:
return absl::InvalidArgumentError(
"Incorrect elementwise with scalar operation type.");
}
source = absl::Substitute(source, argument0, argument1);
*generated_code = {
std::move(parameters),
std::move(objects),
{},
uint3(),
uint3(),
source,
IOStructure::AUTO,
IOStructure::AUTO,
};
return absl::OkStatus();
}
private:
OperationType operation_type_;
};
}
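// Factory: unary operation types map to ElementwiseOneArgument, binary ones to
// ElementwiseTwoArguments, and anything else yields nullptr.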
std::unique_ptr<NodeShader> NewElementwiseNodeShader(
OperationType operation_type) {
switch (operation_type) {
case OperationType::ABS:
case OperationType::COS:
case OperationType::COPY:
case OperationType::ELU:
case OperationType::EXP:
case OperationType::FLOOR:
case OperationType::GELU:
case OperationType::HARD_SWISH:
case OperationType::LOG:
case OperationType::NEG:
case OperationType::RSQRT:
case OperationType::SIGMOID:
case OperationType::SIN:
case OperationType::SQRT:
case OperationType::SQUARE:
case OperationType::TANH:
return std::make_unique<ElementwiseOneArgument>(operation_type);
case OperationType::DIV:
case OperationType::FLOOR_DIV:
case OperationType::FLOOR_MOD:
case OperationType::MAXIMUM:
case OperationType::MINIMUM:
case OperationType::POW:
case OperationType::SQUARED_DIFF:
case OperationType::SUB:
return std::make_unique<ElementwiseTwoArguments>(operation_type);
default:
return nullptr;
}
}
}
}
} | #include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tasks/elementwise_test_util.h"
namespace tflite {
namespace gpu {
namespace cl {
namespace {
TEST_F(OpenCLOperationTest, Abs) { ASSERT_OK(AbsTest(&exec_env_)); }
TEST_F(OpenCLOperationTest, Cos) { ASSERT_OK(CosTest(&exec_env_)); }
TEST_F(OpenCLOperationTest, Copy) { ASSERT_OK(CopyTest(&exec_env_)); }
TEST_F(OpenCLOperationTest, Elu) { ASSERT_OK(EluTest(&exec_env_)); }
TEST_F(OpenCLOperationTest, Exp) { ASSERT_OK(ExpTest(&exec_env_)); }
TEST_F(OpenCLOperationTest, Floor) { ASSERT_OK(FloorTest(&exec_env_)); }
TEST_F(OpenCLOperationTest, FloorDiv) { ASSERT_OK(FloorDivTest(&exec_env_)); }
TEST_F(OpenCLOperationTest, FloorMod) { ASSERT_OK(FloorModTest(&exec_env_)); }
TEST_F(OpenCLOperationTest, Gelu) { ASSERT_OK(GeluTest(&exec_env_)); }
TEST_F(OpenCLOperationTest, HardSwish) { ASSERT_OK(HardSwishTest(&exec_env_)); }
TEST_F(OpenCLOperationTest, Log) { ASSERT_OK(LogTest(&exec_env_)); }
TEST_F(OpenCLOperationTest, Neg) { ASSERT_OK(NegTest(&exec_env_)); }
TEST_F(OpenCLOperationTest, Rsqrt) { ASSERT_OK(RsqrtTest(&exec_env_)); }
TEST_F(OpenCLOperationTest, Sigmoid) { ASSERT_OK(SigmoidTest(&exec_env_)); }
TEST_F(OpenCLOperationTest, Sin) { ASSERT_OK(SinTest(&exec_env_)); }
TEST_F(OpenCLOperationTest, Sqrt) { ASSERT_OK(SqrtTest(&exec_env_)); }
TEST_F(OpenCLOperationTest, Square) { ASSERT_OK(SquareTest(&exec_env_)); }
TEST_F(OpenCLOperationTest, Tanh) { ASSERT_OK(TanhTest(&exec_env_)); }
TEST_F(OpenCLOperationTest, Sub) { ASSERT_OK(SubTest(&exec_env_)); }
TEST_F(OpenCLOperationTest, SquaredDiff) {
ASSERT_OK(SquaredDiffTest(&exec_env_));
}
TEST_F(OpenCLOperationTest, Div) { ASSERT_OK(DivTest(&exec_env_)); }
TEST_F(OpenCLOperationTest, Pow) { ASSERT_OK(PowTest(&exec_env_)); }
TEST_F(OpenCLOperationTest, Add) { ASSERT_OK(AddTest(&exec_env_)); }
TEST_F(OpenCLOperationTest, Maximum) { ASSERT_OK(MaximumTest(&exec_env_)); }
TEST_F(OpenCLOperationTest, MaximumWithScalar) {
ASSERT_OK(MaximumWithScalarTest(&exec_env_));
}
TEST_F(OpenCLOperationTest, MaximumWithConstantLinearTensor) {
ASSERT_OK(MaximumWithConstantLinearTensorTest(&exec_env_));
}
TEST_F(OpenCLOperationTest, MaximumWithConstantHWCTensor) {
ASSERT_OK(MaximumWithConstantHWCTensorTest(&exec_env_));
}
TEST_F(OpenCLOperationTest, MaximumWithConstantHWCTensorBroadcastChannels) {
ASSERT_OK(MaximumWithConstantHWCTensorBroadcastChannelsTest(&exec_env_));
}
TEST_F(OpenCLOperationTest, Minimum) { ASSERT_OK(MinimumTest(&exec_env_)); }
TEST_F(OpenCLOperationTest, MinimumWithScalar) {
ASSERT_OK(MinimumWithScalarTest(&exec_env_));
}
TEST_F(OpenCLOperationTest, Mul) { ASSERT_OK(MulTest(&exec_env_)); }
TEST_F(OpenCLOperationTest, MulBroadcastHW) {
ASSERT_OK(MulBroadcastHWTest(&exec_env_));
}
TEST_F(OpenCLOperationTest, MulBroadcastChannels) {
ASSERT_OK(MulBroadcastChannelsTest(&exec_env_));
}
TEST_F(OpenCLOperationTest, SubWithScalarAtFirstPosition) {
ASSERT_OK(SubWithScalarAtFirstPositionTest(&exec_env_));
}
TEST_F(OpenCLOperationTest, Less) { ASSERT_OK(LessTest(&exec_env_)); }
TEST_F(OpenCLOperationTest, LessEqual) { ASSERT_OK(LessEqualTest(&exec_env_)); }
TEST_F(OpenCLOperationTest, Greater) { ASSERT_OK(GreaterTest(&exec_env_)); }
TEST_F(OpenCLOperationTest, GreaterEqual) {
ASSERT_OK(GreaterEqualTest(&exec_env_));
}
TEST_F(OpenCLOperationTest, Equal) { ASSERT_OK(EqualTest(&exec_env_)); }
TEST_F(OpenCLOperationTest, NotEqual) { ASSERT_OK(NotEqualTest(&exec_env_)); }
TEST_F(OpenCLOperationTest, CosBroadcast) {
ASSERT_OK(CosBroadcastTest(&exec_env_));
}
TEST_F(OpenCLOperationTest, MaximumScalarBroadcastInput) {
ASSERT_OK(MaximumScalarBroadcastInputTest(&exec_env_));
}
TEST_F(OpenCLOperationTest, MulLinearBroadcastInput) {
ASSERT_OK(MulLinearBroadcastInputTest(&exec_env_));
}
TEST_F(OpenCLOperationTest, MulBroadcastBothInputs) {
ASSERT_OK(MulBroadcastBothInputsTest(&exec_env_));
}
TEST_F(OpenCLOperationTest, LogicalAndTest) {
ASSERT_OK(LogicalAndTest(&exec_env_));
}
TEST_F(OpenCLOperationTest, LogicalAndWithConstantTest) {
ASSERT_OK(LogicalAndWithConstantTest(&exec_env_));
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/kernels/elementwise.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/kernels/elementwise_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
af777e7f-ba41-48ea-b590-9d3c0803b996 | cpp | tensorflow/tensorflow | floor | tensorflow/lite/experimental/shlo/ops/floor.cc | tensorflow/lite/delegates/xnnpack/floor_test.cc | #include "tensorflow/lite/experimental/shlo/ops/floor.h"
#include <cmath>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/bf16.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/f16.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
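// Elementwise functor used by the unary dispatch helpers; the F16/BF16
// specializations round-trip through float because std::floor has no overload
// for those storage types.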
struct Floor {
template <class T>
T operator()(T v) const {
return std::floor(v);
}
};
template <>
F16 Floor::operator()<F16>(F16 val) const {
return F16(operator()(static_cast<float>(val)));
}
template <>
BF16 Floor::operator()<BF16>(BF16 val) const {
return BF16(operator()(static_cast<float>(val)));
}
FloorOp Create(FloorOp::Attributes) { return {}; }
absl::Status Prepare(FloorOp& op, const Tensor& input, Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(input.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(CheckSupportedTypes(
CheckCtx("floor"), input, IsFloatTensor, IsQuantizedPerTensorTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("floor"), input, output));
return absl::OkStatus();
}
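// The DISPATCH_* macros are expected to return from Evaluate for every
// supported type, so the trailing FailedPreconditionError should only be
// reached for tensor types that Prepare would have rejected.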
absl::Status Evaluate(FloorOp& op, const Tensor& input, Tensor& output) {
Floor floor;
if (input.IsPerTensorQuantized()) {
DISPATCH_QUANTIZED(
detail::DequantizeOpQuantizePerTensor,
input.quantized_per_tensor_element_type().StorageType(),
input.quantized_per_tensor_element_type().ExpressedType(), floor, input,
output)
} else if (IsFloatTensor(input)) {
DISPATCH_FLOAT(detail::EvaluateNoQuantization, input.tensor_element_type(),
floor, input, output);
}
return absl::FailedPreconditionError(
"stablehlo.floor: Unsupported tensor type.");
}
}; | #include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/delegates/xnnpack/unary_elementwise_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace xnnpack {
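// Each test delegates a single FLOOR op to XNNPACK over randomly sized shapes
// (each dimension drawn from [2, 5]) and lets UnaryElementwiseTester handle
// the reference comparison.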
TEST(Floor, 4D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, height, width, channels})
.Test(BuiltinOperator_FLOOR, xnnpack_delegate.get());
}
TEST(Floor, 3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, width, channels})
.Test(BuiltinOperator_FLOOR, xnnpack_delegate.get());
}
TEST(Floor, 2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, channels})
.Test(BuiltinOperator_FLOOR, xnnpack_delegate.get());
}
TEST(Floor, 1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
UnaryElementwiseTester().Shape({batch}).Test(BuiltinOperator_FLOOR,
xnnpack_delegate.get());
}
TEST(Floor, MultiThreading) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
delegate_options.num_threads = 2;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, height, width, channels})
.Test(BuiltinOperator_FLOOR, xnnpack_delegate.get());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/floor.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/floor_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5dcd7ac0-6bda-4243-81e7-3c128c6e2b43 | cpp | tensorflow/tensorflow | cast | tensorflow/lite/delegates/gpu/common/tasks/cast.cc | tensorflow/lite/delegates/gpu/cl/kernels/cast_test.cc | #include "tensorflow/lite/delegates/gpu/common/tasks/cast.h"
#include <map>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/substitute.h"
#include "tensorflow/lite/delegates/gpu/common/task/util.h"
namespace tflite {
namespace gpu {
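// Builds an elementwise op whose single statement converts in_value to the
// destination tensor's data type; GetTypeConversion supplies the
// backend-specific template (for float -> half this would be something like
// "out_value = convert_half4(in_value);", though the exact emitted text
// depends on the backend).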
GPUOperation CreateCast(const OperationDef& definition,
const GpuInfo& gpu_info) {
ElementwiseDescriptor op_desc;
const std::string conversion =
GetTypeConversion(gpu_info, definition.src_tensors[0].GetDataType(),
definition.dst_tensors[0].GetDataType(), 4);
op_desc.code =
"out_value = " + absl::Substitute(conversion, "in_value") + ";\n";
return CreateGpuOperation(definition, std::move(op_desc));
}
}
} | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tasks/cast_test_util.h"
namespace tflite {
namespace gpu {
namespace cl {
namespace {
TEST_F(OpenCLOperationTest, Cast) {
auto status = CastTests(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, CastToBool) {
auto status = CastToBoolTests(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, CastFromBool) {
auto status = CastFromBoolTests(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/tasks/cast.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/kernels/cast_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c81b80c5-bbcc-42d8-8d3a-a785744ab124 | cpp | tensorflow/tensorflow | numeric_verify | tensorflow/lite/kernels/numeric_verify.cc | tensorflow/lite/kernels/numeric_verify_test.cc | #include <math.h>
#include <stddef.h>
#include <stdlib.h>
#include <algorithm>
#include <cstdint>
#include <numeric>
#include <vector>
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/dequantize.h"
#include "tensorflow/lite/kernels/internal/optimized/neon_check.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/reference/integer_ops/dequantize.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace custom {
namespace numeric_verify {
static constexpr const char kToleranceStr[] = "tolerance";
static constexpr const char kLogIfFailedStr[] = "log_if_failed";
static constexpr const int kTemporaryDequantizedTensor = 0;
static constexpr const int kOutputTensor = 0;
struct OpContext {
OpContext(TfLiteContext* context, TfLiteNode* node) {
input = GetInput(context, node, 0);
ref = GetInput(context, node, 1);
output = GetOutput(context, node, 0);
}
const TfLiteTensor* input;
const TfLiteTensor* ref;
TfLiteTensor* output;
};
const int kTensorNotAllocated = -1;
struct OpData {
float tolerance;
bool float_input_initialized;
int cache_tensor_id = kTensorNotAllocated;
bool log_if_failed;
};
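// Init decodes the flexbuffers custom-options map for this op ("tolerance"
// and "log_if_failed") into a heap-allocated OpData.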
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
auto* op_data = new OpData();
op_data->float_input_initialized = false;
const uint8_t* buffer_t = reinterpret_cast<const uint8_t*>(buffer);
const flexbuffers::Map& m = flexbuffers::GetRoot(buffer_t, length).AsMap();
const float tolerance = m[kToleranceStr].AsFloat();
const bool log_if_failed = m[kLogIfFailedStr].AsBool();
op_data->tolerance = tolerance;
op_data->log_if_failed = log_if_failed;
return op_data;
}
void Free(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<OpData*>(buffer);
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
OpContext op_context(context, node);
TF_LITE_ENSURE(context, op_context.input->type == kTfLiteUInt8 ||
op_context.input->type == kTfLiteInt8 ||
op_context.input->type == kTfLiteInt16 ||
op_context.input->type == kTfLiteFloat16);
TF_LITE_ENSURE(context, op_context.ref->type == kTfLiteFloat32);
if (op_data->cache_tensor_id == kTensorNotAllocated) {
TF_LITE_ENSURE_OK(
context, context->AddTensors(context, 1, &op_data->cache_tensor_id));
}
TfLiteIntArrayFree(node->temporaries);
node->temporaries = TfLiteIntArrayCreate(1);
node->temporaries->data[0] = op_data->cache_tensor_id;
TfLiteTensor* dequantized;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, kTemporaryDequantizedTensor,
&dequantized));
dequantized->type = op_context.ref->type;
dequantized->allocation_type = kTfLiteDynamic;
TF_LITE_ENSURE_OK(context, context->ResizeTensor(
context, dequantized,
TfLiteIntArrayCopy(op_context.input->dims)));
TF_LITE_ENSURE_OK(
context, GetOutputSafe(context, node, kOutputTensor, &op_context.output));
op_context.output->type = kTfLiteFloat32;
op_context.output->allocation_type = kTfLiteArenaRwPersistent;
return context->ResizeTensor(context, op_context.output,
TfLiteIntArrayCopy(op_context.input->dims));
}
static int32_t GetQuantizedValue(const OpContext& op_context, int index) {
switch (op_context.input->type) {
case kTfLiteUInt8:
return GetTensorData<uint8_t>(op_context.input)[index];
case kTfLiteInt8:
return GetTensorData<int8_t>(op_context.input)[index];
case kTfLiteInt16:
return GetTensorData<int16_t>(op_context.input)[index];
default:
return 0;
}
}
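// Dequantizes the input into the temporary tensor and stores the elementwise
// difference against the float reference in the output. When log_if_failed is
// set and tolerance >= 0.1, the first value whose error exceeds
// tolerance * scale fails the op; otherwise only the error statistics
// (mean, std, max_diff) are logged.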
template <builtin::dequantize::KernelType kernel_type>
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
OpContext op_context(context, node);
if (IsConstantTensor(op_context.input) && op_data->float_input_initialized) {
return kTfLiteOk;
}
TfLiteTensor* dequantized;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, kTemporaryDequantizedTensor,
&dequantized));
auto status = builtin::dequantize::DequantizeImpl<kernel_type>(
context, node, op_context.input, dequantized);
if (status != kTfLiteOk) {
return status;
}
if (IsConstantTensor(op_context.input)) {
op_data->float_input_initialized = true;
}
TF_LITE_ENSURE_OK(
context, GetOutputSafe(context, node, kOutputTensor, &op_context.output));
auto output_data = GetTensorData<float>(op_context.output);
const int n = NumElements(dequantized);
if (op_data->log_if_failed && op_data->tolerance >= 0.1) {
auto max_diff = op_data->tolerance * op_context.input->params.scale;
for (int i = 0; i < n; ++i) {
int32_t value = GetQuantizedValue(op_context, i);
float dequant = GetTensorData<float>(dequantized)[i];
float reference = GetTensorData<float>(op_context.ref)[i];
output_data[i] = dequant - reference;
float diff = std::abs(output_data[i]);
if (diff > max_diff) {
TF_LITE_KERNEL_LOG(
context,
"Mismatch: %f is quantized to %d with (%f, %d). "
"abs(%f - %f) = %f > %f (tolerance) range percentage %f.\n",
reference, value, op_context.input->params.scale,
op_context.input->params.zero_point, reference, dequant, diff,
max_diff, op_data->tolerance);
return kTfLiteError;
}
}
} else {
std::vector<double> diffs, temp;
diffs.reserve(n);
temp.reserve(n);
diffs.resize(n);
temp.resize(n);
for (int i = 0; i < n; ++i) {
float dequant = GetTensorData<float>(dequantized)[i];
float reference = GetTensorData<float>(op_context.ref)[i];
diffs[i] = static_cast<double>(dequant - reference);
output_data[i] = dequant - reference;
}
double mean =
std::accumulate(diffs.begin(), diffs.end(), 0.0) / diffs.size();
double max_diff = 0.0;
std::transform(diffs.begin(), diffs.end(), temp.begin(),
[mean, &max_diff](double x) {
max_diff = std::max(max_diff, std::abs(x));
return x - mean;
});
double sq_sum =
std::inner_product(temp.begin(), temp.end(), temp.begin(), 0.0);
double std = std::sqrt(sq_sum / diffs.size());
TF_LITE_KERNEL_LOG(
context,
"std: %f, mean: %f, max_diff: %f (scale: %f, zero_point: %d).\n", std,
mean, max_diff, op_context.input->params.scale,
op_context.input->params.zero_point);
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_NUMERIC_VERIFY_OPT() {
static TfLiteRegistration r = {
numeric_verify::Init, numeric_verify::Free, numeric_verify::Prepare,
numeric_verify::Eval<builtin::dequantize::kGenericOptimized>};
return &r;
}
TfLiteRegistration* Register_NUMERIC_VERIFY_REF() {
static TfLiteRegistration r = {
numeric_verify::Init, numeric_verify::Free, numeric_verify::Prepare,
numeric_verify::Eval<builtin::dequantize::kReference>};
return &r;
}
TfLiteRegistration* Register_NUMERIC_VERIFY() {
#ifdef USE_NEON
return Register_NUMERIC_VERIFY_OPT();
#else
return Register_NUMERIC_VERIFY_REF();
#endif
}
}
}
} | #include <string.h>
#include <cstdint>
#include <initializer_list>
#include <vector>
#include <gtest/gtest.h>
#include "absl/memory/memory.h"
#include "Eigen/Core"
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace ops {
namespace custom {
TfLiteRegistration* Register_NUMERIC_VERIFY();
}
}
namespace {
using ::testing::ElementsAreArray;
class NumericVerifyOpModel : public SingleOpModel {
public:
NumericVerifyOpModel(TensorType type, std::initializer_list<int> shape,
float scale, int32_t zero_point, int version,
float tolerance = 5.0, bool log_if_failed = true) {
const TensorData input_tensor_data = {type, shape, 0, 0, scale, zero_point};
input_ = AddInput(input_tensor_data);
ref_ = AddInput({TensorType_FLOAT32, shape});
output_ = AddOutput({TensorType_FLOAT32, shape});
std::vector<uint8_t> custom_options(sizeof(float));
flexbuffers::Builder fbb;
fbb.Map([&]() {
fbb.Float("tolerance", tolerance);
fbb.Bool("log_if_failed", log_if_failed);
});
fbb.Finish();
SetCustomOp("NUMERIC_VERIFY", fbb.GetBuffer(),
ops::custom::Register_NUMERIC_VERIFY);
BuildInterpreter({GetShape(input_), GetShape(ref_)});
}
template <typename T>
void SetInputs(std::initializer_list<T> data,
std::initializer_list<float> ref_data) {
PopulateTensor(input_, data);
PopulateTensor(ref_, ref_data);
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
private:
int input_;
int ref_;
int output_;
};
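// Wraps the custom NUMERIC_VERIFY op: input 0 is the quantized tensor,
// input 1 the float32 reference, and the float32 output holds the
// element-wise difference between the dequantized input and the reference.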
TEST(NumericVerifyOpTest, Uint8) {
NumericVerifyOpModel m(TensorType_UINT8, {2, 5}, 0.5, 127, 1);
m.SetInputs<uint8_t>({0, 1, 2, 3, 4, 251, 252, 253, 254, 255},
{-63.5, -63, -62.5, -62, -61.5, 62, 62.5, 63, 63.5, 64});
EXPECT_EQ(m.Invoke(), kTfLiteOk);
}
TEST(NumericVerifyOpTest, Int8) {
NumericVerifyOpModel m(TensorType_INT8, {2, 5}, 0.5, -1, 2);
m.SetInputs<int8_t>({-128, -127, -126, -125, -124, 123, 124, 125, 126, 127},
{-63.5, -63, -62.5, -62, -61.5, 62, 62.5, 63, 63.5, 64});
EXPECT_EQ(m.Invoke(), kTfLiteOk);
}
TEST(NumericVerifyOpTest, Float16) {
NumericVerifyOpModel m(TensorType_FLOAT16, {2, 3}, /*scale=*/1.0f,
                       /*zero_point=*/0, /*version=*/3, /*tolerance=*/0.1f);
std::vector<Eigen::half> half{Eigen::half{-535.54f}, Eigen::half{-100.0f},
Eigen::half{-1.0f}, Eigen::half{0.f},
Eigen::half{1.0f}, Eigen::half{100.32f}};
m.PopulateTensor(0, 0, reinterpret_cast<TfLiteFloat16*>(half.data()),
reinterpret_cast<TfLiteFloat16*>(half.data()) + half.size());
m.PopulateTensor(1, {-535.54f, -100.0f, -1.0f, 0.f, 1.0f, 100.32f});
EXPECT_EQ(m.Invoke(), kTfLiteOk);
}
TEST(NumericVerifyOpTest, Int16) {
NumericVerifyOpModel m(TensorType_INT16, {2, 5}, 0.5, -1, 4);
m.SetInputs<int16_t>(
{-130, -127, -126, -125, -124, 123, 124, 125, 126, 130},
{-64.5, -63, -62.5, -62, -61.5, 62, 62.5, 63, 63.5, 65.5});
EXPECT_EQ(m.Invoke(), kTfLiteOk);
}
TEST(NumericVerifyOpFailedTest, Int8) {
NumericVerifyOpModel m(TensorType_INT8, {2, 5}, 0.5, -1, 2);
m.SetInputs<int8_t>({-128, -127, -126, -125, -124, 0, 124, 125, 126, 127},
{-63.5, -63, -62.5, -62, -61.5, 62, 62.5, 63, 63.5, 64});
EXPECT_EQ(m.Invoke(), kTfLiteError);
}
TEST(NumericVerifyOpDebugModeTest, Int8) {
NumericVerifyOpModel m(TensorType_INT8, {2, 5}, 0.5, -1, 2,
                       /*tolerance=*/5.0, /*log_if_failed=*/false);
m.SetInputs<int8_t>({-128, -127, -126, -125, -124, 0, 124, 125, 126, 127},
{-63.5, -63, -62.5, -62, -61.5, 62, 62.5, 63, 63.5, 64});
EXPECT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(ArrayFloatNear({0, 0, 0, 0, 0, -61.5, 0, 0, 0, 0})));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/numeric_verify.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/numeric_verify_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b5df6128-3b97-4693-9db3-5a10760bbdc1 | cpp | tensorflow/tensorflow | subgraph_test_util | tensorflow/lite/kernels/subgraph_test_util.cc | tensorflow/lite/kernels/subgraph_test_util_test.cc | #include "tensorflow/lite/kernels/subgraph_test_util.h"
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <random>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/kernels/builtin_op_kernels.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/string_util.h"
namespace tflite {
namespace ops {
namespace custom {
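// A test-only custom op with no inputs and a single int32 output holding a
// random value in [1, 32768]. Used below to give a resource variable a
// nondeterministic initial value.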
namespace random_int {
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 0);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
TfLiteTensor* output = GetOutput(context, node, 0);
TfLiteIntArray* outputSize = TfLiteIntArrayCreate(1);
outputSize->data[0] = 1;
return context->ResizeTensor(context, output, outputSize);
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
TfLiteTensor& output = context->tensors[node->outputs->data[0]];
std::random_device rd;
std::uniform_int_distribution<int> dist(1, 32768);
output.data.i32[0] = dist(rd);
return kTfLiteOk;
}
}
TfLiteRegistration* Register_RANDOM_INT() {
static TfLiteRegistration r = {nullptr, nullptr, random_int::Prepare,
random_int::Eval};
return &r;
}
}
}
namespace subgraph_test_util {
namespace {
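// Helpers that append a single node to `subgraph`, wiring the given tensor
// indices as inputs and outputs. Builtin parameter structs are allocated
// with malloc/calloc because the subgraph takes ownership of the builtin
// data and releases it itself.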
void AddTileNode(Subgraph* subgraph, int input0, int input1, int output) {
int node_index;
auto* tile_reg = ops::builtin::Register_TILE();
tile_reg->builtin_code = kTfLiteBuiltinTile;
subgraph->AddNodeWithParameters({input0, input1}, {output}, {}, nullptr, 0,
nullptr, tile_reg, &node_index);
}
void AddFlexNode(Subgraph* subgraph, int input_tensor, int output_tensor) {
auto prepare = [](TfLiteContext* context, TfLiteNode* node) {
TfLiteTensor& input = context->tensors[node->inputs->data[0]];
TfLiteTensor& output = context->tensors[node->outputs->data[0]];
TfLiteArrayUniquePtr<int> shape =
BuildTfLiteArray(input.dims->size, input.dims->data);
return context->ResizeTensor(context, &output, shape.release());
};
auto eval = [](TfLiteContext* context, TfLiteNode* node) {
TfLiteTensor& input = context->tensors[node->inputs->data[0]];
TfLiteTensor& output = context->tensors[node->outputs->data[0]];
memcpy(output.data.data, input.data.data, input.bytes);
return kTfLiteOk;
};
TfLiteRegistration reg = {nullptr, nullptr, prepare, eval};
reg.builtin_code = BuiltinOperator_CUSTOM;
reg.custom_name = "Flex";
int node_index;
ASSERT_EQ(
subgraph->AddNodeWithParameters({input_tensor}, {output_tensor}, {},
nullptr, 0, nullptr, ®, &node_index),
kTfLiteOk);
}
void AddReshapeNode(Subgraph* subgraph, int input0, int input1, int output) {
int node_index;
TfLiteReshapeParams* reshape_params = reinterpret_cast<TfLiteReshapeParams*>(
calloc(1, sizeof(TfLiteReshapeParams)));
auto* reshape_reg = ops::builtin::Register_RESHAPE();
reshape_reg->builtin_code = kTfLiteBuiltinReshape;
ASSERT_EQ(subgraph->AddNodeWithParameters({input0, input1}, {output}, {},
nullptr, 0, reshape_params,
reshape_reg, &node_index),
kTfLiteOk);
}
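// Adds a custom "OffsetAdd" node computing
// output[i] = input0[(i + 1) % n] + input1[i]. It is registered with
// kTfLiteInplaceOpInput1Shared, so the runtime may let the output share
// input1's buffer.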
void AddOffsetAddNode(Subgraph* subgraph, int input0, int input1, int output) {
auto prepare = [](TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor& input0 = context->tensors[node->inputs->data[0]];
TfLiteTensor& output = context->tensors[node->outputs->data[0]];
TfLiteIntArray* shape = TfLiteIntArrayCopy(input0.dims);
return context->ResizeTensor(context, &output, shape);
};
auto invoke = [](TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor& input0 = context->tensors[node->inputs->data[0]];
const TfLiteTensor& input1 = context->tensors[node->inputs->data[1]];
TfLiteTensor& output = context->tensors[node->outputs->data[0]];
int num_elements = input0.dims->data[0];
const int kOffset = 1;
const int* i0 = static_cast<int*>(input0.data.data);
const int* i1 = static_cast<int*>(input1.data.data);
int* o = static_cast<int*>(output.data.data);
for (int i = 0; i < num_elements; ++i) {
int input0_pos = (i + kOffset) % num_elements;
o[i] = i0[input0_pos] + i1[i];
}
return kTfLiteOk;
};
int node_index;
TfLiteRegistration offset_add_reg = {nullptr, nullptr,
prepare, invoke};
offset_add_reg.builtin_code = BuiltinOperator_CUSTOM;
offset_add_reg.custom_name = "OffsetAdd";
offset_add_reg.inplace_operator = kTfLiteInplaceOpInput1Shared;
subgraph->AddNodeWithParameters({input0, input1}, {output}, {}, nullptr, 0,
nullptr, &offset_add_reg, &node_index);
}
void AddAddNode(Subgraph* subgraph, int input0, int input1, int output) {
int node_index;
TfLiteAddParams* add_params =
reinterpret_cast<TfLiteAddParams*>(calloc(1, sizeof(TfLiteAddParams)));
auto* add_reg = ops::builtin::Register_ADD();
add_reg->builtin_code = kTfLiteBuiltinAdd;
subgraph->AddNodeWithParameters({input0, input1}, {output}, {}, nullptr, 0,
add_params, add_reg, &node_index);
}
void AddDynamicUpdateSliceNode(Subgraph* subgraph, int input0, int input1,
int input2, int output) {
int node_index;
auto* reg = ops::builtin::Register_DYNAMIC_UPDATE_SLICE();
reg->builtin_code = kTfLiteBuiltinDynamicUpdateSlice;
subgraph->AddNodeWithParameters({input0, input1, input2}, {output}, {},
nullptr, 0, nullptr, reg, &node_index);
}
}
void Setup1DTensor(Subgraph* subgraph, int tensor_index, TfLiteType type) {
int dim = 1;
ASSERT_EQ(subgraph->SetTensorParametersReadWrite(tensor_index, type, "", 1,
&dim, {}, false),
kTfLiteOk);
}
void SetupTensor(Subgraph* subgraph, int tensor_index, TfLiteType type) {
ASSERT_EQ(subgraph->SetTensorParametersReadWrite(tensor_index, type, "", 0,
nullptr, {}, false),
kTfLiteOk);
}
SubgraphBuilder::~SubgraphBuilder() {
for (auto buffer : buffers_) {
free(buffer);
}
}
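// The Build* methods below populate an empty Subgraph in place; the
// control-flow kernel tests consume them. A rough usage sketch (the exact
// fixture plumbing lives in subgraph_test_util.h and the *_test.cc files)
// might be:
//
//   Interpreter interpreter;
//   SubgraphBuilder builder;
//   interpreter.AddSubgraphs(2);  // cond + body for a WHILE loop.
//   builder.BuildLessEqualCondSubgraph(interpreter.subgraph(1), /*rhs=*/3);
//   builder.BuildAccumulateLoopBodySubgraph(interpreter.subgraph(2));
//   builder.BuildWhileSubgraph(&interpreter.primary_subgraph());
//   ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);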
void SubgraphBuilder::BuildInplaceDynamicUpdateSliceSubgraph(
Subgraph& subgraph, bool multiple_consumers) {
enum {
kInput0,
kInput1,
kInput2,
kConstRhs,
kOutput,
kIntermediate0,
kIntermediate1,
kTensorCount
};
int first_new_tensor_index;
ASSERT_EQ(subgraph.AddTensors(kTensorCount, &first_new_tensor_index),
kTfLiteOk);
ASSERT_EQ(first_new_tensor_index, 0);
ASSERT_EQ(subgraph.SetInputs({kInput0, kInput1, kInput2}), kTfLiteOk);
ASSERT_EQ(subgraph.SetOutputs({kOutput}), kTfLiteOk);
for (int i = 0; i < kTensorCount; ++i) {
SetupTensor(&subgraph, i, kTfLiteInt32);
}
CreateConstantTensor(&subgraph, kConstRhs, {1}, {1});
AddAddNode(&subgraph, kInput0, kConstRhs, kIntermediate0);
AddDynamicUpdateSliceNode(&subgraph, kIntermediate0, kInput1, kInput2,
kIntermediate1);
AddAddNode(&subgraph, kIntermediate1,
multiple_consumers ? kIntermediate0 : kConstRhs, kOutput);
}
void SubgraphBuilder::BuildInputDynamicUpdateSliceSubgraph(Subgraph& subgraph) {
enum {
kInput0,
kInput1,
kInput2,
kConstRhs,
kOutput,
kIntermediate0,
kTensorCount
};
int first_new_tensor_index;
ASSERT_EQ(subgraph.AddTensors(kTensorCount, &first_new_tensor_index),
kTfLiteOk);
ASSERT_EQ(first_new_tensor_index, 0);
ASSERT_EQ(subgraph.SetInputs({kInput0, kInput1, kInput2}), kTfLiteOk);
ASSERT_EQ(subgraph.SetOutputs({kOutput}), kTfLiteOk);
for (int i = 0; i < kTensorCount; ++i) {
SetupTensor(&subgraph, i, kTfLiteInt32);
}
CreateConstantTensor(&subgraph, kConstRhs, {1}, {1});
AddDynamicUpdateSliceNode(&subgraph, kInput0, kInput1, kInput2,
kIntermediate0);
AddAddNode(&subgraph, kIntermediate0, kConstRhs, kOutput);
}
void SubgraphBuilder::BuildOutputNotConsumedSubgraph(Subgraph& subgraph) {
enum {
kInput0,
kInput1,
kInput2,
kOutput0,
kOutput1,
kConstRhs,
kTensorCount
};
int first_new_tensor_index;
ASSERT_EQ(subgraph.AddTensors(kTensorCount, &first_new_tensor_index),
kTfLiteOk);
ASSERT_EQ(first_new_tensor_index, 0);
ASSERT_EQ(subgraph.SetInputs({kInput0, kInput1, kInput2}), kTfLiteOk);
ASSERT_EQ(subgraph.SetOutputs({kOutput0, kOutput1, kConstRhs}), kTfLiteOk);
for (int i = 0; i < kTensorCount; ++i) {
Setup1DTensor(&subgraph, i, kTfLiteInt32);
}
CreateConstantTensor(&subgraph, kConstRhs, {1}, {1});
AddAddNode(&subgraph, kInput0, kInput1, kOutput0);
AddTileNode(&subgraph, kInput0, kInput2, kOutput1);
}
void SubgraphBuilder::BuildXNNPACKSubgraph(Subgraph* subgraph) {
enum {
kInputCounter,
kInputValue,
kOutputCounter,
kOutputValue,
kIntermediateTensor0,
kIntermediateTensor1,
kTensorCount
};
int first_new_tensor_index;
ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
kTfLiteOk);
ASSERT_EQ(first_new_tensor_index, 0);
ASSERT_EQ(subgraph->SetInputs({kInputCounter, kInputValue}), kTfLiteOk);
ASSERT_EQ(subgraph->SetOutputs({kOutputCounter, kOutputValue}), kTfLiteOk);
for (int i = 0; i < kTensorCount; ++i) {
SetupTensor(subgraph, i, kTfLiteFloat32);
}
AddAddNode(subgraph, kInputCounter, kInputValue, kIntermediateTensor0);
AddAddNode(subgraph, kInputCounter, kInputValue, kIntermediateTensor1);
AddAddNode(subgraph, kIntermediateTensor0, kIntermediateTensor1,
kOutputCounter);
AddAddNode(subgraph, kIntermediateTensor0, kIntermediateTensor1,
kOutputValue);
}
void SubgraphBuilder::BuildInputIsOutputSubgraph(Subgraph* subgraph) {
enum {
kInputCounter,
kInputValue0,
kInputOutput,
kOutputCounter,
kOutputValue0,
kConstRhs,
kTensorCount
};
int first_new_tensor_index;
ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
kTfLiteOk);
ASSERT_EQ(first_new_tensor_index, 0);
ASSERT_EQ(subgraph->SetInputs({kInputCounter, kInputValue0, kInputOutput}),
kTfLiteOk);
ASSERT_EQ(subgraph->SetOutputs({kOutputCounter, kOutputValue0, kInputOutput}),
kTfLiteOk);
for (int i = 0; i < kTensorCount; ++i) {
SetupTensor(subgraph, i, kTfLiteInt32);
}
CreateConstantTensor(subgraph, kConstRhs, {1}, {1});
AddAddNode(subgraph, kInputCounter, kConstRhs, kOutputCounter);
AddAddNode(subgraph, kInputValue0, kInputOutput, kOutputValue0);
}
void SubgraphBuilder::BuildInputIsDifferentOutputSubgraph(Subgraph* subgraph) {
enum {
kInputCounter,
kInputValue,
kOutputCounter,
kOutputValue,
kTensorCount
};
int first_new_tensor_index;
ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
kTfLiteOk);
ASSERT_EQ(first_new_tensor_index, 0);
ASSERT_EQ(subgraph->SetInputs({kInputCounter, kInputValue}), kTfLiteOk);
ASSERT_EQ(subgraph->SetOutputs({kInputValue, kOutputValue}), kTfLiteOk);
for (int i = 0; i < kTensorCount; ++i) {
SetupTensor(subgraph, i, kTfLiteInt32);
}
AddAddNode(subgraph, kInputCounter, kInputValue, kOutputValue);
}
void SubgraphBuilder::BuildFlexOutputSubgraph(Subgraph* subgraph) {
enum {
kInputCounter,
kInputValue,
kOutputCounter,
kOutputValue,
kConstRhs,
kIntermediateTensor,
kTensorCount
};
int first_new_tensor_index;
ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
kTfLiteOk);
ASSERT_EQ(first_new_tensor_index, 0);
ASSERT_EQ(subgraph->SetInputs({kInputCounter, kInputValue}), kTfLiteOk);
ASSERT_EQ(subgraph->SetOutputs({kOutputCounter, kOutputValue}), kTfLiteOk);
for (int i = 0; i < kTensorCount; ++i) {
SetupTensor(subgraph, i, kTfLiteInt32);
}
CreateConstantTensor(subgraph, kConstRhs, {1}, {1});
AddAddNode(subgraph, kInputCounter, kConstRhs, kOutputCounter);
AddAddNode(subgraph, kConstRhs, kInputValue, kIntermediateTensor);
AddFlexNode(subgraph, kIntermediateTensor, kOutputValue);
}
void SubgraphBuilder::BuildCounterOnlySubgraph(Subgraph* subgraph) {
enum { kInputCounter, kOutputCounter, kConstRhs, kTensorCount };
int first_new_tensor_index;
ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
kTfLiteOk);
ASSERT_EQ(first_new_tensor_index, 0);
ASSERT_EQ(subgraph->SetInputs({kInputCounter}), kTfLiteOk);
ASSERT_EQ(subgraph->SetOutputs({kOutputCounter}), kTfLiteOk);
for (int i = 0; i < kTensorCount; ++i) {
SetupTensor(subgraph, i, kTfLiteInt32);
}
CreateConstantTensor(subgraph, kConstRhs, {1}, {1});
AddAddNode(subgraph, kInputCounter, kConstRhs, kOutputCounter);
}
void SubgraphBuilder::BuildAddSubgraph(Subgraph* subgraph,
const TfLiteType operand_type) {
TfLiteAddParams* params =
reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
params->activation = kTfLiteActNone;
BuildBinaryOpSubgraph(subgraph, ops::builtin::Register_ADD, kTfLiteBuiltinAdd,
params, operand_type, operand_type, operand_type);
}
void SubgraphBuilder::BuildStablehloAddSubgraph(Subgraph* subgraph,
const TfLiteType operand_type) {
BuildBinaryOpSubgraph(subgraph, ops::builtin::Register_STABLEHLO_ADD,
kTfLiteBuiltinStablehloAdd, nullptr, operand_type,
operand_type, operand_type);
}
void SubgraphBuilder::BuildAllInplaceScenariosSubgraph(Subgraph* subgraph) {
enum {
kInputCounter,
kInputValue0,
kInputValue1,
kInputValue2,
kOutputCounter,
kOutputValue0,
kOutputValue1,
kOutputValue2,
kIntermediateTensor0,
kIntermediateTensor1,
kInputOutputTensor,
kTensorCount
};
int first_new_tensor_index;
ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
kTfLiteOk);
ASSERT_EQ(first_new_tensor_index, 0);
ASSERT_EQ(subgraph->SetInputs({kInputCounter, kInputValue0, kInputValue1,
kInputValue2, kInputOutputTensor}),
kTfLiteOk);
ASSERT_EQ(subgraph->SetOutputs({kOutputCounter, kOutputValue0, kOutputValue1,
kOutputValue2, kInputOutputTensor}),
kTfLiteOk);
for (int i = 0; i < kTensorCount; ++i) {
SetupTensor(subgraph, i, kTfLiteInt32);
}
CreateConstantTensor(subgraph, kInputOutputTensor, {1}, {1});
AddAddNode(subgraph, kInputCounter, kInputOutputTensor, kOutputCounter);
AddAddNode(subgraph, kInputValue0, kInputOutputTensor, kIntermediateTensor0);
AddAddNode(subgraph, kIntermediateTensor0, kInputOutputTensor, kOutputValue0);
AddTileNode(subgraph, kInputValue1, kInputCounter, kOutputValue1);
AddTileNode(subgraph, kInputValue2, kInputCounter, kIntermediateTensor1);
AddAddNode(subgraph, kIntermediateTensor1, kInputOutputTensor, kOutputValue2);
}
void SubgraphBuilder::BuildDynamicOpTriggersAllocationOfUnsedInputSubgraph(
Subgraph* subgraph) {
enum {
kInputCounter,
kInputValue0,
kInputValue1,
kOutputCounter,
kOutputValue0,
kOutputValue1,
kIntermediateTensor0,
kIntermediateTensor1,
kConstRhs,
kTensorCount
};
int first_new_tensor_index;
ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
kTfLiteOk);
ASSERT_EQ(first_new_tensor_index, 0);
ASSERT_EQ(subgraph->SetInputs({kInputCounter, kInputValue0, kInputValue1}),
kTfLiteOk);
ASSERT_EQ(
subgraph->SetOutputs({kOutputCounter, kOutputValue0, kOutputValue1}),
kTfLiteOk);
for (int i = 0; i < kTensorCount; ++i) {
SetupTensor(subgraph, i, kTfLiteInt32);
}
CreateConstantTensor(subgraph, kConstRhs, {1}, {1});
AddAddNode(subgraph, kInputCounter, kConstRhs, kOutputCounter);
AddTileNode(subgraph, kInputValue1, kInputCounter, kOutputValue1);
AddAddNode(subgraph, kInputValue0, kConstRhs, kIntermediateTensor0);
AddAddNode(subgraph, kIntermediateTensor0, kOutputValue1, kOutputValue0);
}
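// Generic single-op graph: output = OP(input1, input2). The thin wrappers
// above and below (Add, Mul, Maximum, Minimum, LogicalOr, ...) simply
// forward the registration, builtin code, parameter struct, and tensor
// types here.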
void SubgraphBuilder::BuildBinaryOpSubgraph(
Subgraph* subgraph, TfLiteRegistration* (*Register_OP)(),
const TfLiteBuiltinOperator builtin_code, void* const params,
const TfLiteType input1_type, const TfLiteType input2_type,
const TfLiteType output_type) {
enum { kInput1, kInput2, kOutput, kTensorCount };
ASSERT_NE(Register_OP, nullptr);
int first_new_tensor_index;
ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
kTfLiteOk);
ASSERT_EQ(first_new_tensor_index, 0);
ASSERT_EQ(subgraph->SetInputs({kInput1, kInput2}), kTfLiteOk);
ASSERT_EQ(subgraph->SetOutputs({kOutput}), kTfLiteOk);
SetupTensor(subgraph, kInput1, input1_type);
SetupTensor(subgraph, kInput2, input2_type);
SetupTensor(subgraph, kOutput, output_type);
TfLiteRegistration* reg = Register_OP();
reg->builtin_code = builtin_code;
int node_index;
subgraph->AddNodeWithParameters({kInput1, kInput2}, {kOutput}, {}, nullptr, 0,
params, reg, &node_index);
}
void SubgraphBuilder::BuildMaximumSubgraph(Subgraph* subgraph,
const TfLiteType operand_type) {
BuildBinaryOpSubgraph(subgraph, ops::builtin::Register_MAXIMUM,
kTfLiteBuiltinMaximum, nullptr,
operand_type,
operand_type,
operand_type);
}
void SubgraphBuilder::BuildStablehloMaximumSubgraph(
Subgraph* subgraph, const TfLiteType operand_type) {
BuildBinaryOpSubgraph(subgraph, ops::builtin::Register_STABLEHLO_MAXIMUM,
kTfLiteBuiltinStablehloMaximum, nullptr, operand_type,
operand_type, operand_type);
}
void SubgraphBuilder::BuildMinimumSubgraph(Subgraph* subgraph,
const TfLiteType operand_type) {
BuildBinaryOpSubgraph(subgraph, ops::builtin::Register_MINIMUM,
kTfLiteBuiltinMinimum, nullptr,
operand_type,
operand_type,
operand_type);
}
void SubgraphBuilder::BuildStablehloMinimumSubgraph(
Subgraph* subgraph, const TfLiteType operand_type) {
BuildBinaryOpSubgraph(subgraph, ops::builtin::Register_STABLEHLO_MINIMUM,
kTfLiteBuiltinStablehloMinimum, nullptr, operand_type,
operand_type, operand_type);
}
void SubgraphBuilder::BuildLogicalOrSubgraph(Subgraph* subgraph) {
BuildBinaryOpSubgraph(subgraph, ops::builtin::Register_LOGICAL_OR,
kTfLiteBuiltinLogicalOr, nullptr,
kTfLiteBool,
kTfLiteBool,
kTfLiteBool);
}
void SubgraphBuilder::BuildLogicalAndSubgraph(Subgraph* subgraph) {
BuildBinaryOpSubgraph(subgraph, ops::builtin::Register_LOGICAL_AND,
kTfLiteBuiltinLogicalAnd, nullptr,
kTfLiteBool,
kTfLiteBool,
kTfLiteBool);
}
void SubgraphBuilder::BuildOutputIsSecondInputSubgraph(Subgraph* subgraph) {
const int kInput1 = 0;
const int kInput2 = 1;
const int kTensorCount = 2;
int first_new_tensor_index;
ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
kTfLiteOk);
ASSERT_EQ(first_new_tensor_index, 0);
ASSERT_EQ(subgraph->SetInputs({kInput1, kInput2}), kTfLiteOk);
ASSERT_EQ(subgraph->SetOutputs({kInput2}), kTfLiteOk);
SetupTensor(subgraph, kInput1, kTfLiteInt32);
SetupTensor(subgraph, kInput2, kTfLiteInt32);
}
void SubgraphBuilder::BuildMulSubgraph(Subgraph* subgraph,
TfLiteType operand_type) {
TfLiteMulParams* params =
reinterpret_cast<TfLiteMulParams*>(malloc(sizeof(TfLiteMulParams)));
params->activation = kTfLiteActNone;
BuildBinaryOpSubgraph(subgraph, ops::builtin::Register_MUL, kTfLiteBuiltinMul,
params, operand_type,
operand_type,
operand_type);
}
void SubgraphBuilder::BuildStablehloMulSubgraph(Subgraph* subgraph,
const TfLiteType operand_type) {
BuildBinaryOpSubgraph(subgraph, ops::builtin::Register_STABLEHLO_MULTIPLY,
kTfLiteBuiltinStablehloMultiply, nullptr, operand_type,
operand_type, operand_type);
}
void SubgraphBuilder::BuildPadSubgraph(Subgraph* subgraph) {
const int kInput1 = 0;
const int kInput2 = 1;
const int kOutput = 2;
const int kTensorCount = 3;
int first_new_tensor_index;
ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
kTfLiteOk);
ASSERT_EQ(first_new_tensor_index, 0);
ASSERT_EQ(subgraph->SetInputs({kInput1, kInput2}), kTfLiteOk);
ASSERT_EQ(subgraph->SetOutputs({kOutput}), kTfLiteOk);
SetupTensor(subgraph, kInput1, kTfLiteInt32);
SetupTensor(subgraph, kInput2, kTfLiteInt32);
SetupTensor(subgraph, kOutput, kTfLiteInt32);
TfLitePadParams* params =
reinterpret_cast<TfLitePadParams*>(malloc(sizeof(TfLitePadParams)));
auto* pad_reg = ops::builtin::Register_PAD();
pad_reg->builtin_code = kTfLiteBuiltinPad;
int node_index;
subgraph->AddNodeWithParameters({kInput1, kInput2}, {kOutput}, {}, nullptr, 0,
params, pad_reg, &node_index);
}
void SubgraphBuilder::BuildIfSubgraph(Subgraph* subgraph) {
const int kCondInput = 0;
const int kInput1 = 1;
const int kInput2 = 2;
const int kOutput = 3;
const int kTensorCount = 4;
int first_new_tensor_index;
ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
kTfLiteOk);
ASSERT_EQ(first_new_tensor_index, 0);
ASSERT_EQ(subgraph->SetInputs({kCondInput, kInput1, kInput2}), kTfLiteOk);
ASSERT_EQ(subgraph->SetOutputs({kOutput}), kTfLiteOk);
SetupTensor(subgraph, kCondInput, kTfLiteBool);
SetupTensor(subgraph, kInput1, kTfLiteInt32);
SetupTensor(subgraph, kInput2, kTfLiteInt32);
SetupTensor(subgraph, kOutput, kTfLiteInt32);
TfLiteIfParams* params =
reinterpret_cast<TfLiteIfParams*>(malloc(sizeof(TfLiteIfParams)));
params->then_subgraph_index = 1;
params->else_subgraph_index = 2;
auto* if_reg = ops::builtin::Register_IF();
if_reg->builtin_code = kTfLiteBuiltinIf;
int node_index;
subgraph->AddNodeWithParameters({kCondInput, kInput1, kInput2}, {kOutput}, {},
nullptr, 0, params, if_reg, &node_index);
}
void SubgraphBuilder::BuildCompositeSubgraph(Subgraph* subgraph,
const Subgraph* decomposition) {
const int decomposition_subgraph_index = decomposition->GetSubgraphIndex();
const auto& inputs = decomposition->inputs();
const auto& outputs = decomposition->outputs();
const int decomposition_tensor_count = inputs.size() + outputs.size();
int first_new_tensor_index;
ASSERT_EQ(
subgraph->AddTensors(decomposition_tensor_count, &first_new_tensor_index),
kTfLiteOk);
ASSERT_EQ(first_new_tensor_index, 0);
ASSERT_EQ(subgraph->SetInputs(inputs), kTfLiteOk);
ASSERT_EQ(subgraph->SetOutputs(outputs), kTfLiteOk);
for (size_t i = 0; i < inputs.size(); ++i) {
const TfLiteTensor* src = decomposition->tensor(inputs[i]);
SetupTensor(subgraph, inputs[i], src->type);
}
for (size_t i = 0; i < outputs.size(); ++i) {
const TfLiteTensor* src = decomposition->tensor(outputs[i]);
SetupTensor(subgraph, outputs[i], src->type);
}
TfLiteStablehloCompositeParams* params =
reinterpret_cast<TfLiteStablehloCompositeParams*>(
malloc(sizeof(TfLiteStablehloCompositeParams)));
params->name = "test_composite";
params->subgraph_index = decomposition_subgraph_index;
params->attributes = nullptr;
params->attributes_size = 0;
params->version = 1;
auto* composite_reg = ops::builtin::Register_STABLEHLO_COMPOSITE();
composite_reg->builtin_code = kTfLiteBuiltinStablehloComposite;
int node_index;
subgraph->AddNodeWithParameters(inputs, outputs, {}, nullptr, 0, params,
composite_reg, &node_index);
}
void SubgraphBuilder::BuildLargeLessEqualCondSubgraph(Subgraph* subgraph,
int rhs, int num_inputs) {
const int kOutput = 0;
const int kConstRhs = 1;
const int kInput0 = 2;
int tensor_count = 3 + num_inputs;
int first_new_tensor_index;
ASSERT_EQ(subgraph->AddTensors(tensor_count, &first_new_tensor_index),
kTfLiteOk);
ASSERT_EQ(first_new_tensor_index, 0);
std::vector<int> input_tensors(num_inputs);
for (int i = 0; i < num_inputs; ++i) {
input_tensors[i] = kInput0 + i;
}
ASSERT_EQ(subgraph->SetInputs(input_tensors), kTfLiteOk);
ASSERT_EQ(subgraph->SetOutputs({kOutput}), kTfLiteOk);
for (int i = 0; i < num_inputs; ++i) {
SetupTensor(subgraph, kInput0 + i, kTfLiteInt32);
}
SetupTensor(subgraph, kOutput, kTfLiteBool);
auto* le_reg = ops::builtin::Register_LESS_EQUAL();
le_reg->builtin_code = kTfLiteBuiltinLessEqual;
CreateConstantTensor(subgraph, kConstRhs, {1}, {rhs});
int node_index;
subgraph->AddNodeWithParameters({kInput0, kConstRhs}, {kOutput}, {}, nullptr,
0, nullptr, le_reg, &node_index);
}
void SubgraphBuilder::BuildOffsetAddSharing(Subgraph* subgraph) {
enum {
kInput0,
kInput1,
kIntermediateTensor0,
kIntermediateTensor1,
kIntermediateTensor2,
kOutput,
kConstRhs,
kTensorCount,
};
int first_new_tensor_index;
ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
kTfLiteOk);
ASSERT_EQ(first_new_tensor_index, 0);
ASSERT_EQ(subgraph->SetInputs({kInput0, kInput1}), kTfLiteOk);
ASSERT_EQ(subgraph->SetOutputs({kOutput}), kTfLiteOk);
for (int i = 0; i < kTensorCount; ++i) {
SetupTensor(subgraph, i, kTfLiteInt32);
}
CreateConstantTensor<int>(subgraph, kConstRhs, {1}, {1});
AddAddNode(subgraph, kInput0, kInput1, kIntermediateTensor0);
AddAddNode(subgraph, kInput0, kInput1, kIntermediateTensor1);
AddOffsetAddNode(subgraph, kIntermediateTensor0, kIntermediateTensor1,
kIntermediateTensor2);
AddAddNode(subgraph, kIntermediateTensor2, kConstRhs, kOutput);
}
void SubgraphBuilder::BuildBroadcastingSubgraph(Subgraph* subgraph) {
enum {
kInput0,
kInput1,
kIntermediateTensor0,
kIntermediateTensor1,
kIntermediateTensor2,
kIntermediateTensor3,
kOutput,
kConstRhs,
kTensorCount,
};
int first_new_tensor_index;
ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
kTfLiteOk);
ASSERT_EQ(first_new_tensor_index, 0);
ASSERT_EQ(subgraph->SetInputs({kInput0, kInput1}), kTfLiteOk);
ASSERT_EQ(subgraph->SetOutputs({kOutput}), kTfLiteOk);
for (int i = 0; i < kTensorCount; ++i) {
SetupTensor(subgraph, i, kTfLiteInt32);
}
CreateConstantTensor<int>(subgraph, kConstRhs, {1}, {1});
AddAddNode(subgraph, kInput0, kInput1, kIntermediateTensor0);
AddAddNode(subgraph, kIntermediateTensor0, kIntermediateTensor0,
kIntermediateTensor1);
AddAddNode(subgraph, kConstRhs, kIntermediateTensor1, kIntermediateTensor2);
AddAddNode(subgraph, kIntermediateTensor2, kConstRhs, kIntermediateTensor3);
AddAddNode(subgraph, kIntermediateTensor3, kConstRhs, kOutput);
}
void SubgraphBuilder::BuildInplaceOpSubgraph(Subgraph* subgraph) {
enum {
kInput0,
kInput1,
kIntermediateTensor0,
kIntermediateTensor1,
kOutput,
kConstRhs,
kTensorCount,
};
int first_new_tensor_index;
ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
kTfLiteOk);
ASSERT_EQ(first_new_tensor_index, 0);
ASSERT_EQ(subgraph->SetInputs({kInput0, kInput1}), kTfLiteOk);
ASSERT_EQ(subgraph->SetOutputs({kOutput}), kTfLiteOk);
for (int i = 0; i < kTensorCount; ++i) {
SetupTensor(subgraph, i, kTfLiteInt32);
}
CreateConstantTensor<int>(subgraph, kConstRhs, {1}, {2});
AddAddNode(subgraph, kInput0, kInput1, kIntermediateTensor0);
AddReshapeNode(subgraph, kIntermediateTensor0, kConstRhs,
kIntermediateTensor1);
AddAddNode(subgraph, kIntermediateTensor1, kInput1, kOutput);
}
void SubgraphBuilder::BuildFloatLessCondSubgraph(Subgraph* subgraph,
float rhs) {
enum {
kInput1,
kInput2,
kOutput,
kConstRhs,
kTensorCount,
};
int first_new_tensor_index;
ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
kTfLiteOk);
ASSERT_EQ(first_new_tensor_index, 0);
ASSERT_EQ(subgraph->SetInputs({kInput1, kInput2}), kTfLiteOk);
ASSERT_EQ(subgraph->SetOutputs({kOutput}), kTfLiteOk);
for (int i = 0; i < kTensorCount; ++i) {
SetupTensor(subgraph, i, kTfLiteInt32);
}
auto* le_reg = ops::builtin::Register_LESS();
le_reg->builtin_code = kTfLiteBuiltinLess;
CreateConstantTensor<float>(subgraph, kConstRhs, {1}, {rhs});
int node_index;
subgraph->AddNodeWithParameters({kInput1, kConstRhs}, {kOutput}, {}, nullptr,
0, nullptr, le_reg, &node_index);
}
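// Condition subgraph for WHILE tests: output = (input1 <= rhs). The second
// input is only declared so the signature matches the body subgraph; it
// does not feed the comparison.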
void SubgraphBuilder::BuildLessEqualCondSubgraph(Subgraph* subgraph, int rhs) {
enum {
kInput1,
kInput2,
kOutput,
kConstRhs,
kTensorCount,
};
int first_new_tensor_index;
ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
kTfLiteOk);
ASSERT_EQ(first_new_tensor_index, 0);
ASSERT_EQ(subgraph->SetInputs({kInput1, kInput2}), kTfLiteOk);
ASSERT_EQ(subgraph->SetOutputs({kOutput}), kTfLiteOk);
for (int i = 0; i < kTensorCount - 1; ++i) {
SetupTensor(subgraph, i, kTfLiteInt32);
}
SetupTensor(subgraph, kOutput, kTfLiteBool);
auto* le_reg = ops::builtin::Register_LESS_EQUAL();
le_reg->builtin_code = kTfLiteBuiltinLessEqual;
CreateConstantTensor(subgraph, kConstRhs, {1}, {rhs});
int node_index;
subgraph->AddNodeWithParameters({kInput1, kConstRhs}, {kOutput}, {}, nullptr,
0, nullptr, le_reg, &node_index);
}
void SubgraphBuilder::BuildLargeBodySubgraph(Subgraph* subgraph) {
enum {
kInputCounter,
kInputValue,
kOutputCounter,
kOutputValue,
kConstStep,
kConstSum,
kIntermediateTensor0,
kTensorCount,
};
int first_new_tensor_index;
ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
kTfLiteOk);
ASSERT_EQ(first_new_tensor_index, 0);
ASSERT_EQ(subgraph->SetInputs({kInputCounter, kInputValue}), kTfLiteOk);
ASSERT_EQ(subgraph->SetOutputs({kOutputCounter, kOutputValue}), kTfLiteOk);
for (int i = 0; i < kTensorCount; ++i) {
SetupTensor(subgraph, i, kTfLiteInt32);
}
CreateConstantTensor(subgraph, kConstSum, {1}, {-1});
CreateConstantTensor(subgraph, kConstStep, {1}, {4});
int node_index;
TfLiteAddParams* params =
reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
params->activation = kTfLiteActNone;
params->pot_scale_int16 = false;
auto* add_reg0 = ops::builtin::Register_ADD();
add_reg0->builtin_code = kTfLiteBuiltinAdd;
auto* add_reg1 = ops::builtin::Register_ADD();
add_reg1->builtin_code = kTfLiteBuiltinAdd;
subgraph->AddNodeWithParameters({kInputCounter, kInputValue},
{kIntermediateTensor0}, {}, nullptr, 0,
params, add_reg0, &node_index);
auto* sum_reg = ops::builtin::Register_SUM();
sum_reg->builtin_code = kTfLiteBuiltinSum;
TfLiteReducerParams* sum_params = reinterpret_cast<TfLiteReducerParams*>(
calloc(1, sizeof(TfLiteReducerParams)));
subgraph->AddNodeWithParameters({kInputValue, kConstSum}, {kOutputCounter},
{}, nullptr, 0, sum_params, sum_reg,
&node_index);
params = reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
params->activation = kTfLiteActNone;
params->pot_scale_int16 = false;
subgraph->AddNodeWithParameters({kIntermediateTensor0, kConstStep},
{kOutputValue}, {}, nullptr, 0, params,
add_reg1, &node_index);
}
void SubgraphBuilder::BuildDynamicBodySubgraphWithAliases(Subgraph* subgraph) {
enum {
kInputCounter,
kInputValue0,
kInputValue1,
kInputValue2,
kInputValue3,
kOutputCounter,
kOutputValue0,
kOutputValue1,
kOutputValue2,
kOutputValue3,
kConstSum0,
kConstSum1,
kConstSum2,
kIntermediateTensor0,
kIntermediateTensor1,
kIntermediateTensor2,
kTensorCount,
};
int first_new_tensor_index;
ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
kTfLiteOk);
ASSERT_EQ(first_new_tensor_index, 0);
ASSERT_EQ(subgraph->SetInputs({kInputCounter, kInputValue0, kInputValue1,
kInputValue2, kInputValue3}),
kTfLiteOk);
ASSERT_EQ(subgraph->SetOutputs({kOutputCounter, kOutputValue0, kOutputValue1,
kOutputValue2, kOutputValue3}),
kTfLiteOk);
for (int i = 0; i < kTensorCount; ++i) {
SetupTensor(subgraph, i, kTfLiteInt32);
}
CreateConstantTensor(subgraph, kConstSum0, {1}, {1});
CreateConstantTensor(subgraph, kConstSum1, {1}, {2});
CreateConstantTensor(subgraph, kConstSum2, {1}, {3});
AddAddNode(subgraph, kInputCounter, kConstSum0, kOutputCounter);
AddAddNode(subgraph, kInputValue0, kInputValue1, kIntermediateTensor0);
AddAddNode(subgraph, kInputValue2, kInputValue3, kIntermediateTensor1);
AddAddNode(subgraph, kIntermediateTensor0, kIntermediateTensor1,
kIntermediateTensor2);
AddAddNode(subgraph, kIntermediateTensor2, kConstSum0, kOutputValue0);
AddAddNode(subgraph, kIntermediateTensor2, kConstSum1, kOutputValue1);
AddAddNode(subgraph, kIntermediateTensor2, kConstSum2, kOutputValue2);
AddAddNode(subgraph, kIntermediateTensor2, kConstSum2, kOutputValue3);
}
void SubgraphBuilder::BuildDynamicIncreasingSizeSubgraph(Subgraph* subgraph) {
enum {
kInputCounter,
kInputValue0,
kInputValue1,
kInputValue2,
kOutputCounter,
kOutputValue0,
kOutputValue1,
kOutputValue2,
kConstSum,
kIntermediateTensor0,
kIntermediateTensor1,
kIntermediateTensor2,
kTensorCount,
};
int first_new_tensor_index;
ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
kTfLiteOk);
ASSERT_EQ(first_new_tensor_index, 0);
ASSERT_EQ(subgraph->SetInputs(
{kInputCounter, kInputValue0, kInputValue1, kInputValue2}),
kTfLiteOk);
ASSERT_EQ(subgraph->SetOutputs(
{kOutputCounter, kOutputValue0, kOutputValue1, kOutputValue2}),
kTfLiteOk);
for (int i = 0; i < kTensorCount; ++i) {
SetupTensor(subgraph, i, kTfLiteInt32);
}
CreateConstantTensor(subgraph, kConstSum, {1}, {1});
AddAddNode(subgraph, kInputCounter, kConstSum, kOutputCounter);
AddTileNode(subgraph, kInputValue0, kInputCounter, kIntermediateTensor0);
AddAddNode(subgraph, kInputValue1, kConstSum, kIntermediateTensor1);
AddAddNode(subgraph, kInputValue2, kConstSum, kIntermediateTensor2);
AddAddNode(subgraph, kIntermediateTensor0, kConstSum, kOutputValue0);
AddAddNode(subgraph, kIntermediateTensor1, kConstSum, kOutputValue1);
AddAddNode(subgraph, kIntermediateTensor2, kConstSum, kOutputValue2);
}
void SubgraphBuilder::BuildLargePadSubgraph(Subgraph* subgraph,
const std::vector<int> padding) {
enum {
kInputCounter,
kInputValue0,
kInputValue1,
kOutputCounter,
kOutputValue0,
kOutputValue1,
kConstPadding,
kConstSum,
kIntermediateTensor0,
kIntermediateTensor1,
kTensorCount,
};
int first_new_tensor_index;
ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
kTfLiteOk);
ASSERT_EQ(first_new_tensor_index, 0);
ASSERT_EQ(subgraph->SetInputs({kInputCounter, kInputValue0, kInputValue1}),
kTfLiteOk);
ASSERT_EQ(
subgraph->SetOutputs({kOutputCounter, kOutputValue0, kOutputValue1}),
kTfLiteOk);
for (int i = 0; i < kTensorCount; ++i) {
SetupTensor(subgraph, i, kTfLiteInt32);
}
CreateConstantTensor(subgraph, kConstSum, {1}, {1});
ASSERT_EQ(padding.size() % 2, 0);
int padding_dims = padding.size();
CreateConstantTensor(subgraph, kConstPadding, {1, padding_dims}, padding);
AddAddNode(subgraph, kInputCounter, kConstSum, kIntermediateTensor0);
AddAddNode(subgraph, kInputCounter, kInputValue1, kIntermediateTensor1);
AddAddNode(subgraph, kIntermediateTensor0, kConstSum, kOutputCounter);
AddAddNode(subgraph, kIntermediateTensor1, kConstSum, kOutputValue0);
int node_index;
auto* pad_reg = ops::builtin::Register_PAD();
pad_reg->builtin_code = kTfLiteBuiltinPad;
TfLitePadParams* pad_params =
reinterpret_cast<TfLitePadParams*>(calloc(1, sizeof(TfLitePadParams)));
subgraph->AddNodeWithParameters({kOutputValue0, kConstPadding},
{kOutputValue1}, {}, nullptr, 0, pad_params,
pad_reg, &node_index);
}
void SubgraphBuilder::BuildDeepBodySubgraph(Subgraph* subgraph) {
enum {
kInputCounter,
kInputValue,
kOutputCounter,
kOutputValue,
kConstStep,
kIntermediateTensor,
kTensorCount,
};
int first_new_tensor_index;
ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
kTfLiteOk);
ASSERT_EQ(first_new_tensor_index, 0);
ASSERT_EQ(subgraph->SetInputs({kInputCounter, kInputValue}), kTfLiteOk);
ASSERT_EQ(subgraph->SetOutputs({kOutputCounter, kOutputValue}), kTfLiteOk);
for (int i = 0; i < kTensorCount; ++i) {
SetupTensor(subgraph, i, kTfLiteInt32);
}
CreateConstantTensor(subgraph, kConstStep, {1}, {1});
int node_index;
TfLiteAddParams* params =
reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
params->activation = kTfLiteActNone;
params->pot_scale_int16 = false;
auto* add_reg0 = ops::builtin::Register_ADD();
add_reg0->builtin_code = kTfLiteBuiltinAdd;
auto* add_reg1 = ops::builtin::Register_ADD();
add_reg1->builtin_code = kTfLiteBuiltinAdd;
auto* add_reg2 = ops::builtin::Register_ADD();
add_reg2->builtin_code = kTfLiteBuiltinAdd;
subgraph->AddNodeWithParameters({kInputCounter, kConstStep}, {kOutputCounter},
{}, nullptr, 0, params, add_reg0,
&node_index);
params = reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
params->activation = kTfLiteActNone;
params->pot_scale_int16 = false;
subgraph->AddNodeWithParameters({kOutputCounter, kInputValue},
{kIntermediateTensor}, {}, nullptr, 0, params,
add_reg1, &node_index);
params = reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
params->activation = kTfLiteActNone;
params->pot_scale_int16 = false;
subgraph->AddNodeWithParameters({kIntermediateTensor, kConstStep},
{kOutputValue}, {}, nullptr, 0, params,
add_reg2, &node_index);
}
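// Body subgraph for WHILE tests: counter_out = counter_in + 1 and
// value_out = counter_out + value_in.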
void SubgraphBuilder::BuildAccumulateLoopBodySubgraph(Subgraph* subgraph) {
const int kInputCounter = 0;
const int kInputValue = 1;
const int kOutputCounter = 2;
const int kOutputValue = 3;
const int kConstStep = 4;
const int kTensorCount = 5;
int first_new_tensor_index;
ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
kTfLiteOk);
ASSERT_EQ(first_new_tensor_index, 0);
ASSERT_EQ(subgraph->SetInputs({kInputCounter, kInputValue}), kTfLiteOk);
ASSERT_EQ(subgraph->SetOutputs({kOutputCounter, kOutputValue}), kTfLiteOk);
SetupTensor(subgraph, kInputCounter, kTfLiteInt32);
SetupTensor(subgraph, kInputValue, kTfLiteInt32);
SetupTensor(subgraph, kOutputCounter, kTfLiteInt32);
SetupTensor(subgraph, kOutputValue, kTfLiteInt32);
CreateConstantTensor(subgraph, kConstStep, {1}, {1});
int node_index;
TfLiteAddParams* params =
reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
params->activation = kTfLiteActNone;
params->pot_scale_int16 = false;
auto* add_reg = ops::builtin::Register_ADD();
add_reg->builtin_code = kTfLiteBuiltinAdd;
subgraph->AddNodeWithParameters({0, 4}, {2}, {}, nullptr, 0, params, add_reg,
&node_index);
params = reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
params->activation = kTfLiteActNone;
params->pot_scale_int16 = false;
subgraph->AddNodeWithParameters({2, 1}, {3}, {}, nullptr, 0, params, add_reg,
&node_index);
}
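// Body subgraph whose value output grows every iteration:
// counter_out = counter_in + 1 and value_out = Pad(value_in, padding).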
void SubgraphBuilder::BuildPadLoopBodySubgraph(
Subgraph* subgraph, const std::vector<int>& padding) {
const int kInputCounter = 0;
const int kInputValue = 1;
const int kOutputCounter = 2;
const int kOutputValue = 3;
const int kConstStep = 4;
const int kConstPadding = 5;
const int kTensorCount = 6;
int first_new_tensor_index;
ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
kTfLiteOk);
ASSERT_EQ(first_new_tensor_index, 0);
ASSERT_EQ(subgraph->SetInputs({kInputCounter, kInputValue}), kTfLiteOk);
ASSERT_EQ(subgraph->SetOutputs({kOutputCounter, kOutputValue}), kTfLiteOk);
SetupTensor(subgraph, kInputCounter, kTfLiteInt32);
SetupTensor(subgraph, kInputValue, kTfLiteInt32);
SetupTensor(subgraph, kOutputCounter, kTfLiteInt32);
SetupTensor(subgraph, kOutputValue, kTfLiteInt32);
CreateConstantTensor(subgraph, kConstStep, {1}, {1});
ASSERT_EQ(padding.size() % 2, 0);
int padding_dims = padding.size();
CreateConstantTensor(subgraph, kConstPadding, {1, padding_dims}, padding);
int node_index;
TfLiteAddParams* add_params =
reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
add_params->activation = kTfLiteActNone;
auto* add_reg = ops::builtin::Register_ADD();
add_reg->builtin_code = kTfLiteBuiltinAdd;
subgraph->AddNodeWithParameters({kInputCounter, kConstStep}, {kOutputCounter},
{}, nullptr, 0, add_params, add_reg,
&node_index);
TfLitePadParams* pad_params =
    reinterpret_cast<TfLitePadParams*>(malloc(sizeof(TfLitePadParams)));
auto* pad_reg = ops::builtin::Register_PAD();
pad_reg->builtin_code = kTfLiteBuiltinPad;
subgraph->AddNodeWithParameters({kInputValue, kConstPadding}, {kOutputValue},
{}, nullptr, 0, pad_params, pad_reg,
&node_index);
}
void SubgraphBuilder::BuildOutputNotConsumedIfSubgraph(Subgraph* subgraph) {
enum {
kInput0,
kInput1,
kInput2,
kInput3,
kOutput0,
kOutput1,
kOutput2,
kTensorCount
};
int num_inputs = 4;
int num_outputs = 3;
int first_new_tensor_index;
ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
kTfLiteOk);
ASSERT_EQ(first_new_tensor_index, 0);
std::vector<int> input_tensors(num_inputs);
std::vector<int> output_tensors(num_outputs);
for (int i = 0; i < num_inputs; ++i) {
input_tensors[i] = i;
}
for (int i = 0; i < num_outputs; ++i) {
output_tensors[i] = i + num_inputs;
}
ASSERT_EQ(subgraph->SetInputs(input_tensors), kTfLiteOk);
ASSERT_EQ(subgraph->SetOutputs(output_tensors), kTfLiteOk);
SetupTensor(subgraph, input_tensors[0], kTfLiteBool);
for (int i = 1; i < num_inputs; ++i) {
SetupTensor(subgraph, input_tensors[i], kTfLiteInt32);
}
for (int i = 0; i < num_outputs; ++i) {
SetupTensor(subgraph, output_tensors[i], kTfLiteInt32);
}
TfLiteIfParams* params =
reinterpret_cast<TfLiteIfParams*>(malloc(sizeof(TfLiteIfParams)));
params->then_subgraph_index = 1;
params->else_subgraph_index = 2;
auto* if_reg = ops::builtin::Register_IF();
if_reg->builtin_code = kTfLiteBuiltinIf;
int node_index;
subgraph->AddNodeWithParameters(input_tensors, output_tensors, {}, nullptr, 0,
params, if_reg, &node_index);
}
void SubgraphBuilder::BuildOutputNotConsumedWhileSubgraph(Subgraph* subgraph) {
enum {
kInput0,
kInput1,
kInput2,
kOutput0,
kOutput1,
kOutput2,
kTensorCount
};
int first_new_tensor_index;
ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
kTfLiteOk);
ASSERT_EQ(first_new_tensor_index, 0);
ASSERT_EQ(subgraph->SetInputs({kInput0, kInput1, kInput2}), kTfLiteOk);
ASSERT_EQ(subgraph->SetOutputs({kOutput0}), kTfLiteOk);
for (int i = 0; i < kTensorCount; ++i) {
SetupTensor(subgraph, i, kTfLiteInt32);
}
TfLiteWhileParams* params =
reinterpret_cast<TfLiteWhileParams*>(malloc(sizeof(TfLiteWhileParams)));
params->cond_subgraph_index = 1;
params->body_subgraph_index = 2;
auto* while_reg = ops::builtin::Register_WHILE();
while_reg->builtin_code = kTfLiteBuiltinWhile;
int node_index;
subgraph->AddNodeWithParameters({0, 1, 2}, {3, 4, 5}, {}, nullptr, 0, params,
while_reg, &node_index);
}
void SubgraphBuilder::BuildFloatIfSubgraph(Subgraph* subgraph, int num_inputs) {
int num_outputs = num_inputs - 1;
int first_new_tensor_index;
ASSERT_EQ(
subgraph->AddTensors(num_inputs + num_outputs, &first_new_tensor_index),
kTfLiteOk);
ASSERT_EQ(first_new_tensor_index, 0);
std::vector<int> input_tensors(num_inputs);
std::vector<int> output_tensors(num_outputs);
for (int i = 0; i < num_inputs; ++i) {
input_tensors[i] = i;
}
for (int i = 0; i < num_outputs; ++i) {
output_tensors[i] = i + num_inputs;
}
ASSERT_EQ(subgraph->SetInputs(input_tensors), kTfLiteOk);
ASSERT_EQ(subgraph->SetOutputs(output_tensors), kTfLiteOk);
SetupTensor(subgraph, input_tensors[0], kTfLiteBool);
for (int i = 1; i < num_inputs; ++i) {
SetupTensor(subgraph, input_tensors[i], kTfLiteFloat32);
}
for (int i = 0; i < num_outputs; ++i) {
SetupTensor(subgraph, output_tensors[i], kTfLiteFloat32);
}
TfLiteIfParams* params =
    reinterpret_cast<TfLiteIfParams*>(malloc(sizeof(TfLiteIfParams)));
params->then_subgraph_index = 1;
params->else_subgraph_index = 2;
auto* if_reg = ops::builtin::Register_IF();
if_reg->builtin_code = kTfLiteBuiltinIf;
int node_index;
subgraph->AddNodeWithParameters(input_tensors, output_tensors, {}, nullptr, 0,
params, if_reg, &node_index);
}
void SubgraphBuilder::BuildFloatWhileSubgraph(Subgraph* subgraph,
int num_inputs) {
int first_new_tensor_index;
ASSERT_EQ(subgraph->AddTensors(num_inputs * 2, &first_new_tensor_index),
kTfLiteOk);
ASSERT_EQ(first_new_tensor_index, 0);
std::vector<int> input_tensors(num_inputs);
std::vector<int> output_tensors(num_inputs);
for (int i = 0; i < num_inputs; ++i) {
input_tensors[i] = i;
output_tensors[i] = i + num_inputs;
}
ASSERT_EQ(subgraph->SetInputs(input_tensors), kTfLiteOk);
ASSERT_EQ(subgraph->SetOutputs(output_tensors), kTfLiteOk);
for (int i = 0; i < num_inputs; ++i) {
SetupTensor(subgraph, input_tensors[i], kTfLiteFloat32);
SetupTensor(subgraph, output_tensors[i], kTfLiteFloat32);
}
TfLiteWhileParams* params =
reinterpret_cast<TfLiteWhileParams*>(malloc(sizeof(TfLiteWhileParams)));
params->cond_subgraph_index = 1;
params->body_subgraph_index = 2;
auto* while_reg = ops::builtin::Register_WHILE();
while_reg->builtin_code = kTfLiteBuiltinWhile;
int node_index;
subgraph->AddNodeWithParameters(input_tensors, output_tensors, {}, nullptr, 0,
params, while_reg, &node_index);
}
void SubgraphBuilder::BuildMultiInputIfSubgraphWithUnconsumedOutput(
Subgraph* subgraph, int num_inputs) {
int num_outputs = num_inputs - 1;
int first_new_tensor_index;
ASSERT_EQ(
subgraph->AddTensors(num_inputs + num_outputs, &first_new_tensor_index),
kTfLiteOk);
ASSERT_EQ(first_new_tensor_index, 0);
std::vector<int> input_tensors(num_inputs);
std::vector<int> output_tensors(num_outputs);
for (int i = 0; i < num_inputs; ++i) {
input_tensors[i] = i;
}
for (int i = 0; i < num_outputs; ++i) {
output_tensors[i] = i + num_inputs;
}
SetupTensor(subgraph, input_tensors[0], kTfLiteBool);
for (int i = 1; i < num_inputs; ++i) {
SetupTensor(subgraph, input_tensors[i], kTfLiteInt32);
}
for (int i = 0; i < num_outputs; ++i) {
SetupTensor(subgraph, output_tensors[i], kTfLiteInt32);
}
TfLiteIfParams* params =
reinterpret_cast<TfLiteIfParams*>(malloc(sizeof(TfLiteIfParams)));
params->then_subgraph_index = 1;
params->else_subgraph_index = 2;
auto* if_reg = ops::builtin::Register_IF();
if_reg->builtin_code = kTfLiteBuiltinIf;
int node_index;
subgraph->AddNodeWithParameters(input_tensors, output_tensors, {}, nullptr, 0,
params, if_reg, &node_index);
output_tensors.pop_back();
ASSERT_EQ(subgraph->SetInputs(input_tensors), kTfLiteOk);
ASSERT_EQ(subgraph->SetOutputs(output_tensors), kTfLiteOk);
}
void SubgraphBuilder::BuildMultiInputWhileSubgraphWithUnconsumedOutput(
Subgraph* subgraph, int num_inputs) {
int first_new_tensor_index;
ASSERT_EQ(subgraph->AddTensors(num_inputs * 2, &first_new_tensor_index),
kTfLiteOk);
ASSERT_EQ(first_new_tensor_index, 0);
std::vector<int> input_tensors(num_inputs);
std::vector<int> output_tensors(num_inputs);
for (int i = 0; i < num_inputs; ++i) {
input_tensors[i] = i;
output_tensors[i] = i + num_inputs;
}
for (int i = 0; i < num_inputs; ++i) {
SetupTensor(subgraph, input_tensors[i], kTfLiteInt32);
SetupTensor(subgraph, output_tensors[i], kTfLiteInt32);
}
TfLiteWhileParams* params =
reinterpret_cast<TfLiteWhileParams*>(malloc(sizeof(TfLiteWhileParams)));
params->cond_subgraph_index = 1;
params->body_subgraph_index = 2;
auto* while_reg = ops::builtin::Register_WHILE();
while_reg->builtin_code = kTfLiteBuiltinWhile;
int node_index;
subgraph->AddNodeWithParameters(input_tensors, output_tensors, {}, nullptr, 0,
params, while_reg, &node_index);
output_tensors.pop_back();
ASSERT_EQ(subgraph->SetInputs(input_tensors), kTfLiteOk);
ASSERT_EQ(subgraph->SetOutputs(output_tensors), kTfLiteOk);
}
void SubgraphBuilder::BuildMultiInputIfSubgraph(Subgraph* subgraph,
int num_inputs) {
int num_outputs = num_inputs - 1;
int first_new_tensor_index;
ASSERT_EQ(
subgraph->AddTensors(num_inputs + num_outputs, &first_new_tensor_index),
kTfLiteOk);
ASSERT_EQ(first_new_tensor_index, 0);
std::vector<int> input_tensors(num_inputs);
std::vector<int> output_tensors(num_outputs);
for (int i = 0; i < num_inputs; ++i) {
input_tensors[i] = i;
}
for (int i = 0; i < num_outputs; ++i) {
output_tensors[i] = i + num_inputs;
}
ASSERT_EQ(subgraph->SetInputs(input_tensors), kTfLiteOk);
ASSERT_EQ(subgraph->SetOutputs(output_tensors), kTfLiteOk);
SetupTensor(subgraph, input_tensors[0], kTfLiteBool);
for (int i = 1; i < num_inputs; ++i) {
SetupTensor(subgraph, input_tensors[i], kTfLiteInt32);
}
for (int i = 0; i < num_outputs; ++i) {
SetupTensor(subgraph, output_tensors[i], kTfLiteInt32);
}
TfLiteIfParams* params =
reinterpret_cast<TfLiteIfParams*>(malloc(sizeof(TfLiteIfParams)));
params->then_subgraph_index = 1;
params->else_subgraph_index = 2;
auto* if_reg = ops::builtin::Register_IF();
if_reg->builtin_code = kTfLiteBuiltinIf;
int node_index;
subgraph->AddNodeWithParameters(input_tensors, output_tensors, {}, nullptr, 0,
params, if_reg, &node_index);
}
void SubgraphBuilder::BuildMultiInputWhileSubgraph(Subgraph* subgraph,
int num_inputs) {
int first_new_tensor_index;
ASSERT_EQ(subgraph->AddTensors(num_inputs * 2, &first_new_tensor_index),
kTfLiteOk);
ASSERT_EQ(first_new_tensor_index, 0);
std::vector<int> input_tensors(num_inputs);
std::vector<int> output_tensors(num_inputs);
for (int i = 0; i < num_inputs; ++i) {
input_tensors[i] = i;
output_tensors[i] = i + num_inputs;
}
ASSERT_EQ(subgraph->SetInputs(input_tensors), kTfLiteOk);
ASSERT_EQ(subgraph->SetOutputs(output_tensors), kTfLiteOk);
for (int i = 0; i < num_inputs; ++i) {
SetupTensor(subgraph, input_tensors[i], kTfLiteInt32);
SetupTensor(subgraph, output_tensors[i], kTfLiteInt32);
}
TfLiteWhileParams* params =
reinterpret_cast<TfLiteWhileParams*>(malloc(sizeof(TfLiteWhileParams)));
params->cond_subgraph_index = 1;
params->body_subgraph_index = 2;
auto* while_reg = ops::builtin::Register_WHILE();
while_reg->builtin_code = kTfLiteBuiltinWhile;
int node_index;
subgraph->AddNodeWithParameters(input_tensors, output_tensors, {}, nullptr, 0,
params, while_reg, &node_index);
}
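// Plain two-tensor WHILE wrapper: subgraph 1 is used as the condition and
// subgraph 2 as the body.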
void SubgraphBuilder::BuildWhileSubgraph(Subgraph* subgraph) {
const int kInput1 = 0;
const int kInput2 = 1;
const int kOutput1 = 2;
const int kOutput2 = 3;
const int kTensorCount = 4;
int first_new_tensor_index;
ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
kTfLiteOk);
ASSERT_EQ(first_new_tensor_index, 0);
ASSERT_EQ(subgraph->SetInputs({kInput1, kInput2}), kTfLiteOk);
ASSERT_EQ(subgraph->SetOutputs({kOutput1, kOutput2}), kTfLiteOk);
SetupTensor(subgraph, kInput1, kTfLiteInt32);
SetupTensor(subgraph, kInput2, kTfLiteInt32);
SetupTensor(subgraph, kOutput1, kTfLiteInt32);
SetupTensor(subgraph, kOutput2, kTfLiteInt32);
TfLiteWhileParams* params =
reinterpret_cast<TfLiteWhileParams*>(malloc(sizeof(TfLiteWhileParams)));
params->cond_subgraph_index = 1;
params->body_subgraph_index = 2;
auto* while_reg = ops::builtin::Register_WHILE();
while_reg->builtin_code = kTfLiteBuiltinWhile;
int node_index;
subgraph->AddNodeWithParameters({0, 1}, {2, 3}, {}, nullptr, 0, params,
while_reg, &node_index);
}
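// Uses the custom RANDOM_INT op above to produce a value and assigns it to
// the resource variable with id 1024. The subgraph has no inputs or
// outputs; it only mutates variable state.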
void SubgraphBuilder::BuildAssignRandomValueToVariableSubgraph(
Subgraph* subgraph) {
const int kConstResourceId = 0;
const int kRandomValue = 1;
const int kTensorCount = 3;
int first_new_tensor_index;
ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
kTfLiteOk);
ASSERT_EQ(subgraph->SetInputs({}), kTfLiteOk);
ASSERT_EQ(subgraph->SetOutputs({}), kTfLiteOk);
SetupTensor(subgraph, kRandomValue, kTfLiteInt32);
CreateConstantTensor(subgraph, kConstResourceId, {1}, {1024});
int node_index;
subgraph->AddNodeWithParameters({}, {kRandomValue}, {}, nullptr, 0, nullptr,
::tflite::ops::custom::Register_RANDOM_INT(),
&node_index);
subgraph->AddNodeWithParameters(
{kConstResourceId, kRandomValue}, {}, {}, nullptr, 0, nullptr,
::tflite::ops::builtin::Register_ASSIGN_VARIABLE(), &node_index);
}
void SubgraphBuilder::BuildCallOnceAndReadVariableSubgraph(Subgraph* subgraph) {
const int kConstResourceId = 0;
const int kOutput = 1;
const int kTensorCount = 2;
int first_new_tensor_index;
ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
kTfLiteOk);
ASSERT_EQ(subgraph->SetInputs({}), kTfLiteOk);
ASSERT_EQ(subgraph->SetOutputs({kOutput}), kTfLiteOk);
SetupTensor(subgraph, kOutput, kTfLiteInt32);
CreateConstantTensor(subgraph, kConstResourceId, {1}, {1024});
TfLiteCallOnceParams* params = reinterpret_cast<TfLiteCallOnceParams*>(
malloc(sizeof(TfLiteCallOnceParams)));
params->init_subgraph_index = 1;
int node_index;
subgraph->AddNodeWithParameters({}, {}, {}, nullptr, 0, params,
::tflite::ops::builtin::Register_CALL_ONCE(),
&node_index);
subgraph->AddNodeWithParameters(
{kConstResourceId}, {kOutput}, {}, nullptr, 0, nullptr,
::tflite::ops::builtin::Register_READ_VARIABLE(), &node_index);
}
void SubgraphBuilder::BuildCallOnceAndReadVariablePlusOneSubgraph(
Subgraph* subgraph) {
const int kConstResourceId = 0;
const int kConstOne = 1;
const int kReadVariableResult = 2;
const int kOutput = 3;
const int kTensorCount = 4;
int first_new_tensor_index;
ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
kTfLiteOk);
ASSERT_EQ(subgraph->SetInputs({}), kTfLiteOk);
ASSERT_EQ(subgraph->SetOutputs({kOutput}), kTfLiteOk);
SetupTensor(subgraph, kReadVariableResult, kTfLiteInt32);
SetupTensor(subgraph, kOutput, kTfLiteInt32);
CreateConstantTensor(subgraph, kConstResourceId, {1}, {1024});
CreateConstantTensor(subgraph, kConstOne, {1}, {1});
TfLiteCallOnceParams* params = reinterpret_cast<TfLiteCallOnceParams*>(
malloc(sizeof(TfLiteCallOnceParams)));
params->init_subgraph_index = 1;
int node_index;
subgraph->AddNodeWithParameters({}, {}, {}, nullptr, 0, params,
::tflite::ops::builtin::Register_CALL_ONCE(),
&node_index);
subgraph->AddNodeWithParameters(
{kConstResourceId}, {kReadVariableResult}, {}, nullptr, 0, nullptr,
::tflite::ops::builtin::Register_READ_VARIABLE(), &node_index);
TfLiteAddParams* add_params =
reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
add_params->activation = kTfLiteActNone;
subgraph->AddNodeWithParameters(
{kReadVariableResult, kConstOne}, {kOutput}, {}, nullptr, 0, add_params,
::tflite::ops::builtin::Register_ADD(), &node_index);
}
void SubgraphBuilder::BuildLessEqualCondSubgraphWithDynamicTensor(
Subgraph* subgraph, int rhs) {
const int kStringInput1 = 0;
const int kStringInput2 = 1;
const int kIntegerInput = 2;
const int kOutput = 3;
const int kConstRhs = 4;
const int kTensorCount = 5;
int first_new_tensor_index;
ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
kTfLiteOk);
ASSERT_EQ(first_new_tensor_index, 0);
ASSERT_EQ(subgraph->SetInputs({kStringInput1, kStringInput2, kIntegerInput}),
kTfLiteOk);
ASSERT_EQ(subgraph->SetOutputs({kOutput}), kTfLiteOk);
SetupTensor(subgraph, kStringInput1, kTfLiteString);
SetupTensor(subgraph, kStringInput2, kTfLiteString);
SetupTensor(subgraph, kIntegerInput, kTfLiteInt32);
SetupTensor(subgraph, kOutput, kTfLiteBool);
auto* le_reg = ops::builtin::Register_LESS_EQUAL();
le_reg->builtin_code = kTfLiteBuiltinLessEqual;
CreateConstantTensor(subgraph, kConstRhs, {1}, {rhs});
int node_index;
subgraph->AddNodeWithParameters({kIntegerInput, kConstRhs}, {kOutput}, {},
nullptr, 0, nullptr, le_reg, &node_index);
}
void SubgraphBuilder::BuildBodySubgraphWithDynamicTensor(Subgraph* subgraph) {
const int kStringInput1 = 0;
const int kStringInput2 = 1;
const int kIntegerInput = 2;
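// Note: kStringOutput1 reuses tensor 0 (kStringInput1); that string tensor is
// forwarded to the outputs unchanged.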
const int kStringOutput1 = 0;
const int kStringOutput2 = 4;
const int kIntegerOutput = 5;
const int kConst = 6;
const int kTensorCount = 7;
int first_new_tensor_index;
ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
kTfLiteOk);
ASSERT_EQ(first_new_tensor_index, 0);
ASSERT_EQ(subgraph->SetInputs({kStringInput1, kStringInput2, kIntegerInput}),
kTfLiteOk);
ASSERT_EQ(
subgraph->SetOutputs({kStringOutput1, kStringOutput2, kIntegerOutput}),
kTfLiteOk);
SetupTensor(subgraph, kStringInput1, kTfLiteString);
SetupTensor(subgraph, kStringInput2, kTfLiteString);
SetupTensor(subgraph, kIntegerInput, kTfLiteInt32);
SetupTensor(subgraph, kStringOutput1, kTfLiteString);
SetupTensor(subgraph, kStringOutput2, kTfLiteString);
SetupTensor(subgraph, kIntegerOutput, kTfLiteInt32);
SetupTensor(subgraph, kConst, kTfLiteInt32);
CreateConstantTensor(subgraph, kConst, {1}, {1});
AddAddNode(subgraph, kIntegerInput, kConst, kIntegerOutput);
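// The FILL node below produces kStringOutput2: a string tensor whose length is
// the incremented counter and whose elements all equal the scalar kStringInput1.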
int node_index;
auto* fill_reg = ops::builtin::Register_FILL();
fill_reg->builtin_code = kTfLiteBuiltinFill;
subgraph->AddNodeWithParameters({kIntegerOutput, kStringInput1},
{kStringOutput2}, {}, nullptr, 0, nullptr,
fill_reg, &node_index);
}
void SubgraphBuilder::BuildIfSubgraphWithDynamicTensor(Subgraph* subgraph) {
enum {
kBoolInput0,
kStringInput1,
kStringInput2,
kIntegerInput,
kStringOutput1,
kStringOutput2,
kIntegerOutput,
kTensorCount
};
int num_inputs = 4;
int num_outputs = num_inputs - 1;
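// Inputs are the condition boolean plus the three data tensors; outputs are the
// three data tensors produced by the selected branch.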
int first_new_tensor_index;
std::vector<int> input_tensors(num_inputs);
std::vector<int> output_tensors(num_outputs);
for (int i = 0; i < num_inputs; ++i) {
input_tensors[i] = i;
}
for (int i = 0; i < num_outputs; ++i) {
output_tensors[i] = i + num_inputs;
}
ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
kTfLiteOk);
ASSERT_EQ(first_new_tensor_index, 0);
ASSERT_EQ(subgraph->SetInputs(input_tensors), kTfLiteOk);
ASSERT_EQ(subgraph->SetOutputs(output_tensors), kTfLiteOk);
SetupTensor(subgraph, kBoolInput0, kTfLiteBool);
SetupTensor(subgraph, kStringInput1, kTfLiteString);
SetupTensor(subgraph, kStringInput2, kTfLiteString);
SetupTensor(subgraph, kIntegerInput, kTfLiteInt32);
SetupTensor(subgraph, kStringOutput1, kTfLiteString);
SetupTensor(subgraph, kStringOutput2, kTfLiteString);
SetupTensor(subgraph, kIntegerOutput, kTfLiteInt32);
TfLiteIfParams* params =
reinterpret_cast<TfLiteIfParams*>(malloc(sizeof(TfLiteIfParams)));
params->then_subgraph_index = 1;
params->else_subgraph_index = 2;
auto* if_reg = ops::builtin::Register_IF();
if_reg->builtin_code = kTfLiteBuiltinIf;
int node_index;
subgraph->AddNodeWithParameters(input_tensors, output_tensors, {}, nullptr, 0,
params, if_reg, &node_index);
}
void SubgraphBuilder::BuildWhileSubgraphWithDynamicTensor(Subgraph* subgraph) {
const int kStringInput1 = 0;
const int kStringInput2 = 1;
const int kIntegerInput = 2;
const int kStringOutput1 = 3;
const int kStringOutput2 = 4;
const int kIntegerOutput = 5;
const int kTensorCount = 6;
int first_new_tensor_index;
ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
kTfLiteOk);
ASSERT_EQ(first_new_tensor_index, 0);
ASSERT_EQ(subgraph->SetInputs({kStringInput1, kStringInput2, kIntegerInput}),
kTfLiteOk);
ASSERT_EQ(
subgraph->SetOutputs({kStringOutput1, kStringOutput2, kIntegerOutput}),
kTfLiteOk);
SetupTensor(subgraph, kStringInput1, kTfLiteString);
SetupTensor(subgraph, kStringInput2, kTfLiteString);
SetupTensor(subgraph, kIntegerInput, kTfLiteInt32);
SetupTensor(subgraph, kStringOutput1, kTfLiteString);
SetupTensor(subgraph, kStringOutput2, kTfLiteString);
SetupTensor(subgraph, kIntegerOutput, kTfLiteInt32);
TfLiteWhileParams* params =
reinterpret_cast<TfLiteWhileParams*>(malloc(sizeof(TfLiteWhileParams)));
params->cond_subgraph_index = 1;
params->body_subgraph_index = 2;
auto* while_reg = ops::builtin::Register_WHILE();
while_reg->builtin_code = kTfLiteBuiltinWhile;
int node_index;
subgraph->AddNodeWithParameters(
{kStringInput1, kStringInput2, kIntegerInput},
{kStringOutput1, kStringOutput2, kIntegerOutput}, {}, nullptr, 0, params,
while_reg, &node_index);
}
void FillIntTensor(TfLiteTensor* tensor, const std::vector<int32_t>& data) {
int count = NumElements(tensor);
ASSERT_EQ(count, data.size());
for (int i = 0; i < count; ++i) {
tensor->data.i32[i] = data[i];
}
}
void FillScalarStringTensor(TfLiteTensor* tensor, const std::string& data) {
StringRef str_ref;
str_ref.str = data.c_str();
str_ref.len = data.size();
DynamicBuffer buf;
buf.AddString(str_ref);
buf.WriteToTensor(tensor, TfLiteIntArrayCreate(0));
}
void CheckScalarStringTensor(const TfLiteTensor* tensor,
const std::string& data) {
ASSERT_EQ(tensor->dims->size, 0);
ASSERT_EQ(tensor->type, kTfLiteString);
StringRef str_ref = GetString(tensor, 0);
EXPECT_EQ(std::string(str_ref.str, str_ref.len), data);
}
void CheckStringTensor(const TfLiteTensor* tensor,
const std::vector<int>& shape,
const std::vector<std::string>& data) {
ASSERT_EQ(tensor->dims->size, shape.size());
for (int i = 0; i < tensor->dims->size; ++i) {
ASSERT_EQ(tensor->dims->data[i], shape[i]);
}
ASSERT_EQ(tensor->type, kTfLiteString);
int count = GetStringCount(tensor);
ASSERT_EQ(count, data.size());
for (int i = 0; i < count; ++i) {
StringRef str_ref = GetString(tensor, i);
EXPECT_EQ(std::string(str_ref.str, str_ref.len), data[i]);
}
}
void CheckIntTensor(const TfLiteTensor* tensor, const std::vector<int>& shape,
const std::vector<int32_t>& data) {
ASSERT_EQ(tensor->dims->size, shape.size());
for (int i = 0; i < tensor->dims->size; ++i) {
ASSERT_EQ(tensor->dims->data[i], shape[i]);
}
ASSERT_EQ(tensor->type, kTfLiteInt32);
int count = NumElements(tensor);
ASSERT_EQ(count, data.size());
for (int i = 0; i < count; ++i) {
EXPECT_EQ(tensor->data.i32[i], data[i]);
}
}
void CheckBoolTensor(const TfLiteTensor* tensor, const std::vector<int>& shape,
const std::vector<bool>& data) {
ASSERT_EQ(tensor->dims->size, shape.size());
for (int i = 0; i < tensor->dims->size; ++i) {
ASSERT_EQ(tensor->dims->data[i], shape[i]);
}
ASSERT_EQ(tensor->type, kTfLiteBool);
int count = NumElements(tensor);
ASSERT_EQ(count, data.size());
for (int i = 0; i < count; ++i) {
EXPECT_EQ(tensor->data.b[i], data[i]);
}
}
}
} | #include "tensorflow/lite/kernels/subgraph_test_util.h"
#include <stdint.h>
#include <memory>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/testing/util.h"
namespace tflite {
namespace subgraph_test_util {
namespace {
class SubgraphBuilderTest : public ::testing::Test {
public:
SubgraphBuilderTest()
: interpreter_(new Interpreter), builder_(new SubgraphBuilder) {}
~SubgraphBuilderTest() override {
interpreter_.reset();
builder_.reset();
}
protected:
void TestAccumulateLoopBody(int input1, int input2, int output1,
int output2) {
interpreter_ = std::make_unique<Interpreter>();
builder_->BuildAccumulateLoopBodySubgraph(
&interpreter_->primary_subgraph());
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[0]), {input1});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {input2});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output_tensor1 =
interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output_tensor1, {1}, {output1});
TfLiteTensor* output_tensor2 =
interpreter_->tensor(interpreter_->outputs()[1]);
CheckIntTensor(output_tensor2, {1}, {output2});
}
std::unique_ptr<Interpreter> interpreter_;
std::unique_ptr<SubgraphBuilder> builder_;
};
TEST_F(SubgraphBuilderTest, TestBuildAddSubgraph) {
builder_->BuildAddSubgraph(&interpreter_->primary_subgraph());
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {2});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1, 2});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[0]), {5, 7});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {1, 2});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output, {1, 2}, {6, 9});
}
TEST_F(SubgraphBuilderTest, TestBuildMulSubgraph) {
builder_->BuildMulSubgraph(&interpreter_->primary_subgraph());
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {2});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1, 2});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[0]), {5, 7});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {1, 2});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output, {1, 2}, {5, 14});
}
TEST_F(SubgraphBuilderTest, TestBuildPadSubgraph) {
builder_->BuildPadSubgraph(&interpreter_->primary_subgraph());
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {2});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1, 2});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[0]), {5, 7});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {1, 2});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output, {5}, {0, 5, 7, 0, 0});
}
TEST_F(SubgraphBuilderTest, TestBuildDynamicPadSubgraph) {
builder_->BuildPadSubgraph(&interpreter_->primary_subgraph());
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {2});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1, 2});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[0]), {5, 7});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {1, 2});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output = interpreter_->tensor(interpreter_->outputs()[0]);
EXPECT_TRUE(IsDynamicTensor(output));
CheckIntTensor(output, {5}, {0, 5, 7, 0, 0});
}
TEST_F(SubgraphBuilderTest, TestBuildLessEqualCondSubgraph) {
builder_->BuildLessEqualCondSubgraph(&interpreter_->primary_subgraph(), 3);
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {5});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {10, 10});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[0]),
{1, 2, 3, 4, 5});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output = interpreter_->tensor(interpreter_->outputs()[0]);
CheckBoolTensor(output, {5}, {true, true, true, false, false});
}
TEST_F(SubgraphBuilderTest, TestBuildAccumulateLoopBodySubgraph) {
TestAccumulateLoopBody(1, 1, 2, 3);
TestAccumulateLoopBody(2, 3, 3, 6);
TestAccumulateLoopBody(3, 6, 4, 10);
}
TEST_F(SubgraphBuilderTest, TestBuildPadLoopBodySubgraph) {
builder_->BuildPadLoopBodySubgraph(&interpreter_->primary_subgraph(), {1, 2});
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {5});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[0]), {1});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]),
{0, 5, 7, 0, 0});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output1, {1}, {2});
TfLiteTensor* output2 = interpreter_->tensor(interpreter_->outputs()[1]);
CheckIntTensor(output2, {8}, {0, 0, 5, 7, 0, 0, 0, 0});
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/subgraph_test_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/subgraph_test_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d0f44513-47af-46b8-8732-cca97904459b | cpp | tensorflow/tensorflow | svdf | tensorflow/lite/kernels/svdf.cc | tensorflow/lite/kernels/svdf_test.cc | #include "tensorflow/lite/kernels/internal/reference/svdf.h"
#include <cstddef>
#include <cstdint>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/quantization_util.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/tensor_utils.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace svdf {
namespace {
struct OpData {
int scratch_tensor_index;
bool float_weights_time_initialized;
int32 effective_scale_1_a;
int effective_scale_1_b;
int32 effective_scale_2_a;
int effective_scale_2_b;
bool compute_row_sums = false;
};
}
constexpr int kInputTensor = 0;
constexpr int kWeightsFeatureTensor = 1;
constexpr int kWeightsTimeTensor = 2;
constexpr int kBiasTensor = 3;
constexpr int kStateTensor = 4;
constexpr int kOutputTensor = 0;
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
auto* op_data = new OpData();
op_data->float_weights_time_initialized = false;
context->AddTensors(context, /*tensors_to_add=*/6,
&op_data->scratch_tensor_index);
return op_data;
}
void Free(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<OpData*>(buffer);
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
const auto* params = reinterpret_cast<TfLiteSVDFParams*>(node->builtin_data);
OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
int scratch_tensor_index = op_data->scratch_tensor_index;
TF_LITE_ENSURE_EQ(context, node->outputs->size, 1);
TF_LITE_ENSURE_EQ(context, node->inputs->size, 5);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const TfLiteTensor* weights_feature;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kWeightsFeatureTensor,
&weights_feature));
const TfLiteTensor* weights_time;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, kWeightsTimeTensor, &weights_time));
TF_LITE_ENSURE(context,
input->type == kTfLiteFloat32 || input->type == kTfLiteInt8);
const int rank = params->rank;
const int batch_size = input->dims->data[0];
const int num_filters = weights_feature->dims->data[0];
TF_LITE_ENSURE(context, rank != 0);
TF_LITE_ENSURE_EQ(context, num_filters % rank, 0);
const int num_units = num_filters / rank;
const int memory_size = weights_time->dims->data[1];
TF_LITE_ENSURE_EQ(context, input->dims->data[1],
weights_feature->dims->data[1]);
TF_LITE_ENSURE_EQ(context, weights_time->dims->data[0], num_filters);
const TfLiteTensor* bias = GetOptionalInputTensor(context, node, kBiasTensor);
if (bias) {
TF_LITE_ENSURE_EQ(context, bias->dims->data[0], num_units);
}
const TfLiteTensor* state;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kStateTensor, &state));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TF_LITE_ENSURE_EQ(context, NumDimensions(state), 2);
TF_LITE_ENSURE_EQ(context, SizeOfDimension(state, 0), batch_size);
TF_LITE_ENSURE_EQ(context, SizeOfDimension(state, 1),
memory_size * num_filters);
TfLiteIntArray* output_size_array = TfLiteIntArrayCreate(2);
output_size_array->data[0] = batch_size;
output_size_array->data[1] = num_units;
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, output, output_size_array));
const bool is_hybrid_op = IsHybridOp(input, weights_feature);
const bool is_full_integer = input->type == kTfLiteInt8;
TfLiteIntArrayFree(node->temporaries);
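// Temporaries: hybrid needs 6 (scratch, quantized input, scaling factors,
// dequantized time weights, zero points, row sums); full-integer needs 2
// (scratch + int32 output buffer); plain float needs only the scratch.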
if (is_hybrid_op) {
node->temporaries = TfLiteIntArrayCreate(6);
} else if (is_full_integer) {
node->temporaries = TfLiteIntArrayCreate(2);
} else {
node->temporaries = TfLiteIntArrayCreate(1);
}
node->temporaries->data[0] = scratch_tensor_index;
TfLiteIntArray* scratch_size_array = TfLiteIntArrayCreate(2);
scratch_size_array->data[0] = batch_size;
scratch_size_array->data[1] = num_filters;
TfLiteTensor* scratch_tensor;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, 0, &scratch_tensor));
if (is_full_integer) {
scratch_tensor->type = kTfLiteInt32;
} else {
scratch_tensor->type = kTfLiteFloat32;
}
scratch_tensor->allocation_type = kTfLiteArenaRw;
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scratch_tensor,
scratch_size_array));
if (is_hybrid_op) {
op_data->compute_row_sums = true;
node->temporaries->data[1] = scratch_tensor_index + 1;
TfLiteTensor* input_quantized;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, 1,
&input_quantized));
input_quantized->type = weights_feature->type;
input_quantized->allocation_type = kTfLiteArenaRw;
if (!TfLiteIntArrayEqual(input_quantized->dims, input->dims)) {
TfLiteIntArray* input_quantized_size = TfLiteIntArrayCopy(input->dims);
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, input_quantized,
input_quantized_size));
}
node->temporaries->data[2] = scratch_tensor_index + 2;
TfLiteTensor* scaling_factors;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, 2,
&scaling_factors));
scaling_factors->type = kTfLiteFloat32;
scaling_factors->allocation_type = kTfLiteArenaRw;
int scaling_dims[1] = {batch_size};
if (!TfLiteIntArrayEqualsArray(scaling_factors->dims, 1, scaling_dims)) {
TfLiteIntArray* scaling_factors_size = TfLiteIntArrayCreate(1);
scaling_factors_size->data[0] = batch_size;
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scaling_factors,
scaling_factors_size));
}
node->temporaries->data[3] = scratch_tensor_index + 3;
TfLiteTensor* float_weights_time;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, 3,
&float_weights_time));
float_weights_time->type = kTfLiteFloat32;
float_weights_time->name = "Svdf_float_weights_time";
float_weights_time->allocation_type = kTfLiteArenaRwPersistent;
if (!TfLiteIntArrayEqual(float_weights_time->dims, weights_time->dims)) {
TfLiteIntArray* float_weights_time_size =
TfLiteIntArrayCopy(weights_time->dims);
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, float_weights_time,
float_weights_time_size));
}
node->temporaries->data[4] = scratch_tensor_index + 4;
TfLiteTensor* zero_points;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, 4, &zero_points));
zero_points->type = kTfLiteFloat32;
zero_points->allocation_type = kTfLiteArenaRw;
int zero_points_dims[1] = {batch_size};
if (!TfLiteIntArrayEqualsArray(zero_points->dims, 1, zero_points_dims)) {
TfLiteIntArray* zero_points_size = TfLiteIntArrayCreate(1);
zero_points_size->data[0] = zero_points_dims[0];
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, zero_points,
zero_points_size));
}
node->temporaries->data[5] = scratch_tensor_index + 5;
TfLiteTensor* row_sums;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, 5, &row_sums));
row_sums->type = kTfLiteFloat32;
float_weights_time->name = "Svdf_row_sums";
row_sums->allocation_type = kTfLiteArenaRwPersistent;
int row_sums_dims[1] = {num_filters};
if (!TfLiteIntArrayEqualsArray(row_sums->dims, 1, row_sums_dims)) {
TfLiteIntArray* row_sums_size = TfLiteIntArrayCreate(1);
row_sums_size->data[0] = row_sums_dims[0];
TF_LITE_ENSURE_OK(
context, context->ResizeTensor(context, row_sums, row_sums_size));
}
}
if (is_full_integer) {
TfLiteIntArray* output_temp_size_array = TfLiteIntArrayCreate(2);
output_temp_size_array->data[0] = num_units;
output_temp_size_array->data[1] = batch_size;
node->temporaries->data[1] = scratch_tensor_index + 1;
TfLiteTensor* output_temp;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, 1, &output_temp));
output_temp->type = kTfLiteInt32;
output_temp->allocation_type = kTfLiteArenaRw;
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, output_temp,
output_temp_size_array));
TF_LITE_ENSURE(context, input->quantization.type != kTfLiteNoQuantization);
auto* input_params =
reinterpret_cast<TfLiteAffineQuantization*>(input->quantization.params);
TF_LITE_ENSURE(context,
weights_feature->quantization.type != kTfLiteNoQuantization);
auto* weights_feature_params = reinterpret_cast<TfLiteAffineQuantization*>(
weights_feature->quantization.params);
TF_LITE_ENSURE(context, state->quantization.type != kTfLiteNoQuantization);
auto* state_params =
reinterpret_cast<TfLiteAffineQuantization*>(state->quantization.params);
TF_LITE_ENSURE(context,
weights_time->quantization.type != kTfLiteNoQuantization);
auto* weight_time_params = reinterpret_cast<TfLiteAffineQuantization*>(
weights_time->quantization.params);
TF_LITE_ENSURE(context, output->quantization.type != kTfLiteNoQuantization);
auto* output_params = reinterpret_cast<TfLiteAffineQuantization*>(
output->quantization.params);
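// Fold the tensor scales into two fixed-point multipliers: effective_scale_1
// rescales input x weights_feature into the int16 state, effective_scale_2
// rescales state x weights_time into the int8 output.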
const double effective_scale_1 = input_params->scale->data[0] *
weights_feature_params->scale->data[0] /
state_params->scale->data[0];
const double effective_scale_2 = state_params->scale->data[0] *
weight_time_params->scale->data[0] /
output_params->scale->data[0];
QuantizeMultiplier(effective_scale_1, &op_data->effective_scale_1_a,
&op_data->effective_scale_1_b);
QuantizeMultiplier(effective_scale_2, &op_data->effective_scale_2_a,
&op_data->effective_scale_2_b);
}
return kTfLiteOk;
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
auto* params = reinterpret_cast<TfLiteSVDFParams*>(node->builtin_data);
OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const TfLiteTensor* weights_feature;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kWeightsFeatureTensor,
&weights_feature));
const TfLiteTensor* weights_time;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, kWeightsTimeTensor, &weights_time));
const TfLiteTensor* bias = GetOptionalInputTensor(context, node, kBiasTensor);
TfLiteTensor* scratch;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, 0, &scratch));
TfLiteTensor* state = GetVariableInput(context, node, kStateTensor);
TF_LITE_ENSURE(context, state != nullptr);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
switch (weights_feature->type) {
case kTfLiteFloat32: {
reference_ops::EvalFloatSVDF(
params, GetTensorShape(input), GetTensorData<float>(input),
GetTensorShape(weights_feature),
GetTensorData<float>(weights_feature), GetTensorShape(weights_time),
GetTensorData<float>(weights_time), GetTensorShape(bias),
GetTensorData<float>(bias), GetTensorData<float>(scratch),
GetTensorData<float>(state), GetTensorShape(output),
GetTensorData<float>(output));
return kTfLiteOk;
}
case kTfLiteUInt8:
case kTfLiteInt8: {
if (input->type == kTfLiteFloat32) {
TfLiteTensor* input_quantized;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, 1,
&input_quantized));
TfLiteTensor* scaling_factors;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, 2,
&scaling_factors));
TfLiteTensor* float_weights_time;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, 3,
&float_weights_time));
TfLiteTensor* zero_points;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, 4,
&zero_points));
TfLiteTensor* row_sums;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, 5, &row_sums));
if (!op_data->float_weights_time_initialized) {
const float dequantization_scale = weights_time->params.scale;
const int8_t* weights_time_ptr = GetTensorData<int8_t>(weights_time);
float* float_weights_time_ptr =
GetTensorData<float>(float_weights_time);
for (int i = 0; i < NumElements(float_weights_time); ++i) {
float_weights_time_ptr[i] =
weights_time_ptr[i] * dequantization_scale;
}
op_data->float_weights_time_initialized = true;
}
int32_t* zero_points_ptr = nullptr;
int32_t* row_sums_ptr = nullptr;
if (params->asymmetric_quantize_inputs && row_sums != nullptr) {
zero_points_ptr = GetTensorData<int32_t>(zero_points);
row_sums_ptr = GetTensorData<int32_t>(row_sums);
}
reference_ops::EvalHybridSVDF(
params, GetTensorShape(input), GetTensorData<float>(input),
GetTensorShape(weights_feature),
GetTensorData<int8_t>(weights_feature),
weights_feature->params.scale, GetTensorShape(float_weights_time),
GetTensorData<float>(float_weights_time), GetTensorShape(bias),
GetTensorData<float>(bias), GetTensorData<float>(scratch),
GetTensorData<float>(scaling_factors),
GetTensorData<int8_t>(input_quantized), GetTensorData<float>(state),
GetTensorShape(output), GetTensorData<float>(output),
zero_points_ptr, row_sums_ptr, &op_data->compute_row_sums);
return kTfLiteOk;
}
auto* input_params = reinterpret_cast<TfLiteAffineQuantization*>(
input->quantization.params);
auto* output_params = reinterpret_cast<TfLiteAffineQuantization*>(
output->quantization.params);
TfLiteTensor* output_temp;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, 1, &output_temp));
TF_LITE_ENSURE_EQ(context, params->activation, kTfLiteActRelu);
reference_ops::EvalIntegerSVDF(
params, GetTensorShape(input), GetTensorData<int8_t>(input),
GetTensorShape(weights_feature),
GetTensorData<int8_t>(weights_feature), GetTensorShape(weights_time),
GetTensorData<int16_t>(weights_time), GetTensorShape(bias),
GetTensorData<int32_t>(bias), GetTensorData<int16_t>(state),
GetTensorShape(output), GetTensorData<int8_t>(output),
GetTensorData<int32_t>(scratch), GetTensorData<int32_t>(output_temp),
op_data->effective_scale_1_a, op_data->effective_scale_1_b,
op_data->effective_scale_2_a, op_data->effective_scale_2_b,
input_params->zero_point->data[0],
output_params->zero_point->data[0]);
return kTfLiteOk;
}
default:
TF_LITE_KERNEL_LOG(context, "Type %s not currently supported.",
TfLiteTypeGetName(weights_feature->type));
return kTfLiteError;
}
}
}
TfLiteRegistration* Register_SVDF() {
static TfLiteRegistration r = {svdf::Init, svdf::Free, svdf::Prepare,
svdf::Eval};
return &r;
}
}
}
} | #include <stdint.h>
#include <initializer_list>
#include <vector>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
static float svdf_input[] = {
0.12609188, -0.46347019, -0.89598465,
0.35867718, 0.36897406, 0.73463392,
0.14278367, -1.64410412, -0.75222826,
-0.57290924, 0.12729003, 0.7567004,
0.49837467, 0.19278903, 0.26584083,
0.17660543, 0.52949083, -0.77931279,
-0.11186574, 0.13164264, -0.05349274,
-0.72674477, -0.5683046, 0.55900657,
-0.68892461, 0.37783599, 0.18263303,
-0.63690937, 0.44483393, -0.71817774,
-0.81299269, -0.86831826, 1.43940818,
-0.95760226, 1.82078898, 0.71135032,
-1.45006323, -0.82251364, -1.69082689,
-1.65087092, -1.89238167, 1.54172635,
0.03966608, -0.24936394, -0.77526885,
2.06740379, -1.51439476, 1.43768692,
0.11771342, -0.23761693, -0.65898693,
0.31088525, -1.55601168, -0.87661445,
-0.89477462, 1.67204106, -0.53235275,
-0.6230064, 0.29819036, 1.06939757,
};
static float svdf_golden_output_rank_1[] = {
0.014899, -0.0517661, -0.143725, -0.00271883,
-0.03004015, 0.09565311, 0.1587342, 0.00784263,
0.068281, -0.162217, -0.152268, 0.00323521,
0.01582633, 0.03858774, -0.03001583, -0.02671271,
-0.0317821, -0.0333089, 0.0609602, 0.0333759,
-0.01432795, 0.05524484, 0.1101355, -0.02382665,
-0.00623099, -0.077701, -0.391193, -0.0136691,
-0.02333033, 0.02293761, 0.12338032, 0.04326871,
0.201551, -0.164607, -0.179462, -0.0592739,
0.01064911, -0.17503069, 0.07821996, -0.00224009,
0.0886511, -0.0875401, -0.269283, 0.0281379,
-0.02282338, 0.09741908, 0.32973239, 0.12281385,
-0.201174, -0.586145, -0.628624, -0.0330412,
0.24780814, -0.39304617, -0.22473189, 0.02589256,
-0.0839096, -0.299329, 0.108746, 0.109808,
0.10084175, -0.06416984, 0.28936723, 0.0026358,
0.419114, -0.237824, -0.422627, 0.175115,
-0.2314795, -0.18584411, -0.4228974, -0.12928449,
0.36726, -0.522303, -0.456502, -0.175475,
0.17012937, -0.34447709, 0.38505614, -0.28158101,
};
static float svdf_golden_output_rank_2[] = {
-0.09623547, -0.10193135, 0.11083051, -0.0347917,
0.1141196, 0.12965347, -0.12652366, 0.01007236,
-0.16396809, -0.21247184, 0.11259045, -0.04156673,
0.10132131, -0.06143532, -0.00924693, 0.10084561,
0.01257364, 0.0506071, -0.19287863, -0.07162561,
-0.02033747, 0.22673416, 0.15487903, 0.02525555,
-0.1411963, -0.37054959, 0.01774767, 0.05867489,
0.09607603, -0.0141301, -0.08995658, 0.12867066,
-0.27142537, -0.16955489, 0.18521598, -0.12528358,
0.00331409, 0.11167502, 0.02218599, -0.07309391,
0.09593632, -0.28361851, -0.0773851, 0.17199151,
-0.00075242, 0.33691186, -0.1536046, 0.16572715,
-0.27916506, -0.27626723, 0.42615682, 0.3225764,
-0.37472126, -0.55655634, -0.05013514, 0.289112,
-0.24418658, 0.07540751, -0.1940318, -0.08911639,
0.00732617, 0.46737891, 0.26449674, 0.24888524,
-0.17225097, -0.54660404, -0.38795233, 0.08389944,
0.07736043, -0.28260678, 0.15666828, 1.14949894,
-0.57454878, -0.64704704, 0.73235172, -0.34616736,
0.21120001, -0.22927976, 0.02455296, -0.35906726,
};
class BaseSVDFOpModel : public SingleOpModel {
public:
BaseSVDFOpModel(int batches, int units, int input_size, int memory_size,
int rank,
TensorType weights_feature_type = TensorType_FLOAT32,
TensorType weights_time_type = TensorType_FLOAT32,
bool asymmetric_quantize_inputs = false)
: batches_(batches),
units_(units),
input_size_(input_size),
memory_size_(memory_size),
rank_(rank) {
input_ = AddInput(TensorType_FLOAT32);
weights_feature_ = AddInput(weights_feature_type);
weights_time_ = AddInput(weights_time_type);
bias_ = AddNullInput();
const int num_filters = units * rank;
activation_state_ = AddVariableInput(
TensorData{TensorType_FLOAT32, {batches, memory_size * num_filters}});
output_ = AddOutput(TensorType_FLOAT32);
SetBuiltinOp(BuiltinOperator_SVDF, BuiltinOptions_SVDFOptions,
CreateSVDFOptions(builder_, rank, ActivationFunctionType_NONE,
asymmetric_quantize_inputs)
.Union());
BuildInterpreter({
{batches_, input_size_},
{units_ * rank, input_size_},
{units_ * rank, memory_size_},
{units_},
{batches, memory_size * num_filters}
});
}
void SetWeightsFeature(std::initializer_list<float> f) {
PopulateTensor(weights_feature_, f);
}
void SetWeightsTime(std::initializer_list<float> f) {
PopulateTensor(weights_time_, f);
}
void SetInput(int offset, float* begin, float* end) {
PopulateTensor(input_, offset, begin, end);
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
int input_size() { return input_size_; }
int num_units() { return units_; }
int num_batches() { return batches_; }
protected:
int input_;
int weights_feature_;
int weights_time_;
int bias_;
int activation_state_;
int output_;
int batches_;
int units_;
int input_size_;
int memory_size_;
int rank_;
};
class SVDFOpModel : public BaseSVDFOpModel {
public:
using BaseSVDFOpModel::BaseSVDFOpModel;
};
class HybridSVDFOpModel : public BaseSVDFOpModel {
public:
HybridSVDFOpModel(int batches, int units, int input_size, int memory_size,
int rank, TensorType tensor_type,
bool asymmetric_quantize_inputs)
: BaseSVDFOpModel(batches, units, input_size, memory_size, rank,
tensor_type, tensor_type, asymmetric_quantize_inputs) {
tensor_type_ = tensor_type;
}
void SetWeights(int weights_idx, const std::vector<float>& f) {
if (tensor_type_ == TensorType_UINT8) {
SymmetricQuantizeAndPopulate(weights_idx, f);
} else {
SignedSymmetricQuantizeAndPopulate(weights_idx, f);
}
}
void SetWeightsFeature(std::initializer_list<float> f) {
SetWeights(weights_feature_, f);
}
void SetWeightsTime(std::initializer_list<float> f) {
SetWeights(weights_time_, f);
}
protected:
TensorType tensor_type_;
};
class SVDFOpTest : public ::testing::TestWithParam<bool> {
protected:
void VerifyGoldens(float golden_input[], float golden_output[],
int golden_size, BaseSVDFOpModel* svdf,
float tolerance = 1e-5) {
const int svdf_num_batches = svdf->num_batches();
const int svdf_input_size = svdf->input_size();
const int svdf_num_units = svdf->num_units();
const int input_sequence_size =
golden_size / sizeof(float) / (svdf_input_size * svdf_num_batches);
for (int i = 0; i < input_sequence_size; i++) {
float* batch_start =
golden_input + i * svdf_input_size * svdf_num_batches;
float* batch_end = batch_start + svdf_input_size * svdf_num_batches;
svdf->SetInput(0, batch_start, batch_end);
ASSERT_EQ(svdf->Invoke(), kTfLiteOk);
const float* golden_start =
golden_output + i * svdf_num_units * svdf_num_batches;
const float* golden_end =
golden_start + svdf_num_units * svdf_num_batches;
std::vector<float> expected;
expected.insert(expected.end(), golden_start, golden_end);
EXPECT_THAT(svdf->GetOutput(),
ElementsAreArray(ArrayFloatNear(expected, tolerance)));
}
}
};
INSTANTIATE_TEST_SUITE_P(SVDFOpTest, SVDFOpTest,
::testing::ValuesIn({false, true}));
TEST_F(SVDFOpTest, BlackBoxTestRank1) {
SVDFOpModel svdf(/*batches=*/2, /*units=*/4, /*input_size=*/3,
/*memory_size=*/10, /*rank=*/1);
svdf.SetWeightsFeature({-0.31930989, -0.36118156, 0.0079667, 0.37613347,
0.22197971, 0.12416199, 0.27901134, 0.27557442,
0.3905206, -0.36137494, -0.06634006, -0.10640851});
svdf.SetWeightsTime(
{-0.31930989, 0.37613347, 0.27901134, -0.36137494, -0.36118156,
0.22197971, 0.27557442, -0.06634006, 0.0079667, 0.12416199,
0.3905206, -0.10640851, -0.0976817, 0.15294972, 0.39635518,
-0.02702999, 0.39296314, 0.15785322, 0.21931258, 0.31053296,
-0.36916667, 0.38031587, -0.21580373, 0.27072677, 0.23622236,
0.34936687, 0.18174365, 0.35907319, -0.17493086, 0.324846,
-0.10781813, 0.27201805, 0.14324132, -0.23681851, -0.27115166,
-0.01580888, -0.14943552, 0.15465137, 0.09784451, -0.0337657});
VerifyGoldens(svdf_input, svdf_golden_output_rank_1, sizeof(svdf_input),
&svdf);
}
TEST_F(SVDFOpTest, BlackBoxTestRank2) {
SVDFOpModel svdf(/*batches=*/2, /*units=*/4, /*input_size=*/3,
/*memory_size=*/10, /*rank=*/2);
svdf.SetWeightsFeature({-0.31930989, 0.0079667, 0.39296314, 0.37613347,
0.12416199, 0.15785322, 0.27901134, 0.3905206,
0.21931258, -0.36137494, -0.10640851, 0.31053296,
-0.36118156, -0.0976817, -0.36916667, 0.22197971,
0.15294972, 0.38031587, 0.27557442, 0.39635518,
-0.21580373, -0.06634006, -0.02702999, 0.27072677});
svdf.SetWeightsTime(
{-0.31930989, 0.37613347, 0.27901134, -0.36137494, -0.36118156,
0.22197971, 0.27557442, -0.06634006, 0.0079667, 0.12416199,
0.3905206, -0.10640851, -0.0976817, 0.15294972, 0.39635518,
-0.02702999, 0.39296314, 0.15785322, 0.21931258, 0.31053296,
-0.36916667, 0.38031587, -0.21580373, 0.27072677, 0.23622236,
0.34936687, 0.18174365, 0.35907319, -0.17493086, 0.324846,
-0.10781813, 0.27201805, 0.14324132, -0.23681851, -0.27115166,
-0.01580888, -0.14943552, 0.15465137, 0.09784451, -0.0337657,
-0.14884081, 0.19931212, -0.36002168, 0.34663299, -0.11405486,
0.12672701, 0.39463779, -0.07886535, -0.06384811, 0.08249187,
-0.26816407, -0.19905911, 0.29211238, 0.31264046, -0.28664589,
0.05698794, 0.11613581, 0.14078894, 0.02187902, -0.21781836,
-0.15567942, 0.08693647, -0.38256618, 0.36580828, -0.22922277,
-0.0226903, 0.12878349, -0.28122205, -0.10850525, -0.11955214,
0.27179423, -0.04710215, 0.31069002, 0.22672787, 0.09580326,
0.08682203, 0.1258215, 0.1851041, 0.29228821, 0.12366763});
VerifyGoldens(svdf_input, svdf_golden_output_rank_2, sizeof(svdf_input),
&svdf);
}
TEST_P(SVDFOpTest, BlackBoxTestHybridRank1Uint8) {
HybridSVDFOpModel svdf(/*batches=*/2, /*units=*/4, /*input_size=*/3,
/*memory_size=*/10, /*rank=*/1, TensorType_UINT8,
GetParam());
svdf.SetWeightsFeature({-0.31930989, -0.36118156, 0.0079667, 0.37613347,
0.22197971, 0.12416199, 0.27901134, 0.27557442,
0.3905206, -0.36137494, -0.06634006, -0.10640851});
svdf.SetWeightsTime(
{-0.31930989, 0.37613347, 0.27901134, -0.36137494, -0.36118156,
0.22197971, 0.27557442, -0.06634006, 0.0079667, 0.12416199,
0.3905206, -0.10640851, -0.0976817, 0.15294972, 0.39635518,
-0.02702999, 0.39296314, 0.15785322, 0.21931258, 0.31053296,
-0.36916667, 0.38031587, -0.21580373, 0.27072677, 0.23622236,
0.34936687, 0.18174365, 0.35907319, -0.17493086, 0.324846,
-0.10781813, 0.27201805, 0.14324132, -0.23681851, -0.27115166,
-0.01580888, -0.14943552, 0.15465137, 0.09784451, -0.0337657});
VerifyGoldens(svdf_input, svdf_golden_output_rank_1, sizeof(svdf_input),
&svdf,
/*tolerance=*/0.004285);
}
TEST_P(SVDFOpTest, BlackBoxTestHybridRank2Uint8) {
HybridSVDFOpModel svdf(/*batches=*/2, /*units=*/4, /*input_size=*/3,
/*memory_size=*/10, /*rank=*/2, TensorType_UINT8,
GetParam());
svdf.SetWeightsFeature({-0.31930989, 0.0079667, 0.39296314, 0.37613347,
0.12416199, 0.15785322, 0.27901134, 0.3905206,
0.21931258, -0.36137494, -0.10640851, 0.31053296,
-0.36118156, -0.0976817, -0.36916667, 0.22197971,
0.15294972, 0.38031587, 0.27557442, 0.39635518,
-0.21580373, -0.06634006, -0.02702999, 0.27072677});
svdf.SetWeightsTime(
{-0.31930989, 0.37613347, 0.27901134, -0.36137494, -0.36118156,
0.22197971, 0.27557442, -0.06634006, 0.0079667, 0.12416199,
0.3905206, -0.10640851, -0.0976817, 0.15294972, 0.39635518,
-0.02702999, 0.39296314, 0.15785322, 0.21931258, 0.31053296,
-0.36916667, 0.38031587, -0.21580373, 0.27072677, 0.23622236,
0.34936687, 0.18174365, 0.35907319, -0.17493086, 0.324846,
-0.10781813, 0.27201805, 0.14324132, -0.23681851, -0.27115166,
-0.01580888, -0.14943552, 0.15465137, 0.09784451, -0.0337657,
-0.14884081, 0.19931212, -0.36002168, 0.34663299, -0.11405486,
0.12672701, 0.39463779, -0.07886535, -0.06384811, 0.08249187,
-0.26816407, -0.19905911, 0.29211238, 0.31264046, -0.28664589,
0.05698794, 0.11613581, 0.14078894, 0.02187902, -0.21781836,
-0.15567942, 0.08693647, -0.38256618, 0.36580828, -0.22922277,
-0.0226903, 0.12878349, -0.28122205, -0.10850525, -0.11955214,
0.27179423, -0.04710215, 0.31069002, 0.22672787, 0.09580326,
0.08682203, 0.1258215, 0.1851041, 0.29228821, 0.12366763});
VerifyGoldens(svdf_input, svdf_golden_output_rank_2, sizeof(svdf_input),
&svdf,
/*tolerance=*/0.007175);
}
TEST_P(SVDFOpTest, BlackBoxTestHybridRank1Int8) {
HybridSVDFOpModel svdf(/*batches=*/2, /*units=*/4, /*input_size=*/3,
/*memory_size=*/10, /*rank=*/1, TensorType_INT8,
GetParam());
svdf.SetWeightsFeature({-0.31930989, -0.36118156, 0.0079667, 0.37613347,
0.22197971, 0.12416199, 0.27901134, 0.27557442,
0.3905206, -0.36137494, -0.06634006, -0.10640851});
svdf.SetWeightsTime(
{-0.31930989, 0.37613347, 0.27901134, -0.36137494, -0.36118156,
0.22197971, 0.27557442, -0.06634006, 0.0079667, 0.12416199,
0.3905206, -0.10640851, -0.0976817, 0.15294972, 0.39635518,
-0.02702999, 0.39296314, 0.15785322, 0.21931258, 0.31053296,
-0.36916667, 0.38031587, -0.21580373, 0.27072677, 0.23622236,
0.34936687, 0.18174365, 0.35907319, -0.17493086, 0.324846,
-0.10781813, 0.27201805, 0.14324132, -0.23681851, -0.27115166,
-0.01580888, -0.14943552, 0.15465137, 0.09784451, -0.0337657});
VerifyGoldens(svdf_input, svdf_golden_output_rank_1, sizeof(svdf_input),
&svdf,
/*tolerance=*/0.004285);
}
TEST_P(SVDFOpTest, BlackBoxTestHybridRank2Int8) {
HybridSVDFOpModel svdf(/*batches=*/2, /*units=*/4, /*input_size=*/3,
/*memory_size=*/10, /*rank=*/2, TensorType_INT8,
GetParam());
svdf.SetWeightsFeature({-0.31930989, 0.0079667, 0.39296314, 0.37613347,
0.12416199, 0.15785322, 0.27901134, 0.3905206,
0.21931258, -0.36137494, -0.10640851, 0.31053296,
-0.36118156, -0.0976817, -0.36916667, 0.22197971,
0.15294972, 0.38031587, 0.27557442, 0.39635518,
-0.21580373, -0.06634006, -0.02702999, 0.27072677});
svdf.SetWeightsTime(
{-0.31930989, 0.37613347, 0.27901134, -0.36137494, -0.36118156,
0.22197971, 0.27557442, -0.06634006, 0.0079667, 0.12416199,
0.3905206, -0.10640851, -0.0976817, 0.15294972, 0.39635518,
-0.02702999, 0.39296314, 0.15785322, 0.21931258, 0.31053296,
-0.36916667, 0.38031587, -0.21580373, 0.27072677, 0.23622236,
0.34936687, 0.18174365, 0.35907319, -0.17493086, 0.324846,
-0.10781813, 0.27201805, 0.14324132, -0.23681851, -0.27115166,
-0.01580888, -0.14943552, 0.15465137, 0.09784451, -0.0337657,
-0.14884081, 0.19931212, -0.36002168, 0.34663299, -0.11405486,
0.12672701, 0.39463779, -0.07886535, -0.06384811, 0.08249187,
-0.26816407, -0.19905911, 0.29211238, 0.31264046, -0.28664589,
0.05698794, 0.11613581, 0.14078894, 0.02187902, -0.21781836,
-0.15567942, 0.08693647, -0.38256618, 0.36580828, -0.22922277,
-0.0226903, 0.12878349, -0.28122205, -0.10850525, -0.11955214,
0.27179423, -0.04710215, 0.31069002, 0.22672787, 0.09580326,
0.08682203, 0.1258215, 0.1851041, 0.29228821, 0.12366763});
VerifyGoldens(svdf_input, svdf_golden_output_rank_2, sizeof(svdf_input),
&svdf,
/*tolerance=*/0.007175);
}
class IntegerSVDFOpModel : public SingleOpModel {
public:
IntegerSVDFOpModel(int batches, int units, int input_size, int memory_size,
int rank)
: batches_(batches),
units_(units),
input_size_(input_size),
memory_size_(memory_size),
rank_(rank) {
const int num_filters = units * rank;
input_ = AddInput({TensorType_INT8, {batches, input_size}, -1, 1});
weights_feature_ =
AddInput({TensorType_INT8, {num_filters, input_size}, -0.5, 0.5});
weights_time_ =
AddInput({TensorType_INT16, {num_filters, memory_size}, -1, 1});
bias_ = AddInput({TensorType_INT32, {units}, -512, 512});
activation_state_ = AddVariableInput(
{TensorType_INT16, {batches, memory_size * num_filters}, -16, 16});
output_ = AddOutput({TensorType_INT8, {batches, units}, -0.5, 0.5});
SetBuiltinOp(
BuiltinOperator_SVDF, BuiltinOptions_SVDFOptions,
CreateSVDFOptions(builder_, rank, ActivationFunctionType_RELU).Union());
BuildInterpreter({
{batches, input_size},
{num_filters, input_size},
{num_filters, memory_size},
{units},
{batches, memory_size * num_filters}
});
}
void SetWeightsFeature(const std::vector<float>& f) {
QuantizeAndPopulate<int8_t>(weights_feature_, f);
}
void SetWeightsTime(const std::vector<float>& f) {
QuantizeAndPopulate<int16_t>(weights_time_, f);
}
void SetBias(const std::vector<float>& f) {
QuantizeAndPopulate<int32_t>(bias_, f);
}
void SetInput(const std::vector<float>& f) {
QuantizeAndPopulate<int8_t>(input_, f);
}
std::vector<int8_t> GetOutput() { return ExtractVector<int8_t>(output_); }
protected:
int input_;
int weights_feature_;
int weights_time_;
int bias_;
int activation_state_;
int output_;
int batches_;
int units_;
int input_size_;
int memory_size_;
int rank_;
};
TEST_F(SVDFOpTest, BlackBoxTestInteger) {
IntegerSVDFOpModel svdf(/*batches=*/2, /*units=*/4, /*input_size=*/3,
/*memory_size=*/10, /*rank=*/1);
svdf.SetWeightsFeature({-0.31930989, -0.36118156, 0.0079667, 0.37613347,
0.22197971, 0.12416199, 0.27901134, 0.27557442,
0.3905206, -0.36137494, -0.06634006, -0.10640851});
svdf.SetWeightsTime(
{-0.31930989, 0.37613347, 0.27901134, -0.36137494, -0.36118156,
0.22197971, 0.27557442, -0.06634006, 0.0079667, 0.12416199,
0.3905206, -0.10640851, -0.0976817, 0.15294972, 0.39635518,
-0.02702999, 0.39296314, 0.15785322, 0.21931258, 0.31053296,
-0.36916667, 0.38031587, -0.21580373, 0.27072677, 0.23622236,
0.34936687, 0.18174365, 0.35907319, -0.17493086, 0.324846,
-0.10781813, 0.27201805, 0.14324132, -0.23681851, -0.27115166,
-0.01580888, -0.14943552, 0.15465137, 0.09784451, -0.0337657});
svdf.SetBias({-0.0976817, 0.15294972, 0.39635518, -0.02702999});
const std::vector<std::vector<float>> input_sequences = {
{0.49837467, 0.19278903, 0.26584083, 0.17660543, 0.52949083, -0.77931279},
{0.12609188, -0.46347019, -0.89598465, 0.35867718, 0.36897406,
0.73463392},
{0.14278367, -1.64410412, -0.75222826, -0.57290924, 0.12729003,
0.7567004},
{0.49837467, 0.19278903, 0.26584083, 0.17660543, 0.52949083, -0.77931279},
{0.12609188, -0.46347019, -0.89598465, 0.35867718, 0.36897406,
0.73463392},
{0.14278367, -1.64410412, -0.75222826, -0.57290924, 0.12729003,
0.7567004},
{0.49837467, 0.19278903, 0.26584083, 0.17660543, 0.52949083, -0.77931279},
{0.12609188, -0.46347019, -0.89598465, 0.35867718, 0.36897406,
0.73463392},
{0.14278367, -1.64410412, -0.75222826, -0.57290924, 0.12729003,
0.7567004},
{0.49837467, 0.19278903, 0.26584083, 0.17660543, 0.52949083, -0.77931279},
{0.12609188, -0.46347019, -0.89598465, 0.35867718, 0.36897406,
0.73463392},
{0.14278367, -1.64410412, -0.75222826, -0.57290924, 0.12729003,
0.7567004}};
const std::vector<std::vector<int8_t>> expected_output = {
{-9, 24, 31, 1, -10, 10, -3, 0},
{2, 4, -44, -7, -10, 32, 52, 1},
{12, -17, 9, -8, 7, 16, -11, -8},
{-26, 29, 28, 16, -23, 26, 30, -6},
{-8, -25, -86, -5, -44, 59, 81, 15},
{62, -16, -37, 3, 27, 14, 34, -10},
{1, 24, -25, 23, 31, 61, 67, 11},
{-64, -65, -128, -25, -53, 59, 127, 20},
{20, -29, -20, -15, -28, 0, 8, -27},
{54, 61, -67, 38, 38, 64, 115, 0},
{-44, -75, -128, -20, -19, 93, 101, 35},
{-5, -56, 30, -18, -40, -9, -8, -31},
};
for (int sequence_index = 0; sequence_index < 12; ++sequence_index) {
svdf.SetInput(input_sequences[sequence_index]);
ASSERT_EQ(svdf.Invoke(), kTfLiteOk);
const std::vector<int8_t> res = svdf.GetOutput();
EXPECT_THAT(res, ElementsAreArray(expected_output[sequence_index]));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/svdf.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/svdf_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9a766995-4e5a-4ca3-8824-00f4bbbcc0c9 | cpp | tensorflow/tensorflow | gather | tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/gather.cc | tensorflow/lite/delegates/gpu/cl/kernels/gather_test.cc | #include "tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/gather.h"
#include <cstdint>
#include <optional>
#include <vector>
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/ImplicitLocOpBuilder.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/DialectConversion.h"
#include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
#include "tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/util.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
namespace mlir::odml {
std::optional<bool> IsGatherLegal(mhlo::GatherOp op) { return std::nullopt; }
class LegalizeGatherToSlice : public OpConversionPattern<mhlo::GatherOp> {
public:
using OpConversionPattern::OpConversionPattern;
LogicalResult matchAndRewrite(
mhlo::GatherOp op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const final;
};
LogicalResult LegalizeGatherToSlice::matchAndRewrite(
mhlo::GatherOp gather_op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const {
Value operand = gather_op.getOperand();
Value start_indices = gather_op.getStartIndices();
static const int rank_two = 2;
static const int max_batch_size = 50;
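// This pattern only matches rank-2 operands gathered along a single leading
// batch dimension of at most 50 rows, with each start index selecting one row;
// anything else is left for the GatherND lowering below.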
ShapedType operand_type = mlir::cast<ShapedType>(operand.getType());
ShapedType start_indices_type =
mlir::cast<ShapedType>(start_indices.getType());
ShapedType result_type =
mlir::cast<ShapedType>(gather_op.getResult().getType());
if (!operand_type.hasStaticShape() || !start_indices_type.hasStaticShape() ||
!result_type.hasStaticShape()) {
return rewriter.notifyMatchFailure(
gather_op,
"Dynamic shaped inputs are not supported when legalizing mhlo.gather "
"op to tf.slice.");
}
auto start_index_map = gather_op.getDimensionNumbers().getStartIndexMap();
auto collapsed_slice_dims =
gather_op.getDimensionNumbers().getCollapsedSliceDims();
auto offset_dims = gather_op.getDimensionNumbers().getOffsetDims();
auto slice_sizes = gather_op.getSliceSizes();
llvm::SmallVector<int64_t, 2> slice_sizes_vector;
slice_sizes_vector.reserve(slice_sizes.size());
for (int64_t s : slice_sizes.getValues<int64_t>()) {
slice_sizes_vector.push_back(s);
}
llvm::SmallVector<int64_t, 1> batch_dims;
int offset_index = 0;
for (int64_t i = 0; i < result_type.getRank(); ++i) {
if (offset_index >= offset_dims.size() || offset_dims[offset_index] != i) {
batch_dims.push_back(i);
} else {
++offset_index;
}
}
if (batch_dims.size() != 1 || batch_dims[0] != 0) {
return failure();
}
int64_t batch_dim = batch_dims[0];
if (operand_type.getDimSize(batch_dim) > max_batch_size ||
operand_type.getRank() != rank_two ||
start_indices_type.getRank() != rank_two ||
operand_type.getDimSize(batch_dim) !=
start_indices_type.getDimSize(batch_dim) ||
slice_sizes_vector[batch_dim] != 1) {
return failure();
}
for (int64_t i = 0; i < start_index_map.size(); i++) {
if (start_index_map[i] != i) {
return failure();
}
}
if (collapsed_slice_dims.size() != start_index_map.size() - 1 ||
collapsed_slice_dims.size() != 1 || collapsed_slice_dims[0] != 0) {
return failure();
}
int64_t index_vector_dim =
gather_op.getDimensionNumbers().getIndexVectorDim();
if (failed(NormalizeIndexVector(gather_op, start_indices, start_indices_type,
index_vector_dim, rewriter))) {
return failure();
}
ImplicitLocOpBuilder builder(gather_op.getLoc(), rewriter);
auto max_start_indices = BuildIntArrayConstOp<arith::ConstantOp>(
builder, rewriter,
llvm::SmallVector<int64_t>(
{operand_type.getDimSize(0) - slice_sizes_vector[0],
operand_type.getDimSize(1) - slice_sizes_vector[1]}),
start_indices_type.getElementType());
auto min_start_indices = BuildIntArrayConstOp<arith::ConstantOp>(
builder, rewriter, llvm::SmallVector<int64_t>({0, 0}),
start_indices_type.getElementType());
auto start_indices_max_op = rewriter.create<TFL::MaximumOp>(
gather_op.getLoc(), start_indices, min_start_indices);
auto clamped_start_indices_op = rewriter.create<TFL::MinimumOp>(
gather_op.getLoc(), start_indices_max_op, max_start_indices);
int64_t batch_size = start_indices_type.getDimSize(batch_dim);
auto slice_size = BuildIntArrayConstOp<arith::ConstantOp>(
builder, rewriter, slice_sizes_vector, rewriter.getI32Type());
if (batch_size == 1) {
auto squeeze_op = rewriter.create<TFL::SqueezeOp>(
gather_op.getLoc(),
RankedTensorType::get({rank_two}, start_indices_type.getElementType()),
clamped_start_indices_op,
rewriter.getI64ArrayAttr(llvm::ArrayRef<int64_t>({batch_dim})));
auto slice_op =
rewriter.create<TFL::SliceOp>(gather_op.getLoc(), gather_op.getType(),
operand, squeeze_op, slice_size);
rewriter.replaceOp(gather_op, slice_op);
return mlir::success();
}
llvm::SmallVector<Value, 1> slices;
slices.reserve(batch_size);
for (int64_t i = 0; i < batch_size; ++i) {
auto zero = BuildIntArrayConstOp<arith::ConstantOp>(
builder, rewriter, llvm::SmallVector<int64_t>({i, 0}),
rewriter.getI32Type());
auto two = BuildIntArrayConstOp<arith::ConstantOp>(
builder, rewriter, llvm::SmallVector<int64_t>({1, 2}),
rewriter.getI32Type());
auto begin = rewriter.create<TFL::SliceOp>(
gather_op.getLoc(),
RankedTensorType::get({1, 2}, start_indices_type.getElementType()),
clamped_start_indices_op, zero, two);
auto squeeze_op = rewriter.create<TFL::SqueezeOp>(
gather_op.getLoc(),
RankedTensorType::get({rank_two}, start_indices_type.getElementType()),
begin, rewriter.getI64ArrayAttr(llvm::ArrayRef<int64_t>({batch_dim})));
auto slice_op = rewriter.create<TFL::SliceOp>(
gather_op.getLoc(),
RankedTensorType::get({1, slice_sizes_vector[1]},
operand_type.getElementType()),
operand, squeeze_op, slice_size);
slices.push_back(slice_op);
}
auto concat_op = rewriter.create<TFL::ConcatenationOp>(
gather_op.getLoc(), result_type, slices, 0,
rewriter.getStringAttr("NONE"));
rewriter.replaceOp(gather_op, concat_op);
return mlir::success();
}
struct TransposeParams {
std::vector<int64_t> permutation;
std::vector<int64_t> canonicalized_output_shape;
std::vector<int64_t> canonicalized_offset_dims;
};
TransposeParams CanonicalizeOffset(ShapedType result_type,
ArrayRef<int64_t> original_offset_dims) {
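// Reorders the output view so all offset (slice) dims trail the batch dims,
// and records the permutation needed to restore the original output layout
// afterwards.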
TransposeParams transpose_params;
int output_rank = result_type.getRank();
for (int start = output_rank - original_offset_dims.size();
start < output_rank; ++start) {
transpose_params.canonicalized_offset_dims.push_back(start);
}
std::vector<int64_t> batch_dims;
int offset_index = 0;
for (int64_t i = 0; i < output_rank; ++i) {
if (offset_index >= original_offset_dims.size() ||
original_offset_dims[offset_index] != i) {
batch_dims.push_back(i);
} else {
++offset_index;
}
}
int batch_idx = 0;
int offset_idx = 0;
int batch_dim_size = batch_dims.size();
for (int i = 0; i < output_rank; ++i) {
if (batch_idx >= batch_dims.size()) {
transpose_params.permutation.push_back(batch_dim_size + offset_idx);
++offset_idx;
} else if (offset_idx < original_offset_dims.size() &&
original_offset_dims[offset_idx] < batch_dims[batch_idx]) {
transpose_params.permutation.push_back(batch_dim_size + offset_idx);
++offset_idx;
} else {
transpose_params.permutation.push_back(batch_idx++);
}
}
for (auto dim : batch_dims) {
transpose_params.canonicalized_output_shape.push_back(
result_type.getDimSize(dim));
}
for (auto dim : original_offset_dims) {
transpose_params.canonicalized_output_shape.push_back(
result_type.getDimSize(dim));
}
return transpose_params;
}
class LegalizeGatherToGatherND : public OpConversionPattern<mhlo::GatherOp> {
public:
using OpConversionPattern::OpConversionPattern;
LogicalResult matchAndRewrite(
mhlo::GatherOp op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const final;
};
LogicalResult LegalizeGatherToGatherND::matchAndRewrite(
mhlo::GatherOp gather_op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const {
Value operand = gather_op.getOperand();
Value start_indices = gather_op.getStartIndices();
ShapedType operand_type = mlir::cast<ShapedType>(operand.getType());
ShapedType start_indices_type =
mlir::cast<ShapedType>(start_indices.getType());
ShapedType result_type =
mlir::cast<ShapedType>(gather_op.getResult().getType());
if (!operand_type.hasStaticShape()) {
gather_op.emitOpError() << "Dynamic shaped operand is not supported.";
return failure();
}
int64_t index_vector_dim =
gather_op.getDimensionNumbers().getIndexVectorDim();
if (failed(NormalizeIndexVector(gather_op, start_indices, start_indices_type,
index_vector_dim, rewriter))) {
return failure();
}
auto start_index_map = gather_op.getDimensionNumbers().getStartIndexMap();
auto collapsed_slice_dims =
gather_op.getDimensionNumbers().getCollapsedSliceDims();
if (start_index_map.size() != collapsed_slice_dims.size()) {
return rewriter.notifyMatchFailure(
gather_op,
"different size for start index map and collapsed slice dims");
}
for (auto c : collapsed_slice_dims) {
if (llvm::count(start_index_map, c) == 0) {
return rewriter.notifyMatchFailure(
gather_op, "collapsed slice dim isn't present in start index map");
}
}
auto slice_sizes = gather_op.getSliceSizes();
int64_t index = 0;
for (int64_t s : slice_sizes.getValues<int64_t>()) {
if (llvm::count(start_index_map, index)) {
if (s != 1) {
return rewriter.notifyMatchFailure(gather_op,
"unsupported slice sizes");
}
} else {
if (s != operand_type.getShape()[index]) {
return rewriter.notifyMatchFailure(gather_op,
"unsupported slice sizes");
}
}
++index;
}
auto offset_dims = gather_op.getDimensionNumbers().getOffsetDims();
SmallVector<int64_t, 4> offset_dims_vector(offset_dims.begin(),
offset_dims.end());
const TransposeParams& transpose_params =
CanonicalizeOffset(result_type,
offset_dims_vector);
int64_t offset = start_indices_type.getRank() - 1;
for (int64_t o : transpose_params.canonicalized_offset_dims) {
if (o != offset) {
return rewriter.notifyMatchFailure(gather_op, "unsupported offset dims");
}
++offset;
}
llvm::SmallVector<int64_t, 4> transpose_dimensions;
llvm::SmallVector<int64_t, 4> transpose_shape;
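// Transpose the operand so the gathered (start_index_map) dims lead;
// tfl.gather_nd then indexes those leading dims with the trailing dim of
// start_indices.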
for (auto s : start_index_map) {
transpose_dimensions.push_back(s);
transpose_shape.push_back(operand_type.getShape()[s]);
}
for (int64_t i = 0, e = operand_type.getRank(); i < e; ++i) {
if (llvm::count(start_index_map, i) == 0) {
transpose_dimensions.push_back(i);
transpose_shape.push_back(operand_type.getShape()[i]);
}
}
operand_type =
RankedTensorType::get(transpose_shape, operand_type.getElementType());
operand = rewriter.create<mhlo::TransposeOp>(
gather_op.getLoc(), operand_type, operand,
rewriter.getI64TensorAttr(transpose_dimensions));
bool need_transpose_after = false;
for (int i = 0; i < transpose_params.permutation.size(); ++i) {
if (i != transpose_params.permutation[i]) {
need_transpose_after = true;
break;
}
}
auto tf_gather_nd_result_type =
RankedTensorType::get(transpose_params.canonicalized_output_shape,
result_type.getElementType());
if (start_indices_type.getElementType().isUnsignedInteger(32)) {
start_indices = rewriter.create<TFL::CastOp>(
gather_op->getLoc(),
RankedTensorType::get(start_indices_type.getShape(),
rewriter.getI64Type()),
start_indices);
}
auto tf_gather_nd_op = rewriter.create<TFL::GatherNdOp>(
gather_op->getLoc(), tf_gather_nd_result_type, operand, start_indices);
if (!need_transpose_after) {
rewriter.replaceOp(gather_op, tf_gather_nd_op->getOpResults());
return success();
}
rewriter.replaceOpWithNewOp<mhlo::TransposeOp>(
gather_op, result_type, tf_gather_nd_op,
rewriter.getI64TensorAttr(transpose_params.permutation));
return success();
}
void PopulateGatherPatterns(MLIRContext* ctx, RewritePatternSet& patterns,
ConversionTarget& target) {
patterns.add<LegalizeGatherToSlice, LegalizeGatherToGatherND>(ctx);
target.addDynamicallyLegalOp<mhlo::GatherOp>(IsGatherLegal);
}
} | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tasks/gather_test_util.h"
namespace tflite {
namespace gpu {
namespace cl {
TEST_F(OpenCLOperationTest, GatherBatch) {
auto status = GatherBatchTest(&exec_env_, false);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, GatherBatchConst) {
auto status = GatherBatchTest(&exec_env_, true);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, GatherHeight) {
auto status = GatherHeightTest(&exec_env_, false);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, GatherHeightConst) {
auto status = GatherHeightTest(&exec_env_, true);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, GatherWidth) {
auto status = GatherWidthTest(&exec_env_, false);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, GatherWidthConst) {
auto status = GatherWidthTest(&exec_env_, true);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, GatherChannels) {
auto status = GatherChannelsTest(&exec_env_, false);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, GatherChannelsConst) {
auto status = GatherChannelsTest(&exec_env_, true);
ASSERT_TRUE(status.ok()) << status.message();
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/gather.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/kernels/gather_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4da2097c-a2ff-4511-ab1e-85d582971763 | cpp | tensorflow/tensorflow | mirror_pad | tensorflow/lite/kernels/mirror_pad.cc | tensorflow/lite/delegates/hexagon/builders/tests/mirror_pad_test.cc | #include <stddef.h>
#include <stdint.h>
#include <algorithm>
#include <memory>
#include <vector>
#include "ruy/profiler/instrumentation.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/cpu_backend_threadpool.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/quantization_util.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace mirror_pad {
namespace {
const int kUnsetOffset = -1;
template <typename T>
struct EvalData {
const TfLiteTensor* padding_matrix = nullptr;
const TfLiteIntArray* input_dims = nullptr;
const std::vector<int>* output_dims_num_elements = nullptr;
const std::vector<int>* input_dims_num_elements = nullptr;
const T* input_data = nullptr;
int offset = kUnsetOffset;
T* output_data = nullptr;
int num_dims = 0;
};
template <typename T>
inline void GetPadding(const T* data, int offset, int64_t* left_pad,
int64_t* right_pad) {
*left_pad = static_cast<int64_t>(*(data + offset * 2));
*right_pad = static_cast<int64_t>(*(data + offset * 2 + 1));
}
inline void GetPadding(const TfLiteTensor* padding_matrix, int dimension,
int64_t* left_pad, int64_t* right_pad) {
switch (padding_matrix->type) {
case kTfLiteInt32:
GetPadding(padding_matrix->data.i32, dimension, left_pad, right_pad);
break;
case kTfLiteInt64:
GetPadding(padding_matrix->data.i64, dimension, left_pad, right_pad);
break;
default:
return;
}
}
std::unique_ptr<TfLiteIntArray, void (*)(TfLiteIntArray*)> GetPaddedOutputShape(
const TfLiteTensor* input, const TfLiteTensor* padding_matrix) {
const int input_dims = NumDimensions(input);
std::unique_ptr<TfLiteIntArray, void (*)(TfLiteIntArray*)> shape(
TfLiteIntArrayCreate(input_dims), TfLiteIntArrayFree);
int64_t left_pad = 0, right_pad = 0;
for (int i = 0; i < input_dims; ++i) {
GetPadding(padding_matrix, i, &left_pad, &right_pad);
shape->data[i] = SizeOfDimension(input, i) + left_pad + right_pad;
}
return shape;
}
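// Maps a coordinate in the padded output back to the input coordinate it
// mirrors. `offset` is 1 for REFLECT mode (the edge element is not repeated)
// and 0 for SYMMETRIC mode (the edge element is repeated). For example, with
// an input of size 3, left_pad = 2 and REFLECT mode, padded positions 0..4
// map to input positions 2, 1, 0, 1, 2.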
inline int GetInputDimension(int padded_dimension, int left_pad, int right_pad,
int input_dim_size, int offset) {
if (padded_dimension < left_pad) {
const int original_ind = left_pad + offset - 1;
return original_ind - (std::min(padded_dimension, original_ind - offset));
}
padded_dimension -= left_pad;
if (padded_dimension >= input_dim_size) {
padded_dimension -= input_dim_size;
const int original_ind = input_dim_size - (1 + offset);
return original_ind - std::min(padded_dimension, original_ind);
}
return padded_dimension;
}
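// Converts a flat index into the padded output tensor into the flat index of
// the input element that fills it, walking the dimensions from outermost to
// innermost and using the precomputed per-dimension element counts as strides.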
template <typename T>
int GetFlatIndex(int index, EvalData<T>* eval_data) {
int flat_index = 0;
int64_t left_pad = 0, right_pad = 0, dimension_index, index_in_input;
for (int i = 0; i < eval_data->num_dims; ++i) {
switch (eval_data->padding_matrix->type) {
case kTfLiteInt32:
GetPadding(eval_data->padding_matrix->data.i32, i, &left_pad,
&right_pad);
break;
case kTfLiteInt64:
GetPadding(eval_data->padding_matrix->data.i64, i, &left_pad,
&right_pad);
break;
default:
break;
}
dimension_index = index / (*eval_data->output_dims_num_elements)[i];
index_in_input =
GetInputDimension(dimension_index, left_pad, right_pad,
eval_data->input_dims->data[i], eval_data->offset);
flat_index += index_in_input * (*eval_data->input_dims_num_elements)[i];
index %= (*eval_data->output_dims_num_elements)[i];
}
return flat_index;
}
template <typename T>
struct MirrorPadWorkerTask : cpu_backend_threadpool::Task {
MirrorPadWorkerTask(EvalData<T>* eval_data, int start, int end)
: eval_data(eval_data), start(start), end(end) {}
void Run() override {
auto* input_data = eval_data->input_data;
auto* output_data = eval_data->output_data;
for (int i = start; i < end; ++i) {
output_data[i] = input_data[GetFlatIndex(i, eval_data)];
}
}
private:
EvalData<T>* eval_data;
int start;
int end;
};
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
ruy::profiler::ScopeLabel label("MirrorPad");
const TfLiteTensor* input_tensor;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input_tensor));
const TfLiteTensor* padding_matrix;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &padding_matrix));
auto* params =
reinterpret_cast<TfLiteMirrorPaddingParams*>(node->builtin_data);
if (params == nullptr) {
return kTfLiteError;
}
const int input_dims = NumDimensions(input_tensor);
TfLiteTensor* output_tensor;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output_tensor));
if (IsDynamicTensor(output_tensor)) {
auto output_size = GetPaddedOutputShape(input_tensor, padding_matrix);
if (output_size == nullptr) {
return kTfLiteError;
}
TF_LITE_ENSURE_STATUS(
context->ResizeTensor(context, output_tensor, output_size.release()));
}
std::vector<int> output_dims_num_elements(input_dims, 1);
std::vector<int> input_dims_num_elements(input_dims, 1);
for (int i = input_dims - 2; i >= 0; i--) {
output_dims_num_elements[i] =
output_dims_num_elements[i + 1] * output_tensor->dims->data[i + 1];
input_dims_num_elements[i] =
input_dims_num_elements[i + 1] * input_tensor->dims->data[i + 1];
}
const int offset =
params->mode != TfLiteMirrorPaddingMode::kTfLiteMirrorPaddingReflect ? 0
: 1;
CpuBackendContext* cpu_backend_context =
CpuBackendContext::GetFromContext(context);
const int thread_count = cpu_backend_context->max_num_threads();
TfLiteStatus status = kTfLiteOk;
const int output_size = NumElements(output_tensor);
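  // The output range [0, output_size) is split into contiguous chunks, one per
  // thread; each MirrorPadWorkerTask fills its chunk by copying the source
  // element found with GetFlatIndex.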
#define TF_LITE_MIRROR_PAD(type) \
EvalData<type> eval_data; \
eval_data.input_data = GetTensorData<type>(input_tensor); \
  eval_data.input_dims = input_tensor->dims; \
eval_data.output_dims_num_elements = &output_dims_num_elements; \
eval_data.input_dims_num_elements = &input_dims_num_elements; \
eval_data.num_dims = input_dims; \
eval_data.offset = offset; \
eval_data.output_data = GetTensorData<type>(output_tensor); \
eval_data.padding_matrix = padding_matrix; \
std::vector<MirrorPadWorkerTask<type>> tasks; \
tasks.reserve(thread_count); \
int start = 0; \
for (int i = 0; i < thread_count; ++i) { \
int end = start + (output_size - start) / (thread_count - i); \
tasks.emplace_back(MirrorPadWorkerTask<type>(&eval_data, start, end)); \
start = end; \
} \
cpu_backend_threadpool::Execute(tasks.size(), tasks.data(), \
cpu_backend_context);
switch (output_tensor->type) {
case kTfLiteFloat32: {
TF_LITE_MIRROR_PAD(float);
break;
}
case kTfLiteInt32: {
TF_LITE_MIRROR_PAD(int32_t);
break;
}
case kTfLiteUInt8: {
TF_LITE_MIRROR_PAD(uint8_t);
break;
}
case kTfLiteInt8: {
TF_LITE_MIRROR_PAD(int8_t);
break;
}
case kTfLiteInt64: {
TF_LITE_MIRROR_PAD(int64_t);
break;
}
case kTfLiteInt16: {
TF_LITE_MIRROR_PAD(int16_t);
break;
}
default:
status = kTfLiteError;
break;
}
#undef TF_LITE_MIRROR_PAD
return status;
}
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
return nullptr;
}
void Free(TfLiteContext* context, void* buffer) {}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input_tensor;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input_tensor));
const TfLiteTensor* padding_matrix;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &padding_matrix));
TfLiteTensor* output_tensor;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output_tensor));
TF_LITE_ENSURE_EQ(context, NumDimensions(padding_matrix), 2);
TF_LITE_ENSURE_EQ(context, SizeOfDimension(padding_matrix, 0),
NumDimensions(input_tensor));
if (input_tensor->type == kTfLiteUInt8 || input_tensor->type == kTfLiteInt8 ||
input_tensor->type == kTfLiteInt16) {
TF_LITE_ENSURE_EQ(context, input_tensor->params.scale,
output_tensor->params.scale);
TF_LITE_ENSURE_EQ(context, input_tensor->params.zero_point,
output_tensor->params.zero_point);
}
if (input_tensor->type == kTfLiteInt16) {
TF_LITE_ENSURE_EQ(context, input_tensor->params.zero_point, 0);
TF_LITE_ENSURE_EQ(context, output_tensor->params.zero_point, 0);
}
if (!IsConstantOrPersistentTensor(padding_matrix)) {
SetTensorToDynamic(output_tensor);
return kTfLiteOk;
}
auto output_size = GetPaddedOutputShape(input_tensor, padding_matrix);
if (output_size == nullptr) {
return kTfLiteError;
}
return context->ResizeTensor(context, output_tensor, output_size.release());
}
}
TfLiteRegistration* Register_MIRROR_PAD() {
static TfLiteRegistration r = {mirror_pad::Init, mirror_pad::Free,
mirror_pad::Prepare, mirror_pad::Eval};
return &r;
}
}
}
} | #include <initializer_list>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/hexagon/builders/tests/hexagon_delegate_op_model.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
using testing::ElementsAreArray;
template <typename T>
class MirrorPadOpModel : public SingleOpModelWithHexagon {
public:
MirrorPadOpModel(const TensorData& input,
std::initializer_list<int> paddings_shape,
std::initializer_list<int> paddings,
const TensorData& output, const tflite::MirrorPadMode mode) {
input_id_ = AddInput(input);
padding_matrix_id_ =
AddConstInput(TensorType_INT32, paddings, paddings_shape);
output_id_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_MIRROR_PAD, BuiltinOptions_MirrorPadOptions,
CreateMirrorPadOptions(builder_, mode).Union());
BuildInterpreter({GetShape(input_id_), GetShape(padding_matrix_id_)});
}
int input_tensor_id() { return input_id_; }
std::vector<T> GetOutput() { return ExtractVector<T>(output_id_); }
protected:
int input_id_;
int padding_matrix_id_;
int output_id_;
};
TEST(MirrorPadTest, EmptyPad_UInt8) {
MirrorPadOpModel<uint8_t> model(
{TensorType_UINT8, {2, 3}, -1.0, 1.0}, {2, 2}, {0, 0, 0, 0},
{TensorType_UINT8, {}, -1.0, 1.0}, tflite::MirrorPadMode_REFLECT);
model.PopulateTensor<uint8_t>(model.input_tensor_id(), {1, 2, 3, 4, 5, 6});
model.ApplyDelegateAndInvoke();
EXPECT_THAT(model.GetOutput(), ElementsAreArray({1, 2, 3, 4, 5, 6}));
}
TEST(MirrorPadTest, PadBothSides_Symmetric_Int8) {
MirrorPadOpModel<int8_t> model({TensorType_INT8, {2, 3}, -1.0, 1.0}, {2, 2},
{1, 1, 1, 1}, {TensorType_INT8, {}, -1.0, 1.0},
tflite::MirrorPadMode_SYMMETRIC);
model.PopulateTensor<int8_t>(model.input_tensor_id(), {1, 2, 3, 4, 5, 6});
model.ApplyDelegateAndInvoke();
EXPECT_THAT(model.GetOutput(),
ElementsAreArray({1, 1, 2, 3, 3, 1, 1, 2, 3, 3,
4, 4, 5, 6, 6, 4, 4, 5, 6, 6}));
}
TEST(MirrorPadTest, PadBothSides_Reflect_UInt8) {
MirrorPadOpModel<uint8_t> model(
{TensorType_UINT8, {2, 3}, -1.0, 1.0}, {2, 2}, {1, 1, 1, 1},
{TensorType_UINT8, {}, -1.0, 1.0}, tflite::MirrorPadMode_REFLECT);
model.PopulateTensor<uint8_t>(model.input_tensor_id(), {1, 2, 3, 4, 5, 6});
model.ApplyDelegateAndInvoke();
EXPECT_THAT(model.GetOutput(),
ElementsAreArray({5, 4, 5, 6, 5, 2, 1, 2, 3, 2,
5, 4, 5, 6, 5, 2, 1, 2, 3, 2}));
}
TEST(MirrorPadTest, PadOneSide_left_Reflect_Int8) {
MirrorPadOpModel<int8_t> model({TensorType_INT8, {2, 3}, -1.0, 1.0}, {2, 2},
{1, 0, 1, 0}, {TensorType_INT8, {}, -1.0, 1.0},
tflite::MirrorPadMode_REFLECT);
model.PopulateTensor<int8_t>(model.input_tensor_id(), {1, 2, 3, 4, 5, 6});
model.ApplyDelegateAndInvoke();
EXPECT_THAT(model.GetOutput(),
ElementsAreArray({5, 4, 5, 6, 2, 1, 2, 3, 5, 4, 5, 6}));
}
TEST(MirrorPadTest, PadOneSide_right_Symmetric_UInt8) {
MirrorPadOpModel<uint8_t> model(
{TensorType_UINT8, {2, 3}, -1.0, 1.0}, {2, 2}, {0, 1, 0, 1},
{TensorType_UINT8, {}, -1.0, 1.0}, tflite::MirrorPadMode_SYMMETRIC);
model.PopulateTensor<uint8_t>(model.input_tensor_id(), {1, 2, 3, 4, 5, 6});
model.ApplyDelegateAndInvoke();
EXPECT_THAT(model.GetOutput(),
ElementsAreArray({1, 2, 3, 3, 4, 5, 6, 6, 4, 5, 6, 6}));
}
TEST(MirrorPadTest, Pad_1D_Reflect_Int8) {
MirrorPadOpModel<int8_t> model({TensorType_INT8, {3}, -1.0, 1.0}, {1, 2},
{0, 2}, {TensorType_INT8, {}, -1.0, 1.0},
tflite::MirrorPadMode_REFLECT);
model.PopulateTensor<int8_t>(model.input_tensor_id(), {1, 2, 3});
model.ApplyDelegateAndInvoke();
EXPECT_THAT(model.GetOutput(), ElementsAreArray({1, 2, 3, 2, 1}));
}
TEST(MirrorPadTest, Pad_1D_Symmetric_UInt8) {
MirrorPadOpModel<uint8_t> model({TensorType_UINT8, {3}, -1.0, 1.0}, {1, 2},
{0, 2}, {TensorType_UINT8, {}, -1.0, 1.0},
tflite::MirrorPadMode_SYMMETRIC);
model.PopulateTensor<uint8_t>(model.input_tensor_id(), {1, 2, 3});
model.ApplyDelegateAndInvoke();
EXPECT_THAT(model.GetOutput(), ElementsAreArray({1, 2, 3, 3, 2}));
}
TEST(MirrorPadTest, PadBothSides_Reflect_Whole_UInt8) {
MirrorPadOpModel<uint8_t> model(
{TensorType_UINT8, {2, 3}, -1.0, 1.0}, {2, 2}, {1, 1, 2, 2},
{TensorType_UINT8, {}, -1.0, 1.0}, tflite::MirrorPadMode_REFLECT);
model.PopulateTensor<uint8_t>(model.input_tensor_id(), {1, 2, 3, 4, 5, 6});
model.ApplyDelegateAndInvoke();
EXPECT_THAT(model.GetOutput(),
ElementsAreArray({6, 5, 4, 5, 6, 5, 4, 3, 2, 1, 2, 3, 2, 1,
6, 5, 4, 5, 6, 5, 4, 3, 2, 1, 2, 3, 2, 1}));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/mirror_pad.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/hexagon/builders/tests/mirror_pad_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
947b07fc-9e93-4800-abe6-ab323707e4f4 | cpp | tensorflow/tensorflow | segment_sum | tensorflow/lite/kernels/segment_sum.cc | tensorflow/lite/kernels/segment_sum_test.cc | #include <stdint.h>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace segment_sum {
static const int kInputDataTensor = 0;
static const int kInputSegmentIdsTensor = 1;
static const int kOutputTensor = 0;
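// Checks that the segment ids start at 0 and never decrease or jump by more
// than 1 (sorted and consecutive), then sizes the output so its leading
// dimension equals the number of segments (max segment id + 1).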
TfLiteStatus ResizeOutputTensor(TfLiteContext* context,
const TfLiteTensor* data,
const TfLiteTensor* segment_ids,
TfLiteTensor* output) {
const int segment_id_size = segment_ids->dims->data[0];
TF_LITE_ENSURE_EQ(context, segment_id_size, data->dims->data[0]);
int previous_segment_id = -1;
for (int i = 0; i < segment_id_size; i++) {
const int current_segment_id = GetTensorData<int32_t>(segment_ids)[i];
if (i == 0) {
TF_LITE_ENSURE_EQ(context, current_segment_id, 0);
} else {
int delta = current_segment_id - previous_segment_id;
TF_LITE_ENSURE(context, delta == 0 || delta == 1);
}
previous_segment_id = current_segment_id;
}
const int max_index = previous_segment_id;
const int data_rank = NumDimensions(data);
TfLiteIntArray* output_shape = TfLiteIntArrayCreate(NumDimensions(data));
output_shape->data[0] = max_index + 1;
for (int i = 1; i < data_rank; ++i) {
output_shape->data[i] = data->dims->data[i];
}
return context->ResizeTensor(context, output, output_shape);
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* data;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputDataTensor, &data));
const TfLiteTensor* segment_ids;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputSegmentIdsTensor,
&segment_ids));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TF_LITE_ENSURE(context,
data->type == kTfLiteInt32 || data->type == kTfLiteFloat32);
TF_LITE_ENSURE_EQ(context, segment_ids->type, kTfLiteInt32);
if (!IsConstantOrPersistentTensor(data) ||
!IsConstantOrPersistentTensor(segment_ids)) {
SetTensorToDynamic(output);
return kTfLiteOk;
}
return ResizeOutputTensor(context, data, segment_ids, output);
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* data;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputDataTensor, &data));
const TfLiteTensor* segment_ids;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputSegmentIdsTensor,
&segment_ids));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
if (IsDynamicTensor(output)) {
TF_LITE_ENSURE_OK(context,
ResizeOutputTensor(context, data, segment_ids, output));
}
#define TF_LITE_SEGMENT_SUM(dtype) \
reference_ops::SegmentSum<dtype>( \
GetTensorShape(data), GetTensorData<dtype>(data), \
GetTensorShape(segment_ids), GetTensorData<int32_t>(segment_ids), \
GetTensorShape(output), GetTensorData<dtype>(output));
switch (data->type) {
case kTfLiteInt32:
TF_LITE_SEGMENT_SUM(int32_t);
break;
case kTfLiteFloat32:
TF_LITE_SEGMENT_SUM(float);
break;
default:
TF_LITE_KERNEL_LOG(context,
"Currently SegmentSum doesn't support type: %s",
TfLiteTypeGetName(data->type));
return kTfLiteError;
}
#undef TF_LITE_SEGMENT_SUM
return kTfLiteOk;
}
}
TfLiteRegistration* Register_SEGMENT_SUM() {
static TfLiteRegistration r = {nullptr, nullptr, segment_sum::Prepare,
segment_sum::Eval};
return &r;
}
}
}
} | #include <stdint.h>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
template <typename T>
class SegmentSumOpModel : public SingleOpModel {
public:
SegmentSumOpModel(const TensorData& data, const TensorData& segment_ids) {
data_id_ = AddInput(data);
segment_ids_id_ = AddInput(segment_ids);
output_id_ = AddOutput(data.type);
SetBuiltinOp(BuiltinOperator_SEGMENT_SUM, BuiltinOptions_NONE, 0);
BuildInterpreter({GetShape(data_id_), GetShape(segment_ids_id_)});
}
int data() const { return data_id_; }
int segment_ids() const { return segment_ids_id_; }
std::vector<T> GetOutput() { return ExtractVector<T>(output_id_); }
std::vector<int32_t> GetOutputShape() { return GetTensorShape(output_id_); }
protected:
int data_id_;
int segment_ids_id_;
int output_id_;
};
TEST(SegmentSumOpModelTest, Int32Test_Simple) {
SegmentSumOpModel<int32_t> model({TensorType_INT32, {3, 4}},
{TensorType_INT32, {3}});
model.PopulateTensor<int32_t>(model.data(),
{1, 2, 3, 4, 4, 3, 2, 1, 5, 6, 7, 8});
model.PopulateTensor<int32_t>(model.segment_ids(), {0, 0, 1});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAreArray({5, 5, 5, 5, 5, 6, 7, 8}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({2, 4}));
}
TEST(SegmentSumOpModelTest, Int32Test_OneDimension) {
SegmentSumOpModel<int32_t> model({TensorType_INT32, {3}},
{TensorType_INT32, {3}});
model.PopulateTensor<int32_t>(model.data(), {1, 2, 3});
model.PopulateTensor<int32_t>(model.segment_ids(), {0, 0, 1});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAreArray({3, 3}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({2}));
}
TEST(SegmentSumOpModelTest, Int32Test_ThreeDimensions) {
SegmentSumOpModel<int32_t> model({TensorType_INT32, {3, 2, 1}},
{TensorType_INT32, {3}});
model.PopulateTensor<int32_t>(model.data(), {1, 2, 3, 4, 5, 6});
model.PopulateTensor<int32_t>(model.segment_ids(), {0, 0, 1});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAreArray({4, 6, 5, 6}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({2, 2, 1}));
}
TEST(SegmentSumOpModelTest, Float32Test_Simple) {
SegmentSumOpModel<float> model({TensorType_FLOAT32, {3, 4}},
{TensorType_INT32, {3}});
model.PopulateTensor<float>(model.data(),
{1, 2, 3, 4, 4, 3, 2, 1, 5, 6, 7, 8});
model.PopulateTensor<int>(model.segment_ids(), {0, 0, 1});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
Pointwise(FloatingPointEq(),
{5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 6.0f, 7.0f, 8.0f}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({2, 4}));
}
TEST(SegmentSumOpModelTest, Float32Test_OneDimension) {
SegmentSumOpModel<float> model({TensorType_FLOAT32, {3}},
{TensorType_INT32, {3}});
model.PopulateTensor<float>(model.data(), {1, 2, 3});
model.PopulateTensor<int32_t>(model.segment_ids(), {0, 0, 1});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), Pointwise(FloatingPointEq(), {3.0f, 3.0f}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({2}));
}
TEST(SegmentSumOpModelTest, Float32Test_ThreeDimensions) {
SegmentSumOpModel<float> model({TensorType_FLOAT32, {3, 2, 1}},
{TensorType_INT32, {3}});
model.PopulateTensor<float>(model.data(), {1, 2, 3, 4, 5, 6});
model.PopulateTensor<int32_t>(model.segment_ids(), {0, 0, 1});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
Pointwise(FloatingPointEq(), {4.0f, 6.0f, 5.0f, 6.0f}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({2, 2, 1}));
}
TEST(SegmentSumOpModelTest, TestFailIfSegmentsAreNotSorted) {
SegmentSumOpModel<int32_t> model({TensorType_INT32, {3, 2}},
{TensorType_INT32, {3}});
model.PopulateTensor<int32_t>(model.data(), {1, 2, 3, 4, 5, 6});
model.PopulateTensor<int32_t>(model.segment_ids(), {0, 3, 1});
ASSERT_EQ(model.Invoke(), kTfLiteError);
}
TEST(SegmentSumOpModelTest, TestFailIfSegmentsAreNotConsecutive) {
SegmentSumOpModel<int32_t> model({TensorType_INT32, {3, 2}},
{TensorType_INT32, {3}});
model.PopulateTensor<int32_t>(model.data(), {1, 2, 3, 4, 5, 6});
model.PopulateTensor<int32_t>(model.segment_ids(), {0, 3, 5});
ASSERT_EQ(model.Invoke(), kTfLiteError);
}
TEST(SegmentSumOpModelTest, TestFailIfSegmentsAreNegative) {
SegmentSumOpModel<int32_t> model({TensorType_INT32, {3, 2}},
{TensorType_INT32, {3}});
model.PopulateTensor<int32_t>(model.data(), {1, 2, 3, 4, 5, 6});
model.PopulateTensor<int32_t>(model.segment_ids(), {-1, 0, 1});
ASSERT_EQ(model.Invoke(), kTfLiteError);
}
TEST(SegmentSumOpModelTest, TestFailIfSegmentsAreNotTheRightCardinality) {
SegmentSumOpModel<int32_t> model({TensorType_INT32, {3, 2}},
{TensorType_INT32, {2}});
model.PopulateTensor<int32_t>(model.data(), {1, 2, 3, 4, 5, 6});
model.PopulateTensor<int32_t>(model.segment_ids(), {0, 1});
ASSERT_EQ(model.Invoke(), kTfLiteError);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/segment_sum.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/segment_sum_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d59c0929-967a-4365-b0e0-3845a60b3e63 | cpp | tensorflow/tensorflow | pow | tensorflow/lite/kernels/pow.cc | tensorflow/lite/kernels/pow_test.cc | #include <stddef.h>
#include <stdint.h>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace pow {
namespace {
constexpr int kInputTensor1 = 0;
constexpr int kInputTensor2 = 1;
constexpr int kOutputTensor = 0;
struct OpData {
bool requires_broadcast;
};
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
auto* data = new OpData;
data->requires_broadcast = false;
return data;
}
void Free(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<OpData*>(buffer);
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
OpData* data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* input1;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor1, &input1));
const TfLiteTensor* input2;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor2, &input2));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type);
const TfLiteType type = input1->type;
if (type != kTfLiteInt32 && type != kTfLiteFloat32) {
TF_LITE_KERNEL_LOG(context, "Unsupported data type %s.",
TfLiteTypeGetName(type));
return kTfLiteError;
}
output->type = type;
data->requires_broadcast = !HaveSameShapes(input1, input2);
TfLiteIntArray* output_size = nullptr;
if (data->requires_broadcast) {
TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast(
context, input1, input2, &output_size));
} else {
output_size = TfLiteIntArrayCopy(input1->dims);
}
return context->ResizeTensor(context, output, output_size);
}
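// Element-wise power; uses the 4-D broadcast kernel when the input shapes
// differ and the plain reference kernel when they match.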
template <typename T>
void PowImpl(const TfLiteTensor* input1, const TfLiteTensor* input2,
TfLiteTensor* output, bool requires_broadcast) {
if (requires_broadcast) {
optimized_ops::BroadcastPow4D(
GetTensorShape(input1), GetTensorData<T>(input1),
GetTensorShape(input2), GetTensorData<T>(input2),
GetTensorShape(output), GetTensorData<T>(output));
} else {
reference_ops::Pow(GetTensorShape(input1), GetTensorData<T>(input1),
GetTensorShape(input2), GetTensorData<T>(input2),
GetTensorShape(output), GetTensorData<T>(output));
}
}
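// The int32 path does not support negative exponents, so they are rejected
// before evaluation.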
TfLiteStatus CheckValue(TfLiteContext* context, const TfLiteTensor* input) {
const int64_t num_elements = NumElements(input);
const int32_t* data = GetTensorData<int32_t>(input);
for (int i = 0; i < num_elements; ++i) {
if (data[i] < 0) {
TF_LITE_KERNEL_LOG(context,
"POW does not support negative value for int32.");
return kTfLiteError;
}
}
return kTfLiteOk;
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
OpData* data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* input1;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor1, &input1));
const TfLiteTensor* input2;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor2, &input2));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
switch (output->type) {
case kTfLiteInt32: {
TF_LITE_ENSURE_OK(context, CheckValue(context, input2));
PowImpl<int32_t>(input1, input2, output, data->requires_broadcast);
break;
}
case kTfLiteFloat32: {
PowImpl<float>(input1, input2, output, data->requires_broadcast);
break;
}
default: {
TF_LITE_KERNEL_LOG(context, "Unsupported data type: %d", output->type);
return kTfLiteError;
}
}
return kTfLiteOk;
}
}
}
TfLiteRegistration* Register_POW() {
static TfLiteRegistration r = {pow::Init, pow::Free, pow::Prepare, pow::Eval};
return &r;
}
}
}
} | #include <math.h>
#include <stdint.h>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/internal/test_util.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
template <typename T>
class PowOpModel : public SingleOpModel {
public:
PowOpModel(const TensorData& input1, const TensorData& input2,
const TensorData& output) {
input1_ = AddInput(input1);
input2_ = AddInput(input2);
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_POW, BuiltinOptions_PowOptions,
CreatePowOptions(builder_).Union());
BuildInterpreter({GetShape(input1_), GetShape(input2_)});
}
int input1() { return input1_; }
int input2() { return input2_; }
std::vector<T> GetOutput() { return ExtractVector<T>(output_); }
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
private:
int input1_;
int input2_;
int output_;
};
TEST(PowOpModel, Simple) {
PowOpModel<int32_t> model({TensorType_INT32, {1, 2, 2, 1}},
{TensorType_INT32, {1, 2, 2, 1}},
{TensorType_INT32, {}});
model.PopulateTensor<int32_t>(model.input1(), {12, 2, 7, 8});
model.PopulateTensor<int32_t>(model.input2(), {1, 2, 3, 1});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(), ElementsAre(12, 4, 343, 8));
}
TEST(PowOpModel, NegativeAndZeroValue) {
PowOpModel<int32_t> model({TensorType_INT32, {1, 2, 2, 1}},
{TensorType_INT32, {1, 2, 2, 1}},
{TensorType_INT32, {}});
model.PopulateTensor<int32_t>(model.input1(), {0, 2, -7, 8});
model.PopulateTensor<int32_t>(model.input2(), {1, 2, 3, 0});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(), ElementsAre(0, 4, -343, 1));
}
TEST(PowOpModel, Float) {
PowOpModel<float> model({TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}});
model.PopulateTensor<float>(model.input1(), {0.3, 0.4, 0.7, 5.8});
model.PopulateTensor<float>(model.input2(), {0.5, 2.7, 3.1, 3.2});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{0.5477226, 0.08424846, 0.33098164, 277.313}, 1e-3)));
}
TEST(PowOpModel, NegativeFloatTest) {
PowOpModel<float> model({TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}});
model.PopulateTensor<float>(model.input1(), {0.3, 0.4, 0.7, 5.8});
model.PopulateTensor<float>(model.input2(), {0.5, -2.7, 3.1, -3.2});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{0.5477226, 11.869653, 0.33098164, 0.003606}, 1e-3)));
}
TEST(PowOpModel, BroadcastTest) {
PowOpModel<int32_t> model({TensorType_INT32, {1, 2, 2, 1}},
{TensorType_INT32, {1}}, {TensorType_INT32, {}});
model.PopulateTensor<int32_t>(model.input1(), {12, 2, 7, 8});
model.PopulateTensor<int32_t>(model.input2(), {4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(), ElementsAre(20736, 16, 2401, 4096));
}
TEST(PowOpModel, BroadcastFloatTest) {
PowOpModel<float> model({TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1}}, {TensorType_FLOAT32, {}});
model.PopulateTensor<float>(model.input1(), {12, 2, 7, 8});
model.PopulateTensor<float>(model.input2(), {4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(), ElementsAre(20736, 16, 2401, 4096));
}
template <typename T>
void CalculateTrueResults(const std::vector<T>& input_data, T exponent,
int flat_size, std::vector<T>* output_data) {
for (int i = 0; i < flat_size; ++i) {
output_data->at(i) = std::pow(input_data[i], exponent);
}
}
TEST(PowOpModel, FloatSingleIntegerExponentTest) {
PowOpModel<float> model({TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1}}, {TensorType_FLOAT32, {}});
const int input_size = 1 * 2 * 2 * 1;
for (int i = 1; i < 20; ++i) {
std::vector<float> input_data(input_size);
for (int index = 0; index < input_size; ++index) {
input_data[index] = UniformRandomFloat(0, 1.5);
}
model.PopulateTensor<float>(model.input1(), input_data);
float exponent = static_cast<float>(i);
exponent += UniformRandomInt(-1, 1) * 1e-5;
model.PopulateTensor<float>(model.input2(), {exponent});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
std::vector<float> output_data(input_size);
CalculateTrueResults(input_data, exponent, input_size, &output_data);
EXPECT_THAT(model.GetOutput(),
ElementsAreArray(ArrayFloatNear(output_data, 1e-2)));
}
}
TEST(PowOpModel, IntSingleIntegerExponentTest) {
PowOpModel<int32_t> model({TensorType_INT32, {1, 2, 2, 1}},
{TensorType_INT32, {1}}, {TensorType_INT32, {}});
const int input_size = 1 * 2 * 2 * 1;
for (int i = 1; i < 20; ++i) {
std::vector<int32_t> input_data(input_size);
for (int index = 0; index < input_size; ++index) {
input_data[index] = UniformRandomInt(-2, -2);
}
model.PopulateTensor<int32_t>(model.input1(), input_data);
int exponent = i;
model.PopulateTensor<int32_t>(model.input2(), {exponent});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
std::vector<int32_t> output_data(input_size);
CalculateTrueResults(input_data, exponent, input_size, &output_data);
EXPECT_THAT(model.GetOutput(), ElementsAreArray(output_data));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/pow.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/pow_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
15fd9202-a8ac-4fc4-9660-18c249c6c32b | cpp | tensorflow/tensorflow | broadcast_args | tensorflow/lite/kernels/broadcast_args.cc | tensorflow/lite/kernels/broadcast_args_test.cc | #include "tensorflow/lite/kernels/internal/reference/broadcast_args.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace broadcast_args {
constexpr int kShape1Tensor = 0;
constexpr int kShape2Tensor = 1;
constexpr int kOutputTensor = 0;
struct BroadcastArgsContext {
BroadcastArgsContext(TfLiteContext* context, TfLiteNode* node) {
shape1 = GetInput(context, node, kShape1Tensor);
shape2 = GetInput(context, node, kShape2Tensor);
output = GetOutput(context, node, kOutputTensor);
}
const TfLiteTensor* shape1;
const TfLiteTensor* shape2;
TfLiteTensor* output;
};
TfLiteStatus EvalImpl(TfLiteContext* context, TfLiteNode* node);
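// If both shape inputs are constant (or persistent) tensors, the broadcast
// shape is computed once here in Prepare and stored in a persistent read-only
// output; otherwise only the output size is fixed and the work is deferred to
// Eval.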
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE(context, NumInputs(node) == 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
BroadcastArgsContext op_context(context, node);
TF_LITE_ENSURE(context, op_context.shape1->type == kTfLiteInt32 ||
op_context.shape1->type == kTfLiteInt64);
TF_LITE_ENSURE_EQ(context, op_context.shape1->type, op_context.shape2->type);
TF_LITE_ENSURE_EQ(context, op_context.shape1->type, op_context.output->type);
TF_LITE_ENSURE_EQ(context, NumDimensions(op_context.shape1), 1);
TF_LITE_ENSURE_EQ(context, NumDimensions(op_context.shape2), 1);
TfLiteIntArray* output_shape = TfLiteIntArrayCreate(1);
output_shape->data[0] = std::max(SizeOfDimension(op_context.shape1, 0),
SizeOfDimension(op_context.shape2, 0));
if (IsConstantOrPersistentTensor(op_context.shape1) &&
IsConstantOrPersistentTensor(op_context.shape2)) {
SetTensorToPersistentRo(op_context.output);
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, op_context.output,
output_shape));
return EvalImpl(context, node);
}
return context->ResizeTensor(context, op_context.output, output_shape);
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
BroadcastArgsContext op_context(context, node);
if (IsConstantOrPersistentTensor(op_context.output)) {
return kTfLiteOk;
} else {
return EvalImpl(context, node);
}
}
TfLiteStatus EvalImpl(TfLiteContext* context, TfLiteNode* node) {
BroadcastArgsContext op_context(context, node);
#define TF_LITE_BROADCAST_ARG(data_type) \
reference_ops::BroadcastArgs(GetTensorShape(op_context.shape1), \
GetTensorData<data_type>(op_context.shape1), \
GetTensorShape(op_context.shape2), \
GetTensorData<data_type>(op_context.shape2), \
GetTensorShape(op_context.output), \
GetTensorData<data_type>(op_context.output))
if (op_context.output->type == kTfLiteInt32) {
TF_LITE_BROADCAST_ARG(int32_t);
} else {
TF_LITE_BROADCAST_ARG(int64_t);
}
#undef TF_LITE_BROADCAST_ARG
return kTfLiteOk;
}
}
TfLiteRegistration* Register_BROADCAST_ARGS() {
static TfLiteRegistration r = {nullptr, nullptr, broadcast_args::Prepare,
broadcast_args::Eval};
return &r;
}
}
}
} | #include <cstdint>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/core/kernels/register.h"
#include "tensorflow/lite/core/model.h"
#include "tensorflow/lite/kernels/test_util.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
template <class ShapeType = int32_t>
class BroadcastArgsOpModel : public SingleOpModel {
public:
BroadcastArgsOpModel(std::initializer_list<ShapeType> input1,
std::initializer_list<ShapeType> input2,
bool constant_tensor) {
int input1_length = input1.size();
int input2_length = input2.size();
if (constant_tensor) {
shape1_ =
AddConstInput({GetTensorType<ShapeType>(), {input1_length}}, input1);
shape2_ =
AddConstInput({GetTensorType<ShapeType>(), {input2_length}}, input2);
} else {
shape1_ = AddInput({GetTensorType<ShapeType>(), {input1_length}});
shape2_ = AddInput({GetTensorType<ShapeType>(), {input2_length}});
}
output_ = AddOutput(GetTensorType<ShapeType>());
SetBuiltinOp(BuiltinOperator_BROADCAST_ARGS, BuiltinOptions_NONE, 0);
BuildInterpreter({{input1_length}, {input2_length}});
if (!constant_tensor) {
if (input1.size() > 0) SetInput1(input1);
if (input2.size() > 0) SetInput2(input2);
}
}
void SetInput1(std::initializer_list<ShapeType> data) {
PopulateTensor(shape1_, data);
}
void SetInput2(std::initializer_list<ShapeType> data) {
PopulateTensor(shape2_, data);
}
std::vector<ShapeType> GetOutput() {
return ExtractVector<ShapeType>(output_);
}
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
protected:
int shape1_;
int shape2_;
int output_;
};
template <typename T>
class BroadcastArgsOpTest : public ::testing::Test {};
using DataTypes = ::testing::Types<int64_t, int32_t>;
TYPED_TEST_SUITE(BroadcastArgsOpTest, DataTypes);
#if GTEST_HAS_DEATH_TEST
TYPED_TEST(BroadcastArgsOpTest, ShapeNotBroadcastableConstant) {
EXPECT_DEATH(BroadcastArgsOpModel<TypeParam> m({2, 3, 4, 4}, {2, 2},
true),
"");
}
TYPED_TEST(BroadcastArgsOpTest, ShapeNotBroadcastable) {
BroadcastArgsOpModel<TypeParam> m({2, 3, 4, 4}, {2, 2},
false);
EXPECT_DEATH(ASSERT_EQ(m.Invoke(), kTfLiteOk), "");
}
#endif
TYPED_TEST(BroadcastArgsOpTest, BroadcastArgsWithScalar) {
for (bool constant_tensor : {true, false}) {
BroadcastArgsOpModel<TypeParam> m({}, {2, 4}, constant_tensor);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({2, 4}));
}
}
TYPED_TEST(BroadcastArgsOpTest, BroadcastArgsDifferentDims) {
for (bool constant_tensor : {true, false}) {
BroadcastArgsOpModel<TypeParam> m({1}, {2, 4}, constant_tensor);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({2, 4}));
}
}
TYPED_TEST(BroadcastArgsOpTest, BroadcastArgsSameDims) {
for (bool constant_tensor : {true, false}) {
BroadcastArgsOpModel<TypeParam> m({1, 4, 6, 3, 1, 5}, {4, 4, 1, 3, 4, 1},
constant_tensor);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({6}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({4, 4, 6, 3, 4, 5}));
}
}
TYPED_TEST(BroadcastArgsOpTest, BroadcastArgsComplex) {
for (bool constant_tensor : {true, false}) {
BroadcastArgsOpModel<TypeParam> m({6, 3, 1, 5}, {4, 4, 1, 3, 4, 1},
constant_tensor);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({6}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({4, 4, 6, 3, 4, 5}));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/broadcast_args.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/broadcast_args_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ca7e9c6e-6253-4c51-8b8a-45f53ca74cd7 | cpp | tensorflow/tensorflow | maximum_minimum | tensorflow/lite/kernels/maximum_minimum.cc | tensorflow/lite/kernels/maximum_minimum_test.cc | #include "tensorflow/lite/kernels/internal/reference/maximum_minimum.h"
#include <stdint.h>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace maximum_minimum {
enum KernelType {
kReference,
kGenericOptimized,
};
constexpr int kInputTensor1 = 0;
constexpr int kInputTensor2 = 1;
constexpr int kOutputTensor = 0;
struct OpContext {
OpContext(TfLiteContext* context, TfLiteNode* node) {
input1 = GetInput(context, node, kInputTensor1);
input2 = GetInput(context, node, kInputTensor2);
output = GetOutput(context, node, kOutputTensor);
}
const TfLiteTensor* input1;
const TfLiteTensor* input2;
TfLiteTensor* output;
};
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
OpContext op_context(context, node);
TF_LITE_ENSURE_TYPES_EQ(context, op_context.input1->type,
op_context.input2->type);
op_context.output->type = op_context.input1->type;
bool requires_broadcast =
!HaveSameShapes(op_context.input1, op_context.input2);
TfLiteIntArray* output_size = nullptr;
if (requires_broadcast) {
TF_LITE_ENSURE_OK(
context, CalculateShapeForBroadcast(context, op_context.input1,
op_context.input2, &output_size));
} else {
output_size = TfLiteIntArrayCopy(op_context.input1->dims);
}
return context->ResizeTensor(context, op_context.output, output_size);
}
struct MaximumOp {
template <typename data_type>
static data_type op(data_type el1, data_type el2) {
return el1 > el2 ? el1 : el2;
}
};
struct MinimumOp {
template <typename data_type>
static data_type op(data_type el1, data_type el2) {
return el1 < el2 ? el1 : el2;
}
};
template <KernelType kernel_type, typename data_type, typename op_type>
void TFLiteOperation(TfLiteContext* context, TfLiteNode* node,
const OpContext& op_context) {
reference_ops::MaximumMinimumBroadcastSlow(
GetTensorShape(op_context.input1),
GetTensorData<data_type>(op_context.input1),
GetTensorShape(op_context.input2),
GetTensorData<data_type>(op_context.input2),
GetTensorShape(op_context.output),
GetTensorData<data_type>(op_context.output),
op_type::template op<data_type>);
}
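// int8 specializations: when broadcasting is needed they dispatch to the
// optimized broadcast kernels, otherwise they fall back to the slow
// element-wise reference implementation.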
template <>
void TFLiteOperation<maximum_minimum::kGenericOptimized, int8, MaximumOp>(
TfLiteContext* context, TfLiteNode* node, const OpContext& op_context) {
tflite::ArithmeticParams op_params;
const bool need_broadcast = optimized_ops::ProcessBroadcastShapes(
GetTensorShape(op_context.input1), GetTensorShape(op_context.input2),
&op_params);
if (need_broadcast) {
optimized_ops::BroadcastMaximumDispatch(
op_params, GetTensorShape(op_context.input1),
GetTensorData<int8>(op_context.input1),
GetTensorShape(op_context.input2),
GetTensorData<int8>(op_context.input2),
GetTensorShape(op_context.output),
GetTensorData<int8>(op_context.output), MaximumOp::template op<int8>);
return;
}
reference_ops::MaximumMinimumBroadcastSlow(
GetTensorShape(op_context.input1), GetTensorData<int8>(op_context.input1),
GetTensorShape(op_context.input2), GetTensorData<int8>(op_context.input2),
GetTensorShape(op_context.output), GetTensorData<int8>(op_context.output),
MaximumOp::template op<int8>);
}
template <>
void TFLiteOperation<maximum_minimum::kGenericOptimized, int8, MinimumOp>(
TfLiteContext* context, TfLiteNode* node, const OpContext& op_context) {
tflite::ArithmeticParams op_params;
const bool need_broadcast = optimized_ops::ProcessBroadcastShapes(
GetTensorShape(op_context.input1), GetTensorShape(op_context.input2),
&op_params);
if (need_broadcast) {
optimized_ops::BroadcastMinimumDispatch(
op_params, GetTensorShape(op_context.input1),
GetTensorData<int8>(op_context.input1),
GetTensorShape(op_context.input2),
GetTensorData<int8>(op_context.input2),
GetTensorShape(op_context.output),
GetTensorData<int8>(op_context.output), MinimumOp::template op<int8>);
return;
}
reference_ops::MaximumMinimumBroadcastSlow(
GetTensorShape(op_context.input1), GetTensorData<int8>(op_context.input1),
GetTensorShape(op_context.input2), GetTensorData<int8>(op_context.input2),
GetTensorShape(op_context.output), GetTensorData<int8>(op_context.output),
MinimumOp::template op<int8>);
}
template <KernelType kernel_type, typename OpType>
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
OpContext op_context(context, node);
if (NumElements(op_context.input1) == 0 ||
NumElements(op_context.input2) == 0) {
return kTfLiteOk;
}
switch (op_context.output->type) {
case kTfLiteFloat32: {
TFLiteOperation<kernel_type, float, OpType>(context, node, op_context);
break;
}
case kTfLiteUInt8:
TFLiteOperation<kernel_type, uint8_t, OpType>(context, node, op_context);
break;
case kTfLiteInt8:
TFLiteOperation<kernel_type, int8_t, OpType>(context, node, op_context);
break;
case kTfLiteInt32:
TFLiteOperation<kernel_type, int32_t, OpType>(context, node, op_context);
break;
case kTfLiteInt64:
TFLiteOperation<kernel_type, int64_t, OpType>(context, node, op_context);
break;
case kTfLiteInt16:
TFLiteOperation<kernel_type, int16_t, OpType>(context, node, op_context);
break;
default:
TF_LITE_KERNEL_LOG(context,
"Type %d is currently not supported by Maximum.",
op_context.output->type);
return kTfLiteError;
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_MAXIMUM_REF() {
static TfLiteRegistration r = {
nullptr, nullptr, maximum_minimum::Prepare,
maximum_minimum::Eval<maximum_minimum::kReference,
maximum_minimum::MaximumOp>};
return &r;
}
TfLiteRegistration* Register_MAXIMUM_GENERIC_OPT() {
static TfLiteRegistration r = {
nullptr, nullptr, maximum_minimum::Prepare,
maximum_minimum::Eval<maximum_minimum::kGenericOptimized,
maximum_minimum::MaximumOp>};
return &r;
}
TfLiteRegistration* Register_MINIMUM_REF() {
static TfLiteRegistration r = {
nullptr, nullptr, maximum_minimum::Prepare,
maximum_minimum::Eval<maximum_minimum::kReference,
maximum_minimum::MinimumOp>};
return &r;
}
TfLiteRegistration* Register_MINIMUM_GENERIC_OPT() {
static TfLiteRegistration r = {
nullptr, nullptr, maximum_minimum::Prepare,
maximum_minimum::Eval<maximum_minimum::kGenericOptimized,
maximum_minimum::MinimumOp>};
return &r;
}
TfLiteRegistration* Register_MAXIMUM() {
return Register_MAXIMUM_GENERIC_OPT();
}
TfLiteRegistration* Register_MINIMUM() {
return Register_MINIMUM_GENERIC_OPT();
}
}
}
} | #include <stdint.h>
#include <initializer_list>
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
template <class T>
class MaxMinOpModel : public SingleOpModel {
public:
MaxMinOpModel(tflite::BuiltinOperator op, const TensorData& input1,
const TensorData& input2, const TensorType& output) {
input1_ = AddInput(input1);
input2_ = AddInput(input2);
output_ = AddOutput(output);
SetBuiltinOp(op, BuiltinOptions_MaximumMinimumOptions,
CreateMaximumMinimumOptions(builder_).Union());
BuildInterpreter({GetShape(input1_), GetShape(input2_)});
}
MaxMinOpModel(tflite::BuiltinOperator op, const TensorData& input1,
const TensorData& input2,
std::initializer_list<T> input2_values,
const TensorType& output) {
input1_ = AddInput(input1);
input2_ = AddConstInput<T>(input2, input2_values);
output_ = AddOutput(output);
SetBuiltinOp(op, BuiltinOptions_MaximumMinimumOptions,
CreateMaximumMinimumOptions(builder_).Union());
BuildInterpreter({GetShape(input1_), GetShape(input2_)});
}
void SetInput1(std::initializer_list<T> data) {
PopulateTensor(input1_, data);
}
void SetInput2(std::initializer_list<T> data) {
PopulateTensor(input2_, data);
}
std::vector<T> GetOutput() { return ExtractVector<T>(output_); }
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
protected:
int input1_;
int input2_;
int output_;
};
template <typename data_type>
void TestModel(tflite::BuiltinOperator op, const TensorData& input1,
const TensorData& input2, const TensorData& output,
std::initializer_list<data_type> input1_values,
std::initializer_list<data_type> input2_values,
std::initializer_list<data_type> output_values,
int is_constant = false) {
std::unique_ptr<MaxMinOpModel<data_type>> m;
if (is_constant) {
m = std::make_unique<MaxMinOpModel<data_type>>(op, input1, input2,
input2_values, output.type);
} else {
m = std::make_unique<MaxMinOpModel<data_type>>(op, input1, input2,
output.type);
m->SetInput2(input2_values);
}
m->SetInput1(input1_values);
ASSERT_EQ(m->Invoke(), kTfLiteOk);
EXPECT_THAT(m->GetOutputShape(), ElementsAreArray(output.shape));
if constexpr (std::is_same_v<data_type, float>) {
EXPECT_THAT(m->GetOutput(), Pointwise(FloatingPointEq(), output_values));
} else {
EXPECT_THAT(m->GetOutput(), ElementsAreArray(output_values));
}
}
TEST(MaximumOpTest, FloatTest) {
std::initializer_list<float> data1 = {1.0, 0.0, -1.0, 11.0, -2.0, -1.44};
std::initializer_list<float> data2 = {-1.0, 0.0, 1.0, 12.0, -3.0, -1.43};
TestModel<float>(BuiltinOperator_MAXIMUM, {TensorType_FLOAT32, {3, 1, 2}},
{TensorType_FLOAT32, {3, 1, 2}},
{TensorType_FLOAT32, {3, 1, 2}}, data1, data2,
{1.0, 0.0, 1.0, 12.0, -2.0, -1.43});
TestModel<float>(BuiltinOperator_MINIMUM, {TensorType_FLOAT32, {3, 1, 2}},
{TensorType_FLOAT32, {3, 1, 2}},
{TensorType_FLOAT32, {3, 1, 2}}, data1, data2,
{-1.0, 0.0, -1.0, 11.0, -3.0, -1.44});
}
TEST(MaxMinOpTest, Uint8Test) {
std::initializer_list<uint8_t> data1 = {1, 0, 2, 11, 2, 23};
std::initializer_list<uint8_t> data2 = {0, 0, 1, 12, 255, 1};
TestModel<uint8_t>(BuiltinOperator_MAXIMUM, {TensorType_UINT8, {3, 1, 2}},
{TensorType_UINT8, {3, 1, 2}},
{TensorType_UINT8, {3, 1, 2}}, data1, data2,
{1, 0, 2, 12, 255, 23});
TestModel<uint8_t>(BuiltinOperator_MINIMUM, {TensorType_UINT8, {3, 1, 2}},
{TensorType_UINT8, {3, 1, 2}},
{TensorType_UINT8, {3, 1, 2}}, data1, data2,
{0, 0, 1, 11, 2, 1});
}
TEST(MaxMinOpTest, Int8Test) {
std::initializer_list<int8_t> data1 = {1, 0, 2, 11, 2, 23};
std::initializer_list<int8_t> data2 = {0, 0, 1, 12, 123, 1};
TestModel<int8_t>(BuiltinOperator_MAXIMUM, {TensorType_INT8, {3, 1, 2}},
{TensorType_INT8, {3, 1, 2}}, {TensorType_INT8, {3, 1, 2}},
data1, data2, {1, 0, 2, 12, 123, 23});
TestModel<int8_t>(BuiltinOperator_MINIMUM, {TensorType_INT8, {3, 1, 2}},
{TensorType_INT8, {3, 1, 2}}, {TensorType_INT8, {3, 1, 2}},
data1, data2, {0, 0, 1, 11, 2, 1});
}
TEST(MaxMinOpTest, Int16Test) {
std::initializer_list<int16_t> data1 = {-32768, 0, 2, 11, 2, 23};
std::initializer_list<int16_t> data2 = {0, 0, 1, 32767, 123, 1};
TestModel<int16_t>(BuiltinOperator_MAXIMUM, {TensorType_INT16, {3, 1, 2}},
{TensorType_INT16, {3, 1, 2}},
{TensorType_INT16, {3, 1, 2}}, data1, data2,
{0, 0, 2, 32767, 123, 23});
TestModel<int16_t>(BuiltinOperator_MINIMUM, {TensorType_INT16, {3, 1, 2}},
{TensorType_INT16, {3, 1, 2}},
{TensorType_INT16, {3, 1, 2}}, data1, data2,
{-32768, 0, 1, 11, 2, 1});
}
TEST(MaximumOpTest, FloatWithBroadcastTest) {
std::initializer_list<float> data1 = {1.0, 0.0, -1.0, -2.0, -1.44, 11.0};
std::initializer_list<float> data2 = {0.5, 2.0};
TestModel<float>(BuiltinOperator_MAXIMUM, {TensorType_FLOAT32, {3, 1, 2}},
{TensorType_FLOAT32, {2}}, {TensorType_FLOAT32, {3, 1, 2}},
data1, data2, {1.0, 2.0, 0.5, 2.0, 0.5, 11.0});
TestModel<float>(BuiltinOperator_MINIMUM, {TensorType_FLOAT32, {3, 1, 2}},
{TensorType_FLOAT32, {2}}, {TensorType_FLOAT32, {3, 1, 2}},
data1, data2, {0.5, 0.0, -1.0, -2.0, -1.44, 2.0});
}
TEST(MaximumOpTest, FloatWithBroadcastTest_ScalarY) {
std::initializer_list<float> data1 = {1.0, 0.0, -1.0, -2.0, -1.44, 11.0};
std::initializer_list<float> data2 = {0.5};
TestModel<float>(BuiltinOperator_MAXIMUM, {TensorType_FLOAT32, {3, 1, 2}},
{TensorType_FLOAT32, {}}, {TensorType_FLOAT32, {3, 1, 2}},
data1, data2, {1.0, 0.5, 0.5, 0.5, 0.5, 11.0},
true);
TestModel<float>(BuiltinOperator_MINIMUM, {TensorType_FLOAT32, {3, 1, 2}},
{TensorType_FLOAT32, {}}, {TensorType_FLOAT32, {3, 1, 2}},
data1, data2, {0.5, 0.0, -1.0, -2.0, -1.44, 0.5},
true);
}
TEST(MaximumOpTest, Int32WithBroadcastTest) {
std::initializer_list<int32_t> data1 = {1, 0, -1, -2, 3, 11};
std::initializer_list<int32_t> data2 = {2};
TestModel<int32_t>(BuiltinOperator_MAXIMUM, {TensorType_INT32, {3, 1, 2}},
{TensorType_INT32, {1}}, {TensorType_INT32, {3, 1, 2}},
data1, data2, {2, 2, 2, 2, 3, 11});
TestModel<int32_t>(BuiltinOperator_MINIMUM, {TensorType_INT32, {3, 1, 2}},
{TensorType_INT32, {1}}, {TensorType_INT32, {3, 1, 2}},
data1, data2, {1, 0, -1, -2, 2, 2});
}
TEST(MaximumOpTest, Int32WithBroadcastTest_ScalarY) {
std::initializer_list<int32_t> data1 = {1, 0, -1, -2, 3, 11};
std::initializer_list<int32_t> data2 = {2};
TestModel<int32_t>(BuiltinOperator_MAXIMUM, {TensorType_INT32, {3, 1, 2}},
{TensorType_INT32, {}}, {TensorType_INT32, {3, 1, 2}},
data1, data2, {2, 2, 2, 2, 3, 11}, true);
TestModel<int32_t>(BuiltinOperator_MINIMUM, {TensorType_INT32, {3, 1, 2}},
{TensorType_INT32, {}}, {TensorType_INT32, {3, 1, 2}},
data1, data2, {1, 0, -1, -2, 2, 2}, true);
}
TEST(MaximumOpTest, Int8WithBroadcastTest_ScalarY) {
std::initializer_list<int8_t> data1 = {1, 0, -1, -2, 3, 11};
std::initializer_list<int8_t> data2 = {2};
TestModel<int8_t>(BuiltinOperator_MAXIMUM, {TensorType_INT8, {3, 1, 2}},
{TensorType_INT8, {}}, {TensorType_INT8, {3, 1, 2}}, data1,
data2, {2, 2, 2, 2, 3, 11}, true);
TestModel<int8_t>(BuiltinOperator_MINIMUM, {TensorType_INT8, {3, 1, 2}},
{TensorType_INT8, {}}, {TensorType_INT8, {3, 1, 2}}, data1,
data2, {1, 0, -1, -2, 2, 2}, true);
}
TEST(MaxMinOpTest, Int8Test8D) {
std::initializer_list<int8_t> data1 = {1, 0, 2, 11, 2, 23};
std::initializer_list<int8_t> data2 = {0, 0, 1, 12, 123, 1};
TestModel<int8_t>(BuiltinOperator_MAXIMUM,
{TensorType_INT8, {3, 1, 2, 1, 1, 1, 1, 1}},
{TensorType_INT8, {3, 1, 2, 1, 1, 1, 1, 1}},
{TensorType_INT8, {3, 1, 2, 1, 1, 1, 1, 1}}, data1, data2,
{1, 0, 2, 12, 123, 23});
TestModel<int8_t>(BuiltinOperator_MINIMUM,
{TensorType_INT8, {3, 1, 2, 1, 1, 1, 1, 1}},
{TensorType_INT8, {3, 1, 2, 1, 1, 1, 1, 1}},
{TensorType_INT8, {3, 1, 2, 1, 1, 1, 1, 1}}, data1, data2,
{0, 0, 1, 11, 2, 1});
}
TEST(MaximumOpTest, FloatWithBroadcastTest5D) {
std::initializer_list<float> data1 = {1.0, 0.0, -1.0, -2.0, -1.44, 11.0};
std::initializer_list<float> data2 = {0.5, 2.0};
TestModel<float>(
BuiltinOperator_MAXIMUM, {TensorType_FLOAT32, {3, 1, 1, 1, 2}},
{TensorType_FLOAT32, {2}}, {TensorType_FLOAT32, {3, 1, 1, 1, 2}}, data1,
data2, {1.0, 2.0, 0.5, 2.0, 0.5, 11.0});
TestModel<float>(
BuiltinOperator_MINIMUM, {TensorType_FLOAT32, {3, 1, 1, 1, 2}},
{TensorType_FLOAT32, {2}}, {TensorType_FLOAT32, {3, 1, 1, 1, 2}}, data1,
data2, {0.5, 0.0, -1.0, -2.0, -1.44, 2.0});
}
TEST(MaximumOpTest, Int32WithBroadcastTest5D) {
std::initializer_list<int32_t> data1 = {1, 0, -1, -2, 3, 11};
std::initializer_list<int32_t> data2 = {2};
TestModel<int32_t>(
BuiltinOperator_MAXIMUM, {TensorType_INT32, {3, 1, 2, 1, 1}},
{TensorType_INT32, {1}}, {TensorType_INT32, {3, 1, 2, 1, 1}}, data1,
data2, {2, 2, 2, 2, 3, 11});
TestModel<int32_t>(
BuiltinOperator_MINIMUM, {TensorType_INT32, {3, 1, 2, 1, 1}},
{TensorType_INT32, {1}}, {TensorType_INT32, {3, 1, 2, 1, 1}}, data1,
data2, {1, 0, -1, -2, 2, 2});
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/maximum_minimum.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/maximum_minimum_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
84de1a0c-21bf-43f6-a9d7-127424df4d8d | cpp | tensorflow/tensorflow | bucketize | tensorflow/lite/kernels/bucketize.cc | tensorflow/lite/kernels/bucketize_test.cc | #include <stdint.h>
#include <algorithm>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace bucketize {
namespace {
constexpr int kInputTensor = 0;
constexpr int kOutputTensor = 0;
struct OpData {
const float* boundaries;
int num_boundaries;
};
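// Parses the bucket boundaries out of the builtin options. On big-endian
// hosts the float boundaries arrive byte-swapped from the flatbuffer, so each
// 32-bit word is swapped back in place before use.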
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
auto* op_data = new OpData();
const auto* params = reinterpret_cast<const TfLiteBucketizeParams*>(buffer);
if (!FLATBUFFERS_LITTLEENDIAN) {
int32_t* p =
reinterpret_cast<int32_t*>(const_cast<float*>(params->boundaries));
for (size_t i = 0; i < params->num_boundaries; i++, p++)
*p = flatbuffers::EndianSwap(*p);
}
op_data->boundaries = params->boundaries;
op_data->num_boundaries = params->num_boundaries;
return op_data;
}
void Free(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<OpData*>(buffer);
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
OpData* opdata = reinterpret_cast<OpData*>(node->user_data);
if (!std::is_sorted(opdata->boundaries,
opdata->boundaries + opdata->num_boundaries)) {
TF_LITE_KERNEL_LOG(context, "Expected sorted boundaries");
return kTfLiteError;
}
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
if (input->type != kTfLiteInt32 && input->type != kTfLiteFloat32 &&
input->type != kTfLiteInt64 && input->type != kTfLiteFloat64) {
TF_LITE_KERNEL_LOG(context, "Type '%s' is not supported by bucketize.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
output->type = kTfLiteInt32;
TfLiteIntArray* output_shape = TfLiteIntArrayCopy(input->dims);
return context->ResizeTensor(context, output, output_shape);
}
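// Maps every element to the index of the first boundary strictly greater than
// it (std::upper_bound). With boundaries {0, 10, 100}, an input of -5 falls
// into bucket 0 and 150 into bucket 3.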
template <typename T>
inline void Bucketize(const RuntimeShape& input_shape, const T* input_data,
const float* boundaries, int num_boundaries,
const RuntimeShape& output_shape, int32_t* output_data) {
const int flat_size = MatchingFlatSize(input_shape, output_shape);
for (int i = 0; i < flat_size; i++) {
auto first_bigger_it = std::upper_bound(
boundaries, boundaries + num_boundaries, input_data[i]);
output_data[i] = first_bigger_it - boundaries;
}
}
template <typename T>
TfLiteStatus BucketizeImpl(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
OpData* opdata = reinterpret_cast<OpData*>(node->user_data);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteInt32);
Bucketize<T>(GetTensorShape(input), GetTensorData<T>(input),
opdata->boundaries, opdata->num_boundaries,
GetTensorShape(output), GetTensorData<int32_t>(output));
return kTfLiteOk;
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
switch (input->type) {
case kTfLiteFloat32: {
return BucketizeImpl<float>(context, node);
}
case kTfLiteFloat64: {
return BucketizeImpl<double>(context, node);
}
case kTfLiteInt32: {
return BucketizeImpl<int32_t>(context, node);
}
case kTfLiteInt64: {
return BucketizeImpl<int64_t>(context, node);
}
default: {
TF_LITE_KERNEL_LOG(context, "Type '%s' is not supported by bucketize.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
}
}
}
}
TfLiteRegistration* Register_BUCKETIZE() {
static TfLiteRegistration r = {bucketize::Init, bucketize::Free,
bucketize::Prepare, bucketize::Eval};
return &r;
}
}
}
} | #include <stdint.h>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
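// Thin test wrapper around a single BUCKETIZE op: the boundaries are baked
// into the builtin options and the output is always int32 with the input's
// shape.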
template <typename T>
class BucketizeOpModel : public SingleOpModel {
public:
BucketizeOpModel(const TensorData& input,
const std::vector<float>& boundaries) {
input_ = AddInput(input);
boundaries_ = boundaries;
output_ = AddOutput({TensorType_INT32, input.shape});
SetBuiltinOp(BuiltinOperator_BUCKETIZE, BuiltinOptions_BucketizeOptions,
CreateBucketizeOptions(
builder_, builder_.CreateVector<float>(boundaries_))
.Union());
BuildInterpreter({GetShape(input_)});
}
int input() { return input_; }
const std::vector<float>& boundaries() { return boundaries_; }
std::vector<int> GetOutput() { return ExtractVector<int>(output_); }
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
private:
int input_;
std::vector<float> boundaries_;
int output_;
};
TEST(BucketizeOpTest, Float) {
BucketizeOpModel<float> model(
{TensorType_FLOAT32, {3, 2}},
{0.0f, 10.0f, 100.0f});
model.PopulateTensor<float>(model.input(),
{-5.0f, 10000.0f, 150.0f, 10.0f, 5.0f, 100.0f});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(3, 2));
EXPECT_THAT(model.GetOutput(), ElementsAreArray({0, 3, 3, 2, 1, 3}));
}
TEST(BucketizeOpTest, Int32) {
BucketizeOpModel<int32_t> model(
{TensorType_INT32, {3, 2}},
{0, 10, 100});
model.PopulateTensor<int32_t>(model.input(), {-5, 10000, 150, 10, 5, 100});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(3, 2));
EXPECT_THAT(model.GetOutput(), ElementsAreArray({0, 3, 3, 2, 1, 3}));
}
#if GTEST_HAS_DEATH_TEST
TEST(BucketizeOpTest, UnsortedBuckets) {
EXPECT_DEATH(BucketizeOpModel<float>(
{TensorType_INT32, {3, 2}},
{0, 10, -10}),
"Expected sorted boundaries");
}
#endif
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/bucketize.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/bucketize_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1ea6cafa-d2ab-4fd4-aa3e-15210d5072f6 | cpp | tensorflow/tensorflow | pack | tensorflow/lite/kernels/pack.cc | tensorflow/lite/delegates/hexagon/builders/tests/pack_test.cc | #include <stdint.h>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace pack {
namespace {
constexpr int kOutputTensor = 0;
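// Validates that all inputs share the same shape and type and that their
// quantization params match the output's, normalizes a negative axis, and
// sizes the output by inserting a new dimension of length values_count at
// that axis.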
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TfLitePackParams* data =
reinterpret_cast<TfLitePackParams*>(node->builtin_data);
TF_LITE_ENSURE_EQ(context, NumInputs(node), data->values_count);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input0;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input0));
const int dimension_size = NumDimensions(input0) + 1;
if (data->axis < 0) {
data->axis += dimension_size;
}
TF_LITE_ENSURE(context, NumDimensions(input0) >= data->axis);
TF_LITE_ENSURE(context, data->axis >= 0);
if (input0->type != kTfLiteInt32 && input0->type != kTfLiteFloat32 &&
input0->type != kTfLiteUInt8 && input0->type != kTfLiteUInt32 &&
input0->type != kTfLiteInt8 && input0->type != kTfLiteInt16 &&
input0->type != kTfLiteInt64) {
TF_LITE_KERNEL_LOG(context, "Type '%s' is not supported by pack.",
TfLiteTypeGetName(input0->type));
return kTfLiteError;
}
for (int i = 1; i < data->values_count; ++i) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, i, &input));
TF_LITE_ENSURE(context, HaveSameShapes(input0, input));
TF_LITE_ENSURE_TYPES_EQ(context, input0->type, input->type);
}
const TfLiteIntArray* input_shape = input0->dims;
TfLiteIntArray* output_shape = TfLiteIntArrayCreate(dimension_size);
int i = 0;
for (int index = 0; index < dimension_size; ++index) {
if (index == data->axis) {
output_shape->data[index] = data->values_count;
} else {
output_shape->data[index] = input_shape->data[i++];
}
}
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TF_LITE_ENSURE_TYPES_EQ(context, output->type, input0->type);
for (int i = 0; i < data->values_count; i++) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, i, &input));
TF_LITE_ENSURE_EQ(context, input->params.zero_point,
output->params.zero_point);
TF_LITE_ENSURE_EQ(context, input->params.scale, output->params.scale);
}
return context->ResizeTensor(context, output, output_shape);
}
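// Stacks the values_count inputs along `axis` with the reference Pack kernel;
// instantiated once per element width below.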
template <typename T>
TfLiteStatus PackImpl(TfLiteContext* context, TfLiteNode* node,
TfLiteTensor* output, int values_count, int axis) {
TF_LITE_ENSURE(context, axis >= 0);
VectorOfTensors<T> all_inputs(*context, *node->inputs);
tflite::PackParams op_params;
op_params.axis = axis;
op_params.inputs_count = values_count;
reference_ops::Pack<T>(op_params, all_inputs.shapes(), all_inputs.data(),
GetTensorShape(output), GetTensorData<T>(output));
return kTfLiteOk;
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLitePackParams* data =
reinterpret_cast<TfLitePackParams*>(node->builtin_data);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
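  // Packing only moves bytes, so types of equal width share an instantiation
  // (int8/uint8, and float32/int32/uint32).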
switch (output->type) {
case kTfLiteInt8:
case kTfLiteUInt8:
return PackImpl<int8_t>(context, node, output, data->values_count,
data->axis);
case kTfLiteInt16:
return PackImpl<int16_t>(context, node, output, data->values_count,
data->axis);
case kTfLiteFloat32:
case kTfLiteInt32:
case kTfLiteUInt32:
return PackImpl<int32_t>(context, node, output, data->values_count,
data->axis);
case kTfLiteInt64:
return PackImpl<int64_t>(context, node, output, data->values_count,
data->axis);
default: {
TF_LITE_KERNEL_LOG(context, "Type '%s' is not supported by pack.",
TfLiteTypeGetName(output->type));
return kTfLiteError;
}
}
}
}
}
TfLiteRegistration* Register_PACK() {
static TfLiteRegistration r = {nullptr, nullptr, pack::Prepare, pack::Eval};
return &r;
}
}
}
} | #include <initializer_list>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/delegates/hexagon/builders/tests/hexagon_delegate_op_model.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
using testing::ElementsAreArray;
class PackOpModel : public SingleOpModelWithHexagon {
public:
PackOpModel(const TensorData& input_template, int axis, int values_count) {
std::vector<std::vector<int>> all_input_shapes;
for (int i = 0; i < values_count; ++i) {
all_input_shapes.push_back(input_template.shape);
AddInput(input_template);
}
output_ = AddOutput({input_template.type, {}, input_template.min,
input_template.max});
SetBuiltinOp(BuiltinOperator_PACK, BuiltinOptions_PackOptions,
CreatePackOptions(builder_, values_count, axis).Union());
BuildInterpreter(all_input_shapes);
}
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
template <typename integer_type>
void SetInput(int index, std::initializer_list<float> data) {
QuantizeAndPopulate<integer_type>(index, data);
}
template <typename integer_type>
std::vector<float> GetDequantizedOutput() {
return Dequantize<integer_type>(ExtractVector<integer_type>(output_),
GetScale(output_), GetZeroPoint(output_));
}
private:
int output_;
};
template <typename InputType>
struct PackOpTest : public ::testing::Test {
using TypeToTest = InputType;
TensorType TENSOR_TYPE =
(std::is_same<InputType, int16_t>::value
? TensorType_INT16
: (std::is_same<InputType, uint8_t>::value ? TensorType_UINT8
: TensorType_INT8));
};
using TestTypes = testing::Types<int8_t, uint8_t>;
TYPED_TEST_SUITE(PackOpTest, TestTypes);
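// Each test first invokes on the CPU to capture reference outputs, then
// applies the Hexagon delegate and checks its results against that reference.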
TYPED_TEST(PackOpTest, ThreeInputs) {
PackOpModel model({TestFixture::TENSOR_TYPE, {2}, -10, 10}, 0, 3);
model.SetInput<typename TestFixture::TypeToTest>(0, {1, 4});
model.SetInput<typename TestFixture::TypeToTest>(1, {2, 5});
model.SetInput<typename TestFixture::TypeToTest>(2, {3, 6});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
auto ref_output_shape = model.GetOutputShape();
auto ref_output =
model.GetDequantizedOutput<typename TestFixture::TypeToTest>();
model.ApplyDelegateAndInvoke();
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray(ref_output_shape));
EXPECT_THAT(model.GetDequantizedOutput<typename TestFixture::TypeToTest>(),
ElementsAreArray(ArrayFloatNear(ref_output)));
}
TYPED_TEST(PackOpTest, ThreeInputsDifferentAxis) {
PackOpModel model({TestFixture::TENSOR_TYPE, {2}, -10, 10}, 1, 3);
model.SetInput<typename TestFixture::TypeToTest>(0, {1, 4});
model.SetInput<typename TestFixture::TypeToTest>(1, {2, 5});
model.SetInput<typename TestFixture::TypeToTest>(2, {3, 6});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
auto ref_output_shape = model.GetOutputShape();
auto ref_output =
model.GetDequantizedOutput<typename TestFixture::TypeToTest>();
model.ApplyDelegateAndInvoke();
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray(ref_output_shape));
EXPECT_THAT(model.GetDequantizedOutput<typename TestFixture::TypeToTest>(),
ElementsAreArray(ArrayFloatNear(ref_output)));
}
TYPED_TEST(PackOpTest, ThreeInputsNegativeAxis) {
PackOpModel model({TestFixture::TENSOR_TYPE, {2}, -10, 10}, -1, 3);
model.SetInput<typename TestFixture::TypeToTest>(0, {1, 4});
model.SetInput<typename TestFixture::TypeToTest>(1, {2, 5});
model.SetInput<typename TestFixture::TypeToTest>(2, {3, 6});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
auto ref_output_shape = model.GetOutputShape();
auto ref_output =
model.GetDequantizedOutput<typename TestFixture::TypeToTest>();
model.ApplyDelegateAndInvoke();
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray(ref_output_shape));
EXPECT_THAT(model.GetDequantizedOutput<typename TestFixture::TypeToTest>(),
ElementsAreArray(ArrayFloatNear(ref_output)));
}
TYPED_TEST(PackOpTest, MultilDimensions) {
PackOpModel model({TestFixture::TENSOR_TYPE, {2, 3}, -10, 20}, 1, 2);
model.SetInput<typename TestFixture::TypeToTest>(0, {1, 2, 3, 4, 5, 6});
model.SetInput<typename TestFixture::TypeToTest>(1, {7, 8, 9, 10, 11, 12});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
auto ref_output_shape = model.GetOutputShape();
auto ref_output =
model.GetDequantizedOutput<typename TestFixture::TypeToTest>();
model.ApplyDelegateAndInvoke();
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray(ref_output_shape));
EXPECT_THAT(model.GetDequantizedOutput<typename TestFixture::TypeToTest>(),
ElementsAreArray(ArrayFloatNear(ref_output)));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/pack.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/hexagon/builders/tests/pack_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
35cd33cd-4013-4952-895a-f99341d3a88c | cpp | tensorflow/tensorflow | tile | tensorflow/compiler/tf2tensorrt/convert/ops/tile.cc | tensorflow/lite/delegates/gpu/cl/kernels/tile_test.cc | #if GOOGLE_CUDA && GOOGLE_TENSORRT
#include "tensorflow/compiler/tf2tensorrt/convert/convert_nodes.h"
#include "tensorflow/compiler/tf2tensorrt/convert/op_converter_registry.h"
#include "tensorflow/compiler/tf2tensorrt/convert/ops/layer_utils.h"
namespace tensorflow {
namespace tensorrt {
namespace convert {
class ConvertTile : public OpConverterBase<ConvertTile> {
public:
explicit ConvertTile(const OpConverterParams *params)
: OpConverterBase<ConvertTile>(
params,
{DataType::DT_FLOAT, DataType::DT_HALF, DataType::DT_INT32}) {}
static constexpr std::array<InputArgSpec, 2> InputSpec() {
return std::array<InputArgSpec, 2>{
InputArgSpec::Create("input_tensor", TrtInputArg::kBoth),
InputArgSpec::Create("weight", TrtInputArg::kBoth)};
}
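  // Checks the replication vector: it must be int32; when given as weights it
  // must have one strictly positive entry per input dimension (and may not
  // tile the batch dimension in implicit batch mode); when given as a runtime
  // tensor it must be 1-D with a matching element count, and runtime
  // multipliers are rejected entirely in implicit batch mode.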
Status Validate() {
const auto ¶ms = *this->params_;
const auto &inputs = params.inputs;
const auto &repl = inputs.at(1);
if (params.use_implicit_batch && repl.is_tensor()) {
return errors::InvalidArgument(
"Conversion for Tile is not implemented for multipliers "
"passed as a tensor in implicit batch mode.");
}
nvinfer1::DataType dtype;
const int *multiplies;
if (repl.is_weights()) {
TFTRT_CHECK_SHAPE_TENSOR(repl.weights().GetTensor());
dtype = repl.weights().TrtDType();
multiplies = repl.weights().GetPointer<int>();
} else {
dtype = repl.tensor()->getType();
multiplies = nullptr;
}
const auto &node = params.node_def;
TF_RETURN_IF_ERROR(check_type(dtype, nvinfer1::DataType::kINT32, node, 1));
const auto dims = inputs.at(0).GetTrtDims();
const auto nb_dims =
dims.nbDims +
(params.use_implicit_batch && inputs.at(0).is_tensor() ? 1 : 0);
if (multiplies) {
const int mult_numb = repl.weights().count();
if (mult_numb != nb_dims) {
return errors::InvalidArgument(
"The length of the replication vector (", mult_numb,
") of the Tile operation in '", node.name(),
"' is expected to be equal to the rank of the input vector (",
nb_dims, ").");
}
if (std::any_of(multiplies, multiplies + nb_dims,
[](int i) { return i <= 0; })) {
const auto &mul = absl::StrJoin(multiplies, multiplies + nb_dims, ", ");
return errors::InvalidArgument(
"All replications of the Tile operation in '", node.name(),
"' should be positive, got (", mul, ").");
}
if (params.use_implicit_batch && multiplies[0] > 1) {
return errors::Unimplemented(
"The Tile operation along the batch dimension in '", node.name(),
"' is not implemented.");
}
} else {
const auto &repl_dims = repl.GetTrtDims();
if (repl_dims.nbDims != 1) {
return errors::InvalidArgument(
"When replications are defined as a tensor, that tensor must be "
"1-dimensional. Got ",
repl_dims.nbDims, "-dimensional tensor.");
}
if (repl_dims.d[0] >= 0 && repl_dims.d[0] != nb_dims) {
return errors::InvalidArgument(
"When replications are defined as a tensor, "
"the number of its elements (",
repl_dims.d[0], ") must be equal to the rank of the input tensor (",
nb_dims, ").");
}
}
return OkStatus();
}
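  // Implements Tile with a TensorRT slice layer in kWRAP mode: the slice
  // output size is input_dim * multiplier per axis (computed from the weights
  // when static, or via shape * multipliers at runtime), and wrapping repeats
  // the input to fill that larger output.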
Status Convert() {
const auto ¶ms = *this->params_;
const auto &inputs = params.inputs;
auto *converter = params.converter;
auto *network = converter->network();
const auto &tensor = inputs.at(0);
const auto &replics = inputs.at(1);
const auto dims = tensor.GetTrtDims();
const auto nb_dims = dims.nbDims;
nvinfer1::Dims output_size{nb_dims, {1}};
bool dynamic_flag = replics.is_tensor() || !HasStaticShape(dims);
if (!dynamic_flag) {
const auto dim_offset =
params.use_implicit_batch && tensor.is_tensor() ? 1 : 0;
const auto *input_size = dims.d;
const int *pReplics = replics.weights().GetPointer<int>() + dim_offset;
for (int i = 0; i < nb_dims; i++)
output_size.d[i] = pReplics[i] * input_size[i];
}
StatusOr<TRTNetworkBuilder> builder;
if (tensor.is_weights() || (dynamic_flag && replics.is_weights())) {
builder =
TRTNetworkBuilder::Create(converter->network(), params.weight_store);
TRT_ENSURE_OK(builder);
}
ITensorProxyPtr input_tensor;
if (tensor.is_weights()) {
StatusOr<nvinfer1::IConstantLayer *> weights_const =
builder->WeightsToConstant(tensor.weights().GetTrtWeights(), dims);
TRT_ENSURE_PTR_OK(weights_const);
input_tensor = (*weights_const)->getOutput(0);
} else {
input_tensor = tensor.tensor();
}
auto &input_trt_tensor = *input_tensor->trt_tensor();
nvinfer1::ITensor *target_shape = nullptr;
if (dynamic_flag) {
nvinfer1::ITensor *mult;
if (replics.is_weights()) {
StatusOr<nvinfer1::IConstantLayer *> weights_const =
builder->WeightsToConstant(replics.weights().GetTrtWeights(),
replics.GetTrtDims());
TRT_ENSURE_PTR_OK(weights_const);
mult = (*weights_const)->getOutput(0);
} else {
const ITensorProxyPtr multiplies = replics.tensor()->trt_tensor();
mult = multiplies->trt_tensor();
}
nvinfer1::ITensor *shape =
network->addShape(input_trt_tensor)->getOutput(0);
target_shape = network
->addElementWise(*shape, *mult,
nvinfer1::ElementWiseOperation::kPROD)
->getOutput(0);
}
nvinfer1::Dims start{nb_dims, {}};
DimsAdapter stride(std::vector<int>(nb_dims, 1));
auto layer = network->addSlice(input_trt_tensor, start, output_size,
stride.AsTrtDims());
layer->setMode(nvinfer1::SliceMode::kWRAP);
if (target_shape) layer->setInput(2, *target_shape);
converter->SetLayerName(layer, params.node_def.name(), "to_tile");
ITensorProxyPtr output_tensor = layer->getOutput(0);
if (tensor.is_weights() && params.use_implicit_batch) {
DimsAdapter adap(output_tensor->getDimensions());
TF_RETURN_IF_ERROR(adap.RemoveBatchDimension());
TF_RETURN_IF_ERROR(PrepareTensorForShape(
params.converter, TRT_TensorOrWeights(output_tensor),
adap.AsTrtDims(), false, &output_tensor, params.node_def));
}
AddOutput(TRT_TensorOrWeights(output_tensor));
return OkStatus();
}
};
REGISTER_DEFAULT_TRT_OP_CONVERTER(MakeConverterFunction<ConvertTile>(), "Tile");
}
}
}
#endif | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tasks/tile_test_util.h"
namespace tflite {
namespace gpu {
namespace cl {
TEST_F(OpenCLOperationTest, TileChannels) {
auto status = TileChannelsTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, TileChannelsX4) {
auto status = TileChannelsX4Test(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, TileWidth) {
auto status = TileWidthTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, TileHeight) {
auto status = TileHeightTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, TileHWC) {
auto status = TileHWCTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2tensorrt/convert/ops/tile.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/kernels/tile_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2cefcc3a-e62a-4763-accb-bb2b96cb0243 | cpp | tensorflow/tensorflow | split_v | tensorflow/lite/kernels/split_v.cc | tensorflow/lite/kernels/split_v_test.cc | #include <stdint.h>
#include <vector>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace split_v {
struct OpContext {
OpContext(TfLiteContext* context, TfLiteNode* node) {
params = reinterpret_cast<TfLiteSplitVParams*>(node->builtin_data);
input = GetInput(context, node, 0);
size_splits = GetInput(context, node, 1);
axis = GetInput(context, node, 2);
}
TfLiteSplitVParams* params;
const TfLiteTensor* input;
const TfLiteTensor* size_splits;
const TfLiteTensor* axis;
};
TfLiteStatus UseDynamicOutputTensors(TfLiteContext* context, TfLiteNode* node) {
for (int i = 0; i < NumOutputs(node); ++i) {
TfLiteTensor* tensor;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, i, &tensor));
SetTensorToDynamic(tensor);
}
return kTfLiteOk;
}
template <typename T>
void GetSizeSplitsVector(const TfLiteTensor* size_splits,
std::vector<int64_t>* size_splits_vector) {
const auto num_elements = NumElements(size_splits);
for (int i = 0; i < num_elements; ++i) {
size_splits_vector->push_back(GetTensorData<T>(size_splits)[i]);
}
}
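// Resolves the requested split sizes: at most one entry may be -1, which is
// replaced by whatever remains of the input dimension; otherwise the sizes
// must sum exactly to it. E.g. an axis of length 8 with sizes
// {1, 1, 1, 1, 1, 1, 2, -1} leaves the last output with size 0.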
TfLiteStatus ResizeOutputTensors(TfLiteContext* context, TfLiteNode* node,
const TfLiteTensor* input,
const TfLiteTensor* size_splits,
const TfLiteTensor* axis) {
int axis_value = GetTensorData<int>(axis)[0];
if (axis_value < 0) {
axis_value += NumDimensions(input);
}
std::vector<int64_t> size_splits_vector;
if (size_splits->type == kTfLiteInt32) {
GetSizeSplitsVector<int32_t>(size_splits, &size_splits_vector);
} else if (size_splits->type == kTfLiteInt64) {
GetSizeSplitsVector<int64_t>(size_splits, &size_splits_vector);
} else {
    TF_LITE_KERNEL_LOG(context, "size_splits only supports type int32|int64.");
return kTfLiteError;
}
int minus_one_index = -1;
int64_t size_splits_sum = 0;
for (int i = 0; i < size_splits_vector.size(); ++i) {
if (size_splits_vector.at(i) == -1) {
if (minus_one_index == -1) {
minus_one_index = i;
} else {
TF_LITE_KERNEL_LOG(context,
"The size_splits contains more than one -1.");
return kTfLiteError;
}
} else {
size_splits_sum += size_splits_vector.at(i);
}
}
TF_LITE_ENSURE(context, axis_value >= 0);
TF_LITE_ENSURE(context, axis_value < NumDimensions(input));
const int input_size = SizeOfDimension(input, axis_value);
if (minus_one_index != -1) {
if (size_splits_sum > input_size) {
TF_LITE_KERNEL_LOG(
context,
"The sum of size_splits must be less than the dimension of value.");
return kTfLiteError;
} else {
size_splits_vector[minus_one_index] = input_size - size_splits_sum;
}
} else if (size_splits_sum != input_size) {
TF_LITE_KERNEL_LOG(
context,
"The size_splits must sum to the dimension of value along axis.");
return kTfLiteError;
}
for (int i = 0; i < NumOutputs(node); ++i) {
TfLiteIntArray* output_dims = TfLiteIntArrayCopy(input->dims);
output_dims->data[axis_value] = size_splits_vector.at(i);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, i, &output));
TF_LITE_ENSURE_STATUS(context->ResizeTensor(context, output, output_dims));
}
return kTfLiteOk;
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);
OpContext op_context(context, node);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), op_context.params->num_splits);
auto input_type = op_context.input->type;
TF_LITE_ENSURE(context,
input_type == kTfLiteFloat32 || input_type == kTfLiteUInt8 ||
input_type == kTfLiteInt16 || input_type == kTfLiteInt32 ||
input_type == kTfLiteInt64 || input_type == kTfLiteInt8);
for (int i = 0; i < NumOutputs(node); ++i) {
TfLiteTensor* tensor;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, i, &tensor));
tensor->type = input_type;
}
auto size_splits = op_context.size_splits;
TF_LITE_ENSURE_EQ(context, NumDimensions(size_splits), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), NumElements(size_splits));
if (IsConstantOrPersistentTensor(op_context.size_splits) &&
IsConstantOrPersistentTensor(op_context.axis)) {
return ResizeOutputTensors(context, node, op_context.input,
op_context.size_splits, op_context.axis);
} else {
return UseDynamicOutputTensors(context, node);
}
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
OpContext op_context(context, node);
if (!IsConstantOrPersistentTensor(op_context.axis) ||
!IsConstantOrPersistentTensor(op_context.size_splits)) {
TF_LITE_ENSURE_OK(
context, ResizeOutputTensors(context, node, op_context.input,
op_context.size_splits, op_context.axis));
}
int axis_value = GetTensorData<int>(op_context.axis)[0];
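// Instantiates the reference Split kernel for the given scalar type over all
// output tensors of the node.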
#define TF_LITE_SPLIT_V(scalar) \
VectorOfTensors<scalar> all_outputs(*context, *node->outputs); \
tflite::SplitParams op_params; \
op_params.num_split = NumOutputs(node); \
op_params.axis = axis_value; \
reference_ops::Split(op_params, GetTensorShape(op_context.input), \
GetTensorData<scalar>(op_context.input), \
all_outputs.shapes(), all_outputs.data());
switch (op_context.input->type) {
case kTfLiteFloat32: {
TF_LITE_SPLIT_V(float);
break;
}
case kTfLiteUInt8: {
TF_LITE_SPLIT_V(uint8_t);
break;
}
case kTfLiteInt16: {
TF_LITE_SPLIT_V(int16_t);
break;
}
case kTfLiteInt32: {
TF_LITE_SPLIT_V(int32_t);
break;
}
case kTfLiteInt64: {
TF_LITE_SPLIT_V(int64_t);
break;
}
case kTfLiteInt8: {
TF_LITE_SPLIT_V(int8_t);
break;
}
default:
TF_LITE_KERNEL_LOG(context, "Type %s currently not supported.",
TfLiteTypeGetName(op_context.input->type));
return kTfLiteError;
}
#undef TF_LITE_SPLIT_V
return kTfLiteOk;
}
}
TfLiteRegistration* Register_SPLIT_V() {
static TfLiteRegistration r = {nullptr, nullptr, split_v::Prepare,
split_v::Eval};
return &r;
}
}
}
} | #include <stdint.h>
#include <initializer_list>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
constexpr int kAxisIsATensor = -1000;
enum class TestType {
kDynamic = 0,
kConstAxis = 1,
kConstSplits = 2,
};
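// Every case below runs in three configurations: fully dynamic (axis and
// sizes fed at runtime), constant axis with runtime sizes, and constant axis
// plus constant sizes.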
class SplitVOpModel : public SingleOpModel {
public:
SplitVOpModel(const TensorData& input, const TensorData& size_splits,
int num_splits, int axis,
std::initializer_list<int> size_splits_data) {
input_ = AddInput(input);
if (size_splits_data.size() == 0) {
size_splits_ = AddInput(size_splits);
} else {
size_splits_ = AddConstInput(size_splits, size_splits_data);
}
if (axis == kAxisIsATensor) {
axis_ = AddInput({TensorType_INT32, {1}});
} else {
axis_ = AddConstInput(TensorType_INT32, {axis}, {1});
}
for (int i = 0; i < num_splits; ++i) {
outputs_.push_back(AddOutput(input.type));
}
SetBuiltinOp(BuiltinOperator_SPLIT_V, BuiltinOptions_SplitVOptions,
CreateSplitVOptions(builder_, num_splits).Union());
if (axis == kAxisIsATensor) {
BuildInterpreter(
{GetShape(input_), GetShape(size_splits_), GetShape(axis_)});
} else {
BuildInterpreter({GetShape(input_), GetShape(size_splits_), {}});
}
}
template <typename T>
void SetInput(std::initializer_list<T> data) {
PopulateTensor<T>(input_, data);
}
void SetSizeSplits(std::initializer_list<int> data) {
PopulateTensor(size_splits_, data);
}
void SetAxis(int axis) { PopulateTensor(axis_, {axis}); }
template <typename T>
std::vector<T> GetOutput(int i) {
return ExtractVector<T>(outputs_[i]);
}
std::vector<int> GetOutputShape(int i) { return GetTensorShape(outputs_[i]); }
private:
int input_;
int size_splits_;
int axis_;
std::vector<int> outputs_;
};
template <typename T>
void Check(TestType test_type, int axis, std::initializer_list<int> input_shape,
std::initializer_list<int> size_splits_shape,
std::vector<std::initializer_list<int>> output_shapes,
const std::initializer_list<T>& input_data,
const std::initializer_list<int>& size_splits_data,
const std::vector<std::initializer_list<T>>& output_data) {
int num_splits = size_splits_data.size();
switch (test_type) {
case TestType::kDynamic: {
SplitVOpModel m({GetTensorType<T>(), input_shape},
{TensorType_INT32, size_splits_shape}, num_splits,
kAxisIsATensor, {});
m.SetInput<T>(input_data);
m.SetSizeSplits(size_splits_data);
m.SetAxis(axis);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
for (int i = 0; i < num_splits; ++i) {
EXPECT_THAT(m.GetOutput<T>(i), ElementsAreArray(output_data[i]));
EXPECT_THAT(m.GetOutputShape(i), ElementsAreArray(output_shapes[i]));
}
} break;
case TestType::kConstAxis: {
SplitVOpModel m({GetTensorType<T>(), input_shape},
{TensorType_INT32, size_splits_shape}, num_splits, axis,
{});
m.SetInput<T>(input_data);
m.SetSizeSplits(size_splits_data);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
for (int i = 0; i < num_splits; ++i) {
EXPECT_THAT(m.GetOutput<T>(i), ElementsAreArray(output_data[i]));
EXPECT_THAT(m.GetOutputShape(i), ElementsAreArray(output_shapes[i]));
}
} break;
case TestType::kConstSplits: {
SplitVOpModel m({GetTensorType<T>(), input_shape},
{TensorType_INT32, size_splits_shape}, num_splits, axis,
size_splits_data);
m.SetInput<T>(input_data);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
for (int i = 0; i < num_splits; ++i) {
EXPECT_THAT(m.GetOutputShape(i), ElementsAreArray(output_shapes[i]));
if (output_data[i].size() != 0) {
EXPECT_THAT(m.GetOutput<T>(i), ElementsAreArray(output_data[i]));
}
}
} break;
}
}
template <typename T>
class SplitVOpTypedTest : public ::testing::Test {};
using DataTypes = ::testing::Types<float, uint8_t, int8_t, int16_t, int32_t>;
TYPED_TEST_SUITE(SplitVOpTypedTest, DataTypes);
#define TYPED_SPLIT_V_TEST(TestSuiteName, CaseName) \
template <typename TypeParam> \
void Check##TestSuiteName##CaseName(TestType test_type); \
\
TYPED_TEST(TestSuiteName, Dynamic##CaseName) { \
Check##TestSuiteName##CaseName<TypeParam>(TestType::kDynamic); \
} \
TYPED_TEST(TestSuiteName, ConstAxis##CaseName) { \
Check##TestSuiteName##CaseName<TypeParam>(TestType::kConstAxis); \
} \
TYPED_TEST(TestSuiteName, ConstSplits##CaseName) { \
Check##TestSuiteName##CaseName<TypeParam>(TestType::kConstSplits); \
} \
\
template <typename TypeParam> \
void Check##TestSuiteName##CaseName(TestType test_type)
TYPED_SPLIT_V_TEST(SplitVOpTypedTest, TwoDimensional) {
Check<TypeParam>(test_type,
0, {4, 3}, {3}, {{1, 3}, {1, 3}, {2, 3}},
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}, {1, 1, 2},
{{1, 2, 3}, {4, 5, 6}, {7, 8, 9, 10, 11, 12}});
}
TYPED_SPLIT_V_TEST(SplitVOpTypedTest, FourDimensional) {
Check<TypeParam>(test_type,
0, {2, 2, 2, 2}, {2}, {{1, 2, 2, 2}, {1, 2, 2, 2}},
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
{1, 1},
{
{1, 2, 3, 4, 5, 6, 7, 8},
{9, 10, 11, 12, 13, 14, 15, 16},
});
Check<TypeParam>(test_type,
1, {2, 2, 2, 2}, {2}, {{2, 1, 2, 2}, {2, 1, 2, 2}},
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
{1, -1},
{
{1, 2, 3, 4, 9, 10, 11, 12},
{5, 6, 7, 8, 13, 14, 15, 16},
});
Check<TypeParam>(test_type,
2, {2, 2, 2, 2}, {2}, {{2, 2, 1, 2}, {2, 2, 1, 2}},
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
{1, 1},
{
{1, 2, 5, 6, 9, 10, 13, 14},
{3, 4, 7, 8, 11, 12, 15, 16},
});
Check<TypeParam>(test_type,
3, {2, 2, 2, 2}, {2}, {{2, 2, 2, 1}, {2, 2, 2, 1}},
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
{1, 1},
{
{1, 3, 5, 7, 9, 11, 13, 15},
{2, 4, 6, 8, 10, 12, 14, 16},
});
}
TYPED_SPLIT_V_TEST(SplitVOpTypedTest, OneDimensional) {
Check<TypeParam>(test_type,
0, {8}, {8},
{{1}, {1}, {1}, {1}, {1}, {1}, {1}, {1}},
{1, 2, 3, 4, 5, 6, 7, 8}, {1, 1, 1, 1, 1, 1, 1, 1},
{{1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}});
}
TYPED_SPLIT_V_TEST(SplitVOpTypedTest, OneDimensional2) {
Check<TypeParam>(test_type,
0, {8}, {8},
{{1}, {1}, {1}, {1}, {1}, {1}, {2}, {0}},
{1, 2, 3, 4, 5, 6, 7, 8}, {1, 1, 1, 1, 1, 1, 2, -1},
{{1}, {2}, {3}, {4}, {5}, {6}, {7, 8}, {}});
}
TYPED_SPLIT_V_TEST(SplitVOpTypedTest, NegativeAxis) {
Check<TypeParam>(test_type,
-4, {2, 2, 2, 2}, {2}, {{1, 2, 2, 2}, {1, 2, 2, 2}},
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
{1, 1},
{
{1, 2, 3, 4, 5, 6, 7, 8},
{9, 10, 11, 12, 13, 14, 15, 16},
});
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/split_v.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/split_v_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
570e99a0-b00f-43b7-841a-542291098e88 | cpp | tensorflow/tensorflow | batch_matmul | tensorflow/lite/kernels/batch_matmul.cc | tensorflow/lite/kernels/batch_matmul_test.cc | #include "tensorflow/lite/kernels/internal/reference/batch_matmul.h"
#include <stddef.h>
#include <algorithm>
#include <cstdint>
#include <limits>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/optimized/batch_matmul.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/tensor_utils.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace batch_matmul {
static const int kInputLHSTensor = 0;
static const int kInputRHSTensor = 1;
static const int kOutputTensor = 0;
static const int kNumTempTensorsForAdjoints = 2;
static const int kNumTempTensorsForHybrid = 5;
enum KernelType {
kReference,
kGenericOptimized,
};
struct OpData {
int32_t output_multiplier;
int output_shift;
int32_t output_activation_min;
int32_t output_activation_max;
int scratch_tensor_index;
bool rhs_transposed;
bool compute_row_sums = false;
};
struct OpContext {
OpContext(TfLiteContext* context, TfLiteNode* node) {
params = reinterpret_cast<TfLiteBatchMatMulParams*>(node->builtin_data);
lhs = GetInput(context, node, kInputLHSTensor);
rhs = GetInput(context, node, kInputRHSTensor);
output = GetOutput(context, node, 0);
}
TfLiteBatchMatMulParams* params;
const TfLiteTensor* lhs;
const TfLiteTensor* rhs;
TfLiteTensor* output;
};
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
auto* op_data = new OpData();
op_data->rhs_transposed = false;
context->AddTensors(context,
kNumTempTensorsForAdjoints + kNumTempTensorsForHybrid,
&op_data->scratch_tensor_index);
return op_data;
}
void Free(TfLiteContext* context, void* buffer) {
delete static_cast<OpData*>(buffer);
}
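// Computes the output shape: leading dimensions broadcast (a 1 matches
// anything) and the trailing two come from the LHS rows and RHS columns,
// honoring adj_x / adj_y. E.g. lhs [2, 3, 4] x rhs [4, 5] with no adjoints
// gives [2, 3, 5].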
TfLiteStatus ResizeOutputTensor(TfLiteContext* context,
const RuntimeShape& extended_lhs_shape,
const RuntimeShape& extended_rhs_shape,
bool adj_x, bool adj_y, int output_rank,
TfLiteTensor* output) {
TfLiteIntArray* output_shape = TfLiteIntArrayCreate(output_rank);
for (int i = 0; i < output_rank - 2; ++i) {
const int lhs_dim = extended_lhs_shape.Dims(i);
const int rhs_dim = extended_rhs_shape.Dims(i);
int broadcast_dim = lhs_dim;
if ((lhs_dim != rhs_dim) && (lhs_dim == 1)) {
broadcast_dim = rhs_dim;
}
output_shape->data[i] = broadcast_dim;
}
int lhs_rows_index = adj_x ? output_rank - 1 : output_rank - 2;
int rhs_cols_index = adj_y ? output_rank - 2 : output_rank - 1;
output_shape->data[output_rank - 2] = extended_lhs_shape.Dims(lhs_rows_index);
output_shape->data[output_rank - 1] = extended_rhs_shape.Dims(rhs_cols_index);
TfLiteStatus stat = context->ResizeTensor(context, output, output_shape);
return stat;
}
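// Allocates scratch tensors: indices 0 and 1 hold row/column-transposed
// copies of the LHS and RHS; for the hybrid (float LHS, int8 RHS) case,
// indices 2..6 additionally hold the quantized input, scaling factors,
// accumulator scratch, input offsets and cached row sums.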
TfLiteStatus InitializeTemporaries(TfLiteContext* context, TfLiteNode* node,
OpContext* op_context) {
OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* lhs = op_context->lhs;
const TfLiteTensor* rhs = op_context->rhs;
TfLiteIntArrayFree(node->temporaries);
bool is_hybrid =
(op_context->lhs->type == kTfLiteFloat32 && rhs->type == kTfLiteInt8);
if (is_hybrid) {
node->temporaries = TfLiteIntArrayCreate(kNumTempTensorsForAdjoints +
kNumTempTensorsForHybrid);
} else {
node->temporaries = TfLiteIntArrayCreate(kNumTempTensorsForAdjoints);
}
const int lhs_rank = NumDimensions(lhs);
const int rhs_rank = NumDimensions(rhs);
const int batch_size = op_context->params->adj_x
? lhs->dims->data[lhs_rank - 1]
: lhs->dims->data[lhs_rank - 2];
const int num_units = op_context->params->adj_y
? rhs->dims->data[rhs_rank - 2]
: rhs->dims->data[rhs_rank - 1];
{
node->temporaries->data[0] = op_data->scratch_tensor_index;
TfLiteTensor* scratch_buffer;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, 0, &scratch_buffer));
TfLiteIntArray* scratch_buffer_size = TfLiteIntArrayCreate(lhs_rank);
for (int i = 0; i < lhs_rank - 2; ++i) {
scratch_buffer_size->data[i] = lhs->dims->data[i];
}
scratch_buffer_size->data[lhs_rank - 2] = lhs->dims->data[lhs_rank - 1];
scratch_buffer_size->data[lhs_rank - 1] = lhs->dims->data[lhs_rank - 2];
scratch_buffer->type = op_context->lhs->type;
scratch_buffer->allocation_type = kTfLiteArenaRw;
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scratch_buffer,
scratch_buffer_size));
}
{
node->temporaries->data[1] = op_data->scratch_tensor_index + 1;
TfLiteTensor* scratch_buffer;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, 1, &scratch_buffer));
scratch_buffer->name = "BatchMatMul_scratch_buffer";
const TfLiteTensor* rhs = op_context->rhs;
int rhs_rank = NumDimensions(rhs);
TfLiteIntArray* scratch_buffer_size = TfLiteIntArrayCreate(rhs_rank);
for (int i = 0; i < rhs_rank - 2; ++i) {
scratch_buffer_size->data[i] = rhs->dims->data[i];
}
scratch_buffer_size->data[rhs_rank - 2] = rhs->dims->data[rhs_rank - 1];
scratch_buffer_size->data[rhs_rank - 1] = rhs->dims->data[rhs_rank - 2];
if (IsConstantTensor(op_context->rhs)) {
scratch_buffer->allocation_type = kTfLiteArenaRwPersistent;
} else {
scratch_buffer->allocation_type = kTfLiteArenaRw;
}
scratch_buffer->type = op_context->rhs->type;
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scratch_buffer,
scratch_buffer_size));
}
if (is_hybrid) {
int num_batches = 1;
for (int i = 0; i < lhs_rank - 2; ++i) {
num_batches *= lhs->dims->data[i];
}
int num_weights_matrices = 1;
for (int i = 0; i < rhs_rank - 2; ++i) {
num_weights_matrices *= rhs->dims->data[i];
}
op_data->compute_row_sums = true;
node->temporaries->data[2] = op_data->scratch_tensor_index + 2;
TfLiteTensor* input_quantized;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, 2,
&input_quantized));
input_quantized->type = op_context->rhs->type;
input_quantized->allocation_type = kTfLiteArenaRw;
TfLiteIntArray* input_quantized_size =
TfLiteIntArrayCopy(op_context->lhs->dims);
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, input_quantized,
input_quantized_size));
node->temporaries->data[3] = op_data->scratch_tensor_index + 3;
TfLiteTensor* scaling_factors;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, 3,
&scaling_factors));
scaling_factors->type = kTfLiteFloat32;
scaling_factors->allocation_type = kTfLiteArenaRw;
int scaling_dims[1] = {num_batches * batch_size};
if (!TfLiteIntArrayEqualsArray(scaling_factors->dims, 1, scaling_dims)) {
TfLiteIntArray* scaling_factors_size = TfLiteIntArrayCreate(1);
scaling_factors_size->data[0] = scaling_dims[0];
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scaling_factors,
scaling_factors_size));
}
node->temporaries->data[4] = op_data->scratch_tensor_index + 4;
TfLiteTensor* accum_scratch;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, 4, &accum_scratch));
accum_scratch->type = kTfLiteInt32;
accum_scratch->allocation_type = kTfLiteArenaRw;
int accum_scratch_dims[2] = {num_units, batch_size};
if (!TfLiteIntArrayEqualsArray(accum_scratch->dims, 2,
accum_scratch_dims)) {
TfLiteIntArray* accum_size = TfLiteIntArrayCreate(2);
accum_size->data[0] = num_units;
accum_size->data[1] = batch_size;
TF_LITE_ENSURE_OK(
context, context->ResizeTensor(context, accum_scratch, accum_size));
}
node->temporaries->data[5] = op_data->scratch_tensor_index + 5;
TfLiteTensor* input_offsets;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, 5, &input_offsets));
input_offsets->type = kTfLiteInt32;
input_offsets->allocation_type = kTfLiteArenaRw;
if (!TfLiteIntArrayEqualsArray(input_offsets->dims, 1, scaling_dims)) {
TfLiteIntArray* input_offsets_size = TfLiteIntArrayCreate(1);
input_offsets_size->data[0] = num_batches * batch_size;
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, input_offsets,
input_offsets_size));
}
node->temporaries->data[6] = op_data->scratch_tensor_index + 6;
TfLiteTensor* row_sums;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, 6, &row_sums));
row_sums->type = kTfLiteInt32;
row_sums->allocation_type = kTfLiteArenaRwPersistent;
int row_sums_dims[1] = {num_weights_matrices * num_units};
if (!TfLiteIntArrayEqualsArray(row_sums->dims, 1, row_sums_dims)) {
TfLiteIntArray* row_sums_size = TfLiteIntArrayCreate(1);
row_sums_size->data[0] = row_sums_dims[0];
TF_LITE_ENSURE_OK(
context, context->ResizeTensor(context, row_sums, row_sums_size));
}
}
return kTfLiteOk;
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
OpContext op_context(context, node);
TF_LITE_ENSURE_OK(context, InitializeTemporaries(context, node, &op_context));
OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
bool adj_x = op_context.params->adj_x;
bool adj_y = op_context.params->adj_y;
const TfLiteTensor* lhs_data;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputLHSTensor, &lhs_data));
const TfLiteTensor* rhs_data;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputRHSTensor, &rhs_data));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
if ((lhs_data->type == kTfLiteInt8 || lhs_data->type == kTfLiteInt16) &&
output->type != kTfLiteInt32) {
double real_multiplier = 0.0;
TF_LITE_ENSURE_STATUS(GetQuantizedConvolutionMultipler(
context, lhs_data, rhs_data, output, &real_multiplier));
int exponent;
QuantizeMultiplier(real_multiplier, &op_data->output_multiplier, &exponent);
op_data->output_shift = exponent;
if (lhs_data->type == kTfLiteInt8) {
op_data->output_activation_min = std::numeric_limits<int8_t>::min();
op_data->output_activation_max = std::numeric_limits<int8_t>::max();
} else {
op_data->output_activation_min = std::numeric_limits<int16_t>::min();
op_data->output_activation_max = std::numeric_limits<int16_t>::max();
}
}
if (lhs_data->type == kTfLiteInt16) {
TF_LITE_ENSURE_EQ(context, lhs_data->params.zero_point, 0);
TF_LITE_ENSURE_EQ(context, rhs_data->params.zero_point, 0);
TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0);
}
TF_LITE_ENSURE(context, lhs_data->type == kTfLiteFloat32 ||
lhs_data->type == kTfLiteInt8 ||
lhs_data->type == kTfLiteInt16);
TF_LITE_ENSURE(context, rhs_data->type == kTfLiteFloat32 ||
rhs_data->type == kTfLiteInt8 ||
rhs_data->type == kTfLiteInt16);
TF_LITE_ENSURE(context, (lhs_data->type == kTfLiteFloat32 &&
rhs_data->type == kTfLiteInt8) ||
lhs_data->type == rhs_data->type);
TF_LITE_ENSURE(context, NumDimensions(lhs_data) >= 2);
TF_LITE_ENSURE(context, NumDimensions(lhs_data) <= 5);
TF_LITE_ENSURE(context, NumDimensions(rhs_data) >= 2);
TF_LITE_ENSURE(context, NumDimensions(rhs_data) <= 5);
const int lhs_rank = NumDimensions(lhs_data);
const int rhs_rank = NumDimensions(rhs_data);
const int output_rank = std::max(lhs_rank, rhs_rank);
const RuntimeShape extended_lhs_shape =
RuntimeShape::ExtendedShape(output_rank, GetTensorShape(lhs_data));
const RuntimeShape extended_rhs_shape =
RuntimeShape::ExtendedShape(output_rank, GetTensorShape(rhs_data));
for (int i = 0; i < output_rank - 2; ++i) {
const int lhs_dim = extended_lhs_shape.Dims(i);
const int rhs_dim = extended_rhs_shape.Dims(i);
if (lhs_dim != rhs_dim) {
if (lhs_dim != 1) {
TF_LITE_ENSURE_EQ(context, rhs_dim, 1);
}
}
}
int accum_dim_lhs = adj_x ? extended_lhs_shape.Dims(output_rank - 2)
: extended_lhs_shape.Dims(output_rank - 1);
int accum_dim_rhs = adj_y ? extended_rhs_shape.Dims(output_rank - 1)
: extended_rhs_shape.Dims(output_rank - 2);
TF_LITE_ENSURE_EQ(context, accum_dim_lhs, accum_dim_rhs);
TfLiteStatus status =
ResizeOutputTensor(context, extended_lhs_shape, extended_rhs_shape, adj_x,
adj_y, output_rank, output);
return status;
}
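// Transposes only the last two dimensions of a tensor, leaving any leading
// batch dimensions untouched.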
template <typename scalar>
void TransposeRowsColumnsImpl(const TfLiteTensor* tensor_in,
const scalar* input, TfLiteTensor* tensor_out,
scalar* output) {
RuntimeShape transposed_shape(GetTensorShape(tensor_in));
RuntimeShape shape(GetTensorShape(tensor_in));
TransposeParams params;
int rank = NumDimensions(tensor_in);
params.perm_count = rank;
for (int i = 0; i < rank - 2; ++i) {
params.perm[i] = i;
}
params.perm[rank - 2] = rank - 1;
params.perm[rank - 1] = rank - 2;
transposed_shape.SetDim(rank - 1, shape.Dims(rank - 2));
transposed_shape.SetDim(rank - 2, shape.Dims(rank - 1));
optimized_ops::Transpose(params, shape, input, transposed_shape, output);
}
TfLiteStatus TransposeRowsColumns(TfLiteContext* context,
const TfLiteTensor* tensor_in,
TfLiteTensor* tensor_out) {
if (tensor_in->type == kTfLiteFloat32) {
TransposeRowsColumnsImpl<float>(tensor_in, GetTensorData<float>(tensor_in),
tensor_out,
GetTensorData<float>(tensor_out));
return kTfLiteOk;
} else if (tensor_in->type == kTfLiteInt8) {
TransposeRowsColumnsImpl<int8_t>(
tensor_in, GetTensorData<int8_t>(tensor_in), tensor_out,
GetTensorData<int8_t>(tensor_out));
return kTfLiteOk;
} else if (tensor_in->type == kTfLiteInt16) {
TransposeRowsColumnsImpl<int16_t>(
tensor_in, GetTensorData<int16_t>(tensor_in), tensor_out,
GetTensorData<int16_t>(tensor_out));
return kTfLiteOk;
} else {
TF_LITE_KERNEL_LOG(
context, "Can only transpose tensors with float, int8 or int16 type.");
return kTfLiteError;
}
}
RuntimeShape SwapRowColumnDims(const RuntimeShape& shape) {
RuntimeShape swapped_shape(shape);
const int32_t dims = shape.DimensionsCount();
swapped_shape.SetDim(dims - 2, shape.Dims(dims - 1));
swapped_shape.SetDim(dims - 1, shape.Dims(dims - 2));
return swapped_shape;
}
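// Hybrid path: the float input is quantized to int8 on the fly (per batch),
// multiplied against the int8 weights, and the result is rescaled back to
// float using the computed scaling factors (plus zero-point / row-sum
// corrections when asymmetric quantization is enabled).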
TfLiteStatus EvalHybrid(TfLiteContext* context, TfLiteNode* node, OpData* data,
const RuntimeShape& input_shape,
const TfLiteTensor* input,
const RuntimeShape& filter_shape,
const TfLiteTensor* filter,
TfLiteTensor* input_quantized,
TfLiteTensor* scaling_factors,
TfLiteTensor* accum_scratch, TfLiteTensor* row_sums,
TfLiteTensor* input_offsets, TfLiteTensor* output) {
const auto* params =
reinterpret_cast<TfLiteBatchMatMulParams*>(node->builtin_data);
const int32_t num_input_dims = input_shape.DimensionsCount();
const int input_size = input_shape.Dims(num_input_dims - 2);
const int batch_size = input_shape.Dims(num_input_dims - 1);
int num_batches_to_quantize = batch_size;
for (int i = 0; i < input_shape.DimensionsCount() - 2; ++i) {
num_batches_to_quantize *= input_shape.Dims(i);
}
const int scaling_factor_size = GetTensorShape(scaling_factors).FlatSize();
TF_LITE_ENSURE(context, scaling_factor_size >= num_batches_to_quantize);
float* scaling_factors_ptr = GetTensorData<float>(scaling_factors);
int32_t* input_offset_ptr = nullptr;
int32_t* row_sums_ptr = nullptr;
input_offset_ptr = GetTensorData<int32_t>(input_offsets);
row_sums_ptr = GetTensorData<int32_t>(row_sums);
if (!params->asymmetric_quantize_inputs) {
memset(input_offset_ptr, 0, input_offsets->bytes);
}
int8_t* quant_data = GetTensorData<int8_t>(input_quantized);
const int8_t* filter_data = GetTensorData<int8_t>(filter);
const float* input_ptr = GetTensorData<float>(input);
tensor_utils::BatchQuantizeFloats(input_ptr, num_batches_to_quantize,
input_size, quant_data, scaling_factors_ptr,
input_offset_ptr,
params->asymmetric_quantize_inputs);
for (int b = 0; b < num_batches_to_quantize; ++b) {
scaling_factors_ptr[b] *= filter->params.scale;
}
RuntimeShape output_shape = GetTensorShape(output);
int output_size = 1;
for (int i = 0; i < output_shape.DimensionsCount(); ++i) {
output_size *= output_shape.Dims(i);
}
std::fill_n(GetTensorData<float>(output), output_size, 0.0f);
reference_ops::BatchMatMul(
filter_shape, filter_data, input_shape, quant_data, scaling_factors_ptr,
input_offset_ptr, row_sums_ptr, GetTensorShape(output),
GetTensorData<float>(output), &(data->compute_row_sums));
return kTfLiteOk;
}
template <KernelType kernel_type>
TfLiteStatus EvalInt8Int8(TfLiteContext* context, const OpData* data,
const RuntimeShape& lhs_shape,
const TfLiteTensor* lhs,
const RuntimeShape& rhs_shape,
const TfLiteTensor* rhs,
const RuntimeShape& output_shape,
TfLiteTensor* output, bool transpose_lhs) {
FullyConnectedParams op_params;
int32_t input_offset = -lhs->params.zero_point;
int32_t filter_offset = -rhs->params.zero_point;
int32_t output_offset = output->params.zero_point;
op_params.input_offset = input_offset;
op_params.weights_offset = filter_offset;
op_params.output_offset = output_offset;
op_params.output_multiplier = data->output_multiplier;
op_params.output_shift = data->output_shift;
op_params.quantized_activation_min = data->output_activation_min;
op_params.quantized_activation_max = data->output_activation_max;
op_params.lhs_cacheable = IsConstantTensor(lhs);
op_params.rhs_cacheable = IsConstantTensor(rhs);
if (kernel_type == kReference) {
reference_ops::BatchMatMul<int8_t, int32_t>(
op_params, rhs_shape, GetTensorData<int8_t>(rhs), lhs_shape,
GetTensorData<int8_t>(lhs), GetTensorShape(output),
GetTensorData<int8_t>(output));
} else {
optimized_ops::BatchMatMul(
op_params, rhs_shape, GetTensorData<int8_t>(rhs), lhs_shape,
GetTensorData<int8_t>(lhs), GetTensorShape(output),
GetTensorData<int8_t>(output),
CpuBackendContext::GetFromContext(context), transpose_lhs);
}
return kTfLiteOk;
}
template <KernelType kernel_type>
TfLiteStatus EvalInt8Int32(TfLiteContext* context, const OpData* data,
const RuntimeShape& lhs_shape,
const TfLiteTensor* lhs,
const RuntimeShape& rhs_shape,
const TfLiteTensor* rhs,
const RuntimeShape& output_shape,
TfLiteTensor* output, bool transpose_lhs) {
FullyConnectedParams op_params;
int32_t input_offset = -lhs->params.zero_point;
int32_t weights_offset = -rhs->params.zero_point;
int32_t output_offset = output->params.zero_point;
op_params.input_offset = input_offset;
op_params.weights_offset = weights_offset;
op_params.output_offset = output_offset;
op_params.output_multiplier = data->output_multiplier;
op_params.output_shift = data->output_shift;
op_params.quantized_activation_min = data->output_activation_min;
op_params.quantized_activation_max = data->output_activation_max;
op_params.lhs_cacheable = IsConstantTensor(lhs);
op_params.rhs_cacheable = IsConstantTensor(rhs);
if (kernel_type == kReference) {
reference_ops::BatchMatMul<int8, int8, int32>(
rhs_shape, GetTensorData<int8>(rhs), lhs_shape,
GetTensorData<int8>(lhs), GetTensorShape(output),
GetTensorData<int32>(output));
} else {
optimized_ops::BatchMatMul(
op_params, rhs_shape, GetTensorData<int8_t>(rhs), lhs_shape,
GetTensorData<int8_t>(lhs), GetTensorShape(output),
GetTensorData<int32_t>(output),
CpuBackendContext::GetFromContext(context), transpose_lhs);
}
return kTfLiteOk;
}
template <KernelType kernel_type>
TfLiteStatus EvalInt16(TfLiteContext* context, const OpData* data,
const RuntimeShape& lhs_shape, const TfLiteTensor* lhs,
const RuntimeShape& rhs_shape, const TfLiteTensor* rhs,
const RuntimeShape& output_shape, TfLiteTensor* output) {
FullyConnectedParams op_params;
int32_t input_offset = -lhs->params.zero_point;
int32_t filter_offset = -rhs->params.zero_point;
int32_t output_offset = output->params.zero_point;
op_params.input_offset = input_offset;
op_params.weights_offset = filter_offset;
op_params.output_offset = output_offset;
op_params.output_multiplier = data->output_multiplier;
op_params.output_shift = data->output_shift;
op_params.quantized_activation_min = data->output_activation_min;
op_params.quantized_activation_max = data->output_activation_max;
reference_ops::BatchMatMul<int16_t, int64_t>(
op_params, rhs_shape, GetTensorData<int16_t>(rhs), lhs_shape,
GetTensorData<int16_t>(lhs), GetTensorShape(output),
GetTensorData<int16_t>(output));
return kTfLiteOk;
}
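// Dispatches on operand types: float LHS with int8 RHS takes the hybrid path;
// int8 x int8 produces int8 or int32 output; int16 x int16 uses the 16-bit
// reference kernel.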
template <KernelType kernel_type>
TfLiteStatus EvalQuantized(TfLiteContext* context, TfLiteNode* node,
OpData* data, const RuntimeShape& lhs_shape,
const TfLiteTensor* lhs,
const RuntimeShape& rhs_shape,
const TfLiteTensor* rhs, TfLiteTensor* output,
bool transpose_lhs) {
if (lhs->type == kTfLiteFloat32 && rhs->type == kTfLiteInt8) {
TfLiteTensor* input_quantized;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, 2,
&input_quantized));
TfLiteTensor* scaling_factors;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, 3,
&scaling_factors));
TfLiteTensor* accum_scratch;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, 4, &accum_scratch));
TfLiteTensor* input_offsets;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, 5, &input_offsets));
TfLiteTensor* row_sums;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, 6, &row_sums));
return EvalHybrid(context, node, data, lhs_shape, lhs, rhs_shape, rhs,
input_quantized, scaling_factors, accum_scratch, row_sums,
input_offsets, output);
} else if (lhs->type == kTfLiteInt8 && rhs->type == kTfLiteInt8) {
if (output->type == kTfLiteInt8) {
return EvalInt8Int8<kernel_type>(context, data, lhs_shape, lhs, rhs_shape,
rhs, GetTensorShape(output), output,
transpose_lhs);
} else {
return EvalInt8Int32<kernel_type>(context, data, lhs_shape, lhs,
rhs_shape, rhs, GetTensorShape(output),
output, transpose_lhs);
}
} else if (lhs->type == kTfLiteInt16 && rhs->type == kTfLiteInt16) {
return EvalInt16<kernel_type>(context, data, lhs_shape, lhs, rhs_shape, rhs,
GetTensorShape(output), output);
} else {
TF_LITE_KERNEL_LOG(
context,
"Currently only hybrid, int8 and int16 quantization are supported.\n");
return kTfLiteError;
}
return kTfLiteOk;
}
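// Returns the temporary tensor that will hold the transposed RHS, propagating
// the quantization parameters for int8/int16 inputs.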
TfLiteTensor* GetTempRhs(TfLiteContext* context, TfLiteNode* node,
const TfLiteTensor* rhs) {
TfLiteTensor* transposed_rhs = GetTemporary(context, node, 1);
if (transposed_rhs == nullptr) {
return nullptr;
}
if (rhs->type == kTfLiteInt8 || rhs->type == kTfLiteInt16) {
transposed_rhs->params.scale = rhs->params.scale;
transposed_rhs->params.zero_point = rhs->params.zero_point;
}
return transposed_rhs;
}
TfLiteTensor* GetTempLhs(TfLiteContext* context, TfLiteNode* node,
const TfLiteTensor* lhs) {
TfLiteTensor* transposed_lhs = GetTemporary(context, node, 0);
if (transposed_lhs == nullptr) {
return nullptr;
}
if (lhs->type == kTfLiteInt8 || lhs->type == kTfLiteInt16) {
transposed_lhs->params.scale = lhs->params.scale;
transposed_lhs->params.zero_point = lhs->params.zero_point;
}
return transposed_lhs;
}
template <KernelType kernel_type>
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
OpContext op_context(context, node);
OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* lhs;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputLHSTensor, &lhs));
const TfLiteTensor* rhs;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputRHSTensor, &rhs));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
RuntimeShape orig_lhs_shape = GetTensorShape(lhs);
RuntimeShape orig_rhs_shape = GetTensorShape(rhs);
bool adj_y = op_context.params->adj_y;
bool adj_x = op_context.params->adj_x;
int32_t rhs_dims_count = orig_rhs_shape.DimensionsCount();
int32_t lhs_dims_count = orig_lhs_shape.DimensionsCount();
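  // If the RHS batch dimension adjacent to its matrix dimensions is 1, fold
  // the matching LHS batch dimension into the LHS row count and drop that
  // axis from both shapes, so the broadcast becomes one larger matrix multiply.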
if (rhs_dims_count > 2 && lhs_dims_count > 2) {
int rhs_one = orig_rhs_shape.DimsData()[rhs_dims_count - 3];
if (rhs_one == 1) {
int32_t* lhs_dims = orig_lhs_shape.DimsData();
int32_t* rhs_dims = orig_rhs_shape.DimsData();
RuntimeShape tmp_l(lhs_dims_count - 1, lhs_dims);
tmp_l.SetDim(lhs_dims_count - 3,
lhs_dims[lhs_dims_count - 3] * lhs_dims[lhs_dims_count - 2]);
tmp_l.SetDim(lhs_dims_count - 2, lhs_dims[lhs_dims_count - 1]);
orig_lhs_shape.ReplaceWith(tmp_l.DimensionsCount(), tmp_l.DimsData());
RuntimeShape tmp_r(rhs_dims_count - 1, orig_rhs_shape.DimsData());
tmp_r.SetDim(rhs_dims_count - 3, rhs_dims[rhs_dims_count - 2]);
tmp_r.SetDim(rhs_dims_count - 2, rhs_dims[rhs_dims_count - 1]);
orig_rhs_shape.ReplaceWith(tmp_r.DimensionsCount(), tmp_r.DimsData());
}
}
rhs_dims_count = orig_rhs_shape.DimensionsCount();
lhs_dims_count = orig_lhs_shape.DimensionsCount();
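  // The optimized float and int8 paths can consume the RHS in its original
  // layout ("implicit transpose"); hybrid, reference, and int16 paths instead
  // materialize a transposed RHS copy in a temporary tensor when adj_y is
  // false.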
const TfLiteTensor* rhs_tensor = rhs;
bool implicit_transpose_possible = true;
if ((lhs->type == kTfLiteFloat32 && rhs->type == kTfLiteInt8) ||
kernel_type == kReference || rhs->type == kTfLiteInt16) {
implicit_transpose_possible = false;
}
bool do_implicit_transpose = !adj_y && implicit_transpose_possible;
if (!adj_y && !implicit_transpose_possible) {
rhs_tensor = GetTempRhs(context, node, rhs);
}
const TfLiteTensor* lhs_tensor = adj_x ? GetTempLhs(context, node, lhs) : lhs;
if (!adj_y && !implicit_transpose_possible) {
if (!(IsConstantTensor(rhs) && op_data->rhs_transposed)) {
TransposeRowsColumns(context, rhs, GetTemporary(context, node, 1));
op_data->rhs_transposed = true;
}
}
if (adj_x) {
TransposeRowsColumns(context, lhs, GetTemporary(context, node, 0));
}
RuntimeShape rhs_shape = (adj_y && !do_implicit_transpose)
? orig_rhs_shape
: SwapRowColumnDims(orig_rhs_shape);
RuntimeShape lhs_shape =
adj_x ? orig_lhs_shape : SwapRowColumnDims(orig_lhs_shape);
switch (rhs->type) {
case kTfLiteFloat32:
if (kernel_type == kGenericOptimized) {
optimized_ops::BatchMatMul(
rhs_shape, GetTensorData<float>(rhs_tensor), lhs_shape,
GetTensorData<float>(lhs_tensor), GetTensorShape(output),
GetTensorData<float>(output),
CpuBackendContext::GetFromContext(context), do_implicit_transpose);
} else {
reference_ops::BatchMatMul(rhs_shape, GetTensorData<float>(rhs_tensor),
lhs_shape, GetTensorData<float>(lhs_tensor),
GetTensorShape(output),
GetTensorData<float>(output));
}
break;
case kTfLiteInt8:
case kTfLiteInt16:
EvalQuantized<kernel_type>(context, node, op_data, lhs_shape, lhs_tensor,
rhs_shape, rhs_tensor, output,
do_implicit_transpose);
break;
default:
TF_LITE_KERNEL_LOG(context,
"Currently BatchMatMul doesn't support type: %s",
                         TfLiteTypeGetName(rhs->type));
return kTfLiteError;
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_BATCH_MATMUL_REF() {
static TfLiteRegistration r = {batch_matmul::Init, batch_matmul::Free,
batch_matmul::Prepare,
batch_matmul::Eval<batch_matmul::kReference>};
return &r;
}
TfLiteRegistration* Register_BATCH_MATMUL_GENERIC_OPTIMIZED() {
static TfLiteRegistration r = {
batch_matmul::Init, batch_matmul::Free, batch_matmul::Prepare,
batch_matmul::Eval<batch_matmul::kGenericOptimized>};
return &r;
}
TfLiteRegistration* Register_BATCH_MATMUL() {
return Register_BATCH_MATMUL_GENERIC_OPTIMIZED();
}
}
}
} | #include <stddef.h>
#include <stdint.h>
#include <initializer_list>
#include <limits>
#include <map>
#include <numeric>
#include <type_traits>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/string_type.h"
namespace tflite {
namespace ops {
namespace builtin {
TfLiteRegistration* Register_BATCH_MATMUL_REF();
TfLiteRegistration* Register_BATCH_MATMUL_GENERIC_OPTIMIZED();
}
}
namespace {
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
template <typename T>
tflite::TensorType GetTFLiteType() {
if (std::is_same<T, int8_t>::value) {
return TensorType_INT8;
}
if (std::is_same<T, int16_t>::value) {
return TensorType_INT16;
}
if (std::is_same<T, int32_t>::value) {
return TensorType_INT32;
}
return TensorType_FLOAT32;
}
template <typename T>
class BatchMatMulOpModel : public SingleOpModel {
public:
BatchMatMulOpModel(const TensorData& lhs, const TensorData& rhs,
bool adj_x = false, bool adj_y = false) {
lhs_id_ = AddInput(lhs);
rhs_id_ = AddInput(rhs);
output_id_ = AddOutput(GetTFLiteType<T>());
SetBuiltinOp(BuiltinOperator_BATCH_MATMUL,
BuiltinOptions_BatchMatMulOptions,
CreateBatchMatMulOptions(builder_, adj_x, adj_y).Union());
BuildInterpreter({GetShape(lhs_id_), GetShape(rhs_id_)});
}
int lhs() const { return lhs_id_; }
int rhs() const { return rhs_id_; }
std::vector<T> GetOutput() { return ExtractVector<T>(output_id_); }
std::vector<int32_t> GetOutputShape() { return GetTensorShape(output_id_); }
protected:
int lhs_id_;
int rhs_id_;
int output_id_;
};
const auto kKernelMap = new std::map<string, TfLiteRegistration*>({
{"Reference", ops::builtin::Register_BATCH_MATMUL_REF()},
{"GenericOptimized",
ops::builtin::Register_BATCH_MATMUL_GENERIC_OPTIMIZED()},
});
class BatchMatMulOpTest : public SingleOpTest {
protected:
const std::map<string, TfLiteRegistration*>& GetKernelMap() override {
return *kKernelMap;
}
};
TEST_P(BatchMatMulOpTest, Float32Test_Ones) {
BatchMatMulOpModel<float> model({TensorType_FLOAT32, {3, 2, 1, 4}},
{TensorType_FLOAT32, {3, 1, 4, 1}});
std::vector<float> lhs(24);
std::iota(lhs.begin(), lhs.end(), 1);
std::vector<float> rhs(12);
std::iota(rhs.begin(), rhs.end(), 1);
std::vector<float> res{30, 70, 278, 382, 782, 950};
model.PopulateTensor<float>(model.lhs(), lhs);
model.PopulateTensor<float>(model.rhs(), rhs);
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAreArray(res));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({3, 2, 1, 1}));
}
TEST_P(BatchMatMulOpTest, Float32Test_Flatten) {
BatchMatMulOpModel<float> model({TensorType_FLOAT32, {3, 2, 2, 4}},
{TensorType_FLOAT32, {3, 1, 4, 1}});
std::vector<float> lhs(48);
std::iota(lhs.begin(), lhs.end(), 1);
std::vector<float> rhs(12);
std::iota(rhs.begin(), rhs.end(), 1);
std::vector<float> res{30, 70, 110, 150, 486, 590,
694, 798, 1454, 1622, 1790, 1958};
model.PopulateTensor<float>(model.lhs(), lhs);
model.PopulateTensor<float>(model.rhs(), rhs);
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAreArray(res));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({3, 2, 2, 1}));
}
TEST_P(BatchMatMulOpTest, Float32Test_Simple) {
BatchMatMulOpModel<float> model({TensorType_FLOAT32, {1, 2, 3}},
{TensorType_FLOAT32, {1, 3, 4}});
model.PopulateTensor<float>(model.lhs(), {1, 2, 3, 4, 5, 6});
model.PopulateTensor<float>(model.rhs(),
{7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
Pointwise(FloatingPointEq(),
{74., 80., 86., 92., 173., 188., 203., 218.}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 2, 4}));
}
TEST_P(BatchMatMulOpTest, Int8Test_Simple) {
BatchMatMulOpModel<int32_t> model({TensorType_INT8, {1, 2, 3}},
{TensorType_INT8, {1, 3, 4}});
model.PopulateTensor<int8_t>(model.lhs(), {1, 2, 3, 4, 5, 6});
model.PopulateTensor<int8_t>(model.rhs(),
{7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
ElementsAreArray({74, 80, 86, 92, 173, 188, 203, 218}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 2, 4}));
}
TEST_P(BatchMatMulOpTest, Int8Test_LargeElement) {
BatchMatMulOpModel<int32_t> model({TensorType_INT8, {1, 2, 3}},
{TensorType_INT8, {1, 3, 4}});
model.PopulateTensor<int8_t>(model.lhs(), {121, 122, 123, 124, 125, 126});
model.PopulateTensor<int8_t>(model.rhs(), {117, 118, 119, 110, 111, 112, 113,
114, 115, 116, 117, 118});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
ElementsAreArray(
{41844, 42210, 42576, 41732, 42873, 43248, 43623, 42758}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 2, 4}));
}
TEST_P(BatchMatMulOpTest, Float32Test_SimpleRHSAdjoint) {
BatchMatMulOpModel<float> model({TensorType_FLOAT32, {1, 2, 3}},
{TensorType_FLOAT32, {1, 4, 3}}, false, true);
model.PopulateTensor<float>(model.lhs(), {1, 2, 3, 4, 5, 6});
model.PopulateTensor<float>(model.rhs(),
{7, 11, 15, 8, 12, 16, 9, 13, 17, 10, 14, 18});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
Pointwise(FloatingPointEq(),
{74., 80., 86., 92., 173., 188., 203., 218.}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 2, 4}));
}
TEST_P(BatchMatMulOpTest, Float32Test_SimpleLHSAdjoint) {
BatchMatMulOpModel<float> model({TensorType_FLOAT32, {1, 3, 2}},
{TensorType_FLOAT32, {1, 3, 4}}, true, false);
model.PopulateTensor<float>(model.lhs(), {1, 4, 2, 5, 3, 6});
model.PopulateTensor<float>(model.rhs(),
{7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
Pointwise(FloatingPointEq(),
{74., 80., 86., 92., 173., 188., 203., 218.}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 2, 4}));
}
TEST_P(BatchMatMulOpTest, Float32Test_BatchSizeTwo) {
BatchMatMulOpModel<float> model({TensorType_FLOAT32, {2, 2, 3}},
{TensorType_FLOAT32, {2, 3, 4}});
model.PopulateTensor<float>(model.lhs(),
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
model.PopulateTensor<float>(model.rhs(),
{7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
Pointwise(FloatingPointEq(),
{74., 80., 86., 92., 173., 188., 203., 218., 560., 584.,
608., 632., 767., 800., 833., 866.}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({2, 2, 4}));
}
TEST_P(BatchMatMulOpTest, Float32Test_Broadcast) {
BatchMatMulOpModel<float> model({TensorType_FLOAT32, {2, 2, 3}},
{TensorType_FLOAT32, {3, 4}});
model.PopulateTensor<float>(model.lhs(),
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
model.PopulateTensor<float>(model.rhs(),
{7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
Pointwise(FloatingPointEq(),
{74., 80., 86., 92., 173., 188., 203., 218., 272., 296.,
320., 344., 371., 404., 437., 470.}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({2, 2, 4}));
}
TEST_P(BatchMatMulOpTest, Float32Test_BroadcastLHSAdjoint) {
BatchMatMulOpModel<float> model({TensorType_FLOAT32, {2, 3, 2}},
{TensorType_FLOAT32, {3, 4}}, true, false);
model.PopulateTensor<float>(model.lhs(),
{1, 4, 2, 5, 3, 6, 7, 10, 8, 11, 9, 12});
model.PopulateTensor<float>(model.rhs(),
{7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
Pointwise(FloatingPointEq(),
{74., 80., 86., 92., 173., 188., 203., 218., 272., 296.,
320., 344., 371., 404., 437., 470.}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({2, 2, 4}));
}
TEST_P(BatchMatMulOpTest, Float32Test_Broadcast2) {
BatchMatMulOpModel<float> model({TensorType_FLOAT32, {2, 1, 3, 2}},
{TensorType_FLOAT32, {3, 2, 4}});
model.PopulateTensor<float>(model.lhs(),
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
model.PopulateTensor<float>(model.rhs(),
{7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(
model.GetOutput(),
Pointwise(
FloatingPointEq(),
{29., 32., 35., 38., 65., 72., 79., 86., 101., 112., 123.,
134., 53., 56., 59., 62., 121., 128., 135., 142., 189., 200.,
211., 222., 77., 80., 83., 86., 177., 184., 191., 198., 277.,
288., 299., 310., 137., 152., 167., 182., 173., 192., 211., 230.,
209., 232., 255., 278., 257., 272., 287., 302., 325., 344., 363.,
382., 393., 416., 439., 462., 377., 392., 407., 422., 477., 496.,
515., 534., 577., 600., 623., 646.}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({2, 3, 3, 4}));
}
TEST_P(BatchMatMulOpTest, Float32Test_Broadcast2LHSAdjoint) {
BatchMatMulOpModel<float> model({TensorType_FLOAT32, {2, 1, 2, 3}},
{TensorType_FLOAT32, {3, 2, 4}}, true, false);
model.PopulateTensor<float>(model.lhs(),
{1, 3, 5, 2, 4, 6, 7, 9, 11, 8, 10, 12});
model.PopulateTensor<float>(model.rhs(),
{7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(
model.GetOutput(),
Pointwise(
FloatingPointEq(),
{29., 32., 35., 38., 65., 72., 79., 86., 101., 112., 123.,
134., 53., 56., 59., 62., 121., 128., 135., 142., 189., 200.,
211., 222., 77., 80., 83., 86., 177., 184., 191., 198., 277.,
288., 299., 310., 137., 152., 167., 182., 173., 192., 211., 230.,
209., 232., 255., 278., 257., 272., 287., 302., 325., 344., 363.,
382., 393., 416., 439., 462., 377., 392., 407., 422., 477., 496.,
515., 534., 577., 600., 623., 646.}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({2, 3, 3, 4}));
}
TEST_P(BatchMatMulOpTest, Float32Test_Broadcast2RHSAdjoint) {
BatchMatMulOpModel<float> model({TensorType_FLOAT32, {2, 1, 3, 2}},
{TensorType_FLOAT32, {3, 4, 2}}, false, true);
model.PopulateTensor<float>(model.lhs(),
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
model.PopulateTensor<float>(model.rhs(),
{7, 11, 8, 12, 9, 13, 10, 14, 15, 19, 16, 20,
17, 21, 18, 22, 23, 27, 24, 28, 25, 29, 26, 30});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(
model.GetOutput(),
Pointwise(
FloatingPointEq(),
{29., 32., 35., 38., 65., 72., 79., 86., 101., 112., 123.,
134., 53., 56., 59., 62., 121., 128., 135., 142., 189., 200.,
211., 222., 77., 80., 83., 86., 177., 184., 191., 198., 277.,
288., 299., 310., 137., 152., 167., 182., 173., 192., 211., 230.,
209., 232., 255., 278., 257., 272., 287., 302., 325., 344., 363.,
382., 393., 416., 439., 462., 377., 392., 407., 422., 477., 496.,
515., 534., 577., 600., 623., 646.}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({2, 3, 3, 4}));
}
TEST_P(BatchMatMulOpTest, Float32Test_Broadcast2BothAdjoint) {
BatchMatMulOpModel<float> model({TensorType_FLOAT32, {2, 1, 2, 3}},
{TensorType_FLOAT32, {3, 4, 2}}, true, true);
model.PopulateTensor<float>(model.lhs(),
{1, 3, 5, 2, 4, 6, 7, 9, 11, 8, 10, 12});
model.PopulateTensor<float>(model.rhs(),
{7, 11, 8, 12, 9, 13, 10, 14, 15, 19, 16, 20,
17, 21, 18, 22, 23, 27, 24, 28, 25, 29, 26, 30});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(
model.GetOutput(),
Pointwise(
FloatingPointEq(),
{29., 32., 35., 38., 65., 72., 79., 86., 101., 112., 123.,
134., 53., 56., 59., 62., 121., 128., 135., 142., 189., 200.,
211., 222., 77., 80., 83., 86., 177., 184., 191., 198., 277.,
288., 299., 310., 137., 152., 167., 182., 173., 192., 211., 230.,
209., 232., 255., 278., 257., 272., 287., 302., 325., 344., 363.,
382., 393., 416., 439., 462., 377., 392., 407., 422., 477., 496.,
515., 534., 577., 600., 623., 646.}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({2, 3, 3, 4}));
}
TEST_P(BatchMatMulOpTest, Float32Test_BroadcastFromRHS) {
BatchMatMulOpModel<float> model({TensorType_FLOAT32, {4, 5}},
{TensorType_FLOAT32, {3, 1, 5, 2}});
model.PopulateTensor<float>(
model.lhs(),
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20});
model.PopulateTensor<float>(
model.rhs(),
{7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(
model.GetOutput(),
Pointwise(FloatingPointEq(),
{185., 200., 460., 500., 735., 800., 1010., 1100.,
335., 350., 860., 900., 1385., 1450., 1910., 2000.,
485., 500., 1260., 1300., 2035., 2100., 2810., 2900.}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({3, 1, 4, 2}));
}
INSTANTIATE_TEST_SUITE_P(
BatchMatMulOpTest, BatchMatMulOpTest,
::testing::ValuesIn(SingleOpTest::GetKernelTags(*kKernelMap)));
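// Two-op model (BATCH_MATMUL feeding NEG) with a constant float RHS; the test
// below invokes it twice to exercise state kept for the constant RHS (e.g. the
// cached transposed weights) across invocations.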
class ConstRHSBatchMatMulOpModel : public MultiOpModel {
public:
ConstRHSBatchMatMulOpModel(const TensorData& lhs,
std::initializer_list<int> rhs_shape,
std::initializer_list<float> rhs_data,
bool adj_x = false, bool adj_y = false) {
lhs_id_ = AddInput(lhs);
rhs_id_ = AddConstInput<float>(TensorType_FLOAT32, rhs_data, rhs_shape);
matmul_output_id_ = AddOutput(lhs.type);
std::vector<int> matmul_inputs{lhs_id_, rhs_id_};
std::vector<int> matmul_outputs{matmul_output_id_};
AddBuiltinOp(BuiltinOperator_BATCH_MATMUL,
BuiltinOptions_BatchMatMulOptions,
CreateBatchMatMulOptions(builder_, adj_x, adj_y).Union(),
matmul_inputs, matmul_outputs);
neg_output_id_ = AddOutput(lhs.type);
std::vector<int> neg_inputs{matmul_output_id_};
std::vector<int> neg_outputs{neg_output_id_};
AddBuiltinOp(BuiltinOperator_NEG, BuiltinOptions_NegOptions,
CreateNegOptions(builder_).Union(), neg_inputs, neg_outputs);
BuildInterpreter({GetShape(lhs_id_), GetShape(rhs_id_)});
}
int lhs() const { return lhs_id_; }
std::vector<float> GetOutput() {
return ExtractVector<float>(neg_output_id_);
}
std::vector<int32_t> GetOutputShape() {
return GetTensorShape(neg_output_id_);
}
protected:
int lhs_id_;
int rhs_id_;
int matmul_output_id_;
int neg_output_id_;
};
TEST(ConstRHSBatchMatMulOpModel, RHSNotAdjoint) {
ConstRHSBatchMatMulOpModel model({TensorType_FLOAT32, {1, 6, 2}}, {2, 3},
{6, 3, 7, 4, 6, 9});
model.PopulateTensor<float>(model.lhs(),
{6, 3, 7, 4, 6, 9, 2, 6, 7, 4, 3, 7});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
ElementsAreArray({-48, -36, -69, -58, -45, -85, -72, -72, -123,
-36, -42, -68, -58, -45, -85, -46, -51, -84}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 6, 3}));
model.PopulateTensor<float>(model.lhs(),
{6, 3, 7, 4, 6, 9, 2, 6, 7, 4, 3, 7});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
ElementsAreArray({-48, -36, -69, -58, -45, -85, -72, -72, -123,
-36, -42, -68, -58, -45, -85, -46, -51, -84}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 6, 3}));
}
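// Model wrapper for hybrid BATCH_MATMUL: float LHS activations with
// symmetrically quantized int8 RHS weights. `asymmetric_quantize_inputs`
// selects how the float inputs are quantized at runtime.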
class HybridBatchMatMulOpModel : public SingleOpModel {
public:
HybridBatchMatMulOpModel(int units, int batches, const TensorData& lhs,
const TensorData& rhs,
const TensorData& output = {TensorType_FLOAT32},
bool asymmetric_quantize_inputs = true,
bool adj_x = false, bool adj_y = false)
: units_(units), batches_(batches) {
int total_input_size = 1;
for (size_t i = 0; i < lhs.shape.size(); ++i) {
total_input_size *= lhs.shape[i];
}
input_size_ = total_input_size / batches_;
lhs_id_ = AddInput(lhs);
rhs_id_ = AddInput(rhs);
output_id_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_BATCH_MATMUL,
BuiltinOptions_BatchMatMulOptions,
CreateBatchMatMulOptions(builder_, adj_x, adj_y,
asymmetric_quantize_inputs)
.Union());
BuildInterpreter({GetShape(lhs_id_), GetShape(rhs_id_)},
-1,
false,
false);
}
void SetWeights(const std::vector<float>& data) {
SymmetricQuantizeAndPopulate(rhs_id_, data);
AllocateAndDelegate(true);
}
void SetSignedWeights(std::initializer_list<float> f) {
SignedSymmetricQuantizeAndPopulate(rhs_id_, f);
AllocateAndDelegate(true);
}
void SetInput(const std::vector<float>& f) { PopulateTensor(lhs_id_, f); }
std::vector<float> GetOutput() { return ExtractVector<float>(output_id_); }
std::vector<int> GetOutputShape() { return GetTensorShape(output_id_); }
int input_size() { return input_size_; }
int num_units() { return units_; }
int num_batches() { return batches_; }
int lhs() const { return lhs_id_; }
int rhs() const { return rhs_id_; }
protected:
int lhs_id_;
int rhs_id_;
int output_id_;
int units_;
int batches_;
int input_size_;
};
class HybridAsymmetricBatchMatMulOpTest : public SingleOpTest {
protected:
const std::map<string, TfLiteRegistration*>& GetKernelMap() override {
return *kKernelMap;
}
};
TEST_P(HybridAsymmetricBatchMatMulOpTest, SimpleTestQuantizedInt8) {
HybridBatchMatMulOpModel m(
3, 2,
{TensorType_FLOAT32, {2, 10}},
{TensorType_INT8, {10, 3}, 0, 0, 10.0 / 127.0, 0});
m.SetSignedWeights({
1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5,
6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10,
});
m.SetInput({
11, 12, 13, 14, 15, 16, 17, 18, -19, -20,
11, 12, 13, 14, 15, 16, 17, -18, 19, -20,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear(
{
193,
193,
193,
247,
247,
247,
},
3.f)));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 3}));
}
TEST_P(HybridAsymmetricBatchMatMulOpTest, MultipleNumBatchQuantizedInt8) {
HybridBatchMatMulOpModel m(
10, 4,
{TensorType_FLOAT32, {1, 2, 2, 3}},
{TensorType_INT8, {3, 10}, 0, 0, 10.0 / 127.0, 0});
m.SetSignedWeights({
1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3,
1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3,
});
m.SetInput({
11, 12, 13,
11, 12, 13,
11, 12, 13,
11, 12, 13,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{
73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73,
73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73,
73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73,
},
0.64f)));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 2, 2, 10}));
}
TEST_P(HybridAsymmetricBatchMatMulOpTest, RegressionTestQuantizedInt8) {
HybridBatchMatMulOpModel m(
10, 2,
{TensorType_FLOAT32, {2, 3}},
{TensorType_INT8, {3, 10}, 0, 0, 10.0 / 127.0, 0});
m.SetSignedWeights({
1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3,
1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3,
});
m.SetInput({
11, 12, 13,
11, 12, 13,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear(
{
73, 73, 73, 73, 73, 73, 73, 73, 73, 73,
73, 73, 73, 73, 73, 73, 73, 73, 73, 73,
},
0.64f)));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 10}));
}
TEST_P(HybridAsymmetricBatchMatMulOpTest,
TestQuantizedInt8BatchesAndUnitsGreaterThanAccumDimSize) {
HybridBatchMatMulOpModel m(
8, 6,
{TensorType_FLOAT32, {6, 3}},
{TensorType_INT8, {3, 8}, 0, 0, 10.0 / 127.0, 0});
m.SetSignedWeights(
{1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3});
m.SetInput({
11, 12, 13,
11, 12, 13,
11, 12, 13,
11, 12, 13,
11, 12, 13,
11, 12, 13,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74,
74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74,
74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74},
0.15f)));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({6, 8}));
}
TEST_P(HybridAsymmetricBatchMatMulOpTest,
TestQuantizedInt8BatchesAndUnitsGreaterThanAccumDimSizeAdjX) {
HybridBatchMatMulOpModel m(
8, 6,
{TensorType_FLOAT32, {3, 6}},
{TensorType_INT8, {3, 8}, 0, 0, 10.0 / 127.0, 0},
{TensorType_FLOAT32},
true,
true);
m.SetSignedWeights(
{1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3});
m.SetInput(
{11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74,
74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74,
74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74},
0.15f)));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({6, 8}));
}
TEST_P(HybridAsymmetricBatchMatMulOpTest,
TestQuantizedInt8BatchesAndUnitsGreaterThanAccumDimSizeAdjY) {
HybridBatchMatMulOpModel m(
8, 6,
{TensorType_FLOAT32, {6, 3}},
{TensorType_INT8, {8, 3}, 0, 0, 10.0 / 127.0, 0},
{TensorType_FLOAT32},
true,
false,
true);
m.SetSignedWeights(
{1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3});
m.SetInput({
11, 12, 13,
11, 12, 13,
11, 12, 13,
11, 12, 13,
11, 12, 13,
11, 12, 13,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74,
74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74,
74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74},
0.15f)));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({6, 8}));
}
TEST_P(HybridAsymmetricBatchMatMulOpTest,
TestQuantizedInt8BatchesAndUnitsGreaterThanAccumDimSizeAdjXAdjY) {
HybridBatchMatMulOpModel m(
8, 6,
{TensorType_FLOAT32, {3, 6}},
{TensorType_INT8, {8, 3}, 0, 0, 10.0 / 127.0, 0},
{TensorType_FLOAT32},
true,
true,
true);
m.SetSignedWeights(
{1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3});
m.SetInput(
{11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74,
74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74,
74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74},
0.15f)));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({6, 8}));
}
TEST_P(HybridAsymmetricBatchMatMulOpTest, QuantizedInt8BroadcastWeights) {
HybridBatchMatMulOpModel m(
3, 2,
{TensorType_FLOAT32, {2, 2, 10}},
{TensorType_INT8, {10, 3}, 0, 0, 10.0 / 127.0, 0});
m.SetSignedWeights({
1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5,
6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10,
});
m.SetInput({
1, 2, 3, 4, 5, 6, 7, 8, -9, -10,
1, 2, 3, 4, 5, 6, 7, -8, 9, -10,
11, 12, 13, 14, 15, 16, 17, 18, -19, -20,
11, 12, 13, 14, 15, 16, 17, -18, 19, -20,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear(
{
23, 23, 23,
57, 57, 57,
193, 193, 193,
247, 247, 247,
},
3.f)));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 2, 3}));
}
TEST_P(HybridAsymmetricBatchMatMulOpTest, QuantizedInt8BroadcastBigWeights) {
HybridBatchMatMulOpModel m(
9, 2,
{TensorType_FLOAT32, {2, 2, 10}},
{TensorType_INT8, {10, 9}, 0, 0, 10.0 / 127.0, 0});
m.SetSignedWeights({
1, 1, 1, 17, 17, 17, 26, 26, 26,
2, 2, 2, 18, 18, 18, 27, 27, 27,
3, 3, 3, 19, 19, 19, 28, 28, 28,
4, 4, 4, 20, 20, 20, 29, 29, 29,
5, 5, 5, 21, 21, 21, 30, 30, 30,
6, 6, 6, 22, 22, 22, 31, 31, 31,
7, 7, 7, 23, 23, 23, 32, 32, 32,
8, 8, 8, 24, 24, 24, 33, 33, 33,
9, 9, 9, 25, 25, 25, 34, 34, 34,
10, 10, 10, 26, 26, 26, 35, 35, 35,
});
m.SetInput({
1, 2, 3, 4, 5, 6, 7, 8, -9, -10,
1, 2, 3, 4, 5, 6, 7, -8, 9, -10,
11, 12, 13, 14, 15, 16, 17, 18, -19, -20,
11, 12, 13, 14, 15, 16, 17, -18, 19, -20,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{
23, 23, 23, 295, 295, 295, 448, 448, 448,
57, 57, 57, 361, 361, 361, 532, 532, 532,
193, 193, 193, 1425, 1425, 1425, 2118, 2118, 2118,
247, 247, 247, 1511, 1511, 1511, 2222, 2222, 2222
},
10.0f)));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 2, 9}));
}
TEST_P(HybridAsymmetricBatchMatMulOpTest, QuantizedInt8BroadcastInputs) {
HybridBatchMatMulOpModel m(
3, 2,
{TensorType_FLOAT32, {2, 10}},
{TensorType_INT8, {2, 10, 3}, 0, 0, 10.0 / 127.0, 0});
m.SetSignedWeights({
1, -3, 1,
2, -2, 2,
3, -1, 3,
4, 0, 4,
5, 1, 5,
6, 2, 6,
7, 3, 7,
8, 4, 8,
9, 5, 9,
10, 6, 10,
1, 1, 1,
2, 2, 2,
3, 3, 3,
4, 4, 4,
5, 5, 5,
6, 6, 6,
7, 7, 7,
8, 8, 8,
9, 9, 9,
10, 10, 10,
});
m.SetInput({
1, 2, 3, 4, 5, 6, 7, 8, -9, -10,
1, 2, 3, 4, 5, 6, 7, -8, 9, -10,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear(
{
23, -45, 23,
57, -19, 57,
23, 23, 23,
57, 57, 57,
},
1.5f)));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 2, 3}));
}
INSTANTIATE_TEST_SUITE_P(
HybridAsymmetricBatchMatMulOpTest, HybridAsymmetricBatchMatMulOpTest,
::testing::ValuesIn(SingleOpTest::GetKernelTags(*kKernelMap)));
class HybridSymmetricBatchMatMulOpTest : public SingleOpTest {
protected:
const std::map<string, TfLiteRegistration*>& GetKernelMap() override {
return *kKernelMap;
}
};
TEST_P(HybridSymmetricBatchMatMulOpTest, SimpleTestQuantizedInt8) {
HybridBatchMatMulOpModel m(
3, 2,
{TensorType_FLOAT32, {2, 10}},
{TensorType_INT8, {10, 3}, 0, 0, 10.0 / 127.0, 0},
{TensorType_FLOAT32}, false);
m.SetSignedWeights({
1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5,
6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10,
});
m.SetInput({
11, 12, 13, 14, 15, 16, 17, 18, -19, -20,
11, 12, 13, 14, 15, 16, 17, -18, 19, -20,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear(
{
193,
193,
193,
247,
247,
247,
},
1.5f)));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 3}));
}
TEST_P(HybridSymmetricBatchMatMulOpTest, QuantizedInt8BroadcastWeights) {
HybridBatchMatMulOpModel m(
3, 2,
{TensorType_FLOAT32, {2, 2, 10}},
{TensorType_INT8, {10, 3}, 0, 0, 10.0 / 127.0, 0},
{TensorType_FLOAT32}, false);
m.SetSignedWeights({
1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5,
6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10,
});
m.SetInput({
1, 2, 3, 4, 5, 6, 7, 8, -9, -10,
1, 2, 3, 4, 5, 6, 7, -8, 9, -10,
11, 12, 13, 14, 15, 16, 17, 18, -19, -20,
11, 12, 13, 14, 15, 16, 17, -18, 19, -20,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear(
{
23, 23, 23,
57, 57, 57,
193, 193, 193,
247, 247, 247,
},
1.5f)));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 2, 3}));
}
TEST_P(HybridSymmetricBatchMatMulOpTest, QuantizedInt8BroadcastBigWeights) {
HybridBatchMatMulOpModel m(
9, 2,
{TensorType_FLOAT32, {2, 2, 10}},
{TensorType_INT8, {10, 9}, 0, 0, 10.0 / 127.0, 0},
{TensorType_FLOAT32}, false);
m.SetSignedWeights({
1, 1, 1, 17, 17, 17, 26, 26, 26,
2, 2, 2, 18, 18, 18, 27, 27, 27,
3, 3, 3, 19, 19, 19, 28, 28, 28,
4, 4, 4, 20, 20, 20, 29, 29, 29,
5, 5, 5, 21, 21, 21, 30, 30, 30,
6, 6, 6, 22, 22, 22, 31, 31, 31,
7, 7, 7, 23, 23, 23, 32, 32, 32,
8, 8, 8, 24, 24, 24, 33, 33, 33,
9, 9, 9, 25, 25, 25, 34, 34, 34,
10, 10, 10, 26, 26, 26, 35, 35, 35,
});
m.SetInput({
1, 2, 3, 4, 5, 6, 7, 8, -9, -10,
1, 2, 3, 4, 5, 6, 7, -8, 9, -10,
11, 12, 13, 14, 15, 16, 17, 18, -19, -20,
11, 12, 13, 14, 15, 16, 17, -18, 19, -20,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{
23, 23, 23, 295, 295, 295, 448, 448, 448,
57, 57, 57, 361, 361, 361, 532, 532, 532,
193, 193, 193, 1425, 1425, 1425, 2118, 2118, 2118,
247, 247, 247, 1511, 1511, 1511, 2222, 2222, 2222
},
10.0f)));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 2, 9}));
}
TEST_P(HybridSymmetricBatchMatMulOpTest, QuantizedInt8BroadcastInputs) {
HybridBatchMatMulOpModel m(
3, 2,
{TensorType_FLOAT32, {2, 10}},
{TensorType_INT8, {2, 10, 3}, 0, 0, 10.0 / 127.0, 0},
{TensorType_FLOAT32}, false);
m.SetSignedWeights({
1, -3, 1,
2, -2, 2,
3, -1, 3,
4, 0, 4,
5, 1, 5,
6, 2, 6,
7, 3, 7,
8, 4, 8,
9, 5, 9,
10, 6, 10,
1, 1, 1,
2, 2, 2,
3, 3, 3,
4, 4, 4,
5, 5, 5,
6, 6, 6,
7, 7, 7,
8, 8, 8,
9, 9, 9,
10, 10, 10,
});
m.SetInput({
1, 2, 3, 4, 5, 6, 7, 8, -9, -10,
1, 2, 3, 4, 5, 6, 7, -8, 9, -10,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear(
{
23, -45, 23,
57, -19, 57,
23, 23, 23,
57, 57, 57,
},
1.5f)));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 2, 3}));
}
INSTANTIATE_TEST_SUITE_P(
HybridSymmetricBatchMatMulOpTest, HybridSymmetricBatchMatMulOpTest,
::testing::ValuesIn(SingleOpTest::GetKernelTags(*kKernelMap)));
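// Model wrapper for fully quantized BATCH_MATMUL; the RHS reuses the LHS
// scale/zero point and is shaped {input_size, units}, or {units, input_size}
// when adj_y is set.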
class QuantizedBatchMatMulOpModel : public SingleOpModel {
public:
QuantizedBatchMatMulOpModel(int units, int batches, const TensorData& lhs,
const TensorData& output = {TensorType_INT8},
bool adj_x = false, bool adj_y = false)
: units_(units), batches_(batches) {
int total_input_size = 1;
for (size_t i = 0; i < lhs.shape.size(); ++i) {
total_input_size *= lhs.shape[i];
}
input_size_ = total_input_size / batches_;
int rhs_batch_size = adj_y ? units_ : input_size_;
int rhs_channels = adj_y ? input_size_ : units_;
lhs_id_ = AddInput(lhs);
rhs_id_ = AddInput({lhs.type,
{rhs_batch_size, rhs_channels},
0,
0,
GetScale(lhs_id_),
GetZeroPoint(lhs_id_)});
output_id_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_BATCH_MATMUL,
BuiltinOptions_BatchMatMulOptions,
CreateBatchMatMulOptions(builder_, adj_x, adj_y).Union());
BuildInterpreter({GetShape(lhs_id_), GetShape(rhs_id_)});
}
template <typename T>
void SetWeights(const std::vector<float>& data) {
QuantizeAndPopulate<T>(rhs_id_, data);
}
template <typename T>
void SetInput(const std::vector<float>& data) {
QuantizeAndPopulate<T>(lhs_id_, data);
}
template <typename T>
std::vector<T> GetOutput() {
return ExtractVector<T>(output_id_);
}
template <typename T>
std::vector<float> GetDequantizedOutput() {
return Dequantize<T>(ExtractVector<T>(output_id_), GetScale(output_id_),
GetZeroPoint(output_id_));
}
protected:
int lhs_id_;
int rhs_id_;
int output_id_;
int units_;
int batches_;
int input_size_;
};
class QuantizedBatchMatMulOpTest : public SingleOpTest {
protected:
const std::map<string, TfLiteRegistration*>& GetKernelMap() override {
return *kKernelMap;
}
};
TEST_P(QuantizedBatchMatMulOpTest, SimpleTestQuantizedInt8) {
QuantizedBatchMatMulOpModel m(
3, 2,
{TensorType_INT8, {2, 10}, -63.5, 64},
{TensorType_INT8, {}, -127, 128});
m.SetWeights<int8_t>({
1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5,
6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10,
});
m.SetInput<int8_t>({
1, 2, 3, 4, 5, 6, 7, 8, -9, -10,
1, 2, 3, 4, 5, 6, 7, -8, 9, -10,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetDequantizedOutput<int8_t>(),
ElementsAreArray(ArrayFloatNear({23, 23, 23, 57, 57, 57})));
EXPECT_THAT(m.GetOutput<int8_t>(), ElementsAre(22, 22, 22, 56, 56, 56));
}
TEST_P(QuantizedBatchMatMulOpTest, SimpleTestQuantizedInt8AdjRHS) {
QuantizedBatchMatMulOpModel m(
3, 2,
{TensorType_INT8, {2, 10}, -63.5, 64},
{TensorType_INT8, {}, -127, 128}, false, true);
m.SetWeights<int8_t>({
1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5,
6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10,
});
m.SetInput<int8_t>({
1, 2, 3, 4, 5, 6, 7, 8, -9, -10,
1, 2, 3, 4, 5, 6, 7, -8, 9, -10,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetDequantizedOutput<int8_t>(),
ElementsAreArray(ArrayFloatNear({14, 65, 128, 20, 95, 128})));
EXPECT_THAT(m.GetOutput<int8_t>(), ElementsAre(13, 64, 127, 19, 94, 127));
}
TEST_P(QuantizedBatchMatMulOpTest, SimpleTestQuantizedInt16) {
const float inputs_scale = 10.0 / std::numeric_limits<int16_t>::max();
const float output_scale = 1.0;
const int32_t zero_point = 0;
QuantizedBatchMatMulOpModel m(
3, 2,
{TensorType_INT16, {2, 10}, 0, 0, inputs_scale, zero_point},
{TensorType_INT16, {}, 0, 0, output_scale, zero_point});
m.SetWeights<int16_t>({
1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5,
6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10,
});
m.SetInput<int16_t>({
1, 2, 3, 4, 5, 6, 7, 8, -9, -10,
1, 2, 3, 4, 5, 6, 7, -8, 9, -10,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetDequantizedOutput<int16_t>(),
ElementsAreArray(ArrayFloatNear({23, 23, 23, 57, 57, 57})));
EXPECT_THAT(m.GetOutput<int16_t>(), ElementsAre(23, 23, 23, 57, 57, 57));
}
INSTANTIATE_TEST_SUITE_P(
QuantizedBatchMatMulOpTest, QuantizedBatchMatMulOpTest,
::testing::ValuesIn(SingleOpTest::GetKernelTags(*kKernelMap)));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/batch_matmul.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/batch_matmul_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7d0598b4-bb58-4832-ae50-e5f28c105d5d | cpp | tensorflow/tensorflow | acceleration_test_util_internal | tensorflow/lite/kernels/acceleration_test_util_internal.cc | tensorflow/lite/kernels/acceleration_test_util_internal_test.cc | #include "tensorflow/lite/kernels/acceleration_test_util_internal.h"
#include <ctype.h>
#include <algorithm>
#include <functional>
#include <iterator>
#include <sstream>
#include <string>
namespace tflite {
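// Parses the acceleration test configuration line by line: leading whitespace
// is trimmed, blank lines and lines starting with '#' are skipped, each
// remaining line is split at the first ',' into key and value, and a leading
// '-' on the key marks a denylist entry. Every entry is passed to `consumer`.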
void ReadAccelerationConfig(
const char* config,
const std::function<void(std::string, std::string, bool)>& consumer) {
if (config) {
std::istringstream istream{config};
std::string curr_config_line;
while (std::getline(istream, curr_config_line)) {
curr_config_line.erase(
curr_config_line.begin(),
std::find_if_not(curr_config_line.begin(), curr_config_line.end(),
[](int ch) { return std::isspace(ch); }));
if (curr_config_line.empty() || curr_config_line.at(0) == '#') {
continue;
}
auto first_sep_pos =
std::find(curr_config_line.begin(), curr_config_line.end(), ',');
bool is_denylist = false;
std::string key = curr_config_line;
std::string value{};
if (first_sep_pos != curr_config_line.end()) {
key = std::string(curr_config_line.begin(), first_sep_pos);
value = std::string(first_sep_pos + 1, curr_config_line.end());
}
if (key[0] == '-') {
key = key.substr(1);
is_denylist = true;
}
consumer(key, value, is_denylist);
}
}
}
} | #include "tensorflow/lite/kernels/acceleration_test_util_internal.h"
#include <functional>
#include <optional>
#include <string>
#include <unordered_map>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace tflite {
using ::testing::Eq;
using ::testing::Not;
using ::testing::Test;
struct SimpleConfig {
public:
static constexpr char kAccelerationTestConfig[] =
R"(
#test-id,some-other-data
test-1,data-1
test-2,
test-3,data-3
test-4.*,data-4
-test-5
test-6
test-7,data-7
)";
static const char* AccelerationTestConfig() {
return kAccelerationTestConfig;
}
static SimpleConfig ParseConfigurationLine(const std::string& conf_line) {
return {conf_line};
}
std::string value;
};
class ReadAccelerationConfigTest : public ::testing::Test {
public:
std::unordered_map<std::string, SimpleConfig> allowlist_;
std::unordered_map<std::string, SimpleConfig> denylist_;
std::function<void(std::string, std::string, bool)> consumer_ =
[this](std::string key, std::string value, bool is_denylist) {
if (is_denylist) {
denylist_[key] = {value};
} else {
allowlist_[key] = {value};
}
};
};
TEST_F(ReadAccelerationConfigTest, ReadsAKeyOnlyLine) {
ReadAccelerationConfig("key", consumer_);
EXPECT_THAT(allowlist_.find("key"), Not(Eq(allowlist_.end())));
EXPECT_TRUE(denylist_.empty());
}
TEST_F(ReadAccelerationConfigTest, ReadsADenylistKeyOnlyLine) {
ReadAccelerationConfig("-key", consumer_);
  EXPECT_THAT(denylist_.find("key"), Not(Eq(denylist_.end())));
EXPECT_TRUE(allowlist_.empty());
}
TEST_F(ReadAccelerationConfigTest, ReadsAKeyValueLine) {
ReadAccelerationConfig("key,value", consumer_);
EXPECT_THAT(allowlist_["key"].value, Eq("value"));
EXPECT_TRUE(denylist_.empty());
}
TEST_F(ReadAccelerationConfigTest, ReadsADenyListKeyValueLine) {
ReadAccelerationConfig("-key,value", consumer_);
EXPECT_THAT(denylist_["key"].value, Eq("value"));
EXPECT_TRUE(allowlist_.empty());
}
TEST_F(ReadAccelerationConfigTest, KeysAreLeftTrimmed) {
ReadAccelerationConfig(" key,value", consumer_);
EXPECT_THAT(allowlist_["key"].value, Eq("value"));
EXPECT_TRUE(denylist_.empty());
}
TEST_F(ReadAccelerationConfigTest, BlKeysAreLeftTrimmed) {
ReadAccelerationConfig(" -key,value", consumer_);
EXPECT_THAT(denylist_["key"].value, Eq("value"));
EXPECT_TRUE(allowlist_.empty());
}
TEST_F(ReadAccelerationConfigTest, IgnoresCommentedLines) {
ReadAccelerationConfig("#key,value", consumer_);
EXPECT_TRUE(allowlist_.empty());
EXPECT_TRUE(denylist_.empty());
}
TEST_F(ReadAccelerationConfigTest, CommentCanHaveTrailingBlanks) {
ReadAccelerationConfig(" #key,value", consumer_);
EXPECT_TRUE(allowlist_.empty());
EXPECT_TRUE(denylist_.empty());
}
TEST_F(ReadAccelerationConfigTest, CommentsAreOnlyForTheFullLine) {
ReadAccelerationConfig("key,value #comment", consumer_);
EXPECT_THAT(allowlist_["key"].value, Eq("value #comment"));
}
TEST_F(ReadAccelerationConfigTest, IgnoresEmptyLines) {
ReadAccelerationConfig("", consumer_);
EXPECT_TRUE(allowlist_.empty());
EXPECT_TRUE(denylist_.empty());
}
TEST_F(ReadAccelerationConfigTest, ParsesMultipleLines) {
ReadAccelerationConfig("key1,value1\nkey2,value2\n-key3,value3", consumer_);
EXPECT_THAT(allowlist_["key1"].value, Eq("value1"));
EXPECT_THAT(allowlist_["key2"].value, Eq("value2"));
EXPECT_THAT(denylist_["key3"].value, Eq("value3"));
}
TEST_F(ReadAccelerationConfigTest, ParsesMultipleLinesWithCommentsAndSpaces) {
ReadAccelerationConfig("key1,value1\n#comment\n\nkey2,value2", consumer_);
EXPECT_THAT(allowlist_["key1"].value, Eq("value1"));
EXPECT_THAT(allowlist_["key2"].value, Eq("value2"));
}
TEST_F(ReadAccelerationConfigTest, ParsesMultipleLinesWithMissingConfigValues) {
ReadAccelerationConfig("key1\nkey2,value2\nkey3\nkey4,value4", consumer_);
EXPECT_THAT(allowlist_["key1"].value, Eq(""));
EXPECT_THAT(allowlist_["key2"].value, Eq("value2"));
EXPECT_THAT(allowlist_["key3"].value, Eq(""));
EXPECT_THAT(allowlist_["key4"].value, Eq("value4"));
}
TEST(GetAccelerationTestParam, LoadsTestConfig) {
const auto config_value_maybe =
GetAccelerationTestParam<SimpleConfig>("test-3");
ASSERT_TRUE(config_value_maybe.has_value());
ASSERT_THAT(config_value_maybe.value().value, Eq("data-3"));
}
TEST(GetAccelerationTestParam, LoadsTestConfigWithEmptyValue) {
const auto config_value_maybe =
GetAccelerationTestParam<SimpleConfig>("test-2");
ASSERT_TRUE(config_value_maybe.has_value());
ASSERT_THAT(config_value_maybe.value().value, Eq(""));
}
TEST(GetAccelerationTestParam, SupportsWildcards) {
const auto config_value_maybe =
GetAccelerationTestParam<SimpleConfig>("test-41");
ASSERT_TRUE(config_value_maybe.has_value());
ASSERT_THAT(config_value_maybe.value().value, Eq("data-4"));
}
TEST(GetAccelerationTestParam, SupportDenylist) {
const auto config_value_maybe =
GetAccelerationTestParam<SimpleConfig>("test-5");
ASSERT_FALSE(config_value_maybe.has_value());
}
struct UnmatchedSimpleConfig {
public:
static constexpr const char* kAccelerationTestConfig = nullptr;
static const char* AccelerationTestConfig() {
return kAccelerationTestConfig;
}
static UnmatchedSimpleConfig ParseConfigurationLine(
const std::string& conf_line) {
return {conf_line};
}
std::string value;
};
TEST(GetAccelerationTestParam, ReturnEmptyOptionalForNullConfig) {
ASSERT_FALSE(
GetAccelerationTestParam<UnmatchedSimpleConfig>("test-3").has_value());
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/acceleration_test_util_internal.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/acceleration_test_util_internal_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9ea1616d-5ea7-4b14-815c-362c83e857c4 | cpp | tensorflow/tensorflow | fill | tensorflow/lite/kernels/fill.cc | tensorflow/lite/kernels/fill_test.cc | #include <stdint.h>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/string_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace fill {
namespace {
constexpr int kDimsTensor = 0;
constexpr int kValueTensor = 1;
constexpr int kOutputTensor = 0;
template <typename T>
TfLiteStatus ResizeOutputImpl(TfLiteContext* context, const TfLiteTensor* dims,
TfLiteTensor* output) {
TfLiteIntArray* output_shape = TfLiteIntArrayCreate(dims->dims->data[0]);
for (int i = 0; i < output_shape->size; ++i) {
T data = GetTensorData<T>(dims)[i];
if (data < 0) {
TfLiteIntArrayFree(output_shape);
      TF_LITE_KERNEL_LOG(context, "Fill dimensions must be >= 0, got %d",
                         static_cast<int>(data));
return kTfLiteError;
}
output_shape->data[i] = data;
}
return context->ResizeTensor(context, output, output_shape);
}
TfLiteStatus ResizeOutput(TfLiteContext* context, const TfLiteTensor* dims,
TfLiteTensor* output) {
switch (dims->type) {
case kTfLiteInt32:
return ResizeOutputImpl<int32_t>(context, dims, output);
case kTfLiteInt64:
return ResizeOutputImpl<int64_t>(context, dims, output);
default:
TF_LITE_KERNEL_LOG(
context,
"Fill only currently supports int32, int64 for input 0, "
"got %d.",
dims->type);
return kTfLiteError;
}
}
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* dims;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kDimsTensor, &dims));
const TfLiteTensor* value;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kValueTensor, &value));
TF_LITE_ENSURE_EQ(context, NumDimensions(dims), 1);
const auto dtype = dims->type;
TF_LITE_ENSURE(context, dtype == kTfLiteInt32 || dtype == kTfLiteInt64);
TF_LITE_ENSURE_EQ(context, NumDimensions(value), 0);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
output->type = value->type;
TF_LITE_ENSURE_EQ(context, output->params.scale, value->params.scale);
TF_LITE_ENSURE_EQ(context, output->params.zero_point,
value->params.zero_point);
if (value->type == kTfLiteInt16) {
TF_LITE_ENSURE_EQ(context, value->params.zero_point, 0);
}
if (IsConstantOrPersistentTensor(dims)) {
TF_LITE_ENSURE_OK(context, ResizeOutput(context, dims, output));
} else {
SetTensorToDynamic(output);
}
return kTfLiteOk;
}
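// Fills every element of the string output tensor with the single scalar
// string held in `value`.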
TfLiteStatus FillString(const TfLiteTensor* value, TfLiteTensor* output) {
DynamicBuffer buffer;
const auto string_ref = GetString(value, 0);
int n = 1;
for (int i = 0; i < output->dims->size; ++i) {
n *= output->dims->data[i];
}
for (int i = 0; i < n; ++i) {
buffer.AddString(string_ref.str, string_ref.len);
}
buffer.WriteToTensor(output, nullptr);
return kTfLiteOk;
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* value;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kValueTensor, &value));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
if (IsDynamicTensor(output)) {
const TfLiteTensor* dims;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kDimsTensor, &dims));
TF_LITE_ENSURE_OK(context, ResizeOutput(context, dims, output));
}
#define TF_LITE_FILL(data_type) \
reference_ops::Fill(GetTensorShape(value), GetTensorData<data_type>(value), \
GetTensorShape(output), \
GetTensorData<data_type>(output))
switch (output->type) {
case kTfLiteInt8:
TF_LITE_FILL(int8_t);
break;
case kTfLiteInt16:
TF_LITE_FILL(int16_t);
break;
case kTfLiteInt32:
TF_LITE_FILL(int32_t);
break;
case kTfLiteInt64:
TF_LITE_FILL(int64_t);
break;
case kTfLiteFloat16:
TF_LITE_FILL(Eigen::half);
break;
case kTfLiteFloat32:
TF_LITE_FILL(float);
break;
case kTfLiteBool:
TF_LITE_FILL(bool);
break;
case kTfLiteString:
FillString(value, output);
break;
default:
TF_LITE_KERNEL_LOG(
          context,
          "Fill only currently supports int8, int16, int32, int64, float16, "
          "float32, bool, string for input 1, got %d.",
value->type);
return kTfLiteError;
}
#undef TF_LITE_FILL
return kTfLiteOk;
}
}
TfLiteRegistration* Register_FILL() {
static TfLiteRegistration r = {nullptr, nullptr,
fill::Prepare, fill::Eval};
return &r;
}
}
}
} | #include <stdint.h>
#include <initializer_list>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "Eigen/Core"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/string_type.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
using ::testing::IsEmpty;
enum class TestType {
kConst = 0,
kDynamic = 1,
};
template <typename dims_type, typename value_type>
class FillOpModel : public SingleOpModel {
public:
explicit FillOpModel(TensorType dims_tensor_type,
std::initializer_list<int> dims_shape,
std::initializer_list<dims_type> dims_data,
value_type value, TestType input_tensor_types) {
if (input_tensor_types == TestType::kDynamic) {
dims_ = AddInput(dims_tensor_type);
} else {
dims_ = AddConstInput(dims_tensor_type, dims_data, dims_shape);
}
value_ = AddInput(GetTensorType<value_type>());
output_ = AddOutput(GetTensorType<value_type>());
SetBuiltinOp(BuiltinOperator_FILL, BuiltinOptions_FillOptions,
CreateFillOptions(builder_).Union());
BuildInterpreter({dims_shape, {}});
if (input_tensor_types == TestType::kDynamic) {
if (dims_data.size() > 0) {
PopulateTensor<dims_type>(dims_, dims_data);
}
}
PopulateTensor<value_type>(value_, {value});
}
std::vector<value_type> GetOutput() {
return ExtractVector<value_type>(output_);
}
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
protected:
int dims_;
int value_;
int output_;
};
template <typename dims_type, typename quant_type>
class QuantizedFillOpModel : public SingleOpModel {
public:
explicit QuantizedFillOpModel(TensorType dims_tensor_type,
std::initializer_list<int> dims_shape,
std::initializer_list<dims_type> dims_data,
const TensorData& tensor_data, float value) {
dims_ = AddInput(dims_tensor_type);
value_ = AddInput(tensor_data);
output_ = AddOutput(tensor_data);
SetBuiltinOp(BuiltinOperator_FILL, BuiltinOptions_FillOptions,
CreateFillOptions(builder_).Union());
BuildInterpreter({dims_shape, {}});
if (dims_data.size() > 0) {
PopulateTensor<dims_type>(dims_, dims_data);
}
QuantizeAndPopulate<quant_type>(value_, {value});
}
std::vector<quant_type> GetOutput() {
return ExtractVector<quant_type>(output_);
}
std::vector<float> GetDequantizedOutput() {
TfLiteTensor* t = interpreter_->tensor(output_);
return Dequantize(GetOutput(), t->params.scale, t->params.zero_point);
}
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
protected:
int dims_;
int value_;
int output_;
};
class FillOpTest : public ::testing::TestWithParam<TestType> {};
TEST_P(FillOpTest, FillInt32) {
FillOpModel<int32_t, int32_t> m(TensorType_INT32, {2}, {2, 3}, -11,
GetParam());
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({-11, -11, -11, -11, -11, -11}));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 3}));
}
TEST_P(FillOpTest, FillInt64) {
FillOpModel<int64_t, int64_t> m(TensorType_INT64, {2}, {2, 4}, 1LL << 45,
GetParam());
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray({1LL << 45, 1LL << 45, 1LL << 45, 1LL << 45,
1LL << 45, 1LL << 45, 1LL << 45, 1LL << 45}));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 4}));
}
TEST_P(FillOpTest, FillFloat) {
FillOpModel<int64_t, float> m(TensorType_INT64, {3}, {2, 2, 2}, 4.0,
GetParam());
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(
m.GetOutput(),
Pointwise(FloatingPointEq(), {4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0}));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 2, 2}));
}
TEST_P(FillOpTest, FillFloat16) {
FillOpModel<int64_t, Eigen::half> m(TensorType_INT64, {3}, {2, 2, 2},
Eigen::half(4.0f), GetParam());
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(
m.GetOutput(),
Pointwise(FloatingPointEq(), {4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0}));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 2, 2}));
}
TEST_P(FillOpTest, FillFloatInt32Dims) {
FillOpModel<int32_t, float> m(TensorType_INT32, {3}, {2, 2, 2}, 4.0,
GetParam());
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(
m.GetOutput(),
Pointwise(FloatingPointEq(), {4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0}));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 2, 2}));
}
TEST_P(FillOpTest, FillOutputScalar) {
FillOpModel<int64_t, float> m(TensorType_INT64, {0}, {}, 4.0, GetParam());
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), Pointwise(FloatingPointEq(), {4.0}));
EXPECT_THAT(m.GetOutputShape(), IsEmpty());
}
TEST_P(FillOpTest, FillBool) {
FillOpModel<int64_t, bool> m(TensorType_INT64, {3}, {2, 2, 2}, true,
GetParam());
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({true, true, true, true, true,
true, true, true}));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 2, 2}));
}
TEST(FillOpTest, FillString) {
FillOpModel<int64_t, std::string> m(TensorType_INT64, {3}, {2, 2, 2}, "AB",
TestType::kDynamic);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({"AB", "AB", "AB", "AB", "AB",
"AB", "AB", "AB"}));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 2, 2}));
}
TEST_P(FillOpTest, FillInt8) {
FillOpModel<int64_t, int8_t> m(TensorType_INT64, {3}, {2, 2, 2}, 5,
GetParam());
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({5, 5, 5, 5, 5, 5, 5, 5}));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 2, 2}));
}
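// Helper that runs a quantized (int8/int16) FILL of a {2, 3} tensor with
// `value` and checks the dequantized output within a tolerance derived from
// the quantization range.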
template <typename quant_type>
void QuantizedFill(float value) {
const float kMin = -1;
const float kMax =
std::numeric_limits<quant_type>::max() /
static_cast<float>(std::numeric_limits<quant_type>::max() + 1);
const TensorData tensor_data(GetTensorType<quant_type>(), {},
std::abs(value) * kMin, std::abs(value) * kMax);
QuantizedFillOpModel<int32_t, quant_type> m(TensorType_INT32, {2}, {2, 3},
tensor_data, value);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
constexpr float epsilon = 0.01f;
const float min_value = tensor_data.min - epsilon;
const float max_value = tensor_data.max + epsilon;
const float kQuantizedTolerance =
(max_value - min_value) / (std::numeric_limits<quant_type>::max() -
std::numeric_limits<quant_type>::min());
EXPECT_THAT(
m.GetDequantizedOutput(),
ElementsAreArray(ArrayFloatNear(
{value, value, value, value, value, value}, kQuantizedTolerance)));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 3}));
}
TEST(FillOpTest, QuantizedFillInt8) { QuantizedFill<int8_t>(3.14f); }
TEST(FillOpTest, QuantizedFillInt16) { QuantizedFill<int16_t>(3.14f); }
INSTANTIATE_TEST_SUITE_P(FillOpTest, FillOpTest,
::testing::Values(TestType::kConst,
TestType::kDynamic));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/fill.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/fill_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5a3a6fb6-e758-4bea-854b-db3f3cb2cf0e | cpp | tensorflow/tensorflow | space_to_depth | tensorflow/lite/delegates/gpu/gl/kernels/space_to_depth.cc | tensorflow/lite/delegates/xnnpack/space_to_depth_test.cc | #include "tensorflow/lite/delegates/gpu/gl/kernels/space_to_depth.h"
#include <any>
#include <memory>
#include <string>
#include <utility>
#include "absl/memory/memory.h"
#include "absl/types/any.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/gl/node_shader.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
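// SPACE_TO_DEPTH: output channels are grouped by position inside the
// block_size x block_size spatial block, so each output (x, y, c) element is
// read back from the corresponding input texel.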
class SpaceToDepth : public NodeShader {
public:
absl::Status GenerateCode(const GenerationContext& ctx,
GeneratedCode* generated_code) const final {
const auto& attr =
std::any_cast<const SpaceToDepthAttributes&>(ctx.op_attr);
std::string code = R"(
for (int i = 0; i < 4; ++i) {
int dst_c = 4 * gid.z + i;
int block_id = dst_c / $input_data_0_c$;
int src_x = gid.x * $block_size$ + block_id % $block_size$;
int src_y = gid.y * $block_size$ + block_id / $block_size$;
int src_c = dst_c % $input_data_0_c$;
value_0[i] = $input_data_0[src_x, src_y, src_c / 4]$[src_c % 4];
}
)";
*generated_code = {
{
{"block_size", attr.block_size},
{"input_data_0_c", static_cast<int>(ctx.input_shapes[0][3])},
},
{},
{},
uint3(),
uint3(),
std::move(code),
IOStructure::ONLY_DEFINITIONS,
IOStructure::AUTO,
};
return absl::OkStatus();
}
};
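// Emits a GLSL shader for DEPTH_TO_SPACE: the inverse rearrangement, reading
// each output pixel from the channel group selected by its position within a
// block_size x block_size block.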
class DepthToSpace : public NodeShader {
public:
absl::Status GenerateCode(const GenerationContext& ctx,
GeneratedCode* generated_code) const final {
const auto& attr =
std::any_cast<const SpaceToDepthAttributes&>(ctx.op_attr);
std::string code = R"(
for (int i = 0; i < 4; ++i) {
int dst_c = 4 * gid.z + i;
int block_x = gid.x % $block_size$;
int src_x = gid.x / $block_size$;
int block_y = gid.y % $block_size$;
int src_y = gid.y / $block_size$;
int block_id = block_y * $block_size$ + block_x;
int src_c = block_id * $output_channels$ + dst_c;
value_0[i] = $input_data_0[src_x, src_y, src_c / 4]$[src_c % 4];
}
)";
*generated_code = {
{
{"block_size", attr.block_size},
{"output_channels", static_cast<int>(ctx.output_shapes[0][3])},
},
{},
{},
uint3(),
uint3(),
std::move(code),
IOStructure::ONLY_DEFINITIONS,
IOStructure::AUTO,
};
return absl::OkStatus();
}
};
}
std::unique_ptr<NodeShader> NewSpaceToDepthNodeShader() {
return std::make_unique<SpaceToDepth>();
}
std::unique_ptr<NodeShader> NewDepthToSpaceNodeShader() {
return std::make_unique<DepthToSpace>();
}
}
}
} | #include <algorithm>
#include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/xnnpack/space_to_depth_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
namespace tflite::xnnpack {
namespace {
TEST(SpaceToDepth, SinglePixel) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto block_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 16), std::ref(rng));
const int32_t block_size = block_rng();
SpaceToDepthTester()
.BatchSize(batch_rng())
.InputHeight(block_size)
.InputWidth(block_size)
.InputChannels(channel_rng())
.BlockSize(block_size)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
TEST(SpaceToDepth, SingleRow) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto width_rng =
std::bind(std::uniform_int_distribution<int32_t>(5, 25), std::ref(rng));
auto block_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 16), std::ref(rng));
const int32_t block_size = block_rng();
SpaceToDepthTester()
.BatchSize(batch_rng())
.InputHeight(block_size)
.InputWidth(width_rng() * block_size)
.InputChannels(channel_rng())
.BlockSize(block_size)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
TEST(SpaceToDepth, SingleColumn) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto height_rng =
std::bind(std::uniform_int_distribution<int32_t>(5, 25), std::ref(rng));
auto block_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 16), std::ref(rng));
const int32_t block_size = block_rng();
SpaceToDepthTester()
.BatchSize(batch_rng())
.InputHeight(height_rng() * block_size)
.InputWidth(block_size)
.InputChannels(channel_rng())
.BlockSize(block_size)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
TEST(SpaceToDepth, FullImage) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto size_rng =
std::bind(std::uniform_int_distribution<int32_t>(5, 25), std::ref(rng));
auto block_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 16), std::ref(rng));
const int32_t block_size = block_rng();
SpaceToDepthTester()
.BatchSize(batch_rng())
.InputHeight(size_rng() * block_size)
.InputWidth(size_rng() * block_size)
.InputChannels(channel_rng())
.BlockSize(block_size)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
TEST(SpaceToDepth, MultiThreading) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
delegate_options.num_threads = 2;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto size_rng =
std::bind(std::uniform_int_distribution<int32_t>(5, 25), std::ref(rng));
auto block_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 16), std::ref(rng));
const int32_t block_size = block_rng();
SpaceToDepthTester()
.BatchSize(batch_rng())
.InputHeight(size_rng() * block_size)
.InputWidth(size_rng() * block_size)
.InputChannels(channel_rng())
.BlockSize(block_size)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/kernels/space_to_depth.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/space_to_depth_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3467b85c-9553-47ea-8c9c-3ebd61a23de5 | cpp | tensorflow/tensorflow | stablehlo_gather | tensorflow/lite/kernels/stablehlo_gather.cc | tensorflow/lite/kernels/stablehlo_gather_test.cc | #include <algorithm>
#include <cstdint>
#include <memory>
#include <vector>
#include "Eigen/Core"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/runtime_shape.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/tensor_slice_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace stablehlo_gather {
namespace {
constexpr int kOperandTensor = 0;
constexpr int kStartIndicesTensor = 1;
constexpr int kOutputTensor = 0;
using TfLiteIntArrayUniquePtr =
std::unique_ptr<TfLiteIntArray, decltype(&TfLiteIntArrayFree)>;
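// Clamps each dimension of `starting_index` to at most
// operand_dim - slice_size so that the gathered slice stays inside the
// operand.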
template <typename IndexType>
TfLiteStatus ClipStartingIndex(const RuntimeShape& operand_shape,
const int64_t* slice_sizes, int num_slice_sizes,
Index<IndexType>& starting_index) {
if (operand_shape.DimensionsCount() != starting_index.size() ||
operand_shape.DimensionsCount() != num_slice_sizes) {
return kTfLiteError;
}
for (int dim = 0; dim < starting_index.size(); ++dim) {
starting_index[dim] = std::min((int64_t)starting_index[dim],
operand_shape.Dims(dim) - slice_sizes[dim]);
}
return kTfLiteOk;
}
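// Returns `slice_sizes` with the dimensions listed in `collapsed_slice_dims`
// removed.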
static std::vector<int64_t> GetCollapsedSliceShape(
const int64_t* slice_sizes, int num_slice_sizes,
const int64_t* collapsed_slice_dims, int num_collapsed_slice_dims) {
std::vector<int64_t> result(num_slice_sizes - num_collapsed_slice_dims);
int result_ctr = 0;
for (int dim = 0; dim < num_slice_sizes; dim++) {
if (!ArrayContains(collapsed_slice_dims, num_collapsed_slice_dims, dim)) {
result[result_ctr] = slice_sizes[dim];
result_ctr++;
}
}
return result;
}
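// Builds the result shape: offset dimensions take their sizes from the
// collapsed slice shape, the remaining batch dimensions take theirs from
// `start_indices_shape` (skipping `index_vector_dim`).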
static TfLiteIntArrayUniquePtr GetResultShape(
int64_t result_rank, const TfLiteStablehloGatherParams* data,
const RuntimeShape& start_indices_shape) {
TfLiteIntArrayUniquePtr result = TfLiteIntArrayUniquePtr(
TfLiteIntArrayCreate(result_rank), &TfLiteIntArrayFree);
int result_ctr = 0;
std::vector<int64_t> collapsed_slice_shape = GetCollapsedSliceShape(
data->slice_sizes, data->num_slice_sizes, data->collapsed_slice_dims,
data->num_collapsed_slice_dims);
int64_t slice_shape_ctr = 0;
int64_t start_indices_shape_ctr = 0;
for (int64_t dim = 0; dim < result_rank; dim++) {
if (ArrayContains(data->offset_dims, data->num_offset_dims, dim)) {
result->data[result_ctr] = collapsed_slice_shape[slice_shape_ctr];
slice_shape_ctr++;
} else {
if (start_indices_shape_ctr == data->index_vector_dim) {
start_indices_shape_ctr++;
}
result->data[result_ctr] =
start_indices_shape.Dims(start_indices_shape_ctr);
start_indices_shape_ctr++;
}
result_ctr++;
}
return result;
}
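// Splits a result index into its batch part and its offset part, using
// `offset_dims` to decide which result dimensions are offset dimensions.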
template <typename IndexType>
TfLiteStatus SetBatchAndOffsetIndices(const Index<IndexType>& result_index,
const int64_t* offset_dims,
int num_offset_dims,
Index<IndexType>& batch_index,
Index<IndexType>& offset_index) {
int offset_index_ctr = 0;
int batch_index_ctr = 0;
for (int result_dim = 0; result_dim < result_index.size(); ++result_dim) {
if (ArrayContains(offset_dims, num_offset_dims, result_dim)) {
if (offset_index_ctr >= num_offset_dims) {
return kTfLiteError;
}
offset_index[offset_index_ctr] = result_index[result_dim];
offset_index_ctr++;
} else {
if (batch_index_ctr >= result_index.size() - num_offset_dims) {
return kTfLiteError;
}
batch_index[batch_index_ctr] = result_index[result_dim];
batch_index_ctr++;
}
}
return kTfLiteOk;
}
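// Core gather loop: for every result element, read the start-index vector for
// its batch position, scatter it through `start_index_map`, clamp it into the
// operand, add the expanded offset index, and copy the addressed operand
// value into the output.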
template <typename IndexType, typename DataType>
TfLiteStatus EvalWithTypes(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* operand;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kOperandTensor, &operand));
int operand_rank = operand->dims->size;
RuntimeShape operand_shape = GetTensorShape(operand);
const TfLiteTensor* start_indices;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kStartIndicesTensor,
&start_indices));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
const TfLiteStablehloGatherParams* data =
reinterpret_cast<TfLiteStablehloGatherParams*>(node->builtin_data);
RuntimeShape start_indices_shape = GetTensorShape(start_indices);
int result_rank = output->dims->size;
RuntimeShape result_runtime_shape(result_rank, output->dims->data);
Index<IndexType> result_index = Index<IndexType>(result_rank, 0);
int64_t num_batch_dims = result_rank - data->num_offset_dims;
Index<IndexType> batch_index(num_batch_dims);
Index<IndexType> offset_index(data->num_offset_dims);
do {
TF_LITE_ENSURE_OK(
context, SetBatchAndOffsetIndices(result_index, data->offset_dims,
data->num_offset_dims, batch_index,
offset_index));
Index<IndexType> starting_index_vector =
ReadIndexVector(start_indices, start_indices_shape, batch_index,
data->index_vector_dim);
Index<IndexType> final_starting_index;
ScatterIndex(starting_index_vector, data->start_index_map,
data->num_start_index_map, operand_rank,
&final_starting_index);
TF_LITE_ENSURE_OK(
context,
ClipStartingIndex(operand_shape, data->slice_sizes,
data->num_slice_sizes, final_starting_index));
Index<IndexType> full_offset_index;
ExpandDims(offset_index, data->collapsed_slice_dims,
data->num_collapsed_slice_dims, &full_offset_index);
Index<IndexType> operand_lookup_index =
AddIndices(final_starting_index, full_offset_index);
const DataType* operand_data = GetTensorData<DataType>(operand);
IndexType flat_operand_index =
TensorIndexToFlat(operand_lookup_index.data(),
operand_lookup_index.size(), GetTensorShape(operand));
DataType looked_up_value = operand_data[flat_operand_index];
DataType* result_data = GetTensorData<DataType>(output);
IndexType flat_result_index = TensorIndexToFlat(
result_index.data(), result_index.size(), GetTensorShape(output));
result_data[flat_result_index] = looked_up_value;
} while (NextIndex(result_rank, result_runtime_shape.DimsData(),
result_index.data()));
return TfLiteStatus::kTfLiteOk;
}
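// Dispatches EvalWithTypes on the operand's element type.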
template <typename IndexType>
TfLiteStatus EvalWithIndexType(TfLiteContext* context, TfLiteNode* node,
TfLiteType index_type, TfLiteType data_type) {
switch (data_type) {
case kTfLiteFloat16:
return EvalWithTypes<IndexType, Eigen::half>(context, node);
case kTfLiteFloat32:
return EvalWithTypes<IndexType, float>(context, node);
case kTfLiteFloat64:
return EvalWithTypes<IndexType, double>(context, node);
case kTfLiteInt8:
return EvalWithTypes<IndexType, int8_t>(context, node);
case kTfLiteInt16:
return EvalWithTypes<IndexType, int16_t>(context, node);
case kTfLiteInt32:
return EvalWithTypes<IndexType, int32_t>(context, node);
case kTfLiteInt64:
return EvalWithTypes<IndexType, int64_t>(context, node);
case kTfLiteUInt8:
return EvalWithTypes<IndexType, uint8_t>(context, node);
case kTfLiteUInt16:
return EvalWithTypes<IndexType, uint16_t>(context, node);
case kTfLiteUInt32:
return EvalWithTypes<IndexType, uint32_t>(context, node);
case kTfLiteUInt64:
return EvalWithTypes<IndexType, uint64_t>(context, node);
default:
TF_LITE_KERNEL_LOG(
context, "(Index Type: %s, Data Type: %s) currently not supported.\n",
TfLiteTypeGetName(index_type), TfLiteTypeGetName(data_type));
return TfLiteStatus::kTfLiteError;
}
}
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* operand;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kOperandTensor, &operand));
const TfLiteTensor* start_indices;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kStartIndicesTensor,
&start_indices));
TfLiteType index_type = start_indices->type;
TfLiteType data_type = operand->type;
if (index_type == kTfLiteInt32) {
return EvalWithIndexType<int32_t>(context, node, index_type, data_type);
} else if (index_type == kTfLiteInt64) {
return EvalWithIndexType<int64_t>(context, node, index_type, data_type);
} else {
TF_LITE_KERNEL_LOG(context, "(Index Type: %s) currently not supported.\n",
TfLiteTypeGetName(index_type));
return TfLiteStatus::kTfLiteError;
}
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* operand;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kOperandTensor, &operand));
const TfLiteTensor* start_indices;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kStartIndicesTensor,
&start_indices));
TfLiteType index_type = start_indices->type;
if (index_type != kTfLiteInt32 && index_type != kTfLiteInt64) {
TF_LITE_KERNEL_LOG(context, "(Index Type: %s) currently not supported.\n",
TfLiteTypeGetName(index_type));
return TfLiteStatus::kTfLiteError;
}
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
const TfLiteStablehloGatherParams* data =
reinterpret_cast<TfLiteStablehloGatherParams*>(node->builtin_data);
RuntimeShape start_indices_shape = GetTensorShape(start_indices);
TfLiteIntArrayUniquePtr result_shape =
GetResultShape(output->dims->size, data, start_indices_shape);
TF_LITE_ENSURE_STATUS(
context->ResizeTensor(context, output, result_shape.release()));
return TfLiteStatus::kTfLiteOk;
}
}
TfLiteRegistration* Register_STABLEHLO_GATHER() {
static TfLiteRegistration r = {nullptr, nullptr, stablehlo_gather::Prepare,
stablehlo_gather::Eval};
return &r;
}
}
}
} | #include <cstdint>
#include <initializer_list>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
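// Wraps a single STABLEHLO_GATHER op, serializing the given
// TfLiteStablehloGatherParams into the flatbuffer options.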
class StablehloGatherOpModel : public SingleOpModel {
public:
StablehloGatherOpModel(const TensorData& input, const TensorData& indices,
const TfLiteStablehloGatherParams& params) {
input_ = AddInput(input);
indices_ = AddInput(indices);
output_ = AddOutput(TensorData(input.type, {2, 3, 2, 2}));
SetBuiltinOp(
BuiltinOperator_STABLEHLO_GATHER,
BuiltinOptions2_StablehloGatherOptions,
CreateStablehloGatherOptions(
builder_,
builder_.CreateVector(
std::vector(params.offset_dims,
params.offset_dims + params.num_offset_dims)),
builder_.CreateVector(std::vector(
params.collapsed_slice_dims,
params.collapsed_slice_dims + params.num_collapsed_slice_dims)),
builder_.CreateVector(std::vector(
params.start_index_map,
params.start_index_map + params.num_start_index_map)),
params.index_vector_dim,
builder_.CreateVector(
std::vector(params.slice_sizes,
params.slice_sizes + params.num_slice_sizes)),
params.indices_are_sorted)
.Union());
BuildInterpreter({GetShape(input_), GetShape(indices_)});
}
template <typename T>
void SetInput(std::initializer_list<T> data) {
PopulateTensor<T>(input_, data);
}
template <typename T>
void SetIndices(std::initializer_list<T> data) {
PopulateTensor<T>(indices_, data);
}
template <typename T>
std::vector<T> GetOutput() {
return ExtractVector<T>(output_);
}
protected:
int input_;
int indices_;
int output_;
};
TEST(StablehloScatterOpTest, GathersSlices) {
  TfLiteStablehloGatherParams params = {
      /*offset_dims=*/{2, 3},
      /*num_offset_dims=*/2,
      /*collapsed_slice_dims=*/{0},
      /*num_collapsed_slice_dims=*/1,
      /*start_index_map=*/{1, 0},
      /*num_start_index_map=*/2,
      /*index_vector_dim=*/2,
      /*slice_sizes=*/{1, 2, 2},
      /*num_slice_sizes=*/3,
      /*indices_are_sorted=*/false
  };
StablehloGatherOpModel model({TensorType_FLOAT32, {3, 4, 2}},
{TensorType_INT64, {2, 3, 2}}, params);
model.SetInput<float>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24});
model.SetIndices<int64_t>({0, 0, 1, 0, 2, 1, 0, 1, 1, 1, 0, 2});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
std::vector<float> expected_values = {1, 2, 3, 4, 3, 4, 5, 6,
13, 14, 15, 16, 9, 10, 11, 12,
11, 12, 13, 14, 17, 18, 19, 20};
EXPECT_THAT(model.GetOutput<float>(), ElementsAreArray(expected_values));
}
TEST(StablehloScatterOpTest, ClipsStartingIndices) {
TfLiteStablehloGatherParams params = {
{2, 3},
2,
{0},
1,
{1, 0},
2,
2,
{1, 2, 2},
3,
false
};
StablehloGatherOpModel model({TensorType_FLOAT32, {3, 4, 2}},
{TensorType_INT64, {2, 3, 2}}, params);
model.SetInput<float>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24});
model.SetIndices<int64_t>({0, 0, 1, 0, 2, 1, 0, 1, 1, 1, 0, 9});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
std::vector<float> expected_values = {1, 2, 3, 4, 3, 4, 5, 6,
13, 14, 15, 16, 9, 10, 11, 12,
11, 12, 13, 14, 17, 18, 19, 20};
EXPECT_THAT(model.GetOutput<float>(), ElementsAreArray(expected_values));
}
TEST(StablehloScatterOpTest, WorksWithDynamicShapes) {
TfLiteStablehloGatherParams params = {
{2, 3},
2,
{0},
1,
{1, 0},
2,
2,
{1, 2, 2},
3,
false
};
TensorData indices_tensor = {TensorType_INT64,
{2, 3, 2},
0.0f,
0.0f,
0.0f,
0,
false,
{},
{},
0,
{},
{},
{},
{},
{{-1, -1, 2}}};
StablehloGatherOpModel model({TensorType_FLOAT32, {3, 4, 2}}, indices_tensor,
params);
model.SetInput<float>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24});
model.SetIndices<int64_t>({0, 0, 1, 0, 2, 1, 0, 1, 1, 1, 0, 9});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
std::vector<float> expected_values = {1, 2, 3, 4, 3, 4, 5, 6,
13, 14, 15, 16, 9, 10, 11, 12,
11, 12, 13, 14, 17, 18, 19, 20};
EXPECT_THAT(model.GetOutput<float>(), ElementsAreArray(expected_values));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/stablehlo_gather.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/stablehlo_gather_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b6c66bda-f15d-4819-892f-c0d9a4e0df25 | cpp | tensorflow/tensorflow | dynamic_update_slice | tensorflow/lite/kernels/dynamic_update_slice.cc | tensorflow/lite/kernels/dynamic_update_slice_test.cc | #include <algorithm>
#include <cmath>
#include <cstdint>
#include <vector>
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace dynamic_update_slice {
constexpr int kOperandTensor = 0;
constexpr int kUpdateTensor = 1;
constexpr int kStartIndicesTensor = 2;
constexpr int kOutputTensor = 0;
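// Checks that the update fits inside the operand (same rank, each dimension
// no larger) and that the 1-D start-indices tensor has one entry per operand
// dimension, then sizes the output like the operand.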
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* operand;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kOperandTensor, &operand));
const TfLiteTensor* update;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kUpdateTensor, &update));
const TfLiteTensor* start_indices;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kStartIndicesTensor,
&start_indices));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TF_LITE_ENSURE(context, NumDimensions(start_indices) == 1);
TF_LITE_ENSURE(context,
SizeOfDimension(start_indices, 0) == NumDimensions(operand));
TF_LITE_ENSURE(context, NumDimensions(update) == NumDimensions(operand));
for (int i = 0; i < NumDimensions(operand); i++) {
TF_LITE_ENSURE(context,
SizeOfDimension(update, i) <= SizeOfDimension(operand, i));
}
TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
TF_LITE_ENSURE_TYPES_EQ(context, operand->type, update->type);
TF_LITE_ENSURE(context, start_indices->type == kTfLiteInt32 ||
start_indices->type == kTfLiteInt64);
output->type = operand->type;
TfLiteIntArray* output_size = TfLiteIntArrayCopy(operand->dims);
return context->ResizeTensor(context, output, output_size);
}
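// Flattens a multi-dimensional index (optionally offset by `start_indices`)
// into a row-major buffer offset for `shape`.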
int TensorIndexToFlat(const int* index, const int dims,
const RuntimeShape& shape,
const int* start_indices = nullptr) {
int flat_index = index[0] + (start_indices ? start_indices[0] : 0);
for (int i = 1; i < dims; i++) {
flat_index = flat_index * shape.Dims(i) + index[i] +
(start_indices ? start_indices[i] : 0);
}
return flat_index;
}
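// Clamps each start index to [0, input_dim - update_dim] so that the update
// region lies entirely inside the operand.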
std::vector<int> ClampStartIndices(int input_dims, const int64_t* indices_data,
const RuntimeShape& input_shape,
const RuntimeShape& update_shape) {
std::vector<int> clamped_start_indices(input_dims, 0);
for (int i = 0; i < input_dims; i++) {
clamped_start_indices[i] = static_cast<int32_t>(
std::min<int64_t>(std::max<int64_t>(0, indices_data[i]),
input_shape.Dims(i) - update_shape.Dims(i)));
}
return clamped_start_indices;
}
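// Recursively copies `update` into `output`: the innermost dimension is
// copied with a single memcpy, outer dimensions are walked using the
// precomputed strides and the (already clamped) start indices.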
template <typename T>
void update_slice(int current_dim, int max_dim, const int32_t* output_stride,
const int32_t* update_stride, const int32_t* update_shape,
const T* update, const int32_t* indices_data, T* output) {
if (current_dim == max_dim) return;
if (current_dim == max_dim - 1) {
output += indices_data[current_dim] * output_stride[current_dim];
memcpy(output, update, update_shape[max_dim - 1] * sizeof(T));
} else {
output += indices_data[current_dim] * output_stride[current_dim];
for (int i = 0; i < update_shape[current_dim]; ++i) {
update_slice(current_dim + 1, max_dim, output_stride, update_stride,
update_shape, update, indices_data, output);
output += output_stride[current_dim];
update += update_stride[current_dim];
}
}
}
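// Copies `input` into `output` (unless they share a buffer) and then
// overwrites the region selected by the clamped start indices with `update`.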
template <typename T>
void DynamicUpdateSlice(const TfLiteTensor* input, const TfLiteTensor* update,
const int64_t* indices_data, TfLiteTensor* output) {
const auto& input_shape = GetTensorShape(input);
const auto& update_shape = GetTensorShape(update);
const T* update_data = GetTensorData<T>(update);
T* output_data = GetTensorData<T>(output);
const int input_dims = input_shape.DimensionsCount();
if (input_shape.FlatSize() == update_shape.FlatSize()) {
memcpy(output_data, update_data, input_shape.FlatSize() * sizeof(T));
return;
}
std::vector<int> clamped_start_indices =
ClampStartIndices(input_dims, indices_data, input_shape, update_shape);
if (input->data.data != output->data.data) {
memcpy(output->data.data, input->data.data, input->bytes);
}
if (update_shape.FlatSize() == 0) {
return;
}
std::vector<int> output_stride(input_dims);
std::vector<int> update_stride(input_dims);
output_stride[input_dims - 1] = 1;
update_stride[input_dims - 1] = 1;
const int32_t* input_shape_data = input_shape.DimsData();
const int32_t* update_shape_data = update_shape.DimsData();
for (int i = input_dims - 2; i >= 0; --i) {
output_stride[i] = output_stride[i + 1] * input_shape_data[i + 1];
update_stride[i] = update_stride[i + 1] * update_shape_data[i + 1];
}
update_slice(0, input_dims, output_stride.data(), update_stride.data(),
update_shape.DimsData(), update_data,
clamped_start_indices.data(), output_data);
}
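// Reads the start indices as int64 (accepting int32 or int64 input) and
// dispatches on the operand's element type.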
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* operand;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kOperandTensor, &operand));
const TfLiteTensor* update;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kUpdateTensor, &update));
const TfLiteTensor* indice;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kStartIndicesTensor, &indice));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
const auto& input_shape = GetTensorShape(operand);
const int input_dims = input_shape.DimensionsCount();
std::vector<int64_t> indices_data_i64;
if (indice->type == kTfLiteInt32) {
for (int i = 0; i < input_dims; i++)
indices_data_i64.push_back(static_cast<int64_t>(indice->data.i32[i]));
} else if (indice->type == kTfLiteInt64) {
for (int i = 0; i < input_dims; i++)
indices_data_i64.push_back(indice->data.i64[i]);
} else {
TF_LITE_KERNEL_LOG(context,
"DynamicUpdateSlice only currently supports "
"int32 or int64 indices type, got %d.",
indice->type);
return kTfLiteError;
}
switch (operand->type) {
case kTfLiteFloat32:
DynamicUpdateSlice<float>(operand, update, indices_data_i64.data(),
output);
break;
case kTfLiteBool:
DynamicUpdateSlice<bool>(operand, update, indices_data_i64.data(),
output);
break;
case kTfLiteInt8:
DynamicUpdateSlice<int8_t>(operand, update, indices_data_i64.data(),
output);
break;
case kTfLiteInt32:
DynamicUpdateSlice<int32_t>(operand, update, indices_data_i64.data(),
output);
break;
case kTfLiteInt64:
DynamicUpdateSlice<int64_t>(operand, update, indices_data_i64.data(),
output);
break;
default:
TF_LITE_KERNEL_LOG(context,
"DynamicUpdateSlice only currently supports "
"1-bit/8-bit/32-bit/64-bit integer or "
"float type, got %d.",
operand->type);
return kTfLiteError;
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_DYNAMIC_UPDATE_SLICE() {
static TfLiteRegistration r = {nullptr,
nullptr,
dynamic_update_slice::Prepare,
dynamic_update_slice::Eval,
nullptr,
0,
nullptr,
0,
nullptr,
nullptr,
kTfLiteInplaceOpInput0Shared};
return &r;
}
}
}
} | #include <stdint.h>
#include <algorithm>
#include <initializer_list>
#include <string>
#include <unordered_map>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/kernels/subgraph_test_util.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
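// Wraps a single DYNAMIC_UPDATE_SLICE op with operand, update and
// start-indices inputs.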
class DynamicUpdateSliceOpModel : public SingleOpModel {
public:
DynamicUpdateSliceOpModel(const TensorData& operand, const TensorData& update,
const TensorData& start_indices) {
input_ = AddInput(operand);
update_ = AddInput(update);
start_indices_ = AddInput(start_indices);
output_ = AddOutput(operand.type);
SetBuiltinOp(BuiltinOperator_DYNAMIC_UPDATE_SLICE,
BuiltinOptions_DynamicUpdateSliceOptions,
CreateDynamicUpdateSliceOptions(builder_).Union());
BuildInterpreter(
{GetShape(input_), GetShape(update_), GetShape(start_indices_)});
}
template <typename T>
void SetInput(std::initializer_list<T> data) {
PopulateTensor<T>(input_, data);
}
template <typename T>
void SetUpdate(std::initializer_list<T> data) {
PopulateTensor<T>(update_, data);
}
void SetStringInput(std::initializer_list<string> data) {
PopulateStringTensor(input_, data);
}
template <typename T>
void SetStartIndices(std::initializer_list<T> data) {
PopulateTensor<T>(start_indices_, data);
}
template <typename T>
std::vector<T> GetOutput() {
return ExtractVector<T>(output_);
}
std::vector<string> GetStringOutput() {
return ExtractVector<string>(output_);
}
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
protected:
int input_;
int update_;
int start_indices_;
int output_;
};
TEST(DynamicUpdateSliceOpTest, SimpleTestF32InPlaceInput) {
DynamicUpdateSliceOpModel m({TensorType_FLOAT32, {3, 3}},
{TensorType_FLOAT32, {2, 1}},
{TensorType_INT32, {2}});
m.SetInput<float>({1, 2, 3,
4, 5, 6,
7, 8, 9});
m.SetUpdate<float>({-1, -2});
m.SetStartIndices<int32_t>({1, 1});
const int kInplaceInputTensorIdx = 0;
const int kInplaceOutputTensorIdx = 0;
const TfLiteTensor* input_tensor = m.GetInputTensor(kInplaceInputTensorIdx);
TfLiteTensor* output_tensor = m.GetOutputTensor(kInplaceOutputTensorIdx);
output_tensor->data.data = input_tensor->data.data;
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(),
ElementsAreArray(ArrayFloatNear({1, 2, 3,
4, -1, 6,
7, -2, 9})));
EXPECT_EQ(output_tensor->data.data, input_tensor->data.data);
}
TEST(DynamicUpdateSliceOpTest, SimpleTestF32) {
DynamicUpdateSliceOpModel m({TensorType_FLOAT32, {3, 3}},
{TensorType_FLOAT32, {2, 1}},
{TensorType_INT32, {2}});
m.SetInput<float>({1, 2, 3,
4, 5, 6,
7, 8, 9});
m.SetUpdate<float>({-1, -2});
m.SetStartIndices<int32_t>({1, 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(),
ElementsAreArray(ArrayFloatNear({1, 2, 3,
4, -1, 6,
7, -2, 9})));
}
TEST(DynamicUpdateSliceOpTest, SimpleTestI1) {
DynamicUpdateSliceOpModel m({TensorType_BOOL, {3, 3}},
{TensorType_BOOL, {2, 1}},
{TensorType_INT32, {2}});
m.SetInput<bool>({true, true, true,
true, true, true,
true, true, true});
m.SetUpdate<bool>({false, false});
m.SetStartIndices<int32_t>({1, 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<bool>(), ElementsAreArray({true, true, true,
true, false, true,
true, false, true}));
}
TEST(DynamicUpdateSliceOpTest, SimpleTestI8) {
DynamicUpdateSliceOpModel m({TensorType_INT8, {3, 3}},
{TensorType_INT8, {2, 1}},
{TensorType_INT32, {2}});
m.SetInput<int8_t>({1, 2, 3,
4, 5, 6,
7, 8, 9});
m.SetUpdate<int8_t>({-1, -2});
m.SetStartIndices<int32_t>({1, 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<int8_t>(), ElementsAreArray({1, 2, 3,
4, -1, 6,
7, -2, 9}));
}
TEST(DynamicUpdateSliceOpTest, SimpleTestI32) {
DynamicUpdateSliceOpModel m({TensorType_INT32, {3, 3}},
{TensorType_INT32, {2, 1}},
{TensorType_INT32, {2}});
m.SetInput<int32_t>({1, 2, 3,
4, 5, 6,
7, 8, 9});
m.SetUpdate<int32_t>({-1, -2});
m.SetStartIndices<int32_t>({1, 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<int32_t>(), ElementsAreArray({1, 2, 3,
4, -1, 6,
7, -2, 9}));
}
TEST(DynamicUpdateSliceOpTest, ZeroSizeTestI32) {
DynamicUpdateSliceOpModel m({TensorType_INT32, {3, 3}},
{TensorType_INT32, {2, 0}},
{TensorType_INT32, {2}});
m.SetInput<int32_t>({1, 2, 3,
4, 5, 6,
7, 8, 9});
m.SetStartIndices<int32_t>({1, 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<int32_t>(), ElementsAreArray({1, 2, 3,
4, 5, 6,
7, 8, 9}));
}
TEST(DynamicUpdateSliceOpTest, SimpleTestI64) {
DynamicUpdateSliceOpModel m({TensorType_INT64, {3, 3}},
{TensorType_INT64, {2, 1}},
{TensorType_INT32, {2}});
m.SetInput<int64_t>({1, 2, 3,
4, 5, 6,
7, 8, 9});
m.SetUpdate<int64_t>({-1, -2});
m.SetStartIndices<int32_t>({1, 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<int64_t>(), ElementsAreArray({1, 2, 3,
4, -1, 6,
7, -2, 9}));
}
TEST(DynamicUpdateSliceOpTest, SimpleTestI64Indices) {
DynamicUpdateSliceOpModel m({TensorType_INT64, {3, 3}},
{TensorType_INT64, {2, 1}},
{TensorType_INT64, {2}});
m.SetInput<int64_t>({1, 2, 3,
4, 5, 6,
7, 8, 9});
m.SetUpdate<int64_t>({-1, -2});
m.SetStartIndices<int64_t>({1, 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<int64_t>(), ElementsAreArray({1, 2, 3,
4, -1, 6,
7, -2, 9}));
}
TEST(DynamicUpdateSliceOpTest, BoundaryTest) {
DynamicUpdateSliceOpModel m({TensorType_FLOAT32, {3, 3}},
{TensorType_FLOAT32, {2, 2}},
{TensorType_INT32, {2}});
m.SetInput<float>({1, 2, 3,
4, 5, 6,
7, 8, 9});
m.SetUpdate<float>({-1, -2,
-3, -4});
m.SetStartIndices<int32_t>({2, 2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(),
ElementsAreArray(ArrayFloatNear({1, 2, 3,
4, -1, -2,
7, -3, -4})));
}
TEST(DynamicUpdateSliceOpTest, UpdateShapeTooLargeTest) {
EXPECT_DEATH_IF_SUPPORTED(
DynamicUpdateSliceOpModel({TensorType_FLOAT32, {3, 3}},
{TensorType_FLOAT32, {4, 2}},
{TensorType_INT32, {2}}),
"SizeOfDimension\\(update, i\\) <= SizeOfDimension\\(operand, "
"i\\) was not true.");
}
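// Builds a small subgraph around DYNAMIC_UPDATE_SLICE (either the in-place or
// the plain-input variant) so the tests below can check how tensor buffers
// are shared.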
class DynamicUpdateSliceGraphModel {
public:
static constexpr struct InPlaceGraph {
} kInPlaceGraph{};
static constexpr struct NotInPlaceGraph {
} kNotInPlaceGraph{};
DynamicUpdateSliceGraphModel(InPlaceGraph, bool multiple_consumers) {
builder_.BuildInplaceDynamicUpdateSliceSubgraph(
interpreter_.primary_subgraph(), multiple_consumers);
SetUpInterpreter();
}
explicit DynamicUpdateSliceGraphModel(NotInPlaceGraph) {
builder_.BuildInputDynamicUpdateSliceSubgraph(
interpreter_.primary_subgraph());
SetUpInterpreter();
}
void SetUpInterpreter() {
interpreter_.ResizeInputTensor(interpreter_.inputs()[0], {2, 3});
interpreter_.ResizeInputTensor(interpreter_.inputs()[1], {1, 3});
interpreter_.ResizeInputTensor(interpreter_.inputs()[2], {2});
CHECK_EQ(interpreter_.AllocateTensors(), kTfLiteOk);
subgraph_test_util::FillIntTensor(&GetInputTensor(0), {0, 0, 0, 0, 0, 0});
subgraph_test_util::FillIntTensor(&GetInputTensor(1), {3, 3, 3});
subgraph_test_util::FillIntTensor(&GetInputTensor(2), {1, 0});
}
Interpreter& GetInterpreter() { return interpreter_; }
TfLiteTensor& GetTensor(int index) { return *interpreter_.tensor(index); }
TfLiteTensor& GetInputTensor(int index) {
return GetTensor(interpreter_.inputs()[index]);
}
TfLiteTensor& GetOutputTensor(int index) {
return GetTensor(interpreter_.outputs()[index]);
}
protected:
Interpreter interpreter_;
subgraph_test_util::SubgraphBuilder builder_;
};
absl::Span<int> ShapeOf(const TfLiteTensor& tensor) {
if (!tensor.dims) {
return {};
}
return absl::Span<int>(tensor.dims->data, tensor.dims->size);
}
template <class T>
absl::Span<int32_t> DataOf(const TfLiteTensor& tensor) {
return absl::Span<int>(tensor.data.i32, tensor.bytes / sizeof(T));
}
TEST(DynamicUpdateSliceOpTest, DoNotReuseGraphInputBuffer) {
auto model = DynamicUpdateSliceGraphModel(
DynamicUpdateSliceGraphModel::kNotInPlaceGraph);
ASSERT_EQ(model.GetInterpreter().Invoke(), kTfLiteOk);
const TfLiteTensor& output = model.GetOutputTensor(0);
EXPECT_THAT(ShapeOf(output), ElementsAre(2, 3));
EXPECT_THAT(DataOf<int32_t>(output), ElementsAre(1, 1, 1, 4, 4, 4));
const TfLiteTensor& input0 = model.GetInputTensor(0);
const TfLiteTensor& intermediate = model.GetTensor(5);
EXPECT_NE(input0.data.data, intermediate.data.data);
}
TEST(DynamicUpdateSliceOpTest, OnlyShareBufferForASingleConsumer) {
for (bool multiple_consumers : {true, false}) {
auto model = DynamicUpdateSliceGraphModel(
DynamicUpdateSliceGraphModel::kInPlaceGraph, multiple_consumers);
ASSERT_EQ(model.GetInterpreter().Invoke(), kTfLiteOk);
const TfLiteTensor& output = model.GetOutputTensor(0);
EXPECT_THAT(ShapeOf(output), ElementsAre(2, 3));
EXPECT_THAT(DataOf<int32_t>(output), ElementsAre(2, 2, 2, 4, 4, 4));
const TfLiteTensor& intermediate0 = model.GetTensor(5);
const TfLiteTensor& intermediate1 = model.GetTensor(6);
if (multiple_consumers) {
EXPECT_NE(intermediate0.data.data, intermediate1.data.data);
} else {
EXPECT_EQ(intermediate0.data.data, intermediate1.data.data);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/dynamic_update_slice.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/dynamic_update_slice_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9a5e4792-13bf-44f5-910b-69ecbfb4df07 | cpp | tensorflow/tensorflow | comparisons | tensorflow/lite/kernels/internal/reference/comparisons.cc | tensorflow/lite/kernels/comparisons_test.cc | #include "tensorflow/lite/kernels/internal/reference/comparisons.h"
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/runtime_shape.h"
namespace tflite {
namespace reference_ops {
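// Precomputes the broadcast descriptors and the extended 4-D output shape
// shared by the slow broadcast comparison kernels.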
BroadcastComparison4DSlowCommon BroadcastComparison4DSlowPreprocess(
const RuntimeShape& unextended_input1_shape,
const RuntimeShape& unextended_input2_shape,
const RuntimeShape& unextended_output_shape) {
TFLITE_DCHECK_LE(unextended_input1_shape.DimensionsCount(), 4);
TFLITE_DCHECK_LE(unextended_input2_shape.DimensionsCount(), 4);
TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 4);
NdArrayDesc<4> desc1;
NdArrayDesc<4> desc2;
NdArrayDescsForElementwiseBroadcast(unextended_input1_shape,
unextended_input2_shape, &desc1, &desc2);
return {RuntimeShape::ExtendedShape(4, unextended_output_shape), desc1,
desc2};
}
}
} | #include <stdint.h>
#include <initializer_list>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/string_type.h"
namespace tflite {
namespace {
using ::testing::ElementsAre;
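// Wraps a single comparison op (EQUAL, NOT_EQUAL, GREATER, GREATER_EQUAL,
// LESS or LESS_EQUAL) with two inputs and a boolean output.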
class ComparisonOpModel : public SingleOpModel {
public:
ComparisonOpModel(std::initializer_list<int> input1_shape,
std::initializer_list<int> input2_shape,
TensorType input_type, BuiltinOperator op) {
input1_ = AddInput(input_type);
input2_ = AddInput(input_type);
output_ = AddOutput(TensorType_BOOL);
ConfigureBuiltinOp(op);
BuildInterpreter({input1_shape, input2_shape});
}
ComparisonOpModel(const TensorData& input1, const TensorData& input2,
TensorType input_type, BuiltinOperator op) {
input1_ = AddInput(input1);
input2_ = AddInput(input2);
output_ = AddOutput(TensorType_BOOL);
ConfigureBuiltinOp(op);
BuildInterpreter({GetShape(input1_), GetShape(input2_)});
}
int input1() { return input1_; }
int input2() { return input2_; }
std::vector<bool> GetOutput() { return ExtractVector<bool>(output_); }
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
private:
int input1_;
int input2_;
int output_;
void ConfigureBuiltinOp(BuiltinOperator op) {
switch (op) {
case BuiltinOperator_EQUAL: {
SetBuiltinOp(op, BuiltinOptions_EqualOptions,
CreateEqualOptions(builder_).Union());
break;
}
case BuiltinOperator_NOT_EQUAL: {
SetBuiltinOp(op, BuiltinOptions_NotEqualOptions,
CreateNotEqualOptions(builder_).Union());
break;
}
case BuiltinOperator_GREATER: {
SetBuiltinOp(op, BuiltinOptions_GreaterOptions,
CreateGreaterOptions(builder_).Union());
break;
}
case BuiltinOperator_GREATER_EQUAL: {
SetBuiltinOp(op, BuiltinOptions_GreaterEqualOptions,
CreateGreaterEqualOptions(builder_).Union());
break;
}
case BuiltinOperator_LESS: {
SetBuiltinOp(op, BuiltinOptions_LessOptions,
CreateLessOptions(builder_).Union());
break;
}
case BuiltinOperator_LESS_EQUAL: {
SetBuiltinOp(op, BuiltinOptions_LessEqualOptions,
CreateLessEqualOptions(builder_).Union());
break;
}
default: {
FAIL() << "We shouldn't get here.";
}
}
}
};
TEST(ComparisonsTest, EqualBool) {
ComparisonOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, TensorType_BOOL,
BuiltinOperator_EQUAL);
model.PopulateTensor<bool>(model.input1(), {true, false, true, false});
model.PopulateTensor<bool>(model.input2(), {true, true, false, false});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(true, false, false, true));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4));
}
TEST(ComparisonsTest, EqualFloat) {
ComparisonOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, TensorType_FLOAT32,
BuiltinOperator_EQUAL);
model.PopulateTensor<float>(model.input1(), {0.1, 0.9, 0.7, 0.3});
model.PopulateTensor<float>(model.input2(), {0.1, 0.2, 0.6, 0.5});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(true, false, false, false));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4));
}
TEST(ComparisonsTest, EqualInt) {
ComparisonOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, TensorType_INT32,
BuiltinOperator_EQUAL);
model.PopulateTensor<int>(model.input1(), {-1, 9, 7, 3});
model.PopulateTensor<int>(model.input2(), {1, 2, 7, 5});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(false, false, true, false));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4));
}
TEST(ComparisonsTest, EqualInt16) {
ComparisonOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, TensorType_INT16,
BuiltinOperator_EQUAL);
model.PopulateTensor<int16_t>(model.input1(), {-1, 9, 7, 3});
model.PopulateTensor<int16_t>(model.input2(), {1, 2, 7, 5});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(false, false, true, false));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4));
}
TEST(ComparisonsTest, EqualString) {
if (SingleOpModel::GetForceUseNnapi()) {
return;
}
ComparisonOpModel model({1, 1, 1, 4, 1}, {1, 1, 1, 4, 1}, TensorType_STRING,
BuiltinOperator_EQUAL);
model.PopulateTensor<std::string>(model.input1(), {"A", "B", "C", "D"});
model.PopulateTensor<std::string>(model.input2(), {"A", "C", "B", "D"});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(true, false, false, true));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4, 1));
}
TEST(ComparisonsTest, EqualBroadcast) {
ComparisonOpModel model({1, 1, 1, 4}, {1, 1, 1, 1}, TensorType_INT32,
BuiltinOperator_EQUAL);
model.PopulateTensor<int>(model.input1(), {-1, 9, 7, 3});
model.PopulateTensor<int>(model.input2(), {7});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(false, false, true, false));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4));
}
TEST(ComparisonsTest, EqualBroadcastTwoD) {
ComparisonOpModel model({1, 1, 2, 4}, {1, 1, 1, 4}, TensorType_INT32,
BuiltinOperator_EQUAL);
model.PopulateTensor<int>(model.input1(), {-1, 9, 7, 3, 2, 4, 2, 8});
model.PopulateTensor<int>(model.input2(), {7, 1, 2, 4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(false, false, false, false, false,
false, true, false));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 2, 4));
}
TEST(ComparisonsTest, EqualBroadcastString) {
if (SingleOpModel::GetForceUseNnapi()) {
return;
}
ComparisonOpModel model({1, 1, 1, 4}, {1, 1, 1, 1}, TensorType_STRING,
BuiltinOperator_EQUAL);
model.PopulateTensor<std::string>(model.input1(), {"A", "B", "A", "B"});
model.PopulateTensor<std::string>(model.input2(), {"A"});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(true, false, true, false));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4));
}
TEST(ComparisonsTest, NotEqualBool) {
ComparisonOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, TensorType_BOOL,
BuiltinOperator_NOT_EQUAL);
model.PopulateTensor<bool>(model.input1(), {true, false, true, false});
model.PopulateTensor<bool>(model.input2(), {true, true, false, false});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(false, true, true, false));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4));
}
TEST(ComparisonsTest, NotEqualFloat) {
ComparisonOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, TensorType_FLOAT32,
BuiltinOperator_NOT_EQUAL);
model.PopulateTensor<float>(model.input1(), {0.1, 0.9, 0.7, 0.3});
model.PopulateTensor<float>(model.input2(), {0.1, 0.2, 0.6, 0.5});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(false, true, true, true));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4));
}
TEST(ComparisonsTest, NotEqualInt) {
ComparisonOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, TensorType_INT32,
BuiltinOperator_NOT_EQUAL);
model.PopulateTensor<int>(model.input1(), {-1, 9, 7, 3});
model.PopulateTensor<int>(model.input2(), {1, 2, 7, 5});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(true, true, false, true));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4));
}
TEST(ComparisonsTest, NotEqualString) {
if (SingleOpModel::GetForceUseNnapi()) {
return;
}
ComparisonOpModel model({1, 1, 1, 1, 4}, {1, 1, 1, 1, 4}, TensorType_STRING,
BuiltinOperator_NOT_EQUAL);
model.PopulateTensor<std::string>(model.input1(), {"A", "B", "C", "D"});
model.PopulateTensor<std::string>(model.input2(), {"A", "C", "B", "D"});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(false, true, true, false));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 1, 4));
}
TEST(ComparisonsTest, NotEqualBroadcast) {
ComparisonOpModel model({1, 1, 1, 4}, {1, 1, 1, 1}, TensorType_INT32,
BuiltinOperator_NOT_EQUAL);
model.PopulateTensor<int>(model.input1(), {-1, 9, 7, 3});
model.PopulateTensor<int>(model.input2(), {7});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(true, true, false, true));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4));
}
TEST(ComparisonsTest, NotEqualBroadcastTwoD) {
ComparisonOpModel model({1, 1, 2, 4}, {1, 1, 1, 4}, TensorType_INT32,
BuiltinOperator_NOT_EQUAL);
model.PopulateTensor<int>(model.input1(), {-1, 9, 7, 3, 2, 4, 2, 8});
model.PopulateTensor<int>(model.input2(), {7, 1, 2, 4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
ElementsAre(true, true, true, true, true, true, false, true));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 2, 4));
}
TEST(ComparisonsTest, NotEqualBroadcastString) {
if (SingleOpModel::GetForceUseNnapi()) {
return;
}
ComparisonOpModel model({1, 1, 1, 4}, {1, 1, 1, 1}, TensorType_STRING,
BuiltinOperator_NOT_EQUAL);
model.PopulateTensor<std::string>(model.input1(), {"A", "B", "A", "B"});
model.PopulateTensor<std::string>(model.input2(), {"A"});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(false, true, false, true));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4));
}
TEST(ComparisonsTest, GreaterFloat) {
ComparisonOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, TensorType_FLOAT32,
BuiltinOperator_GREATER);
model.PopulateTensor<float>(model.input1(), {0.1, 0.9, 0.7, 0.3});
model.PopulateTensor<float>(model.input2(), {0.1, 0.2, 0.6, 0.5});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(false, true, true, false));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4));
}
TEST(ComparisonsTest, GreaterInt) {
ComparisonOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, TensorType_INT32,
BuiltinOperator_GREATER);
model.PopulateTensor<int>(model.input1(), {-1, 9, 7, 3});
model.PopulateTensor<int>(model.input2(), {1, 2, 7, 5});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(false, true, false, false));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4));
}
TEST(ComparisonsTest, GreaterBroadcast) {
ComparisonOpModel model({1, 1, 1, 4}, {1, 1, 1, 1}, TensorType_INT32,
BuiltinOperator_GREATER);
model.PopulateTensor<int>(model.input1(), {-1, 9, 7, 3});
model.PopulateTensor<int>(model.input2(), {7});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(false, true, false, false));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4));
}
TEST(ComparisonsTest, GreaterBroadcastTwoD) {
ComparisonOpModel model({1, 1, 2, 4}, {1, 1, 1, 4}, TensorType_INT32,
BuiltinOperator_GREATER);
model.PopulateTensor<int>(model.input1(), {-1, 9, 7, 3, 2, 4, 2, 8});
model.PopulateTensor<int>(model.input2(), {7, 1, 2, 4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
ElementsAre(false, true, true, false, false, true, false, true));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 2, 4));
}
TEST(ComparisonsTest, GreaterEqualFloat) {
ComparisonOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, TensorType_FLOAT32,
BuiltinOperator_GREATER_EQUAL);
model.PopulateTensor<float>(model.input1(), {0.1, 0.9, 0.7, 0.3});
model.PopulateTensor<float>(model.input2(), {0.1, 0.2, 0.6, 0.5});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(true, true, true, false));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4));
}
TEST(ComparisonsTest, GreaterEqualInt) {
ComparisonOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, TensorType_INT32,
BuiltinOperator_GREATER_EQUAL);
model.PopulateTensor<int>(model.input1(), {-1, 9, 7, 3});
model.PopulateTensor<int>(model.input2(), {1, 2, 7, 5});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(false, true, true, false));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4));
}
TEST(ComparisonsTest, GreaterEqualInt16) {
ComparisonOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, TensorType_INT16,
BuiltinOperator_GREATER_EQUAL);
model.PopulateTensor<int16_t>(model.input1(), {-1, 9, 7, 3});
model.PopulateTensor<int16_t>(model.input2(), {1, 2, 7, 5});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(false, true, true, false));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4));
}
TEST(ComparisonsTest, GreaterEqualBroadcast) {
ComparisonOpModel model({1, 1, 1, 4}, {1, 1, 1, 1}, TensorType_INT32,
BuiltinOperator_GREATER_EQUAL);
model.PopulateTensor<int>(model.input1(), {-1, 9, 7, 3});
model.PopulateTensor<int>(model.input2(), {7});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(false, true, true, false));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4));
}
TEST(ComparisonsTest, GreaterEqualBroadcastTwoD) {
ComparisonOpModel model({1, 1, 2, 4}, {1, 1, 1, 4}, TensorType_INT32,
BuiltinOperator_GREATER_EQUAL);
model.PopulateTensor<int>(model.input1(), {-1, 9, 7, 3, 2, 4, 2, 8});
model.PopulateTensor<int>(model.input2(), {7, 1, 2, 4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
ElementsAre(false, true, true, false, false, true, true, true));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 2, 4));
}
TEST(ComparisonsTest, LessFloat) {
ComparisonOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, TensorType_FLOAT32,
BuiltinOperator_LESS);
model.PopulateTensor<float>(model.input1(), {0.1, 0.9, 0.7, 0.3});
model.PopulateTensor<float>(model.input2(), {0.1, 0.2, 0.6, 0.5});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(false, false, false, true));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4));
}
TEST(ComparisonsTest, LessInt) {
ComparisonOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, TensorType_INT32,
BuiltinOperator_LESS);
model.PopulateTensor<int>(model.input1(), {-1, 9, 7, 3});
model.PopulateTensor<int>(model.input2(), {1, 2, 6, 5});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(true, false, false, true));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4));
}
TEST(ComparisonsTest, LessInt16) {
ComparisonOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, TensorType_INT16,
BuiltinOperator_LESS);
model.PopulateTensor<int16_t>(model.input1(), {-1, 9, 7, 3});
model.PopulateTensor<int16_t>(model.input2(), {1, 2, 6, 5});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(true, false, false, true));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4));
}
TEST(ComparisonsTest, LessBroadcast) {
ComparisonOpModel model({1, 1, 1, 4}, {1, 1, 1, 1}, TensorType_INT32,
BuiltinOperator_LESS);
model.PopulateTensor<int>(model.input1(), {-1, 9, 7, 3});
model.PopulateTensor<int>(model.input2(), {7});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(true, false, false, true));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4));
}
TEST(ComparisonsTest, LessBroadcastTwoD) {
ComparisonOpModel model({1, 1, 2, 4}, {1, 1, 1, 4}, TensorType_INT32,
BuiltinOperator_LESS);
model.PopulateTensor<int>(model.input1(), {-1, 9, 7, 3, 2, 4, 6, 8});
model.PopulateTensor<int>(model.input2(), {7, 1, 2, 4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
ElementsAre(true, false, false, true, true, false, false, false));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 2, 4));
}
TEST(ComparisonsTest, LessEqualFloat) {
ComparisonOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, TensorType_FLOAT32,
BuiltinOperator_LESS_EQUAL);
model.PopulateTensor<float>(model.input1(), {0.1, 0.9, 0.7, 0.3});
model.PopulateTensor<float>(model.input2(), {0.1, 0.2, 0.6, 0.5});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(true, false, false, true));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4));
}
TEST(ComparisonsTest, LessEqualInt) {
ComparisonOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, TensorType_INT32,
BuiltinOperator_LESS_EQUAL);
model.PopulateTensor<int>(model.input1(), {-1, 9, 7, 3});
model.PopulateTensor<int>(model.input2(), {1, 2, 7, 5});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(true, false, true, true));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4));
}
TEST(ComparisonsTest, LessEqualBroadcast) {
ComparisonOpModel model({1, 1, 1, 4}, {1, 1, 1, 1}, TensorType_INT32,
BuiltinOperator_LESS_EQUAL);
model.PopulateTensor<int>(model.input1(), {-1, 9, 7, 3});
model.PopulateTensor<int>(model.input2(), {7});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(true, false, true, true));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4));
}
TEST(ComparisonsTest, LessEqualBroadcastTwoD) {
ComparisonOpModel model({1, 1, 2, 4}, {1, 1, 1, 4}, TensorType_INT32,
BuiltinOperator_LESS_EQUAL);
model.PopulateTensor<int>(model.input1(), {-1, 9, 7, 3, 2, 4, 2, 8});
model.PopulateTensor<int>(model.input2(), {7, 1, 2, 4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
ElementsAre(true, false, false, true, true, false, true, false));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 2, 4));
}
TEST(QuantizedComparisonsTest, EqualUInt8Quantized) {
const float kMin = -1.f;
const float kMax = 128.f;
ComparisonOpModel model({TensorType_UINT8, {1, 2, 2, 1}, kMin, kMax},
{TensorType_UINT8, {1, 2, 2, 1}, kMin, kMax},
TensorType_UINT8, BuiltinOperator_EQUAL);
model.QuantizeAndPopulate<uint8_t>(model.input1(), {1, 9, 7, 3});
model.QuantizeAndPopulate<uint8_t>(model.input2(), {1, 2, 7, 5});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(true, false, true, false));
}
TEST(QuantizedComparisonsTest, EqualInt8Quantized) {
const float kMin = -127.f;
const float kMax = 127.f;
ComparisonOpModel model({TensorType_INT8, {1, 2, 2, 1}, kMin, kMax},
{TensorType_INT8, {1, 2, 2, 1}, kMin, kMax},
TensorType_INT8, BuiltinOperator_EQUAL);
model.QuantizeAndPopulate<int8_t>(model.input1(), {1, -9, 7, 3});
model.QuantizeAndPopulate<int8_t>(model.input2(), {-1, 2, 7, 5});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(false, false, true, false));
}
TEST(QuantizedComparisonsTest, NotEqualUInt8Quantized) {
const float kMin = -1.f;
const float kMax = 128.f;
ComparisonOpModel model({TensorType_UINT8, {1, 2, 2, 1}, kMin, kMax},
{TensorType_UINT8, {1, 2, 2, 1}, kMin, kMax},
TensorType_UINT8, BuiltinOperator_NOT_EQUAL);
model.QuantizeAndPopulate<uint8_t>(model.input1(), {1, 9, 7, 3});
model.QuantizeAndPopulate<uint8_t>(model.input2(), {1, 2, 7, 0});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(false, true, false, true));
}
TEST(QuantizedComparisonsTest, NotEqualInt8Quantized) {
const float kMin = -127.f;
const float kMax = 127.f;
ComparisonOpModel model({TensorType_INT8, {1, 2, 2, 1}, kMin, kMax},
{TensorType_INT8, {1, 2, 2, 1}, kMin, kMax},
TensorType_INT8, BuiltinOperator_NOT_EQUAL);
model.QuantizeAndPopulate<int8_t>(model.input1(), {1, -9, 7, 3});
model.QuantizeAndPopulate<int8_t>(model.input2(), {1, 2, 7, 5});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(false, true, false, true));
}
TEST(ComparisonsTest, GreaterQuantized) {
const float kMin = -1.f;
const float kMax = 128.f;
ComparisonOpModel model({TensorType_UINT8, {1, 2, 2, 1}, kMin, kMax},
{TensorType_UINT8, {1, 2, 2, 1}, kMin, kMax},
TensorType_UINT8, BuiltinOperator_GREATER);
model.QuantizeAndPopulate<uint8_t>(model.input1(), {1, 9, 7, 3});
model.QuantizeAndPopulate<uint8_t>(model.input2(), {1, 2, 6, 5});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(false, true, true, false));
}
TEST(ComparisonsTest, GreaterQuantizedSmallRange) {
ComparisonOpModel model({TensorType_UINT8, {1, 2, 2, 1}, 0.0, 1.0},
{TensorType_UINT8, {1, 2, 2, 1}, 0.0, 2.0},
TensorType_UINT8, BuiltinOperator_GREATER);
model.QuantizeAndPopulate<uint8_t>(model.input1(), {1.0, 0.5, 0.35, 0.1});
model.QuantizeAndPopulate<uint8_t>(model.input2(), {1.01, 0.25, 0.3, 0.4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(false, true, true, false));
}
TEST(ComparisonsTest, GreaterEqualQuantized) {
const float kMin = -1.f;
const float kMax = 128.f;
ComparisonOpModel model({TensorType_UINT8, {1, 2, 2, 1}, kMin, kMax},
{TensorType_UINT8, {1, 2, 2, 1}, kMin, kMax},
TensorType_UINT8, BuiltinOperator_GREATER_EQUAL);
model.QuantizeAndPopulate<uint8_t>(model.input1(), {1, 9, 7, 3});
model.QuantizeAndPopulate<uint8_t>(model.input2(), {1, 2, 6, 5});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(true, true, true, false));
}
TEST(ComparisonsTest, LessQuantized) {
const float kMin = -1.f;
const float kMax = 128.f;
ComparisonOpModel model({TensorType_UINT8, {1, 2, 2, 1}, kMin, kMax},
{TensorType_UINT8, {1, 2, 2, 1}, kMin, kMax},
TensorType_UINT8, BuiltinOperator_LESS);
model.QuantizeAndPopulate<uint8_t>(model.input1(), {1, 9, 7, 3});
model.QuantizeAndPopulate<uint8_t>(model.input2(), {1, 2, 6, 5});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(false, false, false, true));
}
TEST(ComparisonsTest, LessEqualQuantized) {
const float kMin = -1.f;
const float kMax = 128.f;
ComparisonOpModel model({TensorType_UINT8, {1, 2, 2, 1}, kMin, kMax},
{TensorType_UINT8, {1, 2, 2, 1}, kMin, kMax},
TensorType_UINT8, BuiltinOperator_LESS_EQUAL);
model.QuantizeAndPopulate<uint8_t>(model.input1(), {1, 9, 7, 3});
model.QuantizeAndPopulate<uint8_t>(model.input2(), {1, 2, 6, 5});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(true, false, false, true));
}
TEST(ComparisonsTest, QuantizedEqualWithBroadcast) {
const float kMin = -1.f;
const float kMax = 128.f;
std::vector<std::vector<int>> test_shapes = {
{6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
for (int i = 0; i < test_shapes.size(); ++i) {
ComparisonOpModel model({TensorType_UINT8, test_shapes[i], kMin, kMax},
{TensorType_UINT8, {}, kMin, kMax},
TensorType_UINT8, BuiltinOperator_EQUAL);
model.QuantizeAndPopulate<uint8_t>(model.input1(), {20, 2, 7, 8, 11, 20});
model.QuantizeAndPopulate<uint8_t>(model.input2(), {2});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
ElementsAre(false, true, false, false, false, false))
<< "With shape number " << i;
}
}
TEST(ComparisonsTest, QuantizedUInt8NotEqualWithBroadcast) {
const float kMin = -1.f;
const float kMax = 128.f;
std::vector<std::vector<int>> test_shapes = {
{6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
for (int i = 0; i < test_shapes.size(); ++i) {
ComparisonOpModel model({TensorType_UINT8, test_shapes[i], kMin, kMax},
{TensorType_UINT8, {}, kMin, kMax},
TensorType_UINT8, BuiltinOperator_NOT_EQUAL);
model.QuantizeAndPopulate<uint8_t>(model.input1(), {20, 2, 7, 8, 11, 20});
model.QuantizeAndPopulate<uint8_t>(model.input2(), {2});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
ElementsAre(true, false, true, true, true, true))
<< "With shape number " << i;
}
}
TEST(ComparisonsTest, QuantizedInt8NotEqualWithBroadcast) {
const float kMin = -127.f;
const float kMax = 127.f;
std::vector<std::vector<int>> test_shapes = {
{6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
for (int i = 0; i < test_shapes.size(); ++i) {
ComparisonOpModel model({TensorType_INT8, test_shapes[i], kMin, kMax},
{TensorType_INT8, {}, kMin, kMax}, TensorType_INT8,
BuiltinOperator_NOT_EQUAL);
model.QuantizeAndPopulate<int8_t>(model.input1(), {-20, 2, 7, -8, 11, 20});
model.QuantizeAndPopulate<int8_t>(model.input2(), {2});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
ElementsAre(true, false, true, true, true, true))
<< "With shape number " << i;
}
}
TEST(ComparisonsTest, QuantizedUInt8GreaterWithBroadcast) {
const float kMin = -1.f;
const float kMax = 128.f;
std::vector<std::vector<int>> test_shapes = {
{6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
for (int i = 0; i < test_shapes.size(); ++i) {
ComparisonOpModel model({TensorType_UINT8, test_shapes[i], kMin, kMax},
{TensorType_UINT8, {}, kMin, kMax},
TensorType_UINT8, BuiltinOperator_GREATER);
model.QuantizeAndPopulate<uint8_t>(model.input1(), {20, 2, 7, 8, 11, 20});
model.QuantizeAndPopulate<uint8_t>(model.input2(), {8});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
ElementsAre(true, false, false, false, true, true))
<< "With shape number " << i;
}
}
TEST(ComparisonsTest, QuantizedInt8GreaterWithBroadcast) {
const float kMin = -127.f;
const float kMax = 127.f;
std::vector<std::vector<int>> test_shapes = {
{6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
for (int i = 0; i < test_shapes.size(); ++i) {
ComparisonOpModel model({TensorType_INT8, test_shapes[i], kMin, kMax},
{TensorType_INT8, {}, kMin, kMax}, TensorType_INT8,
BuiltinOperator_GREATER);
model.QuantizeAndPopulate<int8_t>(model.input1(), {20, -2, -71, 8, 11, 20});
model.QuantizeAndPopulate<int8_t>(model.input2(), {8});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
ElementsAre(true, false, false, false, true, true))
<< "With shape number " << i;
}
}
TEST(ComparisonsTest,
QuantizedInt8GreaterWithBroadcastMultiplierGreaterThanOne) {
const float kMin = -127.f;
const float kMax = 127.f;
std::vector<std::vector<int>> test_shapes = {
{6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
for (int i = 0; i < test_shapes.size(); ++i) {
ComparisonOpModel model({TensorType_INT8, test_shapes[i], kMin, kMax},
{TensorType_INT8, {}, kMin, kMax}, TensorType_INT8,
BuiltinOperator_GREATER);
model.QuantizeAndPopulate<int8_t>(model.input1(),
{572, -2, -71, 8, 11, 20});
model.QuantizeAndPopulate<int8_t>(model.input2(), {8});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
ElementsAre(true, false, false, false, true, true))
<< "With shape number " << i;
}
}
TEST(ComparisonsTest, QuantizedUInt8GreaterEqualWithBroadcast) {
const float kMin = -1.f;
const float kMax = 128.f;
std::vector<std::vector<int>> test_shapes = {
{6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
for (int i = 0; i < test_shapes.size(); ++i) {
ComparisonOpModel model({TensorType_UINT8, test_shapes[i], kMin, kMax},
{TensorType_UINT8, {}, kMin, kMax},
TensorType_UINT8, BuiltinOperator_GREATER_EQUAL);
model.QuantizeAndPopulate<uint8_t>(model.input1(), {20, 2, 7, 8, 11, 20});
model.QuantizeAndPopulate<uint8_t>(model.input2(), {8});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
ElementsAre(true, false, false, true, true, true))
<< "With shape number " << i;
}
}
TEST(ComparisonsTest, QuantizedInt8GreaterEqualWithBroadcast) {
const float kMin = -127.f;
const float kMax = 127.f;
std::vector<std::vector<int>> test_shapes = {
{6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
for (int i = 0; i < test_shapes.size(); ++i) {
ComparisonOpModel model({TensorType_INT8, test_shapes[i], kMin, kMax},
{TensorType_INT8, {}, kMin, kMax}, TensorType_INT8,
BuiltinOperator_GREATER_EQUAL);
model.QuantizeAndPopulate<int8_t>(model.input1(), {20, -2, -71, 8, 11, 20});
model.QuantizeAndPopulate<int8_t>(model.input2(), {8});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
ElementsAre(true, false, false, true, true, true))
<< "With shape number " << i;
}
}
TEST(ComparisonsTest, QuantizedUInt8LessWithBroadcast) {
const float kMin = -1.f;
const float kMax = 128.f;
std::vector<std::vector<int>> test_shapes = {
{6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
for (int i = 0; i < test_shapes.size(); ++i) {
ComparisonOpModel model({TensorType_UINT8, test_shapes[i], kMin, kMax},
{TensorType_UINT8, {}, kMin, kMax},
TensorType_UINT8, BuiltinOperator_LESS);
model.QuantizeAndPopulate<uint8_t>(model.input1(), {20, 2, 7, 8, 11, 20});
model.QuantizeAndPopulate<uint8_t>(model.input2(), {8});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
ElementsAre(false, true, true, false, false, false))
<< "With shape number " << i;
}
}
TEST(ComparisonsTest, QuantizedInt8LessWithBroadcast) {
const float kMin = -127.f;
const float kMax = 127.f;
std::vector<std::vector<int>> test_shapes = {
{6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
for (int i = 0; i < test_shapes.size(); ++i) {
ComparisonOpModel model({TensorType_INT8, test_shapes[i], kMin, kMax},
{TensorType_INT8, {}, kMin, kMax}, TensorType_INT8,
BuiltinOperator_LESS);
model.QuantizeAndPopulate<int8_t>(model.input1(), {20, -2, -71, 8, 11, 20});
model.QuantizeAndPopulate<int8_t>(model.input2(), {8});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
ElementsAre(false, true, true, false, false, false))
<< "With shape number " << i;
}
}
TEST(ComparisonsTest, QuantizedUInt8LessEqualWithBroadcast) {
const float kMin = -1.f;
const float kMax = 128.f;
std::vector<std::vector<int>> test_shapes = {
{6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
for (int i = 0; i < test_shapes.size(); ++i) {
ComparisonOpModel model({TensorType_UINT8, test_shapes[i], kMin, kMax},
{TensorType_UINT8, {}, kMin, kMax},
TensorType_UINT8, BuiltinOperator_LESS_EQUAL);
model.QuantizeAndPopulate<uint8_t>(model.input1(), {20, 2, 7, 8, 11, 20});
model.QuantizeAndPopulate<uint8_t>(model.input2(), {8});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
ElementsAre(false, true, true, true, false, false))
<< "With shape number " << i;
}
}
TEST(ComparisonsTest, QuantizedInt8LessEqualWithBroadcast) {
const float kMin = -127.f;
const float kMax = 127.f;
std::vector<std::vector<int>> test_shapes = {
{6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
for (int i = 0; i < test_shapes.size(); ++i) {
ComparisonOpModel model({TensorType_INT8, test_shapes[i], kMin, kMax},
{TensorType_INT8, {}, kMin, kMax}, TensorType_INT8,
BuiltinOperator_LESS_EQUAL);
model.QuantizeAndPopulate<int8_t>(model.input1(), {20, -2, -71, 8, 11, 20});
model.QuantizeAndPopulate<int8_t>(model.input2(), {8});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
ElementsAre(false, true, true, true, false, false))
<< "With shape number " << i;
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/internal/reference/comparisons.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/comparisons_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
87bf6f05-6542-43b7-9e3e-b2ea90d363ad | cpp | tensorflow/tensorflow | reduce | tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/reduce.cc | tensorflow/lite/delegates/hexagon/builders/tests/reduce_test.cc | #include "tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/reduce.h"
#include <cstdint>
#include <optional>
#include "llvm/Support/Casting.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/IR/Block.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Matchers.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/DialectConversion.h"
#include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
#include "tensorflow/compiler/mlir/lite/stablehlo/transforms/hlo_matchers.h"
#include "tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/util.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
namespace mlir {
namespace odml {
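// Matches the mhlo.reduce body variant used for argmin/argmax in which the
// value is chosen with a GE/LE compare and, when the two values are equal,
// the smaller index wins (mhlo.min over the index operands). Requires exactly
// four block arguments and a two-operand return.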
LogicalResult MatchReduceToArgMinMaxType2(mhlo::ReduceOp reduce_op,
bool is_argmax) {
Block& body = reduce_op.getBody().front();
if (body.getNumArguments() != 4) return failure();
mhlo::ReturnOp return_op = dyn_cast<mhlo::ReturnOp>(body.back());
if (!return_op || return_op.getNumOperands() != 2) return failure();
mhlo::SelectOp value_select = llvm::dyn_cast_or_null<mhlo::SelectOp>(
return_op.getOperand(0).getDefiningOp());
if (!value_select || value_select.getOnTrue() != body.getArgument(0) ||
value_select.getOnFalse() != body.getArgument(2))
return failure();
auto compare_direction_included =
is_argmax ? mhlo::ComparisonDirection::GE : mhlo::ComparisonDirection::LE;
mhlo::CompareOp value_gt = llvm::dyn_cast_or_null<mhlo::CompareOp>(
value_select.getOperand(0).getDefiningOp());
if (!value_gt ||
value_gt.getComparisonDirection() != compare_direction_included ||
value_gt.getLhs() != body.getArgument(0) ||
value_gt.getRhs() != body.getArgument(2))
return failure();
mhlo::SelectOp index_select = llvm::dyn_cast_or_null<mhlo::SelectOp>(
return_op.getOperand(1).getDefiningOp());
if (!index_select) return failure();
mhlo::MinOp index_select_min = llvm::dyn_cast_or_null<mhlo::MinOp>(
index_select.getOnTrue().getDefiningOp());
if (!index_select_min || index_select_min.getLhs() != body.getArgument(1) ||
index_select_min.getRhs() != body.getArgument(3))
return failure();
mhlo::SelectOp index_select_select = llvm::dyn_cast_or_null<mhlo::SelectOp>(
index_select.getOnFalse().getDefiningOp());
if (!index_select_select ||
index_select_select.getOnTrue() != body.getArgument(1) ||
index_select_select.getOnFalse() != body.getArgument(3) ||
index_select_select.getOperand(0).getDefiningOp() != value_gt)
return failure();
mhlo::CompareOp value_eq = llvm::dyn_cast_or_null<mhlo::CompareOp>(
index_select.getOperand(0).getDefiningOp());
if (!value_eq ||
value_eq.getComparisonDirection() != mhlo::ComparisonDirection::EQ ||
value_eq.getLhs() != body.getArgument(0) ||
value_eq.getRhs() != body.getArgument(2))
return failure();
return success();
}
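// Matches the alternative argmin/argmax body: the value update uses a strict
// GT/LT compare (or-ed with a `value != value` NaN check for floats), and the
// index is updated when that predicate holds or when the values are equal and
// the incoming index is smaller.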
LogicalResult MatchReduceToArgMinMaxType1(mhlo::ReduceOp reduce_op,
bool is_float, bool is_argmax) {
Block& body = reduce_op.getBody().front();
if (body.getNumArguments() != 4) return failure();
mhlo::ReturnOp return_op = dyn_cast<mhlo::ReturnOp>(body.back());
if (!return_op || return_op.getNumOperands() != 2) return failure();
mhlo::SelectOp value_select = llvm::dyn_cast_or_null<mhlo::SelectOp>(
return_op.getOperand(0).getDefiningOp());
if (!value_select || value_select.getOnTrue() != body.getArgument(0) ||
value_select.getOnFalse() != body.getArgument(2))
return failure();
auto compare_direction =
is_argmax ? mhlo::ComparisonDirection::GT : mhlo::ComparisonDirection::LT;
if (is_float) {
mhlo::OrOp value_or = llvm::dyn_cast_or_null<mhlo::OrOp>(
value_select.getOperand(0).getDefiningOp());
if (!value_or) return failure();
mhlo::CompareOp value_gt = llvm::dyn_cast_or_null<mhlo::CompareOp>(
value_or.getLhs().getDefiningOp());
if (!value_gt || value_gt.getComparisonDirection() != compare_direction ||
value_gt.getLhs() != body.getArgument(0) ||
value_gt.getRhs() != body.getArgument(2))
return failure();
mhlo::CompareOp value_ne = llvm::dyn_cast_or_null<mhlo::CompareOp>(
value_or.getRhs().getDefiningOp());
if (!value_ne ||
value_ne.getComparisonDirection() != mhlo::ComparisonDirection::NE ||
value_ne.getLhs() != body.getArgument(0) ||
value_ne.getRhs() != body.getArgument(0))
return failure();
} else {
mhlo::CompareOp value_gt = llvm::dyn_cast_or_null<mhlo::CompareOp>(
value_select.getOperand(0).getDefiningOp());
if (!value_gt || value_gt.getComparisonDirection() != compare_direction ||
value_gt.getLhs() != body.getArgument(0) ||
value_gt.getRhs() != body.getArgument(2))
return failure();
}
mhlo::SelectOp index_select = llvm::dyn_cast_or_null<mhlo::SelectOp>(
return_op.getOperand(1).getDefiningOp());
if (!index_select || index_select.getOnTrue() != body.getArgument(1) ||
index_select.getOnFalse() != body.getArgument(3))
return failure();
mhlo::OrOp index_or = llvm::dyn_cast_or_null<mhlo::OrOp>(
index_select.getPred().getDefiningOp());
if (!index_or || index_or.getLhs() != value_select.getPred())
return failure();
mhlo::AndOp index_and =
llvm::dyn_cast_or_null<mhlo::AndOp>(index_or.getRhs().getDefiningOp());
if (!index_and) return failure();
mhlo::CompareOp value_eq = llvm::dyn_cast_or_null<mhlo::CompareOp>(
index_and.getLhs().getDefiningOp());
if (!value_eq ||
value_eq.getComparisonDirection() != mhlo::ComparisonDirection::EQ ||
value_eq.getLhs() != body.getArgument(0) ||
value_eq.getRhs() != body.getArgument(2))
return failure();
mhlo::CompareOp index_lt = llvm::dyn_cast_or_null<mhlo::CompareOp>(
index_and.getRhs().getDefiningOp());
if (!index_lt ||
index_lt.getComparisonDirection() != mhlo::ComparisonDirection::LT ||
index_lt.getLhs() != body.getArgument(1) ||
index_lt.getRhs() != body.getArgument(3))
return failure();
return success();
}
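// Lowers a two-result mhlo.reduce implementing max/argmax (or min/argmin) over
// a single dimension into a value reduction plus an ArgMax/ArgMin op. The
// second reduce operand must be an iota along the reduced dimension, the init
// values must be the reduction identity and index 0, and boolean inputs use
// BooleanReduce for the value output.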
template <typename Reduce, typename ArgReduce, typename BooleanReduce,
bool is_argmax>
class ConvertReduceOpToArgMinMax : public OpConversionPattern<mhlo::ReduceOp> {
public:
using OpConversionPattern::OpConversionPattern;
LogicalResult matchAndRewrite(
mhlo::ReduceOp reduce_op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const final;
virtual bool IsValueInitValue(const DenseElementsAttr& attr) const = 0;
};
template <typename Reduce, typename ArgReduce, typename BooleanReduce,
bool is_argmax>
LogicalResult ConvertReduceOpToArgMinMax<
Reduce, ArgReduce, BooleanReduce,
is_argmax>::matchAndRewrite(mhlo::ReduceOp reduce_op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const {
if (reduce_op.getInputs().size() != 2) return failure();
if (reduce_op.getDimensions().getNumElements() != 1) return failure();
DenseElementsAttr operand_init;
if (!matchPattern(reduce_op.getInitValues().front(),
m_Constant(&operand_init)))
return failure();
if (!IsValueInitValue(operand_init)) return failure();
DenseElementsAttr iota_init;
if (!matchPattern(reduce_op.getInitValues().back(), m_Constant(&iota_init)))
return failure();
if (iota_init.getValues<APInt>()[0] != 0) return failure();
Value iota = reduce_op.getInputs().back();
if (!MatchIota(reduce_op.getDimensions(), iota)) return failure();
const bool is_float = mlir::isa<FloatType>(operand_init.getElementType());
if (failed(MatchReduceToArgMinMaxType1(reduce_op, is_float, is_argmax)) &&
failed(MatchReduceToArgMinMaxType2(reduce_op, is_argmax)))
return rewriter.notifyMatchFailure(
reduce_op, "Unsupported Reduce -> ArgMax/ArgMin pattern");
Value operand = reduce_op.getInputs().front();
int64_t axis = reduce_op.getDimensions().getValues<int64_t>()[0];
auto dim_type = RankedTensorType::get({1}, rewriter.getI32Type());
auto reduction_indices = rewriter.create<arith::ConstantOp>(
reduce_op.getLoc(), dim_type,
rewriter.getI32TensorAttr({static_cast<int32_t>(axis)}));
if (!mlir::isa<ShapedType>(operand.getType())) return failure();
auto operand_type = mlir::cast<ShapedType>(operand.getType());
if (operand_type.getElementType().isInteger(1)) {
auto tf_reduce_op = rewriter.create<BooleanReduce>(
reduce_op.getLoc(), reduce_op->getResult(0).getType(), operand,
reduction_indices,
rewriter.getBoolAttr(false));
auto tf_argreduce_op = rewriter.create<ArgReduce>(
reduce_op.getLoc(), reduce_op->getResult(1).getType(), operand,
reduction_indices);
rewriter.replaceOp(reduce_op, {tf_reduce_op, tf_argreduce_op});
} else {
auto tf_reduce_op = rewriter.create<Reduce>(
reduce_op.getLoc(), reduce_op->getResult(0).getType(), operand,
reduction_indices,
rewriter.getBoolAttr(false));
auto tf_argreduce_op = rewriter.create<ArgReduce>(
reduce_op.getLoc(), reduce_op->getResult(1).getType(), operand,
reduction_indices);
rewriter.replaceOp(reduce_op, {tf_reduce_op, tf_argreduce_op});
}
return success();
}
template <typename Reduce, typename ArgReduce, typename BooleanReduce>
class ConvertReduceOpToArgMax
: public ConvertReduceOpToArgMinMax<Reduce, ArgReduce, BooleanReduce,
true> {
public:
using ConvertReduceOpToArgMinMax<Reduce, ArgReduce, BooleanReduce,
true>::ConvertReduceOpToArgMinMax;
bool IsValueInitValue(const DenseElementsAttr& attr) const override;
};
template <typename Reduce, typename ArgReduce, typename BooleanReduce>
bool ConvertReduceOpToArgMax<Reduce, ArgReduce, BooleanReduce>::
IsValueInitValue(const DenseElementsAttr& attr) const {
auto element_type = attr.getType().getElementType();
if (attr.getNumElements() != 1 || !element_type.isIntOrFloat()) return false;
if (mlir::isa<FloatType>(element_type)) {
auto value = *attr.value_begin<APFloat>();
return value.isNegative() && value.isInfinity();
} else if (element_type.isInteger(1)) {
auto value = *attr.value_begin<APInt>();
return value.isZero();
} else {
auto value = *attr.value_begin<APInt>();
return element_type.isUnsignedInteger() ? value.isMinValue()
: value.isMinSignedValue();
}
}
template <typename Reduce, typename ArgReduce, typename BooleanReduce>
class ConvertReduceOpToArgMin
: public ConvertReduceOpToArgMinMax<Reduce, ArgReduce, BooleanReduce,
false> {
public:
using ConvertReduceOpToArgMinMax<Reduce, ArgReduce, BooleanReduce,
false>::ConvertReduceOpToArgMinMax;
bool IsValueInitValue(const DenseElementsAttr& attr) const override;
};
template <typename Reduce, typename ArgReduce, typename BooleanReduce>
bool ConvertReduceOpToArgMin<Reduce, ArgReduce, BooleanReduce>::
IsValueInitValue(const DenseElementsAttr& attr) const {
auto element_type = attr.getType().getElementType();
if (attr.getNumElements() != 1 || !element_type.isIntOrFloat()) return false;
if (mlir::isa<FloatType>(element_type)) {
auto value = *attr.value_begin<APFloat>();
return !value.isNegative() && value.isInfinity();
} else if (element_type.isInteger(1)) {
auto value = *attr.value_begin<APInt>();
return value.isZero();
} else {
auto value = *attr.value_begin<APInt>();
return element_type.isUnsignedInteger() ? value.isMaxValue()
: value.isMaxSignedValue();
}
}
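// Returns the splat value of a constant tensor, failing if the value is not a
// constant splat.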
template <typename SplatValueType>
LogicalResult GetConstantSplatValue(Value value, SplatValueType& splat_value) {
DenseElementsAttr attr;
if (!matchPattern(value, m_Constant(&attr)) || !attr.isSplat()) {
return failure();
}
splat_value = attr.getSplatValue<SplatValueType>();
return success();
}
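// Emits the target reduction and then folds a non-identity init value back in
// with the given binary op (e.g. a sum followed by an add of the init value).
// The void specializations below reject reductions that have no such fold.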
template <typename ReduceOp, typename BinaryOp, bool BuilderHasFAF = false>
LogicalResult rewriteNonMatchInitValue(mhlo::ReduceOp reduce_op, Value input,
arith::ConstantOp reduction_indices,
ConversionPatternRewriter& rewriter) {
Value reduce_result = rewriter.create<ReduceOp>(
reduce_op.getLoc(), reduce_op.getType(0), input, reduction_indices,
rewriter.getBoolAttr(false));
if constexpr (BuilderHasFAF) {
rewriter.replaceOpWithNewOp<BinaryOp>(reduce_op, reduce_result,
reduce_op.getInitValues()[0],
rewriter.getStringAttr("NONE"));
} else {
rewriter.replaceOpWithNewOp<BinaryOp>(reduce_op, reduce_result.getType(),
reduce_result,
reduce_op.getInitValues()[0]);
}
return success();
}
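// Converts the i64 reduction dimensions of an mhlo.reduce into the i32 dense
// attribute expected by the TFLite reduce ops.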
DenseIntElementsAttr GetDimsAsI32Elements(OpBuilder& b, mhlo::ReduceOp op) {
auto dims_attr = op.getDimensions();
const auto n_dims = dims_attr.getNumElements();
SmallVector<int32_t> reduce_dims;
reduce_dims.reserve(n_dims);
for (auto dim : dims_attr.getValues<int64_t>()) {
reduce_dims.push_back(dim);
}
auto dim_type = RankedTensorType::get({n_dims}, b.getI32Type());
return DenseIntElementsAttr::get(dim_type, reduce_dims);
}
template <>
LogicalResult rewriteNonMatchInitValue<TFL::ReduceMaxOp, void>(
mhlo::ReduceOp reduce_op, Value input, arith::ConstantOp reduction_indices,
ConversionPatternRewriter& rewriter) {
return failure();
}
template <>
LogicalResult rewriteNonMatchInitValue<TFL::ReduceMinOp, void>(
mhlo::ReduceOp reduce_op, Value input, arith::ConstantOp reduction_indices,
ConversionPatternRewriter& rewriter) {
return failure();
}
template <>
LogicalResult rewriteNonMatchInitValue<TFL::ReduceAnyOp, void>(
mhlo::ReduceOp reduce_op, Value input, arith::ConstantOp reduction_indices,
ConversionPatternRewriter& rewriter) {
return failure();
}
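// Generic lowering for a single-input mhlo.reduce: if the body is SrcBinaryOp
// and the init value matches the reduction identity (checked by the subclass),
// the op is rewritten directly to TargetReduceOp; otherwise the init value is
// re-applied afterwards via rewriteNonMatchInitValue.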
template <typename SrcBinaryOp, typename TargetReduceOp,
typename TargetBinaryOp = void, bool BuilderHasFAF = false>
class ConvertReduce : public OpConversionPattern<mhlo::ReduceOp> {
public:
using OpConversionPattern::OpConversionPattern;
LogicalResult matchAndRewrite(
mhlo::ReduceOp reduce_op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const final {
if (failed(MatchReduceOpOperand(reduce_op))) {
return failure();
}
if (failed(MatchBinaryReduceFunction<SrcBinaryOp>(reduce_op.getBody()))) {
return failure();
}
auto operand = reduce_op.getInputs()[0];
auto tfl_dims = GetDimsAsI32Elements(rewriter, reduce_op);
auto tfl_dims_op =
rewriter.create<arith::ConstantOp>(reduce_op.getLoc(), tfl_dims);
if (succeeded(MatchInitValue(reduce_op.getInitValues()[0]))) {
rewriter.replaceOpWithNewOp<TargetReduceOp>(
reduce_op, reduce_op.getType(0), operand, tfl_dims_op,
rewriter.getBoolAttr(false));
return success();
}
return rewriteNonMatchInitValue<TargetReduceOp, TargetBinaryOp,
BuilderHasFAF>(reduce_op, operand,
tfl_dims_op, rewriter);
}
private:
virtual LogicalResult MatchInitValue(Value init_value) const = 0;
LogicalResult MatchReduceOpOperand(mhlo::ReduceOp reduce_op) const {
if (reduce_op.getInputs().size() != 1 ||
reduce_op.getInitValues().size() != 1 ||
reduce_op.getResults().size() != 1)
return failure();
if (!mlir::isa<RankedTensorType>(reduce_op.getInputs()[0].getType()))
return failure();
if (!mlir::isa<RankedTensorType>(reduce_op.getType(0))) return failure();
return success();
}
};
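// Concrete lowerings. Each subclass only verifies that the init value is the
// identity of its reduction: 1 for prod, 0 for sum, -inf/min-int for max,
// +inf/max-int for min, true for all, and false for any.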
class ConvertReduceMul
: public ConvertReduce<mhlo::MulOp, TFL::ReduceProdOp, TFL::MulOp, true> {
public:
using ConvertReduce::ConvertReduce;
LogicalResult MatchInitValue(Value init_value) const override {
auto type = mlir::cast<ShapedType>(init_value.getType()).getElementType();
if (mlir::isa<FloatType>(type)) {
float const_value;
if (failed(GetConstantSplatValue<float>(init_value, const_value)) ||
const_value != 1.0)
return failure();
} else if (mlir::isa<IntegerType>(type) && type.isSignlessInteger()) {
int32_t const_value;
if (failed(GetConstantSplatValue<int32_t>(init_value, const_value)) ||
const_value != 1)
return failure();
} else {
return failure();
}
return success();
}
};
class ConvertReduceAdd
: public ConvertReduce<mhlo::AddOp, TFL::SumOp, TFL::AddOp, true> {
public:
using ConvertReduce::ConvertReduce;
LogicalResult MatchInitValue(Value init_value) const override {
auto type = mlir::cast<ShapedType>(init_value.getType()).getElementType();
if (mlir::isa<FloatType>(type)) {
APFloat const_value(.0);
if (failed(GetConstantSplatValue(init_value, const_value)) ||
!const_value.isZero())
return failure();
} else if (mlir::isa<IntegerType>(type) && type.isSignlessInteger()) {
APInt const_value;
if (failed(GetConstantSplatValue(init_value, const_value)) ||
!const_value.isZero())
return failure();
} else {
return failure();
}
return success();
}
};
class ConvertReduceMaxToReduceAny
: public ConvertReduce<mhlo::MaxOp, TFL::ReduceAnyOp> {
public:
using ConvertReduce::ConvertReduce;
LogicalResult MatchInitValue(Value init_value) const override {
auto type = mlir::cast<ShapedType>(init_value.getType()).getElementType();
if (!mlir::isa<IntegerType>(type) || !type.isSignlessInteger() ||
!(type.getIntOrFloatBitWidth() == 1))
return failure();
APInt const_value;
if (failed(GetConstantSplatValue(init_value, const_value)) ||
(const_value == 1))
return failure();
return success();
}
};
class ConvertReduceMax : public ConvertReduce<mhlo::MaxOp, TFL::ReduceMaxOp> {
public:
using ConvertReduce::ConvertReduce;
LogicalResult MatchInitValue(Value init_value) const override {
auto type = mlir::cast<ShapedType>(init_value.getType()).getElementType();
if (mlir::isa<FloatType>(type)) {
APFloat const_value(.0);
if (failed(GetConstantSplatValue(init_value, const_value)) ||
!const_value.isInfinity() || !const_value.isNegative())
return failure();
} else if (mlir::isa<IntegerType>(type) && type.isSignlessInteger()) {
if (type.getIntOrFloatBitWidth() == 1) return failure();
APInt const_value;
if (failed(GetConstantSplatValue(init_value, const_value)) ||
!const_value.isMinSignedValue())
return failure();
} else {
return failure();
}
return success();
}
};
class ConvertReduceMin : public ConvertReduce<mhlo::MinOp, TFL::ReduceMinOp> {
public:
using ConvertReduce::ConvertReduce;
LogicalResult MatchInitValue(Value init_value) const override {
auto type = mlir::cast<ShapedType>(init_value.getType()).getElementType();
if (mlir::isa<FloatType>(type)) {
APFloat const_value(.0);
if (failed(GetConstantSplatValue(init_value, const_value)) ||
!const_value.isInfinity() || const_value.isNegative())
return failure();
} else if (mlir::isa<IntegerType>(type) && type.isSignlessInteger()) {
APInt const_value;
if (failed(GetConstantSplatValue(init_value, const_value)) ||
!const_value.isMaxSignedValue())
return failure();
} else {
return failure();
}
return success();
}
};
class ConvertReduceAnd
: public ConvertReduce<mhlo::AndOp, TFL::ReduceAllOp, TFL::LogicalAndOp> {
public:
using ConvertReduce<mhlo::AndOp, TFL::ReduceAllOp,
TFL::LogicalAndOp>::ConvertReduce;
LogicalResult MatchInitValue(Value init_value) const override {
DenseIntElementsAttr init_attr;
if (!matchPattern(init_value, m_Constant(&init_attr)) ||
!init_attr.getType().getElementType().isInteger(1) ||
!init_attr.isSplat() || !init_attr.getSplatValue<BoolAttr>().getValue())
return failure();
return success();
}
};
class ConvertReduceOr
: public ConvertReduce<mhlo::OrOp, TFL::ReduceAnyOp, TFL::LogicalOrOp> {
public:
using ConvertReduce<mhlo::OrOp, TFL::ReduceAnyOp,
TFL::LogicalOrOp>::ConvertReduce;
LogicalResult MatchInitValue(Value init_value) const override {
DenseIntElementsAttr init_attr;
if (!matchPattern(init_value, m_Constant(&init_attr)) ||
!init_attr.getType().getElementType().isInteger(1) ||
!init_attr.isSplat() || init_attr.getSplatValue<BoolAttr>().getValue())
return failure();
return success();
}
};
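// Marks an mhlo.reduce as illegal (i.e. to be converted) when it matches one
// of the argmin/argmax body patterns above; otherwise leaves legality
// undecided so other checks can run.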
std::optional<bool> IsReduceOpLegal(mhlo::ReduceOp reduce_op) {
if (succeeded(MatchReduceToArgMinMaxType1(reduce_op, true, true)) ||
succeeded(MatchReduceToArgMinMaxType1(reduce_op, false, true)) ||
succeeded(MatchReduceToArgMinMaxType1(reduce_op, true, false)) ||
succeeded(MatchReduceToArgMinMaxType1(reduce_op, false, false)) ||
succeeded(MatchReduceToArgMinMaxType2(reduce_op, false)) ||
succeeded(MatchReduceToArgMinMaxType2(reduce_op, true))) {
return false;
}
return std::nullopt;
}
template class ConvertReduceOpToArgMinMax<TFL::ReduceMaxOp, TFL::ArgMaxOp,
TFL::ReduceAnyOp, true>;
template class ConvertReduceOpToArgMax<TFL::ReduceMaxOp, TFL::ArgMaxOp,
TFL::ReduceAnyOp>;
template class ConvertReduceOpToArgMinMax<TFL::ReduceMinOp, TFL::ArgMinOp,
TFL::ReduceAllOp, false>;
template class ConvertReduceOpToArgMin<TFL::ReduceMinOp, TFL::ArgMinOp,
TFL::ReduceAllOp>;
template class ConvertReduceOpToArgMinMax<TF::MaxOp, TF::ArgMaxOp, TF::AnyOp,
true>;
template class ConvertReduceOpToArgMax<TF::MaxOp, TF::ArgMaxOp, TF::AnyOp>;
template class ConvertReduceOpToArgMinMax<TF::MinOp, TF::ArgMinOp, TF::AllOp,
false>;
template class ConvertReduceOpToArgMin<TF::MinOp, TF::ArgMinOp, TF::AllOp>;
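// Registers the argmax/argmin reduce patterns against the TF dialect ops.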
void PopulateReduceArgMinMaxTFPatterns(MLIRContext* ctx,
RewritePatternSet& patterns) {
using ConvertReduceOpToTfArgmax =
ConvertReduceOpToArgMax<TF::MaxOp, TF::ArgMaxOp, TF::AnyOp>;
using ConvertReduceOpToTfArgmin =
ConvertReduceOpToArgMin<TF::MinOp, TF::ArgMinOp, TF::AllOp>;
patterns.add<ConvertReduceOpToTfArgmin, ConvertReduceOpToTfArgmax>(ctx);
}
void PopulateReducePatterns(MLIRContext* ctx, RewritePatternSet& patterns,
ConversionTarget& target) {
using ConvertReduceOpToTFLiteArgmax =
ConvertReduceOpToArgMax<TFL::ReduceMaxOp, TFL::ArgMaxOp,
TFL::ReduceAnyOp>;
using ConvertReduceOpToTFLiteArgmin =
ConvertReduceOpToArgMin<TFL::ReduceMinOp, TFL::ArgMinOp,
TFL::ReduceAllOp>;
patterns.add<ConvertReduceOpToTFLiteArgmax, ConvertReduceOpToTFLiteArgmin,
ConvertReduceMul, ConvertReduceAdd, ConvertReduceMax,
ConvertReduceMaxToReduceAny, ConvertReduceMin, ConvertReduceAnd,
ConvertReduceOr>(ctx);
target.addDynamicallyLegalOp<mhlo::ReduceOp>(IsReduceOpLegal);
}
}
} | #include <initializer_list>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/delegates/hexagon/builders/tests/hexagon_delegate_op_model.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
using testing::ElementsAreArray;
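// Wraps a single reduce op (axis supplied as a constant int32 tensor) so the
// tests can run it both on the reference kernels and through the Hexagon
// delegate, comparing dequantized outputs.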
class ReduceOpModel : public SingleOpModelWithHexagon {
public:
ReduceOpModel(BuiltinOperator type, const TensorData& input,
const TensorData& output, std::initializer_list<int> axis_shape,
std::initializer_list<int> axis, bool keep_dims) {
input_ = AddInput(input);
axis_ = AddConstInput(TensorType_INT32, axis, axis_shape);
output_ = AddOutput(output);
SetBuiltinOp(type, BuiltinOptions_ReducerOptions,
CreateReducerOptions(builder_, keep_dims).Union());
BuildInterpreter({GetShape(input_)});
}
int Input() { return input_; }
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
template <typename T>
std::vector<float> GetDequantizedOutput() {
return Dequantize<T>(ExtractVector<T>(output_), GetScale(output_),
GetZeroPoint(output_));
}
private:
int input_;
int axis_;
int output_;
};
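// Runs a quantized MEAN without keep_dims: the reference kernels provide the
// baseline, then the Hexagon delegate is applied and its dequantized output is
// checked against that baseline within a small tolerance. full_input_dims
// toggles between a 4-D and a 3-D input shape.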
template <TensorType Tensor_Type, typename input_type>
void TestMeanImpl(bool full_input_dims = true) {
float kQuantizedTolerance = 2.0 / 255;
std::vector<float> data = {0.4, 0.2, 0.3, 0.4, 0.5, 0.6};
ReduceOpModel m(BuiltinOperator_MEAN,
{Tensor_Type,
(full_input_dims ? std::vector<int>({1, 1, 3, 2})
: std::vector<int>({1, 3, 2})),
-1.0, 1.0},
{Tensor_Type, {2}, -1.0, 1.0}, {1}, {full_input_dims ? 2 : 1},
false);
m.QuantizeAndPopulate<input_type>(m.Input(), data);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
auto reference_output = m.GetDequantizedOutput<input_type>();
m.ApplyDelegateAndInvoke();
if (full_input_dims) {
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 1, 2}));
} else {
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 2}));
}
EXPECT_THAT(
m.GetDequantizedOutput<input_type>(),
ElementsAreArray(ArrayFloatNear(reference_output, kQuantizedTolerance)));
}
TEST(ReduceOpModel, MeanNotKeepDims_Uint8) {
TestMeanImpl<TensorType_UINT8, uint8_t>(true);
TestMeanImpl<TensorType_UINT8, uint8_t>(false);
}
TEST(ReduceOpModel, MeanNotKeepDims_Int8) {
TestMeanImpl<TensorType_INT8, int8_t>(true);
TestMeanImpl<TensorType_INT8, int8_t>(false);
}
template <TensorType Tensor_Type, typename input_type>
void TestMeanKeepDimsImpl(bool full_input_dims = true) {
float kQuantizedTolerance = 2.0 / 255;
std::vector<float> data = {0.4, 0.2, 0.3, 0.4, 0.5, 0.6};
ReduceOpModel m(BuiltinOperator_MEAN,
{Tensor_Type,
(full_input_dims ? std::vector<int>({1, 1, 3, 2})
: std::vector<int>({1, 3, 2})),
-1.0, 1.0},
{Tensor_Type, {3}, -1.0, 1.0}, {1}, {full_input_dims ? 3 : 2},
true);
m.QuantizeAndPopulate<input_type>(m.Input(), data);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
auto reference_output = m.GetDequantizedOutput<input_type>();
m.ApplyDelegateAndInvoke();
if (full_input_dims) {
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 1, 3, 1}));
} else {
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 3, 1}));
}
EXPECT_THAT(
m.GetDequantizedOutput<input_type>(),
ElementsAreArray(ArrayFloatNear(reference_output, kQuantizedTolerance)));
}
TEST(ReduceOpModel, MeanKeepDims_Int8) {
  TestMeanKeepDimsImpl<TensorType_INT8, int8_t>(true);
  TestMeanKeepDimsImpl<TensorType_INT8, int8_t>(false);
}
TEST(ReduceOpModel, MeanKeepDims_Uint8) {
  TestMeanKeepDimsImpl<TensorType_UINT8, uint8_t>(true);
  TestMeanKeepDimsImpl<TensorType_UINT8, uint8_t>(false);
}
TEST(ReduceOpModel, DISABLED_SumNotKeepDims) {
float kQuantizedTolerance = 2.0 / 255;
std::vector<float> data = {0.4, 0.2, 0.3, 0.4, 0.5, 0.6};
ReduceOpModel m(BuiltinOperator_SUM,
{TensorType_UINT8, {1, 1, 3, 2}, -1.0, 1.0},
{TensorType_UINT8, {2}, -1.0, 1.0}, {1}, {2}, false);
m.QuantizeAndPopulate<uint8_t>(m.Input(), data);
m.ApplyDelegateAndInvoke();
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 1, 2}));
EXPECT_THAT(m.GetDequantizedOutput<uint8_t>(),
ElementsAreArray(
ArrayFloatNear({-0.823529, -0.815686}, kQuantizedTolerance)));
}
TEST(ReduceOpModel, DISABLED_SumKeepDims) {
float kQuantizedTolerance = 2.0 / 255;
std::vector<float> data = {0.4, 0.2, 0.3, 0.4, 0.5, 0.6};
ReduceOpModel m(BuiltinOperator_SUM,
{TensorType_UINT8, {1, 1, 3, 2}, -1.0, 1.0},
{TensorType_UINT8, {3}, -1.0, 1.0}, {1}, {3}, true);
m.QuantizeAndPopulate<uint8_t>(m.Input(), data);
m.ApplyDelegateAndInvoke();
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 1, 3, 1}));
EXPECT_THAT(m.GetDequantizedOutput<uint8_t>(),
ElementsAreArray(ArrayFloatNear({-0.407843, -0.313726, 0.0941177},
kQuantizedTolerance)));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/reduce.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/hexagon/builders/tests/reduce_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
368e0049-e1af-47f0-8969-3f472eb1b158 | cpp | tensorflow/tensorflow | quantize | tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize.cc | tensorflow/lite/delegates/hexagon/builders/tests/quantize_test.cc | #include <memory>
#include <string>
#include <utility>
#include "absl/container/flat_hash_set.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Quant/IR/QuantTypes.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OpDefinition.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/Types.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Pass/PassRegistry.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Support/TypeID.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "tensorflow/compiler/mlir/lite/quantization/ir/QuantOps.h"
#include "tensorflow/compiler/mlir/lite/transforms/passes.h"
#include "tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_config.h"
#include "tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_op_quant_spec.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.pb.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/core/framework/types.pb.h"
namespace mlir {
namespace quant {
namespace {
using ::tensorflow::quantization::OpSet;
enum QuantizationTrait { kFullQuantization, kDynamicRangeQuantization };
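// Shared base for the quantization rewrite patterns. It instantiates the
// generic QuantizationPattern on the quantfork quantize/dequantize casts;
// custom ops are never treated as quantizable, and under full quantization
// dynamic-range operands/results are only allowed for composite functions
// whose name contains "gather".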
template <QuantizationTrait quantization_trait, typename ConcreteT,
typename RootOpT = quantfork::DequantizeCastOp>
struct TFQuantizationBase
: public QuantizationPattern<ConcreteT, quantfork::QuantizeCastOp,
quantfork::DequantizeCastOp,
void, RootOpT> {
explicit TFQuantizationBase(MLIRContext* ctx,
const QuantPassSpec& quant_params)
: QuantizationPattern<ConcreteT, quantfork::QuantizeCastOp,
quantfork::DequantizeCastOp,
void, RootOpT>(ctx, quant_params) {}
static bool IsQuantizableCustomOp(Operation* op,
const CustomMap& custom_op_map) {
return false;
}
static bool AllowDynamicRangeQuantizedOperand(
Operation* quantized_op, const CustomMap& custom_op_map) {
auto call_op = cast<TF::PartitionedCallOp>(quantized_op);
StringRef function_name =
call_op.getFAttr().cast<FlatSymbolRefAttr>().getValue();
const bool is_gather = function_name.contains("gather");
return quantization_trait != kFullQuantization || is_gather;
}
static bool AllowDynamicRangeQuantizedResult(Operation* quantized_op,
const CustomMap& custom_op_map) {
auto call_op = cast<TF::PartitionedCallOp>(quantized_op);
StringRef function_name =
call_op.getFAttr().cast<FlatSymbolRefAttr>().getValue();
bool is_gather = false;
if (function_name.contains("gather")) is_gather = true;
return quantization_trait != kFullQuantization ||
(quantization_trait == kFullQuantization && is_gather);
}
static bool IsWeightOnlyOp(Operation* quantized_op,
absl::flat_hash_set<std::string>& ops_blocklist,
bool weight_only_quantization,
const CustomMap& custom_op_map) {
return weight_only_quantization;
}
};
struct TFFullQuantization
: public TFQuantizationBase<kFullQuantization, TFFullQuantization> {
explicit TFFullQuantization(MLIRContext* ctx,
const QuantPassSpec& quant_params)
: TFQuantizationBase<kFullQuantization, TFFullQuantization>(
ctx, quant_params) {}
};
struct TFFullQuantizationReverse
: public TFQuantizationBase<kFullQuantization, TFFullQuantizationReverse,
quantfork::QuantizeCastOp> {
explicit TFFullQuantizationReverse(MLIRContext* ctx,
const QuantPassSpec& quant_params)
: TFQuantizationBase<kFullQuantization, TFFullQuantizationReverse,
quantfork::QuantizeCastOp>(ctx, quant_params) {}
};
struct TFDynamicRangeQuantization
: public TFQuantizationBase<kDynamicRangeQuantization,
TFDynamicRangeQuantization> {
explicit TFDynamicRangeQuantization(MLIRContext* ctx,
const quant::QuantPassSpec& quant_params)
: TFQuantizationBase<kDynamicRangeQuantization,
TFDynamicRangeQuantization>(ctx, quant_params) {}
};
class RemoveUnusedQdqPattern
: public OpRewritePattern<quantfork::DequantizeCastOp> {
public:
explicit RemoveUnusedQdqPattern(MLIRContext* context)
: OpRewritePattern<quantfork::DequantizeCastOp>(context) {}
LogicalResult matchAndRewrite(quantfork::DequantizeCastOp dq_op,
PatternRewriter& rewriter) const override {
auto q_op = dq_op.getArg().getDefiningOp<quantfork::QuantizeCastOp>();
if (!q_op) return failure();
dq_op.replaceAllUsesWith(q_op.getArg());
return success();
}
};
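// Quantizes ops that must share the scale of their operands (as reported by
// the op quant scale spec getter) by rebuilding them on the quantized storage
// type: dequantize-cast operands become storage casts, and results consumed by
// a quantize cast are produced directly in the storage type.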
class QuantizeSameScaleOpsPattern
: public OpRewritePattern<quantfork::DequantizeCastOp> {
public:
explicit QuantizeSameScaleOpsPattern(
MLIRContext* context, OpQuantScaleSpecGetter op_quant_scale_spec_getter,
OpSet target_opset)
: OpRewritePattern<quantfork::DequantizeCastOp>(context, 200),
op_quant_scale_spec_getter_(op_quant_scale_spec_getter),
target_opset_(target_opset) {}
LogicalResult matchAndRewrite(quantfork::DequantizeCastOp op,
PatternRewriter& rewriter) const override {
SmallVector<Operation*, 4> quantizing_ops;
auto users = op.getResult().getUsers();
quantizing_ops.append(users.begin(), users.end());
bool changed = false;
for (Operation* quantizing_op : quantizing_ops) {
if (llvm::isa<quantfork::QuantizeCastOp, quantfork::DequantizeCastOp>(
quantizing_op)) {
return failure();
}
if (quantizing_op->hasTrait<OpTrait::IsTerminator>()) {
return failure();
}
if (!op_quant_scale_spec_getter_(quantizing_op)
->has_same_scale_requirement) {
continue;
}
if (target_opset_ == OpSet::XLA &&
          !IsConnectedWithCompositeFunction(quantizing_op)) {
continue;
}
if (target_opset_ == OpSet::UNIFORM_QUANTIZED) {
continue;
}
SmallVector<Value, 4> inputs;
inputs.reserve(quantizing_op->getNumOperands());
for (const auto& operand : quantizing_op->getOperands()) {
Type operand_type = operand.getType();
if (operand_type.isa<NoneType>()) {
inputs.push_back(operand);
continue;
}
Type elem_type = operand_type.cast<TensorType>().getElementType();
if (auto dq_op = dyn_cast_or_null<quantfork::DequantizeCastOp>(
operand.getDefiningOp())) {
auto dq_arg_type = dq_op.getArg().getType().cast<TensorType>();
auto qtype = dq_arg_type.getElementType().cast<QuantizedType>();
auto scast_op = rewriter.create<quantfork::StorageCastOp>(
dq_op->getLoc(), dq_arg_type.clone(qtype.getStorageType()),
dq_op.getArg());
inputs.push_back(scast_op.getResult());
} else if (!elem_type.isF32()) {
inputs.push_back(operand);
} else {
return failure();
}
}
llvm::SmallDenseMap<Value, int> outputs_replaced;
SmallVector<Type, 4> output_types;
output_types.reserve(quantizing_op->getNumResults());
for (const auto& enumerated_result :
llvm::enumerate(quantizing_op->getResults())) {
Value result = enumerated_result.value();
Type result_type = result.getType();
if (result_type.isa<NoneType>()) {
outputs_replaced.insert({result, enumerated_result.index()});
output_types.push_back(result_type);
continue;
}
auto result_tensor_type = result_type.cast<TensorType>();
if (result.hasOneUse() &&
llvm::isa<quantfork::QuantizeCastOp>(*result.user_begin())) {
auto user =
llvm::cast<quantfork::QuantizeCastOp>(*result.user_begin());
outputs_replaced.insert(
{user.getResult(), enumerated_result.index()});
auto qtype = user.getType()
.cast<TensorType>()
.getElementType()
.cast<QuantizedType>();
output_types.push_back(
result_tensor_type.clone(qtype.getStorageType()));
} else if (!result_tensor_type.getElementType().isF32()) {
outputs_replaced.insert({result, enumerated_result.index()});
output_types.push_back(result.getType());
} else {
return failure();
}
}
rewriter.setInsertionPointAfter(quantizing_op);
OperationState new_state(quantizing_op->getLoc(),
quantizing_op->getName().getStringRef(), inputs,
output_types, quantizing_op->getAttrs());
for (int i = 0; i < quantizing_op->getNumRegions(); ++i) {
new_state.addRegion();
}
Operation* quantized_op = rewriter.create(new_state);
if (quantizing_op->getNumRegions() != 0) {
for (const auto& indexed_regions :
llvm::enumerate(quantizing_op->getRegions())) {
IRMapping mapping;
indexed_regions.value().cloneInto(
&quantized_op->getRegion(indexed_regions.index()), mapping);
}
}
for (const auto& output_index_pair : outputs_replaced) {
Value output = output_index_pair.getFirst();
int output_index = output_index_pair.getSecond();
auto scast_op = rewriter.create<quantfork::StorageCastOp>(
output.getLoc(), output.getType(),
quantized_op->getResult(output_index));
output.replaceAllUsesWith(scast_op);
}
changed = true;
}
return success(changed);
}
private:
  bool IsConnectedWithCompositeFunction(Operation* same_scale_op) const {
for (const auto& operand : same_scale_op->getOperands()) {
auto dq_op = dyn_cast_or_null<quantfork::DequantizeCastOp>(
operand.getDefiningOp());
if (!dq_op) continue;
Operation* preceding_op = dq_op.getArg().getDefiningOp();
if (!preceding_op) continue;
if (llvm::isa<TF::PartitionedCallOp>(preceding_op)) {
auto call_op = llvm::cast<TF::PartitionedCallOp>(preceding_op);
if (!IsCompositeFunction(call_op)) continue;
return true;
}
if (llvm::isa<quantfork::StorageCastOp>(preceding_op)) {
auto sc_op = llvm::cast<quantfork::StorageCastOp>(preceding_op);
auto sc_arg_type = sc_op.getArg().getType().dyn_cast<TensorType>();
if (sc_arg_type.getElementType().isInteger(8)) {
return true;
}
}
}
for (const auto& result : same_scale_op->getResults()) {
if (!result.hasOneUse() ||
!llvm::isa<quantfork::QuantizeCastOp>(*result.user_begin())) {
continue;
}
auto q_op = llvm::cast<quantfork::QuantizeCastOp>(*result.user_begin());
for (auto following_op : q_op->getUsers()) {
if (llvm::isa<TF::PartitionedCallOp>(following_op)) {
auto call_op = llvm::cast<TF::PartitionedCallOp>(following_op);
if (!IsCompositeFunction(call_op)) continue;
return true;
}
if (llvm::isa<quantfork::StorageCastOp>(following_op)) {
auto sc_op = llvm::cast<quantfork::StorageCastOp>(following_op);
auto sc_arg_type = sc_op.getResult().getType().dyn_cast<TensorType>();
if (sc_arg_type.getElementType().isInteger(8)) {
return true;
}
}
}
}
return false;
}
bool IsCompositeFunction(TF::PartitionedCallOp call_op) const {
if (!call_op->hasAttr(kQuantTraitAttrName)) {
return false;
}
const auto f_attr = call_op.getFAttr().dyn_cast<FlatSymbolRefAttr>();
if (!f_attr || !f_attr.getValue().starts_with("composite_")) {
return false;
}
bool has_quantized_types = false;
for (Value input : call_op.getArgs()) {
if (auto type = input.getType().dyn_cast<TensorType>()) {
if (type.getElementType().isa<FloatType>()) {
return false;
}
if (type.getElementType().isa<QuantizedType>()) {
has_quantized_types = true;
}
}
}
for (Value output : call_op.getOutput()) {
if (auto type = output.getType().dyn_cast<TensorType>()) {
if (type.getElementType().isa<FloatType>()) {
return false;
}
if (type.getElementType().isa<QuantizedType>()) {
has_quantized_types = true;
}
}
}
return has_quantized_types;
}
OpQuantScaleSpecGetter op_quant_scale_spec_getter_;
OpSet target_opset_;
};
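// Rewrites a quantized tf.AvgPool bracketed by storage casts of the same
// quantized type into cast-to-float -> AvgPool -> Round -> cast-to-int, so the
// averaging runs in float while the surrounding graph stays in the quantized
// storage type.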
struct QuantizeAvgPoolOpPattern
: public OpRewritePattern<quantfork::StorageCastOp> {
explicit QuantizeAvgPoolOpPattern(MLIRContext* context)
: OpRewritePattern<quantfork::StorageCastOp>(context, 100) {}
LogicalResult matchAndRewrite(quantfork::StorageCastOp sc_op,
PatternRewriter& rewriter) const override {
auto avg_pool_op = sc_op.getArg().getDefiningOp<TF::AvgPoolOp>();
if (!avg_pool_op) return failure();
auto preceding_sc_op = dyn_cast_or_null<quantfork::StorageCastOp>(
avg_pool_op.getValue().getDefiningOp());
if (!preceding_sc_op) return failure();
auto dq_arg_type = preceding_sc_op.getArg().getType().cast<TensorType>();
auto qtype = dq_arg_type.getElementType().cast<QuantizedType>();
auto q_result_type = sc_op.getType().cast<TensorType>();
auto out_qtype = q_result_type.getElementType().cast<QuantizedType>();
if (qtype != out_qtype) {
avg_pool_op.emitError(
"The preceding StorageCastOp and the following "
"StorageCastOp must have the same quantized type");
return failure();
}
OpBuilder::InsertionGuard g(rewriter);
rewriter.setInsertionPointAfter(preceding_sc_op);
auto fcast_op = rewriter.create<TF::CastOp>(
preceding_sc_op->getLoc(), dq_arg_type.clone(rewriter.getF32Type()),
preceding_sc_op.getResult());
TF::AvgPoolOp float_avg_pool_op = rewriter.create<TF::AvgPoolOp>(
avg_pool_op->getLoc(),
avg_pool_op.getType().clone(rewriter.getF32Type()),
fcast_op.getResult(),
avg_pool_op->getAttrs());
auto round_val = rewriter.create<TF::RoundOp>(
sc_op.getLoc(), float_avg_pool_op.getOutput());
auto icast_op = rewriter.create<TF::CastOp>(
sc_op.getLoc(), q_result_type.clone(qtype.getStorageType()), round_val);
avg_pool_op.getResult().replaceAllUsesWith(icast_op.getResult());
return success();
}
};
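// Function pass that applies the patterns above: dynamic-range patterns when
// weight quantization is enabled, otherwise the full-quantization, same-scale,
// and avg-pool patterns. Unused quantize/dequantize pairs are removed unless
// the XLA weight-only path needs to keep them.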
class QuantizePass
: public PassWrapper<QuantizePass, OperationPass<func::FuncOp>> {
public:
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(QuantizePass)
explicit QuantizePass() {
quant_specs_.inference_type = tensorflow::DT_QINT8;
}
explicit QuantizePass(const QuantizationSpecs& quant_specs,
OpSet target_opset)
: quant_specs_(quant_specs) {
weight_quantization_ = quant_specs.weight_quantization;
target_opset_ = target_opset;
}
QuantizePass(const QuantizePass& other) : quant_specs_(other.quant_specs_) {
weight_quantization_ = other.weight_quantization_;
target_opset_ = other.target_opset_;
}
StringRef getArgument() const final {
return "quant-quantize";
}
StringRef getDescription() const final {
return "Apply quantization on models in TensorFlow dialect";
}
bool shouldKeepUnusedQdqPattern();
void runOnOperation() override;
private:
QuantizationSpecs quant_specs_;
Option<bool> weight_quantization_{
*this, "weight-quantization", llvm::cl::init(false),
llvm::cl::desc("Whether to enable weight quantization.")};
Option<OpSet> target_opset_{
*this, "target-opset", llvm::cl::init(OpSet::TF),
llvm::cl::desc("Choose target opset."),
llvm::cl::values(
clEnumValN(OpSet::TF, "TF",
"Uses TF ops that mimic quantization behavior"),
clEnumValN(OpSet::XLA, "XLA", "Uses TF XLA ops"),
clEnumValN(OpSet::UNIFORM_QUANTIZED, "UNIFORM_QUANTIZED",
"Uses TF Uniform Quantized ops"))};
};
bool QuantizePass::shouldKeepUnusedQdqPattern() {
return target_opset_ == OpSet::XLA &&
(quant_specs_.weight_only_quantization ||
quant_specs_.weight_quantization);
}
void QuantizePass::runOnOperation() {
RewritePatternSet patterns(&getContext());
auto func = getOperation();
auto* ctx = func.getContext();
quant_specs_.weight_quantization = weight_quantization_;
const QuantPassSpec quant_params = {
{quant_specs_.verify_numeric, 5.0f,
quant_specs_.whole_model_verify, false},
quant_specs_};
if (quant_specs_.weight_quantization) {
patterns.add<TFDynamicRangeQuantization>(ctx, quant_params);
} else {
patterns.add<TFFullQuantization, TFFullQuantizationReverse>(ctx,
quant_params);
patterns.add<QuantizeSameScaleOpsPattern>(ctx, GetTfQuantScaleSpec,
target_opset_);
patterns.add<QuantizeAvgPoolOpPattern>(ctx);
}
if (failed(applyPatternsAndFoldGreedily(func, std::move(patterns)))) {
func.emitWarning("Failed to converge pattern at QuantizePass.");
}
if (!shouldKeepUnusedQdqPattern()) {
RewritePatternSet patterns_2(&getContext());
patterns_2.add<RemoveUnusedQdqPattern>(ctx);
if (failed(applyPatternsAndFoldGreedily(func, std::move(patterns_2)))) {
signalPassFailure();
}
}
}
}
std::unique_ptr<OperationPass<func::FuncOp>> CreateQuantizePass() {
QuantizationSpecs quant_specs;
return std::make_unique<QuantizePass>(quant_specs, OpSet::TF);
}
std::unique_ptr<OperationPass<func::FuncOp>> CreateQuantizePass(
QuantizationSpecs quant_specs, OpSet target_opset) {
return std::make_unique<QuantizePass>(quant_specs, target_opset);
}
static PassRegistration<QuantizePass> pass;
}
} | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/hexagon/builders/tests/hexagon_delegate_op_model.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
using testing::ElementsAreArray;
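// Single-op model for the QUANTIZE (requantize) op; the tests below requantize
// uint8/int8 inputs across different scale and zero-point combinations and
// compare the Hexagon delegate output with hard-coded expected values.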
class QuantizeOpModel : public SingleOpModelWithHexagon {
public:
explicit QuantizeOpModel(const TensorData& input, const TensorData& output) {
input_ = AddInput(input);
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_QUANTIZE, BuiltinOptions_QuantizeOptions,
CreateQuantizeOptions(builder_).Union());
BuildInterpreter({GetShape(input_)});
}
template <typename T>
void SetInput(const std::vector<float>& data) {
QuantizeAndPopulate<T>(input_, data);
}
template <typename T>
std::vector<T> GetOutput() {
return ExtractVector<T>(output_);
}
protected:
BuiltinOperator op_code_;
int input_;
int output_;
};
TEST(QuantizeOpTest, UInt8UInt8SameScale) {
QuantizeOpModel m({TensorType_UINT8, {1, 1, 2, 5}, -63.5, 64},
{TensorType_UINT8, {1, 1, 2, 5}, -63.5, 64});
m.SetInput<uint8_t>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
m.ApplyDelegateAndInvoke();
EXPECT_THAT(
m.GetOutput<uint8_t>(),
ElementsAreArray({129, 131, 133, 135, 137, 139, 141, 143, 145, 147}));
}
TEST(QuantizeOpTest, Uint8Uint8LargerScale) {
QuantizeOpModel m({TensorType_UINT8, {1, 1, 2, 5}, -63.5, 64},
{TensorType_UINT8, {1, 1, 2, 5}, -127, 128});
m.SetInput<uint8_t>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
m.ApplyDelegateAndInvoke();
EXPECT_THAT(
m.GetOutput<uint8_t>(),
ElementsAreArray({128, 129, 130, 131, 132, 133, 134, 135, 136, 137}));
}
TEST(QuantizeOpTest, Uint8Uint8SmallerScale) {
QuantizeOpModel m({TensorType_UINT8, {1, 1, 2, 5}, -127, 128},
{TensorType_UINT8, {1, 1, 2, 5}, -63.5, 64});
m.SetInput<uint8_t>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
m.ApplyDelegateAndInvoke();
EXPECT_THAT(
m.GetOutput<uint8_t>(),
ElementsAreArray({129, 131, 133, 135, 137, 139, 141, 143, 145, 147}));
}
TEST(QuantizeOpTest, Int8Uint8SmallerScale) {
QuantizeOpModel m({TensorType_INT8, {1, 1, 2, 5}, -127, 128},
{TensorType_UINT8, {1, 1, 2, 5}, -63.5, 64});
m.SetInput<int8_t>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
m.ApplyDelegateAndInvoke();
EXPECT_THAT(
m.GetOutput<uint8_t>(),
ElementsAreArray({129, 131, 133, 135, 137, 139, 141, 143, 145, 147}));
}
TEST(QuantizeOpTest, Int8Uint8LargerScale) {
QuantizeOpModel m({TensorType_INT8, {1, 1, 2, 5}, -127, 128},
{TensorType_UINT8, {1, 1, 2, 5}, -254, 256});
m.SetInput<int8_t>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
m.ApplyDelegateAndInvoke();
EXPECT_THAT(
m.GetOutput<uint8_t>(),
ElementsAreArray({128, 128, 129, 129, 130, 130, 131, 131, 132, 132}));
}
TEST(QuantizeOpTest, UInt8Int8SameScale128Diff) {
QuantizeOpModel m({TensorType_UINT8, {1, 1, 2, 5}, -127, 128},
{TensorType_INT8, {1, 1, 2, 5}, -127, 128});
m.SetInput<uint8_t>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
m.ApplyDelegateAndInvoke();
EXPECT_THAT(m.GetOutput<int8_t>(),
ElementsAreArray({0, 1, 2, 3, 4, 5, 6, 7, 8, 9}));
}
TEST(QuantizeOpTest, Int8Int8SameScale) {
QuantizeOpModel m({TensorType_INT8, {1, 1, 2, 5}, -63.5, 64},
{TensorType_INT8, {1, 1, 2, 5}, -63.5, 64});
m.SetInput<int8_t>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
m.ApplyDelegateAndInvoke();
EXPECT_THAT(m.GetOutput<int8_t>(),
ElementsAreArray({1, 3, 5, 7, 9, 11, 13, 15, 17, 19}));
}
TEST(QuantizeOpTest, Int8Int8LargerScale) {
QuantizeOpModel m({TensorType_INT8, {1, 1, 2, 5}, -63.5, 64},
{TensorType_INT8, {1, 1, 2, 5}, -127, 128});
m.SetInput<int8_t>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
m.ApplyDelegateAndInvoke();
EXPECT_THAT(m.GetOutput<int8_t>(),
ElementsAreArray({0, 1, 2, 3, 4, 5, 6, 7, 8, 9}));
}
TEST(QuantizeOpTest, Int8Int8SmallerScale) {
QuantizeOpModel m({TensorType_INT8, {1, 1, 2, 5}, -127, 128},
{TensorType_INT8, {1, 1, 2, 5}, -63.5, 64});
m.SetInput<int8_t>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
m.ApplyDelegateAndInvoke();
EXPECT_THAT(m.GetOutput<int8_t>(),
ElementsAreArray({1, 3, 5, 7, 9, 11, 13, 15, 17, 19}));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/hexagon/builders/tests/quantize_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
762545b1-0374-4b94-9e37-dd8913f00e17 | cpp | tensorflow/tensorflow | div | tensorflow/lite/kernels/div.cc | tensorflow/lite/delegates/xnnpack/div_test.cc | #include <stddef.h>
#include <stdint.h>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/optimized/cpu_check.h"
#include "tensorflow/lite/kernels/internal/optimized/neon_check.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/quantization_util.h"
#include "tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace div {
enum KernelType {
kReference,
kGenericOptimized,
kNeonOptimized,
};
constexpr int kInputTensor1 = 0;
constexpr int kInputTensor2 = 1;
constexpr int kOutputTensor = 0;
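// Per-node state. requires_broadcast is set in Prepare when the input shapes
// differ. The remaining fields are used only by the quantized (uint8) path:
// the fused-activation clamp range and the fixed-point multiplier/shift used
// to rescale the result.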
struct OpData {
bool requires_broadcast;
  int32_t output_activation_min;
  int32_t output_activation_max;
int32_t output_multiplier;
int output_shift;
};
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
auto* data = new OpData;
data->requires_broadcast = false;
return data;
}
void Free(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<OpData*>(buffer);
}
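// Validates the node (two inputs of the same type, one output), derives the
// output shape (broadcast if needed) and, for uint8, precomputes the
// activation range and the output rescaling multiplier
//   real_multiplier = input1_scale / (input2_scale * output_scale),
// e.g. scales of 0.5, 0.5 and 0.5 give 2.0, which QuantizeMultiplier then
// decomposes into a fixed-point significand plus a shift.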
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
auto* params = reinterpret_cast<TfLiteDivParams*>(node->builtin_data);
OpData* data = reinterpret_cast<OpData*>(node->user_data);
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input1;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor1, &input1));
const TfLiteTensor* input2;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor2, &input2));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type);
output->type = input2->type;
data->requires_broadcast = !HaveSameShapes(input1, input2);
TfLiteIntArray* output_size = nullptr;
if (data->requires_broadcast) {
TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast(
context, input1, input2, &output_size));
} else {
output_size = TfLiteIntArrayCopy(input1->dims);
}
if (output->type == kTfLiteUInt8) {
TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized(
context, params->activation, output, &data->output_activation_min,
&data->output_activation_max));
const double real_multiplier =
input1->params.scale / (input2->params.scale * output->params.scale);
QuantizeMultiplier(real_multiplier, &data->output_multiplier,
&data->output_shift);
}
return context->ResizeTensor(context, output, output_size);
}
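// Float / int32 element-wise division. TF_LITE_DIV expands to either the
// reference or optimized kernel, using BroadcastDivSlow whenever the two
// input shapes differ.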
template <KernelType kernel_type>
void EvalDiv(TfLiteContext* context, TfLiteNode* node, TfLiteDivParams* params,
const OpData* data, const TfLiteTensor* input1,
const TfLiteTensor* input2, TfLiteTensor* output) {
#define TF_LITE_DIV(type, opname, data_type) \
tflite::ArithmeticParams op_params; \
data_type output_activation_min, output_activation_max; \
CalculateActivationRange(params->activation, &output_activation_min, \
&output_activation_max); \
SetActivationParams(output_activation_min, output_activation_max, \
&op_params); \
type::opname(op_params, GetTensorShape(input1), \
GetTensorData<data_type>(input1), GetTensorShape(input2), \
GetTensorData<data_type>(input2), GetTensorShape(output), \
GetTensorData<data_type>(output))
if (output->type == kTfLiteInt32) {
if (kernel_type == kReference) {
if (data->requires_broadcast) {
TF_LITE_DIV(reference_ops, BroadcastDivSlow, int32_t);
} else {
TF_LITE_DIV(reference_ops, Div, int32_t);
}
} else {
if (data->requires_broadcast) {
TF_LITE_DIV(optimized_ops, BroadcastDivSlow, int32_t);
} else {
TF_LITE_DIV(optimized_ops, Div, int32_t);
}
}
} else if (output->type == kTfLiteFloat32) {
if (kernel_type == kReference) {
if (data->requires_broadcast) {
TF_LITE_DIV(reference_ops, BroadcastDivSlow, float);
} else {
TF_LITE_DIV(reference_ops, Div, float);
}
} else {
if (data->requires_broadcast) {
TF_LITE_DIV(optimized_ops, BroadcastDivSlow, float);
} else {
TF_LITE_DIV(optimized_ops, Div, float);
}
}
}
#undef TF_LITE_DIV
}
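// uint8 division. Zero points are folded in via the offsets, the rescaling
// uses the multiplier/shift computed in Prepare, and ProcessBroadcastShapes
// decides whether the broadcasting kernel is needed. Any other type
// combination is rejected.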
template <KernelType kernel_type>
TfLiteStatus EvalQuantized(TfLiteContext* context, TfLiteNode* node,
TfLiteDivParams* params, const OpData* data,
const TfLiteTensor* input1,
const TfLiteTensor* input2, TfLiteTensor* output) {
if (input1->type == kTfLiteUInt8 && input2->type == kTfLiteUInt8 &&
output->type == kTfLiteUInt8) {
tflite::ArithmeticParams op_params;
SetActivationParams(data->output_activation_min,
data->output_activation_max, &op_params);
op_params.input1_offset = -input1->params.zero_point;
op_params.input2_offset = -input2->params.zero_point;
op_params.output_offset = output->params.zero_point;
op_params.output_multiplier = data->output_multiplier;
op_params.output_shift = data->output_shift;
bool need_broadcast = optimized_ops::ProcessBroadcastShapes(
GetTensorShape(input1), GetTensorShape(input2), &op_params);
#define TF_LITE_DIV(type, opname, dtype) \
type::opname(op_params, GetTensorShape(input1), \
GetTensorData<dtype>(input1), GetTensorShape(input2), \
GetTensorData<dtype>(input2), GetTensorShape(output), \
GetTensorData<dtype>(output))
if (kernel_type == kReference) {
if (need_broadcast) {
TF_LITE_DIV(reference_ops, BroadcastDivSlow, uint8_t);
} else {
TF_LITE_DIV(reference_ops, Div, uint8_t);
}
} else {
if (need_broadcast) {
TF_LITE_DIV(optimized_ops, BroadcastDivSlow, uint8_t);
} else {
TF_LITE_DIV(optimized_ops, Div, uint8_t);
}
}
#undef TF_LITE_DIV
} else {
TF_LITE_KERNEL_LOG(
context, "Unsupported combination of input and output types in Div.");
return kTfLiteError;
}
return kTfLiteOk;
}
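// Integer and quantized division by zero is undefined, so the divisor tensor
// is scanned up front and evaluation fails if any element is zero.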
template <typename T>
TfLiteStatus CheckNonZero(TfLiteContext* context, const TfLiteTensor* tensor) {
const auto* data = GetTensorData<T>(tensor);
const size_t number_elements = tensor->bytes / sizeof(T);
for (size_t i = 0; i < number_elements; i++) {
TF_LITE_ENSURE(context, data[i] != 0);
}
return kTfLiteOk;
}
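// Dispatches on the output type: float and int32 go through EvalDiv (int32
// only after the divisor zero check), uint8 goes through EvalQuantized.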
template <KernelType kernel_type>
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
auto* params = reinterpret_cast<TfLiteDivParams*>(node->builtin_data);
OpData* data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* input1;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor1, &input1));
const TfLiteTensor* input2;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor2, &input2));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
if (output->type == kTfLiteFloat32) {
EvalDiv<kernel_type>(context, node, params, data, input1, input2, output);
} else if (output->type == kTfLiteInt32) {
    TF_LITE_ENSURE_OK(context, CheckNonZero<int32_t>(context, input2));
EvalDiv<kernel_type>(context, node, params, data, input1, input2, output);
} else if (output->type == kTfLiteUInt8) {
    TF_LITE_ENSURE_OK(context, CheckNonZero<uint8_t>(context, input2));
TF_LITE_ENSURE_OK(
context, EvalQuantized<kernel_type>(context, node, params, data, input1,
input2, output));
} else {
TF_LITE_KERNEL_LOG(
context,
"Div only supports FLOAT32, INT32 and quantized UINT8 now, got %d.",
output->type);
return kTfLiteError;
}
return kTfLiteOk;
}
}  // namespace div
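// The registrations below differ only in the kernel specialization. Each one
// also sets the in-place flags, signalling that the output may share a buffer
// with either input.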
TfLiteRegistration* Register_DIV_REF() {
static TfLiteRegistration r = {
div::Init,
div::Free,
div::Prepare,
div::Eval<div::kReference>,
nullptr,
0,
nullptr,
0,
nullptr,
nullptr,
kTfLiteInplaceOpInput0Shared | kTfLiteInplaceOpInput1Shared};
return &r;
}
TfLiteRegistration* Register_DIV_GENERIC_OPT() {
static TfLiteRegistration r = {
div::Init,
div::Free,
div::Prepare,
div::Eval<div::kGenericOptimized>,
nullptr,
0,
nullptr,
0,
nullptr,
nullptr,
kTfLiteInplaceOpInput0Shared | kTfLiteInplaceOpInput1Shared};
return &r;
}
TfLiteRegistration* Register_DIV_NEON_OPT() {
static TfLiteRegistration r = {
div::Init,
div::Free,
div::Prepare,
div::Eval<div::kNeonOptimized>,
nullptr,
0,
nullptr,
0,
nullptr,
nullptr,
kTfLiteInplaceOpInput0Shared | kTfLiteInplaceOpInput1Shared};
return &r;
}
TfLiteRegistration* Register_DIV() {
#ifdef USE_NEON
return Register_DIV_NEON_OPT();
#else
return Register_DIV_GENERIC_OPT();
#endif
}
}  // namespace builtin
}  // namespace ops
} | #include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/delegates/xnnpack/binary_elementwise_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace xnnpack {
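// Each test below builds a single DIV node with random shapes (each dimension
// drawn from [2, 5]) and runs it through BinaryElementwiseTester, which checks
// the XNNPACK-delegated output against the default TFLite kernel.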
TEST(Div, 4DBy4D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, 4DBy4DBroadcastChannels) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, 1, 1, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, 1, 1, channels})
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, 4DBy4DBroadcastWidth) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, 1, width, 1})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, 1, width, 1})
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, 4DBy4DBroadcastHeight) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, height, 1, 1})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, height, 1, 1})
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, 4DBy4DBroadcastBatch) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, 1, 1, 1})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, 1, 1, 1})
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, 4DBy4DBroadcastHeightWidthChannels) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, height, width, channels})
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, 4DBy3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({height, width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({height, width, channels})
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, 4DBy2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({width, channels})
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, 4DBy1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({channels})
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, 4DBy0D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({})
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, 2DBy2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({batch, channels})
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, 2DBy1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({channels})
.Input2Shape({batch, channels})
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({channels})
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, 2DBy0D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({})
.Input2Shape({batch, channels})
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({})
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
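// The *Static* variants below mark one of the two inputs as a constant
// tensor, so the delegate treats it as static data rather than a runtime
// input.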
TEST(Div, 4DByStatic4D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, 4DByStatic4DBroadcastChannels) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, 1, 1, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, 1, 1, channels})
.Input2Static(true)
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, 4DByStatic4DBroadcastWidth) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, 1, width, 1})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, 1, width, 1})
.Input2Static(true)
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, 4DByStatic4DBroadcastHeight) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, height, 1, 1})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, height, 1, 1})
.Input2Static(true)
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, 4DByStatic4DBroadcastBatch) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, 1, 1, 1})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, 1, 1, 1})
.Input2Static(true)
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, 4DByStatic4DBroadcastHeightWidthChannels) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, height, width, channels})
.Input2Static(true)
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, 4DByStatic3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({height, width, channels})
.Input2Static(true)
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, 4DByStatic2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({width, channels})
.Input2Static(true)
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, 4DByStatic1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({channels})
.Input2Static(true)
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, 4DByStatic0D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({})
.Input2Static(true)
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, 2DByStatic2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({batch, channels})
.Input1Static(true)
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({batch, channels})
.Input2Static(true)
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, 2DByStatic1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({channels})
.Input2Shape({batch, channels})
.Input1Static(true)
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({channels})
.Input2Static(true)
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, 2DByStatic0D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({})
.Input2Shape({batch, channels})
.Input1Static(true)
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({})
.Input2Static(true)
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
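// The following tests keep one input static and store it as FP16, per-tensor
// INT8, per-channel INT8 or sparse data, exercising the delegate's handling
// of compressed static inputs.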
TEST(Div, FP16Weights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.FP16Weights()
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.FP16Weights()
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, INT8Weights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.INT8Weights()
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.INT8Weights()
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, INT8ChannelWiseWeights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.INT8ChannelWiseWeights()
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.INT8ChannelWiseWeights()
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, SparseWeights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.SparseWeights()
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.SparseWeights()
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, ReluActivation) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.ReluActivation()
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, Relu6Activation) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Relu6Activation()
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, ReluMinus1To1Activation) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.ReluMinus1To1Activation()
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
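// Tanh and sign-bit fused activations are skipped by default (DISABLED_
// prefix), presumably because the delegate does not support them yet.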
TEST(Div, DISABLED_TanhActivation) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.TanhActivation()
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, DISABLED_SignBitActivation) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.SignBitActivation()
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
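// Same as the plain 4D-by-4D case, but with the delegate configured to use
// two threads.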
TEST(Div, MultiThreading) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
delegate_options.num_threads = 2;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
}  // namespace xnnpack
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/div.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/div_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
85256940-c06d-4a66-ad6c-9c9d871a9f1c | cpp | tensorflow/tensorflow | bidirectional_sequence_rnn | tensorflow/lite/kernels/bidirectional_sequence_rnn.cc | tensorflow/lite/kernels/bidirectional_sequence_rnn_test.cc | #include <algorithm>
#include <cstddef>
#include <cstdint>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/kernel_utils.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/op_macros.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace bidirectional_sequence_rnn {
namespace {
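// Bookkeeping for the hybrid (quantized-weight) path: index of the first
// scratch tensor plus flags requesting a recomputation of the cached weight
// row sums.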
struct OpData {
int scratch_tensor_index;
bool fw_compute_row_sums = false;
bool bw_compute_row_sums = false;
};
}  // namespace
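// Input tensor indices: the sequence input, forward and backward
// weights/recurrent weights/bias/hidden state, then an optional auxiliary
// input with its own per-direction weights. One output per direction, or a
// single merged output when merge_outputs is set.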
constexpr int kInputTensor = 0;
constexpr int kFwWeightsTensor = 1;
constexpr int kFwRecurrentWeightsTensor = 2;
constexpr int kFwBiasTensor = 3;
constexpr int kFwHiddenStateTensor = 4;
constexpr int kBwWeightsTensor = 5;
constexpr int kBwRecurrentWeightsTensor = 6;
constexpr int kBwBiasTensor = 7;
constexpr int kBwHiddenStateTensor = 8;
constexpr int kAuxInputTensor = 9;
constexpr int kFwAuxWeightsTensor = 10;
constexpr int kBwAuxWeightsTensor = 11;
constexpr int kFwOutputTensor = 0;
constexpr int kBwOutputTensor = 1;
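// Temporary tensors used only by the hybrid path (float activations with
// quantized weights). The auxiliary-input buffer is allocated only when an
// aux input is present.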
enum TemporaryTensor {
kInputQuantized = 0,
kFwHiddenStateQuantized = 1,
kBwHiddenStateQuantized = 2,
kScalingFactors = 3,
kAccumScratch = 4,
kZeroPoints = 5,
kFwRowSums = 6,
kBwRowSums = 7,
kAuxInputQuantized = 8,
kNumTemporaryTensors = 9
};
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
auto* op_data = new OpData();
context->AddTensors(context, kNumTemporaryTensors,
&op_data->scratch_tensor_index);
return op_data;
}
void Free(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<OpData*>(buffer);
}
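// Checks the 12 inputs and the 1 or 2 outputs (depending on merge_outputs),
// validates shapes against batch size, sequence length and unit counts,
// allocates the hybrid-path temporaries when the weights are quantized, and
// resizes the output tensor(s).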
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
const auto* params = reinterpret_cast<TfLiteBidirectionalSequenceRNNParams*>(
node->builtin_data);
TF_LITE_ENSURE_EQ(context, node->inputs->size, 12);
TF_LITE_ENSURE_EQ(context, node->outputs->size,
params->merge_outputs ? 1 : 2);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const TfLiteTensor* fw_input_weights;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kFwWeightsTensor,
&fw_input_weights));
const TfLiteTensor* fw_recurrent_weights;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kFwRecurrentWeightsTensor,
&fw_recurrent_weights));
const TfLiteTensor* fw_bias;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kFwBiasTensor, &fw_bias));
const TfLiteTensor* fw_hidden_state;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kFwHiddenStateTensor,
&fw_hidden_state));
const TfLiteTensor* bw_input_weights;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kBwWeightsTensor,
&bw_input_weights));
const TfLiteTensor* bw_recurrent_weights;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kBwRecurrentWeightsTensor,
&bw_recurrent_weights));
const TfLiteTensor* bw_bias;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kBwBiasTensor, &bw_bias));
const TfLiteTensor* bw_hidden_state;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kBwHiddenStateTensor,
&bw_hidden_state));
const TfLiteTensor* aux_input =
GetOptionalInputTensor(context, node, kAuxInputTensor);
const TfLiteTensor* fw_aux_input_weights =
GetOptionalInputTensor(context, node, kFwAuxWeightsTensor);
const TfLiteTensor* bw_aux_input_weights =
GetOptionalInputTensor(context, node, kBwAuxWeightsTensor);
const bool aux_inputs_weights_or_none =
((fw_aux_input_weights != nullptr) &&
(bw_aux_input_weights != nullptr)) ||
((fw_aux_input_weights == nullptr) && (bw_aux_input_weights == nullptr));
TF_LITE_ENSURE(context, aux_inputs_weights_or_none);
const bool has_aux_input = (fw_aux_input_weights != nullptr);
TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32);
TF_LITE_ENSURE_EQ(context, input->dims->size, 3);
const bool time_major = params->time_major;
const int batch_size =
(time_major) ? input->dims->data[1] : input->dims->data[0];
const int max_time =
(time_major) ? input->dims->data[0] : input->dims->data[1];
const int fw_num_units = fw_input_weights->dims->data[0];
const int bw_num_units = bw_input_weights->dims->data[0];
TF_LITE_ENSURE_EQ(context, input->dims->data[2],
fw_input_weights->dims->data[1]);
TF_LITE_ENSURE_EQ(context, input->dims->data[2],
bw_input_weights->dims->data[1]);
TF_LITE_ENSURE_EQ(context, fw_input_weights->dims->data[0],
fw_bias->dims->data[0]);
TF_LITE_ENSURE_EQ(context, bw_input_weights->dims->data[0],
bw_bias->dims->data[0]);
TF_LITE_ENSURE_EQ(context, fw_recurrent_weights->dims->data[0],
fw_bias->dims->data[0]);
TF_LITE_ENSURE_EQ(context, bw_recurrent_weights->dims->data[1],
bw_bias->dims->data[0]);
TF_LITE_ENSURE_EQ(context, NumDimensions(fw_hidden_state), 2);
TF_LITE_ENSURE_EQ(context, fw_hidden_state->dims->data[0], batch_size);
TF_LITE_ENSURE_EQ(context, fw_hidden_state->dims->data[1], fw_num_units);
TF_LITE_ENSURE_EQ(context, NumDimensions(bw_hidden_state), 2);
TF_LITE_ENSURE_EQ(context, bw_hidden_state->dims->data[0], batch_size);
TF_LITE_ENSURE_EQ(context, bw_hidden_state->dims->data[1], bw_num_units);
if (has_aux_input) {
TF_LITE_ASSERT_EQ(aux_input->dims->data[0], input->dims->data[0]);
TF_LITE_ASSERT_EQ(aux_input->dims->data[1], input->dims->data[1]);
TF_LITE_ASSERT_EQ(fw_aux_input_weights->dims->data[0], fw_num_units);
TF_LITE_ASSERT_EQ(bw_aux_input_weights->dims->data[0], bw_num_units);
TF_LITE_ASSERT_EQ(aux_input->dims->data[2],
fw_aux_input_weights->dims->data[1]);
TF_LITE_ASSERT_EQ(aux_input->dims->data[2],
bw_aux_input_weights->dims->data[1]);
}
if (IsHybridOp(input, fw_input_weights)) {
OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
op_data->fw_compute_row_sums = true;
op_data->bw_compute_row_sums = true;
TfLiteIntArrayFree(node->temporaries);
if (has_aux_input) {
node->temporaries = TfLiteIntArrayCreate(kNumTemporaryTensors);
} else {
node->temporaries = TfLiteIntArrayCreate(kNumTemporaryTensors - 1);
}
node->temporaries->data[kInputQuantized] =
op_data->scratch_tensor_index + kInputQuantized;
TfLiteTensor* input_quantized;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, kInputQuantized,
&input_quantized));
input_quantized->type = fw_input_weights->type;
input_quantized->allocation_type = kTfLiteArenaRw;
if (!TfLiteIntArrayEqual(input_quantized->dims, input->dims)) {
TfLiteIntArray* input_quantized_size = TfLiteIntArrayCopy(input->dims);
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, input_quantized,
input_quantized_size));
}
node->temporaries->data[kFwHiddenStateQuantized] =
op_data->scratch_tensor_index + kFwHiddenStateQuantized;
TfLiteTensor* fw_hidden_state_quantized;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, kFwHiddenStateQuantized,
&fw_hidden_state_quantized));
fw_hidden_state_quantized->type = fw_input_weights->type;
fw_hidden_state_quantized->allocation_type = kTfLiteArenaRw;
if (!TfLiteIntArrayEqual(fw_hidden_state_quantized->dims,
fw_hidden_state->dims)) {
TfLiteIntArray* fw_hidden_state_quantized_size =
TfLiteIntArrayCopy(fw_hidden_state->dims);
TF_LITE_ENSURE_OK(
context, context->ResizeTensor(context, fw_hidden_state_quantized,
fw_hidden_state_quantized_size));
}
node->temporaries->data[kBwHiddenStateQuantized] =
op_data->scratch_tensor_index + kBwHiddenStateQuantized;
TfLiteTensor* bw_hidden_state_quantized;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, kBwHiddenStateQuantized,
&bw_hidden_state_quantized));
bw_hidden_state_quantized->type = fw_input_weights->type;
bw_hidden_state_quantized->allocation_type = kTfLiteArenaRw;
if (!TfLiteIntArrayEqual(bw_hidden_state_quantized->dims,
bw_hidden_state->dims)) {
TfLiteIntArray* bw_hidden_state_quantized_size =
TfLiteIntArrayCopy(bw_hidden_state->dims);
TF_LITE_ENSURE_OK(
context, context->ResizeTensor(context, bw_hidden_state_quantized,
bw_hidden_state_quantized_size));
}
node->temporaries->data[kScalingFactors] =
op_data->scratch_tensor_index + kScalingFactors;
TfLiteTensor* scaling_factors;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, kScalingFactors,
&scaling_factors));
scaling_factors->type = kTfLiteFloat32;
scaling_factors->allocation_type = kTfLiteArenaRw;
int scaling_dims[1] = {batch_size};
if (!TfLiteIntArrayEqualsArray(scaling_factors->dims, 1, scaling_dims)) {
TfLiteIntArray* scaling_factors_size = TfLiteIntArrayCreate(1);
scaling_factors_size->data[0] = batch_size;
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scaling_factors,
scaling_factors_size));
}
node->temporaries->data[kAccumScratch] =
op_data->scratch_tensor_index + kAccumScratch;
TfLiteTensor* accum_scratch;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, kAccumScratch,
&accum_scratch));
accum_scratch->type = kTfLiteInt32;
accum_scratch->allocation_type = kTfLiteArenaRw;
int accum_scratch_dims[2] = {std::max(fw_num_units, bw_num_units),
batch_size};
if (!TfLiteIntArrayEqualsArray(accum_scratch->dims, 2,
accum_scratch_dims)) {
TfLiteIntArray* accum_scratch_size = TfLiteIntArrayCreate(2);
accum_scratch_size->data[0] = accum_scratch_dims[0];
accum_scratch_size->data[1] = accum_scratch_dims[1];
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, accum_scratch,
accum_scratch_size));
}
node->temporaries->data[kZeroPoints] =
op_data->scratch_tensor_index + kZeroPoints;
TfLiteTensor* zero_points;
TF_LITE_ENSURE_OK(
context,
GetTemporarySafe(context, node, kZeroPoints, &zero_points));
zero_points->type = kTfLiteInt32;
zero_points->allocation_type = kTfLiteArenaRw;
int zero_points_dims[1] = {batch_size};
if (!TfLiteIntArrayEqualsArray(zero_points->dims, 1, zero_points_dims)) {
TfLiteIntArray* zero_points_size = TfLiteIntArrayCreate(1);
zero_points_size->data[0] = batch_size;
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, zero_points,
zero_points_size));
}
const int num_row_sums = has_aux_input ? 3 : 2;
node->temporaries->data[kFwRowSums] =
op_data->scratch_tensor_index + kFwRowSums;
TfLiteTensor* fw_row_sums;
TF_LITE_ENSURE_OK(
context,
GetTemporarySafe(context, node, kFwRowSums, &fw_row_sums));
fw_row_sums->type = kTfLiteInt32;
fw_row_sums->name = "Lstm_fw_row_sums";
fw_row_sums->allocation_type = kTfLiteArenaRwPersistent;
int fw_row_sums_dims[2] = {num_row_sums, fw_num_units};
if (!TfLiteIntArrayEqualsArray(fw_row_sums->dims, 2, fw_row_sums_dims)) {
TfLiteIntArray* fw_row_sums_size = TfLiteIntArrayCreate(2);
fw_row_sums_size->data[0] = fw_row_sums_dims[0];
fw_row_sums_size->data[1] = fw_row_sums_dims[1];
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, fw_row_sums,
fw_row_sums_size));
}
node->temporaries->data[kBwRowSums] =
op_data->scratch_tensor_index + kBwRowSums;
TfLiteTensor* bw_row_sums;
TF_LITE_ENSURE_OK(
context,
GetTemporarySafe(context, node, kBwRowSums, &bw_row_sums));
bw_row_sums->type = kTfLiteInt32;
bw_row_sums->name = "Lstm_bw_row_sums";
bw_row_sums->allocation_type = kTfLiteArenaRwPersistent;
int bw_row_sums_dims[2] = {num_row_sums, bw_num_units};
if (!TfLiteIntArrayEqualsArray(bw_row_sums->dims, 2, bw_row_sums_dims)) {
TfLiteIntArray* bw_row_sums_size = TfLiteIntArrayCreate(2);
bw_row_sums_size->data[0] = bw_row_sums_dims[0];
bw_row_sums_size->data[1] = bw_row_sums_dims[1];
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, bw_row_sums,
bw_row_sums_size));
}
if (has_aux_input) {
node->temporaries->data[kAuxInputQuantized] =
op_data->scratch_tensor_index + kAuxInputQuantized;
TfLiteTensor* aux_input_quantized;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, kAuxInputQuantized,
&aux_input_quantized));
aux_input_quantized->type = fw_input_weights->type;
aux_input_quantized->allocation_type = kTfLiteArenaRw;
if (!TfLiteIntArrayEqual(aux_input_quantized->dims, aux_input->dims)) {
TfLiteIntArray* aux_input_quantized_size =
TfLiteIntArrayCopy(aux_input->dims);
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, aux_input_quantized,
aux_input_quantized_size));
}
}
}
TfLiteTensor* fw_output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kFwOutputTensor, &fw_output));
TfLiteIntArray* fw_output_size_array = TfLiteIntArrayCreate(3);
fw_output_size_array->data[0] = (time_major) ? max_time : batch_size;
fw_output_size_array->data[1] = (time_major) ? batch_size : max_time;
fw_output_size_array->data[2] =
params->merge_outputs ? fw_num_units + bw_num_units : fw_num_units;
TF_LITE_ENSURE_OK(
context, context->ResizeTensor(context, fw_output, fw_output_size_array));
if (!params->merge_outputs) {
TfLiteTensor* bw_output;
TF_LITE_ENSURE_OK(
context, GetOutputSafe(context, node, kBwOutputTensor, &bw_output));
TfLiteIntArray* bw_output_size_array = TfLiteIntArrayCreate(3);
bw_output_size_array->data[0] = (time_major) ? max_time : batch_size;
bw_output_size_array->data[1] = (time_major) ? batch_size : max_time;
bw_output_size_array->data[2] = bw_num_units;
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, bw_output,
bw_output_size_array));
}
return kTfLiteOk;
}
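// Float evaluation: the forward RNN walks the sequence in order and the
// backward RNN walks it in reverse. With merge_outputs, the backward
// activations are written into the forward output tensor at an offset of
// fw_num_units.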
TfLiteStatus EvalFloat(const TfLiteTensor* input, const TfLiteTensor* bw_input,
const TfLiteTensor* fw_input_weights,
const TfLiteTensor* fw_recurrent_weights,
const TfLiteTensor* fw_bias,
const TfLiteTensor* bw_input_weights,
const TfLiteTensor* bw_recurrent_weights,
const TfLiteTensor* bw_bias,
const TfLiteTensor* aux_input,
const TfLiteTensor* fw_aux_input_weights,
const TfLiteTensor* bw_aux_input_weights,
const TfLiteBidirectionalSequenceRNNParams* params,
TfLiteTensor* fw_hidden_state, TfLiteTensor* fw_output,
TfLiteTensor* bw_hidden_state, TfLiteTensor* bw_output) {
const bool time_major = params->time_major;
const int batch_size =
(time_major) ? input->dims->data[1] : input->dims->data[0];
const int max_time =
(time_major) ? input->dims->data[0] : input->dims->data[1];
const int input_size = input->dims->data[2];
const int aux_input_size = (aux_input) ? aux_input->dims->data[2] : 0;
const int fw_num_units = fw_input_weights->dims->data[0];
const float* fw_bias_ptr = GetTensorData<float>(fw_bias);
const float* fw_input_weights_ptr = GetTensorData<float>(fw_input_weights);
const float* fw_recurrent_weights_ptr =
GetTensorData<float>(fw_recurrent_weights);
const int bw_num_units = bw_input_weights->dims->data[0];
const float* bw_bias_ptr = GetTensorData<float>(bw_bias);
const float* bw_input_weights_ptr = GetTensorData<float>(bw_input_weights);
const float* bw_recurrent_weights_ptr =
GetTensorData<float>(bw_recurrent_weights);
const float* fw_aux_input_weights_ptr =
(fw_aux_input_weights != nullptr)
? GetTensorData<float>(fw_aux_input_weights)
: nullptr;
const float* bw_aux_input_weights_ptr =
(bw_aux_input_weights != nullptr)
? GetTensorData<float>(bw_aux_input_weights)
: nullptr;
const int fw_output_step =
params->merge_outputs ? fw_num_units + bw_num_units : fw_num_units;
const int bw_output_step =
params->merge_outputs ? fw_num_units + bw_num_units : bw_num_units;
if (time_major) {
float* fw_hidden_state_ptr_batch = GetTensorData<float>(fw_hidden_state);
for (int s = 0; s < max_time; s++) {
const float* input_ptr_batch =
GetTensorData<float>(input) + s * input_size * batch_size;
const float* aux_input_ptr_batch =
(aux_input != nullptr)
? GetTensorData<float>(aux_input) + s * input_size * batch_size
: nullptr;
float* output_ptr_batch =
GetTensorData<float>(fw_output) + s * fw_output_step * batch_size;
kernel_utils::RnnBatchStep(
input_ptr_batch, fw_input_weights_ptr, aux_input_ptr_batch,
fw_aux_input_weights_ptr, fw_recurrent_weights_ptr, fw_bias_ptr,
input_size, aux_input_size, fw_num_units, batch_size, fw_output_step,
params->activation, fw_hidden_state_ptr_batch, output_ptr_batch);
}
float* bw_hidden_state_ptr_batch = GetTensorData<float>(bw_hidden_state);
for (int s = max_time - 1; s >= 0; s--) {
const float* input_ptr_batch =
GetTensorData<float>(bw_input) + s * input_size * batch_size;
const float* aux_input_ptr_batch =
(aux_input != nullptr)
? GetTensorData<float>(aux_input) + s * input_size * batch_size
: nullptr;
float* output_ptr_batch =
(params->merge_outputs
? GetTensorData<float>(fw_output) + fw_num_units
: GetTensorData<float>(bw_output)) +
s * bw_output_step * batch_size;
kernel_utils::RnnBatchStep(
input_ptr_batch, bw_input_weights_ptr, aux_input_ptr_batch,
bw_aux_input_weights_ptr, bw_recurrent_weights_ptr, bw_bias_ptr,
input_size, aux_input_size, bw_num_units, batch_size, bw_output_step,
params->activation, bw_hidden_state_ptr_batch, output_ptr_batch);
}
} else {
for (int b = 0; b < batch_size; b++) {
float* fw_hidden_state_ptr_batch =
GetTensorData<float>(fw_hidden_state) + b * fw_num_units;
float* fw_output_offset =
GetTensorData<float>(fw_output) + b * fw_output_step * max_time;
for (int s = 0; s < max_time; s++) {
const float* input_ptr_batch = GetTensorData<float>(input) +
b * input_size * max_time +
s * input_size;
const float* aux_input_ptr_batch =
(aux_input != nullptr)
? GetTensorData<float>(aux_input) +
b * aux_input_size * max_time + s * aux_input_size
: nullptr;
float* output_ptr_batch = fw_output_offset + s * fw_output_step;
kernel_utils::RnnBatchStep(
input_ptr_batch, fw_input_weights_ptr, aux_input_ptr_batch,
fw_aux_input_weights_ptr, fw_recurrent_weights_ptr, fw_bias_ptr,
input_size, aux_input_size, fw_num_units, 1,
fw_output_step, params->activation, fw_hidden_state_ptr_batch,
output_ptr_batch);
}
float* bw_hidden_state_ptr_batch =
GetTensorData<float>(bw_hidden_state) + b * bw_num_units;
float* bw_output_offset =
params->merge_outputs
? GetTensorData<float>(fw_output) +
b * bw_output_step * max_time + fw_num_units
: GetTensorData<float>(bw_output) + b * bw_output_step * max_time;
for (int s = max_time - 1; s >= 0; s--) {
const float* input_ptr_batch = GetTensorData<float>(input) +
b * input_size * max_time +
s * input_size;
const float* aux_input_ptr_batch =
(aux_input != nullptr)
? GetTensorData<float>(aux_input) +
b * aux_input_size * max_time + s * aux_input_size
: nullptr;
float* output_ptr_batch = bw_output_offset + s * bw_output_step;
kernel_utils::RnnBatchStep(
input_ptr_batch, bw_input_weights_ptr, aux_input_ptr_batch,
bw_aux_input_weights_ptr, bw_recurrent_weights_ptr, bw_bias_ptr,
input_size, aux_input_size, bw_num_units, 1,
bw_output_step, params->activation, bw_hidden_state_ptr_batch,
output_ptr_batch);
}
}
}
return kTfLiteOk;
}
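// Hybrid evaluation: weights are int8 while activations stay float. Inputs
// and hidden states are quantized on the fly into the temporaries, with
// per-batch scaling factors (plus zero points and cached row sums when
// asymmetric quantization is enabled) forwarded to each batch step.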
TfLiteStatus EvalHybrid(
const TfLiteTensor* input, const TfLiteTensor* bw_input,
const TfLiteTensor* fw_input_weights,
const TfLiteTensor* fw_recurrent_weights, const TfLiteTensor* fw_bias,
const TfLiteTensor* bw_input_weights,
const TfLiteTensor* bw_recurrent_weights, const TfLiteTensor* bw_bias,
const TfLiteTensor* aux_input, const TfLiteTensor* aux_fw_input_weights,
const TfLiteTensor* aux_bw_input_weights,
const TfLiteBidirectionalSequenceRNNParams* params,
TfLiteTensor* scaling_factors, TfLiteTensor* input_quantized,
TfLiteTensor* aux_input_quantized, TfLiteTensor* fw_hidden_state_quantized,
TfLiteTensor* fw_hidden_state, TfLiteTensor* fw_output,
TfLiteTensor* bw_hidden_state_quantized, TfLiteTensor* bw_hidden_state,
TfLiteTensor* bw_output, TfLiteTensor* zero_points,
TfLiteTensor* accum_scratch, TfLiteTensor* fw_row_sums,
TfLiteTensor* bw_row_sums, bool* fw_compute_row_sums,
bool* bw_compute_row_sums) {
const bool time_major = params->time_major;
const int batch_size =
(time_major) ? input->dims->data[1] : input->dims->data[0];
const int max_time =
(time_major) ? input->dims->data[0] : input->dims->data[1];
const int input_size = input->dims->data[2];
const int aux_input_size = (aux_input) ? aux_input->dims->data[2] : 0;
const int fw_num_units = fw_input_weights->dims->data[0];
const float* fw_bias_ptr = GetTensorData<float>(fw_bias);
const int8_t* fw_input_weights_ptr = GetTensorData<int8_t>(fw_input_weights);
float fw_input_weights_scale = fw_input_weights->params.scale;
const int8_t* fw_recurrent_weights_ptr =
GetTensorData<int8_t>(fw_recurrent_weights);
float fw_recurrent_weights_scale = fw_recurrent_weights->params.scale;
const int bw_num_units = bw_input_weights->dims->data[0];
const float* bw_bias_ptr = GetTensorData<float>(bw_bias);
const int8_t* bw_input_weights_ptr = GetTensorData<int8_t>(bw_input_weights);
float bw_input_weights_scale = bw_input_weights->params.scale;
const int8_t* bw_recurrent_weights_ptr =
GetTensorData<int8_t>(bw_recurrent_weights);
float bw_recurrent_weights_scale = bw_recurrent_weights->params.scale;
const int8_t* aux_fw_input_weights_ptr = nullptr;
float aux_fw_input_weights_scale = 0.0f;
const int8_t* aux_bw_input_weights_ptr = nullptr;
float aux_bw_input_weights_scale = 0.0f;
int8_t* aux_quantized_input_ptr = nullptr;
if (aux_input_size > 0) {
aux_fw_input_weights_ptr = GetTensorData<int8_t>(aux_fw_input_weights);
aux_fw_input_weights_scale = aux_fw_input_weights->params.scale;
aux_bw_input_weights_ptr = GetTensorData<int8_t>(aux_bw_input_weights);
aux_bw_input_weights_scale = aux_bw_input_weights->params.scale;
aux_quantized_input_ptr = GetTensorData<int8_t>(aux_input_quantized);
}
int8_t* quantized_input_ptr = GetTensorData<int8_t>(input_quantized);
int8_t* fw_quantized_hidden_state_ptr =
GetTensorData<int8_t>(fw_hidden_state_quantized);
int8_t* bw_quantized_hidden_state_ptr =
GetTensorData<int8_t>(bw_hidden_state_quantized);
float* scaling_factors_ptr = GetTensorData<float>(scaling_factors);
int32_t* accum_scratch_ptr = GetTensorData<int32_t>(accum_scratch);
int32_t* zero_points_ptr = nullptr;
int32_t* fw_row_sums_ptr = nullptr;
int32_t* bw_row_sums_ptr = nullptr;
if (params->asymmetric_quantize_inputs) {
zero_points_ptr = GetTensorData<int32_t>(zero_points);
fw_row_sums_ptr = GetTensorData<int32_t>(fw_row_sums);
bw_row_sums_ptr = GetTensorData<int32_t>(bw_row_sums);
}
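// Output strides: when outputs are merged, both cells write interleaved into
// fw_output, so each steps by fw_num_units + bw_num_units per time step.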
const int fw_output_step =
params->merge_outputs ? fw_num_units + bw_num_units : fw_num_units;
const int bw_output_step =
params->merge_outputs ? fw_num_units + bw_num_units : bw_num_units;
if (time_major) {
for (int t = 0; t < max_time; t++) {
float* fw_hidden_state_ptr_batch = GetTensorData<float>(fw_hidden_state);
for (int s = 0; s < max_time; s++) {
const float* input_ptr_batch =
GetTensorData<float>(input) + s * input_size * batch_size;
const float* aux_input_ptr_batch =
(aux_input != nullptr)
? GetTensorData<float>(aux_input) + s * input_size * batch_size
: nullptr;
float* output_ptr_batch =
GetTensorData<float>(fw_output) + s * fw_output_step * batch_size;
kernel_utils::RnnBatchStep(
input_ptr_batch, fw_input_weights_ptr, fw_input_weights_scale,
aux_input_ptr_batch, aux_fw_input_weights_ptr,
aux_fw_input_weights_scale, fw_recurrent_weights_ptr,
fw_recurrent_weights_scale, fw_bias_ptr, input_size, aux_input_size,
fw_num_units, batch_size, fw_output_step, params->activation,
quantized_input_ptr, aux_quantized_input_ptr,
fw_quantized_hidden_state_ptr, scaling_factors_ptr,
fw_hidden_state_ptr_batch, output_ptr_batch,
params->asymmetric_quantize_inputs, zero_points_ptr,
accum_scratch_ptr, fw_row_sums_ptr, fw_compute_row_sums);
}
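// Backward cell.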
float* bw_hidden_state_ptr_batch = GetTensorData<float>(bw_hidden_state);
for (int s = max_time - 1; s >= 0; s--) {
const float* input_ptr_batch =
GetTensorData<float>(bw_input) + s * input_size * batch_size;
const float* aux_input_ptr_batch =
(aux_input != nullptr)
? GetTensorData<float>(aux_input) + s * input_size * batch_size
: nullptr;
float* output_ptr_batch =
(params->merge_outputs
? GetTensorData<float>(fw_output) + fw_num_units
: GetTensorData<float>(bw_output)) +
s * bw_output_step * batch_size;
kernel_utils::RnnBatchStep(
input_ptr_batch, bw_input_weights_ptr, bw_input_weights_scale,
aux_input_ptr_batch, aux_bw_input_weights_ptr,
aux_bw_input_weights_scale, bw_recurrent_weights_ptr,
bw_recurrent_weights_scale, bw_bias_ptr, input_size, aux_input_size,
bw_num_units, batch_size, bw_output_step, params->activation,
quantized_input_ptr, aux_quantized_input_ptr,
bw_quantized_hidden_state_ptr, scaling_factors_ptr,
bw_hidden_state_ptr_batch, output_ptr_batch,
params->asymmetric_quantize_inputs, zero_points_ptr,
accum_scratch_ptr, bw_row_sums_ptr, bw_compute_row_sums);
}
}
} else {
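// Batch-major layout: process one batch at a time.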
for (int b = 0; b < batch_size; b++) {
float* fw_hidden_state_ptr_batch =
GetTensorData<float>(fw_hidden_state) + b * fw_num_units;
float* fw_output_offset =
GetTensorData<float>(fw_output) + b * fw_output_step * max_time;
for (int s = 0; s < max_time; s++) {
const float* input_ptr_batch = GetTensorData<float>(input) +
b * input_size * max_time +
s * input_size;
const float* aux_input_ptr_batch =
(aux_input != nullptr)
? GetTensorData<float>(aux_input) + b * input_size * max_time +
s * input_size
: nullptr;
float* output_ptr_batch = fw_output_offset + s * fw_output_step;
kernel_utils::RnnBatchStep(
input_ptr_batch, fw_input_weights_ptr, fw_input_weights_scale,
aux_input_ptr_batch, aux_fw_input_weights_ptr,
aux_fw_input_weights_scale, fw_recurrent_weights_ptr,
fw_recurrent_weights_scale, fw_bias_ptr, input_size, aux_input_size,
fw_num_units, 1, fw_output_step, params->activation,
quantized_input_ptr, aux_quantized_input_ptr,
fw_quantized_hidden_state_ptr, scaling_factors_ptr,
fw_hidden_state_ptr_batch, output_ptr_batch,
params->asymmetric_quantize_inputs, zero_points_ptr,
accum_scratch_ptr, fw_row_sums_ptr, fw_compute_row_sums);
}
float* bw_hidden_state_ptr_batch =
GetTensorData<float>(bw_hidden_state) + b * bw_num_units;
float* bw_output_offset =
params->merge_outputs
? GetTensorData<float>(fw_output) +
b * bw_output_step * max_time + fw_num_units
: GetTensorData<float>(bw_output) + b * bw_output_step * max_time;
for (int s = max_time - 1; s >= 0; s--) {
const float* input_ptr_batch = GetTensorData<float>(input) +
b * input_size * max_time +
s * input_size;
const float* aux_input_ptr_batch =
(aux_input != nullptr)
? GetTensorData<float>(aux_input) + b * input_size * max_time +
s * input_size
: nullptr;
float* output_ptr_batch = bw_output_offset + s * bw_output_step;
kernel_utils::RnnBatchStep(
input_ptr_batch, bw_input_weights_ptr, bw_input_weights_scale,
aux_input_ptr_batch, aux_bw_input_weights_ptr,
aux_bw_input_weights_scale, bw_recurrent_weights_ptr,
bw_recurrent_weights_scale, bw_bias_ptr, input_size, aux_input_size,
bw_num_units, 1, bw_output_step, params->activation,
quantized_input_ptr, aux_quantized_input_ptr,
bw_quantized_hidden_state_ptr, scaling_factors_ptr,
bw_hidden_state_ptr_batch, output_ptr_batch,
params->asymmetric_quantize_inputs, zero_points_ptr,
accum_scratch_ptr, bw_row_sums_ptr, bw_compute_row_sums);
}
}
}
return kTfLiteOk;
}
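// Dispatches on the weight type: float32 weights use the float kernel, while
// uint8/int8 weights use the hybrid kernel together with the temporary
// tensors needed for on-the-fly quantization.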
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const auto* params = reinterpret_cast<TfLiteBidirectionalSequenceRNNParams*>(
node->builtin_data);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const TfLiteTensor* fw_input_weights;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kFwWeightsTensor,
&fw_input_weights));
const TfLiteTensor* fw_recurrent_weights;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kFwRecurrentWeightsTensor,
&fw_recurrent_weights));
const TfLiteTensor* fw_bias;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kFwBiasTensor, &fw_bias));
const TfLiteTensor* bw_input_weights;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kBwWeightsTensor,
&bw_input_weights));
const TfLiteTensor* bw_recurrent_weights;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kBwRecurrentWeightsTensor,
&bw_recurrent_weights));
const TfLiteTensor* bw_bias;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kBwBiasTensor, &bw_bias));
const TfLiteTensor* aux_input =
GetOptionalInputTensor(context, node, kAuxInputTensor);
const TfLiteTensor* fw_aux_input_weights =
GetOptionalInputTensor(context, node, kFwAuxWeightsTensor);
const TfLiteTensor* bw_aux_input_weights =
GetOptionalInputTensor(context, node, kBwAuxWeightsTensor);
TfLiteTensor* fw_hidden_state =
GetVariableInput(context, node, kFwHiddenStateTensor);
TFLITE_DCHECK(fw_hidden_state != nullptr);
TfLiteTensor* bw_hidden_state =
GetVariableInput(context, node, kBwHiddenStateTensor);
TFLITE_DCHECK(bw_hidden_state != nullptr);
TfLiteTensor* fw_output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kFwOutputTensor, &fw_output));
TfLiteTensor* bw_output = params->merge_outputs
? nullptr
: GetOutput(context, node, kBwOutputTensor);
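// If there are no cross-linking (auxiliary) weights but an auxiliary input is
// present, the op runs in non-stacking mode: the backward cell reads its
// sequence from the auxiliary input instead of `input`, and no auxiliary
// input is passed down to the kernels.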
const bool has_previous_bw_output = (aux_input != nullptr);
const bool use_aux_input = (fw_aux_input_weights != nullptr);
const bool non_stacking_mode = !use_aux_input && has_previous_bw_output;
const TfLiteTensor* bw_input = non_stacking_mode ? aux_input : input;
const TfLiteTensor* real_aux_input = non_stacking_mode ? nullptr : aux_input;
switch (fw_input_weights->type) {
case kTfLiteFloat32:
return EvalFloat(input, bw_input, fw_input_weights, fw_recurrent_weights,
fw_bias, bw_input_weights, bw_recurrent_weights, bw_bias,
real_aux_input, fw_aux_input_weights,
bw_aux_input_weights, params, fw_hidden_state, fw_output,
bw_hidden_state, bw_output);
case kTfLiteUInt8:
case kTfLiteInt8: {
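// Hybrid path: gather the scratch tensors used for on-the-fly quantization
// of the inputs and hidden states.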
TfLiteTensor* input_quantized;
TF_LITE_ENSURE_OK(
context,
GetTemporarySafe(context, node, kInputQuantized, &input_quantized));
TfLiteTensor* fw_hidden_state_quantized;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, kFwHiddenStateQuantized,
&fw_hidden_state_quantized));
TfLiteTensor* bw_hidden_state_quantized;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, kBwHiddenStateQuantized,
&bw_hidden_state_quantized));
TfLiteTensor* scaling_factors;
TF_LITE_ENSURE_OK(
context,
GetTemporarySafe(context, node, kScalingFactors, &scaling_factors));
TfLiteTensor* zero_points;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, kZeroPoints, &zero_points));
TfLiteTensor* accum_scratch;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, kAccumScratch,
&accum_scratch));
TfLiteTensor* fw_row_sums;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, kFwRowSums, &fw_row_sums));
TfLiteTensor* bw_row_sums;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, kBwRowSums, &bw_row_sums));
TfLiteTensor* aux_input_quantized =
use_aux_input ? GetTemporary(context, node, kAuxInputQuantized)
: nullptr;
auto* op_data = reinterpret_cast<OpData*>(node->user_data);
return EvalHybrid(
input, bw_input, fw_input_weights, fw_recurrent_weights, fw_bias,
bw_input_weights, bw_recurrent_weights, bw_bias, real_aux_input,
fw_aux_input_weights, bw_aux_input_weights, params, scaling_factors,
input_quantized, aux_input_quantized, fw_hidden_state_quantized,
fw_hidden_state, fw_output, bw_hidden_state_quantized,
bw_hidden_state, bw_output, zero_points, accum_scratch, fw_row_sums,
bw_row_sums, &op_data->fw_compute_row_sums,
&op_data->bw_compute_row_sums);
}
default:
TF_LITE_KERNEL_LOG(context, "Type %s is not currently supported.",
TfLiteTypeGetName(fw_input_weights->type));
return kTfLiteError;
}
}
}
TfLiteRegistration* Register_BIDIRECTIONAL_SEQUENCE_RNN() {
static TfLiteRegistration r = {
bidirectional_sequence_rnn::Init, bidirectional_sequence_rnn::Free,
bidirectional_sequence_rnn::Prepare, bidirectional_sequence_rnn::Eval};
return &r;
}
}
}
}

#include <algorithm>
#include <functional>
#include <initializer_list>
#include <iterator>
#include <tuple>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
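// Controls how the optional auxiliary input is wired in the test models:
// no auxiliary input, an auxiliary input fed through dedicated cross-linking
// weight tensors, or an auxiliary input whose cross-linking weight tensors
// are left null.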
enum class AuxInputMode {
kNoAuxInput,
kCrossLinking,
kNoCrossLinking,
};
using ::testing::ElementsAreArray;
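// Input sequence for the closed-box tests: 16 time steps of 8 features, fed
// identically to both batches.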
static float rnn_input[] = {
0.23689353, 0.285385, 0.037029743, -0.19858193, -0.27569133,
0.43773448, 0.60379338, 0.35562468, -0.69424844, -0.93421471,
-0.87287879, 0.37144363, -0.62476718, 0.23791671, 0.40060222,
0.1356622, -0.99774903, -0.98858172, -0.38952237, -0.47685933,
0.31073618, 0.71511042, -0.63767755, -0.31729108, 0.33468103,
0.75801885, 0.30660987, -0.37354088, 0.77002847, -0.62747043,
-0.68572164, 0.0069220066, 0.65791464, 0.35130811, 0.80834007,
-0.61777675, -0.21095741, 0.41213346, 0.73784804, 0.094794154,
0.47791874, 0.86496925, -0.53376222, 0.85315156, 0.10288584,
0.86684, -0.011186242, 0.10513687, 0.87825835, 0.59929144,
0.62827742, 0.18899453, 0.31440187, 0.99059987, 0.87170351,
-0.35091716, 0.74861872, 0.17831337, 0.2755419, 0.51864719,
0.55084288, 0.58982027, -0.47443086, 0.20875752, -0.058871567,
-0.66609079, 0.59098077, 0.73017097, 0.74604273, 0.32882881,
-0.17503482, 0.22396147, 0.19379807, 0.29120302, 0.077113032,
-0.70331609, 0.15804303, -0.93407321, 0.40182066, 0.036301374,
0.66521823, 0.0300982, -0.7747041, -0.02038002, 0.020698071,
-0.90300065, 0.62870288, -0.23068321, 0.27531278, -0.095755219,
-0.712036, -0.17384434, -0.50593495, -0.18646687, -0.96508682,
0.43519354, 0.14744234, 0.62589407, 0.1653645, -0.10651493,
-0.045277178, 0.99032974, -0.88255352, -0.85147917, 0.28153265,
0.19455957, -0.55479527, -0.56042433, 0.26048636, 0.84702539,
0.47587705, -0.074295521, -0.12287641, 0.70117295, 0.90532446,
0.89782166, 0.79817224, 0.53402734, -0.33286154, 0.073485017,
-0.56172788, -0.044897556, 0.89964068, -0.067662835, 0.76863563,
0.93455386, -0.6324693, -0.083922029};
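// Expected forward-cell output for one batch: 16 time steps x 16 units.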
static float rnn_golden_fw_output[] = {
0.496726, 0, 0.965996, 0, 0.0584254, 0,
0, 0.12315, 0, 0, 0.612266, 0.456601,
0, 0.52286, 1.16099, 0.0291232,
0, 0, 0.524901, 0, 0, 0,
0, 1.02116, 0, 1.35762, 0, 0.356909,
0.436415, 0.0355727, 0, 0,
0, 0, 0, 0.262335, 0, 0,
0, 1.33992, 0, 2.9739, 0, 0,
1.31914, 2.66147, 0, 0,
0.942568, 0, 0, 0, 0.025507, 0,
0, 0, 0.321429, 0.569141, 1.25274, 1.57719,
0.8158, 1.21805, 0.586239, 0.25427,
1.04436, 0, 0.630725, 0, 0.133801, 0.210693,
0.363026, 0, 0.533426, 0, 1.25926, 0.722707,
0, 1.22031, 1.30117, 0.495867,
0.222187, 0, 0.72725, 0, 0.767003, 0,
0, 0.147835, 0, 0, 0, 0.608758,
0.469394, 0.00720298, 0.927537, 0,
0.856974, 0.424257, 0, 0, 0.937329, 0,
0, 0, 0.476425, 0, 0.566017, 0.418462,
0.141911, 0.996214, 1.13063, 0,
0.967899, 0, 0, 0, 0.0831304, 0,
0, 1.00378, 0, 0, 0, 1.44818,
1.01768, 0.943891, 0.502745, 0,
0.940135, 0, 0, 0, 0, 0,
0, 2.13243, 0, 0.71208, 0.123918, 1.53907,
1.30225, 1.59644, 0.70222, 0,
0.804329, 0, 0.430576, 0, 0.505872, 0.509603,
0.343448, 0, 0.107756, 0.614544, 1.44549, 1.52311,
0.0454298, 0.300267, 0.562784, 0.395095,
0.228154, 0, 0.675323, 0, 1.70536, 0.766217,
0, 0, 0, 0.735363, 0.0759267, 1.91017,
0.941888, 0, 0, 0,
0, 0, 1.5909, 0, 0, 0,
0, 0.5755, 0, 0.184687, 0, 1.56296,
0.625285, 0, 0, 0,
0, 0, 0.0857888, 0, 0, 0,
0, 0.488383, 0.252786, 0, 0, 0,
1.02817, 1.85665, 0, 0,
0.00981836, 0, 1.06371, 0, 0, 0,
0, 0, 0, 0.290445, 0.316406, 0,
0.304161, 1.25079, 0.0707152, 0,
0.986264, 0.309201, 0, 0, 0, 0,
0, 1.64896, 0.346248, 0, 0.918175, 0.78884,
0.524981, 1.92076, 2.07013, 0.333244,
0.415153, 0.210318, 0, 0, 0, 0,
0, 2.02616, 0, 0.728256, 0.84183, 0.0907453,
0.628881, 3.58099, 1.49974, 0};
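// Expected backward-cell output for one batch: 16 time steps x 16 units.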
static float rnn_golden_bw_output[] = {
0.496726, 0, 1.00883, 0, 0.0584256, 0, 0,
0.236412, 0, 0, 0.612267, 0.487726, 0, 0.54883,
1.16099, 0.0291233, 0, 0, 0.428302, 0, 0,
0, 0, 1.13262, 0, 1.64415, 0, 0.311249,
0.570804, 0.259696, 0, 0, 0, 0, 0,
0.262334, 0, 0, 0, 1.23781, 0, 2.86532,
0, 0, 1.34389, 2.76409, 0, 0, 1.03969,
0, 0.00410865, 0, 0.0470295, 0, 0, 0,
0.371556, 0.27175, 1.36614, 1.63956, 0.683887, 1.06176, 0.719552,
0.301314, 0.971195, 0, 0.697143, 0, 0.215219, 0.210693,
0.363027, 0, 0.501283, 0, 1.13399, 0.623774, 0,
1.09851, 1.33313, 0.470441, 0.210965, 0, 0.664178, 0,
0.839686, 0, 0, 0.147834, 0, 0, 0,
0.58786, 0.490128, 0, 0.905806, 0, 0.932134, 0.424257,
0, 0, 0.860629, 0, 0, 0, 0.476425,
0, 0.566017, 0.513721, 0.207341, 1.09508, 1.08385, 0,
0.973787, 0, 0, 0, 0, 0, 0,
1.20698, 0, 0, 0, 1.56135, 1.12369, 0.99588,
0.459803, 0, 0.915854, 0, 0, 0, 0,
0, 0, 2.03206, 0, 0.773264, 0.267228, 1.55012,
1.202, 1.51611, 0.701202, 0, 0.725088, 0, 0.509069,
0, 0.671349, 0.581129, 0.343447, 0, 0.107755, 0.611838,
1.4331, 1.55871, 0.015242, 0.140624, 0.492562, 0.395095, 0.147722,
0, 0.784925, 0, 1.65477, 0.715257, 0, 0,
0, 0.685024, 0, 1.89505, 1.00037, 0, 0,
0, 0, 0, 1.52659, 0, 0, 0,
0, 0.618583, 0, 0.11115, 0, 1.37194, 0.630225,
0, 0, 0, 0, 0, 0.0322124, 0,
0, 0, 0, 0.430834, 0.252786, 0, 0,
0, 0.991297, 1.98451, 0, 0, 0.111511, 0,
1.05513, 0, 0, 0, 0, 0, 0,
0.290445, 0.412559, 0.0429958, 0.256564, 1.27858, 0.289948, 0,
1.01693, 0.327141, 0, 0, 0, 0, 0,
1.83508, 0.346248, 0, 0.961535, 0.790026, 0.552203, 2.13457,
2.19233, 0.333244, 0.316526, 0.179398, 0, 0, 0,
0, 0, 1.86126, 0, 0.728256, 0.750013, 0.011861,
0.576383, 3.38891, 1.29273, 0};
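// Input-to-hidden weights shared by the forward and backward cells:
// 16 units x 8 inputs.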
const std::initializer_list<float> weights = {
0.461459, 0.153381, 0.529743, -0.00371218, 0.676267, -0.211346,
0.317493, 0.969689, -0.343251, 0.186423, 0.398151, 0.152399,
0.448504, 0.317662, 0.523556, -0.323514, 0.480877, 0.333113,
-0.757714, -0.674487, -0.643585, 0.217766, -0.0251462, 0.79512,
-0.595574, -0.422444, 0.371572, -0.452178, -0.556069, -0.482188,
-0.685456, -0.727851, 0.841829, 0.551535, -0.232336, 0.729158,
-0.00294906, -0.69754, 0.766073, -0.178424, 0.369513, -0.423241,
0.548547, -0.0152023, -0.757482, -0.85491, 0.251331, -0.989183,
0.306261, -0.340716, 0.886103, -0.0726757, -0.723523, -0.784303,
0.0354295, 0.566564, -0.485469, -0.620498, 0.832546, 0.697884,
-0.279115, 0.294415, -0.584313, 0.548772, 0.0648819, 0.968726,
0.723834, -0.0080452, -0.350386, -0.272803, 0.115121, -0.412644,
-0.824713, -0.992843, -0.592904, -0.417893, 0.863791, -0.423461,
-0.147601, -0.770664, -0.479006, 0.654782, 0.587314, -0.639158,
0.816969, -0.337228, 0.659878, 0.73107, 0.754768, -0.337042,
0.0960841, 0.368357, 0.244191, -0.817703, -0.211223, 0.442012,
0.37225, -0.623598, -0.405423, 0.455101, 0.673656, -0.145345,
-0.511346, -0.901675, -0.81252, -0.127006, 0.809865, -0.721884,
0.636255, 0.868989, -0.347973, -0.10179, -0.777449, 0.917274,
0.819286, 0.206218, -0.00785118, 0.167141, 0.45872, 0.972934,
-0.276798, 0.837861, 0.747958, -0.0151566, -0.330057, -0.469077,
0.277308, 0.415818};
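// Input for the end-to-end test model; golden_endtoend_output below holds the
// corresponding expected output.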
static float endtoend_input[] = {
0.996808, 0.060710, 0.981855, 0.570017, 0.525164, 0.796859, 0.696547,
0.505925, 0.991844, 0.461208, 0.949371, 0.027624, 0.539236, 0.841854,
0.915222, 0.538569, 0.069375, 0.237905, 0.903700, 0.441703, 0.536196,
0.402724, 0.761635, 0.025063, 0.082592, 0.688245, 0.239310, 0.256931,
0.658900, 0.105695, 0.301983, 0.655708, 0.166405, 0.283837, 0.225725,
0.691569, 0.080696, 0.922272, 0.197494, 0.072540, 0.383481, 0.146865,
0.100163, 0.922717, 0.988720, 0.015386, 0.461286, 0.058095, 0.253290,
0.364986, 0.499797, 0.789487, 0.767709, 0.261433, 0.814549, 0.850302,
0.949678, 0.053859, 0.107233, 0.608577, 0.159554, 0.409215, 0.264285,
0.325960, 0.693053, 0.490011, 0.017529, 0.773749, 0.412283, 0.215023,
0.846288, 0.795764, 0.361889, 0.946452, 0.718481, 0.350608, 0.961837,
0.179767, 0.408703, 0.215128, 0.544753, 0.908500, 0.004614, 0.312462,
0.169933, 0.819163, 0.162764, 0.119611, 0.873022, 0.269997, 0.728188,
0.032576, 0.679212, 0.992474, 0.358536, 0.372265, 0.482484, 0.376065,
0.146014, 0.894767, 0.591088, 0.992302, 0.690531, 0.952977, 0.938754,
0.409012, 0.303585, 0.900591, 0.588780, 0.712287, 0.115719, 0.133533,
0.620788, 0.120334, 0.445995, 0.790720, 0.939497, 0.608759, 0.910331,
0.812519, 0.878756, 0.638519, 0.845096, 0.557968, 0.630993, 0.203632,
0.930233, 0.113477, 0.579697, 0.076247, 0.008244, 0.170785, 0.068549,
0.698776, 0.123761, 0.007303, 0.107788, 0.427346, 0.907894, 0.696568,
0.139633, 0.023613, 0.830100, 0.760421, 0.143947, 0.276096, 0.551141,
0.083444, 0.884855, 0.461472, 0.895963, 0.763611, 0.099992, 0.741059,
0.321579, 0.730984, 0.944691, 0.251812, 0.844461, 0.524388, 0.328059,
0.852706, 0.695172, 0.396607, 0.551482, 0.818934, 0.403910, 0.659270,
0.246280, 0.311804, 0.355838, 0.385913, 0.335418, 0.185938, 0.146334,
0.479364, 0.462034, 0.697475, 0.562808, 0.346888, 0.158948, 0.458771,
0.110499, 0.258939, 0.199830, 0.432078, 0.989924, 0.144521, 0.683890,
0.834385, 0.668908, 0.011949, 0.687091, 0.364081, 0.408556, 0.238572,
0.183015, 0.812466, 0.897842, 0.429294, 0.124271, 0.253680, 0.815207,
0.459688, 0.439618, 0.961541, 0.939053, 0.901651, 0.659016, 0.501861,
0.248539, 0.817964, 0.960632, 0.359038, 0.076903, 0.160462, 0.791117,
0.066826, 0.304983, 0.475007, 0.901211, 0.973891, 0.486955, 0.588302,
0.337972, 0.895512, 0.826874, 0.520987, 0.707978, 0.724716, 0.950281,
0.832249, 0.978396, 0.765488, 0.291937, 0.418014, 0.727029, 0.230990,
0.319665, 0.386045, 0.732850, 0.568204, 0.204009, 0.693482, 0.927242,
0.280912, 0.853944, 0.718359, 0.347738, 0.158927, 0.193366, 0.248950,
0.132818, 0.680321, 0.837252, 0.470790, 0.575833, 0.664126, 0.991777,
0.283811, 0.388843, 0.942058, 0.116060, 0.367239, 0.707546, 0.407997,
0.785253, 0.434575, 0.638986, 0.104917, 0.820620, 0.371837, 0.673121,
0.024629, 0.065319, 0.600363, 0.305541, 0.919263, 0.318722, 0.653279,
0.078190, 0.512088, 0.902229, 0.211009, 0.192409, 0.739480, 0.681799,
0.768242, 0.403607, 0.673576, 0.052052, 0.792450, 0.615634, 0.168112,
0.159689, 0.323180, 0.576109, 0.944941, 0.757755, 0.215095, 0.049858,
0.578375, 0.586932, 0.722979, 0.603003, 0.652251, 0.323343, 0.908544,
0.571514, 0.642065, 0.561823, 0.649704, 0.154153, 0.464051, 0.860713,
0.346562, 0.203532, 0.542512, 0.114804, 0.607139, 0.216088, 0.166856,
0.399588, 0.831722, 0.334968, 0.559277, 0.154902, 0.911077, 0.504218,
0.912656, 0.126172, 0.554076, 0.491031, 0.713104, 0.277055, 0.094034,
0.365355, 0.600398, 0.002578, 0.936869, 0.242463, 0.564401, 0.586574,
0.396616, 0.028452, 0.447287, 0.743178, 0.231984, 0.989799, 0.857982,
0.839122, 0.205887, 0.024838, 0.238711, 0.037608, 0.359806, 0.797987,
0.192510, 0.270883, 0.302205, 0.105166, 0.397055, 0.856281, 0.596197,
0.110160, 0.133336, 0.690231, 0.475515, 0.733734, 0.692809, 0.412384,
0.976196, 0.257209, 0.998958, 0.372812, 0.285661, 0.446245, 0.115990,
0.517645, 0.436044, 0.973972, 0.356767, 0.641930, 0.998810, 0.595478,
0.679539, 0.358617, 0.393465, 0.872049, 0.629500, 0.695670, 0.977215,
0.026555, 0.551951, 0.573412, 0.136715, 0.685287, 0.263643, 0.612229,
0.419020, 0.956451, 0.024613, 0.395216, 0.213661, 0.023572, 0.768029,
0.499322, 0.469816, 0.884019, 0.016967, 0.905860, 0.857991, 0.373734,
0.547791, 0.856802, 0.969211, 0.227330, 0.215418, 0.362676, 0.099378,
0.844918, 0.058346, 0.076594, 0.871473, 0.610297, 0.650006, 0.008188,
0.295583, 0.913648, 0.620417, 0.714603, 0.870100, 0.645031, 0.109820,
0.083760, 0.668602, 0.877849, 0.583082, 0.138419, 0.761868, 0.600049,
0.044279, 0.619859, 0.973783, 0.592069, 0.476661, 0.942994, 0.819399,
0.692079, 0.305670, 0.918778, 0.536997, 0.364016, 0.995371, 0.408470,
0.974313, 0.645377, 0.416658, 0.269896, 0.559025, 0.037075, 0.984499,
0.429125, 0.682105, 0.094319, 0.512885, 0.350707, 0.972168, 0.095967,
0.489126, 0.734035, 0.696016, 0.533405, 0.353894, 0.669799, 0.125474,
0.830555, 0.612793, 0.944873, 0.522634, 0.918463, 0.863651, 0.059631,
0.282479, 0.859022, 0.468101, 0.256791, 0.504398, 0.884758, 0.526687,
0.063423, 0.921833, 0.511186, 0.492548, 0.603939, 0.605505, 0.005433,
0.954646, 0.577673, 0.101400, 0.443772, 0.311708, 0.797417, 0.977176,
0.665602, 0.467216, 0.102650, 0.496157, 0.080009, 0.047524, 0.018791,
0.998471, 0.911174, 0.078422, 0.280950, 0.770196, 0.546523, 0.537741,
0.274594, 0.431281, 0.064428, 0.338017, 0.353115, 0.575615, 0.830565,
0.957053, 0.181120, 0.835998, 0.911699, 0.758793, 0.937398, 0.355471,
0.070501, 0.734815, 0.332647, 0.736103, 0.202031, 0.435297, 0.232261,
0.282039, 0.482821, 0.251052, 0.280511, 0.393995, 0.329474, 0.561460,
0.164191, 0.875997, 0.099202, 0.438785, 0.307278, 0.163630, 0.776802,
0.660393, 0.739244, 0.607367, 0.617446, 0.920364, 0.443365, 0.529145,
0.679157, 0.380763, 0.884616, 0.749658, 0.115578, 0.217263, 0.485761,
0.317609, 0.652560, 0.718021, 0.599648, 0.135381, 0.969073, 0.880159,
0.529376, 0.298547, 0.441619, 0.693567, 0.174544, 0.540821, 0.132351,
0.481822, 0.704450, 0.909153, 0.142215, 0.443695, 0.516520, 0.759661,
0.364059, 0.959885, 0.288806, 0.043216, 0.340648, 0.173422, 0.792874,
0.456226, 0.390685, 0.278634, 0.773834, 0.043245, 0.996656, 0.373483,
0.178625, 0.965729, 0.253641, 0.708001, 0.264276, 0.695260, 0.401568,
0.438820, 0.236081, 0.533919, 0.920642, 0.940531, 0.443072, 0.062857,
0.384226, 0.959592, 0.822518, 0.748285, 0.919477, 0.111325, 0.791501,
0.260124, 0.284747, 0.584375, 0.716350, 0.675431, 0.863009, 0.490184,
0.718676, 0.859665, 0.863666, 0.897301, 0.825393, 0.117308, 0.605302,
0.089669, 0.812568, 0.006870, 0.528489, 0.048649, 0.540788, 0.449131,
0.989180, 0.983860, 0.511988, 0.373407, 0.943452, 0.334506, 0.121692,
0.862929, 0.445831, 0.913193, 0.123053, 0.730578, 0.497568, 0.839402,
0.406009, 0.360577, 0.329586, 0.124685, 0.220241, 0.193253, 0.021986,
0.045634, 0.310560, 0.627288, 0.135303, 0.123128, 0.634158, 0.663792,
0.171777, 0.174946, 0.112923, 0.160958, 0.158806, 0.624911, 0.534364,
0.102259, 0.959418, 0.656056, 0.965187, 0.405249, 0.569249, 0.088240,
0.135827, 0.066817, 0.927642, 0.541836, 0.427393, 0.257229, 0.666520,
0.647634, 0.450481, 0.688506, 0.693269, 0.761042, 0.315794, 0.828572,
0.884170, 0.949952, 0.492364, 0.055947, 0.124898, 0.605288, 0.216905,
0.283705, 0.230199, 0.751269, 0.385963, 0.189616, 0.407326, 0.351151,
0.594865, 0.976575, 0.439391, 0.730692, 0.043392, 0.367033, 0.272527,
0.470785, 0.624261, 0.939048, 0.118419, 0.074743, 0.627554, 0.811688,
0.835784, 0.943348, 0.640260, 0.719954, 0.893300, 0.132625, 0.775901,
0.018199, 0.737913, 0.992806, 0.301903, 0.968111, 0.744076, 0.687867,
0.157728, 0.151401, 0.039017, 0.752593, 0.127976, 0.478408, 0.483284,
0.171368, 0.845441, 0.755811, 0.642153, 0.469702, 0.694859, 0.760572,
0.544445, 0.322413, 0.572260, 0.380229, 0.265761, 0.212521, 0.100183,
0.159062, 0.345146, 0.876084, 0.177261, 0.083058, 0.868891, 0.479164,
0.051169, 0.612966, 0.167030, 0.208897, 0.764367, 0.206048, 0.961490,
0.892343, 0.684456, 0.444774, 0.063711, 0.529896, 0.200585, 0.705863,
0.999598, 0.895444, 0.466435, 0.544043, 0.217857, 0.038696, 0.924272,
0.483618, 0.251217, 0.024455, 0.642680, 0.596362, 0.900539, 0.819941,
0.679420, 0.769430, 0.299105, 0.730590, 0.382396, 0.466135, 0.939487,
0.146763, 0.672183, 0.900977, 0.039106, 0.356638, 0.345750, 0.102817,
0.886535, 0.546336, 0.808681, 0.886133, 0.441780, 0.275116, 0.430176,
0.659637, 0.313812, 0.354448, 0.143255, 0.565028, 0.378903, 0.785935,
0.161391, 0.279443, 0.605876, 0.840811, 0.048873, 0.904980, 0.571401,
0.431269, 0.371115, 0.510887, 0.578032, 0.043298, 0.411864, 0.617138,
0.399936, 0.757614, 0.719955, 0.286471, 0.303950, 0.528636, 0.172604,
0.745730, 0.803752, 0.602780, 0.405367, 0.117564, 0.957228, 0.548622,
0.682592, 0.336131, 0.334557, 0.843983, 0.615574, 0.940433, 0.684794,
0.664447, 0.845413, 0.256194, 0.095715, 0.216529, 0.767082, 0.673747,
0.259827, 0.178946, 0.290885, 0.659763, 0.936560, 0.010840, 0.946234,
0.240510, 0.539476, 0.118838, 0.986240, 0.343228, 0.721618, 0.391606,
0.460792, 0.678846, 0.940228, 0.143384, 0.014977, 0.274785, 0.987367,
0.630551, 0.215218, 0.672161, 0.294998, 0.060631, 0.928355, 0.390713,
0.277160, 0.695436, 0.064460, 0.536987, 0.874382, 0.355345, 0.196751,
0.810942, 0.366185, 0.142985, 0.051452, 0.905661, 0.261823, 0.037691,
0.248889, 0.983441, 0.429297, 0.709681, 0.662286, 0.369525, 0.853066,
0.677263, 0.644310, 0.840433, 0.307814, 0.859528, 0.512593, 0.602812,
0.920160, 0.440948, 0.993525, 0.197320, 0.136384, 0.057984, 0.734307,
0.010766, 0.413329, 0.931058, 0.821707, 0.779514, 0.074043, 0.873159,
0.685175, 0.335865, 0.910850, 0.934065, 0.319306, 0.340147, 0.643746,
0.981592, 0.709673, 0.496812, 0.658856, 0.353983, 0.337245, 0.966670,
0.213511, 0.849838, 0.569482, 0.133671, 0.290786, 0.563007, 0.330991,
0.427170, 0.620991, 0.065299, 0.437936, 0.034320, 0.996356, 0.259643,
0.813834, 0.070399, 0.132802, 0.499009, 0.406265, 0.043652, 0.433074,
0.725570, 0.383800, 0.076820, 0.707163, 0.093473, 0.573632, 0.366018,
0.447456, 0.910877, 0.332688, 0.660967, 0.760714, 0.902170, 0.794638,
0.051500, 0.465177, 0.125630, 0.478670, 0.086168, 0.190928, 0.916605,
0.120488, 0.187285, 0.176248, 0.934322, 0.257684, 0.309050, 0.433331,
0.663949, 0.352703, 0.866405, 0.389519, 0.736502, 0.943226, 0.096682,
0.829975, 0.516858, 0.462700, 0.277430, 0.427734, 0.795388, 0.938398,
0.188449, 0.697558, 0.733036, 0.239948, 0.162735, 0.858666, 0.718618,
0.248903, 0.049594, 0.635223, 0.369391, 0.236879, 0.811472, 0.303713,
0.494563, 0.120522, 0.737044, 0.158511, 0.473225, 0.603450, 0.548030,
0.209727, 0.546675, 0.644712, 0.039702, 0.063533, 0.107412, 0.317132,
0.491267, 0.902800, 0.255530, 0.679716, 0.600359, 0.988566, 0.919664,
0.763094, 0.847232, 0.638283, 0.011997, 0.896825, 0.273506, 0.381388,
0.133704, 0.084978, 0.685101, 0.628267, 0.205500, 0.422145, 0.786778,
0.678725, 0.025595, 0.334808, 0.888452, 0.572271, 0.979520, 0.928154,
0.635804, 0.086932, 0.245286, 0.127071, 0.989732, 0.500816, 0.806787,
0.590091, 0.489382, 0.726451, 0.353185, 0.336614, 0.364734, 0.365182,
0.233439, 0.638240, 0.746570, 0.367143, 0.723218, 0.431671, 0.995410,
0.928718, 0.853816, 0.782188, 0.607442, 0.879411, 0.116995, 0.495894,
0.451682, 0.096515, 0.424048, 0.087485, 0.183447, 0.669334, 0.214556,
0.173179, 0.170151, 0.021343, 0.763269, 0.659533, 0.747794, 0.116454,
0.996147, 0.112528, 0.481635, 0.229586, 0.750768, 0.228205, 0.596730,
0.473985, 0.659876, 0.592139, 0.402703, 0.513692, 0.374327, 0.010145,
0.393103, 0.491322, 0.506039, 0.844785, 0.587837, 0.930088, 0.932270,
0.771284, 0.599422, 0.146826, 0.944463, 0.769573, 0.168169, 0.707732,
0.429106, 0.915964, 0.824186, 0.425253, 0.028492, 0.305821, 0.654839,
0.779259, 0.534026, 0.251569, 0.253245, 0.193901, 0.843708, 0.655947,
0.707593, 0.218035, 0.666093, 0.100696, 0.709357, 0.172132, 0.945481,
0.297195, 0.102220, 0.877751, 0.068479, 0.701642, 0.024577, 0.012941,
0.471215, 0.192747, 0.720673, 0.900321, 0.108710, 0.544859, 0.325574,
0.137202, 0.850679, 0.980413, 0.916462, 0.384705, 0.231982, 0.169706,
0.578607, 0.075690, 0.825654, 0.286200, 0.293725, 0.491746, 0.386896,
0.003083, 0.663878, 0.332377, 0.300278, 0.766098, 0.210128, 0.368756,
0.467740, 0.234705, 0.381697, 0.938955, 0.427451, 0.102370, 0.839275,
0.536162, 0.647229, 0.164849, 0.673364, 0.497908, 0.145262, 0.589825,
0.882613, 0.377244, 0.759532, 0.461220, 0.452934, 0.585185, 0.747420,
0.746660, 0.076932, 0.134316, 0.749743, 0.740810, 0.466692, 0.050020,
0.506908, 0.676820, 0.418776, 0.974648, 0.911525, 0.800474, 0.913602,
0.338976, 0.902844, 0.752878, 0.875138, 0.550072, 0.917727, 0.548502,
0.047981, 0.062989, 0.138327, 0.930594, 0.440233, 0.897859, 0.391814,
0.893168, 0.483044, 0.139234, 0.639828, 0.559975, 0.273549, 0.389570,
0.300785, 0.740242, 0.439590, 0.807693, 0.417062, 0.858367, 0.782341,
0.328586, 0.658840, 0.695943, 0.667562, 0.561684, 0.448821, 0.542700,
0.111756, 0.366548, 0.091202, 0.159737, 0.429537, 0.229529, 0.090331,
0.869770, 0.127388, 0.482145, 0.762938, 0.610432, 0.621379, 0.402765,
0.170407, 0.894928, 0.792336, 0.471192, 0.635170, 0.231926, 0.278886,
0.052232, 0.090293, 0.061226, 0.380818, 0.749133, 0.757170, 0.048380,
0.310817, 0.205990, 0.591080, 0.422573, 0.572538, 0.682282, 0.582310,
0.002075, 0.911812, 0.672641, 0.871845, 0.039199, 0.154786, 0.634783,
0.649631, 0.776165, 0.037548, 0.820038, 0.671093, 0.829884, 0.291231,
0.306263, 0.061810, 0.570116, 0.358495, 0.152103, 0.631343, 0.739313,
0.901236, 0.388512, 0.787693, 0.212053, 0.594503, 0.378773, 0.634626,
0.167040, 0.061056, 0.216937, 0.169115, 0.972867, 0.889578, 0.040960,
0.012067, 0.044364, 0.675743, 0.661698, 0.820529, 0.713291, 0.481736,
0.491623, 0.543175, 0.772966, 0.797886, 0.604985, 0.343083, 0.156380,
0.757088, 0.974425, 0.895693, 0.658324, 0.362938, 0.683386, 0.870376,
0.957440, 0.062159, 0.505002, 0.124481, 0.123215, 0.721939, 0.293596,
0.096082, 0.611517, 0.334556, 0.108149, 0.655881, 0.010299, 0.769846,
0.476411, 0.723590, 0.251582, 0.968033, 0.266765, 0.024548, 0.765919,
0.871750, 0.367631, 0.922299, 0.628838, 0.342056, 0.817992, 0.287162,
0.704994, 0.501378, 0.157538, 0.662434, 0.563537, 0.662541, 0.786915,
0.686752, 0.384480, 0.080511, 0.782834, 0.995997, 0.415067, 0.890983,
0.651878, 0.425365, 0.660829, 0.128289, 0.148956, 0.912411, 0.096322,
0.415721, 0.936959, 0.862241, 0.287471, 0.304590, 0.784540, 0.916309,
0.646646, 0.602533, 0.203471, 0.351640, 0.103911, 0.361009, 0.014074,
0.667448, 0.023550, 0.800989, 0.354200, 0.408030, 0.881500, 0.137034,
0.404026, 0.296566, 0.028017, 0.055904, 0.721932, 0.688846, 0.184193,
0.870887, 0.601257, 0.280515, 0.286608, 0.538216, 0.142755, 0.574079,
0.842806, 0.927296, 0.490388, 0.489452, 0.529828, 0.693859, 0.841092,
0.633739, 0.054869, 0.855167, 0.301187, 0.078419, 0.656156, 0.655388,
0.486448, 0.537656, 0.792422, 0.890475, 0.834222, 0.820439, 0.946379,
0.556153, 0.509285, 0.130571, 0.427041, 0.110542, 0.411086, 0.713648,
0.648758, 0.553842, 0.287727, 0.491563, 0.481137, 0.778116, 0.981015,
0.010966, 0.471975, 0.822107, 0.644705, 0.526844, 0.677274, 0.945892,
0.605263, 0.333430, 0.601280, 0.091711, 0.871086, 0.393702, 0.982186,
0.705307, 0.214141, 0.928564, 0.261461, 0.723426, 0.059136, 0.688501,
0.833968, 0.470222, 0.402150, 0.482725, 0.024063, 0.689877, 0.974289,
0.505201, 0.467993, 0.955304, 0.516166, 0.939968, 0.777411, 0.160871,
0.466812, 0.454685, 0.106763, 0.072075, 0.788115, 0.708043, 0.163786,
0.659201, 0.101744, 0.145971, 0.364508, 0.315885, 0.074536, 0.625969,
0.039311, 0.133672, 0.314471, 0.873279, 0.603893, 0.716620, 0.356004,
0.627957, 0.406498, 0.330292, 0.133157, 0.874490, 0.285596, 0.649324,
0.814458, 0.063007, 0.810195, 0.281270, 0.517693, 0.916958, 0.353345,
0.305808, 0.625000, 0.517131, 0.965009, 0.726745, 0.663102, 0.329518,
0.042630, 0.737638, 0.955487, 0.081940, 0.871310, 0.269957, 0.955219,
0.475203, 0.986578, 0.311223, 0.103160, 0.393075, 0.641515, 0.236317,
0.267566, 0.927112, 0.885641, 0.082024, 0.990119, 0.695835, 0.363295,
0.507812, 0.612793, 0.716640, 0.813620, 0.237793, 0.233770, 0.778629,
0.964538, 0.896872, 0.108147, 0.007167, 0.634510, 0.063633, 0.089108,
0.505820, 0.333591, 0.044327, 0.981023, 0.320168, 0.355550, 0.084182,
0.713244, 0.997065, 0.320499, 0.980810, 0.924177, 0.206140, 0.062834,
0.914296, 0.901975, 0.426129, 0.422107, 0.514768, 0.142768, 0.235727,
0.752561, 0.376539, 0.014356, 0.717099, 0.273411, 0.122502, 0.724266,
0.907921, 0.186136, 0.813374, 0.413741, 0.519726, 0.857701, 0.394764,
0.839895, 0.213251, 0.478946, 0.553139, 0.210317, 0.799446, 0.533948,
0.134493, 0.005586, 0.596782, 0.048789, 0.907561, 0.022911, 0.470896,
0.422329, 0.165679, 0.706623, 0.174890, 0.542218, 0.720979, 0.891989,
0.815629, 0.843481, 0.616255, 0.723551, 0.029617, 0.429630, 0.137292,
0.549343, 0.287331, 0.532056, 0.389238, 0.500583, 0.011002, 0.942377,
0.710899, 0.810448, 0.476326, 0.845392, 0.816033, 0.073108, 0.894181,
0.723594, 0.096019, 0.365077, 0.145923, 0.261699, 0.071700, 0.320813,
0.803917, 0.792679, 0.212802, 0.619546, 0.636160, 0.829057, 0.343096,
0.665777, 0.258687, 0.480388, 0.215121, 0.546018, 0.012444, 0.604359,
0.046601, 0.023446, 0.546736, 0.757500, 0.833893, 0.023062, 0.602892,
0.649927, 0.096170, 0.497074, 0.373521, 0.192189, 0.862151, 0.519444,
0.453887, 0.933851, 0.840257, 0.257804, 0.726531, 0.053058, 0.877350,
0.362691, 0.882115, 0.220446, 0.028468, 0.140802, 0.700834, 0.243589,
0.686821, 0.713278, 0.847948, 0.733421, 0.736723, 0.394684, 0.490921,
0.570617, 0.417746, 0.093813, 0.220543, 0.513916, 0.590887, 0.594064,
0.706105, 0.453038, 0.113508, 0.159992, 0.386889, 0.953765, 0.417796,
0.113420, 0.006823, 0.295146, 0.476111, 0.888938, 0.515592, 0.504579,
0.029741, 0.216426, 0.748168, 0.716561, 0.929703, 0.596117, 0.449982,
0.666427, 0.990801, 0.940903, 0.237043, 0.408547, 0.034717, 0.457587,
0.922463, 0.625603, 0.051651, 0.628568, 0.078641, 0.165159, 0.788560,
0.465530, 0.118923, 0.206356, 0.578950, 0.125746, 0.501502, 0.055060,
0.014685, 0.017094, 0.559640, 0.044425, 0.233519, 0.307808, 0.760986,
0.163223, 0.903925, 0.210969, 0.829650, 0.894726, 0.151872, 0.066693,
0.303273, 0.186589, 0.524279, 0.225736, 0.812192, 0.575930, 0.854304,
0.890833, 0.741089, 0.642864, 0.356363, 0.860012, 0.849220, 0.935313,
0.985758, 0.350722, 0.990373, 0.000443, 0.367815, 0.550013, 0.044868,
0.601335, 0.857820, 0.805855, 0.764557, 0.761745, 0.016823, 0.594207,
0.656471, 0.168696, 0.660900, 0.959744, 0.355284, 0.185179, 0.185480,
0.167477, 0.761110, 0.039784, 0.058310, 0.502199, 0.682648, 0.414673,
0.362211, 0.531868, 0.349985, 0.347969, 0.882589, 0.340358, 0.348412,
0.250404, 0.890371, 0.393280, 0.851739, 0.748191, 0.199135, 0.616297,
0.509936, 0.215958, 0.210504, 0.166407, 0.384654, 0.871404, 0.126151,
0.739938, 0.056583, 0.311631, 0.907415, 0.817693, 0.351415, 0.965724,
0.319891, 0.034062, 0.380397, 0.682102, 0.565930, 0.730382, 0.030072,
0.448519, 0.070741, 0.378484, 0.698924, 0.961112, 0.771764, 0.550663,
0.709303, 0.970899, 0.166959, 0.219239, 0.186857, 0.377463, 0.385647,
0.571511, 0.248867, 0.511798, 0.311449, 0.305450, 0.823429, 0.218864,
0.123142, 0.174844, 0.184588, 0.443034, 0.208906, 0.564986, 0.125136,
0.774836, 0.295368, 0.155207, 0.223355, 0.366109, 0.533691, 0.922279,
0.327221, 0.305455, 0.472942, 0.036524, 0.276354, 0.639901, 0.255763,
0.463211, 0.017364, 0.641410, 0.034722, 0.266231, 0.153207, 0.346171,
0.571680, 0.976636, 0.565036, 0.694822, 0.151480, 0.749624, 0.137856,
0.360386, 0.314610, 0.262992, 0.135222, 0.609978, 0.418200, 0.358578,
0.976087, 0.951891, 0.280856, 0.303307, 0.257346, 0.753798, 0.339831,
0.533700, 0.393699, 0.595594, 0.996911, 0.411063, 0.237003, 0.031634,
0.677294, 0.390211, 0.377805, 0.248974, 0.366847, 0.942841, 0.943796,
0.518327, 0.692465, 0.081653, 0.878713, 0.007074, 0.344645, 0.013936,
0.617052, 0.762845, 0.372513, 0.593138, 0.714736, 0.653370, 0.896446,
0.972082, 0.407168, 0.236276, 0.505782, 0.800867, 0.831870, 0.502693,
0.211930, 0.068873, 0.534327, 0.889224, 0.459084, 0.912132, 0.138197,
0.825931, 0.854972, 0.081994, 0.344259, 0.547437, 0.163646, 0.222972,
0.554511, 0.508291, 0.236908, 0.171563, 0.271135, 0.609421, 0.764701,
0.985871, 0.262790, 0.661147, 0.957953, 0.669958, 0.897423, 0.463734,
0.470825, 0.729293, 0.966427, 0.682755, 0.798166, 0.500754, 0.571978,
0.257251, 0.412886, 0.710176, 0.083182, 0.267858, 0.792169, 0.427441,
0.815295, 0.955815, 0.650413, 0.369805, 0.464106, 0.887320, 0.541368,
0.735242, 0.496741, 0.306069, 0.721113, 0.759531, 0.967216, 0.679065,
0.429489, 0.864639, 0.142799, 0.900314, 0.593932, 0.109227, 0.583069,
0.392098, 0.609981, 0.155047, 0.649349, 0.022867, 0.865222, 0.732531,
0.290725, 0.657392, 0.159972, 0.106019, 0.613207, 0.810384, 0.475824,
0.077313, 0.697704, 0.017192, 0.812555};
static float golden_endtoend_output[] = {
-1.881211, -0.028385, -3.585066, 1.939770, -3.461155, 1.280415, -4.408978,
0.608663, -2.704937, 1.859742, -5.777429, 2.691839, -1.049012, 1.640870,
-4.856245, 1.604236, 0.992707, 0.422858, -4.307465, 1.887332, -0.884831,
-0.154277, -2.634801, 0.586827, -1.849960, 1.399608, -4.531559, 1.943591,
0.271676, -2.893054, -2.066826, 0.235467, -1.248263, -1.164534, -2.640174,
-0.112878, -4.386484, 1.253024, -4.135623, 1.068984, -0.043579, -0.832957,
-3.257258, -0.514396, -1.651174, 0.638630, -4.364372, 1.548441, -0.289455,
0.539845, -4.097627, 0.635001, -0.465071, -0.927701, -2.481498, 0.356616,
-2.355012, 0.728806, -3.340283, 1.609038, -4.786268, -0.532272, -1.886150,
0.254797, 0.746620, -1.657134, -3.264265, 0.525551, -1.756837, 0.845446,
-5.572190, 1.715797, -2.856942, 3.394245, -5.803662, 2.281806, -3.014739,
2.616136, -4.728482, 1.659984, -2.106307, 2.711709, -6.173832, 1.352869,
-0.038035, 0.107619, -4.279774, 2.341930, -0.980413, -0.119538, -4.049717,
1.172128, -3.477744, 2.602274, -6.231380, 2.537300, -0.862214, 0.568722,
-3.858362, 0.197867, -1.725885, 3.687312, -7.067363, 2.403544, -0.944963,
0.235639, -3.250094, 0.659117, -1.459576, 0.426128, -3.637207, 1.030386,
-4.224351, 3.516220, -6.053367, 0.993473, -2.182416, -0.762625, -1.884405,
-0.113736, -2.572602, 0.329290, -1.913233, 0.517418, -0.019757, 0.203176,
-3.715881, 0.482136, -1.912823, 1.357907, -5.473043, 1.714658, -3.177160,
0.089285, -3.127669, 1.268076, 0.772498, -1.622712, -3.850314, 0.436124,
-1.495983, 3.439982, -7.623405, 1.726721, -0.423979, 0.180201, -2.902406,
0.986457, -1.845638, 0.460903, -5.359343, -1.133931, -1.074456, 0.717304,
-3.519856, 1.012126, -0.562301, 1.881967, -6.716627, 2.525036, 0.945480,
0.337081, -5.210562, 2.572035, -0.943370, 0.442026, -2.666313, 0.411296,
0.002787, -0.000735, -2.498933, 0.771719, -3.568153, 3.833721, -6.617026,
2.813922, -0.573970, 1.025208, -3.909923, 1.722648, -1.406849, 0.719783,
-5.207438, 1.819442, -0.530895, -0.010887, -2.939614, 0.971225, -1.660297,
1.345243, -4.454571, 2.244876, -2.021213, 1.756090, -4.880947, 0.364597,
-2.380270, 2.763117, -5.613013, 2.137534, 0.289101, -2.279400, -3.365582,
0.170028, -1.142254, -0.709604, -3.656223, 1.804870, -0.854690, 0.592102,
-5.010415, 2.462687, -1.474710, 0.566002, -3.621819, -0.391946, -0.423524,
-0.631428, -3.513310, 0.962825, -1.480262, 0.319791, -3.610137, 1.842339,
-0.250073, 1.182022, -6.249267, 1.604172, 1.153759, -0.734054, -4.620415,
-0.030858, 0.050911, 1.524406, -4.724010, 1.451846, -3.277104, 2.414182,
-4.605285, 1.846092, -1.503047, -0.618200, -2.746546, -0.459332, -0.980326,
-1.199977, -2.043865, -0.165793, -2.214698, 3.108281, -7.127830, -0.123065,
1.244948, -3.039923, -4.660061, -0.225957, -0.307210, -1.513205, -2.456005,
0.840048, -0.741445, 2.328635, -6.015267, 2.723240, -1.381171, -0.728878,
-5.114925, -0.362034, -0.574923, 0.518080, -3.892457, 1.798948, 0.435119,
-0.371696, -2.807571, 1.302864, -2.063052, 1.036388, -4.232038, 1.397059,
-1.615668, -1.511019, -3.095508, 1.290955, -3.428723, 2.000287, -4.196487,
1.566983, 0.196957, 0.224343, -4.926359, -0.691975, -0.214941, 1.546821,
-5.384868, 2.290820, -1.878865, 0.493692, -4.129823, 2.112036, 0.516558,
-2.553077, -2.717338, 0.017146, -2.016057, 1.628995, -4.240602, 1.189533,
-5.460220, 1.254738, -4.214903, 0.755659, -2.893235, 2.937762, -6.169453,
2.035456, -5.613212, -0.122254, -1.973646, -0.060619, -2.119598, 1.413512,
-4.938738, 1.890244, 0.544169, -2.062413, -3.329637, -0.062515, -1.855805,
-0.791297, -2.570353, 0.607615, 0.305812, 0.338930, -4.150270, 2.274937,
0.042653, 0.133825, -3.538155, 1.523639, -3.173690, -1.496599, -2.414655,
0.464687, -1.448998, -0.368907, -3.520129, 0.203382, -2.443626, 1.266233,
-3.393848, 0.605911, -0.015353, 1.402006, -4.441003, 1.419281, 0.603587,
0.434146, -4.966566, 2.171872, -0.688264, -0.009981, -4.461103, 1.538354,
-5.029816, -0.264424, -1.713510, -0.315258, -1.891606, 0.252074, -2.419428,
0.043970, -1.291143, 2.048704, -4.590105, 0.524734, -1.889576, 0.134836,
-3.462745, 1.390663, -0.112773, 0.402735, -4.203784, 1.381043, -1.201634,
-1.968277, -1.425637, -0.181725, -1.250742, -2.102041, -3.925464, -1.256797,
-3.701354, -1.754610, -1.917231, -1.455910, -1.838006, 2.041781, -5.666212,
2.752957, -2.659553, 2.553637, -4.872212, 1.443437, -2.081846, 3.311263,
-5.912457, 1.871049, 0.196148, -0.307044, -4.024967, 2.149149, 0.361809,
0.620415, -5.939984, 0.180672, -1.209180, -0.269122, -3.240285, 1.460315,
-1.040803, 1.125700, -6.060366, 0.887767, -3.214111, 1.314368, -3.026808,
1.023640, -3.815175, 1.795642, -4.355603, 1.064454, -0.046472, 0.618463,
-5.941646, 2.861891, -2.852155, -0.990457, -2.624445, 1.794494, -1.176747,
-0.358159, -3.206776, 1.138721, -2.819523, -1.825522, -1.450902, -0.187312,
-0.808727, 0.636872, -4.120567, 1.192623, 0.810731, -1.768519, -3.699450,
1.527116, -2.772720, 3.012835, -5.912736, 1.599365, -4.696381, 2.234591,
-4.139552, 1.061768, -1.880089, 3.596274, -7.006379, 2.382152, -3.158115,
3.844430, -7.044156, 2.307596, -2.473970, 1.312644, -5.467269, 0.197154,
-1.530040, 1.762275, -5.550757, 0.630276, -3.048947, 1.043777, -3.096658,
1.345893, -1.329494, 2.065748, -4.711032, 2.227600, -0.413321, -0.032428,
-4.599650, 1.668734, -4.351490, -0.200022, -2.359903, 0.021997, 0.116028,
1.159718, -5.093972, -0.142951, -2.409895, 0.906133, -2.728812, 0.809932,
-2.597363, 0.494130, -2.357861, 0.369825, -2.165235, 1.148522, -3.130562,
0.759034, 0.646335, -1.463660, -3.508299, 1.059679, -1.485465, 1.007319,
-4.340716, 1.789864, -1.590654, 1.612324, -4.452007, 2.389805, -5.200148,
-1.068398, -1.306923, -0.472408, -0.392165, -0.524996, -2.933478, 1.518430,
-1.287781, 0.113422, -3.020525, 1.338359, -0.105982, 0.936014, -4.132197,
1.836807, -0.616589, -1.029716, -3.271347, 0.284889, -2.653359, 2.135829,
-4.643613, 1.627981, 0.287733, -2.017263, -2.776574, 1.184792, 1.004161,
-1.483019, -4.339290, -0.787322, 0.582420, 1.137839, -5.673941, -0.001862,
-1.219142, 0.532561, -4.457245, 1.826807, -3.343291, 3.034610, -6.179855,
2.235917, -4.369989, 4.018128, -6.632714, 0.926585, -0.485469, 0.536073,
-4.179557, 1.489637, -0.521762, 1.636089, -6.137912, 1.500867, -4.086009,
1.961372, -3.688977, 1.358220, -1.544034, 1.763837, -4.357567, 1.852201,
-2.018725, 1.046264, -6.211127, 1.609419, -0.118441, 1.602284, -6.242423,
1.518578, -0.604078, 1.106613, -5.393445, 2.595629, 0.142712, -1.903953,
-2.821177, 0.032758, -0.009152, 0.184628, -4.227636, 2.046843, -2.240138,
1.256176, -5.108516, -0.308447, -2.998571, 4.657396, -7.582112, 2.510951,
-3.535784, 1.704560, -5.068484, 1.318466, -3.058265, 3.073172, -6.998089,
3.178849, -2.420286, 2.277806, -4.999528, 1.423890, -1.672914, 0.447460,
-4.088940, 1.351087, -1.051546, -0.417955, -4.042147, 1.604102, -1.700931,
2.796663, -6.497579, 2.857974, -0.240828, 0.858001, -5.778933, 2.778508,
-0.406211, 1.300766, -5.073671, 2.089362, -0.201673, 1.588396, -6.000150,
2.185055, -2.332125, 0.768216, -2.609184, 0.327277, -3.358943, -1.020736,
-2.389984, 0.315512, -0.561905, 1.948740, -6.408485, 2.231985, -0.603652,
0.661829, -5.070386, -1.063058, -0.624796, 1.375772, -4.379606, 1.929358,
-1.047263, 0.739100, -5.217857, 2.127625, -5.025338, 0.650344, -2.068460,
0.076936, -0.457505, -1.050984, -1.917765, 1.150908, 0.782625, 0.855595,
-5.321719, 0.787209, -0.460232, 1.106736, -5.552326, 2.801043, -0.360217,
-0.434432, -4.273378, 0.967556, -0.972652, 0.874811, -5.429918, -0.331039,
0.115477, 0.111883, -5.418786, 1.240546, -1.842794, 0.505880, -3.676064,
-0.682369, 1.858984, -0.742566, -5.784060, 0.673239, -1.280398, 0.280842,
-4.848077, 2.214860, -0.785100, -0.588488, -2.438206, 0.786651, -1.568752,
1.935400, -6.320256, 2.125338, -1.476457, -1.651941, -2.695734, 0.007338,
-3.280860, 2.310385, -5.319578, 1.890123, -0.775723, 0.630606, -4.321582,
1.085521, -1.847371, 1.188521, -4.596577, 2.056443, -2.340172, -0.108501,
-3.156392, 0.933279, -0.495331, 0.122405, -5.171133, 1.763245, -0.796913,
2.310487, -7.247197, 2.401678, -1.908860, 0.043798, -2.393796, 0.573806,
-0.608531, 0.154710, -4.669001, 0.750680, 0.468380, 0.392591, -4.755001,
2.615217, -1.957774, 1.153513, -4.530099, 1.124362, -3.569415, 1.697154,
-3.536335, 0.910758, -2.976264, 1.833129, -4.287203, -0.547050, -2.409768,
0.061585, -1.324116, 0.268497, -2.962222, -1.524245, -2.063413, 0.442058,
-4.292337, 3.538863, -6.699603, 1.718664, -2.290363, 1.994596, -6.245037,
-0.433084, -0.367059, 1.020297, -4.940721, 2.902264, -0.577056, -0.709887,
-5.001413, -0.268316, -1.112048, -1.083307, -1.753492, 0.209973, 0.139540,
0.917602, -5.232745, 2.538467, -2.139234, -0.187388, -1.837249, -0.478582,
-0.731653, -0.481550, -2.531261, 1.044770, 0.707750, 0.279971, -3.221119,
1.552074, -2.373144, 0.859518, -3.665156, 1.620278, -1.440871, -0.525581,
-2.758271, 1.491873, -2.302013, 1.119935, -5.257080, 2.627170, -3.174739,
1.363282, -4.831639, 1.101076, -4.337008, 2.689639, -5.165915, 1.069201,
-1.882078, -0.120370, -2.287967, 1.147619, -1.403616, 1.077150, -5.084296,
1.658236, -0.919642, 0.487423, -3.001075, 0.741268, 0.107300, 0.943556,
-3.544311, 1.000239, -1.627171, 2.871253, -5.179172, 1.429893, -0.826040,
0.188670, -4.499894, 1.013447, -2.101299, 0.317516, -3.452141, -0.833776,
-1.362144, 1.272437, -4.449355, 1.613591, -2.039873, 2.613175, -6.229640,
1.659790, -1.595520, -0.237462, -2.744997, 0.337841, 0.148981, -1.703771,
-2.388023, 1.276469, 1.058508, -0.401642, -4.680769, 0.861881, -1.336381,
1.153080, -2.834378, 0.721075, 0.900115, 1.360511, -5.573611, 0.949182,
-2.970844, 2.017563, -5.186108, -0.201038, -1.192824, 0.610142, -4.450919,
-0.897114, -1.812093, 0.422310, -5.245487, 0.256549, 0.320275, -2.324150,
-2.967040, -0.260536, -0.721467, 0.454148, -5.058031, 0.526370, -0.895656,
0.732240, -3.327363, 1.353953, -1.277912, -0.483171, -1.926713, 0.065044,
-2.167506, -0.196606, -1.923437, 0.604962, -2.088319, 1.406834, -5.227296,
2.247351, -4.421744, 1.729791, -5.007922, 1.264769, -0.897019, 0.922902,
-3.887108, 2.087432, -1.310226, -0.101938, -3.359082, -0.079662, -0.514988,
-0.963179, -4.038209, 2.223278, -0.590083, -2.310458, -1.748338, 0.363406,
-0.540731, -0.885913, -4.179595, 2.216781, -3.044339, -0.447100, -2.446098,
0.931101, -1.676190, 2.096175, -4.980755, 2.262151, -1.095047, 1.897516,
-5.996138, 2.191038, 0.297128, -0.780974, -2.884299, 1.195408, -0.521065,
-1.955837, -3.091064, -0.404183, -1.961519, 4.076096, -7.521851, 2.242064,
-1.988043, 0.303300, -2.422585, 0.322230, -3.377634, 3.499955, -7.084434,
2.375587, -0.718851, 2.150076, -5.412241, 2.374280, -2.006088, 2.229828,
-5.848188, 2.543077, -2.171042, 2.096026, -5.300007, 0.141405, -1.187745,
0.105340, -4.003816, 1.034281, -3.980804, 1.856709, -5.103042, 0.623737,
-2.080307, 0.896140, -3.104050, 0.983158, -0.424898, -1.154270, -3.805728,
1.978917, -1.314387, 1.235096, -3.148906, 1.113173, 0.111713, 2.055213,
-7.565283, 2.100342};
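// Bias vector for the 16 units, shared by the forward and backward cells.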
const std::initializer_list<float> biases = {
0.065691948, -0.69055247, 0.1107955, -0.97084129, -0.23957068, -0.23566568,
-0.389184, 0.47481549, -0.4791103, 0.29931796, 0.10463274, 0.83918178,
0.37197268, 0.61957061, 0.3956964, -0.37609905};
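// Recurrent (hidden-to-hidden) weights: a 16x16 matrix with 0.1 on the
// diagonal and zeros elsewhere.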
const std::initializer_list<float> recurrent_weights = {
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1};
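// Test model wrapping a single BIDIRECTIONAL_SEQUENCE_RNN op. It supports
// optional weight quantization (uint8 weights with float inputs/outputs), an
// optional auxiliary input with or without cross-linking weights, and
// time-major or batch-major layouts.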
class BidirectionalRNNOpModel : public SingleOpModel {
public:
BidirectionalRNNOpModel(int batches, int sequence_len, int fw_units,
int bw_units, int input_size, int aux_input_size,
AuxInputMode aux_input_mode, bool time_major,
bool merge_outputs, bool quantize_weights = false,
bool asymmetric_quantize_weights = false)
: batches_(batches),
sequence_len_(sequence_len),
fw_units_(fw_units),
bw_units_(bw_units),
input_size_(input_size),
aux_input_size_(aux_input_size),
quantize_weights_(quantize_weights) {
const TensorType tensor_type =
quantize_weights ? TensorType_UINT8 : TensorType_FLOAT32;
input_ = AddInput(TensorType_FLOAT32);
fw_weights_ = AddInput(tensor_type);
fw_recurrent_weights_ = AddInput(tensor_type);
fw_bias_ = AddInput(TensorType_FLOAT32);
fw_hidden_state_ = AddVariableInput(TensorType_FLOAT32);
bw_weights_ = AddInput(tensor_type);
bw_recurrent_weights_ = AddInput(tensor_type);
bw_bias_ = AddInput(TensorType_FLOAT32);
bw_hidden_state_ = AddVariableInput(TensorType_FLOAT32);
const auto input_shape =
(time_major) ? std::vector<int>({sequence_len_, batches_, input_size_})
: std::vector<int>({batches_, sequence_len_, input_size_});
std::vector<int> aux_input_shape = {0};
std::vector<int> aux_fw_weights_shape = {0};
std::vector<int> aux_bw_weights_shape = {0};
if (aux_input_mode != AuxInputMode::kNoAuxInput) {
aux_input_ = AddInput(TensorType_FLOAT32);
aux_input_shape =
(time_major)
? std::vector<int>({sequence_len_, batches_, aux_input_size_})
: std::vector<int>({batches_, sequence_len_, aux_input_size_});
} else {
aux_input_ = AddNullInput();
}
if (aux_input_mode == AuxInputMode::kCrossLinking) {
aux_fw_weights_ = AddInput(tensor_type);
aux_bw_weights_ = AddInput(tensor_type);
aux_fw_weights_shape = {fw_units, aux_input_size_};
aux_bw_weights_shape = {bw_units, aux_input_size_};
} else {
aux_fw_weights_ = AddNullInput();
aux_bw_weights_ = AddNullInput();
}
fw_output_ = AddOutput(TensorType_FLOAT32);
if (!merge_outputs) {
bw_output_ = AddOutput(TensorType_FLOAT32);
}
SetBuiltinOp(BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN,
BuiltinOptions_BidirectionalSequenceRNNOptions,
CreateBidirectionalSequenceRNNOptions(
builder_, time_major, ActivationFunctionType_RELU,
merge_outputs, asymmetric_quantize_weights)
.Union());
BuildInterpreter({
input_shape,
{fw_units_, input_size_},
{fw_units_, fw_units_},
{fw_units_},
{batches_, fw_units_},
{bw_units_, input_size_},
{bw_units_, bw_units_},
{bw_units_},
{batches_, bw_units_},
aux_input_shape,
aux_fw_weights_shape,
aux_bw_weights_shape,
});
}
void SetFwBias(std::initializer_list<float> f) {
PopulateTensor(fw_bias_, f);
}
void SetBwBias(std::initializer_list<float> f) {
PopulateTensor(bw_bias_, f);
}
void SetFwWeights(const std::vector<float>& f) {
if (quantize_weights_) {
SymmetricQuantizeAndPopulate(fw_weights_, f);
} else {
PopulateTensor(fw_weights_, f);
}
}
void SetBwWeights(const std::vector<float>& f) {
if (quantize_weights_) {
SymmetricQuantizeAndPopulate(bw_weights_, f);
} else {
PopulateTensor(bw_weights_, f);
}
}
void SetFwRecurrentWeights(const std::vector<float>& f) {
if (quantize_weights_) {
SymmetricQuantizeAndPopulate(fw_recurrent_weights_, f);
} else {
PopulateTensor(fw_recurrent_weights_, f);
}
}
void SetBwRecurrentWeights(const std::vector<float>& f) {
if (quantize_weights_) {
SymmetricQuantizeAndPopulate(bw_recurrent_weights_, f);
} else {
PopulateTensor(bw_recurrent_weights_, f);
}
}
void SetInput(std::initializer_list<float> data) {
PopulateTensor(input_, data);
}
void SetInput(int offset, float* begin, float* end) {
PopulateTensor(input_, offset, begin, end);
}
void SetAuxInput(int offset, float* begin, float* end) {
PopulateTensor(aux_input_, offset, begin, end);
}
void SetAuxFwWeights(const std::vector<float>& f) {
if (quantize_weights_) {
SymmetricQuantizeAndPopulate(aux_fw_weights_, f);
} else {
PopulateTensor(aux_fw_weights_, f);
}
}
void SetAuxBwWeights(const std::vector<float>& f) {
if (quantize_weights_) {
SymmetricQuantizeAndPopulate(aux_bw_weights_, f);
} else {
PopulateTensor(aux_bw_weights_, f);
}
}
std::vector<float> GetFwOutput() { return ExtractVector<float>(fw_output_); }
std::vector<float> GetBwOutput() { return ExtractVector<float>(bw_output_); }
int input_size() { return input_size_; }
int aux_input_size() { return aux_input_size_; }
int num_fw_units() { return fw_units_; }
int num_bw_units() { return bw_units_; }
int num_batches() { return batches_; }
int sequence_len() { return sequence_len_; }
private:
int input_;
int fw_weights_;
int fw_recurrent_weights_;
int fw_bias_;
int fw_hidden_state_;
int fw_output_;
int bw_weights_;
int bw_recurrent_weights_;
int bw_bias_;
int bw_hidden_state_;
int bw_output_;
int aux_input_;
int aux_fw_weights_;
int aux_bw_weights_;
int batches_;
int sequence_len_;
int fw_units_;
int bw_units_;
int input_size_;
int aux_input_size_;
bool quantize_weights_;
};
class BidirectionalRNNOpTest
: public ::testing::TestWithParam<::testing::tuple<bool, bool>> {};
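// Test parameters: (quantize_weights, asymmetric_quantize_inputs).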
INSTANTIATE_TEST_SUITE_P(QuantizationOrNot, BidirectionalRNNOpTest,
::testing::Combine(
::testing::Bool(),
::testing::Bool()));
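// Feeds the same batch-major input sequence to both batches and checks the
// forward and backward outputs against the golden data.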
TEST_P(BidirectionalRNNOpTest, ClosedBoxTest) {
auto params = GetParam();
const bool quantize_weights = std::get<0>(params);
const bool asymmetric_quantize_inputs = std::get<1>(params);
BidirectionalRNNOpModel rnn(2, 16,
16, 16,
8, 0,
AuxInputMode::kNoAuxInput,
false,
false, quantize_weights,
asymmetric_quantize_inputs);
rnn.SetFwWeights(weights);
rnn.SetBwWeights(weights);
rnn.SetFwBias(biases);
rnn.SetBwBias(biases);
rnn.SetFwRecurrentWeights(recurrent_weights);
rnn.SetBwRecurrentWeights(recurrent_weights);
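  // Batch-major layout: the same 16-step sequence is fed to both batches,
  // one full sequence after the other.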
const int input_sequence_size = rnn.input_size() * rnn.sequence_len();
float* batch_start = rnn_input;
float* batch_end = batch_start + input_sequence_size;
rnn.SetInput(0, batch_start, batch_end);
rnn.SetInput(input_sequence_size, batch_start, batch_end);
ASSERT_EQ(rnn.Invoke(), kTfLiteOk);
float* golden_fw_start = rnn_golden_fw_output;
float* golden_fw_end =
golden_fw_start + rnn.num_fw_units() * rnn.sequence_len();
std::vector<float> fw_expected;
fw_expected.insert(fw_expected.end(), golden_fw_start, golden_fw_end);
fw_expected.insert(fw_expected.end(), golden_fw_start, golden_fw_end);
EXPECT_THAT(rnn.GetFwOutput(),
ElementsAreArray(ArrayFloatNear(
fw_expected, quantize_weights ? 1.42e-2 : 1e-5)));
float* golden_bw_start = rnn_golden_bw_output;
float* golden_bw_end =
golden_bw_start + rnn.num_bw_units() * rnn.sequence_len();
std::vector<float> bw_expected;
bw_expected.insert(bw_expected.end(), golden_bw_start, golden_bw_end);
bw_expected.insert(bw_expected.end(), golden_bw_start, golden_bw_end);
EXPECT_THAT(rnn.GetBwOutput(),
ElementsAreArray(ArrayFloatNear(
bw_expected, quantize_weights ? 1.42e-2 : 1e-5)));
}
TEST_P(BidirectionalRNNOpTest, ClosedBoxTestTimeMajor) {
auto params = GetParam();
const bool quantize_weights = std::get<0>(params);
const bool asymmetric_quantize_inputs = std::get<1>(params);
BidirectionalRNNOpModel rnn(2, 16,
16, 16,
8, 0,
AuxInputMode::kNoAuxInput,
true,
false, quantize_weights,
asymmetric_quantize_inputs);
rnn.SetFwWeights(weights);
rnn.SetBwWeights(weights);
rnn.SetFwBias(biases);
rnn.SetBwBias(biases);
rnn.SetFwRecurrentWeights(recurrent_weights);
rnn.SetBwRecurrentWeights(recurrent_weights);
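  // Time-major layout: for each time step, the entries for batch 0 and
  // batch 1 are stored consecutively, so both batches see the same sequence.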
for (int i = 0; i < rnn.sequence_len(); i++) {
float* batch_start = rnn_input + i * rnn.input_size();
float* batch_end = batch_start + rnn.input_size();
rnn.SetInput(2 * i * rnn.input_size(), batch_start, batch_end);
rnn.SetInput((2 * i + 1) * rnn.input_size(), batch_start, batch_end);
}
ASSERT_EQ(rnn.Invoke(), kTfLiteOk);
std::vector<float> fw_expected;
for (int i = 0; i < rnn.sequence_len(); i++) {
float* golden_fw_start = rnn_golden_fw_output + i * rnn.num_fw_units();
float* golden_fw_end = golden_fw_start + rnn.num_fw_units();
fw_expected.insert(fw_expected.end(), golden_fw_start, golden_fw_end);
fw_expected.insert(fw_expected.end(), golden_fw_start, golden_fw_end);
}
constexpr float kHybridTolerance = 3.57e-1;
constexpr float kFloatTolerance = 1e-5;
EXPECT_THAT(
rnn.GetFwOutput(),
ElementsAreArray(ArrayFloatNear(
fw_expected, quantize_weights ? kHybridTolerance : kFloatTolerance)));
}
TEST_P(BidirectionalRNNOpTest, ClosedBoxTestMergeOutputs) {
auto params = GetParam();
const bool quantize_weights = std::get<0>(params);
const bool asymmetric_quantize_inputs = std::get<1>(params);
BidirectionalRNNOpModel rnn(2, 16,
16, 16,
8, 0,
AuxInputMode::kNoAuxInput,
false,
true, quantize_weights,
asymmetric_quantize_inputs);
rnn.SetFwWeights(weights);
rnn.SetBwWeights(weights);
rnn.SetFwBias(biases);
rnn.SetBwBias(biases);
rnn.SetFwRecurrentWeights(recurrent_weights);
rnn.SetBwRecurrentWeights(recurrent_weights);
const int input_sequence_size = rnn.input_size() * rnn.sequence_len();
float* batch_start = rnn_input;
float* batch_end = batch_start + input_sequence_size;
rnn.SetInput(0, batch_start, batch_end);
rnn.SetInput(input_sequence_size, batch_start, batch_end);
ASSERT_EQ(rnn.Invoke(), kTfLiteOk);
std::vector<float> merged_expected;
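  // With merged outputs, the fw and bw activations are concatenated along the
  // last dimension for every (batch, time step) pair.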
for (int bid = 0; bid < rnn.num_batches(); bid++) {
for (int step = 0; step < rnn.sequence_len(); step++) {
merged_expected.insert(
merged_expected.end(),
rnn_golden_fw_output + rnn.num_fw_units() * step,
rnn_golden_fw_output + rnn.num_fw_units() * (step + 1));
merged_expected.insert(
merged_expected.end(),
rnn_golden_bw_output + rnn.num_bw_units() * step,
rnn_golden_bw_output + rnn.num_bw_units() * (step + 1));
}
}
EXPECT_THAT(rnn.GetFwOutput(),
ElementsAreArray(ArrayFloatNear(
merged_expected, quantize_weights ? 1.42e-2 : 1e-5)));
}
TEST(BidirectionalRNNOpTest, ClosedBoxTestTimeMajorMergeOutputs) {
BidirectionalRNNOpModel rnn(2, 16,
16, 16,
8, 0,
AuxInputMode::kNoAuxInput,
true,
true);
rnn.SetFwWeights(weights);
rnn.SetBwWeights(weights);
rnn.SetFwBias(biases);
rnn.SetBwBias(biases);
rnn.SetFwRecurrentWeights(recurrent_weights);
rnn.SetBwRecurrentWeights(recurrent_weights);
for (int i = 0; i < rnn.sequence_len(); i++) {
float* batch_start = rnn_input + i * rnn.input_size();
float* batch_end = batch_start + rnn.input_size();
rnn.SetInput(2 * i * rnn.input_size(), batch_start, batch_end);
rnn.SetInput((2 * i + 1) * rnn.input_size(), batch_start, batch_end);
}
ASSERT_EQ(rnn.Invoke(), kTfLiteOk);
std::vector<float> merged_expected;
for (int step = 0; step < rnn.sequence_len(); step++) {
for (int bid = 0; bid < rnn.num_batches(); bid++) {
merged_expected.insert(
merged_expected.end(),
rnn_golden_fw_output + rnn.num_fw_units() * step,
rnn_golden_fw_output + rnn.num_fw_units() * (step + 1));
merged_expected.insert(
merged_expected.end(),
rnn_golden_bw_output + rnn.num_bw_units() * step,
rnn_golden_bw_output + rnn.num_bw_units() * (step + 1));
}
}
EXPECT_THAT(rnn.GetFwOutput(),
ElementsAreArray(ArrayFloatNear(merged_expected)));
}
TEST(BidirectionalRNNOpTest, ClosedBoxTestReverseInputs) {
BidirectionalRNNOpModel rnn(2, 16,
16, 16,
8, 0,
AuxInputMode::kNoAuxInput,
false,
false);
rnn.SetFwWeights(weights);
rnn.SetBwWeights(weights);
rnn.SetFwBias(biases);
rnn.SetBwBias(biases);
rnn.SetFwRecurrentWeights(recurrent_weights);
rnn.SetBwRecurrentWeights(recurrent_weights);
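  // Feed the sequence reversed in time for both batches; the forward pass
  // should then reproduce the backward golden output (time-reversed), and
  // vice versa.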
for (int i = 0; i < rnn.sequence_len(); i++) {
float* batch_start = rnn_input + i * rnn.input_size();
float* batch_end = batch_start + rnn.input_size();
const int reverse_idx = rnn.sequence_len() - i - 1;
rnn.SetInput(reverse_idx * rnn.input_size(), batch_start, batch_end);
rnn.SetInput((rnn.sequence_len() + reverse_idx) * rnn.input_size(),
batch_start, batch_end);
}
ASSERT_EQ(rnn.Invoke(), kTfLiteOk);
std::vector<float> fw_expected;
for (int i = 0; i < rnn.sequence_len(); i++) {
float* golden_fw_start = rnn_golden_bw_output + i * rnn.num_fw_units();
float* golden_fw_end = golden_fw_start + rnn.num_fw_units();
fw_expected.insert(fw_expected.begin(), golden_fw_start, golden_fw_end);
}
fw_expected.insert(fw_expected.end(), fw_expected.begin(), fw_expected.end());
EXPECT_THAT(rnn.GetFwOutput(), ElementsAreArray(ArrayFloatNear(fw_expected)));
std::vector<float> bw_expected;
for (int i = 0; i < rnn.sequence_len(); i++) {
float* golden_bw_start = rnn_golden_fw_output + i * rnn.num_bw_units();
float* golden_bw_end = golden_bw_start + rnn.num_bw_units();
bw_expected.insert(bw_expected.begin(), golden_bw_start, golden_bw_end);
}
bw_expected.insert(bw_expected.end(), bw_expected.begin(), bw_expected.end());
EXPECT_THAT(rnn.GetBwOutput(), ElementsAreArray(ArrayFloatNear(bw_expected)));
}
TEST(BidirectionalRNNOpTest, EndToEndTest) {
BidirectionalRNNOpModel rnn(1, 4,
16, 16,
8, 0,
AuxInputMode::kNoAuxInput,
false,
false);
const int output_size = 4;
float dnn_weights[] = {
-0.5782342, -0.052212059, 0.73036242, -0.81216097, -0.80088139,
-0.23420811, -0.39647382, 0.31423986, 0.61819065, -0.73659575,
-0.89698344, -0.8931554, -0.0845688, 0.5617367, 0.38415289,
-0.11487955, -0.7617774, 0.17927337, 0.15726972, 0.059798479,
0.19009054, -0.27616632, -0.39142907, 0.77744663, -0.046830714,
-0.6603595, 0.21945822, 0.051494241, 0.23785079, 0.19239247,
-0.53268754, 0.65961659, -0.85981959, -0.80232513, 0.84745562,
-0.66070104, -0.036533296, -0.54901814, 0.65353882, -0.41834265,
-0.28561389, 0.75655544, -0.31149811, 0.62981737, 0.31829214,
-0.92734522, -0.48506218, 0.55651462, 0.25192821, 0.67220747,
-0.3836869, -0.55798125, -0.60395885, 0.22488403, -0.78053463,
0.3492105, 0.56452453, 0.4389236, -0.59929526, -0.19762468,
-0.36868393, -0.13198286, -0.53800809, -0.22850353};
std::initializer_list<float> dnn_biases = {0.29177809, -0.98799044,
0.065919638, 0.68781924};
rnn.SetFwWeights(weights);
rnn.SetBwWeights(weights);
rnn.SetFwBias(biases);
rnn.SetBwBias(biases);
rnn.SetFwRecurrentWeights(recurrent_weights);
rnn.SetBwRecurrentWeights(recurrent_weights);
const int input_sequence_size = rnn.input_size() * rnn.sequence_len();
const int output_sequence_size = output_size * rnn.sequence_len();
const int num_examples = 64;
for (int k = 0; k < num_examples; k++) {
float* batch_start = endtoend_input + k * input_sequence_size;
float* batch_end = batch_start + input_sequence_size;
rnn.SetInput(0, batch_start, batch_end);
ASSERT_EQ(rnn.Invoke(), kTfLiteOk);
std::vector<float> fw_output = rnn.GetFwOutput();
std::vector<float> bw_output = rnn.GetBwOutput();
EXPECT_EQ(fw_output.size(), bw_output.size());
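    // Sum the forward and backward activations element-wise, then apply the
    // dense layer defined by dnn_weights/dnn_biases to every time step.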
std::transform(fw_output.begin(), fw_output.end(), bw_output.begin(),
fw_output.begin(), std::plus<float>());
std::vector<float> sequence_result;
for (int s = 0; s < rnn.sequence_len(); s++) {
const float* rnn_output = fw_output.data() + s * rnn.num_fw_units();
std::vector<float> results(dnn_biases);
for (int i = 0; i < output_size; i++) {
for (int j = 0; j < rnn.num_fw_units(); j++) {
results[i] += *(rnn_output + j) * dnn_weights[output_size * j + i];
}
}
sequence_result.insert(sequence_result.end(), results.begin(),
results.end());
}
float* golden_start = golden_endtoend_output + k * output_sequence_size;
float* golden_end = golden_start + output_sequence_size;
std::vector<float> expected;
expected.insert(expected.end(), golden_start, golden_end);
EXPECT_THAT(sequence_result, ElementsAreArray(ArrayFloatNear(expected)));
}
}
TEST(BidirectionalRNNOpTest, ClosedBoxTestNoCrossLinkingRegularAndAuxInput) {
BidirectionalRNNOpModel rnn(2, 16,
16, 16,
8, 8,
AuxInputMode::kNoCrossLinking,
true,
false);
rnn.SetFwWeights(weights);
rnn.SetBwWeights(weights);
rnn.SetFwBias(biases);
rnn.SetBwBias(biases);
rnn.SetFwRecurrentWeights(recurrent_weights);
rnn.SetBwRecurrentWeights(recurrent_weights);
for (int i = 0; i < rnn.sequence_len(); i++) {
float* batch_start = rnn_input + i * rnn.input_size();
float* batch_end = batch_start + rnn.input_size();
rnn.SetInput(2 * i * rnn.input_size(), batch_start, batch_end);
rnn.SetAuxInput(2 * i * rnn.input_size(), batch_start, batch_end);
rnn.SetInput((2 * i + 1) * rnn.input_size(), batch_start, batch_end);
rnn.SetAuxInput((2 * i + 1) * rnn.input_size(), batch_start, batch_end);
}
ASSERT_EQ(rnn.Invoke(), kTfLiteOk);
std::vector<float> fw_expected;
std::vector<float> bw_expected;
for (int i = 0; i < rnn.sequence_len(); i++) {
float* golden_fw_start = rnn_golden_fw_output + i * rnn.num_fw_units();
float* golden_fw_end = golden_fw_start + rnn.num_fw_units();
fw_expected.insert(fw_expected.end(), golden_fw_start, golden_fw_end);
fw_expected.insert(fw_expected.end(), golden_fw_start, golden_fw_end);
float* golden_bw_start = rnn_golden_bw_output + i * rnn.num_fw_units();
float* golden_bw_end = golden_bw_start + rnn.num_fw_units();
bw_expected.insert(bw_expected.end(), golden_bw_start, golden_bw_end);
bw_expected.insert(bw_expected.end(), golden_bw_start, golden_bw_end);
}
EXPECT_THAT(rnn.GetFwOutput(), ElementsAreArray(ArrayFloatNear(fw_expected)));
EXPECT_THAT(rnn.GetBwOutput(), ElementsAreArray(ArrayFloatNear(bw_expected)));
}
TEST(BidirectionalRNNOpTest, ClosedBoxTestNoCrossLinkingRegularInputOnly) {
BidirectionalRNNOpModel rnn(2, 16,
16, 16,
8, 8,
AuxInputMode::kNoCrossLinking,
true,
false);
rnn.SetFwWeights(weights);
rnn.SetBwWeights(weights);
rnn.SetFwBias(biases);
rnn.SetBwBias(biases);
rnn.SetFwRecurrentWeights(recurrent_weights);
rnn.SetBwRecurrentWeights(recurrent_weights);
std::vector<float> bw_inputs(rnn.input_size(), 0);
for (int i = 0; i < rnn.sequence_len(); i++) {
float* batch_start = rnn_input + i * rnn.input_size();
float* batch_end = batch_start + rnn.input_size();
rnn.SetInput(2 * i * rnn.input_size(), batch_start, batch_end);
rnn.SetAuxInput(2 * i * rnn.input_size(), &bw_inputs[0],
&bw_inputs[bw_inputs.size() - 1]);
rnn.SetInput((2 * i + 1) * rnn.input_size(), batch_start, batch_end);
rnn.SetAuxInput((2 * i + 1) * rnn.input_size(), &bw_inputs[0],
&bw_inputs[bw_inputs.size() - 1]);
}
ASSERT_EQ(rnn.Invoke(), kTfLiteOk);
std::vector<float> fw_expected;
for (int i = 0; i < rnn.sequence_len(); i++) {
float* golden_fw_start = rnn_golden_fw_output + i * rnn.num_fw_units();
float* golden_fw_end = golden_fw_start + rnn.num_fw_units();
fw_expected.insert(fw_expected.end(), golden_fw_start, golden_fw_end);
fw_expected.insert(fw_expected.end(), golden_fw_start, golden_fw_end);
}
EXPECT_THAT(rnn.GetFwOutput(), ElementsAreArray(ArrayFloatNear(fw_expected)));
}
TEST(BidirectionalRNNOpTest, ClosedBoxTestNoCrossLinkingAuxInputOnly) {
BidirectionalRNNOpModel rnn(2, 16,
16, 16,
8, 8,
AuxInputMode::kNoCrossLinking,
true,
false);
rnn.SetFwWeights(weights);
rnn.SetBwWeights(weights);
rnn.SetFwBias(biases);
rnn.SetBwBias(biases);
rnn.SetFwRecurrentWeights(recurrent_weights);
rnn.SetBwRecurrentWeights(recurrent_weights);
std::vector<float> fw_inputs(rnn.input_size(), 0);
for (int i = 0; i < rnn.sequence_len(); i++) {
float* batch_start = rnn_input + i * rnn.input_size();
float* batch_end = batch_start + rnn.input_size();
rnn.SetAuxInput(2 * i * rnn.input_size(), batch_start, batch_end);
rnn.SetInput(2 * i * rnn.input_size(), &fw_inputs[0],
&fw_inputs[fw_inputs.size() - 1]);
rnn.SetAuxInput((2 * i + 1) * rnn.input_size(), batch_start, batch_end);
rnn.SetInput((2 * i + 1) * rnn.input_size(), &fw_inputs[0],
&fw_inputs[fw_inputs.size() - 1]);
}
ASSERT_EQ(rnn.Invoke(), kTfLiteOk);
std::vector<float> bw_expected;
for (int i = 0; i < rnn.sequence_len(); i++) {
float* golden_bw_start = rnn_golden_bw_output + i * rnn.num_fw_units();
float* golden_bw_end = golden_bw_start + rnn.num_fw_units();
bw_expected.insert(bw_expected.end(), golden_bw_start, golden_bw_end);
bw_expected.insert(bw_expected.end(), golden_bw_start, golden_bw_end);
}
EXPECT_THAT(rnn.GetBwOutput(), ElementsAreArray(ArrayFloatNear(bw_expected)));
}
TEST(BidirectionalRNNOpTest, ClosedBoxTestCrossLinkingAuxInputOnly) {
BidirectionalRNNOpModel rnn(2, 16,
16, 16,
8, 8,
AuxInputMode::kCrossLinking,
false,
false);
rnn.SetFwWeights(std::vector<float>(weights.size(), 0.0));
rnn.SetBwWeights(std::vector<float>(weights.size(), 0.0));
rnn.SetFwBias(biases);
rnn.SetBwBias(biases);
rnn.SetFwRecurrentWeights(recurrent_weights);
rnn.SetBwRecurrentWeights(recurrent_weights);
rnn.SetAuxFwWeights(weights);
rnn.SetAuxBwWeights(weights);
const int input_sequence_size = rnn.input_size() * rnn.sequence_len();
std::vector<float> zero_input(input_sequence_size, 0.f);
float* batch_start = rnn_input;
float* batch_end = batch_start + input_sequence_size;
rnn.SetInput(0, zero_input.data(), zero_input.data() + zero_input.size());
rnn.SetAuxInput(0, batch_start, batch_end);
rnn.SetInput(input_sequence_size, zero_input.data(),
zero_input.data() + zero_input.size());
rnn.SetAuxInput(input_sequence_size, batch_start, batch_end);
ASSERT_EQ(rnn.Invoke(), kTfLiteOk);
float* golden_fw_start = rnn_golden_fw_output;
float* golden_fw_end =
golden_fw_start + rnn.num_fw_units() * rnn.sequence_len();
std::vector<float> fw_expected;
fw_expected.insert(fw_expected.end(), golden_fw_start, golden_fw_end);
fw_expected.insert(fw_expected.end(), golden_fw_start, golden_fw_end);
EXPECT_THAT(rnn.GetFwOutput(), ElementsAreArray(ArrayFloatNear(fw_expected)));
float* golden_bw_start = rnn_golden_bw_output;
float* golden_bw_end =
golden_bw_start + rnn.num_bw_units() * rnn.sequence_len();
std::vector<float> bw_expected;
bw_expected.insert(bw_expected.end(), golden_bw_start, golden_bw_end);
bw_expected.insert(bw_expected.end(), golden_bw_start, golden_bw_end);
EXPECT_THAT(rnn.GetBwOutput(), ElementsAreArray(ArrayFloatNear(bw_expected)));
}
TEST(BidirectionalRNNOpTest, ClosedBoxTestCrossLinkingAuxInputOnlyTimeMajor) {
BidirectionalRNNOpModel rnn(2, 16,
16, 16,
8, 8,
AuxInputMode::kCrossLinking,
true,
false);
rnn.SetFwWeights(std::vector<float>(weights.size(), 0.0));
rnn.SetBwWeights(std::vector<float>(weights.size(), 0.0));
rnn.SetFwBias(biases);
rnn.SetBwBias(biases);
rnn.SetFwRecurrentWeights(recurrent_weights);
rnn.SetBwRecurrentWeights(recurrent_weights);
rnn.SetAuxFwWeights(weights);
rnn.SetAuxBwWeights(weights);
std::vector<float> zero_input(rnn.input_size(), 0.f);
for (int i = 0; i < rnn.sequence_len(); i++) {
float* batch_start = rnn_input + i * rnn.input_size();
float* batch_end = batch_start + rnn.input_size();
rnn.SetInput(2 * i * rnn.input_size(), &zero_input.front(),
&zero_input.back() + 1);
rnn.SetAuxInput(2 * i * rnn.input_size(), batch_start, batch_end);
rnn.SetInput((2 * i + 1) * rnn.input_size(), &zero_input.front(),
&zero_input.back() + 1);
rnn.SetAuxInput((2 * i + 1) * rnn.input_size(), batch_start, batch_end);
}
ASSERT_EQ(rnn.Invoke(), kTfLiteOk);
std::vector<float> fw_expected;
for (int i = 0; i < rnn.sequence_len(); i++) {
float* golden_fw_start = rnn_golden_fw_output + i * rnn.num_fw_units();
float* golden_fw_end = golden_fw_start + rnn.num_fw_units();
fw_expected.insert(fw_expected.end(), golden_fw_start, golden_fw_end);
fw_expected.insert(fw_expected.end(), golden_fw_start, golden_fw_end);
}
EXPECT_THAT(rnn.GetFwOutput(), ElementsAreArray(ArrayFloatNear(fw_expected)));
}
void run_closedbox_test_with_input_split(int input_size, int aux_input_size) {
const int num_units = 16;
BidirectionalRNNOpModel rnn(2, 16,
num_units, num_units,
input_size, aux_input_size,
AuxInputMode::kCrossLinking,
false,
false);
std::vector<float> reg_weights(num_units * rnn.input_size());
std::vector<float> aux_weights(num_units * rnn.aux_input_size());
int full_weights_size = weights.size();
int reg_weights_offset = 0;
int aux_weights_offset = 0;
int weights_offset = 0;
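  // Split each row of the full weight matrix into its first input_size
  // columns (regular weights) and the remaining aux_input_size columns
  // (auxiliary weights), preserving column order.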
while (weights_offset < full_weights_size) {
std::copy(weights.begin() + weights_offset,
weights.begin() + weights_offset + rnn.input_size(),
reg_weights.begin() + reg_weights_offset);
weights_offset += rnn.input_size();
reg_weights_offset += rnn.input_size();
std::copy(weights.begin() + weights_offset,
weights.begin() + weights_offset + rnn.aux_input_size(),
aux_weights.begin() + aux_weights_offset);
weights_offset += rnn.aux_input_size();
aux_weights_offset += rnn.aux_input_size();
}
rnn.SetFwWeights(reg_weights);
rnn.SetBwWeights(reg_weights);
rnn.SetFwBias(biases);
rnn.SetBwBias(biases);
rnn.SetFwRecurrentWeights(recurrent_weights);
rnn.SetBwRecurrentWeights(recurrent_weights);
rnn.SetAuxFwWeights(aux_weights);
rnn.SetAuxBwWeights(aux_weights);
int full_input_size =
(rnn.input_size() + rnn.aux_input_size()) * rnn.sequence_len();
int reg_input_offset = 0;
int aux_input_offset = 0;
for (int batch = 0; batch < 2; ++batch) {
int input_offset = 0;
while (input_offset < full_input_size) {
rnn.SetInput(reg_input_offset, rnn_input + input_offset,
rnn_input + input_offset + rnn.input_size());
input_offset += rnn.input_size();
reg_input_offset += rnn.input_size();
rnn.SetAuxInput(aux_input_offset, rnn_input + input_offset,
rnn_input + input_offset + rnn.aux_input_size());
input_offset += rnn.aux_input_size();
aux_input_offset += rnn.aux_input_size();
}
}
ASSERT_EQ(rnn.Invoke(), kTfLiteOk);
float* golden_fw_start = rnn_golden_fw_output;
float* golden_fw_end =
golden_fw_start + rnn.num_fw_units() * rnn.sequence_len();
std::vector<float> fw_expected;
fw_expected.insert(fw_expected.end(), golden_fw_start, golden_fw_end);
fw_expected.insert(fw_expected.end(), golden_fw_start, golden_fw_end);
EXPECT_THAT(rnn.GetFwOutput(), ElementsAreArray(ArrayFloatNear(fw_expected)));
float* golden_bw_start = rnn_golden_bw_output;
float* golden_bw_end =
golden_bw_start + rnn.num_bw_units() * rnn.sequence_len();
std::vector<float> bw_expected;
bw_expected.insert(bw_expected.end(), golden_bw_start, golden_bw_end);
bw_expected.insert(bw_expected.end(), golden_bw_start, golden_bw_end);
EXPECT_THAT(rnn.GetBwOutput(), ElementsAreArray(ArrayFloatNear(bw_expected)));
}
TEST(BidirectionalRNNOpTest,
ClosedBoxTestCrossLinkingRegularAndAuxInputEvenSplit) {
run_closedbox_test_with_input_split(4, 4);
}
TEST(BidirectionalRNNOpTest,
ClosedBoxTestCrossLinkingRegularAndAuxInputUnevenSplit) {
run_closedbox_test_with_input_split(2, 6);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/bidirectional_sequence_rnn.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/bidirectional_sequence_rnn_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
74fda3c0-aa6b-4ac7-b8c6-35e66f71e68a | cpp | tensorflow/tensorflow | logical | tensorflow/lite/kernels/logical.cc | tensorflow/lite/kernels/logical_test.cc | #include <stddef.h>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/reference/binary_function.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace logical {
namespace {
constexpr int kInputTensor1 = 0;
constexpr int kInputTensor2 = 1;
constexpr int kOutputTensor = 0;
struct OpData {
bool requires_broadcast;
};
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
auto* data = new OpData;
data->requires_broadcast = false;
return data;
}
void Free(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<OpData*>(buffer);
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
OpData* data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* input1;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor1, &input1));
const TfLiteTensor* input2;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor2, &input2));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type);
const TfLiteType type = input1->type;
if (type != kTfLiteBool) {
TF_LITE_KERNEL_LOG(context, "Logical ops only support bool type.");
return kTfLiteError;
}
output->type = type;
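  // Broadcasting is required whenever the two inputs do not have identical
  // shapes; in that case the output shape is derived from both inputs.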
data->requires_broadcast = !HaveSameShapes(input1, input2);
TfLiteIntArray* output_size = nullptr;
if (data->requires_broadcast) {
TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast(
context, input1, input2, &output_size));
} else {
output_size = TfLiteIntArrayCopy(input1->dims);
}
return context->ResizeTensor(context, output, output_size);
}
TfLiteStatus LogicalImpl(TfLiteContext* context, TfLiteNode* node,
bool (*func)(bool, bool)) {
OpData* data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* input1;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor1, &input1));
const TfLiteTensor* input2;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor2, &input2));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
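  // Dispatch to the broadcasting or element-wise binary implementation,
  // applying `func` (logical OR/AND) to each pair of elements.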
if (data->requires_broadcast) {
reference_ops::BroadcastBinaryFunction4DSlow<bool, bool, bool>(
GetTensorShape(input1), GetTensorData<bool>(input1),
GetTensorShape(input2), GetTensorData<bool>(input2),
GetTensorShape(output), GetTensorData<bool>(output), func);
} else {
reference_ops::BinaryFunction<bool, bool, bool>(
GetTensorShape(input1), GetTensorData<bool>(input1),
GetTensorShape(input2), GetTensorData<bool>(input2),
GetTensorShape(output), GetTensorData<bool>(output), func);
}
return kTfLiteOk;
}
bool LogicalOr(bool x, bool y) { return x || y; }
TfLiteStatus LogicalOrEval(TfLiteContext* context, TfLiteNode* node) {
return LogicalImpl(context, node, LogicalOr);
}
bool LogicalAnd(bool x, bool y) { return x && y; }
TfLiteStatus LogicalAndEval(TfLiteContext* context, TfLiteNode* node) {
return LogicalImpl(context, node, LogicalAnd);
}
}
}
TfLiteRegistration* Register_LOGICAL_OR() {
static TfLiteRegistration r = {logical::Init, logical::Free, logical::Prepare,
logical::LogicalOrEval};
return &r;
}
TfLiteRegistration* Register_LOGICAL_AND() {
static TfLiteRegistration r = {logical::Init, logical::Free, logical::Prepare,
logical::LogicalAndEval};
return &r;
}
}
}
} | #include <initializer_list>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAre;
class LogicalOpModel : public SingleOpModel {
public:
LogicalOpModel(std::initializer_list<int> input1_shape,
std::initializer_list<int> input2_shape, BuiltinOperator op) {
input1_ = AddInput(TensorType_BOOL);
input2_ = AddInput(TensorType_BOOL);
output_ = AddOutput(TensorType_BOOL);
ConfigureBuiltinOp(op);
BuildInterpreter({input1_shape, input2_shape});
}
int input1() { return input1_; }
int input2() { return input2_; }
std::vector<bool> GetOutput() { return ExtractVector<bool>(output_); }
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
private:
int input1_;
int input2_;
int output_;
void ConfigureBuiltinOp(BuiltinOperator op) {
switch (op) {
case BuiltinOperator_LOGICAL_OR: {
SetBuiltinOp(op, BuiltinOptions_LogicalOrOptions,
CreateLogicalOrOptions(builder_).Union());
break;
}
case BuiltinOperator_LOGICAL_AND: {
SetBuiltinOp(op, BuiltinOptions_LogicalAndOptions,
CreateLogicalAndOptions(builder_).Union());
break;
}
default: { FAIL() << "We shouldn't get here."; }
}
}
};
TEST(LogicalTest, LogicalOr) {
LogicalOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, BuiltinOperator_LOGICAL_OR);
model.PopulateTensor<bool>(model.input1(), {true, false, false, true});
model.PopulateTensor<bool>(model.input2(), {true, false, true, false});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(true, false, true, true));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4));
}
TEST(LogicalTest, BroadcastLogicalOr) {
LogicalOpModel model({1, 1, 1, 4}, {1, 1, 1, 1}, BuiltinOperator_LOGICAL_OR);
model.PopulateTensor<bool>(model.input1(), {true, false, false, true});
model.PopulateTensor<bool>(model.input2(), {false});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(true, false, false, true));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4));
}
TEST(LogicalTest, LogicalAnd) {
LogicalOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, BuiltinOperator_LOGICAL_AND);
model.PopulateTensor<bool>(model.input1(), {true, false, false, true});
model.PopulateTensor<bool>(model.input2(), {true, false, true, false});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(true, false, false, false));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4));
}
TEST(LogicalTest, BroadcastLogicalAnd) {
LogicalOpModel model({1, 1, 1, 4}, {1, 1, 1, 1}, BuiltinOperator_LOGICAL_AND);
model.PopulateTensor<bool>(model.input1(), {true, false, false, true});
model.PopulateTensor<bool>(model.input2(), {true});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(true, false, false, true));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/logical.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/logical_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
112ffa20-b592-45ff-bca5-897f16c2a502 | cpp | tensorflow/tensorflow | embedding_lookup | tensorflow/lite/kernels/embedding_lookup.cc | tensorflow/lite/kernels/embedding_lookup_test.cc | #include <stdint.h>
#include <cstring>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace embedding_lookup {
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* lookup;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &lookup));
TF_LITE_ENSURE_EQ(context, NumDimensions(lookup), 1);
TF_LITE_ENSURE_EQ(context, lookup->type, kTfLiteInt32);
const TfLiteTensor* value;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &value));
TF_LITE_ENSURE(context, NumDimensions(value) >= 2);
if (value->quantization.type == kTfLiteAffineQuantization) {
const auto qparams = static_cast<const TfLiteAffineQuantization*>(
value->quantization.params);
TF_LITE_ENSURE(context, qparams->scale != nullptr);
TF_LITE_ENSURE(context, qparams->zero_point != nullptr);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
if ((value->type == kTfLiteUInt8 || value->type == kTfLiteInt8 ||
value->type == kTfLiteInt4) &&
(output->type == kTfLiteFloat32)) {
TF_LITE_ENSURE(context, qparams->zero_point->data[0] == 0);
}
if (qparams->scale->size > 1 || qparams->zero_point->size > 1) {
TF_LITE_ENSURE(context, value->type == kTfLiteUInt8 ||
value->type == kTfLiteInt8 ||
value->type == kTfLiteInt4);
TF_LITE_ENSURE(context, output->type == kTfLiteFloat32);
TF_LITE_ENSURE(context, qparams->quantized_dimension == 0);
const int row_size = SizeOfDimension(value, 0);
TF_LITE_ENSURE(context, qparams->scale->size == row_size);
TF_LITE_ENSURE(context, qparams->zero_point->size == row_size);
}
}
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
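  // Output shape: the first dimension comes from the lookup tensor; the
  // remaining dimensions are copied from the value (embedding table) tensor.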
TfLiteIntArray* output_size = TfLiteIntArrayCreate(NumDimensions(value));
output_size->data[0] = SizeOfDimension(lookup, 0);
output_size->data[1] = SizeOfDimension(value, 1);
for (int i = 2; i < NumDimensions(value); i++) {
output_size->data[i] = SizeOfDimension(value, i);
}
return context->ResizeTensor(context, output, output_size);
}
TfLiteStatus EvalSimple(TfLiteContext* context, TfLiteNode* node,
const TfLiteTensor* lookup, const TfLiteTensor* value,
TfLiteTensor* output) {
const int row_size = SizeOfDimension(value, 0);
if (row_size == 0) {
return kTfLiteOk;
}
const int64_t row_bytes = value->bytes / row_size;
char* output_raw = GetTensorData<char>(output);
const char* value_raw = GetTensorData<char>(value);
const int32_t* lookup_data = GetTensorData<int32_t>(lookup);
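  // Copy one full row (all trailing dimensions) of the value tensor into the
  // output for each lookup index, after bounds-checking the index.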
for (int i = 0; i < SizeOfDimension(lookup, 0); i++) {
int64_t idx = lookup_data[i];
if (idx >= row_size || idx < 0) {
TF_LITE_KERNEL_LOG(context,
"Embedding Lookup: index out of bounds. "
"Got %d, and bounds are [0, %d]",
                         static_cast<int>(idx), row_size - 1);
return kTfLiteError;
} else {
std::memcpy(output_raw + i * row_bytes, value_raw + idx * row_bytes,
row_bytes);
}
}
return kTfLiteOk;
}
TfLiteStatus EvalHybrid(TfLiteContext* context, TfLiteNode* node,
const TfLiteTensor* lookup, const TfLiteTensor* value,
TfLiteTensor* output) {
const int row_size = SizeOfDimension(value, 0);
int col_size = 1;
for (int i = 1; i < NumDimensions(value); i++) {
col_size *= SizeOfDimension(value, i);
}
float* output_ptr = GetTensorData<float>(output);
const int8_t* value_ptr = GetTensorData<int8_t>(value);
const int32_t* lookup_data = GetTensorData<int32_t>(lookup);
for (int i = 0; i < SizeOfDimension(lookup, 0); i++) {
int idx = lookup_data[i];
if (idx >= row_size || idx < 0) {
TF_LITE_KERNEL_LOG(context,
"Embedding Lookup: index out of bounds. "
"Got %d, and bounds are [0, %d]",
idx, row_size - 1);
return kTfLiteError;
} else {
double scaling_factor = value->params.scale;
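      // Use the per-row (axis 0) scale when the table is per-channel
      // quantized; otherwise fall back to the tensor-level scale.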
if (value->quantization.type == kTfLiteAffineQuantization) {
const auto qparams = static_cast<const TfLiteAffineQuantization*>(
value->quantization.params);
if (qparams->scale->size > 1) {
scaling_factor = qparams->scale->data[idx];
}
}
if (value->type == kTfLiteInt4) {
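        // Two int4 values are packed per byte, with the even-indexed element
        // in the low nibble; shifts sign-extend each nibble to int8.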
for (int j = 0; j < col_size; j++) {
int i8_idx = j + idx * col_size;
int i4_idx = i8_idx / 2;
bool even = i8_idx % 2 == 0;
int8_t i4_val = value_ptr[i4_idx];
int8_t i8_val =
even ? static_cast<int8_t>(i4_val << 4) >> 4 : i4_val >> 4;
output_ptr[j + i * col_size] = i8_val * scaling_factor;
}
} else {
for (int j = 0; j < col_size; j++) {
output_ptr[j + i * col_size] =
value_ptr[j + idx * col_size] * scaling_factor;
}
}
}
}
return kTfLiteOk;
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* lookup;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &lookup));
const TfLiteTensor* value;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &value));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
switch (value->type) {
case kTfLiteFloat32:
return EvalSimple(context, node, lookup, value, output);
case kTfLiteInt4:
return EvalHybrid(context, node, lookup, value, output);
case kTfLiteUInt8:
case kTfLiteInt8:
if (output->type == kTfLiteFloat32) {
return EvalHybrid(context, node, lookup, value, output);
} else {
return EvalSimple(context, node, lookup, value, output);
}
default:
TF_LITE_KERNEL_LOG(context, "Type not currently supported.");
return kTfLiteError;
}
}
}
TfLiteRegistration* Register_EMBEDDING_LOOKUP() {
static TfLiteRegistration r = {nullptr, nullptr, embedding_lookup::Prepare,
embedding_lookup::Eval};
return &r;
}
}
}
} | #include <stdint.h>
#include <functional>
#include <initializer_list>
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
float kTestTolerance = 7.41e-03;
using ::testing::ElementsAreArray;
class BaseEmbeddingLookupOpModel : public SingleOpModel {
public:
BaseEmbeddingLookupOpModel(
std::initializer_list<int> index_shape,
std::initializer_list<int> weight_shape,
TensorType weight_type = TensorType_FLOAT32,
TensorType output_type = TensorType_FLOAT32,
const std::vector<float>& per_channel_quantization_scales = {}) {
input_ = AddInput(TensorType_INT32);
if (per_channel_quantization_scales.empty()) {
weight_ = AddInput(weight_type);
} else {
std::vector<int64_t> per_channel_quantization_offsets(
per_channel_quantization_scales.size(), 0);
weight_ = AddInput({weight_type, weight_shape, 0, 0, 0, 0, true,
per_channel_quantization_scales,
per_channel_quantization_offsets, 0});
}
output_ = AddOutput(output_type);
SetBuiltinOp(BuiltinOperator_EMBEDDING_LOOKUP, BuiltinOptions_NONE, 0);
BuildInterpreter({index_shape, weight_shape});
}
void SetInput(std::initializer_list<int> data) {
PopulateTensor(input_, data);
}
template <typename T>
std::vector<T> GetOutput() {
return ExtractVector<T>(output_);
}
protected:
int input_;
int weight_;
int output_;
};
class EmbeddingLookupOpModel : public BaseEmbeddingLookupOpModel {
public:
using BaseEmbeddingLookupOpModel::BaseEmbeddingLookupOpModel;
template <typename T>
void Set3DWeightMatrix(const std::function<T(int, int, int)>& function) {
TfLiteTensor* tensor = interpreter_->tensor(weight_);
int rows = tensor->dims->data[0];
int columns = tensor->dims->data[1];
int features = tensor->dims->data[2];
T* data = GetTensorData<T>(tensor);
for (int i = 0; i < rows; i++) {
for (int j = 0; j < columns; j++) {
for (int k = 0; k < features; k++) {
data[(i * columns + j) * features + k] = function(i, j, k);
}
}
}
}
template <typename T>
void Set2DWeightMatrix(const std::function<T(int, int)>& function) {
TfLiteTensor* tensor = interpreter_->tensor(weight_);
int64_t rows = tensor->dims->data[0];
int64_t columns = tensor->dims->data[1];
T* data = GetTensorData<T>(tensor);
for (int64_t i = 0; i < rows; i++) {
for (int64_t j = 0; j < columns; j++) {
data[i * columns + j] = function(i, j);
}
}
}
};
class HybridEmbeddingLookupOpModel : public BaseEmbeddingLookupOpModel {
public:
HybridEmbeddingLookupOpModel(std::initializer_list<int> index_shape,
std::initializer_list<int> weight_shape,
TensorType type)
: BaseEmbeddingLookupOpModel(index_shape, weight_shape, type) {}
void SetWeight(std::initializer_list<float> data) {
SymmetricQuantizeAndPopulate(weight_, data);
}
void SetSignedWeight(std::initializer_list<float> data) {
SignedSymmetricQuantizeAndPopulate(weight_, data);
}
};
class PerAxisHybridEmbeddingLookupOpModel : public BaseEmbeddingLookupOpModel {
public:
PerAxisHybridEmbeddingLookupOpModel(
std::initializer_list<int> index_shape,
std::initializer_list<int> weight_shape,
const std::vector<float>& per_channel_quantization_scales,
TensorType type)
: BaseEmbeddingLookupOpModel(index_shape, weight_shape, type,
TensorType_FLOAT32,
per_channel_quantization_scales) {}
void SetSignedWeight(std::initializer_list<float> data) {
PerChannelSymmetricQuantizeAndPopulate(weight_, data);
}
};
TEST(EmbeddingLookupOpTest, SimpleTest) {
EmbeddingLookupOpModel m({3}, {3, 2, 4});
m.SetInput({1, 0, 2});
m.Set3DWeightMatrix<float>(
[](int i, int j, int k) -> float { return i + j / 10.0f + k / 100.0f; });
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(),
ElementsAreArray(ArrayFloatNear({
1.00, 1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13,
0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13,
2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13,
})));
}
#if !defined(MEMORY_SANITIZER) && !defined(GOOGLE_UNSUPPORTED_OS_LOONIX) && \
defined(__LP64__)
TEST(EmbeddingLookupOpTest, LargeTableTest) {
EmbeddingLookupOpModel m({1}, {256000, 9216});
m.SetInput({235248});
m.Set2DWeightMatrix<float>(
[](int i, int j) -> float { return j + i / 100.; });
ASSERT_EQ(m.Invoke(), kTfLiteOk);
std::vector<float> exp(9216);
for (int s = 0; s < exp.size(); s++) {
exp[s] = static_cast<float>(s) + 2352.48f;
}
EXPECT_THAT(m.GetOutput<float>(), ElementsAreArray(ArrayFloatNear(exp)));
}
#endif
TEST(HybridEmbeddingLookupHybridOpTest, Simple2DTestUint8) {
HybridEmbeddingLookupOpModel m({3}, {3, 8}, TensorType_UINT8);
m.SetInput({1, 0, 2});
m.SetWeight({
0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13,
1.00, 1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13,
2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(),
ElementsAreArray(ArrayFloatNear(
{
1.00, 1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13,
0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13,
2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13,
},
kTestTolerance)));
}
TEST(HybridEmbeddingLookupHybridOpTest, Simple3DTestUint8) {
HybridEmbeddingLookupOpModel m({3}, {3, 2, 4}, TensorType_UINT8);
m.SetInput({1, 0, 2});
m.SetWeight({
0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13,
1.00, 1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13,
2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(),
ElementsAreArray(ArrayFloatNear(
{
1.00, 1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13,
0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13,
2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13,
},
kTestTolerance)));
}
TEST(HybridEmbeddingLookupHybridOpTest, Simple4DTestUint8) {
HybridEmbeddingLookupOpModel m({3}, {3, 2, 2, 2}, TensorType_UINT8);
m.SetInput({1, 0, 2});
m.SetWeight({
0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13,
1.00, 1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13,
2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(),
ElementsAreArray(ArrayFloatNear(
{
1.00, 1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13,
0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13,
2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13,
},
kTestTolerance)));
}
TEST(HybridEmbeddingLookupHybridOpTest, Simple2DTestInt8) {
HybridEmbeddingLookupOpModel m({3}, {3, 8}, TensorType_INT8);
m.SetInput({1, 0, 2});
m.SetSignedWeight({
0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13,
1.00, -1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13,
2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(),
ElementsAreArray(ArrayFloatNear(
{
1.00, -1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13,
0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13,
2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13,
},
kTestTolerance)));
}
TEST(HybridEmbeddingLookupHybridOpTest, Simple3DTestInt8) {
HybridEmbeddingLookupOpModel m({3}, {3, 2, 4}, TensorType_INT8);
m.SetInput({1, 0, 2});
m.SetSignedWeight({
0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13,
1.00, -1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13,
2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(),
ElementsAreArray(ArrayFloatNear(
{
1.00, -1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13,
0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13,
2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13,
},
kTestTolerance)));
}
TEST(HybridEmbeddingLookupHybridOpTest, Simple4DTestInt8) {
HybridEmbeddingLookupOpModel m({3}, {3, 2, 2, 2}, TensorType_INT8);
m.SetInput({1, 0, 2});
m.SetSignedWeight({
0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13,
1.00, -1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13,
2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(),
ElementsAreArray(ArrayFloatNear(
{
1.00, -1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13,
0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13,
2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13,
},
kTestTolerance)));
}
TEST(EmbeddingLookupHybridOpTest, Simple3DTestQuantized) {
EmbeddingLookupOpModel m({3}, {3, 2, 4}, TensorType_UINT8, TensorType_INT8);
m.SetInput({1, 0, 2});
m.Set3DWeightMatrix<uint8_t>(
[](int i, int j, int k) -> uint8_t { return 100 * i + 10 * j + k; });
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<int8_t>(),
ElementsAreArray({
100, 101, 102, 103, 110, 111, 112, 113,
0, 1, 2, 3, 10, 11, 12, 13,
200, 201, 202, 203, 210, 211, 212, 213,
}));
}
TEST(PerAxisHybridEmbeddingLookupHybridOpTest, PerAxisSimple2DTestInt8) {
PerAxisHybridEmbeddingLookupOpModel m(
{3}, {3, 8}, {0.00102, 0.0089, 0.016772}, TensorType_INT8);
m.SetInput({1, 0, 2});
m.SetSignedWeight({
0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13,
1.00, -1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13,
2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(),
ElementsAreArray(ArrayFloatNear(
{
1.00, -1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13,
0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13,
2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13,
},
kTestTolerance)));
}
TEST(PerAxisHybridEmbeddingLookupHybridOpTest, PerAxisSimple3DTestInt8) {
PerAxisHybridEmbeddingLookupOpModel m(
{3}, {3, 2, 4}, {0.00102, 0.0089, 0.016772}, TensorType_INT8);
m.SetInput({1, 0, 2});
m.SetSignedWeight({
0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13,
1.00, -1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13,
2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(),
ElementsAreArray(ArrayFloatNear(
{
1.00, -1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13,
0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13,
2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13,
},
kTestTolerance)));
}
TEST(PerAxisHybridEmbeddingLookupHybridOpTest, PerAxisSimple4DTestInt8) {
PerAxisHybridEmbeddingLookupOpModel m(
{3}, {3, 2, 2, 2}, {0.00102, 0.0089, 0.016772}, TensorType_INT8);
m.SetInput({1, 0, 2});
m.SetSignedWeight({
0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13,
1.00, -1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13,
2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(),
ElementsAreArray(ArrayFloatNear(
{
1.00, -1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13,
0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13,
2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13,
},
kTestTolerance)));
}
TEST(PerAxisHybridEmbeddingLookupHybridOpTest, PerAxisSimple2DTestInt4) {
PerAxisHybridEmbeddingLookupOpModel m({3}, {3, 8}, {0.001, 0.02, 0.3},
TensorType_INT4);
m.SetInput({1, 0, 2});
m.SetSignedWeight({
0.00, 0.007, 0.006, 0.005, 0.004, 0.003, 0.002, 0.001,
0.02, -0.02, 0.04, 0.06, 0.08, -0.04, -0.08, -0.06,
0.3, 0.6, 0.9, 1.2, 1.5, -0.3, -0.6, -0.9,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(
m.GetOutput<float>(),
ElementsAreArray(ArrayFloatNear(
{
0.02, -0.02, 0.04, 0.06, 0.08, -0.04, -0.08, -0.06,
0.00, 0.007, 0.006, 0.005, 0.004, 0.003, 0.002, 0.001,
0.3, 0.6, 0.9, 1.2, 1.5, -0.3, -0.6, -0.9,
},
kTestTolerance)));
}
TEST(PerAxisHybridEmbeddingLookupHybridOpTest, PerAxisSimple3DTestInt4) {
PerAxisHybridEmbeddingLookupOpModel m({3}, {3, 2, 4}, {0.001, 0.02, 0.3},
TensorType_INT4);
m.SetInput({1, 0, 2});
m.SetSignedWeight({
0.00, 0.007, 0.006, 0.005, 0.004, 0.003, 0.002, 0.001,
0.02, -0.02, 0.04, 0.06, 0.08, -0.04, -0.08, -0.06,
0.3, 0.6, 0.9, 1.2, 1.5, -0.3, -0.6, -0.9,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(
m.GetOutput<float>(),
ElementsAreArray(ArrayFloatNear(
{
0.02, -0.02, 0.04, 0.06, 0.08, -0.04, -0.08, -0.06,
0.00, 0.007, 0.006, 0.005, 0.004, 0.003, 0.002, 0.001,
0.3, 0.6, 0.9, 1.2, 1.5, -0.3, -0.6, -0.9,
},
kTestTolerance)));
}
TEST(PerAxisHybridEmbeddingLookupHybridOpTest, PerAxisSimple4DTestInt4) {
PerAxisHybridEmbeddingLookupOpModel m({3}, {3, 2, 2, 2}, {0.001, 0.02, 0.3},
TensorType_INT4);
m.SetInput({1, 0, 2});
m.SetSignedWeight({
0.00, 0.007, 0.006, 0.005, 0.004, 0.003, 0.002, 0.001,
0.02, -0.02, 0.04, 0.06, 0.08, -0.04, -0.08, -0.06,
0.3, 0.6, 0.9, 1.2, 1.5, -0.3, -0.6, -0.9,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(
m.GetOutput<float>(),
ElementsAreArray(ArrayFloatNear(
{
0.02, -0.02, 0.04, 0.06, 0.08, -0.04, -0.08, -0.06,
0.00, 0.007, 0.006, 0.005, 0.004, 0.003, 0.002, 0.001,
0.3, 0.6, 0.9, 1.2, 1.5, -0.3, -0.6, -0.9,
},
kTestTolerance)));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/embedding_lookup.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/embedding_lookup_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0bc608bf-99a8-4a3a-a9fb-cecb4d4824b5 | cpp | tensorflow/tensorflow | l2norm | tensorflow/lite/kernels/l2norm.cc | tensorflow/lite/kernels/l2norm_test.cc | #include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/reference/integer_ops/l2normalization.h"
#include "tensorflow/lite/kernels/internal/reference/l2normalization.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace l2norm {
enum KernelType {
kReference,
kGenericOptimized,
};
constexpr int kInputTensor = 0;
constexpr int kOutputTensor = 0;
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
auto* params = reinterpret_cast<TfLiteL2NormParams*>(node->builtin_data);
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TF_LITE_ENSURE(context, NumDimensions(input) <= 4);
TF_LITE_ENSURE(context, output->type == kTfLiteFloat32 ||
output->type == kTfLiteUInt8 ||
output->type == kTfLiteInt8);
TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8) {
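    // Quantized outputs must use scale 1/128, with zero point 128 for uint8
    // and 0 for int8, matching the [-1, 1] range of L2 normalization.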
TF_LITE_ENSURE_EQ(context, output->params.scale, (1. / 128.));
if (output->type == kTfLiteUInt8) {
TF_LITE_ENSURE_EQ(context, output->params.zero_point, 128);
}
if (output->type == kTfLiteInt8) {
TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0);
}
}
TF_LITE_ENSURE_EQ(context, params->activation, kTfLiteActNone);
TfLiteIntArray* output_size = TfLiteIntArrayCopy(input->dims);
return context->ResizeTensor(context, output, output_size);
}
template <KernelType kernel_type>
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
const float epsilon = 1e-6f;
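  // The epsilon keeps the float path numerically stable when a row's L2 norm
  // is close to zero.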
if (output->type == kTfLiteFloat32) {
#define TF_LITE_L2NORM(type) \
tflite::L2NormalizationParams op_params; \
op_params.input_zero_point = 0; \
type::L2Normalization(op_params, GetTensorShape(input), \
GetTensorData<float>(input), GetTensorShape(output), \
GetTensorData<float>(output), epsilon)
if (kernel_type == kReference) {
TF_LITE_L2NORM(reference_ops);
}
if (kernel_type == kGenericOptimized) {
TF_LITE_L2NORM(optimized_ops);
}
#undef TF_LITE_L2NORM
} else if (output->type == kTfLiteUInt8) {
#define TF_LITE_L2NORM(type) \
tflite::L2NormalizationParams op_params; \
op_params.input_zero_point = input->params.zero_point; \
type::L2Normalization(op_params, GetTensorShape(input), \
GetTensorData<uint8>(input), GetTensorShape(output), \
GetTensorData<uint8>(output))
if (kernel_type == kReference) {
TF_LITE_L2NORM(reference_ops);
}
if (kernel_type == kGenericOptimized) {
TF_LITE_L2NORM(optimized_ops);
}
#undef TF_LITE_L2NORM
} else if (output->type == kTfLiteInt8) {
const auto input_shape = GetTensorShape(input);
const auto output_shape = GetTensorShape(output);
const int trailing_dim = input_shape.DimensionsCount() - 1;
const int depth =
MatchingDim(input_shape, trailing_dim, output_shape, trailing_dim);
const int outer_size =
MatchingFlatSizeSkipDim(input_shape, trailing_dim, output_shape);
reference_integer_ops::L2Normalization(input->params.zero_point, outer_size,
depth, GetTensorData<int8>(input),
GetTensorData<int8>(output));
} else {
TF_LITE_KERNEL_LOG(context, "Output type is %s, requires float.",
TfLiteTypeGetName(output->type));
return kTfLiteError;
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_L2NORM_REF() {
static TfLiteRegistration r = {nullptr, nullptr, l2norm::Prepare,
l2norm::Eval<l2norm::kReference>};
return &r;
}
TfLiteRegistration* Register_L2NORM_GENERIC_OPT() {
static TfLiteRegistration r = {nullptr, nullptr, l2norm::Prepare,
l2norm::Eval<l2norm::kGenericOptimized>};
return &r;
}
TfLiteRegistration* Register_L2_NORMALIZATION() {
return Register_L2NORM_GENERIC_OPT();
}
}
}
} | #include <stdint.h>
#include <initializer_list>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
class L2NormOpModel : public SingleOpModel {
public:
L2NormOpModel(const std::initializer_list<int> input_shape,
const TensorType tensor_type,
const ActivationFunctionType activation_type) {
TensorData data = TensorData{tensor_type};
if (tensor_type != TensorType_FLOAT32) {
data.min = -2.0;
data.max = 2.0;
data.scale = 2.0;
data.zero_point = 128;
}
input_ = AddInput(data);
if (tensor_type != TensorType_FLOAT32) {
data.min = -1.0;
data.max = 127.0 / 128.0;
}
output_ = AddOutput(data);
SetBuiltinOp(BuiltinOperator_L2_NORMALIZATION, BuiltinOptions_L2NormOptions,
CreateL2NormOptions(builder_, activation_type).Union());
BuildInterpreter({input_shape});
}
void SetInput(std::initializer_list<float> data) {
PopulateTensor(input_, data);
}
template <typename T>
std::vector<T> GetOutput() {
return ExtractVector<T>(output_);
}
template <typename T>
std::vector<float> GetDequantizedOutput() {
return Dequantize<T>(ExtractVector<T>(output_), GetScale(output_),
GetZeroPoint(output_));
}
int input() const { return input_; }
private:
int input_;
int output_;
};
TEST(L2NormOpTest, SimpleFloatTest) {
L2NormOpModel m({1, 1, 1, 6}, TensorType_FLOAT32,
ActivationFunctionType_NONE);
m.SetInput({-1.1, 0.6, 0.7, 1.2, -0.7, 0.1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(
m.GetOutput<float>(),
Pointwise(FloatingPointEq(), {-0.55, 0.3, 0.35, 0.6, -0.35, 0.05}));
}
TEST(L2NormOpTest, ZerosVectorFloatTest) {
L2NormOpModel m({1, 1, 1, 6}, TensorType_FLOAT32,
ActivationFunctionType_NONE);
m.SetInput({0, 0, 0, 0, 0, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(),
ElementsAreArray(ArrayFloatNear({0, 0, 0, 0, 0, 0})));
}
TEST(L2NormOpTest, SimpleFloatWithRankLessThanFourTest) {
L2NormOpModel m({1, 6}, TensorType_FLOAT32, ActivationFunctionType_NONE);
m.SetInput({-1.1, 0.6, 0.7, 1.2, -0.7, 0.1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(
m.GetOutput<float>(),
Pointwise(FloatingPointEq(), {-0.55, 0.3, 0.35, 0.6, -0.35, 0.05}));
}
TEST(L2NormOpTest, MultipleBatchFloatTest) {
L2NormOpModel m({3, 1, 1, 6}, TensorType_FLOAT32,
ActivationFunctionType_NONE);
m.SetInput({
-1.1, 0.6, 0.7, 1.2, -0.7, 0.1,
-1.1, 0.6, 0.7, 1.2, -0.7, 0.1,
-1.1, 0.6, 0.7, 1.2, -0.7, 0.1,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(),
Pointwise(FloatingPointEq(),
{
-0.55, 0.3, 0.35, 0.6, -0.35, 0.05,
-0.55, 0.3, 0.35, 0.6, -0.35, 0.05,
-0.55, 0.3, 0.35, 0.6, -0.35, 0.05,
}));
}
TEST(L2NormOpTest, ZerosVectorUint8Test) {
L2NormOpModel m({1, 1, 1, 6}, TensorType_UINT8, ActivationFunctionType_NONE);
m.QuantizeAndPopulate<uint8_t>(m.input(), {0, 0, 0, 0, 0, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<uint8_t>(),
ElementsAreArray({128, 128, 128, 128, 128, 128}));
EXPECT_THAT(m.GetDequantizedOutput<uint8_t>(),
ElementsAreArray(ArrayFloatNear({0, 0, 0, 0, 0, 0}, 0.1)));
}
TEST(L2NormOpTest, SimpleUint8Test) {
L2NormOpModel m({1, 1, 1, 6}, TensorType_UINT8, ActivationFunctionType_NONE);
m.QuantizeAndPopulate<uint8_t>(m.input(), {-1.1, 0.6, 0.7, 1.2, -0.7, 0.1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<uint8_t>(),
ElementsAreArray({58, 166, 173, 205, 83, 134}));
EXPECT_THAT(m.GetDequantizedOutput<uint8_t>(),
ElementsAreArray(
ArrayFloatNear({-0.55, 0.3, 0.35, 0.6, -0.35, 0.05}, 0.1)));
}
TEST(L2NormOpTest, SimpleInt8Test) {
L2NormOpModel m({1, 1, 1, 6}, TensorType_INT8, ActivationFunctionType_NONE);
m.QuantizeAndPopulate<int8_t>(m.input(), {-1.1, 0.6, 0.7, 1.2, -0.7, 0.1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<int8_t>(),
ElementsAreArray({-70, 38, 45, 77, -45, 6}));
EXPECT_THAT(m.GetDequantizedOutput<int8_t>(),
ElementsAreArray(
ArrayFloatNear({-0.55, 0.3, 0.35, 0.6, -0.35, 0.05}, 0.1)));
}
TEST(L2NormOpTest, ZerosVectorInt8Test) {
L2NormOpModel m({1, 1, 1, 6}, TensorType_INT8, ActivationFunctionType_NONE);
m.QuantizeAndPopulate<int8_t>(m.input(), {0, 0, 0, 0, 0, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<int8_t>(), ElementsAreArray({0, 0, 0, 0, 0, 0}));
EXPECT_THAT(m.GetDequantizedOutput<int8_t>(),
ElementsAreArray(ArrayFloatNear({0, 0, 0, 0, 0, 0}, 0.1)));
}
TEST(L2NormOpTest, MultipleBatchUint8Test) {
L2NormOpModel m({3, 1, 1, 6}, TensorType_UINT8, ActivationFunctionType_NONE);
m.QuantizeAndPopulate<uint8_t>(m.input(),
{
-1.1, 0.6, 0.7, 1.2, -0.7, 0.1,
-1.1, 0.6, 0.7, 1.2, -0.7, 0.1,
-1.1, 0.6, 0.7, 1.2, -0.7, 0.1,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<uint8_t>(),
ElementsAreArray({
58, 166, 173, 205, 83, 134,
58, 166, 173, 205, 83, 134,
58, 166, 173, 205, 83, 134,
}));
EXPECT_THAT(m.GetDequantizedOutput<uint8_t>(),
ElementsAreArray(ArrayFloatNear(
{
-0.55, 0.3, 0.35, 0.6, -0.35, 0.05,
-0.55, 0.3, 0.35, 0.6, -0.35, 0.05,
-0.55, 0.3, 0.35, 0.6, -0.35, 0.05,
},
0.1)));
}
TEST(L2NormOpTest, MultipleBatchInt8Test) {
L2NormOpModel m({3, 1, 1, 6}, TensorType_INT8, ActivationFunctionType_NONE);
m.QuantizeAndPopulate<int8_t>(m.input(),
{
-1.1, 0.6, 0.7, 1.2, -0.7, 0.1,
-1.1, 0.6, 0.7, 1.2, -0.7, 0.1,
-1.1, 0.6, 0.7, 1.2, -0.7, 0.1,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<int8_t>(), ElementsAreArray({
-70, 38, 45, 77, -45, 6,
-70, 38, 45, 77, -45, 6,
-70, 38, 45, 77, -45, 6,
}));
EXPECT_THAT(m.GetDequantizedOutput<int8_t>(),
ElementsAreArray(ArrayFloatNear(
{
-0.55, 0.3, 0.35, 0.6, -0.35, 0.05,
-0.55, 0.3, 0.35, 0.6, -0.35, 0.05,
-0.55, 0.3, 0.35, 0.6, -0.35, 0.05,
},
0.1)));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/l2norm.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/l2norm_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f32ef733-e25f-4261-9b9a-e3a16b86671f | cpp | tensorflow/tensorflow | resize_bilinear | tensorflow/lite/kernels/resize_bilinear.cc | tensorflow/lite/delegates/xnnpack/resize_bilinear_test.cc | #include <stdint.h>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/optimized/neon_check.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/optimized/resize_bilinear.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace resize_bilinear {
enum KernelType {
kReference,
kOptimized,
};
constexpr int kInputTensor = 0;
constexpr int kSizeTensor = 1;
constexpr int kOutputTensor = 0;
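// Builds the output shape {batch, new_height, new_width, channels}: the batch
// and channel dims are copied from the input and the spatial dims come from
// the 2-element int32 `size` tensor.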
TfLiteStatus ResizeOutputTensor(TfLiteContext* context,
const TfLiteTensor* input,
const TfLiteTensor* size,
TfLiteTensor* output) {
const int32* size_data = GetTensorData<int32>(size);
TF_LITE_ENSURE(context, size_data[0] > 0);
TF_LITE_ENSURE(context, size_data[1] > 0);
TfLiteIntArray* output_size = TfLiteIntArrayCreate(4);
output_size->data[0] = input->dims->data[0];
output_size->data[1] = size_data[0];
output_size->data[2] = size_data[1];
output_size->data[3] = input->dims->data[3];
return context->ResizeTensor(context, output, output_size);
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const TfLiteTensor* size;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kSizeTensor, &size));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4);
TF_LITE_ENSURE_EQ(context, NumDimensions(size), 1);
TF_LITE_ENSURE_EQ(context, size->type, kTfLiteInt32);
output->type = input->type;
if (!IsConstantOrPersistentTensor(size)) {
SetTensorToDynamic(output);
return kTfLiteOk;
}
auto* params =
reinterpret_cast<TfLiteResizeBilinearParams*>(node->builtin_data);
if (params->half_pixel_centers && params->align_corners) {
TF_LITE_KERNEL_LOG(
context, "If half_pixel_centers is True, align_corners must be False.");
return kTfLiteError;
}
return ResizeOutputTensor(context, input, size, output);
}
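// Eval resizes a dynamic output on first use, then expands the
// TF_LITE_RESIZE_BILINEAR macro to dispatch to the reference or optimized
// kernel for the float32, uint8, int8 and int16 output types.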
template <KernelType kernel_type>
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
auto* params =
reinterpret_cast<TfLiteResizeBilinearParams*>(node->builtin_data);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
const TfLiteTensor* size;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kSizeTensor, &size));
if (IsDynamicTensor(output)) {
TF_LITE_ENSURE_OK(context,
ResizeOutputTensor(context, input, size, output));
}
if (output->type == kTfLiteFloat32) {
#define TF_LITE_RESIZE_BILINEAR(type, opname, datatype) \
tflite::ResizeBilinearParams op_params; \
op_params.align_corners = params->align_corners; \
op_params.half_pixel_centers = params->half_pixel_centers; \
type::opname(op_params, GetTensorShape(input), \
GetTensorData<datatype>(input), GetTensorShape(size), \
GetTensorData<int32>(size), GetTensorShape(output), \
GetTensorData<datatype>(output))
if (kernel_type == kReference) {
TF_LITE_RESIZE_BILINEAR(reference_ops, ResizeBilinear, float);
} else if (kernel_type == kOptimized) {
TF_LITE_RESIZE_BILINEAR(optimized_ops, ResizeBilinear, float);
}
} else if (output->type == kTfLiteUInt8) {
if (kernel_type == kReference) {
TF_LITE_RESIZE_BILINEAR(reference_ops, ResizeBilinear, uint8_t);
} else if (kernel_type == kOptimized) {
TF_LITE_RESIZE_BILINEAR(optimized_ops, ResizeBilinear, uint8_t);
}
} else if (output->type == kTfLiteInt8) {
if (kernel_type == kReference) {
TF_LITE_RESIZE_BILINEAR(reference_ops, ResizeBilinearInteger, int8_t);
} else if (kernel_type == kOptimized) {
TF_LITE_RESIZE_BILINEAR(optimized_ops, ResizeBilinear, int8_t);
}
} else if (output->type == kTfLiteInt16) {
TF_LITE_RESIZE_BILINEAR(reference_ops, ResizeBilinearInteger, int16_t);
#undef TF_LITE_RESIZE_BILINEAR
} else {
TF_LITE_KERNEL_LOG(context, "Output type is %d, requires float.",
output->type);
return kTfLiteError;
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_RESIZE_BILINEAR_REF() {
static TfLiteRegistration r = {
nullptr, nullptr, resize_bilinear::Prepare,
resize_bilinear::Eval<resize_bilinear::kReference>};
return &r;
}
TfLiteRegistration* Register_RESIZE_BILINEAR() {
static TfLiteRegistration r = {
nullptr, nullptr, resize_bilinear::Prepare,
resize_bilinear::Eval<resize_bilinear::kOptimized>};
return &r;
}
}
}
} | #include <algorithm>
#include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/delegates/xnnpack/resize_bilinear_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
namespace tflite {
namespace xnnpack {
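// Each test below runs ResizeBilinearTester with random spatial sizes in
// [2, 10] and channel counts in [2, 16], comparing the XNNPACK delegate result
// against the default kernel for the chosen half-pixel/align-corners mode.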
TEST(ResizeBilinear, AlignCenters) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto size_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 16), std::ref(rng));
ResizeBilinearTester()
.HalfPixelCenters(true)
.InputHeight(size_rng())
.InputWidth(size_rng())
.OutputHeight(size_rng())
.OutputWidth(size_rng())
.Channels(channel_rng())
.Test(xnnpack_delegate.get());
}
TEST(ResizeBilinear, AlignCentersTF1X) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto size_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 16), std::ref(rng));
ResizeBilinearTester()
.InputHeight(size_rng())
.InputWidth(size_rng())
.OutputHeight(size_rng())
.OutputWidth(size_rng())
.Channels(channel_rng())
.Test(xnnpack_delegate.get());
}
TEST(ResizeBilinear, AlignCorners) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto size_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 16), std::ref(rng));
ResizeBilinearTester()
.AlignCorners(true)
.InputHeight(size_rng())
.InputWidth(size_rng())
.OutputHeight(size_rng())
.OutputWidth(size_rng())
.Channels(channel_rng())
.Test(xnnpack_delegate.get());
}
TEST(ResizeBilinear, MultiThreading) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
delegate_options.num_threads = 2;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto size_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 16), std::ref(rng));
ResizeBilinearTester()
.InputHeight(size_rng())
.InputWidth(size_rng())
.OutputHeight(size_rng())
.OutputWidth(size_rng())
.Channels(channel_rng())
.Test(xnnpack_delegate.get());
}
TEST(ResizeBilinear, TransientIndirectionBuffer) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
delegate_options.num_threads = 2;
delegate_options.flags |=
TFLITE_XNNPACK_DELEGATE_FLAG_TRANSIENT_INDIRECTION_BUFFER;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto size_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 16), std::ref(rng));
ResizeBilinearTester()
.InputHeight(size_rng())
.InputWidth(size_rng())
.OutputHeight(size_rng())
.OutputWidth(size_rng())
.Channels(channel_rng())
.Test(xnnpack_delegate.get());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/resize_bilinear.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/resize_bilinear_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2f120f00-d9e2-4867-8061-62cad0e7bc44 | cpp | tensorflow/tensorflow | neg | tensorflow/lite/kernels/neg.cc | tensorflow/lite/delegates/xnnpack/neg_test.cc | #include "tensorflow/lite/kernels/internal/reference/neg.h"
#include <stdint.h>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace neg {
constexpr int kInputTensor = 0;
constexpr int kOutputTensor = 0;
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
output->type = input->type;
return context->ResizeTensor(context, output,
TfLiteIntArrayCopy(input->dims));
}
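// Eval negates the input elementwise; only int64, int32 and float32 inputs are
// supported, anything else is rejected with an error.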
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
switch (input->type) {
case kTfLiteInt64:
reference_ops::Negate(
GetTensorShape(input), GetTensorData<int64_t>(input),
GetTensorShape(output), GetTensorData<int64_t>(output));
break;
case kTfLiteInt32:
reference_ops::Negate(
GetTensorShape(input), GetTensorData<int32_t>(input),
GetTensorShape(output), GetTensorData<int32_t>(output));
break;
case kTfLiteFloat32:
reference_ops::Negate(GetTensorShape(input), GetTensorData<float>(input),
GetTensorShape(output),
GetTensorData<float>(output));
break;
default:
TF_LITE_KERNEL_LOG(
context,
"Neg only currently supports int64, int32, and float32, got %d.",
input->type);
return kTfLiteError;
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_NEG() {
static TfLiteRegistration r = {nullptr, nullptr,
neg::Prepare, neg::Eval};
return &r;
}
}
}
} | #include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/delegates/xnnpack/unary_elementwise_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace xnnpack {
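// These tests feed UnaryElementwiseTester random 1D-4D shapes (each dim drawn
// from [2, 5]) to exercise BuiltinOperator_NEG under the XNNPACK delegate,
// including a two-thread delegate configuration.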
TEST(Neg, 4D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, height, width, channels})
.Test(BuiltinOperator_NEG, xnnpack_delegate.get());
}
TEST(Neg, 3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, width, channels})
.Test(BuiltinOperator_NEG, xnnpack_delegate.get());
}
TEST(Neg, 2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, channels})
.Test(BuiltinOperator_NEG, xnnpack_delegate.get());
}
TEST(Neg, 1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
UnaryElementwiseTester().Shape({batch}).Test(BuiltinOperator_NEG,
xnnpack_delegate.get());
}
TEST(Neg, MultiThreading) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
delegate_options.num_threads = 2;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, height, width, channels})
.Test(BuiltinOperator_NEG, xnnpack_delegate.get());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/neg.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/neg_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c134ac2d-3c3a-4322-9883-f66ef4a81450 | cpp | tensorflow/tensorflow | topk_v2 | tensorflow/lite/kernels/topk_v2.cc | tensorflow/lite/kernels/topk_v2_test.cc | #include <stdint.h>
#include <algorithm>
#include <iterator>
#include <vector>
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace topk_v2 {
constexpr int kInputTensor = 0;
constexpr int kInputTopK = 1;
constexpr int kOutputValues = 0;
constexpr int kOutputIndexes = 1;
namespace {
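// Reads the scalar k (int16 or int32), checks it against the innermost input
// dimension, and resizes both outputs to the input shape with the last
// dimension replaced by k.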
TfLiteStatus ResizeOutput(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* top_k;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTopK, &top_k));
TF_LITE_ENSURE(context,
top_k->type == kTfLiteInt32 || top_k->type == kTfLiteInt16);
TF_LITE_ENSURE_EQ(context, NumElements(top_k), 1);
int32 k;
if (top_k->type == kTfLiteInt16) {
k = *GetTensorData<int16_t>(top_k);
} else {
k = *GetTensorData<int32_t>(top_k);
}
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const int num_dimensions = NumDimensions(input);
  TF_LITE_ENSURE_MSG(context, input->dims->size >= 1,
                     "TopK input must have 1 or more dimensions.");
  TF_LITE_ENSURE_MSG(context, k <= input->dims->data[num_dimensions - 1],
                     "TopK k must not exceed the innermost input dimension.");
TfLiteIntArray* output_indexes_shape = TfLiteIntArrayCreate(num_dimensions);
TfLiteIntArray* output_values_shape = TfLiteIntArrayCreate(num_dimensions);
for (int i = 0; i < num_dimensions - 1; ++i) {
output_indexes_shape->data[i] = input->dims->data[i];
output_values_shape->data[i] = input->dims->data[i];
}
output_indexes_shape->data[num_dimensions - 1] = k;
output_values_shape->data[num_dimensions - 1] = k;
TfLiteTensor* output_indexes;
TF_LITE_ENSURE_OK(
context, GetOutputSafe(context, node, kOutputIndexes, &output_indexes));
TfLiteTensor* output_values;
TF_LITE_ENSURE_OK(
context, GetOutputSafe(context, node, kOutputValues, &output_values));
output_values->type = input->type;
auto resize_tensor = [context](TfLiteTensor* tensor, TfLiteIntArray* new_size,
TfLiteIntArray* delete_on_error) {
TfLiteStatus status = context->ResizeTensor(context, tensor, new_size);
if (status != kTfLiteOk) {
if (delete_on_error != nullptr) {
TfLiteIntArrayFree(delete_on_error);
}
}
return status;
};
TF_LITE_ENSURE_OK(context, resize_tensor(output_indexes, output_indexes_shape,
output_values_shape));
TF_LITE_ENSURE_OK(context,
resize_tensor(output_values, output_values_shape, nullptr));
return kTfLiteOk;
}
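// TopContainer tracks the indices of the k largest values in a row. Candidates
// are appended until k + 1 have been seen, after which they are kept as a heap
// whose front is the smallest of the current top-k; a new index replaces the
// front only if its value is larger (ties favour the smaller index), so each
// push costs O(log k). sorted_result() returns the indices ordered from the
// largest value to the smallest.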
template <typename T, typename Tidx>
class TopContainer {
public:
TopContainer() = delete;
TopContainer(int32 k, int32 row_size) : k_(k) {
container_.reserve(std::min(k, row_size) + 1);
}
void start_collecting(const T* values) {
values_ = values;
container_.clear();
is_heap_ = false;
}
void push(Tidx a) {
auto comparator = [this](Tidx a, Tidx b) { return compare_fun(a, b); };
if (!is_heap_) {
container_.push_back(a);
if (container_.size() == k_ + 1) {
std::make_heap(container_.begin(), container_.end(), comparator);
std::pop_heap(container_.begin(), container_.end(), comparator);
container_.pop_back();
is_heap_ = true;
}
} else if (comparator(a, container_.front())) {
std::pop_heap(container_.begin(), container_.end(), comparator);
container_.back() = a;
std::push_heap(container_.begin(), container_.end(), comparator);
}
}
const std::vector<Tidx>& sorted_result() {
auto comparator = [this](Tidx a, Tidx b) { return compare_fun(a, b); };
if (!is_heap_) {
std::sort(container_.begin(), container_.end(), comparator);
} else {
std::sort_heap(container_.begin(), container_.end(), comparator);
}
return container_;
}
private:
const int32 k_;
std::vector<Tidx> container_;
bool is_heap_ = false;
const T* values_ = nullptr;
bool compare_fun(Tidx a, Tidx b) const {
if (values_[b] < values_[a]) {
return true;
} else if (values_[b] > values_[a]) {
return false;
} else {
return a < b;
}
}
};
template <typename T, typename Tidx = int32>
void TopK(int32 row_size, int32 num_rows, const T* data, int32 k,
Tidx* output_indexes, T* output_values) {
TopContainer<T, Tidx> topc(k, row_size);
for (int row = 0; row < num_rows; ++row) {
const T* values_row = data + row * row_size;
topc.start_collecting(values_row);
for (int32 c = 0; c < row_size; ++c) {
topc.push(c);
}
Tidx* indexes_row = output_indexes + row * k;
T* output_row = output_values + row * k;
const auto& top_k = topc.sorted_result();
std::copy(top_k.begin(), top_k.end(), indexes_row);
std::transform(top_k.begin(), top_k.end(), output_row,
[values_row](const int32 loc) { return values_row[loc]; });
}
}
}
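// Prepare resizes the outputs eagerly when k is a constant tensor and the
// input shape is fully specified; otherwise both outputs are marked dynamic
// and resized during Eval.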
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 2);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output_values;
TF_LITE_ENSURE_OK(
context, GetOutputSafe(context, node, kOutputValues, &output_values));
TF_LITE_ENSURE_TYPES_EQ(context, input->type, output_values->type);
const TfLiteTensor* top_k;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTopK, &top_k));
  TF_LITE_ENSURE(context,
                 top_k->type == kTfLiteInt32 || top_k->type == kTfLiteInt16);
if (IsConstantOrPersistentTensor(top_k) && !HasUnspecifiedDimension(input)) {
TF_LITE_ENSURE_OK(context, ResizeOutput(context, node));
} else {
TfLiteTensor* output_indexes;
TF_LITE_ENSURE_OK(
context, GetOutputSafe(context, node, kOutputIndexes, &output_indexes));
TfLiteTensor* output_values;
TF_LITE_ENSURE_OK(
context, GetOutputSafe(context, node, kOutputValues, &output_values));
SetTensorToDynamic(output_indexes);
SetTensorToDynamic(output_values);
}
return kTfLiteOk;
}
template <typename idx_type>
TfLiteStatus TopKImpl(TfLiteContext* context, TfLiteNode* node, int32_t k,
idx_type* output_indexes) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output_values;
TF_LITE_ENSURE_OK(
context, GetOutputSafe(context, node, kOutputValues, &output_values));
const int32 row_size = input->dims->data[input->dims->size - 1];
int32 num_rows = 1;
for (int i = 0; i < input->dims->size - 1; ++i) {
num_rows *= input->dims->data[i];
}
switch (output_values->type) {
case kTfLiteFloat32:
TopK(row_size, num_rows, GetTensorData<float>(input), k, output_indexes,
GetTensorData<float>(output_values));
break;
case kTfLiteUInt8:
TopK(row_size, num_rows, GetTensorData<uint8_t>(input), k, output_indexes,
output_values->data.uint8);
break;
case kTfLiteInt8:
TopK(row_size, num_rows, GetTensorData<int8_t>(input), k, output_indexes,
output_values->data.int8);
break;
case kTfLiteInt16:
TopK(row_size, num_rows, GetTensorData<int16_t>(input), k, output_indexes,
output_values->data.i16);
break;
case kTfLiteInt32:
TopK(row_size, num_rows, GetTensorData<int32_t>(input), k, output_indexes,
output_values->data.i32);
break;
case kTfLiteInt64:
TopK(row_size, num_rows, GetTensorData<int64_t>(input), k, output_indexes,
output_values->data.i64);
break;
default:
TF_LITE_KERNEL_LOG(context, "Type %s is currently not supported by TopK.",
TfLiteTypeGetName(output_values->type));
return kTfLiteError;
}
return kTfLiteOk;
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
TfLiteTensor* output_values;
TF_LITE_ENSURE_OK(
context, GetOutputSafe(context, node, kOutputValues, &output_values));
TfLiteTensor* output_indexes;
TF_LITE_ENSURE_OK(
context, GetOutputSafe(context, node, kOutputIndexes, &output_indexes));
if (IsDynamicTensor(output_values)) {
TF_LITE_ENSURE_OK(context, ResizeOutput(context, node));
}
const TfLiteTensor* top_k;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTopK, &top_k));
int32 k;
switch (top_k->type) {
case kTfLiteInt32:
k = top_k->data.i32[0];
break;
case kTfLiteInt16:
k = top_k->data.i16[0];
break;
default:
      TF_LITE_KERNEL_LOG(context,
                         "Type %s is currently not supported as k type by TopK.",
                         TfLiteTypeGetName(top_k->type));
return kTfLiteError;
}
switch (output_indexes->type) {
case kTfLiteInt32: {
return TopKImpl(context, node, k, GetTensorData<int32_t>(output_indexes));
}
case kTfLiteInt16: {
return TopKImpl(context, node, k, GetTensorData<int16_t>(output_indexes));
}
default:
      TF_LITE_KERNEL_LOG(
          context, "Output index type %s is currently not supported by TopK.",
          TfLiteTypeGetName(output_indexes->type));
return kTfLiteError;
}
}
}
TfLiteRegistration* Register_TOPK_V2() {
static TfLiteRegistration r = {nullptr, nullptr, topk_v2::Prepare,
topk_v2::Eval};
return &r;
}
}
}
} | #include <stdint.h>
#include <initializer_list>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
enum class TestType {
kConst = 0,
kDynamic = 1,
};
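// kConst bakes k into the model as a constant tensor (outputs are resized in
// Prepare), while kDynamic feeds k at runtime and exercises the dynamic-output
// path.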
template <typename InputType>
class TopKV2OpModel : public SingleOpModel {
public:
TopKV2OpModel(int top_k, std::initializer_list<int> input_shape,
std::initializer_list<InputType> input_data,
TestType input_tensor_types) {
input_ = AddInput(GetTensorType<InputType>());
if (input_tensor_types == TestType::kDynamic) {
top_k_ = AddInput(TensorType_INT32);
} else {
top_k_ = AddConstInput(TensorType_INT32, {top_k}, {1});
}
output_values_ = AddOutput(GetTensorType<InputType>());
output_indexes_ = AddOutput(TensorType_INT32);
SetBuiltinOp(BuiltinOperator_TOPK_V2, BuiltinOptions_TopKV2Options, 0);
BuildInterpreter({input_shape, {1}});
PopulateTensor<InputType>(input_, input_data);
if (input_tensor_types == TestType::kDynamic) {
PopulateTensor<int32_t>(top_k_, {top_k});
}
}
std::vector<int32_t> GetIndexes() {
return ExtractVector<int32_t>(output_indexes_);
}
std::vector<InputType> GetValues() {
return ExtractVector<InputType>(output_values_);
}
protected:
int input_;
int top_k_;
int output_indexes_;
int output_values_;
};
class TopKV2OpTest : public ::testing::TestWithParam<TestType> {};
TEST_P(TopKV2OpTest, EqualFloat) {
TopKV2OpModel<float> m(2, {2, 2}, {-2.0, 0.2, 0.8, 0.1}, GetParam());
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetIndexes(), ElementsAreArray({1, 0, 0, 1}));
EXPECT_THAT(m.GetValues(),
ElementsAreArray(ArrayFloatNear({0.2, -2.0, 0.8, 0.1})));
}
TEST_P(TopKV2OpTest, BorderFloat) {
TopKV2OpModel<float> m(2, {2, 3}, {-2.0, -3.0, 0.2, 0.8, 0.1, -0.1},
GetParam());
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetIndexes(), ElementsAreArray({2, 0, 0, 1}));
EXPECT_THAT(m.GetValues(),
ElementsAreArray(ArrayFloatNear({0.2, -2.0, 0.8, 0.1})));
}
TEST_P(TopKV2OpTest, LargeFloat) {
TopKV2OpModel<float> m(
2, {2, 4}, {-2.0, -3.0, -4.0, 0.2, 0.8, 0.1, -0.1, -0.8}, GetParam());
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetIndexes(), ElementsAreArray({3, 0, 0, 1}));
EXPECT_THAT(m.GetValues(),
ElementsAreArray(ArrayFloatNear({0.2, -2.0, 0.8, 0.1})));
}
TEST_P(TopKV2OpTest, VectorFloat) {
TopKV2OpModel<float> m(2, {8}, {-2.0, -3.0, -4.0, 0.2, 0.8, 0.1, -0.1, -0.8},
GetParam());
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetIndexes(), ElementsAreArray({4, 3}));
EXPECT_THAT(m.GetValues(), ElementsAreArray(ArrayFloatNear({0.8, 0.2})));
}
TEST_P(TopKV2OpTest, TypeInt32) {
TopKV2OpModel<int32_t> m(2, {2, 3}, {1, 2, 3, 10251, 10250, 10249},
GetParam());
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetIndexes(), ElementsAreArray({2, 1, 0, 1}));
EXPECT_THAT(m.GetValues(), ElementsAreArray({3, 2, 10251, 10250}));
}
INSTANTIATE_TEST_SUITE_P(TopKV2OpTest, TopKV2OpTest,
::testing::Values(TestType::kConst,
TestType::kDynamic));
TEST_P(TopKV2OpTest, TypeUint8) {
TopKV2OpModel<uint8_t> m(2, {2, 3}, {1, 2, 3, 251, 250, 249}, GetParam());
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetIndexes(), ElementsAreArray({2, 1, 0, 1}));
EXPECT_THAT(m.GetValues(), ElementsAreArray({3, 2, 251, 250}));
}
TEST_P(TopKV2OpTest, TypeInt8) {
TopKV2OpModel<int8_t> m(2, {2, 3}, {1, 2, 3, -126, 125, -24}, GetParam());
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetIndexes(), ElementsAreArray({2, 1, 1, 2}));
EXPECT_THAT(m.GetValues(), ElementsAreArray({3, 2, 125, -24}));
}
TEST_P(TopKV2OpTest, TypeInt64) {
TopKV2OpModel<int64_t> m(2, {2, 3}, {1, 2, 3, -1, -2, -3}, GetParam());
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetIndexes(), ElementsAreArray({2, 1, 0, 1}));
EXPECT_THAT(m.GetValues(), ElementsAreArray({3, 2, -1, -2}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/topk_v2.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/topk_v2_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
43adc7c9-b886-457c-a5e5-6eed4f63e185 | cpp | tensorflow/tensorflow | atan2_custom | tensorflow/lite/kernels/atan2_custom.cc | tensorflow/lite/kernels/atan2_custom_test.cc | #include <cmath>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/custom_ops_register.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace custom {
namespace atan2 {
TfLiteStatus EnsureSameShape(TfLiteContext* context, const TfLiteTensor* a,
const TfLiteTensor* b) {
TF_LITE_ENSURE_EQ(context, tflite::NumDimensions(a),
tflite::NumDimensions(b));
return TfLiteStatus::kTfLiteOk;
}
TfLiteStatus Atan2Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, tflite::NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, tflite::NumOutputs(node), 1);
const TfLiteTensor* input_y = tflite::GetInput(context, node, 0);
const TfLiteTensor* input_x = tflite::GetInput(context, node, 1);
TfLiteTensor* output = tflite::GetOutput(context, node, 0);
TF_LITE_ENSURE_OK(context, EnsureSameShape(context, input_y, input_x));
TF_LITE_ENSURE_TYPES_EQ(context, input_y->type, input_x->type);
TF_LITE_ENSURE_TYPES_EQ(context, input_y->type, output->type);
TF_LITE_ENSURE(context, input_y->type == kTfLiteFloat32 ||
input_y->type == kTfLiteFloat64);
TfLiteIntArray* output_shape = TfLiteIntArrayCopy(input_y->dims);
return context->ResizeTensor(context, output, output_shape);
}
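// Computes output[i] = std::atan2(y[i], x[i]) for every element; Float is
// either float or double, matching the tensor type.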
template <typename Float>
TfLiteStatus Atan2(const TfLiteTensor* input_y, const TfLiteTensor* input_x,
TfLiteTensor* output) {
const Float* data_y = tflite::GetTensorData<Float>(input_y);
const Float* data_x = tflite::GetTensorData<Float>(input_x);
Float* data_output = tflite::GetTensorData<Float>(output);
const int64_t num_elements = NumElements(input_y);
for (int64_t i = 0; i < num_elements; ++i) {
data_output[i] = std::atan2(data_y[i], data_x[i]);
}
return TfLiteStatus::kTfLiteOk;
}
TfLiteStatus Atan2Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input_y = tflite::GetInput(context, node, 0);
const TfLiteTensor* input_x = tflite::GetInput(context, node, 1);
TfLiteTensor* output = tflite::GetOutput(context, node, 0);
switch (output->type) {
case kTfLiteFloat32:
TF_LITE_ENSURE_OK(context, Atan2<float>(input_y, input_x, output));
break;
case kTfLiteFloat64:
TF_LITE_ENSURE_OK(context, Atan2<double>(input_y, input_x, output));
break;
default: {
TF_LITE_KERNEL_LOG(context, "Unsupported datatype for atan2 output: %s",
TfLiteTypeGetName(output->type));
return TfLiteStatus::kTfLiteError;
}
}
return TfLiteStatus::kTfLiteOk;
}
}
TfLiteRegistration* Register_ATAN2() {
static TfLiteRegistration r = {nullptr, nullptr, atan2::Atan2Prepare,
atan2::Atan2Eval};
return &r;
}
}
}
} | #include <cmath>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/custom_ops_register.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/testing/util.h"
namespace tflite {
namespace {
template <typename T>
tflite::TensorType GetTTEnum();
template <>
tflite::TensorType GetTTEnum<float>() {
return tflite::TensorType_FLOAT32;
}
template <>
tflite::TensorType GetTTEnum<double>() {
return tflite::TensorType_FLOAT64;
}
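// Atan2Model registers the "atan2" custom op with y and x inputs; the typed
// tests below run the same scalar and batched checks for float and double.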
class Atan2Model : public tflite::SingleOpModel {
public:
Atan2Model(tflite::TensorData y, tflite::TensorData x,
tflite::TensorData output) {
y_ = AddInput(y);
x_ = AddInput(x);
output_ = AddOutput(output);
SetCustomOp("atan2", {}, ops::custom::Register_ATAN2);
BuildInterpreter({GetShape(y_), GetShape(x_)});
}
int y_;
int x_;
int output_;
template <typename T>
std::vector<T> GetOutput(const std::vector<T>& y, const std::vector<T>& x) {
PopulateTensor<T>(y_, y);
PopulateTensor<T>(x_, x);
Invoke();
return ExtractVector<T>(output_);
}
};
template <typename Float>
class Atan2CustomTest : public ::testing::Test {
public:
using FloatType = Float;
};
using TestTypes = ::testing::Types<float, double>;
TYPED_TEST_SUITE(Atan2CustomTest, TestTypes);
TYPED_TEST(Atan2CustomTest, TestScalar) {
using Float = typename TestFixture::FloatType;
tflite::TensorData y = {GetTTEnum<Float>(), {}};
tflite::TensorData x = {GetTTEnum<Float>(), {}};
tflite::TensorData output = {GetTTEnum<Float>(), {}};
Atan2Model m(y, x, output);
auto got = m.GetOutput<Float>({0.0}, {0.0});
ASSERT_EQ(got.size(), 1);
EXPECT_FLOAT_EQ(got[0], 0.0);
ASSERT_FLOAT_EQ(m.GetOutput<Float>({1.0}, {0.0})[0], M_PI / 2);
ASSERT_FLOAT_EQ(m.GetOutput<Float>({0.0}, {1.0})[0], 0.0);
ASSERT_FLOAT_EQ(m.GetOutput<Float>({-1.0}, {0.0})[0], -M_PI / 2);
}
TYPED_TEST(Atan2CustomTest, TestBatch) {
using Float = typename TestFixture::FloatType;
tflite::TensorData y = {GetTTEnum<Float>(), {4, 2, 1}};
tflite::TensorData x = {GetTTEnum<Float>(), {4, 2, 1}};
tflite::TensorData output = {GetTTEnum<Float>(), {4, 2, 1}};
Atan2Model m(y, x, output);
std::vector<Float> y_data = {0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8};
std::vector<Float> x_data = {0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1};
auto got = m.GetOutput<Float>(y_data, x_data);
ASSERT_EQ(got.size(), 8);
for (int i = 0; i < 8; ++i) {
EXPECT_FLOAT_EQ(got[i], std::atan2(y_data[i], x_data[i]));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/atan2_custom.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/atan2_custom_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b1d8c80a-0c7a-41ea-a5ca-6806d5cb43e9 | cpp | tensorflow/tensorflow | stablehlo_add | tensorflow/lite/kernels/stablehlo_add.cc | tensorflow/lite/kernels/stablehlo_add_test.cc | #include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/stablehlo_elementwise.h"
namespace tflite::ops::builtin {
TfLiteRegistration* Register_STABLEHLO_ADD() {
static TfLiteRegistration r = {nullptr, nullptr, ElementwisePrepare,
ElementwiseEval<ComputationType::kAdd>};
return &r;
}
} | #include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAre;
class AddOpModel : public SingleOpModel {
public:
AddOpModel(const TensorData& input1, const TensorData& input2,
const TensorData& output) {
input1_ = AddInput(input1);
input2_ = AddInput(input2);
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_STABLEHLO_ADD, BuiltinOptions_NONE, 0);
SetBypassDefaultDelegates();
BuildInterpreter({GetShape(input1_), GetShape(input2_)});
}
int input1() { return input1_; }
int input2() { return input2_; }
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
protected:
int input1_;
int input2_;
int output_;
};
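// Adds two 1x2x2x1 tensors elementwise. SetBypassDefaultDelegates() is
// presumably used so the reference STABLEHLO_ADD kernel itself is exercised
// rather than a delegate implementation.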
TEST(StablehloElementwise, AddWorks) {
AddOpModel model({TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}});
model.PopulateTensor<float>(model.input1(), {-2.0, 0.2, 0.7, 0.8});
model.PopulateTensor<float>(model.input2(), {0.1, 0.2, 0.3, 0.5});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(-1.9, 0.4, 1.0, 1.3));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/stablehlo_add.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/stablehlo_add_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b3b06e56-e085-401e-9f5d-336551870fb7 | cpp | tensorflow/tensorflow | add | tensorflow/lite/delegates/gpu/gl/kernels/add.cc | tensorflow/lite/delegates/xnnpack/add_test.cc | #include "tensorflow/lite/delegates/gpu/gl/kernels/add.h"
#include <algorithm>
#include <any>
#include <cstdint>
#include <cstring>
#include <memory>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/memory/memory.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/lite/delegates/gpu/common/convert.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
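// Emits GLSL for elementwise addition. Separate paths handle: a constant HWC
// tensor broadcast over the input, two runtime inputs where the second is
// 1x1xC, N equally shaped runtime inputs summed together, a scalar addend, and
// a per-channel bias read from a linear buffer.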
class Add : public NodeShader {
public:
absl::Status GenerateCode(const GenerationContext& ctx,
GeneratedCode* generated_code) const final {
const auto& attr = std::any_cast<const ElementwiseAttributes&>(ctx.op_attr);
auto adds = std::get_if<Tensor<Linear, DataType::FLOAT32>>(&attr.param);
auto scalar = std::get_if<float>(&attr.param);
const auto* hwc_tensor =
std::get_if<Tensor<HWC, DataType::FLOAT32>>(&attr.param);
if (hwc_tensor) {
std::string code;
const std::string x_coord = hwc_tensor->shape.w == 1 ? "0" : "gid.x";
const std::string y_coord = hwc_tensor->shape.h == 1 ? "0" : "gid.y";
const std::string s_coord = hwc_tensor->shape.c == 1 ? "0" : "gid.z";
code = absl::StrCat("vec4 second_val = $hwc_buffer[", x_coord, ", ",
y_coord, ", ", s_coord, "]$;\n");
if (hwc_tensor->shape.c == 1) {
code += " second_val.y = second_val.x;\n";
code += " second_val.z = second_val.x;\n";
code += " second_val.w = second_val.x;\n";
}
code += " value_0 += second_val;\n";
*generated_code = {
{},
{{"hwc_buffer",
MakeReadonlyObject(
uint3(hwc_tensor->shape.w, hwc_tensor->shape.h,
DivideRoundUp(hwc_tensor->shape.c, 4)),
ConvertToPHWC4(
std::get<Tensor<HWC, DataType::FLOAT32>>(attr.param)))}},
{},
uint3(static_cast<int>(ctx.input_shapes[0][2]),
static_cast<int>(ctx.input_shapes[0][1]),
DivideRoundUp(static_cast<int>(ctx.input_shapes[0][3]), 4)),
uint3(),
std::move(code),
IOStructure::AUTO,
IOStructure::AUTO,
};
return absl::OkStatus();
}
if (!adds && !scalar) {
if (ctx.input_shapes.size() == 2 &&
ctx.input_shapes[0] != ctx.input_shapes[1] &&
ctx.input_shapes[1][1] == 1 && ctx.input_shapes[1][2] == 1 &&
ctx.input_shapes[0][3] == ctx.input_shapes[1][3]) {
*generated_code = {
{},
{},
{},
uint3(),
uint3(),
"value_0 = $input_data_0[gid.x, gid.y, gid.z]$ + "
" $input_data_1[0, 0, gid.z]$;",
IOStructure::ONLY_DEFINITIONS,
IOStructure::AUTO,
};
return absl::OkStatus();
}
std::string code = "value_0 = value_0";
for (int index = 1; index < ctx.input_shapes.size(); ++index) {
if (ctx.input_shapes[index] != ctx.input_shapes[0]) {
return absl::InvalidArgumentError("Shapes are not equal");
}
absl::StrAppend(&code, " + value_", index);
}
absl::StrAppend(&code, ";");
*generated_code = {
{},
{},
{},
uint3(),
uint3(),
std::move(code),
IOStructure::AUTO,
IOStructure::AUTO,
};
return absl::OkStatus();
}
if (scalar) {
*generated_code = {
{{"scalar", *scalar}},
{},
{},
uint3(),
uint3(),
"value_0 += $scalar$;",
IOStructure::AUTO,
IOStructure::AUTO,
};
return absl::OkStatus();
}
*generated_code = {
{},
{{"add_buffer", MakeReadonlyObject(adds->data)}},
{},
uint3(ctx.input_shapes[0][2], ctx.input_shapes[0][1],
DivideRoundUp(ctx.input_shapes[0][3], 4)),
uint3(),
"value_0 += $add_buffer[gid.z]$;",
IOStructure::AUTO,
IOStructure::AUTO,
};
return absl::OkStatus();
}
};
}
std::unique_ptr<NodeShader> NewAddNodeShader() {
return std::make_unique<Add>();
}
}
}
} | #include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/delegates/xnnpack/binary_elementwise_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace xnnpack {
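// Broadcasting coverage for BuiltinOperator_ADD under the XNNPACK delegate:
// each test pairs a full-rank shape with a lower-rank or size-1-dim shape in
// both argument orders, and the *Static* variants additionally make one of the
// inputs a constant (static) tensor.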
TEST(Add, 4DBy4D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
}
TEST(Add, 4DBy4DBroadcastChannels) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, 1, 1, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, 1, 1, channels})
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
}
TEST(Add, 4DBy4DBroadcastWidth) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, 1, width, 1})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, 1, width, 1})
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
}
TEST(Add, 4DBy4DBroadcastHeight) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, height, 1, 1})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, height, 1, 1})
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
}
TEST(Add, 4DBy4DBroadcastBatch) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, 1, 1, 1})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, 1, 1, 1})
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
}
TEST(Add, 4DBy4DBroadcastHeightWidthChannels) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, height, width, channels})
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
}
TEST(Add, 4DBy3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({height, width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({height, width, channels})
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
}
TEST(Add, 4DBy2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({width, channels})
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
}
TEST(Add, 4DBy1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({channels})
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
}
TEST(Add, 4DBy0D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({})
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
}
TEST(Add, 2DBy2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({batch, channels})
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
}
TEST(Add, 2DBy1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({channels})
.Input2Shape({batch, channels})
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({channels})
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
}
TEST(Add, 2DBy0D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({})
.Input2Shape({batch, channels})
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({})
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
}
TEST(Add, 4DByStatic4D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
}
TEST(Add, 4DByStatic4DBroadcastChannels) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, 1, 1, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, 1, 1, channels})
.Input2Static(true)
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
}
TEST(Add, 4DByStatic4DBroadcastWidth) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, 1, width, 1})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, 1, width, 1})
.Input2Static(true)
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
}
TEST(Add, 4DByStatic4DBroadcastHeight) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, height, 1, 1})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, height, 1, 1})
.Input2Static(true)
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
}
TEST(Add, 4DByStatic4DBroadcastBatch) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, 1, 1, 1})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, 1, 1, 1})
.Input2Static(true)
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
}
TEST(Add, 4DByStatic4DBroadcastHeightWidthChannels) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, height, width, channels})
.Input2Static(true)
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
}
TEST(Add, 4DByStatic3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({height, width, channels})
.Input2Static(true)
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
}
TEST(Add, 4DByStatic2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({width, channels})
.Input2Static(true)
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
}
TEST(Add, 4DByStatic1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({channels})
.Input2Static(true)
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
}
TEST(Add, 4DByStatic0D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({})
.Input2Static(true)
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
}
TEST(Add, 2DByStatic2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({batch, channels})
.Input1Static(true)
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({batch, channels})
.Input2Static(true)
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
}
TEST(Add, 2DByStatic1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({channels})
.Input2Shape({batch, channels})
.Input1Static(true)
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({channels})
.Input2Static(true)
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
}
TEST(Add, 2DByStatic0D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({})
.Input2Shape({batch, channels})
.Input1Static(true)
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({})
.Input2Static(true)
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
}
TEST(Add, FP16Weights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.FP16Weights()
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.FP16Weights()
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
}
TEST(Add, INT8Weights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.INT8Weights()
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.INT8Weights()
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
}
TEST(Add, INT8ChannelWiseWeights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.INT8ChannelWiseWeights()
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.INT8ChannelWiseWeights()
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
}
TEST(Add, SparseWeights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.SparseWeights()
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.SparseWeights()
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
}
TEST(Add, ReluActivation) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.ReluActivation()
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
}
TEST(Add, Relu6Activation) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Relu6Activation()
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
}
TEST(Add, ReluMinus1To1Activation) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.ReluMinus1To1Activation()
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
}
TEST(Add, DISABLED_TanhActivation) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.TanhActivation()
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
}
TEST(Add, DISABLED_SignBitActivation) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.SignBitActivation()
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
}
TEST(Add, MultiThreading) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
delegate_options.num_threads = 2;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_ADD, xnnpack_delegate.get());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/kernels/add.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/add_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c677a6b6-3049-486c-ad27-da23b7b21c14 | cpp | tensorflow/tensorflow | floor_div | tensorflow/lite/kernels/floor_div.cc | tensorflow/lite/kernels/floor_div_test.cc | #include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <functional>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/reference/binary_function.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace floor_div {
namespace {
constexpr int kInputTensor1 = 0;
constexpr int kInputTensor2 = 1;
constexpr int kOutputTensor = 0;
struct OpData {
bool requires_broadcast;
};
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
auto* data = new OpData;
data->requires_broadcast = false;
return data;
}
void Free(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<OpData*>(buffer);
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
OpData* data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* input1;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor1, &input1));
const TfLiteTensor* input2;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor2, &input2));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type);
const TfLiteType type = input1->type;
switch (type) {
case kTfLiteFloat32:
case kTfLiteInt32:
case kTfLiteInt16:
case kTfLiteInt8:
break;
default:
TF_LITE_KERNEL_LOG(context, "Type '%s' is not supported by floor_div.",
TfLiteTypeGetName(type));
return kTfLiteError;
}
output->type = type;
data->requires_broadcast = !HaveSameShapes(input1, input2);
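  // The output takes the broadcast shape when the input shapes differ;
  // otherwise it is simply a copy of input1's shape.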
TfLiteIntArray* output_size = nullptr;
if (data->requires_broadcast) {
TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast(
context, input1, input2, &output_size));
} else {
output_size = TfLiteIntArrayCopy(input1->dims);
}
return context->ResizeTensor(context, output, output_size);
}
template <typename T>
TfLiteStatus EvalImpl(TfLiteContext* context, bool requires_broadcast,
const TfLiteTensor* input1, const TfLiteTensor* input2,
TfLiteTensor* output) {
const T* denominator_data = GetTensorData<T>(input2);
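  // Fail fast if any denominator element is zero before running the kernel.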
for (int i = 0; i < NumElements(input2); ++i) {
if (std::equal_to<T>()(denominator_data[i], 0)) {
TF_LITE_KERNEL_LOG(context, "Division by 0");
return kTfLiteError;
}
}
if (requires_broadcast) {
reference_ops::BroadcastBinaryFunction4DSlow<T, T, T>(
GetTensorShape(input1), GetTensorData<T>(input1),
GetTensorShape(input2), denominator_data, GetTensorShape(output),
GetTensorData<T>(output), reference_ops::FloorDiv<T>);
} else {
reference_ops::BinaryFunction<T, T, T>(
GetTensorShape(input1), GetTensorData<T>(input1),
GetTensorShape(input2), GetTensorData<T>(input2),
GetTensorShape(output), GetTensorData<T>(output),
reference_ops::FloorDiv<T>);
}
return kTfLiteOk;
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
OpData* data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* input1;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor1, &input1));
const TfLiteTensor* input2;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor2, &input2));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
switch (input1->type) {
case kTfLiteInt8: {
return EvalImpl<int8_t>(context, data->requires_broadcast, input1, input2,
output);
}
case kTfLiteInt16: {
return EvalImpl<int16_t>(context, data->requires_broadcast, input1,
input2, output);
}
case kTfLiteInt32: {
return EvalImpl<int32_t>(context, data->requires_broadcast, input1,
input2, output);
}
case kTfLiteFloat32: {
return EvalImpl<float>(context, data->requires_broadcast, input1, input2,
output);
}
default: {
TF_LITE_KERNEL_LOG(context, "Type '%s' is not supported by floor_div.",
TfLiteTypeGetName(input1->type));
return kTfLiteError;
}
}
}
}
}
TfLiteRegistration* Register_FLOOR_DIV() {
static TfLiteRegistration r = {floor_div::Init, floor_div::Free,
floor_div::Prepare, floor_div::Eval};
return &r;
}
}
}
} | #include <stdint.h>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAre;
template <typename T>
class FloorDivModel : public SingleOpModel {
public:
FloorDivModel(const TensorData& input1, const TensorData& input2,
const TensorData& output) {
input1_ = AddInput(input1);
input2_ = AddInput(input2);
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_FLOOR_DIV, BuiltinOptions_FloorDivOptions,
CreateFloorDivOptions(builder_).Union());
BuildInterpreter({GetShape(input1_), GetShape(input2_)});
}
int input1() { return input1_; }
int input2() { return input2_; }
std::vector<T> GetOutput() { return ExtractVector<T>(output_); }
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
private:
int input1_;
int input2_;
int output_;
};
TEST(FloorDivModel, Simple) {
FloorDivModel<int32_t> model({TensorType_INT32, {1, 2, 2, 1}},
{TensorType_INT32, {1, 2, 2, 1}},
{TensorType_INT32, {}});
model.PopulateTensor<int32_t>(model.input1(), {10, 9, 11, 3});
model.PopulateTensor<int32_t>(model.input2(), {2, 2, 3, 4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(), ElementsAre(5, 4, 3, 0));
}
TEST(FloorDivModel, NegativeValue) {
FloorDivModel<int32_t> model({TensorType_INT32, {1, 2, 2, 1}},
{TensorType_INT32, {1, 2, 2, 1}},
{TensorType_INT32, {}});
model.PopulateTensor<int32_t>(model.input1(), {10, -9, -11, 7});
model.PopulateTensor<int32_t>(model.input2(), {2, 2, -3, -4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(), ElementsAre(5, -5, 3, -2));
}
TEST(FloorDivModel, BroadcastFloorDiv) {
FloorDivModel<int32_t> model({TensorType_INT32, {1, 2, 2, 1}},
{TensorType_INT32, {1}}, {TensorType_INT32, {}});
model.PopulateTensor<int32_t>(model.input1(), {10, -9, -11, 7});
model.PopulateTensor<int32_t>(model.input2(), {-3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(), ElementsAre(-4, 3, 3, -3));
}
TEST(FloorDivModel, SimpleFloat) {
FloorDivModel<float> model({TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}});
model.PopulateTensor<float>(model.input1(), {10.05, 9.09, 11.9, 3.01});
model.PopulateTensor<float>(model.input2(), {2.05, 2.03, 3.03, 4.03});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(), ElementsAre(4.0, 4.0, 3.0, 0.0));
}
TEST(FloorDivModel, NegativeValueFloat) {
FloorDivModel<float> model({TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}});
model.PopulateTensor<float>(model.input1(), {10.03, -9.9, -11.0, 7.0});
model.PopulateTensor<float>(model.input2(), {2.0, 2.3, -3.0, -4.1});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(), ElementsAre(5.0, -5.0, 3.0, -2.0));
}
TEST(FloorDivModel, BroadcastFloorDivFloat) {
FloorDivModel<float> model({TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1}},
{TensorType_FLOAT32, {}});
model.PopulateTensor<float>(model.input1(), {10.03, -9.9, -11.0, 7.0});
model.PopulateTensor<float>(model.input2(), {-3.3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(), ElementsAre(-4.0, 2.0, 3.0, -3.0));
}
TEST(FloorDivModel, SimpleInt16) {
FloorDivModel<int16_t> model({TensorType_INT16, {1, 2, 2, 1}},
{TensorType_INT16, {1, 2, 2, 1}},
{TensorType_INT16, {}});
model.PopulateTensor<int16_t>(model.input1(), {10, 9, 11, 3});
model.PopulateTensor<int16_t>(model.input2(), {2, 2, 3, 4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(), ElementsAre(5, 4, 3, 0));
}
TEST(FloorDivModel, NegativeValueInt16) {
FloorDivModel<int16_t> model({TensorType_INT16, {1, 2, 2, 1}},
{TensorType_INT16, {1, 2, 2, 1}},
{TensorType_INT16, {}});
model.PopulateTensor<int16_t>(model.input1(), {10, -9, -11, 7});
model.PopulateTensor<int16_t>(model.input2(), {2, 2, -3, -4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(), ElementsAre(5, -5, 3, -2));
}
TEST(FloorDivModel, BroadcastFloorDivInt16) {
FloorDivModel<int16_t> model({TensorType_INT16, {1, 2, 2, 1}},
{TensorType_INT16, {1}}, {TensorType_INT16, {}});
model.PopulateTensor<int16_t>(model.input1(), {10, -9, -11, 7});
model.PopulateTensor<int16_t>(model.input2(), {-3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(), ElementsAre(-4, 3, 3, -3));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/floor_div.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/floor_div_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6df479bc-f418-4217-a178-bcaed157eb1d | cpp | tensorflow/tensorflow | bcast_grad_args | tensorflow/lite/kernels/gradient/bcast_grad_args.cc | tensorflow/lite/kernels/gradient/bcast_grad_args_test.cc | #include <algorithm>
#include <array>
#include <cmath>
#include <cstdint>
#include <vector>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/runtime_shape.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace custom {
namespace {
static const int kInputOneTensor = 0;
static const int kInputTwoTensor = 1;
static const int kOutputOneTensor = 0;
static const int kOutputTwoTensor = 1;
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
const TfLiteTensor* input1 = GetInput(context, node, kInputOneTensor);
TF_LITE_ENSURE(context, input1 != nullptr);
const RuntimeShape input1_shape = GetTensorShape(input1);
TF_LITE_ENSURE(context,
input1->type == kTfLiteInt32 || input1->type == kTfLiteInt64);
TF_LITE_ENSURE_EQ(context, input1_shape.DimensionsCount(), 1);
const TfLiteTensor* input2 = GetInput(context, node, kInputTwoTensor);
TF_LITE_ENSURE(context, input2 != nullptr);
const RuntimeShape input2_shape = GetTensorShape(input2);
TF_LITE_ENSURE_TYPES_EQ(context, input2->type, input1->type);
TF_LITE_ENSURE_EQ(context, input2_shape.DimensionsCount(), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 2);
TfLiteTensor* output1 = GetOutput(context, node, kOutputOneTensor);
TF_LITE_ENSURE(context, output1 != nullptr);
TF_LITE_ENSURE_TYPES_EQ(context, output1->type, input1->type);
TfLiteTensor* output2 = GetOutput(context, node, kOutputTwoTensor);
TF_LITE_ENSURE(context, output2 != nullptr);
TF_LITE_ENSURE_TYPES_EQ(context, output2->type, input1->type);
SetTensorToDynamic(output1);
SetTensorToDynamic(output2);
return kTfLiteOk;
}
TfLiteStatus Invoke(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input1 = GetInput(context, node, kInputOneTensor);
TF_LITE_ENSURE(context, input1 != nullptr);
const RuntimeShape input1_shape = GetTensorShape(input1);
const TfLiteTensor* input2 = GetInput(context, node, kInputTwoTensor);
TF_LITE_ENSURE(context, input2 != nullptr);
const RuntimeShape input2_shape = GetTensorShape(input2);
TfLiteTensor* output1 = GetOutput(context, node, kOutputOneTensor);
TF_LITE_ENSURE(context, output1 != nullptr);
TfLiteTensor* output2 = GetOutput(context, node, kOutputTwoTensor);
TF_LITE_ENSURE(context, output2 != nullptr);
std::vector<int64_t> input1_vec;
std::vector<int64_t> input2_vec;
if (input1->type == kTfLiteInt32) {
input1_vec = std::vector<int64_t>(input1->data.i32,
input1->data.i32 + input1_shape.Dims(0));
} else {
input1_vec = std::vector<int64_t>(input1->data.i64,
input1->data.i64 + input1_shape.Dims(0));
}
if (input2->type == kTfLiteInt32) {
input2_vec = std::vector<int64_t>(input2->data.i32,
input2->data.i32 + input2_shape.Dims(0));
} else {
input2_vec = std::vector<int64_t>(input2->data.i64,
input2->data.i64 + input2_shape.Dims(0));
}
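  // Identical shapes broadcast trivially: both outputs are empty index lists.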
if (input1_vec == input2_vec) {
TfLiteIntArray* output1_shape = TfLiteIntArrayCreate(1);
output1_shape->data[0] = 0;
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, output1, output1_shape));
TfLiteIntArray* output2_shape = TfLiteIntArrayCreate(1);
output2_shape->data[0] = 0;
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, output2, output2_shape));
return kTfLiteOk;
}
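  // Compare the shapes innermost-first: reverse both and pad the shorter one
  // with 1s so they share the same rank.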
size_t largest_rank = std::max(input1_vec.size(), input2_vec.size());
std::vector<int64_t> copy[2];
copy[0] = std::vector<int64_t>(input1_vec.rbegin(), input1_vec.rend());
copy[1] = std::vector<int64_t>(input2_vec.rbegin(), input2_vec.rend());
for (int i = 0; i < 2; ++i) {
if (copy[i].size() < largest_rank) {
copy[i].resize(largest_rank, 1);
}
}
std::array<bool, 2> prev_is_one = {false, false};
std::array<bool, 2> current_is_one = {false, false};
bool set_one = false;
std::vector<int64_t> grad_reduce_idx[2];
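  // An input broadcasts along axis j when its dimension is 1 and the other
  // input's dimension is larger; such axes must be summed over in the
  // gradient. When both dimensions are 1 the axis is recorded for both inputs.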
for (int j = 0; j < largest_rank; ++j) {
int output_dim = -1;
    bool output_dim_set = false;
bool none_is_one = true;
for (int i = 0; i < 2; ++i) {
if (copy[i][j] == 1) {
current_is_one[i] = true;
none_is_one = false;
} else {
current_is_one[i] = false;
if (!output_dim_set || copy[i][j] == output_dim) {
output_dim = copy[i][j];
output_dim_set = true;
} else {
return kTfLiteError;
}
}
}
if (!output_dim_set) {
for (int i = 0; i < 2; ++i) {
grad_reduce_idx[i].push_back(largest_rank - 1 - j);
}
continue;
} else if (current_is_one == prev_is_one && set_one) {
for (int i = 0; i < 2; ++i) {
if (current_is_one[i] && !none_is_one) {
grad_reduce_idx[i].push_back(largest_rank - 1 - j);
}
}
} else {
for (int i = 0; i < 2; ++i) {
if (current_is_one[i] && !none_is_one) {
grad_reduce_idx[i].push_back(largest_rank - 1 - j);
}
}
}
set_one = true;
for (int i = 0; i < 2; ++i) {
prev_is_one[i] = current_is_one[i];
}
}
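  // Indices were collected from the innermost axis outward; reverse them into
  // ascending axis order before writing the outputs.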
for (int i = 0; i < 2; ++i) {
std::reverse(grad_reduce_idx[i].begin(), grad_reduce_idx[i].end());
}
TfLiteIntArray* output1_shape = TfLiteIntArrayCreate(1);
output1_shape->data[0] = grad_reduce_idx[0].size();
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, output1, output1_shape));
if (output1->type == kTfLiteInt32) {
for (int i = 0; i < grad_reduce_idx[0].size(); ++i) {
output1->data.i32[i] = grad_reduce_idx[0][i];
}
} else if (output1->type == kTfLiteInt64) {
for (int i = 0; i < grad_reduce_idx[0].size(); ++i) {
output1->data.i64[i] = grad_reduce_idx[0][i];
}
}
TfLiteIntArray* output2_shape = TfLiteIntArrayCreate(1);
output2_shape->data[0] = grad_reduce_idx[1].size();
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, output2, output2_shape));
if (output2->type == kTfLiteInt32) {
for (int i = 0; i < grad_reduce_idx[1].size(); ++i) {
output2->data.i32[i] = grad_reduce_idx[1][i];
}
} else if (output2->type == kTfLiteInt64) {
for (int i = 0; i < grad_reduce_idx[1].size(); ++i) {
output2->data.i64[i] = grad_reduce_idx[1][i];
}
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_BROADCAST_GRADIENT_ARGS() {
static TfLiteRegistration reg = {nullptr,
nullptr,
Prepare,
Invoke};
  return &reg;
}
}
}
} | #include "tensorflow/lite/kernels/gradient/bcast_grad_args.h"
#include <cstdint>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace ops {
namespace custom {
namespace {
using testing::ElementsAreArray;
class BcastGradArgsInt32OpModel : public SingleOpModel {
public:
BcastGradArgsInt32OpModel(const TensorData& input1, const TensorData& input2,
const TensorData& output1,
const TensorData& output2) {
input1_ = AddInput(input1);
input2_ = AddInput(input2);
output1_ = AddOutput(output1);
output2_ = AddOutput(output2);
std::vector<uint8_t> custom_option;
SetCustomOp("BroadcastGradientArgs", custom_option,
Register_BROADCAST_GRADIENT_ARGS);
BuildInterpreter({GetShape(input1_), GetShape(input2_)});
}
void SetInput1(const std::vector<int>& data) {
PopulateTensor(input1_, data);
}
void SetInput2(const std::vector<int>& data) {
PopulateTensor(input2_, data);
}
std::vector<int> GetOutput1() { return ExtractVector<int>(output1_); }
std::vector<int> GetOutput1Shape() { return GetTensorShape(output1_); }
std::vector<int> GetOutput2() { return ExtractVector<int>(output2_); }
std::vector<int> GetOutput2Shape() { return GetTensorShape(output2_); }
protected:
int input1_;
int input2_;
int output1_;
int output2_;
};
TEST(BcastGradArgsInt32OpModel, AllEqualsInt32DTypes) {
BcastGradArgsInt32OpModel model(
{TensorType_INT32, {4}},
{TensorType_INT32, {4}},
{TensorType_INT32, {}},
{TensorType_INT32, {}});
model.SetInput1({3, 1, 2, 3});
model.SetInput2({3, 1, 2, 3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput1().size(), 0);
EXPECT_THAT(model.GetOutput2().size(), 0);
}
TEST(BcastGradArgsInt32OpModel, BroadcastableDimAtInput1Int32DTypes) {
BcastGradArgsInt32OpModel model(
{TensorType_INT32, {4}},
{TensorType_INT32, {4}},
{TensorType_INT32, {}},
{TensorType_INT32, {}});
model.SetInput1({3, 4, 1, 3});
model.SetInput2({3, 4, 2, 3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput1(), ElementsAreArray({2}));
EXPECT_THAT(model.GetOutput2().size(), 0);
}
TEST(BcastGradArgsInt32OpModel, BroadcastableDimAtInput2Int32DTypes) {
BcastGradArgsInt32OpModel model(
{TensorType_INT32, {4}},
{TensorType_INT32, {4}},
{TensorType_INT32, {}},
{TensorType_INT32, {}});
model.SetInput1({3, 4, 2, 3});
model.SetInput2({3, 1, 2, 3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput1().size(), 0);
EXPECT_THAT(model.GetOutput2(), ElementsAreArray({1}));
}
TEST(BcastGradArgsInt32OpModel, DifferentInputSizesInt32DTypes) {
BcastGradArgsInt32OpModel model(
{TensorType_INT32, {4}},
{TensorType_INT32, {3}},
{TensorType_INT32, {}},
{TensorType_INT32, {}});
model.SetInput1({3, 4, 2, 3});
model.SetInput2({4, 2, 3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput1().size(), 0);
EXPECT_THAT(model.GetOutput2(), ElementsAreArray({0}));
}
TEST(BcastGradArgsInt32OpModel, NonBroadcastableDimsInt32DTypes) {
BcastGradArgsInt32OpModel model(
{TensorType_INT32, {4}},
{TensorType_INT32, {4}},
{TensorType_INT32, {}},
{TensorType_INT32, {}});
model.SetInput1({3, 4, 2, 3});
model.SetInput2({9, 9, 9, 9});
EXPECT_THAT(model.Invoke(), kTfLiteError);
}
class BcastGradArgsInt64OpModel : public SingleOpModel {
public:
BcastGradArgsInt64OpModel(const TensorData& input1, const TensorData& input2,
const TensorData& output1,
const TensorData& output2) {
input1_ = AddInput(input1);
input2_ = AddInput(input2);
output1_ = AddOutput(output1);
output2_ = AddOutput(output2);
std::vector<uint8_t> custom_option;
SetCustomOp("BroadcastGradientArgs", custom_option,
Register_BROADCAST_GRADIENT_ARGS);
BuildInterpreter({GetShape(input1_), GetShape(input2_)});
}
void SetInput1(const std::vector<int64_t>& data) {
PopulateTensor(input1_, data);
}
void SetInput2(const std::vector<int64_t>& data) {
PopulateTensor(input2_, data);
}
std::vector<int64_t> GetOutput1() { return ExtractVector<int64_t>(output1_); }
std::vector<int> GetOutput1Shape() { return GetTensorShape(output1_); }
std::vector<int64_t> GetOutput2() { return ExtractVector<int64_t>(output2_); }
std::vector<int> GetOutput2Shape() { return GetTensorShape(output2_); }
protected:
int input1_;
int input2_;
int output1_;
int output2_;
};
TEST(BcastGradArgsInt32OpModel, AllEqualsInt64DTypes) {
BcastGradArgsInt64OpModel model(
{TensorType_INT64, {4}},
{TensorType_INT64, {4}},
{TensorType_INT64, {}},
{TensorType_INT64, {}});
model.SetInput1({3, 1, 2, 3});
model.SetInput2({3, 1, 2, 3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput1().size(), 0);
EXPECT_THAT(model.GetOutput2().size(), 0);
}
TEST(BcastGradArgsInt32OpModel, BroadcastableDimAtInput1Int64DTypes) {
BcastGradArgsInt64OpModel model(
{TensorType_INT64, {4}},
{TensorType_INT64, {4}},
{TensorType_INT64, {}},
{TensorType_INT64, {}});
model.SetInput1({3, 4, 1, 3});
model.SetInput2({3, 4, 2, 3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput1(), ElementsAreArray({2}));
EXPECT_THAT(model.GetOutput2().size(), 0);
}
TEST(BcastGradArgsInt32OpModel, BroadcastableDimAtInput2Int64DTypes) {
BcastGradArgsInt64OpModel model(
{TensorType_INT64, {4}},
{TensorType_INT64, {4}},
{TensorType_INT64, {}},
{TensorType_INT64, {}});
model.SetInput1({3, 4, 2, 3});
model.SetInput2({3, 1, 2, 3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput1().size(), 0);
EXPECT_THAT(model.GetOutput2(), ElementsAreArray({1}));
}
TEST(BcastGradArgsInt32OpModel, DifferentInputSizesInt64DTypes) {
BcastGradArgsInt64OpModel model(
{TensorType_INT64, {4}},
{TensorType_INT64, {3}},
{TensorType_INT64, {}},
{TensorType_INT64, {}});
model.SetInput1({3, 4, 2, 3});
model.SetInput2({4, 2, 3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput1().size(), 0);
EXPECT_THAT(model.GetOutput2(), ElementsAreArray({0}));
}
TEST(BcastGradArgsInt32OpModel, NonBroadcastableDimsInt64DTypes) {
BcastGradArgsInt64OpModel model(
{TensorType_INT64, {4}},
{TensorType_INT64, {4}},
{TensorType_INT64, {}},
{TensorType_INT64, {}});
model.SetInput1({3, 4, 2, 3});
model.SetInput2({9, 9, 9, 9});
EXPECT_THAT(model.Invoke(), kTfLiteError);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/gradient/bcast_grad_args.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/gradient/bcast_grad_args_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2cf421c1-f469-45b8-8be7-7c73f872ea22 | cpp | tensorflow/tensorflow | list_ops_util | tensorflow/lite/kernels/variants/list_ops_util.cc | tensorflow/lite/kernels/variants/list_ops_util_test.cc | #include "tensorflow/lite/kernels/variants/list_ops_util.h"
#include "tensorflow/lite/array.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/variants/tensor_array.h"
namespace tflite {
namespace variants {
IntArrayUniquePtr TensorAsShape(const TfLiteTensor& shape) {
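  // A scalar shape tensor encodes an unranked shape, represented here by an
  // empty TfLiteIntArray.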
if (shape.dims->size == 0) {
return BuildTfLiteArray({});
}
const int rank = shape.dims->data[0];
const int* begin = reinterpret_cast<const int*>(shape.data.data);
const int* end = begin + rank;
return BuildTfLiteArray(std::vector<int>(begin, end));
}
IntArrayUniquePtr MergeShapesOrNull(IntArrayUniquePtr l, IntArrayUniquePtr r) {
if (l == nullptr) {
return r;
}
if (r == nullptr) {
return l;
}
if (l->size == 0) {
return r;
}
if (r->size == 0) {
return l;
}
if (l->size != r->size) {
return nullptr;
}
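  // Merge dimension by dimension: -1 (unknown) defers to the other shape, and
  // two known but unequal dimensions make the shapes incompatible.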
for (int i = 0; i < r->size; ++i) {
if (l->data[i] == -1) {
l->data[i] = r->data[i];
} else if (r->data[i] != -1 && l->data[i] != r->data[i]) {
return nullptr;
}
}
return l;
}
bool IsShapeFullyDefined(const TfLiteIntArray& shape) {
for (int i = 0; i < shape.size; ++i) {
if (shape.data[i] < 0) {
return false;
}
}
return true;
}
TfLiteStatus GetShapeIfAllEqual(const TensorArray& arr,
IntArrayUniquePtr& result) {
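  // Unset (null) elements are skipped; two set elements with different shapes
  // is an error, and a list with no set elements yields a null result shape.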
const TfLiteIntArray* common_shape = nullptr;
for (int i = 0; i < arr.NumElements(); ++i) {
const TfLiteTensor* cur_element = arr.At(i);
if (cur_element == nullptr) {
continue;
}
if (common_shape == nullptr) {
common_shape = cur_element->dims;
continue;
}
if (!TfLiteIntArrayEqual(common_shape, cur_element->dims)) {
return kTfLiteError;
}
}
result = common_shape != nullptr ? BuildTfLiteArray(*common_shape) : nullptr;
return kTfLiteOk;
}
}
} | #include "tensorflow/lite/kernels/variants/list_ops_util.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/array.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/kernels/variants/tensor_array.h"
#include "tensorflow/lite/util.h"
namespace tflite {
namespace variants {
namespace {
TEST(TensorAsShape, ScalarTensor_ReturnsEmptyIntArray) {
TensorUniquePtr scalar_tensor =
BuildTfLiteTensor(kTfLiteInt32, BuildTfLiteArray({}), kTfLiteDynamic);
IntArrayUniquePtr shape_from_tensor = TensorAsShape(*scalar_tensor);
ASSERT_THAT(shape_from_tensor.get(), DimsAre({}));
}
TEST(TensorAsShape, SingleElementTensor_ReturnsSize1Shape) {
TensorUniquePtr single_el_tensor =
BuildTfLiteTensor(kTfLiteInt32, BuildTfLiteArray({1}), kTfLiteDynamic);
single_el_tensor->data.i32[0] = 10;
IntArrayUniquePtr shape_from_tensor = TensorAsShape(*single_el_tensor);
ASSERT_THAT(shape_from_tensor.get(), DimsAre({10}));
}
TEST(TensorAsShape, OneDMultipleElementShape_ReturnsHighRankedShape) {
TensorUniquePtr one_d_mul_el_tensor =
BuildTfLiteTensor(kTfLiteInt32, BuildTfLiteArray({3}), kTfLiteDynamic);
one_d_mul_el_tensor->data.i32[0] = 10;
one_d_mul_el_tensor->data.i32[1] = 9;
one_d_mul_el_tensor->data.i32[2] = 8;
IntArrayUniquePtr shape_from_tensor = TensorAsShape(*one_d_mul_el_tensor);
ASSERT_THAT(shape_from_tensor.get(), DimsAre({10, 9, 8}));
}
TEST(MergeShapesOrNull, IncompatibleSameRank_ReturnsNull) {
IntArrayUniquePtr l = BuildTfLiteArray({2, 3});
IntArrayUniquePtr r = BuildTfLiteArray({3, 3});
EXPECT_EQ(MergeShapesOrNull(std::move(l), std::move(r)).get(), nullptr);
}
TEST(MergeShapesOrNull, NotSameRank_ReturnsNull) {
IntArrayUniquePtr l = BuildTfLiteArray({1});
IntArrayUniquePtr r = BuildTfLiteArray({1, 2});
EXPECT_EQ(MergeShapesOrNull(std::move(l), std::move(r)).get(), nullptr);
}
TEST(MergeShapesOrNull, MergeShapesOrNullSameRankNENull) {
IntArrayUniquePtr l = BuildTfLiteArray({1});
IntArrayUniquePtr r = BuildTfLiteArray({2});
EXPECT_EQ(MergeShapesOrNull(std::move(l), std::move(r)).get(), nullptr);
}
TEST(MergeShapesOrNull, RankedUnknownLKnownR_ReturnsStatic) {
IntArrayUniquePtr l = BuildTfLiteArray({-1});
IntArrayUniquePtr r = BuildTfLiteArray({2});
EXPECT_THAT(MergeShapesOrNull(std::move(l), std::move(r)).get(),
DimsAre({2}));
}
TEST(MergeShapesOrNull, UnknownRKnownL_ReturnsStatic) {
IntArrayUniquePtr l = BuildTfLiteArray({2});
IntArrayUniquePtr r = BuildTfLiteArray({-1});
EXPECT_THAT(MergeShapesOrNull(std::move(l), std::move(r)).get(),
DimsAre({2}));
}
TEST(MergeShapesOrNull, UnknownBoth_ReturnsUnknown) {
IntArrayUniquePtr l = BuildTfLiteArray({-1});
IntArrayUniquePtr r = BuildTfLiteArray({-1});
EXPECT_THAT(MergeShapesOrNull(std::move(l), std::move(r)).get(),
DimsAre({-1}));
}
TEST(MergeShapesOrNull, RankedUnknownDifferentDims_ConstrainsUnknownDims) {
IntArrayUniquePtr l = BuildTfLiteArray({-1, 2, 5});
IntArrayUniquePtr r = BuildTfLiteArray({1, -1, 5});
EXPECT_THAT(MergeShapesOrNull(std::move(l), std::move(r)).get(),
DimsAre({1, 2, 5}));
}
TEST(MergeShapesOrNull, BothUnranked_ReturnsUnranked) {
IntArrayUniquePtr l = BuildTfLiteArray({});
IntArrayUniquePtr r = BuildTfLiteArray({});
EXPECT_THAT(MergeShapesOrNull(std::move(l), std::move(r)).get(), DimsAre({}));
}
TEST(MergeShapesOrNull, UrankedAndStatic1D_ReturnsStatic1D) {
IntArrayUniquePtr l = BuildTfLiteArray({});
IntArrayUniquePtr r = BuildTfLiteArray({1});
EXPECT_THAT(MergeShapesOrNull(std::move(l), std::move(r)).get(),
DimsAre({1}));
}
TEST(MergeShapesOrNull, UnrankedAndStaticND_ReturnsStaticND) {
IntArrayUniquePtr l = BuildTfLiteArray({});
IntArrayUniquePtr r = BuildTfLiteArray({2, 3});
EXPECT_THAT(MergeShapesOrNull(std::move(l), std::move(r)).get(),
DimsAre({2, 3}));
}
TEST(MergeShapesOrNull, UnrankedAndRankedUnknown_ReturnsRankedUnknown) {
IntArrayUniquePtr l = BuildTfLiteArray({});
IntArrayUniquePtr r = BuildTfLiteArray({-1});
EXPECT_THAT(MergeShapesOrNull(std::move(l), std::move(r)).get(),
DimsAre({-1}));
}
TEST(MergeShapesOrNull, NullInput_ReturnsOther) {
EXPECT_THAT(MergeShapesOrNull(BuildTfLiteArray({3}), nullptr).get(),
DimsAre({3}));
EXPECT_THAT(MergeShapesOrNull(nullptr, BuildTfLiteArray({2})).get(),
DimsAre({2}));
EXPECT_EQ(MergeShapesOrNull(nullptr, nullptr).get(), nullptr);
}
TEST(MergeShapesOrNull, NullInput_ReturnsUnrankedOther) {
EXPECT_THAT(MergeShapesOrNull(BuildTfLiteArray({}), nullptr).get(),
DimsAre({}));
EXPECT_THAT(MergeShapesOrNull(nullptr, BuildTfLiteArray({})).get(),
DimsAre({}));
}
TEST(ElementsSameShape, NoElements_SucceedsWithNullptr) {
TensorArray arr = {kTfLiteInt32, BuildTfLiteArray({})};
arr.Resize(2);
IntArrayUniquePtr res;
ASSERT_EQ(GetShapeIfAllEqual(arr, res), kTfLiteOk);
EXPECT_EQ(res.get(), nullptr);
}
TEST(ElementsSameShape, ZeroSize_SucceedsWithNullptr) {
TensorArray arr = {kTfLiteInt32, BuildTfLiteArray({})};
IntArrayUniquePtr res;
ASSERT_EQ(GetShapeIfAllEqual(arr, res), kTfLiteOk);
EXPECT_EQ(res.get(), nullptr);
}
TEST(ElementsSameShape, OneSize_SucceedsWithShape) {
TensorArray arr = {kTfLiteInt32, BuildTfLiteArray({})};
arr.Resize(1);
arr.Set(0, BuildTfLiteTensor(kTfLiteInt32, BuildTfLiteArray({2}),
kTfLiteDynamic));
IntArrayUniquePtr res;
ASSERT_EQ(GetShapeIfAllEqual(arr, res), kTfLiteOk);
EXPECT_THAT(res.get(), DimsAre({2}));
}
TEST(ElementsSameShape, MultipleElements_AllSet_SucceedsWithShape) {
TensorArray arr = {kTfLiteInt32, BuildTfLiteArray({})};
arr.Resize(2);
arr.Set(0, BuildTfLiteTensor(kTfLiteInt32, BuildTfLiteArray({2}),
kTfLiteDynamic));
arr.Set(1, BuildTfLiteTensor(kTfLiteInt32, BuildTfLiteArray({2}),
kTfLiteDynamic));
IntArrayUniquePtr res;
EXPECT_EQ(GetShapeIfAllEqual(arr, res), kTfLiteOk);
EXPECT_THAT(res.get(), DimsAre({2}));
}
TEST(ElementsSameShape, MultipleElements_SomeSet_SucceedsWithShape) {
TensorArray arr = {kTfLiteInt32, BuildTfLiteArray({})};
arr.Resize(3);
arr.Set(0, BuildTfLiteTensor(kTfLiteInt32, BuildTfLiteArray({2}),
kTfLiteDynamic));
arr.Set(2, BuildTfLiteTensor(kTfLiteInt32, BuildTfLiteArray({2}),
kTfLiteDynamic));
IntArrayUniquePtr res;
EXPECT_EQ(GetShapeIfAllEqual(arr, res), kTfLiteOk);
EXPECT_THAT(res.get(), DimsAre({2}));
}
TEST(ElementsSameShape, MultipleElements_SomeSetNotSameRank_Fails) {
TensorArray arr = {kTfLiteInt32, BuildTfLiteArray({})};
arr.Resize(3);
arr.Set(0, BuildTfLiteTensor(kTfLiteInt32, BuildTfLiteArray({2}),
kTfLiteDynamic));
arr.Set(2, BuildTfLiteTensor(kTfLiteInt32, BuildTfLiteArray({2, 3}),
kTfLiteDynamic));
IntArrayUniquePtr res;
EXPECT_EQ(GetShapeIfAllEqual(arr, res), kTfLiteError);
}
TEST(ElementsSameShape, MultipleElements_SomeSetNotSameDim_Fails) {
TensorArray arr = {kTfLiteInt32, BuildTfLiteArray({})};
arr.Resize(3);
arr.Set(0, BuildTfLiteTensor(kTfLiteInt32, BuildTfLiteArray({2, 2}),
kTfLiteDynamic));
arr.Set(2, BuildTfLiteTensor(kTfLiteInt32, BuildTfLiteArray({2, 3}),
kTfLiteDynamic));
IntArrayUniquePtr res;
EXPECT_EQ(GetShapeIfAllEqual(arr, res), kTfLiteError);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/variants/list_ops_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/variants/list_ops_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c413006a-54fa-4d6d-80dc-14dda06506c3 | cpp | tensorflow/tensorflow | list_length | tensorflow/lite/kernels/variants/list_kernels/list_length.cc | tensorflow/lite/kernels/variants/list_kernels/list_length_test.cc | #include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/variants/list_ops_lib.h"
#include "tensorflow/lite/kernels/variants/tensor_array.h"
namespace tflite {
namespace variants {
namespace ops {
namespace list_length {
namespace {
using ::tflite::variants::TensorArray;
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* list_input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &list_input));
TF_LITE_ENSURE_TYPES_EQ(context, list_input->type, kTfLiteVariant);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteInt32);
TF_LITE_ENSURE_EQ(context, output->dims->size, 0);
output->allocation_type = kTfLiteArenaRw;
return kTfLiteOk;
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* list_input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &list_input));
TF_LITE_ENSURE_EQ(context, list_input->allocation_type, kTfLiteVariantObject);
const TensorArray* const input_arr =
reinterpret_cast<TensorArray*>(list_input->data.data);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
const int length = input_arr->NumElements();
output->data.i32[0] = length;
return kTfLiteOk;
}
}
}
TfLiteRegistration* Register_LIST_LENGTH() {
static TfLiteRegistration r = {nullptr, nullptr, list_length::Prepare,
list_length::Eval};
return &r;
}
}
}
} | #include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/kernels/variants/list_kernels/test_util.h"
#include "tensorflow/lite/kernels/variants/list_ops_lib.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace variants {
namespace ops {
namespace {
class ListLengthModel : public ListOpModel {
public:
ListLengthModel() {
list_input_ = AddInput({TensorType_VARIANT, {}});
length_output_ = AddOutput({TensorType_INT32, {}});
SetCustomOp("ListLength", {}, Register_LIST_LENGTH);
BuildInterpreter({{}});
}
const TfLiteTensor* GetOutputTensor() {
return interpreter_->tensor(length_output_);
}
int list_input_;
int length_output_;
};
class ListLengthTest : public ::testing::TestWithParam<int> {};
TEST_P(ListLengthTest, OutputIsListLength) {
const int length = GetParam();
ListLengthModel m;
m.PopulateListTensor(m.list_input_, {2, 2}, length, kTfLiteInt32);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TfLiteTensor* const output = m.GetOutputTensor();
ASSERT_EQ(output->type, kTfLiteInt32);
ASSERT_EQ(output->allocation_type, kTfLiteArenaRw);
ASSERT_THAT(output, DimsAre({}));
ASSERT_EQ(output->data.i32[0], length);
}
INSTANTIATE_TEST_SUITE_P(ListLengthTests, ListLengthTest,
testing::Values(0, 1, 5, 10, 100));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/variants/list_kernels/list_length.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/variants/list_kernels/list_length_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4d1c1e7d-aee8-4526-8ad0-2a8d3a0803f6 | cpp | tensorflow/tensorflow | list_reserve | tensorflow/lite/kernels/variants/list_kernels/list_reserve.cc | tensorflow/lite/kernels/variants/list_kernels/list_reserve_test.cc | #include <cstring>
#include <utility>
#include "tensorflow/lite/array.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/variants/list_ops_lib.h"
#include "tensorflow/lite/kernels/variants/list_ops_util.h"
#include "tensorflow/lite/kernels/variants/tensor_array.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/util.h"
namespace tflite {
namespace variants {
namespace ops {
namespace list_reserve {
namespace {
using ::tflite::variants::TensorArray;
using ::tflite::variants::detail::ListReserveOptions;
TfLiteType ConvertTensorType(TensorType src) {
switch (src) {
case TensorType_INT32:
return kTfLiteInt32;
case TensorType_FLOAT32:
return kTfLiteFloat32;
case TensorType_BOOL:
return kTfLiteBool;
case TensorType_INT64:
return kTfLiteInt64;
default:
return kTfLiteNoType;
}
}
constexpr int kListOut = 0;
struct SemanticOutType {
TfLiteType element_type;
IntArrayUniquePtr element_shape;
int num_elements;
};
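// ListReserve and VariantZerosLike share the Prepare/Eval plumbing below; the
// *Semantic classes only differ in how the output list's element type, element
// shape and length are derived, and in how its elements are populated.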
class ReserveSemantic {
public:
ReserveSemantic(TfLiteContext* context, TfLiteNode* node)
: context_(context), node_(node) {}
constexpr static int kElementShapeInput = 0;
constexpr static int kNumElementsInput = 1;
TfLiteStatus CheckInputs() const {
TF_LITE_ENSURE_EQ(context_, NumInputs(node_), 2);
const TfLiteTensor* element_shape;
TF_LITE_ENSURE_OK(
context_,
GetInputSafe(context_, node_, kElementShapeInput, &element_shape));
TF_LITE_ENSURE(context_, element_shape->type == kTfLiteInt32);
const TfLiteTensor* num_elements;
TF_LITE_ENSURE_OK(context_, GetInputSafe(context_, node_, kNumElementsInput,
&num_elements));
TF_LITE_ENSURE_TYPES_EQ(context_, num_elements->type, kTfLiteInt32);
return kTfLiteOk;
}
TfLiteStatus Compute(SemanticOutType& result) const {
auto* options =
reinterpret_cast<const ListReserveOptions*>(node_->custom_initial_data);
TfLiteType element_type = ConvertTensorType(options->element_type);
TF_LITE_ENSURE(context_, element_type != kTfLiteNoType);
const TfLiteTensor* num_elements;
TF_LITE_ENSURE_OK(context_, GetInputSafe(context_, node_, kNumElementsInput,
&num_elements));
TF_LITE_ENSURE_TYPES_EQ(context_, num_elements->type, kTfLiteInt32);
TF_LITE_ENSURE_EQ(context_, num_elements->dims->size, 0);
const int num_elements_value = num_elements->data.i32[0];
TF_LITE_ENSURE(context_, num_elements_value >= 0);
const TfLiteTensor* element_shape_tensor;
TF_LITE_ENSURE_OK(context_,
GetInputSafe(context_, node_, kElementShapeInput,
&element_shape_tensor));
IntArrayUniquePtr element_shape = TensorAsShape(*element_shape_tensor);
result = SemanticOutType{element_type, std::move(element_shape),
num_elements_value};
return kTfLiteOk;
}
TfLiteStatus PopulateOutput(TensorArray* const output) const {
return kTfLiteOk;
}
private:
TfLiteContext* const context_;
TfLiteNode* const node_;
};
class ZerosLikeSemantic {
public:
ZerosLikeSemantic(TfLiteContext* context, TfLiteNode* node)
: context_(context), node_(node) {}
constexpr static int kListInput = 0;
TfLiteStatus CheckInputs() const {
TF_LITE_ENSURE_EQ(context_, NumInputs(node_), 1);
const TfLiteTensor* list_input;
TF_LITE_ENSURE_OK(context_,
GetInputSafe(context_, node_, kListInput, &list_input));
TF_LITE_ENSURE(context_, list_input->type == kTfLiteVariant);
return kTfLiteOk;
}
TfLiteStatus Compute(SemanticOutType& result) const {
const TfLiteTensor* list_input;
TF_LITE_ENSURE_OK(context_,
GetInputSafe(context_, node_, kListInput, &list_input));
const TensorArray* const input =
reinterpret_cast<const TensorArray*>(list_input->data.data);
result = SemanticOutType{input->ElementType(),
BuildTfLiteArray(*input->ElementShape()),
input->NumElements()};
return kTfLiteOk;
}
TfLiteStatus PopulateOutput(TensorArray* const output) const {
const TfLiteTensor* list_input;
TF_LITE_ENSURE_OK(context_,
GetInputSafe(context_, node_, kListInput, &list_input));
const TensorArray* const input =
reinterpret_cast<const TensorArray*>(list_input->data.data);
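    // Mirror the input list: every set element gets a zero-filled tensor of
    // the same type and shape, while unset elements remain null.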
for (int i = 0; i < input->NumElements(); ++i) {
const TfLiteTensor* const at = input->At(i);
if (at == nullptr) continue;
TensorUniquePtr output_at = BuildTfLiteTensor(
at->type, BuildTfLiteArray(*at->dims), kTfLiteDynamic);
memset(output_at->data.data, 0, output_at->bytes);
TF_LITE_ENSURE(context_, output->Set(i, std::move(output_at)));
}
return kTfLiteOk;
}
private:
TfLiteContext* const context_;
TfLiteNode* const node_;
};
template <class Semantic>
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
const Semantic sem(context, node);
TF_LITE_ENSURE_OK(context, sem.CheckInputs());
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kListOut, &output));
TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteVariant);
output->allocation_type = kTfLiteVariantObject;
return kTfLiteOk;
}
template <class Semantic>
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const Semantic sem(context, node);
SemanticOutType data;
TF_LITE_ENSURE_OK(context, sem.Compute(data));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kListOut, &output));
TfLiteStatus stat = TfLiteTensorVariantRealloc<TensorArray>(
output, data.element_type, std::move(data.element_shape));
TF_LITE_ENSURE_OK(context, stat);
TensorArray* const arr =
static_cast<TensorArray*>(static_cast<VariantData*>(output->data.data));
arr->Resize(data.num_elements);
TF_LITE_ENSURE_OK(context, sem.PopulateOutput(arr));
return kTfLiteOk;
}
}
}
TfLiteRegistration* Register_LIST_RESERVE() {
static TfLiteRegistration r = {
nullptr, nullptr, list_reserve::Prepare<list_reserve::ReserveSemantic>,
list_reserve::Eval<list_reserve::ReserveSemantic>};
return &r;
}
TfLiteRegistration* Register_VARIANT_ZEROS_LIKE() {
static TfLiteRegistration r = {
nullptr, nullptr, list_reserve::Prepare<list_reserve::ZerosLikeSemantic>,
list_reserve::Eval<list_reserve::ZerosLikeSemantic>};
return &r;
}
}
}
} | #include <cstring>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/kernels/variants/list_ops_lib.h"
#include "tensorflow/lite/kernels/variants/tensor_array.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace variants {
namespace ops {
namespace {
using ::tflite::variants::TensorArray;
std::vector<uint8_t> CustomOptionsToRaw(const std::vector<int32_t>& options) {
std::vector<uint8_t> raw(options.size() * sizeof(int32_t));
std::memcpy(raw.data(), options.data(), raw.size());
return raw;
}
class ListReserveModel : public SingleOpModel {
public:
explicit ListReserveModel(TensorType element_type) {
element_shape_input_ = AddInput({TensorType_INT32, {1}});
list_len_input_ = AddInput({TensorType_INT32, {}});
reserve_output_ = AddOutput({TensorType_VARIANT, {}});
SetCustomOp("ListReserve", CustomOptionsToRaw({element_type}),
Register_LIST_RESERVE);
BuildInterpreter({{1}, {}});
}
const TfLiteTensor* GetOutputTensor(int index) {
return interpreter_->tensor(index);
}
int list_len_input_;
int reserve_output_;
int element_shape_input_;
};
TEST(ListReserveTest, NonZeroNumElements_StaticShape) {
ListReserveModel m(TensorType_INT32);
m.PopulateTensor(m.list_len_input_, {5});
m.PopulateTensor(m.element_shape_input_, {2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TfLiteTensor* tensor = m.GetOutputTensor(m.reserve_output_);
EXPECT_EQ(tensor->type, kTfLiteVariant);
EXPECT_EQ(tensor->allocation_type, kTfLiteVariantObject);
TensorArray* arr = reinterpret_cast<TensorArray*>(tensor->data.data);
EXPECT_EQ(arr->ElementType(), kTfLiteInt32);
EXPECT_EQ(arr->ElementShape()->size, 1);
ASSERT_EQ(arr->ElementShape()->data[0], 2);
ASSERT_EQ(arr->NumElements(), 5);
for (int i = 0; i < 5; ++i) {
ASSERT_EQ(arr->At(i), nullptr);
}
}
TEST(ListReserveTest, NegativeNumElements_Fails) {
ListReserveModel m(TensorType_INT32);
m.PopulateTensor(m.list_len_input_, {-1});
m.PopulateTensor(m.element_shape_input_, {2});
ASSERT_EQ(m.Invoke(), kTfLiteError);
}
TEST(ListReserveTest, NumElements0_StaticShape_Succeeds) {
ListReserveModel m(TensorType_INT32);
m.PopulateTensor(m.list_len_input_, {0});
m.PopulateTensor(m.element_shape_input_, {2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TfLiteTensor* tensor = m.GetOutputTensor(m.reserve_output_);
TensorArray* arr = reinterpret_cast<TensorArray*>(tensor->data.data);
EXPECT_EQ(arr->NumElements(), 0);
EXPECT_EQ(arr->ElementType(), kTfLiteInt32);
}
TEST(ListReserveTest, NumElements0_StaticShape_FloatType) {
ListReserveModel m(TensorType_FLOAT32);
m.PopulateTensor(m.list_len_input_, {0});
m.PopulateTensor(m.element_shape_input_, {2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TfLiteTensor* tensor = m.GetOutputTensor(m.reserve_output_);
TensorArray* arr = reinterpret_cast<TensorArray*>(tensor->data.data);
EXPECT_EQ(arr->NumElements(), 0);
EXPECT_EQ(arr->ElementType(), kTfLiteFloat32);
}
TEST(ListReserveTest, UnsupportedDataType_Fails) {
ListReserveModel m(TensorType_COMPLEX64);
m.PopulateTensor(m.list_len_input_, {0});
m.PopulateTensor(m.element_shape_input_, {2});
ASSERT_EQ(m.Invoke(), kTfLiteError);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/variants/list_kernels/list_reserve.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/variants/list_kernels/list_reserve_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1157f66a-b3ed-4eb8-96c5-4f7828a45e7f | cpp | tensorflow/tensorflow | variant_add_n | tensorflow/lite/kernels/variants/list_kernels/variant_add_n.cc | tensorflow/lite/kernels/variants/list_kernels/variant_add_n_test.cc | #include <algorithm>
#include <cstring>
#include <utility>
#include <vector>
#include "tensorflow/lite/array.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/portable_tensor.h"
#include "tensorflow/lite/kernels/internal/runtime_shape.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/variants/list_ops_util.h"
#include "tensorflow/lite/kernels/variants/tensor_array.h"
#include "tensorflow/lite/util.h"
namespace tflite {
namespace variants {
namespace ops {
namespace add_n {
namespace {
using ::tflite::variants::TensorArray;
constexpr int kInputTensor1 = 0;
constexpr int kOutputTensor = 0;
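// Per-node state: remembers the scratch tensor registered in Init so Prepare
// can wire it into node->temporaries and Eval can use it as AddN workspace.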
struct OpData {
int scratch_tensor_index;
};
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
auto* op_data = new OpData();
context->AddTensors(context, 1, &op_data->scratch_tensor_index);
return op_data;
}
void Free(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<OpData*>(buffer);
}
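// Prepare: requires at least two inputs and one output, attaches the dynamic
// scratch tensor, checks that the trailing inputs are variant tensors, and
// marks the output as a variant object.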
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE(context, NumInputs(node) >= 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input1;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor1, &input1));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
TfLiteIntArrayFree(node->temporaries);
node->temporaries = TfLiteIntArrayCreate(1);
node->temporaries->data[0] = op_data->scratch_tensor_index;
TfLiteTensor* scratch_tensor;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, 0, &scratch_tensor));
scratch_tensor->type = kTfLiteNoType;
scratch_tensor->allocation_type = kTfLiteDynamic;
for (int i = kInputTensor1 + 1; i < NumInputs(node); ++i) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, i, &input));
TF_LITE_ENSURE_EQ(context, input->type, kTfLiteVariant);
}
output->type = kTfLiteVariant;
output->allocation_type = kTfLiteVariantObject;
return kTfLiteOk;
}
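// Eval: element-wise AddN across the input TensorArrays. For each position,
// the items present in the inputs must share a shape and are summed with the
// optimized AddN path; a single present item is copied through; positions
// unset in every input produce a zero-filled tensor of the merged shape.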
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
CpuBackendContext* cpu_backend_context =
CpuBackendContext::GetFromContext(context);
const TfLiteTensor* input1;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor1, &input1));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TfLiteTensor* scratch_tensor;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, 0, &scratch_tensor));
const TensorArray* const arr =
reinterpret_cast<const TensorArray*>(input1->data.data);
const int num_elements = arr->NumElements();
const TfLiteType t = arr->ElementType();
const int num_inputs = NumInputs(node);
IntArrayUniquePtr merged_shape = BuildTfLiteArray(*arr->ElementShape());
std::vector<const TensorArray*> input_arrs;
input_arrs.reserve(num_inputs);
input_arrs.push_back(arr);
for (int i = 1; i < num_inputs; ++i) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, i, &input));
const TensorArray* const arr_i =
reinterpret_cast<const TensorArray*>(input->data.data);
TF_LITE_ENSURE_EQ(context, num_elements, arr_i->NumElements());
TF_LITE_ENSURE_EQ(context, t, arr_i->ElementType());
merged_shape = variants::MergeShapesOrNull(
std::move(merged_shape), BuildTfLiteArray(*arr_i->ElementShape()));
TF_LITE_ENSURE(context, merged_shape != nullptr);
input_arrs.push_back(arr_i);
}
TF_LITE_ENSURE_OK(context, TfLiteTensorVariantRealloc<TensorArray>(
output, t, BuildTfLiteArray(0)));
TensorArray* const output_arr =
reinterpret_cast<TensorArray*>(output->data.data);
output_arr->Resize(num_elements);
for (int i = 0; i < num_elements; ++i) {
TfLiteIntArray* row_shape = nullptr;
std::vector<TfLiteTensor*> row_tensors;
for (const auto* array : input_arrs) {
const TfLiteTensor* at = array->At(i);
if (!at) continue;
if (!row_shape)
row_shape = at->dims;
else
TF_LITE_ENSURE(context, TfLiteIntArrayEqual(row_shape, at->dims));
row_tensors.push_back(const_cast<TfLiteTensor*>(at));
}
if (row_shape == nullptr) {
TF_LITE_ENSURE(context,
variants::IsShapeFullyDefined(*merged_shape.get()));
TensorUniquePtr row_output = BuildTfLiteTensor(
t, BuildTfLiteArray(*merged_shape.get()), kTfLiteDynamic);
memset(row_output->data.data, 0, row_output->bytes);
output_arr->Set(i, std::move(row_output));
continue;
}
TensorUniquePtr row_output =
BuildTfLiteTensor(t, BuildTfLiteArray(*row_shape), kTfLiteDynamic);
if (row_tensors.size() < 2) {
TfLiteTensorCopy(row_tensors[0], row_output.get());
output_arr->Set(i, std::move(row_output));
continue;
}
const int num_inputs_for_row = static_cast<int>(row_tensors.size());
const int thread_count =
std::min(std::max(1, static_cast<int>(num_inputs_for_row) / 2),
cpu_backend_context->max_num_threads());
IntArrayUniquePtr scratch_shape = BuildTfLiteArray(
{thread_count * static_cast<int>(NumElements(row_tensors[0]))});
scratch_tensor->type = t;
TF_LITE_ENSURE_OK(
context, context->ResizeTensor(context, scratch_tensor,
BuildTfLiteArray(*row_shape).release()));
const RuntimeShape row_runtime_shape(row_shape->size, row_shape->data);
if (t == kTfLiteInt32) {
VectorOfTensors<int> tensors(row_tensors);
optimized_ops::AddN<int>(row_runtime_shape, num_inputs, tensors.data(),
GetTensorData<int>(row_output.get()),
GetTensorData<int>(scratch_tensor),
cpu_backend_context);
} else if (t == kTfLiteFloat32) {
VectorOfTensors<float> tensors(row_tensors);
optimized_ops::AddN<float>(row_runtime_shape, num_inputs, tensors.data(),
GetTensorData<float>(row_output.get()),
GetTensorData<float>(scratch_tensor),
cpu_backend_context);
} else {
TF_LITE_KERNEL_LOG(context, "Subtype is not supported for variant addn.");
return kTfLiteError;
}
TF_LITE_ENSURE(context, output_arr->Set(i, std::move(row_output)));
}
return kTfLiteOk;
}
}
}
TfLiteRegistration* Register_VARIANT_ADD_N() {
static TfLiteRegistration r = {add_n::Init, add_n::Free, add_n::Prepare,
add_n::Eval};
return &r;
}
}
}
} | #include <optional>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/kernels/variants/list_kernels/test_util.h"
#include "tensorflow/lite/kernels/variants/list_ops_lib.h"
#include "tensorflow/lite/kernels/variants/tensor_array.h"
#include "tensorflow/lite/portable_type_to_tflitetype.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace variants {
namespace ops {
namespace {
using ::testing::AllOf;
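// Builds a single-op model with `num_inputs` variant list inputs feeding the
// custom VariantAddN kernel and exposes the resulting TensorArray output.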
class ListAddNModel : public ListOpModel {
public:
explicit ListAddNModel(int num_inputs) {
std::vector<std::vector<int>> input_shapes(num_inputs, std::vector<int>{});
for (int i = 0; i < num_inputs; ++i) {
input_inds_.push_back(AddInput({TensorType_VARIANT, {}}));
}
output_ind_ = AddOutput({TensorType_VARIANT, {}});
SetCustomOp("VariantAddN", {}, Register_VARIANT_ADD_N);
BuildInterpreter(input_shapes);
}
const TensorArray* GetOutput() {
TfLiteTensor* tensor = interpreter_->tensor(output_ind_);
TFLITE_CHECK(tensor != nullptr && tensor->type == kTfLiteVariant &&
tensor->allocation_type == kTfLiteVariantObject);
return static_cast<const TensorArray*>(
static_cast<const VariantData*>(tensor->data.data));
}
int GetIndOfInput(int input) { return input_inds_[input]; }
private:
std::vector<int> input_inds_;
int output_ind_;
};
template <typename T>
class ListAddNTest : public ::testing::Test {};
TYPED_TEST_SUITE_P(ListAddNTest);
TYPED_TEST_P(ListAddNTest, TwoInputs_AllItemsPresent_AllSameShape) {
TfLiteType tfl_type = typeToTfLiteType<TypeParam>();
std::optional<TensorType> tensor_type = TflToTensorType(tfl_type);
ASSERT_TRUE(tensor_type.has_value());
ListAddNModel m(2);
for (const int i : {0, 1}) {
const int list_ind = m.GetIndOfInput(i);
m.PopulateListTensor(list_ind, {}, 2, tfl_type);
m.ListSetItem(list_ind, 0, {2, 2}, tfl_type,
std::vector<TypeParam>(4, 1).data());
m.ListSetItem(list_ind, 1, {2, 2}, tfl_type,
std::vector<TypeParam>(4, 1).data());
}
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TensorArray* const arr = m.GetOutput();
ASSERT_EQ(arr->NumElements(), 2);
ASSERT_EQ(arr->ElementType(), tfl_type);
EXPECT_THAT(arr->At(0), AllOf(IsAllocatedAs(tfl_type), DimsAre({2, 2}),
FilledWith<TypeParam>(2)));
EXPECT_THAT(arr->At(1), AllOf(IsAllocatedAs(tfl_type), DimsAre({2, 2}),
FilledWith<TypeParam>(2)));
}
TYPED_TEST_P(ListAddNTest, TwoInputs_AllItemsPresent_ListsContainMixedShapes) {
TfLiteType tfl_type = typeToTfLiteType<TypeParam>();
std::optional<TensorType> tensor_type = TflToTensorType(tfl_type);
ASSERT_TRUE(tensor_type.has_value());
ListAddNModel m(2);
for (const int i : {0, 1}) {
const int list_ind = m.GetIndOfInput(i);
m.PopulateListTensor(list_ind, {}, 2, tfl_type);
m.ListSetItem(list_ind, 0, {2, 2}, tfl_type,
std::vector<TypeParam>(4, 1).data());
m.ListSetItem(list_ind, 1, {3, 3}, tfl_type,
std::vector<TypeParam>(9, 1).data());
}
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TensorArray* const arr = m.GetOutput();
ASSERT_EQ(arr->NumElements(), 2);
ASSERT_EQ(arr->ElementType(), tfl_type);
EXPECT_THAT(arr->At(0), AllOf(IsAllocatedAs(tfl_type), DimsAre({2, 2}),
FilledWith<TypeParam>(2)));
EXPECT_THAT(arr->At(1), AllOf(IsAllocatedAs(tfl_type), DimsAre({3, 3}),
FilledWith<TypeParam>(2)));
}
TYPED_TEST_P(ListAddNTest, TwoInputs_NoItemsPresent_ListShapesMerge) {
TfLiteType tfl_type = typeToTfLiteType<TypeParam>();
std::optional<TensorType> tensor_type = TflToTensorType(tfl_type);
ASSERT_TRUE(tensor_type.has_value());
ListAddNModel m(2);
m.PopulateListTensor(m.GetIndOfInput(0), {2, -1}, 1, tfl_type);
m.PopulateListTensor(m.GetIndOfInput(1), {-1, 2}, 1, tfl_type);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TensorArray* const arr = m.GetOutput();
ASSERT_EQ(arr->NumElements(), 1);
ASSERT_EQ(arr->ElementType(), tfl_type);
EXPECT_THAT(arr->At(0), AllOf(IsAllocatedAs(tfl_type), DimsAre({2, 2}),
FilledWith<TypeParam>(0)));
}
TYPED_TEST_P(ListAddNTest, TwoInputs_NoItemsPresent_ListShapesUndefinedError) {
TfLiteType tfl_type = typeToTfLiteType<TypeParam>();
std::optional<TensorType> tensor_type = TflToTensorType(tfl_type);
ASSERT_TRUE(tensor_type.has_value());
ListAddNModel m(2);
m.PopulateListTensor(m.GetIndOfInput(0), {2, -1}, 1, tfl_type);
m.PopulateListTensor(m.GetIndOfInput(1), {-1, -1}, 1, tfl_type);
ASSERT_EQ(m.Invoke(), kTfLiteError);
}
TYPED_TEST_P(ListAddNTest, TwoInputs_SomeItemsPresent_UsesElementShape) {
TfLiteType tfl_type = typeToTfLiteType<TypeParam>();
std::optional<TensorType> tensor_type = TflToTensorType(tfl_type);
ASSERT_TRUE(tensor_type.has_value());
ListAddNModel m(2);
m.PopulateListTensor(m.GetIndOfInput(0), {}, 1, tfl_type);
m.PopulateListTensor(m.GetIndOfInput(1), {}, 1, tfl_type);
m.ListSetItem(m.GetIndOfInput(0), 0, {3, 3}, tfl_type,
std::vector<TypeParam>(9, 1).data());
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TensorArray* const arr = m.GetOutput();
ASSERT_EQ(arr->NumElements(), 1);
ASSERT_EQ(arr->ElementType(), tfl_type);
EXPECT_THAT(arr->At(0), AllOf(IsAllocatedAs(tfl_type), DimsAre({3, 3}),
FilledWith<TypeParam>(1)));
}
REGISTER_TYPED_TEST_SUITE_P(ListAddNTest,
TwoInputs_AllItemsPresent_AllSameShape,
TwoInputs_AllItemsPresent_ListsContainMixedShapes,
TwoInputs_NoItemsPresent_ListShapesMerge,
TwoInputs_SomeItemsPresent_UsesElementShape,
TwoInputs_NoItemsPresent_ListShapesUndefinedError);
using ValidTypes = ::testing::Types<int, float>;
INSTANTIATE_TYPED_TEST_SUITE_P(ListAddNTests, ListAddNTest, ValidTypes);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/variants/list_kernels/variant_add_n.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/variants/list_kernels/variant_add_n_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f28e4228-9163-4126-9831-6df089c74fdf | cpp | tensorflow/tensorflow | list_set_item | tensorflow/lite/kernels/variants/list_kernels/list_set_item.cc | tensorflow/lite/kernels/variants/list_kernels/list_set_item_test.cc | #include <utility>
#include "tensorflow/lite/array.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/variants/tensor_array.h"
#include "tensorflow/lite/util.h"
namespace tflite {
namespace variants {
namespace ops {
namespace {
constexpr int kListInputIdx = 0;
constexpr int kIndexInputIdx = 1;
constexpr int kListOutputIdx = 0;
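// ListSetItem and ListPushBack share this kernel through a small policy
// class: SetItem writes at an explicit, non-negative index taken from an
// int32 input tensor, PushBack appends at the end of the array.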
class SetItemSemantic {
public:
SetItemSemantic(TfLiteContext* ctx, TfLiteNode* node)
: ctx_(ctx), node_(node) {}
static constexpr int kItemInputIdx = 2;
TfLiteStatus CheckIndexInput() const {
const TfLiteTensor* index_input;
TF_LITE_ENSURE_OK(ctx_,
GetInputSafe(ctx_, node_, kIndexInputIdx, &index_input));
TF_LITE_ENSURE_TYPES_EQ(ctx_, index_input->type, kTfLiteInt32);
return kTfLiteOk;
}
TfLiteStatus GetIndexVal(const TensorArray& arr, int& result) const {
const TfLiteTensor* index_input;
TF_LITE_ENSURE_OK(ctx_,
GetInputSafe(ctx_, node_, kIndexInputIdx, &index_input));
TF_LITE_ENSURE_EQ(ctx_, index_input->bytes, sizeof(int));
const int* index_data = GetTensorData<int>(index_input);
TF_LITE_ENSURE(ctx_, index_data != nullptr);
const int index = *index_data;
TF_LITE_ENSURE(ctx_, index >= 0);
result = index;
return kTfLiteOk;
}
private:
TfLiteContext* const ctx_;
TfLiteNode* const node_;
};
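// PushBack takes no index input; the target index is the current list length.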
class PushBackSemantic {
public:
PushBackSemantic(TfLiteContext* ctx, TfLiteNode* node) {}
static constexpr int kItemInputIdx = 1;
TfLiteStatus CheckIndexInput() const { return kTfLiteOk; }
TfLiteStatus GetIndexVal(const TensorArray& arr, int& result) const {
result = arr.NumElements();
return kTfLiteOk;
}
};
template <class Semantic>
TfLiteStatus Prepare(TfLiteContext* ctx, TfLiteNode* node) {
const auto semantic = Semantic(ctx, node);
const TfLiteTensor* list_input;
TF_LITE_ENSURE_OK(ctx, GetInputSafe(ctx, node, kListInputIdx, &list_input));
TF_LITE_ENSURE_TYPES_EQ(ctx, list_input->type, kTfLiteVariant);
TF_LITE_ENSURE_OK(ctx, semantic.CheckIndexInput());
TfLiteTensor* output;
TF_LITE_ENSURE_OK(ctx, GetOutputSafe(ctx, node, kListOutputIdx, &output));
TF_LITE_ENSURE_TYPES_EQ(ctx, output->type, kTfLiteVariant);
output->allocation_type = kTfLiteVariantObject;
return kTfLiteOk;
}
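// Eval: clones the input array into the output, deep-copies the item tensor,
// grows the array if the target index is past the end, then stores the copy.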
template <class Semantic>
TfLiteStatus Eval(TfLiteContext* ctx, TfLiteNode* node) {
const auto semantic = Semantic(ctx, node);
const TfLiteTensor* list_input;
TF_LITE_ENSURE_OK(ctx, GetInputSafe(ctx, node, kListInputIdx, &list_input));
TF_LITE_ENSURE_EQ(ctx, list_input->allocation_type, kTfLiteVariantObject);
TensorArray* input_arr =
reinterpret_cast<TensorArray*>(list_input->data.data);
int index;
TF_LITE_ENSURE_OK(ctx, semantic.GetIndexVal(*input_arr, index));
const TfLiteTensor* item_input;
TF_LITE_ENSURE_OK(
ctx, GetInputSafe(ctx, node, semantic.kItemInputIdx, &item_input));
TF_LITE_ENSURE_TYPES_EQ(ctx, input_arr->ElementType(), item_input->type);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(ctx, GetOutputSafe(ctx, node, kListOutputIdx, &output));
TensorArray* output_arr = static_cast<TensorArray*>(
input_arr->CloneTo(static_cast<VariantData*>(output->data.data)));
TensorUniquePtr item_copy = BuildTfLiteTensor(
item_input->type, BuildTfLiteArray(*item_input->dims), kTfLiteDynamic);
TfLiteTensorCopy(item_input, item_copy.get());
if (index >= output_arr->NumElements()) {
output_arr->Resize(index + 1);
}
TF_LITE_ENSURE(ctx, output_arr->Set(index, std::move(item_copy)));
output->data.data = static_cast<VariantData*>(output_arr);
return kTfLiteOk;
}
}
TfLiteRegistration* Register_LIST_SET_ITEM() {
static TfLiteRegistration r = {nullptr, nullptr, Prepare<SetItemSemantic>,
Eval<SetItemSemantic>};
return &r;
}
TfLiteRegistration* Register_LIST_PUSH_BACK() {
static TfLiteRegistration r = {nullptr, nullptr, Prepare<PushBackSemantic>,
Eval<PushBackSemantic>};
return &r;
}
}
}
} | #include <cstddef>
#include <cstring>
#include <optional>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/kernels/variants/list_kernels/test_util.h"
#include "tensorflow/lite/kernels/variants/list_ops_lib.h"
#include "tensorflow/lite/kernels/variants/tensor_array.h"
#include "tensorflow/lite/portable_type_to_tflitetype.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace variants {
namespace ops {
namespace {
using ::testing::AllOf;
template <typename T>
class SetItemWithTypeTest : public ::testing::Test {};
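// Harness wiring list, index, and item inputs to the custom ListSetItem
// kernel; the list input is forced to kTfLiteVariantObject so it can hold a
// TensorArray.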
class ListSetItemModel : public ListOpModel {
public:
explicit ListSetItemModel(TensorData item_data) {
list_input_ = AddInput({TensorType_VARIANT, {}});
index_input_ = AddInput({TensorType_INT32, {1}});
tensor_input_ = AddInput(item_data);
list_output_ = AddOutput({TensorType_VARIANT, {}});
SetCustomOp("ListSetItem", {}, Register_LIST_SET_ITEM);
BuildInterpreter({{}, {1}, item_data.shape});
interpreter_->input_tensor(0)->allocation_type = kTfLiteVariantObject;
}
const TensorArray* GetOutputTensorArray(int tensor_id) {
TfLiteTensor* tensor = interpreter_->tensor(tensor_id);
TFLITE_CHECK(tensor != nullptr && tensor->type == kTfLiteVariant &&
tensor->allocation_type == kTfLiteVariantObject);
return static_cast<const TensorArray*>(
static_cast<const VariantData*>(tensor->data.data));
}
int index_input_;
int list_input_;
int tensor_input_;
int list_output_;
};
constexpr int kNumElements = 4;
TYPED_TEST_SUITE_P(SetItemWithTypeTest);
TYPED_TEST_P(SetItemWithTypeTest, SetItemOnEmptyTensorList_ListShapeDefined) {
TfLiteType tfl_type = typeToTfLiteType<TypeParam>();
std::optional<TensorType> tensor_type = TflToTensorType(tfl_type);
ASSERT_TRUE(tensor_type.has_value());
ListSetItemModel m({tensor_type.value(), {2, 2}});
m.PopulateTensor(m.index_input_, {0});
m.PopulateListTensor(m.list_input_, {2, 2}, kNumElements, tfl_type);
m.PopulateTensor<TypeParam>(m.tensor_input_, {0, 0, 0, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TensorArray* arr = m.GetOutputTensorArray(m.list_output_);
ASSERT_EQ(arr->NumElements(), kNumElements);
ASSERT_EQ(arr->ElementType(), tfl_type);
for (int i = 1; i < arr->NumElements(); ++i) {
EXPECT_EQ(arr->At(i), nullptr);
}
EXPECT_THAT(arr->At(0), AllOf(IsAllocatedAs(tfl_type), DimsAre({2, 2}),
FilledWith(static_cast<TypeParam>(0))));
}
TYPED_TEST_P(SetItemWithTypeTest, SetItemOnEmptyTensorList_ListShapeUnranked) {
TfLiteType tfl_type = typeToTfLiteType<TypeParam>();
std::optional<TensorType> tensor_type = TflToTensorType(tfl_type);
ASSERT_TRUE(tensor_type.has_value());
ListSetItemModel m({tensor_type.value(), {2, 2}});
m.PopulateTensor(m.index_input_, {0});
m.PopulateListTensor(m.list_input_, {}, kNumElements, tfl_type);
m.PopulateTensor<TypeParam>(m.tensor_input_, {0, 0, 0, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TensorArray* arr = m.GetOutputTensorArray(m.list_output_);
ASSERT_EQ(arr->NumElements(), kNumElements);
ASSERT_EQ(arr->ElementType(), tfl_type);
for (int i = 1; i < arr->NumElements(); ++i) {
EXPECT_EQ(arr->At(i), nullptr);
}
EXPECT_THAT(arr->At(0), AllOf(IsAllocatedAs(tfl_type), DimsAre({2, 2}),
FilledWith(static_cast<TypeParam>(0))));
}
TYPED_TEST_P(SetItemWithTypeTest, OverwriteSetItem_ItemsSameShape) {
TfLiteType tfl_type = typeToTfLiteType<TypeParam>();
std::optional<TensorType> tensor_type = TflToTensorType(tfl_type);
ASSERT_TRUE(tensor_type.has_value());
ListSetItemModel m({tensor_type.value(), {2, 2}});
m.PopulateTensor(m.index_input_, {0});
m.PopulateListTensor(m.list_input_, {}, kNumElements, tfl_type);
TypeParam init_item_data[4] = {1, 1, 1, 1};
m.ListSetItem(m.list_input_, 0, {2, 2}, tfl_type, init_item_data);
m.PopulateTensor<TypeParam>(m.tensor_input_, {0, 0, 0, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TensorArray* arr = m.GetOutputTensorArray(m.list_output_);
ASSERT_EQ(arr->NumElements(), kNumElements);
ASSERT_EQ(arr->ElementType(), tfl_type);
for (int i = 1; i < arr->NumElements(); ++i) {
EXPECT_EQ(arr->At(i), nullptr);
}
EXPECT_THAT(arr->At(0), AllOf(IsAllocatedAs(tfl_type), DimsAre({2, 2}),
FilledWith(static_cast<TypeParam>(0))));
}
TYPED_TEST_P(SetItemWithTypeTest,
SetItemOnNonEmptyListAtEmptyIndex_ItemsSameShape) {
TfLiteType tfl_type = typeToTfLiteType<TypeParam>();
std::optional<TensorType> tensor_type = TflToTensorType(tfl_type);
ASSERT_TRUE(tensor_type.has_value());
ListSetItemModel m({tensor_type.value(), {2, 2}});
m.PopulateTensor(m.index_input_, {1});
m.PopulateListTensor(m.list_input_, {}, kNumElements, tfl_type);
TypeParam init_item_data[4] = {1, 1, 1, 1};
m.ListSetItem(m.list_input_, 0, {2, 2}, tfl_type, init_item_data);
m.PopulateTensor<TypeParam>(m.tensor_input_, {0, 0, 0, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TensorArray* arr = m.GetOutputTensorArray(m.list_output_);
ASSERT_EQ(arr->NumElements(), kNumElements);
ASSERT_EQ(arr->ElementType(), tfl_type);
for (int i = 2; i < arr->NumElements(); ++i) {
EXPECT_EQ(arr->At(i), nullptr);
}
EXPECT_THAT(arr->At(0), AllOf(IsAllocatedAs(tfl_type), DimsAre({2, 2}),
FilledWith(static_cast<TypeParam>(1))));
EXPECT_THAT(arr->At(1), AllOf(IsAllocatedAs(tfl_type), DimsAre({2, 2}),
FilledWith(static_cast<TypeParam>(0))));
}
TYPED_TEST_P(SetItemWithTypeTest, OverwriteSetItem_ItemsDifferentShape) {
TfLiteType tfl_type = typeToTfLiteType<TypeParam>();
std::optional<TensorType> tensor_type = TflToTensorType(tfl_type);
ASSERT_TRUE(tensor_type.has_value());
ListSetItemModel m({tensor_type.value(), {2}});
m.PopulateTensor(m.index_input_, {0});
m.PopulateListTensor(m.list_input_, {}, kNumElements, tfl_type);
TypeParam init_item_data[4] = {1, 1, 1, 1};
m.ListSetItem(m.list_input_, 0, {2, 2}, tfl_type, init_item_data);
m.PopulateTensor<TypeParam>(m.tensor_input_, {0, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TensorArray* arr = m.GetOutputTensorArray(m.list_output_);
ASSERT_EQ(arr->NumElements(), kNumElements);
ASSERT_EQ(arr->ElementType(), tfl_type);
for (int i = 1; i < arr->NumElements(); ++i) {
EXPECT_EQ(arr->At(i), nullptr);
}
EXPECT_THAT(arr->At(0), AllOf(IsAllocatedAs(tfl_type), DimsAre({2}),
FilledWith(static_cast<TypeParam>(0))));
}
REGISTER_TYPED_TEST_SUITE_P(SetItemWithTypeTest,
SetItemOnEmptyTensorList_ListShapeDefined,
SetItemOnEmptyTensorList_ListShapeUnranked,
OverwriteSetItem_ItemsSameShape,
SetItemOnNonEmptyListAtEmptyIndex_ItemsSameShape,
OverwriteSetItem_ItemsDifferentShape);
using ValidTypes = ::testing::Types<int, int64_t, bool, float>;
INSTANTIATE_TYPED_TEST_SUITE_P(SetItemTests, SetItemWithTypeTest, ValidTypes);
TEST(ListSetItemTest, ItemNotSameTypeAsList_Fails) {
ListSetItemModel m{{TensorType_INT32, {2, 2}}};
m.PopulateTensor(m.index_input_, {0});
m.PopulateListTensor(m.list_input_, {}, kNumElements, kTfLiteInt64);
ASSERT_EQ(m.Invoke(), kTfLiteError);
}
TEST(ListSetItemTest, IndexLessThanZero_Fails) {
ListSetItemModel m{{TensorType_INT32, {2, 2}}};
m.PopulateTensor(m.index_input_, {-1});
m.PopulateListTensor(m.list_input_, {}, kNumElements, kTfLiteInt32);
ASSERT_EQ(m.Invoke(), kTfLiteError);
}
TEST(ListSetItemTest, IndexGreaterThanListLen_ResizesList) {
ListSetItemModel m{{TensorType_INT32, {2, 2}}};
m.PopulateTensor(m.index_input_, {2});
m.PopulateListTensor(m.list_input_, {}, 2, kTfLiteInt32);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TensorArray* arr = m.GetOutputTensorArray(m.list_output_);
ASSERT_EQ(arr->NumElements(), 3);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/variants/list_kernels/list_set_item.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/variants/list_kernels/list_set_item_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
26620278-c5dd-4003-a0f9-3124c44bf565 | cpp | tensorflow/tensorflow | list_stack | tensorflow/lite/kernels/variants/list_kernels/list_stack.cc | tensorflow/lite/kernels/variants/list_kernels/list_stack_test.cc | #include <cstring>
#include <utility>
#include "tensorflow/lite/array.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/variants/list_ops_util.h"
#include "tensorflow/lite/kernels/variants/tensor_array.h"
namespace tflite {
namespace variants {
namespace ops {
namespace {
constexpr int kListInput = 0;
constexpr int kShapeInput = 1;
constexpr int kTensorOutput = 0;
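// ListStack packs every element of a list into one tensor whose leading
// dimension is the list length; the output is resized dynamically in Eval.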
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
const TfLiteTensor* list_input;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kListInput, &list_input));
TF_LITE_ENSURE_TYPES_EQ(context, list_input->type, kTfLiteVariant);
const TfLiteTensor* shape_input;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kShapeInput, &shape_input));
TF_LITE_ENSURE_TYPES_EQ(context, shape_input->type, kTfLiteInt32);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kTensorOutput, &output));
SetTensorToDynamic(output);
return kTfLiteOk;
}
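// Eval: resolves the element shape by merging the shape operand, the list's
// element shape, and the common shape of any set elements; the result must be
// fully defined. Each element is then copied into the output, with unset
// elements zero-filled.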
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* list_input;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kListInput, &list_input));
TF_LITE_ENSURE_EQ(context, list_input->allocation_type, kTfLiteVariantObject);
TensorArray* arr = static_cast<TensorArray*>(
static_cast<VariantData*>(list_input->data.data));
const TfLiteTensor* shape_input;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kShapeInput, &shape_input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kTensorOutput, &output));
TF_LITE_ENSURE_TYPES_EQ(context, output->type, arr->ElementType());
IntArrayUniquePtr cur_shape_suffix;
TF_LITE_ENSURE_OK(context, GetShapeIfAllEqual(*arr, cur_shape_suffix));
cur_shape_suffix = MergeShapesOrNull(
MergeShapesOrNull(TensorAsShape(*shape_input),
BuildTfLiteArray(*arr->ElementShape())),
std::move(cur_shape_suffix));
TF_LITE_ENSURE_MSG(
context,
cur_shape_suffix != nullptr && IsShapeFullyDefined(*cur_shape_suffix),
"Shapes from input, list and elements are not compatible "
"or do not resolve to fully defined shape.");
IntArrayUniquePtr final_output_shape;
const bool suffix_is_not_scalar =
!(cur_shape_suffix->size == 0 ||
(cur_shape_suffix->size == 1 && cur_shape_suffix->data[0] == 1));
if (suffix_is_not_scalar) {
final_output_shape = BuildTfLiteArray(cur_shape_suffix->size + 1);
memcpy(final_output_shape->data + 1, cur_shape_suffix->data,
cur_shape_suffix->size * sizeof(int));
final_output_shape->data[0] = arr->NumElements();
} else {
final_output_shape = BuildTfLiteArray({arr->NumElements()});
}
context->ResizeTensor(context, output, final_output_shape.release());
const auto num_elements = static_cast<int>(NumElements(output));
if (num_elements == 0) {
TfLiteTensorDataFree(output);
return kTfLiteOk;
}
const int element_num_elements = num_elements / output->dims->data[0];
const size_t bytes_per_element =
element_num_elements * TfLiteTypeGetSize(output->type);
char* raw_data_offset = output->data.raw;
for (int i = 0; i < arr->NumElements(); ++i) {
if (arr->At(i) == nullptr) {
memset(raw_data_offset, 0, bytes_per_element);
} else {
memcpy(raw_data_offset, arr->At(i)->data.data, bytes_per_element);
}
raw_data_offset = raw_data_offset + bytes_per_element;
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_LIST_STACK() {
static TfLiteRegistration r = {nullptr, nullptr, Prepare, Eval};
return &r;
}
}
}
} | #include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/kernels/variants/list_kernels/test_util.h"
#include "tensorflow/lite/kernels/variants/list_ops_lib.h"
#include "tensorflow/lite/schema/schema_generated.h"
using ::testing::ElementsAreArray;
using ::tflite::variants::ops::Register_LIST_STACK;
namespace tflite {
namespace {
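// Harness exposing the list, shape, and stacked-output tensor ids of a single
// ListStack op.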
class ListStackModel : public ListOpModel {
public:
explicit ListStackModel(TensorData output_data) {
tensor_id_ = AddOutput(output_data);
list_id_ = AddInput({TensorType_VARIANT, {}});
shape_id_ = AddInput({TensorType_INT32, {1}});
SetCustomOp("ListStack", {}, Register_LIST_STACK);
BuildInterpreter({{}, {1}});
}
ListStackModel(TensorData output_data, TensorData shape_input_data) {
tensor_id_ = AddOutput(output_data);
list_id_ = AddInput({TensorType_VARIANT, {}});
shape_id_ = AddInput(shape_input_data);
SetCustomOp("ListStack", {}, Register_LIST_STACK);
BuildInterpreter({{}, shape_input_data.shape});
}
const TfLiteTensor* GetOutputTensor(int tensor_id) {
return interpreter_->tensor(tensor_id);
}
int tensor_id_;
int shape_id_;
int list_id_;
};
TEST(ListStackTest, MismatchedListShapeInputShape_Fails) {
ListStackModel m({TensorType_INT32, {2, 2}});
m.PopulateListTensor(m.list_id_, {1}, 2, kTfLiteInt32);
m.PopulateTensor(m.shape_id_, {3});
ASSERT_EQ(m.Invoke(), kTfLiteError);
}
TEST(ListStackTest, MismatchedShapeOfElementsAndInput_Fails) {
ListStackModel m({TensorType_INT32, {2, 2}});
m.PopulateListTensor(m.list_id_, {}, 4, kTfLiteInt32);
m.PopulateTensor(m.shape_id_, {2});
m.ListSetItem(m.list_id_, 0, {1}, kTfLiteInt32, std::vector<int>{0}.data());
m.ListSetItem(m.list_id_, 1, {1}, kTfLiteInt32, std::vector<int>{1}.data());
ASSERT_EQ(m.Invoke(), kTfLiteError);
}
TEST(ListStackTest, ElementsNotSameShape_Fails) {
ListStackModel m({TensorType_INT32, {2, 2}});
m.PopulateListTensor(m.list_id_, {}, 2, kTfLiteInt32);
m.PopulateTensor(m.shape_id_, {2});
m.ListSetItem(m.list_id_, 0, {2}, kTfLiteInt32,
std::vector<int>{2, 2}.data());
m.ListSetItem(m.list_id_, 1, {1}, kTfLiteInt32, std::vector<int>{3}.data());
ASSERT_EQ(m.Invoke(), kTfLiteError);
}
TEST(ListStackTest, NoElementsNoShape_Fails) {
ListStackModel m({TensorType_INT32, {4}});
m.PopulateListTensor(m.list_id_, {}, 2, kTfLiteInt32);
m.PopulateTensor<int>(m.shape_id_, {-1});
EXPECT_EQ(m.Invoke(), kTfLiteError);
}
TEST(ListStackTest, ListElementTypeNotEqualOutputType_Fails) {
ListStackModel m({TensorType_INT32, {4}});
m.PopulateListTensor(m.list_id_, {}, 0, kTfLiteInt64);
m.PopulateTensor<int>(m.shape_id_, {-1});
EXPECT_EQ(m.Invoke(), kTfLiteError);
}
TEST(ListStackTest, ScalarElementShape_FullList_Returns1D) {
ListStackModel m({TensorType_INT32, {2}});
m.PopulateListTensor(m.list_id_, {}, 2, kTfLiteInt32);
m.PopulateTensor(m.shape_id_, {1});
m.ListSetItem(m.list_id_, 0, {1}, kTfLiteInt32, std::vector<int>{2}.data());
m.ListSetItem(m.list_id_, 1, {1}, kTfLiteInt32, std::vector<int>{3}.data());
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TfLiteTensor* output = m.GetOutputTensor(m.tensor_id_);
ASSERT_THAT(output, DimsAre({2}));
ASSERT_THAT(output->type, kTfLiteInt32);
EXPECT_THAT(std::vector<int>(output->data.i32, output->data.i32 + 2),
ElementsAreArray({2, 3}));
EXPECT_EQ(output->allocation_type, kTfLiteDynamic);
}
TEST(ListStackTest, ScalarElementShape_PartialFilledList_Returns1DWithZeroed) {
ListStackModel m({TensorType_INT32, {2}});
m.PopulateListTensor(m.list_id_, {}, 2, kTfLiteInt32);
m.PopulateTensor(m.shape_id_, {1});
m.ListSetItem(m.list_id_, 0, {1}, kTfLiteInt32, std::vector<int>{2}.data());
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TfLiteTensor* output = m.GetOutputTensor(m.tensor_id_);
ASSERT_THAT(output, DimsAre({2}));
ASSERT_THAT(output->type, kTfLiteInt32);
EXPECT_THAT(std::vector<int>(output->data.i32, output->data.i32 + 2),
ElementsAreArray({2, 0}));
EXPECT_EQ(output->allocation_type, kTfLiteDynamic);
}
TEST(ListStackTest, ScalarElementShape_EmptyList_Returns1DAllZeroed) {
ListStackModel m({TensorType_INT32, {2}});
m.PopulateListTensor(m.list_id_, {}, 2, kTfLiteInt32);
m.PopulateTensor(m.shape_id_, {1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TfLiteTensor* output = m.GetOutputTensor(m.tensor_id_);
ASSERT_THAT(output, DimsAre({2}));
ASSERT_THAT(output->type, kTfLiteInt32);
EXPECT_THAT(std::vector<int>(output->data.i32, output->data.i32 + 2),
ElementsAreArray({0, 0}));
EXPECT_EQ(output->allocation_type, kTfLiteDynamic);
}
TEST(ListStackTest, VectorElementShape_FilledList_Returns2D) {
ListStackModel m({TensorType_INT32, {2, 2}});
m.PopulateListTensor(m.list_id_, {}, 2, kTfLiteInt32);
m.PopulateTensor<int>(m.shape_id_, {2});
m.ListSetItem(m.list_id_, 0, {2}, kTfLiteInt32,
std::vector<int>{2, 2}.data());
m.ListSetItem(m.list_id_, 1, {2}, kTfLiteInt32,
std::vector<int>{3, 3}.data());
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TfLiteTensor* output = m.GetOutputTensor(m.tensor_id_);
ASSERT_THAT(output, DimsAre({2, 2}));
EXPECT_THAT(std::vector<int>(output->data.i32, output->data.i32 + 4),
ElementsAreArray({2, 2, 3, 3}));
EXPECT_EQ(output->allocation_type, kTfLiteDynamic);
}
TEST(ListStackTest, VectorElementShape_PartialFilledList_Returns2DWithZeroed) {
ListStackModel m({TensorType_INT32, {2, 2}});
m.PopulateListTensor(m.list_id_, {}, 2, kTfLiteInt32);
m.PopulateTensor<int>(m.shape_id_, {2});
m.ListSetItem(m.list_id_, 0, {2}, kTfLiteInt32,
std::vector<int>{2, 2}.data());
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TfLiteTensor* output = m.GetOutputTensor(m.tensor_id_);
ASSERT_THAT(output, DimsAre({2, 2}));
EXPECT_THAT(std::vector<int>(output->data.i32, output->data.i32 + 4),
ElementsAreArray({2, 2, 0, 0}));
EXPECT_EQ(output->allocation_type, kTfLiteDynamic);
}
TEST(ListStackTest, VectorElementShape_EmptyList_Returns2DAllZeroed) {
ListStackModel m({TensorType_INT32, {2, 2}});
m.PopulateListTensor(m.list_id_, {}, 2, kTfLiteInt32);
m.PopulateTensor<int>(m.shape_id_, {2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TfLiteTensor* output = m.GetOutputTensor(m.tensor_id_);
ASSERT_THAT(output, DimsAre({2, 2}));
EXPECT_THAT(std::vector<int>(output->data.i32, output->data.i32 + 4),
ElementsAreArray({0, 0, 0, 0}));
EXPECT_EQ(output->allocation_type, kTfLiteDynamic);
}
TEST(ListStackTest, NoShapeArguments_ZeroSizeList_InfersShapeFromElements) {
ListStackModel m({TensorType_INT32, {2, 2}});
m.PopulateListTensor(m.list_id_, {}, 2, kTfLiteInt32);
m.PopulateTensor<int>(m.shape_id_, {-1});
m.ListSetItem(m.list_id_, 0, {2}, kTfLiteInt32,
std::vector<int>{2, 2}.data());
EXPECT_EQ(m.Invoke(), kTfLiteOk);
const TfLiteTensor* output = m.GetOutputTensor(m.tensor_id_);
ASSERT_THAT(output, DimsAre({2, 2}));
EXPECT_THAT(std::vector<int>(output->data.i32, output->data.i32 + 4),
ElementsAreArray({2, 2, 0, 0}));
EXPECT_EQ(output->allocation_type, kTfLiteDynamic);
}
TEST(ListStackTest, ListFirstDimZero_ReturnsEmptyTensor) {
ListStackModel m({TensorType_INT32, {0, 2}});
m.PopulateListTensor(m.list_id_, {}, 0, kTfLiteInt32);
m.PopulateTensor<int>(m.shape_id_, {2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TfLiteTensor* output = m.GetOutputTensor(m.tensor_id_);
EXPECT_THAT(output, DimsAre({0, 2}));
}
TEST(ListStackTest, MismatchedOutput_ReturnsResizedOutput1D) {
ListStackModel m({TensorType_INT32, {2}});
m.PopulateListTensor(m.list_id_, {}, 4, kTfLiteInt32);
m.PopulateTensor<int>(m.shape_id_, {1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TfLiteTensor* output = m.GetOutputTensor(m.tensor_id_);
EXPECT_THAT(output, DimsAre({4}));
}
TEST(ListStackTest, MismatchedOutput_ReturnsResizedOutput2D) {
ListStackModel m({TensorType_INT32, std::vector<int>{}});
m.PopulateListTensor(m.list_id_, {}, 2, kTfLiteInt32);
m.PopulateTensor<int>(m.shape_id_, {2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TfLiteTensor* output = m.GetOutputTensor(m.tensor_id_);
EXPECT_THAT(output, DimsAre({2, 2}));
}
TEST(ListStackTest, Trailing0DimInElementShape1D_NonZeroLen_Returns2DNoData) {
ListStackModel m({TensorType_INT32, std::vector<int>{}});
m.PopulateListTensor(m.list_id_, {}, 2, kTfLiteInt32);
m.PopulateTensor<int>(m.shape_id_, {0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TfLiteTensor* output = m.GetOutputTensor(m.tensor_id_);
ASSERT_THAT(output, DimsAre({2, 0}));
EXPECT_EQ(output->bytes, 0);
}
TEST(ListStackTest, Trailing0DimInElementShape2D_NonZeroLen_Returns3DNoData) {
ListStackModel m({TensorType_INT32, {}}, {TensorType_INT32, {2}});
m.PopulateListTensor(m.list_id_, {}, 2, kTfLiteInt32);
m.PopulateTensor<int>(m.shape_id_, {2, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TfLiteTensor* output = m.GetOutputTensor(m.tensor_id_);
ASSERT_THAT(output, DimsAre({2, 2, 0}));
EXPECT_EQ(output->bytes, 0);
}
TEST(ListStackTest, Trailing0DimInElementShape1D_ZeroLen_Returns2DNoData) {
ListStackModel m({TensorType_INT32, {}}, {TensorType_INT32, {1}});
m.PopulateListTensor(m.list_id_, {}, 0, kTfLiteInt32);
m.PopulateTensor<int>(m.shape_id_, {0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TfLiteTensor* output = m.GetOutputTensor(m.tensor_id_);
ASSERT_THAT(output, DimsAre({0, 0}));
EXPECT_EQ(output->bytes, 0);
}
TEST(ListStackTest, Trailing0DimInElementShape2D_ZeroLen_Returns3DNoData) {
ListStackModel m({TensorType_INT32, {}}, {TensorType_INT32, {2}});
m.PopulateListTensor(m.list_id_, {}, 0, kTfLiteInt32);
m.PopulateTensor<int>(m.shape_id_, {2, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TfLiteTensor* output = m.GetOutputTensor(m.tensor_id_);
ASSERT_THAT(output, DimsAre({0, 2, 0}));
EXPECT_EQ(output->bytes, 0);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/variants/list_kernels/list_stack.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/variants/list_kernels/list_stack_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
990e754e-57b8-469c-adf1-c1785a3f9738 | cpp | tensorflow/tensorflow | list_from_tensor | tensorflow/lite/kernels/variants/list_kernels/list_from_tensor.cc | tensorflow/lite/kernels/variants/list_kernels/list_from_tensor_test.cc | #include <cstring>
#include <utility>
#include "tensorflow/lite/array.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/variants/list_ops_lib.h"
#include "tensorflow/lite/kernels/variants/list_ops_util.h"
#include "tensorflow/lite/kernels/variants/tensor_array.h"
#include "tensorflow/lite/util.h"
namespace tflite {
namespace variants {
namespace ops {
namespace {
constexpr int kTensorInput = 0;
constexpr int kElementShapeInput = 1;
constexpr int kListOut = 0;
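// TensorListFromTensor splits a tensor along its first dimension into a list
// with one element per slice.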
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
const TfLiteTensor* element_shape;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, kElementShapeInput, &element_shape));
TF_LITE_ENSURE(context, element_shape->type == kTfLiteInt32);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kListOut, &output));
TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteVariant);
output->allocation_type = kTfLiteVariantObject;
return kTfLiteOk;
}
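// Eval: the list length is dims[0] and each element takes the trailing shape
// dims[1:], which must be compatible with the element_shape operand; the
// matching slice of the input buffer is copied into each new element.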
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* tensor_input;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kTensorInput, &tensor_input));
const int rank = tensor_input->dims->size;
TF_LITE_ENSURE(context, rank > 0);
const int list_len = tensor_input->dims->data[0];
IntArrayUniquePtr element_shape_for_tensors =
BuildTfLiteArray(rank - 1, tensor_input->dims->data + 1);
const TfLiteTensor* element_shape_tensor;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kElementShapeInput,
&element_shape_tensor));
TF_LITE_ENSURE(context, (element_shape_tensor->dims->size == 1 &&
element_shape_tensor->dims->data[0] == rank - 1) ||
element_shape_tensor->dims->size == 0);
IntArrayUniquePtr element_shape_for_list =
TensorAsShape(*element_shape_tensor);
if (element_shape_for_list->size > 0) {
TF_LITE_ENSURE_EQ(context, element_shape_for_list->size,
element_shape_for_tensors->size);
for (int i = 0; i < element_shape_for_tensors->size; ++i) {
const int lhs = element_shape_for_list->data[i];
const int rhs = element_shape_for_tensors->data[i];
TF_LITE_ENSURE(context, lhs == -1 || rhs == -1 || lhs == rhs);
}
}
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kListOut, &output));
TF_LITE_ENSURE_OK(context, TfLiteTensorVariantRealloc<TensorArray>(
output, tensor_input->type,
BuildTfLiteArray(*element_shape_for_list)));
TensorArray* arr =
static_cast<TensorArray*>(static_cast<VariantData*>(output->data.data));
arr->Resize(list_len);
size_t data_offset = 0;
for (int i = 0; i < list_len; ++i) {
TensorUniquePtr tensor_to_set = BuildTfLiteTensor(
tensor_input->type, BuildTfLiteArray(*element_shape_for_tensors),
kTfLiteDynamic);
memcpy(tensor_to_set->data.raw, tensor_input->data.raw + data_offset,
tensor_to_set->bytes);
data_offset += tensor_to_set->bytes;
TF_LITE_ENSURE(context, arr->Set(i, std::move(tensor_to_set)));
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_LIST_FROM_TENSOR() {
static TfLiteRegistration r = {nullptr, nullptr, Prepare, Eval};
return &r;
}
}
}
} | #include <tuple>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/kernels/variants/list_ops_lib.h"
#include "tensorflow/lite/kernels/variants/tensor_array.h"
#include "tensorflow/lite/schema/schema_generated.h"
using ::testing::ElementsAre;
namespace tflite {
namespace variants {
namespace ops {
namespace {
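// Single-op harness around the custom TensorListFromTensor kernel.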
class ListFromTensorModel : public SingleOpModel {
public:
ListFromTensorModel(TensorData tensor_data, TensorData shape_data) {
tensor_id_ = AddInput(tensor_data);
shape_id_ = AddInput(shape_data);
list_id_ = AddOutput({TensorType_VARIANT, {1}});
SetCustomOp("TensorListFromTensor", {},
Register_LIST_FROM_TENSOR);
BuildInterpreter({tensor_data.shape, shape_data.shape});
}
const TensorArray* GetOutputTensorArray(int tensor_id) {
TfLiteTensor* tensor = interpreter_->tensor(tensor_id);
TFLITE_CHECK(tensor != nullptr && tensor->type == kTfLiteVariant &&
tensor->allocation_type == kTfLiteVariantObject);
return static_cast<const TensorArray*>(
static_cast<const VariantData*>(tensor->data.data));
}
int tensor_id_;
int shape_id_;
int list_id_;
};
TEST(ListFromTensorTest, MatrixInput_ReturnsListWithVectorElements) {
ListFromTensorModel m({TensorType_INT32, {2, 2}}, {TensorType_INT32, {1}});
m.PopulateTensor<int>(m.tensor_id_, {1, 2, 3, 4});
m.PopulateTensor<int>(m.shape_id_, {2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TensorArray* arr = m.GetOutputTensorArray(m.list_id_);
ASSERT_EQ(arr->NumElements(), 2);
ASSERT_THAT(arr->ElementShape(), DimsAre({2}));
ASSERT_EQ(arr->ElementType(), kTfLiteInt32);
{
const TfLiteTensor* element = arr->At(0);
ASSERT_THAT(element, DimsAre({2}));
EXPECT_THAT(std::make_tuple(GetTensorData<int>(element), 2),
ElementsAre(1, 2));
}
{
const TfLiteTensor* element = arr->At(1);
ASSERT_THAT(element, DimsAre({2}));
EXPECT_THAT(std::make_tuple(GetTensorData<int>(element), 2),
ElementsAre(3, 4));
}
}
TEST(ListFromTensorTest, VectorInput_ReturnsListWithScalarElements) {
ListFromTensorModel m({TensorType_INT32, {2}}, {TensorType_INT32, {0}});
m.PopulateTensor<int>(m.tensor_id_, {1, 2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TensorArray* arr = m.GetOutputTensorArray(m.list_id_);
ASSERT_EQ(arr->NumElements(), 2);
ASSERT_THAT(arr->ElementShape(), DimsAre({}));
ASSERT_EQ(arr->ElementType(), kTfLiteInt32);
{
const TfLiteTensor* element = arr->At(0);
ASSERT_THAT(element, DimsAre({}));
EXPECT_THAT(std::make_tuple(GetTensorData<int>(element), 1),
ElementsAre(1));
}
{
const TfLiteTensor* element = arr->At(1);
ASSERT_THAT(element, DimsAre({}));
EXPECT_THAT(std::make_tuple(GetTensorData<int>(element), 1),
ElementsAre(2));
}
}
TEST(ListFromTensorTest, 3DInput_ReturnsListWithMatrixElements) {
ListFromTensorModel m({TensorType_INT32, {2, 2, 2}}, {TensorType_INT32, {2}});
m.PopulateTensor<int>(m.tensor_id_, {1, 2, 3, 4, 5, 6, 7, 8});
m.PopulateTensor<int>(m.shape_id_, {2, 2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TensorArray* arr = m.GetOutputTensorArray(m.list_id_);
ASSERT_EQ(arr->NumElements(), 2);
ASSERT_THAT(arr->ElementShape(), DimsAre({2, 2}));
ASSERT_EQ(arr->ElementType(), kTfLiteInt32);
{
const TfLiteTensor* element = arr->At(0);
ASSERT_THAT(element, DimsAre({2, 2}));
EXPECT_THAT(std::make_tuple(GetTensorData<int>(element), 4),
ElementsAre(1, 2, 3, 4));
}
{
const TfLiteTensor* element = arr->At(1);
ASSERT_THAT(element, DimsAre({2, 2}));
EXPECT_THAT(std::make_tuple(GetTensorData<int>(element), 4),
ElementsAre(5, 6, 7, 8));
}
}
TEST(ListFromTensorTest, MismatchedShapeInputTensorShape_Fails) {
ListFromTensorModel m({TensorType_INT32, {2, 2, 2}}, {TensorType_INT32, {2}});
m.PopulateTensor<int>(m.shape_id_, {2, 3});
ASSERT_EQ(m.Invoke(), kTfLiteError);
}
TEST(ListFromTensorTest, ScalarInput_Fails) {
ListFromTensorModel m({TensorType_INT32, {}}, {TensorType_INT32, {}});
ASSERT_EQ(m.Invoke(), kTfLiteError);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/variants/list_kernels/list_from_tensor.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/variants/list_kernels/list_from_tensor_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
037392cf-1249-4b50-8b23-6ba6a5e9bddd | cpp | tensorflow/tensorflow | list_get_item | tensorflow/lite/kernels/variants/list_kernels/list_get_item.cc | tensorflow/lite/kernels/variants/list_kernels/list_get_item_test.cc | #include <cstddef>
#include <cstring>
#include "tensorflow/lite/array.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/variants/list_ops_lib.h"
#include "tensorflow/lite/kernels/variants/list_ops_util.h"
#include "tensorflow/lite/kernels/variants/tensor_array.h"
#include "tensorflow/lite/util.h"
namespace tflite {
namespace variants {
namespace ops {
namespace {
constexpr int kListInput = 0;
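// TensorListGetItem and TensorListPopBack share this kernel through a policy
// class: GetItem reads the element at an explicit index, while PopBack reads
// the last element and additionally emits the shortened list.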
class GetItemSemantic {
public:
GetItemSemantic(TfLiteContext* ctx, TfLiteNode* node)
: ctx_(ctx), node_(node) {}
static constexpr int kElementShapeInputIdx = 2;
static constexpr int kTensorOutputIdx = 0;
static constexpr int kIndexInputIdx = 1;
[[nodiscard]] TfLiteStatus CheckAndHandleTensors() const {
TF_LITE_ENSURE(ctx_, NumInputs(node_) == 3 && NumOutputs(node_) == 1);
const TfLiteTensor* index_input;
TF_LITE_ENSURE_OK(ctx_,
GetInputSafe(ctx_, node_, kIndexInputIdx, &index_input));
TF_LITE_ENSURE_TYPES_EQ(ctx_, index_input->type, kTfLiteInt32);
return kTfLiteOk;
}
[[nodiscard]] TfLiteStatus GetIndexVal(const TensorArray* const arr,
int& result) const {
const TfLiteTensor* index_input;
TF_LITE_ENSURE_OK(ctx_,
GetInputSafe(ctx_, node_, kIndexInputIdx, &index_input));
TF_LITE_ENSURE_EQ(ctx_, index_input->bytes, sizeof(int));
result = *GetTensorData<int>(index_input);
return kTfLiteOk;
}
[[nodiscard]] TfLiteStatus HandleOutput(const TensorArray* const arr) const {
return kTfLiteOk;
}
private:
TfLiteContext* const ctx_;
TfLiteNode* const node_;
};
class PopBackSemantic {
public:
PopBackSemantic(TfLiteContext* ctx, TfLiteNode* node)
: ctx_(ctx), node_(node) {}
static constexpr int kElementShapeInputIdx = 1;
static constexpr int kTensorOutputIdx = 1;
static constexpr int kListOutputIdx = 0;
[[nodiscard]] TfLiteStatus CheckAndHandleTensors() const {
TF_LITE_ENSURE(ctx_, NumInputs(node_) == 2 && NumOutputs(node_) == 2);
TfLiteTensor* list_output;
TF_LITE_ENSURE_OK(ctx_,
GetOutputSafe(ctx_, node_, kListOutputIdx, &list_output));
TF_LITE_ENSURE_TYPES_EQ(ctx_, list_output->type, kTfLiteVariant);
list_output->allocation_type = kTfLiteVariantObject;
return kTfLiteOk;
}
[[nodiscard]] TfLiteStatus GetIndexVal(const TensorArray* const arr,
int& result) const {
result = arr->NumElements() - 1;
return kTfLiteOk;
}
[[nodiscard]] TfLiteStatus HandleOutput(const TensorArray* const arr) const {
TfLiteTensor* list_output;
TF_LITE_ENSURE_OK(ctx_,
GetOutputSafe(ctx_, node_, kListOutputIdx, &list_output));
TensorArray* output_arr = static_cast<TensorArray*>(
arr->CloneTo(static_cast<VariantData*>(list_output->data.data)));
output_arr->Resize(output_arr->NumElements() - 1);
list_output->data.data = output_arr;
return kTfLiteOk;
}
private:
TfLiteContext* const ctx_;
TfLiteNode* const node_;
};
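// Prepare: the item output keeps static allocation only when its shape
// signature is fully defined; otherwise it is marked dynamic and resized
// during Eval.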
template <class Semantic>
TfLiteStatus Prepare(TfLiteContext* ctx, TfLiteNode* node) {
const auto semantic = Semantic(ctx, node);
TF_LITE_ENSURE_OK(ctx, semantic.CheckAndHandleTensors());
const TfLiteTensor* list_input;
TF_LITE_ENSURE_OK(ctx, GetInputSafe(ctx, node, kListInput, &list_input));
TF_LITE_ENSURE_TYPES_EQ(ctx, list_input->type, kTfLiteVariant);
const TfLiteTensor* element_shape_input;
TF_LITE_ENSURE_OK(ctx, GetInputSafe(ctx, node, semantic.kElementShapeInputIdx,
&element_shape_input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(
ctx, GetOutputSafe(ctx, node, semantic.kTensorOutputIdx, &output));
const TfLiteIntArray* const out_dims_sig = output->dims_signature;
if (out_dims_sig == nullptr || out_dims_sig->size <= 0 ||
!IsShapeFullyDefined(*out_dims_sig)) {
SetTensorToDynamic(output);
}
return kTfLiteOk;
}
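// Eval: a present element is copied to the output. An absent element yields
// zeros; when the output is dynamic, its shape is inferred by merging the
// list's element shape with the element_shape operand, falling back to the
// shape shared by the other set elements.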
template <class Semantic>
TfLiteStatus Eval(TfLiteContext* ctx, TfLiteNode* node) {
const auto semantic = Semantic(ctx, node);
const TfLiteTensor* list_input;
TF_LITE_ENSURE_OK(ctx, GetInputSafe(ctx, node, kListInput, &list_input));
TF_LITE_ENSURE_EQ(ctx, list_input->allocation_type, kTfLiteVariantObject);
const auto* arr = static_cast<const TensorArray*>(
static_cast<VariantData*>(list_input->data.data));
int idx;
TF_LITE_ENSURE_OK(ctx, semantic.GetIndexVal(arr, idx));
TF_LITE_ENSURE(ctx, idx >= 0 && idx < arr->NumElements());
TfLiteTensor* output;
TF_LITE_ENSURE_OK(
ctx, GetOutputSafe(ctx, node, semantic.kTensorOutputIdx, &output));
TF_LITE_ENSURE_TYPES_EQ(ctx, arr->ElementType(), output->type);
const TfLiteTensor* const element = arr->At(idx);
if (element != nullptr) {
if (IsDynamicTensor(output)) {
size_t bytes;
TF_LITE_ENSURE_OK(ctx, BytesRequired(output->type, element->dims->data,
element->dims->size, &bytes, ctx));
TF_LITE_ENSURE_OK(ctx, TfLiteTensorResizeMaybeCopy(bytes, output, false));
}
TF_LITE_ENSURE_OK(ctx, TfLiteTensorCopy(element, output));
return semantic.HandleOutput(arr);
}
if (!IsDynamicTensor(output)) {
memset(output->data.data, 0, output->bytes);
return semantic.HandleOutput(arr);
}
const TfLiteTensor* element_shape_input;
TF_LITE_ENSURE_OK(ctx, GetInputSafe(ctx, node, semantic.kElementShapeInputIdx,
&element_shape_input));
IntArrayUniquePtr output_shape =
MergeShapesOrNull(BuildTfLiteArray(*arr->ElementShape()),
TensorAsShape(*element_shape_input));
TF_LITE_ENSURE(ctx, output_shape != nullptr);
const bool can_infer_shape = (element_shape_input->dims->size != 0 ||
arr->ElementShape()->size != 0) &&
IsShapeFullyDefined(*output_shape);
if (!can_infer_shape) {
TF_LITE_ENSURE_MSG(
ctx,
GetShapeIfAllEqual(*arr, output_shape) == kTfLiteOk &&
output_shape != nullptr,
"Failed to infer the output shape for an item which has not been set.");
}
ctx->ResizeTensor(ctx, output, output_shape.release());
memset(output->data.data, 0, output->bytes);
return semantic.HandleOutput(arr);
}
}
TfLiteRegistration* Register_LIST_GET_ITEM() {
static TfLiteRegistration r = {nullptr, nullptr, Prepare<GetItemSemantic>,
Eval<GetItemSemantic>};
return &r;
}
TfLiteRegistration* Register_LIST_POP_BACK() {
static TfLiteRegistration r = {nullptr, nullptr, Prepare<PopBackSemantic>,
Eval<PopBackSemantic>};
return &r;
}
}
}
} | #include <tuple>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/kernels/variants/list_kernels/test_util.h"
#include "tensorflow/lite/kernels/variants/list_ops_lib.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace variants {
namespace ops {
namespace {
using ::testing::ElementsAre;
class ListGetItemModel : public ListOpModel {
public:
ListGetItemModel(TensorData index, TensorData element_shape,
TensorData output) {
list_input_ = AddInput({TensorType_VARIANT, {}});
index_input_ = AddInput(index);
element_shape_input_ = AddInput(element_shape);
output_ = AddOutput(output);
SetCustomOp("TensorListGetItem", {},
Register_LIST_GET_ITEM);
BuildInterpreter({{}, index.shape, element_shape.shape});
}
ListGetItemModel(int index, const std::vector<int>& element_shape) {
list_input_ = AddInput({TensorType_VARIANT, {}});
index_input_ = AddInput({TensorType_INT32, {1}});
element_shape_input_ =
AddInput({TensorType_INT32, {static_cast<int>(element_shape.size())}});
output_ = AddOutput({TensorType_INT32, element_shape});
SetCustomOp("TensorListGetItem", {},
Register_LIST_GET_ITEM);
BuildInterpreter({{}, {1}, {static_cast<int>(element_shape.size())}});
PopulateListTensor(list_input_, {}, 2, kTfLiteInt32);
PopulateTensor(index_input_, {index});
PopulateTensor(element_shape_input_, element_shape);
}
const TfLiteTensor* GetOutput(int idx) { return interpreter_->tensor(idx); }
int list_input_;
int index_input_;
int element_shape_input_;
int output_;
};
TEST(ListGetItemTest, IndexOOB_Fails) {
ListGetItemModel m(-1, {2, 2});
ASSERT_EQ(m.Invoke(), kTfLiteError);
}
TEST(ListGetItemTest, GetPresentItem_ReturnsElement_Dynamic) {
ListGetItemModel m({TensorType_INT32, {}}, {TensorType_INT32, {2}},
{TensorType_INT32, {}});
m.PopulateListTensor(m.list_input_, {2, 2}, 3, kTfLiteInt32);
m.PopulateTensor(m.element_shape_input_, {2, 2});
m.PopulateTensor(m.index_input_, {0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TfLiteTensor* output = m.GetOutput(m.output_);
ASSERT_EQ(output->type, kTfLiteInt32);
ASSERT_THAT(output, DimsAre({2, 2}));
EXPECT_EQ(output->allocation_type, kTfLiteDynamic);
EXPECT_THAT(std::tuple(GetTensorData<int>(output), 4),
ElementsAre(0, 0, 0, 0));
}
TEST(ListGetItemTest, GetUnsetItem_InferShapeFromListShape_Dynamic) {
ListGetItemModel m({TensorType_INT32, {}}, {TensorType_INT32, {}},
{TensorType_INT32, {}});
m.PopulateListTensor(m.list_input_, {2, 2}, 2, kTfLiteInt32);
m.PopulateTensor(m.index_input_, {0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TfLiteTensor* output = m.GetOutput(m.output_);
ASSERT_EQ(output->type, kTfLiteInt32);
ASSERT_THAT(output, DimsAre({2, 2}));
EXPECT_EQ(output->allocation_type, kTfLiteDynamic);
EXPECT_THAT(std::tuple(GetTensorData<int>(output), 4),
ElementsAre(0, 0, 0, 0));
}
TEST(ListGetItemTest, GetUnsetItem_InferShapeFromGivenShape_Dynamic) {
ListGetItemModel m({TensorType_INT32, {}}, {TensorType_INT32, {2}},
{TensorType_INT32, {}});
m.PopulateListTensor(m.list_input_, {}, 2, kTfLiteInt32);
m.PopulateTensor(m.index_input_, {0});
m.PopulateTensor(m.element_shape_input_, {2, 2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TfLiteTensor* output = m.GetOutput(m.output_);
ASSERT_EQ(output->type, kTfLiteInt32);
ASSERT_THAT(output, DimsAre({2, 2}));
EXPECT_EQ(output->allocation_type, kTfLiteDynamic);
EXPECT_THAT(std::tuple(GetTensorData<int>(output), 4),
ElementsAre(0, 0, 0, 0));
}
TEST(ListGetItemTest, GetUnsetItem_InferShapeFromOtherElements_Dynamic) {
ListGetItemModel m({TensorType_INT32, {}}, {TensorType_INT32, {}},
{TensorType_INT32, {}});
m.PopulateListTensor(m.list_input_, {}, 3, kTfLiteInt32);
m.ListSetItem(m.list_input_, 1, {2, 2}, kTfLiteInt32,
std::vector<int>{1, 2, 3, 4}.data());
m.ListSetItem(m.list_input_, 2, {2, 2}, kTfLiteInt32,
std::vector<int>{5, 6, 7, 8}.data());
m.PopulateTensor(m.index_input_, {0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TfLiteTensor* output = m.GetOutput(m.output_);
ASSERT_EQ(output->type, kTfLiteInt32);
ASSERT_THAT(output, DimsAre({2, 2}));
EXPECT_EQ(output->allocation_type, kTfLiteDynamic);
EXPECT_THAT(std::tuple(GetTensorData<int>(output), 4),
ElementsAre(0, 0, 0, 0));
}
TEST(ListGetItemTest,
GetUnsetItem_InferShapeFromMergedListShapeGivenShape_Dynamic) {
ListGetItemModel m({TensorType_INT32, {}}, {TensorType_INT32, {2}},
{TensorType_INT32, {}});
m.PopulateListTensor(m.list_input_, {2, -1}, 3, kTfLiteInt32);
m.PopulateTensor(m.element_shape_input_, {-1, 2});
m.PopulateTensor(m.index_input_, {0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TfLiteTensor* output = m.GetOutput(m.output_);
ASSERT_EQ(output->type, kTfLiteInt32);
ASSERT_THAT(output, DimsAre({2, 2}));
EXPECT_EQ(output->allocation_type, kTfLiteDynamic);
EXPECT_THAT(std::tuple(GetTensorData<int>(output), 4),
ElementsAre(0, 0, 0, 0));
}
TEST(ListGetItemTest, GetPresentItem_ReturnsElement_ScalarFallsBackDynamic) {
ListGetItemModel m({TensorType_INT32, {}}, {TensorType_INT32, {}},
{TensorType_INT32, {}});
m.PopulateListTensor(m.list_input_, {}, 3, kTfLiteInt32);
m.ListSetItem(m.list_input_, 1, {}, kTfLiteInt32, std::vector<int>{1}.data());
m.PopulateTensor(m.index_input_, {1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TfLiteTensor* output = m.GetOutput(m.output_);
ASSERT_EQ(output->type, kTfLiteInt32);
ASSERT_THAT(output, DimsAre({}));
EXPECT_EQ(output->allocation_type, kTfLiteDynamic);
EXPECT_THAT(std::tuple(GetTensorData<int>(output), 1), ElementsAre(1));
}
TEST(ListGetItemTest, GetPresentItem_ReturnsElement_Static) {
TensorData output_spec({TensorType_INT32, {2, 2}});
output_spec.shape_signature = {2, 2};
ListGetItemModel m({TensorType_INT32, {}}, {TensorType_INT32, {2}},
output_spec);
m.PopulateListTensor(m.list_input_, {2, 2}, 3, kTfLiteInt32);
m.ListSetItem(m.list_input_, 1, {2, 2}, kTfLiteInt32,
std::vector<int>{1, 2, 3, 4}.data());
m.PopulateTensor(m.element_shape_input_, {2, 2});
m.PopulateTensor(m.index_input_, {1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TfLiteTensor* output = m.GetOutput(m.output_);
ASSERT_EQ(output->type, kTfLiteInt32);
ASSERT_THAT(output, DimsAre({2, 2}));
EXPECT_EQ(output->allocation_type, kTfLiteArenaRw);
EXPECT_THAT(std::tuple(GetTensorData<int>(output), 4),
ElementsAre(1, 2, 3, 4));
}
TEST(ListGetItemTest, GetPresentItem_OutputShapeMismatched_Fails_Static) {
TensorData output_spec({TensorType_INT32, {2, 2}});
output_spec.shape_signature = {2, 2};
ListGetItemModel m({TensorType_INT32, {}}, {TensorType_INT32, {}},
output_spec);
m.PopulateListTensor(m.list_input_, {}, 3, kTfLiteInt32);
m.ListSetItem(m.list_input_, 1, {3, 3}, kTfLiteInt32,
std::vector<int>{1, 2, 3, 4, 5, 6, 7, 8, 9}.data());
m.PopulateTensor(m.index_input_, {1});
ASSERT_EQ(m.Invoke(), kTfLiteError);
}
TEST(ListGetItemTest, GetUnsetItem_Static) {
TensorData output_spec({TensorType_INT32, {2, 2}});
output_spec.shape_signature = {2, 2};
ListGetItemModel m({TensorType_INT32, {}}, {TensorType_INT32, {2}},
output_spec);
m.PopulateListTensor(m.list_input_, {2, 2}, 3, kTfLiteInt32);
m.PopulateTensor(m.element_shape_input_, {2, 2});
m.PopulateTensor(m.index_input_, {0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TfLiteTensor* output = m.GetOutput(m.output_);
ASSERT_EQ(output->type, kTfLiteInt32);
ASSERT_THAT(output, DimsAre({2, 2}));
EXPECT_EQ(output->allocation_type, kTfLiteArenaRw);
EXPECT_THAT(std::tuple(GetTensorData<int>(output), 4),
ElementsAre(0, 0, 0, 0));
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/variants/list_kernels/list_get_item.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/variants/list_kernels/list_get_item_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5cab47d9-a1a6-4e16-bd6b-9e28cc39f160 | cpp | tensorflow/tensorflow | list_element_shape | tensorflow/lite/kernels/variants/list_kernels/list_element_shape.cc | tensorflow/lite/kernels/variants/list_kernels/list_element_shape_test.cc | #include <cstdint>
#include <cstring>
#include "tensorflow/lite/array.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/variants/list_ops_lib.h"
#include "tensorflow/lite/kernels/variants/tensor_array.h"
namespace tflite {
namespace variants {
namespace ops {
namespace list_element_shape {
namespace {
using ::tflite::variants::TensorArray;
constexpr int kListInput = 0;
constexpr int kShapeOut = 0;
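// Validates that the single input is a variant list tensor and that the
// output is int32, then marks the output dynamic so Eval can resize it.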
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
const TfLiteTensor* list_input;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kListInput, &list_input));
TF_LITE_ENSURE(context, list_input->type == kTfLiteVariant);
TfLiteTensor* shape_out;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kShapeOut, &shape_out));
TF_LITE_ENSURE_TYPES_EQ(context, shape_out->type, kTfLiteInt32);
SetTensorToDynamic(shape_out);
return kTfLiteOk;
}
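// Copies the list's element shape into the output tensor: an unranked
// element shape yields a scalar -1, a {0} element shape (scalar elements)
// yields an empty tensor, and any other shape is copied dimension by
// dimension.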
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* list_input;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kListInput, &list_input));
const TensorArray* const list =
reinterpret_cast<const TensorArray*>(list_input->data.data);
const TfLiteIntArray& element_shape = *list->ElementShape();
TfLiteTensor* shape_out;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kShapeOut, &shape_out));
if (element_shape.size == 0) {
context->ResizeTensor(context, shape_out, BuildTfLiteArray(0).release());
GetTensorData<int32_t>(shape_out)[0] = -1;
} else if (element_shape.data[0] == 0) {
context->ResizeTensor(context, shape_out, BuildTfLiteArray({0}).release());
} else {
context->ResizeTensor(context, shape_out,
BuildTfLiteArray({element_shape.size}).release());
memcpy(GetTensorData<int32_t>(shape_out), element_shape.data,
element_shape.size * sizeof(int32_t));
}
return kTfLiteOk;
}
}
}
TfLiteRegistration* Register_LIST_ELEMENT_SHAPE() {
static TfLiteRegistration r = {nullptr, nullptr, list_element_shape::Prepare,
list_element_shape::Eval};
return &r;
}
}
}
} | #include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/kernels/variants/list_kernels/test_util.h"
#include "tensorflow/lite/kernels/variants/list_ops_lib.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace variants {
namespace ops {
namespace {
using ::testing::ElementsAreArray;
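// Single-op model wrapping the custom ListElementShape kernel: one variant
// list input and one int32 shape output.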
class ListElementShapeModel : public ListOpModel {
public:
ListElementShapeModel() {
list_input_ = AddInput({TensorType_VARIANT, {}});
shape_output_ = AddOutput({TensorType_INT32, {}});
SetCustomOp("ListElementShape", {}, Register_LIST_ELEMENT_SHAPE);
BuildInterpreter({{}});
}
const TfLiteTensor* GetOutputTensor(int index) {
return interpreter_->tensor(index);
}
int list_input_;
int shape_output_;
};
TEST(ListElementShapeTest, MultiDimStaticShape) {
ListElementShapeModel m;
m.PopulateListTensor(0, {2, 2}, 10, kTfLiteInt32);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TfLiteTensor* const out = m.GetOutputTensor(m.shape_output_);
ASSERT_THAT(out, DimsAre({2}));
ASSERT_THAT(std::vector<int>(out->data.i32, out->data.i32 + 2),
ElementsAreArray({2, 2}));
}
TEST(ListElementShapeTest, MultiDimWithDynamicDims) {
ListElementShapeModel m;
m.PopulateListTensor(0, {2, -1, 3}, 10, kTfLiteInt32);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TfLiteTensor* const out = m.GetOutputTensor(m.shape_output_);
ASSERT_THAT(out, DimsAre({3}));
ASSERT_THAT(std::vector<int>(out->data.i32, out->data.i32 + 3),
ElementsAreArray({2, -1, 3}));
}
TEST(ListElementShapeTest, ScalarShape) {
ListElementShapeModel m;
m.PopulateListTensor(0, {0}, 10, kTfLiteInt32);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TfLiteTensor* const out = m.GetOutputTensor(m.shape_output_);
ASSERT_THAT(out, DimsAre({0}));
ASSERT_EQ(out->bytes, 0);
}
TEST(ListElementShapeTest, UnrankedShape) {
ListElementShapeModel m;
m.PopulateListTensor(0, {}, 10, kTfLiteInt32);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TfLiteTensor* const out = m.GetOutputTensor(m.shape_output_);
ASSERT_THAT(out, DimsAre({}));
ASSERT_EQ(out->bytes, sizeof(int));
ASSERT_EQ(out->data.i32[0], -1);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/variants/list_kernels/list_element_shape.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/variants/list_kernels/list_element_shape_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
97a52a24-2e0a-4ad9-a56f-7f2e75f9a551 | cpp | tensorflow/tensorflow | parse_example | tensorflow/lite/kernels/parse_example/parse_example.cc | tensorflow/lite/kernels/parse_example/parse_example_test.cc | #include "tensorflow/lite/kernels/parse_example/parse_example.h"
#include <algorithm>
#include <cstddef>
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/core/example/feature.pb.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/platform/blocking_counter.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/fingerprint.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/util/example_proto_fast_parsing.h"
#include "tensorflow/core/util/presized_cuckoo_map.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/parse_example/example_proto_fast_parsing.h"
#include "tensorflow/lite/mutable_op_resolver.h"
#include "tensorflow/lite/string_util.h"
namespace tflite {
namespace ops {
namespace custom {
namespace parse_example {
namespace {
namespace tf = ::tensorflow;
using tf::Status;
using tf::StringPiece;
using tf::tstring;
using tf::example::CopyOrMoveBlock;
using tf::example::FastParseExampleConfig;
using tf::example::GetListFromBuffer;
using tf::example::LimitedArraySlice;
using tf::example::ParseExample;
using tf::example::SeededHasher;
using tf::example::SmallVector;
using tf::example::SparseBuffer;
using tf::example::Type;
using tf::example::parsed::Example;
using ConfigIndex = tf::PresizedCuckooMap<std::pair<int32_t, Type>>;
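// Collects the op's output tensors; dense_tensors holds temporary tf::Tensor
// storage for string-typed dense outputs before they are written back in
// TfLite string format.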
struct TfLiteResult {
std::vector<TfLiteTensor*> dense_values;
std::vector<TfLiteTensor*> sparse_values;
std::vector<TfLiteTensor*> sparse_indices;
std::vector<TfLiteTensor*> sparse_shapes;
std::map<int, tf::Tensor> dense_tensors;
};
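// Fills a variable-length dense output with the configured default value and
// then copies each example's parsed values into its minibatch slot.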
template <typename T>
void FillAndCopyVarLen(const int d, const size_t num_elements,
const size_t num_elements_per_minibatch,
const FastParseExampleConfig& config,
std::vector<SparseBuffer>& varlen_dense_buffers,
TfLiteTensor* values) {
const tf::Tensor& default_value = config.dense[d].default_value;
std::fill(reinterpret_cast<T*>(values->data.raw),
reinterpret_cast<T*>(values->data.raw) + num_elements,
default_value.flat<T>()(0));
auto data = reinterpret_cast<T*>(values->data.raw);
const SparseBuffer& buffer = varlen_dense_buffers[d];
const auto& end_indices = buffer.example_end_indices;
const size_t examples_in_buffer = end_indices.size();
const auto& list = GetListFromBuffer<T>(buffer);
auto list_ptr = list.begin();
size_t elements_tally = 0;
for (size_t j = 0; j < examples_in_buffer; ++j) {
const size_t num_elems = end_indices[j] - elements_tally;
CopyOrMoveBlock(list_ptr, list_ptr + num_elems, data);
list_ptr += num_elems;
data += num_elements_per_minibatch;
elements_tally = end_indices[j];
}
DCHECK(elements_tally == list.size());
}
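// Wraps the serialized proto bytes in a CodedInputStream (with aliasing
// enabled) and parses them into the lightweight parsed::Example form.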
bool ParseExample(StringRef serialized, Example* example) {
DCHECK(example != nullptr);
tf::protobuf::io::CodedInputStream stream(
reinterpret_cast<const uint8_t*>(serialized.str), serialized.len);
tensorflow::example::EnableAliasing(&stream);
return ParseExample(&stream, example);
}
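// Parses a single serialized Example, routing each feature either into the
// preallocated dense output tensors or into per-feature sparse/varlen
// buffers, and back-fills defaults for dense features that were absent.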
Status FastParseSerializedExample(
StringRef serialized_example, const tstring& example_name,
const size_t example_index, const FastParseExampleConfig& config,
bool* quick_filter, int quick_filter_size,
const std::unique_ptr<ConfigIndex>& config_index, int config_index_size,
SeededHasher* hasher, std::vector<TfLiteTensor*>* output_dense,
std::vector<SparseBuffer>* output_varlen_dense,
std::vector<SparseBuffer>* output_sparse,
std::map<absl::string_view, int>& stats, TfLiteResult* result) {
DCHECK(output_dense != nullptr);
tensorflow::example::parsed::Example parsed_example;
if (!ParseExample(serialized_example, &parsed_example)) {
return tf::errors::Internal("Failed to parse example");
}
std::vector<int64_t> dense_feature_last_example(config.dense.size(), -1);
std::vector<int64_t> sparse_feature_last_example(config.sparse.size(), -1);
const size_t parsed_example_size = parsed_example.size();
for (size_t i = 0; i < parsed_example_size; ++i) {
tensorflow::example::parsed::FeatureMapEntry& name_and_feature =
parsed_example[parsed_example_size - i - 1];
const StringPiece feature_name = name_and_feature.first;
tensorflow::example::parsed::Feature& feature = name_and_feature.second;
if (feature_name.length() >= quick_filter_size ||
!quick_filter[feature_name.length()]) {
continue;
}
const uint64_t h = (*hasher)(feature_name);
std::pair<int32_t, Type> d_and_type;
if (!config_index->Find(h, &d_and_type)) {
continue;
}
size_t d = d_and_type.first;
bool is_dense = d_and_type.second == Type::Dense;
auto example_error = [&](StringPiece suffix) {
return tf::errors::Internal("Name: ", example_name,
", Key: ", feature_name,
", Index: ", example_index, ". ", suffix);
};
auto parse_error = [&] {
return example_error("Can't parse serialized Example.");
};
tf::DataType example_dtype;
if (feature.ParseDataType(&example_dtype) != absl::OkStatus()) {
return parse_error();
}
if (is_dense) {
if (example_dtype == tf::DT_INVALID) continue;
dense_feature_last_example[d] = example_index;
if (example_dtype != config.dense[d].dtype) {
return example_error(absl::StrCat(
"Data types don't match. Data type: ",
DataTypeString(example_dtype),
" but expected type: ", DataTypeString(config.dense[d].dtype)));
}
if (!config.dense[d].variable_length) {
TfLiteTensor* out = (*output_dense)[d];
const std::size_t num_elements = config.dense[d].elements_per_stride;
const std::size_t offset = example_index * num_elements;
auto shape_error = [&](size_t size, StringPiece type_str) {
return example_error(absl::StrCat(
"Number of ", type_str,
" values != expected. "
"Values size:",
size,
" but output shape: ", config.dense[d].shape.DebugString()));
};
switch (config.dense[d].dtype) {
case tf::DT_INT64: {
auto out_p = reinterpret_cast<int64_t*>(out->data.raw) + offset;
LimitedArraySlice<int64_t> slice(out_p, num_elements);
if (!feature.ParseInt64List(&slice)) return parse_error();
if (slice.EndDistance() != 0) {
return shape_error(num_elements - slice.EndDistance(), "int64");
}
break;
}
case tf::DT_FLOAT: {
auto out_p = reinterpret_cast<float*>(out->data.raw) + offset;
LimitedArraySlice<float> slice(out_p, num_elements);
if (!feature.ParseFloatList(&slice)) return parse_error();
if (slice.EndDistance() != 0) {
return shape_error(num_elements - slice.EndDistance(), "float");
}
break;
}
case tf::DT_STRING: {
auto& out_tensor = result->dense_tensors[d];
auto out_p = out_tensor.flat<tstring>().data() + offset;
LimitedArraySlice<tstring> slice(out_p, num_elements);
if (!feature.ParseBytesList(&slice)) return parse_error();
if (slice.EndDistance() != 0) {
return shape_error(num_elements - slice.EndDistance(), "bytes");
}
break;
}
default:
return tf::errors::Internal("Unrecognized dense type: ",
config.dense[d].dtype);
}
} else {
SparseBuffer& out = (*output_varlen_dense)[d];
const std::size_t num_elements = config.dense[d].elements_per_stride;
if (example_dtype != tf::DT_INVALID &&
example_dtype != config.dense[d].dtype) {
return example_error(absl::StrCat(
"Data types don't match. ",
"Expected type: ", DataTypeString(config.dense[d].dtype)));
}
auto shape_error = [&](size_t size, StringPiece type_str) {
return example_error(
absl::StrCat("Number of ", type_str,
" values is not a multiple of stride length. Saw ",
size, " values but output shape is: ",
config.dense[d].shape.DebugString()));
};
switch (config.dense[d].dtype) {
case tf::DT_INT64: {
if (example_dtype != tf::DT_INVALID) {
if (!feature.ParseInt64List(&out.int64_list)) {
return parse_error();
}
if (out.int64_list.size() % num_elements != 0) {
return shape_error(out.int64_list.size(), "int64");
}
}
out.example_end_indices.push_back(out.int64_list.size());
break;
}
case tf::DT_FLOAT: {
if (example_dtype != tf::DT_INVALID) {
if (!feature.ParseFloatList(&out.float_list)) {
return parse_error();
}
if (out.float_list.size() % num_elements != 0) {
return shape_error(out.float_list.size(), "float");
}
}
out.example_end_indices.push_back(out.float_list.size());
break;
}
case tf::DT_STRING: {
if (example_dtype != tf::DT_INVALID) {
if (!feature.ParseBytesList(&out.bytes_list)) {
return parse_error();
}
if (out.bytes_list.size() % num_elements != 0) {
return shape_error(out.bytes_list.size(), "byte");
}
}
out.example_end_indices.push_back(out.bytes_list.size());
break;
}
default:
return tf::errors::Internal("Should not happen: ",
config.dense[d].dtype);
}
}
} else {
auto& last_example = sparse_feature_last_example;
if (last_example[d] == example_index) {
continue;
}
last_example[d] = example_index;
SparseBuffer& out = (*output_sparse)[d];
tf::DataType feature_dtype = config.sparse[d].dtype;
if (example_dtype != tf::DT_INVALID && example_dtype != feature_dtype) {
return tf::errors::Internal("Data types don't match:", example_dtype,
" != ", feature_dtype);
}
switch (feature_dtype) {
case tf::DT_INT64: {
if (example_dtype != tf::DT_INVALID) {
if (!feature.ParseInt64List(&out.int64_list)) {
return parse_error();
}
}
out.example_end_indices.push_back(out.int64_list.size());
break;
}
case tf::DT_FLOAT: {
if (example_dtype != tf::DT_INVALID) {
if (!feature.ParseFloatList(&out.float_list)) {
return parse_error();
}
}
out.example_end_indices.push_back(out.float_list.size());
break;
}
case tf::DT_STRING: {
if (example_dtype != tf::DT_INVALID) {
if (!feature.ParseBytesList(&out.bytes_list)) {
return parse_error();
}
}
out.example_end_indices.push_back(out.bytes_list.size());
break;
}
default:
return tf::errors::Internal("Should not happen: ", feature_dtype);
}
}
}
for (size_t d = 0; d < config.dense.size(); ++d) {
if (config.dense[d].variable_length) continue;
if (dense_feature_last_example[d] == example_index) continue;
if (config.dense[d].default_value.NumElements() == 0) {
return tf::errors::Internal(
"Name: ", example_name, ", Feature: ", config.dense[d].feature_name,
" (data type: ", DataTypeString(config.dense[d].dtype), ")",
" is required but could not be found.");
}
const tf::Tensor& in = config.dense[d].default_value;
TfLiteTensor* out = result->dense_values[d];
const std::size_t num_elements = in.shape().num_elements();
const std::size_t offset = example_index * num_elements;
switch (config.dense[d].dtype) {
case tf::DT_INT64: {
std::copy_n(in.flat<int64_t>().data(), num_elements,
out->data.i64 + offset);
break;
}
case tf::DT_FLOAT: {
std::copy_n(in.flat<float>().data(), num_elements,
out->data.f + offset);
break;
}
case tf::DT_STRING: {
auto& out_tensor = result->dense_tensors[d];
std::copy_n(in.flat<tstring>().data(), num_elements,
out_tensor.flat<tstring>().data() + offset);
break;
}
default:
return tf::errors::Internal("Should not happen: ",
config.dense[d].dtype);
}
}
for (size_t d = 0; d < config.dense.size(); ++d) {
if (!config.dense[d].variable_length) continue;
if (dense_feature_last_example[d] == example_index) continue;
SparseBuffer& out = (*output_varlen_dense)[d];
size_t prev_example_end_index =
out.example_end_indices.empty() ? 0 : out.example_end_indices.back();
out.example_end_indices.push_back(prev_example_end_index);
}
for (size_t d = 0; d < config.sparse.size(); ++d) {
if (sparse_feature_last_example[d] == example_index) continue;
SparseBuffer& out = (*output_sparse)[d];
size_t prev_example_end_index =
out.example_end_indices.empty() ? 0 : out.example_end_indices.back();
out.example_end_indices.push_back(prev_example_end_index);
}
return absl::OkStatus();
}
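// Accumulates the total number of sparse features across all examples and
// tracks the largest per-example feature count.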
void CountSparseFeatures(const SparseBuffer& sparse_buffer,
size_t* total_num_features, size_t* max_num_features) {
const std::vector<size_t>& end_indices = sparse_buffer.example_end_indices;
*total_num_features += end_indices.back();
*max_num_features = std::max(*max_num_features, end_indices[0]);
for (size_t i = 1; i < end_indices.size(); ++i) {
size_t example_size = end_indices[i] - end_indices[i - 1];
*max_num_features = std::max(*max_num_features, example_size);
}
}
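// Copies a parsed sparse buffer into the destination TfLite tensor, using a
// DynamicBuffer for string values.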
void CopySparseBufferToTensor(tf::DataType dtype, size_t offset,
SparseBuffer* src, TfLiteTensor* dst) {
switch (dtype) {
case tf::DT_INT64: {
std::copy(src->int64_list.begin(), src->int64_list.end(),
reinterpret_cast<int64_t*>(dst->data.raw) + offset);
break;
}
case tf::DT_FLOAT: {
std::copy(src->float_list.begin(), src->float_list.end(),
reinterpret_cast<float*>(dst->data.raw) + offset);
break;
}
case tf::DT_STRING: {
DynamicBuffer buffer;
for (auto* begin = src->bytes_list.begin();
begin != src->bytes_list.end(); begin++) {
buffer.AddString(begin->c_str(), begin->size());
}
buffer.WriteToTensor(dst, nullptr);
break;
}
default:
DCHECK(false) << "Encountered unexpected DataType "
<< DataTypeString(dtype)
<< "in variable that should have been checked.";
}
}
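// Copies each string's bytes contiguously into the preformatted TfLite
// string tensor buffer; the two loops cover the parsed examples and the
// remaining batch rows respectively.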
inline void CopyToBuffer(absl::Span<const tstring> vec, char* tensor_buffer,
int num_examples, int batch_size,
int elements_per_stride) {
int i = 0, k = 0;
int start = 0;
for (; i < num_examples; ++i) {
for (int j = 0; j < elements_per_stride; ++j) {
memcpy(tensor_buffer + start, vec[k].c_str(), vec[k].size());
start += vec[k].size();
k++;
}
}
for (; i < batch_size; ++i) {
for (int j = 0; j < elements_per_stride; ++j) {
memcpy(tensor_buffer + start, vec[k].c_str(), vec[k].size());
start += vec[k].size();
k++;
}
}
}
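// Drives parsing for the whole minibatch: parses every serialized example,
// then materializes the sparse indices/values/shapes outputs, the
// variable-length dense outputs, and the string dense outputs in TfLite
// tensor format.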
Status FastParseExampleLite(
const FastParseExampleConfig& config, const TfLiteTensor* serialized,
absl::Span<const tstring> example_names, bool* quick_filter,
int quick_filter_size, const std::unique_ptr<ConfigIndex>& config_index,
int config_index_size, SeededHasher* hasher, TfLiteResult* result,
std::map<absl::string_view, int>& stats, TfLiteContext* context) {
if (result == nullptr) {
return tf::errors::Internal("Result is null");
}
const int count = GetStringCount(serialized);
std::vector<tf::Tensor> fixed_dense_values(config.dense.size());
std::vector<SparseBuffer> sparse_buffers(config.sparse.size());
std::vector<SparseBuffer> varlen_dense_buffers(config.dense.size());
Status status_of_minibatch;
for (size_t e = 0; e < count; ++e) {
status_of_minibatch = FastParseSerializedExample(
GetString(serialized, e),
(!example_names.empty() ? example_names[e] : "<unknown>"), e, config,
quick_filter, quick_filter_size, config_index, config_index_size,
hasher, &result->dense_values, &varlen_dense_buffers, &sparse_buffers,
stats, result);
if (!status_of_minibatch.ok()) break;
}
if (!status_of_minibatch.ok()) {
return status_of_minibatch;
}
for (size_t d = 0; d < config.sparse.size(); ++d) {
size_t total_num_features = 0;
size_t max_num_features = 0;
CountSparseFeatures(sparse_buffers[d], &total_num_features,
&max_num_features);
tf::TensorShape indices_shape;
TfLiteTensor* indices = result->sparse_indices[d];
TfLiteTensor* values = result->sparse_values[d];
TfLiteTensor* sparse_shape = result->sparse_shapes[d];
auto* sparse_shape_ptr = reinterpret_cast<int64_t*>(sparse_shape->data.raw);
sparse_shape_ptr[1] = max_num_features;
TfLiteIntArray* index_shape = TfLiteIntArrayCreate(2);
index_shape->data[0] = total_num_features;
index_shape->data[1] = 2;
context->ResizeTensor(context, indices, index_shape);
TfLiteIntArray* output_shape = TfLiteIntArrayCreate(1);
output_shape->data[0] = total_num_features;
context->ResizeTensor(context, values, output_shape);
SparseBuffer& buffer = sparse_buffers[d];
auto* indices_p = reinterpret_cast<int64_t*>(indices->data.raw);
if (!indices_p) {
return tf::errors::Internal("Indices tensor not allocated!");
}
if (total_num_features > 0) {
int64_t* ix_p = indices_p;
size_t example_index = 0;
int idx0 = 0;
size_t delta = 0;
for (size_t example_end_index : buffer.example_end_indices) {
size_t feature_index = 0;
for (; delta < example_end_index; ++delta) {
if (idx0 < total_num_features) {
*ix_p = example_index;
*(ix_p + 1) = feature_index;
ix_p += 2;
}
++feature_index;
++idx0;
}
++example_index;
}
CopySparseBufferToTensor(config.sparse[d].dtype, 0, &buffer, values);
}
}
for (size_t d = 0; d < config.dense.size(); ++d) {
if (!config.dense[d].variable_length) {
continue;
}
size_t max_num_features = 0;
std::vector<size_t>& end_indices =
varlen_dense_buffers[d].example_end_indices;
max_num_features = std::max(max_num_features, end_indices[0]);
for (size_t i = 1; i < end_indices.size(); ++i) {
size_t example_size = end_indices[i] - end_indices[i - 1];
max_num_features = std::max(max_num_features, example_size);
}
const size_t stride_size = config.dense[d].elements_per_stride;
const size_t max_num_elements = max_num_features / stride_size;
tf::TensorShape values_shape;
DCHECK_EQ(max_num_features % config.dense[d].elements_per_stride, 0);
const size_t batch_size = GetStringCount(serialized);
TF_RETURN_IF_ERROR(values_shape.AddDimWithStatus(batch_size));
TF_RETURN_IF_ERROR(values_shape.AddDimWithStatus(max_num_elements));
for (int i = 1; i < config.dense[d].shape.dims(); ++i) {
TF_RETURN_IF_ERROR(
values_shape.AddDimWithStatus(config.dense[d].shape.dim_size(i)));
}
TfLiteTensor* values = result->dense_values[d];
const size_t num_elements = GetTensorShape(values).FlatSize();
if (num_elements == 0) {
continue;
}
const size_t num_elements_per_minibatch = num_elements / batch_size;
switch (config.dense[d].dtype) {
case tf::DT_INT64: {
FillAndCopyVarLen<int64_t>(d, num_elements, num_elements_per_minibatch,
config, varlen_dense_buffers, values);
break;
}
case tf::DT_FLOAT: {
FillAndCopyVarLen<float>(d, num_elements, num_elements_per_minibatch,
config, varlen_dense_buffers, values);
break;
}
default:
DCHECK(false) << "Encountered unexpected DataType "
<< config.dense[d].dtype
<< "in variable that should have been checked";
}
}
for (size_t d = 0; d < config.dense.size(); ++d) {
if (config.dense[d].variable_length) {
continue;
}
if (result->dense_values[d]->type == kTfLiteString) {
auto& in = result->dense_tensors[d];
auto vec = in.vec<tstring>();
const int batch_size = result->dense_values[d]->dims->data[0];
const int elements_per_stride = config.dense[d].elements_per_stride;
int total_size = 0;
std::vector<int32_t> offsets;
offsets.reserve(vec.size() + 1);
offsets.push_back(0);
int k = 0;
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < elements_per_stride; ++j) {
if (i < count) {
total_size += vec(k++).size();
offsets.push_back(total_size);
} else {
offsets.push_back(total_size);
}
}
}
const int32_t num_strings = offsets.size() - 1;
const size_t required_bytes = sizeof(int32_t) * (num_strings + 2) +
total_size;
char* tensor_buffer =
reinterpret_cast<char*>(result->dense_values[d]->data.raw);
if (result->dense_values[d]->bytes < required_bytes) {
if (result->dense_values[d]->data.raw) {
free(result->dense_values[d]->data.raw);
}
tensor_buffer = reinterpret_cast<char*>(malloc(required_bytes));
result->dense_values[d]->data.raw = tensor_buffer;
result->dense_values[d]->bytes = required_bytes;
}
const int32_t start = sizeof(int32_t) * (num_strings + 2);
memcpy(tensor_buffer, &num_strings, sizeof(int32_t));
for (size_t i = 0; i < offsets.size(); i++) {
int32_t offset_i = start + offsets[i];
memcpy(tensor_buffer + sizeof(int32_t) * (i + 1), &offset_i,
sizeof(int32_t));
}
absl::Span<const tstring> slice(vec.data(), vec.size());
CopyToBuffer(slice, tensor_buffer + start, count, batch_size,
elements_per_stride);
}
}
return absl::OkStatus();
}
}
enum InputTensor {
kExampleTensor = 0,
kNamesTensor = 1,
kSparseKeysTensor = 2,
kDenseKeysTensor = 3,
kRaggedKeysTensor = 4,
};
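// Per-node state built during Prepare/Eval: the parse config, a
// feature-name-length quick filter, and a presized cuckoo map from hashed
// feature name to (feature index, Dense/Sparse).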
struct OpData {
FastParseExampleConfig config;
std::vector<tf::TensorShape> dense_shapes;
int dense_size = 0;
int sparse_size = 0;
std::unique_ptr<ConfigIndex> config_index;
int config_index_size;
SeededHasher hasher;
TfLiteResult got;
bool* quick_filter = nullptr;
int quick_filter_size;
bool created = false;
~OpData() {
if (quick_filter) {
free(quick_filter);
}
}
};
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
return new OpData;
}
template <typename T>
tf::Tensor AsTensor(const std::vector<T>& val) {
tf::Tensor ret(tf::DataTypeToEnum<T>::value,
{static_cast<int64_t>(val.size())});
std::copy_n(val.begin(), val.size(), ret.flat<T>().data());
return ret;
}
enum Version {
V1,
V2,
};
tf::TensorShape TfLiteToTfShape(TfLiteIntArray* array) {
tf::TensorShape shape;
for (int i = 0; i < array->size; i++) {
shape.AddDim(array->data[i]);
}
return shape;
}
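// Recovers dense/sparse feature counts and dense shapes from the flexbuffer
// custom data (either a serialized NodeDef or a flat attribute map), then
// resizes the dense outputs for the batch and marks the sparse
// index/value/shape outputs dynamic.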
template <Version version>
TfLiteStatus PrepareParseExample(TfLiteContext* context, TfLiteNode* node) {
OpData* data = reinterpret_cast<OpData*>(node->user_data);
TF_LITE_ENSURE(context, node->custom_initial_data);
data->config.dense.clear();
data->config.sparse.clear();
data->got.dense_values.clear();
const flexbuffers::Vector& v =
flexbuffers::GetRoot(
reinterpret_cast<const uint8_t*>(node->custom_initial_data),
node->custom_initial_data_size)
.AsVector();
if (v.size() == 2) {
tf::NodeDef nodedef;
TF_LITE_ENSURE_EQ(context, nodedef.ParseFromString(v[1].AsString().str()),
true);
if (version == V1) {
data->dense_size = nodedef.attr().at("Ndense").i();
data->sparse_size = nodedef.attr().at("Nsparse").i();
} else if (version == V2) {
data->dense_size = nodedef.attr().at("Tdense").list().type_size();
data->sparse_size = nodedef.attr().at("num_sparse").i();
}
auto dense_shapes = nodedef.attr().at("dense_shapes").list();
if (data->dense_shapes.empty()) {
for (int i = 0; i < dense_shapes.shape_size(); ++i) {
data->dense_shapes.push_back(dense_shapes.shape(i));
}
}
} else {
const flexbuffers::Map& m =
flexbuffers::GetRoot(
reinterpret_cast<const uint8_t*>(node->custom_initial_data),
node->custom_initial_data_size)
.AsMap();
const flexbuffers::TypedVector keys = m.Keys();
int num_sparse = 0;
int num_dense = 0;
for (int k = 0; k < keys.size(); ++k) {
const std::string key = keys[k].ToString();
const auto value = m[key];
if (key == "Nsparse" || key == "num_sparse") {
num_sparse = value.AsInt32();
}
if (key == "Ndense") {
num_dense = value.AsInt32();
}
}
data->sparse_size = num_sparse;
data->dense_size = num_dense;
if (version == V2) {
const TfLiteTensor* dense_key_tensor =
GetInput(context, node, kDenseKeysTensor);
data->dense_size = GetTensorShape(dense_key_tensor).FlatSize();
}
}
data->config.dense.reserve(data->dense_size);
data->config.sparse.reserve(data->sparse_size);
data->dense_shapes.reserve(data->dense_size);
const auto* serialized = GetInput(context, node, 0);
const int batch_size =
serialized->dims->size > 0 ? serialized->dims->data[0] : 1;
const bool missing_shape_info = data->dense_shapes.empty();
for (int i = 0; i < data->dense_size; i++) {
TfLiteTensor* dense_key_tensor =
GetOutput(context, node, data->sparse_size * 3 + i);
TfLiteIntArray* output_size = TfLiteIntArrayCopy(dense_key_tensor->dims);
if (missing_shape_info) {
data->dense_shapes.push_back(TfLiteToTfShape(output_size));
}
const int original_size = data->dense_shapes[i].dims() > 0
? data->dense_shapes[i].dim_size(0)
: 1;
output_size->data[0] = batch_size * original_size;
context->ResizeTensor(context, dense_key_tensor, output_size);
}
size_t offset = 0;
for (int i = 0; i < data->sparse_size; i++) {
auto* parse_output = GetOutput(context, node, i + offset);
SetTensorToDynamic(parse_output);
TfLiteIntArray* sparse_size = TfLiteIntArrayCreate(2);
sparse_size->data[0] = batch_size;
sparse_size->data[1] = 2;
context->ResizeTensor(context, parse_output, sparse_size);
data->got.sparse_indices.push_back(parse_output);
}
offset += data->sparse_size;
for (int i = 0; i < data->sparse_size; i++) {
auto* parse_output = GetOutput(context, node, i + offset);
SetTensorToDynamic(parse_output);
TfLiteIntArray* sparse_size = TfLiteIntArrayCreate(1);
sparse_size->data[0] = 0;
context->ResizeTensor(context, parse_output, sparse_size);
data->got.sparse_values.push_back(parse_output);
}
offset += data->sparse_size;
for (int i = 0; i < data->sparse_size; i++) {
TfLiteTensor* parse_output = GetOutput(context, node, i + offset);
SetTensorToDynamic(parse_output);
TfLiteIntArray* sparse_size = TfLiteIntArrayCreate(1);
sparse_size->data[0] = 2;
context->ResizeTensor(context, parse_output, sparse_size);
auto* shapes_shape_t = reinterpret_cast<int64_t*>(parse_output->data.i64);
shapes_shape_t[0] = batch_size;
shapes_shape_t[1] = 1;
data->got.sparse_shapes.push_back(parse_output);
}
data->created = false;
return kTfLiteOk;
}
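// On the first call, builds the FastParseExampleConfig from the key tensors
// and defaults, the name-length quick filter, and the hashed config index
// (retrying with new hash seeds on collision); every call then parses the
// serialized batch via FastParseExampleLite.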
template <Version version>
TfLiteStatus EvalParseExample(TfLiteContext* context, TfLiteNode* node) {
OpData* data = reinterpret_cast<OpData*>(node->user_data);
if (!data->created) {
for (int i = 0; i < data->sparse_size; i++) {
int input_index =
version == V1 ? kSparseKeysTensor + i : kSparseKeysTensor;
int string_index = version == V1 ? 0 : i;
const TfLiteTensor* sparse_key_tensor =
GetInput(context, node, input_index);
const auto key = GetString(sparse_key_tensor, string_index);
const auto* sparse_output =
GetOutput(context, node, i + data->sparse_size);
std::string k(key.str, key.len);
switch (sparse_output->type) {
case kTfLiteInt64:
data->config.sparse.emplace_back(k,
tf::DataTypeToEnum<int64_t>::value);
break;
case kTfLiteFloat32:
data->config.sparse.emplace_back(k, tf::DataTypeToEnum<float>::value);
break;
case kTfLiteString:
data->config.sparse.emplace_back(k,
tf::DataTypeToEnum<tstring>::value);
break;
default:
return kTfLiteError;
}
}
const auto& dense_shapes = data->dense_shapes;
for (int i = 0; i < data->dense_size; i++) {
const int input_index = version == V1
? kSparseKeysTensor + data->sparse_size + i
: kDenseKeysTensor;
const int dense_defaults_index =
version == V1
? kSparseKeysTensor + data->sparse_size + data->dense_size + i
: kRaggedKeysTensor + i + 1;
int string_index = version == V1 ? 0 : i;
const TfLiteTensor* dense_key_tensor =
GetInput(context, node, input_index);
const auto* dense_output =
GetOutput(context, node, i + data->sparse_size * 3);
const auto* dense_defaults =
GetInput(context, node, dense_defaults_index);
const auto key = GetString(dense_key_tensor, string_index);
std::string k(key.str, key.len);
const int elements_per_stride =
dense_shapes[i].dims() ? dense_shapes[i].num_elements() : 1;
switch (dense_output->type) {
case kTfLiteInt64:
data->config.dense.emplace_back(
k, tf::DataTypeToEnum<int64_t>::value, dense_shapes[i],
AsTensor<int64_t>(std::vector<int64_t>(
dense_defaults->data.i64,
dense_defaults->data.i64 + elements_per_stride)),
false, elements_per_stride);
break;
case kTfLiteFloat32:
data->config.dense.emplace_back(
k, tf::DataTypeToEnum<float>::value, dense_shapes[i],
AsTensor<float>(std::vector<float>(
dense_defaults->data.f,
dense_defaults->data.f + elements_per_stride)),
false, elements_per_stride);
break;
case kTfLiteString: {
const int num_strings = GetStringCount(dense_defaults);
std::vector<tstring> values;
for (int i = 0; i < num_strings; ++i) {
auto ref = GetString(dense_defaults, i);
values.emplace_back(ref.str, ref.len);
}
data->config.dense.emplace_back(
k, tf::DataTypeToEnum<tstring>::value, dense_shapes[i],
AsTensor<tstring>(values), false, elements_per_stride);
break;
}
default:
return kTfLiteError;
}
}
int offset = 3 * data->sparse_size;
for (int i = 0; i < data->dense_size; i++) {
auto* parse_output = GetOutput(context, node, i + offset);
data->got.dense_values.push_back(parse_output);
if (parse_output->type == kTfLiteString) {
tf::TensorShape shape;
if (parse_output->dims->size == 1) {
shape.AddDim(parse_output->dims->data[0]);
} else {
shape.AddDim(GetTensorShape(parse_output).FlatSize());
}
data->got.dense_tensors[i] =
tf::Tensor(tf::DataTypeToEnum<tstring>::value, shape);
}
}
size_t config_size = data->config.dense.size();
config_size += data->config.sparse.size();
data->config_index_size = config_size;
auto config_index = std::make_unique<ConfigIndex>(config_size);
bool ok = true;
int max_length = 0;
for (size_t d = 0; d < data->config.dense.size(); ++d) {
auto s = data->config.dense[d].feature_name;
max_length = s.length() > max_length ? s.length() : max_length;
}
for (size_t d = 0; d < data->config.sparse.size(); ++d) {
auto s = data->config.sparse[d].feature_name;
max_length = s.length() > max_length ? s.length() : max_length;
}
if (data->quick_filter) {
free(data->quick_filter);
}
data->quick_filter =
static_cast<bool*>(malloc(++max_length * sizeof(bool)));
memset(data->quick_filter, 0, max_length * sizeof(bool));
data->quick_filter_size = max_length;
for (size_t d = 0; d < data->config.dense.size(); ++d) {
const auto& s = data->config.dense[d].feature_name;
data->quick_filter[s.length()] = true;
}
for (size_t d = 0; d < data->config.sparse.size(); ++d) {
const auto& s = data->config.sparse[d].feature_name;
data->quick_filter[s.length()] = true;
}
for (int i = 0; i < 1000; ++i) {
for (size_t d = 0; d < data->config.dense.size(); ++d) {
ok &= config_index->InsertUnique(
data->hasher(data->config.dense[d].feature_name), {d, Type::Dense});
}
for (size_t d = 0; d < data->config.sparse.size(); ++d) {
ok &= config_index->InsertUnique(
data->hasher(data->config.sparse[d].feature_name),
{d, Type::Sparse});
}
if (ok) {
break;
}
data->hasher.seed++;
config_index->Clear(config_size);
ok = true;
}
if (!ok) {
return kTfLiteError;
}
data->config_index = std::move(config_index);
data->created = true;
}
const TfLiteTensor* serialized = GetInput(context, node, kExampleTensor);
std::map<absl::string_view, int> stats;
const auto status = FastParseExampleLite(
data->config, serialized, {}, data->quick_filter, data->quick_filter_size,
data->config_index, data->config_index_size, &data->hasher, &data->got,
stats, context);
if (status != absl::OkStatus()) {
TF_LITE_KERNEL_LOG(context, status.ToString().c_str());
return kTfLiteError;
}
return kTfLiteOk;
}
void Free(TfLiteContext* context, void* buffer) {
auto* obj = reinterpret_cast<OpData*>(buffer);
delete obj;
}
}
TfLiteRegistration* Register_PARSE_EXAMPLE() {
static TfLiteRegistration r = {
parse_example::Init, parse_example::Free,
parse_example::PrepareParseExample<parse_example::V1>,
parse_example::EvalParseExample<parse_example::V1>};
return &r;
}
TfLiteRegistration* Register_PARSE_EXAMPLE_V2() {
static TfLiteRegistration r = {
parse_example::Init, parse_example::Free,
parse_example::PrepareParseExample<parse_example::V2>,
parse_example::EvalParseExample<parse_example::V2>};
return &r;
}
extern "C" void AddParseExampleOp(::tflite::MutableOpResolver* resolver) {
resolver->AddCustom("ParseExample", Register_PARSE_EXAMPLE());
resolver->AddCustom("ParseExampleV2", Register_PARSE_EXAMPLE_V2());
}
}
}
} | #include "tensorflow/lite/kernels/parse_example/parse_example.h"
#include <cstdint>
#include <initializer_list>
#include <string>
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/core/example/feature_util.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/tstring.h"
#include "tensorflow/lite/core/api/op_resolver.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/core/interpreter_builder.h"
#include "tensorflow/lite/core/kernels/register.h"
#include "tensorflow/lite/core/model_builder.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/string_util.h"
namespace tflite {
namespace ops {
namespace custom {
namespace tf = ::tensorflow;
const char* kNodeDefTxt = R"pb(
name: "ParseExample/ParseExample"
op: "ParseExample"
input: "serialized"
input: "ParseExample/ParseExample/names"
input: "ParseExample/ParseExample/dense_keys_0"
input: "ParseExample/Const"
attr {
key: "Ndense"
value { i: 1 }
}
attr {
key: "Nsparse"
value { i: 0 }
}
attr {
key: "Tdense"
value { list { type: DT_FLOAT } }
}
attr {
key: "dense_shapes"
value { list { shape { dim { size: 2 } } } }
}
attr {
key: "sparse_types"
value { list { type: DT_FLOAT } }
}
)pb";
const char* kNodeDefTxt2 = R"pb(
name: "ParseExample/ParseExample"
op: "ParseExample"
input: "serialized"
input: "ParseExample/ParseExample/names"
input: "ParseExample/ParseExample/sparse_keys_0"
attr {
key: "Ndense"
value { i: 0 }
}
attr {
key: "Nsparse"
value { i: 1 }
}
attr {
key: "Tdense"
value {}
}
attr {
key: "dense_shapes"
value {}
}
attr {
key: "sparse_types"
value { list { type: DT_FLOAT } }
}
)pb";
const char* kNodeDefTxt3 = R"pb(
name: "ParseExample/ParseExample"
op: "ParseExample"
input: "serialized"
input: "ParseExample/ParseExample/names"
input: "ParseExample/ParseExample/sparse_keys_0"
attr {
key: "Ndense"
value { i: 1 }
}
attr {
key: "Nsparse"
value { i: 0 }
}
attr {
key: "Tdense"
value { list { type: DT_STRING } }
}
attr {
key: "dense_shapes"
value { list { shape { dim { size: 1 } } } }
}
attr {
key: "sparse_types"
value { list { type: DT_FLOAT } }
}
)pb";
const char* kNodeDefTxt4 = R"pb(
name: "ParseExample/ParseExample"
op: "ParseExample"
input: "serialized"
input: "ParseExample/ParseExample/names"
input: "ParseExample/ParseExample/sparse_keys_0"
attr {
key: "Ndense"
value { i: 0 }
}
attr {
key: "Nsparse"
value { i: 1 }
}
attr {
key: "Tdense"
value {}
}
attr {
key: "dense_shapes"
value {}
}
attr {
key: "sparse_types"
value { list { type: DT_STRING } }
}
)pb";
const char* kNodeDefTxt5 = R"pb(
name: "ParseExample/ParseExample"
op: "ParseExample"
input: "serialized"
input: "ParseExample/ParseExample/names"
input: "ParseExample/ParseExample/dense_keys_0"
input: "ParseExample/Const"
attr {
key: "Ndense"
value { i: 1 }
}
attr {
key: "Nsparse"
value { i: 0 }
}
attr {
key: "Tdense"
value { list { type: DT_FLOAT } }
}
attr {
key: "dense_shapes"
value {}
}
attr {
key: "sparse_types"
value { list { type: DT_FLOAT } }
}
)pb";
template <typename DefaultType>
class ParseExampleOpModel : public SingleOpModel {
public:
ParseExampleOpModel(std::vector<std::string> serialized_examples,
std::vector<std::string> sparse_keys,
std::vector<std::string> dense_keys,
std::initializer_list<DefaultType> dense_defaults,
std::vector<TensorType> dense_types,
std::vector<TensorType> sparse_types,
const char* text_def, int dense_size = 2) {
const int input_size = serialized_examples.size();
auto input_tensor_data = TensorData(TensorType_STRING, {input_size});
string_indices_.push_back(AddInput(input_tensor_data));
string_indices_.push_back(
AddConstInput<std::string>(TensorData(TensorType_STRING, {0}), {""}));
std::for_each(sparse_keys.begin(), sparse_keys.end(), [&](auto&&) {
string_indices_.push_back(AddInput(TensorData(TensorType_STRING, {1})));
});
std::for_each(dense_keys.begin(), dense_keys.end(), [&](auto&&) {
string_indices_.push_back(AddInput(TensorData(TensorType_STRING, {1})));
});
if (dense_size > 0) {
dense_defaults_ = AddConstInput<DefaultType>(
TensorData(dense_types[0], {dense_size}), dense_defaults);
}
if (!sparse_keys.empty()) {
for (int i = 0; i < sparse_keys.size(); i++) {
sparse_indices_outputs_.push_back(AddOutput(TensorType_INT64));
}
for (int i = 0; i < sparse_keys.size(); i++) {
sparse_values_outputs_.push_back(AddOutput(sparse_types[i]));
}
for (int i = 0; i < sparse_keys.size(); i++) {
sparse_shapes_outputs_.push_back(AddOutput({TensorType_INT64, {2}}));
}
}
for (int i = 0; i < dense_keys.size(); i++) {
dense_outputs_.push_back(AddOutput({dense_types[i], {dense_size}}));
}
tf::NodeDef nodedef;
tf::protobuf::TextFormat::Parser parser;
tf::protobuf::io::ArrayInputStream input_stream(text_def, strlen(text_def));
if (!parser.Parse(&input_stream, &nodedef)) {
abort();
}
std::string serialized_nodedef;
nodedef.SerializeToString(&serialized_nodedef);
flexbuffers::Builder fbb;
fbb.Vector([&]() {
fbb.String(nodedef.op());
fbb.String(serialized_nodedef);
});
fbb.Finish();
const auto buffer = fbb.GetBuffer();
SetCustomOp("ParseExample", buffer, Register_PARSE_EXAMPLE);
BuildInterpreter({{input_size}});
int idx = 0;
PopulateStringTensor(string_indices_[idx++], serialized_examples);
PopulateStringTensor(string_indices_[idx++], {""});
for (const auto& key : sparse_keys) {
PopulateStringTensor(string_indices_[idx++], {key});
}
for (const auto& key : dense_keys) {
PopulateStringTensor(string_indices_[idx++], {key});
}
}
void ResizeInputTensor(std::vector<std::vector<int>> input_shapes) {
for (size_t i = 0; i < input_shapes.size(); ++i) {
const int input_idx = interpreter_->inputs()[i];
if (input_idx == kTfLiteOptionalTensor) continue;
const auto& shape = input_shapes[i];
if (shape.empty()) continue;
CHECK(interpreter_->ResizeInputTensor(input_idx, shape) == kTfLiteOk);
}
}
template <typename T>
std::vector<T> GetSparseIndicesOutput(int i) {
return ExtractVector<T>(sparse_indices_outputs_[i]);
}
template <typename T>
std::vector<T> GetSparseValuesOutput(int i) {
return ExtractVector<T>(sparse_values_outputs_[i]);
}
template <typename T>
std::vector<T> GetSparseShapesOutput(int i) {
return ExtractVector<T>(sparse_shapes_outputs_[i]);
}
template <typename T>
std::vector<T> GetDenseOutput(int i) {
return ExtractVector<T>(dense_outputs_[i]);
}
std::vector<std::string> GetStringOutput(int i) {
auto* t = interpreter_->tensor(i);
int count = GetStringCount(t);
std::vector<std::string> v;
for (int i = 0; i < count; ++i) {
auto ref = GetString(t, i);
v.emplace_back(ref.str, ref.len);
}
return v;
}
int DenseDefaults() { return dense_defaults_; }
int SparseValuesOutputs(int i) { return sparse_values_outputs_[i]; }
int DenseOutputs(int i) { return dense_outputs_[i]; }
std::vector<int> dense_outputs_;
std::vector<int> sparse_indices_outputs_;
std::vector<int> sparse_shapes_outputs_;
std::vector<int> sparse_values_outputs_;
std::vector<int> string_indices_;
int dense_defaults_ = -1;
};
TEST(ParseExampleOpsTest, SimpleTest) {
tf::Example example;
tf::AppendFeatureValues<float>({1.5f, 1.5f}, "time", &example);
tf::AppendFeatureValues<float>({1.0f, 1.0f}, "num", &example);
ParseExampleOpModel<float> m({example.SerializeAsString()}, {}, {"time"},
{0.f, 0.f}, {TensorType_FLOAT32}, {},
kNodeDefTxt);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetDenseOutput<float>(0),
ElementsAreArray(ArrayFloatNear({1.5f, 1.5f})));
}
TEST(ParseExampleOpsTest, SparseTest) {
tf::Example example;
tf::AppendFeatureValues<float>({1.5f}, "time", &example);
ParseExampleOpModel<float> m({example.SerializeAsString()}, {"time"}, {}, {},
{}, {TensorType_FLOAT32}, kNodeDefTxt2, 0);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetSparseIndicesOutput<int64_t>(0),
ElementsAreArray(ArrayFloatNear({0, 0})));
EXPECT_THAT(m.GetSparseValuesOutput<float>(0),
ElementsAreArray(ArrayFloatNear({1.5f})));
EXPECT_THAT(m.GetSparseShapesOutput<int64_t>(0),
ElementsAreArray(ArrayFloatNear({1, 1})));
}
TEST(ParseExampleOpsTest, SimpleBytesTest) {
tf::Example example;
const std::string test_data = "simpletest";
tf::AppendFeatureValues<tensorflow::tstring>({test_data}, "time", &example);
tf::AppendFeatureValues<float>({1.0f, 1.0f}, "num", &example);
std::string default_value = "missing";
ParseExampleOpModel<std::string> m({example.SerializeAsString()}, {},
{"time"}, {default_value},
{TensorType_STRING}, {}, kNodeDefTxt3, 1);
m.PopulateStringTensor(m.DenseDefaults(), {default_value});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
std::vector<string> c = m.GetStringOutput(m.DenseOutputs(0));
EXPECT_EQ(1, c.size());
EXPECT_EQ(test_data, c[0]);
}
TEST(ParseExampleOpsTest, SparseBytesTest) {
tf::Example example;
const std::string test_data = "simpletest";
tf::AppendFeatureValues<tensorflow::tstring>({test_data, test_data}, "time",
&example);
tf::AppendFeatureValues<float>({1.0f, 1.0f}, "num", &example);
ParseExampleOpModel<std::string> m({example.SerializeAsString()}, {"time"},
{}, {}, {}, {TensorType_STRING},
kNodeDefTxt4, 0);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetSparseIndicesOutput<int64_t>(0),
testing::ElementsAreArray({0, 0, 0, 1}));
auto values = m.GetStringOutput(m.SparseValuesOutputs(0));
EXPECT_EQ(2, values.size());
EXPECT_EQ(test_data, values[0]);
EXPECT_EQ(test_data, values[1]);
EXPECT_THAT(m.GetSparseShapesOutput<int64_t>(0),
testing::ElementsAreArray({1, 2}));
}
TEST(ParseExampleOpsTest, ResizeTest) {
const int num_tests = 3;
std::vector<tf::Example> examples(num_tests);
std::vector<std::vector<float>> expected(num_tests);
std::vector<std::vector<std::string>> inputs(num_tests);
std::vector<int> sizes;
for (int i = 0; i < num_tests; ++i) {
float val = i;
std::initializer_list<float> floats = {val + val / 10.f, -val - val / 10.f};
tf::AppendFeatureValues<float>({val, val}, "num", &examples[i]);
tf::AppendFeatureValues<float>(floats, "time", &examples[i]);
sizes.push_back((num_tests - i) * 2);
for (int j = 0; j < sizes.back(); ++j) {
inputs[i].push_back(examples[i].SerializeAsString());
expected[i].insert(expected[i].end(), floats.begin(), floats.end());
}
}
ParseExampleOpModel<float> m(inputs[0], {}, {"time"}, {0.f, 0.f},
{TensorType_FLOAT32}, {}, kNodeDefTxt);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetDenseOutput<float>(0),
ElementsAreArray(ArrayFloatNear(expected[0])));
for (int i = 1; i < num_tests; ++i) {
m.ResizeInputTensor({{sizes[i]}});
m.AllocateAndDelegate(false);
m.PopulateStringTensor(0, inputs[i]);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetDenseOutput<float>(0),
ElementsAreArray(ArrayFloatNear(expected[i])));
}
}
TEST(ParseExampleOpsTest, ResizeMissingInfoTest) {
const int num_tests = 3;
std::vector<tf::Example> examples(num_tests);
std::vector<std::vector<float>> expected(num_tests);
std::vector<std::vector<std::string>> inputs(num_tests);
std::vector<int> sizes;
for (int i = 0; i < num_tests; ++i) {
float val = i;
std::initializer_list<float> floats = {val + val / 10.f, -val - val / 10.f};
tf::AppendFeatureValues<float>({val, val}, "num", &examples[i]);
tf::AppendFeatureValues<float>(floats, "time", &examples[i]);
sizes.push_back((num_tests - i) * 2);
for (int j = 0; j < sizes.back(); ++j) {
inputs[i].push_back(examples[i].SerializeAsString());
expected[i].insert(expected[i].end(), floats.begin(), floats.end());
}
}
ParseExampleOpModel<float> m(inputs[0], {}, {"time"}, {0.f, 0.f},
{TensorType_FLOAT32}, {}, kNodeDefTxt5);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetDenseOutput<float>(0),
ElementsAreArray(ArrayFloatNear(expected[0])));
for (int i = 1; i < num_tests; ++i) {
m.ResizeInputTensor({{sizes[i]}});
m.AllocateAndDelegate(false);
m.PopulateStringTensor(0, inputs[i]);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetDenseOutput<float>(0),
ElementsAreArray(ArrayFloatNear(expected[i])));
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/parse_example/parse_example.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/parse_example/parse_example_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
915c18f8-e578-43d6-8287-09d0e320b63d | cpp | tensorflow/tensorflow | transpose_utils | tensorflow/lite/kernels/internal/transpose_utils.cc | tensorflow/lite/kernels/internal/transpose_utils_test.cc | #include "tensorflow/lite/kernels/internal/transpose_utils.h"
namespace tflite {
namespace transpose_utils {
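// Returns true when the permutation is equivalent to a 2D transpose; on
// success, dim0 and dim1 receive the collapsed sizes of the two blocks being
// swapped.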
bool IsTranspose2DApplicable(const TransposeParams& params,
const RuntimeShape& input_shape, int* dim0,
int* dim1) {
const int dims_cnt = input_shape.DimensionsCount();
if (dims_cnt == 2) {
*dim0 = input_shape.Dims(0);
*dim1 = input_shape.Dims(1);
return true;
}
const int first_perm = params.perm[0];
for (int i = 1; i < dims_cnt; ++i) {
int rebased = params.perm[i] - first_perm;
if (rebased < 0) {
rebased += dims_cnt;
}
if (rebased != i) {
return false;
}
}
*dim0 = 1;
*dim1 = 1;
for (int i = 0; i < dims_cnt; ++i) {
if (i < first_perm) {
*dim0 *= input_shape.Dims(i);
} else {
*dim1 *= input_shape.Dims(i);
}
}
return true;
}
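// Strips size-1 dimensions from the input/output shapes and remaps the
// permutation accordingly; a fully degenerate shape collapses to a single
// size-1 dimension with an identity permutation.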
void RemoveOneSizeDimensions(RuntimeShape* input_shape,
RuntimeShape* output_shape,
TransposeParams* params) {
const int dims_cnt = input_shape->DimensionsCount();
TFLITE_DCHECK_EQ(params->perm_count, dims_cnt);
bool foundOneSizeDim = false;
for (int i = 0; i < dims_cnt; ++i) {
if (input_shape->Dims(i) == 1) {
foundOneSizeDim = true;
break;
}
}
if (!foundOneSizeDim) return;
if (input_shape->FlatSize() == 1) {
input_shape->Resize(1);
input_shape->SetDim(0, 1);
output_shape->Resize(1);
output_shape->SetDim(0, 1);
params->perm_count = 1;
params->perm[0] = 0;
return;
}
int new_dims_cnt = 0;
for (int i = 0; i < dims_cnt; ++i) {
if (input_shape->Dims(i) == 1) {
continue;
}
input_shape->SetDim(new_dims_cnt, input_shape->Dims(i));
++new_dims_cnt;
}
input_shape->Resize(new_dims_cnt);
TransposeParams new_params;
new_dims_cnt = 0;
for (int i = 0; i < dims_cnt; ++i) {
if (output_shape->Dims(i) == 1) {
continue;
}
new_params.perm[new_dims_cnt] = params->perm[i];
output_shape->SetDim(new_dims_cnt, output_shape->Dims(i));
++new_dims_cnt;
}
output_shape->Resize(new_dims_cnt);
new_params.perm_count = new_dims_cnt;
for (int i = 0; i < new_dims_cnt; ++i) {
int min_val_idx = -1;
for (int j = 0; j < new_dims_cnt; ++j) {
if (new_params.perm[j] >= i &&
(min_val_idx == -1 ||
new_params.perm[min_val_idx] > new_params.perm[j])) {
min_val_idx = j;
}
}
new_params.perm[min_val_idx] = i;
}
*params = new_params;
}
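// Splits off leading dimensions that the permutation keeps in place;
// returns the element count of each remaining block that still needs
// transposing and fills the trimmed shapes and permutation.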
size_t Flatten(const RuntimeShape& input_shape,
const RuntimeShape& output_shape, const TransposeParams& params,
RuntimeShape* non_flatten_input_shape,
RuntimeShape* non_flatten_output_shape,
TransposeParams* non_flatten_params) {
int skip_dims_cnt = 0;
size_t flat_size = input_shape.FlatSize();
for (int i = 0; i < params.perm_count; ++i) {
if (params.perm[i] == i) {
flat_size /= input_shape.Dims(i);
++skip_dims_cnt;
} else {
break;
}
}
const int new_dims_cnt = params.perm_count - skip_dims_cnt;
non_flatten_input_shape->Resize(new_dims_cnt);
non_flatten_output_shape->Resize(new_dims_cnt);
non_flatten_params->perm_count = new_dims_cnt;
for (int i = skip_dims_cnt; i < params.perm_count; ++i) {
non_flatten_input_shape->SetDim(i - skip_dims_cnt, input_shape.Dims(i));
non_flatten_output_shape->SetDim(i - skip_dims_cnt, output_shape.Dims(i));
non_flatten_params->perm[i - skip_dims_cnt] =
params.perm[i] - skip_dims_cnt;
}
return flat_size;
}
}
} | #include "tensorflow/lite/kernels/internal/transpose_utils.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace tflite {
namespace {
TEST(TransposeUtilsTest, RemoveOneSizeDimensions_1DNoChanges) {
RuntimeShape input_shape({9});
RuntimeShape output_shape({9});
TransposeParams params;
params.perm_count = 1;
params.perm[0] = 0;
transpose_utils::RemoveOneSizeDimensions(&input_shape, &output_shape,
¶ms);
EXPECT_EQ(input_shape, RuntimeShape({9}));
EXPECT_EQ(output_shape, RuntimeShape({9}));
EXPECT_EQ(params.perm_count, 1);
EXPECT_EQ(params.perm[0], 0);
}
TEST(TransposeUtilsTest, RemoveOneSizeDimensions_2DNoChanges) {
RuntimeShape input_shape({9, 3});
RuntimeShape output_shape({3, 9});
TransposeParams params;
params.perm_count = 2;
params.perm[0] = 1;
params.perm[1] = 0;
transpose_utils::RemoveOneSizeDimensions(&input_shape, &output_shape,
¶ms);
EXPECT_EQ(input_shape, RuntimeShape({9, 3}));
EXPECT_EQ(output_shape, RuntimeShape({3, 9}));
EXPECT_EQ(params.perm_count, 2);
EXPECT_EQ(params.perm[0], 1);
EXPECT_EQ(params.perm[1], 0);
}
TEST(TransposeUtilsTest, RemoveOneSizeDimensions_2DShrinking) {
RuntimeShape input_shape({9, 1});
RuntimeShape output_shape({1, 9});
TransposeParams params;
params.perm_count = 2;
params.perm[0] = 1;
params.perm[1] = 0;
transpose_utils::RemoveOneSizeDimensions(&input_shape, &output_shape,
¶ms);
EXPECT_EQ(input_shape, RuntimeShape({9}));
EXPECT_EQ(output_shape, RuntimeShape({9}));
EXPECT_EQ(params.perm_count, 1);
EXPECT_EQ(params.perm[0], 0);
}
TEST(TransposeUtilsTest, RemoveOneSizeDimensions_3DNoChanges) {
RuntimeShape input_shape({4, 3, 8});
RuntimeShape output_shape({8, 4, 3});
TransposeParams params;
params.perm_count = 3;
params.perm[0] = 2;
params.perm[1] = 0;
params.perm[2] = 1;
transpose_utils::RemoveOneSizeDimensions(&input_shape, &output_shape,
¶ms);
EXPECT_EQ(input_shape, RuntimeShape({4, 3, 8}));
EXPECT_EQ(output_shape, RuntimeShape({8, 4, 3}));
EXPECT_EQ(params.perm_count, 3);
EXPECT_EQ(params.perm[0], 2);
EXPECT_EQ(params.perm[1], 0);
EXPECT_EQ(params.perm[2], 1);
}
TEST(TransposeUtilsTest, RemoveOneSizeDimensions_3DShrinkingOnce) {
RuntimeShape input_shape({4, 1, 8});
RuntimeShape output_shape({8, 4, 1});
TransposeParams params;
params.perm_count = 3;
params.perm[0] = 2;
params.perm[1] = 0;
params.perm[2] = 1;
transpose_utils::RemoveOneSizeDimensions(&input_shape, &output_shape,
¶ms);
EXPECT_EQ(input_shape, RuntimeShape({4, 8}));
EXPECT_EQ(output_shape, RuntimeShape({8, 4}));
EXPECT_EQ(output_shape.Dims(1), 4);
EXPECT_EQ(params.perm_count, 2);
EXPECT_EQ(params.perm[0], 1);
EXPECT_EQ(params.perm[1], 0);
}
TEST(TransposeUtilsTest, RemoveOneSizeDimensions_3DShrinkingTwice) {
RuntimeShape input_shape({4, 1, 1});
RuntimeShape output_shape({1, 4, 1});
TransposeParams params;
params.perm_count = 3;
params.perm[0] = 2;
params.perm[1] = 0;
params.perm[2] = 1;
transpose_utils::RemoveOneSizeDimensions(&input_shape, &output_shape,
¶ms);
EXPECT_EQ(input_shape, RuntimeShape({4}));
EXPECT_EQ(output_shape, RuntimeShape({4}));
EXPECT_EQ(params.perm_count, 1);
EXPECT_EQ(params.perm[0], 0);
}
TEST(TransposeUtilsTest, RemoveOneSizeDimensions_3DAllOnes) {
RuntimeShape input_shape({1, 1, 1});
RuntimeShape output_shape({1, 1, 1});
TransposeParams params;
params.perm_count = 3;
params.perm[0] = 2;
params.perm[1] = 0;
params.perm[2] = 1;
transpose_utils::RemoveOneSizeDimensions(&input_shape, &output_shape,
¶ms);
EXPECT_EQ(input_shape, RuntimeShape({1}));
EXPECT_EQ(output_shape, RuntimeShape({1}));
EXPECT_EQ(params.perm_count, 1);
EXPECT_EQ(params.perm[0], 0);
}
TEST(TransposeUtilsTest, RemoveOneSizeDimensions_4DNoChanges) {
RuntimeShape input_shape({9, 3, 2, 4});
RuntimeShape output_shape({3, 9, 4, 2});
TransposeParams params;
params.perm_count = 4;
params.perm[0] = 1;
params.perm[1] = 0;
params.perm[2] = 3;
params.perm[3] = 2;
transpose_utils::RemoveOneSizeDimensions(&input_shape, &output_shape,
&params);
EXPECT_EQ(input_shape, RuntimeShape({9, 3, 2, 4}));
EXPECT_EQ(output_shape, RuntimeShape({3, 9, 4, 2}));
EXPECT_EQ(params.perm_count, 4);
EXPECT_EQ(params.perm[0], 1);
EXPECT_EQ(params.perm[1], 0);
EXPECT_EQ(params.perm[2], 3);
EXPECT_EQ(params.perm[3], 2);
}
TEST(TransposeUtilsTest, RemoveOneSizeDimensions_4DShrinkingOnce) {
RuntimeShape input_shape({9, 3, 1, 4});
RuntimeShape output_shape({3, 9, 4, 1});
TransposeParams params;
params.perm_count = 4;
params.perm[0] = 1;
params.perm[1] = 0;
params.perm[2] = 3;
params.perm[3] = 2;
transpose_utils::RemoveOneSizeDimensions(&input_shape, &output_shape,
&params);
EXPECT_EQ(input_shape, RuntimeShape({9, 3, 4}));
EXPECT_EQ(output_shape, RuntimeShape({3, 9, 4}));
EXPECT_EQ(params.perm_count, 3);
EXPECT_EQ(params.perm[0], 1);
EXPECT_EQ(params.perm[1], 0);
EXPECT_EQ(params.perm[2], 2);
}
TEST(TransposeUtilsTest, RemoveOneSizeDimensions_4DShrinkingTwice) {
RuntimeShape input_shape({1, 3, 1, 4});
RuntimeShape output_shape({3, 1, 4, 1});
TransposeParams params;
params.perm_count = 4;
params.perm[0] = 1;
params.perm[1] = 2;
params.perm[2] = 3;
params.perm[3] = 0;
transpose_utils::RemoveOneSizeDimensions(&input_shape, &output_shape,
&params);
EXPECT_EQ(input_shape, RuntimeShape({3, 4}));
EXPECT_EQ(output_shape, RuntimeShape({3, 4}));
EXPECT_EQ(params.perm_count, 2);
EXPECT_EQ(params.perm[0], 0);
EXPECT_EQ(params.perm[1], 1);
}
TEST(TransposeUtilsTest, RemoveOneSizeDimensions_4DShrinkingThirdTimes) {
RuntimeShape input_shape({1, 1, 7, 1});
RuntimeShape output_shape({1, 7, 1, 1});
TransposeParams params;
params.perm_count = 4;
params.perm[0] = 0;
params.perm[1] = 2;
params.perm[2] = 1;
params.perm[3] = 3;
transpose_utils::RemoveOneSizeDimensions(&input_shape, &output_shape,
&params);
EXPECT_EQ(input_shape, RuntimeShape({7}));
EXPECT_EQ(output_shape, RuntimeShape({7}));
EXPECT_EQ(params.perm_count, 1);
EXPECT_EQ(params.perm[0], 0);
}
TEST(TransposeUtilsTest, RemoveOneSizeDimensions_4DAllOnes) {
RuntimeShape input_shape({1, 1, 1, 1});
RuntimeShape output_shape({1, 1, 1, 1});
TransposeParams params;
params.perm_count = 4;
params.perm[0] = 0;
params.perm[1] = 2;
params.perm[2] = 1;
params.perm[3] = 3;
transpose_utils::RemoveOneSizeDimensions(&input_shape, &output_shape,
&params);
EXPECT_EQ(input_shape, RuntimeShape({1}));
EXPECT_EQ(output_shape, RuntimeShape({1}));
EXPECT_EQ(params.perm_count, 1);
EXPECT_EQ(params.perm[0], 0);
}
TEST(TransposeUtilsTest, Flatten3D) {
RuntimeShape input_shape({3, 5, 7});
RuntimeShape output_shape({3, 7, 5});
TransposeParams params;
params.perm_count = 3;
params.perm[0] = 0;
params.perm[1] = 2;
params.perm[2] = 1;
RuntimeShape non_flatten_input_shape;
RuntimeShape non_flatten_output_shape;
TransposeParams non_flatten_params;
size_t non_flatten_size = transpose_utils::Flatten(
input_shape, output_shape, params, &non_flatten_input_shape,
&non_flatten_output_shape, &non_flatten_params);
EXPECT_EQ(non_flatten_input_shape, RuntimeShape({5, 7}));
EXPECT_EQ(non_flatten_output_shape, RuntimeShape({7, 5}));
EXPECT_EQ(non_flatten_size, 5 * 7);
EXPECT_EQ(non_flatten_params.perm_count, 2);
EXPECT_EQ(non_flatten_params.perm[0], 1);
EXPECT_EQ(non_flatten_params.perm[1], 0);
}
TEST(TransposeUtilsTest, Flatten4DFlattenOnce) {
RuntimeShape input_shape({3, 5, 7, 9});
RuntimeShape output_shape({3, 7, 5, 9});
TransposeParams params;
params.perm_count = 4;
params.perm[0] = 0;
params.perm[1] = 2;
params.perm[2] = 1;
params.perm[3] = 3;
RuntimeShape non_flatten_input_shape;
RuntimeShape non_flatten_output_shape;
TransposeParams non_flatten_params;
size_t non_flatten_size = transpose_utils::Flatten(
input_shape, output_shape, params, &non_flatten_input_shape,
&non_flatten_output_shape, &non_flatten_params);
EXPECT_EQ(non_flatten_input_shape, RuntimeShape({5, 7, 9}));
EXPECT_EQ(non_flatten_output_shape, RuntimeShape({7, 5, 9}));
EXPECT_EQ(non_flatten_size, 5 * 7 * 9);
EXPECT_EQ(non_flatten_params.perm_count, 3);
EXPECT_EQ(non_flatten_params.perm[0], 1);
EXPECT_EQ(non_flatten_params.perm[1], 0);
EXPECT_EQ(non_flatten_params.perm[2], 2);
}
TEST(TransposeUtilsTest, Flatten4DFlattenTwice) {
RuntimeShape input_shape({3, 5, 7, 9});
RuntimeShape output_shape({3, 5, 9, 7});
TransposeParams params;
params.perm_count = 4;
params.perm[0] = 0;
params.perm[1] = 1;
params.perm[2] = 3;
params.perm[3] = 2;
RuntimeShape non_flatten_input_shape;
RuntimeShape non_flatten_output_shape;
TransposeParams non_flatten_params;
size_t non_flatten_size = transpose_utils::Flatten(
input_shape, output_shape, params, &non_flatten_input_shape,
&non_flatten_output_shape, &non_flatten_params);
EXPECT_EQ(non_flatten_input_shape, RuntimeShape({7, 9}));
EXPECT_EQ(non_flatten_output_shape, RuntimeShape({9, 7}));
EXPECT_EQ(non_flatten_size, 7 * 9);
EXPECT_EQ(non_flatten_params.perm_count, 2);
EXPECT_EQ(non_flatten_params.perm[0], 1);
EXPECT_EQ(non_flatten_params.perm[1], 0);
}
TEST(TransposeUtilsTest, IsTranspose2DApplicable2D) {
RuntimeShape input_shape({4, 5});
TransposeParams params;
params.perm_count = 2;
params.perm[0] = 1;
params.perm[1] = 0;
int dim0, dim1;
bool applicable = transpose_utils::IsTranspose2DApplicable(
params, input_shape, &dim0, &dim1);
EXPECT_TRUE(applicable);
EXPECT_EQ(dim0, 4);
EXPECT_EQ(dim1, 5);
}
TEST(TransposeUtilsTest, IsTranspose2DApplicable3DOne) {
RuntimeShape input_shape({4, 5, 6});
TransposeParams params;
params.perm_count = 3;
params.perm[0] = 1;
params.perm[1] = 2;
params.perm[2] = 0;
int dim0, dim1;
bool applicable = transpose_utils::IsTranspose2DApplicable(
params, input_shape, &dim0, &dim1);
EXPECT_TRUE(applicable);
EXPECT_EQ(dim0, 4);
EXPECT_EQ(dim1, 30);
}
TEST(TransposeUtilsTest, IsTranspose2DApplicable3DTwo) {
RuntimeShape input_shape({4, 5, 6});
TransposeParams params;
params.perm_count = 3;
params.perm[0] = 2;
params.perm[1] = 0;
params.perm[2] = 1;
int dim0, dim1;
bool applicable = transpose_utils::IsTranspose2DApplicable(
params, input_shape, &dim0, &dim1);
EXPECT_TRUE(applicable);
EXPECT_EQ(dim0, 20);
EXPECT_EQ(dim1, 6);
}
TEST(TransposeUtilsTest, IsTranspose2DApplicable3DNotApplicable) {
RuntimeShape input_shape({4, 5, 6});
TransposeParams params;
params.perm_count = 3;
params.perm[0] = 2;
params.perm[1] = 1;
params.perm[2] = 0;
int dim0, dim1;
bool applicable = transpose_utils::IsTranspose2DApplicable(
params, input_shape, &dim0, &dim1);
EXPECT_FALSE(applicable);
}
TEST(TransposeUtilsTest, IsTranspose2DApplicable4DOne) {
RuntimeShape input_shape({4, 5, 6, 7});
TransposeParams params;
params.perm_count = 4;
params.perm[0] = 1;
params.perm[1] = 2;
params.perm[2] = 3;
params.perm[3] = 0;
int dim0, dim1;
bool applicable = transpose_utils::IsTranspose2DApplicable(
params, input_shape, &dim0, &dim1);
EXPECT_TRUE(applicable);
EXPECT_EQ(dim0, 4);
EXPECT_EQ(dim1, 210);
}
TEST(TransposeUtilsTest, IsTranspose2DApplicable4DTwo) {
RuntimeShape input_shape({4, 5, 6, 7});
TransposeParams params;
params.perm_count = 4;
params.perm[0] = 2;
params.perm[1] = 3;
params.perm[2] = 0;
params.perm[3] = 1;
int dim0, dim1;
bool applicable = transpose_utils::IsTranspose2DApplicable(
params, input_shape, &dim0, &dim1);
EXPECT_TRUE(applicable);
EXPECT_EQ(dim0, 20);
EXPECT_EQ(dim1, 42);
}
TEST(TransposeUtilsTest, IsTranspose2DApplicable4DThird) {
RuntimeShape input_shape({4, 5, 6, 7});
TransposeParams params;
params.perm_count = 4;
params.perm[0] = 3;
params.perm[1] = 0;
params.perm[2] = 1;
params.perm[3] = 2;
int dim0, dim1;
bool applicable = transpose_utils::IsTranspose2DApplicable(
params, input_shape, &dim0, &dim1);
EXPECT_TRUE(applicable);
EXPECT_EQ(dim0, 120);
EXPECT_EQ(dim1, 7);
}
TEST(TransposeUtilsTest, IsTranspose2DApplicable4DNotApplicable) {
RuntimeShape input_shape({4, 5, 6, 7});
TransposeParams params;
params.perm_count = 4;
params.perm[0] = 3;
params.perm[1] = 2;
params.perm[2] = 1;
params.perm[3] = 0;
int dim0, dim1;
bool applicable = transpose_utils::IsTranspose2DApplicable(
params, input_shape, &dim0, &dim1);
EXPECT_FALSE(applicable);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/internal/transpose_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/internal/transpose_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
db76c60d-a5eb-455c-bdf2-48477675fc36 | cpp | tensorflow/tensorflow | runtime_shape | tensorflow/compiler/mlir/lite/kernels/internal/runtime_shape.cc | tensorflow/compiler/mlir/lite/kernels/internal/runtime_shape_test.cc | #include "tensorflow/compiler/mlir/lite/kernels/internal/runtime_shape.h"
#include <cstdint>
#include <cstring>
#include "tensorflow/compiler/mlir/lite/kernels/internal/compatibility_macros.h"
namespace mlir {
RuntimeShape::~RuntimeShape() {
if (size_ > kMaxSmallSize) {
delete[] dims_pointer_;
}
}
int32_t RuntimeShape::Dims(int i) const {
TFLITE_DCHECK_GE(i, 0);
TFLITE_DCHECK_LT(i, size_);
return size_ > kMaxSmallSize ? dims_pointer_[i] : dims_[i];
}
void RuntimeShape::ReplaceWith(int dimensions_count, const int32_t* dims_data) {
Resize(dimensions_count);
int32_t* dst_dims = DimsData();
std::memcpy(dst_dims, dims_data, dimensions_count * sizeof(int32_t));
}
int RuntimeShape::FlatSize() const {
int buffer_size = 1;
const int* dims_data = reinterpret_cast<const int*>(DimsData());
for (int i = 0; i < size_; i++) {
buffer_size *= dims_data[i];
}
return buffer_size;
}
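// FlatSize() is the product of all dimensions, e.g. a {2, 3, 4} shape has a
// flat size of 24; the TestFlatSize case in the companion unit test checks
// this against std::reduce with std::multiplies.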
} | #include "tensorflow/compiler/mlir/lite/kernels/internal/runtime_shape.h"
#include <algorithm>
#include <cstdint>
#include <functional>
#include <initializer_list>
#include <numeric>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/types/span.h"
using testing::Each;
using testing::ElementsAreArray;
namespace mlir {
namespace {
constexpr int kSmallSize = RuntimeShape::kMaxSmallSize;
constexpr int kBigSize = RuntimeShape::kMaxSmallSize + 1;
std::vector<int32_t> IotaVector(int size, int start = 0) {
std::vector<int32_t> vec(size);
absl::c_iota(vec, start);
return vec;
}
absl::Span<const int32_t> AsSpan(const RuntimeShape& shape) {
return absl::Span<const int32_t>(shape.DimsData(), shape.DimensionsCount());
}
class RuntimeShapeTest : public testing::TestWithParam<int> {};
TEST(RuntimeShapeTest, TestDefaultConstructor) {
const RuntimeShape shape;
EXPECT_EQ(shape.DimensionsCount(), 0);
}
TEST_P(RuntimeShapeTest, TestConstructorWithSize) {
const int size = GetParam();
const RuntimeShape shape(size);
EXPECT_EQ(shape.DimensionsCount(), size);
}
TEST_P(RuntimeShapeTest, TestConstructorWithSizeAndDefaultValue) {
const int size = GetParam();
const RuntimeShape shape(size, 34);
EXPECT_EQ(shape.DimensionsCount(), size);
EXPECT_THAT(AsSpan(shape), Each(34));
}
TEST_P(RuntimeShapeTest, TestConstructorFromCArray) {
const int size = GetParam();
const std::vector<int32_t> src = IotaVector(size);
const RuntimeShape shape(size, src.data());
EXPECT_EQ(shape.DimensionsCount(), size);
EXPECT_THAT(AsSpan(shape), ElementsAreArray(src));
}
TEST(RuntimeShapeTest, TestConstructorFromSmallInitList) {
std::initializer_list<int> init{1, 2, 3};
ASSERT_LE(init.size(), RuntimeShape::kMaxSmallSize);
const RuntimeShape shape(init);
EXPECT_EQ(shape.DimensionsCount(), init.size());
EXPECT_THAT(AsSpan(shape), ElementsAreArray(init));
}
TEST(RuntimeShapeTest, TestConstructorFromBigInitList) {
std::initializer_list<int> init{1, 2, 3, 4, 5, 6, 7, 8, 9};
ASSERT_GT(init.size(), RuntimeShape::kMaxSmallSize);
const RuntimeShape shape(init);
EXPECT_EQ(shape.DimensionsCount(), init.size());
EXPECT_THAT(AsSpan(shape), ElementsAreArray(init));
}
TEST_P(RuntimeShapeTest, TestCopyConstructorFromShape) {
const int size = GetParam();
const RuntimeShape src(size, 34);
const RuntimeShape dst(src);
EXPECT_EQ(dst.DimensionsCount(), src.DimensionsCount());
EXPECT_THAT(AsSpan(dst), ElementsAreArray(AsSpan(src)));
}
TEST_P(RuntimeShapeTest, TestEqualityOperator) {
const int size = GetParam();
const RuntimeShape shape1(size, 34);
const RuntimeShape shape2(size, 34);
EXPECT_TRUE(shape1 == shape2);
EXPECT_FALSE(shape1 != shape2);
}
TEST_P(RuntimeShapeTest, TestEqualityOperatorDifferentSizes) {
const int size = GetParam();
const RuntimeShape shape1(size, 34);
const RuntimeShape shape2(size + 1, 34);
EXPECT_FALSE(shape1 == shape2);
EXPECT_TRUE(shape1 != shape2);
}
TEST_P(RuntimeShapeTest, TestEqualityOperatorDifferentValues) {
const int size = GetParam();
const RuntimeShape shape1(size, 34);
const RuntimeShape shape2(size, 43);
EXPECT_FALSE(shape1 == shape2);
EXPECT_TRUE(shape1 != shape2);
}
TEST_P(RuntimeShapeTest, TestSetterGetter) {
const int size = GetParam();
RuntimeShape shape(size);
for (int i = 0; i < size; ++i) {
shape.SetDim(i, i);
EXPECT_EQ(shape.Dims(i), i);
}
EXPECT_THAT(AsSpan(shape), ElementsAreArray(IotaVector(size)));
}
TEST(RuntimeShapeTest, TestResizeSmallSmall) {
ASSERT_GE(kSmallSize, 1);
RuntimeShape shape(kSmallSize - 1, 23);
shape.Resize(kSmallSize);
EXPECT_EQ(shape.DimensionsCount(), kSmallSize);
EXPECT_THAT(absl::Span<const int32_t>(shape.DimsData(), kSmallSize - 1),
Each(23));
}
TEST(RuntimeShapeTest, TestResizeSmallBig) {
RuntimeShape shape(kSmallSize, 23);
shape.Resize(kBigSize);
EXPECT_EQ(shape.DimensionsCount(), kBigSize);
EXPECT_THAT(absl::Span<const int32_t>(shape.DimsData(), kSmallSize),
Each(23));
}
TEST(RuntimeShapeTest, TestResizeBigSmall) {
RuntimeShape shape(kBigSize, 23);
shape.Resize(kSmallSize);
EXPECT_EQ(shape.DimensionsCount(), kSmallSize);
EXPECT_THAT(absl::Span<const int32_t>(shape.DimsData(), kSmallSize),
Each(23));
}
TEST(RuntimeShapeTest, TestResizeDownBigBig) {
RuntimeShape shape(kBigSize + 3, 23);
shape.Resize(kBigSize);
EXPECT_EQ(shape.DimensionsCount(), kBigSize);
EXPECT_THAT(absl::Span<const int32_t>(shape.DimsData(), kBigSize), Each(23));
}
TEST(RuntimeShapeTest, TestResizeUpBigBig) {
RuntimeShape shape(kBigSize, 23);
shape.Resize(kBigSize + 1);
EXPECT_EQ(shape.DimensionsCount(), kBigSize + 1);
EXPECT_THAT(absl::Span<const int32_t>(shape.DimsData(), kBigSize), Each(23));
}
TEST_P(RuntimeShapeTest, TestReplaceWith) {
static_assert(
RuntimeShape::kMaxSmallSize > 2,
"kMaxSmallSize should be greater than 2 for this test to work.");
const int size = GetParam();
for (const int offset : {-2, 2}) {
const std::vector<int32_t> src =
IotaVector(offset + RuntimeShape::kMaxSmallSize);
RuntimeShape shape(size);
shape.ReplaceWith(src.size(), src.data());
EXPECT_EQ(shape.DimensionsCount(), src.size());
EXPECT_THAT(AsSpan(shape), testing::ElementsAreArray(src));
}
}
TEST_P(RuntimeShapeTest, TestBuildFrom) {
const int size = GetParam();
const std::vector<int32_t> src = IotaVector(size);
RuntimeShape shape;
shape.BuildFrom(src);
EXPECT_EQ(shape.DimensionsCount(), src.size());
EXPECT_THAT(AsSpan(shape), testing::ElementsAreArray(src));
}
TEST(RuntimeShapeTest, TestExtendedShapeSmall) {
ASSERT_GE(kSmallSize, 2);
const std::vector<int32_t> dims = IotaVector(kSmallSize - 2);
const RuntimeShape src(dims.size(), dims.data());
const RuntimeShape extended = RuntimeShape::ExtendedShape(kSmallSize, src);
EXPECT_EQ(extended.DimensionsCount(), kSmallSize);
EXPECT_EQ(extended.Dims(0), 1);
EXPECT_EQ(extended.Dims(1), 1);
EXPECT_THAT(absl::Span<const int32_t>(extended.DimsData() + 2, dims.size()),
ElementsAreArray(dims));
}
TEST(RuntimeShapeTest, TestExtendedShapeBig) {
ASSERT_GE(kSmallSize, 2);
const std::vector<int32_t> dims = IotaVector(kBigSize);
const RuntimeShape src(dims.size(), dims.data());
const RuntimeShape extended = RuntimeShape::ExtendedShape(kBigSize + 2, src);
EXPECT_EQ(extended.DimensionsCount(), kBigSize + 2);
EXPECT_EQ(extended.Dims(0), 1);
EXPECT_EQ(extended.Dims(1), 1);
EXPECT_THAT(absl::Span<const int32_t>(extended.DimsData() + 2, dims.size()),
ElementsAreArray(dims));
}
TEST(RuntimeShapeTest, TestExtendedShapeSmallToBig) {
const std::vector<int32_t> dims = IotaVector(kSmallSize);
const RuntimeShape src(dims.size(), dims.data());
const RuntimeShape extended = RuntimeShape::ExtendedShape(kBigSize, src);
EXPECT_EQ(extended.DimensionsCount(), kBigSize);
EXPECT_THAT(
absl::Span<const int32_t>(extended.DimsData(), kBigSize - kSmallSize),
Each(1));
EXPECT_THAT(absl::Span<const int32_t>(
extended.DimsData() + kBigSize - kSmallSize, dims.size()),
ElementsAreArray(dims));
}
TEST_P(RuntimeShapeTest, TestFlatSize) {
const std::vector<int32_t> src = IotaVector(kSmallSize);
const RuntimeShape shape(src.size(), src.data());
EXPECT_EQ(shape.FlatSize(),
std::reduce(src.begin(), src.end(), 1, std::multiplies<int>{}));
}
INSTANTIATE_TEST_SUITE_P(BigSmall, RuntimeShapeTest,
testing::Values(kSmallSize, kBigSize),
[](const testing::TestParamInfo<int>& info) {
return info.param == kSmallSize ? "Small" : "Big";
});
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/kernels/internal/runtime_shape.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/kernels/internal/runtime_shape_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
241e305d-2df4-4bfa-a29d-7db00655992a | cpp | tensorflow/tensorflow | quantization_util | tensorflow/compiler/mlir/lite/kernels/internal/quantization_util.cc | tensorflow/lite/delegates/xnnpack/quantization_util_test.cc | #include "tensorflow/compiler/mlir/lite/kernels/internal/quantization_util.h"
#include <algorithm>
#include <cmath>
#include <limits>
#include "tensorflow/compiler/mlir/lite/kernels/internal/compatibility_macros.h"
#include "tensorflow/compiler/mlir/lite/kernels/internal/cppmath.h"
namespace tflite_migration {
namespace {
constexpr uint64_t kSignMask = 0x8000000000000000LL;
constexpr uint64_t kExponentMask = 0x7ff0000000000000LL;
constexpr int32_t kExponentShift = 52;
constexpr int32_t kExponentBias = 1023;
constexpr uint32_t kExponentIsBadNum = 0x7ff;
constexpr uint64_t kFractionMask = 0x000fffffffc00000LL;
constexpr uint32_t kFractionShift = 22;
constexpr uint32_t kFractionRoundingMask = 0x003fffff;
constexpr uint32_t kFractionRoundingThreshold = 0x00200000;
}
void QuantizeMultiplier(double double_multiplier, int32_t* quantized_multiplier,
int* shift) {
#if TFLITE_SINGLE_ROUNDING
#endif
if (double_multiplier == 0.) {
*quantized_multiplier = 0;
*shift = 0;
return;
}
#ifdef TFLITE_EMULATE_FLOAT
int64_t q_fixed = IntegerFrExp(double_multiplier, shift);
#else
const double q = std::frexp(double_multiplier, shift);
auto q_fixed = static_cast<int64_t>(TfLiteRound(q * (1LL << 31)));
#endif
TFLITE_DCHECK(q_fixed <= (1LL << 31));
if (q_fixed == (1LL << 31)) {
q_fixed /= 2;
++*shift;
}
TFLITE_DCHECK_LE(q_fixed, std::numeric_limits<int32_t>::max());
if (*shift < -31) {
*shift = 0;
q_fixed = 0;
}
#if TFLITE_SINGLE_ROUNDING
if (*shift > 30) {
*shift = 30;
q_fixed = (1LL << 31) - 1;
}
#endif
*quantized_multiplier = static_cast<int32_t>(q_fixed);
}
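// Worked example: QuantizeMultiplier(0.75, &q, &s) yields
// q = round(0.75 * 2^31) = 1610612736 and s = 0, so the real multiplier is
// recovered as q / 2^31 * 2^s ~= 0.75.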
void QuantizeMultiplierGreaterThanOne(double double_multiplier,
int32_t* quantized_multiplier,
int* left_shift) {
TFLITE_DCHECK_GT(double_multiplier, 1.);
QuantizeMultiplier(double_multiplier, quantized_multiplier, left_shift);
TFLITE_DCHECK_GE(*left_shift, 0);
}
int64_t IntegerFrExp(double input, int* shift) {
TFLITE_DCHECK_EQ(8, sizeof(double));
union {
double double_value;
uint64_t double_as_uint;
} cast_union;
cast_union.double_value = input;
const uint64_t u = cast_union.double_as_uint;
if ((u & ~kSignMask) == 0) {
*shift = 0;
return 0;
}
const uint32_t exponent_part = ((u & kExponentMask) >> kExponentShift);
if (exponent_part == kExponentIsBadNum) {
*shift = std::numeric_limits<int>::max();
if (u & kFractionMask) {
return 0;
} else {
if (u & kSignMask) {
return std::numeric_limits<int64_t>::min();
} else {
return std::numeric_limits<int64_t>::max();
}
}
}
*shift = (exponent_part - kExponentBias) + 1;
int64_t fraction = 0x40000000 + ((u & kFractionMask) >> kFractionShift);
if ((u & kFractionRoundingMask) > kFractionRoundingThreshold) {
fraction += 1;
}
if (u & kSignMask) {
fraction *= -1;
}
return fraction;
}
double DoubleFromFractionAndShift(int64_t fraction, int shift) {
union {
double double_value;
uint64_t double_as_uint;
} result;
if (shift == std::numeric_limits<int>::max()) {
if (fraction == 0) {
return std::numeric_limits<double>::quiet_NaN();
} else if (fraction > 0) {
return std::numeric_limits<double>::infinity();
} else {
return -std::numeric_limits<double>::infinity();
}
}
if (fraction == 0) {
result.double_as_uint = 0;
return result.double_value;
}
bool is_negative = (fraction < 0);
int64_t encoded_fraction = is_negative ? -fraction : fraction;
int64_t encoded_shift = (shift - 1);
while (encoded_fraction < 0x40000000) {
encoded_fraction *= 2;
encoded_shift -= 1;
}
while (encoded_fraction > 0x80000000) {
encoded_fraction /= 2;
encoded_shift += 1;
}
encoded_fraction -= 0x40000000;
if (encoded_shift < -1022) {
encoded_shift = -1023;
} else if (encoded_shift > 1022) {
encoded_shift = 1023;
}
encoded_shift += kExponentBias;
uint64_t encoded_sign = is_negative ? kSignMask : 0;
result.double_as_uint = encoded_sign | (encoded_shift << kExponentShift) |
(encoded_fraction << kFractionShift);
return result.double_value;
}
double IntegerDoubleMultiply(double a, double b) {
int a_shift;
const int64_t a_fraction = IntegerFrExp(a, &a_shift);
int b_shift;
const int64_t b_fraction = IntegerFrExp(b, &b_shift);
if (a_shift == std::numeric_limits<int>::max() ||
(b_shift == std::numeric_limits<int>::max())) {
return std::numeric_limits<double>::quiet_NaN();
}
const int result_shift = a_shift + b_shift + 1;
const int64_t result_fraction = (a_fraction * b_fraction) >> 32;
return DoubleFromFractionAndShift(result_fraction, result_shift);
}
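// IntegerFrExp, DoubleFromFractionAndShift and IntegerDoubleMultiply emulate
// frexp-style double manipulation with pure integer arithmetic: the fraction
// is a fixed-point value in [2^30, 2^31) such that
// value ~= fraction * 2^(shift - 31). They back the TFLITE_EMULATE_FLOAT
// paths used elsewhere in this file.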
int IntegerDoubleCompare(double a, double b) {
int a_shift;
const int64_t a_fraction = IntegerFrExp(a, &a_shift);
int b_shift;
const int64_t b_fraction = IntegerFrExp(b, &b_shift);
if (a_shift == std::numeric_limits<int>::max() ||
(b_shift == std::numeric_limits<int>::max())) {
return 1;
}
if ((a_fraction == 0) && (b_fraction < 0)) {
return 1;
} else if ((a_fraction < 0) && (b_fraction == 0)) {
return -1;
} else if (a_shift < b_shift) {
return -1;
} else if (a_shift > b_shift) {
return 1;
} else if (a_fraction < b_fraction) {
return -1;
} else if (a_fraction > b_fraction) {
return 1;
} else {
return 0;
}
}
void PreprocessSoftmaxScaling(double beta, double input_scale,
int input_integer_bits,
int32_t* quantized_multiplier, int* left_shift) {
#if TFLITE_SINGLE_ROUNDING
const double max_real_multiplier = (1LL << 30) - 1.0;
#else
const double max_real_multiplier = (1LL << 31) - 1.0;
#endif
#ifdef TFLITE_EMULATE_FLOAT
const double input_beta = IntegerDoubleMultiply(beta, input_scale);
int shift;
int64_t fraction = IntegerFrExp(input_beta, &shift);
shift += (31 - input_integer_bits);
double input_beta_real_multiplier =
DoubleFromFractionAndShift(fraction, shift);
if (IntegerDoubleCompare(input_beta_real_multiplier, max_real_multiplier) >
0) {
input_beta_real_multiplier = max_real_multiplier;
}
#else
const double input_beta_real_multiplier =
std::min<double>(beta * input_scale * (1 << (31 - input_integer_bits)),
max_real_multiplier);
#endif
QuantizeMultiplierGreaterThanOne(input_beta_real_multiplier,
quantized_multiplier, left_shift);
}
int CalculateInputRadius(int input_integer_bits, int input_left_shift,
int total_signed_bits) {
#ifdef TFLITE_EMULATE_FLOAT
int64_t result = (1 << input_integer_bits) - 1;
result <<= (total_signed_bits - input_integer_bits);
result >>= input_left_shift;
return result;
#else
const double max_input_rescaled =
1.0 * ((1 << input_integer_bits) - 1) *
(1LL << (total_signed_bits - input_integer_bits)) /
(1LL << input_left_shift);
return static_cast<int>(std::floor(max_input_rescaled));
#endif
}
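// For example, with input_integer_bits = 4, input_left_shift = 1 and
// total_signed_bits = 15 the radius is
// floor((2^4 - 1) * 2^(15 - 4) / 2^1) = 15 * 2048 / 2 = 15360.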
} | #include "tensorflow/lite/delegates/xnnpack/quantization_util.h"
#include <stdint.h>
#include <limits>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/internal/runtime_shape.h"
using ::testing::FloatNear;
using ::testing::Pointwise;
namespace tflite {
namespace xnnpack {
namespace {
template <typename T>
inline double ScaleFromMinMax(const float min, const float max) {
return (max - min) / ((std::numeric_limits<T>::max() * 1.0) -
std::numeric_limits<T>::min());
}
template <typename T>
inline int32_t ZeroPointFromMinMax(const float min, const float max) {
return static_cast<int32_t>(std::numeric_limits<T>::min()) +
static_cast<int32_t>(-min / ScaleFromMinMax<T>(min, max) + 0.5f);
}
TEST(Dequantize, Int8) {
std::vector<int8_t> quantized_data = {-3, -2, -1, 1, 2, 3};
std::vector<float> dequantized_data(quantized_data.size());
RuntimeShape tensor_shape(1, quantized_data.size());
const float min = -12.8f;
const float max = 12.7f;
const double scale = ScaleFromMinMax<int8_t>(min, max);
const int32_t zero_point = ZeroPointFromMinMax<int8_t>(min, max);
DequantizeInt8(quantized_data.data(), dequantized_data.data(), tensor_shape,
zero_point, scale);
EXPECT_THAT(dequantized_data,
Pointwise(FloatNear(1e-6), {-0.3, -0.2, -0.1, 0.1, 0.2, 0.3}));
}
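// Sanity check on the helpers above: for min = -12.8f and max = 12.7f they
// give scale = 25.5 / 255 = 0.1 and zero_point = -128 + 128 = 0, so the int8
// value -3 dequantizes to 0.1 * (-3 - 0) = -0.3, matching the expectation.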
TEST(Dequantize, PerChannelInt8) {
const std::vector<float> scales = {0.5, 0.25};
const std::vector<int> zero_points = {-1, -1};
const int quantized_dimension = 0;
const RuntimeShape shape({2, 5});
const std::vector<int8_t> input = {-128, -127, -126, -125, -124,
123, 124, 125, 126, 127};
std::vector<float> output(10, -1);
PerChannelDequantizeInt8(input.data(), output.data(), shape,
zero_points.data(), scales.data(),
quantized_dimension);
EXPECT_THAT(output,
Pointwise(FloatNear(1e-6), {-63.5, -63., -62.5, -62., -61.5, 31.,
31.25, 31.5, 31.75, 32.}));
}
TEST(Dequantize, Float16) {
std::vector<uint16_t> quantized_data = {
UINT16_C(0x3000),
UINT16_C(0x3400),
UINT16_C(0x3800),
UINT16_C(0x3C00),
UINT16_C(0x4000),
UINT16_C(0x4400)
};
std::vector<float> dequantized_data(quantized_data.size());
DequantizeFloat16(quantized_data.data(), dequantized_data.data(),
quantized_data.size());
EXPECT_THAT(dequantized_data,
Pointwise(FloatNear(1e-6), {0.125, 0.25, 0.5, 1., 2., 4.}));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/kernels/internal/quantization_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/quantization_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ec019446-b0f4-48a0-a2a5-764ee3e0c817 | cpp | tensorflow/tensorflow | sparsity_format_converter | tensorflow/compiler/mlir/lite/kernels/internal/utils/sparsity_format_converter.cc | tensorflow/lite/kernels/internal/utils/sparsity_format_converter_test.cc | #include "tensorflow/compiler/mlir/lite/kernels/internal/utils/sparsity_format_converter.h"
#include <algorithm>
#include <cstdint>
#include <utility>
#include <vector>
#include "Eigen/Core"
#include "tensorflow/compiler/mlir/lite/core/c/dimension_type.h"
namespace tflite_migration {
namespace internal {
namespace sparsity {
template <typename T>
FormatConverter<T>::FormatConverter(
const std::vector<int>& shape, const std::vector<int>& traversal_order,
const std::vector<TfLiteDimensionType>& format,
const std::vector<int>& block_size, const std::vector<int>& block_map)
: dense_shape_(shape),
traversal_order_(traversal_order),
block_size_(block_size),
block_map_(block_map) {
dense_size_ = 1;
int block_dim = 0;
blocked_shape_.resize(shape.size());
format_.resize(shape.size() + block_map.size());
for (int i = 0; i < shape.size(); i++) {
format_[i] = format[traversal_order[i]];
dense_size_ *= shape[i];
if (block_dim < block_map.size() && block_map[block_dim] == i) {
blocked_shape_[i] = shape[i] / block_size[block_dim];
block_dim++;
} else {
blocked_shape_[i] = shape[i];
}
}
for (int i = 0; i < block_map.size(); i++) {
format_[i + shape.size()] = kTfLiteDimDense;
}
}
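// The constructor expands the N-D dense shape into N + block_map.size()
// traversal dimensions: block-mapped dimensions are divided by their block
// size in blocked_shape_, and the trailing per-block dimensions are always
// stored densely, hence the kTfLiteDimDense padding of format_.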
template <typename T>
void FormatConverter<T>::DenseToSparse(const T* src_data) {
int num_original_dims = dense_shape_.size();
int num_block_dims = block_map_.size();
int num_expanded_dims = num_original_dims + num_block_dims;
std::vector<int> expanded_shape(num_expanded_dims);
for (int i = 0; i < num_expanded_dims; i++) {
if (i < num_original_dims) {
expanded_shape[i] = blocked_shape_[i];
} else {
expanded_shape[i] = block_size_[i - num_original_dims];
}
}
std::vector<int> shape_offset(num_original_dims);
shape_offset[shape_offset.size() - 1] = 1;
for (int i = num_original_dims - 1; i > 0; --i) {
shape_offset[i - 1] = shape_offset[i] * dense_shape_[i];
}
std::vector<int> expanded_shape_offset(num_expanded_dims);
for (int i = 0; i < num_original_dims; ++i) {
expanded_shape_offset[i] = shape_offset[i];
}
for (int i = 0; i < num_block_dims; ++i) {
int mapped_dim = block_map_[i];
expanded_shape_offset[num_original_dims + i] = shape_offset[mapped_dim];
expanded_shape_offset[mapped_dim] *= block_size_[i];
}
std::vector<int> dst_ordered_offset(num_expanded_dims);
for (int i = 0; i < num_expanded_dims; ++i) {
dst_ordered_offset[i] = expanded_shape_offset[traversal_order_[i]];
}
std::vector<bool> dst_dim_has_nonzeroes(num_expanded_dims);
std::fill(dst_dim_has_nonzeroes.begin(), dst_dim_has_nonzeroes.end(), false);
std::vector<int> inner_compressed_dim(num_expanded_dims);
int most_recent_compressed_dim = -1;
std::vector<int> num_segments_of_next_compressed_dim(num_expanded_dims);
int segment_count = 1;
for (int i = num_expanded_dims - 1; i >= 0; --i) {
inner_compressed_dim[i] = most_recent_compressed_dim;
if (format_[i] == kTfLiteDimSparseCSR) {
most_recent_compressed_dim = i;
num_segments_of_next_compressed_dim[i] = segment_count;
segment_count = 1;
} else {
num_segments_of_next_compressed_dim[i] = -1;
segment_count *= expanded_shape[traversal_order_[i]];
}
}
dim_metadata_.resize(num_expanded_dims * 2);
std::vector<int> dst_sparse_dims;
dst_sparse_dims.reserve(num_expanded_dims);
for (int i = 0; i < num_expanded_dims; ++i) {
dim_metadata_[i * 2].clear();
dim_metadata_[i * 2 + 1].clear();
if (format_[i] == kTfLiteDimDense) {
dim_metadata_[i * 2].push_back(expanded_shape[traversal_order_[i]]);
} else {
dim_metadata_[i * 2].push_back(0);
dst_sparse_dims.push_back(i);
}
}
int dst_dim_idx = num_expanded_dims;
std::vector<int> coordinate(num_expanded_dims, 0);
int dense_tensor_idx = 0;
while (dst_dim_idx >= 0) {
if (dst_dim_idx == num_expanded_dims) {
if (!IsZero(src_data[dense_tensor_idx])) {
data_.push_back(src_data[dense_tensor_idx]);
for (auto dst_dim : dst_sparse_dims) {
if (!dst_dim_has_nonzeroes[dst_dim]) {
dim_metadata_[2 * dst_dim + 1].push_back(coordinate[dst_dim]);
dst_dim_has_nonzeroes[dst_dim] = true;
}
}
} else if (format_[num_expanded_dims - 1] == kTfLiteDimDense) {
data_.push_back(src_data[dense_tensor_idx]);
}
--dst_dim_idx;
} else {
int original_dim_idx = traversal_order_[dst_dim_idx];
int dim_size = expanded_shape[original_dim_idx];
if (dst_dim_has_nonzeroes[dst_dim_idx]) {
dst_dim_has_nonzeroes[dst_dim_idx] = false;
} else if (format_[dst_dim_idx] == kTfLiteDimSparseCSR) {
int next_compressed_dim = inner_compressed_dim[dst_dim_idx];
int erase_offset = dim_metadata_[2 * dst_dim_idx + 1].size() *
num_segments_of_next_compressed_dim[dst_dim_idx];
if (next_compressed_dim >= 0) {
auto& segments = dim_metadata_[2 * inner_compressed_dim[dst_dim_idx]];
segments.erase(segments.begin() + 1 + erase_offset, segments.end());
} else {
data_.erase(data_.begin() + erase_offset, data_.end());
}
}
if (++coordinate[dst_dim_idx] < dim_size) {
dense_tensor_idx += dst_ordered_offset[dst_dim_idx];
++dst_dim_idx;
} else {
if (format_[dst_dim_idx] == kTfLiteDimSparseCSR) {
dim_metadata_[2 * dst_dim_idx].push_back(
dim_metadata_[2 * dst_dim_idx + 1].size());
}
coordinate[dst_dim_idx] = -1;
dense_tensor_idx -= dst_ordered_offset[dst_dim_idx] * dim_size;
--dst_dim_idx;
}
}
}
}
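// For a sparse (CSR) traversal dimension d, dim_metadata_[2 * d] holds the
// segment array and dim_metadata_[2 * d + 1] the index array. In the
// SimpleTestD0S1 case of the companion unit test, the 3x4 matrix
//   6 0 9 8
//   0 0 0 0
//   5 0 0 7
// yields segments {0, 3, 3, 5}, indices {0, 2, 3, 0, 3} and data
// {6, 9, 8, 5, 7}.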
template <typename T>
bool FormatConverter<T>::IsZero(const T val) {
return (val == static_cast<T>(0));
}
template class FormatConverter<int8_t>;
template class FormatConverter<float>;
template class FormatConverter<Eigen::half>;
}
}
} | #include "tensorflow/lite/kernels/internal/utils/sparsity_format_converter.h"
#include <gtest/gtest.h>
#include "tensorflow/lite/c/common.h"
namespace tflite {
namespace internal {
namespace sparsity {
namespace {
TEST(FormatConverterTest, SimpleTestD0D1) {
const std::vector<int> dense_values = {6, 0, 9, 8, 0, 0, 0, 0, 5, 0, 0, 7};
const std::vector<int> dense_shape = {3, 4};
const std::vector<int> traversal_order = {0, 1};
const std::vector<TfLiteDimensionType> format = {kTfLiteDimDense,
kTfLiteDimDense};
FormatConverter<int> converter(dense_shape, traversal_order, format);
converter.DenseToSparse(dense_values.data());
const auto& dim_metadata = converter.GetDimMetadata();
const std::vector<int> dm0 = {3};
const std::vector<int> dm1 = {4};
EXPECT_EQ(dm0, dim_metadata[0]);
EXPECT_EQ(dm1, dim_metadata[2]);
const auto& data = converter.GetData();
const std::vector<int> expected_data = {6, 0, 9, 8, 0, 0, 0, 0, 5, 0, 0, 7};
EXPECT_EQ(expected_data, data);
converter.SparseToDense(expected_data.data());
const auto& data_back = converter.GetData();
EXPECT_EQ(data_back, dense_values);
std::vector<int> dense_data(dense_values.size());
converter.SparseToDense(expected_data.data(), dense_data.size(),
dense_data.data(), nullptr);
EXPECT_EQ(dense_data, dense_values);
}
TEST(FormatConverterTest, SimpleTestS0D1) {
const std::vector<int> dense_values = {6, 0, 9, 8, 0, 0, 0, 0, 5, 0, 0, 7};
const std::vector<int> dense_shape = {3, 4};
const std::vector<int> traversal_order = {0, 1};
const std::vector<TfLiteDimensionType> format = {kTfLiteDimSparseCSR,
kTfLiteDimDense};
FormatConverter<int> converter(dense_shape, traversal_order, format);
converter.DenseToSparse(dense_values.data());
const auto& dim_metadata = converter.GetDimMetadata();
const std::vector<int> dm0_0 = {0, 2};
const std::vector<int> dm0_1 = {0, 2};
const std::vector<int> dm1 = {4};
EXPECT_EQ(dm0_0, dim_metadata[0]);
EXPECT_EQ(dm0_1, dim_metadata[1]);
EXPECT_EQ(dm1, dim_metadata[2]);
const auto& data = converter.GetData();
const std::vector<int> expected_data = {6, 0, 9, 8, 5, 0, 0, 7};
EXPECT_EQ(expected_data, data);
converter.SparseToDense(expected_data.data());
const auto& data_back = converter.GetData();
EXPECT_EQ(data_back, dense_values);
std::vector<int> dense_data(dense_values.size());
converter.SparseToDense(expected_data.data(), dense_data.size(),
dense_data.data(), nullptr);
EXPECT_EQ(dense_data, dense_values);
}
TEST(FormatConverterTest, SimpleTestD0S1) {
const std::vector<int> dense_values = {6, 0, 9, 8, 0, 0, 0, 0, 5, 0, 0, 7};
const std::vector<int> dense_shape = {3, 4};
const std::vector<int> traversal_order = {0, 1};
const std::vector<TfLiteDimensionType> format = {kTfLiteDimDense,
kTfLiteDimSparseCSR};
FormatConverter<int> converter(dense_shape, traversal_order, format);
converter.DenseToSparse(dense_values.data());
const auto& dim_metadata = converter.GetDimMetadata();
const std::vector<int> dm0 = {3};
const std::vector<int> dm1_0 = {0, 3, 3, 5};
const std::vector<int> dm1_1 = {0, 2, 3, 0, 3};
EXPECT_EQ(dm0, dim_metadata[0]);
EXPECT_EQ(dm1_0, dim_metadata[2]);
EXPECT_EQ(dm1_1, dim_metadata[3]);
const auto& data = converter.GetData();
const std::vector<int> expected_data = {6, 9, 8, 5, 7};
EXPECT_EQ(expected_data, data);
converter.SparseToDense(expected_data.data());
const auto& data_back = converter.GetData();
EXPECT_EQ(data_back, dense_values);
std::vector<int> dense_data(dense_values.size());
converter.SparseToDense(expected_data.data(), dense_data.size(),
dense_data.data(), nullptr);
EXPECT_EQ(dense_data, dense_values);
}
TEST(FormatConverterTest, SimpleTestS0S1) {
const std::vector<int> dense_values = {6, 0, 9, 8, 0, 0, 0, 0, 5, 0, 0, 7};
const std::vector<int> dense_shape = {3, 4};
const std::vector<int> traversal_order = {0, 1};
const std::vector<TfLiteDimensionType> format = {kTfLiteDimSparseCSR,
kTfLiteDimSparseCSR};
FormatConverter<int> converter(dense_shape, traversal_order, format);
converter.DenseToSparse(dense_values.data());
const auto& dim_metadata = converter.GetDimMetadata();
const std::vector<int> dm0_0 = {0, 2};
const std::vector<int> dm0_1 = {0, 2};
const std::vector<int> dm1_0 = {0, 3, 5};
const std::vector<int> dm1_1 = {0, 2, 3, 0, 3};
EXPECT_EQ(dm0_0, dim_metadata[0]);
EXPECT_EQ(dm0_1, dim_metadata[1]);
EXPECT_EQ(dm1_0, dim_metadata[2]);
EXPECT_EQ(dm1_1, dim_metadata[3]);
const auto& data = converter.GetData();
const std::vector<int> expected_data = {6, 9, 8, 5, 7};
EXPECT_EQ(expected_data, data);
converter.SparseToDense(expected_data.data());
const auto& data_back = converter.GetData();
EXPECT_EQ(data_back, dense_values);
std::vector<int> dense_data(dense_values.size());
converter.SparseToDense(expected_data.data(), dense_data.size(),
dense_data.data(), nullptr);
EXPECT_EQ(dense_data, dense_values);
}
TEST(FormatConverterTest, SimpleTestD1D0) {
const std::vector<int> dense_values = {6, 0, 9, 8, 0, 0, 0, 0, 5, 0, 0, 7};
const std::vector<int> dense_shape = {3, 4};
const std::vector<int> traversal_order = {1, 0};
const std::vector<TfLiteDimensionType> format = {kTfLiteDimDense,
kTfLiteDimDense};
FormatConverter<int> converter(dense_shape, traversal_order, format);
converter.DenseToSparse(dense_values.data());
const auto& dim_metadata = converter.GetDimMetadata();
const std::vector<int> dm0 = {4};
const std::vector<int> dm1 = {3};
EXPECT_EQ(dm0, dim_metadata[0]);
EXPECT_EQ(dm1, dim_metadata[2]);
const auto& data = converter.GetData();
const std::vector<int> expected_data = {6, 0, 5, 0, 0, 0, 9, 0, 0, 8, 0, 7};
EXPECT_EQ(expected_data, data);
converter.SparseToDense(expected_data.data());
const auto& data_back = converter.GetData();
EXPECT_EQ(data_back, dense_values);
std::vector<int> dense_data(dense_values.size());
converter.SparseToDense(expected_data.data(), dense_data.size(),
dense_data.data(), nullptr);
EXPECT_EQ(dense_data, dense_values);
}
TEST(FormatConverterTest, SimpleTestS1D0) {
const std::vector<int> dense_values = {6, 0, 9, 8, 0, 0, 0, 0, 5, 0, 0, 7};
const std::vector<int> dense_shape = {3, 4};
const std::vector<int> traversal_order = {1, 0};
const std::vector<TfLiteDimensionType> format = {kTfLiteDimDense,
kTfLiteDimSparseCSR};
FormatConverter<int> converter(dense_shape, traversal_order, format);
converter.DenseToSparse(dense_values.data());
const auto& dim_metadata = converter.GetDimMetadata();
const std::vector<int> dm0_0 = {0, 3};
const std::vector<int> dm0_1 = {0, 2, 3};
const std::vector<int> dm1 = {3};
EXPECT_EQ(dm0_0, dim_metadata[0]);
EXPECT_EQ(dm0_1, dim_metadata[1]);
EXPECT_EQ(dm1, dim_metadata[2]);
const auto& data = converter.GetData();
const std::vector<int> expected_data = {6, 0, 5, 9, 0, 0, 8, 0, 7};
EXPECT_EQ(expected_data, data);
converter.SparseToDense(expected_data.data());
const auto& data_back = converter.GetData();
EXPECT_EQ(data_back, dense_values);
std::vector<int> dense_data(dense_values.size());
converter.SparseToDense(expected_data.data(), dense_data.size(),
dense_data.data(), nullptr);
EXPECT_EQ(dense_data, dense_values);
}
TEST(FormatConverterTest, SimpleTestD1S0) {
const std::vector<int> dense_values = {6, 0, 9, 8, 0, 0, 0, 0, 5, 0, 0, 7};
const std::vector<int> dense_shape = {3, 4};
const std::vector<int> traversal_order = {1, 0};
const std::vector<TfLiteDimensionType> format = {kTfLiteDimSparseCSR,
kTfLiteDimDense};
FormatConverter<int> converter(dense_shape, traversal_order, format);
converter.DenseToSparse(dense_values.data());
const auto& dim_metadata = converter.GetDimMetadata();
const std::vector<int> dm0 = {4};
const std::vector<int> dm1_0 = {0, 2, 2, 3, 5};
const std::vector<int> dm1_1 = {0, 2, 0, 0, 2};
EXPECT_EQ(dm0, dim_metadata[0]);
EXPECT_EQ(dm1_0, dim_metadata[2]);
EXPECT_EQ(dm1_1, dim_metadata[3]);
const auto& data = converter.GetData();
const std::vector<int> expected_data = {6, 5, 9, 8, 7};
EXPECT_EQ(expected_data, data);
converter.SparseToDense(expected_data.data());
const auto& data_back = converter.GetData();
EXPECT_EQ(data_back, dense_values);
std::vector<int> dense_data(dense_values.size());
converter.SparseToDense(expected_data.data(), dense_data.size(),
dense_data.data(), nullptr);
EXPECT_EQ(dense_data, dense_values);
}
TEST(FormatConverterTest, SimpleTestS1S0) {
const std::vector<int> dense_values = {6, 0, 9, 8, 0, 0, 0, 0, 5, 0, 0, 7};
const std::vector<int> dense_shape = {3, 4};
const std::vector<int> traversal_order = {1, 0};
const std::vector<TfLiteDimensionType> format = {kTfLiteDimSparseCSR,
kTfLiteDimSparseCSR};
FormatConverter<int> converter(dense_shape, traversal_order, format);
converter.DenseToSparse(dense_values.data());
const auto& dim_metadata = converter.GetDimMetadata();
const std::vector<int> dm0_0 = {0, 3};
const std::vector<int> dm0_1 = {0, 2, 3};
const std::vector<int> dm1_0 = {0, 2, 3, 5};
const std::vector<int> dm1_1 = {0, 2, 0, 0, 2};
EXPECT_EQ(dm0_0, dim_metadata[0]);
EXPECT_EQ(dm0_1, dim_metadata[1]);
EXPECT_EQ(dm1_0, dim_metadata[2]);
EXPECT_EQ(dm1_1, dim_metadata[3]);
const auto& data = converter.GetData();
const std::vector<int> expected_data = {6, 5, 9, 8, 7};
EXPECT_EQ(expected_data, data);
converter.SparseToDense(expected_data.data());
const auto& data_back = converter.GetData();
EXPECT_EQ(data_back, dense_values);
std::vector<int> dense_data(dense_values.size());
converter.SparseToDense(expected_data.data(), dense_data.size(),
dense_data.data(), nullptr);
EXPECT_EQ(dense_data, dense_values);
}
TEST(FormatConverterTest, 3DTestS0D1S2) {
const std::vector<int> dense_values = {6, 0, 9, 8, 0, 0, 0, 0, 5, 0, 0, 7};
const std::vector<int> dense_shape = {3, 2, 2};
const std::vector<int> traversal_order = {0, 1, 2};
const std::vector<TfLiteDimensionType> format = {
kTfLiteDimSparseCSR, kTfLiteDimDense, kTfLiteDimSparseCSR};
FormatConverter<int> converter(dense_shape, traversal_order, format);
converter.DenseToSparse(dense_values.data());
const auto& dim_metadata = converter.GetDimMetadata();
const std::vector<int> dm0_0 = {0, 2};
const std::vector<int> dm0_1 = {0, 2};
const std::vector<int> dm1 = {2};
const std::vector<int> dm2_0 = {0, 1, 3, 4, 5};
const std::vector<int> dm2_1 = {0, 0, 1, 0, 1};
EXPECT_EQ(dm0_0, dim_metadata[0]);
EXPECT_EQ(dm0_1, dim_metadata[1]);
EXPECT_EQ(dm1, dim_metadata[2]);
EXPECT_EQ(dm2_0, dim_metadata[4]);
EXPECT_EQ(dm2_1, dim_metadata[5]);
const auto& data = converter.GetData();
const std::vector<int> expected_data = {6, 9, 8, 5, 7};
EXPECT_EQ(expected_data, data);
converter.SparseToDense(expected_data.data());
const auto& data_back = converter.GetData();
EXPECT_EQ(data_back, dense_values);
std::vector<int> dense_data(dense_values.size());
converter.SparseToDense(expected_data.data(), dense_data.size(),
dense_data.data(), nullptr);
EXPECT_EQ(dense_data, dense_values);
}
TEST(FormatConverterTest, 3DTestD0D1S2) {
const std::vector<int> dense_values = {6, 0, 9, 8, 0, 0, 0, 0, 5, 0, 0, 7};
const std::vector<int> dense_shape = {3, 2, 2};
const std::vector<int> traversal_order = {0, 1, 2};
const std::vector<TfLiteDimensionType> format = {
kTfLiteDimDense, kTfLiteDimDense, kTfLiteDimSparseCSR};
FormatConverter<int> converter(dense_shape, traversal_order, format);
converter.DenseToSparse(dense_values.data());
const auto& dim_metadata = converter.GetDimMetadata();
const std::vector<int> dm0 = {3};
const std::vector<int> dm1 = {2};
const std::vector<int> dm2_0 = {0, 1, 3, 3, 3, 4, 5};
const std::vector<int> dm2_1 = {0, 0, 1, 0, 1};
EXPECT_EQ(dm0, dim_metadata[0]);
EXPECT_EQ(dm1, dim_metadata[2]);
EXPECT_EQ(dm2_0, dim_metadata[4]);
EXPECT_EQ(dm2_1, dim_metadata[5]);
const auto& data = converter.GetData();
const std::vector<int> expected_data = {6, 9, 8, 5, 7};
EXPECT_EQ(expected_data, data);
converter.SparseToDense(expected_data.data());
const auto& data_back = converter.GetData();
EXPECT_EQ(data_back, dense_values);
std::vector<int> dense_data(dense_values.size());
converter.SparseToDense(expected_data.data(), dense_data.size(),
dense_data.data(), nullptr);
EXPECT_EQ(dense_data, dense_values);
}
TEST(FormatConverterTest, 3DTestS0S1S2) {
const std::vector<int> dense_values = {1, 7, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 2, 0, 0, 4, 8, 3, 9};
const std::vector<int> dense_shape = {3, 4, 2};
const std::vector<int> traversal_order = {0, 1, 2};
const std::vector<TfLiteDimensionType> format = {
kTfLiteDimSparseCSR, kTfLiteDimSparseCSR, kTfLiteDimSparseCSR};
FormatConverter<int> converter(dense_shape, traversal_order, format);
converter.DenseToSparse(dense_values.data());
const auto& dim_metadata = converter.GetDimMetadata();
const std::vector<int> dm0_0 = {0, 2};
const std::vector<int> dm0_1 = {0, 2};
const std::vector<int> dm1_0 = {0, 2, 5};
const std::vector<int> dm1_1 = {0, 2, 0, 2, 3};
const std::vector<int> dm2_0 = {0, 2, 3, 4, 6, 8};
const std::vector<int> dm2_1 = {0, 1, 1, 1, 0, 1, 0, 1};
EXPECT_EQ(dm0_0, dim_metadata[0]);
EXPECT_EQ(dm0_1, dim_metadata[1]);
EXPECT_EQ(dm1_0, dim_metadata[2]);
EXPECT_EQ(dm1_1, dim_metadata[3]);
EXPECT_EQ(dm2_0, dim_metadata[4]);
EXPECT_EQ(dm2_1, dim_metadata[5]);
const auto& data = converter.GetData();
const std::vector<int> expected_data = {1, 7, 5, 2, 4, 8, 3, 9};
EXPECT_EQ(expected_data, data);
converter.SparseToDense(expected_data.data());
const auto& data_back = converter.GetData();
EXPECT_EQ(data_back, dense_values);
std::vector<int> dense_data(dense_values.size());
converter.SparseToDense(expected_data.data(), dense_data.size(),
dense_data.data(), nullptr);
EXPECT_EQ(dense_data, dense_values);
}
TEST(FormatConverterTest, 3DTestS0S2S1) {
const std::vector<int> dense_values = {1, 0, 0, 0, 7, 0, 5, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 4, 3, 2, 0, 8, 9};
const std::vector<int> dense_shape = {3, 2, 4};
const std::vector<int> traversal_order = {0, 2, 1};
const std::vector<TfLiteDimensionType> format = {
kTfLiteDimSparseCSR, kTfLiteDimSparseCSR, kTfLiteDimSparseCSR};
FormatConverter<int> converter(dense_shape, traversal_order, format);
converter.DenseToSparse(dense_values.data());
const auto& dim_metadata = converter.GetDimMetadata();
const std::vector<int> dm0_0 = {0, 2};
const std::vector<int> dm0_1 = {0, 2};
const std::vector<int> dm1_0 = {0, 2, 5};
const std::vector<int> dm1_1 = {0, 2, 0, 2, 3};
const std::vector<int> dm2_0 = {0, 2, 3, 4, 6, 8};
const std::vector<int> dm2_1 = {0, 1, 1, 1, 0, 1, 0, 1};
EXPECT_EQ(dm0_0, dim_metadata[0]);
EXPECT_EQ(dm0_1, dim_metadata[1]);
EXPECT_EQ(dm1_0, dim_metadata[2]);
EXPECT_EQ(dm1_1, dim_metadata[3]);
EXPECT_EQ(dm2_0, dim_metadata[4]);
EXPECT_EQ(dm2_1, dim_metadata[5]);
const auto& data = converter.GetData();
const std::vector<int> expected_data = {1, 7, 5, 2, 4, 8, 3, 9};
EXPECT_EQ(expected_data, data);
converter.SparseToDense(expected_data.data());
const auto& data_back = converter.GetData();
EXPECT_EQ(data_back, dense_values);
std::vector<int> dense_data(dense_values.size());
converter.SparseToDense(expected_data.data(), dense_data.size(),
dense_data.data(), nullptr);
EXPECT_EQ(dense_data, dense_values);
}
TEST(FormatConverterTest, BlockTestD0D1) {
const std::vector<int> dense_values = {1, 0, 2, 3, 0, 4, 0, 0,
0, 0, 5, 0, 0, 0, 0, 6};
const std::vector<int> dense_shape = {4, 4};
const std::vector<int> traversal_order = {0, 1, 2, 3};
const std::vector<TfLiteDimensionType> format = {kTfLiteDimDense,
kTfLiteDimDense};
const std::vector<int> block_size = {2, 2};
const std::vector<int> block_map = {0, 1};
FormatConverter<int> converter(dense_shape, traversal_order, format,
block_size, block_map);
converter.DenseToSparse(dense_values.data());
const auto& dim_metadata = converter.GetDimMetadata();
const std::vector<int> dm = {2};
EXPECT_EQ(dm, dim_metadata[0]);
EXPECT_EQ(dm, dim_metadata[2]);
EXPECT_EQ(dm, dim_metadata[4]);
EXPECT_EQ(dm, dim_metadata[6]);
const auto& data = converter.GetData();
const std::vector<int> expected_data = {1, 0, 0, 4, 2, 3, 0, 0,
0, 0, 0, 0, 5, 0, 0, 6};
EXPECT_EQ(expected_data, data);
converter.SparseToDense(expected_data.data());
const auto& data_back = converter.GetData();
EXPECT_EQ(data_back, dense_values);
std::vector<int> dense_data(dense_values.size());
converter.SparseToDense(expected_data.data(), dense_data.size(),
dense_data.data(), nullptr);
EXPECT_EQ(dense_data, dense_values);
}
TEST(FormatConverterTest, BlockTestD0S11DBlock) {
const std::vector<int> dense_values = {1, 0, 2, 3, 0, 4, 0, 0,
0, 0, 5, 0, 0, 0, 0, 6};
const std::vector<int> dense_shape = {4, 4};
const std::vector<int> traversal_order = {0, 1, 2};
const std::vector<TfLiteDimensionType> format = {kTfLiteDimDense,
kTfLiteDimSparseCSR};
const std::vector<int> block_size = {2};
const std::vector<int> block_map = {1};
FormatConverter<int> converter(dense_shape, traversal_order, format,
block_size, block_map);
converter.DenseToSparse(dense_values.data());
const auto& dim_metadata = converter.GetDimMetadata();
const std::vector<int> dm0 = {4};
const std::vector<int> dm2 = {2};
const std::vector<int> dm1_0 = {0, 2, 3, 4, 5};
const std::vector<int> dm1_1 = {0, 1, 0, 1, 1};
EXPECT_EQ(dm0, dim_metadata[0]);
EXPECT_EQ(dm1_0, dim_metadata[2]);
EXPECT_EQ(dm1_1, dim_metadata[3]);
EXPECT_EQ(dm2, dim_metadata[4]);
const auto& data = converter.GetData();
const std::vector<int> expected_data = {1, 0, 2, 3, 0, 4, 5, 0, 0, 6};
EXPECT_EQ(expected_data, data);
converter.SparseToDense(expected_data.data());
const auto& data_back = converter.GetData();
EXPECT_EQ(data_back, dense_values);
std::vector<int> dense_data(dense_values.size());
converter.SparseToDense(expected_data.data(), dense_data.size(),
dense_data.data(), nullptr);
EXPECT_EQ(dense_data, dense_values);
}
TEST(FormatConverterTest, BlockTestD0S12DBlock) {
const std::vector<int> dense_values = {1, 0, 2, 3, 0, 4, 0, 0,
0, 0, 5, 0, 0, 0, 0, 6};
const std::vector<int> dense_shape = {4, 4};
const std::vector<int> traversal_order = {0, 1, 2, 3};
const std::vector<TfLiteDimensionType> format = {kTfLiteDimDense,
kTfLiteDimSparseCSR};
const std::vector<int> block_size = {2, 2};
const std::vector<int> block_map = {0, 1};
FormatConverter<int> converter(dense_shape, traversal_order, format,
block_size, block_map);
converter.DenseToSparse(dense_values.data());
const auto& dim_metadata = converter.GetDimMetadata();
const std::vector<int> dm = {2};
const std::vector<int> dm1_0 = {0, 2, 3};
const std::vector<int> dm1_1 = {0, 1, 1};
EXPECT_EQ(dm, dim_metadata[0]);
EXPECT_EQ(dm1_0, dim_metadata[2]);
EXPECT_EQ(dm1_1, dim_metadata[3]);
EXPECT_EQ(dm, dim_metadata[4]);
EXPECT_EQ(dm, dim_metadata[6]);
const auto& data = converter.GetData();
const std::vector<int> expected_data = {1, 0, 0, 4, 2, 3, 0, 0, 5, 0, 0, 6};
EXPECT_EQ(expected_data, data);
converter.SparseToDense(expected_data.data());
const auto& data_back = converter.GetData();
EXPECT_EQ(data_back, dense_values);
std::vector<int> dense_data(dense_values.size());
converter.SparseToDense(expected_data.data(), dense_data.size(),
dense_data.data(), nullptr);
EXPECT_EQ(dense_data, dense_values);
}
TEST(FormatConverterTest, BlockTestD1S0) {
const std::vector<int> dense_values = {1, 0, 2, 3, 0, 4, 0, 0,
0, 0, 5, 0, 0, 0, 0, 6};
const std::vector<int> dense_shape = {4, 4};
const std::vector<int> traversal_order = {1, 0, 3, 2};
const std::vector<TfLiteDimensionType> format = {kTfLiteDimSparseCSR,
kTfLiteDimDense};
const std::vector<int> block_size = {2, 2};
const std::vector<int> block_map = {0, 1};
FormatConverter<int> converter(dense_shape, traversal_order, format,
block_size, block_map);
converter.DenseToSparse(dense_values.data());
const auto& dim_metadata = converter.GetDimMetadata();
const std::vector<int> dm = {2};
const std::vector<int> dm1_0 = {0, 1, 3};
const std::vector<int> dm1_1 = {0, 0, 1};
EXPECT_EQ(dm, dim_metadata[0]);
EXPECT_EQ(dm1_0, dim_metadata[2]);
EXPECT_EQ(dm1_1, dim_metadata[3]);
EXPECT_EQ(dm, dim_metadata[4]);
EXPECT_EQ(dm, dim_metadata[6]);
const auto& data = converter.GetData();
const std::vector<int> expected_data = {1, 0, 0, 4, 2, 0, 3, 0, 5, 0, 0, 6};
EXPECT_EQ(expected_data, data);
converter.SparseToDense(expected_data.data());
const auto& data_back = converter.GetData();
EXPECT_EQ(data_back, dense_values);
std::vector<int> dense_data(dense_values.size());
converter.SparseToDense(expected_data.data(), dense_data.size(),
dense_data.data(), nullptr);
EXPECT_EQ(dense_data, dense_values);
}
TEST(FormatConverterTest, BlockTestD0S1LastBlockEmpty) {
const std::vector<int> dense_values = {1, 0, 2, 3, 0, 4, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0};
const std::vector<int> dense_shape = {4, 4};
const std::vector<int> traversal_order = {0, 1, 2, 3};
const std::vector<TfLiteDimensionType> format = {kTfLiteDimDense,
kTfLiteDimSparseCSR};
const std::vector<int> block_size = {2, 2};
const std::vector<int> block_map = {0, 1};
FormatConverter<int> converter(dense_shape, traversal_order, format,
block_size, block_map);
converter.DenseToSparse(dense_values.data());
const auto& dim_metadata = converter.GetDimMetadata();
const std::vector<int> dm = {2};
const std::vector<int> dm1_0 = {0, 2, 2};
const std::vector<int> dm1_1 = {0, 1};
EXPECT_EQ(dm, dim_metadata[0]);
EXPECT_EQ(dm1_0, dim_metadata[2]);
EXPECT_EQ(dm1_1, dim_metadata[3]);
EXPECT_EQ(dm, dim_metadata[4]);
EXPECT_EQ(dm, dim_metadata[6]);
const auto& data = converter.GetData();
const std::vector<int> expected_data = {1, 0, 0, 4, 2, 3, 0, 0};
EXPECT_EQ(expected_data, data);
converter.SparseToDense(expected_data.data());
const auto& data_back = converter.GetData();
EXPECT_EQ(data_back, dense_values);
std::vector<int> dense_data(dense_values.size());
converter.SparseToDense(expected_data.data(), dense_data.size(),
dense_data.data(), nullptr);
EXPECT_EQ(dense_data, dense_values);
}
TEST(FormatConverterTest, BlockTestD0S1ColMajorBlock) {
const std::vector<int> dense_values = {1, 0, 2, 3, 0, 4, 0, 0, 1, 0, 2,
3, 0, 4, 0, 0, 0, 0, 5, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
const std::vector<int> dense_shape = {4, 8};
const std::vector<int> traversal_order = {0, 1, 3, 2};
const std::vector<TfLiteDimensionType> format = {kTfLiteDimDense,
kTfLiteDimSparseCSR};
const std::vector<int> block_size = {2, 2};
const std::vector<int> block_map = {0, 1};
FormatConverter<int> converter(dense_shape, traversal_order, format,
block_size, block_map);
converter.DenseToSparse(dense_values.data());
const auto& dim_metadata = converter.GetDimMetadata();
const std::vector<int> dm = {2};
const std::vector<int> dm1_0 = {0, 3, 4};
const std::vector<int> dm1_1 = {0, 1, 2, 1};
EXPECT_EQ(dm, dim_metadata[0]);
EXPECT_EQ(dm1_0, dim_metadata[2]);
EXPECT_EQ(dm1_1, dim_metadata[3]);
EXPECT_EQ(dm, dim_metadata[4]);
EXPECT_EQ(dm, dim_metadata[6]);
const auto& data = converter.GetData();
const std::vector<int> expected_data = {1, 1, 0, 0, 2, 2, 3, 3,
0, 0, 4, 4, 5, 0, 0, 0};
EXPECT_EQ(expected_data, data);
converter.SparseToDense(expected_data.data());
const auto& data_back = converter.GetData();
EXPECT_EQ(data_back, dense_values);
std::vector<int> dense_data(dense_values.size());
converter.SparseToDense(expected_data.data(), dense_data.size(),
dense_data.data(), nullptr);
EXPECT_EQ(dense_data, dense_values);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/kernels/internal/utils/sparsity_format_converter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/internal/utils/sparsity_format_converter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4158aec7-4771-4a69-b0de-b4fe7c30f13e | cpp | tensorflow/tensorflow | ctc_beam_search_decoder | tensorflow/lite/kernels/ctc/ctc_beam_search_decoder.cc | tensorflow/lite/kernels/ctc/ctc_beam_search_decoder_test.cc | #include <algorithm>
#include <vector>
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/ctc/ctc_beam_search.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/op_macros.h"
namespace tflite {
namespace ops {
namespace custom {
namespace ctc_beam_search_decoder {
constexpr int kInputsTensor = 0;
constexpr int kSequenceLengthTensor = 1;
typedef struct {
int beam_width;
int top_paths;
bool merge_repeated;
} CTCBeamSearchDecoderParams;
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
TFLITE_CHECK(buffer != nullptr);
const uint8_t* buffer_t = reinterpret_cast<const uint8_t*>(buffer);
const flexbuffers::Map& m = flexbuffers::GetRoot(buffer_t, length).AsMap();
CTCBeamSearchDecoderParams* option = new CTCBeamSearchDecoderParams;
option->beam_width = m["beam_width"].AsInt32();
option->top_paths = m["top_paths"].AsInt32();
option->merge_repeated = m["merge_repeated"].AsBool();
return option;
}
void Free(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<CTCBeamSearchDecoderParams*>(buffer);
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
const CTCBeamSearchDecoderParams* option =
reinterpret_cast<CTCBeamSearchDecoderParams*>(node->user_data);
const int top_paths = option->top_paths;
TF_LITE_ENSURE(context, option->beam_width >= top_paths);
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 3 * top_paths + 1);
const TfLiteTensor* inputs;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputsTensor, &inputs));
TF_LITE_ENSURE_EQ(context, NumDimensions(inputs), 3);
TF_LITE_ENSURE_EQ(context, inputs->type, kTfLiteFloat32);
const int batch_size = SizeOfDimension(inputs, 1);
const TfLiteTensor* sequence_length;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kSequenceLengthTensor,
&sequence_length));
TF_LITE_ENSURE_EQ(context, NumDimensions(sequence_length), 1);
TF_LITE_ENSURE_EQ(context, NumElements(sequence_length), batch_size);
TF_LITE_ENSURE_EQ(context, sequence_length->type, kTfLiteInt32);
for (int i = 0; i < top_paths; ++i) {
TfLiteTensor* indices;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, i, &indices));
SetTensorToDynamic(indices);
TfLiteTensor* values;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, i + top_paths, &values));
SetTensorToDynamic(values);
TfLiteTensor* output_shape;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, i + 2 * top_paths,
&output_shape));
SetTensorToDynamic(output_shape);
}
TfLiteTensor* log_probability_output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, top_paths * 3,
&log_probability_output));
TfLiteIntArray* log_probability_output_shape_array = TfLiteIntArrayCreate(2);
log_probability_output_shape_array->data[0] = batch_size;
log_probability_output_shape_array->data[1] = top_paths;
return context->ResizeTensor(context, log_probability_output,
log_probability_output_shape_array);
}
TfLiteStatus Resize(TfLiteContext* context,
std::initializer_list<int32_t> output_shape,
TfLiteTensor* output) {
const int dimensions = output_shape.size();
TfLiteIntArray* output_shape_array = TfLiteIntArrayCreate(dimensions);
int i = 0;
for (const int v : output_shape) {
output_shape_array->data[i++] = v;
}
return context->ResizeTensor(context, output, output_shape_array);
}
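// Writes the decoded hypotheses into the op outputs. For each of the
// `top_paths` paths p there are three sparse-tensor outputs: indices
// [num_entries, 2] holding (batch, time) pairs, values [num_entries] holding
// the decoded class ids, and a dense shape [batch_size, max_decoded_length].
// The log-probability output (index 3 * top_paths) is filled separately in
// Eval.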
TfLiteStatus StoreAllDecodedSequences(
TfLiteContext* context,
const std::vector<std::vector<std::vector<int>>>& sequences,
TfLiteNode* node, int top_paths) {
const int32_t batch_size = sequences.size();
std::vector<int32_t> num_entries(top_paths, 0);
for (const auto& batch_s : sequences) {
TF_LITE_ENSURE_EQ(context, batch_s.size(), top_paths);
for (int p = 0; p < top_paths; ++p) {
num_entries[p] += batch_s[p].size();
}
}
for (int p = 0; p < top_paths; ++p) {
const int32_t p_num = num_entries[p];
TfLiteTensor* indices;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, p, &indices));
TF_LITE_ENSURE_OK(context, Resize(context, {p_num, 2}, indices));
TfLiteTensor* values;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, p + top_paths, &values));
TF_LITE_ENSURE_OK(context, Resize(context, {p_num}, values));
TfLiteTensor* decoded_shape;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, p + 2 * top_paths,
&decoded_shape));
TF_LITE_ENSURE_OK(context, Resize(context, {2}, decoded_shape));
int32_t max_decoded = 0;
int32_t offset = 0;
int32_t* indices_data = GetTensorData<int32_t>(indices);
int32_t* values_data = GetTensorData<int32_t>(values);
int32_t* decoded_shape_data = GetTensorData<int32_t>(decoded_shape);
for (int b = 0; b < batch_size; ++b) {
auto& p_batch = sequences[b][p];
int32_t num_decoded = p_batch.size();
max_decoded = std::max(max_decoded, num_decoded);
std::copy_n(p_batch.begin(), num_decoded, values_data + offset);
for (int32_t t = 0; t < num_decoded; ++t, ++offset) {
indices_data[offset * 2] = b;
indices_data[offset * 2 + 1] = t;
}
}
decoded_shape_data[0] = batch_size;
decoded_shape_data[1] = max_decoded;
}
return kTfLiteOk;
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* inputs;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputsTensor, &inputs));
const TfLiteTensor* sequence_length;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kSequenceLengthTensor,
&sequence_length));
const CTCBeamSearchDecoderParams* option =
reinterpret_cast<CTCBeamSearchDecoderParams*>(node->user_data);
const int max_time = SizeOfDimension(inputs, 0);
const int batch_size = SizeOfDimension(inputs, 1);
const int num_classes = SizeOfDimension(inputs, 2);
const int beam_width = option->beam_width;
const int top_paths = option->top_paths;
const bool merge_repeated = option->merge_repeated;
for (int i = 0; i < batch_size; ++i) {
TF_LITE_ENSURE(context,
max_time >= GetTensorData<int32_t>(sequence_length)[i]);
}
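  // The logits tensor is laid out as [max_time, batch_size, num_classes].
  // Build one unaligned matrix view per time step, then run the beam search
  // independently for each batch entry over its own sequence length.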
std::vector<optimized_ops::TTypes<float>::UnalignedConstMatrix> input_list_t;
input_list_t.reserve(max_time);
for (std::size_t t = 0; t < max_time; ++t) {
input_list_t.emplace_back(
GetTensorData<float>(inputs) + t * batch_size * num_classes, batch_size,
num_classes);
}
::tflite::custom::ctc::CTCBeamSearchDecoder<>::DefaultBeamScorer beam_scorer;
::tflite::custom::ctc::CTCBeamSearchDecoder<> beam_search(
      num_classes, beam_width, &beam_scorer, /*batch_size=*/1,
merge_repeated);
float* input_chip_t_data =
static_cast<float*>(malloc(num_classes * sizeof(float)));
Eigen::array<Eigen::DenseIndex, 1> dims;
dims[0] = num_classes;
optimized_ops::TTypes<float>::Flat input_chip_t(input_chip_t_data, dims);
std::vector<std::vector<std::vector<int>>> best_paths(batch_size);
std::vector<float> log_probs;
TfLiteTensor* log_probabilities;
TF_LITE_ENSURE_OK(
context, GetOutputSafe(context, node, 3 * top_paths, &log_probabilities));
float* log_probabilities_output = GetTensorData<float>(log_probabilities);
for (int b = 0; b < batch_size; ++b) {
auto& best_paths_b = best_paths[b];
best_paths_b.resize(top_paths);
for (int t = 0; t < GetTensorData<int32_t>(sequence_length)[b]; ++t) {
input_chip_t = input_list_t[t].chip(b, 0);
auto input_bi =
Eigen::Map<const Eigen::ArrayXf>(input_chip_t.data(), num_classes);
beam_search.Step(input_bi);
}
TF_LITE_ENSURE(context, beam_search.TopPaths(top_paths, &best_paths_b,
&log_probs, merge_repeated));
beam_search.Reset();
for (int bp = 0; bp < top_paths; ++bp) {
log_probabilities_output[b * top_paths + bp] = log_probs[bp];
}
}
free(input_chip_t_data);
return StoreAllDecodedSequences(context, best_paths, node, top_paths);
}
}
TfLiteRegistration* Register_CTC_BEAM_SEARCH_DECODER() {
static TfLiteRegistration r = {
ctc_beam_search_decoder::Init, ctc_beam_search_decoder::Free,
ctc_beam_search_decoder::Prepare, ctc_beam_search_decoder::Eval};
return &r;
}
}
}
} | #include <functional>
#include <memory>
#include <vector>
#include <gtest/gtest.h>
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/core/kernels/register.h"
#include "tensorflow/lite/core/model.h"
#include "tensorflow/lite/kernels/test_util.h"
namespace tflite {
namespace ops {
namespace custom {
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
TfLiteRegistration* Register_CTC_BEAM_SEARCH_DECODER();
namespace {
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
class CTCBeamSearchDecoderOpModel : public SingleOpModel {
public:
CTCBeamSearchDecoderOpModel(std::initializer_list<int> input_shape,
std::initializer_list<int> sequence_length_shape,
int beam_width, int top_paths,
bool merge_repeated) {
inputs_ = AddInput(TensorType_FLOAT32);
sequence_length_ = AddInput(TensorType_INT32);
for (int i = 0; i < top_paths * 3; ++i) {
outputs_.push_back(AddOutput(TensorType_INT32));
}
outputs_.push_back(AddOutput(TensorType_FLOAT32));
flexbuffers::Builder fbb;
fbb.Map([&]() {
fbb.Int("beam_width", beam_width);
fbb.Int("top_paths", top_paths);
fbb.Bool("merge_repeated", merge_repeated);
});
fbb.Finish();
SetCustomOp("CTCBeamSearchDecoder", fbb.GetBuffer(),
Register_CTC_BEAM_SEARCH_DECODER);
BuildInterpreter({input_shape, sequence_length_shape});
}
int inputs() { return inputs_; }
int sequence_length() { return sequence_length_; }
  std::vector<std::vector<int>> GetDecodedOutputs() {
std::vector<std::vector<int>> outputs;
for (int i = 0; i < outputs_.size() - 1; ++i) {
outputs.push_back(ExtractVector<int>(outputs_[i]));
}
return outputs;
}
std::vector<float> GetLogProbabilitiesOutput() {
return ExtractVector<float>(outputs_[outputs_.size() - 1]);
}
std::vector<std::vector<int>> GetOutputShapes() {
std::vector<std::vector<int>> output_shapes;
for (const int output : outputs_) {
output_shapes.push_back(GetTensorShape(output));
}
return output_shapes;
}
private:
int inputs_;
int sequence_length_;
std::vector<int> outputs_;
};
TEST(CTCBeamSearchTest, SimpleTest) {
CTCBeamSearchDecoderOpModel m({2, 1, 2}, {1}, 1, 1, true);
m.PopulateTensor<float>(m.inputs(),
{-0.50922557, -1.35512652, -2.55445064, -1.58419356});
m.PopulateTensor<int>(m.sequence_length(), {2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const std::vector<std::vector<int>>& output_shapes = m.GetOutputShapes();
EXPECT_EQ(output_shapes.size(), 4);
EXPECT_THAT(output_shapes[0], ElementsAre(1, 2));
EXPECT_THAT(output_shapes[1], ElementsAre(1));
EXPECT_THAT(output_shapes[2], ElementsAre(2));
EXPECT_THAT(output_shapes[3], ElementsAre(1, 1));
  const std::vector<std::vector<int>>& decoded_outputs = m.GetDecodedOutputs();
EXPECT_EQ(decoded_outputs.size(), 3);
EXPECT_THAT(decoded_outputs[0], ElementsAre(0, 0));
EXPECT_THAT(decoded_outputs[1], ElementsAre(0));
EXPECT_THAT(decoded_outputs[2], ElementsAre(1, 1));
EXPECT_THAT(m.GetLogProbabilitiesOutput(),
ElementsAreArray(ArrayFloatNear({-0.357094})));
}
TEST(CTCBeamSearchTest, MultiBatchTest) {
CTCBeamSearchDecoderOpModel m({3, 3, 3}, {3}, 1, 1, true);
m.PopulateTensor<float>(
m.inputs(),
{-0.63649208, -0.00487571, -0.04249819, -0.67754697, -1.0341399,
-2.14717721, -0.77686821, -3.41973774, -0.05151402, -0.21482619,
-0.57411168, -1.45039917, -0.73769373, -2.10941739, -0.44818325,
-0.25287673, -2.80057302, -0.54748312, -0.73334867, -0.86537719,
-0.2065197, -0.18725838, -1.42770405, -0.86051965, -1.61642301,
-2.07275114, -0.9201845});
m.PopulateTensor<int>(m.sequence_length(), {3, 3, 3});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const std::vector<std::vector<int>>& output_shapes = m.GetOutputShapes();
EXPECT_EQ(output_shapes.size(), 4);
EXPECT_THAT(output_shapes[0], ElementsAre(4, 2));
EXPECT_THAT(output_shapes[1], ElementsAre(4));
EXPECT_THAT(output_shapes[2], ElementsAre(2));
EXPECT_THAT(output_shapes[3], ElementsAre(3, 1));
  const std::vector<std::vector<int>>& decoded_outputs = m.GetDecodedOutputs();
EXPECT_EQ(decoded_outputs.size(), 3);
EXPECT_THAT(decoded_outputs[0], ElementsAre(0, 0, 0, 1, 1, 0, 2, 0));
EXPECT_THAT(decoded_outputs[1], ElementsAre(1, 0, 0, 0));
EXPECT_THAT(decoded_outputs[2], ElementsAre(3, 2));
EXPECT_THAT(m.GetLogProbabilitiesOutput(),
ElementsAreArray(ArrayFloatNear({-1.88343, -1.41188, -1.20958})));
}
TEST(CTCBeamSearchTest, MultiPathsTest) {
CTCBeamSearchDecoderOpModel m({3, 2, 5}, {2}, 3, 2, true);
m.PopulateTensor<float>(
m.inputs(),
{-2.206851, -0.09542714, -0.2393415, -3.81866197, -0.27241158,
-0.20371124, -0.68236623, -1.1397166, -0.17422639, -1.85224048,
-0.9406037, -0.32544678, -0.21846784, -0.38377237, -0.33498676,
-0.10139782, -0.51886883, -0.21678554, -0.15267063, -1.91164412,
-0.31328673, -0.27462716, -0.65975336, -1.53671973, -2.76554225,
-0.23920634, -1.2370502, -4.98751576, -3.12995717, -0.43129368});
m.PopulateTensor<int>(m.sequence_length(), {3, 3});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const std::vector<std::vector<int>>& output_shapes = m.GetOutputShapes();
EXPECT_EQ(output_shapes.size(), 7);
EXPECT_THAT(output_shapes[0], ElementsAre(4, 2));
EXPECT_THAT(output_shapes[1], ElementsAre(3, 2));
EXPECT_THAT(output_shapes[2], ElementsAre(4));
EXPECT_THAT(output_shapes[3], ElementsAre(3));
EXPECT_THAT(output_shapes[4], ElementsAre(2));
EXPECT_THAT(output_shapes[5], ElementsAre(2));
EXPECT_THAT(output_shapes[6], ElementsAre(2, 2));
  const std::vector<std::vector<int>>& decoded_outputs = m.GetDecodedOutputs();
EXPECT_EQ(decoded_outputs.size(), 6);
EXPECT_THAT(decoded_outputs[0], ElementsAre(0, 0, 0, 1, 1, 0, 1, 1));
EXPECT_THAT(decoded_outputs[1], ElementsAre(0, 0, 0, 1, 1, 0));
EXPECT_THAT(decoded_outputs[2], ElementsAre(1, 2, 3, 0));
EXPECT_THAT(decoded_outputs[3], ElementsAre(2, 1, 0));
EXPECT_THAT(decoded_outputs[4], ElementsAre(2, 2));
EXPECT_THAT(decoded_outputs[5], ElementsAre(2, 2));
EXPECT_THAT(m.GetLogProbabilitiesOutput(),
ElementsAreArray(
ArrayFloatNear({-2.65148, -2.65864, -2.17914, -2.61357})));
}
TEST(CTCBeamSearchTest, NonEqualSequencesTest) {
CTCBeamSearchDecoderOpModel m({3, 3, 4}, {3}, 3, 1, true);
m.PopulateTensor<float>(
m.inputs(),
{-1.26658163, -0.25760023, -0.03917975, -0.63772235, -0.03794756,
-0.45063099, -0.27706473, -0.01569179, -0.59940385, -0.35700127,
-0.48920721, -1.42635476, -1.3462478, -0.02565498, -0.30179568,
-0.6491698, -0.55017719, -2.92291466, -0.92522973, -0.47592022,
-0.07099135, -0.31575624, -0.86345281, -0.36017021, -0.79208612,
-1.75306124, -0.65089224, -0.00912786, -0.42915003, -1.72606203,
-1.66337589, -0.70800793, -2.52272352, -0.67329562, -2.49145522,
-0.49786342});
m.PopulateTensor<int>(m.sequence_length(), {1, 2, 3});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const std::vector<std::vector<int>>& output_shapes = m.GetOutputShapes();
EXPECT_EQ(output_shapes.size(), 4);
EXPECT_THAT(output_shapes[0], ElementsAre(3, 2));
EXPECT_THAT(output_shapes[1], ElementsAre(3));
EXPECT_THAT(output_shapes[2], ElementsAre(2));
EXPECT_THAT(output_shapes[3], ElementsAre(3, 1));
  const std::vector<std::vector<int>>& decoded_outputs = m.GetDecodedOutputs();
EXPECT_EQ(decoded_outputs.size(), 3);
EXPECT_THAT(decoded_outputs[0], ElementsAre(0, 0, 1, 0, 2, 0));
EXPECT_THAT(decoded_outputs[1], ElementsAre(2, 0, 1));
EXPECT_THAT(decoded_outputs[2], ElementsAre(3, 1));
EXPECT_THAT(m.GetLogProbabilitiesOutput(),
ElementsAreArray(ArrayFloatNear({-0.97322, -1.16334, -2.15553})));
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/ctc/ctc_beam_search_decoder.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/ctc/ctc_beam_search_decoder_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
885c633f-e336-4b1d-baa1-74c080d9a5ac | cpp | tensorflow/tensorflow | dense_image_warp | tensorflow/lite/kernels/perception/dense_image_warp.cc | tensorflow/lite/kernels/perception/dense_image_warp_test.cc | #include <algorithm>
#include <cmath>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/runtime_shape.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace custom {
namespace dense_image_warp {
constexpr int kInputTensor = 0;
constexpr int kFlowTensor = 1;
constexpr int kOutputTensor = 0;
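// Warps `input_data` with a dense, per-pixel flow field: each output location
// (y, x) is sampled from the query point (y - flow_y, x - flow_x) using
// bilinear interpolation, with the 2x2 neighborhood clamped so it stays
// inside the image.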
inline void DenseImageWarp(const RuntimeShape& input_shape,
const float* input_data,
const RuntimeShape& flow_shape,
const float* flow_data, float* output_data) {
const int batches = MatchingDim(input_shape, 0, flow_shape, 0);
const int height = MatchingDim(input_shape, 1, flow_shape, 1);
const int width = MatchingDim(input_shape, 2, flow_shape, 2);
const int channels = input_shape.Dims(3);
TFLITE_CHECK_EQ(flow_shape.Dims(3), 2);
const int max_floor_y = height - 2;
const int max_floor_x = width - 2;
for (int batch = 0; batch < batches; ++batch) {
for (int in_y = 0; in_y < height; ++in_y) {
for (int in_x = 0; in_x < width; ++in_x) {
        float query_point_y =
            in_y - flow_data[Offset(flow_shape, batch, in_y, in_x, 0)];
        float query_point_x =
            in_x - flow_data[Offset(flow_shape, batch, in_y, in_x, 1)];
        int floor_y =
            std::min(std::max(0, static_cast<int>(std::floor(query_point_y))),
                     max_floor_y);
        int floor_x =
            std::min(std::max(0, static_cast<int>(std::floor(query_point_x))),
                     max_floor_x);
        float alpha_y =
            std::min(std::max(0.0f, query_point_y - floor_y), 1.0f);
        float alpha_x =
            std::min(std::max(0.0f, query_point_x - floor_x), 1.0f);
for (int c = 0; c < channels; ++c) {
float top_left =
input_data[Offset(input_shape, batch, floor_y, floor_x, c)];
float top_right =
input_data[Offset(input_shape, batch, floor_y, floor_x + 1, c)];
float bottom_left =
input_data[Offset(input_shape, batch, floor_y + 1, floor_x, c)];
float bottom_right = input_data[Offset(input_shape, batch,
floor_y + 1, floor_x + 1, c)];
float interp_top = alpha_x * (top_right - top_left) + top_left;
float interp_bottom =
alpha_x * (bottom_right - bottom_left) + bottom_left;
float interp = alpha_y * (interp_bottom - interp_top) + interp_top;
output_data[Offset(input_shape, batch, in_y, in_x, c)] = interp;
}
}
}
}
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TF_LITE_ENSURE(context, output != nullptr);
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TF_LITE_ENSURE(context, input != nullptr);
const TfLiteTensor* flow = GetInput(context, node, kFlowTensor);
TF_LITE_ENSURE(context, flow != nullptr);
TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32);
TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteFloat32);
TF_LITE_ENSURE_TYPES_EQ(context, flow->type, kTfLiteFloat32);
TF_LITE_ENSURE_EQ(context, NumDimensions(flow), 4);
TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4);
const RuntimeShape input_shape = GetTensorShape(input);
const RuntimeShape flow_shape = GetTensorShape(flow);
TF_LITE_ENSURE_EQ(context, input_shape.Dims(0), flow_shape.Dims(0));
TF_LITE_ENSURE_EQ(context, input_shape.Dims(1), flow_shape.Dims(1));
TF_LITE_ENSURE_EQ(context, input_shape.Dims(2), flow_shape.Dims(2));
TF_LITE_ENSURE_MSG(context, input_shape.Dims(1) >= 2,
"Image height must be at least 2.");
TF_LITE_ENSURE_MSG(context, input_shape.Dims(2) >= 2,
"Image width must be at least 2.");
TF_LITE_ENSURE_MSG(context, flow_shape.Dims(3) == 2,
"The last dimension of flow tensor must be 2.");
TfLiteIntArray* output_size = TfLiteIntArrayCopy(input->dims);
return context->ResizeTensor(context, output, output_size);
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TF_LITE_ENSURE(context, output != nullptr);
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TF_LITE_ENSURE(context, input != nullptr);
const TfLiteTensor* flow = GetInput(context, node, kFlowTensor);
TF_LITE_ENSURE(context, flow != nullptr);
DenseImageWarp(GetTensorShape(input), GetTensorData<float>(input),
GetTensorShape(flow), GetTensorData<float>(flow),
GetTensorData<float>(output));
return kTfLiteOk;
}
}
TfLiteRegistration* RegisterDenseImageWarp() {
  static TfLiteRegistration reg = {/*init=*/nullptr,
                                   /*free=*/nullptr, dense_image_warp::Prepare,
                                   dense_image_warp::Eval};
  return &reg;
}
TfLiteRegistration* Register_DENSE_IMAGE_WARP() {
return RegisterDenseImageWarp();
}
}
}
} | #include <cstdint>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/kernels/perception/perception_ops.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace ops {
namespace custom {
namespace {
using testing::ElementsAreArray;
class DenseImageWarpOpModel : public SingleOpModel {
public:
DenseImageWarpOpModel(const TensorData& input, const TensorData& flow,
const TensorData& output) {
input_ = AddInput(input);
flow_ = AddInput(flow);
output_ = AddOutput(output);
std::vector<uint8_t> custom_option;
SetCustomOp("DenseImageWarp", custom_option, RegisterDenseImageWarp);
BuildInterpreter({GetShape(input_), GetShape(flow_)});
}
void SetInput(const std::vector<float>& data) {
PopulateTensor(input_, data);
}
void SetFlow(const std::vector<float>& data) { PopulateTensor(flow_, data); }
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
protected:
int input_;
int flow_;
int output_;
};
TEST(DenseImageWarpOpTest, MismatchedSizeTest) {
EXPECT_DEATH_IF_SUPPORTED(
DenseImageWarpOpModel model(
{TensorType_FLOAT32, {1, 4, 4, 1}},
{TensorType_FLOAT32, {1, 4, 2, 2}},
{TensorType_FLOAT32, {}});
, "input_shape.Dims.2. != flow_shape.Dims.2. .4 != 2.");
}
TEST(DenseImageWarpOpTest, WrongFlowSizeTest) {
EXPECT_DEATH_IF_SUPPORTED(DenseImageWarpOpModel model(
{TensorType_FLOAT32, {1, 4, 4, 1}},
{TensorType_FLOAT32, {1, 4, 4, 1}},
{TensorType_FLOAT32, {}});
, "The last dimension of flow tensor must be 2.");
}
TEST(DenseImageWarpOpTest, SimpleTest) {
DenseImageWarpOpModel model(
{TensorType_FLOAT32, {1, 4, 4, 1}},
{TensorType_FLOAT32, {1, 4, 4, 2}},
{TensorType_FLOAT32, {}});
model.SetInput({0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15});
model.SetFlow({4, 10, 6, 10, 4, 2, 6, 6, 10, -4, 2, -2, 6, 8, 6, 0,
2, -2, 10, 6, 4, 4, 2, -4, -4, 10, -4, -4, -2, 6, 4, 6});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 4, 4, 1}));
EXPECT_THAT(model.GetOutput(), ElementsAreArray({0, 0, 0, 0, 3, 3, 0, 3, 2, 0,
0, 3, 12, 15, 12, 0}));
}
TEST(DenseImageWarpOpTest, RoundTest) {
DenseImageWarpOpModel model(
{TensorType_FLOAT32, {1, 4, 4, 1}},
{TensorType_FLOAT32, {1, 4, 4, 2}},
{TensorType_FLOAT32, {}});
model.SetInput({0.2, 1.5, 2.4, 3.5, 4.6, 5.1, 6.3, 7.2, 8.5, 9.6, 10.9, 11.6,
12.8, 13.2, 14.4, 15.5});
model.SetFlow({4, 10, 6, 10, 4, 2, 6, 6, 10, -4, 2, -2, 6, 8, 6, 0,
2, -2, 10, 6, 4, 4, 2, -4, -4, 10, -4, -4, -2, 6, 4, 6});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 4, 4, 1}));
EXPECT_THAT(model.GetOutput(),
ElementsAreArray({0.2, 0.2, 0.2, 0.2, 3.5, 3.5, 0.2, 3.5, 2.4,
0.2, 0.2, 3.5, 12.8, 15.5, 12.8, 0.2}));
}
TEST(DenseImageWarpOpTest, WithBatchandChannelTest) {
DenseImageWarpOpModel model(
{TensorType_FLOAT32, {2, 4, 4, 3}},
{TensorType_FLOAT32, {2, 4, 4, 2}},
{TensorType_FLOAT32, {}});
std::vector<float> input_data;
for (int i = 0; i < 96; ++i) input_data.push_back(i);
model.SetInput(input_data);
model.SetFlow({2, -2, 10, 6, 4, 4, 2, -4, -4, 10, -4, -4, -2, 6, 4, 6,
4, 10, 6, 10, 4, 2, 6, 6, 10, -4, 2, -2, 6, 8, 6, 0,
2, -2, 10, 6, 4, 4, 2, -4, -4, 10, -4, -4, -2, 6, 4, 6,
4, 10, 6, 10, 4, 2, 6, 6, 10, -4, 2, -2, 6, 8, 6, 0});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({2, 4, 4, 3}));
EXPECT_THAT(
model.GetOutput(),
ElementsAreArray({6, 7, 8, 0, 1, 2, 0, 1, 2, 9, 10, 11, 36, 37,
38, 45, 46, 47, 36, 37, 38, 0, 1, 2, 0, 1, 2, 0,
1, 2, 0, 1, 2, 0, 1, 2, 9, 10, 11, 21, 22, 23,
0, 1, 2, 9, 10, 11, 54, 55, 56, 48, 49, 50, 48, 49,
50, 57, 58, 59, 84, 85, 86, 93, 94, 95, 84, 85, 86, 48,
49, 50, 48, 49, 50, 48, 49, 50, 48, 49, 50, 48, 49, 50,
57, 58, 59, 69, 70, 71, 48, 49, 50, 57, 58, 59}));
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/perception/dense_image_warp.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/perception/dense_image_warp_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b4905ce5-1e2f-4847-a006-430bc3261142 | cpp | tensorflow/tensorflow | max_pool_with_argmax | tensorflow/lite/kernels/perception/max_pool_with_argmax.cc | tensorflow/lite/kernels/perception/max_pool_with_argmax_test.cc | #include <algorithm>
#include <string>
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/runtime_shape.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/padding.h"
namespace tflite {
namespace ops {
namespace custom {
namespace max_pool_with_argmax {
namespace {
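// Float max pooling that also records, for each output element, the position
// of the window maximum as an index flattened over the input plane:
//   index = (in_y * input_width + in_x) * depth + channel.
// The batch dimension is not folded into the index (see the
// include_batch_in_index check in Prepare).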
template <typename T>
inline void MaxPool(const PoolParams& params, const RuntimeShape& input_shape,
const RuntimeShape& output_shape, const T* input_data,
T* output_data, int32_t* indices_data) {
TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
const int32_t batches = MatchingDim(input_shape, 0, output_shape, 0);
const int32_t depth = MatchingDim(input_shape, 3, output_shape, 3);
const int32_t input_height = input_shape.Dims(1);
const int32_t input_width = input_shape.Dims(2);
const int32_t output_height = output_shape.Dims(1);
const int32_t output_width = output_shape.Dims(2);
const int32_t stride_height = params.stride_height;
const int32_t stride_width = params.stride_width;
for (int32_t batch = 0; batch < batches; ++batch) {
for (int32_t out_y = 0; out_y < output_height; ++out_y) {
for (int32_t out_x = 0; out_x < output_width; ++out_x) {
for (int32_t channel = 0; channel < depth; ++channel) {
const int32_t in_x_origin =
(out_x * stride_width) - params.padding_values.width;
const int32_t in_y_origin =
(out_y * stride_height) - params.padding_values.height;
const int32_t filter_x_start = std::max(0, -in_x_origin);
const int32_t filter_x_end =
std::min(params.filter_width, input_width - in_x_origin);
const int32_t filter_y_start = std::max(0, -in_y_origin);
const int32_t filter_y_end =
std::min(params.filter_height, input_height - in_y_origin);
float max = std::numeric_limits<float>::lowest();
int32_t max_x = 0;
int32_t max_y = 0;
for (int32_t filter_y = filter_y_start; filter_y < filter_y_end;
++filter_y) {
for (int32_t filter_x = filter_x_start; filter_x < filter_x_end;
++filter_x) {
const int32_t in_x = in_x_origin + filter_x;
const int32_t in_y = in_y_origin + filter_y;
float cur =
input_data[Offset(input_shape, batch, in_y, in_x, channel)];
if (cur > max) {
max = cur;
max_x = in_x;
max_y = in_y;
}
}
}
int32_t output_idx =
Offset(output_shape, batch, out_y, out_x, channel);
output_data[output_idx] = ActivationFunctionWithMinMax(
max, params.float_activation_min, params.float_activation_max);
indices_data[output_idx] =
(max_y * input_width + max_x) * depth + channel;
}
}
}
}
}
}
constexpr int kDataInputTensor = 0;
constexpr int kDataOutputTensor = 0;
constexpr int kIndicesOutputTensor = 1;
constexpr const char kIncludeBatchStr[] = "include_batch_in_index";
constexpr const char kPoolSizeStr[] = "ksize";
constexpr const char kStridesStr[] = "strides";
constexpr const char kPaddingStr[] = "padding";
constexpr const char kPaddingSameStr[] = "SAME";
constexpr const char kPaddingValidStr[] = "VALID";
struct OpData {
TfLitePoolParams params;
bool include_batch_in_index;
};
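// Parses the flexbuffer custom options using the keys declared above:
// "include_batch_in_index", "padding" ("SAME" or "VALID"), and NHWC-style
// 4-element "ksize" and "strides" vectors whose batch and channel entries
// must be 1.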
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
const flexbuffers::Map& m =
flexbuffers::GetRoot(reinterpret_cast<const uint8_t*>(buffer), length)
.AsMap();
OpData* op_data = new OpData;
op_data->params.computed.padding = TfLitePaddingValues{0, 0, 0, 0};
op_data->include_batch_in_index = m[kIncludeBatchStr].AsBool();
op_data->params.activation = kTfLiteActNone;
const std::string padding = m[kPaddingStr].AsString().str();
if (padding == kPaddingValidStr) {
op_data->params.padding = kTfLitePaddingValid;
} else if (padding == kPaddingSameStr) {
op_data->params.padding = kTfLitePaddingSame;
} else {
op_data->params.padding = kTfLitePaddingUnknown;
}
const auto pool_size = m[kPoolSizeStr].AsTypedVector();
TFLITE_CHECK_EQ(pool_size.size(), 4);
TFLITE_CHECK_EQ(pool_size[0].AsInt32(), 1);
TFLITE_CHECK_EQ(pool_size[3].AsInt32(), 1);
op_data->params.filter_height = pool_size[1].AsInt32();
op_data->params.filter_width = pool_size[2].AsInt32();
const auto strides = m[kStridesStr].AsTypedVector();
TFLITE_CHECK_EQ(strides.size(), 4);
TFLITE_CHECK_EQ(strides[0].AsInt32(), 1);
TFLITE_CHECK_EQ(strides[3].AsInt32(), 1);
op_data->params.stride_height = strides[1].AsInt32();
op_data->params.stride_width = strides[2].AsInt32();
return op_data;
}
void Free(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<OpData*>(buffer);
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 2);
TfLiteTensor *output, *indices;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kDataOutputTensor, &output));
TF_LITE_ENSURE_OK(
context, GetOutputSafe(context, node, kIndicesOutputTensor, &indices));
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kDataInputTensor, &input));
TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4);
TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32);
TF_LITE_ENSURE(context, indices->type == kTfLiteInt32);
TF_LITE_ENSURE(context, op_data->params.padding != kTfLitePaddingUnknown);
TF_LITE_ENSURE_MSG(
context, !op_data->include_batch_in_index,
"Include batch dimension in flattened index is not yet supported.");
int batches = input->dims->data[0];
int height = input->dims->data[1];
int width = input->dims->data[2];
int channels_out = input->dims->data[3];
int out_width, out_height;
op_data->params.computed.padding = ComputePaddingHeightWidth(
op_data->params.stride_height, op_data->params.stride_width, 1, 1, height,
width, op_data->params.filter_height, op_data->params.filter_width,
op_data->params.padding, &out_height, &out_width);
TfLiteIntArray* output_size = TfLiteIntArrayCreate(4);
output_size->data[0] = batches;
output_size->data[1] = out_height;
output_size->data[2] = out_width;
output_size->data[3] = channels_out;
TfLiteIntArray* indices_size = TfLiteIntArrayCopy(output_size);
TF_LITE_ENSURE_STATUS(context->ResizeTensor(context, indices, indices_size));
return context->ResizeTensor(context, output, output_size);
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
float activation_min, activation_max;
CalculateActivationRange(op_data->params.activation, &activation_min,
&activation_max);
tflite::PoolParams op_params;
op_params.stride_height = op_data->params.stride_height;
op_params.stride_width = op_data->params.stride_width;
op_params.filter_height = op_data->params.filter_height;
op_params.filter_width = op_data->params.filter_width;
op_params.padding_values.height = op_data->params.computed.padding.height;
op_params.padding_values.width = op_data->params.computed.padding.width;
op_params.float_activation_min = activation_min;
op_params.float_activation_max = activation_max;
TfLiteTensor *output, *indices;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kDataOutputTensor, &output));
TF_LITE_ENSURE_OK(
context, GetOutputSafe(context, node, kIndicesOutputTensor, &indices));
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kDataInputTensor, &input));
switch (input->type) {
case kTfLiteFloat32:
MaxPool<float>(op_params, GetTensorShape(input), GetTensorShape(output),
GetTensorData<float>(input), GetTensorData<float>(output),
GetTensorData<int32_t>(indices));
break;
default:
TF_LITE_KERNEL_LOG(context, "Type %s not currently supported.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
return kTfLiteOk;
}
}
TfLiteRegistration* RegisterMaxPoolWithArgmax() {
static TfLiteRegistration r = {
max_pool_with_argmax::Init, max_pool_with_argmax::Free,
max_pool_with_argmax::Prepare, max_pool_with_argmax::Eval};
return &r;
}
TfLiteRegistration* Register_MAX_POOL_WITH_ARGMAX() {
return RegisterMaxPoolWithArgmax();
}
}
}
} | #include <cstdint>
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/kernels/perception/perception_ops.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace ops {
namespace custom {
namespace {
using testing::ElementsAreArray;
class MaxpoolingWithArgMaxOpModel : public SingleOpModel {
public:
MaxpoolingWithArgMaxOpModel(const TensorData& input, int stride_height,
int stride_width, int filter_height,
int filter_width, TfLitePadding padding,
const TensorData& output,
const TensorData& indices) {
input_ = AddInput(input);
output_ = AddOutput(output);
indices_ = AddOutput(indices);
std::vector<uint8_t> custom_option = CreateCustomOptions(
stride_height, stride_width, filter_height, filter_width, padding);
SetCustomOp("MaxPoolWithArgmax", custom_option, RegisterMaxPoolWithArgmax);
BuildInterpreter({GetShape(input_)});
}
void SetInput(const std::vector<float>& data) {
PopulateTensor(input_, data);
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
std::vector<int32_t> GetIndices() { return ExtractVector<int32_t>(indices_); }
std::vector<int> GetIndicesShape() { return GetTensorShape(indices_); }
protected:
int input_;
int output_;
int indices_;
private:
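  // Builds the flexbuffer options map consumed by the kernel's Init, using
  // the same keys the implementation reads: "include_batch_in_index",
  // "padding", "ksize", and "strides".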
std::vector<uint8_t> CreateCustomOptions(int stride_height, int stride_width,
int filter_height, int filter_width,
TfLitePadding padding) {
auto flex_builder = std::make_unique<flexbuffers::Builder>();
size_t map_start = flex_builder->StartMap();
flex_builder->Bool("include_batch_in_index", false);
if (padding == kTfLitePaddingValid) {
flex_builder->String("padding", "VALID");
} else {
flex_builder->String("padding", "SAME");
}
auto start = flex_builder->StartVector("ksize");
flex_builder->Add(1);
flex_builder->Add(filter_height);
flex_builder->Add(filter_width);
flex_builder->Add(1);
flex_builder->EndVector(start, true, false);
auto strides_start = flex_builder->StartVector("strides");
flex_builder->Add(1);
flex_builder->Add(stride_height);
flex_builder->Add(stride_width);
flex_builder->Add(1);
flex_builder->EndVector(strides_start, true, false);
flex_builder->EndMap(map_start);
flex_builder->Finish();
return flex_builder->GetBuffer();
}
};
TEST(MaxpoolWithArgMaxTest, UnsupportedInt64Test) {
EXPECT_DEATH_IF_SUPPORTED(MaxpoolingWithArgMaxOpModel model(
{TensorType_FLOAT32, {1, 2, 4, 1}},
2, 2,
2, 2,
kTfLitePaddingSame,
{TensorType_FLOAT32, {}},
{TensorType_INT64, {}});
, "indices->type == kTfLiteInt32 was not true.");
}
TEST(MaxpoolWithArgMaxTest, SimpleTest) {
MaxpoolingWithArgMaxOpModel model(
{TensorType_FLOAT32, {1, 2, 4, 1}},
2, 2,
2, 2,
kTfLitePaddingSame,
{TensorType_FLOAT32, {}},
{TensorType_INT32, {}});
model.SetInput({0, 13, 2, 0, 0, 1, 4, 0});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 1, 2, 1}));
EXPECT_THAT(model.GetOutput(), ElementsAreArray({13, 4}));
EXPECT_THAT(model.GetIndicesShape(), ElementsAreArray({1, 1, 2, 1}));
EXPECT_THAT(model.GetIndices(), ElementsAreArray({1, 6}));
}
TEST(MaxpoolWithArgMaxTest, Strides2x1Test) {
MaxpoolingWithArgMaxOpModel model(
{TensorType_FLOAT32, {1, 4, 2, 2}},
      /*stride_height=*/2, /*stride_width=*/1,
      /*filter_height=*/2, /*filter_width=*/2,
kTfLitePaddingSame,
{TensorType_FLOAT32, {}},
{TensorType_INT32, {}});
model.SetInput({1, 0, 0, 2, 3, 0, 0, 4, 5, 0, 0, 6, 7, 0, 0, 8});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 2, 2, 2}));
EXPECT_THAT(model.GetOutput(), ElementsAreArray({3, 4, 0, 4, 7, 8, 0, 8}));
EXPECT_THAT(model.GetIndicesShape(), ElementsAreArray({1, 2, 2, 2}));
EXPECT_THAT(model.GetIndices(),
ElementsAreArray({4, 7, 2, 7, 12, 15, 10, 15}));
}
TEST(MaxpoolWithArgMaxTest, Strides2x2Test) {
MaxpoolingWithArgMaxOpModel model(
{TensorType_FLOAT32, {1, 4, 8, 1}},
2, 2,
2, 2,
kTfLitePaddingSame,
{TensorType_FLOAT32, {}},
{TensorType_INT32, {}});
model.SetInput({1, 0, 0, 0, 0, 2, 0, 0, 0, 0, 3, 0, 0, 4, 0, 0,
0, 0, 0, 5, 6, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 8});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 2, 4, 1}));
EXPECT_THAT(model.GetOutput(), ElementsAreArray({1, 3, 4, 0, 0, 7, 6, 8}));
EXPECT_THAT(model.GetIndicesShape(), ElementsAreArray({1, 2, 4, 1}));
EXPECT_THAT(model.GetIndices(),
ElementsAreArray({0, 10, 13, 6, 16, 27, 20, 31}));
}
TEST(MaxpoolWithArgMaxTest, Strides2x2UnfitTest) {
MaxpoolingWithArgMaxOpModel model(
{TensorType_FLOAT32, {1, 4, 7, 1}},
2, 2,
2, 2,
kTfLitePaddingSame,
{TensorType_FLOAT32, {}},
{TensorType_INT32, {}});
model.SetInput({1, 0, 0, 0, 0, 2, 0, 0, 0, 0, 3, 0, 0, 4,
0, 0, 0, 0, 0, 5, 6, 0, 0, 0, 0, 0, 0, 7});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 2, 4, 1}));
EXPECT_THAT(model.GetOutput(), ElementsAreArray({1, 3, 2, 4, 0, 0, 5, 7}));
EXPECT_THAT(model.GetIndicesShape(), ElementsAreArray({1, 2, 4, 1}));
EXPECT_THAT(model.GetIndices(),
ElementsAreArray({0, 10, 5, 13, 14, 16, 19, 27}));
}
TEST(MaxpoolWithArgMaxTest, PaddingValidTest) {
MaxpoolingWithArgMaxOpModel model(
{TensorType_FLOAT32, {1, 4, 5, 1}},
      /*stride_height=*/2, /*stride_width=*/2,
      /*filter_height=*/2, /*filter_width=*/3,
kTfLitePaddingValid,
{TensorType_FLOAT32, {}},
{TensorType_INT32, {}});
model.SetInput(
{0, 0, 0, 0, 0, 0, 7, 0, 0, 10, 0, 0, 0, 0, 0, 0, 20, 0, 0, 19});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 2, 2, 1}));
EXPECT_THAT(model.GetOutput(), ElementsAreArray({7, 10, 20, 19}));
EXPECT_THAT(model.GetIndicesShape(), ElementsAreArray({1, 2, 2, 1}));
EXPECT_THAT(model.GetIndices(), ElementsAreArray({6, 9, 16, 19}));
}
TEST(MaxpoolWithArgMaxTest, PaddingValidUnfitTest) {
MaxpoolingWithArgMaxOpModel model(
{TensorType_FLOAT32, {1, 4, 6, 1}},
      /*stride_height=*/2, /*stride_width=*/2,
      /*filter_height=*/2, /*filter_width=*/3,
kTfLitePaddingValid,
{TensorType_FLOAT32, {}},
{TensorType_INT32, {}});
model.SetInput({0, 0, 0, 0, 0, 0, 7, 0, 0, 10, 0, 0,
0, 0, 0, 0, 20, 0, 0, 19, 24, 1, 2, 44});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 2, 2, 1}));
EXPECT_THAT(model.GetOutput(), ElementsAreArray({7, 10, 24, 24}));
EXPECT_THAT(model.GetIndicesShape(), ElementsAreArray({1, 2, 2, 1}));
EXPECT_THAT(model.GetIndices(), ElementsAreArray({6, 9, 20, 20}));
}
TEST(MaxpoolWithArgMaxTest, InputWithBatchTest) {
MaxpoolingWithArgMaxOpModel model(
{TensorType_FLOAT32, {2, 4, 12, 2}},
      /*stride_height=*/2, /*stride_width=*/3,
      /*filter_height=*/2, /*filter_width=*/2,
kTfLitePaddingSame,
{TensorType_FLOAT32, {}},
{TensorType_INT32, {}});
model.SetInput({0, 0, 1, 0, 0, 0, 0, 0, 3, 4, 0, 0, 5, 0, 0, 6,
0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 8, 9, 0, 0, 10,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 0,
0, 16, 0, 0, 0, 0, 0, 0, 11, 0, 0, 12, 0, 0, 0, 14,
13, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
17, 18, 0, 0, 0, 30, 0, 20, 0, 0, 0, 0, 0, 0, 21, 0,
0, 0, 0, 0, 0, 24, 0, 0, 0, 0, 0, 0, 0, 0, 19, 0,
0, 0, 0, 22, 0, 0, 0, 0, 0, 0, 23, 0, 0, 0, 0, 0,
0, 0, 27, 28, 0, 0, 0, 0, 29, 0, 0, 0, 0, 0, 0, 32,
0, 0, 0, 0, 25, 26, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 31, 0, 0, 0, 0, 0, 0, 0, 0, 0});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({2, 2, 4, 2}));
EXPECT_THAT(model.GetOutput(),
ElementsAreArray({1, 0, 3, 4, 5, 6, 9, 8, 11, 12, 13,
14, 15, 0, 0, 0, 17, 18, 19, 20, 21, 0,
23, 24, 27, 28, 29, 0, 31, 32, 25, 26}));
EXPECT_THAT(model.GetIndicesShape(), ElementsAreArray({2, 2, 4, 2}));
EXPECT_THAT(model.GetIndices(),
ElementsAreArray({2, 1, 8, 9, 12, 15, 44, 43, 72, 75, 80,
79, 62, 61, 66, 67, 0, 1, 30, 7, 14, 13,
42, 21, 50, 51, 56, 55, 86, 63, 68, 69}));
}
TEST(MaxpoolWithArgMaxTest, InputWithBatchAndPaddingValidTest) {
MaxpoolingWithArgMaxOpModel model(
{TensorType_FLOAT32, {2, 4, 11, 2}},
      /*stride_height=*/2, /*stride_width=*/3,
      /*filter_height=*/2, /*filter_width=*/2,
kTfLitePaddingValid,
{TensorType_FLOAT32, {}},
{TensorType_INT32, {}});
model.SetInput({0, 0, 1, 0, 0, 0, 0, 0, 3, 4, 0, 0, 5, 0, 0, 6,
0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 8, 9, 0, 0, 10,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 0,
0, 16, 0, 0, 0, 0, 0, 0, 11, 0, 0, 12, 0, 0, 0, 14,
13, 0, 0, 0, 0, 0, 0, 0, 17, 18, 0, 0, 0, 30, 0, 20,
0, 0, 0, 0, 0, 0, 21, 0, 0, 0, 0, 0, 0, 24, 0, 0,
0, 0, 0, 0, 0, 0, 19, 0, 0, 0, 0, 22, 0, 0, 0, 0,
0, 0, 23, 0, 0, 0, 0, 0, 0, 0, 27, 28, 0, 0, 0, 0,
29, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 25, 26, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 31, 0});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({2, 2, 4, 2}));
EXPECT_THAT(model.GetOutput(),
ElementsAreArray({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
23, 24, 25, 26, 27, 28, 29, 0, 31, 32}));
EXPECT_THAT(model.GetIndicesShape(), ElementsAreArray({2, 2, 4, 2}));
EXPECT_THAT(model.GetIndices(),
ElementsAreArray({2, 23, 8, 9, 12, 15, 40, 43, 44, 47, 72,
75, 80, 79, 62, 65, 0, 1, 30, 7, 14, 35,
42, 21, 68, 69, 50, 51, 56, 57, 86, 63}));
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/perception/max_pool_with_argmax.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/perception/max_pool_with_argmax_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2b79fb86-bfa2-4e02-9747-fa99dd1a5572 | cpp | tensorflow/tensorflow | max_unpooling_2d | tensorflow/lite/kernels/perception/max_unpooling_2d.cc | tensorflow/lite/kernels/perception/max_unpooling_2d_test.cc | #include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/runtime_shape.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace custom {
namespace max_unpooling_2d {
constexpr int kDataInputTensor = 0;
constexpr int kIndicesTensor = 1;
constexpr int kOutputTensor = 0;
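// Inverse of max pooling with argmax: the output is zero-initialized and each
// input value is scattered to the position recorded in `indices_data`. The
// indices are flattened within a batch, so the write offset is
// batch * batch_stride + idx.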
inline void MaxUnpooling(const RuntimeShape& input_shape,
const float* input_data, const int32_t* indices_data,
const RuntimeShape& output_shape, float* output_data) {
std::memset(output_data, 0, output_shape.FlatSize() * sizeof(float));
const int batches = MatchingDim(input_shape, 0, output_shape, 0);
const int depth = MatchingDim(input_shape, 3, output_shape, 3);
const int batch_stride =
output_shape.Dims(1) * output_shape.Dims(2) * output_shape.Dims(3);
for (int batch = 0; batch < batches; ++batch) {
for (int in_y = 0; in_y < input_shape.Dims(1); ++in_y) {
for (int in_x = 0; in_x < input_shape.Dims(2); ++in_x) {
for (int channel = 0; channel < depth; ++channel) {
const auto input_offset =
Offset(input_shape, batch, in_y, in_x, channel);
int idx = indices_data[input_offset];
output_data[batch * batch_stride + idx] = input_data[input_offset];
}
}
}
}
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
auto* params =
reinterpret_cast<const TfLitePoolParams*>(node->custom_initial_data);
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TF_LITE_ENSURE(context, output != nullptr);
const TfLiteTensor* input = GetInput(context, node, kDataInputTensor);
TF_LITE_ENSURE(context, input != nullptr);
const TfLiteTensor* indices = GetInput(context, node, kIndicesTensor);
TF_LITE_ENSURE(context, indices != nullptr);
TF_LITE_ENSURE_EQ(context, NumDimensions(indices), 4);
TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4);
TF_LITE_ENSURE_EQ(context, input->type, kTfLiteFloat32);
TF_LITE_ENSURE_EQ(context, output->type, kTfLiteFloat32);
TF_LITE_ENSURE_EQ(context, indices->type, kTfLiteInt32);
TF_LITE_ENSURE(context, params->padding != kTfLitePaddingUnknown);
const RuntimeShape input_shape = GetTensorShape(input);
const RuntimeShape indices_shape = GetTensorShape(indices);
TF_LITE_ENSURE_MSG(
context, input_shape.DimensionsCount() == indices_shape.DimensionsCount(),
"Input and indices must have the same shape.");
for (int i = 0; i < input_shape.DimensionsCount(); ++i) {
TF_LITE_ENSURE_MSG(context, input_shape.Dims(i) == indices_shape.Dims(i),
"Input and indices must have the same shape.");
}
int batches = input->dims->data[0];
int height = input->dims->data[1];
int width = input->dims->data[2];
int channels_out = input->dims->data[3];
int out_width, out_height;
if (params->padding == kTfLitePaddingSame) {
out_width = width * params->stride_width;
out_height = height * params->stride_height;
} else {
out_width = (width - 1) * params->stride_width + params->filter_width;
out_height = (height - 1) * params->stride_height + params->filter_height;
}
TfLiteIntArray* output_size = TfLiteIntArrayCreate(4);
output_size->data[0] = batches;
output_size->data[1] = out_height;
output_size->data[2] = out_width;
output_size->data[3] = channels_out;
return context->ResizeTensor(context, output, output_size);
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TF_LITE_ENSURE(context, output != nullptr);
const TfLiteTensor* input = GetInput(context, node, kDataInputTensor);
TF_LITE_ENSURE(context, input != nullptr);
const TfLiteTensor* indices = GetInput(context, node, kIndicesTensor);
TF_LITE_ENSURE(context, indices != nullptr);
MaxUnpooling(GetTensorShape(input), GetTensorData<float>(input),
GetTensorData<int32_t>(indices), GetTensorShape(output),
GetTensorData<float>(output));
return kTfLiteOk;
}
}
TfLiteRegistration* RegisterMaxUnpooling2D() {
  static TfLiteRegistration reg = {/*init=*/nullptr,
                                   /*free=*/nullptr, max_unpooling_2d::Prepare,
                                   max_unpooling_2d::Eval};
  return &reg;
}
TfLiteRegistration* Register_MAX_UNPOOLING2D() {
return RegisterMaxUnpooling2D();
}
}
}
} | #include <cstdint>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/kernels/perception/perception_ops.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace ops {
namespace custom {
namespace {
using testing::ElementsAreArray;
class MaxUnpoolingOpModel : public SingleOpModel {
public:
MaxUnpoolingOpModel(const TensorData& input, const TensorData& indices,
int stride_height, int stride_width, int filter_height,
int filter_width, TfLitePadding padding,
const TensorData& output) {
input_ = AddInput(input);
indices_ = AddInput(indices);
output_ = AddOutput(output);
TfLitePoolParams params{padding, stride_width, stride_height,
filter_width, filter_height, kTfLiteActNone};
uint8_t* params_ptr = reinterpret_cast<uint8_t*>(¶ms);
std::vector<uint8_t> custom_option;
custom_option.assign(params_ptr, params_ptr + sizeof(TfLitePoolParams));
SetCustomOp("MaxUnpooling2D", custom_option, RegisterMaxUnpooling2D);
BuildInterpreter({GetShape(input_), GetShape(indices_)});
}
void SetInput(const std::vector<float>& data) {
PopulateTensor(input_, data);
}
void SetIndices(const std::vector<int32_t>& data) {
PopulateTensor(indices_, data);
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
protected:
int input_;
int indices_;
int output_;
};
TEST(MaxUnpoolingOpTest, DimensionMisMatchTest) {
EXPECT_DEATH(MaxUnpoolingOpModel model(
{TensorType_FLOAT32, {1, 1, 2, 1}},
{TensorType_INT32, {1, 2, 2, 1}},
2, 2,
2, 2,
kTfLitePaddingSame,
{TensorType_FLOAT32, {}}),
"Input and indices must have the same shape.");
}
TEST(MaxUnpoolingOpTest, SimpleTest) {
MaxUnpoolingOpModel model(
{TensorType_FLOAT32, {1, 1, 2, 1}},
{TensorType_INT32, {1, 1, 2, 1}},
2, 2,
2, 2,
kTfLitePaddingSame,
{TensorType_FLOAT32, {}});
model.SetInput({13, 4});
model.SetIndices({1, 6});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 2, 4, 1}));
EXPECT_THAT(model.GetOutput(), ElementsAreArray({0, 13, 0, 0, 0, 0, 4, 0}));
}
TEST(MaxUnpoolingOpTest, Strides2x1Test) {
constexpr int kInputB = 1;
constexpr int kInputH = 2;
constexpr int kInputW = 2;
constexpr int kInputC = 2;
std::vector<float> input_data{1, 2, 3, 4, 5, 6, 7, 8};
std::vector<int32_t> indices_data{0, 3, 4, 7, 8, 11, 12, 15};
MaxUnpoolingOpModel model(
{TensorType_FLOAT32, {kInputB, kInputH, kInputW, kInputC}},
{TensorType_INT32, {kInputB, kInputH, kInputW, kInputC}},
      /*stride_height=*/2, /*stride_width=*/1,
      /*filter_height=*/2, /*filter_width=*/2,
kTfLitePaddingSame,
{TensorType_FLOAT32, {}});
model.SetInput(input_data);
model.SetIndices(indices_data);
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 4, 2, 2}));
EXPECT_THAT(model.GetOutput(), ElementsAreArray({1, 0, 0, 2, 3, 0, 0, 4, 5, 0,
0, 6, 7, 0, 0, 8}));
}
TEST(MaxUnpoolingOpTest, Strides2x2Test) {
constexpr int kInputB = 1;
constexpr int kInputH = 2;
constexpr int kInputW = 4;
constexpr int kInputC = 1;
std::vector<float> input_data{1, 2, 3, 4, 5, 6, 7, 8};
std::vector<int32_t> indices_data{0, 5, 10, 13, 19, 20, 27, 31};
MaxUnpoolingOpModel model(
{TensorType_FLOAT32, {kInputB, kInputH, kInputW, kInputC}},
{TensorType_INT32, {kInputB, kInputH, kInputW, kInputC}},
2, 2,
2, 2,
kTfLitePaddingSame,
{TensorType_FLOAT32, {}});
model.SetInput(input_data);
model.SetIndices(indices_data);
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 4, 8, 1}));
EXPECT_THAT(
model.GetOutput(),
ElementsAreArray({1, 0, 0, 0, 0, 2, 0, 0, 0, 0, 3, 0, 0, 4, 0, 0,
0, 0, 0, 5, 6, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 8}));
}
TEST(MaxUnpoolingOpTest, PaddingValidTest) {
constexpr int kInputB = 1;
constexpr int kInputH = 2;
constexpr int kInputW = 2;
constexpr int kInputC = 1;
std::vector<float> input_data{7, 10, 20, 19};
std::vector<int32_t> indices_data{6, 9, 16, 19};
MaxUnpoolingOpModel model(
{TensorType_FLOAT32, {kInputB, kInputH, kInputW, kInputC}},
{TensorType_INT32, {kInputB, kInputH, kInputW, kInputC}},
      /*stride_height=*/2, /*stride_width=*/2,
      /*filter_height=*/2, /*filter_width=*/3,
kTfLitePaddingValid,
{TensorType_FLOAT32, {}});
model.SetInput(input_data);
model.SetIndices(indices_data);
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 4, 5, 1}));
EXPECT_THAT(model.GetOutput(),
ElementsAreArray({0, 0, 0, 0, 0, 0, 7, 0, 0, 10,
0, 0, 0, 0, 0, 0, 20, 0, 0, 19}));
}
TEST(MaxUnpoolingOpTest, InputWithBatchTest) {
constexpr int kInputB = 2;
constexpr int kInputH = 2;
constexpr int kInputW = 4;
constexpr int kInputC = 2;
std::vector<float> input_data{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
23, 24, 25, 26, 27, 28, 29, 30, 31, 32};
std::vector<int32_t> indices_data{2, 23, 8, 9, 12, 15, 40, 43, 44, 47, 72,
75, 80, 79, 62, 65, 0, 1, 30, 7, 14, 35,
42, 21, 68, 69, 50, 51, 56, 5, 86, 63};
MaxUnpoolingOpModel model(
{TensorType_FLOAT32, {kInputB, kInputH, kInputW, kInputC}},
{TensorType_INT32, {kInputB, kInputH, kInputW, kInputC}},
      /*stride_height=*/2, /*stride_width=*/3,
      /*filter_height=*/2, /*filter_width=*/2,
kTfLitePaddingSame,
{TensorType_FLOAT32, {}});
model.SetInput(input_data);
model.SetIndices(indices_data);
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({2, 4, 12, 2}));
EXPECT_THAT(
model.GetOutput(),
ElementsAreArray(
{0, 0, 1, 0, 0, 0, 0, 0, 3, 4, 0, 0, 5, 0, 0, 6, 0, 0,
0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 7, 0, 0, 8, 9, 0, 0, 10, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 15, 0, 0, 16, 0, 0, 0, 0, 0, 0,
11, 0, 0, 12, 0, 0, 0, 14, 13, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 17, 18, 0, 0, 0, 30, 0, 20, 0, 0, 0, 0,
0, 0, 21, 0, 0, 0, 0, 0, 0, 24, 0, 0, 0, 0, 0, 0, 0, 0,
19, 0, 0, 0, 0, 22, 0, 0, 0, 0, 0, 0, 23, 0, 0, 0, 0, 0,
0, 0, 27, 28, 0, 0, 0, 0, 29, 0, 0, 0, 0, 0, 0, 32, 0, 0,
0, 0, 25, 26, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 31, 0, 0, 0, 0, 0, 0, 0, 0, 0}));
}
TEST(MaxUnpoolingOpTest, InputWithBatchAndPaddingValidTest) {
constexpr int kInputB = 2;
constexpr int kInputH = 2;
constexpr int kInputW = 4;
constexpr int kInputC = 2;
std::vector<float> input_data{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
23, 24, 25, 26, 27, 28, 29, 30, 31, 32};
std::vector<int32_t> indices_data{2, 23, 8, 9, 12, 15, 40, 43, 44, 47, 72,
75, 80, 79, 62, 65, 0, 1, 30, 7, 14, 35,
42, 21, 68, 69, 50, 51, 56, 5, 86, 63};
MaxUnpoolingOpModel model(
{TensorType_FLOAT32, {kInputB, kInputH, kInputW, kInputC}},
{TensorType_INT32, {kInputB, kInputH, kInputW, kInputC}},
      /*stride_height=*/2, /*stride_width=*/3,
      /*filter_height=*/2, /*filter_width=*/2,
kTfLitePaddingValid,
{TensorType_FLOAT32, {}});
model.SetInput(input_data);
model.SetIndices(indices_data);
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({2, 4, 11, 2}));
EXPECT_THAT(
model.GetOutput(),
ElementsAreArray(
{0, 0, 1, 0, 0, 0, 0, 0, 3, 4, 0, 0, 5, 0, 0, 6, 0, 0,
0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 7, 0, 0, 8, 9, 0, 0, 10, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 15, 0, 0, 16, 0, 0, 0, 0, 0, 0,
11, 0, 0, 12, 0, 0, 0, 14, 13, 0, 0, 0, 0, 0, 0, 0, 17, 18,
0, 0, 0, 30, 0, 20, 0, 0, 0, 0, 0, 0, 21, 0, 0, 0, 0, 0,
0, 24, 0, 0, 0, 0, 0, 0, 0, 0, 19, 0, 0, 0, 0, 22, 0, 0,
0, 0, 0, 0, 23, 0, 0, 0, 0, 0, 0, 0, 27, 28, 0, 0, 0, 0,
29, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 25, 26, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 31, 0}));
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/perception/max_unpooling_2d.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/perception/max_unpooling_2d_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
972db8a3-1645-43d4-94a7-46408a04c7bc | cpp | tensorflow/tensorflow | tflite_tensor_view | tensorflow/lite/kernels/shim/tflite_tensor_view.cc | tensorflow/lite/kernels/shim/tflite_tensor_view_test.cc | #include "tensorflow/lite/kernels/shim/tflite_tensor_view.h"
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/types/variant.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/shim/tensor_view.h"
#include "tensorflow/lite/string_util.h"
#include "tensorflow/lite/type_to_tflitetype.h"
#define CASE_FOR_DTYPE_GIVEN_CPP_DTYPE(TFLITE_DTYPE, CPP_DTYPE) \
case TFLITE_DTYPE: { \
using DType = typename CPP_DTYPE; \
return TfLiteTensorView(wrapped_tensor, DType()); \
}
#define CASE_FOR_DTYPE(TFLITE_DTYPE) \
CASE_FOR_DTYPE_GIVEN_CPP_DTYPE( \
TFLITE_DTYPE, ::tflite::TfLiteTypeToType<TFLITE_DTYPE>::Type)
namespace tflite {
namespace shim {
TfLiteTensorView::TfLiteTensorView(::TfLiteTensor *wrapped_tensor,
const ::tensorflow::tstring &dtype)
: TensorView(absl::Span<int>(wrapped_tensor->dims->data,
wrapped_tensor->dims->size),
nullptr, 0, dtype),
wrapped_tensor_(wrapped_tensor),
const_wrapped_tensor_(wrapped_tensor) {
InitForStringDType();
}
TfLiteTensorView::TfLiteTensorView(const ::TfLiteTensor *wrapped_tensor,
const ::tensorflow::tstring &dtype)
: TensorView(absl::Span<int>(wrapped_tensor->dims->data,
wrapped_tensor->dims->size),
nullptr, 0, dtype),
const_wrapped_tensor_(wrapped_tensor) {
InitForStringDType();
}
TfLiteTensorView::TfLiteTensorView(TfLiteTensorView &&o) noexcept
: TensorView(std::move(o)),
wrapped_tensor_(o.wrapped_tensor_),
const_wrapped_tensor_(o.const_wrapped_tensor_),
str_vec_(std::move(o.str_vec_)) {
}
TfLiteTensorView::TfLiteTensorView(const TfLiteTensorView &o)
: TensorView(o),
wrapped_tensor_(o.wrapped_tensor_),
const_wrapped_tensor_(o.const_wrapped_tensor_),
str_vec_(o.str_vec_) {
}
TfLiteTensorView &TfLiteTensorView::operator=(TfLiteTensorView &&o) noexcept {
wrapped_tensor_ = o.wrapped_tensor_;
const_wrapped_tensor_ = o.const_wrapped_tensor_;
str_vec_ = std::move(o.str_vec_);
TensorView::operator=(std::move(o));
return *this;
}
TfLiteTensorView &TfLiteTensorView::operator=(const TfLiteTensorView &o) {
if (&o == this) return *this;
TensorView::operator=(o);
wrapped_tensor_ = o.wrapped_tensor_;
const_wrapped_tensor_ = o.const_wrapped_tensor_;
str_vec_ = o.str_vec_;
return *this;
}
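// String tensors need extra bookkeeping: TFLite packs all strings of a tensor
// into a single buffer, so the view copies them out into a shared vector of
// tstring views here, and for mutable tensors the StringBuffer destructor
// below serializes that vector back into the tensor via DynamicBuffer.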
void TfLiteTensorView::InitForStringDType() {
if (str_vec_ == nullptr) {
str_vec_ = std::make_shared<StringBuffer>(this);
}
data_ = absl::Span<::tensorflow::tstring>(str_vec_->buffer);
}
TfLiteTensorView::StringBuffer::StringBuffer(TfLiteTensorView *t_view)
: wrapped_tensor(t_view->wrapped_tensor_) {
buffer.resize(NumElements(t_view->shape_));
const auto const_wrapped_tensor = t_view->const_wrapped_tensor_;
std::size_t str_count;
if (const_wrapped_tensor->data.raw == nullptr)
str_count = 0;
else
str_count = ::tflite::GetStringCount(const_wrapped_tensor);
for (int i = 0; i < str_count; ++i) {
const auto str_ref = ::tflite::GetString(const_wrapped_tensor, i);
buffer[i].assign_as_view(str_ref.str, str_ref.len);
}
}
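// On destruction the accumulated strings are serialized back into the wrapped
// tensor. This is skipped for views over const tensors, where wrapped_tensor
// stays null.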
TfLiteTensorView::StringBuffer::~StringBuffer() {
if (wrapped_tensor == nullptr) return;
tflite::DynamicBuffer buf;
for (const auto &s : buffer) buf.AddString(s.data(), s.length());
buf.WriteToTensor(wrapped_tensor, nullptr);
}
template <typename TfLiteTensorType>
absl::StatusOr<
typename MatchConstNess<TfLiteTensorType, TfLiteTensorView>::Type>
TfLiteTensorViewTemplatizedNew(TfLiteTensorType *wrapped_tensor) {
switch (wrapped_tensor->type) {
CASE_FOR_DTYPE(kTfLiteBool);
CASE_FOR_DTYPE(kTfLiteUInt8);
CASE_FOR_DTYPE(kTfLiteUInt64);
CASE_FOR_DTYPE(kTfLiteInt8);
CASE_FOR_DTYPE(kTfLiteInt16);
CASE_FOR_DTYPE(kTfLiteInt32);
CASE_FOR_DTYPE(kTfLiteInt64);
CASE_FOR_DTYPE(kTfLiteFloat32);
CASE_FOR_DTYPE(kTfLiteFloat64);
CASE_FOR_DTYPE_GIVEN_CPP_DTYPE(kTfLiteString, ::tensorflow::tstring);
default: {
return absl::UnimplementedError(
absl::StrCat("Unsupported dtype: ", wrapped_tensor->type));
}
}
}
template <>
absl::StatusOr<TfLiteTensorView> TensorView::New<::TfLiteTensor>(
::TfLiteTensor *wrapped_tensor) {
return TfLiteTensorViewTemplatizedNew(wrapped_tensor);
}
template <>
absl::StatusOr<const TfLiteTensorView> TensorView::New<const ::TfLiteTensor>(
const ::TfLiteTensor *wrapped_tensor) {
return TfLiteTensorViewTemplatizedNew(wrapped_tensor);
}
}
} | #include "tensorflow/lite/kernels/shim/tflite_tensor_view.h"
#include <cstdint>
#include <string>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/kernels/shim/test_util.h"
#include "tensorflow/lite/string_util.h"
namespace tflite {
namespace shim {
namespace {
using ::testing::Eq;
TEST(TfLiteTensorW, Bool) {
::tflite::Interpreter interpreter;
interpreter.AddTensors(1);
interpreter.AllocateTensors();
auto* tflite_tensor = interpreter.tensor(0);
ReallocDynamicTensor<bool>({3, 2}, tflite_tensor);
tflite_tensor->name = "test_bool";
auto owned_tflite_tensor = UniqueTfLiteTensor(tflite_tensor);
auto t_premove_or = TensorView::New(tflite_tensor);
ASSERT_TRUE(t_premove_or.ok()) << t_premove_or.status();
auto t = std::move(t_premove_or.value());
auto data = t.Data<bool>();
for (int32_t i = 0; i < 3 * 2; ++i) data[i] = (i % 5 == 0);
ASSERT_THAT(TfliteTensorDebugString(tflite_tensor),
Eq("[[1, 0], [0, 0], [0, 1]]"));
}
template <typename IntType>
void IntTest() {
::tflite::Interpreter interpreter;
interpreter.AddTensors(1);
interpreter.AllocateTensors();
auto* tflite_tensor = interpreter.tensor(0);
ReallocDynamicTensor<IntType>({3, 2}, tflite_tensor);
tflite_tensor->name = "test_int";
auto owned_tflite_tensor = UniqueTfLiteTensor(tflite_tensor);
auto t_premove_or = TensorView::New(tflite_tensor);
ASSERT_TRUE(t_premove_or.ok()) << t_premove_or.status();
auto t = std::move(t_premove_or.value());
auto data = t.Data<IntType>();
for (int32_t i = 0; i < 3 * 2; ++i) data[i] = i;
ASSERT_THAT(TfliteTensorDebugString(tflite_tensor),
Eq("[[0, 1], [2, 3], [4, 5]]"));
}
TEST(TfLiteTensorW, Int8) { IntTest<int8_t>(); }
TEST(TfLiteTensorW, UInt8) { IntTest<uint8_t>(); }
TEST(TfLiteTensorW, Int16) { IntTest<int16_t>(); }
TEST(TfLiteTensorW, Int32) { IntTest<int32_t>(); }
TEST(TfLiteTensorW, Int64) { IntTest<int64_t>(); }
template <typename FloatType>
void FloatTest() {
::tflite::Interpreter interpreter;
interpreter.AddTensors(1);
interpreter.AllocateTensors();
auto* tflite_tensor = interpreter.tensor(0);
ReallocDynamicTensor<FloatType>({3, 2}, tflite_tensor);
tflite_tensor->name = "test_float";
auto owned_tflite_tensor = UniqueTfLiteTensor(tflite_tensor);
auto t_or = TensorView::New(tflite_tensor);
ASSERT_TRUE(t_or.ok()) << t_or.status();
auto& t = t_or.value();
auto data = t.Data<FloatType>();
for (int32_t i = 0; i < 3 * 2; ++i) data[i] = static_cast<FloatType>(i) / 2.;
ASSERT_THAT(TfliteTensorDebugString(tflite_tensor),
Eq("[[0, 0.5], [1, 1.5], [2, 2.5]]"));
}
TEST(TfLiteTensorW, Float) { FloatTest<float>(); }
TEST(TfLiteTensorW, Double) { FloatTest<double>(); }
TEST(TfLiteTensorW, Str) {
::tflite::Interpreter interpreter;
interpreter.AddTensors(1);
interpreter.AllocateTensors();
auto* tflite_tensor = interpreter.tensor(0);
ReallocDynamicTensor<std::string>({3, 2}, tflite_tensor);
tflite_tensor->name = "test_str";
auto owned_tflite_tensor = UniqueTfLiteTensor(tflite_tensor);
{
auto t_or = TensorView::New(tflite_tensor);
ASSERT_TRUE(t_or.ok()) << t_or.status();
auto& t = t_or.value();
auto t_mat = t.As<::tensorflow::tstring, 2>();
t.Data<::tensorflow::tstring>()[0] = "a";
t.Data<::tensorflow::tstring>()[1] = "bc";
t_mat(1, 0) = "def";
t.Data<::tensorflow::tstring>()[3] = "g";
t.Data<::tensorflow::tstring>()[4] = "";
t_mat(2, 1) = "hi";
}
{
auto t_or = TensorView::New(tflite_tensor);
ASSERT_TRUE(t_or.ok()) << t_or.status();
auto& t = t_or.value();
EXPECT_THAT(t.Data<::tensorflow::tstring>(),
::testing::ElementsAre("a", "bc", "def", "g", "", "hi"));
}
const auto const_tflite_tensor = tflite_tensor;
{
const auto t_or = TensorView::New(const_tflite_tensor);
ASSERT_TRUE(t_or.ok()) << t_or.status();
const auto& t = t_or.value();
EXPECT_THAT(t.Data<::tensorflow::tstring>(),
::testing::ElementsAre("a", "bc", "def", "g", "", "hi"));
}
EXPECT_THAT(TfliteTensorDebugString(tflite_tensor),
Eq("[[a, bc], [def, g], [, hi]]"));
}
TEST(TfLiteTensorW, EmptyStr) {
::tflite::Interpreter interpreter;
interpreter.AddTensors(1);
interpreter.AllocateTensors();
auto* tflite_tensor = interpreter.tensor(0);
ReallocDynamicTensor<std::string>({0}, tflite_tensor);
tflite_tensor->name = "test_str";
auto owned_tflite_tensor = UniqueTfLiteTensor(tflite_tensor);
{
auto t_or = TensorView::New(tflite_tensor);
ASSERT_TRUE(t_or.ok()) << t_or.status();
}
EXPECT_THAT(GetStringCount(tflite_tensor), Eq(0));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/shim/tflite_tensor_view.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/shim/tflite_tensor_view_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
fdfa34b6-8776-4034-b82f-679216ac2e9b | cpp | tensorflow/tensorflow | tf_tensor_view | tensorflow/lite/kernels/shim/tf_tensor_view.cc | tensorflow/lite/kernels/shim/tf_tensor_view_test.cc | #include "tensorflow/lite/kernels/shim/tf_tensor_view.h"
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "tensorflow/core/framework/types.pb.h"
#define CASE_FOR_DTYPE_GIVEN_CPP_DTYPE(TF_DTYPE, CPP_DTYPE) \
case TF_DTYPE: { \
using DType = typename CPP_DTYPE; \
return TfTensorView(wrapped_tensor, DType()); \
}
#define CASE_FOR_DTYPE(TF_DTYPE) \
CASE_FOR_DTYPE_GIVEN_CPP_DTYPE(TF_DTYPE, \
::tensorflow::EnumToDataType<TF_DTYPE>::Type)
namespace tflite {
namespace shim {
TfTensorView::TfTensorView(TfTensorView &&o) noexcept
: TensorView(std::move(o)), shape_data_(std::move(o.shape_data_)) {
shape_ = absl::Span<int>(shape_data_);
}
TfTensorView::TfTensorView(const TfTensorView &o)
: TensorView(o), shape_data_(o.shape_data_) {
shape_ = absl::Span<int>(shape_data_);
}
TfTensorView &TfTensorView::operator=(TfTensorView &&o) noexcept {
shape_data_ = std::move(o.shape_data_);
TensorView::operator=(std::move(o));
shape_ = absl::Span<int>(shape_data_);
return *this;
}
TfTensorView &TfTensorView::operator=(const TfTensorView &o) {
if (&o == this) return *this;
TensorView::operator=(o);
shape_data_ = o.shape_data_;
shape_ = absl::Span<int>(shape_data_);
return *this;
}
template <typename TfTensorType>
absl::StatusOr<typename MatchConstNess<TfTensorType, TfTensorView>::Type>
TfTensorViewTemplatizedNew(TfTensorType *wrapped_tensor) {
switch (wrapped_tensor->dtype()) {
CASE_FOR_DTYPE(::tensorflow::DT_BOOL);
CASE_FOR_DTYPE(::tensorflow::DT_UINT8);
CASE_FOR_DTYPE(::tensorflow::DT_UINT64);
CASE_FOR_DTYPE(::tensorflow::DT_INT8);
CASE_FOR_DTYPE(::tensorflow::DT_INT16);
CASE_FOR_DTYPE(::tensorflow::DT_INT32);
CASE_FOR_DTYPE_GIVEN_CPP_DTYPE(::tensorflow::DT_INT64, std::int64_t);
CASE_FOR_DTYPE(::tensorflow::DT_FLOAT);
CASE_FOR_DTYPE(::tensorflow::DT_DOUBLE);
CASE_FOR_DTYPE(::tensorflow::DT_STRING);
default: {
return absl::UnimplementedError(
absl::StrCat("Unsupported data type: ", wrapped_tensor->dtype()));
}
}
}
template <>
absl::StatusOr<TfTensorView> TensorView::New<::tensorflow::Tensor>(
::tensorflow::Tensor *wrapped_tensor) {
return TfTensorViewTemplatizedNew(wrapped_tensor);
}
template <>
absl::StatusOr<const TfTensorView> TensorView::New<const ::tensorflow::Tensor>(
const ::tensorflow::Tensor *wrapped_tensor) {
return TfTensorViewTemplatizedNew(wrapped_tensor);
}
}
} | #include "tensorflow/lite/kernels/shim/tf_tensor_view.h"
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/tstring.h"
namespace tflite {
namespace shim {
namespace {
using ::tensorflow::protobuf::TextFormat;
TEST(TfTensorView, Bool) {
::tensorflow::TensorProto tf_tensor_pb;
ASSERT_TRUE(TextFormat::ParseFromString(
R"pb(
dtype: DT_BOOL
tensor_shape {
dim:
[ { size: 3 }
, { size: 2 }]
}
bool_val: [ false, false, false, false, false, false ]
)pb",
&tf_tensor_pb));
::tensorflow::Tensor tf_tensor;
ASSERT_TRUE(tf_tensor.FromProto(tf_tensor_pb));
auto t_premove_or = TensorView::New(&tf_tensor);
ASSERT_TRUE(t_premove_or.ok()) << t_premove_or.status();
auto t = std::move(t_premove_or.value());
auto tensor_data_as_vector = t.Data<bool>();
for (int i = 0; i < 3 * 2; ++i) tensor_data_as_vector[i] = i % 5 == 0;
ASSERT_THAT(tf_tensor.SummarizeValue(10, true),
::testing::Eq("[[1 0]\n [0 0]\n [0 1]]"));
}
TEST(TfTensorView, Int32) {
::tensorflow::TensorProto tf_tensor_pb;
ASSERT_TRUE(TextFormat::ParseFromString(
R"pb(
dtype: DT_INT32
tensor_shape {
dim:
[ { size: 3 }
, { size: 2 }]
}
int_val: [ 0, 0, 0, 0, 0, 0 ]
)pb",
&tf_tensor_pb));
::tensorflow::Tensor tf_tensor;
ASSERT_TRUE(tf_tensor.FromProto(tf_tensor_pb));
auto t_premove_or = TensorView::New(&tf_tensor);
ASSERT_TRUE(t_premove_or.ok()) << t_premove_or.status();
auto t = std::move(t_premove_or.value());
auto tensor_data_as_vector = t.Data<int32_t>();
for (int i = 0; i < 3 * 2; ++i) tensor_data_as_vector[i] = i;
ASSERT_THAT(tf_tensor.SummarizeValue(10, true),
::testing::Eq("[[0 1]\n [2 3]\n [4 5]]"));
}
TEST(TfTensorView, Int64) {
::tensorflow::TensorProto tf_tensor_pb;
ASSERT_TRUE(TextFormat::ParseFromString(
R"pb(
dtype: DT_INT64
tensor_shape {
dim:
[ { size: 3 }
, { size: 2 }]
}
int_val: [ 0, 0, 0, 0, 0, 0 ]
)pb",
&tf_tensor_pb));
::tensorflow::Tensor tf_tensor;
ASSERT_TRUE(tf_tensor.FromProto(tf_tensor_pb));
auto t_or = TensorView::New(&tf_tensor);
ASSERT_TRUE(t_or.ok()) << t_or.status();
auto& t = t_or.value();
auto tensor_data_as_vector = t.Data<int64_t>();
for (int i = 0; i < 3 * 2; ++i) tensor_data_as_vector[i] = i;
ASSERT_THAT(tf_tensor.SummarizeValue(10, true),
::testing::Eq("[[0 1]\n [2 3]\n [4 5]]"));
}
TEST(TfTensorView, Float) {
::tensorflow::TensorProto tf_tensor_pb;
ASSERT_TRUE(TextFormat::ParseFromString(
R"pb(
dtype: DT_FLOAT
tensor_shape {
dim:
[ { size: 3 }
, { size: 2 }]
}
float_val: [ 0, 0, 0, 0, 0, 0 ]
)pb",
&tf_tensor_pb));
::tensorflow::Tensor tf_tensor;
ASSERT_TRUE(tf_tensor.FromProto(tf_tensor_pb));
auto t_or = TensorView::New(&tf_tensor);
ASSERT_TRUE(t_or.ok()) << t_or.status();
auto& t = t_or.value();
auto tensor_data_as_vector = t.Data<float>();
for (int i = 0; i < 3 * 2; ++i)
tensor_data_as_vector[i] = static_cast<float>(i) / 2.0;
ASSERT_THAT(tf_tensor.SummarizeValue(10, true),
::testing::Eq("[[0 0.5]\n [1 1.5]\n [2 2.5]]"));
}
TEST(TfTensorView, Double) {
::tensorflow::TensorProto tf_tensor_pb;
ASSERT_TRUE(TextFormat::ParseFromString(
R"pb(
dtype: DT_DOUBLE
tensor_shape {
dim:
[ { size: 3 }
, { size: 2 }]
}
double_val: [ 0, 0, 0, 0, 0, 0 ]
)pb",
&tf_tensor_pb));
::tensorflow::Tensor tf_tensor;
ASSERT_TRUE(tf_tensor.FromProto(tf_tensor_pb));
auto t_or = TensorView::New(&tf_tensor);
ASSERT_TRUE(t_or.ok()) << t_or.status();
auto& t = t_or.value();
auto tensor_data_as_vector = t.Data<double>();
for (int i = 0; i < 3 * 2; ++i)
tensor_data_as_vector[i] = static_cast<double>(i) / 2.0;
ASSERT_THAT(tf_tensor.SummarizeValue(10, true),
::testing::Eq("[[0 0.5]\n [1 1.5]\n [2 2.5]]"));
}
TEST(TfTensorView, Str) {
::tensorflow::TensorProto tf_tensor_pb;
ASSERT_TRUE(TextFormat::ParseFromString(
R"pb(
dtype: DT_STRING
tensor_shape {
dim:
[ { size: 3 }
, { size: 2 }]
}
string_val: [ "", "", "", "", "", "" ]
)pb",
&tf_tensor_pb));
::tensorflow::Tensor tf_tensor;
ASSERT_TRUE(tf_tensor.FromProto(tf_tensor_pb));
auto t_or = TensorView::New(&tf_tensor);
ASSERT_TRUE(t_or.ok()) << t_or.status();
auto& t = t_or.value();
auto tensor_data_as_vector = t.Data<::tensorflow::tstring>();
tensor_data_as_vector[0] = "a";
tensor_data_as_vector[1] = "bc";
tensor_data_as_vector[2] = "def";
tensor_data_as_vector[3] = "g";
tensor_data_as_vector[4] = "hi";
tensor_data_as_vector[5] = "";
EXPECT_THAT(t.Data<::tensorflow::tstring>(),
::testing::ElementsAre("a", "bc", "def", "g", "hi", ""));
const auto& const_tf_tensor = tf_tensor;
const auto const_t_or = TensorView::New(&const_tf_tensor);
ASSERT_TRUE(const_t_or.ok()) << const_t_or.status();
const auto& const_t = const_t_or.value();
EXPECT_THAT(const_t.Data<::tensorflow::tstring>(),
::testing::ElementsAre("a", "bc", "def", "g", "hi", ""));
const char expectation[] = R"(
[["a" "bc"]
["def" "g"]
["hi" ""]])";
EXPECT_THAT(tf_tensor.SummarizeValue(10, true),
::testing::Eq(absl::string_view(expectation).substr(1)));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/shim/tf_tensor_view.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/shim/tf_tensor_view_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
86af0dfd-9e9d-4348-8772-56caee98f0d0 | cpp | tensorflow/tensorflow | simple_tf_op | tensorflow/lite/kernels/shim/test_op/simple_tf_op.cc | tensorflow/lite/kernels/shim/test_op/simple_tf_op_test.cc | #include "tensorflow/lite/kernels/shim/test_op/simple_tf_op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/lite/kernels/shim/tf_op_shim.h"
namespace tflite {
namespace shim {
REGISTER_TF_OP_SHIM(SimpleOpKernel);
REGISTER_KERNEL_BUILDER(
Name(SimpleOpKernel::OpName()).Device(::tensorflow::DEVICE_CPU),
SimpleOpKernel);
}
} | #include <cstdint>
#include <gtest/gtest.h>
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/platform/tstring.h"
namespace tflite {
namespace shim {
namespace {
using ::tensorflow::DT_INT64;
using ::tensorflow::DT_STRING;
using ::tensorflow::FakeInput;
using ::tensorflow::NodeDefBuilder;
using ::tensorflow::TensorShape;
using ::tensorflow::tstring;
using ::tensorflow::test::AsTensor;
using ::tensorflow::test::ExpectTensorEqual;
class SimpleOpTfTest : public ::tensorflow::OpsTestBase {};
TEST_F(SimpleOpTfTest, Output1Size_5_N_2) {
TF_ASSERT_OK(NodeDefBuilder("simple_op", "SimpleOperation")
.Attr("output1_size", 5)
.Attr("output2_suffix", "foo")
.Attr("N", 2)
.Input(FakeInput(DT_STRING))
.Input(FakeInput(2, DT_INT64))
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<tstring>(TensorShape({}), {"abc"});
AddInputFromArray<int64_t>(TensorShape({}), {123});
AddInputFromArray<int64_t>(TensorShape({2}), {456, 789});
TF_ASSERT_OK(RunOpKernel());
ExpectTensorEqual<int>(*GetOutput(0),
AsTensor<int>({0, 1, 2, 3, 4}, {5}));
ExpectTensorEqual<float>(
*GetOutput(1), AsTensor<float>({0, 0.5, 1., 1.5, 2.}, {5}));
ExpectTensorEqual<tstring>(
*GetOutput(2), AsTensor<tstring>({"0", "1", "2", "foo"}, {4}));
ExpectTensorEqual<int64_t>(*GetOutput(3),
AsTensor<int64_t>({124}, {}));
ExpectTensorEqual<int64_t>(*GetOutput(4),
AsTensor<int64_t>({457, 790}, {2}));
}
TEST_F(SimpleOpTfTest, Output1Size_3_N_0) {
TF_ASSERT_OK(NodeDefBuilder("simple_op", "SimpleOperation")
.Attr("output1_size", 3)
.Attr("output2_suffix", "foo")
.Attr("N", 0)
.Input(FakeInput(DT_STRING))
.Input(FakeInput(0, DT_INT64))
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<tstring>(TensorShape({}), {"abcde"});
TF_ASSERT_OK(RunOpKernel());
ExpectTensorEqual<int>(*GetOutput(0),
AsTensor<int>({0, 1, 2, 3, 4}, {5}));
ExpectTensorEqual<float>(*GetOutput(1),
AsTensor<float>({0, 0.5, 1.}, {3}));
ExpectTensorEqual<tstring>(
*GetOutput(2),
AsTensor<tstring>({"0", "1", "2", "3", "4", "foo"}, {6}));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/shim/test_op/simple_tf_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/shim/test_op/simple_tf_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
bc07f90e-c5ca-4425-9d00-f2c2cd42fb47 | cpp | tensorflow/tensorflow | tmpl_tflite_op | tensorflow/lite/kernels/shim/test_op/tmpl_tflite_op.cc | tensorflow/lite/kernels/shim/test_op/tmpl_tflite_op_test.cc | #include "tensorflow/lite/kernels/shim/test_op/tmpl_tflite_op.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/shim/op_kernel.h"
#include "tensorflow/lite/kernels/shim/test_op/tmpl_op.h"
#include "tensorflow/lite/kernels/shim/tflite_op_shim.h"
#include "tensorflow/lite/kernels/shim/tflite_op_wrapper.h"
#include "tensorflow/lite/mutable_op_resolver.h"
namespace tflite {
namespace ops {
namespace custom {
namespace {
const char a_type[]("AType"), b_type[]("BType");
}
using ::tflite::shim::op_wrapper::Attr;
using ::tflite::shim::op_wrapper::AttrName;
using ::tflite::shim::op_wrapper::OpWrapper;
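// OpWrapper selects the TmplOp<AType, BType> instantiation at runtime from the
// "AType" and "BType" attributes, restricted to the type lists given below.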
template <shim::Runtime Rt>
using Op = OpWrapper<Rt, shim::TmplOp, Attr<AttrName<a_type>, int32_t, float>,
Attr<AttrName<b_type>, int32_t, int64_t, bool>>;
using OpKernel = ::tflite::shim::TfLiteOpKernel<Op>;
void AddTmplOp(MutableOpResolver* resolver) { OpKernel::Add(resolver); }
TfLiteRegistration* Register_TMPL_OP() {
return OpKernel::GetTfLiteRegistration();
}
const char* OpName_TMPL_OP() { return OpKernel::OpName(); }
}
}
} | #include "tensorflow/lite/kernels/shim/test_op/tmpl_tflite_op.h"
#include <cstdint>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace shim {
namespace {
template <typename AType, typename BType>
class TmplOpModel : public SingleOpModel {
public:
TmplOpModel(const std::vector<uint8_t>& op_options,
const std::vector<tflite::TensorType>& input_types,
const std::vector<std::vector<int>>& input_shapes,
const std::vector<AType>& input0,
const std::vector<BType>& input1,
const std::vector<tflite::TensorType>& output_types) {
std::vector<int> input_idx;
for (const auto input_type : input_types) {
input_idx.push_back(AddInput(input_type));
}
for (const auto output_type : output_types) {
output_idx_.push_back(AddOutput(output_type));
}
SetCustomOp(ops::custom::OpName_TMPL_OP(), op_options,
ops::custom::Register_TMPL_OP);
BuildInterpreter(input_shapes);
PopulateTensor(input_idx[0], input0);
PopulateTensor(input_idx[1], input1);
}
template <typename T>
std::vector<T> GetOutput(const int i) {
return ExtractVector<T>(output_idx_[i]);
}
std::vector<int> GetOutputShape(const int i) {
return GetTensorShape(output_idx_[i]);
}
protected:
std::vector<int> output_idx_;
};
TEST(TmplOpModel, float_int32) {
flexbuffers::Builder builder;
builder.Map([&]() {
builder.Int("AType", kTfLiteFloat32);
builder.Int("BType", kTfLiteInt32);
});
builder.Finish();
std::vector<std::vector<int>> input_shapes = {{}, {}};
std::vector<tflite::TensorType> input_types = {tflite::TensorType_FLOAT32,
tflite::TensorType_INT32};
std::vector<tflite::TensorType> output_types = {tflite::TensorType_FLOAT32};
const std::vector<float> input0 = {5.6f};
const std::vector<int32_t> input1 = {3};
TmplOpModel<float, int32_t> m(
builder.GetBuffer(), input_types, input_shapes, input0,
input1, output_types);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(0), testing::ElementsAre(8.6f));
}
TEST(TmplOpModel, int32_int64) {
flexbuffers::Builder builder;
builder.Map([&]() {
builder.Int("AType", kTfLiteInt32);
builder.Int("BType", kTfLiteInt64);
});
builder.Finish();
std::vector<std::vector<int>> input_shapes = {{}, {}};
std::vector<tflite::TensorType> input_types = {tflite::TensorType_INT32,
tflite::TensorType_INT64};
std::vector<tflite::TensorType> output_types = {tflite::TensorType_FLOAT32};
const std::vector<int32_t> input0 = {12};
const std::vector<int64_t> input1 = {33l};
TmplOpModel<int32_t, int64_t> m(
builder.GetBuffer(), input_types, input_shapes, input0,
input1, output_types);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(0), testing::ElementsAre(45.0f));
}
TEST(TmplOpModel, int32_bool) {
flexbuffers::Builder builder;
builder.Map([&]() {
builder.Int("AType", kTfLiteInt32);
builder.Int("BType", kTfLiteBool);
});
builder.Finish();
std::vector<std::vector<int>> input_shapes = {{}, {}};
std::vector<tflite::TensorType> input_types = {tflite::TensorType_INT32,
tflite::TensorType_BOOL};
std::vector<tflite::TensorType> output_types = {tflite::TensorType_FLOAT32};
const std::vector<int32_t> input0 = {12};
const std::vector<bool> input1 = {true};
TmplOpModel<int32_t, bool> m(
builder.GetBuffer(), input_types, input_shapes, input0,
input1, output_types);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(0), testing::ElementsAre(13.0f));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/shim/test_op/tmpl_tflite_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/shim/test_op/tmpl_tflite_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2ec14317-0709-423f-841c-6f95002ea05e | cpp | tensorflow/tensorflow | simple_tflite_op | tensorflow/lite/kernels/shim/test_op/simple_tflite_op.cc | tensorflow/lite/kernels/shim/test_op/simple_tflite_op_test.cc | #include "tensorflow/lite/kernels/shim/test_op/simple_tflite_op.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/shim/test_op/simple_op.h"
#include "tensorflow/lite/kernels/shim/tflite_op_shim.h"
#include "tensorflow/lite/mutable_op_resolver.h"
namespace tflite {
namespace ops {
namespace custom {
using OpKernel = ::tflite::shim::TfLiteOpKernel<tflite::shim::SimpleOp>;
void AddSimpleOp(MutableOpResolver* resolver) { OpKernel::Add(resolver); }
TfLiteRegistration* Register_SIMPLE_OP() {
return OpKernel::GetTfLiteRegistration();
}
const char* OpName_SIMPLE_OP() { return OpKernel::OpName(); }
}
}
} | #include "tensorflow/lite/kernels/shim/test_op/simple_tflite_op.h"
#include <cstring>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace ops {
namespace custom {
namespace {
class SimpleOpModel : public SingleOpModel {
public:
SimpleOpModel(const std::vector<uint8_t>& op_options,
const std::vector<tflite::TensorType>& input_types,
const std::vector<std::vector<int>>& input_shapes,
const std::string& input0,
const std::vector<std::vector<int64_t>>& input1,
const std::vector<tflite::TensorType>& output_types) {
std::vector<int> input_idx;
for (const auto input_type : input_types) {
input_idx.push_back(AddInput(input_type));
}
for (const auto output_type : output_types) {
output_idx_.push_back(AddOutput(output_type));
}
SetCustomOp(OpName_SIMPLE_OP(), op_options, Register_SIMPLE_OP);
BuildInterpreter(input_shapes);
PopulateStringTensor(input_idx[0], {input0});
for (int i = 0; i < input1.size(); ++i) {
PopulateTensor(input_idx[1 + i], input1[i]);
}
}
template <typename T>
std::vector<T> GetOutput(const int i) {
return ExtractVector<T>(output_idx_[i]);
}
std::vector<int> GetOutputShape(const int i) {
return GetTensorShape(output_idx_[i]);
}
protected:
std::vector<int> output_idx_;
};
TEST(SimpleOpModel, OutputSize_5_N_2) {
flexbuffers::Builder builder;
builder.Map([&]() {
builder.Int("output1_size", 5);
builder.String("output2_suffix", "foo");
builder.Int("N", 2);
});
builder.Finish();
std::vector<std::vector<int>> input_shapes = {{}, {}, {2}};
std::vector<tflite::TensorType> input_types = {tflite::TensorType_STRING,
tflite::TensorType_INT64,
tflite::TensorType_INT64};
std::vector<tflite::TensorType> output_types = {
tflite::TensorType_INT32, tflite::TensorType_FLOAT32,
tflite::TensorType_STRING, tflite::TensorType_INT64,
tflite::TensorType_INT64};
const std::string input0 = "abc";
const std::vector<std::vector<int64_t>> input1 = {{123}, {456, 789}};
SimpleOpModel m(builder.GetBuffer(), input_types, input_shapes,
input0, input1, output_types);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<int>(0), testing::ElementsAre(0, 1, 2, 3, 4));
EXPECT_THAT(m.GetOutput<float>(1),
testing::ElementsAre(0, 0.5, 1.0, 1.5, 2.0));
EXPECT_THAT(m.GetOutput<std::string>(2),
testing::ElementsAre("0", "1", "2", "foo"));
EXPECT_THAT(m.GetOutput<int64_t>(3), testing::ElementsAre(124));
EXPECT_THAT(m.GetOutputShape(3), testing::ElementsAre());
EXPECT_THAT(m.GetOutput<int64_t>(4), testing::ElementsAre(457, 790));
EXPECT_THAT(m.GetOutputShape(4), testing::ElementsAre(2));
}
TEST(SimpleOpModel, OutputSize_3_N_0) {
flexbuffers::Builder builder;
builder.Map([&]() {
builder.Int("output1_size", 3);
builder.String("output2_suffix", "foo");
builder.Int("N", 0);
});
builder.Finish();
std::vector<std::vector<int>> input_shapes = {{}};
std::vector<tflite::TensorType> input_types = {tflite::TensorType_STRING};
std::vector<tflite::TensorType> output_types = {tflite::TensorType_INT32,
tflite::TensorType_FLOAT32,
tflite::TensorType_STRING};
const std::string input0 = "abcde";
const std::vector<std::vector<int64_t>> input1;
SimpleOpModel m(builder.GetBuffer(), input_types, input_shapes,
input0, input1, output_types);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<int>(0), testing::ElementsAre(0, 1, 2, 3, 4));
EXPECT_THAT(m.GetOutput<float>(1), testing::ElementsAre(0, 0.5, 1.0));
EXPECT_THAT(m.GetOutput<std::string>(2),
testing::ElementsAre("0", "1", "2", "3", "4", "foo"));
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/shim/test_op/simple_tflite_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/shim/test_op/simple_tflite_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2e2fd5e8-fdd6-41e4-afdf-bd4ad1803f9e | cpp | tensorflow/tensorflow | tmpl_tf_op | tensorflow/lite/kernels/shim/test_op/tmpl_tf_op.cc | tensorflow/lite/kernels/shim/test_op/tmpl_tf_op_test.cc | #include "tensorflow/lite/kernels/shim/test_op/tmpl_tf_op.h"
#include <cstdint>
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/lite/kernels/shim/tf_op_shim.h"
namespace tflite {
namespace shim {
using TmplOpKernelInstance = TmplOpKernel<float, int32_t>;
REGISTER_TF_OP_SHIM(TmplOpKernelInstance);
REGISTER_KERNEL_BUILDER(Name(TmplOpKernelInstance::OpName())
.Device(::tensorflow::DEVICE_CPU)
.TypeConstraint<float>("AType")
.TypeConstraint<int32_t>("BType"),
TmplOpKernel<float, int32_t>);
REGISTER_KERNEL_BUILDER(Name(TmplOpKernelInstance::OpName())
.Device(::tensorflow::DEVICE_CPU)
.TypeConstraint<int32_t>("AType")
.TypeConstraint<int64_t>("BType"),
TmplOpKernel<int32_t, int64_t>);
}
} | #include <cstdint>
#include <gtest/gtest.h>
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
namespace tflite {
namespace shim {
namespace {
using ::tensorflow::DT_FLOAT;
using ::tensorflow::DT_INT32;
using ::tensorflow::DT_INT64;
using ::tensorflow::FakeInput;
using ::tensorflow::NodeDefBuilder;
using ::tensorflow::TensorShape;
using ::tensorflow::test::AsTensor;
using ::tensorflow::test::ExpectTensorEqual;
class TmplOpTfTest : public ::tensorflow::OpsTestBase {};
TEST_F(TmplOpTfTest, float_int32) {
TF_ASSERT_OK(NodeDefBuilder("tmpl_op", "TemplatizedOperation")
.Attr("AType", DT_FLOAT)
.Attr("BType", DT_INT32)
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({}), {10.5});
AddInputFromArray<int32_t>(TensorShape({}), {20});
TF_ASSERT_OK(RunOpKernel());
ExpectTensorEqual<float>(*GetOutput(0),
AsTensor<float>({30.5}, {}));
}
TEST_F(TmplOpTfTest, int32_int64) {
TF_ASSERT_OK(NodeDefBuilder("tmpl_op", "TemplatizedOperation")
.Attr("AType", DT_INT32)
.Attr("BType", DT_INT64)
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_INT64))
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<int32_t>(TensorShape({}), {10});
AddInputFromArray<int64_t>(TensorShape({}), {20});
TF_ASSERT_OK(RunOpKernel());
ExpectTensorEqual<float>(*GetOutput(0), AsTensor<float>({30}, {}));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/shim/test_op/tmpl_tf_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/shim/test_op/tmpl_tf_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
31560376-54b1-4b76-9afc-50948117b640 | cpp | tensorflow/tensorflow | devicedb | tensorflow/lite/experimental/acceleration/compatibility/devicedb.cc | tensorflow/lite/experimental/acceleration/compatibility/devicedb_test.cc | #include "tensorflow/lite/experimental/acceleration/compatibility/devicedb.h"
#include <map>
#include <string>
#include <vector>
#include "tensorflow/lite/experimental/acceleration/compatibility/database_generated.h"
namespace tflite {
namespace acceleration {
namespace {
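// Returns the edges of `root` that match `value`: EQUAL nodes do an exact key
// lookup, while MINIMUM/MAXIMUM nodes compare `value` lexicographically
// against every edge and can return several matches.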
std::vector<const DeviceDecisionTreeEdge*> Find(
const DeviceDecisionTreeNode* root, const std::string& value) {
std::vector<const DeviceDecisionTreeEdge*> found;
if (root->comparison() == Comparison_EQUAL) {
const DeviceDecisionTreeEdge* possible =
root->items()->LookupByKey(value.c_str());
if (possible) {
found.push_back(possible);
}
} else {
for (const DeviceDecisionTreeEdge* item : *(root->items())) {
if ((root->comparison() == Comparison_MINIMUM)
? value >= item->value()->str()
: value <= item->value()->str()) {
found.push_back(item);
}
}
}
return found;
}
void UpdateVariablesFromDeviceDecisionTreeEdges(
std::map<std::string, std::string>* variable_values,
const DeviceDecisionTreeEdge& item) {
if (item.derived_properties()) {
for (const DerivedProperty* p : *(item.derived_properties())) {
(*variable_values)[p->variable()->str()] = p->value()->str();
}
}
}
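// Recursively follows the decision tree rooted at `root`, applying the derived
// properties of every matching edge to `variable_values`.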
void Follow(const DeviceDecisionTreeNode* root,
std::map<std::string, std::string>* variable_values) {
if (!root->variable()) {
return;
}
auto possible_value = variable_values->find(root->variable()->str());
if (possible_value == variable_values->end()) {
return;
}
std::vector<const DeviceDecisionTreeEdge*> edges =
Find(root, possible_value->second);
for (const DeviceDecisionTreeEdge* edge : edges) {
UpdateVariablesFromDeviceDecisionTreeEdges(variable_values, *edge);
if (edge->children()) {
for (const DeviceDecisionTreeNode* root : *(edge->children())) {
Follow(root, variable_values);
}
}
}
}
}
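// Walks every decision tree in the database, augmenting `variable_values` with
// derived properties such as the SoC model or the GPU delegate status.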
void UpdateVariablesFromDatabase(
std::map<std::string, std::string>* variable_values,
const DeviceDatabase& database) {
if (!database.root()) return;
for (const DeviceDecisionTreeNode* root : *(database.root())) {
Follow(root, variable_values);
}
}
}
} | #include "tensorflow/lite/experimental/acceleration/compatibility/devicedb.h"
#include <memory>
#include <string>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/experimental/acceleration/compatibility/devicedb-sample.h"
#include "tensorflow/lite/experimental/acceleration/compatibility/variables.h"
#include "tensorflow/lite/testing/util.h"
namespace tflite {
namespace acceleration {
namespace {
class DeviceDbTest : public ::testing::Test {
protected:
void LoadSample() {
device_db_ = flatbuffers::GetRoot<DeviceDatabase>(
g_tflite_acceleration_devicedb_sample_binary);
}
const DeviceDatabase* device_db_ = nullptr;
};
TEST_F(DeviceDbTest, Load) {
LoadSample();
ASSERT_TRUE(device_db_);
ASSERT_TRUE(device_db_->root());
EXPECT_EQ(device_db_->root()->size(), 4);
}
TEST_F(DeviceDbTest, SocLookup) {
LoadSample();
ASSERT_TRUE(device_db_);
std::map<std::string, std::string> variables;
variables[kDeviceModel] = "m712c";
UpdateVariablesFromDatabase(&variables, *device_db_);
EXPECT_EQ(variables[kSoCModel], "exynos_7872");
variables.clear();
variables[kDeviceModel] = "sc_02l";
UpdateVariablesFromDatabase(&variables, *device_db_);
EXPECT_EQ(variables[kSoCModel], "exynos_7885");
variables.clear();
variables[kDeviceModel] = "nosuch";
UpdateVariablesFromDatabase(&variables, *device_db_);
EXPECT_EQ(variables.find(kSoCModel), variables.end());
}
TEST_F(DeviceDbTest, StatusLookupWithSoC) {
LoadSample();
ASSERT_TRUE(device_db_);
std::map<std::string, std::string> variables;
variables[kOpenGLESVersion] = "3.1";
variables[kSoCModel] = "exynos_7872";
variables[kAndroidSdkVersion] = "24";
UpdateVariablesFromDatabase(&variables, *device_db_);
EXPECT_EQ(variables[gpu::kStatus], gpu::kStatusSupported);
variables[kOpenGLESVersion] = "3.0";
variables.erase(variables.find(gpu::kStatus));
UpdateVariablesFromDatabase(&variables, *device_db_);
EXPECT_EQ(variables.find(gpu::kStatus), variables.end());
variables.clear();
variables[kOpenGLESVersion] = "3.1";
variables[kSoCModel] = "exynos_7883";
variables[kAndroidSdkVersion] = "24";
UpdateVariablesFromDatabase(&variables, *device_db_);
EXPECT_EQ(variables.find(gpu::kStatus), variables.end());
variables[kAndroidSdkVersion] = "29";
UpdateVariablesFromDatabase(&variables, *device_db_);
EXPECT_EQ(variables[gpu::kStatus], gpu::kStatusSupported);
}
TEST_F(DeviceDbTest, StatusLookupWithDevice) {
LoadSample();
ASSERT_TRUE(device_db_);
std::map<std::string, std::string> variables;
variables[kAndroidSdkVersion] = "24";
variables[kDeviceModel] = "sm_j810f";
variables[kDeviceName] = "j8y18lte";
UpdateVariablesFromDatabase(&variables, *device_db_);
EXPECT_EQ(variables[gpu::kStatus], gpu::kStatusUnsupported);
variables.clear();
variables[kAndroidSdkVersion] = "24";
variables[kDeviceModel] = "sm_j810m";
variables[kDeviceName] = "j8y18lte";
UpdateVariablesFromDatabase(&variables, *device_db_);
EXPECT_EQ(variables[gpu::kStatus], gpu::kStatusSupported);
}
TEST_F(DeviceDbTest, StatusLookupBasedOnDerivedProperties) {
LoadSample();
ASSERT_TRUE(device_db_);
std::map<std::string, std::string> variables;
variables[kOpenGLESVersion] = "3.1";
variables[kAndroidSdkVersion] = "24";
variables[kDeviceModel] = "m712c";
UpdateVariablesFromDatabase(&variables, *device_db_);
EXPECT_EQ(variables[gpu::kStatus], gpu::kStatusSupported);
}
TEST_F(DeviceDbTest, StatusLookupWithMaximumComparison) {
LoadSample();
ASSERT_TRUE(device_db_);
std::map<std::string, std::string> variables;
variables[kDeviceModel] = "shiraz_ag_2011";
variables[kAndroidSdkVersion] = "28";
UpdateVariablesFromDatabase(&variables, *device_db_);
EXPECT_EQ(variables[gpu::kStatus], gpu::kStatusUnsupported);
variables[kAndroidSdkVersion] = "27";
variables.erase(variables.find(gpu::kStatus));
UpdateVariablesFromDatabase(&variables, *device_db_);
EXPECT_EQ(variables[gpu::kStatus], gpu::kStatusUnsupported);
variables[kAndroidSdkVersion] = "29";
variables.erase(variables.find(gpu::kStatus));
UpdateVariablesFromDatabase(&variables, *device_db_);
EXPECT_EQ(variables.find(gpu::kStatus), variables.end());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/compatibility/devicedb.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/compatibility/devicedb_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a702c183-894e-41e6-b224-457072d64d80 | cpp | tensorflow/tensorflow | canonicalize_value | tensorflow/lite/experimental/acceleration/compatibility/canonicalize_value.cc | tensorflow/lite/experimental/acceleration/compatibility/canonicalize_value_test.cc | #include "tensorflow/lite/experimental/acceleration/compatibility/canonicalize_value.h"
#include <iterator>
#include <string>
#include "absl/algorithm/container.h"
#include "absl/strings/ascii.h"
#include "absl/strings/string_view.h"
#include "re2/re2.h"
#include "tensorflow/lite/experimental/acceleration/compatibility/variables.h"
namespace tflite::acceleration {
namespace {
inline char ascii_normalise(const unsigned char c) {
if (c == ' ' || c == '-') {
return '_';
}
return absl::ascii_tolower(c);
}
}
std::string CanonicalizeValue(absl::string_view value) {
std::string output;
absl::c_transform(value, std::back_inserter(output),
tflite::acceleration::ascii_normalise);
return output;
}
std::string CanonicalizeValueWithKey(absl::string_view key,
absl::string_view value) {
std::string output = CanonicalizeValue(value);
std::string gpu_output;
return key == kGPUModel &&
RE2::FullMatch(
output,
R"((angle_\(samsung_xclipse_[0-9]*\)_on_vulkan).*$)",
&gpu_output)
? gpu_output
: output;
}
} | #include "tensorflow/lite/experimental/acceleration/compatibility/canonicalize_value.h"
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/acceleration/compatibility/variables.h"
namespace tflite::acceleration {
namespace {
TEST(CanonicalizeValue, CharactersAreLowercased) {
EXPECT_EQ(CanonicalizeValue("hElLo"), "hello");
}
TEST(CanonicalizeValue, HyphensAreReplaced) {
EXPECT_EQ(CanonicalizeValue("-"), "_");
}
TEST(CanonicalizeValue, SpacesAreReplaced) {
EXPECT_EQ(CanonicalizeValue(" "), "_");
}
TEST(CanonicalizeValue, OtherSpecialCharactersAreUnaffected) {
for (unsigned char c = 0; c < 65; ++c) {
if (c == ' ' || c == '-') continue;
std::string s = {1, static_cast<char>(c)};
EXPECT_EQ(CanonicalizeValue(s), s);
}
}
TEST(CanonicalizeValue, SamsungXclipseGpuNormalized) {
EXPECT_EQ(CanonicalizeValueWithKey(
kGPUModel, "ANGLE (Samsung Xclipse 920) on Vulkan 1.1.179"),
"angle_(samsung_xclipse_920)_on_vulkan");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/compatibility/canonicalize_value.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/compatibility/canonicalize_value_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5b2e1d7e-be28-4c3b-854c-5f1e07a81350 | cpp | tensorflow/tensorflow | gpu_compatibility | tensorflow/lite/tools/versioning/gpu_compatibility.cc | tensorflow/lite/tools/versioning/gpu_compatibility_test.cc | #include "tensorflow/lite/tools/versioning/gpu_compatibility.h"
#include <algorithm>
#include <string>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "tensorflow/lite/builtin_op_data.h"
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/tools/versioning/op_signature.h"
namespace tflite {
namespace {
const std::string GetOpName(const OpSignature& op_sig) {
if (op_sig.op == tflite::BuiltinOperator_CUSTOM) {
return op_sig.custom_name;
}
return tflite::EnumNamesBuiltinOperator()[op_sig.op];
}
int NumElements(const std::vector<int32_t>& dims) {
int count = 1;
for (int i = 0; i < dims.size(); ++i) {
count *= dims.at(i);
}
return count;
}
#define RETURN_IF_ERROR(s) \
{ \
auto c = (s); \
if (!c.ok()) return c; \
}
template <typename ParamsT>
absl::Status RetrieveBuiltinData(const OpSignature& op_sig,
const ParamsT** tf_options) {
*tf_options = static_cast<const ParamsT*>(op_sig.builtin_data);
if (!*tf_options) {
return absl::InternalError("Unable to retrieve builtin_data.");
}
return absl::OkStatus();
}
template <typename ParamsT>
absl::Status RetrieveCustomInitialData(const OpSignature& op_sig,
const ParamsT** tf_options) {
*tf_options = static_cast<const ParamsT*>(op_sig.custom_initial_data);
if (!*tf_options) {
return absl::InternalError("Unable to retrieve custom_initial_data.");
}
return absl::OkStatus();
}
absl::Status IsActivationSupported(TfLiteFusedActivation fused_activation) {
switch (fused_activation) {
case kTfLiteActNone:
case kTfLiteActRelu:
case kTfLiteActReluN1To1:
case kTfLiteActRelu6:
case kTfLiteActTanh:
case kTfLiteActSigmoid:
return absl::OkStatus();
case kTfLiteActSignBit:
return absl::UnimplementedError(
"TfLiteFusedActivation.kTfLiteActSignBit");
}
}
int GetNumberOfRuntimeInputs(const OpSignature& op_sig) {
int number_of_runtime_inputs = 0;
for (auto& input : op_sig.inputs) {
if (!input.is_const && input.type != kTfLiteNoType) {
number_of_runtime_inputs++;
}
}
return number_of_runtime_inputs;
}
absl::Status CheckInputsOutputs(const OpSignature& op_sig,
const int required_runtime_inputs,
const int required_outputs) {
const int runtime_inputs_from_model = GetNumberOfRuntimeInputs(op_sig);
if (runtime_inputs_from_model != required_runtime_inputs) {
return absl::InternalError(
absl::StrCat("Expected ", required_runtime_inputs,
" runtime input tensor(s), but node has ",
runtime_inputs_from_model, " runtime input(s)."));
}
const int outputs_from_model = op_sig.outputs.size();
if (outputs_from_model != required_outputs) {
return absl::InternalError(absl::StrCat("Expected ", required_outputs,
" output tensor(s), but node has ",
outputs_from_model, " output(s)."));
}
return absl::OkStatus();
}
absl::Status CheckInputsConstsOutputs(const OpSignature& op_sig,
int required_runtime_inputs,
int required_const_inputs,
int required_outputs) {
int const_inputs_from_model = 0;
for (auto& input : op_sig.inputs) {
if (input.is_const) {
++const_inputs_from_model;
}
}
if (const_inputs_from_model != required_const_inputs) {
return absl::InternalError(
absl::StrCat("Expected ", required_const_inputs,
" const input tensor(s), but node has ",
const_inputs_from_model, " const input(s)."));
}
return CheckInputsOutputs(op_sig, required_runtime_inputs, required_outputs);
}
absl::Status CheckTensorIsAvailable(const OpSignature& op_sig, int idx) {
if (idx >= op_sig.inputs.size()) {
return absl::OutOfRangeError(
absl::StrCat("Requested index goes beyond array size: ", idx, " vs ",
op_sig.inputs.size()));
}
return absl::OkStatus();
}
absl::Status CheckConvoultionInputOutput(const OpSignature& op_sig) {
const int runtime_inputs = GetNumberOfRuntimeInputs(op_sig);
if (runtime_inputs > 2) {
return absl::InternalError(
absl::StrCat("Expected 1 or 2 input tensor(s), but node has ",
runtime_inputs, " runtime inputs."));
}
const int runtime_outputs = op_sig.outputs.size();
if (runtime_outputs != 1) {
return absl::InternalError(
absl::StrCat("Expected 1 output tensor(s), but node has ",
runtime_outputs, " runtime outputs."));
}
if (runtime_inputs == 1) {
RETURN_IF_ERROR(CheckTensorIsAvailable(op_sig, 1));
}
return absl::OkStatus();
}
absl::Status CheckStrides(int strides_h, int strides_w) {
if (strides_h <= 0 || strides_w <= 0) {
return absl::InvalidArgumentError(
absl::StrCat("Incorrect stride values: stride_height = ", strides_h,
", stride_width = ", strides_w));
}
return absl::OkStatus();
}
absl::Status CheckDilation(int dilation_h, int dilation_w) {
if (dilation_h <= 0 || dilation_w <= 0) {
return absl::InvalidArgumentError(absl::StrCat(
"Incorrect dilation values: dilation_height = ", dilation_h,
", dilation_width = ", dilation_w));
}
return absl::OkStatus();
}
absl::Status CheckStridesAndDilation(int strides_h, int strides_w,
int dilation_h, int dilation_w) {
RETURN_IF_ERROR(CheckStrides(strides_h, strides_w));
RETURN_IF_ERROR(CheckDilation(dilation_h, dilation_w));
return absl::OkStatus();
}
absl::Status CheckKernels(int kernel_h, int kernel_w) {
if (kernel_h <= 0 || kernel_w <= 0) {
return absl::InvalidArgumentError(
absl::StrCat("Incorrect kernel values: kernel_height = ", kernel_h,
", kernel_width = ", kernel_w));
}
return absl::OkStatus();
}
absl::Status CheckKernelsAndStrides(int kernel_h, int kernel_w, int strides_h,
int strides_w) {
RETURN_IF_ERROR(CheckKernels(kernel_h, kernel_w));
RETURN_IF_ERROR(CheckStrides(strides_h, strides_w));
return absl::OkStatus();
}
absl::Status CheckAxesAreInt32Const(const OpSignature& op_sig, int idx) {
auto axes = op_sig.inputs.at(idx);
if (!axes.is_const) {
return absl::UnimplementedError(GetOpName(op_sig) +
" is only supported with constant axes.");
}
if (axes.type != kTfLiteInt32) {
return absl::UnimplementedError(absl::StrCat(
GetOpName(op_sig) + " supports int32 tensor for axes. But node has ",
TfLiteTypeGetName(axes.type)));
}
return absl::OkStatus();
}
absl::Status CheckPooling2DGpuDelegateCompatibility(const OpSignature& op_sig) {
const TfLitePoolParams* tf_options;
if (op_sig.custom_initial_data) {
RETURN_IF_ERROR(RetrieveCustomInitialData(op_sig, &tf_options));
    RETURN_IF_ERROR(CheckInputsOutputs(op_sig,
                                       /*required_runtime_inputs=*/1,
                                       /*required_outputs=*/2));
} else {
RETURN_IF_ERROR(RetrieveBuiltinData(op_sig, &tf_options));
    RETURN_IF_ERROR(CheckInputsOutputs(op_sig,
                                       /*required_runtime_inputs=*/1,
                                       /*required_outputs=*/1));
}
RETURN_IF_ERROR(CheckKernelsAndStrides(
tf_options->filter_height, tf_options->filter_width,
tf_options->stride_height, tf_options->stride_width));
return IsActivationSupported(tf_options->activation);
}
absl::Status CheckDepthwiseConvGpuDelegateCompatibility(
const OpSignature& op_sig) {
RETURN_IF_ERROR(CheckConvoultionInputOutput(op_sig));
const TfLiteDepthwiseConvParams* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(op_sig, &tf_options));
RETURN_IF_ERROR(CheckStridesAndDilation(
tf_options->stride_height, tf_options->stride_width,
tf_options->dilation_height_factor, tf_options->dilation_width_factor));
RETURN_IF_ERROR(IsActivationSupported(tf_options->activation));
const int depth_multiplier = tf_options->depth_multiplier;
const auto* input = &op_sig.inputs[0];
const auto* filter = &op_sig.inputs[1];
const auto* bias = op_sig.inputs.size() > 2 ? &op_sig.inputs[2] : nullptr;
const auto* output = &op_sig.outputs[0];
if (input->dims.size() != 4) {
return absl::InvalidArgumentError("input.dims.size != 4");
}
if (filter->dims.size() != 4) {
return absl::InvalidArgumentError("filter.dims.size != 4");
}
if (output->dims.size() != 4) {
return absl::InvalidArgumentError("output.dims.size != 4");
}
if (input->dims[0] != output->dims[0]) {
return absl::InvalidArgumentError("input.b != output.b");
}
const int input_depth = input->dims[3];
const int output_depth = output->dims[3];
if (filter->dims[3] != output_depth) {
return absl::InvalidArgumentError("filter.i != output.c");
}
if (output_depth != input_depth * depth_multiplier) {
return absl::InvalidArgumentError("output.c != input.c * depth_multiplier");
}
if (bias && NumElements(bias->dims) != output_depth) {
return absl::InvalidArgumentError("bias.size != output.c");
}
if (depth_multiplier != 1 && input_depth != 1) {
return absl::UnimplementedError("depth_multiplier != 1 && input.c != 1");
}
return absl::OkStatus();
}
absl::Status CheckCumsumGpuDelegateCompatibility(const OpSignature& op_sig) {
if (op_sig.inputs.size() != 2) {
return absl::InvalidArgumentError("Expects 2 inputs and 1 output");
}
auto error = absl::InvalidArgumentError(
"Input/output must be float type and indices must be constant int32 "
"type");
if ((op_sig.inputs.at(0).type != kTfLiteFloat16 &&
op_sig.inputs.at(0).type != kTfLiteFloat32) ||
(op_sig.outputs.at(0).type != op_sig.inputs.at(0).type) ||
(op_sig.inputs.at(1).type != kTfLiteInt32 ||
!op_sig.inputs.at(1).is_const)) {
return error;
}
return absl::OkStatus();
}
absl::Status CheckOneHotGpuDelegateCompatibility(const OpSignature& op_sig) {
  if (op_sig.inputs.size() != 4 || op_sig.outputs.size() != 1) {
return absl::InvalidArgumentError("Expects 4 inputs and 1 output");
}
absl::Status error = absl::InvalidArgumentError(
"Indices must be int32 type, on/off tensors must be constant, scalar, "
"float type, axis must be -1 or last dim");
if (op_sig.inputs[0].type != kTfLiteInt32) {
return error;
}
auto* one_hot_options =
reinterpret_cast<TfLiteOneHotParams*>(op_sig.builtin_data);
const int num_dims = op_sig.inputs[0].dims.size();
if (one_hot_options->axis != -1 &&
one_hot_options->axis != op_sig.inputs[0].dims[num_dims - 1]) {
return error;
}
for (int i = 0; i < num_dims - 1; ++i) {
if (num_dims > 3 && i == 0) {
continue;
}
if (op_sig.inputs.at(0).dims[i] != 1) {
return absl::InvalidArgumentError(
absl::StrCat("Unspported non-singleton dim at ", i));
}
}
if (op_sig.inputs.at(2).type != kTfLiteFloat32 ||
op_sig.inputs.at(3).type != kTfLiteFloat32) {
return error;
}
if (!op_sig.inputs.at(2).is_const || !op_sig.inputs.at(3).is_const ||
op_sig.inputs.at(2).dims.size() > 1 ||
op_sig.inputs.at(3).dims.size() > 1) {
return error;
}
if ((!op_sig.inputs.at(2).dims.empty() && op_sig.inputs.at(2).dims[0] > 1) ||
(!op_sig.inputs.at(3).dims.empty() && op_sig.inputs.at(3).dims[0] > 1)) {
return error;
}
return absl::OkStatus();
}
absl::Status CheckSelectV2GpuDelegateCompatibility(const OpSignature& op_sig) {
if (op_sig.inputs.size() != 3 || op_sig.outputs.size() != 1) {
return absl::InvalidArgumentError("Expected 3 inputs and 1 output");
}
absl::Status error = absl::InvalidArgumentError(
"Cond must be float or bool type, if, else tensors must be float and "
"either be same the shape as output or constant, scalar.");
if ((op_sig.inputs.at(0).type != kTfLiteBool &&
op_sig.inputs.at(0).type != kTfLiteFloat16 &&
op_sig.inputs.at(0).type != kTfLiteFloat32) ||
(op_sig.inputs.at(1).type != kTfLiteFloat16 &&
op_sig.inputs.at(1).type != kTfLiteFloat32) ||
(op_sig.inputs.at(2).type != kTfLiteFloat16 &&
op_sig.inputs.at(2).type != kTfLiteFloat32)) {
return error;
}
std::vector<int32_t> output_dims = op_sig.outputs[0].dims;
if (!op_sig.inputs.at(1).dims.empty() &&
(op_sig.inputs.at(1).dims != output_dims) &&
(op_sig.inputs.at(1).dims.size() > 1 ||
op_sig.inputs.at(1).dims[0] > 1)) {
return error;
}
if (op_sig.inputs.at(1).is_const && op_sig.inputs.at(1).dims.size() == 2) {
return absl::InvalidArgumentError(
"2-D if tensor only supported if constant.");
}
if (!op_sig.inputs.at(2).dims.empty() &&
(op_sig.inputs.at(2).dims != output_dims) &&
(op_sig.inputs.at(2).dims.size() > 1 ||
op_sig.inputs.at(2).dims[0] > 1)) {
return error;
}
if (op_sig.inputs.at(2).is_const && op_sig.inputs.at(2).dims.size() == 2) {
return absl::InvalidArgumentError(
"2-D else tensor only supported if constant.");
}
return absl::OkStatus();
}
absl::Status CheckCustomOpsGpuDelegateCompatibility(const OpSignature& op_sig) {
if (op_sig.custom_name == "Convolution2DTransposeBias") {
RETURN_IF_ERROR(CheckTensorIsAvailable(op_sig, 1));
const TfLiteTransposeConvParams* tf_options;
RETURN_IF_ERROR(RetrieveCustomInitialData(op_sig, &tf_options));
RETURN_IF_ERROR(
CheckStrides(tf_options->stride_height, tf_options->stride_width));
return absl::OkStatus();
}
if (op_sig.custom_name == "MaxPoolingWithArgmax2D") {
return CheckPooling2DGpuDelegateCompatibility(op_sig);
}
if (op_sig.custom_name == "MaxUnpooling2D") {
    RETURN_IF_ERROR(CheckInputsOutputs(op_sig,
                                       /*required_runtime_inputs=*/2,
                                       /*required_outputs=*/1));
const TfLitePoolParams* tf_options;
RETURN_IF_ERROR(RetrieveCustomInitialData(op_sig, &tf_options));
RETURN_IF_ERROR(CheckKernelsAndStrides(
tf_options->filter_height, tf_options->filter_width,
tf_options->stride_height, tf_options->stride_width));
return absl::OkStatus();
}
if (op_sig.custom_name == "Resampler") {
    return CheckInputsOutputs(op_sig,
                              /*required_runtime_inputs=*/2,
                              /*required_outputs=*/1);
}
return absl::InvalidArgumentError(
absl::StrCat("Not supported custom op ", op_sig.custom_name));
}
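// Returns true if the shorter shape broadcasts against the longer one under
// NumPy-style rules: trailing dimensions must either match or be 1, and
// missing leading dimensions are treated as 1.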
bool CheckIsBroadcastable(const std::vector<int32_t>* longer_dims,
const std::vector<int32_t>* shorter_dims) {
int idx_1 = longer_dims->size() - 1;
int idx_2 = shorter_dims->size() - 1;
int max_idx = std::max(idx_1, idx_2);
int data_1 = 0;
int data_2 = 0;
for (int i = max_idx; i >= 0; --i) {
data_1 = idx_1 < 0 ? 1 : longer_dims->at(idx_1);
data_2 = idx_2 < 0 ? 1 : shorter_dims->at(idx_2);
if (data_1 != data_2 && data_1 != 1 && data_2 != 1) {
return false;
}
--idx_1;
--idx_2;
}
return true;
}
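// ADD/MUL support only a restricted set of broadcast shapes on GPU; with the
// kEnhancedBroadcast flag the full trailing-dimension rule above is used,
// otherwise only a few hard-coded 4D-vs-3D/2D patterns are accepted.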
absl::Status CheckAddMulBroadcastCompatibility(
const OpSignatureTensorSpec& input0, const OpSignatureTensorSpec& input1,
GpuCompatibilityFlags flags) {
if (input0.dims.size() > 1 && input1.dims.size() > 1 &&
input0.dims.size() != input1.dims.size()) {
const std::vector<int32_t>*longer_dims, *shorter_dims;
if (input0.dims.size() >= input1.dims.size()) {
longer_dims = &input0.dims;
shorter_dims = &input1.dims;
} else {
longer_dims = &input1.dims;
shorter_dims = &input0.dims;
}
bool is_broadcastable = false;
if (flags == GpuCompatibilityFlags::kEnhancedBroadcast) {
is_broadcastable = CheckIsBroadcastable(longer_dims, shorter_dims);
} else {
if (longer_dims->size() == 4 && shorter_dims->size() == 3 &&
longer_dims->at(0) == 1) {
is_broadcastable = true;
} else if (longer_dims->size() == 4 && shorter_dims->size() == 2 &&
longer_dims->at(0) == 1 && shorter_dims->at(0) == 1 &&
shorter_dims->at(1) == 1) {
is_broadcastable = true;
} else if (longer_dims->size() == 4 && shorter_dims->size() == 2 &&
longer_dims->at(0) == shorter_dims->at(0) &&
longer_dims->at(3) == shorter_dims->at(1)) {
is_broadcastable = true;
}
}
if (!is_broadcastable) {
return absl::UnimplementedError(
absl::StrCat("Doesn't support broadcasting - input0: [",
absl::StrJoin(input0.dims, ","), "], input1: [",
absl::StrJoin(input1.dims, ","), "]"));
}
}
return absl::OkStatus();
}
}
absl::Status CheckGpuDelegateCompatibility(const OpSignature& op_sig,
GpuCompatibilityFlags flags) {
TfLiteBuiltinOperator opcode = static_cast<TfLiteBuiltinOperator>(op_sig.op);
switch (opcode) {
case kTfLiteBuiltinAdd: {
if (op_sig.inputs.size() != 2) {
return absl::UnimplementedError("ADD requires two input tensors.");
}
const auto& input0 = op_sig.inputs.at(0);
const auto& input1 = op_sig.inputs.at(1);
auto broadcastable =
CheckAddMulBroadcastCompatibility(input0, input1, flags);
if (!broadcastable.ok()) {
return broadcastable;
}
const TfLiteAddParams* tf_options;
return RetrieveBuiltinData(op_sig, &tf_options);
}
case kTfLiteBuiltinAddN: {
return op_sig.inputs.size() == 2
? absl::OkStatus()
: absl::UnimplementedError("ADD_N only supports 2 inputs.");
}
case kTfLiteBuiltinAveragePool2d:
return CheckPooling2DGpuDelegateCompatibility(op_sig);
case kTfLiteBuiltinBatchMatmul: {
const int num_inputs = op_sig.inputs.size();
const int num_outputs = op_sig.outputs.size();
if (!(num_inputs == 2 && num_outputs == 1)) {
return absl::InternalError(
absl::StrCat("Expected 2 inputs and 1 output, got: ", num_inputs,
" inputs and ", num_outputs, " outputs"));
}
return absl::OkStatus();
}
case kTfLiteBuiltinCast:
      RETURN_IF_ERROR(CheckInputsOutputs(op_sig,
                                         /*required_runtime_inputs=*/1,
                                         /*required_outputs=*/1));
if (op_sig.inputs.at(0).type == kTfLiteBool &&
(op_sig.outputs.at(0).type == kTfLiteFloat16 ||
op_sig.outputs.at(0).type == kTfLiteFloat32)) {
return absl::OkStatus();
} else if ((op_sig.inputs.at(0).type == kTfLiteFloat16 ||
op_sig.inputs.at(0).type == kTfLiteFloat32) &&
op_sig.outputs.at(0).type == kTfLiteBool) {
return absl::OkStatus();
} else if ((op_sig.inputs.at(0).type == kTfLiteFloat32 ||
op_sig.inputs.at(0).type == kTfLiteInt32) &&
(op_sig.outputs.at(0).type == kTfLiteFloat32 ||
op_sig.outputs.at(0).type == kTfLiteInt32)) {
return absl::OkStatus();
} else {
return absl::UnimplementedError(absl::StrCat(
"Not supported Cast case. Input type: ",
TfLiteTypeGetName(op_sig.inputs.at(0).type), " and output type: ",
TfLiteTypeGetName(op_sig.outputs.at(0).type)));
}
case kTfLiteBuiltinConcatenation: {
const TfLiteConcatenationParams* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(op_sig, &tf_options));
return absl::OkStatus();
}
case kTfLiteBuiltinConv2d: {
RETURN_IF_ERROR(CheckConvoultionInputOutput(op_sig));
const TfLiteConvParams* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(op_sig, &tf_options));
RETURN_IF_ERROR(CheckStridesAndDilation(
tf_options->stride_height, tf_options->stride_width,
tf_options->dilation_height_factor,
tf_options->dilation_width_factor));
return IsActivationSupported(tf_options->activation);
}
case kTfLiteBuiltinCumsum:
return CheckCumsumGpuDelegateCompatibility(op_sig);
case kTfLiteBuiltinDensify:
return CheckInputsOutputs(op_sig, 0,
1);
case kTfLiteBuiltinDepthwiseConv2d:
return CheckDepthwiseConvGpuDelegateCompatibility(op_sig);
case kTfLiteBuiltinDepthToSpace: {
RETURN_IF_ERROR(CheckInputsOutputs(op_sig,
1,
1));
const TfLiteDepthToSpaceParams* d2s_params;
RETURN_IF_ERROR(RetrieveBuiltinData(op_sig, &d2s_params));
if (d2s_params->block_size == 1) {
return absl::InvalidArgumentError(
"DEPTH_TO_SPACE block_size = 1 is a no-op.");
}
if (d2s_params->block_size < 1) {
return absl::InvalidArgumentError(
"DEPTH_TO_SPACE block_size must be > 1.");
}
return absl::OkStatus();
}
case kTfLiteBuiltinDequantize: {
const int num_inputs = op_sig.inputs.size();
const int num_outputs = op_sig.outputs.size();
if (num_inputs != 1 || num_outputs != 1) {
return absl::InternalError(absl::StrCat(
"Expected 1 input & output each from Dequantize, got: %d, %d",
num_inputs, num_outputs));
}
if (op_sig.inputs[0].type == kTfLiteInt16) {
return absl::UnimplementedError("Unsupported dequantization type.");
}
return absl::OkStatus();
}
case kTfLiteBuiltinEmbeddingLookup: {
const int num_inputs = op_sig.inputs.size();
const OpSignatureTensorSpec ids_spec = op_sig.inputs[0];
const OpSignatureTensorSpec value_spec = op_sig.inputs[1];
const OpSignatureTensorSpec output_spec = op_sig.outputs[0];
if (num_inputs != 2) {
return absl::InvalidArgumentError(
absl::StrCat("Expected 2, but got ", num_inputs, " inputs."));
}
if (ids_spec.dims.size() != 1) {
return absl::InvalidArgumentError(absl::StrCat(
"Expected 1D, but got ", ids_spec.dims.size(), "D input #0."));
}
if (value_spec.dims.size() < 2) {
return absl::InvalidArgumentError(absl::StrCat(
"Expected > 1D, but got ", value_spec.dims.size(), "D input #1."));
}
if (op_sig.outputs.size() != 1) {
return absl::InvalidArgumentError(absl::StrCat(
"Expected 1, but got ", op_sig.outputs.size(), " outputs."));
}
if (value_spec.dims.size() != output_spec.dims.size()) {
return absl::InvalidArgumentError(
absl::StrCat("Expected ", value_spec.dims.size(), ", but got ",
output_spec.dims.size(), " for output."));
}
for (int i = 1; i < output_spec.dims.size(); ++i) {
if (value_spec.dims[i] != output_spec.dims[i]) {
return absl::InvalidArgumentError(
absl::StrCat("Expected ", value_spec.dims[i], ", but got ",
output_spec.dims[i], " for output.dim[", i, "]."));
}
}
if (value_spec.type != kTfLiteInt8 && value_spec.type != kTfLiteInt4 &&
value_spec.type != kTfLiteFloat32) {
return absl::InvalidArgumentError(
absl::StrCat("Expected int8, int4, or float32, but got ",
TfLiteTypeGetName(value_spec.type), " for input #1."));
}
return absl::OkStatus();
}
case kTfLiteBuiltinDynamicUpdateSlice: {
if (op_sig.inputs.size() != 3) {
return absl::UnimplementedError(
"DynamicUpdateSlice requires 3 inputs.");
}
OpSignatureTensorSpec operand = op_sig.inputs[0];
OpSignatureTensorSpec update_slice = op_sig.inputs[1];
OpSignatureTensorSpec start_indices = op_sig.inputs[2];
if (operand.dims.size() == 4 && operand.dims[0] != 1) {
return absl::UnimplementedError(
"DynamicUpdateSlice only support 4D operand with batch size 1.");
}
if (start_indices.dims.size() > 1) {
return absl::UnimplementedError(
"DynamicUpdateSlice only support 1D start_indices.");
}
if (operand.type != update_slice.type) {
return absl::InternalError(
absl::StrCat("Array to update and updated slice must have the same "
"data type, but got: array to update: ",
operand.type, ", updated slice: ", update_slice.type));
}
if (start_indices.dims.size() != 1) {
return absl::InternalError(
absl::StrCat("Start indices must have be 1D, but got: ",
start_indices.dims.size()));
}
if (start_indices.type != kTfLiteInt32) {
return absl::InvalidArgumentError(
"start_indices must be of type int32.");
}
if (update_slice.dims.size() != operand.dims.size()) {
return absl::InternalError(absl::StrCat(
"Operand and update must have the same number of "
"dimensions, but got: operand: ",
operand.dims.size(), ", update: ", update_slice.dims.size()));
}
return absl::OkStatus();
}
case kTfLiteBuiltinFullyConnected: {
const TfLiteFullyConnectedParams* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(op_sig, &tf_options));
if (tf_options->weights_format !=
kTfLiteFullyConnectedWeightsFormatDefault) {
return absl::UnimplementedError(
absl::StrCat("Unsupported FullyConnected weights format: ",
tf_options->weights_format));
}
if (GetNumberOfRuntimeInputs(op_sig) > 2) {
return absl::UnimplementedError(
"FullyConnected doesn't support more than 2 runtime inputs.");
}
if (op_sig.inputs[0].is_const) {
return absl::UnimplementedError(
"FullyConnected doesn't support constant input.");
}
if (tf_options->keep_num_dims == true) {
const auto& input = op_sig.inputs.at(0);
const auto& output = op_sig.outputs.at(0);
if (input.dims.size() != output.dims.size()) {
return absl::UnimplementedError(
"Input and output dimensions different and FullyConnected "
"doesn't "
"support keep_num_dims.");
}
}
return absl::OkStatus();
}
case kTfLiteBuiltinGather:
if (!CheckInputsConstsOutputs(op_sig, 2,
0,
1)
.ok() &&
!CheckInputsConstsOutputs(op_sig, 1,
1,
1)
.ok()) {
return absl::InvalidArgumentError(
"Op can only handle 1 or 2 operand(s).");
}
if (op_sig.inputs[1].dims.size() != 1) {
return absl::UnimplementedError("Only support 1D indices\n");
}
return op_sig.inputs.at(1).type == kTfLiteInt32
? absl::OkStatus()
: absl::UnimplementedError("Only accept INT32 indices\n");
case kTfLiteBuiltinHardSwish:
return CheckInputsOutputs(op_sig, 1,
1);
case kTfLiteBuiltinLstm: {
const TfLiteLSTMParams* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(op_sig, &tf_options));
switch (tf_options->kernel_type) {
case kTfLiteLSTMFullKernel: {
const int inputs = op_sig.inputs.size();
if (inputs != 20 && inputs != 24) {
return absl::InternalError(
absl::StrCat("Expected 20 or 24 input tensors, but node has ",
inputs, " input(s)."));
}
const int runtime_outputs = op_sig.outputs.size();
if (runtime_outputs != 1) {
return absl::InternalError(
absl::StrCat("Expected 1 output tensor, but node has ",
runtime_outputs, " output(s)."));
}
if (tf_options->activation != kTfLiteActSigmoid &&
tf_options->activation != kTfLiteActTanh) {
return absl::UnimplementedError(absl::StrCat(
"Only sigmoid or tanh activation is supported, but node has ",
tf_options->activation));
}
return absl::OkStatus();
}
case kTfLiteLSTMBasicKernel:
RETURN_IF_ERROR(
CheckInputsConstsOutputs(op_sig, 3,
2,
4));
if (tf_options->activation != kTfLiteActTanh) {
return absl::UnimplementedError(
absl::StrCat("Only TANH activation is supported. but node has ",
tf_options->activation));
}
if (tf_options->cell_clip != 0.0f) {
return absl::UnimplementedError("cell_clip is not supported.");
}
if (tf_options->proj_clip != 0.0f) {
return absl::UnimplementedError("proj_clip is not supported.");
}
return absl::OkStatus();
}
}
case kTfLiteBuiltinMaxPool2d:
return CheckPooling2DGpuDelegateCompatibility(op_sig);
case kTfLiteBuiltinMean: {
RETURN_IF_ERROR(CheckInputsConstsOutputs(op_sig,
1,
1,
1));
return CheckAxesAreInt32Const(op_sig, 1);
}
case kTfLiteBuiltinMul: {
if (op_sig.inputs.size() != 2) {
return absl::UnimplementedError("MUL requires two input tensors.");
}
const auto& input0 = op_sig.inputs.at(0);
const auto& input1 = op_sig.inputs.at(1);
if (input0.dims.size() == input1.dims.size()) {
bool first_has_smaller_dim = false;
bool second_has_smaller_dim = false;
for (int i = 0; i < input0.dims.size(); ++i) {
if (input0.dims[i] < input1.dims[i]) {
first_has_smaller_dim = true;
}
if (input1.dims[i] < input0.dims[i]) {
second_has_smaller_dim = true;
}
}
if (first_has_smaller_dim && second_has_smaller_dim) {
return absl::UnimplementedError(
"MUL requires one tensor that not less than second in all "
"dimensions.");
}
} else {
const auto& input0 = op_sig.inputs.at(0);
const auto& input1 = op_sig.inputs.at(1);
auto broadcastable =
CheckAddMulBroadcastCompatibility(input0, input1, flags);
if (!broadcastable.ok()) {
return broadcastable;
}
}
const TfLiteMulParams* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(op_sig, &tf_options));
return IsActivationSupported(tf_options->activation);
}
case kTfLiteBuiltinPack:
return absl::OkStatus();
case kTfLiteBuiltinOneHot:
return CheckOneHotGpuDelegateCompatibility(op_sig);
case kTfLiteBuiltinQuantize:
RETURN_IF_ERROR(CheckInputsOutputs(op_sig,
1,
1));
return absl::OkStatus();
case kTfLiteBuiltinReluN1To1:
return absl::OkStatus();
case kTfLiteBuiltinPrelu:
return absl::OkStatus();
case kTfLiteBuiltinReshape:
RETURN_IF_ERROR(CheckInputsOutputs(op_sig,
1,
1));
return absl::OkStatus();
case kTfLiteBuiltinSelect:
case kTfLiteBuiltinSelectV2:
return CheckSelectV2GpuDelegateCompatibility(op_sig);
case kTfLiteBuiltinSlice: {
if (op_sig.inputs.size() < 3) {
return absl::UnimplementedError(
absl::StrCat("SLICE requires 3 inputs, but node has ",
op_sig.inputs.size(), " inputs."));
}
const auto& input = op_sig.inputs.at(0);
if (input.dims.size() != 3 && input.dims.size() != 4) {
return absl::UnimplementedError(absl::StrCat(
"SLICE supports for 3 or 4 dimensional tensors only, but node has ",
input.dims.size(), " dimensional tensors."));
}
return absl::OkStatus();
}
case kTfLiteBuiltinSoftmax: {
const TfLiteSoftmaxParams* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(op_sig, &tf_options));
if (tf_options->beta != 1) {
return absl::UnimplementedError("Softmax.beta != 1 is not supported.");
}
return absl::OkStatus();
}
case kTfLiteBuiltinSpaceToDepth: {
RETURN_IF_ERROR(CheckInputsOutputs(op_sig,
1,
1));
const TfLiteSpaceToDepthParams* s2d_params;
RETURN_IF_ERROR(RetrieveBuiltinData(op_sig, &s2d_params));
if (s2d_params->block_size == 1) {
return absl::InvalidArgumentError(
"SPACE_TO_DEPTH block_size = 1 is a no-op.");
}
if (s2d_params->block_size < 1) {
return absl::InvalidArgumentError(
"SPACE_TO_DEPTH block_size must be > 1.");
}
return absl::OkStatus();
}
case kTfLiteBuiltinSplit:
return absl::OkStatus();
case kTfLiteBuiltinSplitV:
return absl::OkStatus();
case kTfLiteBuiltinStridedSlice: {
const TfLiteStridedSliceParams* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(op_sig, &tf_options));
if (tf_options->ellipsis_mask) {
return absl::UnimplementedError(
"Slice does not support ellipsis_mask.");
}
if (tf_options->new_axis_mask) {
return absl::UnimplementedError(
"Slice does not support new_axis_mask.");
}
if (tf_options->shrink_axis_mask) {
return absl::UnimplementedError(
"Slice does not support shrink_axis_mask parameter. ");
}
if (op_sig.inputs.size() < 4) {
return absl::UnimplementedError("STRIDED_SLICE requires 4 inputs.");
}
const auto& input = op_sig.inputs.at(0);
if (input.dims.size() != 3 && input.dims.size() != 4) {
return absl::UnimplementedError(
"STRIDED_SLICE supports for 3 or 4 dimensional tensors only.");
}
return absl::OkStatus();
}
case kTfLiteBuiltinTile:
RETURN_IF_ERROR(CheckInputsOutputs(op_sig,
1,
1));
return absl::OkStatus();
case kTfLiteBuiltinTranspose:
RETURN_IF_ERROR(CheckInputsOutputs(op_sig,
1,
1));
return absl::OkStatus();
case kTfLiteBuiltinTransposeConv: {
RETURN_IF_ERROR(CheckConvoultionInputOutput(op_sig));
const TfLiteTransposeConvParams* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(op_sig, &tf_options));
RETURN_IF_ERROR(
CheckStrides(tf_options->stride_height, tf_options->stride_width));
return absl::OkStatus();
}
case kTfLiteBuiltinResizeBilinear: {
RETURN_IF_ERROR(CheckInputsOutputs(op_sig,
1,
1));
const TfLiteResizeBilinearParams* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(op_sig, &tf_options));
if (tf_options->align_corners && tf_options->half_pixel_centers) {
return absl::InternalError(
"If half_pixel_centers is True, align_corners must be False.");
}
return absl::OkStatus();
}
case kTfLiteBuiltinResizeNearestNeighbor: {
RETURN_IF_ERROR(CheckInputsOutputs(op_sig,
1,
1));
const TfLiteResizeNearestNeighborParams* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(op_sig, &tf_options));
return absl::OkStatus();
}
case kTfLiteBuiltinRelu:
case kTfLiteBuiltinRelu6:
case kTfLiteBuiltinLeakyRelu:
return absl::OkStatus();
case kTfLiteBuiltinReduceMax:
case kTfLiteBuiltinReduceMin:
case kTfLiteBuiltinReduceProd:
case kTfLiteBuiltinSum: {
RETURN_IF_ERROR(CheckInputsOutputs(op_sig,
1,
1));
return CheckAxesAreInt32Const(op_sig, 1);
}
case kTfLiteBuiltinPad:
case kTfLiteBuiltinPadv2:
case kTfLiteBuiltinMirrorPad: {
if (opcode == kTfLiteBuiltinMirrorPad) {
const TfLiteMirrorPaddingParams* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(op_sig, &tf_options));
if (tf_options->mode !=
TfLiteMirrorPaddingMode::kTfLiteMirrorPaddingReflect) {
return absl::InvalidArgumentError(
absl::StrCat("Only Reflective padding is supported for Mirror "
"Pad operation. But node has ",
tf_options->mode));
}
}
RETURN_IF_ERROR(CheckInputsOutputs(op_sig,
1,
1));
RETURN_IF_ERROR(CheckTensorIsAvailable(op_sig, 1));
auto& pad_tensor = op_sig.inputs.at(1);
if (pad_tensor.dims.size() != 2) {
return absl::InvalidArgumentError(absl::StrCat(
"Invalid paddings tensor dimension: expected 2 dim, got ",
pad_tensor.dims.size(), " dim"));
}
bool supported = pad_tensor.dims[0] == 3 || pad_tensor.dims[0] == 4;
if (!supported || pad_tensor.dims[1] != 2) {
return absl::InvalidArgumentError(absl::StrCat(
"Invalid paddings tensor shape: expected 4x2 or 3x2, got ",
pad_tensor.dims[0], "x", pad_tensor.dims[1]));
}
return absl::OkStatus();
}
case kTfLiteBuiltinReverseV2: {
RETURN_IF_ERROR(CheckInputsConstsOutputs(op_sig,
1,
1,
1));
return CheckAxesAreInt32Const(op_sig, 1);
}
case kTfLiteBuiltinAbs:
case kTfLiteBuiltinCeil:
case kTfLiteBuiltinCos:
case kTfLiteBuiltinElu:
case kTfLiteBuiltinExp:
case kTfLiteBuiltinFloor:
case kTfLiteBuiltinGelu:
case kTfLiteBuiltinLog:
case kTfLiteBuiltinLogistic:
case kTfLiteBuiltinNeg:
case kTfLiteBuiltinRsqrt:
case kTfLiteBuiltinSign:
case kTfLiteBuiltinSin:
case kTfLiteBuiltinSqrt:
case kTfLiteBuiltinSquare:
case kTfLiteBuiltinTanh:
return (CheckInputsConstsOutputs(op_sig, 1,
0,
1));
case kTfLiteBuiltinAtan2:
case kTfLiteBuiltinDiv:
case kTfLiteBuiltinEqual:
case kTfLiteBuiltinFloorDiv:
case kTfLiteBuiltinFloorMod:
case kTfLiteBuiltinGreater:
case kTfLiteBuiltinGreaterEqual:
case kTfLiteBuiltinLogicalAnd:
case kTfLiteBuiltinLess:
case kTfLiteBuiltinLessEqual:
case kTfLiteBuiltinMaximum:
case kTfLiteBuiltinMinimum:
case kTfLiteBuiltinNotEqual:
case kTfLiteBuiltinPow:
case kTfLiteBuiltinStablehloRemainder:
case kTfLiteBuiltinSquaredDifference:
case kTfLiteBuiltinSub: {
if (!CheckInputsConstsOutputs(op_sig, 2,
0,
1)
.ok() &&
!CheckInputsConstsOutputs(op_sig, 1,
1,
1)
.ok()) {
return absl::InvalidArgumentError(
"Op can only handle 1 or 2 operand(s).");
}
TfLiteFusedActivation activation = kTfLiteActNone;
if (opcode == kTfLiteBuiltinDiv) {
const TfLiteDivParams* tf_options;
auto status = RetrieveBuiltinData(op_sig, &tf_options);
activation = status.ok() ? tf_options->activation : kTfLiteActNone;
} else if (opcode == kTfLiteBuiltinSub) {
const TfLiteSubParams* tf_options;
auto status = RetrieveBuiltinData(op_sig, &tf_options);
activation = status.ok() ? tf_options->activation : kTfLiteActNone;
}
return IsActivationSupported(activation);
}
case kTfLiteBuiltinStablehloBroadcastInDim:
if (!CheckInputsConstsOutputs(op_sig, 1,
1,
1)
.ok()) {
return absl::InvalidArgumentError(
"requires one runtime input, one const input, and one output");
}
if (op_sig.inputs[1].dims.size() != 1) {
return absl::InvalidArgumentError("Only support 1D indices");
}
if (op_sig.inputs[1].type != kTfLiteInt32) {
return absl::InvalidArgumentError("Only support int32 indices");
}
if (op_sig.inputs[0].dims.size() != op_sig.inputs[1].dims[0]) {
return absl::InvalidArgumentError(
"Require size(indices) = rank(operand)");
}
return absl::OkStatus();
case kTfLiteBuiltinStablehloCbrt:
if (op_sig.inputs[0].type != kTfLiteFloat16 &&
op_sig.inputs[0].type != kTfLiteFloat32 &&
op_sig.inputs[0].type != kTfLiteBFloat16) {
return absl::InvalidArgumentError("Only support float inputs");
}
if (op_sig.inputs[0].type != op_sig.outputs[0].type) {
return absl::InvalidArgumentError("Input and output types must match");
}
return CheckInputsConstsOutputs(op_sig, 1,
0,
1);
case kTfLiteBuiltinStablehloClamp:
if ((op_sig.inputs.at(0).type != op_sig.inputs.at(1).type) ||
(op_sig.inputs.at(1).type != op_sig.inputs.at(2).type)) {
return absl::InvalidArgumentError(
"Clamp tensors must all be the same type");
}
if ((op_sig.inputs.at(0).dims != op_sig.inputs.at(1).dims) &&
(NumElements(op_sig.inputs.at(0).dims) != 1)) {
return absl::InvalidArgumentError(
"Min tensor must be the same shape as the input, or a scalar");
}
if ((op_sig.inputs.at(2).dims != op_sig.inputs.at(1).dims) &&
(NumElements(op_sig.inputs.at(0).dims) != 1)) {
return absl::InvalidArgumentError(
"Max tensor must be the same shape as the input, or a scalar");
}
return CheckInputsConstsOutputs(op_sig, 3,
0,
1);
case kTfLiteBuiltinCustom:
return CheckCustomOpsGpuDelegateCompatibility(op_sig);
default:
break;
}
return absl::InvalidArgumentError(absl::StrCat(
"Not supported op ", tflite::EnumNamesBuiltinOperator()[op_sig.op]));
}
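// Overload used for flatbuffer models: builds an OpSignature for the operator,
// runs the compatibility check with enhanced-broadcast rules, and frees the
// parsed builtin data before returning the status.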
absl::Status CheckGpuDelegateCompatibility(const OperatorCode* op_code,
const Operator* op,
const SubGraph* subgraph,
const Model* model) {
OpSignature op_sig = GetOpSignature(op_code, op, subgraph, model);
auto status = CheckGpuDelegateCompatibility(
op_sig, GpuCompatibilityFlags::kEnhancedBroadcast);
if (op_sig.builtin_data) {
free(op_sig.builtin_data);
}
return status;
}
absl::Status CheckGpuDelegateCompatibility(
const TfLiteContext* context, const TfLiteNode* node,
const TfLiteRegistration* registration, GpuCompatibilityFlags flags) {
return CheckGpuDelegateCompatibility(
GetOpSignature(context, node, registration), flags);
}
} | #include "tensorflow/lite/tools/versioning/gpu_compatibility.h"
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/model_builder.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/tools/versioning/op_signature.h"
namespace tflite {
namespace {
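// Test helper: runs the per-operator compatibility check on every operator in
// every subgraph of the model and returns the first failure, if any.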
absl::Status CheckGpuDelegateCompatibility(const tflite::Model* model) {
auto subgraphs = model->subgraphs();
for (int i = 0; i < subgraphs->Length(); ++i) {
const SubGraph* subgraph = subgraphs->Get(i);
for (int j = 0; j < subgraph->operators()->Length(); ++j) {
const Operator* op = subgraph->operators()->Get(j);
const OperatorCode* op_code =
model->operator_codes()->Get(op->opcode_index());
auto status = CheckGpuDelegateCompatibility(op_code, op, subgraph, model);
if (!status.ok()) {
return status;
}
}
}
return absl::OkStatus();
}
}
TEST(CheckGpuDelegateCompatibility, Conv2DModel) {
const std::string& full_path = tensorflow::GetDataDependencyFilepath(
"tensorflow/lite/testdata/conv_huge_im2col.bin");
auto model = FlatBufferModel::BuildFromFile(full_path.data());
ASSERT_TRUE(model);
EXPECT_TRUE(CheckGpuDelegateCompatibility(model->GetModel()).ok());
}
TEST(CheckGpuDelegateCompatibility, Conv3DModel) {
const std::string& full_path = tensorflow::GetDataDependencyFilepath(
"tensorflow/lite/testdata/conv3d_huge_im2col.bin");
auto model = FlatBufferModel::BuildFromFile(full_path.data());
ASSERT_TRUE(model);
EXPECT_EQ(CheckGpuDelegateCompatibility(model->GetModel()).message(),
"Not supported op CONV_3D");
}
TEST(CheckGpuDelegateCompatibility, FlexModel) {
const std::string& full_path = tensorflow::GetDataDependencyFilepath(
"tensorflow/lite/testdata/multi_add_flex.bin");
auto model = FlatBufferModel::BuildFromFile(full_path.data());
ASSERT_TRUE(model);
EXPECT_EQ(CheckGpuDelegateCompatibility(model->GetModel()).message(),
"Not supported custom op FlexAddV2");
}
TEST(CheckGpuDelegateCompatibility, FCConstInput) {
OpSignature op_sig = OpSignature();
op_sig.op = BuiltinOperator_FULLY_CONNECTED;
auto params = std::make_unique<TfLiteFullyConnectedParams>();
params->weights_format = kTfLiteFullyConnectedWeightsFormatDefault;
op_sig.builtin_data = static_cast<void*>(params.get());
op_sig.inputs = std::vector<OpSignatureTensorSpec>(1);
op_sig.inputs[0] = OpSignatureTensorSpec();
op_sig.inputs[0].is_const = true;
EXPECT_EQ(CheckGpuDelegateCompatibility(op_sig).message(),
"FullyConnected doesn't support constant input.");
}
TEST(CheckGpuDelegateCompatibility, Add1Dto3DBroadcastSuccess) {
OpSignature op_sig = OpSignature();
op_sig.op = BuiltinOperator_ADD;
auto params = std::make_unique<TfLiteAddParams>();
op_sig.builtin_data = static_cast<void*>(params.get());
op_sig.inputs = std::vector<OpSignatureTensorSpec>(2);
op_sig.inputs[0] = OpSignatureTensorSpec();
op_sig.inputs[0].dims = {4, 1, 2};
op_sig.inputs[1] = OpSignatureTensorSpec();
op_sig.inputs[1].dims = {2};
EXPECT_TRUE(CheckGpuDelegateCompatibility(op_sig).message().empty());
}
TEST(CheckGpuDelegateCompatibility, Add2Dto3DBroadcastFail) {
OpSignature op_sig = OpSignature();
op_sig.op = BuiltinOperator_ADD;
auto params = std::make_unique<TfLiteAddParams>();
op_sig.builtin_data = static_cast<void*>(params.get());
op_sig.inputs = std::vector<OpSignatureTensorSpec>(2);
op_sig.inputs[0] = OpSignatureTensorSpec();
op_sig.inputs[0].dims = {1, 100, 256};
op_sig.inputs[1] = OpSignatureTensorSpec();
op_sig.inputs[1].dims = {100, 256};
EXPECT_EQ(CheckGpuDelegateCompatibility(op_sig).message(),
"Doesn't support broadcasting - input0: [1,100,256], input1: "
"[100,256]");
}
TEST(CheckGpuDelegateCompatibility, Add3Dto4DBroadcastFail) {
OpSignature op_sig = OpSignature();
op_sig.op = BuiltinOperator_ADD;
auto params = std::make_unique<TfLiteAddParams>();
op_sig.builtin_data = static_cast<void*>(params.get());
op_sig.inputs = std::vector<OpSignatureTensorSpec>(2);
op_sig.inputs[0] = OpSignatureTensorSpec();
op_sig.inputs[0].dims = {4, 1, 1, 2};
op_sig.inputs[1] = OpSignatureTensorSpec();
op_sig.inputs[1].dims = {1, 1, 2};
EXPECT_EQ(
CheckGpuDelegateCompatibility(op_sig).message(),
"Doesn't support broadcasting - input0: [4,1,1,2], input1: [1,1,2]");
}
TEST(CheckGpuDelegateCompatibility, Add3Dto4DBroadcastSuccess) {
OpSignature op_sig = OpSignature();
op_sig.op = BuiltinOperator_ADD;
auto params = std::make_unique<TfLiteAddParams>();
op_sig.builtin_data = static_cast<void*>(params.get());
op_sig.inputs = std::vector<OpSignatureTensorSpec>(2);
op_sig.inputs[0] = OpSignatureTensorSpec();
op_sig.inputs[0].dims = {1, 128, 513, 3};
op_sig.inputs[1] = OpSignatureTensorSpec();
op_sig.inputs[1].dims = {128, 513, 3};
EXPECT_TRUE(CheckGpuDelegateCompatibility(op_sig).message().empty());
}
TEST(CheckGpuDelegateCompatibility, Add2Dto4DBroadcastSuccess) {
OpSignature op_sig = OpSignature();
op_sig.op = BuiltinOperator_ADD;
auto params = std::make_unique<TfLiteAddParams>();
op_sig.builtin_data = static_cast<void*>(params.get());
op_sig.inputs = std::vector<OpSignatureTensorSpec>(2);
op_sig.inputs[0] = OpSignatureTensorSpec();
op_sig.inputs[0].dims = {1, 512, 512, 1};
op_sig.inputs[1] = OpSignatureTensorSpec();
op_sig.inputs[1].dims = {1, 1};
EXPECT_TRUE(CheckGpuDelegateCompatibility(op_sig).message().empty());
}
TEST(CheckGpuDelegateCompatibility, Add2Dto4DBroadcastSuccess2) {
OpSignature op_sig = OpSignature();
op_sig.op = BuiltinOperator_ADD;
auto params = std::make_unique<TfLiteAddParams>();
op_sig.builtin_data = static_cast<void*>(params.get());
op_sig.inputs = std::vector<OpSignatureTensorSpec>(2);
op_sig.inputs[0] = OpSignatureTensorSpec();
op_sig.inputs[0].dims = {1, 384, 384, 3};
op_sig.inputs[1] = OpSignatureTensorSpec();
op_sig.inputs[1].dims = {1, 1};
EXPECT_TRUE(CheckGpuDelegateCompatibility(op_sig).message().empty());
}
TEST(CheckGpuDelegateCompatibility, Add2Dto4DBroadcastSuccess3) {
OpSignature op_sig = OpSignature();
op_sig.op = BuiltinOperator_ADD;
auto params = std::make_unique<TfLiteAddParams>();
op_sig.builtin_data = static_cast<void*>(params.get());
op_sig.inputs = std::vector<OpSignatureTensorSpec>(2);
op_sig.inputs[0] = OpSignatureTensorSpec();
op_sig.inputs[0].dims = {1, 4, 4, 10};
op_sig.inputs[1] = OpSignatureTensorSpec();
op_sig.inputs[1].dims = {1, 10};
EXPECT_TRUE(CheckGpuDelegateCompatibility(op_sig).message().empty());
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/versioning/gpu_compatibility.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/versioning/gpu_compatibility_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d89b3608-8520-4260-8f56-7949b38f8dce | cpp | tensorflow/tensorflow | call | tensorflow/lite/experimental/acceleration/mini_benchmark/call.cc | tensorflow/lite/experimental/acceleration/mini_benchmark/call_test.cc | #include <stddef.h>
#include <cstring>
#include <sstream>
#include <string>
#include <vector>
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/lite/context_util.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/call_register.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/op_macros.h"
namespace tflite {
namespace acceleration {
namespace ops {
namespace call_kernel {
namespace {
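// Returns true when both tensors have the same rank and identical extents in
// every dimension except the leading (batch) dimension.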
bool MatchDimensionsExceptBatchSize(TfLiteTensor* a, TfLiteTensor* b) {
if (a->dims->size != b->dims->size) {
return false;
}
for (int i = 1; i < a->dims->size; ++i) {
if (a->dims->data[i] != b->dims->data[i]) {
return false;
}
}
return true;
}
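// Checks that each call-node input matches the corresponding subgraph input in
// type and in all non-batch dimensions, and that its batch size equals the
// loop count; subgraph inputs with no shape yet are resized to a single-batch
// copy of the call input's shape.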
TfLiteStatus ValidateAndResizeInputsIfNeeded(TfLiteContext* context,
TfLiteNode* node,
Subgraph* subgraph,
int loop_count) {
TF_LITE_ENSURE_EQ(context, subgraph->inputs().size(), node->inputs->size);
for (int i = 0; i < node->inputs->size; ++i) {
TfLiteTensor* node_input = context->tensors + node->inputs->data[i];
TfLiteTensor* subgraph_input = subgraph->tensor(subgraph->inputs()[i]);
TF_LITE_ENSURE_TYPES_EQ(context, node_input->type, subgraph_input->type);
TF_LITE_ENSURE_MSG(
context, node_input->dims->size > 0,
"Dimensions of all of call node's inputs should be non-zero.");
TF_LITE_ENSURE_EQ(context, node_input->dims->data[0], loop_count);
if (!subgraph_input->dims->size) {
std::vector<int> new_dims;
new_dims.reserve(node_input->dims->size);
new_dims.push_back(1);
new_dims.insert(new_dims.end(), node_input->dims->data + 1,
node_input->dims->data + node_input->dims->size);
subgraph->ResizeInputTensor(subgraph->inputs()[i], new_dims);
} else {
if (!MatchDimensionsExceptBatchSize(node_input, subgraph_input)) {
std::stringstream node_input_dims, subgraph_input_dims;
for (int i = 0; i < node_input->dims->size; i++) {
node_input_dims << node_input->dims->data[i] << " ";
subgraph_input_dims << subgraph_input->dims->data[i] << " ";
}
TF_LITE_KERNEL_LOG(
context,
"%s:%d: All dimensions except the batch size should match for call "
"node and the subgraph to invoke (input tensor %s[ %s], subgraph "
"tensor %s[ %s])",
__FILE__, __LINE__, node_input->name, node_input_dims.str().c_str(),
subgraph_input->name, subgraph_input_dims.str().c_str());
return kTfLiteError;
}
TF_LITE_ENSURE_EQ(context, subgraph_input->dims->data[0], 1);
}
}
return kTfLiteOk;
}
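// Resizes each call-node output so its leading dimension equals the loop count
// while the remaining dimensions and type mirror the corresponding subgraph
// output.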
TfLiteStatus ValidateAndResizeOutputs(TfLiteContext* context, TfLiteNode* node,
Subgraph* subgraph, int loop_count) {
TF_LITE_ENSURE_EQ(context, subgraph->outputs().size(), node->outputs->size);
for (int i = 0; i < node->outputs->size; ++i) {
const TfLiteTensor* subgraph_output =
subgraph->tensor(subgraph->outputs()[i]);
TfLiteTensor* node_output = context->tensors + node->outputs->data[i];
TF_LITE_ASSERT(subgraph_output->dims->size > 0);
TfLiteIntArray* new_dims_array = TfLiteIntArrayCopy(subgraph_output->dims);
new_dims_array->data[0] = loop_count;
TF_LITE_ENSURE_OK(
context, context->ResizeTensor(context, node_output, new_dims_array));
node_output->type = subgraph_output->type;
}
return kTfLiteOk;
}
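// Copies the loop_index-th batch slice of each call-node input into the
// corresponding subgraph input tensor.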
TfLiteStatus CopyInputTensorsData(TfLiteContext* context, TfLiteNode* node,
Subgraph* dst_subgraph, int loop_index,
int loop_count) {
const std::vector<int>& dst_tensor_indices = dst_subgraph->inputs();
TF_LITE_ENSURE_EQ(context, node->inputs->size, dst_tensor_indices.size());
for (int i = 0; i < dst_tensor_indices.size(); ++i) {
TfLiteTensor* src_tensor = context->tensors + node->inputs->data[i];
TfLiteTensor* dst_tensor = dst_subgraph->tensor(dst_tensor_indices[i]);
size_t offset = src_tensor->bytes / loop_count * loop_index;
TF_LITE_ENSURE_EQ(context, src_tensor->bytes / loop_count,
dst_tensor->bytes);
memcpy(dst_tensor->data.raw, src_tensor->data.raw + offset,
src_tensor->bytes / loop_count);
}
return kTfLiteOk;
}
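// Copies each subgraph output back into the loop_index-th batch slice of the
// corresponding call-node output tensor.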
TfLiteStatus CopyOutputTensorsData(TfLiteContext* context,
Subgraph* src_subgraph, TfLiteNode* node,
int loop_index, int loop_count) {
const std::vector<int>& src_tensor_indices = src_subgraph->outputs();
TF_LITE_ENSURE_EQ(context, src_tensor_indices.size(), node->outputs->size);
for (int i = 0; i < src_tensor_indices.size(); ++i) {
const TfLiteTensor* src_tensor =
src_subgraph->tensor(src_tensor_indices[i]);
TfLiteTensor* dst_tensor = context->tensors + node->outputs->data[i];
size_t offset = dst_tensor->bytes / loop_count * loop_index;
TF_LITE_ENSURE_EQ(context, src_tensor->bytes,
dst_tensor->bytes / loop_count);
memcpy(dst_tensor->data.raw + offset, src_tensor->data.raw,
src_tensor->bytes);
}
return kTfLiteOk;
}
}
struct OpData {
int subgraph_index;
int loop_count;
};
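// Parses the flexbuffer custom-options map ("subgraph_index", "loop_count")
// into a heap-allocated OpData that becomes the node's user_data.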
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
if (!buffer) {
return nullptr;
}
auto* op_data = new OpData;
const uint8_t* buffer_fixed_width = reinterpret_cast<const uint8_t*>(buffer);
const flexbuffers::Map& map =
flexbuffers::GetRoot(buffer_fixed_width, length).AsMap();
op_data->subgraph_index = map["subgraph_index"].AsInt32();
op_data->loop_count = map["loop_count"].AsInt32();
return op_data;
}
void Free(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<OpData*>(buffer);
}
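// Validates the target subgraph index and loop count, resizes the invoked
// subgraph's inputs and this node's outputs to match the loop count, allocates
// the subgraph's tensors, and rejects subgraphs with dynamic tensors.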
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
TF_LITE_ENSURE(context, op_data);
Subgraph* this_subgraph = reinterpret_cast<Subgraph*>(context->impl_);
auto* subgraphs = this_subgraph->GetSubgraphs();
TF_LITE_ENSURE_MSG(context,
(op_data->subgraph_index < subgraphs->size()) &&
(op_data->subgraph_index >= 0),
"Index of subgraph to be invoked is invalid.");
Subgraph* subgraph = (*subgraphs)[op_data->subgraph_index].get();
TF_LITE_ENSURE_MSG(
context, subgraph != this_subgraph,
"Subgraph to invoke must be different from the invoking graph.");
int loop_count = op_data->loop_count;
TF_LITE_ENSURE_MSG(context, loop_count >= 0, "Loop count must be positive. ");
TF_LITE_ENSURE_OK(context, ValidateAndResizeInputsIfNeeded(
context, node, subgraph, loop_count));
TF_LITE_ENSURE_OK(context, subgraph->AllocateTensors());
TF_LITE_ENSURE_OK(
context, ValidateAndResizeOutputs(context, node, subgraph, loop_count));
TF_LITE_ENSURE(context, !subgraph->HasDynamicTensors());
return kTfLiteOk;
}
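// Runs the invoked subgraph once per loop iteration, copying one batch slice
// of each input in before Invoke and one slice of each output out afterwards.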
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
Subgraph* this_subgraph = reinterpret_cast<Subgraph*>(context->impl_);
auto* subgraphs = this_subgraph->GetSubgraphs();
Subgraph* subgraph = (*subgraphs)[op_data->subgraph_index].get();
for (int loop_index = 0; loop_index < op_data->loop_count; loop_index++) {
TF_LITE_ENSURE_OK(context,
CopyInputTensorsData(context, node, subgraph, loop_index,
op_data->loop_count));
TF_LITE_ENSURE_OK(context, subgraph->Invoke());
for (int tensor_index : subgraph->outputs()) {
subgraph->EnsureTensorDataIsReadable(tensor_index);
}
TF_LITE_ENSURE_OK(context,
CopyOutputTensorsData(context, subgraph, node, loop_index,
op_data->loop_count));
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_CALL() {
static TfLiteRegistration r = {call_kernel::Init, call_kernel::Free,
call_kernel::Prepare, call_kernel::Eval};
return &r;
}
}
}
} | #include <cstddef>
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/core/kernels/builtin_op_kernels.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/call_register.h"
#include "tensorflow/lite/interpreter_test_util.h"
#include "tensorflow/lite/kernels/subgraph_test_util.h"
#include "tensorflow/lite/testing/util.h"
namespace tflite {
namespace {
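// Fixture that assembles interpreters containing a CALL custom op plus callee
// subgraphs, with helpers for setting up tensors, building the CALL node from
// flexbuffer params, and constructing multi-node test graphs.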
class CallTest : public subgraph_test_util::ControlFlowOpTest {
public:
CallTest() { interpreter_ = std::make_unique<Interpreter>(&error_reporter_); }
~CallTest() override = default;
void SetupTensor(Subgraph* subgraph, int tensor_index, TfLiteType type) {
ASSERT_EQ(subgraph->SetTensorParametersReadWrite(tensor_index, type, "", 0,
nullptr, {}, false),
kTfLiteOk);
}
void BuildCallSubgraph(Subgraph* subgraph, std::vector<uint8_t> params_buffer,
std::vector<int> inputs, std::vector<int> outputs,
int expected_node_index, bool single_node_subgraph) {
if (single_node_subgraph) {
int first_new_tensor_index;
ASSERT_EQ(subgraph->AddTensors(inputs.size() + outputs.size(),
&first_new_tensor_index),
kTfLiteOk);
ASSERT_EQ(first_new_tensor_index, 0);
ASSERT_EQ(subgraph->SetInputs(inputs), kTfLiteOk);
ASSERT_EQ(subgraph->SetOutputs(outputs), kTfLiteOk);
}
for (const int& idx : inputs) {
SetupTensor(subgraph, idx, kTfLiteInt32);
}
for (const int& idx : outputs) {
SetupTensor(subgraph, idx, kTfLiteInt32);
}
int node_index;
subgraph->AddNodeWithParameters(
inputs, outputs, {},
reinterpret_cast<const char*>(params_buffer.data()),
params_buffer.size(), nullptr, acceleration::ops::Register_CALL(),
&node_index);
ASSERT_EQ(node_index, expected_node_index);
}
void BuildCallSubgraph(Subgraph* subgraph, int index, int loop_count,
std::vector<int> inputs, std::vector<int> outputs,
int expected_node_index = 0,
bool single_node_subgraph = true) {
flexbuffers::Builder fbb;
fbb.Map([&] {
fbb.Int("subgraph_index", index);
fbb.Int("loop_count", loop_count);
});
fbb.Finish();
BuildCallSubgraph(subgraph, fbb.GetBuffer(), inputs, outputs,
expected_node_index, single_node_subgraph);
}
void BuildGraphWithMultipleOutputs(Subgraph* subgraph) {
const int kInput1 = 0;
const int kInput2 = 1;
const int kMulOutput = 2;
const int kAddOutput = 3;
const int kTensorCount = 4;
int first_new_tensor_index;
ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
kTfLiteOk);
ASSERT_EQ(first_new_tensor_index, 0);
ASSERT_EQ(subgraph->SetInputs({kInput1, kInput2}), kTfLiteOk);
ASSERT_EQ(subgraph->SetOutputs({kMulOutput, kAddOutput}), kTfLiteOk);
SetupTensor(subgraph, kInput1, kTfLiteInt32);
SetupTensor(subgraph, kInput2, kTfLiteInt32);
SetupTensor(subgraph, kMulOutput, kTfLiteInt32);
SetupTensor(subgraph, kAddOutput, kTfLiteInt32);
TfLiteMulParams* params_mul =
reinterpret_cast<TfLiteMulParams*>(malloc(sizeof(TfLiteMulParams)));
params_mul->activation = kTfLiteActNone;
int node_index;
subgraph->AddNodeWithParameters(
{kInput1, kInput2}, {kMulOutput}, {}, nullptr, 0, params_mul,
::tflite::ops::builtin::Register_MUL(), &node_index);
TfLiteAddParams* params_add =
reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
params_add->activation = kTfLiteActNone;
subgraph->AddNodeWithParameters(
{kInput1, kInput2}, {kAddOutput}, {}, nullptr, 0, params_add,
::tflite::ops::builtin::Register_ADD(), &node_index);
}
void BuildMultiNodeGraph(Subgraph* this_subgraph) {
const int kInput1 = 0, kInput2 = 1, kInput3 = 2, kInput4 = 3;
const int kOutput1 = 4, kOutput2 = 5, kOutput3 = 6;
const int kTensorCount = 7;
int first_new_tensor_index;
ASSERT_EQ(this_subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
kTfLiteOk);
ASSERT_EQ(first_new_tensor_index, 0);
std::vector<int> inputs = {kInput1, kInput2, kInput3, kInput4};
std::vector<int> outputs = {kOutput3};
ASSERT_EQ(this_subgraph->SetInputs(inputs), kTfLiteOk);
ASSERT_EQ(this_subgraph->SetOutputs({kOutput3}), kTfLiteOk);
for (int idx = 0; idx < kTensorCount; ++idx) {
SetupTensor(this_subgraph, idx, kTfLiteInt32);
}
int expected_node_index = 0, node_index;
auto* pad_reg = ops::builtin::Register_PAD();
pad_reg->builtin_code = kTfLiteBuiltinPad;
this_subgraph->AddNodeWithParameters(
{kInput2, kInput3}, {kOutput1}, {}, nullptr, 0,
reinterpret_cast<TfLitePadParams*>(malloc(sizeof(TfLitePadParams))),
pad_reg, &node_index);
ASSERT_EQ(node_index, expected_node_index++);
AddSubgraphs(1);
const int kLoopCount = 1;
const int kSubgraphIndex = 1;
builder_->BuildAddSubgraph(interpreter_->subgraph(1));
CallTest::BuildCallSubgraph(this_subgraph, kSubgraphIndex, kLoopCount,
{kInput1, kOutput1}, {kOutput2},
expected_node_index++, false);
TfLiteMulParams* mul_params =
reinterpret_cast<TfLiteMulParams*>(malloc(sizeof(TfLiteMulParams)));
mul_params->activation = kTfLiteActNone;
auto* mul_reg = ops::builtin::Register_MUL();
mul_reg->builtin_code = kTfLiteBuiltinMul;
this_subgraph->AddNodeWithParameters({kInput4, kOutput2}, {kOutput3}, {},
nullptr, 0, mul_params, mul_reg,
&node_index);
ASSERT_EQ(node_index, expected_node_index++);
}
TestErrorReporter error_reporter_;
};
TEST_F(CallTest, SubgraphMultipleInputsSingleOutput) {
std::vector<std::vector<int>> test_shapes = {
{3, 2}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
for (size_t i = 0; i < test_shapes.size(); ++i) {
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(1);
int loop_count = test_shapes[i][0];
builder_->BuildMulSubgraph(interpreter_->subgraph(1));
CallTest::BuildCallSubgraph(&interpreter_->primary_subgraph(), 1,
loop_count, {0, 1}, {2});
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], test_shapes[i]);
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], test_shapes[i]);
ASSERT_EQ(interpreter_->subgraph(1)->AllocateTensors(), kTfLiteOk);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
subgraph_test_util::FillIntTensor(
interpreter_->tensor(interpreter_->inputs()[0]), {-1, 2, -3, 4, -5, 6});
subgraph_test_util::FillIntTensor(
interpreter_->tensor(interpreter_->inputs()[1]), {-1, 2, -3, 4, -5, 6});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output = interpreter_->tensor(interpreter_->outputs()[0]);
subgraph_test_util::CheckIntTensor(output, test_shapes[i],
{1, 4, 9, 16, 25, 36});
}
}
TEST_F(CallTest, ShouldBeANoOpWhenLoopCountIsZero) {
AddSubgraphs(1);
int loop_count = 0;
builder_->BuildMulSubgraph(interpreter_->subgraph(1));
CallTest::BuildCallSubgraph(&interpreter_->primary_subgraph(), 1, loop_count,
{0, 1}, {2});
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {0, 3});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {0, 3});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output = interpreter_->tensor(interpreter_->outputs()[0]);
subgraph_test_util::CheckIntTensor(output, {0, 3}, {});
}
TEST_F(CallTest, SubgraphWithFixedInputShapes) {
AddSubgraphs(1);
const int kLoopCount = 2;
const int kBatchSizeSubgraph = 1;
const int kFixedInputLen = 3;
const std::vector<int> kCallOpInputShape = {kLoopCount, kFixedInputLen};
const std::vector<int> kSubgraphInputShape = {kBatchSizeSubgraph,
kFixedInputLen};
Subgraph* subgraph = interpreter_->subgraph(1);
builder_->BuildMulSubgraph(subgraph);
CallTest::BuildCallSubgraph(&interpreter_->primary_subgraph(), 1, kLoopCount,
{0, 1}, {2});
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], kCallOpInputShape);
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], kCallOpInputShape);
subgraph->ResizeInputTensor(subgraph->inputs()[0], kSubgraphInputShape);
subgraph->ResizeInputTensor(subgraph->inputs()[1], kSubgraphInputShape);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
subgraph_test_util::FillIntTensor(
interpreter_->tensor(interpreter_->inputs()[0]), {-1, 2, -3, 4, -5, 6});
subgraph_test_util::FillIntTensor(
interpreter_->tensor(interpreter_->inputs()[1]), {-1, 2, -3, 4, -5, 6});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output = interpreter_->tensor(interpreter_->outputs()[0]);
subgraph_test_util::CheckIntTensor(output, kCallOpInputShape,
{1, 4, 9, 16, 25, 36});
}
TEST_F(CallTest, SubgraphWithMultipleInputsAndOutputs) {
std::vector<std::vector<int>> test_shapes = {
{3, 2, 1}, {1, 2, 3}, {2, 1, 3}, {2, 3, 1, 1}, {2, 3}};
for (size_t i = 0; i < test_shapes.size(); ++i) {
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(1);
int loop_count = test_shapes[i][0];
CallTest::BuildGraphWithMultipleOutputs(interpreter_->subgraph(1));
CallTest::BuildCallSubgraph(&interpreter_->primary_subgraph(), 1,
loop_count, {0, 1}, {2, 3});
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], test_shapes[i]);
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], test_shapes[i]);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
subgraph_test_util::FillIntTensor(
interpreter_->tensor(interpreter_->inputs()[0]), {-1, 2, -3, 4, -5, 6});
subgraph_test_util::FillIntTensor(
interpreter_->tensor(interpreter_->inputs()[1]), {-1, 2, -3, 4, -5, 6});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output_mul = interpreter_->tensor(interpreter_->outputs()[0]);
subgraph_test_util::CheckIntTensor(output_mul, test_shapes[i],
{1, 4, 9, 16, 25, 36});
TfLiteTensor* output_add = interpreter_->tensor(interpreter_->outputs()[1]);
subgraph_test_util::CheckIntTensor(output_add, test_shapes[i],
{-2, 4, -6, 8, -10, 12});
}
}
TEST_F(CallTest, ShouldHandleInvalidParamsAndSetToDefault) {
flexbuffers::Builder fbb;
fbb.Vector([&]() {
fbb.String("hi");
fbb.String("hello");
});
fbb.Finish();
AddSubgraphs(1);
CallTest::BuildCallSubgraph(&interpreter_->primary_subgraph(),
fbb.GetBuffer(), {0}, {1}, 0, true);
const int kNodeIndex = 0;
const TfLiteNode* call_node = &interpreter_->primary_subgraph()
.nodes_and_registration()[kNodeIndex]
.first;
tflite::acceleration::ops::TfLiteCallParams* op_data =
reinterpret_cast<tflite::acceleration::ops::TfLiteCallParams*>(
call_node->user_data);
EXPECT_EQ(op_data->subgraph_index, 0);
EXPECT_EQ(op_data->loop_count, 0);
}
TEST_F(CallTest, MultiNodeGraph) {
CallTest::BuildMultiNodeGraph(&interpreter_->primary_subgraph());
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1, 4, 4, 1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1, 2, 2, 1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {4, 2});
interpreter_->ResizeInputTensor(interpreter_->inputs()[3], {1, 4, 4, 1});
ASSERT_EQ(interpreter_->subgraph(1)->AllocateTensors(), kTfLiteOk);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
subgraph_test_util::FillIntTensor(
interpreter_->tensor(interpreter_->inputs()[0]), std::vector<int>(16, 1));
subgraph_test_util::FillIntTensor(
interpreter_->tensor(interpreter_->inputs()[1]), {1, 2, 3, 4});
subgraph_test_util::FillIntTensor(
interpreter_->tensor(interpreter_->inputs()[2]),
{0, 0, 1, 1, 1, 1, 0, 0});
subgraph_test_util::FillIntTensor(
interpreter_->tensor(interpreter_->inputs()[3]), std::vector<int>(16, 2));
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output = interpreter_->tensor(interpreter_->outputs()[0]);
subgraph_test_util::CheckIntTensor(
output, {1, 4, 4, 1}, {2, 2, 2, 2, 2, 4, 6, 2, 2, 8, 10, 2, 2, 2, 2, 2});
}
TEST_F(CallTest, ShouldFailWith0DInputs) {
AddSubgraphs(1);
int loop_count = 5;
builder_->BuildMulSubgraph(interpreter_->subgraph(1));
interpreter_->subgraph(1)->ResizeInputTensor(0, {});
interpreter_->subgraph(1)->ResizeInputTensor(1, {});
CallTest::BuildCallSubgraph(&interpreter_->primary_subgraph(), 1, loop_count,
{0, 1}, {2});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteError);
EXPECT_THAT(
error_reporter_.error_messages(),
testing::HasSubstr(
"Dimensions of all of call node's inputs should be non-zero."));
}
TEST_F(CallTest, ShouldFailWhenLoopCountDoesNotMatchBatchSize) {
AddSubgraphs(1);
int loop_count = 7;
builder_->BuildMulSubgraph(interpreter_->subgraph(1));
CallTest::BuildCallSubgraph(&interpreter_->primary_subgraph(), 1, loop_count,
{0, 1}, {2});
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {5, 3});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {5, 3});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteError);
EXPECT_THAT(
error_reporter_.error_messages(),
testing::HasSubstr("node_input->dims->data[0] != loop_count (5 != 7)"));
}
TEST_F(CallTest, ShouldFailForSubgraphWithIncompatibleInputShapes) {
AddSubgraphs(1);
const int kLoopCount = 5;
const int kBatchSizeSubgraph = 1;
std::vector<int> call_op_input = {kLoopCount, 3};
std::vector<int> subgraph_input = {kBatchSizeSubgraph, 7};
Subgraph* subgraph = interpreter_->subgraph(1);
builder_->BuildMulSubgraph(subgraph);
CallTest::BuildCallSubgraph(&interpreter_->primary_subgraph(), 1, kLoopCount,
{0, 1}, {2});
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], call_op_input);
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], call_op_input);
subgraph->ResizeInputTensor(subgraph->inputs()[0], subgraph_input);
subgraph->ResizeInputTensor(subgraph->inputs()[1], subgraph_input);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteError);
EXPECT_THAT(
error_reporter_.error_messages(),
testing::HasSubstr("All dimensions except the batch size should match "
"for call node and the subgraph to invoke"));
}
TEST_F(CallTest, ShouldFailWhenSubgraphIndexMatchesInvokedSubgraph) {
const int kPrimarySubgraphIndex = 0;
CallTest::BuildCallSubgraph(&interpreter_->primary_subgraph(),
kPrimarySubgraphIndex, 1, {0}, {1});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteError);
EXPECT_THAT(
error_reporter_.error_messages(),
testing::HasSubstr(
"Subgraph to invoke must be different from the invoking graph."));
}
TEST_F(CallTest, ShouldFailWithNegativeLoopCount) {
AddSubgraphs(1);
CallTest::BuildCallSubgraph(&interpreter_->primary_subgraph(), 1, -1, {0},
{1});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteError);
EXPECT_THAT(error_reporter_.error_messages(),
testing::HasSubstr("Loop count must be positive."));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/mini_benchmark/call.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/mini_benchmark/call_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
303b5c72-09ca-49c0-9552-8dacb6f3caf9 | cpp | tensorflow/tensorflow | libjpeg_decoder | tensorflow/lite/experimental/acceleration/mini_benchmark/libjpeg_decoder.cc | tensorflow/lite/experimental/acceleration/mini_benchmark/libjpeg_decoder_test.cc | #include "tensorflow/lite/experimental/acceleration/mini_benchmark/libjpeg_decoder.h"
#include <setjmp.h>
#include <algorithm>
#include <cctype>
#include <cstddef>
#include <cstdio>
#include <cstring>
#include <functional>
#include <limits>
#include <memory>
#include <string>
#include "absl/strings/match.h"
#include "absl/strings/string_view.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/decode_jpeg_status.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/jpeg_decompress_buffered_struct.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/jpeg_header_parser.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/libjpeg_handle.h"
#include "tensorflow/lite/minimal_logging.h"
#include "tensorflow/lite/string_type.h"
#include "tensorflow/lite/string_util.h"
namespace tflite {
namespace acceleration {
namespace decode_jpeg_kernel {
const size_t LibjpegDecoder::kMaxImageHeight = 10000;
const size_t LibjpegDecoder::kMaxImageWidth = 10000;
constexpr char kSizeMismatchError[] =
"JPEG parameter struct mismatch: library thinks size is ";
LibjpegDecoder::Impl::Impl(size_t decompress_struct_size,
const LibjpegHandle* handle)
: decompress_struct_size_(decompress_struct_size),
handle_(handle),
cinfo_(decompress_struct_size) {
cinfo_.get()->err = handle->jpeg_std_error_(&jerr_);
jerr_.error_exit = ErrorExit;
cinfo_.get()->client_data = this;
}
void LibjpegDecoder::Impl::ErrorExit(j_common_ptr cinfo) {
Impl* const impl = reinterpret_cast<Impl*>(cinfo->client_data);
char message[JMSG_LENGTH_MAX];
cinfo->err->format_message(cinfo, message);
impl->status_.code = kTfLiteError;
impl->status_.error_message = message;
longjmp(impl->env_, 1);
}
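// Extracts the numeric struct size from libjpeg's "parameter struct mismatch:
// library thinks size is N" error message; returns an error Status when no
// digits follow the prefix.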
Status ExtractSizeFromErrorMessage(const std::string& error_message,
size_t& expected_size) {
Status status;
static const int kExpLengthStart = strlen(kSizeMismatchError);
int end = kExpLengthStart;
while (end < error_message.length() && std::isdigit(error_message[end])) {
end++;
}
if (end > kExpLengthStart) {
expected_size = std::stoi(error_message.substr(kExpLengthStart, end));
} else {
status.code = kTfLiteError;
status.error_message =
"Couldn't parse the size from message: \'" + error_message + "\'";
}
return status;
}
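// Builds a decoder by loading the libc and libjpeg handles, then probing
// jpeg_CreateDecompress with the compile-time sizeof(jpeg_decompress_struct);
// if the library reports a size mismatch, the size it expects is parsed from
// the error message and reused for every subsequent decode.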
std::unique_ptr<LibjpegDecoder> LibjpegDecoder::Create(Status& status) {
std::unique_ptr<LibjpegDecoder> decoder(
new LibjpegDecoder(LibCHandle::Create(status)));
if (status.code != kTfLiteOk) {
return nullptr;
}
decoder->libjpeg_handle_ = LibjpegHandle::Create(status);
if (decoder->libjpeg_handle_ == nullptr) {
return nullptr;
}
Impl impl(sizeof(jpeg_decompress_struct), decoder->libjpeg_handle_.get());
impl.jpeg_CreateDecompress(LibjpegHandle::kLibjpegVersion,
sizeof(jpeg_decompress_struct));
status = impl.status();
if (status.code == kTfLiteOk) {
decoder->expected_size_for_decompress_struct_ =
sizeof(jpeg_decompress_struct);
return decoder;
}
if (!absl::StrContains(status.error_message, kSizeMismatchError)) {
return nullptr;
}
status = ExtractSizeFromErrorMessage(
status.error_message, decoder->expected_size_for_decompress_struct_);
if (status.code != kTfLiteOk) {
return nullptr;
}
return decoder;
}
namespace {
std::string JpegHeaderToString(const JpegHeader& header) {
return "(" + std::to_string(header.height) + ", " +
std::to_string(header.width) + ", " + std::to_string(header.channels) +
", " + std::to_string(header.bits_per_sample) + ")";
}
}
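// Validates bit depth, channel count, and maximum dimensions, parses the JPEG
// header and checks it against the expected dimensions and output buffer size,
// then decompresses scanline by scanline into the caller-supplied buffer.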
Status LibjpegDecoder::DecodeImage(const tflite::StringRef& encoded,
const JpegHeader& expected_image_dimensions,
unsigned char* decoded,
const size_t& decoded_size) const {
if (expected_image_dimensions.bits_per_sample != 8) {
return {kTfLiteError, "Supporting only images with 8 bits per sample"};
}
if (expected_image_dimensions.channels != 1 &&
expected_image_dimensions.channels != 3) {
return {kTfLiteError, "Supporting only images with 1 or 3 channels"};
}
if (expected_image_dimensions.width > kMaxImageWidth ||
expected_image_dimensions.height > kMaxImageHeight) {
return {kTfLiteError, "Image is too big, dimensions (" +
std::to_string(expected_image_dimensions.width) +
"," +
std::to_string(expected_image_dimensions.width) +
") larger than the maximum allowed (" +
std::to_string(kMaxImageWidth) + ", " +
std::to_string(kMaxImageHeight) + ")"};
}
JpegHeader header;
Status read_header_status = ReadJpegHeader(encoded, &header);
if (read_header_status.code != kTfLiteOk) {
return read_header_status;
}
if (expected_image_dimensions.channels != header.channels ||
expected_image_dimensions.width != header.width ||
expected_image_dimensions.height != header.height ||
expected_image_dimensions.bits_per_sample != header.bits_per_sample) {
return {kTfLiteError, "Decoded image size " + JpegHeaderToString(header) +
" is different from provided image size " +
JpegHeaderToString(expected_image_dimensions)};
}
size_t header_image_size = static_cast<size_t>(header.width) *
static_cast<size_t>(header.height) *
static_cast<size_t>(header.channels);
if (header_image_size != decoded_size) {
return {kTfLiteError, "Size of buffer(" + std::to_string(decoded_size) +
") for storing decoded image must be equal to "
"the size of decoded image(" +
std::to_string(header_image_size) + ")."};
}
char* image_buffer = const_cast<char*>(encoded.str);
size_t image_size = encoded.len;
std::unique_ptr<FILE, std::function<void(FILE*)>> file(
libc_handle_.fmemopen(image_buffer, image_size, "r"),
[](FILE* f) { fclose(f); });
if (file == nullptr) {
return {kTfLiteError, "Fmemopen failed."};
}
Impl impl(expected_size_for_decompress_struct_, libjpeg_handle_.get());
if (impl.jpeg_CreateDecompress(LibjpegHandle::kLibjpegVersion,
expected_size_for_decompress_struct_)) {
return impl.status();
}
if (impl.jpeg_stdio_src(file.get())) {
return impl.status();
}
int read_header_result = 0;
if (impl.jpeg_read_header(read_header_result, true) != kTfLiteOk) {
return impl.status();
}
if (read_header_result != JPEG_HEADER_OK) {
return {kTfLiteError, "Failed call jpeg_read_header"};
}
boolean start_decompress_result = false;
if (impl.jpeg_start_decompress(start_decompress_result) != kTfLiteOk) {
return impl.status();
}
if (!start_decompress_result) {
return {kTfLiteError, "Failed call jpeg_start_decompress_"};
}
size_t height = header.height;
size_t row_stride = header.width * header.channels;
const size_t kMaxImageSize = JPEG_MAX_DIMENSION * 4;
std::vector<unsigned char> decode_buffer(kMaxImageSize);
unsigned char* buffer_array[1];
buffer_array[0] = decode_buffer.data();
size_t decoded_offset = 0;
while (height--) {
unsigned int num_of_scanlines_read = 0;
if (impl.jpeg_read_scanlines(num_of_scanlines_read, buffer_array, 1) !=
kTfLiteOk) {
return impl.status();
}
if (num_of_scanlines_read != 1) {
return {kTfLiteError, "Expected " + std::to_string(header.height) +
" lines but found only " +
std::to_string(header.height - height) +
" read scanlines is " +
std::to_string(num_of_scanlines_read)};
}
std::copy_n(buffer_array[0], row_stride, decoded + decoded_offset);
decoded_offset += row_stride;
}
boolean finish_decompress_result = false;
if (impl.jpeg_finish_decompress(finish_decompress_result) != kTfLiteOk) {
return impl.status();
}
if (!finish_decompress_result) {
return {kTfLiteError, "Failed call jpeg_finish_decompress_"};
}
if (impl.jpeg_destroy_decompress() != kTfLiteOk) {
return impl.status();
}
return impl.status();
}
}
}
} | #include "tensorflow/lite/experimental/acceleration/mini_benchmark/libjpeg_decoder.h"
#include <stddef.h>
#include <cstddef>
#include <memory>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/decode_jpeg_status.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_chessboard_jpeg.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_snow_jpeg.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_test_card_jpeg.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/jpeg_header_parser.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/libjpeg_decoder_test_helper.h"
#include "tensorflow/lite/minimal_logging.h"
#include "tensorflow/lite/string_util.h"
namespace tflite {
namespace acceleration {
namespace decode_jpeg_kernel {
namespace {
using testing::IsEmpty;
using testing::NotNull;
constexpr JpegHeader kExpectedImageDimensions{
.height = 300, .width = 250, .channels = 3};
constexpr int kDecodedSize = kExpectedImageDimensions.height *
kExpectedImageDimensions.width *
kExpectedImageDimensions.channels;
TEST(LibjpegDecoderTest, InitShouldSucceedOnAndroid) {
Status status;
std::unique_ptr<LibjpegDecoder> decoder = LibjpegDecoder::Create(status);
EXPECT_EQ(status.code, kTfLiteOk);
EXPECT_THAT(status.error_message, IsEmpty());
}
TEST(LibjpegDecoderTest, DecodingChessboardShouldSucceedOnAndroid) {
Status status;
std::unique_ptr<LibjpegDecoder> decoder = LibjpegDecoder::Create(status);
ASSERT_THAT(decoder, NotNull());
tflite::StringRef string_ref = {
reinterpret_cast<const char*>(g_tflite_acceleration_chessboard_jpeg),
static_cast<size_t>(g_tflite_acceleration_chessboard_jpeg_len)};
unsigned char decoded[kDecodedSize];
status = decoder->DecodeImage(string_ref, kExpectedImageDimensions, decoded,
kDecodedSize);
ASSERT_EQ(status.error_message, "");
ASSERT_EQ(status.code, kTfLiteOk);
std::vector<uint8_t> decoded_vec(decoded, decoded + kDecodedSize);
EXPECT_THAT(decoded_vec, HasChessboardPatternWithTolerance(12));
}
TEST(LibjpegDecoderTest, DecodingRainbowTestCardShouldSucceedOnAndroid) {
Status status;
std::unique_ptr<LibjpegDecoder> decoder = LibjpegDecoder::Create(status);
EXPECT_THAT(status.error_message, IsEmpty());
EXPECT_EQ(status.code, kTfLiteOk);
ASSERT_THAT(decoder, NotNull());
std::string encoded(
reinterpret_cast<const char*>(g_tflite_acceleration_test_card_jpeg),
g_tflite_acceleration_test_card_jpeg_len);
tflite::StringRef string_ref = {encoded.c_str(), encoded.length()};
unsigned char decoded[kDecodedSize];
status = decoder->DecodeImage(string_ref, kExpectedImageDimensions, decoded,
kDecodedSize);
ASSERT_EQ(status.error_message, "");
ASSERT_EQ(status.code, kTfLiteOk);
std::vector<uint8_t> decoded_vec(decoded, decoded + kDecodedSize);
EXPECT_THAT(decoded_vec, HasRainbowPatternWithTolerance(5));
}
TEST(LibjpegDecoderTest, ErrorsFromJpegLayerAreReturnedToCaller) {
Status status;
std::unique_ptr<LibjpegDecoder> decoder = LibjpegDecoder::Create(status);
EXPECT_THAT(status.error_message, IsEmpty());
EXPECT_EQ(status.code, kTfLiteOk);
ASSERT_THAT(decoder, NotNull());
std::string str = "this is not a jpeg image";
tflite::StringRef encoded = {str.c_str(), str.length()};
unsigned char decoded_image[12];
status = decoder->DecodeImage(encoded, kExpectedImageDimensions,
decoded_image, 12);
EXPECT_EQ(status.code, kTfLiteError);
EXPECT_EQ(status.error_message, "Not a valid JPEG image.");
}
TEST(LibjpegDecoderTest, DecodingFailsWhenDecodeBufferIsSmall) {
Status status;
std::unique_ptr<LibjpegDecoder> decoder = LibjpegDecoder::Create(status);
EXPECT_THAT(status.error_message, IsEmpty());
EXPECT_EQ(status.code, kTfLiteOk);
ASSERT_THAT(decoder, NotNull());
std::string encoded(
reinterpret_cast<const char*>(g_tflite_acceleration_test_card_jpeg),
g_tflite_acceleration_test_card_jpeg_len);
tflite::StringRef string_ref = {encoded.c_str(), encoded.length()};
const int decoded_size = 100;
unsigned char decoded[decoded_size];
status = decoder->DecodeImage(string_ref, kExpectedImageDimensions, decoded,
decoded_size);
EXPECT_EQ(status.code, kTfLiteError);
EXPECT_EQ(status.error_message,
"Size of buffer(100) for storing decoded image must be equal to "
"the size of decoded image(225000).");
}
TEST(LibjpegDecoderTest, DecodingFailsWhenImageDimensionsDifferFromExpected) {
Status status;
std::unique_ptr<LibjpegDecoder> decoder = LibjpegDecoder::Create(status);
EXPECT_THAT(status.error_message, IsEmpty());
EXPECT_EQ(status.code, kTfLiteOk);
ASSERT_THAT(decoder, NotNull());
std::string encoded(
reinterpret_cast<const char*>(g_tflite_acceleration_test_card_jpeg),
g_tflite_acceleration_test_card_jpeg_len);
tflite::StringRef string_ref = {encoded.c_str(), encoded.length()};
unsigned char decoded[kDecodedSize];
status = decoder->DecodeImage(string_ref,
{.height = 300, .width = 250, .channels = 1},
decoded, kDecodedSize);
EXPECT_EQ(status.code, kTfLiteError);
EXPECT_EQ(
status.error_message,
"Decoded image size (300, 250, 3, 8) is different from provided image "
"size (300, 250, 1, 8)");
}
TEST(LibjpegDecoderTest, DecodingFailsWhenImageDimensionsAreOverThreshold) {
Status status;
std::unique_ptr<LibjpegDecoder> decoder = LibjpegDecoder::Create(status);
EXPECT_THAT(status.error_message, IsEmpty());
EXPECT_EQ(status.code, kTfLiteOk);
ASSERT_THAT(decoder, NotNull());
std::string encoded(
reinterpret_cast<const char*>(g_tflite_acceleration_test_card_jpeg),
g_tflite_acceleration_test_card_jpeg_len);
tflite::StringRef origin_string_ref = {encoded.c_str(), encoded.length()};
const JpegHeader kHeader{
.height = static_cast<int>(LibjpegDecoder::kMaxImageHeight + 1),
.width = static_cast<int>(LibjpegDecoder::kMaxImageWidth + 1),
.channels = 3};
const size_t decoded_size = static_cast<size_t>(kHeader.height) *
static_cast<size_t>(kHeader.width) *
static_cast<size_t>(kHeader.channels);
std::string altered_image;
Status alter_header_status =
BuildImageWithNewHeader(origin_string_ref, kHeader, altered_image);
ASSERT_EQ(alter_header_status.code, kTfLiteOk);
tflite::StringRef altered_string_ref = {altered_image.c_str(),
altered_image.length()};
std::vector<unsigned char> decoded(decoded_size);
status = decoder->DecodeImage(altered_string_ref, kHeader, decoded.data(),
decoded_size);
EXPECT_EQ(status.code, kTfLiteError);
EXPECT_EQ(status.error_message,
"Image is too big, dimensions (" + std::to_string(kHeader.width) +
"," + std::to_string(kHeader.width) +
") larger than the maximum allowed (" +
std::to_string(LibjpegDecoder::kMaxImageHeight) + ", " +
std::to_string(LibjpegDecoder::kMaxImageWidth) + ")");
}
TEST(LibjpegDecoderTest, DecodingFailsWhenImageHasUnsupportedNumberOfChannels) {
Status status;
std::unique_ptr<LibjpegDecoder> decoder = LibjpegDecoder::Create(status);
EXPECT_THAT(status.error_message, IsEmpty());
EXPECT_EQ(status.code, kTfLiteOk);
ASSERT_THAT(decoder, NotNull());
std::string encoded(
reinterpret_cast<const char*>(g_tflite_acceleration_test_card_jpeg),
g_tflite_acceleration_test_card_jpeg_len);
tflite::StringRef string_ref = {encoded.c_str(), encoded.length()};
unsigned char decoded[300 * 250 * 4];
const JpegHeader kHeader{.height = 300, .width = 250, .channels = 4};
status = decoder->DecodeImage(string_ref, kHeader, decoded, kDecodedSize);
EXPECT_EQ(status.code, kTfLiteError);
EXPECT_EQ(status.error_message,
"Supporting only images with 1 or 3 channels");
}
TEST(LibjpegDecoderTest, DecodingFailsWhenExpectedBitPerSampleIsNot8) {
Status status;
std::unique_ptr<LibjpegDecoder> decoder = LibjpegDecoder::Create(status);
EXPECT_THAT(status.error_message, IsEmpty());
EXPECT_EQ(status.code, kTfLiteOk);
ASSERT_THAT(decoder, NotNull());
std::string encoded(
reinterpret_cast<const char*>(g_tflite_acceleration_test_card_jpeg),
g_tflite_acceleration_test_card_jpeg_len);
tflite::StringRef string_ref = {encoded.c_str(), encoded.length()};
unsigned char decoded[kDecodedSize];
status = decoder->DecodeImage(
string_ref,
{.height = 300, .width = 250, .channels = 3, .bits_per_sample = 4},
decoded, kDecodedSize);
EXPECT_EQ(status.code, kTfLiteError);
EXPECT_EQ(status.error_message,
"Supporting only images with 8 bits per sample");
}
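// The next two tests rewrite the JPEG header of a valid image via
// BuildImageWithNewHeader so the decoder sees dimensions that differ from the
// actual encoded data.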
TEST(LibjpegDecoderTest, DoesNotDecodeBeyondWhatIsSpecifiedInHeader) {
Status status;
std::unique_ptr<LibjpegDecoder> decoder = LibjpegDecoder::Create(status);
EXPECT_THAT(status.error_message, IsEmpty());
EXPECT_EQ(status.code, kTfLiteOk);
ASSERT_THAT(decoder, NotNull());
std::string origin_encoded_img(
reinterpret_cast<const char*>(g_tflite_acceleration_test_card_jpeg),
g_tflite_acceleration_test_card_jpeg_len);
tflite::StringRef origin_string_ref = {origin_encoded_img.c_str(),
origin_encoded_img.length()};
JpegHeader undersized_image_header = {
.height = kExpectedImageDimensions.height / 2,
.width = kExpectedImageDimensions.width / 2,
.channels = kExpectedImageDimensions.channels};
std::string altered_image;
Status alter_header_status = BuildImageWithNewHeader(
origin_string_ref, undersized_image_header, altered_image);
ASSERT_EQ(alter_header_status.code, kTfLiteOk);
tflite::StringRef altered_string_ref{altered_image.c_str(),
altered_image.length()};
unsigned char decoded[kDecodedSize / 4];
status = decoder->DecodeImage(altered_string_ref, undersized_image_header,
decoded, kDecodedSize / 4);
EXPECT_EQ(status.code, kTfLiteOk);
}
TEST(LibjpegDecoderTest, CanReadImagesWithVeryLargeRows) {
Status status;
std::unique_ptr<LibjpegDecoder> decoder = LibjpegDecoder::Create(status);
EXPECT_THAT(status.error_message, IsEmpty());
EXPECT_EQ(status.code, kTfLiteOk);
ASSERT_THAT(decoder, NotNull());
std::string origin_encoded_img(
reinterpret_cast<const char*>(g_tflite_acceleration_snow_jpeg),
g_tflite_acceleration_snow_jpeg_len);
tflite::StringRef origin_string_ref = {origin_encoded_img.c_str(),
origin_encoded_img.length()};
JpegHeader one_long_row_image_header = {
.height = 1,
.width = static_cast<int>(LibjpegDecoder::kMaxImageWidth),
.channels = kExpectedImageDimensions.channels};
std::string altered_image;
Status alter_header_status = BuildImageWithNewHeader(
origin_string_ref, one_long_row_image_header, altered_image);
ASSERT_EQ(alter_header_status.code, kTfLiteOk);
tflite::StringRef altered_string_ref = {altered_image.c_str(),
altered_image.length()};
const size_t kImageSize = LibjpegDecoder::kMaxImageWidth * 3;
std::vector<unsigned char> decoded(kImageSize);
status = decoder->DecodeImage(altered_string_ref, one_long_row_image_header,
decoded.data(), kImageSize);
EXPECT_EQ(status.code, kTfLiteOk);
}
TEST(LibjpegDecoderTest, FailDecodingAnImageWithUnexpectedEofInDataStream) {
Status status;
std::unique_ptr<LibjpegDecoder> decoder = LibjpegDecoder::Create(status);
EXPECT_THAT(status.error_message, IsEmpty());
EXPECT_EQ(status.code, kTfLiteOk);
ASSERT_THAT(decoder, NotNull());
std::string img(
reinterpret_cast<const char*>(g_tflite_acceleration_test_card_jpeg),
g_tflite_acceleration_test_card_jpeg_len);
tflite::StringRef truncated_image_ref = {img.c_str(), img.length() - 100};
unsigned char decoded[kDecodedSize];
status = decoder->DecodeImage(truncated_image_ref, kExpectedImageDimensions,
decoded, kDecodedSize);
EXPECT_EQ(status.code, kTfLiteError);
EXPECT_EQ(status.error_message, "Not a valid JPEG image.");
}
TEST(LibjpegDecoderTest, JpegErrorMsgParsingForValidMsg) {
size_t extracted_size;
Status status = ExtractSizeFromErrorMessage(
"JPEG parameter struct mismatch: library thinks size is 480, caller "
"expects 464.",
extracted_size);
ASSERT_EQ(status.code, kTfLiteOk);
EXPECT_EQ(extracted_size, 480);
}
TEST(LibjpegDecoderTest, JpegErrorMsgParsingForMalformedMsg) {
size_t extracted_size;
std::string err_msg =
"JPEG parameter struct mismatch: library thinks size is abcde, caller "
"expects 464.";
Status status = ExtractSizeFromErrorMessage(err_msg, extracted_size);
EXPECT_EQ(status.code, kTfLiteError);
EXPECT_EQ(status.error_message,
"Couldn't parse the size from message: '" + err_msg + "'");
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/mini_benchmark/libjpeg_decoder.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/mini_benchmark/libjpeg_decoder_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f927de03-a405-483a-a8e5-2dd242bcc799 | cpp | tensorflow/tensorflow | validator_runner | tensorflow/lite/experimental/acceleration/mini_benchmark/validator_runner.cc | tensorflow/lite/experimental/acceleration/mini_benchmark/validator_runner_test.cc | #include "tensorflow/lite/experimental/acceleration/mini_benchmark/validator_runner.h"
#include <cstdlib>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/fb_storage.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/status_codes.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/validator.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/validator_runner_options.h"
#include "tensorflow/lite/minimal_logging.h"
namespace tflite {
namespace acceleration {
constexpr int kMaxAttempts = 2;
ValidatorRunner::ValidatorRunner(const ValidatorRunnerOptions& options)
: storage_path_(options.storage_path),
storage_(options.storage_path, options.error_reporter),
error_reporter_(options.error_reporter) {
validator_runner_impl_ = std::make_unique<ValidatorRunnerImpl>(
CreateModelLoaderPath(options), options.storage_path,
options.data_directory_path, options.per_test_timeout_ms,
options.custom_input_data.empty()
? nullptr
: std::make_unique<CustomValidationEmbedder>(
options.custom_input_batch_size, options.custom_input_data,
options.error_reporter),
error_reporter_, options.nnapi_sl, options.gpu_plugin_handle,
options.validation_entrypoint_name, options.benchmark_result_evaluator);
}
MinibenchmarkStatus ValidatorRunner::Init() {
MinibenchmarkStatus status = storage_.Read();
if (status != kMinibenchmarkSuccess) {
TF_LITE_REPORT_ERROR(error_reporter_, "Storage::Read failed");
return status;
}
return validator_runner_impl_->Init();
}
int ValidatorRunner::TriggerMissingValidation(
const std::vector<const TFLiteSettings*>& for_settings) {
if (triggered_) {
return 0;
}
triggered_ = true;
storage_.Read();
std::vector<flatbuffers::FlatBufferBuilder> to_be_run;
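  // For each requested setting, check previously stored events: skip settings
  // that already have an END result, or that were started kMaxAttempts times
  // without finishing (e.g. the validation run kept crashing).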
for (auto settings : for_settings) {
TFLiteSettingsT tflite_settings;
settings->UnPackTo(&tflite_settings);
int started_count = 0;
int results_count = 0;
for (int i = 0; i < storage_.Count(); i++) {
const BenchmarkEvent* event = storage_.Get(i);
if (event->event_type() == BenchmarkEventType_LOGGED) {
continue;
}
if (!event->tflite_settings()) {
TFLITE_LOG_PROD(TFLITE_LOG_WARNING,
"Got previous event %d type %d with no tflite settings",
i, static_cast<int>(event->event_type()));
continue;
}
TFLiteSettingsT event_settings;
event->tflite_settings()->UnPackTo(&event_settings);
if (event_settings != tflite_settings) {
continue;
}
if (event->event_type() == BenchmarkEventType_START) {
started_count++;
} else if (event->event_type() == BenchmarkEventType_END) {
results_count++;
}
}
if (results_count > 0 || started_count >= kMaxAttempts) {
continue;
}
flatbuffers::FlatBufferBuilder copy;
copy.Finish(CreateTFLiteSettings(copy, &tflite_settings));
to_be_run.emplace_back(std::move(copy));
}
int to_be_run_count = to_be_run.size();
validator_runner_impl_->TriggerValidationAsync(std::move(to_be_run),
storage_path_);
return to_be_run_count;
}
std::vector<const BenchmarkEvent*> ValidatorRunner::GetAndFlushEventsToLog(
int64_t timeout_us) {
std::vector<const BenchmarkEvent*> events;
storage_.Read();
if (storage_.Count() == 0) {
return events;
}
const BenchmarkEvent* last = storage_.Get(storage_.Count() - 1);
if (!last || last->event_type() == BenchmarkEventType_LOGGED) {
return events;
}
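  // Scan backwards over events newer than the last LOGGED marker. A START that
  // has no END/ERROR yet and is still within timeout_us of now means a test
  // may still be running, so skip flushing for this call.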
bool has_pending_event = false;
for (int i = storage_.Count() - 1; i >= 0; i--) {
const BenchmarkEvent* event = storage_.Get(i);
if (!event || event->event_type() == BenchmarkEventType_LOGGED) {
break;
} else if (event->event_type() == BenchmarkEventType_END ||
event->event_type() == BenchmarkEventType_ERROR) {
break;
} else if (event->event_type() == BenchmarkEventType_START &&
std::abs(event->boottime_us() - Validator::BootTimeMicros()) <
timeout_us) {
has_pending_event = true;
}
}
if (has_pending_event) {
return events;
}
flatbuffers::FlatBufferBuilder fbb;
int64_t boottime_us = Validator::BootTimeMicros();
storage_.Append(
&fbb, CreateBenchmarkEvent(fbb, 0, BenchmarkEventType_LOGGED,
0, 0, boottime_us,
Validator::WallTimeMicros()));
storage_.Read();
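  // Re-read storage, which now ends with the LOGGED marker appended above, and
  // collect in reverse order the END, ERROR and RECOVERED_ERROR events plus any
  // START that lacks a matching completion, stopping at the previous LOGGED
  // marker.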
bool seen_end = false;
for (int i = storage_.Count() - 1; i >= 0; i--) {
const BenchmarkEvent* event = storage_.Get(i);
if (!event || (event->event_type() == BenchmarkEventType_LOGGED &&
event->boottime_us() != boottime_us)) {
break;
}
if (event->event_type() == BenchmarkEventType_END ||
event->event_type() == BenchmarkEventType_ERROR ||
event->event_type() == BenchmarkEventType_RECOVERED_ERROR) {
events.push_back(event);
seen_end = true;
} else if (event->event_type() == BenchmarkEventType_START) {
if (!seen_end) {
events.push_back(event);
} else {
seen_end = false;
}
}
}
return events;
}
}
} | #include "tensorflow/lite/experimental/acceleration/mini_benchmark/validator_runner.h"
#include <fcntl.h>
#include <fstream>
#include <iostream>
#include <memory>
#include <ostream>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/experimental/acceleration/compatibility/android_info.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_mobilenet_validation_model.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_nnapi_sl_fake_impl.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/mini_benchmark_test_helper.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/nnapi_sl_fake_impl.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/status_codes.h"
#include "tensorflow/lite/nnapi/sl/include/SupportLibrary.h"
#ifdef __ANDROID__
#include <dlfcn.h>
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_validator_runner_entrypoint.h"
#endif
namespace tflite {
namespace acceleration {
namespace {
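// Builds one TFLiteSettings per configuration to exercise: CPU always, NNAPI
// when the Android API level allows it (or when forced), and GPU only on
// Android builds.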
std::vector<const TFLiteSettings*> BuildBenchmarkSettings(
const AndroidInfo& android_info, flatbuffers::FlatBufferBuilder& fbb_cpu,
flatbuffers::FlatBufferBuilder& fbb_nnapi,
flatbuffers::FlatBufferBuilder& fbb_gpu,
bool ignore_android_version = false) {
std::vector<const TFLiteSettings*> settings;
fbb_cpu.Finish(CreateTFLiteSettings(fbb_cpu, Delegate_NONE,
CreateNNAPISettings(fbb_cpu)));
settings.push_back(
flatbuffers::GetRoot<TFLiteSettings>(fbb_cpu.GetBufferPointer()));
if (ignore_android_version || android_info.android_sdk_version >= "28") {
fbb_nnapi.Finish(CreateTFLiteSettings(fbb_nnapi, Delegate_NNAPI,
CreateNNAPISettings(fbb_nnapi)));
settings.push_back(
flatbuffers::GetRoot<TFLiteSettings>(fbb_nnapi.GetBufferPointer()));
}
#ifdef __ANDROID__
fbb_gpu.Finish(CreateTFLiteSettings(fbb_gpu, Delegate_GPU));
settings.push_back(
flatbuffers::GetRoot<TFLiteSettings>(fbb_gpu.GetBufferPointer()));
#endif
return settings;
}
std::string GetTargetDeviceName(const BenchmarkEvent* event) {
if (event->tflite_settings()->delegate() == Delegate_GPU) {
return "GPU";
} else if (event->tflite_settings()->delegate() == Delegate_NNAPI) {
return "NNAPI";
}
return "CPU";
}
class ValidatorRunnerTest : public ::testing::Test {
protected:
void SetUp() override {
MiniBenchmarkTestHelper helper;
should_perform_test_ = helper.should_perform_test();
if (!should_perform_test_) {
return;
}
model_path_ = helper.DumpToTempFile(
"mobilenet_quant_with_validation.tflite",
g_tflite_acceleration_embedded_mobilenet_validation_model,
g_tflite_acceleration_embedded_mobilenet_validation_model_len);
ASSERT_TRUE(!model_path_.empty());
}
void CheckConfigurations(bool use_path = true) {
if (!should_perform_test_) {
std::cerr << "Skipping test";
return;
}
AndroidInfo android_info;
auto status = RequestAndroidInfo(&android_info);
ASSERT_TRUE(status.ok());
ValidatorRunnerOptions options;
options.data_directory_path = ::testing::TempDir();
options.storage_path = ::testing::TempDir() + "/storage_path.fb";
(void)unlink(options.storage_path.c_str());
if (use_path) {
options.model_path = model_path_;
} else {
options.model_fd = open(model_path_.c_str(), O_RDONLY);
ASSERT_GE(options.model_fd, 0);
struct stat stat_buf = {0};
ASSERT_EQ(fstat(options.model_fd, &stat_buf), 0);
options.model_size = stat_buf.st_size;
options.model_offset = 0;
}
auto validator1 = std::make_unique<ValidatorRunner>(options);
auto validator2 = std::make_unique<ValidatorRunner>(options);
ASSERT_EQ(validator1->Init(), kMinibenchmarkSuccess);
ASSERT_EQ(validator2->Init(), kMinibenchmarkSuccess);
std::vector<const BenchmarkEvent*> events =
validator1->GetAndFlushEventsToLog();
ASSERT_TRUE(events.empty());
flatbuffers::FlatBufferBuilder fbb_cpu, fbb_nnapi, fbb_gpu;
std::vector<const TFLiteSettings*> settings =
BuildBenchmarkSettings(android_info, fbb_cpu, fbb_nnapi, fbb_gpu);
ASSERT_EQ(validator1->TriggerMissingValidation(settings), settings.size());
int event_count = 0;
while (event_count < settings.size()) {
events = validator1->GetAndFlushEventsToLog();
event_count += events.size();
for (const BenchmarkEvent* event : events) {
std::string delegate_name = GetTargetDeviceName(event);
if (event->event_type() == BenchmarkEventType_END) {
if (event->result()->ok()) {
std::cout << "Validation passed on " << delegate_name << std::endl;
} else {
std::cout << "Validation did not pass on " << delegate_name
<< std::endl;
}
} else if (event->event_type() == BenchmarkEventType_ERROR) {
std::cout << "Failed to run validation on " << delegate_name
<< std::endl;
}
}
#ifndef _WIN32
sleep(1);
#endif
}
EXPECT_EQ(validator2->TriggerMissingValidation(settings), 0);
}
bool should_perform_test_ = true;
std::string model_path_;
};
TEST_F(ValidatorRunnerTest, AllConfigurationsWithFilePath) {
CheckConfigurations(true);
}
TEST_F(ValidatorRunnerTest, AllConfigurationsWithFd) {
CheckConfigurations(false);
}
using ::tflite::nnapi::NnApiSupportLibrary;
std::unique_ptr<const NnApiSupportLibrary> LoadNnApiSupportLibrary() {
MiniBenchmarkTestHelper helper;
std::string nnapi_sl_path = helper.DumpToTempFile(
"libnnapi_fake.so", g_nnapi_sl_fake_impl, g_nnapi_sl_fake_impl_len);
std::unique_ptr<const NnApiSupportLibrary> nnapi_sl =
::tflite::nnapi::loadNnApiSupportLibrary(nnapi_sl_path);
return nnapi_sl;
}
TEST_F(ValidatorRunnerTest, ShouldUseNnApiSl) {
if (!should_perform_test_) {
std::cerr << "Skipping test";
return;
}
AndroidInfo android_info;
auto status = RequestAndroidInfo(&android_info);
ASSERT_TRUE(status.ok());
InitNnApiSlInvocationStatus();
std::unique_ptr<const NnApiSupportLibrary> nnapi_sl =
LoadNnApiSupportLibrary();
ASSERT_THAT(nnapi_sl.get(), ::testing::NotNull());
ValidatorRunnerOptions options;
options.model_path = model_path_;
options.storage_path = ::testing::TempDir() + "/storage_path.fb";
(void)unlink(options.storage_path.c_str());
options.data_directory_path = ::testing::TempDir();
options.nnapi_sl = nnapi_sl->getFL5();
ValidatorRunner validator(options);
ASSERT_EQ(validator.Init(), kMinibenchmarkSuccess);
std::vector<const BenchmarkEvent*> events =
validator.GetAndFlushEventsToLog();
ASSERT_TRUE(events.empty());
flatbuffers::FlatBufferBuilder fbb_cpu, fbb_nnapi, fbb_gpu;
std::vector<const TFLiteSettings*> settings =
BuildBenchmarkSettings(android_info, fbb_cpu, fbb_nnapi, fbb_gpu,
true);
ASSERT_EQ(validator.TriggerMissingValidation(settings), settings.size());
int event_count = 0;
while (event_count < settings.size()) {
events = validator.GetAndFlushEventsToLog();
event_count += events.size();
#ifndef _WIN32
sleep(1);
#endif
}
ASSERT_EQ(validator.TriggerMissingValidation(settings), 0);
EXPECT_TRUE(WasNnApiSlInvoked());
}
TEST_F(ValidatorRunnerTest, ShouldFailIfItCannotFindNnApiSlPath) {
if (!should_perform_test_) {
std::cerr << "Skipping test";
return;
}
std::string storage_path = ::testing::TempDir() + "/storage_path.fb";
(void)unlink(storage_path.c_str());
NnApiSLDriverImplFL5 wrong_handle_nnapi_sl{};
ValidatorRunnerOptions options;
options.model_path = model_path_;
options.storage_path = ::testing::TempDir() + "/storage_path.fb";
(void)unlink(options.storage_path.c_str());
options.data_directory_path = ::testing::TempDir();
options.nnapi_sl = &wrong_handle_nnapi_sl;
ValidatorRunner validator(options);
ASSERT_EQ(validator.Init(), kMiniBenchmarkCannotLoadSupportLibrary);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/mini_benchmark/validator_runner.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/mini_benchmark/validator_runner_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7965bdd1-2403-4bc1-9e96-9ed3b45991d5 | cpp | tensorflow/tensorflow | blocking_validator_runner | tensorflow/lite/experimental/acceleration/mini_benchmark/blocking_validator_runner.cc | tensorflow/lite/experimental/acceleration/mini_benchmark/blocking_validator_runner_test.cc | #include "tensorflow/lite/experimental/acceleration/mini_benchmark/blocking_validator_runner.h"
#include <unistd.h>
#include <algorithm>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/str_cat.h"
#include "absl/time/time.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/status_codes.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/validator.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/validator_runner_options.h"
#include "tensorflow/lite/logger.h"
#include "tensorflow/lite/minimal_logging.h"
namespace tflite {
namespace acceleration {
namespace {
using ::flatbuffers::FlatBufferBuilder;
using ::flatbuffers::GetRoot;
constexpr absl::Duration kWaitBetweenRefresh = absl::Milliseconds(20);
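// Random suffix appended to the storage path so that each TriggerValidation
// call writes its events to a fresh file.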
std::string GenerateRandomString() {
static const char charset[] =
"0123456789"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"abcdefghijklmnopqrstuvwxyz";
const int size = 10;
std::string result;
result.resize(size);
for (int i = 0; i < size; ++i) {
result[i] = charset[rand() % (sizeof(charset) - 1)];
}
return result;
}
}
BlockingValidatorRunner::BlockingValidatorRunner(
const ValidatorRunnerOptions& options)
: per_test_timeout_ms_(options.per_test_timeout_ms),
storage_path_base_(options.storage_path) {
validator_runner_impl_ = std::make_unique<ValidatorRunnerImpl>(
CreateModelLoaderPath(options), options.storage_path,
options.data_directory_path, options.per_test_timeout_ms,
options.custom_input_data.empty()
? nullptr
: std::make_unique<CustomValidationEmbedder>(
options.custom_input_batch_size, options.custom_input_data,
options.error_reporter),
options.error_reporter, options.nnapi_sl, options.gpu_plugin_handle,
options.validation_entrypoint_name, options.benchmark_result_evaluator);
}
MinibenchmarkStatus BlockingValidatorRunner::Init() {
return validator_runner_impl_->Init();
}
std::vector<FlatBufferBuilder> BlockingValidatorRunner::TriggerValidation(
const std::vector<const TFLiteSettings*>& for_settings) {
if (for_settings.empty()) {
return {};
}
std::string storage_path =
absl::StrCat(storage_path_base_, ".", GenerateRandomString());
TFLITE_LOG_PROD(TFLITE_LOG_INFO, "Validation storage path: %s",
storage_path.c_str());
std::vector<flatbuffers::FlatBufferBuilder> to_be_run;
std::vector<TFLiteSettingsT> for_settings_obj;
for_settings_obj.reserve(for_settings.size());
for (auto settings : for_settings) {
TFLiteSettingsT tflite_settings;
settings->UnPackTo(&tflite_settings);
flatbuffers::FlatBufferBuilder copy;
copy.Finish(CreateTFLiteSettings(copy, &tflite_settings));
to_be_run.emplace_back(std::move(copy));
for_settings_obj.emplace_back(tflite_settings);
}
validator_runner_impl_->TriggerValidationAsync(std::move(to_be_run),
storage_path);
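  // Wait until all tests have reported a result or the overall deadline
  // expires: per_test_timeout_ms per requested setting plus one extra period of
  // slack, polling for completed results between short sleeps.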
int64_t total_timeout_ms = per_test_timeout_ms_ * (1 + for_settings.size());
int64_t deadline_us = Validator::BootTimeMicros() + total_timeout_ms * 1000;
bool within_timeout = true;
while ((validator_runner_impl_->GetNumCompletedResults()) <
for_settings.size() &&
(within_timeout = Validator::BootTimeMicros() < deadline_us)) {
usleep(absl::ToInt64Microseconds(kWaitBetweenRefresh));
}
std::vector<FlatBufferBuilder> results =
validator_runner_impl_->GetCompletedResults();
if (!within_timeout) {
TFLITE_LOG_PROD(
TFLITE_LOG_WARNING,
"Validation timed out after %ld ms. Return before all tests finished.",
total_timeout_ms);
} else if (for_settings.size() != results.size()) {
TFLITE_LOG_PROD(TFLITE_LOG_WARNING,
"Validation completed.Started benchmarking for %d "
"TFLiteSettings, received %d results.",
for_settings.size(), results.size());
}
std::vector<TFLiteSettingsT> result_settings;
result_settings.reserve(results.size());
for (auto& result : results) {
const BenchmarkEvent* event =
GetRoot<BenchmarkEvent>(result.GetBufferPointer());
TFLiteSettingsT event_settings;
event->tflite_settings()->UnPackTo(&event_settings);
result_settings.emplace_back(std::move(event_settings));
}
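  // Any requested setting that never produced a completion event gets a
  // synthesized ERROR result, so callers receive one entry per TFLiteSettings.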
for (auto& settings_obj : for_settings_obj) {
auto result_it =
std::find(result_settings.begin(), result_settings.end(), settings_obj);
if (result_it == result_settings.end()) {
FlatBufferBuilder fbb;
fbb.Finish(CreateBenchmarkEvent(
fbb, CreateTFLiteSettings(fbb, &settings_obj),
BenchmarkEventType_ERROR, 0,
CreateBenchmarkError(fbb, BenchmarkStage_UNKNOWN,
0, 0,
0,
kMinibenchmarkCompletionEventMissing),
Validator::BootTimeMicros(), Validator::WallTimeMicros()));
results.emplace_back(std::move(fbb));
}
}
(void)unlink(storage_path.c_str());
return results;
}
}
} | #include "tensorflow/lite/experimental/acceleration/mini_benchmark/blocking_validator_runner.h"
#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>
#include <iostream>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/str_cat.h"
#include "flatbuffers/buffer.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/benchmark_result_evaluator.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_mobilenet_model.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_mobilenet_validation_model.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/mini_benchmark_test_helper.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/status_codes.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/validator_runner_options.h"
namespace tflite {
namespace acceleration {
namespace {
using ::flatbuffers::FlatBufferBuilder;
using ::flatbuffers::GetRoot;
class CustomResultEvaluator : public AbstractBenchmarkResultEvaluator {
public:
bool HasPassedAccuracyCheck(const BenchmarkResult& result) override {
return true;
}
};
class BlockingValidatorRunnerTest : public ::testing::Test {
protected:
void SetUp() override {
MiniBenchmarkTestHelper helper;
should_perform_test_ = helper.should_perform_test();
options_.model_path = helper.DumpToTempFile(
"mobilenet_quant_with_validation.tflite",
g_tflite_acceleration_embedded_mobilenet_validation_model,
g_tflite_acceleration_embedded_mobilenet_validation_model_len);
ASSERT_TRUE(!options_.model_path.empty());
options_.data_directory_path = ::testing::TempDir();
options_.storage_path =
absl::StrCat(::testing::TempDir(), "storage_path.fb.1");
options_.per_test_timeout_ms = 5000;
plain_model_path_ = MiniBenchmarkTestHelper::DumpToTempFile(
"mobilenet_quant.tflite",
g_tflite_acceleration_embedded_mobilenet_model,
g_tflite_acceleration_embedded_mobilenet_model_len);
}
std::string plain_model_path_;
ValidatorRunnerOptions options_;
bool should_perform_test_ = true;
};
TEST_F(BlockingValidatorRunnerTest, SucceedWithEmbeddedValidation) {
if (!should_perform_test_) {
std::cerr << "Skipping test";
return;
}
BlockingValidatorRunner runner(options_);
ASSERT_EQ(runner.Init(), kMinibenchmarkSuccess);
FlatBufferBuilder fbb;
#ifdef __ANDROID__
fbb.Finish(CreateTFLiteSettings(fbb, Delegate_GPU));
#else
fbb.Finish(CreateTFLiteSettings(fbb));
#endif
std::vector<FlatBufferBuilder> results = runner.TriggerValidation(
{flatbuffers::GetRoot<TFLiteSettings>(fbb.GetBufferPointer())});
EXPECT_THAT(results, testing::Not(testing::IsEmpty()));
for (auto& result : results) {
const BenchmarkEvent* event =
GetRoot<BenchmarkEvent>(result.GetBufferPointer());
EXPECT_EQ(event->event_type(), BenchmarkEventType_END);
EXPECT_TRUE(event->result()->ok());
}
}
TEST_F(BlockingValidatorRunnerTest, SucceedWithFdCloexecEmbeddedValidation) {
if (!should_perform_test_) {
std::cerr << "Skipping test";
return;
}
options_.model_fd = open(options_.model_path.c_str(), O_RDONLY | O_CLOEXEC);
ASSERT_GE(options_.model_fd, 0);
struct stat stat_buf = {0};
ASSERT_EQ(fstat(options_.model_fd, &stat_buf), 0);
options_.model_size = stat_buf.st_size;
options_.model_offset = 0;
options_.model_path.clear();
BlockingValidatorRunner runner(options_);
ASSERT_EQ(runner.Init(), kMinibenchmarkSuccess);
FlatBufferBuilder fbb;
#ifdef __ANDROID__
fbb.Finish(CreateTFLiteSettings(fbb, Delegate_GPU));
#else
fbb.Finish(CreateTFLiteSettings(fbb));
#endif
std::vector<FlatBufferBuilder> results = runner.TriggerValidation(
{flatbuffers::GetRoot<TFLiteSettings>(fbb.GetBufferPointer())});
EXPECT_THAT(results, testing::Not(testing::IsEmpty()));
for (auto& result : results) {
const BenchmarkEvent* event =
GetRoot<BenchmarkEvent>(result.GetBufferPointer());
EXPECT_EQ(event->event_type(), BenchmarkEventType_END);
EXPECT_TRUE(event->result()->ok());
}
}
TEST_F(BlockingValidatorRunnerTest, SucceedWithBufferModel) {
if (!should_perform_test_) {
std::cerr << "Skipping test";
return;
}
options_.model_buffer =
g_tflite_acceleration_embedded_mobilenet_validation_model;
options_.model_size =
g_tflite_acceleration_embedded_mobilenet_validation_model_len;
options_.model_path.clear();
BlockingValidatorRunner runner(options_);
ASSERT_EQ(runner.Init(), kMinibenchmarkSuccess);
FlatBufferBuilder fbb;
fbb.Finish(CreateTFLiteSettings(fbb));
std::vector<FlatBufferBuilder> results = runner.TriggerValidation(
{flatbuffers::GetRoot<TFLiteSettings>(fbb.GetBufferPointer())});
EXPECT_THAT(results, testing::Not(testing::IsEmpty()));
for (auto& result : results) {
const BenchmarkEvent* event =
GetRoot<BenchmarkEvent>(result.GetBufferPointer());
EXPECT_EQ(event->event_type(), BenchmarkEventType_END);
EXPECT_TRUE(event->result()->ok());
}
}
TEST_F(BlockingValidatorRunnerTest, SucceedWithFdModelCustomValidation) {
if (!should_perform_test_) {
std::cerr << "Skipping test";
return;
}
options_.model_path.clear();
options_.model_fd = open(plain_model_path_.c_str(), O_RDONLY);
ASSERT_GE(options_.model_fd, 0);
struct stat stat_buf = {0};
ASSERT_EQ(fstat(options_.model_fd, &stat_buf), 0);
options_.model_size = stat_buf.st_size;
options_.model_offset = 0;
options_.custom_input_batch_size = 3;
options_.custom_input_data = {std::vector<uint8_t>(3 * 224 * 224 * 3, 1)};
CustomResultEvaluator evaluator;
options_.benchmark_result_evaluator = &evaluator;
BlockingValidatorRunner runner(options_);
ASSERT_EQ(runner.Init(), kMinibenchmarkSuccess);
FlatBufferBuilder fbb;
#ifdef __ANDROID__
fbb.Finish(CreateTFLiteSettings(fbb, Delegate_XNNPACK));
#else
fbb.Finish(CreateTFLiteSettings(fbb));
#endif
std::vector<FlatBufferBuilder> results = runner.TriggerValidation(
{flatbuffers::GetRoot<TFLiteSettings>(fbb.GetBufferPointer())});
EXPECT_THAT(results, testing::Not(testing::IsEmpty()));
for (auto& result : results) {
const BenchmarkEvent* event =
GetRoot<BenchmarkEvent>(result.GetBufferPointer());
EXPECT_EQ(event->event_type(), BenchmarkEventType_END);
}
}
TEST_F(BlockingValidatorRunnerTest, SucceedWhenRunningMultipleTimes) {
if (!should_perform_test_) {
std::cerr << "Skipping test";
return;
}
BlockingValidatorRunner runner(options_);
ASSERT_EQ(runner.Init(), kMinibenchmarkSuccess);
FlatBufferBuilder fbb;
fbb.Finish(CreateTFLiteSettings(fbb));
int num_runs = 3;
for (int i = 0; i < num_runs; i++) {
std::vector<FlatBufferBuilder> results = runner.TriggerValidation(
{flatbuffers::GetRoot<TFLiteSettings>(fbb.GetBufferPointer()),
flatbuffers::GetRoot<TFLiteSettings>(fbb.GetBufferPointer())});
EXPECT_THAT(results, testing::Not(testing::IsEmpty()));
for (auto& result : results) {
const BenchmarkEvent* event =
GetRoot<BenchmarkEvent>(result.GetBufferPointer());
EXPECT_EQ(event->event_type(), BenchmarkEventType_END);
EXPECT_TRUE(event->result()->ok());
}
}
}
TEST_F(BlockingValidatorRunnerTest, ReturnErrorWhenTimedOut) {
if (!should_perform_test_) {
std::cerr << "Skipping test";
return;
}
options_.per_test_timeout_ms = 50;
BlockingValidatorRunner runner(options_);
ASSERT_EQ(runner.Init(), kMinibenchmarkSuccess);
FlatBufferBuilder fbb;
fbb.Finish(CreateTFLiteSettings(fbb));
std::vector<FlatBufferBuilder> results = runner.TriggerValidation(
{flatbuffers::GetRoot<TFLiteSettings>(fbb.GetBufferPointer())});
EXPECT_THAT(results, testing::SizeIs(1));
for (auto& result : results) {
const BenchmarkEvent* event =
GetRoot<BenchmarkEvent>(result.GetBufferPointer());
EXPECT_EQ(event->event_type(), BenchmarkEventType_ERROR);
ASSERT_NE(nullptr, event->error());
EXPECT_THAT(event->error()->mini_benchmark_error_code(),
testing::AnyOf(kMinibenchmarkCommandTimedOut,
kMinibenchmarkCompletionEventMissing));
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/mini_benchmark/blocking_validator_runner.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/mini_benchmark/blocking_validator_runner_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
94ed2b30-ae08-4f42-b4db-a5d14276453a | cpp | tensorflow/tensorflow | file_lock | tensorflow/lite/experimental/acceleration/mini_benchmark/file_lock.cc | tensorflow/lite/experimental/acceleration/mini_benchmark/file_lock_test.cc | #include "tensorflow/lite/experimental/acceleration/mini_benchmark/file_lock.h"
#ifndef _WIN32
#include <fcntl.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#endif
#include <string>
namespace tflite {
namespace acceleration {
bool FileLock::TryLock() {
#ifndef _WIN32
if (fd_ < 0) {
fd_ = open(path_.c_str(), O_WRONLY | O_CREAT | O_CLOEXEC, 0600);
}
if (fd_ < 0) {
return false;
}
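  // flock() takes an advisory, non-blocking exclusive lock that the kernel
  // releases automatically when the owning process exits or crashes.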
if (flock(fd_, LOCK_EX | LOCK_NB) == 0) {
return true;
}
#endif
return false;
}
}
} | #include "tensorflow/lite/experimental/acceleration/mini_benchmark/file_lock.h"
#include <csignal>
#include <iostream>
#include <string>
#include <utility>
#include <gtest/gtest.h>
namespace tflite {
namespace acceleration {
namespace {
class FileLockTest : public ::testing::Test {
protected:
void SetUp() override { file_path_ = ::testing::TempDir() + "/file_lock"; }
std::string file_path_;
};
TEST_F(FileLockTest, CanLock) { EXPECT_TRUE(FileLock(file_path_).TryLock()); }
TEST_F(FileLockTest, FailIfLockMoreThanOnce) {
FileLock lock_one(file_path_);
FileLock lock_two(file_path_);
ASSERT_TRUE(lock_one.TryLock());
EXPECT_FALSE(lock_two.TryLock());
}
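// Forks a child that acquires the lock and then kills itself; the parent must
// still be able to take the lock because flock() locks are dropped when the
// owning process dies.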
TEST_F(FileLockTest, LockReleasedWhenThreadCrash) {
pid_t pid = fork();
if (pid == 0) {
FileLock lock(file_path_);
if (!lock.TryLock()) {
_exit(1);
}
std::cout << "Lock acquired successfully.";
kill(getpid(), SIGKILL);
}
int wstatus;
int w = waitpid(pid, &wstatus, WUNTRACED);
ASSERT_NE(w, -1);
FileLock lock_two(file_path_);
EXPECT_TRUE(lock_two.TryLock());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/mini_benchmark/file_lock.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/mini_benchmark/file_lock_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
98768ebc-6ed4-449c-809b-5a25249a04e5 | cpp | tensorflow/tensorflow | fb_storage | tensorflow/lite/experimental/acceleration/mini_benchmark/fb_storage.cc | tensorflow/lite/experimental/acceleration/mini_benchmark/fb_storage_test.cc | #include "tensorflow/lite/experimental/acceleration/mini_benchmark/fb_storage.h"
#include <fcntl.h>
#include <string.h>
#ifndef _WIN32
#include <sys/file.h>
#include <unistd.h>
#endif
#include <fstream>
#include <sstream>
#include <string>
#include "absl/strings/string_view.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#ifndef TEMP_FAILURE_RETRY
#ifdef __ANDROID__
#error "TEMP_FAILURE_RETRY not set although on Android"
#else
#define TEMP_FAILURE_RETRY(exp) exp
#endif
#endif
namespace tflite {
namespace acceleration {
FileStorage::FileStorage(absl::string_view path, ErrorReporter* error_reporter)
: path_(path), error_reporter_(error_reporter) {}
MinibenchmarkStatus FileStorage::ReadFileIntoBuffer() {
#ifndef _WIN32
buffer_.clear();
int fd = TEMP_FAILURE_RETRY(open(path_.c_str(), O_RDONLY | O_CLOEXEC, 0600));
int open_error_no = errno;
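  // Failure to open for reading usually means the file does not exist yet:
  // create it empty and report success with an empty buffer.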
if (fd < 0) {
int fd = TEMP_FAILURE_RETRY(
open(path_.c_str(), O_WRONLY | O_APPEND | O_CREAT | O_CLOEXEC, 0600));
if (fd >= 0) {
close(fd);
return kMinibenchmarkSuccess;
}
int create_error_no = errno;
TF_LITE_REPORT_ERROR(
error_reporter_,
"Could not open %s for reading: %s, creating failed as well: %s",
path_.c_str(), std::strerror(open_error_no),
std::strerror(create_error_no));
return kMinibenchmarkCantCreateStorageFile;
}
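  // Hold an exclusive flock while reading so that concurrent appenders, which
  // take the same lock, cannot interleave a partially written record.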
int lock_status = flock(fd, LOCK_EX);
int lock_error_no = errno;
if (lock_status < 0) {
close(fd);
TF_LITE_REPORT_ERROR(error_reporter_, "Could not flock %s: %s",
path_.c_str(), std::strerror(lock_error_no));
return kMinibenchmarkFlockingStorageFileFailed;
}
char buffer[512];
while (true) {
int bytes_read = TEMP_FAILURE_RETRY(read(fd, buffer, 512));
int read_error_no = errno;
if (bytes_read == 0) {
close(fd);
return kMinibenchmarkSuccess;
} else if (bytes_read < 0) {
close(fd);
TF_LITE_REPORT_ERROR(error_reporter_, "Error reading %s: %s",
path_.c_str(), std::strerror(read_error_no));
return kMinibenchmarkErrorReadingStorageFile;
} else {
buffer_.append(buffer, bytes_read);
}
}
#else
return kMinibenchmarkUnsupportedPlatform;
#endif
}
MinibenchmarkStatus FileStorage::AppendDataToFile(absl::string_view data) {
#ifndef _WIN32
int fd = TEMP_FAILURE_RETRY(
open(path_.c_str(), O_WRONLY | O_APPEND | O_CREAT | O_CLOEXEC, 0600));
if (fd < 0) {
int error_no = errno;
TF_LITE_REPORT_ERROR(error_reporter_, "Could not open %s for writing: %s",
path_.c_str(), std::strerror(error_no));
return kMinibenchmarkFailedToOpenStorageFileForWriting;
}
int lock_status = flock(fd, LOCK_EX);
int lock_error_no = errno;
if (lock_status < 0) {
close(fd);
TF_LITE_REPORT_ERROR(error_reporter_, "Could not flock %s: %s",
path_.c_str(), std::strerror(lock_error_no));
return kMinibenchmarkFlockingStorageFileFailed;
}
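  // write() may accept fewer bytes than requested, so loop until the whole
  // record is written and then fsync it to survive an abrupt process exit.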
absl::string_view bytes = data;
while (!bytes.empty()) {
ssize_t bytes_written =
TEMP_FAILURE_RETRY(write(fd, bytes.data(), bytes.size()));
if (bytes_written < 0) {
int error_no = errno;
close(fd);
TF_LITE_REPORT_ERROR(error_reporter_, "Could not write to %s: %s",
path_.c_str(), std::strerror(error_no));
return kMinibenchmarkErrorWritingStorageFile;
}
bytes.remove_prefix(bytes_written);
}
if (TEMP_FAILURE_RETRY(fsync(fd)) < 0) {
int error_no = errno;
close(fd);
TF_LITE_REPORT_ERROR(error_reporter_, "Failed to fsync %s: %s",
path_.c_str(), std::strerror(error_no));
return kMinibenchmarkErrorFsyncingStorageFile;
}
if (TEMP_FAILURE_RETRY(close(fd)) < 0) {
int error_no = errno;
TF_LITE_REPORT_ERROR(error_reporter_, "Failed to close %s: %s",
path_.c_str(), std::strerror(error_no));
return kMinibenchmarkErrorClosingStorageFile;
}
return kMinibenchmarkSuccess;
#else
return kMinibenchmarkUnsupportedPlatform;
#endif
}
const char kFlatbufferStorageIdentifier[] = "STO1";
}
} | #include "tensorflow/lite/experimental/acceleration/mini_benchmark/fb_storage.h"
#include <sys/stat.h>
#include <unistd.h>
#include <algorithm>
#include <cstdlib>
#include <string>
#include <thread>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/core/c/c_api_types.h"
namespace tflite {
namespace acceleration {
namespace {
std::string GetTemporaryDirectory() {
#ifdef __ANDROID__
return "/data/local/tmp";
#else
if (getenv("TEST_TMPDIR")) {
return getenv("TEST_TMPDIR");
}
if (getenv("TEMP")) {
return getenv("TEMP");
}
return ".";
#endif
}
std::string GetStoragePath() {
std::string path = GetTemporaryDirectory() + "/storage.fb";
unlink(path.c_str());
return path;
}
TEST(FlatbufferStorageTest, AppendAndReadOneItem) {
std::string path = GetStoragePath();
flatbuffers::FlatBufferBuilder fbb;
flatbuffers::Offset<BenchmarkEvent> o =
CreateBenchmarkEvent(fbb, 0, BenchmarkEventType_START);
FlatbufferStorage<BenchmarkEvent> storage(path);
EXPECT_EQ(storage.Read(), kMinibenchmarkSuccess);
EXPECT_EQ(storage.Count(), 0);
EXPECT_EQ(storage.Append(&fbb, o), kMinibenchmarkSuccess);
ASSERT_EQ(storage.Count(), 1);
EXPECT_EQ(storage.Get(0)->event_type(), BenchmarkEventType_START);
storage = FlatbufferStorage<BenchmarkEvent>(path);
EXPECT_EQ(storage.Read(), kMinibenchmarkSuccess);
ASSERT_EQ(storage.Count(), 1);
EXPECT_EQ(storage.Get(0)->event_type(), BenchmarkEventType_START);
}
TEST(FlatbufferStorageTest, AppendAndReadThreeItems) {
std::string path = GetStoragePath();
FlatbufferStorage<BenchmarkEvent> storage(path);
EXPECT_EQ(storage.Read(), kMinibenchmarkSuccess);
EXPECT_EQ(storage.Count(), 0);
for (auto event : {BenchmarkEventType_START, BenchmarkEventType_ERROR,
BenchmarkEventType_END}) {
flatbuffers::FlatBufferBuilder fbb;
flatbuffers::Offset<BenchmarkEvent> object =
CreateBenchmarkEvent(fbb, 0, event);
EXPECT_EQ(storage.Append(&fbb, object), kMinibenchmarkSuccess);
}
ASSERT_EQ(storage.Count(), 3);
EXPECT_EQ(storage.Get(0)->event_type(), BenchmarkEventType_START);
EXPECT_EQ(storage.Get(1)->event_type(), BenchmarkEventType_ERROR);
EXPECT_EQ(storage.Get(2)->event_type(), BenchmarkEventType_END);
storage = FlatbufferStorage<BenchmarkEvent>(path);
EXPECT_EQ(storage.Read(), kMinibenchmarkSuccess);
ASSERT_EQ(storage.Count(), 3);
EXPECT_EQ(storage.Get(0)->event_type(), BenchmarkEventType_START);
EXPECT_EQ(storage.Get(1)->event_type(), BenchmarkEventType_ERROR);
EXPECT_EQ(storage.Get(2)->event_type(), BenchmarkEventType_END);
}
TEST(FlatbufferStorageTest, PathDoesntExist) {
std::string path = GetTemporaryDirectory() + "/nosuchdirectory/storage.pb";
FlatbufferStorage<BenchmarkEvent> storage(path);
EXPECT_EQ(storage.Read(), kMinibenchmarkCantCreateStorageFile);
}
#ifndef __ANDROID__
TEST(FlatbufferStorageTest, WriteFailureResetsStorage) {
std::string path = GetStoragePath();
flatbuffers::FlatBufferBuilder fbb;
flatbuffers::Offset<BenchmarkEvent> o =
CreateBenchmarkEvent(fbb, 0, BenchmarkEventType_START);
FlatbufferStorage<BenchmarkEvent> storage(path);
EXPECT_EQ(storage.Append(&fbb, o), kMinibenchmarkSuccess);
ASSERT_EQ(storage.Count(), 1);
chmod(path.c_str(), 0444);
EXPECT_EQ(storage.Append(&fbb, o),
kMinibenchmarkFailedToOpenStorageFileForWriting);
ASSERT_EQ(storage.Count(), 0);
}
#endif
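// Several threads append to the same storage file concurrently; the per-append
// flock must keep every record intact so all writes can be read back.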
TEST(FlatbufferStorageTest, Locking) {
std::string path = GetStoragePath();
std::vector<std::thread> threads;
const int kNumThreads = 4;
const int kIterations = 10;
threads.reserve(kNumThreads);
for (int i = 0; i < kNumThreads; i++) {
threads.push_back(std::thread([path]() {
for (int j = 0; j < kIterations; j++) {
FlatbufferStorage<BenchmarkEvent> storage(path);
flatbuffers::FlatBufferBuilder fbb;
flatbuffers::Offset<BenchmarkEvent> o =
CreateBenchmarkEvent(fbb, 0, BenchmarkEventType_START);
EXPECT_EQ(storage.Append(&fbb, o), kMinibenchmarkSuccess);
}
}));
}
std::for_each(threads.begin(), threads.end(),
[](std::thread& t) { t.join(); });
FlatbufferStorage<BenchmarkEvent> storage(path);
EXPECT_EQ(storage.Read(), kMinibenchmarkSuccess);
EXPECT_EQ(storage.Count(), kNumThreads * kIterations);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/mini_benchmark/fb_storage.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/mini_benchmark/fb_storage_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
28edffeb-45c6-42b1-ad17-93c134919c8e | cpp | tensorflow/tensorflow | big_little_affinity | tensorflow/lite/experimental/acceleration/mini_benchmark/big_little_affinity.cc | tensorflow/lite/experimental/acceleration/mini_benchmark/big_little_affinity_test.cc | #include "tensorflow/lite/experimental/acceleration/mini_benchmark/big_little_affinity.h"
#include <algorithm>
#include <cstdint>
#include <map>
#include <set>
#include "include/cpuinfo.h"
namespace tflite {
namespace acceleration {
namespace {
bool IsInOrderArch(cpuinfo_uarch arch) {
switch (arch) {
case cpuinfo_uarch_cortex_a53:
case cpuinfo_uarch_cortex_a55r0:
case cpuinfo_uarch_cortex_a55:
case cpuinfo_uarch_cortex_a57:
return true;
default:
return false;
}
return false;
}
}
BigLittleAffinity GetAffinity() {
BigLittleAffinity affinity;
if (!cpuinfo_initialize()) {
return affinity;
}
std::map<uint32_t, uint64_t> cluster_to_max_frequency;
uint64_t smallest_max_frequency = UINT64_MAX;
uint64_t largest_max_frequency = 0;
uint64_t processors_count = cpuinfo_get_processors_count();
for (auto i = 0; i < processors_count; i++) {
const struct cpuinfo_processor* processor = cpuinfo_get_processor(i);
if (processor->core->frequency > 0) {
cluster_to_max_frequency[processor->cluster->cluster_id] =
processor->core->frequency;
smallest_max_frequency =
std::min(smallest_max_frequency, processor->core->frequency);
largest_max_frequency =
std::max(largest_max_frequency, processor->core->frequency);
}
}
int count_of_processors_with_largest_max_frequency = 0;
for (auto i = 0; i < cpuinfo_get_processors_count(); i++) {
const struct cpuinfo_processor* processor = cpuinfo_get_processor(i);
uint64_t max_frequency =
cluster_to_max_frequency[processor->cluster->cluster_id];
if (max_frequency == largest_max_frequency) {
++count_of_processors_with_largest_max_frequency;
}
}
std::set<cpuinfo_uarch> archs;
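  // Classify each processor: if every core reports the same max frequency,
  // fall back to the microarchitecture (in-order cores count as little); with
  // exactly two cores at the top frequency, only those are big; otherwise
  // cores at the lowest max frequency are little.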
for (auto i = 0; i < cpuinfo_get_processors_count(); i++) {
const struct cpuinfo_processor* processor = cpuinfo_get_processor(i);
uint64_t max_frequency =
cluster_to_max_frequency[processor->cluster->cluster_id];
bool is_little;
archs.insert(processor->core->uarch);
if (count_of_processors_with_largest_max_frequency ==
cpuinfo_get_processors_count()) {
is_little = IsInOrderArch(processor->core->uarch);
} else if (count_of_processors_with_largest_max_frequency == 2) {
is_little = (max_frequency != largest_max_frequency);
} else {
is_little = (max_frequency == smallest_max_frequency);
}
#ifdef __ANDROID__
if (is_little) {
affinity.little_core_affinity |= (0x1 << processor->linux_id);
} else {
affinity.big_core_affinity |= (0x1 << processor->linux_id);
}
#endif
}
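  // A single cluster, or cores that all share the same max frequency and
  // microarchitecture, means there is no big/little split: use one mask for
  // both affinities.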
if (cluster_to_max_frequency.size() == 1) {
affinity.big_core_affinity = affinity.little_core_affinity =
std::max(affinity.big_core_affinity, affinity.little_core_affinity);
} else if (count_of_processors_with_largest_max_frequency ==
cpuinfo_get_processors_count() &&
archs.size() == 1) {
affinity.big_core_affinity = affinity.little_core_affinity =
std::max(affinity.big_core_affinity, affinity.little_core_affinity);
}
return affinity;
}
}
} | #include "tensorflow/lite/experimental/acceleration/mini_benchmark/big_little_affinity.h"
#include <cstdint>
#include <map>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "include/cpuinfo.h"
#include "tensorflow/lite/experimental/acceleration/compatibility/android_info.h"
namespace tflite {
namespace acceleration {
namespace {
TEST(BigLittle, CheckBasics) {
ASSERT_TRUE(cpuinfo_initialize());
auto processors_count = cpuinfo_get_processors_count();
ASSERT_GT(processors_count, 0);
#if defined(__ANDROID__)
AndroidInfo android_info;
auto status = RequestAndroidInfo(&android_info);
if (android_info.is_emulator) {
std::cout << "Running on emulator\n";
return;
} else {
std::cout << "Running on hardware\n";
}
ASSERT_TRUE(status.ok());
std::map<uint32_t, uint64_t> cluster_to_max_frequency;
for (auto i = 0; i < cpuinfo_get_processors_count(); i++) {
const struct cpuinfo_processor* processor = cpuinfo_get_processor(i);
if (processor->core->frequency > 0) {
cluster_to_max_frequency[processor->cluster->cluster_id] =
processor->core->frequency;
}
}
EXPECT_GT(cluster_to_max_frequency.size(), 0);
EXPECT_LE(cluster_to_max_frequency.size(), 3);
for (auto i = 0; i < cpuinfo_get_processors_count(); i++) {
const struct cpuinfo_processor* processor = cpuinfo_get_processor(i);
EXPECT_TRUE(cluster_to_max_frequency.find(processor->cluster->cluster_id) !=
cluster_to_max_frequency.end());
}
BigLittleAffinity affinity = GetAffinity();
EXPECT_GT(affinity.little_core_affinity, 0);
EXPECT_GT(affinity.big_core_affinity, 0);
std::cout << "Little core affinity: " << std::hex
<< affinity.little_core_affinity << std::endl;
std::cout << "Big core affinity: " << std::hex << affinity.big_core_affinity
<< std::endl;
#endif
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/mini_benchmark/big_little_affinity.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/mini_benchmark/big_little_affinity_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
fd60af3b-8684-4a3f-9060-125254894f91 | cpp | tensorflow/tensorflow | jpeg_header_parser | tensorflow/lite/experimental/acceleration/mini_benchmark/jpeg_header_parser.cc | tensorflow/lite/experimental/acceleration/mini_benchmark/jpeg_header_parser_test.cc | #include "tensorflow/lite/experimental/acceleration/mini_benchmark/jpeg_header_parser.h"
#include <cstdint>
#include <memory>
#include <string>
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/minimal_logging.h"
#include "tensorflow/lite/string_util.h"
namespace tflite {
namespace acceleration {
namespace decode_jpeg_kernel {
namespace {
using MarkerId = uint16_t;
void AsWord(int value, char* msb, char* lsb) {
*msb = static_cast<char>(value >> 8);
*lsb = static_cast<char>(value);
}
class JfifHeaderParser {
public:
explicit JfifHeaderParser(const tflite::StringRef& jpeg_image_data)
: jpeg_image_data_(jpeg_image_data), offset_(0) {
if (!IsJpegImage(jpeg_image_data_)) {
is_valid_image_buffer_ = false;
validation_error_message_ = "Not a valid JPEG image.";
} else if (!IsJfifImage(jpeg_image_data_)) {
is_valid_image_buffer_ = false;
validation_error_message_ = "Image is not in JFIF format.";
return;
} else {
is_valid_image_buffer_ = true;
}
}
#define ENSURE_READ_STATUS(a) \
do { \
const TfLiteStatus s = (a); \
if (s != kTfLiteOk) { \
return {s, "Error trying to parse JPEG header."}; \
} \
} while (0)
Status ReadJpegHeader(JpegHeader* result) {
if (!is_valid_image_buffer_) {
return {kTfLiteError, validation_error_message_};
}
Status move_to_sof_status = MoveToStartOfFrameMarker();
if (move_to_sof_status.code != kTfLiteOk) {
return move_to_sof_status;
}
ENSURE_READ_STATUS(SkipBytes(2));
char precision;
ENSURE_READ_STATUS(ReadByte(&precision));
uint16_t height;
ENSURE_READ_STATUS(ReadWord(&height));
uint16_t width;
ENSURE_READ_STATUS(ReadWord(&width));
char num_of_components;
ENSURE_READ_STATUS(ReadByte(&num_of_components));
if (num_of_components != 1 && num_of_components != 3) {
return {kTfLiteError,
"A JFIF image without App14 marker doesn't support a number of "
"components = " +
std::to_string(static_cast<int>(num_of_components))};
}
result->width = width;
result->height = height;
result->channels = num_of_components;
result->bits_per_sample = precision;
return {kTfLiteOk, ""};
}
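  // Rewrites only the six-byte SOF payload (precision, height, width,
  // channels) of the original stream into write_to, leaving all other bytes of
  // the JPEG data untouched.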
Status ApplyHeaderToImage(const JpegHeader& new_header,
std::string& write_to) {
if (!is_valid_image_buffer_) {
return {kTfLiteError, validation_error_message_};
}
Status move_to_sof_status = MoveToStartOfFrameMarker();
if (move_to_sof_status.code != kTfLiteOk) {
return move_to_sof_status;
}
ENSURE_READ_STATUS(SkipBytes(2));
if (!HasData(6)) {
return {kTfLiteError,
"Invalid SOF marker, image buffer ends before end of marker"};
}
char header[6];
header[0] = static_cast<char>(new_header.bits_per_sample);
AsWord(new_header.height, header + 1, header + 2);
AsWord(new_header.width, header + 3, header + 4);
header[5] = static_cast<char>(new_header.channels);
write_to.clear();
write_to.append(jpeg_image_data_.str, offset_);
write_to.append(header, 6);
ENSURE_READ_STATUS(SkipBytes(6));
if (HasData()) {
write_to.append(jpeg_image_data_.str + offset_,
jpeg_image_data_.len - offset_);
}
return {kTfLiteOk, ""};
}
private:
const tflite::StringRef jpeg_image_data_;
int offset_;
bool is_valid_image_buffer_;
std::string validation_error_message_;
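  // Advances offset_ past SOI and the APP0 segment, then walks marker segments
  // until the Start-Of-Frame (SOF) marker whose payload holds precision,
  // height, width and component count; fails if the scan data (SOS) starts
  // before any SOF is found.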
Status MoveToStartOfFrameMarker() {
const MarkerId kStartOfStreamMarkerId = 0xFFDA;
offset_ = 0;
ENSURE_READ_STATUS(SkipBytes(4));
ENSURE_READ_STATUS(SkipCurrentMarker());
MarkerId curr_marker_id;
while (HasData(4)) {
ENSURE_READ_STATUS(ReadWord(&curr_marker_id));
if (IsStartOfFrameMarkerId(curr_marker_id)) {
break;
}
if (curr_marker_id == kStartOfStreamMarkerId) {
return {kTfLiteError, "Error trying to parse JPEG header."};
}
ENSURE_READ_STATUS(SkipCurrentMarker());
}
return {kTfLiteOk, ""};
}
#undef ENSURE_READ_STATUS
bool HasData(int min_data_size = 1) {
return offset_ <= jpeg_image_data_.len - min_data_size;
}
TfLiteStatus SkipBytes(int bytes) {
if (!HasData(bytes)) {
TFLITE_LOG(TFLITE_LOG_WARNING,
"Trying to move out of image boundaries from offset %d, "
"skipping %d bytes",
offset_, bytes);
return kTfLiteError;
}
offset_ += bytes;
return kTfLiteOk;
}
TfLiteStatus ReadByte(char* result) {
if (!HasData()) {
return kTfLiteError;
}
*result = jpeg_image_data_.str[offset_];
return SkipBytes(1);
}
TfLiteStatus ReadWord(uint16_t* result) {
TF_LITE_ENSURE_STATUS(ReadWordAt(jpeg_image_data_, offset_, result));
return SkipBytes(2);
}
TfLiteStatus SkipCurrentMarker() {
uint16_t full_marker_len;
TF_LITE_ENSURE_STATUS(ReadWord(&full_marker_len));
if (full_marker_len <= 2) {
TFLITE_LOG(TFLITE_LOG_WARNING,
"Invalid marker length %d read at offset %X", full_marker_len,
offset_);
return kTfLiteError;
}
return SkipBytes(full_marker_len - 2);
}
static TfLiteStatus ReadWordAt(const tflite::StringRef& jpeg_image_data,
int read_offset, uint16_t* result) {
if (read_offset < 0 || read_offset + 2 > jpeg_image_data.len) {
return kTfLiteError;
}
const unsigned char* buf =
reinterpret_cast<const unsigned char*>(jpeg_image_data.str);
*result = (buf[read_offset] << 8) + buf[read_offset + 1];
return kTfLiteOk;
}
static bool IsJpegImage(const tflite::StringRef& jpeg_image_data) {
const MarkerId kStartOfImageMarkerId = 0xFFD8;
const MarkerId kEndOfImageMarkerId = 0xFFD9;
MarkerId soi_marker_id;
MarkerId eoi_marker_id;
if (ReadWordAt(jpeg_image_data, 0, &soi_marker_id) != kTfLiteOk) {
return false;
}
if (ReadWordAt(jpeg_image_data, jpeg_image_data.len - 2, &eoi_marker_id) !=
kTfLiteOk) {
return false;
}
return (soi_marker_id == kStartOfImageMarkerId) &&
(eoi_marker_id == kEndOfImageMarkerId);
}
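// Checks that the first segment after SOI is an APP0 segment carrying the
// "JFIF\0" identifier at byte offset 6 (SOI + APP0 marker + segment length).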
static bool IsJfifImage(const tflite::StringRef& jpeg_image_data) {
const MarkerId kApp0MarkerId = 0xFFE0;
MarkerId app_marker_id;
if ((ReadWordAt(jpeg_image_data, 2, &app_marker_id) != kTfLiteOk) ||
(app_marker_id != kApp0MarkerId)) {
return false;
}
const std::string kJfifIdString{"JFIF\0", 5};
    const int kJfifIdStringStartOffset = 6;
    if (kJfifIdStringStartOffset + kJfifIdString.size() >=
jpeg_image_data.len) {
      TFLITE_LOG(TFLITE_LOG_WARNING,
                 "Invalid image, reached end of data while parsing APP0 "
                 "header");
return false;
}
const std::string actualImgId(
        jpeg_image_data.str + kJfifIdStringStartOffset, kJfifIdString.size());
if (kJfifIdString != actualImgId) {
TFLITE_LOG(TFLITE_LOG_WARNING, "Invalid image, invalid APP0 header");
return false;
}
return true;
}
static bool IsStartOfFrameMarkerId(MarkerId marker_id) {
return 0xFFC0 <= marker_id && marker_id < 0xFFCF;
}
};
}
Status ReadJpegHeader(const tflite::StringRef& jpeg_image_data,
JpegHeader* header) {
JfifHeaderParser parser(jpeg_image_data);
return parser.ReadJpegHeader(header);
}
Status BuildImageWithNewHeader(const tflite::StringRef& orig_jpeg_image_data,
const JpegHeader& new_header,
std::string& new_image_data) {
JfifHeaderParser parser(orig_jpeg_image_data);
return parser.ApplyHeaderToImage(new_header, new_image_data);
}
}
}
} | #include "tensorflow/lite/experimental/acceleration/mini_benchmark/jpeg_header_parser.h"
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_chessboard_jpeg.h"
namespace tflite {
namespace acceleration {
namespace decode_jpeg_kernel {
void PrintTo(const Status& status, std::ostream* os) {
*os << "{ code: " + std::to_string(status.code) + ", error_message: '" +
status.error_message + "'}";
}
}
}
}
namespace {
using ::testing::AllOf;
using ::testing::Eq;
using ::testing::Field;
using ::testing::Matcher;
using tflite::acceleration::decode_jpeg_kernel::JpegHeader;
using tflite::acceleration::decode_jpeg_kernel::ReadJpegHeader;
Matcher<JpegHeader> JpegHeaderEq(const JpegHeader& expected) {
return AllOf(
Field(&JpegHeader::channels, Eq(expected.channels)),
Field(&JpegHeader::height, Eq(expected.height)),
Field(&JpegHeader::width, Eq(expected.width)),
Field(&JpegHeader::bits_per_sample, Eq(expected.bits_per_sample)));
}
using tflite::acceleration::decode_jpeg_kernel::Status;
Matcher<Status> StatusEq(const Status& expected) {
return AllOf(Field(&Status::code, Eq(expected.code)),
Field(&Status::error_message, Eq(expected.error_message)));
}
const int kChessboardImgHeight = 300;
const int kChessboardImgWidth = 250;
const int kChessboardImgChannels = 3;
TEST(ReadJpegHeader, ShouldParseValidJpgImage) {
const tflite::StringRef chessboard_image{
reinterpret_cast<const char*>(g_tflite_acceleration_chessboard_jpeg),
static_cast<size_t>(g_tflite_acceleration_chessboard_jpeg_len)};
ASSERT_GT(chessboard_image.len, 4);
JpegHeader header;
ASSERT_THAT(ReadJpegHeader(chessboard_image, &header),
StatusEq({kTfLiteOk, ""}));
EXPECT_THAT(header, JpegHeaderEq({kChessboardImgHeight, kChessboardImgWidth,
kChessboardImgChannels}));
}
TEST(ReadJpegHeader, ShouldFailForInvalidJpegImage) {
const std::string invalid_image = "invalid image content";
const tflite::StringRef invalid_image_ref{invalid_image.c_str(),
invalid_image.size()};
JpegHeader header;
EXPECT_THAT(ReadJpegHeader(invalid_image_ref, &header),
StatusEq({kTfLiteError, "Not a valid JPEG image."}));
}
TEST(ReadJpegHeader, ShouldFailForEmptyJpegImage) {
const tflite::StringRef invalid_image_ref{"", 0};
JpegHeader header;
EXPECT_THAT(ReadJpegHeader(invalid_image_ref, &header),
StatusEq({kTfLiteError, "Not a valid JPEG image."}));
}
TEST(ApplyHeaderToImage, ReturnsNewImageWithDifferentHeader) {
const tflite::StringRef chessboard_image{
reinterpret_cast<const char*>(g_tflite_acceleration_chessboard_jpeg),
static_cast<size_t>(g_tflite_acceleration_chessboard_jpeg_len)};
JpegHeader new_header{
.height = 20, .width = 30, .channels = 1, .bits_per_sample = 3};
std::string new_image_data;
ASSERT_THAT(
BuildImageWithNewHeader(chessboard_image, new_header, new_image_data),
StatusEq({kTfLiteOk, ""}));
const tflite::StringRef altered_image{new_image_data.c_str(),
new_image_data.size()};
JpegHeader header;
ASSERT_THAT(ReadJpegHeader(altered_image, &header),
StatusEq({kTfLiteOk, ""}));
EXPECT_THAT(header, JpegHeaderEq(new_header));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/mini_benchmark/jpeg_header_parser.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/mini_benchmark/jpeg_header_parser_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
324bf79d-ead5-4060-9186-ae0c63b490d5 | cpp | tensorflow/tensorflow | validator_runner_entrypoint | tensorflow/lite/experimental/acceleration/mini_benchmark/validator_runner_entrypoint.cc | tensorflow/lite/experimental/acceleration/mini_benchmark/validator_runner_entrypoint_test.cc | #include "tensorflow/lite/experimental/acceleration/mini_benchmark/validator_runner_entrypoint.h"
#include <dlfcn.h>
#include <memory>
#include <string>
#ifndef _WIN32
#include <fcntl.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <cstdint>
#include <thread>
#include <utility>
#include <vector>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/constants.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/fb_storage.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/file_lock.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/set_big_core_affinity.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/status_codes.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/validator.h"
#include "tensorflow/lite/nnapi/sl/include/SupportLibrary.h"
#include "tensorflow/lite/tools/model_loader.h"
namespace tflite {
namespace acceleration {
namespace {
using flatbuffers::Offset;
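// Runs one validation pass with the given settings. When a delegate shared
// object path is provided, it is loaded as an NNAPI support library for
// NNAPI settings, or forwarded as a stable delegate path for GPU settings.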
Validator::Status RunValidator(const std::string& model_path,
const std::string& delegate_so_path,
const TFLiteSettingsT& tflite_settings,
Validator::Results& results) {
TFLiteSettingsT copy(tflite_settings);
std::unique_ptr<const ::tflite::nnapi::NnApiSupportLibrary> nnapi_sl_handle;
if (!delegate_so_path.empty()) {
if (tflite_settings.nnapi_settings) {
nnapi_sl_handle =
::tflite::nnapi::loadNnApiSupportLibrary(delegate_so_path);
if (!nnapi_sl_handle) {
return Validator::Status{kMiniBenchmarkCannotLoadSupportLibrary,
BenchmarkStage_INITIALIZATION};
}
copy.nnapi_settings->support_library_handle =
reinterpret_cast<uint64_t>(nnapi_sl_handle->getFL5());
} else if (tflite_settings.gpu_settings) {
copy.stable_delegate_loader_settings =
std::make_unique<StableDelegateLoaderSettingsT>();
copy.stable_delegate_loader_settings->delegate_path = delegate_so_path;
}
}
flatbuffers::FlatBufferBuilder fbb;
fbb.Finish(CreateComputeSettings(fbb, ExecutionPreference_ANY,
CreateTFLiteSettings(fbb, ©)));
std::unique_ptr<tools::ModelLoader> model_loader =
tools::CreateModelLoaderFromPath(model_path);
if (!model_loader) {
return Validator::Status{kMinibenchmarkPreconditionNotMet,
BenchmarkStage_INITIALIZATION};
}
auto validator = std::make_unique<Validator>(
std::move(model_loader),
flatbuffers::GetRoot<ComputeSettings>(fbb.GetBufferPointer()));
return validator->RunValidation(&results);
}
}
extern "C" {
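// Entry point invoked in a separate process by the mini-benchmark runner.
// argv[3] is the model path, argv[4] the storage file for benchmark events,
// and argv[6] (when present) a delegate shared-object path. The child pid is
// written to stdout so the parent can enforce a timeout; results and errors
// are appended to the flatbuffer storage file.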
int Java_org_tensorflow_lite_acceleration_validation_entrypoint(int argc,
char** argv) {
if (argc < 6) return 1;
const std::string model_path = argv[3];
const std::string storage_path = argv[4];
const std::string nnapi_sl_path = argc > 6 ? argv[6] : "";
FileLock lock(storage_path + ".child_lock");
if (!lock.TryLock()) {
return kMinibenchmarkChildProcessAlreadyRunning;
}
std::string pid = std::to_string(getpid());
pid.resize(kPidBufferLength);
if (write(1, pid.data(), kPidBufferLength) != kPidBufferLength) {
return kMinibenchmarkPreconditionNotMet;
}
FlatbufferStorage<BenchmarkEvent> storage(storage_path);
MinibenchmarkStatus read_status = storage.Read();
if (read_status != kMinibenchmarkSuccess) {
return read_status;
}
TFLiteSettingsT tflite_settings;
int32_t set_big_core_affinity_errno = SetBigCoresAffinity();
if (set_big_core_affinity_errno != 0) {
flatbuffers::FlatBufferBuilder fbb;
storage.Append(
&fbb,
CreateBenchmarkEvent(
fbb, CreateTFLiteSettings(fbb, &tflite_settings),
BenchmarkEventType_RECOVERED_ERROR, 0,
CreateBenchmarkError(fbb, BenchmarkStage_INITIALIZATION,
set_big_core_affinity_errno,
0,
0,
kMinibenchmarkUnableToSetCpuAffinity),
Validator::BootTimeMicros(), Validator::WallTimeMicros()));
}
Validator::Status run_status =
Validator::Status{kMinibenchmarkNoValidationRequestFound};
for (int i = storage.Count() - 1; i >= 0; i--) {
const BenchmarkEvent* event = storage.Get(i);
if (event->event_type() == BenchmarkEventType_START) {
event->tflite_settings()->UnPackTo(&tflite_settings);
Validator::Results results;
run_status =
RunValidator(model_path, nnapi_sl_path, tflite_settings, results);
if (run_status.status != kMinibenchmarkSuccess) {
break;
}
flatbuffers::FlatBufferBuilder fbb;
std::vector<int64_t> delegate_prep_time_us{results.delegate_prep_time_us};
std::vector<Offset<tflite::BenchmarkMetric>> metrics;
metrics.reserve(results.metrics.size());
for (const auto& name_and_values : results.metrics) {
metrics.push_back(
CreateBenchmarkMetric(fbb, fbb.CreateString(name_and_values.first),
fbb.CreateVector(name_and_values.second)));
}
std::vector<Offset<BenchmarkResult_::InferenceOutput>> actual_output;
for (const auto& output : results.actual_inference_output) {
const uint8_t* output_uint8 =
reinterpret_cast<const uint8_t*>(output.data());
actual_output.push_back(BenchmarkResult_::CreateInferenceOutput(
fbb, fbb.CreateVector(output_uint8, output.size())));
}
return storage.Append(
&fbb,
CreateBenchmarkEvent(
fbb, CreateTFLiteSettings(fbb, &tflite_settings),
BenchmarkEventType_END,
CreateBenchmarkResult(
fbb, fbb.CreateVector(delegate_prep_time_us),
fbb.CreateVector(results.execution_time_us), 0, results.ok,
fbb.CreateVector(metrics), fbb.CreateVector(actual_output)),
0, Validator::BootTimeMicros(),
Validator::WallTimeMicros()));
}
}
flatbuffers::FlatBufferBuilder fbb;
return storage.Append(
&fbb, CreateBenchmarkEvent(
fbb, CreateTFLiteSettings(fbb, &tflite_settings),
BenchmarkEventType_ERROR, 0,
CreateBenchmarkError(fbb, run_status.stage, 0,
0, 0,
run_status.status),
Validator::BootTimeMicros(), Validator::WallTimeMicros()));
}
}
}
}
#endif | #include "tensorflow/lite/experimental/acceleration/mini_benchmark/validator_runner_entrypoint.h"
#include <sys/types.h>
#include <fstream>
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/fb_storage.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/status_codes.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/validator.h"
namespace tflite {
namespace acceleration {
static int32_t big_core_affinity_result;
int32_t SetBigCoresAffinity() { return big_core_affinity_result; }
namespace {
class ValidatorRunnerEntryPointTest : public ::testing::Test {
protected:
ValidatorRunnerEntryPointTest()
: storage_path_(::testing::TempDir() + "/events.fb"),
storage_(storage_path_) {}
std::vector<const tflite::BenchmarkEvent*> GetEvents() {
std::vector<const tflite::BenchmarkEvent*> result;
storage_.Read();
int storage_size = storage_.Count();
if (storage_size == 0) {
return result;
}
for (int i = 0; i < storage_size; i++) {
const ::tflite::BenchmarkEvent* event = storage_.Get(i);
result.push_back(event);
}
return result;
}
void ClearEvents() { (void)unlink(storage_path_.c_str()); }
void SetUp() override {
ClearEvents();
SetBigCoreAffinityReturns(0);
}
int CallEntryPoint(std::string cpu_affinity = "0") {
std::vector<std::string> args = {
"test",
"binary_name",
"Java_org_tensorflow_lite_acceleration_validation_entrypoint",
"model_path",
storage_path_,
"data_dir"};
std::vector<std::vector<char>> mutable_args(args.size());
std::vector<char*> argv(args.size());
for (int i = 0; i < mutable_args.size(); i++) {
mutable_args[i] = {args[i].data(), args[i].data() + args[i].size()};
mutable_args[i].push_back('\0');
argv[i] = mutable_args[i].data();
}
return Java_org_tensorflow_lite_acceleration_validation_entrypoint(
argv.size(), argv.data());
}
void SetBigCoreAffinityReturns(int32_t value) {
big_core_affinity_result = value;
}
std::string storage_path_;
FlatbufferStorage<BenchmarkEvent> storage_;
};
TEST_F(ValidatorRunnerEntryPointTest, NotEnoughArguments) {
std::vector<std::string> args = {
"test", "binary_name",
"Java_org_tensorflow_lite_acceleration_validation_entrypoint",
"model_path", storage_path_};
std::vector<std::vector<char>> mutable_args(args.size());
std::vector<char*> argv(args.size());
for (int i = 0; i < mutable_args.size(); i++) {
mutable_args[i] = {args[i].data(), args[i].data() + args[i].size()};
mutable_args[i].push_back('\0');
argv[i] = mutable_args[i].data();
}
EXPECT_EQ(1, Java_org_tensorflow_lite_acceleration_validation_entrypoint(
5, argv.data()));
}
TEST_F(ValidatorRunnerEntryPointTest, NoValidationRequestFound) {
EXPECT_EQ(kMinibenchmarkSuccess, CallEntryPoint());
std::vector<const tflite::BenchmarkEvent*> events = GetEvents();
ASSERT_THAT(events, testing::SizeIs(1));
const tflite::BenchmarkEvent* event = events[0];
EXPECT_EQ(BenchmarkEventType_ERROR, event->event_type());
EXPECT_EQ(kMinibenchmarkNoValidationRequestFound,
event->error()->mini_benchmark_error_code());
}
TEST_F(ValidatorRunnerEntryPointTest, CannotSetCpuAffinity) {
SetBigCoreAffinityReturns(10);
EXPECT_EQ(kMinibenchmarkSuccess, CallEntryPoint("invalid_cpu_affinity"));
std::vector<const tflite::BenchmarkEvent*> events = GetEvents();
ASSERT_THAT(events, testing::SizeIs(2));
const tflite::BenchmarkEvent* event = events[0];
EXPECT_EQ(BenchmarkEventType_RECOVERED_ERROR, event->event_type());
EXPECT_EQ(kMinibenchmarkUnableToSetCpuAffinity,
event->error()->mini_benchmark_error_code());
EXPECT_EQ(10, event->error()->exit_code());
}
TEST_F(ValidatorRunnerEntryPointTest, CannotLoadNnapi) {
flatbuffers::FlatBufferBuilder fbb;
TFLiteSettingsT tflite_settings;
NNAPISettingsT nnapi_settings;
ASSERT_EQ(
storage_.Append(
&fbb,
CreateBenchmarkEvent(
fbb,
CreateTFLiteSettings(fbb, Delegate_NNAPI,
CreateNNAPISettings(fbb, &nnapi_settings)),
BenchmarkEventType_START, 0, 0,
Validator::BootTimeMicros(), Validator::WallTimeMicros())),
kMinibenchmarkSuccess);
std::vector<std::string> args = {
"test",
"binary_name",
"Java_org_tensorflow_lite_acceleration_validation_entrypoint",
"model_path",
storage_path_,
"data_directory_path",
"nnapi_path"};
std::vector<std::vector<char>> mutable_args(args.size());
std::vector<char*> argv(args.size());
for (int i = 0; i < mutable_args.size(); i++) {
mutable_args[i] = {args[i].data(), args[i].data() + args[i].size()};
mutable_args[i].push_back('\0');
argv[i] = mutable_args[i].data();
}
EXPECT_EQ(kMinibenchmarkSuccess,
Java_org_tensorflow_lite_acceleration_validation_entrypoint(
7, argv.data()));
std::vector<const tflite::BenchmarkEvent*> events = GetEvents();
ASSERT_THAT(events, testing::SizeIs(2));
const tflite::BenchmarkEvent* event = events[1];
EXPECT_EQ(BenchmarkEventType_ERROR, event->event_type());
EXPECT_EQ(kMiniBenchmarkCannotLoadSupportLibrary,
event->error()->mini_benchmark_error_code());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/mini_benchmark/validator_runner_entrypoint.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/mini_benchmark/validator_runner_entrypoint_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
130d5aab-4743-43c9-b3fc-3e272b7b30b2 | cpp | tensorflow/tensorflow | runner | tensorflow/lite/experimental/acceleration/mini_benchmark/runner.cc | tensorflow/lite/experimental/acceleration/mini_benchmark/runner_test.cc | #include "tensorflow/lite/experimental/acceleration/mini_benchmark/runner.h"
#ifndef TFLITE_ACCELERATION_BENCHMARK_IN_PROCESS
#include <dlfcn.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>
#ifndef _WIN32
#include <poll.h>
#include <signal.h>
#include <unistd.h>
#endif
#include <cstdlib>
#include <fstream>
#include <sstream>
#include <string>
#include <thread>
#include <vector>
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/lite/allocation.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/constants.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/status_codes.h"
#include "tensorflow/lite/logger.h"
#include "tensorflow/lite/minimal_logging.h"
#if defined(__ANDROID__) && !defined(TFLITE_ACCELERATION_BENCHMARK_IN_PROCESS)
#include "tensorflow/lite/experimental/acceleration/compatibility/android_info.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_runner_executable.h"
#endif
namespace tflite {
namespace acceleration {
namespace {
std::string ShellEscape(const std::string& src);
}
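// On Android this extracts the embedded runner binary into the temporary
// directory, makes it executable, and records the shared object that holds
// function_pointer_ so the benchmark can be re-run out of process. On other
// platforms, or for in-process benchmarking, it only checks preconditions.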
MinibenchmarkStatus ProcessRunner::Init() {
if (!function_pointer_) {
return kMinibenchmarkPreconditionNotMet;
}
#if !defined(__ANDROID__) || defined(TFLITE_ACCELERATION_BENCHMARK_IN_PROCESS)
return kMinibenchmarkSuccess;
#else
tflite::acceleration::AndroidInfo android_info;
if (!tflite::acceleration::RequestAndroidInfo(&android_info).ok()) {
return kMinibenchmarkRequestAndroidInfoFailed;
}
if (android_info.android_sdk_version.length() < 2 ||
android_info.android_sdk_version < "23") {
return kMinibenchmarkUnsupportedPlatform;
}
std::string soname;
Dl_info dl_info;
int status = dladdr(function_pointer_, &dl_info);
if (status != 0) {
if (dl_info.dli_fname) {
soname = dl_info.dli_fname;
} else {
return kMinibenchmarkDliFnameWasNull;
}
} else {
return kMinibenchmarkDladdrReturnedZero;
}
if (soname.size() >= 4 && soname.substr(soname.size() - 4) == ".apk") {
return kMinibenchmarkDliFnameHasApkNameOnly;
}
std::string runner_path;
runner_path = temporary_path_ + "/runner";
(void)unlink(runner_path.c_str());
std::string runner_contents(
reinterpret_cast<const char*>(g_tflite_acceleration_embedded_runner),
g_tflite_acceleration_embedded_runner_len);
std::ofstream f(runner_path, std::ios::binary);
if (!f.is_open()) {
return kMinibenchmarkCouldntOpenTemporaryFileForBinary;
}
f << runner_contents;
f.close();
if (chmod(runner_path.c_str(), 0500) != 0) {
return kMinibenchmarkCouldntChmodTemporaryFile;
}
runner_path = ShellEscape(runner_path);
if (android_info.android_sdk_version >= "29") {
#if defined(__arm__) || defined(__i386__)
std::string linker_path = "/system/bin/linker";
#else
std::string linker_path = "/system/bin/linker64";
#endif
runner_path = linker_path + " " + runner_path;
}
runner_path_ = runner_path;
soname_ = soname;
return kMinibenchmarkSuccess;
#endif
}
#ifndef _WIN32
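// Reads the child pid that the subprocess prints as its first output, then
// waits up to timeout_millisec_ for the stream to close. If the timeout
// expires, the child is killed with SIGKILL and true is returned.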
bool ProcessRunner::KillProcessWhenTimedOut(FILE* fstream) {
const int array_length = 1 + kPidBufferLength;
char buffer[array_length];
memset(buffer, '\0', array_length);
ssize_t length = fread(buffer, 1, kPidBufferLength, fstream);
int pid;
if (length != kPidBufferLength || !absl::SimpleAtoi(buffer, &pid)) {
TF_LITE_REPORT_ERROR(error_reporter_,
"Failed to get Validator subprocess id: %s", buffer);
return false;
}
struct pollfd pfd[1];
pfd[0].fd = fileno(fstream);
pfd[0].events = POLLHUP;
int poll_ret = poll(pfd, 1, timeout_millisec_);
if (poll_ret == 0) {
kill(pid, SIGKILL);
return true;
} else if (poll_ret < 0) {
TF_LITE_REPORT_ERROR(error_reporter_, "Validator timer failed: %s",
strerror(errno));
}
return false;
}
#endif
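// Runs the benchmark either in-process or in a subprocess started with
// popen(). When a model allocation is given, its bytes are streamed to the
// child through a pipe that is advertised on the command line as a
// "pipe:<read_fd>:<write_fd>:<size>" argument.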
MinibenchmarkStatus ProcessRunner::Run(const Allocation* model_allocation,
const std::vector<std::string>& args,
std::string* output, int* exitcode,
int* signal) {
#ifdef _WIN32
return kMinibenchmarkUnsupportedPlatform;
#else
if (!output || !exitcode) {
return kMinibenchmarkPreconditionNotMet;
}
int benchmark_process_status = 0;
MinibenchmarkStatus status = kMinibenchmarkCommandFailed;
#ifdef TFLITE_ACCELERATION_BENCHMARK_IN_PROCESS
if (function_pointer_) {
benchmark_process_status = RunInprocess(model_allocation, args);
} else {
return kMinibenchmarkPreconditionNotMet;
}
#else
if (runner_path_.empty()) {
return kMinibenchmarkPreconditionNotMet;
}
std::string cmd = runner_path_ + " " + ShellEscape(soname_) + " " +
ShellEscape(function_name_);
int pipe_fds[2];
if (model_allocation != nullptr) {
if (pipe(pipe_fds) < 0) {
*exitcode = errno;
return kMinibenchmarkPipeFailed;
}
std::string pipe_model_path = absl::StrCat(
"pipe:", pipe_fds[0], ":", pipe_fds[1], ":", model_allocation->bytes());
cmd = cmd + " " + ShellEscape(pipe_model_path);
}
for (const auto& arg : args) {
cmd = cmd + " " + ShellEscape(arg);
}
FILE* f = popen(cmd.c_str(), "r");
if (!f) {
*exitcode = errno;
return kMinibenchmarkPopenFailed;
}
if (model_allocation != nullptr) {
close(pipe_fds[0]);
int written_bytes = 0;
int remaining_bytes = model_allocation->bytes();
const uint8_t* current =
static_cast<const uint8_t*>(model_allocation->base());
while (remaining_bytes > 0 &&
(written_bytes = write(pipe_fds[1], current, remaining_bytes)) > 0) {
remaining_bytes -= written_bytes;
current += written_bytes;
}
close(pipe_fds[1]);
if (written_bytes <= 0 || remaining_bytes > 0) {
*exitcode = errno;
return kMinibenchmarkPipeFailed;
}
}
if (timeout_millisec_ > 0 && KillProcessWhenTimedOut(f)) {
status = kMinibenchmarkCommandTimedOut;
TFLITE_LOG_PROD(
TFLITE_LOG_INFO,
"Validator did not finish after %dms. Tried to kill the test.",
timeout_millisec_);
}
std::vector<char> buffer(4 * 1024, 0);
ssize_t length;
std::string ret;
do {
length = fread(buffer.data(), 1, buffer.size(), f);
ret = ret + std::string(buffer.data(), length);
} while (length == buffer.size());
*output = ret;
benchmark_process_status = pclose(f);
#endif
if (WIFEXITED(benchmark_process_status)) {
*exitcode = WEXITSTATUS(benchmark_process_status);
*signal = 0;
if (*exitcode == kMinibenchmarkSuccess) {
status = kMinibenchmarkSuccess;
}
} else if (WIFSIGNALED(benchmark_process_status)) {
*exitcode = 0;
*signal = WTERMSIG(benchmark_process_status);
}
return status;
#endif
}
#ifdef TFLITE_ACCELERATION_BENCHMARK_IN_PROCESS
#ifndef __W_EXITCODE
#define __W_EXITCODE(ret, sig) ((ret) << 8 | (sig))
#endif
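// In-process variant: when a model allocation is given, a helper thread
// feeds it through a pipe while the entry point runs on the current thread
// with a synthesized argv. The result is wrapped with __W_EXITCODE so
// callers can keep using WIFEXITED/WEXITSTATUS as for a real child process.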
int ProcessRunner::RunInprocess(const Allocation* model_allocation,
const std::vector<std::string>& user_args) {
TFLITE_LOG_PROD(TFLITE_LOG_INFO, "Running Validator in-process.");
std::vector<std::string> args_string;
args_string.push_back("inprocess");
args_string.push_back("inprocess");
args_string.push_back(function_name_);
std::thread write_thread;
if (model_allocation != nullptr) {
int pipe_fds[2];
if (pipe(pipe_fds) < 0) {
return __W_EXITCODE(kMinibenchmarkPipeFailed, 0);
}
args_string.push_back(
absl::StrCat("pipe:", pipe_fds[0], ":-1:", model_allocation->bytes()));
write_thread = std::thread([pipe_fds, model_allocation,
error_reporter = error_reporter_]() {
int written_bytes = 0;
int remaining_bytes = model_allocation->bytes();
const uint8_t* current =
static_cast<const uint8_t*>(model_allocation->base());
while (remaining_bytes > 0 &&
(written_bytes = write(pipe_fds[1], current, remaining_bytes)) >
0) {
remaining_bytes -= written_bytes;
current += written_bytes;
}
close(pipe_fds[1]);
if (written_bytes < 0 || remaining_bytes > 0) {
TF_LITE_REPORT_ERROR(
error_reporter,
"Failed to write Model to pipe: %s. Expect to write %d "
"bytes, %d bytes written.",
strerror(errno), remaining_bytes, written_bytes);
}
});
}
for (int i = 0; i < user_args.size(); i++) {
args_string.push_back(user_args[i]);
}
std::vector<std::vector<char>> args_char(args_string.size());
std::vector<char*> argv(args_string.size());
for (int i = 0; i < args_string.size(); i++) {
args_char[i] = {args_string[i].begin(), args_string[i].end()};
args_char[i].push_back('\0');
argv[i] = args_char[i].data();
}
int (*function_pointer)(int, char**) =
reinterpret_cast<int (*)(int, char**)>(function_pointer_);
int exit_code = __W_EXITCODE(function_pointer(argv.size(), argv.data()), 0);
if (write_thread.joinable()) {
write_thread.join();
}
return exit_code;
}
#endif
namespace {
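// Helpers for building the runner command line. kDontNeedShellEscapeChars
// lists characters that may be passed to the shell unquoted; ShellEscape()
// wraps anything else in single or double quotes with the usual escapes
// (e.g. a hypothetical "my model.tflite" becomes 'my model.tflite').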
static const char kDontNeedShellEscapeChars[] =
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"abcdefghijklmnopqrstuvwxyz"
"0123456789+-_.=/:,@";
std::string ShellEscape(const std::string& src) {
if (!src.empty() &&
src.find_first_not_of(kDontNeedShellEscapeChars) == std::string::npos) {
return src;
} else if (src.find('\'') == std::string::npos) {
return "'" + src + "'";
} else {
std::string result = "\"";
for (const char c : src) {
switch (c) {
case '\\':
case '$':
case '"':
case '`':
result.push_back('\\');
}
result.push_back(c);
}
result.push_back('"');
return result;
}
}
}
}
} | #include "tensorflow/lite/experimental/acceleration/mini_benchmark/runner.h"
#include <dlfcn.h>
#include <signal.h>
#include <cstddef>
#include <fstream>
#include <memory>
#include <string>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/compiler/mlir/lite/schema/mutable/schema_generated.h"
#include "tensorflow/lite/allocation.h"
#include "tensorflow/lite/stderr_reporter.h"
#ifdef __ANDROID__
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_runner_executable.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_runner_unit_test_entry_points.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/mini_benchmark_test_helper.h"
#endif
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/status_codes.h"
extern "C" {
int TfLiteFunctionInBinary(int argc, char** argv) { return 2; }
}
namespace tflite {
namespace acceleration {
namespace {
typedef int (*EntryPoint)(int, char**);
using flatbuffers::FlatBufferBuilder;
struct RunnerTest : ::testing::Test {
protected:
void* LoadEntryPointModule() {
void* module =
dlopen(entry_point_file.c_str(), RTLD_NOW | RTLD_LOCAL | RTLD_NODELETE);
EXPECT_TRUE(module) << dlerror();
return module;
}
EntryPoint Load(const char* name) {
#ifdef __ANDROID__
void* module = LoadEntryPointModule();
if (!module) {
return nullptr;
}
#else
auto module = RTLD_DEFAULT;
#endif
void* symbol = dlsym(module, name);
return reinterpret_cast<EntryPoint>(symbol);
}
void SetUp() override {
#ifdef __ANDROID__
entry_point_file = MiniBenchmarkTestHelper::DumpToTempFile(
"librunner_unit_test_entry_points.so",
g_tflite_acceleration_embedded_runner_unit_test_entry_points,
g_tflite_acceleration_embedded_runner_unit_test_entry_points_len);
ASSERT_TRUE(!entry_point_file.empty());
#endif
}
void Init(const char* symbol_name) {
EntryPoint function = Load(symbol_name);
ASSERT_TRUE(function);
runner = std::make_unique<ProcessRunner>(::testing::TempDir(), symbol_name,
function, timeout_ms);
ASSERT_EQ(runner->Init(), kMinibenchmarkSuccess);
}
FlatBufferBuilder CreateTestModel() {
ModelT model;
model.description = "test";
flatbuffers::FlatBufferBuilder fbb;
FinishModelBuffer(fbb, CreateModel(fbb, &model));
return fbb;
}
int exitcode = 0;
int signal = 0;
int timeout_ms = 0;
std::string output;
std::unique_ptr<ProcessRunner> runner;
std::string entry_point_file;
};
#if !defined(__aarch64__)
TEST_F(RunnerTest, LoadSymbol) {
EntryPoint TfLiteJustReturnZero = Load("TfLiteJustReturnZero");
ASSERT_TRUE(TfLiteJustReturnZero);
#ifdef __ANDROID__
Dl_info dl_info;
int status = dladdr(reinterpret_cast<void*>(TfLiteJustReturnZero), &dl_info);
ASSERT_TRUE(status) << dlerror();
ASSERT_TRUE(dl_info.dli_fname) << dlerror();
void* this_module =
dlopen(dl_info.dli_fname, RTLD_NOW | RTLD_LOCAL | RTLD_NODELETE);
ASSERT_TRUE(this_module);
void* symbol = dlsym(this_module, "TfLiteJustReturnZero");
EXPECT_TRUE(symbol);
#endif
}
TEST_F(RunnerTest, JustReturnZero) {
ASSERT_NO_FATAL_FAILURE(Init("TfLiteJustReturnZero"));
EXPECT_EQ(runner->Run(nullptr, {}, &output, &exitcode, &signal),
kMinibenchmarkCommandFailed);
EXPECT_EQ(exitcode, 0);
EXPECT_EQ(signal, 0);
EXPECT_EQ(output, "");
}
TEST_F(RunnerTest, ReturnOne) {
ASSERT_NO_FATAL_FAILURE(Init("TfLiteReturnOne"));
EXPECT_EQ(runner->Run(nullptr, {}, &output, &exitcode, &signal),
kMinibenchmarkCommandFailed);
EXPECT_EQ(exitcode, 1);
EXPECT_EQ(signal, 0);
EXPECT_EQ(output, "");
}
TEST_F(RunnerTest, ReturnSuccess) {
ASSERT_NO_FATAL_FAILURE(Init("TfLiteReturnSuccess"));
EXPECT_EQ(runner->Run(nullptr, {}, &output, &exitcode, &signal),
kMinibenchmarkSuccess);
EXPECT_EQ(exitcode, kMinibenchmarkSuccess);
EXPECT_EQ(signal, 0);
EXPECT_EQ(output, "");
}
TEST_F(RunnerTest, NullFunctionPointer) {
ProcessRunner runner("foo", "bar", nullptr);
EXPECT_EQ(runner.Init(), kMinibenchmarkPreconditionNotMet);
EXPECT_EQ(runner.Run(nullptr, {}, &output, &exitcode, &signal),
kMinibenchmarkPreconditionNotMet);
}
#ifdef __ANDROID__
TEST_F(RunnerTest, SigKillSubprocess) {
ASSERT_NO_FATAL_FAILURE(Init("TfLiteSigKillSelf"));
EXPECT_EQ(runner->Run(nullptr, {}, &output, &exitcode, &signal),
kMinibenchmarkCommandFailed);
EXPECT_EQ(exitcode, 0);
EXPECT_EQ(signal, SIGKILL);
EXPECT_EQ(output, "");
}
TEST_F(RunnerTest, WriteOk) {
ASSERT_NO_FATAL_FAILURE(Init("TfLiteWriteOk"));
EXPECT_EQ(runner->Run(nullptr, {}, &output, &exitcode, &signal),
kMinibenchmarkSuccess);
EXPECT_EQ(exitcode, kMinibenchmarkSuccess);
EXPECT_EQ(signal, 0);
EXPECT_EQ(output, "ok\n");
}
TEST_F(RunnerTest, Write10kChars) {
ASSERT_NO_FATAL_FAILURE(Init("TfLiteWrite10kChars"));
EXPECT_EQ(runner->Run(nullptr, {}, &output, &exitcode, &signal),
kMinibenchmarkSuccess);
EXPECT_EQ(exitcode, kMinibenchmarkSuccess);
EXPECT_EQ(signal, 0);
EXPECT_EQ(output.size(), 10000);
}
TEST_F(RunnerTest, ArgsArePassed) {
ASSERT_NO_FATAL_FAILURE(Init("TfLiteWriteArgs"));
EXPECT_EQ(
runner->Run(nullptr, {"foo", "bar", "baz"}, &output, &exitcode, &signal),
kMinibenchmarkSuccess);
EXPECT_EQ(exitcode, kMinibenchmarkSuccess);
EXPECT_EQ(signal, 0);
EXPECT_EQ(output, "foo\nbar\nbaz\n");
}
TEST_F(RunnerTest, SymbolLookupFailed) {
ProcessRunner runner(::testing::TempDir(), "TfLiteFunctionInBinary",
TfLiteFunctionInBinary);
EXPECT_EQ(runner.Init(), kMinibenchmarkSuccess);
EXPECT_EQ(runner.Run(nullptr, {}, &output, &exitcode, &signal),
kMinibenchmarkCommandFailed)
<< output;
EXPECT_EQ(exitcode, kMinibenchmarkRunnerMainSymbolLookupFailed) << output;
}
TEST_F(RunnerTest, ReadModelFromPipe) {
ASSERT_NO_FATAL_FAILURE(Init("TfLiteReadFromPipe"));
FlatBufferBuilder model = CreateTestModel();
MemoryAllocation alloc(model.GetBufferPointer(), model.GetSize(),
tflite::DefaultErrorReporter());
EXPECT_EQ(runner->Run(&alloc, {}, &output, &exitcode, &signal),
kMinibenchmarkSuccess);
EXPECT_EQ(exitcode, kMinibenchmarkSuccess);
EXPECT_EQ(signal, 0);
EXPECT_EQ(output,
std::string((char*)model.GetBufferPointer(), model.GetSize()));
}
TEST_F(RunnerTest, ProcessTimedOut) {
timeout_ms = 1000;
ASSERT_NO_FATAL_FAILURE(Init("TfLiteWritePidThenSleepNSec"));
EXPECT_EQ(runner->Run(nullptr, {"30"}, &output, &exitcode, &signal),
kMinibenchmarkCommandTimedOut);
EXPECT_EQ(signal, SIGKILL);
EXPECT_EQ(output, "");
EXPECT_EQ(exitcode, 0);
}
TEST_F(RunnerTest, ProcessDoNotTimedOut) {
timeout_ms = 3000;
ASSERT_NO_FATAL_FAILURE(Init("TfLiteWritePidThenSleepNSec"));
EXPECT_EQ(runner->Run(nullptr, {"1"}, &output, &exitcode, &signal),
kMinibenchmarkSuccess);
EXPECT_EQ(signal, 0);
EXPECT_EQ(output, "");
EXPECT_EQ(exitcode, kMinibenchmarkSuccess);
}
#else
TEST_F(RunnerTest, ReadModelFromPipeNonAndroid) {
ASSERT_NO_FATAL_FAILURE(Init("TfLiteReadFromPipeInProcess"));
FlatBufferBuilder model = CreateTestModel();
MemoryAllocation alloc(model.GetBufferPointer(), model.GetSize(),
tflite::DefaultErrorReporter());
EXPECT_EQ(runner->Run(&alloc, {}, &output, &exitcode, &signal),
kMinibenchmarkSuccess);
}
#endif
#endif
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/mini_benchmark/runner.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/mini_benchmark/runner_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4cea8920-dc74-417f-9f7e-44a4c6a5967c | cpp | tensorflow/tensorflow | validator_runner_impl | tensorflow/lite/experimental/acceleration/mini_benchmark/validator_runner_impl.cc | tensorflow/lite/experimental/acceleration/mini_benchmark/validator_runner_impl_test.cc | #include "tensorflow/lite/experimental/acceleration/mini_benchmark/validator_runner_impl.h"
#include <iostream>
#include <memory>
#include <ostream>
#include <string>
#include <thread>
#include <utility>
#include <vector>
#include "absl/strings/match.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_split.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/allocation.h"
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/benchmark_result_evaluator.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/fb_storage.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/file_lock.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/model_modifier/custom_validation_embedder.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/runner.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/status_codes.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/validator.h"
#include "tensorflow/lite/logger.h"
#include "tensorflow/lite/minimal_logging.h"
#include "tensorflow/lite/tools/model_loader.h"
namespace tflite {
namespace acceleration {
namespace {
using ::flatbuffers::FlatBufferBuilder;
std::pair<std::unique_ptr<Allocation>, std::vector<uint8_t>> CopyModel(
const Allocation* input, ErrorReporter* error_reporter) {
std::vector<uint8_t> copy;
if (!input) {
return {nullptr, copy};
}
copy.resize(input->bytes());
memcpy(copy.data(), input->base(), input->bytes());
return {std::make_unique<MemoryAllocation>(copy.data(), copy.size(),
error_reporter),
std::move(copy)};
}
class FdHolder {
public:
explicit FdHolder(int fd) : fd_(fd) {}
FdHolder(FdHolder&& other) = default;
FdHolder& operator=(FdHolder&& other) = default;
~FdHolder() {
if (fd_ > 0) {
close(fd_);
}
}
private:
int fd_;
};
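// If the model path is of the form "fd:<fd>:...", dup()s the descriptor so
// the detached validation thread keeps a usable file descriptor even if the
// caller closes the original, and rewrites the path to use the duplicate.
// The returned holder closes the duplicated descriptor when destroyed.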
std::unique_ptr<FdHolder> UpdateModelPathIfUsingFd(std::string& model_path) {
if (!absl::StartsWith(model_path, "fd:")) {
return nullptr;
}
std::vector<std::string> parts = absl::StrSplit(model_path, ':');
int model_fd;
if (!absl::SimpleAtoi(parts[1], &model_fd)) {
TFLITE_LOG_PROD(TFLITE_LOG_ERROR,
"Failed to parse file descriptor %s from model_path %s",
parts[1].c_str(), model_path.c_str());
return nullptr;
}
int new_fd = dup(model_fd);
if (new_fd < 0) {
TFLITE_LOG_PROD(
TFLITE_LOG_ERROR,
"Failed to dup() file descriptor. Original fd: %d errno: %d", model_fd,
errno);
return nullptr;
}
parts[1] = std::to_string(new_fd);
model_path = absl::StrJoin(parts, ":");
return std::make_unique<FdHolder>(new_fd);
}
}
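// Checks the configuration, loads the model (embedding custom validation
// inputs when requested), resolves the NNAPI support library and GPU module
// paths, and verifies that the validation entry point symbol and the process
// runner can be initialized.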
MinibenchmarkStatus ValidatorRunnerImpl::Init() {
if (storage_path_.empty()) {
TF_LITE_REPORT_ERROR(error_reporter_, "storage_path is empty.");
return kMinibenchmarkPreconditionNotMet;
}
if (data_directory_path_.empty()) {
TF_LITE_REPORT_ERROR(error_reporter_, "data_directory_path is empty.");
return kMinibenchmarkPreconditionNotMet;
}
if (benchmark_evaluator_ == nullptr) {
TF_LITE_REPORT_ERROR(error_reporter_, "benchmark_evaluator is null.");
return kMinibenchmarkPreconditionNotMet;
}
MinibenchmarkStatus status = storage_.Read();
if (status != kMinibenchmarkSuccess) {
TF_LITE_REPORT_ERROR(error_reporter_, "Storage::Read failed.");
return status;
}
std::unique_ptr<tools::ModelLoader> model_loader =
tools::CreateModelLoaderFromPath(fd_or_model_path_);
if (!model_loader) {
TF_LITE_REPORT_ERROR(error_reporter_, "Failed to parse model path.");
return kMinibenchmarkPreconditionNotMet;
}
if (!model_loader->Init() || !model_loader->GetModel()) {
TF_LITE_REPORT_ERROR(error_reporter_, "Could not load model.");
return kMinibenchmarkModelInitFailed;
}
if (custom_validation_embedder_) {
status = custom_validation_embedder_->BuildModel(
*model_loader->GetModel()->GetModel(), model_with_custom_input_);
if (status != kMinibenchmarkSuccess) {
TF_LITE_REPORT_ERROR(error_reporter_,
"Failed to embed golden input to model: %d",
static_cast<int>(status));
return status;
}
model_allocation_ = std::make_unique<MemoryAllocation>(
model_with_custom_input_.GetBufferPointer(),
model_with_custom_input_.GetSize(), error_reporter_);
} else if (model_loader->type() ==
tools::ModelLoader::Type::kBufferModelLoader) {
const Allocation* alloc = model_loader->GetModel()->allocation();
if (!alloc || !alloc->valid() || !alloc->base() || alloc->bytes() <= 0) {
TF_LITE_REPORT_ERROR(error_reporter_,
"Internal error: BufferModelLoader doesn't have a "
"valid allocation.");
return kMinibenchmarkPreconditionNotMet;
}
model_allocation_ = std::make_unique<MemoryAllocation>(
alloc->base(), alloc->bytes(), error_reporter_);
}
status = nnapi_helper_.Load();
if (status != kMinibenchmarkSuccess) {
TF_LITE_REPORT_ERROR(error_reporter_, "Failed to load NNAPI SL: %d",
static_cast<int>(status));
return status;
}
status = gpu_helper_.Load();
if (status != kMinibenchmarkSuccess) {
TF_LITE_REPORT_ERROR(error_reporter_, "Failed to load GPU Module: %d",
static_cast<int>(status));
return status;
}
status = validation_entrypoint_helper_.Validate();
if (status != kMinibenchmarkSuccess) {
return status;
}
ProcessRunner check_runner(data_directory_path_,
validation_entrypoint_helper_.name().c_str(),
validation_entrypoint_helper_.LoadEntrypoint(),
timeout_ms_, error_reporter_);
status = check_runner.Init();
if (status != kMinibenchmarkSuccess) {
TF_LITE_REPORT_ERROR(error_reporter_, "Runner::Init returned %d",
static_cast<int>(status));
return status;
}
return kMinibenchmarkSuccess;
}
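// Starts a detached thread that, for each TFLiteSettings, runs the
// validation entry point through ProcessRunner and appends START and ERROR
// events to the flatbuffer storage file. A ".parent_lock" file lock ensures
// that only one such thread runs per storage path at a time.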
void ValidatorRunnerImpl::TriggerValidationAsync(
std::vector<FlatBufferBuilder> tflite_settings,
absl::string_view storage_path) {
if (tflite_settings.empty()) {
return;
}
storage_ = FlatbufferStorage<BenchmarkEvent>(storage_path, error_reporter_);
std::thread detached_thread(
[original_model_path = fd_or_model_path_,
storage_path = std::string(storage_path),
data_directory_path = data_directory_path_,
tflite_settings = std::move(tflite_settings),
validation_entrypoint_name =
validation_entrypoint_helper_.name().c_str(),
validation_entrypoint = validation_entrypoint_helper_.LoadEntrypoint(),
nnapi_sl_path = nnapi_helper_.nnapi_sl_path(),
gpu_so_path = gpu_helper_.gpu_so_path(),
allocation_and_model =
CopyModel(model_allocation_.get(), error_reporter_),
timeout_ms = timeout_ms_]() {
FileLock lock(absl::StrCat(storage_path, ".parent_lock"));
if (!lock.TryLock()) {
return;
}
std::string model_path = original_model_path;
std::unique_ptr<FdHolder> fd_holder =
UpdateModelPathIfUsingFd(model_path);
for (auto& one_setting : tflite_settings) {
FlatbufferStorage<BenchmarkEvent> storage(storage_path);
TFLiteSettingsT tflite_settings_obj;
flatbuffers::GetRoot<TFLiteSettings>(one_setting.GetBufferPointer())
->UnPackTo(&tflite_settings_obj);
TFLITE_LOG_PROD(TFLITE_LOG_INFO,
"Run validation with entry point '%s' %s",
validation_entrypoint_name, storage_path.c_str());
ProcessRunner runner(data_directory_path, validation_entrypoint_name,
validation_entrypoint, timeout_ms);
int exitcode = 0;
int signal = 0;
MinibenchmarkStatus status = runner.Init();
if (status == kMinibenchmarkSuccess) {
flatbuffers::FlatBufferBuilder fbb;
status = storage.Append(
&fbb,
CreateBenchmarkEvent(
fbb, CreateTFLiteSettings(fbb, &tflite_settings_obj),
BenchmarkEventType_START, 0, 0,
Validator::BootTimeMicros(), Validator::WallTimeMicros()));
}
if (status != kMinibenchmarkSuccess) {
flatbuffers::FlatBufferBuilder fbb;
storage.Append(
&fbb,
CreateBenchmarkEvent(
fbb, CreateTFLiteSettings(fbb, &tflite_settings_obj),
BenchmarkEventType_ERROR, 0,
CreateBenchmarkError(fbb, BenchmarkStage_INITIALIZATION,
exitcode, signal, {},
status),
Validator::BootTimeMicros(), Validator::WallTimeMicros()));
continue;
}
std::vector<std::string> args;
if (!allocation_and_model.first) {
args.push_back(model_path);
}
args.push_back(storage_path);
args.push_back(data_directory_path);
if (tflite_settings_obj.delegate == tflite::Delegate_NNAPI &&
!nnapi_sl_path.empty()) {
TFLITE_LOG_PROD(
TFLITE_LOG_INFO,
"Running benchmark using NNAPI support library at path '%s'",
nnapi_sl_path.c_str());
args.push_back(nnapi_sl_path);
} else if (tflite_settings_obj.delegate == tflite::Delegate_GPU &&
!gpu_so_path.empty()) {
TFLITE_LOG_PROD(
TFLITE_LOG_INFO,
"Running benchmark using GPU Delegate Module at path '%s'",
gpu_so_path.c_str());
args.push_back(gpu_so_path);
}
std::string output;
status = runner.Run(allocation_and_model.first.get(), args, &output,
&exitcode, &signal);
if (status != kMinibenchmarkSuccess) {
std::cout << "Run() returned " << status << std::endl;
flatbuffers::FlatBufferBuilder fbb;
storage.Append(
&fbb,
CreateBenchmarkEvent(
fbb, CreateTFLiteSettings(fbb, &tflite_settings_obj),
BenchmarkEventType_ERROR, 0,
CreateBenchmarkError(fbb, BenchmarkStage_UNKNOWN, exitcode,
signal, {}, status),
Validator::BootTimeMicros(), Validator::WallTimeMicros()));
}
}
});
detached_thread.detach();
}
std::vector<const BenchmarkEvent*>
ValidatorRunnerImpl::GetSuccessfulResultsFromStorage() {
std::vector<const BenchmarkEvent*> results;
storage_.Read();
for (int i = 0; i < storage_.Count(); i++) {
const BenchmarkEvent* event = storage_.Get(i);
TFLITE_LOG_PROD(TFLITE_LOG_WARNING, "Benchmark event(%d).",
event->event_type());
if (benchmark_evaluator_->IsValidationSuccessEvent(*event)) {
results.push_back(event);
} else if (event->event_type() == BenchmarkEventType_ERROR) {
TFLITE_LOG(
TFLITE_LOG_WARNING,
"Benchmark event failed with error code (%d), signal (%d), exit code "
"(%d), stage (%d), mini benchmark error code (%d).\n",
event->error()->error_code(), event->error()->signal(),
event->error()->exit_code(), event->error()->stage(),
event->error()->mini_benchmark_error_code());
}
}
return results;
}
std::vector<FlatBufferBuilder> ValidatorRunnerImpl::GetCompletedResults() {
storage_.Read();
std::vector<FlatBufferBuilder> results;
for (int i = 0; i < storage_.Count(); i++) {
const BenchmarkEvent* event = storage_.Get(i);
if (event->event_type() != BenchmarkEventType_ERROR &&
event->event_type() != BenchmarkEventType_END) {
continue;
}
BenchmarkEventT event_obj;
event->UnPackTo(&event_obj);
if (benchmark_evaluator_->IsValidationSuccessEvent(*event)) {
event_obj.result->ok = true;
}
FlatBufferBuilder fbb;
fbb.Finish(CreateBenchmarkEvent(fbb, &event_obj));
results.emplace_back(std::move(fbb));
}
return results;
}
int ValidatorRunnerImpl::GetNumCompletedResults() {
storage_.Read();
int num_results = 0;
for (int i = 0; i < storage_.Count(); i++) {
const BenchmarkEvent* event = storage_.Get(i);
if (event->event_type() == BenchmarkEventType_ERROR ||
(event->event_type() == BenchmarkEventType_END && event->result())) {
num_results++;
}
}
return num_results;
}
MinibenchmarkStatus
ValidatorRunnerImpl::ValidationEntrypointHelper::Validate() {
#ifndef _WIN32
if (!LoadEntrypoint()) {
TF_LITE_REPORT_ERROR(error_reporter_, "Could not load symbol '%s': '%s'",
validation_entrypoint_name_.c_str(), dlerror());
return kMinibenchmarkValidationEntrypointSymbolNotFound;
}
return kMinibenchmarkSuccess;
#else
return kMinibenchmarkUnsupportedPlatform;
#endif
}
ValidatorRunnerImpl::ValidationEntrypointHelper::EntrypointFunc*
ValidatorRunnerImpl::ValidationEntrypointHelper::LoadEntrypoint() {
#ifndef _WIN32
return reinterpret_cast<int (*)(int, char**)>(
dlsym(RTLD_DEFAULT, validation_entrypoint_name_.c_str()));
#endif
return nullptr;
}
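// Recovers the on-disk path of the NNAPI support library by running dladdr()
// on one of its entry points, so the path can be forwarded to the validation
// subprocess.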
MinibenchmarkStatus ValidatorRunnerImpl::NnapiHelper::Load() {
if (nnapi_sl_) {
#ifndef _WIN32
Dl_info dl_info;
if (!nnapi_sl_->ANeuralNetworks_getRuntimeFeatureLevel) {
return kMiniBenchmarkCannotLoadSupportLibrary;
}
int status = dladdr(reinterpret_cast<void*>(
nnapi_sl_->ANeuralNetworks_getRuntimeFeatureLevel),
&dl_info);
if (status == 0 || !dl_info.dli_fname) {
return kMiniBenchmarkCannotLoadSupportLibrary;
}
nnapi_sl_path_ = dl_info.dli_fname;
#else
return kMinibenchmarkUnsupportedPlatform;
#endif
}
return kMinibenchmarkSuccess;
}
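// Recovers the path of the shared object behind gpu_plugin_handle_ via
// dladdr(). Note that the brace/#else layout below balances in both the
// _WIN32 and non-_WIN32 configurations, even though it looks unusual.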
MinibenchmarkStatus ValidatorRunnerImpl::GpuHelper::Load() {
if (gpu_plugin_handle_) {
#ifndef _WIN32
Dl_info dl_info;
int status = dladdr(gpu_plugin_handle_, &dl_info);
if (status == 0 || !dl_info.dli_fname) {
return kMinibenchmarkCannotLoadGpuModule;
}
gpu_so_path_ = dl_info.dli_fname;
}
#else
return kMinibenchmarkUnsupportedPlatform;
}
#endif
return kMinibenchmarkSuccess;
}
}
} | #include "tensorflow/lite/experimental/acceleration/mini_benchmark/validator_runner_impl.h"
#include <iostream>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/time/time.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/experimental/acceleration/compatibility/android_info.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/benchmark_result_evaluator.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_mobilenet_model.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_mobilenet_validation_model.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_nnapi_sl_fake_impl.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/fb_storage.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/mini_benchmark_test_helper.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/model_modifier/custom_validation_embedder.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/nnapi_sl_fake_impl.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/status_codes.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/validator_runner_options.h"
#include "tensorflow/lite/nnapi/sl/include/SupportLibrary.h"
#include "tensorflow/lite/stderr_reporter.h"
#ifdef __ANDROID__
#include <dlfcn.h>
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_validator_runner_entrypoint.h"
#endif
namespace tflite {
namespace acceleration {
namespace {
using ::flatbuffers::FlatBufferBuilder;
using ::flatbuffers::GetRoot;
constexpr absl::Duration kWaitBetweenRefresh = absl::Milliseconds(20);
class AlwaysTrueEvaluator : public AbstractBenchmarkResultEvaluator {
public:
bool HasPassedAccuracyCheck(const BenchmarkResult& result) override {
return true;
}
};
class ValidatorRunnerImplTest : public ::testing::Test {
protected:
void SetUp() override {
MiniBenchmarkTestHelper helper;
should_perform_test_ = helper.should_perform_test();
nnapi_sl_dump_path_ = helper.DumpToTempFile(
"libnnapi_fake.so", g_nnapi_sl_fake_impl, g_nnapi_sl_fake_impl_len);
options_.data_directory_path = ::testing::TempDir();
options_.storage_path = ::testing::TempDir() + "/storage_path.fb";
options_.validation_entrypoint_name =
"Java_org_tensorflow_lite_acceleration_validation_entrypoint";
options_.error_reporter = tflite::DefaultErrorReporter();
options_.benchmark_result_evaluator =
EmbeddedResultEvaluator::GetInstance();
options_.per_test_timeout_ms = 0;
options_.model_path = helper.DumpToTempFile(
"mobilenet_quant_with_validation.tflite",
g_tflite_acceleration_embedded_mobilenet_validation_model,
g_tflite_acceleration_embedded_mobilenet_validation_model_len);
ASSERT_TRUE(!options_.model_path.empty());
plain_model_path_ = MiniBenchmarkTestHelper::DumpToTempFile(
"mobilenet_quant.tflite",
g_tflite_acceleration_embedded_mobilenet_model,
g_tflite_acceleration_embedded_mobilenet_model_len);
ASSERT_TRUE(!plain_model_path_.empty());
}
void TearDown() override {
if (should_perform_test_) {
ASSERT_EQ(unlink(options_.storage_path.c_str()), 0);
}
}
ValidatorRunnerImpl CreateValidator() {
return ValidatorRunnerImpl(
CreateModelLoaderPath(options_), options_.storage_path,
options_.data_directory_path, options_.per_test_timeout_ms,
std::move(custom_validation_embedder_), options_.error_reporter,
options_.nnapi_sl, options_.gpu_plugin_handle,
options_.validation_entrypoint_name,
options_.benchmark_result_evaluator);
}
bool should_perform_test_;
ValidatorRunnerOptions options_{};
std::string plain_model_path_;
std::unique_ptr<CustomValidationEmbedder> custom_validation_embedder_ =
nullptr;
std::string nnapi_sl_dump_path_;
};
TEST_F(ValidatorRunnerImplTest,
GetSuccessfulResultsSucceedWithNnApiSlAndEmbeddedValidation) {
if (!should_perform_test_) {
std::cerr << "Skipping test";
return;
}
AndroidInfo android_info;
auto status = RequestAndroidInfo(&android_info);
ASSERT_TRUE(status.ok());
InitNnApiSlInvocationStatus();
std::unique_ptr<const ::tflite::nnapi::NnApiSupportLibrary> fake_nnapi_sl =
::tflite::nnapi::loadNnApiSupportLibrary(nnapi_sl_dump_path_);
ASSERT_THAT(fake_nnapi_sl.get(), ::testing::NotNull());
options_.nnapi_sl = fake_nnapi_sl->getFL5();
ValidatorRunnerImpl validator = CreateValidator();
ASSERT_EQ(validator.Init(), kMinibenchmarkSuccess);
std::vector<flatbuffers::FlatBufferBuilder> tflite_settings(1);
tflite_settings[0].Finish(
CreateTFLiteSettings(tflite_settings[0], Delegate_NNAPI,
CreateNNAPISettings(tflite_settings[0])));
validator.TriggerValidationAsync(std::move(tflite_settings),
options_.storage_path);
FlatbufferStorage<BenchmarkEvent> storage(options_.storage_path,
options_.error_reporter);
while (validator.GetNumCompletedResults() < 1) {
usleep(absl::ToInt64Microseconds(kWaitBetweenRefresh));
}
std::vector<const BenchmarkEvent*> results =
validator.GetSuccessfulResultsFromStorage();
ASSERT_THAT(results, testing::Not(testing::IsEmpty()));
for (auto& result : results) {
ASSERT_THAT(result, testing::Property(&BenchmarkEvent::event_type,
testing::Eq(BenchmarkEventType_END)));
EXPECT_THAT(result->result()->actual_output(),
testing::Pointee(testing::SizeIs(0)));
}
EXPECT_TRUE(WasNnApiSlInvoked());
}
TEST_F(ValidatorRunnerImplTest,
GetSuccessfulResultsSucceedWithBufferModelAndCustomValidation) {
if (!should_perform_test_) {
std::cerr << "Skipping test";
return;
}
options_.model_buffer = g_tflite_acceleration_embedded_mobilenet_model;
options_.model_size = g_tflite_acceleration_embedded_mobilenet_model_len;
options_.model_path.clear();
int batch_size = 3;
custom_validation_embedder_ = std::make_unique<CustomValidationEmbedder>(
batch_size, std::vector<std::vector<uint8_t>>{
std::vector<uint8_t>(batch_size * 224 * 224 * 3, 1)});
AlwaysTrueEvaluator evaluator;
options_.benchmark_result_evaluator = &evaluator;
ValidatorRunnerImpl validator = CreateValidator();
ASSERT_EQ(validator.Init(), kMinibenchmarkSuccess);
std::vector<flatbuffers::FlatBufferBuilder> tflite_settings(1);
tflite_settings[0].Finish(CreateTFLiteSettings(tflite_settings[0]));
validator.TriggerValidationAsync(std::move(tflite_settings),
options_.storage_path);
FlatbufferStorage<BenchmarkEvent> storage(options_.storage_path,
options_.error_reporter);
while (validator.GetNumCompletedResults() < 1) {
usleep(absl::ToInt64Microseconds(kWaitBetweenRefresh));
}
std::vector<FlatBufferBuilder> results = validator.GetCompletedResults();
ASSERT_THAT(results, testing::Not(testing::IsEmpty()));
for (auto& result : results) {
const BenchmarkEvent* event =
GetRoot<BenchmarkEvent>(result.GetBufferPointer());
ASSERT_THAT(event, testing::Property(&BenchmarkEvent::event_type,
testing::Eq(BenchmarkEventType_END)));
EXPECT_TRUE(event->result()->ok());
EXPECT_THAT(event->result()->actual_output(),
testing::Pointee(testing::SizeIs(1)));
EXPECT_THAT(event->result()->actual_output()->Get(0)->value(),
testing::Pointee(testing::SizeIs(batch_size * 1001)));
}
}
TEST_F(ValidatorRunnerImplTest,
GetCompletedResultsReturnsOkWithCustomValidation) {
if (!should_perform_test_) {
std::cerr << "Skipping test";
return;
}
int batch_size = 3;
custom_validation_embedder_ = std::make_unique<CustomValidationEmbedder>(
batch_size, std::vector<std::vector<uint8_t>>{
std::vector<uint8_t>(batch_size * 224 * 224 * 3, 1)});
options_.model_path = plain_model_path_;
AlwaysTrueEvaluator evaluator;
options_.benchmark_result_evaluator = &evaluator;
ValidatorRunnerImpl validator = CreateValidator();
ASSERT_EQ(validator.Init(), kMinibenchmarkSuccess);
std::vector<flatbuffers::FlatBufferBuilder> tflite_settings(1);
tflite_settings[0].Finish(CreateTFLiteSettings(tflite_settings[0]));
validator.TriggerValidationAsync(std::move(tflite_settings),
options_.storage_path);
FlatbufferStorage<BenchmarkEvent> storage(options_.storage_path,
options_.error_reporter);
while (validator.GetNumCompletedResults() < 1) {
usleep(absl::ToInt64Microseconds(kWaitBetweenRefresh));
}
std::vector<FlatBufferBuilder> results = validator.GetCompletedResults();
ASSERT_THAT(results, testing::Not(testing::IsEmpty()));
for (auto& result : results) {
const BenchmarkEvent* event =
GetRoot<BenchmarkEvent>(result.GetBufferPointer());
ASSERT_THAT(event, testing::Property(&BenchmarkEvent::event_type,
testing::Eq(BenchmarkEventType_END)));
EXPECT_TRUE(event->result()->ok());
EXPECT_THAT(event->result()->actual_output(),
testing::Pointee(testing::SizeIs(1)));
EXPECT_THAT(event->result()->actual_output()->Get(0)->value(),
testing::Pointee(testing::SizeIs(batch_size * 1001)));
}
}
TEST_F(ValidatorRunnerImplTest,
GetCompletedResultsReturnsNotOkIfCustomValidationFailed) {
if (!should_perform_test_) {
std::cerr << "Skipping test";
return;
}
int batch_size = 3;
custom_validation_embedder_ = std::make_unique<CustomValidationEmbedder>(
batch_size, std::vector<std::vector<uint8_t>>{
std::vector<uint8_t>(batch_size * 224 * 224 * 3, 1)});
options_.model_path = plain_model_path_;
ValidatorRunnerImpl validator = CreateValidator();
ASSERT_EQ(validator.Init(), kMinibenchmarkSuccess);
std::vector<flatbuffers::FlatBufferBuilder> tflite_settings(1);
tflite_settings[0].Finish(CreateTFLiteSettings(tflite_settings[0]));
validator.TriggerValidationAsync(std::move(tflite_settings),
options_.storage_path);
FlatbufferStorage<BenchmarkEvent> storage(options_.storage_path,
options_.error_reporter);
while (validator.GetNumCompletedResults() < 1) {
usleep(absl::ToInt64Microseconds(kWaitBetweenRefresh));
}
std::vector<FlatBufferBuilder> results = validator.GetCompletedResults();
ASSERT_THAT(results, testing::Not(testing::IsEmpty()));
for (auto& result : results) {
const BenchmarkEvent* event =
GetRoot<BenchmarkEvent>(result.GetBufferPointer());
ASSERT_THAT(event, testing::Property(&BenchmarkEvent::event_type,
testing::Eq(BenchmarkEventType_END)));
EXPECT_FALSE(event->result()->ok());
EXPECT_THAT(event->result()->actual_output(),
testing::Pointee(testing::SizeIs(1)));
EXPECT_THAT(event->result()->actual_output()->Get(0)->value(),
testing::Pointee(testing::SizeIs(batch_size * 1001)));
}
}
TEST_F(ValidatorRunnerImplTest, FailIfItCannotFindNnApiSlPath) {
if (!should_perform_test_) {
std::cerr << "Skipping test";
return;
}
NnApiSLDriverImplFL5 wrong_handle_nnapi_sl{};
options_.nnapi_sl = &wrong_handle_nnapi_sl;
ValidatorRunnerImpl validator = CreateValidator();
EXPECT_EQ(validator.Init(), kMiniBenchmarkCannotLoadSupportLibrary);
}
TEST_F(ValidatorRunnerImplTest, FailWithInvalidEntrypoint) {
options_.validation_entrypoint_name = "invalid_name()";
EXPECT_EQ(CreateValidator().Init(),
kMinibenchmarkValidationEntrypointSymbolNotFound);
}
TEST_F(ValidatorRunnerImplTest, FailIfCannotLoadModel) {
options_.model_path = "invalid/path";
EXPECT_EQ(CreateValidator().Init(), kMinibenchmarkModelInitFailed);
}
TEST_F(ValidatorRunnerImplTest, FailIfCannotEmbedInputData) {
options_.model_path = plain_model_path_;
custom_validation_embedder_ = std::make_unique<CustomValidationEmbedder>(
1, std::vector<std::vector<uint8_t>>(2));
EXPECT_EQ(CreateValidator().Init(),
kMinibenchmarkValidationSubgraphBuildFailed);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/mini_benchmark/validator_runner_impl.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/mini_benchmark/validator_runner_impl_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d760ed97-1225-41c7-86ae-6346991c7cf1 | cpp | tensorflow/tensorflow | validator | tensorflow/lite/experimental/acceleration/mini_benchmark/validator.cc | tensorflow/lite/experimental/acceleration/mini_benchmark/validator_test.cc | #include "tensorflow/lite/experimental/acceleration/mini_benchmark/validator.h"
#include <stdint.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <functional>
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/core/acceleration/configuration/delegate_registry.h"
#include "tensorflow/lite/core/acceleration/configuration/stable_delegate_registry.h"
#include "tensorflow/lite/core/api/profiler.h"
#include "tensorflow/lite/core/c/c_api.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/core/interpreter_builder.h"
#include "tensorflow/lite/core/kernels/register.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/call_register.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/constants.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/decode_jpeg_register.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/status_codes.h"
#include "tensorflow/lite/logger.h"
#include "tensorflow/lite/minimal_logging.h"
#include "tensorflow/lite/mutable_op_resolver.h"
#include "tensorflow/lite/tools/benchmark/register_custom_op.h"
#include "tensorflow/lite/tools/model_loader.h"
#ifndef TEMP_FAILURE_RETRY
#ifdef __ANDROID__
#error "TEMP_FAILURE_RETRY not set although on Android"
#else
#define TEMP_FAILURE_RETRY(exp) exp
#endif
#endif
namespace tflite {
namespace acceleration {
namespace {
std::unique_ptr<tflite::delegates::DelegatePluginInterface> LoadDelegatePlugin(
const std::string& name, const tflite::TFLiteSettings& tflite_settings) {
return tflite::delegates::DelegatePluginRegistry::CreateByName(
name + "Plugin", tflite_settings);
}
void AppendTensorDataToVector(const TfLiteTensor* tensor,
std::vector<std::vector<char>>& output_vector) {
std::vector<char> char_output(TfLiteTensorByteSize(tensor));
memcpy(char_output.data(), TfLiteTensorData(tensor),
TfLiteTensorByteSize(tensor));
output_vector.emplace_back(std::move(char_output));
}
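// True if the tensor at `index` already has backing data: either memory-mapped
// from the model file (allocation != nullptr) or, for models loaded through a
// pipe, populated in memory at load time.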
inline bool HasTensorData(tools::ModelLoader* model_loader,
const Subgraph& graph, int index) {
const TfLiteTensor* tensor = graph.tensor(index);
return tensor->allocation != nullptr ||
(model_loader->type() == tools::ModelLoader::Type::kPipeModelLoader &&
tensor->data.data != nullptr);
}
constexpr int64_t kMicrosInSecond = 1000 * 1000;
constexpr int64_t kNanosInMicro = 1000;
int64_t ElapsedTimeMicros() {
struct timespec ts;
#if defined(__ANDROID__)
int err = clock_gettime(CLOCK_BOOTTIME, &ts);
#elif defined(_WIN32)
int err = 1;
#else
int err = clock_gettime(CLOCK_MONOTONIC, &ts);
#endif
if (err) {
return -1;
}
return ts.tv_sec * kMicrosInSecond + ts.tv_nsec / kNanosInMicro;
}
class ValidatorProfiler : public ::tflite::Profiler {
public:
struct EventData {
std::string tag;
int64_t start_time_us = -1;
int64_t end_time_us = -1;
};
const std::vector<EventData>& events() { return events_; }
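  // Only DEFAULT-type events (such as the subgraph "Invoke" event consumed in
  // RunValidation) are recorded. The returned handle is the 1-based index into
  // events_; 0 marks an ignored event, which EndEvent() treats as a no-op.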
uint32_t BeginEvent(const char* tag, EventType event_type,
int64_t event_metadata1,
int64_t event_metadata2) override {
if (event_type != EventType::DEFAULT) {
return 0;
}
events_.push_back({tag, ElapsedTimeMicros(), -1});
return events_.size();
}
void EndEvent(uint32_t event_handle) override {
if (event_handle == 0) {
return;
}
events_[event_handle - 1].end_time_us = ElapsedTimeMicros();
}
private:
std::vector<EventData> events_;
};
}
MinibenchmarkStatus Validator::CheckGoldenOutput(Results* results_out) {
if (!interpreter_ || !model_loader_->GetModel()) {
return kMinibenchmarkPreconditionNotMet;
}
if (validation_entrypoint_->inputs().size() <= 1) {
return kMinibenchmarkValidationSubgraphHasTooFewInputs;
}
if (validation_entrypoint_->inputs().size() >
validation_entrypoint_->outputs().size()) {
return kMinibenchmarkValidationSubgraphHasTooFewOutputs;
}
if (HasTensorData(model_loader_.get(), *validation_entrypoint_,
validation_entrypoint_->inputs()[0])) {
return kMinibenchmarkSuccess;
}
TFLITE_LOG_PROD(TFLITE_LOG_INFO,
"Running on CPU to get golden output for comparison.");
tflite::InterpreterBuilder(*model_loader_->GetModel(),
*resolver_)(&golden_interpreter_);
if (!golden_interpreter_) {
return kMinibenchmarkInterpreterBuilderFailed;
}
Subgraph* golden_validation_entrypoint =
golden_interpreter_->subgraph(validation_entrypoint_index_);
if (golden_validation_entrypoint->AllocateTensors() != kTfLiteOk) {
return kMinibenchmarkAllocateTensorsFailed;
}
for (int i = 0; i < golden_validation_entrypoint->inputs().size() - 1; i++) {
TfLiteTensor* input_tensor = golden_validation_entrypoint->tensor(
golden_validation_entrypoint->inputs()[i]);
memset(input_tensor->data.data, 0, input_tensor->bytes);
}
if (golden_validation_entrypoint->Invoke() != kTfLiteOk) {
return kMinibenchmarkInvokeFailed;
}
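  // Copy the golden CPU run's outputs into the validation entrypoint's inputs
  // (all but the last input, which carries the embedded sample data) so the
  // accuracy metrics can compare the delegate's results against them.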
for (int i = 0; i < validation_entrypoint_->inputs().size() - 1; i++) {
TfLiteTensor* input_tensor =
validation_entrypoint_->tensor(validation_entrypoint_->inputs()[i]);
TfLiteTensor* golden_output_tensor = golden_validation_entrypoint->tensor(
golden_validation_entrypoint->outputs()[i]);
if (input_tensor->bytes != golden_output_tensor->bytes) {
return kMinibenchmarkValidationSubgraphInputsDontMatchOutputs;
}
memcpy(input_tensor->data.data, golden_output_tensor->data.data,
golden_output_tensor->bytes);
}
return kMinibenchmarkSuccess;
}
MinibenchmarkStatus Validator::LoadDelegate() {
if (!compute_settings_) {
return kMinibenchmarkPreconditionNotMet;
}
if (opaque_delegate_) {
return kMinibenchmarkSuccess;
}
Delegate which_delegate = Delegate_NONE;
bool is_stable_delegate_path_provided = false;
auto tflite_settings = compute_settings_->tflite_settings();
if (tflite_settings) {
which_delegate = compute_settings_->tflite_settings()->delegate();
if (tflite_settings->stable_delegate_loader_settings()) {
is_stable_delegate_path_provided =
tflite_settings->stable_delegate_loader_settings()->delegate_path() &&
!tflite_settings->stable_delegate_loader_settings()
->delegate_path()
->str()
.empty();
}
}
std::string delegate_name;
if (is_stable_delegate_path_provided && which_delegate == Delegate_GPU) {
delegate_name = "GpuModule";
} else if (is_stable_delegate_path_provided) {
delegate_name = "StableDelegate";
} else {
switch (which_delegate) {
case Delegate_NONE:
return kMinibenchmarkSuccess;
case Delegate_NNAPI:
delegate_name = "Nnapi";
break;
case Delegate_GPU:
delegate_name = "Gpu";
break;
case Delegate_XNNPACK:
delegate_name = "XNNPack";
break;
case Delegate_EDGETPU:
delegate_name = "EdgeTpu";
break;
default:
return kMinibenchmarkDelegateNotSupported;
}
}
TFLITE_LOG_PROD(TFLITE_LOG_INFO, "Running mini-benchmark on %s",
delegate_name.c_str());
if (!(delegate_plugin_ = LoadDelegatePlugin(
delegate_name, *compute_settings_->tflite_settings()))) {
return kMinibenchmarkDelegatePluginNotFound;
}
if (!(delegate_ = delegate_plugin_->Create())) {
return kMinibenchmarkDelegateCreateFailed;
}
return kMinibenchmarkSuccess;
}
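// Loads an ABI-stable ("opaque") delegate when the TFLite settings name one
// via stable_delegate_loader_settings; otherwise this is a no-op and the
// classic delegate-plugin path in LoadDelegate() applies.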
MinibenchmarkStatus Validator::LoadOpaqueDelegate() {
if (!compute_settings_) {
return kMinibenchmarkPreconditionNotMet;
}
bool is_stable_delegate_name_provided = false;
auto tflite_settings = compute_settings_->tflite_settings();
if (!tflite_settings) {
return kMinibenchmarkSuccess;
}
auto stable_delegate_settings =
tflite_settings->stable_delegate_loader_settings();
is_stable_delegate_name_provided =
stable_delegate_settings && stable_delegate_settings->delegate_name() &&
!stable_delegate_settings->delegate_name()->str().empty();
if (!is_stable_delegate_name_provided) {
return kMinibenchmarkSuccess;
}
std::string delegate_name = stable_delegate_settings->delegate_name()->str();
TFLITE_LOG_PROD(TFLITE_LOG_INFO, "Running mini-benchmark on %s",
delegate_name.c_str());
const TfLiteStableDelegate* stable_delegate =
delegates::StableDelegateRegistry::RetrieveStableDelegate(delegate_name);
if (!stable_delegate) {
TFLITE_LOG_PROD(TFLITE_LOG_ERROR,
"Failed to load stable delegate plugin %s",
delegate_name.c_str());
return kMinibenchmarkDelegatePluginNotFound;
}
const TfLiteOpaqueDelegatePlugin* delegate_plugin =
stable_delegate->delegate_plugin;
opaque_delegate_ = TfLiteOpaqueDelegatePtr(
delegate_plugin->create(tflite_settings), delegate_plugin->destroy);
return kMinibenchmarkSuccess;
}
MinibenchmarkStatus Validator::CreateInterpreter(int* delegate_error_out,
int* delegated_kernels_out) {
if (!delegate_error_out || !delegated_kernels_out ||
!model_loader_->GetModel()) {
return kMinibenchmarkPreconditionNotMet;
}
if (interpreter_) {
return kMinibenchmarkSuccess;
}
*delegate_error_out = 0;
if (compute_settings_->tflite_settings() &&
compute_settings_->tflite_settings()->disable_default_delegates()) {
resolver_ = std::make_unique<
::tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates>();
} else {
resolver_ = std::make_unique<::tflite::ops::builtin::BuiltinOpResolver>();
}
resolver_->AddCustom("validation/call",
::tflite::acceleration::ops::Register_CALL(), 1);
resolver_->AddCustom(
"validation/decode_jpeg",
::tflite::acceleration::decode_jpeg_kernel::Register_DECODE_JPEG(), 1);
RegisterSelectedOps(resolver_.get());
tflite::InterpreterBuilder builder(*model_loader_->GetModel(), *resolver_);
if (delegate_ != nullptr) {
builder.AddDelegate(delegate_.get());
}
if (opaque_delegate_ != nullptr) {
builder.AddDelegate(opaque_delegate_.get());
}
TfLiteStatus status = builder(&interpreter_);
if (!interpreter_) {
*delegate_error_out =
delegate_plugin_ ? delegate_plugin_->GetDelegateErrno(delegate_.get())
: 0;
TFLITE_LOG_PROD(TFLITE_LOG_ERROR,
"Creating Interpreter failed with error code %d.", status);
return kMinibenchmarkInterpreterBuilderFailed;
}
main_model_ = interpreter_->subgraph(0);
validation_entrypoint_index_ = -1;
for (int i = 0; i < interpreter_->subgraphs_size(); i++) {
Subgraph* subgraph = interpreter_->subgraph(i);
if (subgraph->GetName() == kValidationGraphName) {
validation_entrypoint_index_ = i;
validation_entrypoint_ = subgraph;
} else if (subgraph->GetName() == "VALIDATION:metrics") {
has_accuracy_validation_ = true;
}
}
if (!validation_entrypoint_) {
return kMinibenchmarkValidationSubgraphNotFound;
}
if (validation_entrypoint_->inputs().empty()) {
return kMinibenchmarkValidationSubgraphHasTooFewInputs;
}
if (!HasTensorData(model_loader_.get(), *validation_entrypoint_,
validation_entrypoint_->inputs().back())) {
return kMinibenchmarkValidationInputMissing;
}
if (validation_entrypoint_->AllocateTensors() != kTfLiteOk) {
return kMinibenchmarkAllocateTensorsFailed;
}
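  // Count how many nodes in the execution plan were replaced by delegate
  // kernels. Full delegation means the plan collapsed to a single delegate
  // node; anything less is logged as partial (or no) delegation.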
absl::flat_hash_set<int> checked_node_ids;
int num_delegated_kernels = 0;
for (int i = 0; i < interpreter_->execution_plan().size(); ++i) {
int node_id = interpreter_->execution_plan()[i];
if (checked_node_ids.find(node_id) != checked_node_ids.end()) {
continue;
}
const TfLiteNode& node =
interpreter_->node_and_registration(node_id)->first;
if (node.delegate != nullptr) {
num_delegated_kernels++;
checked_node_ids.insert(node_id);
}
}
*delegated_kernels_out = num_delegated_kernels;
bool fully_delegated = (num_delegated_kernels == 1 &&
interpreter_->execution_plan().size() == 1);
if (!fully_delegated) {
TFLITE_LOG_PROD(TFLITE_LOG_WARNING,
"The model will be %s executed by the delegate.",
num_delegated_kernels > 0 ? "partially" : "not");
}
return kMinibenchmarkSuccess;
}
Validator::Status Validator::RunValidation(Results* results_out) {
BenchmarkStage stage = BenchmarkStage_INITIALIZATION;
if (!results_out) {
return Validator::Status{kMinibenchmarkPreconditionNotMet, stage};
}
if (!model_loader_) {
return Validator::Status{kMinibenchmarkModelReadFailed, stage};
}
if (!model_loader_->Init()) {
return Validator::Status{kMinibenchmarkModelInitFailed, stage};
}
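// Returns early with the current benchmark stage attached to the error code,
// so callers can tell initialization failures apart from inference failures.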
#define MB_RETURN_IF_ERROR(s, bs) \
{ \
MinibenchmarkStatus c = (s); \
if (c != kMinibenchmarkSuccess) return Validator::Status{c, (bs)}; \
}
int64_t delegate_load_start_time_us = ElapsedTimeMicros();
MB_RETURN_IF_ERROR(LoadOpaqueDelegate(), stage);
MB_RETURN_IF_ERROR(LoadDelegate(), stage);
MB_RETURN_IF_ERROR(CreateInterpreter(&results_out->delegate_error,
&results_out->delegated_kernels),
stage);
int64_t delegate_load_end_time_us = ElapsedTimeMicros();
ValidatorProfiler profiler;
stage = BenchmarkStage_INFERENCE;
if (has_accuracy_validation_) {
MB_RETURN_IF_ERROR(CheckGoldenOutput(results_out), stage);
}
main_model_->SetProfiler(&profiler, 0);
TfLiteStatus status = validation_entrypoint_->Invoke();
main_model_->SetProfiler(nullptr, 0);
if (status != kTfLiteOk) {
MB_RETURN_IF_ERROR(kMinibenchmarkInvokeFailed, stage);
}
int model_output_size = main_model_->outputs().size();
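  // With embedded accuracy validation, the outputs past the main model's own
  // outputs hold the comparison results: tensors named "metrics/<name>" are
  // collected into results_out->metrics and "metrics/ok" becomes the overall
  // pass/fail flag. Without it, the raw inference outputs are returned so the
  // caller can do its own comparison.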
if (has_accuracy_validation_) {
const std::string kMetricPrefix = "metrics/";
const std::string kOk("ok");
for (int i = model_output_size;
i < validation_entrypoint_->outputs().size(); i++) {
TfLiteTensor* tensor =
validation_entrypoint_->tensor(validation_entrypoint_->outputs()[i]);
std::string name = tensor->name;
if (name.find(kMetricPrefix) != 0) {
continue;
}
name = name.substr(kMetricPrefix.size());
if (kOk == name) {
results_out->ok = *(tensor->data.b);
} else {
std::vector<float> values;
int count = 1;
for (int j = 0; j < tensor->dims->size; j++) {
count *= tensor->dims->data[j];
}
values.reserve(count);
for (int j = 0; j < count; j++) {
values.push_back(tensor->data.f[j]);
TFLITE_LOG_PROD(TFLITE_LOG_INFO, " %s %.4f", name.c_str(),
tensor->data.f[j]);
}
results_out->metrics[name] = values;
}
}
TFLITE_LOG_PROD(TFLITE_LOG_INFO, " accuracy: %s",
results_out->ok ? "ok" : "not ok");
} else {
results_out->actual_inference_output.clear();
results_out->actual_inference_output.reserve(model_output_size);
for (int i = 0; i < model_output_size; i++) {
AppendTensorDataToVector(
validation_entrypoint_->tensor(validation_entrypoint_->outputs()[i]),
results_out->actual_inference_output);
}
}
results_out->delegate_prep_time_us =
(delegate_load_end_time_us == -1 || delegate_load_start_time_us == -1)
? -1
: delegate_load_end_time_us - delegate_load_start_time_us;
TFLITE_LOG_PROD(TFLITE_LOG_INFO, " Delegate preparation took %d us",
static_cast<int>(results_out->delegate_prep_time_us));
for (const auto& e : profiler.events()) {
if (e.tag == "Invoke" && e.start_time_us != -1 && e.end_time_us != -1) {
results_out->execution_time_us.push_back(e.end_time_us - e.start_time_us);
TFLITE_LOG_PROD(TFLITE_LOG_INFO, " Inference took %d us",
static_cast<int>(e.end_time_us - e.start_time_us));
}
}
#undef MB_RETURN_IF_ERROR
return Validator::Status{kMinibenchmarkSuccess};
}
int64_t Validator::BootTimeMicros() { return ElapsedTimeMicros(); }
int64_t Validator::WallTimeMicros() {
struct timespec ts;
#ifndef _WIN32
int err = clock_gettime(CLOCK_REALTIME, &ts);
#else
int err = 1;
#endif
if (err) {
return -1;
}
return ts.tv_sec * kMicrosInSecond + ts.tv_nsec / kNanosInMicro;
}
}
} | #include "tensorflow/lite/experimental/acceleration/mini_benchmark/validator.h"
#include <iostream>
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#if FLATBUFFERS_LITTLEENDIAN == 0
#include "tensorflow/lite/core/model_builder.h"
#endif
#include "tensorflow/compiler/mlir/lite/schema/mutable/schema_generated.h"
#include "tensorflow/lite/acceleration/configuration/configuration.pb.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/acceleration/configuration/proto_to_flatbuffer.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_mobilenet_model.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_mobilenet_validation_model.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/mini_benchmark_test_helper.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/model_modifier/custom_validation_embedder.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/status_codes.h"
#include "tensorflow/lite/tools/model_loader.h"
namespace tflite {
namespace acceleration {
namespace {
using flatbuffers::FlatBufferBuilder;
constexpr int kOutputTensorSize = 1001;
class ValidatorTest : public ::testing::Test {
protected:
void SetUp() override {
std::string validation_model_path = MiniBenchmarkTestHelper::DumpToTempFile(
"mobilenet_quant_with_validation.tflite",
g_tflite_acceleration_embedded_mobilenet_validation_model,
g_tflite_acceleration_embedded_mobilenet_validation_model_len);
ASSERT_TRUE(!validation_model_path.empty());
validation_model_loader_ =
std::make_unique<tools::PathModelLoader>(validation_model_path);
std::string plain_model_path = MiniBenchmarkTestHelper::DumpToTempFile(
"mobilenet_quant.tflite",
g_tflite_acceleration_embedded_mobilenet_model,
g_tflite_acceleration_embedded_mobilenet_model_len);
ASSERT_TRUE(!plain_model_path.empty());
plain_model_loader_ =
std::make_unique<tools::PathModelLoader>(plain_model_path);
compute_settings_fbb_.Finish(CreateComputeSettings(compute_settings_fbb_));
default_compute_settings_ = flatbuffers::GetRoot<ComputeSettings>(
compute_settings_fbb_.GetBufferPointer());
}
std::unique_ptr<tools::ModelLoader> validation_model_loader_;
std::unique_ptr<tools::ModelLoader> plain_model_loader_;
FlatBufferBuilder compute_settings_fbb_;
const ComputeSettings* default_compute_settings_;
};
TEST_F(ValidatorTest, HappyPathOnCpuWithEmbeddedValidation) {
ASSERT_TRUE(validation_model_loader_->Init());
Validator validator(std::move(validation_model_loader_),
default_compute_settings_);
Validator::Results results;
Validator::Status validation_run = validator.RunValidation(&results);
EXPECT_EQ(validation_run.status, kMinibenchmarkSuccess);
EXPECT_EQ(validation_run.stage, BenchmarkStage_UNKNOWN);
EXPECT_TRUE(results.ok);
EXPECT_GE(results.metrics.size(), 0);
EXPECT_EQ(results.delegate_error, 0);
EXPECT_TRUE(results.actual_inference_output.empty());
}
TEST_F(ValidatorTest, HappyPathOnCpuWithCustomValidation) {
ASSERT_TRUE(plain_model_loader_->Init());
ASSERT_TRUE(validation_model_loader_->Init());
const SubGraph* main_model =
plain_model_loader_->GetModel()->GetModel()->subgraphs()->Get(0);
const int model_output_size = main_model->outputs()->size();
int model_input_byte_size = 1;
for (int shape_i :
*main_model->tensors()->Get(main_model->inputs()->Get(0))->shape()) {
model_input_byte_size *= shape_i;
}
int batch_size = 5;
FlatBufferBuilder model_with_input;
CustomValidationEmbedder embedder(
batch_size,
{std::vector<uint8_t>(batch_size * model_input_byte_size, 1)});
EXPECT_EQ(embedder.BuildModel(*plain_model_loader_->GetModel()->GetModel(),
model_with_input),
kMinibenchmarkSuccess);
std::string serialized_str(
reinterpret_cast<const char*>(model_with_input.GetBufferPointer()),
model_with_input.GetSize());
#if FLATBUFFERS_LITTLEENDIAN == 0
tflite::FlatBufferModel::ByteSwapSerializedModel(&serialized_str, true);
#endif
std::string model_path = MiniBenchmarkTestHelper::DumpToTempFile(
"mobilenet_quant_with_input.tflite",
reinterpret_cast<const unsigned char*>(serialized_str.c_str()),
serialized_str.size());
ASSERT_TRUE(!model_path.empty());
auto model_loader = std::make_unique<tools::PathModelLoader>(model_path);
Validator validator(std::move(model_loader), default_compute_settings_);
Validator::Results results;
Validator::Status validation_run = validator.RunValidation(&results);
EXPECT_EQ(validation_run.status, kMinibenchmarkSuccess);
EXPECT_EQ(validation_run.stage, BenchmarkStage_UNKNOWN);
EXPECT_FALSE(results.ok);
EXPECT_EQ(results.metrics.size(), 0);
EXPECT_EQ(results.delegate_error, 0);
EXPECT_EQ(results.actual_inference_output.size(), model_output_size);
EXPECT_EQ(results.actual_inference_output[0].size(),
batch_size * kOutputTensorSize);
}
TEST_F(ValidatorTest, DelegateNotSupported) {
proto::ComputeSettings settings_proto;
settings_proto.mutable_tflite_settings()->set_delegate(proto::CORE_ML);
flatbuffers::FlatBufferBuilder fbb;
const ComputeSettings* settings = ConvertFromProto(settings_proto, &fbb);
Validator validator(std::move(validation_model_loader_), settings);
Validator::Results results;
Validator::Status validation_run = validator.RunValidation(&results);
EXPECT_EQ(validation_run.status, kMinibenchmarkDelegateNotSupported);
EXPECT_EQ(validation_run.stage, BenchmarkStage_INITIALIZATION);
}
TEST_F(ValidatorTest, NoValidationSubgraph) {
Validator validator(std::move(plain_model_loader_),
default_compute_settings_);
Validator::Results results;
Validator::Status validation_run = validator.RunValidation(&results);
EXPECT_EQ(validation_run.status, kMinibenchmarkValidationSubgraphNotFound);
EXPECT_EQ(validation_run.stage, BenchmarkStage_INITIALIZATION);
}
TEST_F(ValidatorTest, NoValidationInputData) {
ASSERT_TRUE(plain_model_loader_->Init());
FlatBufferBuilder model_with_input;
CustomValidationEmbedder embedder(1, {{}});
EXPECT_EQ(embedder.BuildModel(*plain_model_loader_->GetModel()->GetModel(),
model_with_input),
kMinibenchmarkSuccess);
std::string model_path = MiniBenchmarkTestHelper::DumpToTempFile(
"mobilenet_quant_with_input.tflite", model_with_input.GetBufferPointer(),
model_with_input.GetSize());
ASSERT_TRUE(!model_path.empty());
auto model_loader = std::make_unique<tools::PathModelLoader>(model_path);
Validator validator(std::move(model_loader), default_compute_settings_);
Validator::Results results;
Validator::Status validation_run = validator.RunValidation(&results);
EXPECT_EQ(validation_run.status, kMinibenchmarkValidationInputMissing);
EXPECT_EQ(validation_run.stage, BenchmarkStage_INITIALIZATION);
}
TEST_F(ValidatorTest, InvalidModel) {
const std::string dump_path = MiniBenchmarkTestHelper::DumpToTempFile(
"foo.tflite", g_tflite_acceleration_embedded_mobilenet_validation_model,
g_tflite_acceleration_embedded_mobilenet_validation_model_len - 12000);
ASSERT_TRUE(!dump_path.empty());
Validator validator(std::make_unique<tools::PathModelLoader>(dump_path),
default_compute_settings_);
Validator::Results results;
Validator::Status validation_run = validator.RunValidation(&results);
EXPECT_EQ(validation_run.status, kMinibenchmarkModelInitFailed);
EXPECT_EQ(validation_run.stage, BenchmarkStage_INITIALIZATION);
}
TEST_F(ValidatorTest, EmptyModelLoader) {
Validator validator(nullptr, default_compute_settings_);
Validator::Results results;
Validator::Status validation_run = validator.RunValidation(&results);
EXPECT_EQ(validation_run.status, kMinibenchmarkModelReadFailed);
EXPECT_EQ(validation_run.stage, BenchmarkStage_INITIALIZATION);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/mini_benchmark/validator.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/mini_benchmark/validator_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ac4662b7-8415-4b1d-9b99-97c649832c2d | cpp | tensorflow/tensorflow | mini_benchmark | tensorflow/lite/experimental/acceleration/mini_benchmark/mini_benchmark.cc | tensorflow/lite/experimental/acceleration/mini_benchmark/mini_benchmark_test.cc | #include "tensorflow/lite/experimental/acceleration/mini_benchmark/mini_benchmark.h"
#include <string>
#include <utility>
#include "absl/status/statusor.h"
#include "flatbuffers/flatbuffers.h"
namespace tflite {
namespace acceleration {
namespace {
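// Fallback used when no real mini-benchmark implementation is registered
// (e.g. the platform-specific implementation is not linked in); it reports no
// best acceleration and -1 remaining acceleration tests.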
class NoopMiniBenchmark : public MiniBenchmark {
public:
ComputeSettingsT GetBestAcceleration() override { return ComputeSettingsT(); }
void TriggerMiniBenchmark() override {}
void SetEventTimeoutForTesting(int64_t) override {}
std::vector<MiniBenchmarkEventT> MarkAndGetEventsToLog() override {
return {};
}
int NumRemainingAccelerationTests() override { return -1; }
};
}
std::unique_ptr<MiniBenchmark> CreateMiniBenchmark(
const MinibenchmarkSettings& settings, const std::string& model_namespace,
const std::string& model_id) {
absl::StatusOr<std::unique_ptr<MiniBenchmark>> s_or_mb =
MinibenchmarkImplementationRegistry::CreateByName(
"Impl", settings, model_namespace, model_id);
if (!s_or_mb.ok()) {
return std::unique_ptr<MiniBenchmark>(new NoopMiniBenchmark());
} else {
return std::move(*s_or_mb);
}
}
void MinibenchmarkImplementationRegistry::RegisterImpl(
const std::string& name, CreatorFunction creator_function) {
absl::MutexLock lock(&mutex_);
factories_[name] = creator_function;
}
std::unique_ptr<MiniBenchmark> MinibenchmarkImplementationRegistry::CreateImpl(
const std::string& name, const MinibenchmarkSettings& settings,
const std::string& model_namespace, const std::string& model_id) {
absl::MutexLock lock(&mutex_);
auto it = factories_.find(name);
return (it != factories_.end())
? it->second(settings, model_namespace, model_id)
: nullptr;
}
MinibenchmarkImplementationRegistry*
MinibenchmarkImplementationRegistry::GetSingleton() {
static auto* instance = new MinibenchmarkImplementationRegistry();
return instance;
}
std::unique_ptr<MiniBenchmark>
MinibenchmarkImplementationRegistry::CreateByName(
const std::string& name, const MinibenchmarkSettings& settings,
const std::string& model_namespace, const std::string& model_id) {
auto* const instance = MinibenchmarkImplementationRegistry::GetSingleton();
return instance->CreateImpl(name, settings, model_namespace, model_id);
}
MinibenchmarkImplementationRegistry::Register::Register(
const std::string& name, CreatorFunction creator_function) {
auto* const instance = MinibenchmarkImplementationRegistry::GetSingleton();
instance->RegisterImpl(name, creator_function);
}
}
} | #include "tensorflow/lite/experimental/acceleration/mini_benchmark/mini_benchmark.h"
#include <unistd.h>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/acceleration/configuration/configuration.pb.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/acceleration/configuration/proto_to_flatbuffer.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_mobilenet_float_validation_model.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_nnapi_sl_fake_impl.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/mini_benchmark_test_helper.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/nnapi_sl_fake_impl.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/status_codes.h"
#include "tensorflow/lite/nnapi/sl/include/SupportLibrary.h"
namespace tflite {
namespace acceleration {
namespace {
TEST(BasicMiniBenchmarkTest, EmptySettings) {
proto::MinibenchmarkSettings settings_proto;
flatbuffers::FlatBufferBuilder empty_settings_buffer_;
const MinibenchmarkSettings* empty_settings =
ConvertFromProto(settings_proto, &empty_settings_buffer_);
std::unique_ptr<MiniBenchmark> mb(
CreateMiniBenchmark(*empty_settings, "ns", "id"));
mb->TriggerMiniBenchmark();
const ComputeSettingsT acceleration = mb->GetBestAcceleration();
EXPECT_EQ(nullptr, acceleration.tflite_settings);
EXPECT_TRUE(mb->MarkAndGetEventsToLog().empty());
EXPECT_EQ(-1, mb->NumRemainingAccelerationTests());
}
class MiniBenchmarkTest : public ::testing::Test {
protected:
void SetUp() override {
MiniBenchmarkTestHelper helper;
should_perform_test_ = helper.should_perform_test();
if (should_perform_test_) {
mobilenet_model_path_ = MiniBenchmarkTestHelper::DumpToTempFile(
"mobilenet_float_with_validation.tflite",
g_tflite_acceleration_embedded_mobilenet_float_validation_model,
g_tflite_acceleration_embedded_mobilenet_float_validation_model_len);
}
}
void SetupBenchmark(proto::Delegate delegate, const std::string& model_path,
bool reset_storage = true,
const nnapi::NnApiSupportLibrary* nnapi_sl = nullptr) {
proto::MinibenchmarkSettings settings;
proto::TFLiteSettings* tflite_settings = settings.add_settings_to_test();
tflite_settings->set_delegate(delegate);
if ((delegate == proto::Delegate::NNAPI) && nnapi_sl) {
std::cerr << "Using NNAPI SL\n";
tflite_settings->mutable_nnapi_settings()->set_support_library_handle(
reinterpret_cast<int64_t>(nnapi_sl->getFL5()));
}
proto::ModelFile* file = settings.mutable_model_file();
file->set_filename(model_path);
proto::BenchmarkStoragePaths* paths = settings.mutable_storage_paths();
paths->set_storage_file_path(::testing::TempDir() + "/storage.fb");
if (reset_storage) {
(void)unlink(paths->storage_file_path().c_str());
(void)unlink((paths->storage_file_path() + ".extra.fb").c_str());
}
paths->set_data_directory_path(::testing::TempDir());
if (delegate != proto::Delegate::NONE) {
proto::TFLiteSettings* cpu_tflite_settings =
settings.add_settings_to_test();
cpu_tflite_settings->set_disable_default_delegates(false);
}
settings_ = ConvertFromProto(settings, &settings_buffer_);
mb_ = CreateMiniBenchmark(*settings_, ns_, model_id_);
}
void TriggerBenchmark(proto::Delegate delegate, const std::string& model_path,
bool reset_storage = true,
const nnapi::NnApiSupportLibrary* nnapi_sl = nullptr) {
SetupBenchmark(delegate, model_path, reset_storage, nnapi_sl);
mb_->TriggerMiniBenchmark();
}
void WaitForValidationCompletion(
absl::Duration timeout = absl::Seconds(300)) {
absl::Time deadline = absl::Now() + timeout;
while (absl::Now() < deadline) {
if (mb_->NumRemainingAccelerationTests() == 0) return;
absl::SleepFor(absl::Milliseconds(200));
}
ASSERT_NE(0, mb_->NumRemainingAccelerationTests());
}
const std::string ns_ = "org.tensorflow.lite.mini_benchmark.test";
const std::string model_id_ = "test_minibenchmark_model";
bool should_perform_test_ = true;
std::unique_ptr<MiniBenchmark> mb_;
std::string mobilenet_model_path_;
flatbuffers::FlatBufferBuilder settings_buffer_;
const MinibenchmarkSettings* settings_;
};
TEST_F(MiniBenchmarkTest, OnlyCPUSettings) {
if (!should_perform_test_) return;
SetupBenchmark(proto::Delegate::NONE, mobilenet_model_path_);
EXPECT_EQ(-1, mb_->NumRemainingAccelerationTests());
ComputeSettingsT acceleration = mb_->GetBestAcceleration();
EXPECT_EQ(nullptr, acceleration.tflite_settings);
EXPECT_EQ(1, mb_->NumRemainingAccelerationTests());
mb_->TriggerMiniBenchmark();
WaitForValidationCompletion();
acceleration = mb_->GetBestAcceleration();
EXPECT_EQ(nullptr, acceleration.tflite_settings);
}
TEST_F(MiniBenchmarkTest, RunSuccessfully) {
if (!should_perform_test_) return;
TriggerBenchmark(proto::Delegate::XNNPACK, mobilenet_model_path_);
WaitForValidationCompletion();
mb_->MarkAndGetEventsToLog();
const ComputeSettingsT acceleration1 = mb_->GetBestAcceleration();
const ComputeSettingsT acceleration2 = mb_->GetBestAcceleration();
EXPECT_EQ(acceleration1, acceleration2);
#ifndef ADDRESS_SANITIZER
ASSERT_NE(nullptr, acceleration1.tflite_settings);
EXPECT_EQ(tflite::Delegate_XNNPACK, acceleration1.tflite_settings->delegate);
#endif
EXPECT_EQ(model_id_, acceleration1.model_identifier_for_statistics);
EXPECT_EQ(ns_, acceleration1.model_namespace_for_statistics);
auto events = mb_->MarkAndGetEventsToLog();
EXPECT_EQ(1, events.size());
const auto& decision = events.front().best_acceleration_decision;
EXPECT_NE(nullptr, decision);
#ifndef ADDRESS_SANITIZER
EXPECT_EQ(tflite::Delegate_XNNPACK,
decision->min_latency_event->tflite_settings->delegate);
#endif
}
TEST_F(MiniBenchmarkTest, BestAccelerationEventIsMarkedLoggedAfterRestart) {
if (!should_perform_test_) return;
TriggerBenchmark(proto::Delegate::XNNPACK, mobilenet_model_path_);
WaitForValidationCompletion();
mb_->MarkAndGetEventsToLog();
mb_->GetBestAcceleration();
TriggerBenchmark(proto::Delegate::XNNPACK, mobilenet_model_path_,
false);
EXPECT_EQ(0, mb_->NumRemainingAccelerationTests());
const ComputeSettingsT acceleration = mb_->GetBestAcceleration();
#ifndef ADDRESS_SANITIZER
ASSERT_NE(nullptr, acceleration.tflite_settings);
EXPECT_EQ(tflite::Delegate_XNNPACK, acceleration.tflite_settings->delegate);
#endif
EXPECT_EQ(model_id_, acceleration.model_identifier_for_statistics);
EXPECT_EQ(ns_, acceleration.model_namespace_for_statistics);
auto events = mb_->MarkAndGetEventsToLog();
EXPECT_EQ(1, events.size());
}
TEST_F(MiniBenchmarkTest,
BestAccelerationEventIsNotReMarkedLoggedAfterRestart) {
if (!should_perform_test_) return;
TriggerBenchmark(proto::Delegate::XNNPACK, mobilenet_model_path_);
WaitForValidationCompletion();
mb_->GetBestAcceleration();
mb_->MarkAndGetEventsToLog();
TriggerBenchmark(proto::Delegate::XNNPACK, mobilenet_model_path_,
false);
mb_->GetBestAcceleration();
EXPECT_TRUE(mb_->MarkAndGetEventsToLog().empty());
}
TEST_F(MiniBenchmarkTest, DelegatePluginNotSupported) {
if (!should_perform_test_) return;
TriggerBenchmark(proto::Delegate::HEXAGON, mobilenet_model_path_);
WaitForValidationCompletion();
const ComputeSettingsT acceleration = mb_->GetBestAcceleration();
EXPECT_EQ(nullptr, acceleration.tflite_settings);
EXPECT_EQ(model_id_, acceleration.model_identifier_for_statistics);
EXPECT_EQ(ns_, acceleration.model_namespace_for_statistics);
const auto events = mb_->MarkAndGetEventsToLog();
bool is_found = false;
for (const auto& event : events) {
const auto& t = event.benchmark_event;
if (t == nullptr) continue;
if (t->event_type == tflite::BenchmarkEventType_ERROR &&
t->error->mini_benchmark_error_code ==
tflite::acceleration::kMinibenchmarkDelegateNotSupported) {
is_found = true;
break;
}
}
EXPECT_TRUE(is_found);
}
#ifdef __ANDROID__
TEST_F(MiniBenchmarkTest, UseNnApiSl) {
if (!should_perform_test_) return;
std::string nnapi_sl_path_ = MiniBenchmarkTestHelper::DumpToTempFile(
"libnnapi_fake.so", g_nnapi_sl_fake_impl, g_nnapi_sl_fake_impl_len);
std::unique_ptr<const ::tflite::nnapi::NnApiSupportLibrary> nnapi_sl =
::tflite::nnapi::loadNnApiSupportLibrary(nnapi_sl_path_);
ASSERT_TRUE(nnapi_sl);
TriggerBenchmark(proto::Delegate::NNAPI, mobilenet_model_path_,
true, nnapi_sl.get());
WaitForValidationCompletion();
EXPECT_TRUE(tflite::acceleration::WasNnApiSlInvoked());
}
#endif
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/mini_benchmark/mini_benchmark.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/mini_benchmark/mini_benchmark_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f69aa326-3804-4aaf-a389-a40eb316c0c6 | cpp | tensorflow/tensorflow | decode_jpeg | tensorflow/lite/experimental/acceleration/mini_benchmark/decode_jpeg.cc | tensorflow/lite/experimental/acceleration/mini_benchmark/decode_jpeg_test.cc | #include <algorithm>
#include <cstddef>
#include <memory>
#include <vector>
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/decode_jpeg_register.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/libjpeg_decoder.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/string_type.h"
#include "tensorflow/lite/string_util.h"
namespace tflite {
namespace acceleration {
namespace decode_jpeg_kernel {
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
if (!buffer) {
return nullptr;
}
#define RET_ENSURE(context, condition) \
do { \
if (!(condition)) { \
TF_LITE_KERNEL_LOG((context), "%s:%d %s was not true.", __FILE__, \
__LINE__, #condition); \
return nullptr; \
} \
} while (0)
const uint8_t* buffer_t = reinterpret_cast<const uint8_t*>(buffer);
const flexbuffers::Map m = flexbuffers::GetRoot(buffer_t, length).AsMap();
RET_ENSURE(context, m["height"].IsInt());
RET_ENSURE(context, m["width"].IsInt());
RET_ENSURE(context, m["num_images"].IsInt());
RET_ENSURE(context, m["channels"].IsInt());
OpData* op_data = new OpData();
op_data->height = m["height"].AsInt32();
op_data->width = m["width"].AsInt32();
op_data->num_images = m["num_images"].AsInt32();
op_data->channels = m["channels"].AsInt32();
return op_data;
#undef RET_ENSURE
}
void Free(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<OpData*>(buffer);
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
TF_LITE_ENSURE(context, op_data);
TF_LITE_ENSURE(context, op_data->height > 0);
TF_LITE_ENSURE(context, op_data->width > 0);
TF_LITE_ENSURE(context, op_data->num_images > 0);
TF_LITE_ENSURE(context, op_data->channels == 3 || op_data->channels == 4);
TF_LITE_ENSURE_EQ(context, node->inputs->size, 1);
TF_LITE_ENSURE_EQ(context, node->outputs->size, 1);
const TfLiteTensor* input_buffer;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, 0, &input_buffer));
TfLiteTensor* output_tensor;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, 0, &output_tensor));
TF_LITE_ENSURE_TYPES_EQ(context, input_buffer->type, kTfLiteString);
TF_LITE_ENSURE_TYPES_EQ(context, output_tensor->type, kTfLiteUInt8);
TF_LITE_ENSURE_EQ(context, NumDimensions(input_buffer), 1);
TF_LITE_ENSURE_EQ(context, input_buffer->dims->data[0], op_data->num_images);
TfLiteIntArray* new_dims = TfLiteIntArrayCreate(4);
new_dims->data[0] = op_data->num_images;
new_dims->data[1] = op_data->height;
new_dims->data[2] = op_data->width;
new_dims->data[3] = op_data->channels;
output_tensor->type = kTfLiteUInt8;
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, output_tensor, new_dims));
return kTfLiteOk;
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* input_buffer;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, 0, &input_buffer));
TF_LITE_ENSURE(context, input_buffer);
TF_LITE_ENSURE(context, input_buffer->data.raw);
const int channels = op_data->channels;
const int decode_channels = 3;
TfLiteTensor* output_tensor;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, 0, &output_tensor));
unsigned char* output_arr = GetTensorData<unsigned char>(output_tensor);
Status decoder_status;
std::unique_ptr<LibjpegDecoder> decoder =
LibjpegDecoder::Create(decoder_status);
if (decoder_status.code != kTfLiteOk) {
    TF_LITE_KERNEL_LOG(context, "%s", decoder_status.error_message.c_str());
return kTfLiteError;
}
const int kDecodedImageSize =
op_data->width * op_data->height * decode_channels;
const int kOutputImageSize = op_data->width * op_data->height * channels;
int output_array_offset = 0;
for (int img = 0; img < op_data->num_images; ++img) {
tflite::StringRef inputref =
tflite::GetString(input_buffer, img);
unsigned char* decoded = output_arr + output_array_offset;
Status decode_status = decoder->DecodeImage(
inputref, {op_data->height, op_data->width, decode_channels}, decoded,
kDecodedImageSize);
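    // The decoder always produces tightly packed RGB. When the op is
    // configured for 4 channels, expand RGB to RGBA in place, walking the
    // buffer backwards so source pixels are not overwritten before being
    // copied, and filling alpha with 255 (fully opaque).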
if (channels == 4) {
size_t height = op_data->height;
size_t src_offset = kDecodedImageSize;
size_t dst_offset = kOutputImageSize;
while (height--) {
size_t width = op_data->width;
while (width--) {
src_offset -= decode_channels;
dst_offset -= channels;
std::copy_n(decoded + src_offset, decode_channels,
decoded + dst_offset);
decoded[dst_offset + 3] = 255;
}
}
}
output_array_offset += kOutputImageSize;
if (decode_status.code != kTfLiteOk) {
      TF_LITE_KERNEL_LOG(context, "%s", decode_status.error_message.c_str());
return kTfLiteError;
}
}
return kTfLiteOk;
}
TfLiteRegistration* Register_DECODE_JPEG() {
static TfLiteRegistration r = {
decode_jpeg_kernel::Init, decode_jpeg_kernel::Free,
decode_jpeg_kernel::Prepare, decode_jpeg_kernel::Eval};
return &r;
}
}
}
} | #include <cstdint>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/decode_jpeg_register.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_chessboard_jpeg.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_test_card_jpeg.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/libjpeg_decoder_test_helper.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace acceleration {
namespace decode_jpeg_kernel {
namespace {
using testing::ElementsAre;
const int kHeight = 300, kWidth = 250, kChannels = 3;
const int kDecodedSize = kHeight * kWidth * kChannels;
class DecodeJPEGOpModel : public SingleOpModel {
public:
DecodeJPEGOpModel(const TensorData& input, const TensorData& output,
int num_images, int height, int width, int channels) {
input_id_ = AddInput(input);
output_id_ = AddOutput(output);
flexbuffers::Builder fbb;
fbb.Map([&] {
fbb.Int("num_images", num_images);
fbb.Int("height", height);
fbb.Int("width", width);
fbb.Int("channels", channels);
});
fbb.Finish();
SetCustomOp("DECODE_JPEG", fbb.GetBuffer(),
tflite::acceleration::decode_jpeg_kernel::Register_DECODE_JPEG);
BuildInterpreter({GetShape(input_id_)});
}
int input_buffer_id() { return input_id_; }
int output_id() { return output_id_; }
std::vector<uint8_t> GetOutput() {
return ExtractVector<uint8_t>(output_id_);
}
std::vector<int> GetOutputShape() { return GetTensorShape(output_id_); }
protected:
int input_id_;
int shapes_id_;
int output_id_;
};
TEST(DecodeJpegTest, TestMultipleJPEGImages) {
std::string chessboard_image(
reinterpret_cast<const char*>(g_tflite_acceleration_chessboard_jpeg),
g_tflite_acceleration_chessboard_jpeg_len);
std::string test_card_image(
reinterpret_cast<const char*>(g_tflite_acceleration_test_card_jpeg),
g_tflite_acceleration_test_card_jpeg_len);
const int kNumImages = 2;
DecodeJPEGOpModel model({TensorType_STRING, {kNumImages}},
{TensorType_UINT8, {}}, kNumImages, kHeight, kWidth,
kChannels);
model.PopulateStringTensor(model.input_buffer_id(),
{chessboard_image, test_card_image});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
ASSERT_THAT(model.GetOutputShape(),
ElementsAre(kNumImages, kHeight, kWidth, kChannels));
std::vector<uint8_t> output_flattened = model.GetOutput();
std::vector<uint8_t> img1(output_flattened.begin(),
output_flattened.begin() + kDecodedSize);
EXPECT_THAT(img1, HasChessboardPatternWithTolerance(12));
std::vector<uint8_t> img2(output_flattened.begin() + kDecodedSize,
output_flattened.end());
EXPECT_THAT(img2, HasRainbowPatternWithTolerance(5));
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/mini_benchmark/decode_jpeg.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/mini_benchmark/decode_jpeg_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9b61f2fd-dae2-47fa-a877-b61df3b15a36 | cpp | tensorflow/tensorflow | libc_handle | tensorflow/lite/experimental/acceleration/mini_benchmark/libc_handle.cc | tensorflow/lite/experimental/acceleration/mini_benchmark/libc_handle_test.cc | #include "tensorflow/lite/experimental/acceleration/mini_benchmark/libc_handle.h"
#ifdef __ANDROID__
#include <dlfcn.h>
#endif
#include <stdio.h>
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/decode_jpeg_status.h"
namespace tflite {
namespace acceleration {
namespace decode_jpeg_kernel {
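// On Android, fmemopen() is resolved from libc.so at runtime via dlopen/dlsym
// (presumably because older Android libc versions do not export it); elsewhere
// the regular libc symbol is used directly, and Windows is reported as
// unsupported since it has no fmemopen.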
LibCHandle LibCHandle::Create(Status &status) {
#ifndef __ANDROID__
#ifndef _WIN32
return LibCHandle(nullptr, ::fmemopen);
#else
status = {kTfLiteError, "Windows not supported."};
return LibCHandle(nullptr, nullptr);
#endif
#else
void *libc = nullptr;
FmemopenPtr fmemopen_ptr = nullptr;
if (!(libc = dlopen("libc.so", RTLD_NOW | RTLD_LOCAL))) {
status = {kTfLiteError,
"Failed to load the libc dynamic shared object library."};
return LibCHandle(nullptr, nullptr);
}
if (!(fmemopen_ptr =
reinterpret_cast<FmemopenPtr>(dlsym(libc, "fmemopen")))) {
status = {kTfLiteError, "Failed to dynamically load the method: fmemopen"};
return LibCHandle(nullptr, nullptr);
}
status = {kTfLiteOk, ""};
return LibCHandle(libc, fmemopen_ptr);
#endif
}
FILE *LibCHandle::fmemopen(void *buf, size_t size, const char *mode) const {
return fmemopen_(buf, size, mode);
}
}
}
} | #include "tensorflow/lite/experimental/acceleration/mini_benchmark/libc_handle.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace tflite {
namespace acceleration {
namespace decode_jpeg_kernel {
namespace {
TEST(LibCHandleTest, LoadingSucceedsAndroidPlatforms) {
Status status;
LibCHandle handle = LibCHandle::Create(status);
EXPECT_EQ(status.error_message, "");
EXPECT_EQ(status.code, kTfLiteOk);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/mini_benchmark/libc_handle.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/mini_benchmark/libc_handle_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
aa187d9f-b591-4c32-9640-9440a3eb802c | cpp | tensorflow/tensorflow | custom_validation_embedder | tensorflow/lite/experimental/acceleration/mini_benchmark/model_modifier/custom_validation_embedder.cc | tensorflow/lite/experimental/acceleration/mini_benchmark/model_modifier/custom_validation_embedder_test.cc | #include "tensorflow/lite/experimental/acceleration/mini_benchmark/model_modifier/custom_validation_embedder.h"
#include <algorithm>
#include <iostream>
#include <iterator>
#include <string>
#include <vector>
#include "flatbuffers/buffer.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "flatbuffers/flexbuffers.h"
#include "flatbuffers/vector.h"
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/core/tools/verifier.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/constants.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/status_codes.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace acceleration {
namespace {
using ::flatbuffers::FlatBufferBuilder;
flatbuffers::Offset<flatbuffers::Vector<uint8_t>> CallOpCustomOptions(
int primary_subgraph_index, int batch_size, FlatBufferBuilder& output) {
flexbuffers::Builder flexbuffer_builder;
flexbuffer_builder.Map([&] {
flexbuffer_builder.Int("subgraph_index", primary_subgraph_index);
flexbuffer_builder.Int("loop_count", batch_size);
});
flexbuffer_builder.Finish();
return output.CreateVector(flexbuffer_builder.GetBuffer());
}
}
void CustomValidationEmbedder::CreateTensorsFrom(
const SubGraph& from_subgraph, const std::vector<int>& from_indexes,
std::vector<std::vector<uint8_t>>* buffer_content,
flatbuffers::FlatBufferBuilder& fbb, std::vector<int>& new_indexes,
std::vector<flatbuffers::Offset<Buffer>>& buffers,
std::vector<flatbuffers::Offset<Tensor>>& tensors) {
int tensor_index_start = tensors.size();
for (int i = 0; i < from_indexes.size(); i++) {
TensorT base_tensor;
from_subgraph.tensors()->Get(from_indexes[i])->UnPackTo(&base_tensor);
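    // Clone the tensor, widening a leading batch dimension of 1 to batch_size_
    // so the validation subgraph can process batch_size_ samples per call.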
if (!base_tensor.shape.empty() && base_tensor.shape[0] == 1) {
base_tensor.shape[0] = batch_size_;
}
if (!base_tensor.shape_signature.empty() &&
base_tensor.shape_signature[0] == 1) {
base_tensor.shape_signature[0] = batch_size_;
}
base_tensor.buffer = buffers.size();
tensors.push_back(CreateTensor(fbb, &base_tensor));
new_indexes.push_back(tensor_index_start + i);
if (buffer_content && !(*buffer_content)[i].empty()) {
buffers.push_back(
CreateBuffer(fbb, fbb.CreateVector((*buffer_content)[i])));
} else {
buffers.push_back(CreateBuffer(fbb));
}
}
}
MinibenchmarkStatus CustomValidationEmbedder::BuildModel(
const Model& main_model, flatbuffers::FlatBufferBuilder& fbb) {
ModelT main_model_obj;
main_model.UnPackTo(&main_model_obj);
if (main_model_obj.subgraphs[0]->inputs.size() != custom_input_.size()) {
TF_LITE_REPORT_ERROR(
error_reporter_,
"Unexpected custom_input size. Expected: %d. Actual: %d.",
main_model_obj.subgraphs[0]->inputs.size(), custom_input_.size());
return kMinibenchmarkValidationSubgraphBuildFailed;
}
std::vector<flatbuffers::Offset<Metadata>> metadata;
metadata.reserve(main_model_obj.metadata.size());
for (auto& iter : main_model_obj.metadata) {
metadata.push_back(CreateMetadata(fbb, iter.get()));
}
std::vector<flatbuffers::Offset<SignatureDef>> signature_defs;
signature_defs.reserve(main_model_obj.signature_defs.size());
for (auto& iter : main_model_obj.signature_defs) {
signature_defs.push_back(CreateSignatureDef(fbb, iter.get()));
}
std::vector<flatbuffers::Offset<SubGraph>> subgraphs;
subgraphs.reserve(main_model_obj.subgraphs.size());
for (auto& iter : main_model_obj.subgraphs) {
subgraphs.push_back(CreateSubGraph(fbb, iter.get()));
}
std::vector<flatbuffers::Offset<Buffer>> buffers;
buffers.reserve(main_model_obj.buffers.size());
for (auto& iter : main_model_obj.buffers) {
buffers.push_back(CreateBuffer(fbb, iter.get()));
}
std::vector<flatbuffers::Offset<OperatorCode>> operator_codes;
operator_codes.reserve(main_model_obj.operator_codes.size());
for (auto& iter : main_model_obj.operator_codes) {
operator_codes.push_back(CreateOperatorCode(fbb, iter.get()));
}
operator_codes.push_back(CreateOperatorCode(
fbb, BuiltinOperator_CUSTOM, fbb.CreateString("validation/call")));
int operator_code_index = operator_codes.size() - 1;
std::vector<flatbuffers::Offset<Tensor>> tensors;
std::vector<int32_t> input;
CreateTensorsFrom(*main_model.subgraphs()->Get(0),
main_model_obj.subgraphs[0]->inputs, &custom_input_, fbb,
input, buffers, tensors);
std::vector<int32_t> output;
CreateTensorsFrom(*main_model.subgraphs()->Get(0),
main_model_obj.subgraphs[0]->outputs, nullptr, fbb, output,
buffers, tensors);
auto input_offset = fbb.CreateVector(input);
auto output_offset = fbb.CreateVector(output);
std::vector<flatbuffers::Offset<Operator>> operators{CreateOperator(
fbb, operator_code_index, input_offset, output_offset,
tflite::BuiltinOptions_NONE, 0,
CallOpCustomOptions( 0, batch_size_, fbb),
tflite::CustomOptionsFormat_FLEXBUFFERS)};
subgraphs.push_back(
CreateSubGraph(fbb, fbb.CreateVector(tensors), input_offset,
output_offset, fbb.CreateVector(operators),
fbb.CreateString(std::string(kValidationGraphName))));
fbb.Finish(
CreateModel(fbb, kModelSchemaVersion, fbb.CreateVector(operator_codes),
fbb.CreateVector(subgraphs),
fbb.CreateString(main_model_obj.description),
fbb.CreateVector(buffers),
0, fbb.CreateVector(metadata),
fbb.CreateVector(signature_defs)),
"TFL3");
if (Verify(fbb.GetBufferPointer(), fbb.GetSize(), error_reporter_)) {
return kMinibenchmarkSuccess;
} else {
return kMinibenchmarkValidationSubgraphBuildFailed;
}
}
}
} | #include "tensorflow/lite/experimental/acceleration/mini_benchmark/model_modifier/custom_validation_embedder.h"
#include <iostream>
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffer_builder.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/core/interpreter_builder.h"
#include "tensorflow/lite/core/kernels/register.h"
#include "tensorflow/lite/core/model_builder.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/call_register.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_mobilenet_model.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/mini_benchmark_test_helper.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/status_codes.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/tools/model_loader.h"
namespace tflite {
namespace acceleration {
namespace {
using ::flatbuffers::FlatBufferBuilder;
constexpr int kMobileNetModelInputByteSize = 1 * 224 * 224 * 3;
class CustomValidationEmbedderTest : public ::testing::Test {
protected:
void SetUp() override {
std::string plain_model_path = MiniBenchmarkTestHelper::DumpToTempFile(
"mobilenet_quant.tflite",
g_tflite_acceleration_embedded_mobilenet_model,
g_tflite_acceleration_embedded_mobilenet_model_len);
ASSERT_TRUE(!plain_model_path.empty());
plain_model_loader_ =
std::make_unique<tools::PathModelLoader>(plain_model_path);
ASSERT_TRUE(plain_model_loader_->Init());
}
std::unique_ptr<tools::ModelLoader> plain_model_loader_;
};
TEST_F(CustomValidationEmbedderTest, BuildValidationModelSucceed) {
int batch_size = 5;
std::vector<uint8_t> input_buffer(batch_size * kMobileNetModelInputByteSize);
CustomValidationEmbedder embedder(batch_size, {input_buffer});
FlatBufferBuilder fbb;
EXPECT_EQ(
embedder.BuildModel(*plain_model_loader_->GetModel()->GetModel(), fbb),
kMinibenchmarkSuccess);
auto model =
FlatBufferModel::BuildFromModel(GetModel(fbb.GetBufferPointer()));
auto interpreter = std::make_unique<Interpreter>();
auto resolver = std::make_unique<
::tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates>();
resolver->AddCustom("validation/call", ops::Register_CALL(), 1);
ASSERT_EQ(InterpreterBuilder(*model, *resolver)(&interpreter), kTfLiteOk);
ASSERT_NE(interpreter, nullptr);
Subgraph* validation_graph = interpreter->subgraph(1);
EXPECT_THAT(input_buffer, testing::ElementsAreArray(
GetTensorData<uint8_t>(validation_graph->tensor(
validation_graph->inputs()[0])),
input_buffer.size()));
EXPECT_EQ(validation_graph->AllocateTensors(), kTfLiteOk);
EXPECT_EQ(validation_graph->Invoke(), kTfLiteOk);
}
TEST_F(CustomValidationEmbedderTest, BuildValidationModelTooManyInput) {
int batch_size = 5;
CustomValidationEmbedder embedder(batch_size, {{}, {}});
FlatBufferBuilder fbb;
EXPECT_EQ(
embedder.BuildModel(*plain_model_loader_->GetModel()->GetModel(), fbb),
kMinibenchmarkValidationSubgraphBuildFailed);
}
TEST_F(CustomValidationEmbedderTest, BuildValidationModelInvalidBufferSize) {
CustomValidationEmbedder embedder(2, {std::vector<uint8_t>(2, 2)});
FlatBufferBuilder fbb;
EXPECT_EQ(
embedder.BuildModel(*plain_model_loader_->GetModel()->GetModel(), fbb),
kMinibenchmarkValidationSubgraphBuildFailed);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/mini_benchmark/model_modifier/custom_validation_embedder.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/mini_benchmark/model_modifier/custom_validation_embedder_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ac032197-519c-4273-988c-740d82196974 | cpp | tensorflow/tensorflow | mul_op_plugin | tensorflow/lite/experimental/lrt/examples/mul_op_plugin.cc | tensorflow/lite/experimental/lrt/examples/mul_op_plugin_test.cc | #include <stdio.h>
#include <cstddef>
#include <cstdlib>
#include <string>
#include <vector>
#include "tensorflow/lite/experimental/lrt/c/lite_rt_common.h"
#include "tensorflow/lite/experimental/lrt/c/lite_rt_compiler_plugin.h"
#include "tensorflow/lite/experimental/lrt/c/lite_rt_model.h"
#include "tensorflow/lite/experimental/lrt/c/lite_rt_op_code.h"
#include "tensorflow/lite/experimental/lrt/cc/lite_rt_support.h"
#include "tensorflow/lite/experimental/lrt/core/graph_tools.h"
constexpr char kPluginMan[] = "ExampleSocManufacturer";
constexpr char kPluginModel[] = "DummyMulOp";
const char* LrtPluginSocManufacturer() { return kPluginMan; }
lrt_param_index_t LrtPluginNumSupportedSocModels(
LrtCompilerPlugin compiler_plugin) {
return 1;
}
LrtStatus LrtPluginGetSupportedSocModelId(LrtCompilerPlugin compiler_plugin,
lrt_param_index_t config_idx,
const char** config_id) {
if (config_idx != 0) {
return kLrtStatusErrorUnsupported;
}
*config_id = kPluginModel;
return kLrtStatusOk;
}
struct LrtCompiledResultT {
std::string byte_code;
std::vector<std::string> per_op_data;
};
LrtStatus LrtCompiledResultGetByteCode(LrtCompiledResult compiled_result,
const void** byte_code,
size_t* byte_code_size) {
*byte_code = compiled_result->byte_code.data();
*byte_code_size = compiled_result->byte_code.size();
return kLrtStatusOk;
}
LrtStatus LrtCompiledResultGetCallInfo(LrtCompiledResult compiled_result,
lrt_param_index_t call_idx,
const void** call_info,
size_t* call_info_size) {
if (call_idx >= compiled_result->per_op_data.size()) {
return kLrtStatusParamIndexOOB;
}
*call_info = compiled_result->per_op_data.at(call_idx).data();
*call_info_size = compiled_result->per_op_data.at(call_idx).size();
return kLrtStatusOk;
}
LrtStatus LrtCompiledResultGetNumCalls(LrtCompiledResult compiled_result,
lrt_param_index_t* num_calls) {
*num_calls = compiled_result->per_op_data.size();
return kLrtStatusOk;
}
void LrtCompiledResultDestroy(LrtCompiledResult compiled_result) {
delete compiled_result;
}
struct LrtCompilerPluginT {
};
LrtStatus LrtPluginInit(LrtCompilerPlugin* compiler_plugin) {
*compiler_plugin = new LrtCompilerPluginT;
return kLrtStatusOk;
}
void LrtPluginDestroy(LrtCompilerPlugin compiler_plugin) {
delete compiler_plugin;
}
LrtStatus LrtPluginPartitionModel(LrtCompilerPlugin compiler_plugin,
LrtModel model, LrtOpList selected_ops) {
LRT_ASSIGN_OR_RETURN_STATUS(auto subgraph, graph_tools::GetSubgraph(model));
LRT_ASSIGN_OR_RETURN_STATUS(auto ops, graph_tools::GetSubgraphOps(subgraph));
for (auto op : ops) {
LrtOpCode op_code;
LRT_RETURN_STATUS_IF_NOT_OK(GetOpCode(op, &op_code));
if (op_code != kLrtOpCodeTflMul) {
continue;
}
LRT_RETURN_STATUS_IF_NOT_OK(PushOp(selected_ops, op));
}
return kLrtStatusOk;
}
LrtStatus CompileSinglePartition(lrt_param_index_t partition_index,
LrtSubgraph subgraph,
LrtCompiledResultT& result) {
LRT_ASSIGN_OR_RETURN_STATUS(auto ops, graph_tools::GetSubgraphOps(subgraph));
int num_muls_in_partition = 0;
for (auto op : ops) {
LrtOpCode op_code;
LRT_RETURN_STATUS_IF_NOT_OK(GetOpCode(op, &op_code));
if (op_code != kLrtOpCodeTflMul) {
return kLrtStatusErrorUnsupported;
}
++num_muls_in_partition;
}
{
char* byte_code_append;
(void)asprintf(&byte_code_append,
"Partition_%lu_with_%d_muls:", partition_index,
num_muls_in_partition);
result.byte_code.append(byte_code_append);
free(byte_code_append);
}
{
char* per_op_data;
(void)asprintf(&per_op_data, "Partition_%lu", partition_index);
result.per_op_data.push_back(per_op_data);
free(per_op_data);
}
return kLrtStatusOk;
}
LrtStatus LrtPluginCompile(LrtCompilerPlugin compiler_plugin,
LrtSubgraphArray partitions,
lrt_param_index_t num_partitions,
LrtCompiledResult* compiled_result) {
LrtCompiledResult result = new LrtCompiledResultT;
for (auto i = 0; i < num_partitions; ++i) {
LRT_RETURN_STATUS_IF_NOT_OK(
CompileSinglePartition(i, partitions[i], *result));
}
*compiled_result = result;
return kLrtStatusOk;
} | #include <cstddef>
#include <memory>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "tensorflow/lite/experimental/lrt/c/lite_rt_compiler_plugin.h"
#include "tensorflow/lite/experimental/lrt/c/lite_rt_model.h"
#include "tensorflow/lite/experimental/lrt/c/lite_rt_op_code.h"
#include "tensorflow/lite/experimental/lrt/cc/lite_rt_support.h"
#include "tensorflow/lite/experimental/lrt/core/graph_tools.h"
#include "tensorflow/lite/experimental/lrt/core/model.h"
#include "tensorflow/lite/experimental/lrt/test_data/test_data_util.h"
namespace {
UniqueLrtCompilerPlugin GetDummyPlugin() {
LrtCompilerPlugin dummy_plugin;
LRT_CHECK_STATUS_OK(LrtPluginInit(&dummy_plugin));
CHECK_NE(dummy_plugin, nullptr);
return UniqueLrtCompilerPlugin(dummy_plugin);
}
TEST(TestDummyPlugin, GetConfigInfo) {
ASSERT_STREQ(LrtPluginSocManufacturer(), "ExampleSocManufacturer");
auto plugin = GetDummyPlugin();
ASSERT_EQ(1, LrtPluginNumSupportedSocModels(plugin.get()));
const char* config_id;
ASSERT_STATUS_OK(
LrtPluginGetSupportedSocModelId(plugin.get(), 0, &config_id));
ASSERT_STREQ(config_id, "DummyMulOp");
}
TEST(TestCallDummyPlugin, PartitionSimpleMultiAdd) {
auto plugin = GetDummyPlugin();
auto model = LoadTestFileModel("simple_multi_op.tflite");
LrtOpListT selected_ops;
ASSERT_STATUS_OK(
LrtPluginPartitionModel(plugin.get(), model.get(), &selected_ops));
ASSERT_EQ(selected_ops.ops.size(), 2);
ASSERT_EQ(selected_ops.ops[0]->op_code, kLrtOpCodeTflMul);
ASSERT_EQ(selected_ops.ops[1]->op_code, kLrtOpCodeTflMul);
}
TEST(TestCallDummyPlugin, CompileMulSubgraph) {
auto plugin = GetDummyPlugin();
auto model = LoadTestFileModel("mul_simple.tflite");
ASSERT_RESULT_OK_ASSIGN(auto subgraph, graph_tools::GetSubgraph(model.get()));
LrtCompiledResult compiled;
ASSERT_STATUS_OK(LrtPluginCompile(plugin.get(), &subgraph, 1, &compiled));
const void* byte_code;
size_t byte_code_size;
ASSERT_STATUS_OK(
LrtCompiledResultGetByteCode(compiled, &byte_code, &byte_code_size));
std::string byte_code_string(reinterpret_cast<const char*>(byte_code),
byte_code_size);
ASSERT_EQ(byte_code_string, "Partition_0_with_2_muls:");
const void* op_data;
size_t op_data_size;
ASSERT_STATUS_OK(
LrtCompiledResultGetCallInfo(compiled, 0, &op_data, &op_data_size));
std::string op_data_string(reinterpret_cast<const char*>(op_data),
op_data_size);
ASSERT_EQ(op_data_string, "Partition_0");
LrtCompiledResultDestroy(compiled);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/lrt/examples/mul_op_plugin.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/lrt/examples/mul_op_plugin_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d141a428-7f90-4be4-a9af-5ed15f002cff | cpp | tensorflow/tensorflow | log | tensorflow/lite/experimental/shlo/ops/log.cc | tensorflow/lite/experimental/shlo/ops/log_test.cc | #include "tensorflow/lite/experimental/shlo/ops/log.h"
#include <cmath>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/bf16.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/f16.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct Log {
template <class T>
T operator()(T v) const {
return std::log(v);
}
};
template <>
F16 Log::operator()<F16>(F16 val) const {
return F16(operator()(static_cast<float>(val)));
}
template <>
BF16 Log::operator()<BF16>(BF16 val) const {
return BF16(operator()(static_cast<float>(val)));
}
LogOp Create(LogOp::Attributes) { return {}; }
absl::Status Prepare(LogOp& op, const Tensor& input, Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(input.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(CheckSupportedTypes(
CheckCtx("log"), input, IsFloatTensor, IsQuantizedPerTensorTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("log"), input, output));
return absl::OkStatus();
}
absl::Status Evaluate(LogOp& op, const Tensor& input, Tensor& output) {
Log log;
if (input.IsPerTensorQuantized()) {
DISPATCH_QUANTIZED(
detail::DequantizeOpQuantizePerTensor,
input.quantized_per_tensor_element_type().StorageType(),
input.quantized_per_tensor_element_type().ExpressedType(), log, input,
output)
} else if (IsFloatTensor(input)) {
DISPATCH_FLOAT(detail::EvaluateNoQuantization, input.tensor_element_type(),
log, input, output);
}
return absl::FailedPreconditionError(
"stablehlo.log: Unsupported tensor type.");
}
}; | #include "tensorflow/lite/experimental/shlo/ops/log.h"
#include <cmath>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/shlo/bf16.h"
#include "tensorflow/lite/experimental/shlo/f16.h"
#include "tensorflow/lite/experimental/shlo/ops/test_util.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise_test_util.h"
#include "tensorflow/lite/experimental/shlo/quantize.h"
#include "tensorflow/lite/experimental/shlo/quantized_tensor_element_type.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/status_matcher.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
using testing::ElementsAreArray;
using testing::NanSensitiveFloatEq;
using testing::Pointwise;
namespace shlo_ref {
template <>
struct ParamName<LogOp> {
static std::string Get() { return "Log"; }
};
namespace {
struct Log {
template <class T>
T operator()(T v) const {
return std::log(v);
}
} log_ref;
template <>
F16 Log::operator()<F16>(F16 val) const {
return F16(operator()(static_cast<float>(val)));
}
template <>
BF16 Log::operator()<BF16>(BF16 val) const {
return BF16(operator()(static_cast<float>(val)));
}
INSTANTIATE_TYPED_TEST_SUITE_P(Log, UnaryElementwiseOpShapePropagationTest,
LogOp, TestParamNames);
INSTANTIATE_TYPED_TEST_SUITE_P(
Log, UnaryElementwiseSameBaselineElementTypeConstraintTest,
UnaryElementwiseConstraint1Types<LogOp>, TestParamNames);
using UnsupportedTypes = WithOpTypes<
LogOp, ConcatTypes<BoolTestType, IntTestTypes, PerAxisQuantizedTestTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(Log, UnaryElementwiseUnsupportedTypeTest,
UnsupportedTypes, TestParamNames);
template <class T>
struct LogTest : ::testing::Test {};
TYPED_TEST_SUITE(LogTest, FloatTestTypes, TestParamNames);
TYPED_TEST(LogTest, FloatTensorsWork) {
using StorageT = typename TypeParam::StorageT;
const Shape shape({2, 3, 4});
Vector<StorageT> input_data = RandomBuffer<TypeParam::kStorage>(
shape, static_cast<StorageT>(0.1));
Vector<StorageT> output_data(shape.NumElements());
Tensor input_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = input_data.data()};
Tensor output_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(input_data, expected_data.begin(), log_ref);
auto op = Create(LogOp::Attributes{});
ASSERT_OK(Prepare(op, input_tensor, output_tensor));
ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
EXPECT_THAT(output_data, Pointwise(NanSensitiveFloatEq(), expected_data));
}
template <class T>
struct QuantizedLogTest : ::testing::Test {};
TYPED_TEST_SUITE(QuantizedLogTest, QuantizedTestTypes, TestParamNames);
TYPED_TEST(QuantizedLogTest, PerTensorWorks) {
using StorageT = typename TypeParam::StorageT;
using ExpressedT = typename TypeParam::ExpressedT;
const Shape shape({2, 3, 4});
const ExpressedT scale = static_cast<ExpressedT>(1.5);
const StorageT zero_point = static_cast<StorageT>(5);
Vector<StorageT> input_data =
RandomBuffer<TypeParam::kStorage>(shape, zero_point + 1);
Vector<StorageT> output_data(shape.NumElements());
const QuantizedElementTypePerTensor tensor_type =
QuantizedElementTypePerTensor(TypeParam::kStorage, zero_point,
TypeParam::kExpressed, scale);
Tensor input_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = input_data.data()};
Tensor output_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(
input_data, expected_data.begin(), [zero_point, scale](auto v) {
const ExpressedT dequantized_input = Dequantize(v, zero_point, scale);
const ExpressedT dequantized_res = log_ref(dequantized_input);
return Quantize<TypeParam::kStorage, TypeParam::kExpressed>(
dequantized_res, zero_point, static_cast<ExpressedT>(1.) / scale);
});
auto op = Create(LogOp::Attributes{});
ASSERT_OK(Prepare(op, input_tensor, output_tensor));
ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
EXPECT_THAT(output_data, ElementsAreArray(expected_data));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/log.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/log_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b42f1177-a746-48c5-936f-0579562c34b0 | cpp | tensorflow/tensorflow | tfl_tensor_ref | tensorflow/lite/experimental/ml_adjacent/tflite/tfl_tensor_ref.cc | tensorflow/lite/experimental/ml_adjacent/tflite/tfl_tensor_ref_test.cc | #include "tensorflow/lite/experimental/ml_adjacent/tflite/tfl_tensor_ref.h"
#include <cstddef>
#include <vector>
#include "tensorflow/lite/array.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace ml_adj {
namespace data {
using ::tflite::BuildTfLiteArray;
using ::tflite::TfLiteArrayUniquePtr;
using ::tflite::TfLiteTypeGetSize;
namespace {
etype_t TflToLibType(const TfLiteType tfl_type) {
switch (tfl_type) {
case kTfLiteFloat32:
return etype_t::f32;
case kTfLiteInt32:
return etype_t::i32;
case kTfLiteFloat64:
return etype_t::f64;
default:
return etype_t::i32;
}
}
}
TflTensorRef::TflTensorRef(const TfLiteTensor* tfl_tensor)
: DataRef(TflToLibType(tfl_tensor->type)), tfl_tensor_(tfl_tensor) {
dims_.assign(tfl_tensor->dims->data,
tfl_tensor->dims->data + tfl_tensor->dims->size);
}
const void* TflTensorRef::Data() const { return tfl_tensor_->data.data; }
ind_t TflTensorRef::NumElements() const {
return tfl_tensor_->bytes / TfLiteTypeGetSize(tfl_tensor_->type);
}
size_t TflTensorRef::Bytes() const { return tfl_tensor_->bytes; }
MutableTflTensorRef::MutableTflTensorRef(TfLiteTensor* tfl_tensor,
TfLiteContext* tfl_ctx)
: MutableDataRef(TflToLibType(tfl_tensor->type)),
tfl_tensor_(tfl_tensor),
tfl_ctx_(tfl_ctx) {
dims_.assign(tfl_tensor->dims->data,
tfl_tensor->dims->data + tfl_tensor->dims->size);
}
void MutableTflTensorRef::Resize(dims_t&& dims) {
TfLiteArrayUniquePtr<int> arr =
BuildTfLiteArray(std::vector<int>(dims.begin(), dims.end()));
TFLITE_CHECK_EQ(tfl_ctx_->ResizeTensor(tfl_ctx_, tfl_tensor_, arr.release()),
kTfLiteOk);
dims_ = dims;
}
const void* MutableTflTensorRef::Data() const { return tfl_tensor_->data.data; }
ind_t MutableTflTensorRef::NumElements() const {
return tfl_tensor_->bytes / TfLiteTypeGetSize(tfl_tensor_->type);
}
size_t MutableTflTensorRef::Bytes() const { return tfl_tensor_->bytes; }
void* MutableTflTensorRef::Data() { return tfl_tensor_->data.data; }
}
} | #include "tensorflow/lite/experimental/ml_adjacent/tflite/tfl_tensor_ref.h"
#include <algorithm>
#include <cstddef>
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/types/span.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/util.h"
namespace ml_adj {
namespace data {
namespace {
using ::testing::Each;
using ::tflite::BuildTfLiteTensor;
using ::tflite::DimsAre;
using ::tflite::NumElements;
using ::tflite::TensorUniquePtr;
TfLiteStatus SimpleResizeTensor(TfLiteContext*, TfLiteTensor* tensor,
TfLiteIntArray* new_size) {
TFLITE_CHECK(tensor->type == kTfLiteFloat32);
size_t num_bytes = NumElements(new_size) * sizeof(float);
TF_LITE_ENSURE_STATUS(TfLiteTensorRealloc(num_bytes, tensor));
if (tensor->dims != nullptr) {
TfLiteIntArrayFree(tensor->dims);
}
tensor->dims = new_size;
return kTfLiteOk;
}
std::unique_ptr<TfLiteContext> MakeSimpleContext() {
auto ctx = std::make_unique<TfLiteContext>();
ctx->ResizeTensor = SimpleResizeTensor;
return ctx;
}
TEST(ImmutableTensorRefTest, ConstructsAndManifestsTensorData) {
TensorUniquePtr tfl_tensor =
BuildTfLiteTensor(kTfLiteFloat32, {2, 2}, kTfLiteDynamic);
std::fill(tfl_tensor->data.f, tfl_tensor->data.f + 4, 2.0f);
TflTensorRef ref(tfl_tensor.get());
ASSERT_EQ(ref.Type(), etype_t::f32);
ASSERT_EQ(ref.Dims(), (dims_t{2, 2}));
ASSERT_EQ(ref.Bytes(), 4 * sizeof(float));
absl::Span<const float> data(reinterpret_cast<const float*>(ref.Data()), 4);
EXPECT_THAT(data, Each(2.0f));
}
TEST(MutableTensorRefTest, ConstructsAndManifestsTensorData) {
TensorUniquePtr tfl_tensor =
BuildTfLiteTensor(kTfLiteFloat32, {2, 2}, kTfLiteDynamic);
std::fill(tfl_tensor->data.f, tfl_tensor->data.f + 4, 2.0f);
MutableTflTensorRef ref(tfl_tensor.get(), nullptr);
ASSERT_EQ(ref.Type(), etype_t::f32);
ASSERT_EQ(ref.Dims(), (dims_t{2, 2}));
ASSERT_EQ(ref.Bytes(), 4 * sizeof(float));
absl::Span<const float> data(reinterpret_cast<const float*>(ref.Data()), 4);
EXPECT_THAT(data, Each(2.0f));
}
TEST(MutableTensorRefTest, TensorRefWritesDataToTensor) {
TensorUniquePtr tfl_tensor =
BuildTfLiteTensor(kTfLiteFloat32, {3, 3}, kTfLiteDynamic);
MutableTflTensorRef ref(tfl_tensor.get(), nullptr);
ASSERT_EQ(ref.Type(), etype_t::f32);
ASSERT_EQ(ref.Dims(), (dims_t{3, 3}));
ASSERT_EQ(ref.Bytes(), 9 * sizeof(float));
absl::Span<float> data(reinterpret_cast<float*>(ref.Data()), 9);
std::fill(data.begin(), data.end(), 3.0f);
EXPECT_THAT(absl::Span<const float>(tfl_tensor->data.f, 9), Each(3.0f));
}
TEST(MutableTensorRefTest, ResizeIncreaseSize) {
TensorUniquePtr tfl_tensor =
BuildTfLiteTensor(kTfLiteFloat32, {2, 3}, kTfLiteDynamic);
std::unique_ptr<TfLiteContext> ctx = MakeSimpleContext();
MutableTflTensorRef ref(tfl_tensor.get(), ctx.get());
ASSERT_EQ(ref.Type(), etype_t::f32);
ASSERT_EQ(ref.Dims(), (dims_t{2, 3}));
ASSERT_EQ(ref.Bytes(), 6 * sizeof(float));
ref.Resize({3, 3});
ASSERT_EQ(ref.Dims(), (dims_t{3, 3}));
ASSERT_EQ(ref.Bytes(), 9 * sizeof(float));
absl::Span<float> ref_data(reinterpret_cast<float*>(ref.Data()), 9);
ASSERT_THAT(tfl_tensor.get(), DimsAre({3, 3}));
ASSERT_EQ(tfl_tensor->bytes, ref.Bytes());
ASSERT_EQ(ref.Data(), tfl_tensor->data.data);
}
TEST(MutableTensorRefTest, ResizeDecreasesSize) {
TensorUniquePtr tfl_tensor =
BuildTfLiteTensor(kTfLiteFloat32, {2, 3}, kTfLiteDynamic);
std::unique_ptr<TfLiteContext> ctx = MakeSimpleContext();
MutableTflTensorRef ref(tfl_tensor.get(), ctx.get());
ASSERT_EQ(ref.Type(), etype_t::f32);
ASSERT_EQ(ref.Dims(), (dims_t{2, 3}));
ASSERT_EQ(ref.Bytes(), 6 * sizeof(float));
ref.Resize({2, 2});
ASSERT_EQ(ref.Dims(), (dims_t{2, 2}));
ASSERT_EQ(ref.Bytes(), 4 * sizeof(float));
absl::Span<float> ref_data(reinterpret_cast<float*>(ref.Data()), 4);
ASSERT_THAT(tfl_tensor.get(), DimsAre({2, 2}));
ASSERT_EQ(tfl_tensor->bytes, ref.Bytes());
ASSERT_EQ(ref.Data(), tfl_tensor->data.data);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/ml_adjacent/tflite/tfl_tensor_ref.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/ml_adjacent/tflite/tfl_tensor_ref_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ba39cf62-bc1b-45b5-8cae-a92dc975116a | cpp | tensorflow/tensorflow | extern_call | tensorflow/lite/experimental/ml_adjacent/tflite/extern_call.cc | tensorflow/lite/experimental/ml_adjacent/tflite/extern_call_test.cc | #include "tensorflow/lite/experimental/ml_adjacent/tflite/extern_call.h"
#include <cstdint>
#include <memory>
#include <vector>
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/experimental/ml_adjacent/algo/crop.h"
#include "tensorflow/lite/experimental/ml_adjacent/algo/resize.h"
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
#include "tensorflow/lite/experimental/ml_adjacent/tflite/tfl_tensor_ref.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace extern_call {
namespace {
using ::ml_adj::algo::Algo;
using ::ml_adj::algo::InputPack;
using ::ml_adj::algo::OutputPack;
using ::ml_adj::data::MutableTflTensorRef;
using ::ml_adj::data::TflTensorRef;
template <typename PackType>
struct PackDeleter {
void operator()(PackType* pack) {
if (pack == nullptr) return;
for (auto* d : *pack) {
if (d == nullptr) continue;
delete d;
}
delete pack;
}
};
template <typename PackType>
using UniquePack = std::unique_ptr<PackType, PackDeleter<PackType>>;
constexpr uint8_t kNumFuncs = 2;
static const Algo* const kReg[kNumFuncs] = {ml_adj::crop::Impl_CenterCrop(),
ml_adj::resize::Impl_Resize()};
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
for (int i = 0; i < NumOutputs(node); ++i) {
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, i, &output));
SetTensorToDynamic(output);
}
return kTfLiteOk;
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
UniquePack<InputPack> lib_inputs(new InputPack());
for (int i = 0; i < NumInputs(node); ++i) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, i, &input));
lib_inputs->push_back(new TflTensorRef(input));
}
UniquePack<OutputPack> lib_outputs(new OutputPack());
for (int i = 0; i < NumOutputs(node); ++i) {
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, i, &output));
lib_outputs->push_back(new MutableTflTensorRef(output, context));
}
TF_LITE_ENSURE_EQ(context, node->custom_initial_data_size,
sizeof(ExternCallOptions));
const auto* const options =
reinterpret_cast<const ExternCallOptions*>(node->custom_initial_data);
TF_LITE_ENSURE(context,
options->func_id >= 0 && options->func_id < kNumFuncs);
const Algo* const algo = kReg[options->func_id];
algo->process(*lib_inputs, *lib_outputs);
return kTfLiteOk;
}
}
TfLiteRegistration* Register_EXTERN_CALL() {
static TfLiteRegistration r = {nullptr, nullptr, Prepare, Eval};
return &r;
}
}
} | #include "tensorflow/lite/experimental/ml_adjacent/tflite/extern_call.h"
#include <cstdint>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
class ExternCallModel : public SingleOpModel {
public:
ExternCallModel(std::vector<TensorData> inputs,
std::vector<TensorData> outputs, uint8_t func_id) {
std::vector<std::vector<int>> input_shapes;
for (const auto& data : inputs) {
input_ids_.push_back(AddInput(data));
input_shapes.push_back(GetShape(input_ids_.back()));
}
for (const auto& data : outputs) {
output_ids_.push_back(AddOutput(data));
}
SetCustomOp("ExternCall", {func_id}, extern_call::Register_EXTERN_CALL);
BuildInterpreter(input_shapes);
}
const TfLiteTensor* Output(int output_id) {
return interpreter_->tensor(output_ids_[output_id]);
}
private:
std::vector<int> input_ids_;
std::vector<int> output_ids_;
};
namespace {
TEST(ExternCallTest, CropFunc) {
std::vector<TensorData> inputs = {{TensorType_FLOAT32, {1, 5, 5, 1}},
{TensorType_FLOAT64, {}}};
std::vector<TensorData> output = {{TensorType_FLOAT32, {}}};
ExternCallModel model(inputs, output, 0);
model.PopulateTensor<double>(1, {0.5});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
ASSERT_NE(model.Output(0), nullptr);
ASSERT_THAT(model.Output(0), DimsAre({1, 3, 3, 1}));
}
TEST(ExternCallTest, ResizeTest) {
std::vector<TensorData> inputs = {{TensorType_FLOAT32, {1, 5, 5, 1}},
{TensorType_UINT32, {2}}};
std::vector<TensorData> output = {{TensorType_FLOAT32, {}}};
ExternCallModel model(inputs, output, 1);
model.PopulateTensor<uint32_t>(1, {3, 3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
ASSERT_NE(model.Output(0), nullptr);
ASSERT_THAT(model.Output(0), DimsAre({1, 3, 3, 1}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/ml_adjacent/tflite/extern_call.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/ml_adjacent/tflite/extern_call_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |