ID
stringlengths 36
36
| Language
stringclasses 1
value | Repository Name
stringclasses 13
values | File Name
stringlengths 2
48
| File Path in Repository
stringlengths 11
111
| File Path for Unit Test
stringlengths 13
116
| Code
stringlengths 0
278k
| Unit Test - (Ground Truth)
stringlengths 78
663k
| Code Url
stringlengths 91
198
| Test Code Url
stringlengths 93
203
| Commit Hash
stringclasses 13
values |
---|---|---|---|---|---|---|---|---|---|---|
94b41683-66eb-49e3-8f28-76f38f147e83 | cpp | tensorflow/tensorflow | presized_cuckoo_map | tensorflow/core/util/presized_cuckoo_map.h | tensorflow/core/util/presized_cuckoo_map_test.cc | #ifndef TENSORFLOW_CORE_UTIL_PRESIZED_CUCKOO_MAP_H_
#define TENSORFLOW_CORE_UTIL_PRESIZED_CUCKOO_MAP_H_
#include <algorithm>
#include <vector>
#include "absl/base/prefetch.h"
#include "absl/numeric/int128.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/macros.h"
namespace tensorflow {
// Fixed-capacity cuckoo hash map from uint64 keys to `value`, sized once at
// construction (or via Clear()).  Every key hashes to two candidate buckets
// of kSlotsPerBucket slots each; an insert that finds both buckets full
// displaces existing entries along a BFS-discovered "cuckoo path".  There is
// no Delete and no resize, and the key ~0ULL is reserved internally to mark
// empty slots (see key_transform).
template <class value>
class PresizedCuckooMap {
 public:
  typedef uint64 key_type;

  // Sizes the table up front for `num_entries` insertions; see Clear().
  explicit PresizedCuckooMap(uint64 num_entries) { Clear(num_entries); }

  // Drops all entries and re-sizes the table for `num_entries` insertions.
  void Clear(uint64 num_entries) {
    cpq_.reset(new CuckooPathQueue());
    double n(num_entries);
    // Over-provision so the table runs at kLoadFactor occupancy, then add a
    // fixed slack of 32 buckets (this also keeps zero/tiny maps non-empty).
    n /= kLoadFactor;
    num_buckets_ = (static_cast<uint64>(n) / kSlotsPerBucket);
    num_buckets_ += 32;
    Bucket empty_bucket;
    for (int i = 0; i < kSlotsPerBucket; i++) {
      empty_bucket.keys[i] = kUnusedSlot;
    }
    buckets_.clear();
    buckets_.resize(num_buckets_, empty_bucket);
  }

  // Inserts (k, v) if k is not already present.  Returns false when k is a
  // duplicate or when no cuckoo path can be found (table effectively full).
  bool InsertUnique(const key_type k, const value& v) {
    uint64 tk = key_transform(k);
    uint64 b1 = fast_map_to_buckets(tk);
    uint64 b2 = fast_map_to_buckets(h2(tk));
    uint64 target_bucket = 0;
    int target_slot = kNoSpace;
    // Scan both candidate buckets for a duplicate, remembering the first
    // free slot seen along the way.
    // NOTE(review): duplicates are matched against the untransformed k while
    // InsertInternal stores the transformed tk; these agree for every key
    // except ~0ULL (which key_transform remaps to 0) -- confirm callers never
    // use ~0ULL as a key.
    for (auto bucket : {b1, b2}) {
      Bucket* bptr = &buckets_[bucket];
      for (int slot = 0; slot < kSlotsPerBucket; slot++) {
        if (bptr->keys[slot] == k) {
          return false;
        } else if (target_slot == kNoSpace && bptr->keys[slot] == kUnusedSlot) {
          target_bucket = bucket;
          target_slot = slot;
        }
      }
    }
    if (target_slot != kNoSpace) {
      InsertInternal(tk, v, target_bucket, target_slot);
      return true;
    }
    // Both buckets full: displace existing entries to make room.
    return CuckooInsert(tk, v, b1, b2);
  }

  // Looks up k; on success copies the value into *out and returns true.
  bool Find(const key_type k, value* out) const {
    uint64 tk = key_transform(k);
    return FindInBucket(k, fast_map_to_buckets(tk), out) ||
           FindInBucket(k, fast_map_to_buckets(h2(tk)), out);
  }

  // Prefetches both candidate buckets' key arrays into local cache ahead of
  // a subsequent Find(k).
  void PrefetchKey(const key_type k) const {
    const uint64 tk = key_transform(k);
    absl::PrefetchToLocalCache(&buckets_[fast_map_to_buckets(tk)].keys);
    absl::PrefetchToLocalCache(&buckets_[fast_map_to_buckets(h2(tk))].keys);
  }

  // NOTE(review): only counts the object and queue footprint; the heap
  // storage behind buckets_ (num_buckets_ * sizeof(Bucket)) is not included
  // -- confirm whether that is intentional.
  int64_t MemoryUsed() const {
    return sizeof(PresizedCuckooMap<value>) + sizeof(CuckooPathQueue);
  }

 private:
  static constexpr int kSlotsPerBucket = 4;
  // Target fraction of slots occupied when filled to the requested size.
  static constexpr double kLoadFactor = 0.85;
  // Depth bound of the BFS that searches for a displacement path.
  static constexpr uint8 kMaxBFSPathLen = 5;
  // 2 + 8 + 32 + 128 + 512: maximum pushes for fan-out 4, depth < 5, so the
  // ring buffer below can never wrap.
  static constexpr int kMaxQueueSize = 682;
  // 2 + 8 + 32 + 128: maximum interior BFS nodes recorded in visited_.
  static constexpr int kVisitedListSize = 170;
  static constexpr int kNoSpace = -1;
  // Sentinel key marking an empty slot (hence key_transform below).
  static constexpr uint64 kUnusedSlot = ~(0ULL);

  // Keys and values stored slot-parallel so a key scan touches one array.
  struct Bucket {
    key_type keys[kSlotsPerBucket];
    value values[kSlotsPerBucket];
  };

  // One node of the BFS over candidate displacement paths.
  struct CuckooPathEntry {
    uint64 bucket;
    int depth;
    int parent;       // index into visited_
    int parent_slot;  // slot in the parent's bucket that leads here
  };

  // Fixed-size ring buffer used as the BFS queue.  No overflow checks:
  // kMaxQueueSize is sized so the depth-bounded search cannot wrap.
  class CuckooPathQueue {
   public:
    CuckooPathQueue() : head_(0), tail_(0) {}
    void push_back(CuckooPathEntry e) {
      queue_[tail_] = e;
      tail_ = (tail_ + 1) % kMaxQueueSize;
    }
    CuckooPathEntry pop_front() {
      CuckooPathEntry& e = queue_[head_];
      head_ = (head_ + 1) % kMaxQueueSize;
      return e;
    }
    bool empty() const { return head_ == tail_; }
    bool full() const { return ((tail_ + 1) % kMaxQueueSize) == head_; }
    void reset() { head_ = tail_ = 0; }

   private:
    CuckooPathEntry queue_[kMaxQueueSize];
    int head_;
    int tail_;
  };
  typedef std::array<CuckooPathEntry, kMaxBFSPathLen> CuckooPath;

  // Remaps the reserved empty-slot sentinel (~0ULL) to 0 so a stored key can
  // never equal kUnusedSlot.
  inline uint64 key_transform(const key_type k) const {
    return k + (k == kUnusedSlot);
  }

  // Second hash: swap the 32-bit halves and multiply by a large odd
  // constant, yielding the alternate bucket stream.
  inline uint64 h2(uint64 h) const {
    const uint64 m = 0xc6a4a7935bd1e995;
    return m * ((h >> 32) | (h << 32));
  }

  // Given (transformed) key k currently living in bucket b, returns its
  // other candidate bucket.
  inline uint64 alt_bucket(key_type k, uint64 b) const {
    if (fast_map_to_buckets(k) != b) {
      return fast_map_to_buckets(k);
    }
    return fast_map_to_buckets(h2(k));
  }

  // Writes (k, v) into the given bucket/slot unconditionally.
  inline void InsertInternal(key_type k, const value& v, uint64 b, int slot) {
    Bucket* bptr = &buckets_[b];
    bptr->keys[slot] = k;
    bptr->values[slot] = v;
  }

  // Linear scan of one bucket for k; copies the value out on a hit.
  bool FindInBucket(key_type k, uint64 b, value* out) const {
    const Bucket& bref = buckets_[b];
    for (int i = 0; i < kSlotsPerBucket; i++) {
      if (bref.keys[i] == k) {
        *out = bref.values[i];
        return true;
      }
    }
    return false;
  }

  // Returns the index of a free slot in `bucket`, or kNoSpace.
  inline int SpaceAvailable(uint64 bucket) const {
    const Bucket& bref = buckets_[bucket];
    for (int i = 0; i < kSlotsPerBucket; i++) {
      if (bref.keys[i] == kUnusedSlot) {
        return i;
      }
    }
    return kNoSpace;
  }

  // Copies one key/value pair between slots (the source slot is left stale;
  // callers overwrite it afterwards).
  inline void CopyItem(uint64 src_bucket, int src_slot, uint64 dst_bucket,
                       int dst_slot) {
    Bucket& src_ref = buckets_[src_bucket];
    Bucket& dst_ref = buckets_[dst_bucket];
    dst_ref.keys[dst_slot] = src_ref.keys[src_slot];
    dst_ref.values[dst_slot] = src_ref.values[src_slot];
  }

  // BFS for a shortest chain of displacements that ends in a bucket with a
  // free slot; the chain is then replayed backwards to open a slot in b1 or
  // b2, and k is inserted.  Returns false when no path of length
  // < kMaxBFSPathLen exists.
  bool CuckooInsert(key_type k, const value& v, uint64 b1, uint64 b2) {
    int visited_end = 0;
    cpq_->reset();
    cpq_->push_back({b1, 1, 0, 0});
    cpq_->push_back({b2, 1, 0, 0});
    while (!cpq_->empty()) {
      CuckooPathEntry e = cpq_->pop_front();
      int free_slot;
      free_slot = SpaceAvailable(e.bucket);
      if (free_slot != kNoSpace) {
        // Found room: walk back up the path, shifting each displaced entry
        // into the free slot of its child bucket.
        while (e.depth > 1) {
          CuckooPathEntry parent = visited_[e.parent];
          CopyItem(parent.bucket, e.parent_slot, e.bucket, free_slot);
          free_slot = e.parent_slot;
          e = parent;
        }
        InsertInternal(k, v, e.bucket, free_slot);
        return true;
      } else {
        if (e.depth < (kMaxBFSPathLen)) {
          auto parent_index = visited_end;
          visited_[visited_end] = e;
          visited_end++;
          // Rotate the starting slot by key and bucket so different inserts
          // probe slots in different orders.
          int start_slot = (k + e.bucket) % kSlotsPerBucket;
          const Bucket& bref = buckets_[e.bucket];
          for (int i = 0; i < kSlotsPerBucket; i++) {
            int slot = (start_slot + i) % kSlotsPerBucket;
            uint64 next_bucket = alt_bucket(bref.keys[slot], e.bucket);
            // Don't bounce straight back to the bucket we came from.
            uint64 e_parent_bucket = visited_[e.parent].bucket;
            if (next_bucket != e_parent_bucket) {
              cpq_->push_back({next_bucket, e.depth + 1, parent_index, slot});
            }
          }
        }
      }
    }
    LOG(WARNING) << "Cuckoo path finding failed: Table too small?";
    return false;
  }

  // Maps x into [0, num_buckets_) via the high word of the 128-bit product
  // (a multiply-shift alternative to x % num_buckets_).
  inline uint64 fast_map_to_buckets(uint64 x) const {
    return absl::Uint128High64(absl::uint128(x) * absl::uint128(num_buckets_));
  }

  uint64 num_buckets_;
  std::vector<Bucket> buckets_;
  std::unique_ptr<CuckooPathQueue> cpq_;
  CuckooPathEntry visited_[kVisitedListSize];

  PresizedCuckooMap(const PresizedCuckooMap&) = delete;
  void operator=(const PresizedCuckooMap&) = delete;
};
}
#endif | #include "tensorflow/core/util/presized_cuckoo_map.h"
#include <array>
#include <vector>
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/fingerprint.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace {
// Insert then retrieve a single entry.
TEST(PresizedCuckooMapTest, Basic) {
  PresizedCuckooMap<int> pscm(1000);
  EXPECT_TRUE(pscm.InsertUnique(1, 2));
  int out;
  EXPECT_TRUE(pscm.Find(1, &out));
  EXPECT_EQ(out, 2);
}

// PrefetchKey must be safe for both present (1) and absent (2) keys; it only
// issues cache prefetches, so there is nothing to assert.
TEST(PresizedCuckooMapTest, Prefetch) {
  PresizedCuckooMap<int64_t> pscm(2);
  EXPECT_TRUE(pscm.InsertUnique(1, 2));
  pscm.PrefetchKey(1);
  pscm.PrefetchKey(2);
}
// Fill the table to its declared capacity, then keep inserting until an
// insert fails.  Every key inserted before the failure must still be
// retrievable afterwards.
TEST(PresizedCuckooMapTest, TooManyItems) {
  static constexpr int kTableSize = 1000;
  PresizedCuckooMap<int> pscm(kTableSize);
  // Inserting up to the declared capacity must always succeed.
  for (uint64 i = 0; i < kTableSize; i++) {
    uint64 key =
        Fingerprint64(string(reinterpret_cast<char *>(&i), sizeof(int64_t)));
    ASSERT_TRUE(pscm.InsertUnique(key, i));
  }
  // Overfill until the cuckoo search gives up; record where it failed.
  uint64 failed_at = 0;
  for (uint64 i = kTableSize; i < (2 * kTableSize); i++) {
    uint64 key =
        Fingerprint64(string(reinterpret_cast<char *>(&i), sizeof(int64_t)));
    if (!pscm.InsertUnique(key, i)) {
      failed_at = i;
      break;
    }
  }
  EXPECT_NE(failed_at, 0);
  // All successfully inserted entries are intact despite the failed insert.
  for (uint64 i = 0; i < failed_at; i++) {
    int out;
    uint64 key =
        Fingerprint64(string(reinterpret_cast<char *>(&i), sizeof(int64_t)));
    EXPECT_TRUE(pscm.Find(key, &out));
    EXPECT_EQ(out, i);
  }
}
// A map sized for zero entries must still be queryable (and find nothing).
TEST(PresizedCuckooMapTest, ZeroSizeMap) {
  PresizedCuckooMap<int> pscm(0);
  int out;
  for (uint64 i = 0; i < 100; i++) {
    EXPECT_FALSE(pscm.Find(i, &out));
  }
}

// Clear() must fully reset the table: entries readable before each Clear,
// gone after it, across many cycles.
TEST(PresizedCuckooMapTest, RepeatedClear) {
  PresizedCuckooMap<int> pscm(2);
  int out;
  for (int i = 0; i < 100; ++i) {
    pscm.InsertUnique(0, 0);
    pscm.InsertUnique(1, 1);
    EXPECT_TRUE(pscm.Find(0, &out));
    EXPECT_EQ(0, out);
    EXPECT_TRUE(pscm.Find(1, &out));
    EXPECT_EQ(1, out);
    pscm.Clear(2);
    EXPECT_FALSE(pscm.Find(0, &out));
    EXPECT_FALSE(pscm.Find(1, &out));
  }
}
// Fills a table to its declared capacity with fingerprinted keys and
// verifies that every key reads back its value.
void RunFill(int64_t table_size) {
  PresizedCuckooMap<int> pscm(table_size);
  for (int64_t i = 0; i < table_size; i++) {
    uint64 key =
        Fingerprint64(string(reinterpret_cast<char *>(&i), sizeof(int64_t)));
    EXPECT_TRUE(pscm.InsertUnique(key, i));
  }
  for (int64_t i = 0; i < table_size; i++) {
    uint64 key =
        Fingerprint64(string(reinterpret_cast<char *>(&i), sizeof(int64_t)));
    int out;
    EXPECT_TRUE(pscm.Find(key, &out));
    EXPECT_EQ(out, i);
  }
}

// Exercises fill/readback across several orders of magnitude of table size
// (10, 710, 50410, 3579110).
TEST(PresizedCuckooMapTest, Fill) {
  for (int64_t table_size = 10; table_size <= 5000000; table_size *= 71) {
    RunFill(table_size);
  }
}
// Re-inserting an existing key must fail (InsertUnique semantics).
TEST(PresizedCuckooMapTest, Duplicates) {
  static constexpr int kSmallTableSize = 1000;
  PresizedCuckooMap<int> pscm(kSmallTableSize);
  for (uint64 i = 0; i < kSmallTableSize; i++) {
    uint64 key =
        Fingerprint64(string(reinterpret_cast<char *>(&i), sizeof(uint64)));
    EXPECT_TRUE(pscm.InsertUnique(key, i));
  }
  // Second pass with identical keys: every insert must be rejected.
  for (uint64 i = 0; i < kSmallTableSize; i++) {
    uint64 key =
        Fingerprint64(string(reinterpret_cast<char *>(&i), sizeof(uint64)));
    EXPECT_FALSE(pscm.InsertUnique(key, i));
  }
}
// Precomputes `num` fingerprinted keys (one per index) so the benchmark
// loops below do not measure hashing time.
static void CalculateKeys(uint64 num, std::vector<uint64> *dst) {
  dst->clear();
  dst->reserve(num);
  for (uint64 i = 0; i < num; ++i) {
    dst->push_back(
        Fingerprint64(string(reinterpret_cast<char *>(&i), sizeof(uint64))));
  }
}
// Benchmarks constructing and filling a table of state.range(0) entries;
// keys are precomputed so only map work is timed.
void BM_CuckooFill(::testing::benchmark::State &state) {
  const int arg = state.range(0);
  uint64 table_size = arg;
  std::vector<uint64> calculated_keys;
  CalculateKeys(table_size, &calculated_keys);
  for (auto s : state) {
    PresizedCuckooMap<int> pscm(table_size);
    for (uint64 i = 0; i < table_size; i++) {
      pscm.InsertUnique(calculated_keys[i], i);
    }
  }
}
BENCHMARK(BM_CuckooFill)->Arg(1000)->Arg(10000000);
// Benchmarks Find() throughput on a pre-filled table, cycling through the
// precomputed keys in order.
void BM_CuckooRead(::testing::benchmark::State &state) {
  const int arg = state.range(0);
  uint64 table_size = arg;
  std::vector<uint64> calculated_keys;
  CalculateKeys(table_size, &calculated_keys);
  PresizedCuckooMap<int> pscm(table_size);
  for (uint64 i = 0; i < table_size; i++) {
    pscm.InsertUnique(calculated_keys[i], i);
  }
  int i = 0;
  for (auto s : state) {
    // Wrap around so each iteration looks up a key that is present.
    uint64 key_index = i;
    ++i;
    if (i == table_size) i = 0;
    int out = 0;
    pscm.Find(calculated_keys[key_index], &out);
    // Keep the compiler from eliding the lookup.
    tensorflow::testing::DoNotOptimize(out);
  }
}
BENCHMARK(BM_CuckooRead)->Arg(1000)->Arg(10000000);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/presized_cuckoo_map.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/presized_cuckoo_map_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5b87bef5-aef6-4c86-af67-6588098ca436 | cpp | tensorflow/tensorflow | overflow | tensorflow/core/util/overflow.h | tensorflow/core/util/overflow_test.cc | #ifndef TENSORFLOW_CORE_UTIL_OVERFLOW_H_
#define TENSORFLOW_CORE_UTIL_OVERFLOW_H_
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
inline int64_t MultiplyWithoutOverflow(int64_t x, int64_t y) {
if (TF_PREDICT_FALSE(x < 0)) return -1;
if (TF_PREDICT_FALSE(y < 0)) return -1;
if (TF_PREDICT_FALSE(x == 0)) return 0;
const uint64 ux = x;
const uint64 uy = y;
const uint64 uxy = ux * uy;
if (TF_PREDICT_FALSE((ux | uy) >> 32 != 0)) {
if (uxy / ux != uy) return -1;
}
return static_cast<int64_t>(uxy);
}
// Returns x + y for nonnegative operands, or -1 when either operand is
// negative.  On overflow the unsigned sum wraps into the sign bit, so the
// result is negative; callers must treat any negative result as overflow.
inline int64_t AddWithoutOverflow(int64_t x, int64_t y) {
  // Fix: the branch hint previously wrapped only (x < 0), leaving the
  // (y < 0) half of the rarely-true condition unhinted.
  if (TF_PREDICT_FALSE(x < 0 || y < 0)) return -1;
  // Both operands are nonnegative, so the unsigned addition is well-defined;
  // a sum >= 2^63 becomes negative after the cast back to int64_t.
  const uint64 ux = x;
  const uint64 uy = y;
  const uint64 uxy = ux + uy;
  return static_cast<int64_t>(uxy);
}
}
#endif | #include "tensorflow/core/util/overflow.h"
#include <cmath>
#include <limits>
#include <vector>
#ifdef PLATFORM_WINDOWS
#include <Windows.h>
#endif
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
// Oracle for MultiplyWithoutOverflow: returns true iff x * y (nonnegative
// inputs) overflows int64.  On Windows it checks the high 64 bits of the
// product; elsewhere it compares a long double product against INT64_MAX.
// NOTE(review): a product in [2^63, 2^64) has a zero high word yet still
// overflows int64, and long double precision is limited near INT64_MAX --
// presumed adequate for the inputs generated below; verify if new edge
// cases are added.
bool HasMultiplyOverflow(int64_t x, int64_t y) {
#ifdef PLATFORM_WINDOWS
  return ::MultiplyHigh(x, y) != 0;
#else
  long double dxy = static_cast<long double>(x) * static_cast<long double>(y);
  return dxy > std::numeric_limits<int64_t>::max();
#endif
}
// Oracle for AddWithoutOverflow: returns true iff x + y (nonnegative inputs)
// does not fit in a signed 64-bit integer.  The addition is performed in two
// 32-bit halves so the intermediate sums themselves can never overflow.
bool HasAddOverflow(int64_t x, int64_t y) {
  const int64_t low_sum = (x & 0xffffffff) + (y & 0xffffffff);
  const int64_t carry = low_sum >> 32;
  const int64_t high_sum = (x >> 32) + (y >> 32) + carry;
  // The sum overflows exactly when the high half reaches bit 63 (2^31 in
  // the shifted representation).
  return high_sum >= (static_cast<int64_t>(1) << 31);
}
// Cross-checks MultiplyWithoutOverflow and AddWithoutOverflow against the
// oracles above on a grid of "interesting" nonnegative values: 0, INT64_MAX,
// every power of two +/- 1, and values around 2^32 and 2^31.5.
TEST(OverflowTest, Nonnegative) {
  std::vector<int64_t> interesting = {
      0,
      std::numeric_limits<int64_t>::max(),
  };
  for (int i = 0; i < 63; i++) {
    int64_t bit = static_cast<int64_t>(1) << i;
    interesting.push_back(bit);
    interesting.push_back(bit + 1);
    interesting.push_back(bit - 1);
  }
  for (const int64_t mid : {static_cast<int64_t>(1) << 32,
                            static_cast<int64_t>(std::pow(2, 63.0 / 2))}) {
    for (int i = -5; i < 5; i++) {
      interesting.push_back(mid + i);
    }
  }
  // Overflow is signaled by a negative result; otherwise the exact value is
  // expected.
  for (int64_t x : interesting) {
    for (int64_t y : interesting) {
      int64_t xmy = MultiplyWithoutOverflow(x, y);
      if (HasMultiplyOverflow(x, y)) {
        EXPECT_LT(xmy, 0) << x << " " << y;
      } else {
        EXPECT_EQ(x * y, xmy) << x << " " << y;
      }
      int64_t xpy = AddWithoutOverflow(x, y);
      if (HasAddOverflow(x, y)) {
        EXPECT_LT(xpy, 0) << x << " " << y;
      } else {
        EXPECT_EQ(x + y, xpy) << x << " " << y;
      }
    }
  }
}
// Any negative operand must yield a negative (error) result from both
// helpers, regardless of the other operand.
TEST(OverflowTest, Negative) {
  const int64_t negatives[] = {-1, std::numeric_limits<int64_t>::min()};
  for (const int64_t n : negatives) {
    EXPECT_LT(MultiplyWithoutOverflow(n, 0), 0) << n;
    EXPECT_LT(MultiplyWithoutOverflow(0, n), 0) << n;
    EXPECT_LT(MultiplyWithoutOverflow(n, n), 0) << n;
    EXPECT_LT(AddWithoutOverflow(n, 0), 0) << n;
    EXPECT_LT(AddWithoutOverflow(0, n), 0) << n;
    EXPECT_LT(AddWithoutOverflow(n, n), 0) << n;
  }
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/overflow.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/overflow_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6d9f8664-2194-465f-8bbc-99225991b66c | cpp | tensorflow/tensorflow | matmul_bcast | tensorflow/core/util/matmul_bcast.h | tensorflow/core/util/matmul_bcast_test.cc | #ifndef TENSORFLOW_CORE_UTIL_MATMUL_BCAST_H_
#define TENSORFLOW_CORE_UTIL_MATMUL_BCAST_H_
#include <algorithm>
#include <memory>
#include <utility>
#include <vector>
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
#include "tensorflow/core/util/bcast.h"
namespace tensorflow {
class MatMulBCast {
public:
using Vec = BCast::Vec;
MatMulBCast(const Vec& x, const Vec& y) {
if (std::max(x.size(), y.size()) == 2) return;
const Vec x_resized(x.begin(), x.end() - 2);
const Vec y_resized(y.begin(), y.end() - 2);
batch_bcast_ =
std::make_unique<BCast>(std::move(x_resized), std::move(y_resized));
if (!batch_bcast_->IsValid()) {
broadcasting_required_ = true;
return;
}
x_batch_size_ = TensorShape(batch_bcast_->x_reshape()).num_elements();
y_batch_size_ = TensorShape(batch_bcast_->y_reshape()).num_elements();
output_batch_shape_ = TensorShape(batch_bcast_->output_shape());
output_batch_size_ = output_batch_shape_.num_elements();
broadcasting_required_ =
std::min(x_batch_size_, y_batch_size_) != output_batch_size_;
if (broadcasting_required_) {
ComputeBatchIndices(output_batch_size_, batch_bcast_->x_reshape(),
batch_bcast_->x_bcast(), &x_batch_indices_);
ComputeBatchIndices(output_batch_size_, batch_bcast_->y_reshape(),
batch_bcast_->y_bcast(), &y_batch_indices_);
}
}
bool IsValid() const {
return !broadcasting_required_ || (batch_bcast_ && batch_bcast_->IsValid());
}
bool IsBroadcastingRequired() const { return broadcasting_required_; }
int64_t output_batch_size() const { return output_batch_size_; }
int64_t x_batch_size() const { return x_batch_size_; }
int64_t y_batch_size() const { return y_batch_size_; }
const TensorShape& output_batch_shape() const { return output_batch_shape_; }
const std::vector<int64_t>& x_batch_indices() const {
return x_batch_indices_;
}
const std::vector<int64_t>& y_batch_indices() const {
return y_batch_indices_;
}
private:
std::unique_ptr<BCast> batch_bcast_;
bool broadcasting_required_ = false;
int64_t x_batch_size_ = 1;
int64_t y_batch_size_ = 1;
TensorShape output_batch_shape_;
int64_t output_batch_size_ = 1;
std::vector<int64_t> x_batch_indices_;
std::vector<int64_t> y_batch_indices_;
};
}
#endif | #include "tensorflow/core/util/matmul_bcast.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
// Renders a bcast as "[output batch shape][x indices][y indices]" for
// compact assertions, or "invalid" when the broadcast failed.
string MatMulBCastToStr(const MatMulBCast& b) {
  if (!b.IsValid()) {
    return "invalid";
  }
  string ret;
  strings::StrAppend(
      &ret, "[", absl::StrJoin(b.output_batch_shape().dim_sizes(), ","), "]");
  strings::StrAppend(&ret, "[", absl::StrJoin(b.x_batch_indices(), ","), "]");
  strings::StrAppend(&ret, "[", absl::StrJoin(b.y_batch_indices(), ","), "]");
  return ret;
}
// Batch dim 1 broadcasts against batch dim 4.
TEST(MatMulBCastTest, SimpleBroadcast) {
  MatMulBCast bcast({1, 5, 3}, {4, 3, 7});
  EXPECT_TRUE(bcast.IsValid());
  EXPECT_TRUE(bcast.IsBroadcastingRequired());
  EXPECT_EQ(1, bcast.x_batch_size());
  EXPECT_EQ(4, bcast.y_batch_size());
  EXPECT_EQ(4, bcast.output_batch_size());
  EXPECT_EQ("[4][0,0,0,0][0,1,2,3]", MatMulBCastToStr(bcast));
}

// Two plain matrices: no batch dimensions at all.
TEST(MatMulBCastTest, EmptyBatchBroadcast) {
  MatMulBCast bcast({5, 3}, {3, 7});
  EXPECT_TRUE(bcast.IsValid());
  EXPECT_FALSE(bcast.IsBroadcastingRequired());
  EXPECT_EQ(1, bcast.x_batch_size());
  EXPECT_EQ(1, bcast.y_batch_size());
  EXPECT_EQ(1, bcast.output_batch_size());
  EXPECT_EQ("[][][]", MatMulBCastToStr(bcast));
}

// Identical batch shapes: valid but no broadcasting (and no index tables).
TEST(MatMulBCastTest, BroadcastingNotRequired) {
  MatMulBCast bcast({2, 4, 6, 5, 3}, {2, 4, 6, 3, 7});
  EXPECT_TRUE(bcast.IsValid());
  EXPECT_FALSE(bcast.IsBroadcastingRequired());
  EXPECT_EQ(48, bcast.x_batch_size());
  EXPECT_EQ(48, bcast.y_batch_size());
  EXPECT_EQ(48, bcast.output_batch_size());
  EXPECT_EQ("[2,4,6][][]", MatMulBCastToStr(bcast));
}
// One operand has batch dimensions, the other does not (in either order);
// the batch-less operand is broadcast across the other's batches.
TEST(MatMulBCastTest, EmptyWithNonEmptyBatchBroadcast) {
  MatMulBCast bcast1({5, 3}, {6, 3, 7});
  EXPECT_TRUE(bcast1.IsValid());
  EXPECT_TRUE(bcast1.IsBroadcastingRequired());
  EXPECT_EQ(1, bcast1.x_batch_size());
  EXPECT_EQ(6, bcast1.y_batch_size());
  EXPECT_EQ(6, bcast1.output_batch_size());
  EXPECT_EQ("[6][0,0,0,0,0,0][0,1,2,3,4,5]", MatMulBCastToStr(bcast1));
  MatMulBCast bcast2({2, 5, 3}, {3, 7});
  EXPECT_TRUE(bcast2.IsValid());
  EXPECT_TRUE(bcast2.IsBroadcastingRequired());
  EXPECT_EQ(2, bcast2.x_batch_size());
  EXPECT_EQ(1, bcast2.y_batch_size());
  EXPECT_EQ(2, bcast2.output_batch_size());
  EXPECT_EQ("[2][0,1][0,0]", MatMulBCastToStr(bcast2));
}
// Operand pairs with at most rank 2 have no batch dimensions to broadcast;
// the bcast must be trivially valid.  (Renamed from the misspelled
// "NoBathcDimensions".)
TEST(MatMulBCastTest, NoBatchDimensions) {
  MatMulBCast bcast1({3, 3}, {3});
  EXPECT_TRUE(bcast1.IsValid());
  MatMulBCast bcast2({3}, {3, 3});
  EXPECT_TRUE(bcast2.IsValid());
  MatMulBCast bcast3({3, 3}, {3, 3});
  EXPECT_TRUE(bcast3.IsValid());
}
// Incompatible batch dimensions must make the bcast invalid.
TEST(MatMulBCastTest, InvalidDimensions) {
  MatMulBCast bcast3({4, 5, 3}, {2, 3, 7});
  EXPECT_FALSE(bcast3.IsValid());
  MatMulBCast bcast4({2, 1, 5, 3}, {1, 3, 1, 3, 7});
  EXPECT_FALSE(bcast4.IsValid());
}

// Both operands have a broadcastable (size-1) batch dimension.
TEST(MatMulBCastTest, BroadcastBothOperands) {
  MatMulBCast bcast({3, 1, 5, 3}, {1, 4, 3, 7});
  EXPECT_TRUE(bcast.IsValid());
  EXPECT_EQ(3, bcast.x_batch_size());
  EXPECT_EQ(4, bcast.y_batch_size());
  EXPECT_EQ(12, bcast.output_batch_size());
  EXPECT_EQ("[3,4][0,0,0,0,1,1,1,1,2,2,2,2][0,1,2,3,0,1,2,3,0,1,2,3]",
            MatMulBCastToStr(bcast));
}

// Operands of different rank: the shorter one is left-padded with 1s.
TEST(MatMulBCastTest, DifferentRanks) {
  MatMulBCast bcast({3, 1, 5, 3}, {2, 1, 2, 3, 7});
  EXPECT_TRUE(bcast.IsValid());
  EXPECT_EQ(3, bcast.x_batch_size());
  EXPECT_EQ(4, bcast.y_batch_size());
  EXPECT_EQ(12, bcast.output_batch_size());
  EXPECT_EQ("[2,3,2][0,0,1,1,2,2,0,0,1,1,2,2][0,1,0,1,0,1,2,3,2,3,2,3]",
            MatMulBCastToStr(bcast));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/matmul_bcast.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/matmul_bcast_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
95ee4ca1-2dec-41c4-beaa-acb7667b5f7b | cpp | tensorflow/tensorflow | ctc_beam_search | tensorflow/lite/kernels/ctc/ctc_beam_search.h | tensorflow/core/util/ctc/ctc_beam_search_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_CTC_CTC_BEAM_SEARCH_H_
#define TENSORFLOW_LITE_KERNELS_CTC_CTC_BEAM_SEARCH_H_
#include <algorithm>
#include <cmath>
#include <limits>
#include <memory>
#include <vector>
#include "Eigen/Core"
#include "tensorflow/lite/kernels/ctc/ctc_beam_entry.h"
#include "tensorflow/lite/kernels/ctc/ctc_beam_scorer.h"
#include "tensorflow/lite/kernels/ctc/ctc_decoder.h"
#include "tensorflow/lite/kernels/ctc/ctc_loss_util.h"
#include "tensorflow/lite/kernels/ctc/top_n.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
namespace tflite {
namespace custom {
namespace ctc {
// Beam-search decoder for CTC (connectionist temporal classification)
// outputs.  CTCBeamState carries per-beam scorer state; CTCBeamComparer
// orders the beams kept in leaves_.
template <typename CTCBeamState = ctc_beam_search::EmptyBeamState,
          typename CTCBeamComparer =
              ctc_beam_search::BeamComparer<CTCBeamState>>
class CTCBeamSearchDecoder : public CTCDecoder {
  typedef ctc_beam_search::BeamEntry<CTCBeamState> BeamEntry;
  typedef ctc_beam_search::BeamRoot<CTCBeamState> BeamRoot;
  typedef ctc_beam_search::BeamProbability BeamProbability;

 public:
  typedef BaseBeamScorer<CTCBeamState> DefaultBeamScorer;

  // `scorer` is not owned and must outlive the decoder.
  CTCBeamSearchDecoder(int num_classes, int beam_width,
                       BaseBeamScorer<CTCBeamState>* scorer, int batch_size = 1,
                       bool merge_repeated = false)
      : CTCDecoder(num_classes, batch_size, merge_repeated),
        beam_width_(beam_width),
        leaves_(beam_width),
        beam_scorer_(scorer) {
    Reset();
  }
  ~CTCBeamSearchDecoder() override {}

  // Runs decoding over `input` (one logits matrix per timestep) and fills
  // `output` (one Output per requested top path) plus per-path `scores`
  // (negated log-probabilities).
  bool Decode(const CTCDecoder::SequenceLength& seq_len,
              const std::vector<CTCDecoder::Input>& input,
              std::vector<CTCDecoder::Output>* output,
              CTCDecoder::ScoreOutput* scores) override;

  // Advances the search by one timestep of raw (unnormalized) logits.
  template <typename Vector>
  void Step(const Vector& raw_input);

  // Selects the K largest non-blank logits into the output vectors and
  // returns the overall max logit (blank class included).
  template <typename Vector>
  float GetTopK(const int K, const Vector& input,
                std::vector<float>* top_k_logits,
                std::vector<int>* top_k_indices);
  BaseBeamScorer<CTCBeamState>* GetBeamScorer() const { return beam_scorer_; }

  // Restricts each Step to expanding only the `label_selection_size` best
  // labels and/or labels within `label_selection_margin` of the step's max.
  void SetLabelSelectionParameters(int label_selection_size,
                                   float label_selection_margin) {
    label_selection_size_ = label_selection_size;
    label_selection_margin_ = label_selection_margin;
  }

  // Resets the search to a single empty root beam.
  void Reset();

  // Extracts the n best label sequences and their log-probabilities.
  bool TopPaths(int n, std::vector<std::vector<int>>* paths,
                std::vector<float>* log_probs, bool merge_repeated) const;

 private:
  int beam_width_;
  // <= 0 disables top-K selection; margin < 0 disables margin filtering.
  int label_selection_size_ = 0;
  float label_selection_margin_ = -1;
  gtl::TopN<BeamEntry*, CTCBeamComparer> leaves_;
  std::unique_ptr<BeamRoot> beam_root_;
  BaseBeamScorer<CTCBeamState>* beam_scorer_;  // not owned

  CTCBeamSearchDecoder(const CTCBeamSearchDecoder&) = delete;
  void operator=(const CTCBeamSearchDecoder&) = delete;
};
// Decodes each batch entry independently: runs Step() over its timesteps,
// lets the scorer fold in end-of-sequence state, then extracts the top
// output->size() paths.  Returns false when the outputs/scores are too
// small or TopPaths fails.
template <typename CTCBeamState, typename CTCBeamComparer>
bool CTCBeamSearchDecoder<CTCBeamState, CTCBeamComparer>::Decode(
    const CTCDecoder::SequenceLength& seq_len,
    const std::vector<CTCDecoder::Input>& input,
    std::vector<CTCDecoder::Output>* output, ScoreOutput* scores) {
  std::vector<std::vector<int>> beams;
  std::vector<float> beam_log_probabilities;
  int top_n = output->size();
  // Every requested path needs one slot per batch entry.
  if (std::any_of(output->begin(), output->end(),
                  [this](const CTCDecoder::Output& output) -> bool {
                    return output.size() < this->batch_size_;
                  })) {
    return false;
  }
  if (scores->rows() < batch_size_ || scores->cols() < top_n) {
    return false;
  }
  for (int b = 0; b < batch_size_; ++b) {
    int seq_len_b = seq_len[b];
    // Fresh search state for each batch entry.
    Reset();
    for (int t = 0; t < seq_len_b; ++t) {
      Step(input[t].row(b));
    }
    // Pull all beams out so the scorer can add its end-of-sequence
    // contribution, then push them back.
    std::unique_ptr<std::vector<BeamEntry*>> branches(leaves_.Extract());
    leaves_.Reset();
    for (int i = 0; i < branches->size(); ++i) {
      BeamEntry* entry = (*branches)[i];
      beam_scorer_->ExpandStateEnd(&entry->state);
      entry->newp.total +=
          beam_scorer_->GetStateEndExpansionScore(entry->state);
      leaves_.push(entry);
    }
    bool status =
        TopPaths(top_n, &beams, &beam_log_probabilities, merge_repeated_);
    if (!status) {
      return status;
    }
    TFLITE_DCHECK_EQ(top_n, beam_log_probabilities.size());
    TFLITE_DCHECK_EQ(beams.size(), beam_log_probabilities.size());
    for (int i = 0; i < top_n; ++i) {
      // Scores are reported as negated log-probabilities.
      (*output)[i][b].swap(beams[i]);
      (*scores)(b, i) = -beam_log_probabilities[i];
    }
  }
  return true;
}
// Fills top_k_logits/top_k_indices with the K largest logits among the
// non-blank classes (descending order), using a bounded insertion sort.
// Returns the maximum over ALL classes including the blank (last) class,
// which is excluded from the top-K lists but still competes for the max.
template <typename CTCBeamState, typename CTCBeamComparer>
template <typename Vector>
float CTCBeamSearchDecoder<CTCBeamState, CTCBeamComparer>::GetTopK(
    const int K, const Vector& input, std::vector<float>* top_k_logits,
    std::vector<int>* top_k_indices) {
  TFLITE_DCHECK_EQ(num_classes_, input.size());
  top_k_logits->clear();
  top_k_indices->clear();
  top_k_logits->resize(K, -INFINITY);
  top_k_indices->resize(K, -1);
  // Only the non-blank classes [0, num_classes_-1) participate.
  for (int cls = 0; cls < num_classes_ - 1; ++cls) {
    const float score = input(cls);
    if (score > (*top_k_logits)[K - 1]) {
      // Insertion step: shift strictly-smaller entries down one position,
      // then drop the new score into the opened slot.
      int pos = K - 1;
      for (; pos > 0 && score > (*top_k_logits)[pos - 1]; --pos) {
        (*top_k_logits)[pos] = (*top_k_logits)[pos - 1];
        (*top_k_indices)[pos] = (*top_k_indices)[pos - 1];
      }
      (*top_k_logits)[pos] = score;
      (*top_k_indices)[pos] = cls;
    }
  }
  return std::max((*top_k_logits)[0], input(num_classes_ - 1));
}
// Advances all beams by one timestep.  Phase 1 renormalizes the raw logits
// into log-probabilities; phase 2 updates every existing beam in place
// (same label sequence, either extended by blank or by a repeat of its last
// label); phase 3 expands each surviving beam by one new label, keeping at
// most beam_width_ leaves.
template <typename CTCBeamState, typename CTCBeamComparer>
template <typename Vector>
void CTCBeamSearchDecoder<CTCBeamState, CTCBeamComparer>::Step(
    const Vector& raw_input) {
  std::vector<float> top_k_logits;
  std::vector<int> top_k_indices;
  // Optional label selection: expand only the K best labels this step.
  const bool top_k =
      (label_selection_size_ > 0 && label_selection_size_ < raw_input.size());
  const int max_classes = top_k ? label_selection_size_ : (num_classes_ - 1);
  float max_coeff;
  if (top_k) {
    max_coeff = GetTopK(label_selection_size_, raw_input, &top_k_logits,
                        &top_k_indices);
  } else {
    max_coeff = raw_input.maxCoeff();
  }
  // Log-sum-exp normalizer, with max_coeff subtracted for numerical
  // stability: log p(j) = raw_input(j) - norm_offset.
  float logsumexp = 0.0;
  for (int j = 0; j < raw_input.size(); ++j) {
    logsumexp += Eigen::numext::exp(raw_input(j) - max_coeff);
  }
  logsumexp = Eigen::numext::log(logsumexp);
  float norm_offset = max_coeff + logsumexp;
  // Minimum raw logit a label needs to be considered for expansion.
  const float label_selection_input_min =
      (label_selection_margin_ >= 0) ? (max_coeff - label_selection_margin_)
                                     : -std::numeric_limits<float>::infinity();
  TFLITE_DCHECK_EQ(num_classes_, raw_input.size());
  // Pull the current beams out; they are re-pushed after being updated.
  std::unique_ptr<std::vector<BeamEntry*>> branches(leaves_.Extract());
  leaves_.Reset();
  for (BeamEntry* b : *branches) {
    // Freeze this step's starting probabilities.
    b->oldp = b->newp;
  }
  // Phase 2: update each beam in place (label sequence unchanged).
  for (BeamEntry* b : *branches) {
    if (b->parent != nullptr) {
      if (b->parent->Active()) {
        // If this beam ends in a repeat of its parent's last label, only the
        // parent's blank-terminated mass extends it (a repeat without an
        // intervening blank collapses under CTC).
        float previous = (b->label == b->parent->label) ? b->parent->oldp.blank
                                                        : b->parent->oldp.total;
        b->newp.label =
            LogSumExp(b->newp.label,
                      beam_scorer_->GetStateExpansionScore(b->state, previous));
      }
      // Multiply in this step's probability of emitting the final label.
      b->newp.label += raw_input(b->label) - norm_offset;
    }
    // Blank path: any previous mass followed by a blank emission.
    b->newp.blank = b->oldp.total + raw_input(blank_index_) - norm_offset;
    b->newp.total = LogSumExp(b->newp.blank, b->newp.label);
    leaves_.push(b);
  }
  // Phase 3: expand each beam by one new label.
  for (BeamEntry* b : *branches) {
    // A candidate must have nonzero mass and beat the current worst leaf
    // (or there must be room in the beam).
    auto is_candidate = [this](const BeamProbability& prob) {
      return (prob.total > kLogZero &&
              (leaves_.size() < beam_width_ ||
               prob.total > leaves_.peek_bottom()->newp.total));
    };
    if (!is_candidate(b->oldp)) {
      continue;
    }
    for (int ind = 0; ind < max_classes; ind++) {
      const int label = top_k ? top_k_indices[ind] : ind;
      const float logit = top_k ? top_k_logits[ind] : raw_input(ind);
      // Skip labels too far below the step's best logit.
      if (logit < label_selection_input_min) {
        continue;
      }
      BeamEntry& c = b->GetChild(label);
      if (!c.Active()) {
        // A freshly expanded child cannot end in blank.
        c.newp.blank = kLogZero;
        beam_scorer_->ExpandState(b->state, b->label, &c.state, c.label);
        // Repeated label: only the parent's blank-terminated mass carries
        // over (same CTC collapse rule as above).
        float previous = (c.label == b->label) ? b->oldp.blank : b->oldp.total;
        c.newp.label = logit - norm_offset +
                       beam_scorer_->GetStateExpansionScore(c.state, previous);
        c.newp.total = c.newp.label;
        if (is_candidate(c.newp)) {
          // Beam is full: the displaced worst leaf is deactivated.
          if (leaves_.size() == beam_width_) {
            BeamEntry* bottom = leaves_.peek_bottom();
            bottom->newp.Reset();
          }
          leaves_.push(&c);
        } else {
          // Not competitive: deactivate the child entirely.
          c.oldp.Reset();
          c.newp.Reset();
        }
      }
    }
  }
}
// Discards all current beams and restarts the search from a single empty
// root beam with probability one (log 0.0), all of it "blank".
template <typename CTCBeamState, typename CTCBeamComparer>
void CTCBeamSearchDecoder<CTCBeamState, CTCBeamComparer>::Reset() {
  leaves_.Reset();
  beam_root_ = std::make_unique<BeamRoot>(nullptr, -1);
  BeamEntry* root_entry = beam_root_->RootEntry();
  root_entry->newp.total = 0.0;
  root_entry->newp.blank = 0.0;
  leaves_.push(root_entry);
  // Let the scorer set up its per-beam state for the root.
  beam_scorer_->InitializeState(&root_entry->state);
}
// Copies the n best label sequences (and their total log-probabilities)
// out of the current leaves.  Fails when n exceeds the beam width or the
// number of live leaves.
template <typename CTCBeamState, typename CTCBeamComparer>
bool CTCBeamSearchDecoder<CTCBeamState, CTCBeamComparer>::TopPaths(
    int n, std::vector<std::vector<int>>* paths, std::vector<float>* log_probs,
    bool merge_repeated) const {
  TFLITE_DCHECK(paths);
  TFLITE_DCHECK(log_probs);
  paths->clear();
  log_probs->clear();
  if (n > beam_width_) {
    return false;
  }
  if (n > leaves_.size()) {
    return false;
  }
  // Sort into a second TopN so leaves_ itself is left untouched (this
  // method is const).
  gtl::TopN<BeamEntry*, CTCBeamComparer> top_branches(n);
  for (auto it = leaves_.unsorted_begin(); it != leaves_.unsorted_end(); ++it) {
    top_branches.push(*it);
  }
  std::unique_ptr<std::vector<BeamEntry*>> branches(top_branches.Extract());
  for (int i = 0; i < n; ++i) {
    BeamEntry* e((*branches)[i]);
    paths->push_back(e->LabelSeq(merge_repeated));
    log_probs->push_back(e->newp.total);
  }
  return true;
}
}
}
}
#endif | #include "tensorflow/core/util/ctc/ctc_beam_search.h"
#include <cmath>
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/test.h"
namespace {
// Shorthand for [timestep][batch][class] test input data.
template <class T>
using TestData = std::vector<std::vector<std::vector<T>>>;

// Beam state that remembers the full label sequence so the dictionary
// scorer below can match it against known words.
template <class T>
struct HistoryBeamState {
  T score;                  // scorer contribution for this label sequence
  std::vector<int> labels;  // labels emitted so far
};
// Beam scorer that rewards label sequences forming a prefix of a word in a
// hard-coded dictionary ({3} and {3, 1}); all other sequences are penalized
// (see SetStateScoreAccordingToDict).
template <class T, class BeamState>
class DictionaryBeamScorer
    : public tensorflow::ctc::BaseBeamScorer<T, BeamState> {
 public:
  DictionaryBeamScorer()
      : tensorflow::ctc::BaseBeamScorer<T, BeamState>(),
        dictionary_({{3}, {3, 1}}) {}
  void InitializeState(BeamState* root) const override { root->score = 0; }
  // Record the new label on the child state and rescore its full sequence.
  void ExpandState(const BeamState& from_state, int from_label,
                   BeamState* to_state, int to_label) const override {
    to_state->labels.push_back(to_label);
    SetStateScoreAccordingToDict(to_state);
  }
  void ExpandStateEnd(BeamState* state) const override {
    SetStateScoreAccordingToDict(state);
  }
  // The scorer's contribution is added on top of the previous path score.
  T GetStateExpansionScore(const BeamState& state,
                           T previous_score) const override {
    return previous_score + state.score;
  }
  T GetStateEndExpansionScore(const BeamState& state) const override {
    return state.score;
  }
  const std::vector<std::vector<int>> dictionary_;

 private:
  void SetStateScoreAccordingToDict(BeamState* state) const;
};
// Scores a state's label sequence: log(1) if it is a prefix of (or equal
// to) any dictionary word, log(0.01) otherwise.
template <class T, class BeamState>
void DictionaryBeamScorer<T, BeamState>::SetStateScoreAccordingToDict(
    BeamState* state) const {
  const std::vector<int>& candidate = state->labels;
  for (const std::vector<int>& word : dictionary_) {
    // A longer sequence can't be a prefix of this word.
    if (candidate.size() > word.size()) {
      continue;
    }
    const bool is_prefix = std::equal(
        word.begin(), word.begin() + candidate.size(), candidate.begin());
    if (is_prefix) {
      state->score = std::log(T(1.0));
      return;
    }
  }
  // Not on any dictionary path: apply the penalty score.
  state->score = std::log(T(0.01));
}
// Decodes a tiny hand-built batch with both the default scorer and the
// dictionary scorer, checking that the dictionary biases the top paths
// toward dictionary words ({3}, {3, 1} prefixes).
template <class T>
void ctc_beam_search_decoding_with_and_without_dictionary() {
  const int batch_size = 1;
  const int timesteps = 5;
  const int top_paths = 3;
  const int num_classes = 6;
  // Plain decoder with a wide beam so all candidates survive.
  typename tensorflow::ctc::CTCBeamSearchDecoder<T>::DefaultBeamScorer
      default_scorer;
  tensorflow::ctc::CTCBeamSearchDecoder<T> decoder(num_classes, 10 * top_paths,
                                                   &default_scorer);
  // Dictionary-biased decoder with history-tracking beam state.
  DictionaryBeamScorer<T, HistoryBeamState<T>> dictionary_scorer;
  tensorflow::ctc::CTCBeamSearchDecoder<T, HistoryBeamState<T>>
      dictionary_decoder(num_classes, top_paths, &dictionary_scorer);
  int sequence_lengths[batch_size] = {timesteps};
  // Per-timestep class probabilities (mass only on classes 1 and 3);
  // converted to log space below.
  T input_data_mat[timesteps][batch_size][num_classes] = {
      {{0, 0.6, 0, 0.4, 0, 0}},
      {{0, 0.5, 0, 0.5, 0, 0}},
      {{0, 0.4, 0, 0.6, 0, 0}},
      {{0, 0.4, 0, 0.6, 0, 0}},
      {{0, 0.4, 0, 0.6, 0, 0}}};
  for (int t = 0; t < timesteps; ++t) {
    for (int b = 0; b < batch_size; ++b) {
      for (int c = 0; c < num_classes; ++c) {
        input_data_mat[t][b][c] = std::log(input_data_mat[t][b][c]);
      }
    }
  }
  // Expected top paths without, then with, the dictionary bias.
  std::vector<typename tensorflow::ctc::CTCDecoder<T>::Output> expected_output =
      {
          {{1, 3}, {1, 3, 1}, {3, 1, 3}},
      };
  std::vector<typename tensorflow::ctc::CTCDecoder<T>::Output>
      expected_dict_output = {
          {{3}, {1, 3}, {3, 1}},
      };
  // Wrap the raw arrays in the Eigen maps the decoder API expects.
  Eigen::Map<const Eigen::ArrayXi> seq_len(&sequence_lengths[0], batch_size);
  std::vector<
      Eigen::Map<const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>>>
      inputs;
  inputs.reserve(timesteps);
  for (int t = 0; t < timesteps; ++t) {
    inputs.emplace_back(&input_data_mat[t][0][0], batch_size, num_classes);
  }
  std::vector<typename tensorflow::ctc::CTCDecoder<T>::Output> outputs(
      top_paths);
  for (typename tensorflow::ctc::CTCDecoder<T>::Output& output : outputs) {
    output.resize(batch_size);
  }
  T score[batch_size][top_paths] = {{0.0}};
  Eigen::Map<Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>> scores(
      &score[0][0], batch_size, top_paths);
  // Default scorer: the unconstrained most-likely paths.
  EXPECT_TRUE(decoder.Decode(seq_len, inputs, &outputs, &scores).ok());
  for (int path = 0; path < top_paths; ++path) {
    EXPECT_EQ(outputs[path][0], expected_output[0][path]);
  }
  // Dictionary scorer: paths matching dictionary prefixes are promoted.
  std::vector<typename tensorflow::ctc::CTCDecoder<T>::Output> dict_outputs(
      top_paths);
  for (typename tensorflow::ctc::CTCDecoder<T>::Output& output : dict_outputs) {
    output.resize(batch_size);
  }
  EXPECT_TRUE(
      dictionary_decoder.Decode(seq_len, inputs, &dict_outputs, &scores).ok());
  for (int path = 0; path < top_paths; ++path) {
    EXPECT_EQ(dict_outputs[path][0], expected_dict_output[0][path]);
  }
}
// Regression check: with a single timestep and more requested paths than can
// be well supported, every returned beam score must still be finite (no -inf
// placeholder scores leaking out).
template <class T>
void ctc_beam_search_decoding_all_beam_elements_have_finite_scores() {
  const int batch_size = 1;
  const int timesteps = 1;
  const int top_paths = 3;
  const int num_classes = 6;
  typename tensorflow::ctc::CTCBeamSearchDecoder<T>::DefaultBeamScorer
      default_scorer;
  tensorflow::ctc::CTCBeamSearchDecoder<T> decoder(num_classes, top_paths,
                                                   &default_scorer);
  int sequence_lengths[batch_size] = {timesteps};
  // NOTE: these inputs are raw (not log) probabilities, unlike the other
  // tests in this file; the decode must still succeed with finite scores.
  T input_data_mat[timesteps][batch_size][num_classes] = {
      {{0.4, 0.3, 0, 0, 0, 0.5}}};
  Eigen::Map<const Eigen::ArrayXi> seq_len(&sequence_lengths[0], batch_size);
  std::vector<
      Eigen::Map<const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>>>
      inputs;
  inputs.reserve(timesteps);
  for (int t = 0; t < timesteps; ++t) {
    inputs.emplace_back(&input_data_mat[t][0][0], batch_size, num_classes);
  }
  std::vector<typename tensorflow::ctc::CTCDecoder<T>::Output> outputs(
      top_paths);
  for (typename tensorflow::ctc::CTCDecoder<T>::Output& output : outputs) {
    output.resize(batch_size);
  }
  T score[batch_size][top_paths] = {{0.0}};
  Eigen::Map<Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>> scores(
      &score[0][0], batch_size, top_paths);
  EXPECT_TRUE(decoder.Decode(seq_len, inputs, &outputs, &scores).ok());
  for (int path = 0; path < top_paths; ++path) {
    LOG(INFO) << "path " << path;
    EXPECT_FALSE(std::isinf(score[0][path]));
  }
}
// Beam state that records only the most recent label (see
// RapidlyDroppingLabelScorer::ExpandState).  Modern alias syntax instead of
// typedef; identical type, so all existing uses are unaffected.
using LabelState = int;
// Scorer whose expansion penalty grows rapidly (by 100 per label value), so
// label-selection pruning visibly changes which paths survive.  The state is
// just the last label expanded into (LabelState == int).
template <class T>
class RapidlyDroppingLabelScorer
    : public tensorflow::ctc::BaseBeamScorer<T, LabelState> {
 public:
  void InitializeState(LabelState* root) const override {}
  // Remember only the label we just expanded into.
  void ExpandState(const LabelState& from_state, int from_label,
                   LabelState* to_state, int to_label) const override {
    *to_state = to_label;
  }
  void ExpandStateEnd(LabelState* state) const override {}
  // Penalize proportionally to the label value: higher labels drop faster.
  T GetStateExpansionScore(const LabelState& state,
                           T previous_score) const override {
    const T kRapidly = 100;
    return previous_score - kRapidly * state;
  }
  T GetStateEndExpansionScore(const LabelState& state) const override {
    return T(0);
  }
};
// Exercises SetLabelSelectionParameters: decodes the same input under various
// label-selection size/margin settings and checks the resulting top paths.
template <class T>
void ctc_beam_search_label_selection() {
  const int batch_size = 1;
  const int timesteps = 3;
  const int top_paths = 5;
  const int num_classes = 6;
  RapidlyDroppingLabelScorer<T> scorer;
  tensorflow::ctc::CTCBeamSearchDecoder<T, LabelState> decoder(
      num_classes, top_paths, &scorer);
  int sequence_lengths[batch_size] = {timesteps};
  // Class scores per (time, batch, class); +/-1e6 make classes 0 and 5
  // effectively forced/forbidden at given timesteps.
  T input_data_mat[timesteps][batch_size][num_classes] = {
      {{-1e6, 1, 2, 3, 4, -1e6}},
      {{1e6, 0, 0, 0, 0, -1e6}},
      {{-1e6, 1.1, 2.2, 3.3, 4.4, -1e6}},
  };
  // Expected top paths with no label selection (scorer penalty dominates).
  std::vector<typename tensorflow::ctc::CTCDecoder<T>::Output>
      expected_default_output = {
          {{1, 0, 1}, {1, 0, 2}, {2, 0, 1}, {1, 0, 3}, {2, 0, 2}},
      };
  // Expected top paths when only the 2 best labels per step are considered.
  std::vector<typename tensorflow::ctc::CTCDecoder<T>::Output>
      expected_output_size2 = {
          {{3, 0, 3}, {3, 0, 4}, {4, 0, 3}, {4, 0, 4}, {3}},
      };
  // Expected top paths with a label-selection margin of 2.0.
  std::vector<typename tensorflow::ctc::CTCDecoder<T>::Output>
      expected_output_width2 = {
          {{2, 0, 3}, {2, 0, 4}, {3, 0, 3}, {3, 0, 4}, {4, 0, 3}},
      };
  Eigen::Map<const Eigen::ArrayXi> seq_len(&sequence_lengths[0], batch_size);
  std::vector<
      Eigen::Map<const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>>>
      inputs;
  inputs.reserve(timesteps);
  for (int t = 0; t < timesteps; ++t) {
    inputs.emplace_back(&input_data_mat[t][0][0], batch_size, num_classes);
  }
  std::vector<typename tensorflow::ctc::CTCDecoder<T>::Output> outputs(
      top_paths);
  for (typename tensorflow::ctc::CTCDecoder<T>::Output& output : outputs) {
    output.resize(batch_size);
  }
  T score[batch_size][top_paths] = {{0.0}};
  Eigen::Map<Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>> scores(
      &score[0][0], batch_size, top_paths);
  // Baseline: no label selection.
  EXPECT_TRUE(decoder.Decode(seq_len, inputs, &outputs, &scores).ok());
  for (int path = 0; path < top_paths; ++path) {
    EXPECT_EQ(outputs[path][0], expected_default_output[0][path]);
  }
  // Selection size 2, margin disabled (-1).
  decoder.SetLabelSelectionParameters(2, T(-1));
  EXPECT_TRUE(decoder.Decode(seq_len, inputs, &outputs, &scores).ok());
  for (int path = 0; path < top_paths; ++path) {
    EXPECT_EQ(outputs[path][0], expected_output_size2[0][path]);
  }
  // Size disabled (0), margin 2.0.
  decoder.SetLabelSelectionParameters(0, T(2.0));
  EXPECT_TRUE(decoder.Decode(seq_len, inputs, &outputs, &scores).ok());
  for (int path = 0; path < top_paths; ++path) {
    EXPECT_EQ(outputs[path][0], expected_output_width2[0][path]);
  }
  // Size 2 AND margin 2.0: the size limit is the tighter constraint here.
  decoder.SetLabelSelectionParameters(2, T(2.0));
  EXPECT_TRUE(decoder.Decode(seq_len, inputs, &outputs, &scores).ok());
  for (int path = 0; path < top_paths; ++path) {
    EXPECT_EQ(outputs[path][0], expected_output_size2[0][path]);
  }
  // Size 4 and a wide margin (3.3001): loose enough to match the baseline.
  decoder.SetLabelSelectionParameters(4, T(3.3001));
  EXPECT_TRUE(decoder.Decode(seq_len, inputs, &outputs, &scores).ok());
  for (int path = 0; path < top_paths; ++path) {
    EXPECT_EQ(outputs[path][0], expected_default_output[0][path]);
  }
}
// Instantiate each templated test helper above for float and double.
TEST(CtcBeamSearch, FloatDecodingWithAndWithoutDictionary) {
  ctc_beam_search_decoding_with_and_without_dictionary<float>();
}
TEST(CtcBeamSearch, DoubleDecodingWithAndWithoutDictionary) {
  ctc_beam_search_decoding_with_and_without_dictionary<double>();
}
TEST(CtcBeamSearch, FloatAllBeamElementsHaveFiniteScores) {
  ctc_beam_search_decoding_all_beam_elements_have_finite_scores<float>();
}
TEST(CtcBeamSearch, DoubleAllBeamElementsHaveFiniteScores) {
  ctc_beam_search_decoding_all_beam_elements_have_finite_scores<double>();
}
TEST(CtcBeamSearch, FloatLabelSelection) {
  ctc_beam_search_label_selection<float>();
}
TEST(CtcBeamSearch, DoubleLabelSelection) {
  ctc_beam_search_label_selection<double>();
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/ctc/ctc_beam_search.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/ctc/ctc_beam_search_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4838c2d9-38ad-4e47-8b5e-fc837fc96c8d | cpp | tensorflow/tensorflow | op_requires | tensorflow/core/framework/op_requires.h | tensorflow/core/framework/op_requires_test.cc | #ifndef TENSORFLOW_CORE_FRAMEWORK_OP_REQUIRES_H_
#define TENSORFLOW_CORE_FRAMEWORK_OP_REQUIRES_H_
#include <utility>
#include "tensorflow/core/platform/macros.h"
namespace tensorflow {
// Fails the kernel context and returns from the calling function when EXP is
// false.  Must not be used inside an async Compute (checked at runtime).
#define OP_REQUIRES(CTX, EXP, STATUS)                     \
  do {                                                    \
    if (!TF_PREDICT_TRUE(EXP)) {                          \
      CheckNotInComputeAsync((CTX), "OP_REQUIRES_ASYNC"); \
      (CTX)->CtxFailure(__FILE__, __LINE__, (STATUS));    \
      return;                                             \
    }                                                     \
  } while (0)
// Fails the context and returns when the given absl::Status is not OK.  The
// status expression is evaluated once; the heavy lifting lives in
// op_requires_internal::OkImpl so the macro stays small at every call site.
#define OP_REQUIRES_OK(CTX, ...)                                    \
  do {                                                              \
    if (!TF_PREDICT_TRUE(                                           \
            ::tensorflow::op_requires_internal::OkImpl<::absl::Status>( \
                (CTX), __FILE__, __LINE__,                          \
                static_cast<const ::absl::Status&>(__VA_ARGS__)))) { \
      return;                                                       \
    }                                                               \
  } while (0)
// Like OP_REQUIRES_OK, but attaches PAYLOAD_VALUE (when non-empty) to the
// failing status under PAYLOAD_KEY before reporting it.
#define OP_REQUIRES_OK_OR_SET_PAYLOAD(CTX, PAYLOAD_KEY, PAYLOAD_VALUE, STATUS) \
  do {                                                                         \
    if (!TF_PREDICT_TRUE(STATUS.ok())) {                                       \
      CheckNotInComputeAsync((CTX), "OP_REQUIRES_OK_ASYNC");                   \
      if (!PAYLOAD_VALUE.empty()) {                                            \
        STATUS.SetPayload(PAYLOAD_KEY, absl::Cord(PAYLOAD_VALUE));             \
      }                                                                        \
      (CTX)->CtxFailureWithWarning(__FILE__, __LINE__, STATUS);                \
      return;                                                                  \
    }                                                                          \
  } while (0)
// Async variant of OP_REQUIRES: also invokes CALLBACK (the done callback)
// before returning on failure.
#define OP_REQUIRES_ASYNC(CTX, EXP, STATUS, CALLBACK)  \
  do {                                                 \
    if (!TF_PREDICT_TRUE(EXP)) {                       \
      (CTX)->CtxFailure(__FILE__, __LINE__, (STATUS)); \
      (CALLBACK)();                                    \
      return;                                          \
    }                                                  \
  } while (0)
// Async variant of OP_REQUIRES_OK: invokes CALLBACK before returning on a
// non-OK status.
#define OP_REQUIRES_OK_ASYNC(CTX, STATUS, CALLBACK)                      \
  do {                                                                   \
    if (!TF_PREDICT_TRUE(                                                \
            ::tensorflow::op_requires_internal::OkAsyncImpl<::absl::Status>( \
                (CTX), __FILE__, __LINE__, (STATUS)))) {                 \
      (CALLBACK)();                                                      \
      return;                                                            \
    }                                                                    \
  } while (0)
// Evaluates a StatusOr-producing expression; on success moves the value into
// `lhs`, on failure fails the context and returns.
#define OP_REQUIRES_VALUE(lhs, ctx, rexpr)                                  \
  OP_REQUIRES_VALUE_IMPL(                                                   \
      TF_STATUS_MACROS_CONCAT_NAME(_status_or_value, __COUNTER__), lhs, ctx, \
      rexpr)
// Implementation detail of OP_REQUIRES_VALUE; `statusor` is a uniquified
// temporary name so nested uses don't collide.
#define OP_REQUIRES_VALUE_IMPL(statusor, lhs, ctx, rexpr) \
  auto statusor = (rexpr);                                \
  OP_REQUIRES_OK(ctx, statusor.status());                 \
  lhs = std::move(statusor.value())
namespace op_requires_internal {

// Backing implementation of OP_REQUIRES_OK: returns true when `s` is OK;
// otherwise records the failure (with file/line) on `ctx` and returns false
// so the macro can bail out of the calling function.
template <typename S, typename Ctx>
bool OkImpl(Ctx&& ctx, const char* file, int line, const S& s) {
  if (TF_PREDICT_TRUE(s.ok())) {
    return true;
  }
  // The synchronous macro must not be used from an async Compute; the message
  // names the macro callers should switch to.
  CheckNotInComputeAsync(ctx, "OP_REQUIRES_OK_ASYNC");
  ctx->CtxFailureWithWarning(file, line, s);
  return false;
}

// Backing implementation of OP_REQUIRES_OK_ASYNC: identical to OkImpl except
// that running inside an async Compute is expected and therefore not checked.
template <typename S, typename Ctx>
bool OkAsyncImpl(Ctx&& ctx, const char* file, int line, const S& s) {
  if (TF_PREDICT_TRUE(s.ok())) {
    return true;
  }
  ctx->CtxFailureWithWarning(file, line, s);
  return false;
}

}  // namespace op_requires_internal
}
#endif | #include "tensorflow/core/framework/op_requires.h"
#include <optional>
#include <utility>
#include "absl/status/status.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
using ::tensorflow::testing::StatusIs;
using ::testing::Optional;
class Holder {
public:
explicit Holder()
: fine_(absl::OkStatus()), foul_(absl::InternalError("test")) {}
const absl::Status& Fine() const { return fine_; }
const absl::Status& Foul() const { return foul_; }
private:
absl::Status fine_;
absl::Status foul_;
};
// Minimal stand-in for OpKernelContext: captures the status reported by the
// OP_REQUIRES_* macros instead of failing a real kernel.
struct TestContext {
 public:
  // Called by the macros on failure; stashes the status for assertions.
  void CtxFailureWithWarning(const char* file, int line, absl::Status status) {
    stored_status.emplace(std::move(status));
  }
  // No-op satisfying the macros' async-misuse check (found via ADL).
  friend void CheckNotInComputeAsync(TestContext* ctx, const char* msg) {}
  // Set iff a failure was reported.
  std::optional<absl::Status> stored_status = std::nullopt;
};
// Drives OP_REQUIRES_OK with an OK or failing status; `reached` records
// whether the macro fell through (it returns early on failure).
void TestFunction(TestContext& ctx, bool success, bool& reached) {
  if (success) {
    OP_REQUIRES_OK(&ctx, Holder().Fine());
  } else {
    OP_REQUIRES_OK(&ctx, Holder().Foul());
  }
  reached = true;
}
// OK status: no failure recorded and execution continues past the macro.
TEST(OpRequires, RequiresOkWithOkStatus) {
  TestContext ctx;
  bool reached = false;
  TestFunction(ctx, true, reached);
  EXPECT_FALSE(ctx.stored_status.has_value());
  EXPECT_TRUE(reached);
}
// Failing status: the kInternal status is recorded and the macro returns
// before `reached` is set.
TEST(OpRequires, RequiresOkWithFailedStatus) {
  TestContext ctx;
  bool reached = false;
  TestFunction(ctx, false, reached);
  EXPECT_THAT(ctx.stored_status,
              Optional(StatusIs(absl::StatusCode::kInternal)));
  EXPECT_FALSE(reached);
}
// Drives OP_REQUIRES_OK_ASYNC; `handled` records that the done callback ran
// (it must run on both the success and failure paths), `reached` records
// fall-through past the macro.
void TestFunctionAsync(TestContext& ctx, bool success, bool& reached,
                       bool& handled) {
  auto done = gtl::MakeCleanup([&handled]() { handled = true; });
  if (success) {
    OP_REQUIRES_OK_ASYNC(&ctx, Holder().Fine(), done.release());
  } else {
    OP_REQUIRES_OK_ASYNC(&ctx, Holder().Foul(), done.release());
  }
  reached = true;
}
// Async + OK status: no failure recorded, fell through, callback ran.
TEST(OpRequires, RequiresOkAsyncWithOkStatus) {
  TestContext ctx;
  bool reached = false;
  bool handled = false;
  TestFunctionAsync(ctx, true, reached, handled);
  EXPECT_FALSE(ctx.stored_status.has_value());
  EXPECT_TRUE(reached);
  EXPECT_TRUE(handled);
}
// Async + failing status: failure recorded, early return, but the done
// callback must still have run.
TEST(OpRequires, RequiresOkAsyncWithFailedStatus) {
  TestContext ctx;
  bool reached = false;
  bool handled = false;
  TestFunctionAsync(ctx, false, reached, handled);
  EXPECT_THAT(ctx.stored_status,
              Optional(StatusIs(absl::StatusCode::kInternal)));
  EXPECT_FALSE(reached);
  EXPECT_TRUE(handled);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/op_requires.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/op_requires_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
69e71fa6-5ee3-45a2-ac87-7b2a081c2431 | cpp | tensorflow/tensorflow | resource_op_kernel | tensorflow/core/framework/resource_op_kernel.h | tensorflow/core/framework/resource_op_kernel_test.cc | #ifndef TENSORFLOW_CORE_FRAMEWORK_RESOURCE_OP_KERNEL_H_
#define TENSORFLOW_CORE_FRAMEWORK_RESOURCE_OP_KERNEL_H_
#include <string>
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/op_requires.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/refcount.h"
#include "tensorflow/core/platform/thread_annotations.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
// Base kernel that lazily creates a resource of type T in the ResourceMgr on
// first Compute and outputs a handle to it — either a DT_RESOURCE handle or,
// for legacy ops, a 2-element DT_STRING ref tensor of {container, name}.
// Subclasses implement CreateResource() and may override VerifyResource().
template <typename T>
class ResourceOpKernel : public OpKernel {
 public:
  explicit ResourceOpKernel(OpKernelConstruction* context) : OpKernel(context) {
    // Output type decides which handle flavor this kernel produces.
    has_resource_type_ = (context->output_type(0) == DT_RESOURCE);
    if (!has_resource_type_) {
      // Legacy string-ref handle: {container, name} filled in on first run.
      OP_REQUIRES_OK(context, context->allocate_temp(
                                  DT_STRING, TensorShape({2}), &tensor_));
    }
  }
  ~ResourceOpKernel() override {
    // A kernel-private resource dies with the kernel; deletion failure is
    // deliberately ignored (the manager may already have been cleared).
    if (cinfo_.resource_is_private_to_kernel()) {
      if (!cinfo_.resource_manager()
               ->template Delete<T>(cinfo_.container(), cinfo_.name())
               .ok()) {
      }
    }
  }
  void Compute(OpKernelContext* context) override TF_LOCKS_EXCLUDED(mu_) {
    mutex_lock l(mu_);
    // Re-create the resource if it was never created or has since been
    // deleted from the manager (weak_resource_ only observes, never owns).
    core::RefCountPtr<T> resource_ref_ptr = weak_resource_.GetNewRef();
    if (resource_ref_ptr == nullptr) {
      ResourceMgr* mgr = context->resource_manager();
      OP_REQUIRES_OK(context, cinfo_.Init(mgr, def()));
      T* resource;
      OP_REQUIRES_OK(context,
                     mgr->LookupOrCreate<T>(
                         cinfo_.container(), cinfo_.name(), &resource,
                         [this](T** ret) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
                           Status s = CreateResource(ret);
                           // Don't leak a partially-created resource.
                           if (!s.ok() && *ret != nullptr) {
                             CHECK((*ret)->Unref());
                           }
                           return s;
                         }));
      // Drop the LookupOrCreate reference when this scope exits; the manager
      // keeps the resource alive.
      core::ScopedUnref resource_unref(resource);
      OP_REQUIRES_OK(context, VerifyResource(resource));
      weak_resource_ = core::WeakPtr<T>(resource);
      resource_ = resource;
      if (!has_resource_type_) {
        auto h = tensor_.template flat<tstring>();
        h(0) = cinfo_.container();
        h(1) = cinfo_.name();
      }
    }
    if (has_resource_type_) {
      OP_REQUIRES_OK(context, MakeResourceHandleToOutput(
                                  context, 0, cinfo_.container(), cinfo_.name(),
                                  TypeIndex::Make<T>()));
    } else {
      context->set_output_ref(0, &mu_, &tensor_);
    }
  }

 protected:
  mutex mu_;
  // Container/name bookkeeping; valid after the first successful Compute.
  ContainerInfo cinfo_ TF_GUARDED_BY(mu_);
  ABSL_DEPRECATED("Use get_resource() instead.")
  T* resource_ TF_GUARDED_BY(mu_) = nullptr;
  // Returns a strong reference to the resource, or nullptr if it does not
  // (or no longer) exists in the manager.
  core::RefCountPtr<T> get_resource() TF_LOCKS_EXCLUDED(mu_) {
    mutex_lock lock(mu_);
    return weak_resource_.GetNewRef();
  }

 private:
  // Non-owning observer of the managed resource.
  core::WeakPtr<T> weak_resource_ TF_GUARDED_BY(mu_) =
      core::WeakPtr<T>(nullptr);
  // Subclasses create the resource here; on failure any partially-created
  // object they stored in *resource is unref'd by Compute's lambda above.
  virtual Status CreateResource(T** resource)
      TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) = 0;
  // Optional hook to validate a looked-up (possibly pre-existing, shared)
  // resource against this kernel's attributes.
  virtual Status VerifyResource(T* resource) { return absl::OkStatus(); }
  // Backing storage for the legacy DT_STRING ref handle.
  Tensor tensor_ TF_GUARDED_BY(mu_);
  bool has_resource_type_;
};
}
#endif | #include "tensorflow/core/framework/resource_op_kernel.h"
#include <memory>
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/refcount.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/thread_annotations.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
namespace {
// Minimal device: no environment, delegates allocation to the CPU allocator.
class StubDevice : public DeviceBase {
 public:
  StubDevice() : DeviceBase(nullptr) {}
  Allocator* GetAllocator(AllocatorAttributes) override {
    return cpu_allocator();
  }
};
// Trivial resource carrying a single integer so tests can verify which
// kernel instance created/shares it.
class StubResource : public ResourceBase {
 public:
  string DebugString() const override { return ""; }
  int code;
};
// Concrete ResourceOpKernel for tests: creates a StubResource stamped with
// the node's "code" attribute, and rejects a looked-up shared resource whose
// code does not match this node's attribute.
class StubResourceOpKernel : public ResourceOpKernel<StubResource> {
 public:
  using ResourceOpKernel::get_resource;
  using ResourceOpKernel::ResourceOpKernel;

 private:
  Status CreateResource(StubResource** resource) override {
    *resource = CHECK_NOTNULL(new StubResource);
    return GetNodeAttr(def(), "code", &(*resource)->code);
  }
  // Fails when a shared resource was created by a node with a different code.
  Status VerifyResource(StubResource* resource) override {
    int code;
    TF_RETURN_IF_ERROR(GetNodeAttr(def(), "code", &code));
    if (code != resource->code) {
      return errors::InvalidArgument("stub has code ", resource->code,
                                     " but requested code ", code);
    }
    return absl::OkStatus();
  }
};
// Register the stub op (legacy string-ref output) and its CPU kernel.
REGISTER_OP("StubResourceOp")
    .Attr("code: int")
    .Attr("container: string = ''")
    .Attr("shared_name: string = ''")
    .Output("output: Ref(string)");
REGISTER_KERNEL_BUILDER(Name("StubResourceOp").Device(DEVICE_CPU),
                        StubResourceOpKernel);
// Fixture owning a stub device and a ResourceMgr, with helpers to build a
// StubResourceOpKernel node and run its Compute once.
class ResourceOpKernelTest : public ::testing::Test {
 protected:
  // Builds a StubResourceOp kernel with the given code/shared_name.  Node
  // names are uniquified via a process-wide counter so repeated calls don't
  // collide in the resource manager.
  std::unique_ptr<StubResourceOpKernel> CreateOp(int code,
                                                 const string& shared_name) {
    static std::atomic<int64_t> count(0);
    NodeDef node_def;
    TF_CHECK_OK(NodeDefBuilder(strings::StrCat("test-node", count.fetch_add(1)),
                               "StubResourceOp")
                    .Attr("code", code)
                    .Attr("shared_name", shared_name)
                    .Finalize(&node_def));
    Status status;
    std::unique_ptr<OpKernel> op(CreateOpKernel(
        DEVICE_CPU, &device_, device_.GetAllocator(AllocatorAttributes()),
        node_def, TF_GRAPH_DEF_VERSION, &status));
    TF_EXPECT_OK(status) << status;
    EXPECT_NE(op, nullptr);
    // Downcast to the concrete kernel type; transfer ownership on success.
    std::unique_ptr<StubResourceOpKernel> resource_op(
        dynamic_cast<StubResourceOpKernel*>(op.get()));
    EXPECT_NE(resource_op, nullptr);
    if (resource_op != nullptr) {
      op.release();
    }
    return resource_op;
  }

  // Runs one Compute against this fixture's device/manager and returns the
  // resulting context status.
  Status RunOpKernel(OpKernel* op) {
    OpKernelContext::Params params;
    params.device = &device_;
    params.resource_manager = &mgr_;
    params.op_kernel = op;
    OpKernelContext context(&params);
    op->Compute(&context);
    return context.status();
  }

  StubDevice device_;
  ResourceMgr mgr_;
};
// No shared_name: the resource is private to the kernel and is deleted from
// the manager when the kernel is destroyed.
TEST_F(ResourceOpKernelTest, PrivateResource) {
  const int code = -100;
  auto op = CreateOp(code, "");
  ASSERT_NE(op, nullptr);
  TF_EXPECT_OK(RunOpKernel(op.get()));
  // Private resources are keyed "_<output-index>_<node-name>".
  const string key = "_0_" + op->name();
  StubResource* resource;
  TF_ASSERT_OK(
      mgr_.Lookup<StubResource>(mgr_.default_container(), key, &resource));
  EXPECT_EQ(op->get_resource().get(), resource);
  EXPECT_EQ(code, resource->code);
  resource->Unref();
  // Destroying the kernel must remove the private resource.
  op = nullptr;
  Status s =
      mgr_.Lookup<StubResource>(mgr_.default_container(), key, &resource);
  EXPECT_FALSE(s.ok());
}
// With shared_name the resource is keyed by that name and outlives the
// kernel.
TEST_F(ResourceOpKernelTest, SharedResource) {
  const string shared_name = "shared_stub";
  const int code = -201;
  auto op = CreateOp(code, shared_name);
  ASSERT_NE(op, nullptr);
  TF_EXPECT_OK(RunOpKernel(op.get()));
  StubResource* resource;
  TF_ASSERT_OK(mgr_.Lookup<StubResource>(mgr_.default_container(), shared_name,
                                         &resource));
  EXPECT_EQ(op->get_resource().get(), resource);
  EXPECT_EQ(code, resource->code);
  resource->Unref();
  // Shared resource survives kernel destruction.
  op = nullptr;
  TF_ASSERT_OK(mgr_.Lookup<StubResource>(mgr_.default_container(), shared_name,
                                         &resource));
  resource->Unref();
}
// Two kernels with the same shared_name and code resolve to one resource.
TEST_F(ResourceOpKernelTest, LookupShared) {
  auto op1 = CreateOp(-333, "shared_stub");
  auto op2 = CreateOp(-333, "shared_stub");
  ASSERT_NE(op1, nullptr);
  ASSERT_NE(op2, nullptr);
  TF_EXPECT_OK(RunOpKernel(op1.get()));
  TF_EXPECT_OK(RunOpKernel(op2.get()));
  EXPECT_EQ(op1->get_resource(), op2->get_resource());
}
// A second kernel with a mismatched code fails VerifyResource and never
// binds to the shared resource.
TEST_F(ResourceOpKernelTest, VerifyResource) {
  auto op1 = CreateOp(-444, "shared_stub");
  auto op2 = CreateOp(0, "shared_stub");
  ASSERT_NE(op1, nullptr);
  ASSERT_NE(op2, nullptr);
  TF_EXPECT_OK(RunOpKernel(op1.get()));
  EXPECT_FALSE(RunOpKernel(op2.get()).ok());
  EXPECT_NE(op1->get_resource(), nullptr);
  EXPECT_EQ(op2->get_resource(), nullptr);
}
// After the manager is cleared, re-running the same kernel must recreate the
// resource (the kernel's weak pointer detects the deletion).
TEST_F(ResourceOpKernelTest, ContainerClearedBetweenRuns) {
  const string shared_name = "shared_stub";
  const int code = -201;
  auto op = CreateOp(code, shared_name);
  ASSERT_NE(op, nullptr);
  TF_EXPECT_OK(RunOpKernel(op.get()));
  StubResource* resource;
  TF_ASSERT_OK(mgr_.Lookup<StubResource>(mgr_.default_container(), shared_name,
                                         &resource));
  EXPECT_EQ(op->get_resource().get(), resource);
  EXPECT_EQ(code, resource->code);
  resource->Unref();
  mgr_.Clear();
  TF_EXPECT_OK(RunOpKernel(op.get()));
  TF_ASSERT_OK(mgr_.Lookup<StubResource>(mgr_.default_container(), shared_name,
                                         &resource));
  EXPECT_EQ(op->get_resource().get(), resource);
  EXPECT_EQ(code, resource->code);
  resource->Unref();
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/resource_op_kernel.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/resource_op_kernel_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3d19f3e5-b0b2-4ec8-b860-1a7e27e52728 | cpp | tensorflow/tensorflow | partial_tensor_shape | tensorflow/core/framework/partial_tensor_shape.h | tensorflow/core/framework/partial_tensor_shape_test.cc | #ifndef TENSORFLOW_CORE_FRAMEWORK_PARTIAL_TENSOR_SHAPE_H_
#define TENSORFLOW_CORE_FRAMEWORK_PARTIAL_TENSOR_SHAPE_H_
#include "tensorflow/core/framework/tensor_shape.h"
#endif | #include "tensorflow/core/framework/partial_tensor_shape.h"
#include <limits>
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
// Default-constructed shape has unknown rank (dims() == -1).
TEST(PartialTensorShapeTest, Default) {
  const PartialTensorShape s;
  EXPECT_EQ(s.dims(), -1);
  EXPECT_TRUE(s.unknown_rank());
}
// Concatenate appends dims; any -1 (unknown) dim makes num_elements() -1,
// and concatenating an unknown-rank shape yields unknown rank.
TEST(PartialTensorShapeTest, Concatenate) {
  const PartialTensorShape s({10, 5});
  ASSERT_EQ(2, s.dims());
  EXPECT_EQ(10, s.dim_size(0));
  EXPECT_EQ(5, s.dim_size(1));
  EXPECT_EQ(50, s.num_elements());
  const auto s1 = s.Concatenate(s);
  ASSERT_EQ(4, s1.dims());
  EXPECT_EQ(10, s1.dim_size(0));
  EXPECT_EQ(5, s1.dim_size(1));
  EXPECT_EQ(10, s1.dim_size(2));
  EXPECT_EQ(5, s1.dim_size(3));
  EXPECT_EQ(50 * 50, s1.num_elements());
  const auto s2 = s.Concatenate(-1);
  const auto s3 = s2.Concatenate(0);
  ASSERT_EQ(3, s2.dims());
  ASSERT_EQ(4, s3.dims());
  EXPECT_EQ(10, s2.dim_size(0));
  EXPECT_EQ(10, s3.dim_size(0));
  EXPECT_EQ(5, s2.dim_size(1));
  EXPECT_EQ(5, s3.dim_size(1));
  EXPECT_EQ(-1, s2.dim_size(2));
  EXPECT_EQ(-1, s3.dim_size(2));
  EXPECT_EQ(0, s3.dim_size(3));
  EXPECT_EQ(-1, s2.num_elements());
  EXPECT_EQ(-1, s3.num_elements());
  const auto s4 = s.Concatenate(PartialTensorShape());
  EXPECT_EQ(-1, s4.dims());
  EXPECT_EQ(-1, s4.num_elements());
}
// Status-returning Concatenate variant: single dim, unknown dim, whole
// shape, and unknown-rank inputs all succeed.
TEST(PartialTensorShapeTest, ConcatenateWithStatus) {
  PartialTensorShape s({10, 5, 20});
  PartialTensorShape s2;
  Status status = s.ConcatenateWithStatus(400, &s2);
  EXPECT_TRUE(status.ok());
  EXPECT_EQ(s2.num_elements(), 400000);
  EXPECT_EQ(s2.dims(), 4);
  PartialTensorShape s3;
  status = s2.ConcatenateWithStatus(-10, &s3);
  EXPECT_TRUE(status.ok());
  EXPECT_EQ(s3.num_elements(), -1);
  EXPECT_EQ(s3.dims(), 5);
  PartialTensorShape s4;
  status = s.ConcatenateWithStatus(s, &s4);
  EXPECT_TRUE(status.ok());
  EXPECT_EQ(s4.num_elements(), 1000000);
  EXPECT_EQ(s4.dims(), 6);
  PartialTensorShape s5;
  status = s5.ConcatenateWithStatus(s5, &s4);
  EXPECT_TRUE(status.ok());
}
// IsValid(): known-rank shapes (even with -1 dims) are valid; unknown-rank
// shapes are not.
TEST(PartialTensorShapeTest, PartialTensorShapeIsValid) {
  PartialTensorShape s({10, 5, 20});
  EXPECT_TRUE(s.IsValid());
  PartialTensorShape s2({-1, 5, 20});
  EXPECT_TRUE(s2.IsValid());
  PartialTensorShape s3;
  EXPECT_FALSE(s3.IsValid());
  PartialTensorShape s4(s3.AsProto());
  EXPECT_FALSE(s4.IsValid());
}
// Proto validation: dims >= -1 are fine; unknown_rank with dims, or any
// dim < -1, is invalid.
TEST(PartialTensorShapeTest, InvalidShapeProto) {
  TensorShapeProto proto;
  EXPECT_TRUE(PartialTensorShape::IsValid(proto));
  proto.add_dim()->set_size(357);
  proto.add_dim()->set_size(982);
  EXPECT_TRUE(PartialTensorShape::IsValid(proto));
  proto.Clear();
  proto.add_dim()->set_size(0);
  proto.add_dim()->set_size(-1);
  EXPECT_TRUE(PartialTensorShape::IsValid(proto));
  proto.Clear();
  proto.set_unknown_rank(true);
  EXPECT_TRUE(PartialTensorShape::IsValid(proto));
  proto.add_dim()->set_size(1);
  EXPECT_FALSE(PartialTensorShape::IsValid(proto));
  proto.Clear();
  proto.add_dim()->set_size(-2);
  EXPECT_FALSE(PartialTensorShape::IsValid(proto));
}
// IsValidShape(): error messages for unknown-rank-with-dims, dims below -1,
// and (for TensorShape) not-fully-defined shapes.
TEST(PartialTensorShapeTest, PartialTensorShapeIsValidShape) {
  PartialTensorShape s;
  TensorShapeProto proto = s.AsProto();
  TF_EXPECT_OK(PartialTensorShape::IsValidShape(proto));
  proto.add_dim()->set_size(1);
  EXPECT_THAT(PartialTensorShape::IsValidShape(proto),
              testing::StatusIs(
                  error::Code::INVALID_ARGUMENT,
                  ::testing::ContainsRegex(
                      "An unknown shape must not have any dimensions set.")));
  proto.set_unknown_rank(false);
  proto.add_dim()->set_size(-1);
  proto.add_dim()->set_size(-2);
  EXPECT_THAT(PartialTensorShape::IsValidShape(proto),
              testing::StatusIs(error::Code::INVALID_ARGUMENT,
                                ::testing::ContainsRegex(
                                    "has dimensions with values below -1")));
  EXPECT_THAT(TensorShape::IsValidShape(proto),
              testing::StatusIs(
                  error::Code::INVALID_ARGUMENT,
                  ::testing::ContainsRegex("Shape.*is not fully defined")));
}
// BuildPartialTensorShape round-trips valid protos and rejects element-count
// overflow.
TEST(PartialTensorShapeTest, BuildPartialTensorShape) {
  PartialTensorShape s;
  TensorShapeProto sp = s.AsProto();
  PartialTensorShape s2;
  TF_EXPECT_OK(PartialTensorShape::BuildPartialTensorShape(sp, &s2));
  EXPECT_EQ(s2.AsProto().DebugString(), sp.DebugString());
  PartialTensorShape s3({-1, 5, 10});
  TensorShapeProto sp3 = s3.AsProto();
  PartialTensorShape s4;
  TF_EXPECT_OK(PartialTensorShape::BuildPartialTensorShape(sp3, &s4));
  EXPECT_EQ(s4.AsProto().DebugString(), sp3.DebugString());
  sp3.add_dim()->set_size(std::numeric_limits<int64_t>::max());
  EXPECT_THAT(
      PartialTensorShape::BuildPartialTensorShape(sp3, &s4),
      testing::StatusIs(error::Code::INVALID_ARGUMENT,
                        ::testing::ContainsRegex(
                            "Encountered overflow when multiplying shape")));
}
// Fully defined == known rank and no -1 dims.
TEST(PartialTensorShapeTest, PartialShapeFullyDefined) {
  const PartialTensorShape a({-1, 0, 1});
  const PartialTensorShape b({1, 0, 1});
  const PartialTensorShape c({-1, -1, 1});
  const PartialTensorShape d({1, 0});
  const PartialTensorShape e({});
  const PartialTensorShape f;
  EXPECT_FALSE(a.IsFullyDefined());
  EXPECT_FALSE(c.IsFullyDefined());
  EXPECT_TRUE(b.IsFullyDefined());
  EXPECT_TRUE(d.IsFullyDefined());
  EXPECT_TRUE(e.IsFullyDefined());
  EXPECT_FALSE(f.IsFullyDefined());
}
// AsTensorShape succeeds only for fully defined shapes.
TEST(PartialTensorShapeTest, ToTensorShape) {
  const PartialTensorShape a({});
  const PartialTensorShape b({1, 0});
  const PartialTensorShape c({-1, 0});
  const PartialTensorShape d;
  TensorShape full;
  EXPECT_TRUE(a.AsTensorShape(&full));
  EXPECT_EQ(full.dims(), 0);
  EXPECT_TRUE(b.AsTensorShape(&full));
  EXPECT_EQ(full.dims(), 2);
  EXPECT_EQ(full.dim_size(0), 1);
  EXPECT_EQ(full.dim_size(1), 0);
  EXPECT_FALSE(c.AsTensorShape(&full));
  EXPECT_FALSE(d.AsTensorShape(&full));
}
// IsIdenticalTo is exact equality: each shape matches only itself among the
// pairwise-distinct shapes below.
TEST(PartialTensorShapeTest, PartialShapeIdenticalTo) {
  const PartialTensorShape a({-1, 0, 1});
  const PartialTensorShape b({1, 0, 1});
  const PartialTensorShape c({-1, -1, 1});
  const PartialTensorShape d({1, 0});
  const PartialTensorShape e({-1, 0, 2});
  const PartialTensorShape f({});
  const PartialTensorShape g;
  std::vector<PartialTensorShape> shapes = {a, b, c, d, e, f, g};
  for (int i = 0; i < shapes.size(); ++i) {
    for (int j = 0; j <= i; ++j) {
      if (i == j) {
        EXPECT_TRUE(shapes[i].IsIdenticalTo(shapes[j]));
      } else {
        EXPECT_FALSE(shapes[i].IsIdenticalTo(shapes[j]));
      }
    }
  }
}
// IsCompatibleWith: -1 matches anything in that position; rank mismatches
// and differing known dims are incompatible; unknown rank matches all.
TEST(PartialTensorShapeTest, PartialShapeCompatibleWith) {
  const PartialTensorShape a({-1, 0, 1});
  const PartialTensorShape b({1, 0, 1});
  const PartialTensorShape c({-1, -1, 1});
  const PartialTensorShape d({1, 0});
  const PartialTensorShape e({-1, 0, 2});
  const PartialTensorShape f({});
  const PartialTensorShape g;
  EXPECT_TRUE(f.IsCompatibleWith(f));
  EXPECT_TRUE(a.IsCompatibleWith(b));
  EXPECT_TRUE(a.IsCompatibleWith(a));
  EXPECT_TRUE(b.IsCompatibleWith(b));
  EXPECT_TRUE(a.IsCompatibleWith(c));
  EXPECT_TRUE(b.IsCompatibleWith(c));
  EXPECT_FALSE(a.IsCompatibleWith(d));
  EXPECT_FALSE(b.IsCompatibleWith(d));
  EXPECT_FALSE(c.IsCompatibleWith(d));
  EXPECT_FALSE(a.IsCompatibleWith(e));
  EXPECT_FALSE(b.IsCompatibleWith(e));
  EXPECT_FALSE(c.IsCompatibleWith(e));
  EXPECT_FALSE(a.IsCompatibleWith(f));
  EXPECT_FALSE(b.IsCompatibleWith(f));
  EXPECT_FALSE(c.IsCompatibleWith(f));
  EXPECT_TRUE(a.IsCompatibleWith(g));
  EXPECT_TRUE(g.IsCompatibleWith(a));
  EXPECT_TRUE(g.IsCompatibleWith(g));
}
// Compatibility against fully-defined TensorShapes.
TEST(PartialTensorShapeTest, ShapeCompatibleWith) {
  const PartialTensorShape a({-1, 0, 1});
  const PartialTensorShape unknown;
  TensorShape b({0, 1});
  TensorShape c({0, 0, 1});
  TensorShape d({1, 0, 1});
  TensorShape e({1, 1, 1});
  EXPECT_FALSE(a.IsCompatibleWith(b));
  EXPECT_TRUE(a.IsCompatibleWith(c));
  EXPECT_TRUE(a.IsCompatibleWith(d));
  EXPECT_FALSE(a.IsCompatibleWith(e));
  EXPECT_TRUE(unknown.IsCompatibleWith(b));
  EXPECT_TRUE(unknown.IsCompatibleWith(c));
  EXPECT_TRUE(unknown.IsCompatibleWith(d));
  EXPECT_TRUE(unknown.IsCompatibleWith(e));
}
// MergeWith keeps known dims, fills unknowns from the other shape, and fails
// on rank mismatch; merging with unknown rank is a no-op in either order.
TEST(PartialTensorShapeTest, PartialShapeMergeWith) {
  const PartialTensorShape a({-1, 0, 1});
  const PartialTensorShape b({1, 0, 1});
  const PartialTensorShape c({-1, -1, 1});
  const PartialTensorShape d({1, 0});
  const PartialTensorShape e;
  PartialTensorShape test;
  EXPECT_EQ(absl::OkStatus(), a.MergeWith(a, &test));
  EXPECT_EQ(test.dims(), 3);
  EXPECT_EQ(test.dim_size(0), -1);
  EXPECT_EQ(test.dim_size(1), 0);
  EXPECT_EQ(test.dim_size(2), 1);
  test = PartialTensorShape();
  EXPECT_EQ(absl::OkStatus(), a.MergeWith(b, &test));
  EXPECT_EQ(test.dims(), 3);
  EXPECT_EQ(test.dim_size(0), 1);
  EXPECT_EQ(test.dim_size(1), 0);
  EXPECT_EQ(test.dim_size(2), 1);
  test = PartialTensorShape();
  EXPECT_TRUE(errors::IsInvalidArgument(a.MergeWith(d, &test)));
  test = PartialTensorShape();
  EXPECT_EQ(absl::OkStatus(), a.MergeWith(c, &test));
  EXPECT_EQ(test.dims(), 3);
  EXPECT_EQ(test.dim_size(0), -1);
  EXPECT_EQ(test.dim_size(1), 0);
  EXPECT_EQ(test.dim_size(2), 1);
  test = PartialTensorShape();
  EXPECT_EQ(absl::OkStatus(), c.MergeWith(a, &test));
  EXPECT_EQ(test.dims(), 3);
  EXPECT_EQ(test.dim_size(0), -1);
  EXPECT_EQ(test.dim_size(1), 0);
  EXPECT_EQ(test.dim_size(2), 1);
  test = PartialTensorShape();
  EXPECT_EQ(absl::OkStatus(), a.MergeWith(e, &test));
  EXPECT_EQ(test.dims(), 3);
  EXPECT_EQ(test.dim_size(0), -1);
  EXPECT_EQ(test.dim_size(1), 0);
  EXPECT_EQ(test.dim_size(2), 1);
  test = PartialTensorShape();
  EXPECT_EQ(absl::OkStatus(), e.MergeWith(a, &test));
  EXPECT_EQ(test.dims(), 3);
  EXPECT_EQ(test.dim_size(0), -1);
  EXPECT_EQ(test.dim_size(1), 0);
  EXPECT_EQ(test.dim_size(2), 1);
}
// MergeWith error paths: aliased result pointer, incompatible dims, and
// element-count overflow.
TEST(PartialTensorShapeTest, PartialShapeMergeWithInvalidData) {
  PartialTensorShape a = PartialTensorShape({-1, 0, 1});
  const PartialTensorShape b({-1, 0, 2});
  const PartialTensorShape c({1, -1, 3});
  const PartialTensorShape d({-1, std::numeric_limits<int64_t>::max(), -1});
  EXPECT_THAT(a.MergeWith(b, &a),
              testing::StatusIs(
                  error::Code::INTERNAL,
                  ::testing::ContainsRegex("Cannot output result to itself")));
  EXPECT_THAT(b.MergeWith(c, &a),
              testing::StatusIs(error::Code::INVALID_ARGUMENT,
                                ::testing::ContainsRegex(
                                    "Incompatible shapes during merge")));
  EXPECT_THAT(c.MergeWith(d, &a),
              testing::StatusIs(error::Code::INVALID_ARGUMENT,
                                ::testing::ContainsRegex(
                                    "Encountered overflow when multiplying")));
}
// MakePartialShape with zero dims yields a (fully defined) scalar shape.
TEST(PartialTensorShapeTest, MakePartialShapeEmpty) {
  const int64_t dims[1] = {};
  PartialTensorShape shape;
  EXPECT_FALSE(shape.IsFullyDefined());
  TF_ASSERT_OK(PartialTensorShape::MakePartialShape(dims, 0, &shape));
  EXPECT_TRUE(shape.IsFullyDefined());
}
// MakePartialShape copies all dims, including -1 (unknown) entries.
TEST(PartialTensorShapeTest, MakePartialShapeFull) {
  const int64_t dims[3] = {7, -1, 2};
  PartialTensorShape shape;
  TF_ASSERT_OK(PartialTensorShape::MakePartialShape(dims, 3, &shape));
  ASSERT_EQ(shape.dims(), 3);
  for (int i = 0; i < 3; i++) {
    EXPECT_EQ(shape.dim_size(i), dims[i]);
  }
}
// Dims below -1 are rejected.
TEST(PartialTensorShapeTest, MakePartialShapeInvalid) {
  const int64_t dims[3] = {7, -2, 2};
  PartialTensorShape shape;
  EXPECT_EQ(error::INVALID_ARGUMENT,
            PartialTensorShape::MakePartialShape(dims, 3, &shape).code());
}
// String rendering: unknown rank prints "<unknown>", -1 dims print "?".
TEST(PartialTensorShapeUtilsTest, PartialShapeListString) {
  PartialTensorShape s({2, 5, 20});
  EXPECT_EQ(PartialTensorShapeUtils::PartialShapeListString({s}), "[[2,5,20]]");
  PartialTensorShape s2;
  PartialTensorShape s3({-1, -1, 10});
  EXPECT_EQ(PartialTensorShapeUtils::PartialShapeListString({s, s2, s3}),
            "[[2,5,20], <unknown>, [?,?,10]]");
}
// AreCompatible requires equal list lengths and elementwise compatibility.
TEST(PartialTensorShapeUtilsTest, PartialShapeAreCompatible) {
  PartialTensorShape s1a({-1, 5, 20});
  PartialTensorShape s1b({2, 5, 20});
  PartialTensorShape s2a({-1, -1, 20});
  PartialTensorShape s2b({5, 10, 20});
  EXPECT_TRUE(PartialTensorShapeUtils::AreCompatible({s1a}, {s1b}));
  EXPECT_TRUE(PartialTensorShapeUtils::AreCompatible({s1b}, {s1a}));
  EXPECT_TRUE(PartialTensorShapeUtils::AreCompatible({s1a, s2b}, {s1b, s2b}));
  EXPECT_FALSE(PartialTensorShapeUtils::AreCompatible({s1a}, {s2a, s1a}));
  EXPECT_FALSE(PartialTensorShapeUtils::AreCompatible({s1a, s1b}, {s2a, s2b}));
}
TEST(PartialTensorShapeUtilsTest, PartialShapeAreIdentical) {
PartialTensorShape s1a({-1, 5, 20});
PartialTensorShape s1b({2, 5, 20});
PartialTensorShape s1c({-1, 5, 20});
PartialTensorShape s2a({-1, -1, 20});
PartialTensorShape s2b({5, 10, 20});
EXPECT_TRUE(PartialTensorShapeUtils::AreIdentical({s1a}, {s1a}));
EXPECT_TRUE(PartialTensorShapeUtils::AreIdentical({s1a, s1b}, {s1c, s1b}));
EXPECT_TRUE(PartialTensorShapeUtils::AreIdentical({s1c}, {s1a}));
EXPECT_FALSE(PartialTensorShapeUtils::AreIdentical({s1a}, {s1b}));
EXPECT_FALSE(PartialTensorShapeUtils::AreIdentical({s1a, s2b}, {s1b, s2b}));
EXPECT_FALSE(PartialTensorShapeUtils::AreIdentical({s1a}, {s2a, s1a}));
EXPECT_FALSE(PartialTensorShapeUtils::AreIdentical({s1a, s1b}, {s2a, s2b}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/partial_tensor_shape.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/partial_tensor_shape_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e5eb671c-fe51-4224-9402-beab750cbaee | cpp | tensorflow/tensorflow | float8 | tensorflow/core/platform/float8.h | third_party/xla/xla/tests/float8_test.cc | #ifndef TENSORFLOW_CORE_PLATFORM_FLOAT8_H_
#define TENSORFLOW_CORE_PLATFORM_FLOAT8_H_
#include "tsl/platform/ml_dtypes.h"
namespace tensorflow {
typedef tsl::float8_e4m3fn float8_e4m3fn;
typedef tsl::float8_e5m2 float8_e5m2;
}
#endif | #include <cmath>
#include <memory>
#include <vector>
#include <gtest/gtest.h>
#include "xla/hlo/builder/xla_builder.h"
#include "xla/test.h"
#include "xla/tests/client_library_test_base.h"
#include "xla/tests/test_macros.h"
#include "tsl/platform/ml_dtypes.h"
namespace xla {
namespace {
template <typename T>
class Float8Test : public ClientLibraryTestBase {};
using DataTypes = ::testing::Types<tsl::float8_e5m2, tsl::float8_e4m3,
tsl::float8_e4m3fn, tsl::float8_e3m4>;
TYPED_TEST_SUITE(Float8Test, DataTypes);
XLA_TYPED_TEST(Float8Test, ScalarOperation) {
XlaBuilder builder(this->TestName());
auto x = ConstantR0<TypeParam>(&builder, static_cast<TypeParam>(2.0f));
auto y = ConstantR0<TypeParam>(&builder, static_cast<TypeParam>(1.0f));
Add(x, y);
this->template ComputeAndCompareR0<TypeParam>(
&builder, static_cast<TypeParam>(3.0f), {});
}
XLA_TYPED_TEST(Float8Test, LogOperation) {
XlaBuilder builder(this->TestName());
auto x = ConstantR0<TypeParam>(&builder, static_cast<TypeParam>(4.0f));
Log(x);
this->template ComputeAndCompareR0<TypeParam>(
&builder, static_cast<TypeParam>(1.387f), {});
}
XLA_TYPED_TEST(Float8Test, CompareOperation) {
XlaBuilder builder(this->TestName());
auto x = ConstantR1<TypeParam>(&builder, {TypeParam{1.0}, TypeParam{2.0}});
auto y = ConstantR1<TypeParam>(&builder, {TypeParam{1.0}, TypeParam{3.0}});
Eq(x, y);
this->template ComputeAndCompareR1<bool>(&builder, {true, false}, {});
}
XLA_TYPED_TEST(Float8Test, DotOperation) {
XlaBuilder builder(this->TestName());
auto x = ConstantR2<TypeParam>(&builder, {{TypeParam{0.0}, TypeParam{1.0}},
{TypeParam{2.0}, TypeParam{3.0}}});
auto y = ConstantR2<TypeParam>(&builder, {{TypeParam{3.0}, TypeParam{2.0}},
{TypeParam{1.0}, TypeParam{0.0}}});
Dot(x, y);
this->template ComputeAndCompareR2<TypeParam>(
&builder,
{{TypeParam{1.0}, TypeParam{0.0}}, {TypeParam{9.0}, TypeParam{4.0}}}, {});
}
XLA_TYPED_TEST(Float8Test, NegateScalar) {
XlaBuilder builder(this->TestName());
Neg(ConstantR0<TypeParam>(&builder, static_cast<TypeParam>(2.0f)));
this->template ComputeAndCompareR0<TypeParam>(
&builder, static_cast<TypeParam>(-2.0f), {});
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/platform/float8.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tests/float8_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d7715548-177d-407d-b1e7-f44b71265ada | cpp | tensorflow/tensorflow | pending_counts | tensorflow/core/common_runtime/pending_counts.h | tensorflow/core/common_runtime/pending_counts_test.cc | #ifndef TENSORFLOW_CORE_COMMON_RUNTIME_PENDING_COUNTS_H_
#define TENSORFLOW_CORE_COMMON_RUNTIME_PENDING_COUNTS_H_
#include <atomic>
#include "tensorflow/core/lib/gtl/flatmap.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/util/port.h"
namespace tensorflow {
class PendingCounts {
public:
enum NodeState {
PENDING_NOTREADY,
PENDING_READY,
STARTED,
COMPLETED
};
class Handle;
class Layout {
public:
Handle CreateHandle(size_t max_pending_count, size_t max_dead_count);
private:
friend class PendingCounts;
int next_offset_ = 0;
};
explicit PendingCounts(Layout layout)
: num_bytes_(layout.next_offset_), bytes_(new char[num_bytes_]()) {
if (num_bytes_ >= sizeof(LargeCounts)) {
CHECK_EQ(uintptr_t(bytes_) % alignof(LargeCounts), 0);
}
}
explicit PendingCounts(const PendingCounts& other)
: num_bytes_(other.num_bytes_), bytes_(new char[num_bytes_]) {
if (num_bytes_ >= sizeof(LargeCounts)) {
CHECK_EQ(uintptr_t(bytes_) % alignof(LargeCounts), 0);
}
memcpy(bytes_, other.bytes_, other.num_bytes_);
}
~PendingCounts() { delete[] bytes_; }
void set_initial_count(Handle h, size_t pending_count) {
if (h.is_large_) {
std::atomic<LargeCounts>* c_ptr = Large(h);
auto c = c_ptr->load(std::memory_order_relaxed);
c.pending = pending_count;
c.dead_count = 0;
c.has_started = 0;
c_ptr->store(c, std::memory_order_relaxed);
} else {
DCHECK_LE(pending_count, kMaxCountForPackedCounts);
std::atomic<PackedCounts>* c_ptr = Packed(h);
auto c = c_ptr->load(std::memory_order_relaxed);
c.pending = pending_count;
c.dead_count = 0;
c.has_started = 0;
c_ptr->store(c, std::memory_order_relaxed);
}
}
NodeState node_state(Handle h) {
if (h.is_large_) {
return NodeStateForStruct(Large(h)->load(std::memory_order_relaxed));
} else {
return NodeStateForStruct(Packed(h)->load(std::memory_order_relaxed));
}
}
void mark_started(Handle h) {
DCHECK_EQ(pending(h), 0);
if (h.is_large_) {
std::atomic<LargeCounts>* c_ptr = Large(h);
auto c = c_ptr->load(std::memory_order_relaxed);
DCHECK_EQ(c.has_started, 0);
c.has_started = 1;
c_ptr->store(c, std::memory_order_relaxed);
} else {
std::atomic<PackedCounts>* c_ptr = Packed(h);
auto c = c_ptr->load(std::memory_order_relaxed);
DCHECK_EQ(c.has_started, 0);
c.has_started = 1;
c_ptr->store(c, std::memory_order_relaxed);
}
}
void mark_completed(Handle h) {
if (h.is_large_) {
std::atomic<LargeCounts>* c_ptr = Large(h);
auto c = c_ptr->load(std::memory_order_relaxed);
DCHECK_EQ(c.has_started, 1);
c.pending = 1;
c_ptr->store(c, std::memory_order_relaxed);
} else {
std::atomic<PackedCounts>* c_ptr = Packed(h);
auto c = c_ptr->load(std::memory_order_relaxed);
DCHECK_EQ(c.has_started, 1);
c.pending = 1;
c_ptr->store(c, std::memory_order_relaxed);
}
}
int pending(Handle h) {
if (h.is_large_) {
LargeCounts c = Large(h)->load(std::memory_order_relaxed);
if (PENDING_NOTREADY == NodeStateForStruct(c)) {
return c.pending;
} else {
return 0;
}
} else {
PackedCounts c = Packed(h)->load(std::memory_order_relaxed);
if (PENDING_NOTREADY == NodeStateForStruct(c)) {
return c.pending;
} else {
return 0;
}
}
}
struct AdjustResult {
int dead_count;
int pending_count;
AdjustResult(int dead_count, int pending_count)
: dead_count(dead_count), pending_count(pending_count) {}
};
int decrement_pending(Handle h, int v) {
DCHECK_GE(pending(h), v);
if (h.is_large_) {
std::atomic<LargeCounts>* c_ptr = Large(h);
auto c = c_ptr->load(std::memory_order_relaxed);
c.pending -= v;
c_ptr->store(c, std::memory_order_relaxed);
return c.pending;
} else {
std::atomic<PackedCounts>* c_ptr = Packed(h);
auto c = c_ptr->load(std::memory_order_relaxed);
c.pending -= v;
c_ptr->store(c, std::memory_order_relaxed);
return c.pending;
}
}
void mark_live(Handle h) {
if (h.is_large_) {
std::atomic<LargeCounts>* c_ptr = Large(h);
auto c = c_ptr->load(std::memory_order_relaxed);
if (PENDING_NOTREADY == NodeStateForStruct(c)) {
c.pending &= ~static_cast<int>(0x1);
c_ptr->store(c, std::memory_order_relaxed);
}
} else {
std::atomic<PackedCounts>* c_ptr = Packed(h);
auto c = c_ptr->load(std::memory_order_relaxed);
if (PENDING_NOTREADY == NodeStateForStruct(c)) {
static_assert(7 == kMaxCountForPackedCounts,
"Live flag incorrect for max packed count");
c.pending &= 0x6;
c_ptr->store(c, std::memory_order_relaxed);
}
}
}
int dead_count(Handle h) {
int r = h.is_large_ ? Large(h)->load(std::memory_order_relaxed).dead_count
: Packed(h)->load(std::memory_order_relaxed).dead_count;
return r;
}
void increment_dead_count(Handle h) {
if (h.is_large_) {
std::atomic<LargeCounts>* c_ptr = Large(h);
auto c = c_ptr->load(std::memory_order_relaxed);
if (PENDING_NOTREADY == NodeStateForStruct(c)) {
c.dead_count++;
c_ptr->store(c, std::memory_order_relaxed);
}
} else {
std::atomic<PackedCounts>* c_ptr = Packed(h);
auto c = c_ptr->load(std::memory_order_relaxed);
if (PENDING_NOTREADY == NodeStateForStruct(c)) {
DCHECK_LT(c.dead_count, kMaxCountForPackedCounts);
c.dead_count++;
c_ptr->store(c, std::memory_order_relaxed);
}
}
}
AdjustResult adjust_for_mark_live(Handle h) {
if (h.is_large_) {
std::atomic<LargeCounts>* c_ptr = Large(h);
auto c = c_ptr->load(std::memory_order_relaxed);
auto ret_pending = 0;
if (PENDING_NOTREADY == NodeStateForStruct(c)) {
ret_pending = c.pending;
c.pending &= ~static_cast<int>(0x1);
c_ptr->store(c, std::memory_order_relaxed);
}
return AdjustResult(c.dead_count, ret_pending);
} else {
std::atomic<PackedCounts>* c_ptr = Packed(h);
auto c = c_ptr->load(std::memory_order_relaxed);
auto ret_pending = 0;
if (PENDING_NOTREADY == NodeStateForStruct(c)) {
static_assert(7 == kMaxCountForPackedCounts,
"Live flag incorrect for max packed count");
ret_pending = c.pending;
c.pending &= 0x6;
c_ptr->store(c, std::memory_order_relaxed);
}
return AdjustResult(c.dead_count, ret_pending);
}
}
AdjustResult adjust_for_mark_live_atomic(Handle h) {
if (h.is_large_) {
std::atomic<LargeCounts>* c_ptr = Large(h);
auto old_val = c_ptr->load(std::memory_order_relaxed);
while (true) {
auto new_val = old_val;
auto ret_pending = 0;
if (PENDING_NOTREADY == NodeStateForStruct(new_val)) {
ret_pending = old_val.pending;
new_val.pending &= ~static_cast<int>(0x1);
}
AdjustResult ret(old_val.dead_count, ret_pending);
if (TF_PREDICT_TRUE(c_ptr->compare_exchange_weak(old_val, new_val)))
return ret;
}
} else {
std::atomic<PackedCounts>* c_ptr = Packed(h);
auto old_val = c_ptr->load(std::memory_order_relaxed);
while (true) {
auto new_val = old_val;
auto ret_pending = 0;
if (PENDING_NOTREADY == NodeStateForStruct(new_val)) {
static_assert(7 == kMaxCountForPackedCounts,
"Live flag incorrect for max packed count");
ret_pending = old_val.pending;
new_val.pending &= 0x6;
}
AdjustResult ret(old_val.dead_count, ret_pending);
if (TF_PREDICT_TRUE(c_ptr->compare_exchange_weak(old_val, new_val)))
return ret;
}
}
}
AdjustResult adjust_for_increment_dead(Handle h) {
if (h.is_large_) {
return adjust_for_increment_dead_shared(Large(h));
} else {
return adjust_for_increment_dead_shared(Packed(h));
}
}
AdjustResult adjust_for_increment_dead_atomic(Handle h) {
if (h.is_large_) {
return adjust_for_increment_dead_shared_atomic(Large(h));
} else {
return adjust_for_increment_dead_shared_atomic(Packed(h));
}
}
AdjustResult adjust_for_decrement_pending(Handle h, int decrement_pending) {
DCHECK_GE(pending(h), decrement_pending);
if (h.is_large_) {
return adjust_for_decrement_pending_shared(Large(h), decrement_pending);
} else {
return adjust_for_decrement_pending_shared(Packed(h), decrement_pending);
}
}
AdjustResult adjust_for_decrement_pending_atomic(Handle h,
int decrement_pending) {
DCHECK_GE(pending(h), decrement_pending);
if (h.is_large_) {
return adjust_for_decrement_pending_shared_atomic(Large(h),
decrement_pending);
} else {
return adjust_for_decrement_pending_shared_atomic(Packed(h),
decrement_pending);
}
}
AdjustResult adjust_for_activation(Handle h, bool increment_dead) {
DCHECK_GE(pending(h), 1);
if (h.is_large_) {
return adjust_for_activation_shared(Large(h), increment_dead);
} else {
return adjust_for_activation_shared(Packed(h), increment_dead);
}
}
AdjustResult adjust_for_activation_atomic(Handle h, bool increment_dead) {
DCHECK_GE(pending(h), 1);
if (h.is_large_) {
return adjust_for_activation_shared_atomic(Large(h), increment_dead);
} else {
return adjust_for_activation_shared_atomic(Packed(h), increment_dead);
}
}
class Handle {
public:
Handle() : byte_offset_(0), is_large_(0) {}
private:
friend class PendingCounts;
int byte_offset_ : 31;
bool is_large_ : 1;
};
private:
template <typename T>
inline AdjustResult adjust_for_increment_dead_shared(std::atomic<T>* c) {
T val = c->load(std::memory_order_relaxed);
auto ret_pending = 0;
if (PENDING_NOTREADY == NodeStateForStruct(val)) {
val.dead_count++;
ret_pending = val.pending;
c->store(val, std::memory_order_relaxed);
}
return AdjustResult(val.dead_count, ret_pending);
}
template <typename T>
inline AdjustResult adjust_for_increment_dead_shared_atomic(
std::atomic<T>* c) {
T old_val = c->load(std::memory_order_relaxed);
while (true) {
auto new_val = old_val;
auto ret_pending = 0;
if (PENDING_NOTREADY == NodeStateForStruct(new_val)) {
ret_pending = new_val.pending;
new_val.dead_count++;
}
AdjustResult ret(new_val.dead_count, ret_pending);
if (TF_PREDICT_TRUE(c->compare_exchange_weak(old_val, new_val)))
return ret;
}
}
template <typename T>
inline AdjustResult adjust_for_decrement_pending_shared(
std::atomic<T>* c, int decrement_pending) {
T val = c->load(std::memory_order_relaxed);
DCHECK_GE(val.pending, decrement_pending);
val.pending -= decrement_pending;
c->store(val, std::memory_order_relaxed);
return AdjustResult(val.dead_count, val.pending);
}
template <typename T>
inline AdjustResult adjust_for_decrement_pending_shared_atomic(
std::atomic<T>* c, int decrement_pending) {
T old_val = c->load(std::memory_order_relaxed);
while (true) {
T new_val = old_val;
DCHECK_GE(new_val.pending, decrement_pending);
new_val.pending -= decrement_pending;
AdjustResult ret(new_val.dead_count, new_val.pending);
if (TF_PREDICT_TRUE(c->compare_exchange_weak(old_val, new_val)))
return ret;
}
}
template <typename T>
inline AdjustResult adjust_for_activation_shared(std::atomic<T>* c,
bool increment_dead) {
T val = c->load(std::memory_order_relaxed);
if (increment_dead && PENDING_NOTREADY == NodeStateForStruct(val)) {
val.dead_count++;
}
DCHECK_GE(val.pending, 1);
val.pending--;
c->store(val, std::memory_order_relaxed);
return AdjustResult(val.dead_count, val.pending);
}
template <typename T>
inline AdjustResult adjust_for_activation_shared_atomic(std::atomic<T>* c,
bool increment_dead) {
T old_val = c->load(std::memory_order_relaxed);
while (true) {
T new_val = old_val;
if (increment_dead && PENDING_NOTREADY == NodeStateForStruct(new_val)) {
new_val.dead_count++;
}
DCHECK_GE(new_val.pending, 1);
new_val.pending--;
AdjustResult ret(new_val.dead_count, new_val.pending);
if (TF_PREDICT_TRUE(c->compare_exchange_weak(old_val, new_val)))
return ret;
}
}
static constexpr int kMaxCountForPackedCounts = 7;
struct PackedCounts {
uint8 pending : 3;
uint8 dead_count : 3;
uint8 has_started : 1;
};
struct alignas(8) LargeCounts {
uint32 pending;
uint32 dead_count : 31;
uint32 has_started : 1;
};
template <typename T>
NodeState NodeStateForStruct(const T& c) const {
if (c.has_started) {
return (c.pending == 0) ? STARTED : COMPLETED;
} else {
return (c.pending == 0) ? PENDING_READY : PENDING_NOTREADY;
}
}
inline std::atomic<LargeCounts>* Large(Handle h) {
DCHECK(h.is_large_);
DCHECK_LE(h.byte_offset_ + sizeof(std::atomic<LargeCounts>), num_bytes_);
DCHECK_EQ(h.byte_offset_ % alignof(std::atomic<LargeCounts>), 0);
return reinterpret_cast<std::atomic<LargeCounts>*>(bytes_ + h.byte_offset_);
}
inline std::atomic<PackedCounts>* Packed(Handle h) {
DCHECK(!h.is_large_);
DCHECK_LE(h.byte_offset_ + sizeof(PackedCounts), num_bytes_);
return reinterpret_cast<std::atomic<PackedCounts>*>(bytes_ +
h.byte_offset_);
}
const int num_bytes_;
char* bytes_;
void operator=(const PendingCounts&) = delete;
};
inline PendingCounts::Handle PendingCounts::Layout::CreateHandle(
size_t max_pending_count, size_t max_dead_count) {
Handle result;
if ((max_pending_count > kMaxCountForPackedCounts) ||
(max_dead_count > kMaxCountForPackedCounts)) {
constexpr int B = sizeof(std::atomic<LargeCounts>);
static_assert(
sizeof(std::atomic<LargeCounts>) >= alignof(std::atomic<LargeCounts>),
"std::atomic<LargeCounts> must be packed");
int64_t offset = ((static_cast<int64_t>(next_offset_) + B - 1) / B) * B;
result.byte_offset_ = offset;
result.is_large_ = true;
next_offset_ = result.byte_offset_ + B;
} else {
result.byte_offset_ = next_offset_;
result.is_large_ = false;
static_assert(sizeof(std::atomic<PackedCounts>) == 1,
"std::atomic<PackedCounts> should be a single byte");
next_offset_ += sizeof(std::atomic<PackedCounts>);
}
return result;
}
}
#endif | #include "tensorflow/core/common_runtime/pending_counts.h"
#include <memory>
#include <unordered_map>
#include <vector>
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
using std::unique_ptr;
namespace tensorflow {
TEST(PendingCounts, Simple) {
const int C = 300;
PendingCounts::Layout layout;
std::vector<PendingCounts::Handle> h(C);
for (int id = 0; id < C; id++) {
h[id] = layout.CreateHandle(id, id);
}
PendingCounts c(layout);
for (int id = 0; id < C; id++) {
c.set_initial_count(h[id], id);
}
for (int id = 0; id < C; id++) {
EXPECT_EQ(c.pending(h[id]), id);
EXPECT_EQ(c.dead_count(h[id]), 0);
}
for (int id = 0; id < C; id++) {
c.increment_dead_count(h[id]);
EXPECT_EQ(c.dead_count(h[id]), (id == 0) ? 0 : 1);
}
EXPECT_EQ(c.decrement_pending(h[1], 1), 0);
EXPECT_EQ(c.decrement_pending(h[3], 1), 2);
EXPECT_EQ(c.decrement_pending(h[3], 1), 1);
c.decrement_pending(h[5], 1);
c.decrement_pending(h[5], 3);
c.decrement_pending(h[170], 1);
c.decrement_pending(h[170], 13);
EXPECT_EQ(c.pending(h[1]), 0);
EXPECT_EQ(c.pending(h[3]), 1);
EXPECT_EQ(c.pending(h[5]), 1);
EXPECT_EQ(c.pending(h[170]), 156);
}
TEST(PendingCounts, CopyConstructor) {
const int C = 300;
PendingCounts::Layout layout;
std::vector<PendingCounts::Handle> h(C);
for (int id = 0; id < C; id++) {
h[id] = layout.CreateHandle(id, id);
}
PendingCounts c(layout);
for (int id = 0; id < C; id++) {
c.set_initial_count(h[id], id);
}
PendingCounts c2(c);
for (int id = 0; id < C; id++) {
EXPECT_EQ(c.pending(h[id]), c2.pending(h[id]));
EXPECT_EQ(c.dead_count(h[id]), c2.dead_count(h[id]));
}
}
TEST(PendingCounts, MarkLiveShowsUpAsCount) {
PendingCounts::Layout layout;
PendingCounts::Handle handles[2];
handles[0] = layout.CreateHandle(5, 4);
handles[1] = layout.CreateHandle(15, 4);
for (int id = 0; id < 2; id++) {
PendingCounts::Handle h = handles[id];
int count = (id == 0) ? 5 : 15;
PendingCounts c(layout);
c.set_initial_count(h, count);
EXPECT_EQ(c.pending(h), count);
auto result = c.adjust_for_mark_live_atomic(h);
EXPECT_EQ(c.pending(h), count - 1);
result = c.adjust_for_mark_live_atomic(h);
EXPECT_EQ(c.pending(h), count - 1);
c.decrement_pending(h, count - 1);
EXPECT_EQ(c.pending(h), 0);
result = c.adjust_for_mark_live_atomic(h);
EXPECT_EQ(c.pending(h), 0);
c.mark_started(h);
result = c.adjust_for_mark_live_atomic(h);
EXPECT_EQ(c.pending(h), 0);
c.mark_completed(h);
result = c.adjust_for_mark_live_atomic(h);
EXPECT_EQ(c.pending(h), 0);
}
}
TEST(PendingCounts, StateIsCorrect) {
const int C = 20;
PendingCounts::Layout layout;
std::vector<PendingCounts::Handle> handles(C);
for (int id = 0; id < C; id++) {
handles[id] = layout.CreateHandle(id, id);
}
PendingCounts c(layout);
for (int id = 0; id < C; id++) {
c.set_initial_count(handles[id], id);
}
for (int id = 0; id < C; id++) {
PendingCounts::Handle h = handles[id];
while (c.pending(h) > 0) {
EXPECT_EQ(c.node_state(h), PendingCounts::PENDING_NOTREADY);
c.decrement_pending(h, 1);
}
EXPECT_EQ(c.node_state(h), PendingCounts::PENDING_READY);
c.mark_started(h);
EXPECT_EQ(c.node_state(h), PendingCounts::STARTED);
c.mark_completed(h);
EXPECT_EQ(c.node_state(h), PendingCounts::COMPLETED);
}
}
TEST(PendingCounts, AdjustForActivation) {
PendingCounts::Layout layout;
PendingCounts::Handle handles[2];
handles[0] = layout.CreateHandle(5, 4);
handles[1] = layout.CreateHandle(15, 4);
for (int id = 0; id < 2; id++) {
PendingCounts::Handle h = handles[id];
int count = (id == 0) ? 5 : 15;
PendingCounts c(layout);
c.set_initial_count(h, count);
EXPECT_EQ(c.pending(h), count);
PendingCounts::AdjustResult result = c.adjust_for_activation(h, false);
EXPECT_EQ(c.pending(h), count - 1);
EXPECT_GT(result.pending_count, 0);
EXPECT_EQ(c.dead_count(h), 0);
EXPECT_EQ(result.dead_count, 0);
result = c.adjust_for_activation(h, true);
EXPECT_EQ(c.pending(h), count - 2);
EXPECT_GT(result.pending_count, 0);
EXPECT_EQ(c.dead_count(h), 1);
EXPECT_GT(result.dead_count, 0);
}
}
TEST(PendingCounts, AdjustForActivationAtomic) {
PendingCounts::Layout layout;
PendingCounts::Handle handles[2];
const int kInitialCounts[2] = {6, 16};
handles[0] = layout.CreateHandle(kInitialCounts[0], 0);
handles[1] = layout.CreateHandle(kInitialCounts[1], 0);
PendingCounts c(layout);
c.set_initial_count(handles[0], kInitialCounts[0]);
c.set_initial_count(handles[1], kInitialCounts[1]);
Env* env = Env::Default();
std::atomic<bool> start{false};
std::vector<unique_ptr<Thread>> threads;
for (int t = 0; t < 2; t++) {
threads.emplace_back(env->StartThread({}, "tester", [&]() {
while (!start) {
}
for (int i = 0; i < kInitialCounts[0] / 2; i++) {
c.adjust_for_activation_atomic(handles[0], false);
}
for (int i = 0; i < kInitialCounts[1] / 2; i++) {
c.adjust_for_activation_atomic(handles[1], false);
}
}));
}
start = true;
threads.clear();
EXPECT_EQ(c.pending(handles[0]), 0);
EXPECT_EQ(c.pending(handles[1]), 0);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/pending_counts.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/pending_counts_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f03b26a7-efe8-47dd-867f-fb78537061cf | cpp | tensorflow/tensorflow | tf_rendezvous_c_api | tensorflow/core/common_runtime/next_pluggable_device/c/tf_rendezvous_c_api.h | tensorflow/core/common_runtime/next_pluggable_device/c/tf_rendezvous_c_api_test.cc | #ifndef TENSORFLOW_CORE_COMMON_RUNTIME_NEXT_PLUGGABLE_DEVICE_C_TF_RENDEZVOUS_C_API_H_
#define TENSORFLOW_CORE_COMMON_RUNTIME_NEXT_PLUGGABLE_DEVICE_C_TF_RENDEZVOUS_C_API_H_
#include <stdint.h>
#include "tensorflow/c/c_api_macros.h"
#include "tensorflow/c/tf_status.h"
#include "tensorflow/c/tf_tensor.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef struct TF_DeviceContext TF_DeviceContext;
typedef struct TFDevice_AllocatorAttributes {
uint32_t value;
int32_t scope_id;
} TFDevice_AllocatorAttributes;
typedef struct TFE_CancellationManager TFE_CancellationManager;
typedef struct TF_RendezvousArgsStruct {
TF_DeviceContext* device_context;
TFDevice_AllocatorAttributes alloc_attrs;
TFE_CancellationManager* cancellation_manager;
} TF_RendezvousArgsStruct;
typedef struct TF_RendezvousParsedKey {
char* full_key;
uint32_t full_key_size;
} TF_RendezvousParsedKey;
typedef struct TF_RendezvousSend_Params {
const TF_RendezvousParsedKey* key;
const TF_RendezvousArgsStruct* args;
TF_Tensor* tensor;
bool is_dead;
TF_Status* status;
} TF_RendezvousSend_Params;
typedef void (*TF_RendezvousSend_Function)(void*, TF_RendezvousSend_Params*);
typedef struct TF_RendezvousDoneCallback_Params {
void* context;
const TF_Status* status;
const TF_Tensor* tensor;
bool is_dead;
} TF_RendezvousDoneCallback_Params;
typedef void (*TF_RendezvousDoneCallback_Function)(
void*, TF_RendezvousDoneCallback_Params*);
typedef struct TF_RendezvousDoneCallbackImpl {
void* context;
TF_RendezvousDoneCallback_Function callback;
} TF_RendezvousDoneCallbackImpl;
typedef struct TF_RendezvousAsyncRecv_Params {
void* context;
const TF_RendezvousParsedKey* key;
const TF_RendezvousArgsStruct* args;
TF_RendezvousDoneCallbackImpl on_done;
} TF_RendezvousAsyncRecv_Params;
typedef void (*TF_RendezvousAsyncRecv_Function)(void*,
TF_RendezvousAsyncRecv_Params*);
typedef void (*TF_RendezvousStartAbort_Function)(void* context,
const TF_Status*);
typedef struct TF_RendezvousThunk {
void* rendezvous;
TF_RendezvousSend_Function send_func;
TF_RendezvousAsyncRecv_Function async_recv_func;
TF_RendezvousStartAbort_Function start_abort_func;
} TF_RendezvousThunk;
#ifdef __cplusplus
}
#endif
#endif | #include "tensorflow/core/common_runtime/next_pluggable_device/c/tf_rendezvous_c_api.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/synchronization/notification.h"
#include "xla/tsl/framework/allocator.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/next_pluggable_device/c/tf_rendezvous_c_api_helper.h"
#include "tensorflow/core/common_runtime/next_pluggable_device/c/tf_rendezvous_c_api_internal.h"
#include "tensorflow/core/common_runtime/rendezvous_mgr.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/control_flow.h"
#include "tensorflow/core/framework/device.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/rendezvous.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/platform/mem.h"
#include "tensorflow/core/platform/refcount.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/stringpiece.h"
#include "tensorflow/core/platform/types.h"
#include "tsl/platform/status.h"
namespace tensorflow {
namespace {
Tensor CreateTestTensor() {
Tensor t(DT_INT8, TensorShape({10, 20}));
for (int64_t a = 0; a < t.shape().dim_size(0); a++) {
for (int64_t b = 0; b < t.shape().dim_size(1); b++) {
t.matrix<int8>()(a, b) = static_cast<int8>((a + 1) * (b + 1));
}
}
return t;
}
class FakeAllocator : public Allocator {
public:
std::string Name() override { return "fake"; }
void* AllocateRaw(size_t alignment, size_t num_bytes) override {
return port::AlignedMalloc(num_bytes, alignment);
}
void DeallocateRaw(void* ptr) override { return port::AlignedFree(ptr); }
};
class FakeDevice : public Device {
public:
explicit FakeDevice(const DeviceAttributes& attr) : Device(nullptr, attr) {}
Status Sync() override { return absl::OkStatus(); }
Allocator* GetAllocator(AllocatorAttributes) override {
return allocator_.get();
}
static std::unique_ptr<Device> Make(absl::string_view name,
absl::string_view type) {
DeviceAttributes device_attributes;
device_attributes.set_name(std::string(name));
device_attributes.set_device_type(std::string(type));
return std::unique_ptr<Device>(new FakeDevice(device_attributes));
}
private:
std::unique_ptr<FakeAllocator> allocator_ = std::make_unique<FakeAllocator>();
};
class FakeDeviceManager : public DeviceMgr {
public:
void ListDeviceAttributes(
std::vector<DeviceAttributes>* devices) const override {
devices->clear();
}
std::vector<Device*> ListDevices() const override {
return std::vector<Device*>();
}
std::string DebugString() const override { return ""; }
std::string DeviceMappingString() const override { return ""; }
absl::Status LookupDevice(StringPiece name, Device** device) const override {
*device = fake_device_.get();
return absl::OkStatus();
}
bool ContainsDevice(int64_t device_incarnation) const override {
return false;
}
void ClearContainers(absl::Span<const string> containers) const override {}
int NumDeviceType(const string& type) const override { return 0; }
int NumDevices() const override { return 0; }
Device* HostCPU() const override { return nullptr; }
private:
std::unique_ptr<Device> fake_device_ = FakeDevice::Make("/cpu:0", "fake");
};
class TestDeviceContext : public DeviceContext {
public:
void CopyCPUTensorToDevice(const Tensor* cpu_tensor, Device* device,
Tensor* device_tensor, StatusCallback done,
bool sync_dst_compute) const override {
Tensor test_tensor = CreateTestTensor();
test::ExpectTensorEqual<int8>(test_tensor, *cpu_tensor);
done(absl::OkStatus());
}
void CopyDeviceTensorToCPU(const Tensor* device_tensor,
absl::string_view tensor_name, Device* device,
Tensor* cpu_tensor, StatusCallback done) override {
*cpu_tensor = CreateTestTensor();
done(absl::OkStatus());
}
void CopyTensorInSameDevice(const Tensor* input_tensor, Device* device,
Tensor* output_tensor,
tsl::StatusCallback done) const override {
done(absl::InternalError("TPU->TPU copy not implemented."));
}
};
std::string CreateRendezvousKey(bool to_host) {
const std::string task_prefix = "/job:worker/replica:0/task:0";
const std::string src_device = to_host ? "/device:TPU:0" : "/device:CPU:0";
const std::string dst_device = to_host ? "/device:CPU:0" : "/device:TPU:0";
const std::string rendezvous_key_base = "rendezvous_key_base";
return Rendezvous::CreateKey(absl::StrCat(task_prefix, src_device),
1,
absl::StrCat(task_prefix, dst_device),
rendezvous_key_base, FrameAndIter(0, 0));
}
TEST(RendezvousCAPI, DeviceToHost) {
auto device_manager = std::make_unique<FakeDeviceManager>();
core::RefCountPtr<Rendezvous> rendezvous = core::RefCountPtr<Rendezvous>(
new IntraProcessRendezvous(device_manager.get()));
core::RefCountPtr<TestDeviceContext> device_context =
core::RefCountPtr<TestDeviceContext>(new TestDeviceContext());
std::string key = CreateRendezvousKey(true);
Rendezvous::ParsedKey parsed_key;
TF_ASSERT_OK(Rendezvous::ParseKey(key, &parsed_key));
TF_RendezvousThunk* thunk = ToC(rendezvous.get());
std::unique_ptr<tensorflow::RendezvousInterface> thunk_rendezvous =
FromC(thunk);
Rendezvous::Args send_args;
send_args.device_context = device_context.get();
TF_CHECK_OK(thunk_rendezvous->Send(parsed_key, send_args, Tensor(), false));
Tensor result;
absl::Notification callback_done;
Rendezvous::Args recv_args;
recv_args.device_context = device_context.get();
recv_args.alloc_attrs.set_on_host(true);
rendezvous->RecvAsync(parsed_key, recv_args,
[&](const absl::Status& status,
const RefCountedIntraProcessRendezvous::Args&,
const RefCountedIntraProcessRendezvous::Args&,
const Tensor& tensor, const bool) {
TF_ASSERT_OK(status);
result = tensor;
callback_done.Notify();
});
callback_done.WaitForNotification();
Tensor test_tensor = CreateTestTensor();
test::ExpectTensorEqual<int8>(test_tensor, result);
Destroy(thunk);
delete thunk;
}
TEST(RendezvousCAPI, HostToDevice) {
auto device_manager = std::make_unique<FakeDeviceManager>();
core::RefCountPtr<Rendezvous> rendezvous = core::RefCountPtr<Rendezvous>(
new IntraProcessRendezvous(device_manager.get()));
core::RefCountPtr<TestDeviceContext> device_context =
core::RefCountPtr<TestDeviceContext>(new TestDeviceContext());
std::string key = CreateRendezvousKey(false);
Rendezvous::ParsedKey parsed_key;
TF_ASSERT_OK(Rendezvous::ParseKey(key, &parsed_key));
TF_RendezvousThunk* thunk = ToC(rendezvous.get());
std::unique_ptr<tensorflow::RendezvousInterface> thunk_rendezvous =
FromC(thunk);
Rendezvous::Args recv_args;
recv_args.device_context = device_context.get();
Tensor result;
absl::Notification callback_done;
thunk_rendezvous->RecvAsync(parsed_key, recv_args,
[&](const absl::Status& status,
const RefCountedIntraProcessRendezvous::Args&,
const RefCountedIntraProcessRendezvous::Args&,
const Tensor& tensor, const bool) {
TF_ASSERT_OK(status);
result = tensor;
callback_done.Notify();
});
Rendezvous::Args send_args;
send_args.device_context = device_context.get();
send_args.alloc_attrs.set_on_host(true);
Tensor test_tensor = CreateTestTensor();
TF_CHECK_OK(rendezvous->Send(parsed_key, send_args, test_tensor, false));
callback_done.WaitForNotification();
Destroy(thunk);
delete thunk;
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/next_pluggable_device/c/tf_rendezvous_c_api.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/next_pluggable_device/c/tf_rendezvous_c_api_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8bb9de31-2dae-4bbc-8191-b301b7399a6e | cpp | tensorflow/tensorflow | plugin_c_api | tensorflow/core/common_runtime/next_pluggable_device/c/plugin_c_api.h | tensorflow/core/common_runtime/next_pluggable_device/c/plugin_c_api_test.cc | #ifndef TENSORFLOW_CORE_COMMON_RUNTIME_NEXT_PLUGGABLE_DEVICE_C_PLUGIN_C_API_H_
#define TENSORFLOW_CORE_COMMON_RUNTIME_NEXT_PLUGGABLE_DEVICE_C_PLUGIN_C_API_H_
#include <cstddef>
#include "tensorflow/c/c_api.h"
#include "tensorflow/c/c_api_macros.h"
#include "tensorflow/c/tf_status.h"
#include "tensorflow/c/tf_tensor.h"
#include "xla/c/c_api_decl.h"
#include "xla/pjrt/c/pjrt_c_api.h"
#include "xla/stream_executor/tpu/c_api_decl.h"
#define TFNPD_MAJOR 0
#define TFNPD_MINOR 0
#define TFNPD_PATCH 1
#ifdef __cplusplus
extern "C" {
#endif
typedef struct TFNPD_DeviceEvent TFNPD_DeviceEvent;
typedef TFNPD_DeviceEvent* TFNPD_NewDeviceEvent();
typedef void TFNPD_DeviceEventAwait(TFNPD_DeviceEvent* event,
TF_Status* status);
typedef bool TFNPD_DeviceEventIsReady(TFNPD_DeviceEvent* event);
typedef void TFNPD_DeviceEventAndThen(TFNPD_DeviceEvent* event,
void (*callback)(void*),
void* callback_arg);
typedef void TFNPD_DeviceEventDelete(TFNPD_DeviceEvent* event);
typedef struct TFNPD_DeviceAllocator TFNPD_DeviceAllocator;
typedef TFNPD_DeviceAllocator* TFNPD_DeviceAllocatorCreate(int device_ordinal);
typedef void* TFNPD_DeviceAllocateRaw(TFNPD_DeviceAllocator* allocator,
size_t alignment, size_t num_bytes);
typedef void TFNPD_DeviceDeallocateRaw(TFNPD_DeviceAllocator* allocator,
void* ptr);
typedef TF_StringView TFNPD_DeviceAllocatorName(
TFNPD_DeviceAllocator* allocator);
typedef bool TFNPD_DeviceAllocatorAllocatesOpaqueHandle(
TFNPD_DeviceAllocator* allocator);
typedef void TFNPD_DeviceAllocatorDelete(TFNPD_DeviceAllocator* allocator);
typedef struct TFNPD_DeviceContext TFNPD_DeviceContext;
typedef TFNPD_DeviceContext* TFNPD_DeviceContextCreate(int device_ordinal);
typedef TFNPD_DeviceEvent* TFNPD_DeviceTensorToHostTensor(
TFNPD_DeviceContext* device_context, const TF_Tensor* device_tensor,
TF_Tensor* cpu_tensor, TF_Status* status);
typedef TFNPD_DeviceEvent* TFNPD_HostTensorToDeviceTensor(
TFNPD_DeviceContext* device_context, const TF_Tensor* cpu_tensor,
TF_Tensor* device_tensor, TF_Status* status);
typedef TFNPD_DeviceEvent* TFNPD_SameDeviceTensorCopy(
TFNPD_DeviceContext* context);
typedef PJRT_Buffer* TFNPD_SameDevicePjRtBufferCopy(PJRT_Buffer* src_buffer,
PJRT_Client* c_client,
TF_Status* status);
typedef void TFNPD_DeviceContextDelete(TFNPD_DeviceContext* context);
typedef void TFNPD_XlaShapeToDeviceShapeRepresentation(
XLA_Shape* serialized_xla_shape, int data_type, bool use_fast_memory,
XLA_LayoutPreference layout_preference, XLA_Shape* serialized_device_shape,
TF_Status* tf_status);
typedef int32_t TFNPD_GetDeviceCount(TF_Status* status);
typedef void TFNPD_InitPluginInternalDeviceStates(TF_Status* status);
#define TFNPD_API_STRUCT_FN(fn_type) fn_type* fn_type
typedef struct {
size_t struct_size;
void* priv;
TFNPD_API_STRUCT_FN(TFNPD_NewDeviceEvent);
TFNPD_API_STRUCT_FN(TFNPD_DeviceEventAwait);
TFNPD_API_STRUCT_FN(TFNPD_DeviceEventIsReady);
TFNPD_API_STRUCT_FN(TFNPD_DeviceEventAndThen);
TFNPD_API_STRUCT_FN(TFNPD_DeviceEventDelete);
TFNPD_API_STRUCT_FN(TFNPD_DeviceAllocatorCreate);
TFNPD_API_STRUCT_FN(TFNPD_DeviceAllocateRaw);
TFNPD_API_STRUCT_FN(TFNPD_DeviceDeallocateRaw);
TFNPD_API_STRUCT_FN(TFNPD_DeviceAllocatorName);
TFNPD_API_STRUCT_FN(TFNPD_DeviceAllocatorAllocatesOpaqueHandle);
TFNPD_API_STRUCT_FN(TFNPD_DeviceAllocatorDelete);
TFNPD_API_STRUCT_FN(TFNPD_DeviceContextCreate);
TFNPD_API_STRUCT_FN(TFNPD_DeviceContextDelete);
TFNPD_API_STRUCT_FN(TFNPD_DeviceTensorToHostTensor);
TFNPD_API_STRUCT_FN(TFNPD_HostTensorToDeviceTensor);
TFNPD_API_STRUCT_FN(TFNPD_SameDeviceTensorCopy);
TFNPD_API_STRUCT_FN(TFNPD_SameDevicePjRtBufferCopy);
TFNPD_API_STRUCT_FN(TFNPD_XlaShapeToDeviceShapeRepresentation);
TFNPD_API_STRUCT_FN(TFNPD_GetDeviceCount);
TFNPD_API_STRUCT_FN(TFNPD_InitPluginInternalDeviceStates);
} TFNPD_Api;
const size_t TFNPD_Api_STRUCT_SIZE =
TF_OFFSET_OF_END(TFNPD_Api, TFNPD_InitPluginInternalDeviceStates);
#undef TFNPD_API_STRUCT_FN
typedef struct TFNPD_PluginParams {
size_t struct_size;
void* ext;
const char* device_type;
const char* compilation_device_name;
int32_t priority;
bool is_pluggable_device;
bool use_pjrt_on_demand_compile;
} TFNPD_PluginParams;
const size_t TFNPD_PLUGIN_PARAMS_STRUCT_SIZE =
TF_OFFSET_OF_END(TFNPD_PluginParams, is_pluggable_device);
const TFNPD_Api* TFNPD_InitPlugin(TFNPD_PluginParams* params,
TF_Status* tf_status);
#if defined(__cplusplus)
}
#endif
#endif | #include "tensorflow/core/common_runtime/next_pluggable_device/c/plugin_c_api.h"
#include <functional>
#include <memory>
#include <string>
#include <utility>
#include <gtest/gtest.h>
#include "tensorflow/c/tf_status.h"
#include "tensorflow/core/common_runtime/next_pluggable_device/c/example_plugin.h"
#include "tensorflow/core/platform/status.h"
#include "tfrt/host_context/concurrent_work_queue.h"
#include "tfrt/host_context/diagnostic.h"
#include "tfrt/host_context/host_allocator.h"
#include "tfrt/host_context/host_context.h"
namespace {
struct CallbackParams {
std::function<void(const tensorflow::Status&)> callback;
tensorflow::Status status;
const TFNPD_Api* api;
TFNPD_DeviceEvent* event;
~CallbackParams() {
api->TFNPD_DeviceEventDelete(event);
}
};
void InvokeCallbackFn(void* arg) {
CallbackParams* params = reinterpret_cast<CallbackParams*>(arg);
params->callback(params->status);
delete params;
}
class PluginEventTestFixture : public testing::Test {
protected:
PluginEventTestFixture() {
api_ = GetExamplePluginApi();
auto diag_handler = [](const tfrt::DecodedDiagnostic& diag) {
LOG(ERROR) << diag.message();
};
std::unique_ptr<tfrt::ConcurrentWorkQueue> work_queue =
tfrt::CreateMultiThreadedWorkQueue(
4, 4);
std::unique_ptr<tfrt::HostAllocator> host_allocator =
tfrt::CreateMallocAllocator();
host_ = std::make_unique<tfrt::HostContext>(
diag_handler, std::move(host_allocator), std::move(work_queue));
status_ = TF_NewStatus();
}
~PluginEventTestFixture() override { TF_DeleteStatus(status_); }
std::unique_ptr<tfrt::HostContext> host_;
const TFNPD_Api* api_;
TF_Status* status_;
};
TEST_F(PluginEventTestFixture, TestAwait) {
std::unique_ptr<TFNPD_DeviceEvent> event;
event.reset(example_plugin::CreateDeviceEventAndSetAvailable(host_.get()));
EXPECT_FALSE(api_->TFNPD_DeviceEventIsReady(event.get()));
api_->TFNPD_DeviceEventAwait(event.get(), status_);
EXPECT_TRUE(api_->TFNPD_DeviceEventIsReady(event.get()));
EXPECT_EQ(TF_GetCode(status_), TF_OK);
}
TEST_F(PluginEventTestFixture, TestAwaitWithError) {
std::unique_ptr<TFNPD_DeviceEvent> event;
event.reset(
example_plugin::CreateDeviceEventAndSetAvailable(host_.get(),
true));
EXPECT_FALSE(api_->TFNPD_DeviceEventIsReady(event.get()));
api_->TFNPD_DeviceEventAwait(event.get(), status_);
EXPECT_TRUE(api_->TFNPD_DeviceEventIsReady(event.get()));
EXPECT_EQ(TF_GetCode(status_), TF_INTERNAL);
EXPECT_STREQ(TF_Message(status_), "ERROR");
}
TEST_F(PluginEventTestFixture, TestInvokeCallback) {
auto result_avref = tfrt::MakeUnconstructedAsyncValueRef<int>();
std::string tennis_goat = "Sampras";
auto done = [result_avref = result_avref.CopyRef(),
&tennis_goat](const tensorflow::Status& status) {
result_avref.emplace(42);
LOG(INFO) << "Invoking status callback. Tennis goat is: "
<< status.message();
tennis_goat = status.message();
};
TFNPD_DeviceEvent* event =
example_plugin::CreateDeviceEventAndSetAvailable(host_.get());
tensorflow::Status status(absl::StatusCode::kInternal, "Federer");
CallbackParams* params =
new CallbackParams{std::move(done), status, api_, event};
api_->TFNPD_DeviceEventAndThen(event, &InvokeCallbackFn,
params);
result_avref.AndThen([result_avref = result_avref.CopyRef(), tennis_goat,
host = std::move(host_)] {
EXPECT_EQ(result_avref.get(), 42);
LOG(INFO) << "Tennis goat: " << tennis_goat;
EXPECT_EQ(tennis_goat, "Federer");
});
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/next_pluggable_device/c/plugin_c_api.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/next_pluggable_device/c/plugin_c_api_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8ea32162-2193-47e5-a498-0124570cc283 | cpp | tensorflow/tensorflow | register_span | tensorflow/core/tfrt/mlrt/interpreter/register_span.h | tensorflow/core/tfrt/mlrt/interpreter/register_span_test.cc | #ifndef TENSORFLOW_CORE_TFRT_MLRT_INTERPRETER_REGISTER_SPAN_H_
#define TENSORFLOW_CORE_TFRT_MLRT_INTERPRETER_REGISTER_SPAN_H_
#include <iterator>
#include "absl/types/span.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/bytecode.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/span.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/iterator.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/value.h"
namespace mlrt {
class RegisterIterator
: public iterator_internal::IteratorBase<RegisterIterator, Value,
absl::Span<Value>> {
public:
using IteratorBase<RegisterIterator, Value, absl::Span<Value>>::IteratorBase;
};
class ConstRegisterIterator
: public iterator_internal::IteratorBase<ConstRegisterIterator, const Value,
absl::Span<const Value>> {
using IteratorBase<ConstRegisterIterator, const Value,
absl::Span<const Value>>::IteratorBase;
};
class RegisterSpan {
public:
using value_type = Value;
using size_type = size_t;
using difference_type = std::ptrdiff_t;
using reference = Value&;
using const_reference = const Value&;
using pointer = Value*;
using const_pointer = const Value*;
using iterator = RegisterIterator;
using const_iterator = ConstRegisterIterator;
RegisterSpan() = default;
RegisterSpan(bc::Span<uint32_t> reg_indices, absl::Span<Value> regs)
: reg_indices_(reg_indices), regs_(regs) {}
Value& operator[](size_t idx) { return regs_[reg_indices_[idx]]; }
const Value& operator[](size_t idx) const { return regs_[reg_indices_[idx]]; }
Value& back() const { return regs_[reg_indices_.back()]; }
size_t size() const { return reg_indices_.size(); }
iterator begin() const { return iterator(reg_indices_.begin(), regs_); }
iterator end() const { return iterator(reg_indices_.end(), regs_); }
RegisterSpan drop_front(int num = 1) {
return RegisterSpan(reg_indices_.drop_front(num), regs_);
}
RegisterSpan drop_back(int num = 1) {
return RegisterSpan(reg_indices_.drop_back(num), regs_);
}
private:
bc::Span<uint32_t> reg_indices_;
absl::Span<Value> regs_;
};
template <typename T>
class RegisterValueIterator {
using Iter = RegisterValueIterator;
public:
using difference_type = std::ptrdiff_t;
using value_type = T;
using pointer = T*;
using reference = T&;
using iterator_category = std::random_access_iterator_tag;
explicit RegisterValueIterator(RegisterIterator reg_iter)
: reg_iter_(reg_iter) {}
reference operator*() const { return (*reg_iter_).Get<T>(); }
pointer operator->() const { return &(*reg_iter_).Get<T>(); }
reference operator[](difference_type i) const {
return (*(reg_iter_ + i)).Get<T>();
}
Iter& operator+=(difference_type d) {
reg_iter_ += d;
return *this;
}
Iter& operator-=(difference_type d) {
reg_iter_ -= d;
return *this;
}
Iter& operator++() {
++reg_iter_;
return *this;
}
Iter operator++(int) {
Iter r = *this;
++reg_iter_;
return r;
}
Iter& operator--() {
--reg_iter_;
return *this;
}
Iter operator--(int) {
Iter r = *this;
--reg_iter_;
return r;
}
Iter operator+(difference_type d) const {
Iter r = *this;
r += d;
return r;
}
friend Iter operator+(difference_type d, const Iter& i) { return i + d; }
Iter operator-(difference_type d) const {
Iter r = *this;
r -= d;
return r;
}
difference_type operator-(const Iter& other) const {
return reg_iter_ - other.reg_iter_;
}
friend bool operator==(const Iter& a, const Iter& b) {
return a.reg_iter_ == b.reg_iter_;
}
friend bool operator!=(const Iter& a, const Iter& b) {
return a.reg_iter_ != b.reg_iter_;
}
friend bool operator<(const Iter& a, const Iter& b) {
return a.reg_iter_ < b.reg_iter_;
}
friend bool operator<=(const Iter& a, const Iter& b) {
return a.reg_iter_ <= b.reg_iter_;
}
friend bool operator>(const Iter& a, const Iter& b) {
return a.reg_iter_ > b.reg_iter_;
}
friend bool operator>=(const Iter& a, const Iter& b) {
return a.reg_iter_ >= b.reg_iter_;
}
private:
RegisterIterator reg_iter_;
};
template <typename T>
class RegisterValueSpan {
public:
using value_type = T;
using size_type = size_t;
using difference_type = std::ptrdiff_t;
using reference = T&;
using const_reference = const T&;
using pointer = T*;
using const_pointer = const T*;
using iterator = RegisterValueIterator<T>;
using const_iterator = RegisterValueIterator<const T>;
RegisterValueSpan(bc::Span<uint32_t> reg_indices, absl::Span<Value> regs)
: reg_span_(reg_indices, regs) {}
RegisterValueSpan(RegisterSpan reg_span) : reg_span_(reg_span) {}
T& operator[](size_t idx) { return reg_span_[idx].Get<T>(); }
const T& operator[](size_t idx) const { return reg_span_[idx].Get<T>(); }
void Destroy(size_t idx) { reg_span_[idx].Destroy<T>(); }
size_t size() const { return reg_span_.size(); }
iterator begin() const { return iterator(reg_span_.begin()); }
iterator end() const { return iterator(reg_span_.end()); }
bool empty() const { return size() == 0; }
RegisterValueSpan drop_front(int num = 1) {
return reg_span_.drop_front(num);
}
RegisterValueSpan drop_back(int num = 1) { return reg_span_.drop_back(num); }
RegisterSpan reg_span() const { return reg_span_; }
private:
RegisterSpan reg_span_;
};
}
#endif | #include "tensorflow/core/tfrt/mlrt/interpreter/register_span.h"
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/types/span.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/bytecode.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/value.h"
namespace mlrt {
namespace {
TEST(RegisterSpan, RegisterSpan) {
std::vector<Value> regs(4);
regs[0].Set<int>(0);
regs[1].Set<int>(1);
regs[2].Set<int>(2);
regs[3].Set<int>(3);
bc::Buffer buffer;
bc::Allocator allocator(&buffer);
auto reg_indices_ctor =
bc::New<bc::Vector<uint32_t>>(&allocator, std::vector<uint32_t>{1, 2});
bc::Vector<uint32_t> reg_indices(buffer.Get(reg_indices_ctor.address()));
RegisterSpan reg_span(reg_indices, absl::MakeSpan(regs));
ASSERT_EQ(reg_span.size(), 2);
EXPECT_EQ(reg_span[0].Get<int>(), 1);
EXPECT_EQ(reg_span[1].Get<int>(), 2);
EXPECT_THAT(RegisterValueSpan<int>(reg_span),
::testing::ElementsAreArray({1, 2}));
}
TEST(RegisterSpan, RegisterSpanToStdVector) {
std::vector<Value> regs(4);
regs[0].Set<int>(0);
regs[1].Set<int>(1);
regs[2].Set<int>(2);
regs[3].Set<int>(3);
bc::Buffer buffer;
bc::Allocator allocator(&buffer);
auto reg_indices_ctor =
bc::New<bc::Vector<uint32_t>>(&allocator, std::vector<uint32_t>{1, 2});
bc::Vector<uint32_t> reg_indices(buffer.Get(reg_indices_ctor.address()));
RegisterSpan reg_span(reg_indices, absl::MakeSpan(regs));
std::vector<Value> subset(reg_span.begin(), reg_span.end());
ASSERT_EQ(subset.size(), 2);
EXPECT_EQ(subset[0].Get<int>(), 1);
EXPECT_EQ(subset[1].Get<int>(), 2);
}
TEST(RegisterSpan, RegisterValueSpan) {
std::vector<Value> regs(4);
regs[0].Set<int>(0);
regs[1].Set<int>(1);
regs[2].Set<int>(2);
regs[3].Set<int>(3);
bc::Buffer buffer;
bc::Allocator allocator(&buffer);
auto reg_indices_ctor =
bc::New<bc::Vector<uint32_t>>(&allocator, std::vector<uint32_t>{1, 3});
bc::Vector<uint32_t> reg_indices(buffer.Get(reg_indices_ctor.address()));
RegisterValueSpan<int> reg_span(reg_indices, absl::MakeSpan(regs));
ASSERT_EQ(reg_span.size(), 2);
EXPECT_EQ(reg_span[0], 1);
EXPECT_EQ(reg_span[1], 3);
EXPECT_THAT(reg_span, ::testing::ElementsAreArray({1, 3}));
}
TEST(RegisterSpan, Modifiers) {
std::vector<Value> regs(4);
regs[0].Set<int>(0);
regs[1].Set<int>(1);
regs[2].Set<int>(2);
regs[3].Set<int>(3);
bc::Buffer buffer;
bc::Allocator allocator(&buffer);
auto reg_indices_ctor = bc::New<bc::Vector<uint32_t>>(
&allocator, std::vector<uint32_t>{0, 2, 1, 3});
bc::Vector<uint32_t> reg_indices(buffer.Get(reg_indices_ctor.address()));
RegisterSpan reg_span(reg_indices, absl::MakeSpan(regs));
RegisterValueSpan<int> reg_value_span(reg_span);
EXPECT_THAT(RegisterValueSpan<int>(reg_span.drop_back(2)),
::testing::ElementsAreArray({0, 2}));
EXPECT_THAT(reg_value_span.drop_front(2),
::testing::ElementsAreArray({1, 3}));
reg_value_span.Destroy(1);
EXPECT_FALSE(regs[2].HasValue());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/mlrt/interpreter/register_span.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/mlrt/interpreter/register_span_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5b95d526-b4b8-453b-be5a-fc62526a0718 | cpp | tensorflow/tensorflow | bytecode | tensorflow/core/tfrt/mlrt/bytecode/bytecode.h | tensorflow/core/tfrt/mlrt/bytecode/bytecode_test.cc | #ifndef TENSORFLOW_CORE_TFRT_MLRT_BYTECODE_BYTECODE_H_
#define TENSORFLOW_CORE_TFRT_MLRT_BYTECODE_BYTECODE_H_
#include <cstddef>
#include <cstring>
#include <iterator>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/strings/string_view.h"
namespace mlrt {
namespace bc {
using BcAddr_t = uint64_t;
class Buffer {
public:
char* Get(BcAddr_t address) {
DCHECK_LT(address, buffer_.size());
return &buffer_.at(address);
}
char* data() { return buffer_.data(); }
const char* data() const { return buffer_.data(); }
size_t size() const { return buffer_.size(); }
bool empty() const { return buffer_.empty(); }
void shrink_to_fit() { buffer_.shrink_to_fit(); }
private:
static_assert(alignof(std::max_align_t) >= 8,
"The bytecode buffer needs to be at least 8-byte aligned.");
std::vector<char> buffer_;
friend class Allocator;
};
class Allocator {
public:
explicit Allocator(Buffer* buffer) : buffer_(buffer) {
DCHECK(buffer != nullptr);
}
BcAddr_t Allocate(size_t size, size_t alignment) {
DCHECK_LE(alignment, 8);
size_t next_align =
(buffer_->buffer_.size() + alignment - 1) / alignment * alignment;
buffer_->buffer_.resize(next_align + size);
return next_align;
}
template <typename T>
BcAddr_t Allocate() {
static_assert(std::is_trivial<T>::value, "T must be trivial.");
return Allocate(sizeof(T), alignof(T));
}
size_t size() const { return buffer_->size(); }
char* raw(BcAddr_t address) { return buffer_->Get(address); }
private:
Buffer* buffer_;
};
template <typename T, typename Enable = void>
struct AccessTraits {
using StorageType = T;
static_assert(std::is_trivial<StorageType>::value,
"StorageType must be trivial.");
using ConstructorType = void;
static T Read(const char* p) {
T value;
std::memcpy(&value, p, sizeof(T));
return value;
}
template <typename... Args>
static BcAddr_t Construct(Allocator* allocator, BcAddr_t address,
Args&&... args) {
T value(std::forward<Args>(args)...);
std::memcpy(allocator->raw(address), &value, sizeof(T));
return address;
}
static void Place(Allocator* allocator, BcAddr_t address, const char* data,
size_t size, size_t num = 1) {
CHECK_LE(size, num * sizeof(T));
std::memcpy(allocator->raw(address), data, size);
}
};
template <typename T>
struct AccessTraits<T, std::void_t<typename T::NonTrivialConstructorType>> {
using StorageType = typename T::StorageType;
static_assert(std::is_trivial<StorageType>::value,
"StorageType must be trivial.");
using ConstructorType = typename T::NonTrivialConstructorType;
static T Read(const char* p) {
return T(p);
}
template <typename... Args>
static ConstructorType Construct(Allocator* allocator, BcAddr_t address,
Args&&... args) {
return ConstructorType(allocator, address, std::forward<Args>(args)...);
}
};
template <typename T>
BcAddr_t Allocate(Allocator* allocator) {
return allocator->Allocate<typename AccessTraits<T>::StorageType>();
}
template <typename T, typename... Args>
auto New(Allocator* allocator, Args&&... args) {
auto address = Allocate<T>(allocator);
return AccessTraits<T>::Construct(allocator, address,
std::forward<Args>(args)...);
}
template <typename T>
class ReadIterator {
using StorageType = typename AccessTraits<T>::StorageType;
public:
using difference_type = std::ptrdiff_t;
using value_type = std::remove_cv_t<T>;
using pointer = void;
using reference = value_type;
using iterator_category = std::input_iterator_tag;
explicit ReadIterator(const char* data) : data_(data) {}
const char* data() const { return data_; }
value_type operator*() const { return AccessTraits<T>::Read(data_); }
ReadIterator& operator++() {
data_ += sizeof(StorageType);
return *this;
}
ReadIterator operator++(int) {
ReadIterator r = *this;
data_ += sizeof(StorageType);
return r;
}
ReadIterator& operator+=(difference_type offset) {
data_ += offset * sizeof(StorageType);
return *this;
}
ReadIterator operator+(difference_type offset) const {
ReadIterator r = *this;
r += offset;
return r;
}
ReadIterator& operator--() {
data_ -= sizeof(StorageType);
return *this;
}
ReadIterator operator--(int) {
ReadIterator r = *this;
data_ -= sizeof(StorageType);
return r;
}
ReadIterator& operator-=(difference_type offset) {
data_ -= offset * sizeof(StorageType);
return *this;
}
ReadIterator operator-(difference_type offset) const {
ReadIterator r = *this;
r -= offset;
return r;
}
difference_type operator-(const ReadIterator& other) const {
DCHECK_EQ((data_ - other.data_) % sizeof(StorageType), 0);
return (data_ - other.data_) / sizeof(StorageType);
}
friend bool operator==(const ReadIterator& a, const ReadIterator& b) {
return a.data_ == b.data_;
}
friend bool operator!=(const ReadIterator& a, const ReadIterator& b) {
return !(a == b);
}
friend bool operator<(const ReadIterator& a, const ReadIterator& b) {
return a.data_ < b.data_;
}
friend bool operator<=(const ReadIterator& a, const ReadIterator& b) {
return a.data_ <= b.data_;
}
friend bool operator>(const ReadIterator& a, const ReadIterator& b) {
return a.data_ > b.data_;
}
friend bool operator>=(const ReadIterator& a, const ReadIterator& b) {
return a.data_ >= b.data_;
}
private:
const char* data_ = nullptr;
};
#define DEFINE_BYTECODE_FIELD(Type, name) \
typename ::mlrt::bc::AccessTraits<Type>::StorageType name; \
static const char* name##_pointer(const char* base) { \
return base + offsetof(Self, name); \
} \
static ::mlrt::bc::BcAddr_t name##_address(::mlrt::bc::BcAddr_t base) { \
return base + offsetof(Self, name); \
} \
static Type read_##name(const char* base) { \
return ::mlrt::bc::AccessTraits<Type>::Read(name##_pointer(base)); \
} \
template <typename... Args> \
static auto construct_##name(::mlrt::bc::Allocator* allocator, \
::mlrt::bc::BcAddr_t base, Args&&... args) { \
return ::mlrt::bc::AccessTraits<Type>::Construct( \
allocator, name##_address(base), std::forward<Args>(args)...); \
} \
static_assert( \
std::is_trivial< \
typename ::mlrt::bc::AccessTraits<Type>::StorageType>::value, \
"Bytecode storage types must be trivial.")
template <typename T, typename SizeType = uint32_t>
class Vector {
public:
struct Storage {
using Self = Storage;
DEFINE_BYTECODE_FIELD(SizeType, size);
DEFINE_BYTECODE_FIELD(SizeType, offset);
};
static_assert(std::is_trivial<Storage>::value, "StorageType is trivial");
static_assert(std::is_standard_layout<Storage>::value,
"StorageType has standard layout");
static_assert(sizeof(Storage) == 2 * sizeof(SizeType));
static_assert(alignof(Storage) == alignof(SizeType));
using StorageType = Storage;
using ElementStorageType = typename AccessTraits<T>::StorageType;
using value_type = T;
using iterator = ReadIterator<T>;
using const_iterator = iterator;
class Constructor {
public:
Constructor(Allocator* allocator, BcAddr_t address, size_t size)
: allocator_(allocator), address_(address) {
DCHECK_GE(allocator->size(), address + sizeof(StorageType));
size_t data_start = allocator->Allocate(size * sizeof(ElementStorageType),
alignof(ElementStorageType));
CHECK_LT(size, std::numeric_limits<SizeType>::max());
CHECK_LT(data_start - address,
std::numeric_limits<SizeType>::max());
storage_.size = size;
storage_.offset = data_start - address;
AccessTraits<StorageType>::Construct(allocator, address, storage_);
}
Constructor(Allocator* allocator, BcAddr_t address,
const std::vector<T>& vec)
: Constructor(allocator, address, vec.size()) {
Assign(vec.begin(), vec.end());
}
template <typename... Args>
auto ConstructAt(size_t index, Args&&... args) {
DCHECK_LT(index, size());
return AccessTraits<T>::Construct(allocator_, GetElementAddress(index),
std::forward<Args>(args)...);
}
template <typename V>
void Assign(std::initializer_list<V> ilist) {
DCHECK_EQ(ilist.size(), size());
Assign(ilist.begin(), ilist.end());
}
template <typename Range>
void Assign(const Range& range) {
DCHECK_EQ(std::distance(std::begin(range), std::end(range)), size());
Assign(std::begin(range), std::end(range));
}
template <typename Iter>
void Assign(Iter begin, Iter end) {
size_t i = 0;
for (; begin != end; ++begin) {
ConstructAt(i++, *begin);
}
DCHECK_EQ(i, size());
}
template <
typename U = T,
typename std::enable_if<
std::is_same_v<typename AccessTraits<U>::ConstructorType, void>,
int>::type = 0>
void Place(const char* data, size_t size) {
AccessTraits<U>::Place(allocator_, address_ + storage_.offset, data, size,
storage_.size);
}
size_t size() const { return storage_.size; }
BcAddr_t address() const { return address_; }
private:
BcAddr_t GetElementAddress(size_t index) const {
return address_ + storage_.offset + index * sizeof(ElementStorageType);
}
Allocator* allocator_;
BcAddr_t address_;
Vector::Storage storage_;
};
using NonTrivialConstructorType = Constructor;
explicit Vector(const char* p) : p_(p) {
static_assert(!std::is_trivial_v<Vector>);
DCHECK(p_ != nullptr);
}
Vector() {
static_assert(!std::is_trivial_v<Vector>);
static Storage kEmptyStorage{0, 0};
p_ = reinterpret_cast<const char*>(&kEmptyStorage);
}
const char* data() const { return p_ + offset(); }
size_t size() const { return StorageType::read_size(p_); }
bool empty() const { return size() == 0; }
iterator begin() const { return iterator(data()); }
iterator end() const {
return iterator(data() + size() * sizeof(ElementStorageType));
}
T operator[](size_t index) const {
DCHECK_LT(index, size());
auto iter = begin();
iter += index;
return *iter;
}
private:
SizeType offset() const { return StorageType::read_offset(p_); }
const char* p_;
};
class String : public Vector<char, uint64_t> {
public:
using Base = Vector<char, uint64_t>;
using Base::Base;
class Constructor : public Base::Constructor {
public:
using Base::Constructor::Assign;
Constructor(Allocator* allocator, BcAddr_t address, absl::string_view str)
: Base::Constructor(allocator, address, str.size()) {
Assign(str.begin(), str.end());
}
};
using NonTrivialConstructorType = Constructor;
using Base::data;
using Base::size;
std::string str() const { return std::string(data(), size()); }
absl::string_view Get() const { return absl::string_view(data(), size()); }
operator absl::string_view() const {
return absl::string_view(data(), size());
}
friend bool operator==(String x, absl::string_view y) { return x.Get() == y; }
friend bool operator==(absl::string_view x, String y) { return x == y.Get(); }
};
}
}
#endif | #include "tensorflow/core/tfrt/mlrt/bytecode/bytecode.h"
#include <array>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
namespace mlrt {
namespace bc {
namespace {
TEST(ByteCodeTest, VectorOfTrivial) {
Buffer buffer;
Allocator alloc(&buffer);
auto ctor = New<Vector<uint32_t>>(&alloc, 4);
for (int i = 0; i < 4; ++i) {
ctor.ConstructAt(i, i);
}
Vector<uint32_t> view(buffer.Get(ctor.address()));
ASSERT_EQ(view.size(), 4);
EXPECT_EQ(view[0], 0);
EXPECT_EQ(view[1], 1);
EXPECT_EQ(view[2], 2);
EXPECT_EQ(view[3], 3);
EXPECT_THAT(view, ::testing::ElementsAreArray({0, 1, 2, 3}));
Vector<uint32_t> empty;
ASSERT_TRUE(empty.empty());
}
TEST(ByteCodeTest, VectorOfVector) {
Buffer buffer;
Allocator alloc(&buffer);
using T = Vector<uint32_t>;
using V = Vector<T>;
auto vctor = New<V>(&alloc, 3);
{
auto tctor = vctor.ConstructAt(0, 2);
tctor.ConstructAt(0, 0);
tctor.ConstructAt(1, 1);
}
{
auto tctor = vctor.ConstructAt(1, 1);
tctor.ConstructAt(0, 2);
}
vctor.ConstructAt(2, 0);
V v(buffer.Get(vctor.address()));
auto t0 = v[0];
ASSERT_EQ(t0.size(), 2);
EXPECT_EQ(t0[0], 0);
EXPECT_EQ(t0[1], 1);
EXPECT_THAT(t0, testing::ElementsAreArray({0, 1}));
auto t1 = v[1];
ASSERT_EQ(t1.size(), 1);
EXPECT_EQ(t1[0], 2);
EXPECT_THAT(t1, testing::ElementsAreArray({2}));
auto t2 = v[2];
ASSERT_EQ(t2.size(), 0);
Vector<Vector<uint32_t>> empty;
ASSERT_TRUE(empty.empty());
}
TEST(ByteCodeTest, String) {
Buffer buffer;
Allocator alloc(&buffer);
auto ctor = New<String>(&alloc, "bytecode string");
String view(buffer.Get(ctor.address()));
EXPECT_EQ(view.str(), "bytecode string");
EXPECT_EQ(view.Get(), "bytecode string");
EXPECT_EQ(absl::string_view(view), "bytecode string");
}
TEST(ByteCodeTest, PlaceVectorOfTrivial) {
Buffer buffer;
Allocator alloc(&buffer);
auto ctor = New<Vector<uint32_t>>(&alloc, 4);
std::array<uint32_t, 4> data = {0, 1, 2, 3};
ctor.Place(reinterpret_cast<const char*>(data.data()),
data.size() * sizeof(uint32_t));
Vector<uint32_t> view(buffer.Get(ctor.address()));
ASSERT_EQ(view.size(), 4);
EXPECT_EQ(view[0], 0);
EXPECT_EQ(view[1], 1);
EXPECT_EQ(view[2], 2);
EXPECT_EQ(view[3], 3);
EXPECT_THAT(view, ::testing::ElementsAreArray({0, 1, 2, 3}));
}
TEST(ByteCodeTest, ReadIteratorDistance) {
Buffer buffer;
Allocator alloc(&buffer);
auto ctor = New<Vector<uint32_t>>(&alloc, 4);
for (int i = 0; i < 4; ++i) {
ctor.ConstructAt(i, i);
}
Vector<uint32_t> view(buffer.Get(ctor.address()));
EXPECT_EQ(view.end() - view.begin(), 4);
}
TEST(ByteCodeTest, ReadIteratorCompare) {
Buffer buffer;
Allocator alloc(&buffer);
auto ctor = New<Vector<uint32_t>>(&alloc, 4);
for (int i = 0; i < 4; ++i) {
ctor.ConstructAt(i, i);
}
Vector<uint32_t> view(buffer.Get(ctor.address()));
EXPECT_GE(view.end(), view.begin());
EXPECT_GT(view.end(), view.begin());
EXPECT_LE(view.begin(), view.end());
EXPECT_LT(view.begin(), view.end());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/mlrt/bytecode/bytecode.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/mlrt/bytecode/bytecode_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4ada8775-bd63-45fb-95ee-e5da7caaac97 | cpp | tensorflow/tensorflow | span | tensorflow/core/tfrt/mlrt/bytecode/span.h | tensorflow/core/tfrt/mlrt/bytecode/span_test.cc | #ifndef TENSORFLOW_CORE_TFRT_MLRT_BYTECODE_SPAN_H_
#define TENSORFLOW_CORE_TFRT_MLRT_BYTECODE_SPAN_H_
#include <cstdint>
#include <vector>
#include "tensorflow/core/tfrt/mlrt/bytecode/bytecode.h"
namespace mlrt {
namespace bc {
// A non-owning, read-only view over `size` consecutive elements of type T
// serialized in a bytecode buffer starting at `data`. Elements are decoded
// on access through ReadIterator<T>; the underlying bytes must outlive the
// Span.
template <typename T>
class Span {
 public:
  using value_type = T;
  using iterator = ReadIterator<T>;
  using const_iterator = iterator;

  Span() = default;
  Span(const char* data, size_t size) : data_(data), size_(size) {}

  // Implicit conversions from the owning bytecode containers; the Span
  // aliases their storage.
  template <typename SizeType>
  Span(const Vector<T, SizeType>& vec)
      : Span(vec.data(), vec.size()) {}
  Span(const String& vec)
      : Span(vec.data(), vec.size()) {}
  // Views a std::vector's contiguous elements as raw bytes.
  Span(const std::vector<T>& vec)
      : Span(reinterpret_cast<const char*>(vec.data()), vec.size()) {}

  const char* data() const { return data_; }
  // Byte address of the `index`-th element (fixed sizeof(T) stride).
  const char* data(size_t index) const { return data_ + index * sizeof(T); }

  iterator begin() const { return iterator(data_); }
  iterator end() const { return iterator(data_ + size_ * sizeof(T)); }

  // Decodes and returns the last element; requires a non-empty span.
  T back() const {
    DCHECK_GT(size_, 0);
    return *iterator(data_ + (size_ - 1) * sizeof(T));
  }

  // Decodes and returns (by value, not reference) the element at `index`.
  T operator[](size_t index) const {
    DCHECK_LT(index, size());
    auto iter = begin();
    iter += index;
    return *iter;
  }

  size_t size() const { return size_; }
  bool empty() const { return size_ == 0; }

  // Sub-view without the first `num` elements. Advances via the iterator
  // rather than raw pointer arithmetic.
  Span drop_front(size_t num = 1) const {
    auto beg = begin();
    beg += num;
    DCHECK_GE(size(), num);
    return Span(beg.data(), size() - num);
  }
  // Sub-view without the last `num` elements.
  Span drop_back(size_t num = 1) const {
    DCHECK_GE(size(), num);
    return Span(data(), size() - num);
  }

 private:
  const char* data_ = nullptr;
  size_t size_ = 0;
};
}
}
#endif | #include "tensorflow/core/tfrt/mlrt/bytecode/span.h"
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/core/tfrt/mlrt/bytecode/bytecode.h"
namespace mlrt {
namespace bc {
namespace {
TEST(SpanTest, SpanOfTrivial) {
  Buffer buffer;
  Allocator alloc(&buffer);
  auto vec_ctor = New<Vector<uint32_t>>(&alloc, 4);
  for (int idx = 0; idx < 4; ++idx) {
    vec_ctor.ConstructAt(idx, idx);
  }
  Vector<uint32_t> vec(buffer.Get(vec_ctor.address()));
  // A Span constructed from a bytecode Vector aliases its elements.
  Span<uint32_t> span(vec);
  ASSERT_EQ(span.size(), 4);
  for (uint32_t idx = 0; idx < 4; ++idx) {
    EXPECT_EQ(span[idx], idx);
  }
  EXPECT_THAT(span, testing::ElementsAreArray({0, 1, 2, 3}));
}
TEST(BefTest, SpanOfVector) {
  Buffer buffer;
  Allocator alloc(&buffer);
  using T = Vector<uint32_t>;
  using V = Vector<T>;
  // Build a nested vector: {{0, 1}, {2}, {}}.
  auto outer_ctor = New<V>(&alloc, 3);
  {
    auto inner_ctor = outer_ctor.ConstructAt(0, 2);
    inner_ctor.ConstructAt(0, 0);
    inner_ctor.ConstructAt(1, 1);
  }
  {
    auto inner_ctor = outer_ctor.ConstructAt(1, 1);
    inner_ctor.ConstructAt(0, 2);
  }
  outer_ctor.ConstructAt(2, 0);
  V outer(buffer.Get(outer_ctor.address()));
  Span<T> span(outer);
  T first = span[0];
  ASSERT_EQ(first.size(), 2);
  EXPECT_EQ(first[0], 0);
  EXPECT_EQ(first[1], 1);
  EXPECT_THAT(first, testing::ElementsAreArray({0, 1}));
  T second = span[1];
  ASSERT_EQ(second.size(), 1);
  EXPECT_EQ(second[0], 2);
  EXPECT_THAT(second, testing::ElementsAreArray({2}));
  T third = span[2];
  ASSERT_EQ(third.size(), 0);
}
TEST(SpanTest, SpanOfStdVectorTrivial) {
  // Span can also view a plain std::vector's contiguous storage.
  std::vector<uint32_t> source = {0, 1, 2, 3};
  Span<uint32_t> span(source);
  EXPECT_THAT(span, testing::ElementsAreArray({0, 1, 2, 3}));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/mlrt/bytecode/span.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/mlrt/bytecode/span_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
87e44559-632d-4de9-a01a-784c19792410 | cpp | tensorflow/tensorflow | hash_tools | tensorflow/core/grappler/graph_analyzer/hash_tools.h | tensorflow/core/grappler/graph_analyzer/hash_tools_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_GRAPH_ANALYZER_HASH_TOOLS_H_
#define TENSORFLOW_CORE_GRAPPLER_GRAPH_ANALYZER_HASH_TOOLS_H_
#include <cstddef>
namespace tensorflow {
namespace grappler {
namespace graph_analyzer {
// Folds `from` into the running hash `*to` using the boost::hash_combine
// recipe. Order-sensitive: combining a then b differs from b then a.
inline void CombineHash(size_t from, size_t* to) {
  const size_t seed = *to;
  *to = seed ^ (from + 0x9e3779b9 + (seed << 6) + (seed >> 2));
}
// Folds `from` into `*to` such that the result does not depend on the
// order in which values are combined (plain sum with a salt constant).
inline void CombineHashCommutative(size_t from, size_t* to) {
  *to += from + 0x9e3779b9;
}
}
}
}
#endif | #include "tensorflow/core/grappler/graph_analyzer/hash_tools.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace tensorflow {
namespace grappler {
namespace graph_analyzer {
namespace test {
namespace {
using ::testing::Eq;
TEST(HashToolsTest, CombineHashCommutative) {
  // Combining (a, b) in either order must yield the same hash.
  size_t a = 0;
  size_t b = 999;
  size_t a_then_b = a;
  CombineHashCommutative(b, &a_then_b);
  size_t b_then_a = b;
  CombineHashCommutative(a, &b_then_a);
  EXPECT_THAT(a_then_b, Eq(b_then_a));
}
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/graph_analyzer/hash_tools.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/graph_analyzer/hash_tools_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
61bb902e-4e11-4c95-ab9f-f2ec2d85005e | cpp | tensorflow/tensorflow | graph_view_internal | tensorflow/core/grappler/utils/graph_view_internal.h | tensorflow/core/grappler/utils/graph_view_internal_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_UTILS_GRAPH_VIEW_INTERNAL_H_
#define TENSORFLOW_CORE_GRAPPLER_UTILS_GRAPH_VIEW_INTERNAL_H_
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/hash/hash.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/graph/tensor_id.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/gtl/map_util.h"
namespace tensorflow {
namespace grappler {
namespace utils {
namespace internal {
constexpr int kMissingSlot = -2;
constexpr int kMissingIndex = -1;
constexpr int kNodeNamePresent = -1;
// Lightweight handle identifying one port of one node inside a graph view.
// Stores the owning view pointer plus integer indices; a default-constructed
// handle uses the kMissingIndex/kMissingSlot sentinels and a null view.
template <typename NodeViewT, typename GraphViewT>
class NodeIndexAndPortIndex {
 public:
  NodeIndexAndPortIndex()
      : graph_view_(nullptr),
        node_index_(kMissingIndex),
        port_index_(kMissingSlot) {}

  NodeIndexAndPortIndex(GraphViewT* graph_view, int node_index, int port_index)
      : graph_view_(graph_view),
        node_index_(node_index),
        port_index_(port_index) {}

  bool operator==(const NodeIndexAndPortIndex& other) const {
    return port_index_ == other.port_index_ &&
           node_index_ == other.node_index_ && graph_view_ == other.graph_view_;
  }

  // Hashes only the indices; the graph view pointer is excluded, so handles
  // from different views with equal indices hash alike (== still separates
  // them).
  template <typename Hash>
  friend Hash AbslHashValue(Hash h, const NodeIndexAndPortIndex& n) {
    return Hash::combine(std::move(h), n.node_index_, n.port_index_);
  }

  // Returns the node this handle points at, or nullptr for a detached
  // (default-constructed) handle.
  NodeViewT* node_view() const {
    if (graph_view_ == nullptr) {
      return nullptr;
    }
    return graph_view_->GetNode(node_index_);
  }

  int node_index() const { return node_index_; }

  // Port index within the node.
  int index() const { return port_index_; }

 protected:
  GraphViewT* graph_view_;
  int node_index_;
  int port_index_;
};
// Hashable (NodeDef pointer, port index) pair identifying a specific port of
// a node in a GraphDef. Equality and hashing both use the pointer identity,
// not the node contents.
class NodeDefAndPortIndex {
 public:
  NodeDefAndPortIndex(const NodeDef* node_def, int port_index)
      : node_def_(node_def), port_index_(port_index) {}

  bool operator==(const NodeDefAndPortIndex& other) const {
    return node_def_ == other.node_def_ && port_index_ == other.port_index_;
  }

  template <typename Hash>
  friend Hash AbslHashValue(Hash h, const NodeDefAndPortIndex& n) {
    return Hash::combine(std::move(h), n.node_def_, n.port_index_);
  }

 private:
  const NodeDef* node_def_;
  int port_index_;
};
// Abstract base for node views. A node view exposes a NodeDef's name/op/
// device/attributes plus its resolved fanin (input) and fanout (output)
// edges as typed handles. `IsConst` selects whether the underlying NodeDef
// is mutable. Concrete subclasses supply node(), HasFanin()/HasFanout(),
// and the missing-edge sentinels.
template <typename FaninViewT, typename FanoutViewT, typename GraphViewT,
          bool IsConst>
class NodeViewInternal {
 private:
  using NodeDefT =
      typename std::conditional<IsConst, const NodeDef, NodeDef>::type;

 public:
  explicit NodeViewInternal(GraphViewT* graph_view, int node_index)
      : graph_view_(graph_view),
        node_index_(node_index),
        attrs_(AttrSlice(graph_view->graph()->node(node_index))) {}

  NodeViewInternal()
      : graph_view_(nullptr), node_index_(kMissingIndex), attrs_(AttrSlice()) {}

  virtual ~NodeViewInternal() {}

  NodeViewInternal(NodeViewInternal&&) = default;
  NodeViewInternal& operator=(NodeViewInternal&&) = default;

  // Equality is identity within one graph view (same node slot).
  bool operator==(const NodeViewInternal& other) const {
    return node_index_ == other.node_index_ && graph_view_ == other.graph_view_;
  }

  template <typename Hash>
  friend Hash AbslHashValue(Hash h, const NodeViewInternal& n) {
    return Hash::combine(std::move(h), n.node_index_);
  }

  // Underlying NodeDef; const-ness depends on IsConst.
  virtual NodeDefT* node() const = 0;

  int node_index() const { return node_index_; }
  const string& GetName() const { return node()->name(); }
  const string& GetOp() const { return node()->op(); }
  const string& GetDevice() const { return node()->device(); }

  // Regular (data) fanins, ordered by input position.
  const std::vector<FanoutViewT>& GetRegularFanins() const {
    return regular_fanins_;
  }

  // i-th regular fanin, or the subclass's "missing" sentinel when i is out
  // of range.
  const FanoutViewT& GetRegularFanin(int i) const {
    int regular_fanins_size = regular_fanins_.size();
    if (i < 0 || i >= regular_fanins_size) {
      return GetMissingFanin();
    }
    return regular_fanins_[i];
  }

  // Control-dependency fanins.
  const std::vector<FanoutViewT>& GetControllingFanins() const {
    return controlling_fanins_;
  }

  // Regular fanouts grouped by this node's output port.
  const std::vector<std::vector<FaninViewT>>& GetRegularFanouts() const {
    return regular_fanouts_by_port_;
  }

  // Fanouts of output port i, or the "missing" sentinel list when i is out
  // of range.
  const std::vector<FaninViewT>& GetRegularFanout(int i) const {
    int regular_fanouts_by_port_size = regular_fanouts_by_port_.size();
    if (i < 0 || i >= regular_fanouts_by_port_size) {
      return GetMissingFanout();
    }
    return regular_fanouts_by_port_[i];
  }

  // Nodes depending on this node via a control edge.
  const std::vector<FaninViewT>& GetControlledFanouts() const {
    return controlled_fanouts_;
  }

  int NumRegularFanins() const { return regular_fanins_.size(); }
  int NumControllingFanins() const { return controlling_fanins_.size(); }
  // Total regular fanouts across all ports (cached counter, not recomputed).
  int NumRegularFanouts() const { return num_regular_fanouts_; }
  int NumControlledFanouts() const { return controlled_fanouts_.size(); }

  virtual bool HasFanin(const FanoutViewT& fanin) const = 0;
  virtual bool HasFanout(const FaninViewT& fanout) const = 0;

  // Attribute lookup; returns nullptr when the attribute is absent.
  const AttrValue* GetAttr(absl::string_view attr_name) const {
    return attrs_.Find(attr_name);
  }
  const AttrSlice& GetAttrs() const { return attrs_; }
  int NumAttrs() const { return attrs_.size(); }
  bool HasAttr(absl::string_view attr_name) const {
    return attrs_.Find(attr_name) != nullptr;
  }

 protected:
  virtual inline const FanoutViewT& GetMissingFanin() const = 0;
  virtual inline const std::vector<FaninViewT>& GetMissingFanout() const = 0;

  std::vector<FanoutViewT> regular_fanins_;
  std::vector<FanoutViewT> controlling_fanins_;
  std::vector<std::vector<FaninViewT>> regular_fanouts_by_port_;
  int num_regular_fanouts_ = 0;
  std::vector<FaninViewT> controlled_fanouts_;

  GraphViewT* graph_view_;
  int node_index_;
  AttrSlice attrs_;
};
// Base class for graph views: an indexed overlay on a GraphDef supporting
// node lookup by position or by name. `IsConst` selects whether the
// underlying GraphDef is mutable.
template <typename NodeViewT, typename FaninViewT, typename FanoutViewT,
          bool IsConst>
class GraphViewInternal {
 private:
  using GraphDefT =
      typename std::conditional<IsConst, const GraphDef, GraphDef>::type;

 public:
  explicit GraphViewInternal(GraphDefT* graph) : graph_(graph) {}
  virtual ~GraphViewInternal() {}

  // Two views are equal iff they wrap the same GraphDef instance.
  bool operator==(const GraphViewInternal& other) const {
    return graph_ == other.graph_;
  }

  GraphDefT* graph() const { return graph_; }

  // Lookup by index; returns nullptr when out of range.
  const NodeViewT* GetNode(int node_index) const {
    int nodes_size = nodes_.size();
    if (node_index < 0 || node_index >= nodes_size) {
      return nullptr;
    }
    return &nodes_[node_index];
  }

  NodeViewT* GetNode(int node_index) {
    int nodes_size = nodes_.size();
    if (node_index < 0 || node_index >= nodes_size) {
      return nullptr;
    }
    return &nodes_[node_index];
  }

  // Lookup by name; returns nullptr when no node has that name.
  const NodeViewT* GetNode(absl::string_view node_name) const {
    auto it = node_index_by_name_.find(node_name);
    if (it == node_index_by_name_.end()) {
      return nullptr;
    }
    return &nodes_[it->second];
  }

  NodeViewT* GetNode(absl::string_view node_name) {
    auto it = node_index_by_name_.find(node_name);
    if (it == node_index_by_name_.end()) {
      return nullptr;
    }
    return &nodes_[it->second];
  }

  const std::vector<NodeViewT>& GetNodes() const { return nodes_; }

  bool HasNode(absl::string_view node_name) const {
    return node_index_by_name_.contains(node_name);
  }

  int NumNodes() const { return nodes_.size(); }

 protected:
  // Drops all node views and releases their storage (swap-with-temporary
  // idiom frees capacity, unlike clear()).
  void Reset() {
    std::vector<NodeViewT>().swap(nodes_);
    absl::flat_hash_map<absl::string_view, int>().swap(node_index_by_name_);
  }

  std::vector<NodeViewT> nodes_;
  // NOTE(review): keys are string_views; presumably they alias node-name
  // storage owned elsewhere (e.g. the NodeDefs) — confirm in subclasses.
  absl::flat_hash_map<absl::string_view, int> node_index_by_name_;
  GraphDefT* graph_;
  // Sentinels returned by subclasses for out-of-range edge queries.
  const FanoutViewT missing_fanin_;
  const std::vector<FaninViewT> missing_fanout_;
};
// Sentinel tensor id (empty node name + kMissingSlot) marking unset entries
// in staged fanin lists.
inline SafeTensorId EmptyTensorId() {
  SafeTensorId empty("", internal::kMissingSlot);
  return empty;
}
// True iff `tensor_id` is the sentinel produced by EmptyTensorId().
inline bool IsEmptyTensorId(const TensorId tensor_id) {
  return tensor_id.index() == internal::kMissingSlot &&
         tensor_id.node().empty();
}
// Accumulated, not-yet-applied mutations for one existing node in a
// GraphViewT. Fields are deltas relative to the node's current state;
// IsEmpty()/IsWellFormed() below interpret and validate them.
template <typename GraphViewT>
struct NodeViewDiff {
  explicit NodeViewDiff(GraphViewT* graph_view, int node_index)
      : graph_view(graph_view), node_index(node_index) {}

  GraphViewT* graph_view;
  int node_index;
  // Pending name/op/device changes; each update_* flag gates its value.
  string name;
  bool update_name = false;
  string op;
  bool update_op = false;
  string device;
  bool update_device = false;
  // Regular fanins staged past the node's current ones, indexed relative to
  // the current fanin count; EmptyTensorId() marks unset gaps.
  std::vector<SafeTensorId> regular_inputs_to_add;
  int num_regular_inputs_to_add = 0;
  // In-place replacements of existing regular fanins, keyed by position.
  std::map<int, SafeTensorId> regular_inputs_to_update;
  // Removal flags for existing regular fanins, stored in REVERSE order:
  // index 0 flags the node's last fanin (see AddOrUpdateRegularFanin).
  std::vector<bool> regular_inputs_to_remove;
  int num_regular_inputs_to_remove = 0;
  // Control dependencies staged for addition (by name) / removal (by index).
  absl::flat_hash_set<string> controlling_inputs_to_add;
  std::set<int> controlling_inputs_to_remove;
  // Attributes staged for addition/overwrite and for removal.
  absl::flat_hash_map<string, AttrValue> attrs_to_add;
  absl::flat_hash_set<string> attrs_to_remove;
  absl::optional<AttrValueMap> processed_attrs;
};
// Stages renaming the node to `name`. Setting the current name cancels any
// pending rename. Always returns true.
template <typename GraphViewT>
inline bool UpdateName(NodeViewDiff<GraphViewT>* diff, absl::string_view name) {
  const bool unchanged =
      diff->graph_view->GetNode(diff->node_index)->GetName() == name;
  if (unchanged) {
    diff->name.clear();
  } else {
    diff->name = string(name);
  }
  diff->update_name = !unchanged;
  return true;
}
// Stages changing the node's op to `op`. Setting the current op cancels any
// pending change. Always returns true.
template <typename GraphViewT>
inline bool UpdateOp(NodeViewDiff<GraphViewT>* diff, absl::string_view op) {
  const bool unchanged =
      diff->graph_view->GetNode(diff->node_index)->GetOp() == op;
  if (unchanged) {
    diff->op.clear();
  } else {
    diff->op = string(op);
  }
  diff->update_op = !unchanged;
  return true;
}
// Stages changing the node's device to `device`. Setting the current device
// cancels any pending change. Always returns true.
template <typename GraphViewT>
inline bool UpdateDevice(NodeViewDiff<GraphViewT>* diff,
                         absl::string_view device) {
  const bool unchanged =
      diff->graph_view->GetNode(diff->node_index)->GetDevice() == device;
  if (unchanged) {
    diff->device.clear();
  } else {
    diff->device = string(device);
  }
  diff->update_device = !unchanged;
  return true;
}
// Writes `value` into (*v)[i], growing the vector with `default_value`
// padding when i is past the end. Returns true when the slot previously
// held `default_value` (or did not exist), i.e. when this counts as a new
// addition rather than an overwrite.
template <typename T, typename U>
inline bool AddOrUpdateAtIndex(std::vector<T>* v, int i, const U& value,
                               const T& default_value) {
  const int size = v->size();
  if (i >= size) {
    if (i > size) {
      // Pad the gap [size, i) with default_value before appending.
      v->reserve(i + 1);
      v->resize(i, default_value);
    }
    v->push_back({value});
    return true;
  }
  const bool was_default = (*v)[i] == default_value;
  (*v)[i] = {value};
  return was_default;
}
// Returns whether `node_name` will exist after pending mutations are
// applied: an entry in `updated_node_names` takes precedence (present only
// when mapped to kNodeNamePresent); otherwise the current graph is checked.
template <typename GraphViewT>
inline bool CheckNodeNameExists(
    absl::string_view node_name,
    const absl::flat_hash_map<absl::string_view, int>& updated_node_names,
    const GraphViewT* graph_view) {
  const auto it = updated_node_names.find(node_name);
  return it == updated_node_names.end()
             ? graph_view->HasNode(node_name)
             : it->second == kNodeNamePresent;
}
// Stages setting regular fanin `fanin` at input position `index` of the
// node identified by `diff`. Positions below the node's current fanin count
// become in-place updates (also unstaging any pending removal of that
// position); positions at/above it are staged as appended fanins. Returns
// false only for a negative index.
template <typename GraphViewT>
inline bool AddOrUpdateRegularFanin(NodeViewDiff<GraphViewT>* diff, int index,
                                    const TensorId& fanin) {
  if (index < 0) {
    return false;
  }
  auto* node_view = diff->graph_view->GetNode(diff->node_index);
  const int num_regular_fanins = node_view->NumRegularFanins();
  if (index < num_regular_fanins) {
    // Existing fanin: cancel a staged removal of this position, if any.
    // Removal flags are stored in reverse order (slot 0 == last fanin).
    const int relative_removal_index = num_regular_fanins - index - 1;
    int diff_regular_inputs_to_remove_size =
        diff->regular_inputs_to_remove.size();
    if (relative_removal_index < diff_regular_inputs_to_remove_size &&
        diff->regular_inputs_to_remove[relative_removal_index]) {
      diff->regular_inputs_to_remove[relative_removal_index] = false;
      --diff->num_regular_inputs_to_remove;
    }
    // Only stage an update when the fanin actually differs from the current
    // one.
    const auto& existing_fanin = node_view->GetRegularFanin(index);
    if (existing_fanin.index() != fanin.index() ||
        existing_fanin.node_view()->GetName() != fanin.node()) {
      gtl::InsertOrUpdate(&diff->regular_inputs_to_update, index,
                          SafeTensorId(fanin));
    }
  } else {
    // New fanin past the current end: store at an offset relative to the
    // current fanin count; AddOrUpdateAtIndex pads gaps with the empty
    // sentinel and reports whether this was a fresh addition.
    const int relative_add_index = index - num_regular_fanins;
    if (AddOrUpdateAtIndex(&diff->regular_inputs_to_add, relative_add_index,
                           fanin, EmptyTensorId())) {
      ++diff->num_regular_inputs_to_add;
    }
  }
  return true;
}
// Stages removal of the regular fanin at input position `index`. Existing
// positions set a reverse-ordered removal flag (discarding any pending
// update for that position); positions in the staged-append region clear
// the staged fanin instead. Returns false for a negative index or when
// nothing is staged at an appended position.
template <typename GraphViewT>
inline bool RemoveRegularFanin(NodeViewDiff<GraphViewT>* diff, int index) {
  if (index < 0) {
    return false;
  }
  auto* node_view = diff->graph_view->GetNode(diff->node_index);
  const int num_regular_fanins = node_view->NumRegularFanins();
  if (index < num_regular_fanins) {
    // A removal supersedes any staged in-place update of the same position.
    diff->regular_inputs_to_update.erase(index);
    // Removal flags are stored in reverse order (slot 0 == last fanin).
    const int relative_removal_index = num_regular_fanins - index - 1;
    if (AddOrUpdateAtIndex(&diff->regular_inputs_to_remove,
                           relative_removal_index,
                           true, false)) {
      ++diff->num_regular_inputs_to_remove;
    }
  } else {
    // Appended region: only clear a slot that actually holds a staged fanin.
    const int relative_add_index = index - num_regular_fanins;
    int diff_regular_inputs_to_add_size = diff->regular_inputs_to_add.size();
    if (relative_add_index >= diff_regular_inputs_to_add_size ||
        IsEmptyTensorId(diff->regular_inputs_to_add[relative_add_index])) {
      return false;
    }
    diff->regular_inputs_to_add[relative_add_index] = EmptyTensorId();
    --diff->num_regular_inputs_to_add;
  }
  return true;
}
// Stages adding a control dependency on `fanin_node_name`. When the control
// edge already exists (`control_index` != kMissingIndex) this only cancels
// a pending removal of it. Always returns true.
template <typename GraphViewT>
inline bool AddControllingFanin(NodeViewDiff<GraphViewT>* diff,
                                int control_index,
                                absl::string_view fanin_node_name) {
  if (control_index != kMissingIndex) {
    diff->controlling_inputs_to_remove.erase(control_index);
  } else {
    diff->controlling_inputs_to_add.emplace(fanin_node_name);
  }
  return true;
}
// Stages removing a control dependency on `fanin_node_name`. When the edge
// does not exist yet (`control_index` == kMissingIndex) this only cancels a
// pending addition. Always returns true.
template <typename GraphViewT>
inline bool RemoveControllingFanin(NodeViewDiff<GraphViewT>* diff,
                                   int control_index,
                                   absl::string_view fanin_node_name) {
  if (control_index != kMissingIndex) {
    diff->controlling_inputs_to_remove.emplace(control_index);
  } else {
    diff->controlling_inputs_to_add.erase(fanin_node_name);
  }
  return true;
}
// Stages adding or overwriting attribute `attr_name` with `attr_value`.
// An attribute pending removal that is re-added must leave the removal set;
// otherwise the diff would carry the attribute in both sets. Always returns
// true.
// NOTE: the previous guard tested attrs_to_add.empty() before erasing from
// attrs_to_remove, skipping the erase exactly when it could matter (attr
// staged for removal while nothing had been added yet).
template <typename GraphViewT>
inline bool AddOrUpdateAttribute(NodeViewDiff<GraphViewT>* diff,
                                 absl::string_view attr_name,
                                 const AttrValue& attr_value) {
  if (!diff->attrs_to_remove.empty()) {
    diff->attrs_to_remove.erase(attr_name);
  }
  gtl::InsertOrUpdate(&diff->attrs_to_add, string(attr_name), attr_value);
  return true;
}
// Stages removal of attribute `attr_name`: drops any staged addition of it
// and, when the attribute exists on the node, records it for removal.
// Returns true when there was anything to remove (staged or existing).
template <typename GraphViewT>
inline bool RemoveAttribute(NodeViewDiff<GraphViewT>* diff,
                            absl::string_view attr_name) {
  size_t erased_from_add = 0;
  if (!diff->attrs_to_add.empty()) {
    erased_from_add = diff->attrs_to_add.erase(attr_name);
  }
  auto* node_view = diff->graph_view->GetNode(diff->node_index);
  if (node_view->HasAttr(attr_name)) {
    diff->attrs_to_remove.emplace(attr_name);
    return true;
  }
  return erased_from_add > 0;
}
// Shrinks `v` by dropping the maximal trailing run of elements equal to
// `value`. Elements before the trailing run are untouched.
template <typename T>
inline void ResizeByTrimmingEndForValue(std::vector<T>* v, const T& value) {
  // Walk backwards while the tail equals `value`.
  int new_size = v->size();
  while (new_size > 0 && (*v)[new_size - 1] == value) {
    --new_size;
  }
  if (new_size < static_cast<int>(v->size())) {
    v->resize(new_size);
  }
}
// Returns true when `diff` stages no mutations at all. As a side effect,
// trailing no-op entries in the removal/addition vectors are trimmed first.
template <typename GraphViewT>
inline bool IsEmpty(NodeViewDiff<GraphViewT>* diff) {
  ResizeByTrimmingEndForValue(&diff->regular_inputs_to_remove, false);
  ResizeByTrimmingEndForValue(&diff->regular_inputs_to_add, EmptyTensorId());
  if (diff->update_name || diff->update_op || diff->update_device) {
    return false;
  }
  return diff->regular_inputs_to_add.empty() &&
         diff->regular_inputs_to_update.empty() &&
         diff->regular_inputs_to_remove.empty() &&
         diff->controlling_inputs_to_add.empty() &&
         diff->controlling_inputs_to_remove.empty() &&
         diff->attrs_to_add.empty() && diff->attrs_to_remove.empty();
}
// Clears all staged mutations in `diff`, releasing container capacity via
// the swap-with-temporary idiom. The node identity (graph_view/node_index)
// is left untouched.
// NOTE(review): processed_attrs is not reset here — confirm that is
// intentional.
template <typename GraphViewT>
inline void Reset(NodeViewDiff<GraphViewT>* diff) {
  diff->name.clear();
  diff->update_name = false;
  diff->op.clear();
  diff->update_op = false;
  diff->device.clear();
  diff->update_device = false;
  std::vector<SafeTensorId>().swap(diff->regular_inputs_to_add);
  // Fixed: this int counter was previously reset with `= false`; use 0 to
  // match num_regular_inputs_to_remove below (same value, correct type).
  diff->num_regular_inputs_to_add = 0;
  std::map<int, SafeTensorId>().swap(diff->regular_inputs_to_update);
  std::vector<bool>().swap(diff->regular_inputs_to_remove);
  diff->num_regular_inputs_to_remove = 0;
  absl::flat_hash_set<string>().swap(diff->controlling_inputs_to_add);
  std::set<int>().swap(diff->controlling_inputs_to_remove);
  absl::flat_hash_map<string, AttrValue>().swap(diff->attrs_to_add);
  absl::flat_hash_set<string>().swap(diff->attrs_to_remove);
}
// Validates that applying `diff` would leave the node consistent:
// bookkeeping counters must match the trimmed staged vectors, appends and
// removals must not coexist, and no fanin (regular or control) may refer to
// the node's own (possibly updated) name or to a node that will not exist
// after the pending mutations in `updated_node_names`.
template <typename GraphViewT>
inline bool IsWellFormed(
    NodeViewDiff<GraphViewT>* diff,
    const absl::flat_hash_map<absl::string_view, int>& updated_node_names) {
  // Normalize trailing no-op entries before validating counters.
  ResizeByTrimmingEndForValue(&diff->regular_inputs_to_remove, false);
  ResizeByTrimmingEndForValue(&diff->regular_inputs_to_add, EmptyTensorId());
  int diff_regular_inputs_to_add_size = diff->regular_inputs_to_add.size();
  if (diff_regular_inputs_to_add_size != diff->num_regular_inputs_to_add) {
    // Staged appends contain gaps (empty sentinel entries).
    return false;
  } else if (diff->num_regular_inputs_to_add > 0 &&
             !diff->regular_inputs_to_remove.empty()) {
    // Appending and removing regular fanins at once is malformed.
    return false;
  } else if (static_cast<int>(diff->regular_inputs_to_remove.size()) !=
             diff->num_regular_inputs_to_remove) {
    // Removals are not contiguous from the end.
    return false;
  }
  auto* node_view = diff->graph_view->GetNode(diff->node_index);
  const string& node_name =
      diff->update_name ? diff->name : node_view->GetName();
  // A fanin name is invalid if it self-references the node or will not
  // exist after all pending mutations.
  auto invalid_node_name = [&](absl::string_view fanin_node_name) -> bool {
    return fanin_node_name == node_name ||
           !CheckNodeNameExists(fanin_node_name, updated_node_names,
                                diff->graph_view);
  };
  if (diff->update_name) {
    // The node is being renamed: every surviving fanin (updated or kept)
    // must be checked against the NEW name for self-loops. Walk the kept
    // fanins and the sorted update map in parallel.
    const int last_index =
        node_view->NumRegularFanins() - diff->num_regular_inputs_to_remove - 1;
    auto regular_to_update_it = diff->regular_inputs_to_update.begin();
    for (int i = 0; i <= last_index; ++i) {
      if (regular_to_update_it != diff->regular_inputs_to_update.end() &&
          regular_to_update_it->first < i) {
        ++regular_to_update_it;
      }
      if (regular_to_update_it != diff->regular_inputs_to_update.end() &&
          regular_to_update_it->first == i) {
        if (invalid_node_name(regular_to_update_it->second.node())) {
          return false;
        }
      } else {
        const string& regular_name =
            node_view->GetRegularFanin(i).node_view()->GetName();
        if (regular_name == node_name) {
          return false;
        }
      }
    }
    // Same parallel walk for control fanins, skipping staged removals
    // (controlling_inputs_to_remove is a sorted std::set of indices).
    auto& controls = node_view->GetControllingFanins();
    const int num_controls = controls.size();
    auto control_to_remove_it = diff->controlling_inputs_to_remove.begin();
    for (int i = 0; i < num_controls; ++i) {
      if (control_to_remove_it != diff->controlling_inputs_to_remove.end() &&
          *control_to_remove_it < i) {
        ++control_to_remove_it;
      }
      if (control_to_remove_it != diff->controlling_inputs_to_remove.end() &&
          *control_to_remove_it == i) {
        continue;
      } else if (controls[i].node_view()->GetName() == node_name) {
        return false;
      }
    }
  } else {
    // No rename: existing kept fanins were valid before, so only staged
    // updates need checking.
    for (const auto& updated : diff->regular_inputs_to_update) {
      const string& fanin_name = updated.second.node();
      if (invalid_node_name(fanin_name)) {
        return false;
      }
    }
  }
  // Staged additions are always checked.
  for (const auto& regular : diff->regular_inputs_to_add) {
    if (invalid_node_name(regular.node())) {
      return false;
    }
  }
  for (const auto& control : diff->controlling_inputs_to_add) {
    if (invalid_node_name(control)) {
      return false;
    }
  }
  return true;
}
// A node under construction that does not exist in the graph yet. The
// NodeDef holds name/op/device/attrs directly; fanins are staged in the
// companion fields and validated by IsWellFormed() below.
template <typename GraphViewT>
struct NewNode {
  explicit NewNode(GraphViewT* graph_view, NodeDef&& node)
      : graph_view(graph_view), node(std::move(node)) {}

  GraphViewT* graph_view;
  NodeDef node;
  // Regular fanins by input position; EmptyTensorId() marks unset gaps.
  std::vector<SafeTensorId> regular_fanins;
  // Count of non-empty entries in regular_fanins.
  int num_regular_fanins = 0;
  absl::flat_hash_set<string> controlling_fanins;
};
// Sets the name of the node under construction; an empty `name` clears it.
template <typename GraphViewT>
inline void UpdateName(NewNode<GraphViewT>* new_node, absl::string_view name) {
  if (!name.empty()) {
    new_node->node.set_name(string(name));
  } else {
    new_node->node.clear_name();
  }
}
// Sets the op of the node under construction; an empty `op` clears it.
template <typename GraphViewT>
inline void UpdateOp(NewNode<GraphViewT>* new_node, absl::string_view op) {
  if (!op.empty()) {
    new_node->node.set_op(string(op));
  } else {
    new_node->node.clear_op();
  }
}
// Sets the device of the node under construction; an empty `device` clears
// it.
template <typename GraphViewT>
inline void UpdateDevice(NewNode<GraphViewT>* new_node,
                         absl::string_view device) {
  if (!device.empty()) {
    new_node->node.set_device(string(device));
  } else {
    new_node->node.clear_device();
  }
}
// Stages regular fanin `fanin` at input position `index`; gaps are padded
// with the empty sentinel. Negative indices are ignored.
template <typename GraphViewT>
inline void AddOrUpdateRegularFanin(NewNode<GraphViewT>* new_node, int index,
                                    const TensorId& fanin) {
  if (index < 0) {
    return;
  }
  if (AddOrUpdateAtIndex(&new_node->regular_fanins, index, fanin,
                         EmptyTensorId())) {
    ++new_node->num_regular_fanins;
  }
}
// Clears the staged regular fanin at `index`, if one is set; out-of-range
// or already-empty slots are ignored.
template <typename GraphViewT>
inline void RemoveRegularFanin(NewNode<GraphViewT>* new_node, int index) {
  const int num_fanins = new_node->regular_fanins.size();
  const bool out_of_range = index < 0 || index >= num_fanins;
  if (out_of_range || IsEmptyTensorId(new_node->regular_fanins[index])) {
    return;
  }
  new_node->regular_fanins[index] = EmptyTensorId();
  --new_node->num_regular_fanins;
}
// Stages a control dependency on `fanin_node_name` for the node under
// construction; duplicates collapse in the set.
template <typename GraphViewT>
inline void AddControllingFanin(NewNode<GraphViewT>* new_node,
                                absl::string_view fanin_node_name) {
  new_node->controlling_fanins.emplace(fanin_node_name);
}
// Unstages a control dependency on `fanin_node_name`; a no-op when it was
// never staged.
template <typename GraphViewT>
inline void RemoveControllingFanin(NewNode<GraphViewT>* new_node,
                                   absl::string_view fanin_node_name) {
  new_node->controlling_fanins.erase(fanin_node_name);
}
// Sets (or overwrites) attribute `attr_name` directly on the NodeDef being
// built.
template <typename GraphViewT>
inline void AddOrUpdateAttribute(NewNode<GraphViewT>* new_node,
                                 absl::string_view attr_name,
                                 const AttrValue& attr_value) {
  gtl::InsertOrUpdate(new_node->node.mutable_attr(), string(attr_name),
                      attr_value);
}
// Removes attribute `attr_name` from the NodeDef being built; a no-op when
// absent.
template <typename GraphViewT>
inline void RemoveAttribute(NewNode<GraphViewT>* new_node,
                            absl::string_view attr_name) {
  new_node->node.mutable_attr()->erase(string(attr_name));
}
// Validates a node under construction: after trimming trailing empty
// fanin slots, the staged fanins must be gap-free, and every regular or
// control fanin must neither self-reference the new node's name nor point
// at a node that will not exist after pending mutations.
template <typename GraphViewT>
inline bool IsWellFormed(
    NewNode<GraphViewT>* new_node,
    const absl::flat_hash_map<absl::string_view, int>& updated_node_names) {
  ResizeByTrimmingEndForValue(&new_node->regular_fanins, EmptyTensorId());
  int new_node_regular_fanins_size = new_node->regular_fanins.size();
  if (new_node_regular_fanins_size != new_node->num_regular_fanins) {
    // Gaps (empty sentinel entries) remain among the staged fanins.
    return false;
  }
  const string& node_name = new_node->node.name();
  // Capture by reference: the previous by-value captures copied the whole
  // node-name hash map (and the name string) on every call; the lambda does
  // not outlive this scope, so references are safe. This also matches the
  // sibling IsWellFormed(NodeViewDiff*) overload's capture style.
  auto invalid_node_name = [new_node, &updated_node_names,
                            &node_name](absl::string_view fanin_node_name) {
    return fanin_node_name == node_name ||
           !CheckNodeNameExists(fanin_node_name, updated_node_names,
                                new_node->graph_view);
  };
  for (const auto& regular : new_node->regular_fanins) {
    if (invalid_node_name(regular.node())) {
      return false;
    }
  }
  for (const auto& control : new_node->controlling_fanins) {
    if (invalid_node_name(control)) {
      return false;
    }
  }
  return true;
}
}
}
}
}
#endif | #include "tensorflow/core/grappler/utils/graph_view_internal.h"
#include "absl/container/flat_hash_map.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/grappler/utils/graph_view.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace utils {
namespace internal {
namespace {
using ::tensorflow::test::function::GDef;
using ::tensorflow::test::function::NDef;
constexpr char kNodeOp[] = "NotImportant";
// Fixture graph: source nodes a, b, c with no inputs, and node d consuming
// regular fanins a:2, b:3, a:4 plus control deps ^c and ^b, with two
// attributes and an explicit device.
GraphDef SimpleTestGraphForMutation() {
  return GDef(
      {NDef("a", kNodeOp, {}), NDef("b", kNodeOp, {}), NDef("c", kNodeOp, {}),
       NDef("d", kNodeOp, {"a:2", "b:3", "a:4", "^c", "^b"},
            {{"attr_1", "a"}, {"attr_2", 2.0f}}, "device_d")},
      {});
}
// Seeds an updated-node-names map with every existing node name mapped to
// the sentinel -1, as expected by IsWellFormed().
absl::flat_hash_map<absl::string_view, int> GetUpdatedNodeNames(
    const MutableGraphView* graph_view) {
  absl::flat_hash_map<absl::string_view, int> node_names;
  node_names.reserve(graph_view->NumNodes());
  for (const auto& node : graph_view->GetNodes()) {
    node_names.emplace(node.GetName(), -1);
  }
  return node_names;
}
using MutableNodeViewDiff = NodeViewDiff<MutableGraphView>;
TEST(MutableNodeViewDiffTest, UpdateName) {
  GraphDef graph = SimpleTestGraphForMutation();
  Status status;
  MutableGraphView graph_view(&graph, &status);
  TF_ASSERT_OK(status);
  auto updated_node_names = GetUpdatedNodeNames(&graph_view);
  MutableNodeView* d_node = graph_view.GetNode("d");
  ASSERT_NE(d_node, nullptr);
  MutableNodeViewDiff diff(&graph_view, d_node->node_index());
  EXPECT_TRUE(IsEmpty(&diff));
  EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
  // Renaming to a new name marks the diff non-empty...
  UpdateName(&diff, "e");
  EXPECT_FALSE(IsEmpty(&diff));
  EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
  // ...and restoring the original name cancels the pending rename.
  UpdateName(&diff, "d");
  EXPECT_TRUE(IsEmpty(&diff));
  EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
}
TEST(MutableNodeViewDiffTest, UpdateOp) {
  GraphDef graph = SimpleTestGraphForMutation();
  Status status;
  MutableGraphView graph_view(&graph, &status);
  TF_ASSERT_OK(status);
  auto updated_node_names = GetUpdatedNodeNames(&graph_view);
  MutableNodeView* d_node = graph_view.GetNode("d");
  ASSERT_NE(d_node, nullptr);
  MutableNodeViewDiff diff(&graph_view, d_node->node_index());
  EXPECT_TRUE(IsEmpty(&diff));
  EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
  // Changing the op marks the diff non-empty...
  UpdateOp(&diff, "RandomOp");
  EXPECT_FALSE(IsEmpty(&diff));
  EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
  // ...and restoring the original op cancels the pending change.
  UpdateOp(&diff, kNodeOp);
  EXPECT_TRUE(IsEmpty(&diff));
  EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
}
TEST(MutableNodeViewDiffTest, UpdateDevice) {
  GraphDef graph = SimpleTestGraphForMutation();
  Status status;
  MutableGraphView graph_view(&graph, &status);
  TF_ASSERT_OK(status);
  auto updated_node_names = GetUpdatedNodeNames(&graph_view);
  MutableNodeView* d_node = graph_view.GetNode("d");
  ASSERT_NE(d_node, nullptr);
  MutableNodeViewDiff diff(&graph_view, d_node->node_index());
  EXPECT_TRUE(IsEmpty(&diff));
  EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
  // Changing the device marks the diff non-empty...
  UpdateDevice(&diff, "random_device");
  EXPECT_FALSE(IsEmpty(&diff));
  EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
  // ...and restoring the original device cancels the pending change.
  UpdateDevice(&diff, "device_d");
  EXPECT_TRUE(IsEmpty(&diff));
  EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
}
TEST(MutableNodeViewDiffTest, AddOrUpdateRegularFanin) {
  GraphDef graph = SimpleTestGraphForMutation();
  Status status;
  MutableGraphView graph_view(&graph, &status);
  TF_ASSERT_OK(status);
  auto updated_node_names = GetUpdatedNodeNames(&graph_view);
  MutableNodeView* d_node = graph_view.GetNode("d");
  ASSERT_NE(d_node, nullptr);
  MutableNodeViewDiff diff(&graph_view, d_node->node_index());
  EXPECT_TRUE(IsEmpty(&diff));
  EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
  // Negative index is rejected and stages nothing.
  AddOrUpdateRegularFanin(&diff, -1, {"a", 0});
  EXPECT_TRUE(IsEmpty(&diff));
  EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
  // Setting a fanin to its current value is a no-op.
  AddOrUpdateRegularFanin(&diff, 0, {"a", 2});
  EXPECT_TRUE(IsEmpty(&diff));
  EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
  // An actual change stages an update.
  AddOrUpdateRegularFanin(&diff, 0, {"a", 3});
  EXPECT_FALSE(IsEmpty(&diff));
  EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
  // Appending at index 4 leaves a gap at index 3 -> malformed.
  AddOrUpdateRegularFanin(&diff, 4, {"b", 4});
  EXPECT_FALSE(IsEmpty(&diff));
  EXPECT_FALSE(IsWellFormed(&diff, updated_node_names));
  // Filling the gap restores well-formedness.
  AddOrUpdateRegularFanin(&diff, 3, {"c", 4});
  EXPECT_FALSE(IsEmpty(&diff));
  EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
  // A contiguous further append stays well-formed.
  AddOrUpdateRegularFanin(&diff, 5, {"c", 5});
  EXPECT_FALSE(IsEmpty(&diff));
  EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
}
TEST(MutableNodeViewDiffTest, AddOrUpdateRegularFaninBetweenRemovedFanins) {
  GraphDef graph = SimpleTestGraphForMutation();
  Status status;
  MutableGraphView graph_view(&graph, &status);
  TF_ASSERT_OK(status);
  auto updated_node_names = GetUpdatedNodeNames(&graph_view);
  MutableNodeView* d_node = graph_view.GetNode("d");
  ASSERT_NE(d_node, nullptr);
  MutableNodeViewDiff diff(&graph_view, d_node->node_index());
  EXPECT_TRUE(IsEmpty(&diff));
  EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
  // Removing non-trailing fanins leaves holes -> malformed.
  RemoveRegularFanin(&diff, 0);
  EXPECT_FALSE(IsEmpty(&diff));
  EXPECT_FALSE(IsWellFormed(&diff, updated_node_names));
  RemoveRegularFanin(&diff, 2);
  EXPECT_FALSE(IsEmpty(&diff));
  EXPECT_FALSE(IsWellFormed(&diff, updated_node_names));
  // Re-setting fanin 1 does not close the hole at position 0.
  AddOrUpdateRegularFanin(&diff, 1, {"c", 1});
  EXPECT_FALSE(IsEmpty(&diff));
  EXPECT_FALSE(IsWellFormed(&diff, updated_node_names));
  // Filling position 0 leaves only the trailing removal -> well-formed.
  AddOrUpdateRegularFanin(&diff, 0, {"c", 0});
  EXPECT_FALSE(IsEmpty(&diff));
  EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
  // Removing position 0 again reopens a hole.
  RemoveRegularFanin(&diff, 0);
  EXPECT_FALSE(IsEmpty(&diff));
  EXPECT_FALSE(IsWellFormed(&diff, updated_node_names));
  // Updating position 2 while removals exist keeps the diff malformed.
  AddOrUpdateRegularFanin(&diff, 2, {"c", 2});
  EXPECT_FALSE(IsEmpty(&diff));
  EXPECT_FALSE(IsWellFormed(&diff, updated_node_names));
}
TEST(MutableNodeViewDiffTest, RemoveRegularFanin) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
MutableNodeViewDiff diff(&graph_view, d_node->node_index());
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
RemoveRegularFanin(&diff, -1);
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
RemoveRegularFanin(&diff, 3);
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AddOrUpdateRegularFanin(&diff, 4, {"b", 4});
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_FALSE(IsWellFormed(&diff, updated_node_names));
RemoveRegularFanin(&diff, 4);
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AddOrUpdateRegularFanin(&diff, 4, {"b", 4});
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_FALSE(IsWellFormed(&diff, updated_node_names));
AddOrUpdateRegularFanin(&diff, 3, {"c", 4});
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
RemoveRegularFanin(&diff, 3);
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_FALSE(IsWellFormed(&diff, updated_node_names));
RemoveRegularFanin(&diff, 4);
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AddOrUpdateRegularFanin(&diff, 5, {"b", 6});
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_FALSE(IsWellFormed(&diff, updated_node_names));
AddOrUpdateRegularFanin(&diff, 3, {"c", 4});
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_FALSE(IsWellFormed(&diff, updated_node_names));
RemoveRegularFanin(&diff, 4);
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_FALSE(IsWellFormed(&diff, updated_node_names));
RemoveRegularFanin(&diff, 3);
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_FALSE(IsWellFormed(&diff, updated_node_names));
RemoveRegularFanin(&diff, 5);
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AddOrUpdateRegularFanin(&diff, 1, {"a", 3});
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
RemoveRegularFanin(&diff, 1);
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_FALSE(IsWellFormed(&diff, updated_node_names));
AddOrUpdateRegularFanin(&diff, 1, {"b", 3});
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
}
TEST(MutableNodeViewDiffTest, RemoveRegularFaninResize) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
MutableNodeViewDiff diff(&graph_view, d_node->node_index());
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AddOrUpdateRegularFanin(&diff, 3, {"c", 5});
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AddOrUpdateRegularFanin(&diff, 4, {"c", 6});
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AddOrUpdateRegularFanin(&diff, 5, {"c", 7});
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
RemoveRegularFanin(&diff, 4);
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_FALSE(IsWellFormed(&diff, updated_node_names));
RemoveRegularFanin(&diff, 5);
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
}
TEST(MutableNodeViewDiffTest, AddControllingFanin) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
MutableNodeViewDiff diff(&graph_view, d_node->node_index());
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AddControllingFanin(&diff, 0, "c");
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AddControllingFanin(&diff, kMissingIndex, "a");
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
}
TEST(MutableNodeViewDiffTest, RemoveControllingFanin) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
MutableNodeViewDiff diff(&graph_view, d_node->node_index());
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AddControllingFanin(&diff, kMissingIndex, "a");
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
RemoveControllingFanin(&diff, 0, "c");
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
RemoveControllingFanin(&diff, kMissingIndex, "a");
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AddControllingFanin(&diff, 0, "c");
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
}
TEST(MutableNodeViewDiffTest, AddOrUpdateAttribute) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
MutableNodeViewDiff diff(&graph_view, d_node->node_index());
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AttrValue attr_1;
attr_1.set_b(true);
AddOrUpdateAttribute(&diff, "attr_1", attr_1);
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AttrValue attr_3;
attr_3.set_i(4);
AddOrUpdateAttribute(&diff, "attr_1", attr_3);
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
}
TEST(MutableNodeViewDiffTest, RemoveAttribute) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
MutableNodeViewDiff diff(&graph_view, d_node->node_index());
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AttrValue attr_1;
attr_1.set_b(true);
AddOrUpdateAttribute(&diff, "attr_1", attr_1);
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
RemoveAttribute(&diff, "attr_1");
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
RemoveAttribute(&diff, "attr_3");
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
}
TEST(MutableNodeViewDiffTest, Reset) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
MutableNodeViewDiff diff(&graph_view, d_node->node_index());
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
RemoveRegularFanin(&diff, 2);
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AddControllingFanin(&diff, kMissingIndex, "a");
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AttrValue attr_1;
attr_1.set_b(true);
AddOrUpdateAttribute(&diff, "attr_1", attr_1);
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
Reset(&diff);
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
}
TEST(MutableNodeViewDiffTest, IsWellFormedWithRemovedAndAppendedFanins) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
MutableNodeViewDiff diff(&graph_view, d_node->node_index());
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
RemoveRegularFanin(&diff, 2);
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AddOrUpdateRegularFanin(&diff, 3, {"a", 8});
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_FALSE(IsWellFormed(&diff, updated_node_names));
}
TEST(MutableNodeViewDiffTest, IsWellFormedSelfLoopRegularUpdate) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
MutableNodeViewDiff diff(&graph_view, d_node->node_index());
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AddOrUpdateRegularFanin(&diff, 0, {"d", 1});
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_FALSE(IsWellFormed(&diff, updated_node_names));
}
TEST(MutableNodeViewDiffTest, IsWellFormedSelfLoopRegularNew) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
MutableNodeViewDiff diff(&graph_view, d_node->node_index());
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AddOrUpdateRegularFanin(&diff, 3, {"d", 1});
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_FALSE(IsWellFormed(&diff, updated_node_names));
}
TEST(MutableNodeViewDiffTest, IsWellFormedSelfLoopControl) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
MutableNodeViewDiff diff(&graph_view, d_node->node_index());
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AddControllingFanin(&diff, kMissingIndex, "d");
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_FALSE(IsWellFormed(&diff, updated_node_names));
}
TEST(MutableNodeViewDiffTest, IsWellFormedMissingFaninRegularUpdate) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
MutableNodeViewDiff diff(&graph_view, d_node->node_index());
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AddOrUpdateRegularFanin(&diff, 0, {"e", 1});
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_FALSE(IsWellFormed(&diff, updated_node_names));
}
TEST(MutableNodeViewDiffTest, IsWellFormedMissingFaninRegularNew) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
MutableNodeViewDiff diff(&graph_view, d_node->node_index());
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AddOrUpdateRegularFanin(&diff, 3, {"e", 1});
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_FALSE(IsWellFormed(&diff, updated_node_names));
}
TEST(MutableNodeViewDiffTest, IsWellFormedMissingControl) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
MutableNodeViewDiff diff(&graph_view, d_node->node_index());
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AddControllingFanin(&diff, kMissingIndex, "e");
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_FALSE(IsWellFormed(&diff, updated_node_names));
}
TEST(MutableNodeViewDiffTest, IsWellFormedRenamedSelfLoopRegularUpdate) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
MutableNodeViewDiff diff(&graph_view, d_node->node_index());
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
string old_node_name = "d";
string new_node_name = "e";
updated_node_names.erase(old_node_name);
updated_node_names.emplace(old_node_name, 3);
updated_node_names.emplace(new_node_name, -1);
UpdateName(&diff, "e");
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AddOrUpdateRegularFanin(&diff, 0, {"e", 1});
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_FALSE(IsWellFormed(&diff, updated_node_names));
}
TEST(MutableNodeViewDiffTest, IsWellFormedRenamedSelfLoopRegularNew) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
MutableNodeViewDiff diff(&graph_view, d_node->node_index());
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
string old_node_name = "d";
string new_node_name = "e";
updated_node_names.erase(old_node_name);
updated_node_names.emplace(old_node_name, 3);
updated_node_names.emplace(new_node_name, -1);
UpdateName(&diff, "e");
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AddOrUpdateRegularFanin(&diff, 3, {"e", 1});
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_FALSE(IsWellFormed(&diff, updated_node_names));
}
TEST(MutableNodeViewDiffTest, IsWellFormedRenamedSelfLoopControl) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
MutableNodeViewDiff diff(&graph_view, d_node->node_index());
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
string old_node_name = "d";
string new_node_name = "e";
updated_node_names.erase(old_node_name);
updated_node_names.emplace(old_node_name, 3);
updated_node_names.emplace(new_node_name, -1);
UpdateName(&diff, "e");
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AddControllingFanin(&diff, kMissingIndex, "e");
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_FALSE(IsWellFormed(&diff, updated_node_names));
}
TEST(MutableNodeViewDiffTest, IsWellFormedRenamedMissingFaninRegularUpdate) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
MutableNodeViewDiff diff(&graph_view, d_node->node_index());
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
string old_node_name = "d";
string new_node_name = "e";
updated_node_names.erase(old_node_name);
updated_node_names.emplace(old_node_name, 3);
updated_node_names.emplace(new_node_name, -1);
UpdateName(&diff, "e");
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AddOrUpdateRegularFanin(&diff, 0, {"f", 1});
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_FALSE(IsWellFormed(&diff, updated_node_names));
}
TEST(MutableNodeViewDiffTest, IsWellFormedRenamedMissingFaninRegularNew) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
MutableNodeViewDiff diff(&graph_view, d_node->node_index());
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
string old_node_name = "d";
string new_node_name = "e";
updated_node_names.erase(old_node_name);
updated_node_names.emplace(old_node_name, 3);
updated_node_names.emplace(new_node_name, -1);
UpdateName(&diff, "e");
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AddOrUpdateRegularFanin(&diff, 3, {"f", 1});
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_FALSE(IsWellFormed(&diff, updated_node_names));
}
TEST(MutableNodeViewDiffTest, IsWellFormedRenamedMissingFaninControl) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
MutableNodeViewDiff diff(&graph_view, d_node->node_index());
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
string old_node_name = "d";
string new_node_name = "e";
updated_node_names.erase(old_node_name);
updated_node_names.emplace(old_node_name, 3);
updated_node_names.emplace(new_node_name, -1);
UpdateName(&diff, "e");
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AddControllingFanin(&diff, kMissingIndex, "f");
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_FALSE(IsWellFormed(&diff, updated_node_names));
}
TEST(MutableNodeViewDiffTest, RenamedAndRemovedFanins) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
MutableNodeViewDiff diff(&graph_view, d_node->node_index());
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
string old_node_name = "d";
string new_node_name = "e";
updated_node_names.erase(old_node_name);
updated_node_names.emplace(old_node_name, 3);
updated_node_names.emplace(new_node_name, -1);
UpdateName(&diff, "e");
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
for (int i = 0; i < 3; ++i) {
RemoveRegularFanin(&diff, i);
}
RemoveControllingFanin(&diff, 0, "c");
RemoveControllingFanin(&diff, 0, "b");
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
}
TEST(MutableNodeViewDiffTest, RenamedWithSelfLoopControl) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
MutableNodeViewDiff diff(&graph_view, d_node->node_index());
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
updated_node_names.erase("d");
UpdateName(&diff, "c");
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_FALSE(IsWellFormed(&diff, updated_node_names));
}
using MutationNewNodeForTest = NewNode<MutableGraphView>;
TEST(MutationNewNodeTest, UpdateName) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutationNewNodeForTest new_node(&graph_view, {});
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
UpdateName(&new_node, "new");
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
UpdateName(&new_node, "");
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
}
TEST(MutationNewNodeTest, UpdateOp) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutationNewNodeForTest new_node(&graph_view, {});
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
UpdateOp(&new_node, "Identity");
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
UpdateOp(&new_node, "");
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
}
TEST(MutationNewNodeTest, UpdateDevice) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutationNewNodeForTest new_node(&graph_view, {});
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
UpdateDevice(&new_node, "foo_device");
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
UpdateDevice(&new_node, "");
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
}
TEST(MutationNewNodeTest, AddOrUpdateRegularFanin) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutationNewNodeForTest new_node(&graph_view, {});
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
UpdateName(&new_node, "new");
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
AddOrUpdateRegularFanin(&new_node, -1, {"a", 1});
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
AddOrUpdateRegularFanin(&new_node, 1, {"a", 1});
EXPECT_FALSE(IsWellFormed(&new_node, updated_node_names));
AddOrUpdateRegularFanin(&new_node, 0, {"b", 2});
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
AddOrUpdateRegularFanin(&new_node, 2, {"c", 3});
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
AddOrUpdateRegularFanin(&new_node, 1, {"d", 4});
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
AddOrUpdateRegularFanin(&new_node, 1, {"e", 5});
EXPECT_FALSE(IsWellFormed(&new_node, updated_node_names));
AddOrUpdateRegularFanin(&new_node, 1, {"new", 6});
EXPECT_FALSE(IsWellFormed(&new_node, updated_node_names));
AddOrUpdateRegularFanin(&new_node, 1, {"d", 4});
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
}
TEST(MutationNewNodeTest, RemoveRegularFanin) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutationNewNodeForTest new_node(&graph_view, {});
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
UpdateName(&new_node, "new");
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
AddOrUpdateRegularFanin(&new_node, 0, {"a", 1});
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
AddOrUpdateRegularFanin(&new_node, 1, {"b", 2});
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
AddOrUpdateRegularFanin(&new_node, 2, {"c", 3});
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
RemoveRegularFanin(&new_node, 3);
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
RemoveRegularFanin(&new_node, 2);
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
RemoveRegularFanin(&new_node, 0);
EXPECT_FALSE(IsWellFormed(&new_node, updated_node_names));
RemoveRegularFanin(&new_node, 1);
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
}
TEST(MutationNewNodeTest, AddControllingFanin) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutationNewNodeForTest new_node(&graph_view, {});
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
UpdateName(&new_node, "new");
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
AddControllingFanin(&new_node, "a");
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
AddControllingFanin(&new_node, "e");
EXPECT_FALSE(IsWellFormed(&new_node, updated_node_names));
AddControllingFanin(&new_node, "new");
EXPECT_FALSE(IsWellFormed(&new_node, updated_node_names));
RemoveControllingFanin(&new_node, "e");
RemoveControllingFanin(&new_node, "new");
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
}
TEST(MutationNewNodeTest, RemoveControllingFanin) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutationNewNodeForTest new_node(&graph_view, {});
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
UpdateName(&new_node, "new");
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
AddControllingFanin(&new_node, "a");
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
RemoveControllingFanin(&new_node, "e");
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
RemoveControllingFanin(&new_node, "new");
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
RemoveControllingFanin(&new_node, "a");
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
}
TEST(MutationNewNodeTest, AddOrUpdateAttribute) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutationNewNodeForTest new_node(&graph_view, {});
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
string attr_name = "attr_name";
AttrValue attr_1;
attr_1.set_i(8);
AddOrUpdateAttribute(&new_node, attr_name, attr_1);
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
AttrValue attr_2;
attr_2.set_f(2.0f);
AddOrUpdateAttribute(&new_node, attr_name, attr_2);
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
}
TEST(MutationNewNodeTest, RemoveAttribute) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutationNewNodeForTest new_node(&graph_view, {});
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
string attr_name = "attr_name";
AttrValue attr_1;
attr_1.set_i(8);
AddOrUpdateAttribute(&new_node, attr_name, attr_1);
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
RemoveAttribute(&new_node, attr_name);
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
RemoveAttribute(&new_node, attr_name);
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
}
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/utils/graph_view_internal.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/utils/graph_view_internal_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0d1d5990-3582-48ac-beea-f64aa5a27125 | cpp | tensorflow/tensorflow | cleanup | tensorflow/c/experimental/filesystem/plugins/gcs/cleanup.h | tensorflow/core/lib/gtl/cleanup_test.cc | #ifndef TENSORFLOW_C_EXPERIMENTAL_FILESYSTEM_PLUGINS_GCS_CLEANUP_H_
#define TENSORFLOW_C_EXPERIMENTAL_FILESYSTEM_PLUGINS_GCS_CLEANUP_H_
#include <type_traits>
#include <utility>
namespace tf_gcs_filesystem {
template <typename F>
class Cleanup {
public:
Cleanup() : released_(true), f_() {}
template <typename G>
explicit Cleanup(G&& f)
: f_(std::forward<G>(f)) {}
Cleanup(Cleanup&& src)
: released_(src.is_released()), f_(src.release()) {}
template <typename G>
Cleanup(Cleanup<G>&& src)
: released_(src.is_released()), f_(src.release()) {}
Cleanup& operator=(Cleanup&& src) {
if (!released_) f_();
released_ = src.released_;
f_ = src.release();
return *this;
}
~Cleanup() {
if (!released_) f_();
}
F release() {
released_ = true;
return std::move(f_);
}
bool is_released() const { return released_; }
private:
static_assert(!std::is_reference<F>::value, "F must not be a reference");
bool released_ = false;
F f_;
};
template <int&... ExplicitParameterBarrier, typename F,
typename DecayF = typename std::decay<F>::type>
Cleanup<DecayF> MakeCleanup(F&& f) {
return Cleanup<DecayF>(std::forward<F>(f));
}
}
#endif | #include "tensorflow/core/lib/gtl/cleanup.h"
#include <functional>
#include <type_traits>
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace {
using AnyCleanup = gtl::Cleanup<std::function<void()>>;
template <typename T1, typename T2>
void AssertTypeEq() {
static_assert(std::is_same<T1, T2>::value, "unexpected type");
}
TEST(CleanupTest, BasicLambda) {
string s = "active";
{
auto s_cleaner = gtl::MakeCleanup([&s] { s.assign("cleaned"); });
EXPECT_EQ("active", s);
}
EXPECT_EQ("cleaned", s);
}
TEST(FinallyTest, NoCaptureLambda) {
static string& s = *new string;
s.assign("active");
{
auto s_cleaner = gtl::MakeCleanup([] { s.append(" clean"); });
EXPECT_EQ("active", s);
}
EXPECT_EQ("active clean", s);
}
TEST(CleanupTest, Release) {
string s = "active";
{
auto s_cleaner = gtl::MakeCleanup([&s] { s.assign("cleaned"); });
EXPECT_EQ("active", s);
s_cleaner.release();
}
EXPECT_EQ("active", s);
}
TEST(FinallyTest, TypeErasedWithoutFactory) {
string s = "active";
{
AnyCleanup s_cleaner([&s] { s.append(" clean"); });
EXPECT_EQ("active", s);
}
EXPECT_EQ("active clean", s);
}
struct Appender {
Appender(string* s, const string& msg) : s_(s), msg_(msg) {}
void operator()() const { s_->append(msg_); }
string* s_;
string msg_;
};
TEST(CleanupTest, NonLambda) {
string s = "active";
{
auto c = gtl::MakeCleanup(Appender(&s, " cleaned"));
AssertTypeEq<decltype(c), gtl::Cleanup<Appender>>();
EXPECT_EQ("active", s);
}
EXPECT_EQ("active cleaned", s);
}
TEST(CleanupTest, Assign) {
string s = "0";
{
auto clean1 = gtl::MakeCleanup(Appender(&s, " 1"));
auto clean2 = gtl::MakeCleanup(Appender(&s, " 2"));
EXPECT_EQ("0", s);
clean2 = std::move(clean1);
EXPECT_EQ("0 2", s);
}
EXPECT_EQ("0 2 1", s);
}
TEST(CleanupTest, AssignAny) {
string s = "0";
{
auto clean1 = gtl::MakeCleanup(Appender(&s, " 1"));
AnyCleanup clean2 = gtl::MakeCleanup(Appender(&s, " 2"));
EXPECT_EQ("0", s);
clean2 = std::move(clean1);
EXPECT_EQ("0 2", s);
}
EXPECT_EQ("0 2 1", s);
}
TEST(CleanupTest, AssignFromReleased) {
string s = "0";
{
auto clean1 = gtl::MakeCleanup(Appender(&s, " 1"));
auto clean2 = gtl::MakeCleanup(Appender(&s, " 2"));
EXPECT_EQ("0", s);
clean1.release();
clean2 = std::move(clean1);
EXPECT_EQ("0 2", s);
}
EXPECT_EQ("0 2", s);
}
TEST(CleanupTest, AssignToReleased) {
string s = "0";
{
auto clean1 = gtl::MakeCleanup(Appender(&s, " 1"));
auto clean2 = gtl::MakeCleanup(Appender(&s, " 2"));
EXPECT_EQ("0", s);
clean2.release();
EXPECT_EQ("0", s);
clean2 = std::move(clean1);
EXPECT_EQ("0", s);
}
EXPECT_EQ("0 1", s);
}
TEST(CleanupTest, AssignToDefaultInitialized) {
string s = "0";
{
auto clean1 = gtl::MakeCleanup(Appender(&s, " 1"));
{
AnyCleanup clean2;
EXPECT_EQ("0", s);
clean2 = std::move(clean1);
EXPECT_EQ("0", s);
}
EXPECT_EQ("0 1", s);
}
EXPECT_EQ("0 1", s);
}
class CleanupReferenceTest : public ::testing::Test {
public:
struct F {
int* cp;
int* i;
F(int* cp, int* i) : cp(cp), i(i) {}
F(const F& o) : cp(o.cp), i(o.i) { ++*cp; }
F& operator=(const F& o) {
cp = o.cp;
i = o.i;
++*cp;
return *this;
}
F(F&&) = default;
F& operator=(F&&) = default;
void operator()() const { ++*i; }
};
int copies_ = 0;
int calls_ = 0;
F f_ = F(&copies_, &calls_);
static int g_calls;
void SetUp() override { g_calls = 0; }
static void CleanerFunction() { ++g_calls; }
};
int CleanupReferenceTest::g_calls = 0;
// MakeCleanup(&fn) and MakeCleanup(fn) both decay to a function-pointer
// cleanup; each runs exactly once at scope exit.
TEST_F(CleanupReferenceTest, FunctionPointer) {
  {
    auto c = gtl::MakeCleanup(&CleanerFunction);
    AssertTypeEq<decltype(c), gtl::Cleanup<void (*)()>>();
    EXPECT_EQ(0, g_calls);
  }
  EXPECT_EQ(1, g_calls);
  {
    auto c = gtl::MakeCleanup(CleanerFunction);
    AssertTypeEq<decltype(c), gtl::Cleanup<void (*)()>>();
    EXPECT_EQ(1, g_calls);
  }
  EXPECT_EQ(2, g_calls);
}
// A released cleanup can be reassigned; only the currently held action runs
// at destruction ("2"), while the released functor can still be invoked by
// hand ("1").
TEST_F(CleanupReferenceTest, AssignLvalue) {
  string s = "0";
  Appender app1(&s, "1");
  Appender app2(&s, "2");
  {
    auto c = gtl::MakeCleanup(app1);
    c.release();  // disarm: app1 will not run automatically
    c = gtl::MakeCleanup(app2);
    EXPECT_EQ("0", s);
    app1();  // manual call of the disarmed functor
    EXPECT_EQ("01", s);
  }  // c destroyed -> app2 runs
  EXPECT_EQ("012", s);
}
// Passing the functor by lvalue copies it into the cleanup exactly once;
// release() hands the functor back (without invoking it or copying again).
TEST_F(CleanupReferenceTest, FunctorLvalue) {
  EXPECT_EQ(0, copies_);
  EXPECT_EQ(0, calls_);
  {
    auto c = gtl::MakeCleanup(f_);
    AssertTypeEq<decltype(c), gtl::Cleanup<F>>();
    EXPECT_EQ(1, copies_);
    EXPECT_EQ(0, calls_);
  }
  EXPECT_EQ(1, copies_);
  EXPECT_EQ(1, calls_);
  {
    auto c = gtl::MakeCleanup(f_);
    EXPECT_EQ(2, copies_);
    EXPECT_EQ(1, calls_);
    F f2 = c.release();  // moved out; no extra copy, no call
    EXPECT_EQ(2, copies_);
    EXPECT_EQ(1, calls_);
    auto c2 = gtl::MakeCleanup(f2);  // copying the lvalue again
    EXPECT_EQ(3, copies_);
    EXPECT_EQ(1, calls_);
  }
  EXPECT_EQ(3, copies_);
  EXPECT_EQ(2, calls_);
}
// Passing the functor by rvalue moves it in: zero copies, one call.
TEST_F(CleanupReferenceTest, FunctorRvalue) {
  {
    auto c = gtl::MakeCleanup(std::move(f_));
    AssertTypeEq<decltype(c), gtl::Cleanup<F>>();
    EXPECT_EQ(0, copies_);
    EXPECT_EQ(0, calls_);
  }
  EXPECT_EQ(0, copies_);
  EXPECT_EQ(1, calls_);
}
// std::cref stores only a reference_wrapper, so the functor itself is never
// copied; the referenced f_ is still invoked once at scope exit.
TEST_F(CleanupReferenceTest, FunctorReferenceWrapper) {
  {
    auto c = gtl::MakeCleanup(std::cref(f_));
    AssertTypeEq<decltype(c), gtl::Cleanup<std::reference_wrapper<const F>>>();
    EXPECT_EQ(0, copies_);
    EXPECT_EQ(0, calls_);
  }
  EXPECT_EQ(0, copies_);
  EXPECT_EQ(1, calls_);
}
// Benchmarks: measure the per-iteration cost of constructing and destroying
// a cleanup. `i` is volatile so the increment done by the cleanup body
// cannot be optimized away.
volatile int i;
void Incr(volatile int* ip) { ++*ip; }
void Incr() { Incr(&i); }
// Statically typed cleanup holding a capture-less lambda.
void BM_Cleanup(::testing::benchmark::State& state) {
  for (auto s : state) {
    auto fin = gtl::MakeCleanup([] { Incr(); });
  }
}
BENCHMARK(BM_Cleanup);
// Same work routed through the type-erased AnyCleanup, via MakeCleanup.
void BM_AnyCleanup(::testing::benchmark::State& state) {
  for (auto s : state) {
    AnyCleanup fin = gtl::MakeCleanup([] { Incr(); });
  }
}
BENCHMARK(BM_AnyCleanup);
// Type-erased cleanup constructed directly, skipping the MakeCleanup hop.
void BM_AnyCleanupNoFactory(::testing::benchmark::State& state) {
  for (auto s : state) {
    AnyCleanup fin([] { Incr(); });
  }
}
BENCHMARK(BM_AnyCleanupNoFactory);
// "Bound" variants capture a pointer, so the closure is non-empty.
void BM_CleanupBound(::testing::benchmark::State& state) {
  volatile int* ip = &i;
  for (auto s : state) {
    auto fin = gtl::MakeCleanup([ip] { Incr(ip); });
  }
}
BENCHMARK(BM_CleanupBound);
void BM_AnyCleanupBound(::testing::benchmark::State& state) {
  volatile int* ip = &i;
  for (auto s : state) {
    AnyCleanup fin = gtl::MakeCleanup([ip] { Incr(ip); });
  }
}
BENCHMARK(BM_AnyCleanupBound);
void BM_AnyCleanupNoFactoryBound(::testing::benchmark::State& state) {
  volatile int* ip = &i;
  for (auto s : state) {
    AnyCleanup fin([ip] { Incr(ip); });
  }
}
BENCHMARK(BM_AnyCleanupNoFactoryBound);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/filesystem/plugins/gcs/cleanup.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/gtl/cleanup_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
149c0591-4625-43a9-9386-2d63b5cf6225 | cpp | tensorflow/tensorflow | edit_distance | tensorflow/core/lib/gtl/edit_distance.h | tensorflow/core/lib/gtl/edit_distance_test.cc | #ifndef TENSORFLOW_CORE_LIB_GTL_EDIT_DISTANCE_H_
#define TENSORFLOW_CORE_LIB_GTL_EDIT_DISTANCE_H_
#include <numeric>
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
namespace tensorflow {
namespace gtl {
// Computes the Levenshtein (edit) distance between sequences `s` and `t`,
// using `cmp` to decide element equality. Single-row dynamic program:
// O(|s| * |t|) time, O(min(|s|, |t|)) extra memory.
template <typename T, typename Cmp>
inline int64_t LevenshteinDistance(const gtl::ArraySlice<T> s,
                                   const gtl::ArraySlice<T> t, const Cmp& cmp) {
  const int64_t s_size = s.size();
  const int64_t t_size = t.size();
  // Make `t` the shorter sequence so the scratch row stays small.
  if (t_size > s_size) return LevenshteinDistance(t, s, cmp);
  const T* s_data = s.data();
  const T* t_data = t.data();
  // Distance from the empty sequence is the other sequence's length.
  if (t_size == 0) return s_size;
  if (s == t) return 0;
  // scratch[j - 1] holds cost(i - 1, j) while row i is being computed.
  // InlinedVector's size constructor value-initializes the entries to 0;
  // the row-0 seeding below only fills scratch[0 .. t_size - 2]. The
  // unseeded last entry (0 instead of t_size) never changes the final
  // answer, because the true distance is at most s_size — the cost of the
  // pure-deletion path through that cell. (Subtle; verified by the tests.)
  absl::InlinedVector<int64_t, 32UL> scratch_holder(t_size);
  int64_t* scratch = scratch_holder.data();
  for (size_t j = 1; j < t_size; ++j) scratch[j - 1] = j;
  for (size_t i = 1; i <= s_size; ++i) {
    // Diagonal neighbor cost(i - 1, j - 1), rolled forward per column.
    int substitution_base_cost = i - 1;
    // Left neighbor plus one insertion: cost(i, j - 1) + 1.
    int insertion_cost = i + 1;
    for (size_t j = 1; j <= t_size; ++j) {
      const int replacement_cost = cmp(s_data[i - 1], t_data[j - 1]) ? 0 : 1;
      const int substitution_cost = substitution_base_cost + replacement_cost;
      // Deleting s[i - 1]: cost(i - 1, j) + 1.
      const int deletion_cost = scratch[j - 1] + 1;
      const int cheapest =
          std::min(deletion_cost, std::min(insertion_cost, substitution_cost));
      // Slide the window: the old cost(i - 1, j) becomes the next column's
      // diagonal, and scratch[j - 1] now stores cost(i, j).
      substitution_base_cost = scratch[j - 1];
      scratch[j - 1] = cheapest;
      insertion_cost = cheapest + 1;
    }
  }
  return scratch[t_size - 1];
}
// Convenience overload for contiguous containers (e.g. std::string,
// std::vector). Both slices are intentionally built with Container1's
// value_type: the conversion from t.data() only compiles when the two
// containers share an element type, which the slice overload requires
// anyway.
template <typename Container1, typename Container2, typename Cmp>
inline int64_t LevenshteinDistance(const Container1& s, const Container2& t,
                                   const Cmp& cmp) {
  return LevenshteinDistance(
      gtl::ArraySlice<typename Container1::value_type>(s.data(), s.size()),
      gtl::ArraySlice<typename Container1::value_type>(t.data(), t.size()),
      cmp);
}
}
}
#endif | #include "tensorflow/core/lib/gtl/edit_distance.h"
#include <cctype>
#include <vector>
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace gtl {
namespace {
// Fixture providing shared inputs: empty sequences, short digit strings,
// prefix/suffix pairs, case-variant strings, and raw char vectors.
class LevenshteinDistanceTest : public ::testing::Test {
 protected:
  std::vector<char> empty_;  // zero-length sequence
  std::string s1_;
  std::string s1234_;
  std::string s567_;
  std::string kilo_;      // proper prefix of kilogram_
  std::string kilogram_;
  std::string mother_;    // proper suffix of grandmother_
  std::string grandmother_;
  std::string lower_;     // same tail as upper_, different case up front
  std::string upper_;
  std::vector<char> ebab_;
  std::vector<char> abcd_;
  void SetUp() override {
    s1_ = "1";
    s1234_ = "1234";
    s567_ = "567";
    kilo_ = "kilo";
    kilogram_ = "kilogram";
    mother_ = "mother";
    grandmother_ = "grandmother";
    lower_ = "lower case";
    upper_ = "UPPER case";
    ebab_ = {'e', 'b', 'a', 'b'};
    abcd_ = {'a', 'b', 'c', 'd'};
  }
};
// Distance between two empty sequences is zero.
TEST_F(LevenshteinDistanceTest, BothEmpty) {
  ASSERT_EQ(LevenshteinDistance(empty_, empty_, std::equal_to<char>()), 0);
}
// Edit distance is symmetric in its arguments.
TEST_F(LevenshteinDistanceTest, Symmetry) {
  ASSERT_EQ(LevenshteinDistance(ebab_, abcd_, std::equal_to<char>()), 3);
  ASSERT_EQ(LevenshteinDistance(abcd_, ebab_, std::equal_to<char>()), 3);
}
// Distance to an empty sequence equals the other sequence's length.
TEST_F(LevenshteinDistanceTest, OneEmpty) {
  ASSERT_EQ(LevenshteinDistance(s1234_, empty_, std::equal_to<char>()), 4);
  ASSERT_EQ(LevenshteinDistance(empty_, s567_, std::equal_to<char>()), 3);
}
// "1" vs "1234": three insertions/deletions, in either order.
TEST_F(LevenshteinDistanceTest, SingleElement) {
  ASSERT_EQ(LevenshteinDistance(s1234_, s1_, std::equal_to<char>()), 3);
  ASSERT_EQ(LevenshteinDistance(s1_, s1234_, std::equal_to<char>()), 3);
}
// A proper prefix differs by the length of the leftover suffix.
TEST_F(LevenshteinDistanceTest, Prefix) {
  ASSERT_EQ(LevenshteinDistance(kilo_, kilogram_, std::equal_to<char>()), 4);
  ASSERT_EQ(LevenshteinDistance(kilogram_, kilo_, std::equal_to<char>()), 4);
}
// A proper suffix differs by the length of the leftover prefix.
TEST_F(LevenshteinDistanceTest, Suffix) {
  ASSERT_EQ(LevenshteinDistance(mother_, grandmother_, std::equal_to<char>()),
            5);
  ASSERT_EQ(LevenshteinDistance(grandmother_, mother_, std::equal_to<char>()),
            5);
}
// The comparator is honored: a case-insensitive cmp shrinks the distance
// from 5 (case-sensitive) to 3 for the same pair of strings.
TEST_F(LevenshteinDistanceTest, DifferentComparisons) {
  ASSERT_EQ(LevenshteinDistance(lower_, upper_, std::equal_to<char>()), 5);
  ASSERT_EQ(LevenshteinDistance(upper_, lower_, std::equal_to<char>()), 5);
  ASSERT_EQ(
      LevenshteinDistance(absl::Span<const char>(lower_.data(), lower_.size()),
                          absl::Span<const char>(upper_.data(), upper_.size()),
                          std::equal_to<char>()),
      5);
  auto no_case_cmp = [](char c1, char c2) {
    return std::tolower(c1) == std::tolower(c2);
  };
  ASSERT_EQ(LevenshteinDistance(lower_, upper_, no_case_cmp), 3);
  ASSERT_EQ(LevenshteinDistance(upper_, lower_, no_case_cmp), 3);
}
// The container overload accepts arbitrary contiguous containers built
// inline, not only the fixture's members.
TEST_F(LevenshteinDistanceTest, Vectors) {
  ASSERT_EQ(
      LevenshteinDistance(std::string("algorithm"), std::string("altruistic"),
                          std::equal_to<char>()),
      6);
}
// Benchmarks distance computation over `len`-byte inputs. When
// `completely_different` is set, every byte of `b` is shifted by one so no
// characters match (worst case); otherwise the inputs are identical (the
// early-out `s == t` path).
static void BM_EditDistanceHelper(::testing::benchmark::State& state, int len,
                                  bool completely_different) {
  string a =
      "The quick brown fox jumped over the lazy dog and on and on and on"
      " Every good boy deserves fudge. In fact, this is a very long sentence "
      " w/many bytes..";
  // Double the seed text until it covers the requested length.
  while (a.size() < static_cast<size_t>(len)) {
    a = a + a;
  }
  string b = a;
  if (completely_different) {
    for (size_t i = 0; i < b.size(); i++) {
      b[i]++;
    }
  }
  for (auto s : state) {
    LevenshteinDistance(absl::Span<const char>(a.data(), len),
                        absl::Span<const char>(b.data(), len),
                        std::equal_to<char>());
  }
}
static void BM_EditDistanceSame(::testing::benchmark::State& state) {
  BM_EditDistanceHelper(state, state.range(0), false);
}
static void BM_EditDistanceDiff(::testing::benchmark::State& state) {
  BM_EditDistanceHelper(state, state.range(0), true);
}
BENCHMARK(BM_EditDistanceSame)->Arg(5);
BENCHMARK(BM_EditDistanceSame)->Arg(50);
BENCHMARK(BM_EditDistanceSame)->Arg(200);
BENCHMARK(BM_EditDistanceSame)->Arg(1000);
BENCHMARK(BM_EditDistanceDiff)->Arg(5);
BENCHMARK(BM_EditDistanceDiff)->Arg(50);
BENCHMARK(BM_EditDistanceDiff)->Arg(200);
BENCHMARK(BM_EditDistanceDiff)->Arg(1000);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/gtl/edit_distance.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/gtl/edit_distance_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
24c0b822-8f7f-46ea-8ca8-f947c4705560 | cpp | tensorflow/tensorflow | manual_constructor | tensorflow/core/lib/gtl/manual_constructor.h | tensorflow/core/lib/gtl/manual_constructor_test.cc | #ifndef TENSORFLOW_CORE_LIB_GTL_MANUAL_CONSTRUCTOR_H_
#define TENSORFLOW_CORE_LIB_GTL_MANUAL_CONSTRUCTOR_H_
#include <stddef.h>
#include <new>
#include <utility>
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/mem.h"
namespace tensorflow {
namespace gtl {
namespace internal {
#ifndef SWIG
// AlignType<alignment, size>::result is a char array of `size` bytes whose
// address is aligned to `alignment`. The primary template is empty; only
// the explicit specializations generated below are usable.
template <int alignment, int size>
struct AlignType {};
// alignment == 0 means "no particular alignment required".
template <int size>
struct AlignType<0, size> {
  typedef char result[size];
};
// Compiler-specific spellings of "align to X bytes" and "alignment of T".
#if defined(_MSC_VER)
#define TF_LIB_GTL_ALIGN_ATTRIBUTE(X) __declspec(align(X))
#define TF_LIB_GTL_ALIGN_OF(T) __alignof(T)
#else
#define TF_LIB_GTL_ALIGN_ATTRIBUTE(X) __attribute__((aligned(X)))
#define TF_LIB_GTL_ALIGN_OF(T) __alignof__(T)
#endif
#if defined(TF_LIB_GTL_ALIGN_ATTRIBUTE)
// Stamps out one AlignType specialization per alignment value, because the
// alignment attribute argument must be an integer literal.
#define TF_LIB_GTL_ALIGNTYPE_TEMPLATE(X)                     \
  template <int size>                                        \
  struct AlignType<X, size> {                                \
    typedef TF_LIB_GTL_ALIGN_ATTRIBUTE(X) char result[size]; \
  }
// Cover every power-of-two alignment a compiler may report for a type.
TF_LIB_GTL_ALIGNTYPE_TEMPLATE(1);
TF_LIB_GTL_ALIGNTYPE_TEMPLATE(2);
TF_LIB_GTL_ALIGNTYPE_TEMPLATE(4);
TF_LIB_GTL_ALIGNTYPE_TEMPLATE(8);
TF_LIB_GTL_ALIGNTYPE_TEMPLATE(16);
TF_LIB_GTL_ALIGNTYPE_TEMPLATE(32);
TF_LIB_GTL_ALIGNTYPE_TEMPLATE(64);
TF_LIB_GTL_ALIGNTYPE_TEMPLATE(128);
TF_LIB_GTL_ALIGNTYPE_TEMPLATE(256);
TF_LIB_GTL_ALIGNTYPE_TEMPLATE(512);
TF_LIB_GTL_ALIGNTYPE_TEMPLATE(1024);
TF_LIB_GTL_ALIGNTYPE_TEMPLATE(2048);
TF_LIB_GTL_ALIGNTYPE_TEMPLATE(4096);
TF_LIB_GTL_ALIGNTYPE_TEMPLATE(8192);
// Expands to a char-array type sized and aligned for `Size` objects of T.
#define TF_LIB_GTL_ALIGNED_CHAR_ARRAY(T, Size)                           \
  typename tensorflow::gtl::internal::AlignType<TF_LIB_GTL_ALIGN_OF(T), \
                                                sizeof(T) * Size>::result
#undef TF_LIB_GTL_ALIGNTYPE_TEMPLATE
#undef TF_LIB_GTL_ALIGN_ATTRIBUTE
#else
#error "You must define TF_LIB_GTL_ALIGNED_CHAR_ARRAY for your compiler."
#endif
#else
// SWIG does not understand alignment attributes; give it plain storage.
template <typename Size>
struct AlignType {
  typedef char result[Size];
};
#define TF_LIB_GTL_ALIGNED_CHAR_ARRAY(T, Size) \
  tensorflow::gtl::internal::AlignType<Size * sizeof(T)>::result
#define TF_LIB_GTL_ALIGN_OF(Type) 16
#endif
}
}
// Wraps correctly aligned raw storage for a Type whose construction and
// destruction are driven manually via Init()/Destroy() rather than by the
// wrapper's own lifetime. The caller must pair every Init() with a
// Destroy(); the wrapper never runs Type's constructor or destructor
// itself.
template <typename Type>
class ManualConstructor {
 public:
  // Array new/delete use aligned allocation, since plain operator new[]
  // does not honor over-aligned Types here.
  static void* operator new[](size_t size) {
    return port::AlignedMalloc(size, TF_LIB_GTL_ALIGN_OF(Type));
  }
  static void operator delete[](void* mem) { port::AlignedFree(mem); }
  // Accessors reinterpret the raw storage as a Type; only meaningful
  // between Init() and Destroy().
  inline Type* get() { return reinterpret_cast<Type*>(space_); }
  inline const Type* get() const {
    return reinterpret_cast<const Type*>(space_);
  }
  inline Type* operator->() { return get(); }
  inline const Type* operator->() const { return get(); }
  inline Type& operator*() { return *get(); }
  inline const Type& operator*() const { return *get(); }
  // Default-initializes in place. Note: for PODs this leaves the storage
  // untouched (no zero-initialization) — see ZeroInitializePOD test.
  inline void Init() { new (space_) Type; }
#ifdef LANG_CXX11
  // Perfect-forwards any constructor arguments.
  template <typename... Ts>
  inline void Init(Ts&&... args) {
    new (space_) Type(std::forward<Ts>(args)...);
  }
#else
  // Manual expansion of the variadic Init() for pre-C++11 compilers,
  // supporting up to 11 constructor arguments.
  template <typename T1>
  inline void Init(const T1& p1) {
    new (space_) Type(p1);
  }
  template <typename T1, typename T2>
  inline void Init(const T1& p1, const T2& p2) {
    new (space_) Type(p1, p2);
  }
  template <typename T1, typename T2, typename T3>
  inline void Init(const T1& p1, const T2& p2, const T3& p3) {
    new (space_) Type(p1, p2, p3);
  }
  template <typename T1, typename T2, typename T3, typename T4>
  inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4) {
    new (space_) Type(p1, p2, p3, p4);
  }
  template <typename T1, typename T2, typename T3, typename T4, typename T5>
  inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4,
                   const T5& p5) {
    new (space_) Type(p1, p2, p3, p4, p5);
  }
  template <typename T1, typename T2, typename T3, typename T4, typename T5,
            typename T6>
  inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4,
                   const T5& p5, const T6& p6) {
    new (space_) Type(p1, p2, p3, p4, p5, p6);
  }
  template <typename T1, typename T2, typename T3, typename T4, typename T5,
            typename T6, typename T7>
  inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4,
                   const T5& p5, const T6& p6, const T7& p7) {
    new (space_) Type(p1, p2, p3, p4, p5, p6, p7);
  }
  template <typename T1, typename T2, typename T3, typename T4, typename T5,
            typename T6, typename T7, typename T8>
  inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4,
                   const T5& p5, const T6& p6, const T7& p7, const T8& p8) {
    new (space_) Type(p1, p2, p3, p4, p5, p6, p7, p8);
  }
  template <typename T1, typename T2, typename T3, typename T4, typename T5,
            typename T6, typename T7, typename T8, typename T9>
  inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4,
                   const T5& p5, const T6& p6, const T7& p7, const T8& p8,
                   const T9& p9) {
    new (space_) Type(p1, p2, p3, p4, p5, p6, p7, p8, p9);
  }
  template <typename T1, typename T2, typename T3, typename T4, typename T5,
            typename T6, typename T7, typename T8, typename T9, typename T10>
  inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4,
                   const T5& p5, const T6& p6, const T7& p7, const T8& p8,
                   const T9& p9, const T10& p10) {
    new (space_) Type(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10);
  }
  template <typename T1, typename T2, typename T3, typename T4, typename T5,
            typename T6, typename T7, typename T8, typename T9, typename T10,
            typename T11>
  inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4,
                   const T5& p5, const T6& p6, const T7& p7, const T8& p8,
                   const T9& p9, const T10& p10, const T11& p11) {
    new (space_) Type(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11);
  }
#endif
  // Runs the destructor; the storage remains reusable for a later Init().
  inline void Destroy() { get()->~Type(); }

 private:
  // Raw, suitably aligned storage for exactly one Type.
  TF_LIB_GTL_ALIGNED_CHAR_ARRAY(Type, 1) space_;
};
#undef TF_LIB_GTL_ALIGNED_CHAR_ARRAY
#undef TF_LIB_GTL_ALIGN_OF
}
#endif | #include "tensorflow/core/lib/gtl/manual_constructor.h"
#include <stdint.h>
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
// Global balance of constructed-minus-destroyed TestN objects; lets the
// tests verify that ManualConstructor runs constructors only on Init().
static int constructor_count_ = 0;
// A type of exactly kSize bytes that counts its ctor/dtor calls.
template <int kSize>
struct TestN {
  TestN() { ++constructor_count_; }
  ~TestN() { --constructor_count_; }
  char a[kSize];
};
typedef TestN<1> Test1;
typedef TestN<2> Test2;
typedef TestN<3> Test3;
typedef TestN<4> Test4;
typedef TestN<5> Test5;
typedef TestN<9> Test9;
typedef TestN<15> Test15;
}
namespace {
// The wrapper must add no size overhead, and must not run constructors
// until Init() is called.
TEST(ManualConstructorTest, Sizeof) {
  CHECK_EQ(sizeof(ManualConstructor<Test1>), sizeof(Test1));
  CHECK_EQ(sizeof(ManualConstructor<Test2>), sizeof(Test2));
  CHECK_EQ(sizeof(ManualConstructor<Test3>), sizeof(Test3));
  CHECK_EQ(sizeof(ManualConstructor<Test4>), sizeof(Test4));
  CHECK_EQ(sizeof(ManualConstructor<Test5>), sizeof(Test5));
  CHECK_EQ(sizeof(ManualConstructor<Test9>), sizeof(Test9));
  CHECK_EQ(sizeof(ManualConstructor<Test15>), sizeof(Test15));
  CHECK_EQ(constructor_count_, 0);
  ManualConstructor<Test1> mt[4];
  CHECK_EQ(sizeof(mt), 4);
  CHECK_EQ(constructor_count_, 0);  // array construction runs no Test1 ctors
  mt[0].Init();
  CHECK_EQ(constructor_count_, 1);
  mt[0].Destroy();
}
// The wrapper must give its payload the same alignment the type would get
// as a plain struct member, for pointer- and long-double-aligned types.
TEST(ManualConstructorTest, Alignment) {
  struct {
    char a;
    ManualConstructor<void*> b;
  } test1;
  struct {
    char a;
    void* b;
  } control1;
  // Same offset from the preceding char as the unwrapped member.
  EXPECT_EQ(reinterpret_cast<char*>(test1.b.get()) - &test1.a,
            reinterpret_cast<char*>(&control1.b) - &control1.a);
  EXPECT_EQ(reinterpret_cast<intptr_t>(test1.b.get()) % sizeof(control1.b), 0);
  struct {
    char a;
    ManualConstructor<long double> b;
  } test2;
  struct {
    char a;
    long double b;
  } control2;
  EXPECT_EQ(reinterpret_cast<char*>(test2.b.get()) - &test2.a,
            reinterpret_cast<char*>(&control2.b) - &control2.a);
  EXPECT_EQ(reinterpret_cast<intptr_t>(test2.b.get()) % alignof(long double),
            0);
}
// Init() runs the user-provided default constructor.
TEST(ManualConstructorTest, DefaultInitialize) {
  struct X {
    X() : x(123) {}
    int x;
  };
  union {
    ManualConstructor<X> x;
    ManualConstructor<int> y;
  } u;
  *u.y = -1;
  u.x.Init();  // must overwrite the -1 via X's constructor
  EXPECT_EQ(123, u.x->x);
}
// Init() default-initializes: for a POD like int the storage is left
// untouched (still -1) — i.e. no zero-initialization happens.
TEST(ManualConstructorTest, ZeroInitializePOD) {
  union {
    ManualConstructor<int> x;
    ManualConstructor<int> y;
  } u;
  *u.y = -1;
  u.x.Init();
  EXPECT_EQ(-1, *u.y);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/gtl/manual_constructor.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/gtl/manual_constructor_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b1ba6c91-c123-4aa3-ad58-8bf8217b1498 | cpp | tensorflow/tensorflow | top_n | tensorflow/lite/kernels/ctc/top_n.h | tensorflow/core/lib/gtl/top_n_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_CTC_TOP_N_H_
#define TENSORFLOW_LITE_KERNELS_CTC_TOP_N_H_
#include <stddef.h>
#include <algorithm>
#include <functional>
#include <string>
#include <vector>
#include "tensorflow/lite/kernels/internal/compatibility.h"
namespace tflite {
namespace gtl {
// Keeps the `limit` "best" elements pushed so far, where "best" is defined
// by Cmp (default std::greater, i.e. largest values win). Stores at most
// limit + 1 slots: once full, the first limit_ elements form a heap whose
// front is the weakest survivor and the extra back slot is scratch space.
template <class T, class Cmp = std::greater<T> >
class TopN {
 public:
  // UNORDERED: below capacity, no ordering information maintained.
  // BOTTOM_KNOWN: below capacity, but the weakest element sits at front.
  // HEAP_SORTED: capacity reached; first limit_ elements form a heap.
  enum State { UNORDERED, BOTTOM_KNOWN, HEAP_SORTED };
  using UnsortedIterator = typename std::vector<T>::const_iterator;
  explicit TopN(size_t limit) : TopN(limit, Cmp()) {}
  TopN(size_t limit, const Cmp &cmp) : limit_(limit), cmp_(cmp) {}
  size_t limit() const { return limit_; }
  // Retained element count; excludes the scratch slot when in heap state.
  size_t size() const { return std::min(elements_.size(), limit_); }
  bool empty() const { return size() == 0; }
  // Capacity hint, capped at the limit_ + 1 slots ever needed.
  void reserve(size_t n) { elements_.reserve(std::min(n, limit_ + 1)); }
  // Push variants; the two-argument forms move the evicted (or rejected)
  // element into *dropped when it is non-null.
  void push(const T &v) { push(v, nullptr); }
  void push(const T &v, T *dropped) { PushInternal(v, dropped); }
  void push(T &&v) {
    push(std::move(v), nullptr);
  }
  void push(T &&v, T *dropped) {
    PushInternal(std::move(v), dropped);
  }
  // Returns the weakest retained element. Non-const: may reorder elements_
  // to cache the bottom at the front.
  const T &peek_bottom();
  // Destructive extraction; caller owns the returned heap-allocated vector.
  // Extract() returns elements best-first; ExtractUnsorted() skips sorting.
  std::vector<T> *Extract();
  std::vector<T> *ExtractUnsorted();
  // Non-destructive (copying) counterparts of the above.
  std::vector<T> *ExtractNondestructive() const;
  void ExtractNondestructive(std::vector<T> *output) const;
  std::vector<T> *ExtractUnsortedNondestructive() const;
  void ExtractUnsortedNondestructive(std::vector<T> *output) const;
  // Iteration over retained elements in unspecified order.
  UnsortedIterator unsorted_begin() const { return elements_.begin(); }
  UnsortedIterator unsorted_end() const { return elements_.begin() + size(); }
  Cmp *comparator() { return &cmp_; }
  // Discards all elements and returns to the initial, unordered state.
  void Reset();

 private:
  template <typename U>
  void PushInternal(U &&v, T *dropped);
  // Holds up to limit_ + 1 entries; see State for the layout invariants.
  std::vector<T> elements_;
  size_t limit_;
  Cmp cmp_;
  State state_ = UNORDERED;
};
// Inserts `v`, keeping only the best limit_ elements. If an element is
// evicted (or `v` itself does not make the cut) and `dropped` is non-null,
// the loser is moved into *dropped.
template <class T, class Cmp>
template <typename U>
void TopN<T, Cmp>::PushInternal(U &&v, T *dropped) {
  if (limit_ == 0) {
    // Degenerate limit: nothing is ever retained.
    if (dropped) *dropped = std::forward<U>(v);
    return;
  }
  if (state_ != HEAP_SORTED) {
    // Still filling up: append, but keep the weakest element at the front
    // so a previously computed BOTTOM_KNOWN invariant survives.
    elements_.push_back(std::forward<U>(v));
    if (state_ == UNORDERED || cmp_(elements_.back(), elements_.front())) {
      // Front remains the bottom (or no bottom is being tracked yet).
    } else {
      // The new element is the new bottom; swap it to the front.
      using std::swap;
      swap(elements_.front(), elements_.back());
    }
    if (elements_.size() == limit_ + 1) {
      // One past capacity: heapify (front = weakest under cmp_), hand out
      // the weakest element, and pop it into the now-unused back slot.
      std::make_heap(elements_.begin(), elements_.end(), cmp_);
      if (dropped) *dropped = std::move(elements_.front());
      std::pop_heap(elements_.begin(), elements_.end(), cmp_);
      state_ = HEAP_SORTED;
    }
  } else {
    // At capacity: heap invariant holds over the first limit_ elements and
    // elements_.back() is the scratch slot left by the last pop_heap.
    if (cmp_(v, elements_.front())) {
      // v beats the current weakest: write it into the scratch slot, sift
      // it in, then evict the displaced weakest back into the scratch slot.
      elements_.back() = std::forward<U>(v);
      std::push_heap(elements_.begin(), elements_.end(), cmp_);
      if (dropped) *dropped = std::move(elements_.front());
      std::pop_heap(elements_.begin(), elements_.end(), cmp_);
    } else {
      // v does not make the cut; return it to the caller if requested.
      if (dropped) *dropped = std::forward<U>(v);
    }
  }
}
// Returns the weakest retained element. On first call after unordered
// insertions this costs O(size): it scans for the bottom, swaps it to the
// front, and caches that fact via BOTTOM_KNOWN. In HEAP_SORTED state the
// heap already keeps the weakest element at the front.
template <class T, class Cmp>
const T &TopN<T, Cmp>::peek_bottom() {
  TFLITE_DCHECK(!empty());
  if (state_ == UNORDERED) {
    // Linear scan: advance the candidate whenever it beats element i,
    // leaving the first weakest element on ties.
    int min_candidate = 0;
    for (size_t i = 1; i < elements_.size(); ++i) {
      if (cmp_(elements_[min_candidate], elements_[i])) {
        min_candidate = i;
      }
    }
    if (min_candidate != 0) {
      using std::swap;
      swap(elements_[0], elements_[min_candidate]);
    }
    state_ = BOTTOM_KNOWN;
  }
  return elements_.front();
}
// Extracts the retained elements, best first. The returned vector is
// heap-allocated and owned by the caller. elements_ is emptied but state_
// is left as-is; presumably callers must Reset() before reusing this TopN
// (the Reset() calls in the tests suggest so — verify the header contract).
template <class T, class Cmp>
std::vector<T> *TopN<T, Cmp>::Extract() {
  auto out = new std::vector<T>;
  out->swap(elements_);
  if (state_ != HEAP_SORTED) {
    std::sort(out->begin(), out->end(), cmp_);
  } else {
    // Drop the scratch slot beyond the heap, then unwind the heap into
    // cmp_-sorted (best-first) order.
    out->pop_back();
    std::sort_heap(out->begin(), out->end(), cmp_);
  }
  return out;
}
// As Extract(), but in unspecified order (skips the sort).
template <class T, class Cmp>
std::vector<T> *TopN<T, Cmp>::ExtractUnsorted() {
  auto out = new std::vector<T>;
  out->swap(elements_);
  if (state_ == HEAP_SORTED) {
    out->pop_back();  // discard the scratch slot
  }
  return out;
}
// Non-destructive variant: copies elements out, leaving *this untouched.
template <class T, class Cmp>
std::vector<T> *TopN<T, Cmp>::ExtractNondestructive() const {
  auto out = new std::vector<T>;
  ExtractNondestructive(out);
  return out;
}
// Copies the retained elements into *output, best first.
template <class T, class Cmp>
void TopN<T, Cmp>::ExtractNondestructive(std::vector<T> *output) const {
  TFLITE_DCHECK(output);
  *output = elements_;
  if (state_ != HEAP_SORTED) {
    std::sort(output->begin(), output->end(), cmp_);
  } else {
    output->pop_back();  // discard the copied scratch slot
    std::sort_heap(output->begin(), output->end(), cmp_);
  }
}
// Copies the retained elements into a new vector in unspecified order.
template <class T, class Cmp>
std::vector<T> *TopN<T, Cmp>::ExtractUnsortedNondestructive() const {
  auto elements = new std::vector<T>;
  ExtractUnsortedNondestructive(elements);
  return elements;
}
// Copies the retained elements into *output in unspecified order.
template <class T, class Cmp>
void TopN<T, Cmp>::ExtractUnsortedNondestructive(std::vector<T> *output) const {
  TFLITE_DCHECK(output);
  *output = elements_;
  if (state_ == HEAP_SORTED) {
    output->pop_back();  // discard the copied scratch slot
  }
}
// Discards every pushed element and returns to the initial, unordered
// state, making the container ready for a fresh round of pushes.
template <class T, class Cmp>
void TopN<T, Cmp>::Reset() {
  state_ = UNORDERED;
  elements_.clear();
}
}
}
#endif | #include "tensorflow/core/lib/gtl/top_n.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>

#include "tensorflow/core/lib/random/simple_philox.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
namespace {
using tensorflow::string;
using tensorflow::gtl::TopN;
using tensorflow::random::PhiloxRandom;
using tensorflow::random::SimplePhilox;
// Takes ownership of the heap-allocated *p, moves its value out, and frees
// it. Holding `p` in a unique_ptr makes the delete unconditional: the
// previous version deleted only after the move, so a throwing move
// constructor of T would have leaked the allocation.
template <class T>
T ConsumeRawPtr(T *p) {
  std::unique_ptr<T> owned(p);  // freed on every exit path
  return std::move(*owned);
}
// Pushes `n_elements` random ints in [0, limit) into a TopN of capacity
// `limit`, then cross-checks size, bottom element, and extracted contents
// against a fully sorted shadow copy.
template <class Cmp>
void TestIntTopNHelper(size_t limit, size_t n_elements, const Cmp &cmp,
                       SimplePhilox *random, bool test_peek,
                       bool test_extract_unsorted) {
  LOG(INFO) << "Testing limit=" << limit << ", n_elements=" << n_elements
            << ", test_peek=" << test_peek
            << ", test_extract_unsorted=" << test_extract_unsorted;
  TopN<int, Cmp> top(limit, cmp);
  std::vector<int> shadow(n_elements);
  for (int i = 0; i != n_elements; ++i) shadow[i] = random->Uniform(limit);
  for (int e : shadow) top.push(e);
  std::sort(shadow.begin(), shadow.end(), cmp);
  size_t top_size = std::min(limit, n_elements);
  EXPECT_EQ(top_size, top.size());
  if (test_peek && top_size != 0) {
    // peek_bottom() must return the worst of the kept elements.
    EXPECT_EQ(shadow[top_size - 1], top.peek_bottom());
  }
  std::vector<int> v;
  if (test_extract_unsorted) {
    v = ConsumeRawPtr(top.ExtractUnsorted());
    std::sort(v.begin(), v.end(), cmp);  // order unspecified: sort ourselves
  } else {
    v = ConsumeRawPtr(top.Extract());
  }
  EXPECT_EQ(top_size, v.size());
  for (int i = 0; i != top_size; ++i) {
    VLOG(1) << "Top element " << v[i];
    EXPECT_EQ(shadow[i], v[i]);
  }
}
// Runs the helper over all four peek/extract combinations, since peeking
// mutates internal state (BOTTOM_KNOWN) that extraction must then handle.
template <class Cmp>
void TestIntTopN(size_t limit, size_t n_elements, const Cmp &cmp,
                 SimplePhilox *random) {
  TestIntTopNHelper(limit, n_elements, cmp, random, true, false);
  TestIntTopNHelper(limit, n_elements, cmp, random, false, false);
  TestIntTopNHelper(limit, n_elements, cmp, random, true, true);
  TestIntTopNHelper(limit, n_elements, cmp, random, false, true);
}
// Exercises edge cases: zero limit, zero elements, limit == n, n just above
// and below the limit, both comparator directions, and effectively
// unbounded limits (size_t(-1)).
TEST(TopNTest, Misc) {
  PhiloxRandom philox(1, 1);
  SimplePhilox random(&philox);
  TestIntTopN(0, 5, std::greater<int>(), &random);
  TestIntTopN(32, 0, std::greater<int>(), &random);
  TestIntTopN(6, 6, std::greater<int>(), &random);
  TestIntTopN(6, 6, std::less<int>(), &random);
  TestIntTopN(1000, 999, std::greater<int>(), &random);
  TestIntTopN(1000, 1000, std::greater<int>(), &random);
  TestIntTopN(1000, 1001, std::greater<int>(), &random);
  TestIntTopN(2300, 28393, std::less<int>(), &random);
  TestIntTopN(30, 100, std::greater<int>(), &random);
  TestIntTopN(100, 30, std::less<int>(), &random);
  TestIntTopN(size_t(-1), 3, std::greater<int>(), &random);
  TestIntTopN(size_t(-1), 0, std::greater<int>(), &random);
  TestIntTopN(0, 5, std::greater<int>(), &random);
}
// End-to-end behavior with strings: bottom tracking, copy/assign of whole
// TopN objects, extraction order, and Reset() for reuse.
TEST(TopNTest, String) {
  LOG(INFO) << "Testing strings";
  TopN<string> top(3);
  EXPECT_TRUE(top.empty());
  top.push("abracadabra");
  top.push("waldemar");
  EXPECT_EQ(2, top.size());
  EXPECT_EQ("abracadabra", top.peek_bottom());
  top.push("");
  EXPECT_EQ(3, top.size());
  EXPECT_EQ("", top.peek_bottom());
  top.push("top");  // evicts "" once over capacity
  EXPECT_EQ(3, top.size());
  EXPECT_EQ("abracadabra", top.peek_bottom());
  top.push("Google");
  top.push("test");
  EXPECT_EQ(3, top.size());
  EXPECT_EQ("test", top.peek_bottom());
  // Copy construction and copy assignment must preserve contents.
  TopN<string> top2(top);
  TopN<string> top3(5);
  top3 = top;
  EXPECT_EQ("test", top3.peek_bottom());
  {
    std::vector<string> s = ConsumeRawPtr(top.Extract());
    EXPECT_EQ(s[0], "waldemar");
    EXPECT_EQ(s[1], "top");
    EXPECT_EQ(s[2], "test");
  }
  top2.push("zero");
  EXPECT_EQ(top2.peek_bottom(), "top");
  {
    std::vector<string> s = ConsumeRawPtr(top2.Extract());
    EXPECT_EQ(s[0], "zero");
    EXPECT_EQ(s[1], "waldemar");
    EXPECT_EQ(s[2], "top");
  }
  {
    std::vector<string> s = ConsumeRawPtr(top3.Extract());
    EXPECT_EQ(s[0], "waldemar");
    EXPECT_EQ(s[1], "top");
    EXPECT_EQ(s[2], "test");
  }
  // Reset() must allow full reuse; run twice to prove it.
  TopN<string> top4(3);
  for (int i = 0; i < 2; ++i) {
    top4.push("abcd");
    top4.push("ijkl");
    top4.push("efgh");
    top4.push("mnop");
    std::vector<string> s = ConsumeRawPtr(top4.Extract());
    EXPECT_EQ(s[0], "mnop");
    EXPECT_EQ(s[1], "ijkl");
    EXPECT_EQ(s[2], "efgh");
    top4.Reset();
  }
}
// The two-argument push() must hand every evicted pointer back so the
// caller can free it; otherwise this test leaks (and fails under asan).
TEST(TopNTest, Ptr) {
  LOG(INFO) << "Testing 2-argument push()";
  TopN<string *> topn(3);
  for (int i = 0; i < 8; ++i) {
    string *dropped = nullptr;
    topn.push(new string(std::to_string(i)), &dropped);
    delete dropped;
  }
  for (int i = 8; i > 0; --i) {
    string *dropped = nullptr;
    topn.push(new string(std::to_string(i)), &dropped);
    delete dropped;
  }
  std::vector<string *> extract = ConsumeRawPtr(topn.Extract());
  for (auto &temp : extract) {
    delete temp;
  }
  extract.clear();
}
// Comparator that orders pointer-like values by what they point at,
// best-first (larger pointee wins), so TopN can rank owned pointers.
struct PointeeGreater {
  template <typename P>
  bool operator()(const P &x, const P &y) const {
    return *x > *y;
  }
};
// TopN must work with move-only element types (unique_ptr) and the
// pointee-based comparator. Top-3 of {0..7} then {8..1} is {8, 7, 7}: 7 is
// pushed by both loops.
TEST(TopNTest, MoveOnly) {
  using StrPtr = std::unique_ptr<string>;
  TopN<StrPtr, PointeeGreater> topn(3);
  for (int i = 0; i < 8; ++i) topn.push(StrPtr(new string(std::to_string(i))));
  for (int i = 8; i > 0; --i) topn.push(StrPtr(new string(std::to_string(i))));
  std::vector<StrPtr> extract = ConsumeRawPtr(topn.Extract());
  EXPECT_EQ(extract.size(), 3);
  EXPECT_EQ(*(extract[0]), "8");
  EXPECT_EQ(*(extract[1]), "7");
  EXPECT_EQ(*(extract[2]), "7");
}
// Nondestructive extraction must leave the TopN usable: extract after each
// push and keep pushing into the same object.
TEST(TopNTest, Nondestructive) {
  LOG(INFO) << "Testing Nondestructive extracts";
  TopN<int> top4(4);
  for (int i = 0; i < 8; ++i) {
    top4.push(i);
    std::vector<int> v = ConsumeRawPtr(top4.ExtractNondestructive());
    EXPECT_EQ(std::min(i + 1, 4), v.size());
    for (size_t j = 0; j < v.size(); ++j) EXPECT_EQ(i - j, v[j]);
  }
  TopN<int> top3(3);
  for (int i = 0; i < 8; ++i) {
    top3.push(i);
    std::vector<int> v = ConsumeRawPtr(top3.ExtractUnsortedNondestructive());
    std::sort(v.begin(), v.end(), std::greater<int>());
    EXPECT_EQ(std::min(i + 1, 3), v.size());
    for (size_t j = 0; j < v.size(); ++j) EXPECT_EQ(i - j, v[j]);
  }
}
// Comparator that aborts if ever invoked; proves the zero-limit path never
// compares elements.
struct ForbiddenCmp {
  bool operator()(int lhs, int rhs) const {
    LOG(FATAL) << "ForbiddenCmp called " << lhs << " " << rhs;
  }
};
// With limit 0, every push is rejected without any comparison and the
// container stays empty.
TEST(TopNTest, ZeroLimit) {
  TopN<int, ForbiddenCmp> top(0);
  top.push(1);
  top.push(2);
  int dropped = -1;
  top.push(1, &dropped);
  top.push(2, &dropped);
  std::vector<int> v;
  top.ExtractNondestructive(&v);
  EXPECT_EQ(0, v.size());
}
// unsorted_begin()/unsorted_end() must cover exactly the retained elements,
// excluding the scratch slot, in some order.
TEST(TopNTest, Iteration) {
  TopN<int> top(4);
  for (int i = 0; i < 8; ++i) top.push(i);
  std::vector<int> actual(top.unsorted_begin(), top.unsorted_end());
  std::sort(actual.begin(), actual.end());
  EXPECT_EQ(actual.size(), 4);
  EXPECT_EQ(actual[0], 4);
  EXPECT_EQ(actual[1], 5);
  EXPECT_EQ(actual[2], 6);
  EXPECT_EQ(actual[3], 7);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/ctc/top_n.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/gtl/top_n_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e9360b62-e10d-4dc4-b8bd-f135d545de17 | cpp | tensorflow/tensorflow | tfprof_advisor | tensorflow/core/profiler/internal/advisor/tfprof_advisor.h | tensorflow/core/profiler/internal/advisor/tfprof_advisor_test.cc | #ifndef TENSORFLOW_CORE_PROFILER_INTERNAL_ADVISOR_TFPROF_ADVISOR_H_
#define TENSORFLOW_CORE_PROFILER_INTERNAL_ADVISOR_TFPROF_ADVISOR_H_
#include <vector>
#include "absl/strings/str_format.h"
#include "tensorflow/core/profiler/internal/advisor/accelerator_utilization_checker.h"
#include "tensorflow/core/profiler/internal/advisor/checker.h"
#include "tensorflow/core/profiler/internal/advisor/expensive_operation_checker.h"
#include "tensorflow/core/profiler/internal/advisor/internal_checker_runner.h"
#include "tensorflow/core/profiler/internal/advisor/operation_checker.h"
#include "tensorflow/core/profiler/tfprof_options.pb.h"
namespace tensorflow {
namespace tfprof {
// Aggregates the individual tfprof checkers, runs the ones enabled in the
// options over a TFStats profile, and prints each checker's reports to
// stdout while also returning them as a proto.
class Advisor {
 public:
  // Does not take ownership of `stats`; it must outlive this Advisor.
  // NOTE(review): single-argument ctor is implicit; consider `explicit`.
  Advisor(const TFStats* stats) : stats_(stats) {}
  // Returns options that enable every checker in kCheckers, each with
  // default (empty) per-checker settings.
  static AdvisorOptionsProto DefaultOptions() {
    AdvisorOptionsProto options;
    std::vector<string> checkers(
        kCheckers, kCheckers + sizeof(kCheckers) / sizeof(*kCheckers));
    for (const string& checker : checkers) {
      // operator[] inserts an empty entry, which is what enables a checker.
      (*options.mutable_checkers())[checker];
    }
    return options;
  }
  // Runs the internal checkers plus each selected public checker, merging
  // every checker's reports into one AdviceProto. kCheckers[0..2] map to
  // the accelerator-utilization, operation, and expensive-operation
  // checkers respectively.
  AdviceProto Advise(const AdvisorOptionsProto& options) {
    AdviceProto ret = RunInternalCheckers(options, stats_);
    if (options.checkers().find(kCheckers[0]) != options.checkers().end()) {
      AcceleratorUtilizationChecker au_checker;
      (*ret.mutable_checkers())[kCheckers[0]].MergeFrom(
          au_checker.Run(options.checkers().at(kCheckers[0]), stats_));
    }
    if (options.checkers().find(kCheckers[1]) != options.checkers().end()) {
      OperationChecker op_checker;
      (*ret.mutable_checkers())[kCheckers[1]].MergeFrom(
          op_checker.Run(options.checkers().at(kCheckers[1]), stats_));
    }
    if (options.checkers().find(kCheckers[2]) != options.checkers().end()) {
      ExpensiveOperationChecker expensive_op_checker;
      (*ret.mutable_checkers())[kCheckers[2]].MergeFrom(
          expensive_op_checker.Run(options.checkers().at(kCheckers[2]),
                                   stats_));
    }
    // Echo every report so command-line users see the advice immediately.
    for (const auto& checker : ret.checkers()) {
      absl::FPrintF(stdout, "\n%s:\n", checker.first);
      for (const string& r : checker.second.reports()) {
        absl::FPrintF(stdout, "%s\n", r);
      }
    }
    fflush(stdout);
    return ret;
  }

 private:
  const TFStats* stats_;  // not owned
};
}
}
#endif | #include "tensorflow/core/profiler/internal/advisor/tfprof_advisor.h"
#include <map>
#include <memory>
#include <vector>
#include "absl/strings/match.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/step_stats.pb.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/internal/advisor/checker.h"
#include "tensorflow/core/profiler/internal/tfprof_node.h"
#include "tensorflow/core/profiler/internal/tfprof_stats.h"
#include "tensorflow/core/profiler/tfprof_options.pb.h"
#include "tensorflow/core/profiler/tfprof_output.pb.h"
namespace tensorflow {
namespace tfprof {
// Builds a minimal TFStats profile containing two GPU Conv2D nodes — one
// with an NHWC data_format attribute, one without — so every checker has
// something to analyze.
class TFProfAdvisorTest : public ::testing::Test {
 protected:
  TFProfAdvisorTest() {
    stats_ = std::make_unique<TFStats>(std::make_unique<GraphDef>(), nullptr,
                                       nullptr, nullptr);
    stats_->AddNodeForTest(
        0, CreateNode("n1", "Conv2D", {{"data_format", "NHWC"}}, 0, 10, 2));
    stats_->AddNodeForTest(0, CreateNode("n2", "Conv2D", {}, 0, 20, 2));
    stats_->BuildAllViews();
    advisor_ = std::make_unique<Advisor>(stats_.get());
  }
  // Creates a graph node with the given op type and string attributes, and
  // attaches identical exec stats to the GPU device and both of its stream
  // devices (":stream:all" and ":stream:0").
  std::unique_ptr<TFGraphNode> CreateNode(const string& name,
                                          const string& type,
                                          std::map<string, string> attrs,
                                          int64_t step, int64_t start_miros,
                                          int64_t end_rel_micros) {
    node_defs_.push_back(std::make_unique<NodeDef>());
    NodeDef* def = node_defs_.back().get();
    def->set_name(name);
    def->set_op(type);
    for (const auto& attr : attrs) {
      (*def->mutable_attr())[attr.first].set_s(attr.second);
    }
    std::unique_ptr<TFGraphNode> node(new TFGraphNode(def, -1, nullptr));
    NodeExecStats node_stat;
    node_stat.set_all_start_micros(start_miros);
    node_stat.set_op_end_rel_micros(end_rel_micros);
    node->AddStepStat(step, "/job:localhost/replica:0/task:0/device:GPU:0",
                      node_stat);
    node->AddStepStat(step,
                      "/job:localhost/replica:0/task:0/device:GPU:0:stream:all",
                      node_stat);
    node->AddStepStat(step,
                      "/job:localhost/replica:0/task:0/device:GPU:0:stream:0",
                      node_stat);
    return node;
  }
  std::unique_ptr<TFStats> stats_;
  std::unique_ptr<Advisor> advisor_;
  // Keeps NodeDefs alive for the whole test; TFGraphNode is constructed
  // with a raw pointer to them (presumably non-owning — see CreateNode).
  std::vector<std::unique_ptr<NodeDef>> node_defs_;
};
// DefaultOptions must enable all three checkers and Advise must return an
// entry for each of them.
TEST_F(TFProfAdvisorTest, Basics) {
  AdvisorOptionsProto options = Advisor::DefaultOptions();
  AdviceProto advice = advisor_->Advise(options);
  EXPECT_TRUE(advice.checkers().find(kCheckers[0]) != advice.checkers().end());
  EXPECT_TRUE(advice.checkers().find(kCheckers[1]) != advice.checkers().end());
  EXPECT_TRUE(advice.checkers().find(kCheckers[2]) != advice.checkers().end());
}
// The operation checker flags the NHWC data format in its report.
TEST_F(TFProfAdvisorTest, OperationChecker) {
  AdvisorOptionsProto options;
  (*options.mutable_checkers())[kCheckers[1]];
  AdviceProto advice = advisor_->Advise(options);
  EXPECT_EQ(advice.checkers().at(kCheckers[1]).reports_size(), 1);
  EXPECT_TRUE(
      absl::StrContains(advice.checkers().at(kCheckers[1]).reports(0), "NCHW"));
}
// The utilization checker reports low accelerator utilization for this
// nearly idle profile.
TEST_F(TFProfAdvisorTest, UtilizationChecker) {
  AdvisorOptionsProto options;
  (*options.mutable_checkers())[kCheckers[0]];
  AdviceProto advice = advisor_->Advise(options);
  EXPECT_EQ(advice.checkers().at(kCheckers[0]).reports_size(), 1);
  EXPECT_TRUE(absl::StrContains(advice.checkers().at(kCheckers[0]).reports(0),
                                "low utilization"));
}
// The expensive-operation checker ranks Conv2D as the top op type, since
// both profiled nodes are Conv2Ds.
TEST_F(TFProfAdvisorTest, ExpensiveOperationChecker) {
  AdvisorOptionsProto options;
  (*options.mutable_checkers())[kCheckers[2]];
  AdviceProto advice = advisor_->Advise(options);
  EXPECT_TRUE(absl::StrContains(advice.checkers().at(kCheckers[2]).reports(0),
                                "top 1 operation type: Conv2D"));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/internal/advisor/tfprof_advisor.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/internal/advisor/tfprof_advisor_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
239cfaaf-48f0-4bbb-8672-546770e99723 | cpp | tensorflow/tensorflow | thread_safe_buffer | tensorflow/core/data/service/thread_safe_buffer.h | tensorflow/core/data/service/thread_safe_buffer_test.cc | #ifndef TENSORFLOW_CORE_DATA_SERVICE_THREAD_SAFE_BUFFER_H_
#define TENSORFLOW_CORE_DATA_SERVICE_THREAD_SAFE_BUFFER_H_
#include <deque>
#include <utility>
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
namespace tensorflow {
namespace data {
// A thread-safe bounded FIFO buffer of StatusOr<T> values (a classic
// producer/consumer queue). Push() blocks while the buffer is full, Pop()
// blocks while it is empty, and Cancel() unblocks all current and future
// waiters with a non-OK status.
template <class T>
class ThreadSafeBuffer final {
 public:
  // Creates a buffer that holds at most `buffer_size` elements.
  // `buffer_size` must be positive (DCHECK-enforced).
  explicit ThreadSafeBuffer(size_t buffer_size);
  // Removes and returns the oldest value. Blocks until a value is available
  // or the buffer is cancelled; after cancellation, returns the
  // cancellation status.
  StatusOr<T> Pop();
  // Appends `value`. Blocks until there is free space or the buffer is
  // cancelled; after cancellation, returns the cancellation status.
  Status Push(StatusOr<T> value);
  // Cancels the buffer with `status` (must be non-OK) and wakes all waiters.
  void Cancel(Status status);
  // Returns true iff the buffer currently holds no elements.
  bool Empty() const;
 private:
  const size_t buffer_size_;
  mutable mutex mu_;
  // Signalled when an element is added (wakes blocked Pop callers).
  condition_variable ready_to_pop_;
  // Signalled when an element is removed (wakes blocked Push callers).
  condition_variable ready_to_push_;
  std::deque<StatusOr<T>> results_ TF_GUARDED_BY(mu_);
  // Becomes non-OK once Cancel() is called; checked before and after every
  // wait so blocked callers bail out promptly.
  Status status_ TF_GUARDED_BY(mu_) = absl::OkStatus();
  ThreadSafeBuffer(const ThreadSafeBuffer&) = delete;
  void operator=(const ThreadSafeBuffer&) = delete;
};
template <class T>
ThreadSafeBuffer<T>::ThreadSafeBuffer(size_t buffer_size)
    : buffer_size_(buffer_size) {
  DCHECK_GT(buffer_size, 0)
      << "ThreadSafeBuffer must have a positive buffer size. Got "
      << buffer_size << ".";
}
template <class T>
bool ThreadSafeBuffer<T>::Empty() const {
  tf_shared_lock l(mu_);
  return results_.empty();
}
template <class T>
StatusOr<T> ThreadSafeBuffer<T>::Pop() {
  mutex_lock l(mu_);
  // Wait until there is data or the buffer is cancelled. The loop guards
  // against spurious wakeups.
  while (status_.ok() && results_.empty()) {
    ready_to_pop_.wait(l);
  }
  if (!status_.ok()) {
    return status_;
  }
  StatusOr<T> result = std::move(results_.front());
  results_.pop_front();
  // A slot was freed; let one blocked producer proceed.
  ready_to_push_.notify_one();
  return result;
}
template <class T>
Status ThreadSafeBuffer<T>::Push(StatusOr<T> value) {
  mutex_lock l(mu_);
  // Wait until there is room or the buffer is cancelled.
  while (status_.ok() && results_.size() >= buffer_size_) {
    ready_to_push_.wait(l);
  }
  if (!status_.ok()) {
    return status_;
  }
  results_.push_back(std::move(value));
  // A value is available; let one blocked consumer proceed.
  ready_to_pop_.notify_one();
  return absl::OkStatus();
}
template <class T>
void ThreadSafeBuffer<T>::Cancel(Status status) {
  DCHECK(!status.ok())
      << "Cancelling ThreadSafeBuffer requires a non-OK status. Got " << status;
  mutex_lock l(mu_);
  status_ = std::move(status);
  // Wake every waiter on both sides so they observe the non-OK status.
  ready_to_push_.notify_all();
  ready_to_pop_.notify_all();
}
}
}
#endif | #include "tensorflow/core/data/service/thread_safe_buffer.h"
#include <memory>
#include <tuple>
#include <vector>
#include "absl/strings/str_cat.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace data {
namespace {
using ::tensorflow::testing::IsOk;
using ::tensorflow::testing::StatusIs;
using ::testing::UnorderedElementsAreArray;
// Parameterized fixture: the tuple parameter is (buffer size, number of
// elements pushed/popped in the test).
class ThreadSafeBufferTest
    : public ::testing::Test,
      public ::testing::WithParamInterface<std::tuple<size_t, size_t>> {
 protected:
  // Capacity of the buffer under test.
  size_t GetBufferSize() const { return std::get<0>(GetParam()); }
  // How many elements each test moves through the buffer.
  size_t GetNumOfElements() const { return std::get<1>(GetParam()); }
};
// Builds the sequence {0, 1, ..., range - 1} as ints.
std::vector<int> GetRange(const size_t range) {
  std::vector<int> values;
  values.reserve(range);
  for (size_t idx = 0; idx < range; ++idx) {
    values.push_back(static_cast<int>(idx));
  }
  return values;
}
// Cover buffer smaller than, equal to, and larger than the element count.
INSTANTIATE_TEST_SUITE_P(VaryingBufferAndInputSizes, ThreadSafeBufferTest,
                         ::testing::Values(std::make_tuple(1, 2),
                                           std::make_tuple(2, 10),
                                           std::make_tuple(10, 2)));
// A single producer thread and the test thread as consumer: values must come
// out in FIFO order.
TEST_P(ThreadSafeBufferTest, OneReaderAndOneWriter) {
  ThreadSafeBuffer<int> buffer(GetBufferSize());
  auto thread = absl::WrapUnique(Env::Default()->StartThread(
      {}, "writer_thread", [this, &buffer]() {
        for (int i = 0; i < GetNumOfElements(); ++i) {
          ASSERT_THAT(buffer.Push(i), IsOk());
        }
      }));
  for (size_t i = 0; i < GetNumOfElements(); ++i) {
    TF_ASSERT_OK_AND_ASSIGN(int next, buffer.Pop());
    EXPECT_EQ(next, i);
  }
}
// Many producers, one consumer: ordering is unspecified, but every value must
// arrive exactly once.
TEST_P(ThreadSafeBufferTest, OneReaderAndMultipleWriters) {
  ThreadSafeBuffer<int> buffer(GetBufferSize());
  std::vector<std::unique_ptr<Thread>> threads;
  for (int i = 0; i < GetNumOfElements(); ++i) {
    threads.push_back(absl::WrapUnique(Env::Default()->StartThread(
        {}, absl::StrCat("writer_thread_", i),
        [&buffer, i] { ASSERT_THAT(buffer.Push(i), IsOk()); })));
  }
  std::vector<int> results;
  for (int i = 0; i < GetNumOfElements(); ++i) {
    TF_ASSERT_OK_AND_ASSIGN(int next, buffer.Pop());
    results.push_back(next);
  }
  EXPECT_THAT(results, UnorderedElementsAreArray(GetRange(GetNumOfElements())));
}
// Many consumers, one producer; `threads.clear()` joins all readers before
// the results are checked.
TEST_P(ThreadSafeBufferTest, MultipleReadersAndOneWriter) {
  ThreadSafeBuffer<int> buffer(GetBufferSize());
  mutex mu;
  std::vector<int> results;
  std::vector<std::unique_ptr<Thread>> threads;
  for (int i = 0; i < GetNumOfElements(); ++i) {
    threads.push_back(absl::WrapUnique(Env::Default()->StartThread(
        {}, absl::StrCat("reader_thread_", i),
        [&buffer, &mu, &results]() {
          TF_ASSERT_OK_AND_ASSIGN(int next, buffer.Pop());
          mutex_lock l(mu);
          results.push_back(next);
        })));
  }
  for (int i = 0; i < GetNumOfElements(); ++i) {
    ASSERT_THAT(buffer.Push(i), IsOk());
  }
  threads.clear();
  EXPECT_THAT(results, UnorderedElementsAreArray(GetRange(GetNumOfElements())));
}
// Concurrent producers and consumers together; again joined via
// `threads.clear()` before verification.
TEST_P(ThreadSafeBufferTest, MultipleReadersAndWriters) {
  ThreadSafeBuffer<int> buffer(GetBufferSize());
  mutex mu;
  std::vector<int> results;
  std::vector<std::unique_ptr<Thread>> threads;
  for (int i = 0; i < GetNumOfElements(); ++i) {
    threads.push_back(absl::WrapUnique(Env::Default()->StartThread(
        {}, absl::StrCat("reader_thread_", i),
        [&buffer, &mu, &results]() {
          TF_ASSERT_OK_AND_ASSIGN(int next, buffer.Pop());
          mutex_lock l(mu);
          results.push_back(next);
        })));
  }
  for (int i = 0; i < GetNumOfElements(); ++i) {
    threads.push_back(absl::WrapUnique(Env::Default()->StartThread(
        {}, absl::StrCat("writer_thread_", i),
        [&buffer, i]() { ASSERT_THAT(buffer.Push(i), IsOk()); })));
  }
  threads.clear();
  EXPECT_THAT(results, UnorderedElementsAreArray(GetRange(GetNumOfElements())));
}
// A reader on an empty buffer must block until a value is pushed; the sleep
// makes it very likely the reader is already waiting when Push happens.
TEST_P(ThreadSafeBufferTest, BlockReaderWhenBufferIsEmpty) {
  ThreadSafeBuffer<Tensor> buffer(GetBufferSize());
  auto thread = absl::WrapUnique(Env::Default()->StartThread(
      {}, "reader_thread", [&buffer]() {
        TF_ASSERT_OK_AND_ASSIGN(Tensor tensor, buffer.Pop());
        test::ExpectEqual(tensor, Tensor("Test tensor"));
      }));
  Env::Default()->SleepForMicroseconds(10000);
  ASSERT_THAT(buffer.Push(Tensor("Test tensor")), IsOk());
}
// A writer on a full buffer must block until a Pop frees a slot; the
// timestamps prove Push completed only after Pop.
TEST_P(ThreadSafeBufferTest, BlockWriterWhenBufferIsFull) {
  ThreadSafeBuffer<Tensor> buffer(GetBufferSize());
  for (int i = 0; i < GetBufferSize(); ++i) {
    ASSERT_THAT(buffer.Push(Tensor("Test tensor")), IsOk());
  }
  uint64 push_time = 0;
  auto thread = absl::WrapUnique(Env::Default()->StartThread(
      {}, "writer_thread", [&buffer, &push_time]() {
        ASSERT_THAT(buffer.Push(Tensor("Test tensor")), IsOk());
        push_time = Env::Default()->NowMicros();
      }));
  Env::Default()->SleepForMicroseconds(10000);
  uint64 pop_time = Env::Default()->NowMicros();
  ASSERT_THAT(buffer.Pop(), IsOk());
  thread.reset();
  EXPECT_LE(pop_time, push_time);
}
// Cancel() must unblock all readers waiting on an empty buffer with the
// cancellation status.
TEST_P(ThreadSafeBufferTest, CancelReaders) {
  ThreadSafeBuffer<int> buffer(GetBufferSize());
  std::vector<std::unique_ptr<Thread>> threads;
  for (int i = 0; i < GetNumOfElements(); ++i) {
    threads.push_back(absl::WrapUnique(Env::Default()->StartThread(
        {}, absl::StrCat("reader_thread_", i),
        [&buffer]() { EXPECT_THAT(buffer.Pop(), StatusIs(error::ABORTED)); })));
  }
  buffer.Cancel(errors::Aborted("Aborted"))
      ;
}
// Cancel() must unblock all writers waiting on a full buffer, and subsequent
// Push calls must keep failing with the cancellation status.
TEST_P(ThreadSafeBufferTest, CancelWriters) {
  ThreadSafeBuffer<Tensor> buffer(GetBufferSize());
  for (int i = 0; i < GetBufferSize(); ++i) {
    ASSERT_THAT(buffer.Push(Tensor("Test tensor")), IsOk());
  }
  std::vector<std::unique_ptr<Thread>> threads;
  for (int i = 0; i < GetNumOfElements(); ++i) {
    threads.push_back(absl::WrapUnique(Env::Default()->StartThread(
        {}, absl::StrCat("writer_thread_", i),
        [&buffer]() {
          for (int i = 0; i < 100; ++i) {
            EXPECT_THAT(buffer.Push(Tensor("Test tensor")),
                        StatusIs(error::CANCELLED));
          }
        })));
  }
  buffer.Cancel(errors::Cancelled("Cancelled"));
}
// Later Cancel() calls overwrite the stored status; Push/Pop report the most
// recent one.
TEST_P(ThreadSafeBufferTest, CancelMultipleTimes) {
  ThreadSafeBuffer<Tensor> buffer(GetBufferSize());
  buffer.Cancel(errors::Unknown("Unknown"));
  EXPECT_THAT(buffer.Push(Tensor("Test tensor")), StatusIs(error::UNKNOWN));
  buffer.Cancel(errors::DeadlineExceeded("Deadline exceeded"));
  EXPECT_THAT(buffer.Pop(), StatusIs(error::DEADLINE_EXCEEDED));
  buffer.Cancel(errors::ResourceExhausted("Resource exhausted"));
  EXPECT_THAT(buffer.Push(Tensor("Test tensor")),
              StatusIs(error::RESOURCE_EXHAUSTED));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/thread_safe_buffer.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/thread_safe_buffer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b06baea6-f014-46e5-b82a-6472409090f8 | cpp | tensorflow/tensorflow | cross_trainer_cache | tensorflow/core/data/service/cross_trainer_cache.h | tensorflow/core/data/service/cross_trainer_cache_test.cc | #ifndef TENSORFLOW_CORE_DATA_SERVICE_CROSS_TRAINER_CACHE_H_
#define TENSORFLOW_CORE_DATA_SERVICE_CROSS_TRAINER_CACHE_H_
#include <cstddef>
#include <deque>
#include <functional>
#include <limits>
#include <memory>
#include <string>
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "tensorflow/core/data/service/byte_size.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/thread_annotations.h"
namespace tensorflow {
namespace data {
// Interface for a sequence whose elements can be cached by CrossTrainerCache.
// Implementations produce elements on demand and report each element's size
// so the cache can enforce its memory budget.
template <class ElementType>
class CachableSequence {
 public:
  virtual ~CachableSequence() = default;
  // Produces the next element of the underlying sequence.
  virtual StatusOr<ElementType> GetNext() = 0;
  // Returns the in-memory size of the given element in bytes.
  virtual size_t GetElementSizeBytes(const ElementType&) const = 0;
};
// A sliding-window cache shared by multiple trainers. Each trainer (keyed by
// a non-empty trainer ID) reads elements in order; the cache keeps a bounded
// window of recent elements, extends it on demand, and evicts the oldest
// elements when the memory budget is exceeded. Trainers that fall behind the
// window skip the evicted elements. Thread-safe.
template <class ElementType>
class CrossTrainerCache {
 public:
  // Creates a cache bounded by `max_cache_size_bytes` (must be positive)
  // that pulls new elements from `cachable_sequence`.
  explicit CrossTrainerCache(
      size_t max_cache_size_bytes,
      std::unique_ptr<CachableSequence<ElementType>> cachable_sequence);
  virtual ~CrossTrainerCache() = default;
  CrossTrainerCache(const CrossTrainerCache&) = delete;
  CrossTrainerCache& operator=(const CrossTrainerCache&) = delete;
  // Returns the next element for `trainer_id`, extending the cache if the
  // trainer has consumed everything currently cached. Blocks while another
  // thread is extending the cache. Fails if `trainer_id` is empty or the
  // cache has been cancelled.
  StatusOr<std::shared_ptr<const ElementType>> Get(
      const std::string& trainer_id);
  // Cancels the cache with `status` (must be non-OK); wakes all waiters and
  // makes subsequent Get calls fail with `status`.
  void Cancel(Status status);
  // Returns true iff Cancel() has been called.
  bool IsCancelled() const;
 private:
  // Pairs a fetched element with whether it was served from cache (true) or
  // required extending the cache (false). Used for metrics.
  struct CacheQueryResult {
    std::shared_ptr<const ElementType> element;
    bool cache_hit;
  };
  StatusOr<CacheQueryResult> GetCacheQueryResult(const std::string& trainer_id);
  // True iff the trainer's next element is already in the cache window.
  bool IsElementReady(const std::string& trainer_id);
  // Global (monotonic) index of the trainer's next element, clamped to the
  // start of the current window.
  size_t GetElementIndex(const std::string& trainer_id);
  StatusOr<std::shared_ptr<const ElementType>> GetElement(
      const std::string& trainer_id);
  // Fetches one element from the source sequence and appends it, evicting
  // old elements as needed to stay within the budget.
  Status ExtendCache();
  // Evicts oldest elements until `new_element_size_bytes` fits.
  void FreeSpace(size_t new_element_size_bytes);
  void RecordMetrics(const CacheQueryResult& result);
  const size_t max_cache_size_bytes_;
  std::unique_ptr<CachableSequence<ElementType>> cachable_sequence_;
  mutable mutex mu_;
  mutable condition_variable cv_;
  // Non-OK once cancelled; checked before every wait and before extending.
  Status status_ TF_GUARDED_BY(mu_) = absl::OkStatus();
  // The current window of cached elements; cache_[0] has global index
  // cache_start_index_.
  std::deque<std::shared_ptr<const ElementType>> cache_ TF_GUARDED_BY(mu_);
  size_t cache_size_bytes_ TF_GUARDED_BY(mu_) = 0;
  size_t cache_start_index_ TF_GUARDED_BY(mu_) = 0;
  // True while some thread is calling ExtendCache(); only one extender runs
  // at a time, others wait on cv_.
  bool extending_cache_ TF_GUARDED_BY(mu_) = false;
  // Maps trainer ID -> global index of the next element it should read.
  absl::flat_hash_map<std::string, size_t> trainer_to_element_index_map_
      TF_GUARDED_BY(mu_);
};
template <class ElementType>
CrossTrainerCache<ElementType>::CrossTrainerCache(
    size_t max_cache_size_bytes,
    std::unique_ptr<CachableSequence<ElementType>> cachable_sequence)
    : max_cache_size_bytes_(max_cache_size_bytes),
      cachable_sequence_(std::move(cachable_sequence)) {
  DCHECK_GT(max_cache_size_bytes, 0)
      << "CrossTrainerCache size must be greater than 0.";
  VLOG(2) << "Initialized tf.data service cross-trainer cache with "
          << ByteSize::Bytes(max_cache_size_bytes) << " of memory.";
}
template <class ElementType>
StatusOr<std::shared_ptr<const ElementType>>
CrossTrainerCache<ElementType>::Get(const std::string& trainer_id)
    TF_LOCKS_EXCLUDED(mu_) {
  if (trainer_id.empty()) {
    return errors::InvalidArgument(
        "tf.data service cross-trainer cache requires a non-empty trainer ID.");
  }
  TF_ASSIGN_OR_RETURN(CacheQueryResult result, GetCacheQueryResult(trainer_id));
  RecordMetrics(result);
  return result.element;
}
template <class ElementType>
StatusOr<typename CrossTrainerCache<ElementType>::CacheQueryResult>
CrossTrainerCache<ElementType>::GetCacheQueryResult(
    const std::string& trainer_id) {
  // Loop until the trainer's next element is in the window. At most one
  // thread extends the cache at a time (guarded by extending_cache_); others
  // wait on cv_ and re-check after being notified. ExtendCache() itself runs
  // without the lock held because it may do slow I/O.
  bool should_extend_cache = false;
  while (true) {
    {
      mutex_lock l(mu_);
      TF_RETURN_IF_ERROR(status_);
      if (IsElementReady(trainer_id)) {
        TF_ASSIGN_OR_RETURN(std::shared_ptr<const ElementType> element,
                            GetElement(trainer_id));
        // It is a cache hit only if this thread did not have to extend the
        // cache to make the element available.
        return CacheQueryResult{element,
                                !should_extend_cache};
      }
      if (extending_cache_) {
        should_extend_cache = false;
        cv_.wait(l);
      } else {
        should_extend_cache = true;
        extending_cache_ = true;
      }
    }
    if (should_extend_cache) {
      Status s = ExtendCache();
      mutex_lock l(mu_);
      extending_cache_ = false;
      cv_.notify_all();
      TF_RETURN_IF_ERROR(s);
    }
  }
}
template <class ElementType>
bool CrossTrainerCache<ElementType>::IsElementReady(
    const std::string& trainer_id) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
  return GetElementIndex(trainer_id) < cache_start_index_ + cache_.size();
}
template <class ElementType>
StatusOr<std::shared_ptr<const ElementType>>
CrossTrainerCache<ElementType>::GetElement(const std::string& trainer_id)
    TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
  size_t element_index = GetElementIndex(trainer_id);
  // Guard against index overflow of the monotonically growing global index.
  if (element_index >= std::numeric_limits<size_t>::max()) {
    return errors::Internal(
        "tf.data service caching element index exceeds integer limit. Got ",
        element_index);
  }
  std::shared_ptr<const ElementType> result =
      cache_[element_index - cache_start_index_];
  // Advance the trainer's cursor past the element just served.
  trainer_to_element_index_map_[trainer_id] = element_index + 1;
  return result;
}
template <class ElementType>
size_t CrossTrainerCache<ElementType>::GetElementIndex(
    const std::string& trainer_id) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
  // Trainers whose cursor fell behind the window (evicted elements) are
  // fast-forwarded to the start of the window, skipping the lost elements.
  size_t element_index = trainer_to_element_index_map_[trainer_id];
  if (element_index < cache_start_index_) {
    element_index = cache_start_index_;
  }
  return element_index;
}
template <class ElementType>
Status CrossTrainerCache<ElementType>::ExtendCache() TF_LOCKS_EXCLUDED(mu_) {
  // GetNext() is called without holding mu_ so a slow source does not block
  // readers of already-cached elements.
  TF_ASSIGN_OR_RETURN(ElementType element, cachable_sequence_->GetNext());
  size_t new_element_size_bytes =
      cachable_sequence_->GetElementSizeBytes(element);
  if (new_element_size_bytes > max_cache_size_bytes_) {
    return errors::InvalidArgument(
        "tf.data service element size is larger than cache size in bytes. Got ",
        "element size: ", new_element_size_bytes,
        " and cache size: ", max_cache_size_bytes_);
  }
  mutex_lock l(mu_);
  TF_RETURN_IF_ERROR(status_);
  FreeSpace(new_element_size_bytes);
  cache_.push_back(std::make_shared<ElementType>(std::move(element)));
  cache_size_bytes_ += new_element_size_bytes;
  return absl::OkStatus();
}
template <class ElementType>
void CrossTrainerCache<ElementType>::FreeSpace(size_t new_element_size_bytes)
    TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
  size_t num_elements_discarded = 0;
  // Evict from the front (oldest) until the new element fits in the budget.
  while (!cache_.empty() &&
         cache_size_bytes_ + new_element_size_bytes > max_cache_size_bytes_) {
    size_t free_bytes =
        cachable_sequence_->GetElementSizeBytes(*cache_.front());
    cache_.pop_front();
    cache_size_bytes_ -= free_bytes;
    ++cache_start_index_;
    ++num_elements_discarded;
  }
  VLOG(3) << "Freed " << num_elements_discarded << " element(s) from "
          << "tf.data service cross-trainer cache. Memory usage: "
          << ByteSize::Bytes(cache_size_bytes_) << ".";
}
template <class ElementType>
void CrossTrainerCache<ElementType>::Cancel(Status status)
    TF_LOCKS_EXCLUDED(mu_) {
  DCHECK(!status.ok())
      << "Cancelling CrossTrainerCache requires a non-OK status. Got "
      << status;
  VLOG(2) << "Cancel tf.data service cross-trainer cache with status "
          << status;
  mutex_lock l(mu_);
  status_ = std::move(status);
  // Wake all waiters so they observe the cancellation status.
  cv_.notify_all();
}
template <class ElementType>
bool CrossTrainerCache<ElementType>::IsCancelled() const
    TF_LOCKS_EXCLUDED(mu_) {
  mutex_lock l(mu_);
  return !status_.ok();
}
template <class ElementType>
void CrossTrainerCache<ElementType>::RecordMetrics(
    const CacheQueryResult& result) {
  metrics::RecordTFDataServiceCrossTrainerCacheQuery(result.cache_hit);
  size_t cache_size_bytes = 0;
  {
    mutex_lock l(mu_);
    cache_size_bytes = cache_size_bytes_;
  }
  metrics::RecordTFDataServiceCrossTrainerCacheSizeBytes(cache_size_bytes);
}
}
}
#endif | #include "tensorflow/core/data/service/cross_trainer_cache.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/memory/memory.h"
#include "absl/strings/str_cat.h"
#include "absl/time/time.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/monitoring/cell_reader.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/random.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace data {
namespace {
using ::tensorflow::monitoring::testing::CellReader;
using ::tensorflow::testing::IsOkAndHolds;
using ::tensorflow::testing::StatusIs;
using ::testing::Gt;
using ::testing::HasSubstr;
using ::testing::Pointee;
using ::testing::UnorderedElementsAreArray;
// Cachable sequence yielding 0, 1, 2, ... indefinitely.
class InfiniteRange : public CachableSequence<int64_t> {
 public:
  absl::StatusOr<int64_t> GetNext() override { return next_++; }
  size_t GetElementSizeBytes(const int64_t& element) const override {
    return sizeof(element);
  }
 private:
  // Next value to hand out.
  int64_t next_ = 0;
};
// Cachable sequence that always yields the same string tensor.
class TensorDataset : public CachableSequence<Tensor> {
 public:
  absl::StatusOr<Tensor> GetNext() override { return Tensor("Test Tensor"); }
  size_t GetElementSizeBytes(const Tensor& element) const override {
    return element.TotalBytes();
  }
};
// Like TensorDataset, but sleeps for `delay` before producing each element,
// to simulate a slow source.
class SlowDataset : public CachableSequence<Tensor> {
 public:
  explicit SlowDataset(absl::Duration delay) : delay_(delay) {}
  absl::StatusOr<Tensor> GetNext() override {
    Env::Default()->SleepForMicroseconds(absl::ToInt64Microseconds(delay_));
    return Tensor("Test Tensor");
  }
  size_t GetElementSizeBytes(const Tensor& element) const override {
    return element.TotalBytes();
  }
 private:
  absl::Duration delay_;
};
// Cachable sequence that replays a fixed list of values and/or errors, then
// returns OutOfRange.
template <class T>
class ElementOrErrorDataset : public CachableSequence<T> {
 public:
  explicit ElementOrErrorDataset(const std::vector<StatusOr<T>>& elements)
      : elements_(elements) {}
  StatusOr<T> GetNext() override {
    if (next_ >= elements_.size()) {
      return errors::OutOfRange("Out of range.");
    }
    return elements_[next_++];
  }
  size_t GetElementSizeBytes(const T& element) const override {
    return sizeof(element);
  }
 private:
  const std::vector<StatusOr<T>> elements_;
  int64_t next_ = 0;
};
// Size specializations: sizeof() would be wrong for heap-backed types.
template <>
size_t ElementOrErrorDataset<std::string>::GetElementSizeBytes(
    const std::string& element) const {
  return element.size();
}
template <>
size_t ElementOrErrorDataset<Tensor>::GetElementSizeBytes(
    const Tensor& element) const {
  return element.TotalBytes();
}
// Returns the sequence {0, 1, ..., range - 1} as 64-bit integers.
std::vector<int64_t> GetRange(const size_t range) {
  std::vector<int64_t> sequence;
  sequence.reserve(range);
  for (size_t value = 0; value < range; ++value) {
    sequence.push_back(static_cast<int64_t>(value));
  }
  return sequence;
}
// Returns true iff `sequence` is non-decreasing (each element is >= its
// predecessor). Empty and single-element sequences are trivially increasing.
//
// Bug fix: the original compared `sequence[i - 1] > sequence[i - 1]` — an
// element against itself — which is always false, so the function always
// returned true and the monotonicity check never detected anything. Adjacent
// elements are now compared. The parameter is also taken by const reference
// instead of by value, and the loop index is size_t to match size().
bool SequenceIsIncreasing(const std::vector<int64_t>& sequence) {
  for (size_t i = 1; i < sequence.size(); ++i) {
    if (sequence[i - 1] > sequence[i]) {
      return false;
    }
  }
  return true;
}
// A single trainer reads the sequence in order.
TEST(CrossTrainerCacheTest, GetFromOneTrainer) {
  const size_t num_elements = 10;
  CrossTrainerCache<int64_t> cache(
      1024, std::make_unique<InfiniteRange>());
  for (size_t i = 0; i < num_elements; ++i) {
    EXPECT_THAT(cache.Get("Trainer ID"), IsOkAndHolds(Pointee(i)));
  }
}
// Multiple trainers advancing in lockstep all see the same sequence.
TEST(CrossTrainerCacheTest, GetFromMultipleTrainers) {
  const size_t num_elements = 10;
  const size_t num_trainers = 10;
  CrossTrainerCache<int64_t> cache(
      1024, std::make_unique<InfiniteRange>());
  for (size_t i = 0; i < num_elements; ++i) {
    for (size_t j = 0; j < num_trainers; ++j) {
      const std::string trainer_id = absl::StrCat("Trainer ", j);
      EXPECT_THAT(cache.Get(trainer_id), IsOkAndHolds(Pointee(i)));
    }
  }
}
// With a 5-element window, trainers that fall behind skip evicted elements
// and resume near the front of the window (within 5 of the fast trainers).
TEST(CrossTrainerCacheTest, SlowTrainersSkipData) {
  CrossTrainerCache<int64_t> cache(
      5 * sizeof(int64_t),
      std::make_unique<InfiniteRange>());
  EXPECT_THAT(cache.Get("Fast trainer 1"), IsOkAndHolds(Pointee(0)));
  EXPECT_THAT(cache.Get("Fast trainer 2"), IsOkAndHolds(Pointee(0)));
  EXPECT_THAT(cache.Get("Slow trainer 1"), IsOkAndHolds(Pointee(0)));
  EXPECT_THAT(cache.Get("Slow trainer 2"), IsOkAndHolds(Pointee(0)));
  for (int i = 1; i < 20; ++i) {
    EXPECT_THAT(cache.Get("Fast trainer 1"), IsOkAndHolds(Pointee(i)));
    EXPECT_THAT(cache.Get("Fast trainer 2"), IsOkAndHolds(Pointee(i)));
  }
  EXPECT_THAT(cache.Get("Slow trainer 1"), IsOkAndHolds(Pointee(Gt(14))));
  EXPECT_THAT(cache.Get("Slow trainer 2"), IsOkAndHolds(Pointee(Gt(14))));
  for (int i = 20; i < 100; ++i) {
    EXPECT_THAT(cache.Get("Fast trainer 1"), IsOkAndHolds(Pointee(i)));
    EXPECT_THAT(cache.Get("Fast trainer 2"), IsOkAndHolds(Pointee(i)));
  }
  EXPECT_THAT(cache.Get("Slow trainer 1"), IsOkAndHolds(Pointee(Gt(94))));
  EXPECT_THAT(cache.Get("Slow trainer 2"), IsOkAndHolds(Pointee(Gt(94))));
}
// Trainers that join late start from the current window, not from zero.
TEST(CrossTrainerCacheTest, NewTrainersStartLate) {
  CrossTrainerCache<int64_t> cache(
      5 * sizeof(int64_t),
      std::make_unique<InfiniteRange>());
  for (int i = 0; i < 100; ++i) {
    EXPECT_THAT(cache.Get("Old trainer"), IsOkAndHolds(Pointee(i)));
  }
  for (int j = 0; j < 100; ++j) {
    EXPECT_THAT(cache.Get(absl::StrCat("New trainer ", j)),
                IsOkAndHolds(Pointee(Gt(94))));
  }
}
// With a one-element window, trainers alternately extend the cache; each
// trainer's values stay monotonically increasing.
TEST(CrossTrainerCacheTest, AlternateTrainerExtendsCache) {
  CrossTrainerCache<int64_t> cache(
      sizeof(int64_t),
      std::make_unique<InfiniteRange>());
  EXPECT_THAT(cache.Get("Trainer 1"), IsOkAndHolds(Pointee(0)));
  EXPECT_THAT(cache.Get("Trainer 1"), IsOkAndHolds(Pointee(1)));
  EXPECT_THAT(cache.Get("Trainer 1"), IsOkAndHolds(Pointee(2)));
  EXPECT_THAT(cache.Get("Trainer 2"), IsOkAndHolds(Pointee(Gt(0))));
  EXPECT_THAT(cache.Get("Trainer 2"), IsOkAndHolds(Pointee(Gt(1))));
  EXPECT_THAT(cache.Get("Trainer 2"), IsOkAndHolds(Pointee(Gt(2))));
  EXPECT_THAT(cache.Get("Trainer 1"), IsOkAndHolds(Pointee(Gt(1))));
  EXPECT_THAT(cache.Get("Trainer 1"), IsOkAndHolds(Pointee(Gt(2))));
  EXPECT_THAT(cache.Get("Trainer 1"), IsOkAndHolds(Pointee(Gt(3))));
  EXPECT_THAT(cache.Get("Trainer 2"), IsOkAndHolds(Pointee(Gt(2))));
  EXPECT_THAT(cache.Get("Trainer 2"), IsOkAndHolds(Pointee(Gt(3))));
  EXPECT_THAT(cache.Get("Trainer 2"), IsOkAndHolds(Pointee(Gt(4))));
  EXPECT_THAT(cache.Get("Trainer 3"), IsOkAndHolds(Pointee(Gt(3))));
  EXPECT_THAT(cache.Get("Trainer 3"), IsOkAndHolds(Pointee(Gt(4))));
  EXPECT_THAT(cache.Get("Trainer 3"), IsOkAndHolds(Pointee(Gt(5))));
}
// The first trainer misses on every element (it extends the cache); a second
// trainer reading the same elements hits on every one.
TEST(CrossTrainerCacheTest, CacheHitMetrics) {
  CellReader<int64_t> cell_reader(
      "/tensorflow/data/service/cross_trainer_cache_queries");
  EXPECT_EQ(cell_reader.Delta("true"), 0);
  EXPECT_EQ(cell_reader.Delta("false"), 0);
  EXPECT_EQ(cell_reader.Read("true"), 0);
  EXPECT_EQ(cell_reader.Read("false"), 0);
  const size_t num_elements = 10;
  CrossTrainerCache<int64_t> cache(
      1024, std::make_unique<InfiniteRange>());
  for (size_t i = 0; i < num_elements; ++i) {
    EXPECT_THAT(cache.Get("Trainer 1"), IsOkAndHolds(Pointee(i)));
  }
  EXPECT_EQ(cell_reader.Delta("true"), 0);
  EXPECT_EQ(cell_reader.Delta("false"), 10);
  EXPECT_EQ(cell_reader.Read("true"), 0);
  EXPECT_EQ(cell_reader.Read("false"), 10);
  for (size_t i = 0; i < num_elements; ++i) {
    EXPECT_THAT(cache.Get("Trainer 2"), IsOkAndHolds(Pointee(i)));
  }
  EXPECT_EQ(cell_reader.Delta("true"), 10);
  EXPECT_EQ(cell_reader.Delta("false"), 0);
  EXPECT_EQ(cell_reader.Read("true"), 10);
  EXPECT_EQ(cell_reader.Read("false"), 10);
}
// The reported cache size grows until the budget is reached, then plateaus
// at the maximum (5 elements) as old elements are evicted.
TEST(CrossTrainerCacheTest, CacheSizeMetrics) {
  CellReader<int64_t> cell_reader(
      "/tensorflow/data/service/cross_trainer_cache_size_bytes");
  const size_t num_elements = 5;
  CrossTrainerCache<int64_t> cache(
      num_elements * sizeof(int64_t),
      std::make_unique<InfiniteRange>());
  for (size_t i = 0; i < num_elements; ++i) {
    EXPECT_THAT(cache.Get("Trainer 1"), IsOkAndHolds(Pointee(i)));
    EXPECT_EQ(cell_reader.Read(), (i + 1) * sizeof(int64_t));
  }
  for (size_t i = 0; i < 100; ++i) {
    EXPECT_THAT(cache.Get("Trainer 1"),
                IsOkAndHolds(Pointee(num_elements + i)));
    EXPECT_EQ(cell_reader.Read(), 5 * sizeof(int64_t));
  }
}
// Many trainers reading concurrently each observe a non-decreasing sequence.
// `results.reserve(num_trainers)` is essential: the reader lambdas hold
// references into `results`, and reserving up front prevents reallocation
// from invalidating them.
// NOTE(review): the lambda's loop variable `i` shadows the outer trainer
// index (which is not captured), so each Get uses a trainer ID derived from
// the element counter, not the thread — confirm whether that is intended.
TEST(CrossTrainerCacheTest, ConcurrentReaders) {
  size_t num_trainers = 10;
  size_t num_elements_to_read = 200;
  CrossTrainerCache<int64_t> cache(
      3 * sizeof(int64_t),
      std::make_unique<InfiniteRange>());
  std::vector<std::vector<int64_t>> results;
  std::vector<std::unique_ptr<Thread>> reader_threads;
  results.reserve(num_trainers);
  for (size_t i = 0; i < num_trainers; ++i) {
    results.emplace_back();
    std::vector<int64_t>& result = results.back();
    reader_threads.push_back(absl::WrapUnique(Env::Default()->StartThread(
        {}, absl::StrCat("Trainer_", i),
        [&cache, num_elements_to_read, &result]() {
          for (size_t i = 0; i < num_elements_to_read; ++i) {
            if (random::New64() % 5 == 0) {
              Env::Default()->SleepForMicroseconds(2000);
            }
            TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<const int64_t> next,
                                    cache.Get(absl::StrCat("Trainer_", i)));
            result.push_back(*next);
          }
        })));
  }
  reader_threads.clear();
  EXPECT_EQ(results.size(), num_trainers);
  for (const std::vector<int64_t>& result : results) {
    EXPECT_EQ(result.size(), num_elements_to_read);
    EXPECT_TRUE(SequenceIsIncreasing(result));
  }
}
// Many threads sharing ONE trainer ID: together they must consume exactly
// the elements 0..999 (10 threads x 100 reads), with no duplicates or gaps.
TEST(CrossTrainerCacheTest, ConcurrentReadersFromOneTrainer) {
  size_t num_trainers = 10;
  size_t num_elements_to_read = 100;
  CrossTrainerCache<int64_t> cache(
      3 * sizeof(int64_t),
      std::make_unique<InfiniteRange>());
  mutex mu;
  std::vector<int64_t> results;
  std::vector<std::unique_ptr<Thread>> reader_threads;
  for (size_t i = 0; i < num_trainers; ++i) {
    reader_threads.push_back(absl::WrapUnique(Env::Default()->StartThread(
        {}, absl::StrCat("Thread_", i),
        [&cache, num_elements_to_read, &results, &mu]() {
          for (size_t i = 0; i < num_elements_to_read; ++i) {
            if (random::New64() % 5 == 0) {
              Env::Default()->SleepForMicroseconds(1000);
            }
            TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<const int64_t> next,
                                    cache.Get("Trainer ID"));
            mutex_lock l(mu);
            results.push_back(*next);
          }
        })));
  }
  reader_threads.clear();
  EXPECT_THAT(results, UnorderedElementsAreArray(GetRange(1000)));
}
// Cancel() makes all in-flight and future Get calls fail with the
// cancellation status, and IsCancelled() flips to true.
TEST(CrossTrainerCacheTest, Cancel) {
  size_t num_trainers = 10;
  CrossTrainerCache<Tensor> cache(
      1000, std::make_unique<TensorDataset>());
  EXPECT_FALSE(cache.IsCancelled());
  mutex mu;
  Status status;
  std::vector<std::unique_ptr<Thread>> reader_threads;
  for (size_t i = 0; i < num_trainers; ++i) {
    reader_threads.push_back(absl::WrapUnique(Env::Default()->StartThread(
        {}, absl::StrCat("Trainer_", i),
        [&cache, &status, &mu]() {
          for (int j = 0; true; ++j) {
            absl::StatusOr<std::shared_ptr<const Tensor>> tensor =
                cache.Get(absl::StrCat("Trainer_", j % 1000));
            {
              mutex_lock l(mu);
              status = tensor.status();
            }
            if (!tensor.status().ok()) {
              return;
            }
            test::ExpectEqual(*tensor.value(), Tensor("Test Tensor"));
          }
        })));
  }
  Env::Default()->SleepForMicroseconds(1000000);
  cache.Cancel(errors::Cancelled("Cancelled"));
  reader_threads.clear();
  mutex_lock l(mu);
  EXPECT_THAT(status, StatusIs(error::CANCELLED));
  EXPECT_THAT(cache.Get("New trainer"), StatusIs(error::CANCELLED));
  EXPECT_TRUE(cache.IsCancelled());
}
// Errors from the source are surfaced to the caller that triggers the
// extension, but are NOT cached: subsequent reads (and other trainers)
// see only the successful elements.
TEST(CrossTrainerCacheTest, Errors) {
  auto elements = std::make_unique<ElementOrErrorDataset<std::string>>(
      std::vector<absl::StatusOr<std::string>>{
          std::string("First element"),
          errors::Cancelled("Cancelled"),
          std::string("Second element"),
          errors::InvalidArgument("InvalidArgument"),
          std::string("Third element"),
          errors::Unavailable("Unavailable"),
      });
  CrossTrainerCache<std::string> cache(
      1000, std::move(elements));
  EXPECT_THAT(cache.Get("Trainer ID"),
              IsOkAndHolds(Pointee(std::string("First element"))));
  EXPECT_THAT(cache.Get("Trainer ID"), StatusIs(error::CANCELLED));
  EXPECT_THAT(cache.Get("Trainer ID"),
              IsOkAndHolds(Pointee(std::string("Second element"))));
  EXPECT_THAT(cache.Get("Trainer ID"), StatusIs(error::INVALID_ARGUMENT));
  EXPECT_THAT(cache.Get("Trainer ID"),
              IsOkAndHolds(Pointee(std::string("Third element"))));
  EXPECT_THAT(cache.Get("Trainer ID"), StatusIs(error::UNAVAILABLE));
  EXPECT_THAT(cache.Get("New Trainer"),
              IsOkAndHolds(Pointee(std::string("First element"))));
  EXPECT_THAT(cache.Get("New Trainer"),
              IsOkAndHolds(Pointee(std::string("Second element"))));
  EXPECT_THAT(cache.Get("New Trainer"),
              IsOkAndHolds(Pointee(std::string("Third element"))));
}
// An element larger than the whole cache budget is rejected.
TEST(CrossTrainerCacheTest, CacheSizeIsTooSmall) {
  CrossTrainerCache<Tensor> cache(
      1, std::make_unique<TensorDataset>());
  EXPECT_THAT(cache.Get("Trainer ID"),
              StatusIs(error::INVALID_ARGUMENT,
                       HasSubstr("tf.data service element size is larger than "
                                 "cache size in bytes.")));
}
// An empty trainer ID is rejected up front.
TEST(CrossTrainerCacheTest, TrainerIDMustBeNonEmpty) {
  CrossTrainerCache<Tensor> cache(
      1000, std::make_unique<TensorDataset>());
  EXPECT_THAT(cache.Get(""), StatusIs(error::INVALID_ARGUMENT,
                                      "tf.data service cross-trainer cache "
                                      "requires a non-empty trainer ID."));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/cross_trainer_cache.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/cross_trainer_cache_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
241c7c0e-4f18-4489-be6e-d7b6a4d84b49 | cpp | tensorflow/tensorflow | builtin_op_data | tensorflow/compiler/mlir/lite/core/c/builtin_op_data.h | tensorflow/lite/core/c/builtin_op_data_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_LITE_CORE_C_BUILTIN_OP_DATA_H_
#define TENSORFLOW_COMPILER_MLIR_LITE_CORE_C_BUILTIN_OP_DATA_H_
// Padding scheme of a convolution/pooling op, mirroring the TFLite schema.
typedef enum {
  kTfLitePaddingUnknown = 0,
  kTfLitePaddingSame,
  kTfLitePaddingValid,
} TfLitePadding;
// Activation function fused into the producing op's kernel.
typedef enum {
  kTfLiteActNone = 0,
  kTfLiteActRelu,
  kTfLiteActReluN1To1,
  kTfLiteActRelu6,
  kTfLiteActTanh,
  kTfLiteActSignBit,
  kTfLiteActSigmoid,
} TfLiteFusedActivation;
// Concrete padding amounts resolved from a TfLitePadding scheme.
typedef struct {
  int width;
  int height;
  int width_offset;
  int height_offset;
} TfLitePaddingValues;
// Parameters of a pooling op. The `computed` sub-struct is filled in by the
// runtime (during Prepare), not by the model.
typedef struct {
  TfLitePadding padding;
  int stride_width;
  int stride_height;
  int filter_width;
  int filter_height;
  TfLiteFusedActivation activation;
  struct {
    TfLitePaddingValues padding;
  } computed;
} TfLitePoolParams;
#endif | #include "tensorflow/lite/core/c/builtin_op_data.h"
#include <gtest/gtest.h>
namespace tflite {
// Compile-only smoke test: instantiates every builtin-op param struct and
// enum declared in builtin_op_data.h. The variables are intentionally
// unused — the test passes simply by compiling.
TEST(IntArray, CanCompileStructs) {
  TfLitePadding padding = kTfLitePaddingSame;
  TfLitePaddingValues padding_values;
  TfLiteFusedActivation fused_activation = kTfLiteActRelu;
  TfLiteConvParams conv_params;
  TfLitePoolParams pool_params;
  TfLiteDepthwiseConvParams depthwise_conv_params;
  TfLiteSVDFParams svdf_params;
  TfLiteRNNParams rnn_params;
  TfLiteSequenceRNNParams sequence_rnn_params;
  TfLiteFullyConnectedWeightsFormat fully_connected_weights_format =
      kTfLiteFullyConnectedWeightsFormatDefault;
  TfLiteFullyConnectedParams fully_connected_params;
  TfLiteLSHProjectionType projection_type = kTfLiteLshProjectionDense;
  TfLiteLSHProjectionParams projection_params;
  TfLiteSoftmaxParams softmax_params;
  TfLiteConcatenationParams concatenation_params;
  TfLiteAddParams add_params;
  TfLiteSpaceToBatchNDParams space_to_batch_nd_params;
  TfLiteBatchToSpaceNDParams batch_to_space_nd_params;
  TfLiteMulParams mul_params;
  TfLiteSubParams sub_params;
  TfLiteDivParams div_params;
  TfLiteL2NormParams l2_norm_params;
  TfLiteLocalResponseNormParams local_response_norm_params;
  TfLiteLSTMKernelType lstm_kernel_type = kTfLiteLSTMBasicKernel;
  TfLiteLSTMParams lstm_params;
  TfLiteResizeBilinearParams resize_bilinear_params;
  TfLitePadParams pad_params;
  TfLitePadV2Params pad_v2_params;
  TfLiteReshapeParams reshape_params;
  TfLiteSkipGramParams skip_gram_params;
  TfLiteSpaceToDepthParams space_to_depth_params;
  TfLiteDepthToSpaceParams depth_to_space_params;
  TfLiteCastParams cast_params;
  TfLiteCombinerType combiner_type = kTfLiteCombinerTypeSqrtn;
  TfLiteEmbeddingLookupSparseParams lookup_sparse_params;
  TfLiteGatherParams gather_params;
  TfLiteTransposeParams transpose_params;
  TfLiteReducerParams reducer_params;
  TfLiteSplitParams split_params;
  TfLiteSplitVParams split_v_params;
  TfLiteSqueezeParams squeeze_params;
  TfLiteStridedSliceParams strided_slice_params;
  TfLiteArgMaxParams arg_max_params;
  TfLiteArgMinParams arg_min_params;
  TfLiteTransposeConvParams transpose_conv_params;
  TfLiteSparseToDenseParams sparse_to_dense_params;
  TfLiteShapeParams shape_params;
  TfLiteRankParams rank_params;
  TfLiteFakeQuantParams fake_quant_params;
  TfLitePackParams pack_params;
  TfLiteUnpackParams unpack_params;
  TfLiteOneHotParams one_hot_params;
  TfLiteBidirectionalSequenceRNNParams bidi_sequence_rnn_params;
  TfLiteBidirectionalSequenceLSTMParams bidi_sequence_lstm_params;
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/core/c/builtin_op_data.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/c/builtin_op_data_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0bee8605-291b-4815-9e79-77612b2128b2 | cpp | tensorflow/tensorflow | type_to_tflitetype | tensorflow/lite/type_to_tflitetype.h | tensorflow/lite/type_to_tflitetype_test.cc | #ifndef TENSORFLOW_LITE_TYPE_TO_TFLITETYPE_H_
#define TENSORFLOW_LITE_TYPE_TO_TFLITETYPE_H_
#include <complex>
#include <string>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/portable_type_to_tflitetype.h"
namespace tflite {
MATCH_TYPE_AND_TFLITE_TYPE(std::string, kTfLiteString);
MATCH_TYPE_AND_TFLITE_TYPE(std::complex<float>, kTfLiteComplex64);
MATCH_TYPE_AND_TFLITE_TYPE(std::complex<double>, kTfLiteComplex128);
}
#endif | #include "tensorflow/lite/type_to_tflitetype.h"
#include <string>
#include <type_traits>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/portable_type_to_tflitetype.h"
namespace tflite {
namespace {
TEST(TypeToTfLiteType, TypeMapsAreInverseOfEachOther) {
EXPECT_EQ(kTfLiteInt16,
typeToTfLiteType<TfLiteTypeToType<kTfLiteInt16>::Type>());
EXPECT_EQ(kTfLiteUInt16,
typeToTfLiteType<TfLiteTypeToType<kTfLiteUInt16>::Type>());
EXPECT_EQ(kTfLiteInt32,
typeToTfLiteType<TfLiteTypeToType<kTfLiteInt32>::Type>());
EXPECT_EQ(kTfLiteUInt32,
typeToTfLiteType<TfLiteTypeToType<kTfLiteUInt32>::Type>());
EXPECT_EQ(kTfLiteFloat32,
typeToTfLiteType<TfLiteTypeToType<kTfLiteFloat32>::Type>());
EXPECT_EQ(kTfLiteUInt8,
typeToTfLiteType<TfLiteTypeToType<kTfLiteUInt8>::Type>());
EXPECT_EQ(kTfLiteInt8,
typeToTfLiteType<TfLiteTypeToType<kTfLiteInt8>::Type>());
EXPECT_EQ(kTfLiteBool,
typeToTfLiteType<TfLiteTypeToType<kTfLiteBool>::Type>());
EXPECT_EQ(kTfLiteComplex64,
typeToTfLiteType<TfLiteTypeToType<kTfLiteComplex64>::Type>());
EXPECT_EQ(kTfLiteComplex128,
typeToTfLiteType<TfLiteTypeToType<kTfLiteComplex128>::Type>());
EXPECT_EQ(kTfLiteString,
typeToTfLiteType<TfLiteTypeToType<kTfLiteString>::Type>());
EXPECT_EQ(kTfLiteFloat16,
typeToTfLiteType<TfLiteTypeToType<kTfLiteFloat16>::Type>());
EXPECT_EQ(kTfLiteFloat64,
typeToTfLiteType<TfLiteTypeToType<kTfLiteFloat64>::Type>());
}
TEST(TypeToTfLiteType, Sanity) {
EXPECT_EQ(kTfLiteFloat32, typeToTfLiteType<float>());
EXPECT_EQ(kTfLiteBool, typeToTfLiteType<bool>());
EXPECT_EQ(kTfLiteString, typeToTfLiteType<std::string>());
static_assert(
std::is_same<float, TfLiteTypeToType<kTfLiteFloat32>::Type>::value,
"TfLiteTypeToType test failure");
static_assert(std::is_same<bool, TfLiteTypeToType<kTfLiteBool>::Type>::value,
"TfLiteTypeToType test failure");
static_assert(
std::is_same<std::string, TfLiteTypeToType<kTfLiteString>::Type>::value,
"TfLiteTypeToType test failure");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/type_to_tflitetype.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/type_to_tflitetype_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ec5e5467-28e8-4e41-a333-1a91de739759 | cpp | tensorflow/tensorflow | cpu_backend_gemm | tensorflow/lite/kernels/cpu_backend_gemm.h | tensorflow/lite/kernels/cpu_backend_gemm_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_CPU_BACKEND_GEMM_H_
#define TENSORFLOW_LITE_KERNELS_CPU_BACKEND_GEMM_H_
#include <cstdint>
#include "ruy/profiler/instrumentation.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/cpu_backend_gemm_custom_gemv.h"
#include "tensorflow/lite/kernels/cpu_backend_gemm_params.h"
#include "tensorflow/lite/kernels/cpu_backend_gemm_ruy.h"
#ifndef TFLITE_WITH_RUY
#include "tensorflow/lite/kernels/cpu_backend_gemm_eigen.h"
#include "tensorflow/lite/kernels/cpu_backend_gemm_gemmlowp.h"
#include "tensorflow/lite/kernels/cpu_backend_gemm_x86.h"
#endif
namespace tflite {
namespace cpu_backend_gemm {
#if !defined(TFLITE_WITH_RUY) && defined(TFLITE_X86_PLATFORM)
template <typename LhsScalar, typename RhsScalar, typename AccumScalar,
typename DstScalar, QuantizationFlavor quantization_flavor>
struct GemmImpl : detail::GemmImplX86<LhsScalar, RhsScalar, AccumScalar,
DstScalar, quantization_flavor> {};
#else
template <typename LhsScalar, typename RhsScalar, typename AccumScalar,
typename DstScalar, QuantizationFlavor quantization_flavor>
struct GemmImpl : detail::GemmImplUsingRuy<LhsScalar, RhsScalar, AccumScalar,
DstScalar, quantization_flavor> {};
#if !defined(TFLITE_WITH_RUY)
template <typename SrcScalar, typename DstScalar,
QuantizationFlavor quantization_flavor>
struct GemmImpl<SrcScalar, SrcScalar, std::int32_t, DstScalar,
quantization_flavor>
: detail::GemmImplUsingGemmlowp<SrcScalar, SrcScalar, std::int32_t,
DstScalar, quantization_flavor> {};
#if !defined(GEMMLOWP_NEON)
template <typename SrcScalar, QuantizationFlavor quantization_flavor>
struct GemmImpl<SrcScalar, SrcScalar, std::int32_t, std::int8_t,
quantization_flavor>
: detail::GemmImplUsingRuy<SrcScalar, SrcScalar, std::int32_t, std::int8_t,
quantization_flavor> {};
template <typename DstScalar, QuantizationFlavor quantization_flavor>
struct GemmImpl<std::int8_t, std::int8_t, std::int32_t, DstScalar,
quantization_flavor>
: detail::GemmImplUsingRuy<std::int8_t, std::int8_t, std::int32_t,
DstScalar, quantization_flavor> {};
template <QuantizationFlavor quantization_flavor>
struct GemmImpl<std::int8_t, std::int8_t, std::int32_t, std::int8_t,
quantization_flavor>
: detail::GemmImplUsingRuy<std::int8_t, std::int8_t, std::int32_t,
std::int8_t, quantization_flavor> {};
#endif
template <>
struct GemmImpl<float, float, float, float, QuantizationFlavor::kFloatingPoint>
: detail::GemmImplUsingEigen {};
#endif
#endif
template <typename LhsScalar, typename RhsScalar, typename AccumScalar,
typename DstScalar, QuantizationFlavor quantization_flavor>
void Gemm(const MatrixParams<LhsScalar>& lhs_params, const LhsScalar* lhs_data,
const MatrixParams<RhsScalar>& rhs_params, const RhsScalar* rhs_data,
const MatrixParams<DstScalar>& dst_params, DstScalar* dst_data,
const GemmParams<AccumScalar, DstScalar, quantization_flavor>& params,
CpuBackendContext* context) {
ruy::profiler::ScopeLabel label("cpu_backend_gemm::Gemm");
ValidateParams(lhs_params, rhs_params, dst_params, params);
if (!IsValidGemm(lhs_params, rhs_params, dst_params)) {
TFLITE_DCHECK(false);
return;
}
bool must_use_ruy = false;
if (context->use_caching()) {
must_use_ruy = true;
}
if (lhs_params.order != Order::kRowMajor ||
rhs_params.order != Order::kColMajor ||
dst_params.order != Order::kColMajor) {
must_use_ruy = true;
}
if (must_use_ruy) {
detail::GemmImplUsingRuy<LhsScalar, RhsScalar, AccumScalar, DstScalar,
quantization_flavor>::Run(lhs_params, lhs_data,
rhs_params, rhs_data,
dst_params, dst_data,
params, context);
return;
}
const bool try_custom_gemv = (dst_params.cols == 1);
if (try_custom_gemv) {
if (detail::CustomGemv(lhs_params, lhs_data, rhs_params, rhs_data,
dst_params, dst_data, params, context)) {
return;
}
}
GemmImpl<LhsScalar, RhsScalar, AccumScalar, DstScalar,
quantization_flavor>::Run(lhs_params, lhs_data, rhs_params, rhs_data,
dst_params, dst_data, params, context);
}
template <QuantizationFlavor quantization_flavor>
void Gemm(const MatrixParams<int8_t>& lhs_params, const int8_t* lhs_data,
const MatrixParams<int16_t>& rhs_params, const int16_t* rhs_data,
const MatrixParams<int16_t>& dst_params, int16_t* dst_data,
const GemmParams<int32_t, int16_t, quantization_flavor>& params,
CpuBackendContext* context) {
ruy::profiler::ScopeLabel label("cpu_backend_gemm::Gemm");
ValidateParams(lhs_params, rhs_params, dst_params, params);
if (!IsValidGemm(lhs_params, rhs_params, dst_params)) {
TFLITE_DCHECK(false);
return;
}
detail::GemmImplUsingRuy<int8_t, int16_t, int32_t, int16_t,
quantization_flavor>::Run(lhs_params, lhs_data,
rhs_params, rhs_data,
dst_params, dst_data,
params, context);
}
template <typename LhsScalar, typename RhsScalar,
QuantizationFlavor quantization_flavor>
void Gemm(const MatrixParams<LhsScalar>& lhs_params, const LhsScalar* lhs_data,
const MatrixParams<RhsScalar>& rhs_params, const RhsScalar* rhs_data,
const MatrixParams<int32_t>& dst_params, int32_t* dst_data,
const GemmParams<int32_t, int32_t, quantization_flavor>& params,
CpuBackendContext* context) {
ruy::profiler::ScopeLabel label("cpu_backend_gemm::Gemm");
ValidateParams(lhs_params, rhs_params, dst_params, params);
ruy::profiler::ScopeLabel label2("cpu_backend_gemm::Gemm: general GEMM");
detail::GemmImplUsingRuy<LhsScalar, RhsScalar, int32_t, int32_t,
quantization_flavor>::Run(lhs_params, lhs_data,
rhs_params, rhs_data,
dst_params, dst_data,
params, context);
}
}
}
#endif | #include "tensorflow/lite/kernels/cpu_backend_gemm.h"
#include <math.h>
#include <stdint.h>
#include <stdlib.h>
#include <algorithm>
#include <iterator>
#include <limits>
#include <random>
#include <sstream>
#include <string>
#include <tuple>
#include <type_traits>
#include <vector>
#include <gtest/gtest.h>
#include "ruy/matrix.h"
#include "ruy/reference_mul.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/cpu_backend_gemm_params.h"
#include "tensorflow/lite/kernels/cpu_backend_gemm_ruy.h"
namespace tflite {
namespace {
using cpu_backend_gemm::Gemm;
using cpu_backend_gemm::GemmParams;
using cpu_backend_gemm::MatrixParams;
using cpu_backend_gemm::QuantizationFlavor;
template <typename Scalar>
std::string ToString(const std::vector<Scalar>& vector) {
std::stringstream s;
if (vector.empty()) {
s << "{}";
} else {
s << "{ " << static_cast<double>(vector[0]);
for (int i = 1; i < vector.size(); i++) {
s << ", " << static_cast<double>(vector[i]);
}
s << "}";
}
return s.str();
}
template <typename Scalar>
void MakeDeterministicPseudoRandomVector(int size,
std::vector<Scalar>* vector) {
std::default_random_engine random_engine;
(void)random_engine();
const double random_min = static_cast<double>(random_engine.min());
const double random_max = static_cast<double>(random_engine.max());
const double result_min =
std::is_floating_point<Scalar>::value
? -1.0
: std::max(-256., static_cast<double>(
std::numeric_limits<Scalar>::lowest()));
const double result_max =
std::is_floating_point<Scalar>::value
? 1.0
: std::min(256.,
static_cast<double>(std::numeric_limits<Scalar>::max()));
const double random_scale =
(result_max - result_min) / (random_max - random_min);
vector->resize(size);
for (int i = 0; i < size; i++) {
double val = random_scale * (random_engine() - random_min);
val = std::max(val,
static_cast<double>(std::numeric_limits<Scalar>::lowest()));
val =
std::min(val, static_cast<double>(std::numeric_limits<Scalar>::max()));
(*vector)[i] = static_cast<Scalar>(val);
}
}
template <typename Scalar>
void MakeVectorFilledWithConsecutiveInts(int size,
std::vector<Scalar>* vector) {
vector->resize(size);
EXPECT_LE(size, std::numeric_limits<Scalar>::max());
for (int i = 0; i < size; i++) {
(*vector)[i] = static_cast<Scalar>(i + 1);
}
}
template <typename Scalar>
Scalar Median(const std::vector<Scalar>& vector) {
EXPECT_GT(vector.size(), 0);
std::vector<Scalar> vector_copy = vector;
std::sort(std::begin(vector_copy), std::end(vector_copy));
return vector_copy[vector_copy.size() / 2];
}
template <typename Scalar>
double MedianAbs(const std::vector<Scalar>& vector) {
EXPECT_GT(vector.size(), 0);
std::vector<double> vector_abs;
vector_abs.resize(vector.size());
for (int i = 0; i < vector.size(); i++) {
vector_abs[i] = std::abs(static_cast<double>(vector[i]));
}
std::sort(std::begin(vector_abs), std::end(vector_abs));
return vector_abs[vector_abs.size() / 2];
}
template <typename Scalar>
void Clamp(const std::vector<Scalar>& src, Scalar clamp_min, Scalar clamp_max,
std::vector<Scalar>* dst) {
dst->resize(src.size());
for (int i = 0; i < src.size(); i++) {
(*dst)[i] = std::max(std::min(src[i], clamp_max), clamp_min);
}
}
template <typename AccumScalar, typename DstScalar,
QuantizationFlavor quantization_flavor>
void Clamp(const GemmParams<AccumScalar, DstScalar, quantization_flavor>& src,
DstScalar clamp_min, DstScalar clamp_max,
GemmParams<AccumScalar, DstScalar, quantization_flavor>* dst) {
*dst = src;
dst->clamp_min = clamp_min;
dst->clamp_max = clamp_max;
}
struct ErrorStats {
int size;
double scale_factor;
double max_abs_diff;
double mean_abs_diff;
double abs_mean_diff;
};
template <typename Scalar>
void ComputeErrorStats(const std::vector<Scalar>& actual,
const std::vector<Scalar>& expected,
ErrorStats* error_stats) {
double max_abs_diff = 0;
double sum_abs_diff = 0;
double sum_diff = 0;
double max_abs_expected = 0;
EXPECT_EQ(actual.size(), expected.size());
for (int i = 0; i < actual.size(); i++) {
double actual_val = static_cast<double>(actual[i]);
double expected_val = static_cast<double>(expected[i]);
double diff = actual_val - expected_val;
max_abs_expected = std::max(max_abs_expected, std::abs(expected_val));
sum_diff += diff;
sum_abs_diff += std::abs(diff);
max_abs_diff = std::max(max_abs_diff, std::abs(diff));
}
error_stats->scale_factor = max_abs_expected;
error_stats->max_abs_diff = max_abs_diff;
error_stats->mean_abs_diff = sum_abs_diff / actual.size();
error_stats->abs_mean_diff = std::abs(sum_diff / actual.size());
error_stats->size = actual.size();
}
template <typename AccumScalar, typename DstScalar>
bool CheckErrorStats(const ErrorStats& error_stats, int accumulation_depth) {
double tolerated_relative_max_abs_diff = 0;
double tolerated_relative_mean_abs_diff = 0;
double tolerated_relative_abs_mean_diff = 0;
double inverse_size = 1. / error_stats.size;
if (std::is_floating_point<AccumScalar>::value) {
tolerated_relative_max_abs_diff =
accumulation_depth * std::numeric_limits<DstScalar>::epsilon();
tolerated_relative_mean_abs_diff =
std::sqrt(static_cast<double>(accumulation_depth)) *
std::numeric_limits<DstScalar>::epsilon();
tolerated_relative_abs_mean_diff =
tolerated_relative_mean_abs_diff * std::sqrt(inverse_size);
} else {
tolerated_relative_max_abs_diff = 1;
tolerated_relative_mean_abs_diff = std::sqrt(inverse_size) * 0.5;
tolerated_relative_abs_mean_diff = inverse_size * 2.;
}
double tolerated_max_abs_diff =
tolerated_relative_max_abs_diff * error_stats.scale_factor;
double tolerated_mean_abs_diff =
tolerated_relative_mean_abs_diff * error_stats.scale_factor;
double tolerated_abs_mean_diff =
tolerated_relative_abs_mean_diff * error_stats.scale_factor;
EXPECT_LE(error_stats.max_abs_diff, tolerated_max_abs_diff);
EXPECT_LE(error_stats.mean_abs_diff, tolerated_mean_abs_diff);
EXPECT_LE(error_stats.abs_mean_diff, tolerated_abs_mean_diff);
return error_stats.max_abs_diff <= tolerated_max_abs_diff &&
error_stats.mean_abs_diff <= tolerated_mean_abs_diff &&
error_stats.abs_mean_diff <= tolerated_abs_mean_diff;
}
template <typename AccumScalar, typename DstScalar>
void CheckErrorForAccumulation(int accumulation_depth,
const std::vector<DstScalar>& actual,
const std::vector<DstScalar>& expected) {
ErrorStats error_stats;
ComputeErrorStats(actual, expected, &error_stats);
bool success =
CheckErrorStats<AccumScalar, DstScalar>(error_stats, accumulation_depth);
EXPECT_TRUE(success) << "Actual vector\n"
<< ToString(actual) << "\ndiffers from expected vector\n"
<< ToString(expected) << "\n";
}
template <typename LhsScalar, typename RhsScalar, typename AccumScalar,
typename DstScalar, QuantizationFlavor quantization_flavor>
void PerformGemmThenCompareResultsThenAgainWithClamping(
const MatrixParams<LhsScalar>& lhs_params,
const std::vector<LhsScalar>& lhs_data,
const MatrixParams<RhsScalar>& rhs_params,
const std::vector<RhsScalar>& rhs_data,
const MatrixParams<DstScalar>& dst_params, std::vector<DstScalar>* dst_data,
const GemmParams<AccumScalar, DstScalar, quantization_flavor>& params,
const std::vector<DstScalar>& expected,
CpuBackendContext* cpu_backend_context) {
const int accumulation_depth = lhs_params.cols;
Gemm(lhs_params, lhs_data.data(), rhs_params, rhs_data.data(), dst_params,
dst_data->data(), params, cpu_backend_context);
CheckErrorForAccumulation<AccumScalar>(accumulation_depth, *dst_data,
expected);
DstScalar expected_median = Median(expected);
std::vector<DstScalar> expected_with_clamp;
GemmParams<AccumScalar, DstScalar, quantization_flavor> params_with_clamp;
DstScalar clamp_min, clamp_max;
clamp_min = std::numeric_limits<DstScalar>::lowest();
clamp_max = expected_median;
Clamp(expected, clamp_min, clamp_max, &expected_with_clamp);
Clamp(params, clamp_min, clamp_max, ¶ms_with_clamp);
Gemm(lhs_params, lhs_data.data(), rhs_params, rhs_data.data(), dst_params,
dst_data->data(), params_with_clamp, cpu_backend_context);
CheckErrorForAccumulation<AccumScalar>(accumulation_depth, *dst_data,
expected_with_clamp);
clamp_min = expected_median;
clamp_max = std::numeric_limits<DstScalar>::max();
Clamp(expected, clamp_min, clamp_max, &expected_with_clamp);
Clamp(params, clamp_min, clamp_max, ¶ms_with_clamp);
Gemm(lhs_params, lhs_data.data(), rhs_params, rhs_data.data(), dst_params,
dst_data->data(), params_with_clamp, cpu_backend_context);
CheckErrorForAccumulation<AccumScalar>(accumulation_depth, *dst_data,
expected_with_clamp);
}
template <typename LhsScalar, typename RhsScalar, typename AccumScalar,
typename DstScalar>
int BisectReasonableMultiplierExponent(
int bisect_min, int bisect_max, const MatrixParams<LhsScalar>& lhs_params,
const std::vector<LhsScalar>& lhs_data,
const MatrixParams<RhsScalar>& rhs_params,
const std::vector<RhsScalar>& rhs_data,
const MatrixParams<DstScalar>& dst_params, std::vector<DstScalar>* dst_data,
const GemmParams<AccumScalar, DstScalar>& params,
CpuBackendContext* cpu_backend_context) {
if (bisect_min == bisect_max) {
return bisect_min;
}
int bisect_mid =
static_cast<int>(std::floor(0.5 * (bisect_min + bisect_max)));
GemmParams<AccumScalar, DstScalar> params_copy(params);
params_copy.multiplier_exponent = bisect_mid;
double clamp_abs = std::max(std::abs(static_cast<double>(params.clamp_min)),
std::abs(static_cast<double>(params.clamp_max)));
Gemm(lhs_params, lhs_data.data(), rhs_params, rhs_data.data(), dst_params,
dst_data->data(), params_copy, cpu_backend_context);
double median_abs = MedianAbs(*dst_data);
if (median_abs < 0.25 * clamp_abs) {
return BisectReasonableMultiplierExponent(
bisect_mid + 1, bisect_max, lhs_params, lhs_data, rhs_params, rhs_data,
dst_params, dst_data, params_copy, cpu_backend_context);
} else {
return BisectReasonableMultiplierExponent(
bisect_min, bisect_mid, lhs_params, lhs_data, rhs_params, rhs_data,
dst_params, dst_data, params_copy, cpu_backend_context);
}
}
template <typename LhsScalar, typename RhsScalar, typename AccumScalar,
typename DstScalar, QuantizationFlavor quantization_flavor>
void ReferenceGemm(
const MatrixParams<LhsScalar>& lhs_params, const LhsScalar* lhs_data,
const MatrixParams<RhsScalar>& rhs_params, const RhsScalar* rhs_data,
const MatrixParams<DstScalar>& dst_params, DstScalar* dst_data,
const GemmParams<AccumScalar, DstScalar, quantization_flavor>& params,
CpuBackendContext* context) {
ruy::Matrix<LhsScalar> ruy_lhs;
ruy::Matrix<RhsScalar> ruy_rhs;
ruy::Matrix<DstScalar> ruy_dst;
cpu_backend_gemm::detail::MakeRuyMatrix(lhs_params, lhs_data, &ruy_lhs);
cpu_backend_gemm::detail::MakeRuyMatrix(rhs_params, rhs_data, &ruy_rhs);
cpu_backend_gemm::detail::MakeRuyMatrix(dst_params, dst_data, &ruy_dst);
ruy::MulParams<AccumScalar, DstScalar> ruy_mul_params;
cpu_backend_gemm::detail::MakeRuyMulParams(params, &ruy_mul_params);
ruy::ReferenceMul(ruy_lhs, ruy_rhs, ruy_mul_params, &ruy_dst);
}
template <typename LhsScalar, typename RhsScalar, typename AccumScalar,
typename DstScalar>
void TestSomeGemm(int rows, int depth, int cols,
const std::vector<DstScalar>& golden) {
CpuBackendContext cpu_backend_context;
std::default_random_engine random_engine;
cpu_backend_context.SetMaxNumThreads(1 + (random_engine() % 8));
bool use_caching = static_cast<bool>(random_engine() % 2);
cpu_backend_context.SetUseCaching(use_caching);
const bool use_golden = !golden.empty();
std::vector<LhsScalar> lhs_data;
std::vector<RhsScalar> rhs_data;
std::vector<AccumScalar> bias_data;
std::vector<DstScalar> dst_data;
if (use_golden) {
MakeVectorFilledWithConsecutiveInts(rows * depth, &lhs_data);
MakeVectorFilledWithConsecutiveInts(depth * cols, &rhs_data);
MakeVectorFilledWithConsecutiveInts(rows, &bias_data);
} else {
MakeDeterministicPseudoRandomVector(rows * depth, &lhs_data);
MakeDeterministicPseudoRandomVector(depth * cols, &rhs_data);
MakeDeterministicPseudoRandomVector(rows, &bias_data);
}
MakeDeterministicPseudoRandomVector(rows * cols, &dst_data);
auto random_order = [&]() {
return random_engine() % 2 ? cpu_backend_gemm::Order::kRowMajor
: cpu_backend_gemm::Order::kColMajor;
};
MatrixParams<LhsScalar> lhs_params;
lhs_params.order =
use_golden ? cpu_backend_gemm::Order::kRowMajor : random_order();
lhs_params.rows = rows;
lhs_params.cols = depth;
if (!std::is_floating_point<LhsScalar>::value &&
(!std::is_same<LhsScalar, int8_t>::value &&
!std::is_same<RhsScalar, int16_t>::value)) {
lhs_params.zero_point = 1;
if (!use_golden) {
lhs_params.zero_point += random_engine() % 8;
}
}
MatrixParams<RhsScalar> rhs_params;
rhs_params.order =
use_golden ? cpu_backend_gemm::Order::kColMajor : random_order();
rhs_params.rows = depth;
rhs_params.cols = cols;
if (!std::is_floating_point<RhsScalar>::value &&
(!std::is_same<LhsScalar, int8_t>::value &&
!std::is_same<RhsScalar, int16_t>::value)) {
rhs_params.zero_point = 1;
if (!use_golden) {
rhs_params.zero_point += random_engine() % 8;
}
}
MatrixParams<DstScalar> dst_params;
dst_params.order =
use_golden ? cpu_backend_gemm::Order::kColMajor : random_order();
dst_params.rows = rows;
dst_params.cols = cols;
if (!std::is_floating_point<DstScalar>::value &&
(!std::is_same<LhsScalar, int8_t>::value &&
!std::is_same<RhsScalar, int16_t>::value)) {
dst_params.zero_point = 1;
if (!use_golden) {
dst_params.zero_point += random_engine() % 8;
}
}
GemmParams<AccumScalar, DstScalar> params;
if (use_golden || (random_engine() % 2)) {
params.bias = bias_data.data();
}
static constexpr std::int32_t kMultiplierFixedpointMin = 1234567890;
static constexpr std::int32_t kMultiplierFixedpointMax = 1987654321;
if (!std::is_floating_point<AccumScalar>::value) {
params.multiplier_fixedpoint = kMultiplierFixedpointMin;
int bisect_min = -8 * static_cast<int>(sizeof(AccumScalar));
int bisect_max = 0;
params.multiplier_exponent = BisectReasonableMultiplierExponent(
bisect_min, bisect_max, lhs_params, lhs_data, rhs_params, rhs_data,
dst_params, &dst_data, params, &cpu_backend_context);
}
std::vector<DstScalar> expected;
if (use_golden) {
EXPECT_EQ(golden.size(), dst_data.size());
expected = golden;
} else {
expected.resize(dst_data.size());
ReferenceGemm(lhs_params, lhs_data.data(), rhs_params, rhs_data.data(),
dst_params, expected.data(), params, &cpu_backend_context);
}
PerformGemmThenCompareResultsThenAgainWithClamping(
lhs_params, lhs_data, rhs_params, rhs_data, dst_params, &dst_data, params,
expected, &cpu_backend_context);
if (!use_golden && !std::is_floating_point<AccumScalar>::value) {
std::vector<AccumScalar> multiplier_fixedpoint_perchannel(rows);
std::vector<int> multiplier_exponent_perchannel(rows);
for (int i = 0; i < rows; i++) {
multiplier_fixedpoint_perchannel[i] =
kMultiplierFixedpointMin +
(random_engine() %
(kMultiplierFixedpointMax + 1 - kMultiplierFixedpointMin));
const int exponent_min = params.multiplier_exponent - 2;
const int exponent_max = params.multiplier_exponent + 2;
multiplier_exponent_perchannel[i] =
exponent_min + (random_engine() % (exponent_max + 1 - exponent_min));
}
static constexpr QuantizationFlavor perchannel_flavor =
std::is_floating_point<AccumScalar>::value
? QuantizationFlavor::kFloatingPoint
: QuantizationFlavor::kIntegerWithPerRowMultiplier;
GemmParams<AccumScalar, DstScalar, perchannel_flavor> params_perchannel;
params_perchannel.bias = params.bias;
params_perchannel.clamp_min = params.clamp_min;
params_perchannel.clamp_max = params.clamp_max;
params_perchannel.multiplier_fixedpoint_perchannel =
multiplier_fixedpoint_perchannel.data();
params_perchannel.multiplier_exponent_perchannel =
multiplier_exponent_perchannel.data();
ReferenceGemm(lhs_params, lhs_data.data(), rhs_params, rhs_data.data(),
dst_params, expected.data(), params_perchannel,
&cpu_backend_context);
PerformGemmThenCompareResultsThenAgainWithClamping(
lhs_params, lhs_data, rhs_params, rhs_data, dst_params, &dst_data,
params_perchannel, expected, &cpu_backend_context);
}
}
template <typename LhsScalar, typename RhsScalar, typename AccumScalar,
typename DstScalar>
void TestMaybeValidGemm(int lhs_rows, int lhs_cols, int rhs_rows, int rhs_cols,
int dst_rows, int dst_cols) {
CpuBackendContext cpu_backend_context;
std::default_random_engine random_engine;
cpu_backend_context.SetMaxNumThreads(1 + (random_engine() % 8));
bool use_caching = static_cast<bool>(random_engine() % 2);
cpu_backend_context.SetUseCaching(use_caching);
std::vector<LhsScalar> lhs_data;
std::vector<RhsScalar> rhs_data;
std::vector<AccumScalar> bias_data;
std::vector<DstScalar> dst_data;
MakeDeterministicPseudoRandomVector(lhs_rows * lhs_cols, &lhs_data);
MakeDeterministicPseudoRandomVector(rhs_rows * rhs_cols, &rhs_data);
MakeDeterministicPseudoRandomVector(dst_rows, &bias_data);
MakeDeterministicPseudoRandomVector(dst_rows * dst_cols, &dst_data);
MatrixParams<LhsScalar> lhs_params;
lhs_params.order = cpu_backend_gemm::Order::kRowMajor;
lhs_params.rows = lhs_rows;
lhs_params.cols = lhs_cols;
if (!std::is_floating_point<LhsScalar>::value &&
(!std::is_same<LhsScalar, int8_t>::value &&
!std::is_same<RhsScalar, int16_t>::value)) {
lhs_params.zero_point = 1;
}
MatrixParams<RhsScalar> rhs_params;
rhs_params.order = cpu_backend_gemm::Order::kColMajor;
rhs_params.rows = rhs_rows;
rhs_params.cols = rhs_cols;
if (!std::is_floating_point<RhsScalar>::value &&
(!std::is_same<LhsScalar, int8_t>::value &&
!std::is_same<RhsScalar, int16_t>::value)) {
rhs_params.zero_point = 1;
}
MatrixParams<DstScalar> dst_params;
dst_params.order = cpu_backend_gemm::Order::kColMajor;
dst_params.rows = dst_rows;
dst_params.cols = dst_cols;
if (!std::is_floating_point<DstScalar>::value &&
(!std::is_same<LhsScalar, int8_t>::value &&
!std::is_same<RhsScalar, int16_t>::value)) {
dst_params.zero_point = 1;
}
GemmParams<AccumScalar, DstScalar> params;
params.bias = bias_data.data();
static constexpr std::int32_t kMultiplierFixedpointMin = 1234567890;
if (!std::is_floating_point<AccumScalar>::value) {
params.multiplier_fixedpoint = kMultiplierFixedpointMin;
int bisect_min = -8 * static_cast<int>(sizeof(AccumScalar));
int bisect_max = 0;
params.multiplier_exponent = BisectReasonableMultiplierExponent(
bisect_min, bisect_max, lhs_params, lhs_data, rhs_params, rhs_data,
dst_params, &dst_data, params, &cpu_backend_context);
}
Gemm(lhs_params, lhs_data.data(), rhs_params, rhs_data.data(), dst_params,
dst_data.data(), params, &cpu_backend_context);
}
TEST(CpuBackendGemmSimpleTestAgainstGolden, Float) {
TestSomeGemm<float, float, float, float>(2, 3, 4,
{15, 34, 33, 79, 51, 124, 69, 169});
}
TEST(CpuBackendGemmSimpleTestAgainstGolden, Uint8) {
TestSomeGemm<std::uint8_t, std::uint8_t, std::int32_t, std::uint8_t>(
5, 2, 3, {2, 4, 6, 7, 9, 3, 10, 16, 22, 29, 4, 15, 26, 37, 48});
}
TEST(CpuBackendGemmSimpleTestAgainstGolden, Int8) {
TestSomeGemm<std::int8_t, std::int8_t, std::int32_t, std::int8_t>(
2, 6, 3, {13, 32, 31, 81, 50, 127});
}
TEST(CpuBackendGemmInvalidGemmTest, Float) {
TestMaybeValidGemm<float, float, float, float>(2, 3, 3, 4, 2, 4);
#if !defined(TARGET_IPHONE_SIMULATOR) && !defined(TARGET_OS_IPHONE)
ASSERT_DEBUG_DEATH(
(TestMaybeValidGemm<float, float, float, float>(2, 3, 3, 0, 2, 4)), "");
ASSERT_DEBUG_DEATH(
(TestMaybeValidGemm<float, float, float, float>(2, 3, 9, 4, 2, 4)), "");
#endif
}
TEST(CpuBackendGemmSimpleTestAgainstGolden, Int8Int16) {
TestSomeGemm<std::int8_t, std::int8_t, std::int32_t, std::int16_t>(
3, 5, 4, {19, 48, 77, 48, 149, 250, 76, 249, 422, 105, 350, 595});
}
template <typename tLhsScalar, typename tRhsScalar, typename tAccumScalar,
typename tDstScalar>
struct TypesTuple {
using LhsScalar = tLhsScalar;
using RhsScalar = tRhsScalar;
using AccumScalar = tAccumScalar;
using DstScalar = tDstScalar;
};
template <typename TypesTupleType>
void TestRandomGemms(const std::vector<std::tuple<int, int, int>>& shapes) {
using LhsScalar = typename TypesTupleType::LhsScalar;
using RhsScalar = typename TypesTupleType::RhsScalar;
using AccumScalar = typename TypesTupleType::AccumScalar;
using DstScalar = typename TypesTupleType::DstScalar;
for (const auto& shape : shapes) {
int rows = std::get<0>(shape);
int depth = std::get<1>(shape);
int cols = std::get<2>(shape);
TestSomeGemm<LhsScalar, RhsScalar, AccumScalar, DstScalar>(rows, depth,
cols, {});
}
}
template <typename TypesTupleType>
class CpuBackendGemmTest : public testing::Test {};
TYPED_TEST_SUITE_P(CpuBackendGemmTest);
typedef ::testing::Types<
TypesTuple<float, float, float, float>,
TypesTuple<std::uint8_t, std::uint8_t, std::int32_t, std::uint8_t>,
TypesTuple<std::int8_t, std::int8_t, std::int32_t, std::int8_t>,
TypesTuple<std::int8_t, std::int8_t, std::int32_t, std::int16_t>,
TypesTuple<std::int8_t, std::int16_t, std::int32_t, std::int16_t>,
TypesTuple<std::uint8_t, std::uint8_t, std::int32_t, std::int8_t>>
CpuBackendGemmTestInstantiations;
TYPED_TEST_SUITE(CpuBackendGemmTest, CpuBackendGemmTestInstantiations);
TYPED_TEST(CpuBackendGemmTest, Square) {
std::vector<std::tuple<int, int, int>> shapes;
for (int size = 1; size < 50; size++) {
shapes.push_back(std::make_tuple(size, size, size));
}
TestRandomGemms<TypeParam>(shapes);
}
TYPED_TEST(CpuBackendGemmTest, SquarePowerOfTwo) {
std::vector<std::tuple<int, int, int>> shapes;
for (int size = 64; size <= 128; size *= 2) {
shapes.push_back(std::make_tuple(size, size, size));
}
TestRandomGemms<TypeParam>(shapes);
}
TYPED_TEST(CpuBackendGemmTest, MatrixTimesVector) {
std::vector<std::tuple<int, int, int>> shapes;
for (int size = 1; size < 200; size++) {
shapes.push_back(std::make_tuple(size, size, 1));
}
TestRandomGemms<TypeParam>(shapes);
}
TYPED_TEST(CpuBackendGemmTest, VectorTimesMatrix) {
std::vector<std::tuple<int, int, int>> shapes;
for (int size = 1; size < 200; size++) {
shapes.push_back(std::make_tuple(1, size, size));
}
TestRandomGemms<TypeParam>(shapes);
}
TYPED_TEST(CpuBackendGemmTest, MatrixTimesNarrow) {
std::vector<std::tuple<int, int, int>> shapes;
for (int size = 1; size < 50; size++) {
shapes.push_back(std::make_tuple(size, size, 2));
shapes.push_back(std::make_tuple(size, size, 3));
shapes.push_back(std::make_tuple(size, size, 4));
shapes.push_back(std::make_tuple(size, size, 8));
}
TestRandomGemms<TypeParam>(shapes);
}
TYPED_TEST(CpuBackendGemmTest, Rectangular) {
std::vector<std::tuple<int, int, int>> shapes;
for (int size = 1; size < 50; size++) {
shapes.push_back(std::make_tuple(size, size + 5, size + 1));
shapes.push_back(std::make_tuple(size + 10, size + 2, size));
}
TestRandomGemms<TypeParam>(shapes);
}
TYPED_TEST(CpuBackendGemmTest, HighlyRectangular) {
std::vector<std::tuple<int, int, int>> shapes;
for (int size = 1; size <= 1000; size *= 10) {
shapes.push_back(std::make_tuple(size, 10, 10));
shapes.push_back(std::make_tuple(10, size, 10));
shapes.push_back(std::make_tuple(10, 10, size));
}
TestRandomGemms<TypeParam>(shapes);
}
TYPED_TEST(CpuBackendGemmTest, InnerProduct) {
std::vector<std::tuple<int, int, int>> shapes;
for (int size = 1; size < 200; size++) {
shapes.push_back(std::make_tuple(1, size, 1));
}
TestRandomGemms<TypeParam>(shapes);
}
TYPED_TEST(CpuBackendGemmTest, OuterProduct) {
std::vector<std::tuple<int, int, int>> shapes;
for (int size = 1; size < 100; size++) {
shapes.push_back(std::make_tuple(size, 1, size));
}
TestRandomGemms<TypeParam>(shapes);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/cpu_backend_gemm.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/cpu_backend_gemm_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
77cd2394-1ec2-4b7a-876d-77461d0d270a | cpp | tensorflow/tensorflow | transpose_test_utils | tensorflow/lite/kernels/transpose_test_utils.h | tensorflow/lite/kernels/transpose_test_utils_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_TRANSPOSE_TEST_UTILS_H_
#define TENSORFLOW_LITE_KERNELS_TRANSPOSE_TEST_UTILS_H_
#include <functional>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/types/span.h"
#include "tensorflow/lite/kernels/internal/portable_tensor.h"
#include "tensorflow/lite/kernels/internal/reference/transpose.h"
#include "tensorflow/lite/kernels/internal/runtime_shape.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
// Runs the reference Transpose kernel over an iota-filled tensor of the
// given `shape`, applying permutation `perms` (output dimension i takes
// input dimension perms[i]), and returns the transposed element values.
// Filling the input with 0, 1, 2, ... makes every element's original
// position recoverable from its value.
template <typename T>
std::vector<T> RunTestPermutation(const absl::Span<const int> shape,
                                  const absl::Span<const int> perms) {
  const int num_elements = absl::c_accumulate(shape, 1, std::multiplies<>{});
  std::vector<T> src(num_elements);
  absl::c_iota(src, static_cast<T>(0));
  std::vector<T> result(num_elements);
  const RuntimeShape src_shape(shape.size(), shape.data());
  // Output shape is the input shape permuted by `perms`.
  RuntimeShape dst_shape(perms.size());
  for (int d = 0; d < perms.size(); ++d) {
    dst_shape.SetDim(d, src_shape.Dims(perms[d]));
  }
  TransposeParams params{};
  params.perm_count = static_cast<int8_t>(perms.size());
  absl::c_copy(perms, params.perm);
  reference_ops::Transpose(params, src_shape, src.data(), dst_shape,
                           result.data());
  return result;
}
}
#endif | #include "tensorflow/lite/kernels/transpose_test_utils.h"
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
using testing::ElementsAreArray;
namespace tflite {
namespace {
// 1-D identity permutation is a no-op: output equals the iota input.
TEST(TransposeTest, TestRefOps1D) {
  EXPECT_THAT(RunTestPermutation<float>({3}, {0}), ElementsAreArray({0, 1, 2}));
}
// 2-D transpose of a 3x2 tensor: {1, 0} swaps the axes, {0, 1} is identity.
TEST(TransposeTest, TestRefOps2D) {
  EXPECT_THAT(RunTestPermutation<float>({3, 2}, {1, 0}),
              ElementsAreArray({0, 2, 4, 1, 3, 5}));
  EXPECT_THAT(RunTestPermutation<float>({3, 2}, {0, 1}),
              ElementsAreArray({0, 1, 2, 3, 4, 5}));
}
// Exercises all six permutations of a 2x3x4 iota tensor against
// hand-computed expected element orders.
TEST(TransposeTest, TestRefOps3D) {
  {
    // Permutation {2, 0, 1}.
    EXPECT_THAT(
        RunTestPermutation<float>({2, 3, 4}, {2, 0, 1}),
        ElementsAreArray({0, 4, 8, 12, 16, 20, 1, 5, 9, 13, 17, 21,
                          2, 6, 10, 14, 18, 22, 3, 7, 11, 15, 19, 23}));
  }
  {
    // Identity permutation {0, 1, 2}: output equals the iota input.
    std::vector<float> out =
        RunTestPermutation<float>({2, 3, 4}, {0, 1, 2});
    std::vector<float> ref(out.size());
    absl::c_iota(ref, 0);
    EXPECT_THAT(out, ElementsAreArray(ref));
  }
  {
    // Permutation {1, 2, 0}.
    EXPECT_THAT(
        RunTestPermutation<float>({2, 3, 4}, {1, 2, 0}),
        ElementsAreArray({0, 12, 1, 13, 2, 14, 3, 15, 4, 16, 5, 17,
                          6, 18, 7, 19, 8, 20, 9, 21, 10, 22, 11, 23}));
  }
  {
    // Permutation {0, 2, 1}.
    EXPECT_THAT(
        RunTestPermutation<float>({2, 3, 4}, {0, 2, 1}),
        ElementsAreArray({0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11,
                          12, 16, 20, 13, 17, 21, 14, 18, 22, 15, 19, 23}));
  }
  {
    // Permutation {1, 0, 2}.
    EXPECT_THAT(
        RunTestPermutation<float>({2, 3, 4}, {1, 0, 2}),
        ElementsAreArray({0, 1, 2, 3, 12, 13, 14, 15, 4, 5, 6, 7,
                          16, 17, 18, 19, 8, 9, 10, 11, 20, 21, 22, 23}));
  }
  {
    // Full reversal {2, 1, 0}.
    EXPECT_THAT(
        RunTestPermutation<float>({2, 3, 4}, {2, 1, 0}),
        ElementsAreArray({0, 12, 4, 16, 8, 20, 1, 13, 5, 17, 9, 21,
                          2, 14, 6, 18, 10, 22, 3, 15, 7, 19, 11, 23}));
  }
}
// 3-D permutations where one dimension has size 1 (leading or trailing),
// which effectively reduces the transpose to a 2-D one.
TEST(TransposeTest, TestRefOps3D_OneInDimension) {
  {
    // {1, 2, 3} with {2, 0, 1}: equivalent to a 2x3 transpose.
    EXPECT_THAT(
        RunTestPermutation<float>({1, 2, 3}, {2, 0, 1}),
        ElementsAreArray({0, 3, 1, 4, 2, 5}));
  }
  {
    // {1, 2, 3} with {1, 2, 0}: moving the size-1 axis is a no-op on data.
    EXPECT_THAT(
        RunTestPermutation<float>({1, 2, 3}, {1, 2, 0}),
        ElementsAreArray({0, 1, 2, 3, 4, 5}));
  }
  {
    // {2, 3, 1} with {1, 2, 0}: equivalent to a 2x3 transpose.
    EXPECT_THAT(
        RunTestPermutation<float>({2, 3, 1}, {1, 2, 0}),
        ElementsAreArray({0, 3, 1, 4, 2, 5}));
  }
  {
    // {2, 3, 1} with {2, 0, 1}: moving the size-1 axis is a no-op on data.
    EXPECT_THAT(
        RunTestPermutation<float>({2, 3, 1}, {2, 0, 1}),
        ElementsAreArray({0, 1, 2, 3, 4, 5}));
  }
}
// Shared 4-D test body, parameterized on element type T so the same
// expected data validates float and integer transposes.
template <typename T>
void TransposeTestTestRefOps4D() {
  // Permutation {2, 0, 1, 3} of a 2x3x4x5 iota tensor.
  EXPECT_THAT(
      RunTestPermutation<T>({2, 3, 4, 5}, {2, 0, 1, 3}),
      ElementsAreArray(
          {0, 1, 2, 3, 4, 20, 21, 22, 23, 24, 40, 41, 42, 43, 44,
           60, 61, 62, 63, 64, 80, 81, 82, 83, 84, 100, 101, 102, 103, 104,
           5, 6, 7, 8, 9, 25, 26, 27, 28, 29, 45, 46, 47, 48, 49,
           65, 66, 67, 68, 69, 85, 86, 87, 88, 89, 105, 106, 107, 108, 109,
           10, 11, 12, 13, 14, 30, 31, 32, 33, 34, 50, 51, 52, 53, 54,
           70, 71, 72, 73, 74, 90, 91, 92, 93, 94, 110, 111, 112, 113, 114,
           15, 16, 17, 18, 19, 35, 36, 37, 38, 39, 55, 56, 57, 58, 59,
           75, 76, 77, 78, 79, 95, 96, 97, 98, 99, 115, 116, 117, 118, 119}));
  {
    // Identity permutation: output equals the iota input.
    std::vector<T> out = RunTestPermutation<T>({2, 3, 4, 5}, {0, 1, 2, 3});
    std::vector<T> ref(out.size());
    absl::c_iota(ref, 0);
    EXPECT_THAT(out, ElementsAreArray(ref));
  }
}
// Instantiates the shared 4-D test body for each supported element type.
TEST(TransposeTest, TestRefOps4D) { TransposeTestTestRefOps4D<float>(); }
TEST(TransposeTest, TestRefOps4DInt8) { TransposeTestTestRefOps4D<int8_t>(); }
TEST(TransposeTest, TestRefOps4DInt16) { TransposeTestTestRefOps4D<int16_t>(); }
// 1-D identity permutation on a 2-element vector.
TEST(TransposeTest, TestRefOps1D0) {
  const std::vector<float> result = RunTestPermutation<float>({2}, {0});
  EXPECT_THAT(result, ElementsAreArray({0, 1}));
}
// 2-D identity permutation: output equals the iota input.
TEST(TransposeTest, TestRefOps2D0) {
  const std::vector<float> result = RunTestPermutation<float>({2, 3}, {0, 1});
  EXPECT_THAT(result, ElementsAreArray({0, 1, 2, 3, 4, 5}));
}
// 2-D axis swap of a 2x3 tensor.
TEST(TransposeTest, TestRefOps2D1) {
  const std::vector<float> result = RunTestPermutation<float>({2, 3}, {1, 0});
  EXPECT_THAT(result, ElementsAreArray({0, 3, 1, 4, 2, 5}));
}
// 3-D identity permutation {0, 1, 2}: output equals the iota input.
TEST(TransposeTest, TestRefOps3D0) {
  EXPECT_THAT(
      RunTestPermutation<float>({2, 3, 4}, {0, 1, 2}),
      ElementsAreArray({0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
                        12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23}));
}
// Permutation {0, 2, 1}: swaps the two innermost axes of a 2x3x4 tensor.
TEST(TransposeTest, TestRefOps3D1) {
  EXPECT_THAT(
      RunTestPermutation<float>({2, 3, 4}, {0, 2, 1}),
      ElementsAreArray({0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11,
                        12, 16, 20, 13, 17, 21, 14, 18, 22, 15, 19, 23}));
}
// Permutation {1, 0, 2}: swaps the two outermost axes of a 2x3x4 tensor.
TEST(TransposeTest, TestRefOps3D2) {
  EXPECT_THAT(
      RunTestPermutation<float>({2, 3, 4}, {1, 0, 2}),
      ElementsAreArray({0, 1, 2, 3, 12, 13, 14, 15, 4, 5, 6, 7,
                        16, 17, 18, 19, 8, 9, 10, 11, 20, 21, 22, 23}));
}
// Permutation {1, 2, 0}: rotates the axes of a 2x3x4 tensor.
TEST(TransposeTest, TestRefOps3D3) {
  EXPECT_THAT(
      RunTestPermutation<float>({2, 3, 4}, {1, 2, 0}),
      ElementsAreArray({0, 12, 1, 13, 2, 14, 3, 15, 4, 16, 5, 17,
                        6, 18, 7, 19, 8, 20, 9, 21, 10, 22, 11, 23}));
}
// Permutation {2, 0, 1}: rotates the axes of a 2x3x4 tensor the other way.
TEST(TransposeTest, TestRefOps3D4) {
  EXPECT_THAT(
      RunTestPermutation<float>({2, 3, 4}, {2, 0, 1}),
      ElementsAreArray({0, 4, 8, 12, 16, 20, 1, 5, 9, 13, 17, 21,
                        2, 6, 10, 14, 18, 22, 3, 7, 11, 15, 19, 23}));
}
// Permutation {2, 1, 0}: full axis reversal of a 2x3x4 tensor.
TEST(TransposeTest, TestRefOps3D5) {
  EXPECT_THAT(
      RunTestPermutation<float>({2, 3, 4}, {2, 1, 0}),
      ElementsAreArray({0, 12, 4, 16, 8, 20, 1, 13, 5, 17, 9, 21,
                        2, 14, 6, 18, 10, 22, 3, 15, 7, 19, 11, 23}));
}
// 4-D identity permutation on a 2x3x4x5 tensor: output is 0..119 in order.
TEST(TransposeTest, TestRefOps4D0) {
  std::vector<float> expected(120);
  absl::c_iota(expected, 0);
  EXPECT_THAT(RunTestPermutation<float>({2, 3, 4, 5}, {0, 1, 2, 3}),
              ElementsAreArray(expected));
}
TEST(TransposeTest, TestRefOps4D1) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5}, {0, 1, 3, 2}),
ElementsAreArray(
{0, 5, 10, 15, 1, 6, 11, 16, 2, 7, 12, 17, 3, 8,
13, 18, 4, 9, 14, 19, 20, 25, 30, 35, 21, 26, 31, 36,
22, 27, 32, 37, 23, 28, 33, 38, 24, 29, 34, 39, 40, 45,
50, 55, 41, 46, 51, 56, 42, 47, 52, 57, 43, 48, 53, 58,
44, 49, 54, 59, 60, 65, 70, 75, 61, 66, 71, 76, 62, 67,
72, 77, 63, 68, 73, 78, 64, 69, 74, 79, 80, 85, 90, 95,
81, 86, 91, 96, 82, 87, 92, 97, 83, 88, 93, 98, 84, 89,
94, 99, 100, 105, 110, 115, 101, 106, 111, 116, 102, 107, 112, 117,
103, 108, 113, 118, 104, 109, 114, 119}));
}
TEST(TransposeTest, TestRefOps4D2) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5}, {0, 2, 1, 3}),
ElementsAreArray(
{0, 1, 2, 3, 4, 20, 21, 22, 23, 24, 40, 41, 42, 43, 44,
5, 6, 7, 8, 9, 25, 26, 27, 28, 29, 45, 46, 47, 48, 49,
10, 11, 12, 13, 14, 30, 31, 32, 33, 34, 50, 51, 52, 53, 54,
15, 16, 17, 18, 19, 35, 36, 37, 38, 39, 55, 56, 57, 58, 59,
60, 61, 62, 63, 64, 80, 81, 82, 83, 84, 100, 101, 102, 103, 104,
65, 66, 67, 68, 69, 85, 86, 87, 88, 89, 105, 106, 107, 108, 109,
70, 71, 72, 73, 74, 90, 91, 92, 93, 94, 110, 111, 112, 113, 114,
75, 76, 77, 78, 79, 95, 96, 97, 98, 99, 115, 116, 117, 118, 119}));
}
TEST(TransposeTest, TestRefOps4D3) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5}, {0, 2, 3, 1}),
ElementsAreArray(
{0, 20, 40, 1, 21, 41, 2, 22, 42, 3, 23, 43, 4, 24, 44,
5, 25, 45, 6, 26, 46, 7, 27, 47, 8, 28, 48, 9, 29, 49,
10, 30, 50, 11, 31, 51, 12, 32, 52, 13, 33, 53, 14, 34, 54,
15, 35, 55, 16, 36, 56, 17, 37, 57, 18, 38, 58, 19, 39, 59,
60, 80, 100, 61, 81, 101, 62, 82, 102, 63, 83, 103, 64, 84, 104,
65, 85, 105, 66, 86, 106, 67, 87, 107, 68, 88, 108, 69, 89, 109,
70, 90, 110, 71, 91, 111, 72, 92, 112, 73, 93, 113, 74, 94, 114,
75, 95, 115, 76, 96, 116, 77, 97, 117, 78, 98, 118, 79, 99, 119}));
}
TEST(TransposeTest, TestRefOps4D4) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5}, {0, 3, 1, 2}),
ElementsAreArray({0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55,
1, 6, 11, 16, 21, 26, 31, 36, 41, 46, 51, 56,
2, 7, 12, 17, 22, 27, 32, 37, 42, 47, 52, 57,
3, 8, 13, 18, 23, 28, 33, 38, 43, 48, 53, 58,
4, 9, 14, 19, 24, 29, 34, 39, 44, 49, 54, 59,
60, 65, 70, 75, 80, 85, 90, 95, 100, 105, 110, 115,
61, 66, 71, 76, 81, 86, 91, 96, 101, 106, 111, 116,
62, 67, 72, 77, 82, 87, 92, 97, 102, 107, 112, 117,
63, 68, 73, 78, 83, 88, 93, 98, 103, 108, 113, 118,
64, 69, 74, 79, 84, 89, 94, 99, 104, 109, 114, 119}));
}
TEST(TransposeTest, TestRefOps4D5) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5}, {0, 3, 2, 1}),
ElementsAreArray(
{0, 20, 40, 5, 25, 45, 10, 30, 50, 15, 35, 55, 1, 21, 41,
6, 26, 46, 11, 31, 51, 16, 36, 56, 2, 22, 42, 7, 27, 47,
12, 32, 52, 17, 37, 57, 3, 23, 43, 8, 28, 48, 13, 33, 53,
18, 38, 58, 4, 24, 44, 9, 29, 49, 14, 34, 54, 19, 39, 59,
60, 80, 100, 65, 85, 105, 70, 90, 110, 75, 95, 115, 61, 81, 101,
66, 86, 106, 71, 91, 111, 76, 96, 116, 62, 82, 102, 67, 87, 107,
72, 92, 112, 77, 97, 117, 63, 83, 103, 68, 88, 108, 73, 93, 113,
78, 98, 118, 64, 84, 104, 69, 89, 109, 74, 94, 114, 79, 99, 119}));
}
TEST(TransposeTest, TestRefOps4D6) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5}, {1, 0, 2, 3}),
ElementsAreArray(
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
14, 15, 16, 17, 18, 19, 60, 61, 62, 63, 64, 65, 66, 67,
68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
36, 37, 38, 39, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 40, 41, 42, 43,
44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
58, 59, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111,
112, 113, 114, 115, 116, 117, 118, 119}));
}
TEST(TransposeTest, TestRefOps4D7) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5}, {1, 0, 3, 2}),
ElementsAreArray(
{0, 5, 10, 15, 1, 6, 11, 16, 2, 7, 12, 17, 3, 8,
13, 18, 4, 9, 14, 19, 60, 65, 70, 75, 61, 66, 71, 76,
62, 67, 72, 77, 63, 68, 73, 78, 64, 69, 74, 79, 20, 25,
30, 35, 21, 26, 31, 36, 22, 27, 32, 37, 23, 28, 33, 38,
24, 29, 34, 39, 80, 85, 90, 95, 81, 86, 91, 96, 82, 87,
92, 97, 83, 88, 93, 98, 84, 89, 94, 99, 40, 45, 50, 55,
41, 46, 51, 56, 42, 47, 52, 57, 43, 48, 53, 58, 44, 49,
54, 59, 100, 105, 110, 115, 101, 106, 111, 116, 102, 107, 112, 117,
103, 108, 113, 118, 104, 109, 114, 119}));
}
TEST(TransposeTest, TestRefOps4D8) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5}, {1, 2, 0, 3}),
ElementsAreArray(
{0, 1, 2, 3, 4, 60, 61, 62, 63, 64, 5, 6, 7, 8,
9, 65, 66, 67, 68, 69, 10, 11, 12, 13, 14, 70, 71, 72,
73, 74, 15, 16, 17, 18, 19, 75, 76, 77, 78, 79, 20, 21,
22, 23, 24, 80, 81, 82, 83, 84, 25, 26, 27, 28, 29, 85,
86, 87, 88, 89, 30, 31, 32, 33, 34, 90, 91, 92, 93, 94,
35, 36, 37, 38, 39, 95, 96, 97, 98, 99, 40, 41, 42, 43,
44, 100, 101, 102, 103, 104, 45, 46, 47, 48, 49, 105, 106, 107,
108, 109, 50, 51, 52, 53, 54, 110, 111, 112, 113, 114, 55, 56,
57, 58, 59, 115, 116, 117, 118, 119}));
}
TEST(TransposeTest, TestRefOps4D9) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5}, {1, 2, 3, 0}),
ElementsAreArray({0, 60, 1, 61, 2, 62, 3, 63, 4, 64, 5, 65,
6, 66, 7, 67, 8, 68, 9, 69, 10, 70, 11, 71,
12, 72, 13, 73, 14, 74, 15, 75, 16, 76, 17, 77,
18, 78, 19, 79, 20, 80, 21, 81, 22, 82, 23, 83,
24, 84, 25, 85, 26, 86, 27, 87, 28, 88, 29, 89,
30, 90, 31, 91, 32, 92, 33, 93, 34, 94, 35, 95,
36, 96, 37, 97, 38, 98, 39, 99, 40, 100, 41, 101,
42, 102, 43, 103, 44, 104, 45, 105, 46, 106, 47, 107,
48, 108, 49, 109, 50, 110, 51, 111, 52, 112, 53, 113,
54, 114, 55, 115, 56, 116, 57, 117, 58, 118, 59, 119}));
}
TEST(TransposeTest, TestRefOps4D10) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5}, {1, 3, 0, 2}),
ElementsAreArray(
{0, 5, 10, 15, 60, 65, 70, 75, 1, 6, 11, 16, 61, 66,
71, 76, 2, 7, 12, 17, 62, 67, 72, 77, 3, 8, 13, 18,
63, 68, 73, 78, 4, 9, 14, 19, 64, 69, 74, 79, 20, 25,
30, 35, 80, 85, 90, 95, 21, 26, 31, 36, 81, 86, 91, 96,
22, 27, 32, 37, 82, 87, 92, 97, 23, 28, 33, 38, 83, 88,
93, 98, 24, 29, 34, 39, 84, 89, 94, 99, 40, 45, 50, 55,
100, 105, 110, 115, 41, 46, 51, 56, 101, 106, 111, 116, 42, 47,
52, 57, 102, 107, 112, 117, 43, 48, 53, 58, 103, 108, 113, 118,
44, 49, 54, 59, 104, 109, 114, 119}));
}
TEST(TransposeTest, TestRefOps4D11) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5}, {1, 3, 2, 0}),
ElementsAreArray({0, 60, 5, 65, 10, 70, 15, 75, 1, 61, 6, 66,
11, 71, 16, 76, 2, 62, 7, 67, 12, 72, 17, 77,
3, 63, 8, 68, 13, 73, 18, 78, 4, 64, 9, 69,
14, 74, 19, 79, 20, 80, 25, 85, 30, 90, 35, 95,
21, 81, 26, 86, 31, 91, 36, 96, 22, 82, 27, 87,
32, 92, 37, 97, 23, 83, 28, 88, 33, 93, 38, 98,
24, 84, 29, 89, 34, 94, 39, 99, 40, 100, 45, 105,
50, 110, 55, 115, 41, 101, 46, 106, 51, 111, 56, 116,
42, 102, 47, 107, 52, 112, 57, 117, 43, 103, 48, 108,
53, 113, 58, 118, 44, 104, 49, 109, 54, 114, 59, 119}));
}
TEST(TransposeTest, TestRefOps4D12) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5}, {2, 0, 1, 3}),
ElementsAreArray(
{0, 1, 2, 3, 4, 20, 21, 22, 23, 24, 40, 41, 42, 43, 44,
60, 61, 62, 63, 64, 80, 81, 82, 83, 84, 100, 101, 102, 103, 104,
5, 6, 7, 8, 9, 25, 26, 27, 28, 29, 45, 46, 47, 48, 49,
65, 66, 67, 68, 69, 85, 86, 87, 88, 89, 105, 106, 107, 108, 109,
10, 11, 12, 13, 14, 30, 31, 32, 33, 34, 50, 51, 52, 53, 54,
70, 71, 72, 73, 74, 90, 91, 92, 93, 94, 110, 111, 112, 113, 114,
15, 16, 17, 18, 19, 35, 36, 37, 38, 39, 55, 56, 57, 58, 59,
75, 76, 77, 78, 79, 95, 96, 97, 98, 99, 115, 116, 117, 118, 119}));
}
TEST(TransposeTest, TestRefOps4D13) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5}, {2, 0, 3, 1}),
ElementsAreArray(
{0, 20, 40, 1, 21, 41, 2, 22, 42, 3, 23, 43, 4, 24, 44,
60, 80, 100, 61, 81, 101, 62, 82, 102, 63, 83, 103, 64, 84, 104,
5, 25, 45, 6, 26, 46, 7, 27, 47, 8, 28, 48, 9, 29, 49,
65, 85, 105, 66, 86, 106, 67, 87, 107, 68, 88, 108, 69, 89, 109,
10, 30, 50, 11, 31, 51, 12, 32, 52, 13, 33, 53, 14, 34, 54,
70, 90, 110, 71, 91, 111, 72, 92, 112, 73, 93, 113, 74, 94, 114,
15, 35, 55, 16, 36, 56, 17, 37, 57, 18, 38, 58, 19, 39, 59,
75, 95, 115, 76, 96, 116, 77, 97, 117, 78, 98, 118, 79, 99, 119}));
}
TEST(TransposeTest, TestRefOps4D14) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5}, {2, 1, 0, 3}),
ElementsAreArray(
{0, 1, 2, 3, 4, 60, 61, 62, 63, 64, 20, 21, 22, 23, 24,
80, 81, 82, 83, 84, 40, 41, 42, 43, 44, 100, 101, 102, 103, 104,
5, 6, 7, 8, 9, 65, 66, 67, 68, 69, 25, 26, 27, 28, 29,
85, 86, 87, 88, 89, 45, 46, 47, 48, 49, 105, 106, 107, 108, 109,
10, 11, 12, 13, 14, 70, 71, 72, 73, 74, 30, 31, 32, 33, 34,
90, 91, 92, 93, 94, 50, 51, 52, 53, 54, 110, 111, 112, 113, 114,
15, 16, 17, 18, 19, 75, 76, 77, 78, 79, 35, 36, 37, 38, 39,
95, 96, 97, 98, 99, 55, 56, 57, 58, 59, 115, 116, 117, 118, 119}));
}
TEST(TransposeTest, TestRefOps4D15) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5}, {2, 1, 3, 0}),
ElementsAreArray(
{0, 60, 1, 61, 2, 62, 3, 63, 4, 64, 20, 80, 21, 81, 22,
82, 23, 83, 24, 84, 40, 100, 41, 101, 42, 102, 43, 103, 44, 104,
5, 65, 6, 66, 7, 67, 8, 68, 9, 69, 25, 85, 26, 86, 27,
87, 28, 88, 29, 89, 45, 105, 46, 106, 47, 107, 48, 108, 49, 109,
10, 70, 11, 71, 12, 72, 13, 73, 14, 74, 30, 90, 31, 91, 32,
92, 33, 93, 34, 94, 50, 110, 51, 111, 52, 112, 53, 113, 54, 114,
15, 75, 16, 76, 17, 77, 18, 78, 19, 79, 35, 95, 36, 96, 37,
97, 38, 98, 39, 99, 55, 115, 56, 116, 57, 117, 58, 118, 59, 119}));
}
TEST(TransposeTest, TestRefOps4D16) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5}, {2, 3, 0, 1}),
ElementsAreArray(
{0, 20, 40, 60, 80, 100, 1, 21, 41, 61, 81, 101, 2, 22, 42,
62, 82, 102, 3, 23, 43, 63, 83, 103, 4, 24, 44, 64, 84, 104,
5, 25, 45, 65, 85, 105, 6, 26, 46, 66, 86, 106, 7, 27, 47,
67, 87, 107, 8, 28, 48, 68, 88, 108, 9, 29, 49, 69, 89, 109,
10, 30, 50, 70, 90, 110, 11, 31, 51, 71, 91, 111, 12, 32, 52,
72, 92, 112, 13, 33, 53, 73, 93, 113, 14, 34, 54, 74, 94, 114,
15, 35, 55, 75, 95, 115, 16, 36, 56, 76, 96, 116, 17, 37, 57,
77, 97, 117, 18, 38, 58, 78, 98, 118, 19, 39, 59, 79, 99, 119}));
}
TEST(TransposeTest, TestRefOps4D17) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5}, {2, 3, 1, 0}),
ElementsAreArray(
{0, 60, 20, 80, 40, 100, 1, 61, 21, 81, 41, 101, 2, 62, 22,
82, 42, 102, 3, 63, 23, 83, 43, 103, 4, 64, 24, 84, 44, 104,
5, 65, 25, 85, 45, 105, 6, 66, 26, 86, 46, 106, 7, 67, 27,
87, 47, 107, 8, 68, 28, 88, 48, 108, 9, 69, 29, 89, 49, 109,
10, 70, 30, 90, 50, 110, 11, 71, 31, 91, 51, 111, 12, 72, 32,
92, 52, 112, 13, 73, 33, 93, 53, 113, 14, 74, 34, 94, 54, 114,
15, 75, 35, 95, 55, 115, 16, 76, 36, 96, 56, 116, 17, 77, 37,
97, 57, 117, 18, 78, 38, 98, 58, 118, 19, 79, 39, 99, 59, 119}));
}
TEST(TransposeTest, TestRefOps4D18) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5}, {3, 0, 1, 2}),
ElementsAreArray({0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55,
60, 65, 70, 75, 80, 85, 90, 95, 100, 105, 110, 115,
1, 6, 11, 16, 21, 26, 31, 36, 41, 46, 51, 56,
61, 66, 71, 76, 81, 86, 91, 96, 101, 106, 111, 116,
2, 7, 12, 17, 22, 27, 32, 37, 42, 47, 52, 57,
62, 67, 72, 77, 82, 87, 92, 97, 102, 107, 112, 117,
3, 8, 13, 18, 23, 28, 33, 38, 43, 48, 53, 58,
63, 68, 73, 78, 83, 88, 93, 98, 103, 108, 113, 118,
4, 9, 14, 19, 24, 29, 34, 39, 44, 49, 54, 59,
64, 69, 74, 79, 84, 89, 94, 99, 104, 109, 114, 119}));
}
TEST(TransposeTest, TestRefOps4D19) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5}, {3, 0, 2, 1}),
ElementsAreArray(
{0, 20, 40, 5, 25, 45, 10, 30, 50, 15, 35, 55, 60, 80, 100,
65, 85, 105, 70, 90, 110, 75, 95, 115, 1, 21, 41, 6, 26, 46,
11, 31, 51, 16, 36, 56, 61, 81, 101, 66, 86, 106, 71, 91, 111,
76, 96, 116, 2, 22, 42, 7, 27, 47, 12, 32, 52, 17, 37, 57,
62, 82, 102, 67, 87, 107, 72, 92, 112, 77, 97, 117, 3, 23, 43,
8, 28, 48, 13, 33, 53, 18, 38, 58, 63, 83, 103, 68, 88, 108,
73, 93, 113, 78, 98, 118, 4, 24, 44, 9, 29, 49, 14, 34, 54,
19, 39, 59, 64, 84, 104, 69, 89, 109, 74, 94, 114, 79, 99, 119}));
}
TEST(TransposeTest, TestRefOps4D20) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5}, {3, 1, 0, 2}),
ElementsAreArray({0, 5, 10, 15, 60, 65, 70, 75, 20, 25, 30, 35,
80, 85, 90, 95, 40, 45, 50, 55, 100, 105, 110, 115,
1, 6, 11, 16, 61, 66, 71, 76, 21, 26, 31, 36,
81, 86, 91, 96, 41, 46, 51, 56, 101, 106, 111, 116,
2, 7, 12, 17, 62, 67, 72, 77, 22, 27, 32, 37,
82, 87, 92, 97, 42, 47, 52, 57, 102, 107, 112, 117,
3, 8, 13, 18, 63, 68, 73, 78, 23, 28, 33, 38,
83, 88, 93, 98, 43, 48, 53, 58, 103, 108, 113, 118,
4, 9, 14, 19, 64, 69, 74, 79, 24, 29, 34, 39,
84, 89, 94, 99, 44, 49, 54, 59, 104, 109, 114, 119}));
}
TEST(TransposeTest, TestRefOps4D21) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5}, {3, 1, 2, 0}),
ElementsAreArray({0, 60, 5, 65, 10, 70, 15, 75, 20, 80, 25, 85,
30, 90, 35, 95, 40, 100, 45, 105, 50, 110, 55, 115,
1, 61, 6, 66, 11, 71, 16, 76, 21, 81, 26, 86,
31, 91, 36, 96, 41, 101, 46, 106, 51, 111, 56, 116,
2, 62, 7, 67, 12, 72, 17, 77, 22, 82, 27, 87,
32, 92, 37, 97, 42, 102, 47, 107, 52, 112, 57, 117,
3, 63, 8, 68, 13, 73, 18, 78, 23, 83, 28, 88,
33, 93, 38, 98, 43, 103, 48, 108, 53, 113, 58, 118,
4, 64, 9, 69, 14, 74, 19, 79, 24, 84, 29, 89,
34, 94, 39, 99, 44, 104, 49, 109, 54, 114, 59, 119}));
}
TEST(TransposeTest, TestRefOps4D22) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5}, {3, 2, 0, 1}),
ElementsAreArray(
{0, 20, 40, 60, 80, 100, 5, 25, 45, 65, 85, 105, 10, 30, 50,
70, 90, 110, 15, 35, 55, 75, 95, 115, 1, 21, 41, 61, 81, 101,
6, 26, 46, 66, 86, 106, 11, 31, 51, 71, 91, 111, 16, 36, 56,
76, 96, 116, 2, 22, 42, 62, 82, 102, 7, 27, 47, 67, 87, 107,
12, 32, 52, 72, 92, 112, 17, 37, 57, 77, 97, 117, 3, 23, 43,
63, 83, 103, 8, 28, 48, 68, 88, 108, 13, 33, 53, 73, 93, 113,
18, 38, 58, 78, 98, 118, 4, 24, 44, 64, 84, 104, 9, 29, 49,
69, 89, 109, 14, 34, 54, 74, 94, 114, 19, 39, 59, 79, 99, 119}));
}
TEST(TransposeTest, TestRefOps4D23) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5}, {3, 2, 1, 0}),
ElementsAreArray(
{0, 60, 20, 80, 40, 100, 5, 65, 25, 85, 45, 105, 10, 70, 30,
90, 50, 110, 15, 75, 35, 95, 55, 115, 1, 61, 21, 81, 41, 101,
6, 66, 26, 86, 46, 106, 11, 71, 31, 91, 51, 111, 16, 76, 36,
96, 56, 116, 2, 62, 22, 82, 42, 102, 7, 67, 27, 87, 47, 107,
12, 72, 32, 92, 52, 112, 17, 77, 37, 97, 57, 117, 3, 63, 23,
83, 43, 103, 8, 68, 28, 88, 48, 108, 13, 73, 33, 93, 53, 113,
18, 78, 38, 98, 58, 118, 4, 64, 24, 84, 44, 104, 9, 69, 29,
89, 49, 109, 14, 74, 34, 94, 54, 114, 19, 79, 39, 99, 59, 119}));
}
// 5-D identity permutation on a 2x3x4x5x6 tensor: output is 0..719 in order.
TEST(TransposeTest, TestRefOps5D0) {
  std::vector<float> expected(720);
  absl::c_iota(expected, 0);
  EXPECT_THAT(RunTestPermutation<float>({2, 3, 4, 5, 6}, {0, 1, 2, 3, 4}),
              ElementsAreArray(expected));
}
TEST(TransposeTest, TestRefOps5D1) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5, 6},
{4, 3, 2, 1, 0}),
ElementsAreArray(
{0, 360, 120, 480, 240, 600, 30, 390, 150, 510, 270, 630, 60, 420,
180, 540, 300, 660, 90, 450, 210, 570, 330, 690, 6, 366, 126, 486,
246, 606, 36, 396, 156, 516, 276, 636, 66, 426, 186, 546, 306, 666,
96, 456, 216, 576, 336, 696, 12, 372, 132, 492, 252, 612, 42, 402,
162, 522, 282, 642, 72, 432, 192, 552, 312, 672, 102, 462, 222, 582,
342, 702, 18, 378, 138, 498, 258, 618, 48, 408, 168, 528, 288, 648,
78, 438, 198, 558, 318, 678, 108, 468, 228, 588, 348, 708, 24, 384,
144, 504, 264, 624, 54, 414, 174, 534, 294, 654, 84, 444, 204, 564,
324, 684, 114, 474, 234, 594, 354, 714, 1, 361, 121, 481, 241, 601,
31, 391, 151, 511, 271, 631, 61, 421, 181, 541, 301, 661, 91, 451,
211, 571, 331, 691, 7, 367, 127, 487, 247, 607, 37, 397, 157, 517,
277, 637, 67, 427, 187, 547, 307, 667, 97, 457, 217, 577, 337, 697,
13, 373, 133, 493, 253, 613, 43, 403, 163, 523, 283, 643, 73, 433,
193, 553, 313, 673, 103, 463, 223, 583, 343, 703, 19, 379, 139, 499,
259, 619, 49, 409, 169, 529, 289, 649, 79, 439, 199, 559, 319, 679,
109, 469, 229, 589, 349, 709, 25, 385, 145, 505, 265, 625, 55, 415,
175, 535, 295, 655, 85, 445, 205, 565, 325, 685, 115, 475, 235, 595,
355, 715, 2, 362, 122, 482, 242, 602, 32, 392, 152, 512, 272, 632,
62, 422, 182, 542, 302, 662, 92, 452, 212, 572, 332, 692, 8, 368,
128, 488, 248, 608, 38, 398, 158, 518, 278, 638, 68, 428, 188, 548,
308, 668, 98, 458, 218, 578, 338, 698, 14, 374, 134, 494, 254, 614,
44, 404, 164, 524, 284, 644, 74, 434, 194, 554, 314, 674, 104, 464,
224, 584, 344, 704, 20, 380, 140, 500, 260, 620, 50, 410, 170, 530,
290, 650, 80, 440, 200, 560, 320, 680, 110, 470, 230, 590, 350, 710,
26, 386, 146, 506, 266, 626, 56, 416, 176, 536, 296, 656, 86, 446,
206, 566, 326, 686, 116, 476, 236, 596, 356, 716, 3, 363, 123, 483,
243, 603, 33, 393, 153, 513, 273, 633, 63, 423, 183, 543, 303, 663,
93, 453, 213, 573, 333, 693, 9, 369, 129, 489, 249, 609, 39, 399,
159, 519, 279, 639, 69, 429, 189, 549, 309, 669, 99, 459, 219, 579,
339, 699, 15, 375, 135, 495, 255, 615, 45, 405, 165, 525, 285, 645,
75, 435, 195, 555, 315, 675, 105, 465, 225, 585, 345, 705, 21, 381,
141, 501, 261, 621, 51, 411, 171, 531, 291, 651, 81, 441, 201, 561,
321, 681, 111, 471, 231, 591, 351, 711, 27, 387, 147, 507, 267, 627,
57, 417, 177, 537, 297, 657, 87, 447, 207, 567, 327, 687, 117, 477,
237, 597, 357, 717, 4, 364, 124, 484, 244, 604, 34, 394, 154, 514,
274, 634, 64, 424, 184, 544, 304, 664, 94, 454, 214, 574, 334, 694,
10, 370, 130, 490, 250, 610, 40, 400, 160, 520, 280, 640, 70, 430,
190, 550, 310, 670, 100, 460, 220, 580, 340, 700, 16, 376, 136, 496,
256, 616, 46, 406, 166, 526, 286, 646, 76, 436, 196, 556, 316, 676,
106, 466, 226, 586, 346, 706, 22, 382, 142, 502, 262, 622, 52, 412,
172, 532, 292, 652, 82, 442, 202, 562, 322, 682, 112, 472, 232, 592,
352, 712, 28, 388, 148, 508, 268, 628, 58, 418, 178, 538, 298, 658,
88, 448, 208, 568, 328, 688, 118, 478, 238, 598, 358, 718, 5, 365,
125, 485, 245, 605, 35, 395, 155, 515, 275, 635, 65, 425, 185, 545,
305, 665, 95, 455, 215, 575, 335, 695, 11, 371, 131, 491, 251, 611,
41, 401, 161, 521, 281, 641, 71, 431, 191, 551, 311, 671, 101, 461,
221, 581, 341, 701, 17, 377, 137, 497, 257, 617, 47, 407, 167, 527,
287, 647, 77, 437, 197, 557, 317, 677, 107, 467, 227, 587, 347, 707,
23, 383, 143, 503, 263, 623, 53, 413, 173, 533, 293, 653, 83, 443,
203, 563, 323, 683, 113, 473, 233, 593, 353, 713, 29, 389, 149, 509,
269, 629, 59, 419, 179, 539, 299, 659, 89, 449, 209, 569, 329, 689,
119, 479, 239, 599, 359, 719}));
}
TEST(TransposeTest, TestRefOps5D2) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5, 6},
{1, 0, 2, 3, 4}),
ElementsAreArray(
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83,
84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97,
98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111,
112, 113, 114, 115, 116, 117, 118, 119, 360, 361, 362, 363, 364, 365,
366, 367, 368, 369, 370, 371, 372, 373, 374, 375, 376, 377, 378, 379,
380, 381, 382, 383, 384, 385, 386, 387, 388, 389, 390, 391, 392, 393,
394, 395, 396, 397, 398, 399, 400, 401, 402, 403, 404, 405, 406, 407,
408, 409, 410, 411, 412, 413, 414, 415, 416, 417, 418, 419, 420, 421,
422, 423, 424, 425, 426, 427, 428, 429, 430, 431, 432, 433, 434, 435,
436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449,
450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 460, 461, 462, 463,
464, 465, 466, 467, 468, 469, 470, 471, 472, 473, 474, 475, 476, 477,
478, 479, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131,
132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145,
146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159,
160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173,
174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187,
188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201,
202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215,
216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229,
230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 480, 481, 482, 483,
484, 485, 486, 487, 488, 489, 490, 491, 492, 493, 494, 495, 496, 497,
498, 499, 500, 501, 502, 503, 504, 505, 506, 507, 508, 509, 510, 511,
512, 513, 514, 515, 516, 517, 518, 519, 520, 521, 522, 523, 524, 525,
526, 527, 528, 529, 530, 531, 532, 533, 534, 535, 536, 537, 538, 539,
540, 541, 542, 543, 544, 545, 546, 547, 548, 549, 550, 551, 552, 553,
554, 555, 556, 557, 558, 559, 560, 561, 562, 563, 564, 565, 566, 567,
568, 569, 570, 571, 572, 573, 574, 575, 576, 577, 578, 579, 580, 581,
582, 583, 584, 585, 586, 587, 588, 589, 590, 591, 592, 593, 594, 595,
596, 597, 598, 599, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249,
250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, 262, 263,
264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277,
278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, 291,
292, 293, 294, 295, 296, 297, 298, 299, 300, 301, 302, 303, 304, 305,
306, 307, 308, 309, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319,
320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 333,
334, 335, 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, 346, 347,
348, 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, 600, 601,
602, 603, 604, 605, 606, 607, 608, 609, 610, 611, 612, 613, 614, 615,
616, 617, 618, 619, 620, 621, 622, 623, 624, 625, 626, 627, 628, 629,
630, 631, 632, 633, 634, 635, 636, 637, 638, 639, 640, 641, 642, 643,
644, 645, 646, 647, 648, 649, 650, 651, 652, 653, 654, 655, 656, 657,
658, 659, 660, 661, 662, 663, 664, 665, 666, 667, 668, 669, 670, 671,
672, 673, 674, 675, 676, 677, 678, 679, 680, 681, 682, 683, 684, 685,
686, 687, 688, 689, 690, 691, 692, 693, 694, 695, 696, 697, 698, 699,
700, 701, 702, 703, 704, 705, 706, 707, 708, 709, 710, 711, 712, 713,
714, 715, 716, 717, 718, 719}));
}
TEST(TransposeTest, TestRefOps5D3) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5, 6},
{1, 2, 0, 3, 4}),
ElementsAreArray(
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
28, 29, 360, 361, 362, 363, 364, 365, 366, 367, 368, 369, 370, 371,
372, 373, 374, 375, 376, 377, 378, 379, 380, 381, 382, 383, 384, 385,
386, 387, 388, 389, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
54, 55, 56, 57, 58, 59, 390, 391, 392, 393, 394, 395, 396, 397,
398, 399, 400, 401, 402, 403, 404, 405, 406, 407, 408, 409, 410, 411,
412, 413, 414, 415, 416, 417, 418, 419, 60, 61, 62, 63, 64, 65,
66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 420, 421, 422, 423,
424, 425, 426, 427, 428, 429, 430, 431, 432, 433, 434, 435, 436, 437,
438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 90, 91,
92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105,
106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119,
450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 460, 461, 462, 463,
464, 465, 466, 467, 468, 469, 470, 471, 472, 473, 474, 475, 476, 477,
478, 479, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131,
132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145,
146, 147, 148, 149, 480, 481, 482, 483, 484, 485, 486, 487, 488, 489,
490, 491, 492, 493, 494, 495, 496, 497, 498, 499, 500, 501, 502, 503,
504, 505, 506, 507, 508, 509, 150, 151, 152, 153, 154, 155, 156, 157,
158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171,
172, 173, 174, 175, 176, 177, 178, 179, 510, 511, 512, 513, 514, 515,
516, 517, 518, 519, 520, 521, 522, 523, 524, 525, 526, 527, 528, 529,
530, 531, 532, 533, 534, 535, 536, 537, 538, 539, 180, 181, 182, 183,
184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197,
198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 540, 541,
542, 543, 544, 545, 546, 547, 548, 549, 550, 551, 552, 553, 554, 555,
556, 557, 558, 559, 560, 561, 562, 563, 564, 565, 566, 567, 568, 569,
210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223,
224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237,
238, 239, 570, 571, 572, 573, 574, 575, 576, 577, 578, 579, 580, 581,
582, 583, 584, 585, 586, 587, 588, 589, 590, 591, 592, 593, 594, 595,
596, 597, 598, 599, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249,
250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, 262, 263,
264, 265, 266, 267, 268, 269, 600, 601, 602, 603, 604, 605, 606, 607,
608, 609, 610, 611, 612, 613, 614, 615, 616, 617, 618, 619, 620, 621,
622, 623, 624, 625, 626, 627, 628, 629, 270, 271, 272, 273, 274, 275,
276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289,
290, 291, 292, 293, 294, 295, 296, 297, 298, 299, 630, 631, 632, 633,
634, 635, 636, 637, 638, 639, 640, 641, 642, 643, 644, 645, 646, 647,
648, 649, 650, 651, 652, 653, 654, 655, 656, 657, 658, 659, 300, 301,
302, 303, 304, 305, 306, 307, 308, 309, 310, 311, 312, 313, 314, 315,
316, 317, 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329,
660, 661, 662, 663, 664, 665, 666, 667, 668, 669, 670, 671, 672, 673,
674, 675, 676, 677, 678, 679, 680, 681, 682, 683, 684, 685, 686, 687,
688, 689, 330, 331, 332, 333, 334, 335, 336, 337, 338, 339, 340, 341,
342, 343, 344, 345, 346, 347, 348, 349, 350, 351, 352, 353, 354, 355,
356, 357, 358, 359, 690, 691, 692, 693, 694, 695, 696, 697, 698, 699,
700, 701, 702, 703, 704, 705, 706, 707, 708, 709, 710, 711, 712, 713,
714, 715, 716, 717, 718, 719}));
}
// 5-D transpose of a row-major iota tensor (values 0..719) of shape
// {2, 3, 4, 5, 6} with permutation {1, 2, 3, 0, 4}: only axis 0 (size 2)
// moves, to position 3, so the expected output interleaves blocks of 6
// consecutive values from the first half (0..359) and the second half
// (360..719) of the input. The golden array below is that precomputed
// result, compared element-by-element.
// NOTE(review): RunTestPermutation is a file-local helper defined earlier in
// this file — presumably it fills the input with iota and runs the reference
// transpose kernel; confirm against its definition.
TEST(TransposeTest, TestRefOps5D4) {
  EXPECT_THAT(
      RunTestPermutation<float>({2, 3, 4, 5, 6},
                                {1, 2, 3, 0, 4}),
      ElementsAreArray(
          {0, 1, 2, 3, 4, 5, 360, 361, 362, 363, 364, 365, 6, 7,
           8, 9, 10, 11, 366, 367, 368, 369, 370, 371, 12, 13, 14, 15,
           16, 17, 372, 373, 374, 375, 376, 377, 18, 19, 20, 21, 22, 23,
           378, 379, 380, 381, 382, 383, 24, 25, 26, 27, 28, 29, 384, 385,
           386, 387, 388, 389, 30, 31, 32, 33, 34, 35, 390, 391, 392, 393,
           394, 395, 36, 37, 38, 39, 40, 41, 396, 397, 398, 399, 400, 401,
           42, 43, 44, 45, 46, 47, 402, 403, 404, 405, 406, 407, 48, 49,
           50, 51, 52, 53, 408, 409, 410, 411, 412, 413, 54, 55, 56, 57,
           58, 59, 414, 415, 416, 417, 418, 419, 60, 61, 62, 63, 64, 65,
           420, 421, 422, 423, 424, 425, 66, 67, 68, 69, 70, 71, 426, 427,
           428, 429, 430, 431, 72, 73, 74, 75, 76, 77, 432, 433, 434, 435,
           436, 437, 78, 79, 80, 81, 82, 83, 438, 439, 440, 441, 442, 443,
           84, 85, 86, 87, 88, 89, 444, 445, 446, 447, 448, 449, 90, 91,
           92, 93, 94, 95, 450, 451, 452, 453, 454, 455, 96, 97, 98, 99,
           100, 101, 456, 457, 458, 459, 460, 461, 102, 103, 104, 105, 106, 107,
           462, 463, 464, 465, 466, 467, 108, 109, 110, 111, 112, 113, 468, 469,
           470, 471, 472, 473, 114, 115, 116, 117, 118, 119, 474, 475, 476, 477,
           478, 479, 120, 121, 122, 123, 124, 125, 480, 481, 482, 483, 484, 485,
           126, 127, 128, 129, 130, 131, 486, 487, 488, 489, 490, 491, 132, 133,
           134, 135, 136, 137, 492, 493, 494, 495, 496, 497, 138, 139, 140, 141,
           142, 143, 498, 499, 500, 501, 502, 503, 144, 145, 146, 147, 148, 149,
           504, 505, 506, 507, 508, 509, 150, 151, 152, 153, 154, 155, 510, 511,
           512, 513, 514, 515, 156, 157, 158, 159, 160, 161, 516, 517, 518, 519,
           520, 521, 162, 163, 164, 165, 166, 167, 522, 523, 524, 525, 526, 527,
           168, 169, 170, 171, 172, 173, 528, 529, 530, 531, 532, 533, 174, 175,
           176, 177, 178, 179, 534, 535, 536, 537, 538, 539, 180, 181, 182, 183,
           184, 185, 540, 541, 542, 543, 544, 545, 186, 187, 188, 189, 190, 191,
           546, 547, 548, 549, 550, 551, 192, 193, 194, 195, 196, 197, 552, 553,
           554, 555, 556, 557, 198, 199, 200, 201, 202, 203, 558, 559, 560, 561,
           562, 563, 204, 205, 206, 207, 208, 209, 564, 565, 566, 567, 568, 569,
           210, 211, 212, 213, 214, 215, 570, 571, 572, 573, 574, 575, 216, 217,
           218, 219, 220, 221, 576, 577, 578, 579, 580, 581, 222, 223, 224, 225,
           226, 227, 582, 583, 584, 585, 586, 587, 228, 229, 230, 231, 232, 233,
           588, 589, 590, 591, 592, 593, 234, 235, 236, 237, 238, 239, 594, 595,
           596, 597, 598, 599, 240, 241, 242, 243, 244, 245, 600, 601, 602, 603,
           604, 605, 246, 247, 248, 249, 250, 251, 606, 607, 608, 609, 610, 611,
           252, 253, 254, 255, 256, 257, 612, 613, 614, 615, 616, 617, 258, 259,
           260, 261, 262, 263, 618, 619, 620, 621, 622, 623, 264, 265, 266, 267,
           268, 269, 624, 625, 626, 627, 628, 629, 270, 271, 272, 273, 274, 275,
           630, 631, 632, 633, 634, 635, 276, 277, 278, 279, 280, 281, 636, 637,
           638, 639, 640, 641, 282, 283, 284, 285, 286, 287, 642, 643, 644, 645,
           646, 647, 288, 289, 290, 291, 292, 293, 648, 649, 650, 651, 652, 653,
           294, 295, 296, 297, 298, 299, 654, 655, 656, 657, 658, 659, 300, 301,
           302, 303, 304, 305, 660, 661, 662, 663, 664, 665, 306, 307, 308, 309,
           310, 311, 666, 667, 668, 669, 670, 671, 312, 313, 314, 315, 316, 317,
           672, 673, 674, 675, 676, 677, 318, 319, 320, 321, 322, 323, 678, 679,
           680, 681, 682, 683, 324, 325, 326, 327, 328, 329, 684, 685, 686, 687,
           688, 689, 330, 331, 332, 333, 334, 335, 690, 691, 692, 693, 694, 695,
           336, 337, 338, 339, 340, 341, 696, 697, 698, 699, 700, 701, 342, 343,
           344, 345, 346, 347, 702, 703, 704, 705, 706, 707, 348, 349, 350, 351,
           352, 353, 708, 709, 710, 711, 712, 713, 354, 355, 356, 357, 358, 359,
           714, 715, 716, 717, 718, 719}));
}
// 5-D transpose of a row-major iota tensor (values 0..719) of shape
// {2, 3, 4, 5, 6} with permutation {1, 2, 3, 4, 0}: axis 0 (size 2) becomes
// the innermost axis, so the expected output alternates one element from the
// first half (0..359) with its counterpart from the second half (360..719) —
// i.e. pairs (i, i + 360) for i = 0..359. The golden array below encodes
// exactly that interleaving.
// NOTE(review): RunTestPermutation is a file-local helper defined earlier in
// this file — presumably it fills the input with iota and runs the reference
// transpose kernel; confirm against its definition.
TEST(TransposeTest, TestRefOps5D5) {
  EXPECT_THAT(
      RunTestPermutation<float>({2, 3, 4, 5, 6},
                                {1, 2, 3, 4, 0}),
      ElementsAreArray(
          {0, 360, 1, 361, 2, 362, 3, 363, 4, 364, 5, 365, 6, 366,
           7, 367, 8, 368, 9, 369, 10, 370, 11, 371, 12, 372, 13, 373,
           14, 374, 15, 375, 16, 376, 17, 377, 18, 378, 19, 379, 20, 380,
           21, 381, 22, 382, 23, 383, 24, 384, 25, 385, 26, 386, 27, 387,
           28, 388, 29, 389, 30, 390, 31, 391, 32, 392, 33, 393, 34, 394,
           35, 395, 36, 396, 37, 397, 38, 398, 39, 399, 40, 400, 41, 401,
           42, 402, 43, 403, 44, 404, 45, 405, 46, 406, 47, 407, 48, 408,
           49, 409, 50, 410, 51, 411, 52, 412, 53, 413, 54, 414, 55, 415,
           56, 416, 57, 417, 58, 418, 59, 419, 60, 420, 61, 421, 62, 422,
           63, 423, 64, 424, 65, 425, 66, 426, 67, 427, 68, 428, 69, 429,
           70, 430, 71, 431, 72, 432, 73, 433, 74, 434, 75, 435, 76, 436,
           77, 437, 78, 438, 79, 439, 80, 440, 81, 441, 82, 442, 83, 443,
           84, 444, 85, 445, 86, 446, 87, 447, 88, 448, 89, 449, 90, 450,
           91, 451, 92, 452, 93, 453, 94, 454, 95, 455, 96, 456, 97, 457,
           98, 458, 99, 459, 100, 460, 101, 461, 102, 462, 103, 463, 104, 464,
           105, 465, 106, 466, 107, 467, 108, 468, 109, 469, 110, 470, 111, 471,
           112, 472, 113, 473, 114, 474, 115, 475, 116, 476, 117, 477, 118, 478,
           119, 479, 120, 480, 121, 481, 122, 482, 123, 483, 124, 484, 125, 485,
           126, 486, 127, 487, 128, 488, 129, 489, 130, 490, 131, 491, 132, 492,
           133, 493, 134, 494, 135, 495, 136, 496, 137, 497, 138, 498, 139, 499,
           140, 500, 141, 501, 142, 502, 143, 503, 144, 504, 145, 505, 146, 506,
           147, 507, 148, 508, 149, 509, 150, 510, 151, 511, 152, 512, 153, 513,
           154, 514, 155, 515, 156, 516, 157, 517, 158, 518, 159, 519, 160, 520,
           161, 521, 162, 522, 163, 523, 164, 524, 165, 525, 166, 526, 167, 527,
           168, 528, 169, 529, 170, 530, 171, 531, 172, 532, 173, 533, 174, 534,
           175, 535, 176, 536, 177, 537, 178, 538, 179, 539, 180, 540, 181, 541,
           182, 542, 183, 543, 184, 544, 185, 545, 186, 546, 187, 547, 188, 548,
           189, 549, 190, 550, 191, 551, 192, 552, 193, 553, 194, 554, 195, 555,
           196, 556, 197, 557, 198, 558, 199, 559, 200, 560, 201, 561, 202, 562,
           203, 563, 204, 564, 205, 565, 206, 566, 207, 567, 208, 568, 209, 569,
           210, 570, 211, 571, 212, 572, 213, 573, 214, 574, 215, 575, 216, 576,
           217, 577, 218, 578, 219, 579, 220, 580, 221, 581, 222, 582, 223, 583,
           224, 584, 225, 585, 226, 586, 227, 587, 228, 588, 229, 589, 230, 590,
           231, 591, 232, 592, 233, 593, 234, 594, 235, 595, 236, 596, 237, 597,
           238, 598, 239, 599, 240, 600, 241, 601, 242, 602, 243, 603, 244, 604,
           245, 605, 246, 606, 247, 607, 248, 608, 249, 609, 250, 610, 251, 611,
           252, 612, 253, 613, 254, 614, 255, 615, 256, 616, 257, 617, 258, 618,
           259, 619, 260, 620, 261, 621, 262, 622, 263, 623, 264, 624, 265, 625,
           266, 626, 267, 627, 268, 628, 269, 629, 270, 630, 271, 631, 272, 632,
           273, 633, 274, 634, 275, 635, 276, 636, 277, 637, 278, 638, 279, 639,
           280, 640, 281, 641, 282, 642, 283, 643, 284, 644, 285, 645, 286, 646,
           287, 647, 288, 648, 289, 649, 290, 650, 291, 651, 292, 652, 293, 653,
           294, 654, 295, 655, 296, 656, 297, 657, 298, 658, 299, 659, 300, 660,
           301, 661, 302, 662, 303, 663, 304, 664, 305, 665, 306, 666, 307, 667,
           308, 668, 309, 669, 310, 670, 311, 671, 312, 672, 313, 673, 314, 674,
           315, 675, 316, 676, 317, 677, 318, 678, 319, 679, 320, 680, 321, 681,
           322, 682, 323, 683, 324, 684, 325, 685, 326, 686, 327, 687, 328, 688,
           329, 689, 330, 690, 331, 691, 332, 692, 333, 693, 334, 694, 335, 695,
           336, 696, 337, 697, 338, 698, 339, 699, 340, 700, 341, 701, 342, 702,
           343, 703, 344, 704, 345, 705, 346, 706, 347, 707, 348, 708, 349, 709,
           350, 710, 351, 711, 352, 712, 353, 713, 354, 714, 355, 715, 356, 716,
           357, 717, 358, 718, 359, 719}));
}
// 6-D transpose with the identity permutation {0, 1, 2, 3, 4, 5} on a
// {2, 3, 4, 5, 6, 7} tensor (2*3*4*5*6*7 == 5040 elements): the output must
// be the row-major iota input, completely unchanged.
TEST(TransposeTest, TestRefOps6D0) {
  std::vector<float> expected(5040);
  absl::c_iota(expected, 0);
  EXPECT_THAT(RunTestPermutation<float>({2, 3, 4, 5, 6, 7},
                                        {0, 1, 2, 3, 4, 5}),
              ElementsAreArray(expected));
}
TEST(TransposeTest, TestRefOps6D1) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5, 6, 7},
{5, 4, 3, 2, 1, 0}),
ElementsAreArray(
{0, 2520, 840, 3360, 1680, 4200, 210, 2730, 1050, 3570, 1890,
4410, 420, 2940, 1260, 3780, 2100, 4620, 630, 3150, 1470, 3990,
2310, 4830, 42, 2562, 882, 3402, 1722, 4242, 252, 2772, 1092,
3612, 1932, 4452, 462, 2982, 1302, 3822, 2142, 4662, 672, 3192,
1512, 4032, 2352, 4872, 84, 2604, 924, 3444, 1764, 4284, 294,
2814, 1134, 3654, 1974, 4494, 504, 3024, 1344, 3864, 2184, 4704,
714, 3234, 1554, 4074, 2394, 4914, 126, 2646, 966, 3486, 1806,
4326, 336, 2856, 1176, 3696, 2016, 4536, 546, 3066, 1386, 3906,
2226, 4746, 756, 3276, 1596, 4116, 2436, 4956, 168, 2688, 1008,
3528, 1848, 4368, 378, 2898, 1218, 3738, 2058, 4578, 588, 3108,
1428, 3948, 2268, 4788, 798, 3318, 1638, 4158, 2478, 4998, 7,
2527, 847, 3367, 1687, 4207, 217, 2737, 1057, 3577, 1897, 4417,
427, 2947, 1267, 3787, 2107, 4627, 637, 3157, 1477, 3997, 2317,
4837, 49, 2569, 889, 3409, 1729, 4249, 259, 2779, 1099, 3619,
1939, 4459, 469, 2989, 1309, 3829, 2149, 4669, 679, 3199, 1519,
4039, 2359, 4879, 91, 2611, 931, 3451, 1771, 4291, 301, 2821,
1141, 3661, 1981, 4501, 511, 3031, 1351, 3871, 2191, 4711, 721,
3241, 1561, 4081, 2401, 4921, 133, 2653, 973, 3493, 1813, 4333,
343, 2863, 1183, 3703, 2023, 4543, 553, 3073, 1393, 3913, 2233,
4753, 763, 3283, 1603, 4123, 2443, 4963, 175, 2695, 1015, 3535,
1855, 4375, 385, 2905, 1225, 3745, 2065, 4585, 595, 3115, 1435,
3955, 2275, 4795, 805, 3325, 1645, 4165, 2485, 5005, 14, 2534,
854, 3374, 1694, 4214, 224, 2744, 1064, 3584, 1904, 4424, 434,
2954, 1274, 3794, 2114, 4634, 644, 3164, 1484, 4004, 2324, 4844,
56, 2576, 896, 3416, 1736, 4256, 266, 2786, 1106, 3626, 1946,
4466, 476, 2996, 1316, 3836, 2156, 4676, 686, 3206, 1526, 4046,
2366, 4886, 98, 2618, 938, 3458, 1778, 4298, 308, 2828, 1148,
3668, 1988, 4508, 518, 3038, 1358, 3878, 2198, 4718, 728, 3248,
1568, 4088, 2408, 4928, 140, 2660, 980, 3500, 1820, 4340, 350,
2870, 1190, 3710, 2030, 4550, 560, 3080, 1400, 3920, 2240, 4760,
770, 3290, 1610, 4130, 2450, 4970, 182, 2702, 1022, 3542, 1862,
4382, 392, 2912, 1232, 3752, 2072, 4592, 602, 3122, 1442, 3962,
2282, 4802, 812, 3332, 1652, 4172, 2492, 5012, 21, 2541, 861,
3381, 1701, 4221, 231, 2751, 1071, 3591, 1911, 4431, 441, 2961,
1281, 3801, 2121, 4641, 651, 3171, 1491, 4011, 2331, 4851, 63,
2583, 903, 3423, 1743, 4263, 273, 2793, 1113, 3633, 1953, 4473,
483, 3003, 1323, 3843, 2163, 4683, 693, 3213, 1533, 4053, 2373,
4893, 105, 2625, 945, 3465, 1785, 4305, 315, 2835, 1155, 3675,
1995, 4515, 525, 3045, 1365, 3885, 2205, 4725, 735, 3255, 1575,
4095, 2415, 4935, 147, 2667, 987, 3507, 1827, 4347, 357, 2877,
1197, 3717, 2037, 4557, 567, 3087, 1407, 3927, 2247, 4767, 777,
3297, 1617, 4137, 2457, 4977, 189, 2709, 1029, 3549, 1869, 4389,
399, 2919, 1239, 3759, 2079, 4599, 609, 3129, 1449, 3969, 2289,
4809, 819, 3339, 1659, 4179, 2499, 5019, 28, 2548, 868, 3388,
1708, 4228, 238, 2758, 1078, 3598, 1918, 4438, 448, 2968, 1288,
3808, 2128, 4648, 658, 3178, 1498, 4018, 2338, 4858, 70, 2590,
910, 3430, 1750, 4270, 280, 2800, 1120, 3640, 1960, 4480, 490,
3010, 1330, 3850, 2170, 4690, 700, 3220, 1540, 4060, 2380, 4900,
112, 2632, 952, 3472, 1792, 4312, 322, 2842, 1162, 3682, 2002,
4522, 532, 3052, 1372, 3892, 2212, 4732, 742, 3262, 1582, 4102,
2422, 4942, 154, 2674, 994, 3514, 1834, 4354, 364, 2884, 1204,
3724, 2044, 4564, 574, 3094, 1414, 3934, 2254, 4774, 784, 3304,
1624, 4144, 2464, 4984, 196, 2716, 1036, 3556, 1876, 4396, 406,
2926, 1246, 3766, 2086, 4606, 616, 3136, 1456, 3976, 2296, 4816,
826, 3346, 1666, 4186, 2506, 5026, 35, 2555, 875, 3395, 1715,
4235, 245, 2765, 1085, 3605, 1925, 4445, 455, 2975, 1295, 3815,
2135, 4655, 665, 3185, 1505, 4025, 2345, 4865, 77, 2597, 917,
3437, 1757, 4277, 287, 2807, 1127, 3647, 1967, 4487, 497, 3017,
1337, 3857, 2177, 4697, 707, 3227, 1547, 4067, 2387, 4907, 119,
2639, 959, 3479, 1799, 4319, 329, 2849, 1169, 3689, 2009, 4529,
539, 3059, 1379, 3899, 2219, 4739, 749, 3269, 1589, 4109, 2429,
4949, 161, 2681, 1001, 3521, 1841, 4361, 371, 2891, 1211, 3731,
2051, 4571, 581, 3101, 1421, 3941, 2261, 4781, 791, 3311, 1631,
4151, 2471, 4991, 203, 2723, 1043, 3563, 1883, 4403, 413, 2933,
1253, 3773, 2093, 4613, 623, 3143, 1463, 3983, 2303, 4823, 833,
3353, 1673, 4193, 2513, 5033, 1, 2521, 841, 3361, 1681, 4201,
211, 2731, 1051, 3571, 1891, 4411, 421, 2941, 1261, 3781, 2101,
4621, 631, 3151, 1471, 3991, 2311, 4831, 43, 2563, 883, 3403,
1723, 4243, 253, 2773, 1093, 3613, 1933, 4453, 463, 2983, 1303,
3823, 2143, 4663, 673, 3193, 1513, 4033, 2353, 4873, 85, 2605,
925, 3445, 1765, 4285, 295, 2815, 1135, 3655, 1975, 4495, 505,
3025, 1345, 3865, 2185, 4705, 715, 3235, 1555, 4075, 2395, 4915,
127, 2647, 967, 3487, 1807, 4327, 337, 2857, 1177, 3697, 2017,
4537, 547, 3067, 1387, 3907, 2227, 4747, 757, 3277, 1597, 4117,
2437, 4957, 169, 2689, 1009, 3529, 1849, 4369, 379, 2899, 1219,
3739, 2059, 4579, 589, 3109, 1429, 3949, 2269, 4789, 799, 3319,
1639, 4159, 2479, 4999, 8, 2528, 848, 3368, 1688, 4208, 218,
2738, 1058, 3578, 1898, 4418, 428, 2948, 1268, 3788, 2108, 4628,
638, 3158, 1478, 3998, 2318, 4838, 50, 2570, 890, 3410, 1730,
4250, 260, 2780, 1100, 3620, 1940, 4460, 470, 2990, 1310, 3830,
2150, 4670, 680, 3200, 1520, 4040, 2360, 4880, 92, 2612, 932,
3452, 1772, 4292, 302, 2822, 1142, 3662, 1982, 4502, 512, 3032,
1352, 3872, 2192, 4712, 722, 3242, 1562, 4082, 2402, 4922, 134,
2654, 974, 3494, 1814, 4334, 344, 2864, 1184, 3704, 2024, 4544,
554, 3074, 1394, 3914, 2234, 4754, 764, 3284, 1604, 4124, 2444,
4964, 176, 2696, 1016, 3536, 1856, 4376, 386, 2906, 1226, 3746,
2066, 4586, 596, 3116, 1436, 3956, 2276, 4796, 806, 3326, 1646,
4166, 2486, 5006, 15, 2535, 855, 3375, 1695, 4215, 225, 2745,
1065, 3585, 1905, 4425, 435, 2955, 1275, 3795, 2115, 4635, 645,
3165, 1485, 4005, 2325, 4845, 57, 2577, 897, 3417, 1737, 4257,
267, 2787, 1107, 3627, 1947, 4467, 477, 2997, 1317, 3837, 2157,
4677, 687, 3207, 1527, 4047, 2367, 4887, 99, 2619, 939, 3459,
1779, 4299, 309, 2829, 1149, 3669, 1989, 4509, 519, 3039, 1359,
3879, 2199, 4719, 729, 3249, 1569, 4089, 2409, 4929, 141, 2661,
981, 3501, 1821, 4341, 351, 2871, 1191, 3711, 2031, 4551, 561,
3081, 1401, 3921, 2241, 4761, 771, 3291, 1611, 4131, 2451, 4971,
183, 2703, 1023, 3543, 1863, 4383, 393, 2913, 1233, 3753, 2073,
4593, 603, 3123, 1443, 3963, 2283, 4803, 813, 3333, 1653, 4173,
2493, 5013, 22, 2542, 862, 3382, 1702, 4222, 232, 2752, 1072,
3592, 1912, 4432, 442, 2962, 1282, 3802, 2122, 4642, 652, 3172,
1492, 4012, 2332, 4852, 64, 2584, 904, 3424, 1744, 4264, 274,
2794, 1114, 3634, 1954, 4474, 484, 3004, 1324, 3844, 2164, 4684,
694, 3214, 1534, 4054, 2374, 4894, 106, 2626, 946, 3466, 1786,
4306, 316, 2836, 1156, 3676, 1996, 4516, 526, 3046, 1366, 3886,
2206, 4726, 736, 3256, 1576, 4096, 2416, 4936, 148, 2668, 988,
3508, 1828, 4348, 358, 2878, 1198, 3718, 2038, 4558, 568, 3088,
1408, 3928, 2248, 4768, 778, 3298, 1618, 4138, 2458, 4978, 190,
2710, 1030, 3550, 1870, 4390, 400, 2920, 1240, 3760, 2080, 4600,
610, 3130, 1450, 3970, 2290, 4810, 820, 3340, 1660, 4180, 2500,
5020, 29, 2549, 869, 3389, 1709, 4229, 239, 2759, 1079, 3599,
1919, 4439, 449, 2969, 1289, 3809, 2129, 4649, 659, 3179, 1499,
4019, 2339, 4859, 71, 2591, 911, 3431, 1751, 4271, 281, 2801,
1121, 3641, 1961, 4481, 491, 3011, 1331, 3851, 2171, 4691, 701,
3221, 1541, 4061, 2381, 4901, 113, 2633, 953, 3473, 1793, 4313,
323, 2843, 1163, 3683, 2003, 4523, 533, 3053, 1373, 3893, 2213,
4733, 743, 3263, 1583, 4103, 2423, 4943, 155, 2675, 995, 3515,
1835, 4355, 365, 2885, 1205, 3725, 2045, 4565, 575, 3095, 1415,
3935, 2255, 4775, 785, 3305, 1625, 4145, 2465, 4985, 197, 2717,
1037, 3557, 1877, 4397, 407, 2927, 1247, 3767, 2087, 4607, 617,
3137, 1457, 3977, 2297, 4817, 827, 3347, 1667, 4187, 2507, 5027,
36, 2556, 876, 3396, 1716, 4236, 246, 2766, 1086, 3606, 1926,
4446, 456, 2976, 1296, 3816, 2136, 4656, 666, 3186, 1506, 4026,
2346, 4866, 78, 2598, 918, 3438, 1758, 4278, 288, 2808, 1128,
3648, 1968, 4488, 498, 3018, 1338, 3858, 2178, 4698, 708, 3228,
1548, 4068, 2388, 4908, 120, 2640, 960, 3480, 1800, 4320, 330,
2850, 1170, 3690, 2010, 4530, 540, 3060, 1380, 3900, 2220, 4740,
750, 3270, 1590, 4110, 2430, 4950, 162, 2682, 1002, 3522, 1842,
4362, 372, 2892, 1212, 3732, 2052, 4572, 582, 3102, 1422, 3942,
2262, 4782, 792, 3312, 1632, 4152, 2472, 4992, 204, 2724, 1044,
3564, 1884, 4404, 414, 2934, 1254, 3774, 2094, 4614, 624, 3144,
1464, 3984, 2304, 4824, 834, 3354, 1674, 4194, 2514, 5034, 2,
2522, 842, 3362, 1682, 4202, 212, 2732, 1052, 3572, 1892, 4412,
422, 2942, 1262, 3782, 2102, 4622, 632, 3152, 1472, 3992, 2312,
4832, 44, 2564, 884, 3404, 1724, 4244, 254, 2774, 1094, 3614,
1934, 4454, 464, 2984, 1304, 3824, 2144, 4664, 674, 3194, 1514,
4034, 2354, 4874, 86, 2606, 926, 3446, 1766, 4286, 296, 2816,
1136, 3656, 1976, 4496, 506, 3026, 1346, 3866, 2186, 4706, 716,
3236, 1556, 4076, 2396, 4916, 128, 2648, 968, 3488, 1808, 4328,
338, 2858, 1178, 3698, 2018, 4538, 548, 3068, 1388, 3908, 2228,
4748, 758, 3278, 1598, 4118, 2438, 4958, 170, 2690, 1010, 3530,
1850, 4370, 380, 2900, 1220, 3740, 2060, 4580, 590, 3110, 1430,
3950, 2270, 4790, 800, 3320, 1640, 4160, 2480, 5000, 9, 2529,
849, 3369, 1689, 4209, 219, 2739, 1059, 3579, 1899, 4419, 429,
2949, 1269, 3789, 2109, 4629, 639, 3159, 1479, 3999, 2319, 4839,
51, 2571, 891, 3411, 1731, 4251, 261, 2781, 1101, 3621, 1941,
4461, 471, 2991, 1311, 3831, 2151, 4671, 681, 3201, 1521, 4041,
2361, 4881, 93, 2613, 933, 3453, 1773, 4293, 303, 2823, 1143,
3663, 1983, 4503, 513, 3033, 1353, 3873, 2193, 4713, 723, 3243,
1563, 4083, 2403, 4923, 135, 2655, 975, 3495, 1815, 4335, 345,
2865, 1185, 3705, 2025, 4545, 555, 3075, 1395, 3915, 2235, 4755,
765, 3285, 1605, 4125, 2445, 4965, 177, 2697, 1017, 3537, 1857,
4377, 387, 2907, 1227, 3747, 2067, 4587, 597, 3117, 1437, 3957,
2277, 4797, 807, 3327, 1647, 4167, 2487, 5007, 16, 2536, 856,
3376, 1696, 4216, 226, 2746, 1066, 3586, 1906, 4426, 436, 2956,
1276, 3796, 2116, 4636, 646, 3166, 1486, 4006, 2326, 4846, 58,
2578, 898, 3418, 1738, 4258, 268, 2788, 1108, 3628, 1948, 4468,
478, 2998, 1318, 3838, 2158, 4678, 688, 3208, 1528, 4048, 2368,
4888, 100, 2620, 940, 3460, 1780, 4300, 310, 2830, 1150, 3670,
1990, 4510, 520, 3040, 1360, 3880, 2200, 4720, 730, 3250, 1570,
4090, 2410, 4930, 142, 2662, 982, 3502, 1822, 4342, 352, 2872,
1192, 3712, 2032, 4552, 562, 3082, 1402, 3922, 2242, 4762, 772,
3292, 1612, 4132, 2452, 4972, 184, 2704, 1024, 3544, 1864, 4384,
394, 2914, 1234, 3754, 2074, 4594, 604, 3124, 1444, 3964, 2284,
4804, 814, 3334, 1654, 4174, 2494, 5014, 23, 2543, 863, 3383,
1703, 4223, 233, 2753, 1073, 3593, 1913, 4433, 443, 2963, 1283,
3803, 2123, 4643, 653, 3173, 1493, 4013, 2333, 4853, 65, 2585,
905, 3425, 1745, 4265, 275, 2795, 1115, 3635, 1955, 4475, 485,
3005, 1325, 3845, 2165, 4685, 695, 3215, 1535, 4055, 2375, 4895,
107, 2627, 947, 3467, 1787, 4307, 317, 2837, 1157, 3677, 1997,
4517, 527, 3047, 1367, 3887, 2207, 4727, 737, 3257, 1577, 4097,
2417, 4937, 149, 2669, 989, 3509, 1829, 4349, 359, 2879, 1199,
3719, 2039, 4559, 569, 3089, 1409, 3929, 2249, 4769, 779, 3299,
1619, 4139, 2459, 4979, 191, 2711, 1031, 3551, 1871, 4391, 401,
2921, 1241, 3761, 2081, 4601, 611, 3131, 1451, 3971, 2291, 4811,
821, 3341, 1661, 4181, 2501, 5021, 30, 2550, 870, 3390, 1710,
4230, 240, 2760, 1080, 3600, 1920, 4440, 450, 2970, 1290, 3810,
2130, 4650, 660, 3180, 1500, 4020, 2340, 4860, 72, 2592, 912,
3432, 1752, 4272, 282, 2802, 1122, 3642, 1962, 4482, 492, 3012,
1332, 3852, 2172, 4692, 702, 3222, 1542, 4062, 2382, 4902, 114,
2634, 954, 3474, 1794, 4314, 324, 2844, 1164, 3684, 2004, 4524,
534, 3054, 1374, 3894, 2214, 4734, 744, 3264, 1584, 4104, 2424,
4944, 156, 2676, 996, 3516, 1836, 4356, 366, 2886, 1206, 3726,
2046, 4566, 576, 3096, 1416, 3936, 2256, 4776, 786, 3306, 1626,
4146, 2466, 4986, 198, 2718, 1038, 3558, 1878, 4398, 408, 2928,
1248, 3768, 2088, 4608, 618, 3138, 1458, 3978, 2298, 4818, 828,
3348, 1668, 4188, 2508, 5028, 37, 2557, 877, 3397, 1717, 4237,
247, 2767, 1087, 3607, 1927, 4447, 457, 2977, 1297, 3817, 2137,
4657, 667, 3187, 1507, 4027, 2347, 4867, 79, 2599, 919, 3439,
1759, 4279, 289, 2809, 1129, 3649, 1969, 4489, 499, 3019, 1339,
3859, 2179, 4699, 709, 3229, 1549, 4069, 2389, 4909, 121, 2641,
961, 3481, 1801, 4321, 331, 2851, 1171, 3691, 2011, 4531, 541,
3061, 1381, 3901, 2221, 4741, 751, 3271, 1591, 4111, 2431, 4951,
163, 2683, 1003, 3523, 1843, 4363, 373, 2893, 1213, 3733, 2053,
4573, 583, 3103, 1423, 3943, 2263, 4783, 793, 3313, 1633, 4153,
2473, 4993, 205, 2725, 1045, 3565, 1885, 4405, 415, 2935, 1255,
3775, 2095, 4615, 625, 3145, 1465, 3985, 2305, 4825, 835, 3355,
1675, 4195, 2515, 5035, 3, 2523, 843, 3363, 1683, 4203, 213,
2733, 1053, 3573, 1893, 4413, 423, 2943, 1263, 3783, 2103, 4623,
633, 3153, 1473, 3993, 2313, 4833, 45, 2565, 885, 3405, 1725,
4245, 255, 2775, 1095, 3615, 1935, 4455, 465, 2985, 1305, 3825,
2145, 4665, 675, 3195, 1515, 4035, 2355, 4875, 87, 2607, 927,
3447, 1767, 4287, 297, 2817, 1137, 3657, 1977, 4497, 507, 3027,
1347, 3867, 2187, 4707, 717, 3237, 1557, 4077, 2397, 4917, 129,
2649, 969, 3489, 1809, 4329, 339, 2859, 1179, 3699, 2019, 4539,
549, 3069, 1389, 3909, 2229, 4749, 759, 3279, 1599, 4119, 2439,
4959, 171, 2691, 1011, 3531, 1851, 4371, 381, 2901, 1221, 3741,
2061, 4581, 591, 3111, 1431, 3951, 2271, 4791, 801, 3321, 1641,
4161, 2481, 5001, 10, 2530, 850, 3370, 1690, 4210, 220, 2740,
1060, 3580, 1900, 4420, 430, 2950, 1270, 3790, 2110, 4630, 640,
3160, 1480, 4000, 2320, 4840, 52, 2572, 892, 3412, 1732, 4252,
262, 2782, 1102, 3622, 1942, 4462, 472, 2992, 1312, 3832, 2152,
4672, 682, 3202, 1522, 4042, 2362, 4882, 94, 2614, 934, 3454,
1774, 4294, 304, 2824, 1144, 3664, 1984, 4504, 514, 3034, 1354,
3874, 2194, 4714, 724, 3244, 1564, 4084, 2404, 4924, 136, 2656,
976, 3496, 1816, 4336, 346, 2866, 1186, 3706, 2026, 4546, 556,
3076, 1396, 3916, 2236, 4756, 766, 3286, 1606, 4126, 2446, 4966,
178, 2698, 1018, 3538, 1858, 4378, 388, 2908, 1228, 3748, 2068,
4588, 598, 3118, 1438, 3958, 2278, 4798, 808, 3328, 1648, 4168,
2488, 5008, 17, 2537, 857, 3377, 1697, 4217, 227, 2747, 1067,
3587, 1907, 4427, 437, 2957, 1277, 3797, 2117, 4637, 647, 3167,
1487, 4007, 2327, 4847, 59, 2579, 899, 3419, 1739, 4259, 269,
2789, 1109, 3629, 1949, 4469, 479, 2999, 1319, 3839, 2159, 4679,
689, 3209, 1529, 4049, 2369, 4889, 101, 2621, 941, 3461, 1781,
4301, 311, 2831, 1151, 3671, 1991, 4511, 521, 3041, 1361, 3881,
2201, 4721, 731, 3251, 1571, 4091, 2411, 4931, 143, 2663, 983,
3503, 1823, 4343, 353, 2873, 1193, 3713, 2033, 4553, 563, 3083,
1403, 3923, 2243, 4763, 773, 3293, 1613, 4133, 2453, 4973, 185,
2705, 1025, 3545, 1865, 4385, 395, 2915, 1235, 3755, 2075, 4595,
605, 3125, 1445, 3965, 2285, 4805, 815, 3335, 1655, 4175, 2495,
5015, 24, 2544, 864, 3384, 1704, 4224, 234, 2754, 1074, 3594,
1914, 4434, 444, 2964, 1284, 3804, 2124, 4644, 654, 3174, 1494,
4014, 2334, 4854, 66, 2586, 906, 3426, 1746, 4266, 276, 2796,
1116, 3636, 1956, 4476, 486, 3006, 1326, 3846, 2166, 4686, 696,
3216, 1536, 4056, 2376, 4896, 108, 2628, 948, 3468, 1788, 4308,
318, 2838, 1158, 3678, 1998, 4518, 528, 3048, 1368, 3888, 2208,
4728, 738, 3258, 1578, 4098, 2418, 4938, 150, 2670, 990, 3510,
1830, 4350, 360, 2880, 1200, 3720, 2040, 4560, 570, 3090, 1410,
3930, 2250, 4770, 780, 3300, 1620, 4140, 2460, 4980, 192, 2712,
1032, 3552, 1872, 4392, 402, 2922, 1242, 3762, 2082, 4602, 612,
3132, 1452, 3972, 2292, 4812, 822, 3342, 1662, 4182, 2502, 5022,
31, 2551, 871, 3391, 1711, 4231, 241, 2761, 1081, 3601, 1921,
4441, 451, 2971, 1291, 3811, 2131, 4651, 661, 3181, 1501, 4021,
2341, 4861, 73, 2593, 913, 3433, 1753, 4273, 283, 2803, 1123,
3643, 1963, 4483, 493, 3013, 1333, 3853, 2173, 4693, 703, 3223,
1543, 4063, 2383, 4903, 115, 2635, 955, 3475, 1795, 4315, 325,
2845, 1165, 3685, 2005, 4525, 535, 3055, 1375, 3895, 2215, 4735,
745, 3265, 1585, 4105, 2425, 4945, 157, 2677, 997, 3517, 1837,
4357, 367, 2887, 1207, 3727, 2047, 4567, 577, 3097, 1417, 3937,
2257, 4777, 787, 3307, 1627, 4147, 2467, 4987, 199, 2719, 1039,
3559, 1879, 4399, 409, 2929, 1249, 3769, 2089, 4609, 619, 3139,
1459, 3979, 2299, 4819, 829, 3349, 1669, 4189, 2509, 5029, 38,
2558, 878, 3398, 1718, 4238, 248, 2768, 1088, 3608, 1928, 4448,
458, 2978, 1298, 3818, 2138, 4658, 668, 3188, 1508, 4028, 2348,
4868, 80, 2600, 920, 3440, 1760, 4280, 290, 2810, 1130, 3650,
1970, 4490, 500, 3020, 1340, 3860, 2180, 4700, 710, 3230, 1550,
4070, 2390, 4910, 122, 2642, 962, 3482, 1802, 4322, 332, 2852,
1172, 3692, 2012, 4532, 542, 3062, 1382, 3902, 2222, 4742, 752,
3272, 1592, 4112, 2432, 4952, 164, 2684, 1004, 3524, 1844, 4364,
374, 2894, 1214, 3734, 2054, 4574, 584, 3104, 1424, 3944, 2264,
4784, 794, 3314, 1634, 4154, 2474, 4994, 206, 2726, 1046, 3566,
1886, 4406, 416, 2936, 1256, 3776, 2096, 4616, 626, 3146, 1466,
3986, 2306, 4826, 836, 3356, 1676, 4196, 2516, 5036, 4, 2524,
844, 3364, 1684, 4204, 214, 2734, 1054, 3574, 1894, 4414, 424,
2944, 1264, 3784, 2104, 4624, 634, 3154, 1474, 3994, 2314, 4834,
46, 2566, 886, 3406, 1726, 4246, 256, 2776, 1096, 3616, 1936,
4456, 466, 2986, 1306, 3826, 2146, 4666, 676, 3196, 1516, 4036,
2356, 4876, 88, 2608, 928, 3448, 1768, 4288, 298, 2818, 1138,
3658, 1978, 4498, 508, 3028, 1348, 3868, 2188, 4708, 718, 3238,
1558, 4078, 2398, 4918, 130, 2650, 970, 3490, 1810, 4330, 340,
2860, 1180, 3700, 2020, 4540, 550, 3070, 1390, 3910, 2230, 4750,
760, 3280, 1600, 4120, 2440, 4960, 172, 2692, 1012, 3532, 1852,
4372, 382, 2902, 1222, 3742, 2062, 4582, 592, 3112, 1432, 3952,
2272, 4792, 802, 3322, 1642, 4162, 2482, 5002, 11, 2531, 851,
3371, 1691, 4211, 221, 2741, 1061, 3581, 1901, 4421, 431, 2951,
1271, 3791, 2111, 4631, 641, 3161, 1481, 4001, 2321, 4841, 53,
2573, 893, 3413, 1733, 4253, 263, 2783, 1103, 3623, 1943, 4463,
473, 2993, 1313, 3833, 2153, 4673, 683, 3203, 1523, 4043, 2363,
4883, 95, 2615, 935, 3455, 1775, 4295, 305, 2825, 1145, 3665,
1985, 4505, 515, 3035, 1355, 3875, 2195, 4715, 725, 3245, 1565,
4085, 2405, 4925, 137, 2657, 977, 3497, 1817, 4337, 347, 2867,
1187, 3707, 2027, 4547, 557, 3077, 1397, 3917, 2237, 4757, 767,
3287, 1607, 4127, 2447, 4967, 179, 2699, 1019, 3539, 1859, 4379,
389, 2909, 1229, 3749, 2069, 4589, 599, 3119, 1439, 3959, 2279,
4799, 809, 3329, 1649, 4169, 2489, 5009, 18, 2538, 858, 3378,
1698, 4218, 228, 2748, 1068, 3588, 1908, 4428, 438, 2958, 1278,
3798, 2118, 4638, 648, 3168, 1488, 4008, 2328, 4848, 60, 2580,
900, 3420, 1740, 4260, 270, 2790, 1110, 3630, 1950, 4470, 480,
3000, 1320, 3840, 2160, 4680, 690, 3210, 1530, 4050, 2370, 4890,
102, 2622, 942, 3462, 1782, 4302, 312, 2832, 1152, 3672, 1992,
4512, 522, 3042, 1362, 3882, 2202, 4722, 732, 3252, 1572, 4092,
2412, 4932, 144, 2664, 984, 3504, 1824, 4344, 354, 2874, 1194,
3714, 2034, 4554, 564, 3084, 1404, 3924, 2244, 4764, 774, 3294,
1614, 4134, 2454, 4974, 186, 2706, 1026, 3546, 1866, 4386, 396,
2916, 1236, 3756, 2076, 4596, 606, 3126, 1446, 3966, 2286, 4806,
816, 3336, 1656, 4176, 2496, 5016, 25, 2545, 865, 3385, 1705,
4225, 235, 2755, 1075, 3595, 1915, 4435, 445, 2965, 1285, 3805,
2125, 4645, 655, 3175, 1495, 4015, 2335, 4855, 67, 2587, 907,
3427, 1747, 4267, 277, 2797, 1117, 3637, 1957, 4477, 487, 3007,
1327, 3847, 2167, 4687, 697, 3217, 1537, 4057, 2377, 4897, 109,
2629, 949, 3469, 1789, 4309, 319, 2839, 1159, 3679, 1999, 4519,
529, 3049, 1369, 3889, 2209, 4729, 739, 3259, 1579, 4099, 2419,
4939, 151, 2671, 991, 3511, 1831, 4351, 361, 2881, 1201, 3721,
2041, 4561, 571, 3091, 1411, 3931, 2251, 4771, 781, 3301, 1621,
4141, 2461, 4981, 193, 2713, 1033, 3553, 1873, 4393, 403, 2923,
1243, 3763, 2083, 4603, 613, 3133, 1453, 3973, 2293, 4813, 823,
3343, 1663, 4183, 2503, 5023, 32, 2552, 872, 3392, 1712, 4232,
242, 2762, 1082, 3602, 1922, 4442, 452, 2972, 1292, 3812, 2132,
4652, 662, 3182, 1502, 4022, 2342, 4862, 74, 2594, 914, 3434,
1754, 4274, 284, 2804, 1124, 3644, 1964, 4484, 494, 3014, 1334,
3854, 2174, 4694, 704, 3224, 1544, 4064, 2384, 4904, 116, 2636,
956, 3476, 1796, 4316, 326, 2846, 1166, 3686, 2006, 4526, 536,
3056, 1376, 3896, 2216, 4736, 746, 3266, 1586, 4106, 2426, 4946,
158, 2678, 998, 3518, 1838, 4358, 368, 2888, 1208, 3728, 2048,
4568, 578, 3098, 1418, 3938, 2258, 4778, 788, 3308, 1628, 4148,
2468, 4988, 200, 2720, 1040, 3560, 1880, 4400, 410, 2930, 1250,
3770, 2090, 4610, 620, 3140, 1460, 3980, 2300, 4820, 830, 3350,
1670, 4190, 2510, 5030, 39, 2559, 879, 3399, 1719, 4239, 249,
2769, 1089, 3609, 1929, 4449, 459, 2979, 1299, 3819, 2139, 4659,
669, 3189, 1509, 4029, 2349, 4869, 81, 2601, 921, 3441, 1761,
4281, 291, 2811, 1131, 3651, 1971, 4491, 501, 3021, 1341, 3861,
2181, 4701, 711, 3231, 1551, 4071, 2391, 4911, 123, 2643, 963,
3483, 1803, 4323, 333, 2853, 1173, 3693, 2013, 4533, 543, 3063,
1383, 3903, 2223, 4743, 753, 3273, 1593, 4113, 2433, 4953, 165,
2685, 1005, 3525, 1845, 4365, 375, 2895, 1215, 3735, 2055, 4575,
585, 3105, 1425, 3945, 2265, 4785, 795, 3315, 1635, 4155, 2475,
4995, 207, 2727, 1047, 3567, 1887, 4407, 417, 2937, 1257, 3777,
2097, 4617, 627, 3147, 1467, 3987, 2307, 4827, 837, 3357, 1677,
4197, 2517, 5037, 5, 2525, 845, 3365, 1685, 4205, 215, 2735,
1055, 3575, 1895, 4415, 425, 2945, 1265, 3785, 2105, 4625, 635,
3155, 1475, 3995, 2315, 4835, 47, 2567, 887, 3407, 1727, 4247,
257, 2777, 1097, 3617, 1937, 4457, 467, 2987, 1307, 3827, 2147,
4667, 677, 3197, 1517, 4037, 2357, 4877, 89, 2609, 929, 3449,
1769, 4289, 299, 2819, 1139, 3659, 1979, 4499, 509, 3029, 1349,
3869, 2189, 4709, 719, 3239, 1559, 4079, 2399, 4919, 131, 2651,
971, 3491, 1811, 4331, 341, 2861, 1181, 3701, 2021, 4541, 551,
3071, 1391, 3911, 2231, 4751, 761, 3281, 1601, 4121, 2441, 4961,
173, 2693, 1013, 3533, 1853, 4373, 383, 2903, 1223, 3743, 2063,
4583, 593, 3113, 1433, 3953, 2273, 4793, 803, 3323, 1643, 4163,
2483, 5003, 12, 2532, 852, 3372, 1692, 4212, 222, 2742, 1062,
3582, 1902, 4422, 432, 2952, 1272, 3792, 2112, 4632, 642, 3162,
1482, 4002, 2322, 4842, 54, 2574, 894, 3414, 1734, 4254, 264,
2784, 1104, 3624, 1944, 4464, 474, 2994, 1314, 3834, 2154, 4674,
684, 3204, 1524, 4044, 2364, 4884, 96, 2616, 936, 3456, 1776,
4296, 306, 2826, 1146, 3666, 1986, 4506, 516, 3036, 1356, 3876,
2196, 4716, 726, 3246, 1566, 4086, 2406, 4926, 138, 2658, 978,
3498, 1818, 4338, 348, 2868, 1188, 3708, 2028, 4548, 558, 3078,
1398, 3918, 2238, 4758, 768, 3288, 1608, 4128, 2448, 4968, 180,
2700, 1020, 3540, 1860, 4380, 390, 2910, 1230, 3750, 2070, 4590,
600, 3120, 1440, 3960, 2280, 4800, 810, 3330, 1650, 4170, 2490,
5010, 19, 2539, 859, 3379, 1699, 4219, 229, 2749, 1069, 3589,
1909, 4429, 439, 2959, 1279, 3799, 2119, 4639, 649, 3169, 1489,
4009, 2329, 4849, 61, 2581, 901, 3421, 1741, 4261, 271, 2791,
1111, 3631, 1951, 4471, 481, 3001, 1321, 3841, 2161, 4681, 691,
3211, 1531, 4051, 2371, 4891, 103, 2623, 943, 3463, 1783, 4303,
313, 2833, 1153, 3673, 1993, 4513, 523, 3043, 1363, 3883, 2203,
4723, 733, 3253, 1573, 4093, 2413, 4933, 145, 2665, 985, 3505,
1825, 4345, 355, 2875, 1195, 3715, 2035, 4555, 565, 3085, 1405,
3925, 2245, 4765, 775, 3295, 1615, 4135, 2455, 4975, 187, 2707,
1027, 3547, 1867, 4387, 397, 2917, 1237, 3757, 2077, 4597, 607,
3127, 1447, 3967, 2287, 4807, 817, 3337, 1657, 4177, 2497, 5017,
26, 2546, 866, 3386, 1706, 4226, 236, 2756, 1076, 3596, 1916,
4436, 446, 2966, 1286, 3806, 2126, 4646, 656, 3176, 1496, 4016,
2336, 4856, 68, 2588, 908, 3428, 1748, 4268, 278, 2798, 1118,
3638, 1958, 4478, 488, 3008, 1328, 3848, 2168, 4688, 698, 3218,
1538, 4058, 2378, 4898, 110, 2630, 950, 3470, 1790, 4310, 320,
2840, 1160, 3680, 2000, 4520, 530, 3050, 1370, 3890, 2210, 4730,
740, 3260, 1580, 4100, 2420, 4940, 152, 2672, 992, 3512, 1832,
4352, 362, 2882, 1202, 3722, 2042, 4562, 572, 3092, 1412, 3932,
2252, 4772, 782, 3302, 1622, 4142, 2462, 4982, 194, 2714, 1034,
3554, 1874, 4394, 404, 2924, 1244, 3764, 2084, 4604, 614, 3134,
1454, 3974, 2294, 4814, 824, 3344, 1664, 4184, 2504, 5024, 33,
2553, 873, 3393, 1713, 4233, 243, 2763, 1083, 3603, 1923, 4443,
453, 2973, 1293, 3813, 2133, 4653, 663, 3183, 1503, 4023, 2343,
4863, 75, 2595, 915, 3435, 1755, 4275, 285, 2805, 1125, 3645,
1965, 4485, 495, 3015, 1335, 3855, 2175, 4695, 705, 3225, 1545,
4065, 2385, 4905, 117, 2637, 957, 3477, 1797, 4317, 327, 2847,
1167, 3687, 2007, 4527, 537, 3057, 1377, 3897, 2217, 4737, 747,
3267, 1587, 4107, 2427, 4947, 159, 2679, 999, 3519, 1839, 4359,
369, 2889, 1209, 3729, 2049, 4569, 579, 3099, 1419, 3939, 2259,
4779, 789, 3309, 1629, 4149, 2469, 4989, 201, 2721, 1041, 3561,
1881, 4401, 411, 2931, 1251, 3771, 2091, 4611, 621, 3141, 1461,
3981, 2301, 4821, 831, 3351, 1671, 4191, 2511, 5031, 40, 2560,
880, 3400, 1720, 4240, 250, 2770, 1090, 3610, 1930, 4450, 460,
2980, 1300, 3820, 2140, 4660, 670, 3190, 1510, 4030, 2350, 4870,
82, 2602, 922, 3442, 1762, 4282, 292, 2812, 1132, 3652, 1972,
4492, 502, 3022, 1342, 3862, 2182, 4702, 712, 3232, 1552, 4072,
2392, 4912, 124, 2644, 964, 3484, 1804, 4324, 334, 2854, 1174,
3694, 2014, 4534, 544, 3064, 1384, 3904, 2224, 4744, 754, 3274,
1594, 4114, 2434, 4954, 166, 2686, 1006, 3526, 1846, 4366, 376,
2896, 1216, 3736, 2056, 4576, 586, 3106, 1426, 3946, 2266, 4786,
796, 3316, 1636, 4156, 2476, 4996, 208, 2728, 1048, 3568, 1888,
4408, 418, 2938, 1258, 3778, 2098, 4618, 628, 3148, 1468, 3988,
2308, 4828, 838, 3358, 1678, 4198, 2518, 5038, 6, 2526, 846,
3366, 1686, 4206, 216, 2736, 1056, 3576, 1896, 4416, 426, 2946,
1266, 3786, 2106, 4626, 636, 3156, 1476, 3996, 2316, 4836, 48,
2568, 888, 3408, 1728, 4248, 258, 2778, 1098, 3618, 1938, 4458,
468, 2988, 1308, 3828, 2148, 4668, 678, 3198, 1518, 4038, 2358,
4878, 90, 2610, 930, 3450, 1770, 4290, 300, 2820, 1140, 3660,
1980, 4500, 510, 3030, 1350, 3870, 2190, 4710, 720, 3240, 1560,
4080, 2400, 4920, 132, 2652, 972, 3492, 1812, 4332, 342, 2862,
1182, 3702, 2022, 4542, 552, 3072, 1392, 3912, 2232, 4752, 762,
3282, 1602, 4122, 2442, 4962, 174, 2694, 1014, 3534, 1854, 4374,
384, 2904, 1224, 3744, 2064, 4584, 594, 3114, 1434, 3954, 2274,
4794, 804, 3324, 1644, 4164, 2484, 5004, 13, 2533, 853, 3373,
1693, 4213, 223, 2743, 1063, 3583, 1903, 4423, 433, 2953, 1273,
3793, 2113, 4633, 643, 3163, 1483, 4003, 2323, 4843, 55, 2575,
895, 3415, 1735, 4255, 265, 2785, 1105, 3625, 1945, 4465, 475,
2995, 1315, 3835, 2155, 4675, 685, 3205, 1525, 4045, 2365, 4885,
97, 2617, 937, 3457, 1777, 4297, 307, 2827, 1147, 3667, 1987,
4507, 517, 3037, 1357, 3877, 2197, 4717, 727, 3247, 1567, 4087,
2407, 4927, 139, 2659, 979, 3499, 1819, 4339, 349, 2869, 1189,
3709, 2029, 4549, 559, 3079, 1399, 3919, 2239, 4759, 769, 3289,
1609, 4129, 2449, 4969, 181, 2701, 1021, 3541, 1861, 4381, 391,
2911, 1231, 3751, 2071, 4591, 601, 3121, 1441, 3961, 2281, 4801,
811, 3331, 1651, 4171, 2491, 5011, 20, 2540, 860, 3380, 1700,
4220, 230, 2750, 1070, 3590, 1910, 4430, 440, 2960, 1280, 3800,
2120, 4640, 650, 3170, 1490, 4010, 2330, 4850, 62, 2582, 902,
3422, 1742, 4262, 272, 2792, 1112, 3632, 1952, 4472, 482, 3002,
1322, 3842, 2162, 4682, 692, 3212, 1532, 4052, 2372, 4892, 104,
2624, 944, 3464, 1784, 4304, 314, 2834, 1154, 3674, 1994, 4514,
524, 3044, 1364, 3884, 2204, 4724, 734, 3254, 1574, 4094, 2414,
4934, 146, 2666, 986, 3506, 1826, 4346, 356, 2876, 1196, 3716,
2036, 4556, 566, 3086, 1406, 3926, 2246, 4766, 776, 3296, 1616,
4136, 2456, 4976, 188, 2708, 1028, 3548, 1868, 4388, 398, 2918,
1238, 3758, 2078, 4598, 608, 3128, 1448, 3968, 2288, 4808, 818,
3338, 1658, 4178, 2498, 5018, 27, 2547, 867, 3387, 1707, 4227,
237, 2757, 1077, 3597, 1917, 4437, 447, 2967, 1287, 3807, 2127,
4647, 657, 3177, 1497, 4017, 2337, 4857, 69, 2589, 909, 3429,
1749, 4269, 279, 2799, 1119, 3639, 1959, 4479, 489, 3009, 1329,
3849, 2169, 4689, 699, 3219, 1539, 4059, 2379, 4899, 111, 2631,
951, 3471, 1791, 4311, 321, 2841, 1161, 3681, 2001, 4521, 531,
3051, 1371, 3891, 2211, 4731, 741, 3261, 1581, 4101, 2421, 4941,
153, 2673, 993, 3513, 1833, 4353, 363, 2883, 1203, 3723, 2043,
4563, 573, 3093, 1413, 3933, 2253, 4773, 783, 3303, 1623, 4143,
2463, 4983, 195, 2715, 1035, 3555, 1875, 4395, 405, 2925, 1245,
3765, 2085, 4605, 615, 3135, 1455, 3975, 2295, 4815, 825, 3345,
1665, 4185, 2505, 5025, 34, 2554, 874, 3394, 1714, 4234, 244,
2764, 1084, 3604, 1924, 4444, 454, 2974, 1294, 3814, 2134, 4654,
664, 3184, 1504, 4024, 2344, 4864, 76, 2596, 916, 3436, 1756,
4276, 286, 2806, 1126, 3646, 1966, 4486, 496, 3016, 1336, 3856,
2176, 4696, 706, 3226, 1546, 4066, 2386, 4906, 118, 2638, 958,
3478, 1798, 4318, 328, 2848, 1168, 3688, 2008, 4528, 538, 3058,
1378, 3898, 2218, 4738, 748, 3268, 1588, 4108, 2428, 4948, 160,
2680, 1000, 3520, 1840, 4360, 370, 2890, 1210, 3730, 2050, 4570,
580, 3100, 1420, 3940, 2260, 4780, 790, 3310, 1630, 4150, 2470,
4990, 202, 2722, 1042, 3562, 1882, 4402, 412, 2932, 1252, 3772,
2092, 4612, 622, 3142, 1462, 3982, 2302, 4822, 832, 3352, 1672,
4192, 2512, 5032, 41, 2561, 881, 3401, 1721, 4241, 251, 2771,
1091, 3611, 1931, 4451, 461, 2981, 1301, 3821, 2141, 4661, 671,
3191, 1511, 4031, 2351, 4871, 83, 2603, 923, 3443, 1763, 4283,
293, 2813, 1133, 3653, 1973, 4493, 503, 3023, 1343, 3863, 2183,
4703, 713, 3233, 1553, 4073, 2393, 4913, 125, 2645, 965, 3485,
1805, 4325, 335, 2855, 1175, 3695, 2015, 4535, 545, 3065, 1385,
3905, 2225, 4745, 755, 3275, 1595, 4115, 2435, 4955, 167, 2687,
1007, 3527, 1847, 4367, 377, 2897, 1217, 3737, 2057, 4577, 587,
3107, 1427, 3947, 2267, 4787, 797, 3317, 1637, 4157, 2477, 4997,
209, 2729, 1049, 3569, 1889, 4409, 419, 2939, 1259, 3779, 2099,
4619, 629, 3149, 1469, 3989, 2309, 4829, 839, 3359, 1679, 4199,
2519, 5039}));
}
TEST(TransposeTest, TestRefOps6D2) {
  // 6-D transpose of a row-major tensor holding the values 0..5039, with
  // permutation {1, 0, 2, 3, 4, 5} (swap axes 0 and 1).
  //
  // The expected output is computed here by independent index arithmetic
  // instead of a 5040-entry hard-coded table: output element `o` must carry
  // the value of the input element whose coordinates are the permuted output
  // coordinates.  Because the input is filled with its own linear indices
  // (0..5039, as the original golden data shows), that value is simply the
  // input's row-major linear index.
  constexpr int kRank = 6;
  constexpr int kDims[kRank] = {2, 3, 4, 5, 6, 7};
  constexpr int kPerm[kRank] = {1, 0, 2, 3, 4, 5};
  constexpr int kTotal = 2 * 3 * 4 * 5 * 6 * 7;  // 5040 elements.

  // Row-major strides for the input shape and for the permuted output shape
  // (output dim i has extent kDims[kPerm[i]]).
  int in_stride[kRank];
  int out_stride[kRank];
  in_stride[kRank - 1] = 1;
  out_stride[kRank - 1] = 1;
  for (int i = kRank - 2; i >= 0; --i) {
    in_stride[i] = in_stride[i + 1] * kDims[i + 1];
    out_stride[i] = out_stride[i + 1] * kDims[kPerm[i + 1]];
  }

  // For each output linear index, decompose it into output coordinates and
  // re-linearize those coordinates against the input strides: output
  // coordinate i lies along input axis kPerm[i].
  float expected[kTotal];
  for (int o = 0; o < kTotal; ++o) {
    int rem = o;
    int in_index = 0;
    for (int i = 0; i < kRank; ++i) {
      in_index += (rem / out_stride[i]) * in_stride[kPerm[i]];
      rem %= out_stride[i];
    }
    expected[o] = static_cast<float>(in_index);
  }

  EXPECT_THAT(
      RunTestPermutation<float>({2, 3, 4, 5, 6, 7},
                                {1, 0, 2, 3, 4, 5}),
      ElementsAreArray(expected));
}
TEST(TransposeTest, TestRefOps6D3) {
  // 6-D transpose with permutation {1, 2, 0, 3, 4, 5}.
  //
  // RunTestPermutation transposes an iota-filled (0, 1, 2, ...) tensor, so
  // each output element is exactly the flattened *input* index it was read
  // from. That lets us compute the expected vector from the shape and the
  // permutation instead of hard-coding all 2*3*4*5*6*7 = 5040 literals,
  // which were impossible to review or maintain by hand.
  const std::vector<int> shape = {2, 3, 4, 5, 6, 7};
  const std::vector<int> perm = {1, 2, 0, 3, 4, 5};
  const int rank = static_cast<int>(shape.size());

  // Row-major strides of the input tensor.
  std::vector<int> in_strides(rank, 1);
  for (int i = rank - 2; i >= 0; --i) {
    in_strides[i] = in_strides[i + 1] * shape[i + 1];
  }

  // Output dimension i takes its extent from input dimension perm[i].
  std::vector<int> out_shape(rank);
  int total = 1;
  for (int i = 0; i < rank; ++i) {
    out_shape[i] = shape[perm[i]];
    total *= shape[i];
  }

  // Walk the output in row-major order; for each output coordinate, the
  // matching input flat offset is sum(out_index[i] * in_strides[perm[i]]),
  // because output coordinate i indexes input dimension perm[i].
  std::vector<float> expected(total);
  std::vector<int> out_index(rank, 0);
  for (int flat = 0; flat < total; ++flat) {
    int in_flat = 0;
    for (int i = 0; i < rank; ++i) {
      in_flat += out_index[i] * in_strides[perm[i]];
    }
    expected[flat] = static_cast<float>(in_flat);
    // Advance the output coordinate (row-major odometer increment).
    for (int i = rank - 1; i >= 0; --i) {
      if (++out_index[i] < out_shape[i]) break;
      out_index[i] = 0;
    }
  }

  EXPECT_THAT(RunTestPermutation<float>(shape, perm),
              ElementsAreArray(expected));
}
TEST(TransposeTest, TestRefOps6D4) {
  // Transposes a 2x3x4x5x6x7 iota-filled tensor with permutation
  // {1, 2, 3, 0, 4, 5} and checks the result against an independently
  // computed expectation.
  //
  // The expected flat output is derived from plain index arithmetic:
  // output multi-index o maps to input flat offset
  //   sum_i o[i] * in_stride[perm[i]]
  // (the input is iota-filled, so the value at a flat offset equals the
  // offset itself).  This replaces a 5040-element hard-coded literal that
  // was unreadable and impossible to review, while remaining an oracle
  // independent of the transpose implementation under test.
  const std::vector<int> shape = {2, 3, 4, 5, 6, 7};
  const std::vector<int> perm = {1, 2, 3, 0, 4, 5};

  // Row-major strides of the input shape; after the loop, `total` holds the
  // total element count (5040).
  std::vector<int> in_strides(shape.size());
  int total = 1;
  for (int i = static_cast<int>(shape.size()) - 1; i >= 0; --i) {
    in_strides[i] = total;
    total *= shape[i];
  }

  // Output dimension i has size shape[perm[i]] and advances the input flat
  // offset by in_strides[perm[i]].
  std::vector<int> out_shape(shape.size());
  std::vector<int> out_strides(shape.size());
  for (size_t i = 0; i < shape.size(); ++i) {
    out_shape[i] = shape[perm[i]];
    out_strides[i] = in_strides[perm[i]];
  }

  // Enumerate output elements in row-major order (last dimension fastest),
  // gathering the corresponding input offsets.
  std::vector<float> expected;
  expected.reserve(total);
  std::vector<int> idx(shape.size(), 0);
  for (int flat = 0; flat < total; ++flat) {
    int offset = 0;
    for (size_t i = 0; i < shape.size(); ++i) {
      offset += idx[i] * out_strides[i];
    }
    expected.push_back(static_cast<float>(offset));
    // Advance the output multi-index with carry.
    for (int i = static_cast<int>(shape.size()) - 1; i >= 0; --i) {
      if (++idx[i] < out_shape[i]) break;
      idx[i] = 0;
    }
  }

  EXPECT_THAT(RunTestPermutation<float>({2, 3, 4, 5, 6, 7},
                                        {1, 2, 3, 0, 4, 5}),
              ElementsAreArray(expected));
}
TEST(TransposeTest, TestRefOps6D5) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5, 6, 7},
{1, 2, 3, 4, 0, 5}),
ElementsAreArray(
{0, 1, 2, 3, 4, 5, 6, 2520, 2521, 2522, 2523,
2524, 2525, 2526, 7, 8, 9, 10, 11, 12, 13, 2527,
2528, 2529, 2530, 2531, 2532, 2533, 14, 15, 16, 17, 18,
19, 20, 2534, 2535, 2536, 2537, 2538, 2539, 2540, 21, 22,
23, 24, 25, 26, 27, 2541, 2542, 2543, 2544, 2545, 2546,
2547, 28, 29, 30, 31, 32, 33, 34, 2548, 2549, 2550,
2551, 2552, 2553, 2554, 35, 36, 37, 38, 39, 40, 41,
2555, 2556, 2557, 2558, 2559, 2560, 2561, 42, 43, 44, 45,
46, 47, 48, 2562, 2563, 2564, 2565, 2566, 2567, 2568, 49,
50, 51, 52, 53, 54, 55, 2569, 2570, 2571, 2572, 2573,
2574, 2575, 56, 57, 58, 59, 60, 61, 62, 2576, 2577,
2578, 2579, 2580, 2581, 2582, 63, 64, 65, 66, 67, 68,
69, 2583, 2584, 2585, 2586, 2587, 2588, 2589, 70, 71, 72,
73, 74, 75, 76, 2590, 2591, 2592, 2593, 2594, 2595, 2596,
77, 78, 79, 80, 81, 82, 83, 2597, 2598, 2599, 2600,
2601, 2602, 2603, 84, 85, 86, 87, 88, 89, 90, 2604,
2605, 2606, 2607, 2608, 2609, 2610, 91, 92, 93, 94, 95,
96, 97, 2611, 2612, 2613, 2614, 2615, 2616, 2617, 98, 99,
100, 101, 102, 103, 104, 2618, 2619, 2620, 2621, 2622, 2623,
2624, 105, 106, 107, 108, 109, 110, 111, 2625, 2626, 2627,
2628, 2629, 2630, 2631, 112, 113, 114, 115, 116, 117, 118,
2632, 2633, 2634, 2635, 2636, 2637, 2638, 119, 120, 121, 122,
123, 124, 125, 2639, 2640, 2641, 2642, 2643, 2644, 2645, 126,
127, 128, 129, 130, 131, 132, 2646, 2647, 2648, 2649, 2650,
2651, 2652, 133, 134, 135, 136, 137, 138, 139, 2653, 2654,
2655, 2656, 2657, 2658, 2659, 140, 141, 142, 143, 144, 145,
146, 2660, 2661, 2662, 2663, 2664, 2665, 2666, 147, 148, 149,
150, 151, 152, 153, 2667, 2668, 2669, 2670, 2671, 2672, 2673,
154, 155, 156, 157, 158, 159, 160, 2674, 2675, 2676, 2677,
2678, 2679, 2680, 161, 162, 163, 164, 165, 166, 167, 2681,
2682, 2683, 2684, 2685, 2686, 2687, 168, 169, 170, 171, 172,
173, 174, 2688, 2689, 2690, 2691, 2692, 2693, 2694, 175, 176,
177, 178, 179, 180, 181, 2695, 2696, 2697, 2698, 2699, 2700,
2701, 182, 183, 184, 185, 186, 187, 188, 2702, 2703, 2704,
2705, 2706, 2707, 2708, 189, 190, 191, 192, 193, 194, 195,
2709, 2710, 2711, 2712, 2713, 2714, 2715, 196, 197, 198, 199,
200, 201, 202, 2716, 2717, 2718, 2719, 2720, 2721, 2722, 203,
204, 205, 206, 207, 208, 209, 2723, 2724, 2725, 2726, 2727,
2728, 2729, 210, 211, 212, 213, 214, 215, 216, 2730, 2731,
2732, 2733, 2734, 2735, 2736, 217, 218, 219, 220, 221, 222,
223, 2737, 2738, 2739, 2740, 2741, 2742, 2743, 224, 225, 226,
227, 228, 229, 230, 2744, 2745, 2746, 2747, 2748, 2749, 2750,
231, 232, 233, 234, 235, 236, 237, 2751, 2752, 2753, 2754,
2755, 2756, 2757, 238, 239, 240, 241, 242, 243, 244, 2758,
2759, 2760, 2761, 2762, 2763, 2764, 245, 246, 247, 248, 249,
250, 251, 2765, 2766, 2767, 2768, 2769, 2770, 2771, 252, 253,
254, 255, 256, 257, 258, 2772, 2773, 2774, 2775, 2776, 2777,
2778, 259, 260, 261, 262, 263, 264, 265, 2779, 2780, 2781,
2782, 2783, 2784, 2785, 266, 267, 268, 269, 270, 271, 272,
2786, 2787, 2788, 2789, 2790, 2791, 2792, 273, 274, 275, 276,
277, 278, 279, 2793, 2794, 2795, 2796, 2797, 2798, 2799, 280,
281, 282, 283, 284, 285, 286, 2800, 2801, 2802, 2803, 2804,
2805, 2806, 287, 288, 289, 290, 291, 292, 293, 2807, 2808,
2809, 2810, 2811, 2812, 2813, 294, 295, 296, 297, 298, 299,
300, 2814, 2815, 2816, 2817, 2818, 2819, 2820, 301, 302, 303,
304, 305, 306, 307, 2821, 2822, 2823, 2824, 2825, 2826, 2827,
308, 309, 310, 311, 312, 313, 314, 2828, 2829, 2830, 2831,
2832, 2833, 2834, 315, 316, 317, 318, 319, 320, 321, 2835,
2836, 2837, 2838, 2839, 2840, 2841, 322, 323, 324, 325, 326,
327, 328, 2842, 2843, 2844, 2845, 2846, 2847, 2848, 329, 330,
331, 332, 333, 334, 335, 2849, 2850, 2851, 2852, 2853, 2854,
2855, 336, 337, 338, 339, 340, 341, 342, 2856, 2857, 2858,
2859, 2860, 2861, 2862, 343, 344, 345, 346, 347, 348, 349,
2863, 2864, 2865, 2866, 2867, 2868, 2869, 350, 351, 352, 353,
354, 355, 356, 2870, 2871, 2872, 2873, 2874, 2875, 2876, 357,
358, 359, 360, 361, 362, 363, 2877, 2878, 2879, 2880, 2881,
2882, 2883, 364, 365, 366, 367, 368, 369, 370, 2884, 2885,
2886, 2887, 2888, 2889, 2890, 371, 372, 373, 374, 375, 376,
377, 2891, 2892, 2893, 2894, 2895, 2896, 2897, 378, 379, 380,
381, 382, 383, 384, 2898, 2899, 2900, 2901, 2902, 2903, 2904,
385, 386, 387, 388, 389, 390, 391, 2905, 2906, 2907, 2908,
2909, 2910, 2911, 392, 393, 394, 395, 396, 397, 398, 2912,
2913, 2914, 2915, 2916, 2917, 2918, 399, 400, 401, 402, 403,
404, 405, 2919, 2920, 2921, 2922, 2923, 2924, 2925, 406, 407,
408, 409, 410, 411, 412, 2926, 2927, 2928, 2929, 2930, 2931,
2932, 413, 414, 415, 416, 417, 418, 419, 2933, 2934, 2935,
2936, 2937, 2938, 2939, 420, 421, 422, 423, 424, 425, 426,
2940, 2941, 2942, 2943, 2944, 2945, 2946, 427, 428, 429, 430,
431, 432, 433, 2947, 2948, 2949, 2950, 2951, 2952, 2953, 434,
435, 436, 437, 438, 439, 440, 2954, 2955, 2956, 2957, 2958,
2959, 2960, 441, 442, 443, 444, 445, 446, 447, 2961, 2962,
2963, 2964, 2965, 2966, 2967, 448, 449, 450, 451, 452, 453,
454, 2968, 2969, 2970, 2971, 2972, 2973, 2974, 455, 456, 457,
458, 459, 460, 461, 2975, 2976, 2977, 2978, 2979, 2980, 2981,
462, 463, 464, 465, 466, 467, 468, 2982, 2983, 2984, 2985,
2986, 2987, 2988, 469, 470, 471, 472, 473, 474, 475, 2989,
2990, 2991, 2992, 2993, 2994, 2995, 476, 477, 478, 479, 480,
481, 482, 2996, 2997, 2998, 2999, 3000, 3001, 3002, 483, 484,
485, 486, 487, 488, 489, 3003, 3004, 3005, 3006, 3007, 3008,
3009, 490, 491, 492, 493, 494, 495, 496, 3010, 3011, 3012,
3013, 3014, 3015, 3016, 497, 498, 499, 500, 501, 502, 503,
3017, 3018, 3019, 3020, 3021, 3022, 3023, 504, 505, 506, 507,
508, 509, 510, 3024, 3025, 3026, 3027, 3028, 3029, 3030, 511,
512, 513, 514, 515, 516, 517, 3031, 3032, 3033, 3034, 3035,
3036, 3037, 518, 519, 520, 521, 522, 523, 524, 3038, 3039,
3040, 3041, 3042, 3043, 3044, 525, 526, 527, 528, 529, 530,
531, 3045, 3046, 3047, 3048, 3049, 3050, 3051, 532, 533, 534,
535, 536, 537, 538, 3052, 3053, 3054, 3055, 3056, 3057, 3058,
539, 540, 541, 542, 543, 544, 545, 3059, 3060, 3061, 3062,
3063, 3064, 3065, 546, 547, 548, 549, 550, 551, 552, 3066,
3067, 3068, 3069, 3070, 3071, 3072, 553, 554, 555, 556, 557,
558, 559, 3073, 3074, 3075, 3076, 3077, 3078, 3079, 560, 561,
562, 563, 564, 565, 566, 3080, 3081, 3082, 3083, 3084, 3085,
3086, 567, 568, 569, 570, 571, 572, 573, 3087, 3088, 3089,
3090, 3091, 3092, 3093, 574, 575, 576, 577, 578, 579, 580,
3094, 3095, 3096, 3097, 3098, 3099, 3100, 581, 582, 583, 584,
585, 586, 587, 3101, 3102, 3103, 3104, 3105, 3106, 3107, 588,
589, 590, 591, 592, 593, 594, 3108, 3109, 3110, 3111, 3112,
3113, 3114, 595, 596, 597, 598, 599, 600, 601, 3115, 3116,
3117, 3118, 3119, 3120, 3121, 602, 603, 604, 605, 606, 607,
608, 3122, 3123, 3124, 3125, 3126, 3127, 3128, 609, 610, 611,
612, 613, 614, 615, 3129, 3130, 3131, 3132, 3133, 3134, 3135,
616, 617, 618, 619, 620, 621, 622, 3136, 3137, 3138, 3139,
3140, 3141, 3142, 623, 624, 625, 626, 627, 628, 629, 3143,
3144, 3145, 3146, 3147, 3148, 3149, 630, 631, 632, 633, 634,
635, 636, 3150, 3151, 3152, 3153, 3154, 3155, 3156, 637, 638,
639, 640, 641, 642, 643, 3157, 3158, 3159, 3160, 3161, 3162,
3163, 644, 645, 646, 647, 648, 649, 650, 3164, 3165, 3166,
3167, 3168, 3169, 3170, 651, 652, 653, 654, 655, 656, 657,
3171, 3172, 3173, 3174, 3175, 3176, 3177, 658, 659, 660, 661,
662, 663, 664, 3178, 3179, 3180, 3181, 3182, 3183, 3184, 665,
666, 667, 668, 669, 670, 671, 3185, 3186, 3187, 3188, 3189,
3190, 3191, 672, 673, 674, 675, 676, 677, 678, 3192, 3193,
3194, 3195, 3196, 3197, 3198, 679, 680, 681, 682, 683, 684,
685, 3199, 3200, 3201, 3202, 3203, 3204, 3205, 686, 687, 688,
689, 690, 691, 692, 3206, 3207, 3208, 3209, 3210, 3211, 3212,
693, 694, 695, 696, 697, 698, 699, 3213, 3214, 3215, 3216,
3217, 3218, 3219, 700, 701, 702, 703, 704, 705, 706, 3220,
3221, 3222, 3223, 3224, 3225, 3226, 707, 708, 709, 710, 711,
712, 713, 3227, 3228, 3229, 3230, 3231, 3232, 3233, 714, 715,
716, 717, 718, 719, 720, 3234, 3235, 3236, 3237, 3238, 3239,
3240, 721, 722, 723, 724, 725, 726, 727, 3241, 3242, 3243,
3244, 3245, 3246, 3247, 728, 729, 730, 731, 732, 733, 734,
3248, 3249, 3250, 3251, 3252, 3253, 3254, 735, 736, 737, 738,
739, 740, 741, 3255, 3256, 3257, 3258, 3259, 3260, 3261, 742,
743, 744, 745, 746, 747, 748, 3262, 3263, 3264, 3265, 3266,
3267, 3268, 749, 750, 751, 752, 753, 754, 755, 3269, 3270,
3271, 3272, 3273, 3274, 3275, 756, 757, 758, 759, 760, 761,
762, 3276, 3277, 3278, 3279, 3280, 3281, 3282, 763, 764, 765,
766, 767, 768, 769, 3283, 3284, 3285, 3286, 3287, 3288, 3289,
770, 771, 772, 773, 774, 775, 776, 3290, 3291, 3292, 3293,
3294, 3295, 3296, 777, 778, 779, 780, 781, 782, 783, 3297,
3298, 3299, 3300, 3301, 3302, 3303, 784, 785, 786, 787, 788,
789, 790, 3304, 3305, 3306, 3307, 3308, 3309, 3310, 791, 792,
793, 794, 795, 796, 797, 3311, 3312, 3313, 3314, 3315, 3316,
3317, 798, 799, 800, 801, 802, 803, 804, 3318, 3319, 3320,
3321, 3322, 3323, 3324, 805, 806, 807, 808, 809, 810, 811,
3325, 3326, 3327, 3328, 3329, 3330, 3331, 812, 813, 814, 815,
816, 817, 818, 3332, 3333, 3334, 3335, 3336, 3337, 3338, 819,
820, 821, 822, 823, 824, 825, 3339, 3340, 3341, 3342, 3343,
3344, 3345, 826, 827, 828, 829, 830, 831, 832, 3346, 3347,
3348, 3349, 3350, 3351, 3352, 833, 834, 835, 836, 837, 838,
839, 3353, 3354, 3355, 3356, 3357, 3358, 3359, 840, 841, 842,
843, 844, 845, 846, 3360, 3361, 3362, 3363, 3364, 3365, 3366,
847, 848, 849, 850, 851, 852, 853, 3367, 3368, 3369, 3370,
3371, 3372, 3373, 854, 855, 856, 857, 858, 859, 860, 3374,
3375, 3376, 3377, 3378, 3379, 3380, 861, 862, 863, 864, 865,
866, 867, 3381, 3382, 3383, 3384, 3385, 3386, 3387, 868, 869,
870, 871, 872, 873, 874, 3388, 3389, 3390, 3391, 3392, 3393,
3394, 875, 876, 877, 878, 879, 880, 881, 3395, 3396, 3397,
3398, 3399, 3400, 3401, 882, 883, 884, 885, 886, 887, 888,
3402, 3403, 3404, 3405, 3406, 3407, 3408, 889, 890, 891, 892,
893, 894, 895, 3409, 3410, 3411, 3412, 3413, 3414, 3415, 896,
897, 898, 899, 900, 901, 902, 3416, 3417, 3418, 3419, 3420,
3421, 3422, 903, 904, 905, 906, 907, 908, 909, 3423, 3424,
3425, 3426, 3427, 3428, 3429, 910, 911, 912, 913, 914, 915,
916, 3430, 3431, 3432, 3433, 3434, 3435, 3436, 917, 918, 919,
920, 921, 922, 923, 3437, 3438, 3439, 3440, 3441, 3442, 3443,
924, 925, 926, 927, 928, 929, 930, 3444, 3445, 3446, 3447,
3448, 3449, 3450, 931, 932, 933, 934, 935, 936, 937, 3451,
3452, 3453, 3454, 3455, 3456, 3457, 938, 939, 940, 941, 942,
943, 944, 3458, 3459, 3460, 3461, 3462, 3463, 3464, 945, 946,
947, 948, 949, 950, 951, 3465, 3466, 3467, 3468, 3469, 3470,
3471, 952, 953, 954, 955, 956, 957, 958, 3472, 3473, 3474,
3475, 3476, 3477, 3478, 959, 960, 961, 962, 963, 964, 965,
3479, 3480, 3481, 3482, 3483, 3484, 3485, 966, 967, 968, 969,
970, 971, 972, 3486, 3487, 3488, 3489, 3490, 3491, 3492, 973,
974, 975, 976, 977, 978, 979, 3493, 3494, 3495, 3496, 3497,
3498, 3499, 980, 981, 982, 983, 984, 985, 986, 3500, 3501,
3502, 3503, 3504, 3505, 3506, 987, 988, 989, 990, 991, 992,
993, 3507, 3508, 3509, 3510, 3511, 3512, 3513, 994, 995, 996,
997, 998, 999, 1000, 3514, 3515, 3516, 3517, 3518, 3519, 3520,
1001, 1002, 1003, 1004, 1005, 1006, 1007, 3521, 3522, 3523, 3524,
3525, 3526, 3527, 1008, 1009, 1010, 1011, 1012, 1013, 1014, 3528,
3529, 3530, 3531, 3532, 3533, 3534, 1015, 1016, 1017, 1018, 1019,
1020, 1021, 3535, 3536, 3537, 3538, 3539, 3540, 3541, 1022, 1023,
1024, 1025, 1026, 1027, 1028, 3542, 3543, 3544, 3545, 3546, 3547,
3548, 1029, 1030, 1031, 1032, 1033, 1034, 1035, 3549, 3550, 3551,
3552, 3553, 3554, 3555, 1036, 1037, 1038, 1039, 1040, 1041, 1042,
3556, 3557, 3558, 3559, 3560, 3561, 3562, 1043, 1044, 1045, 1046,
1047, 1048, 1049, 3563, 3564, 3565, 3566, 3567, 3568, 3569, 1050,
1051, 1052, 1053, 1054, 1055, 1056, 3570, 3571, 3572, 3573, 3574,
3575, 3576, 1057, 1058, 1059, 1060, 1061, 1062, 1063, 3577, 3578,
3579, 3580, 3581, 3582, 3583, 1064, 1065, 1066, 1067, 1068, 1069,
1070, 3584, 3585, 3586, 3587, 3588, 3589, 3590, 1071, 1072, 1073,
1074, 1075, 1076, 1077, 3591, 3592, 3593, 3594, 3595, 3596, 3597,
1078, 1079, 1080, 1081, 1082, 1083, 1084, 3598, 3599, 3600, 3601,
3602, 3603, 3604, 1085, 1086, 1087, 1088, 1089, 1090, 1091, 3605,
3606, 3607, 3608, 3609, 3610, 3611, 1092, 1093, 1094, 1095, 1096,
1097, 1098, 3612, 3613, 3614, 3615, 3616, 3617, 3618, 1099, 1100,
1101, 1102, 1103, 1104, 1105, 3619, 3620, 3621, 3622, 3623, 3624,
3625, 1106, 1107, 1108, 1109, 1110, 1111, 1112, 3626, 3627, 3628,
3629, 3630, 3631, 3632, 1113, 1114, 1115, 1116, 1117, 1118, 1119,
3633, 3634, 3635, 3636, 3637, 3638, 3639, 1120, 1121, 1122, 1123,
1124, 1125, 1126, 3640, 3641, 3642, 3643, 3644, 3645, 3646, 1127,
1128, 1129, 1130, 1131, 1132, 1133, 3647, 3648, 3649, 3650, 3651,
3652, 3653, 1134, 1135, 1136, 1137, 1138, 1139, 1140, 3654, 3655,
3656, 3657, 3658, 3659, 3660, 1141, 1142, 1143, 1144, 1145, 1146,
1147, 3661, 3662, 3663, 3664, 3665, 3666, 3667, 1148, 1149, 1150,
1151, 1152, 1153, 1154, 3668, 3669, 3670, 3671, 3672, 3673, 3674,
1155, 1156, 1157, 1158, 1159, 1160, 1161, 3675, 3676, 3677, 3678,
3679, 3680, 3681, 1162, 1163, 1164, 1165, 1166, 1167, 1168, 3682,
3683, 3684, 3685, 3686, 3687, 3688, 1169, 1170, 1171, 1172, 1173,
1174, 1175, 3689, 3690, 3691, 3692, 3693, 3694, 3695, 1176, 1177,
1178, 1179, 1180, 1181, 1182, 3696, 3697, 3698, 3699, 3700, 3701,
3702, 1183, 1184, 1185, 1186, 1187, 1188, 1189, 3703, 3704, 3705,
3706, 3707, 3708, 3709, 1190, 1191, 1192, 1193, 1194, 1195, 1196,
3710, 3711, 3712, 3713, 3714, 3715, 3716, 1197, 1198, 1199, 1200,
1201, 1202, 1203, 3717, 3718, 3719, 3720, 3721, 3722, 3723, 1204,
1205, 1206, 1207, 1208, 1209, 1210, 3724, 3725, 3726, 3727, 3728,
3729, 3730, 1211, 1212, 1213, 1214, 1215, 1216, 1217, 3731, 3732,
3733, 3734, 3735, 3736, 3737, 1218, 1219, 1220, 1221, 1222, 1223,
1224, 3738, 3739, 3740, 3741, 3742, 3743, 3744, 1225, 1226, 1227,
1228, 1229, 1230, 1231, 3745, 3746, 3747, 3748, 3749, 3750, 3751,
1232, 1233, 1234, 1235, 1236, 1237, 1238, 3752, 3753, 3754, 3755,
3756, 3757, 3758, 1239, 1240, 1241, 1242, 1243, 1244, 1245, 3759,
3760, 3761, 3762, 3763, 3764, 3765, 1246, 1247, 1248, 1249, 1250,
1251, 1252, 3766, 3767, 3768, 3769, 3770, 3771, 3772, 1253, 1254,
1255, 1256, 1257, 1258, 1259, 3773, 3774, 3775, 3776, 3777, 3778,
3779, 1260, 1261, 1262, 1263, 1264, 1265, 1266, 3780, 3781, 3782,
3783, 3784, 3785, 3786, 1267, 1268, 1269, 1270, 1271, 1272, 1273,
3787, 3788, 3789, 3790, 3791, 3792, 3793, 1274, 1275, 1276, 1277,
1278, 1279, 1280, 3794, 3795, 3796, 3797, 3798, 3799, 3800, 1281,
1282, 1283, 1284, 1285, 1286, 1287, 3801, 3802, 3803, 3804, 3805,
3806, 3807, 1288, 1289, 1290, 1291, 1292, 1293, 1294, 3808, 3809,
3810, 3811, 3812, 3813, 3814, 1295, 1296, 1297, 1298, 1299, 1300,
1301, 3815, 3816, 3817, 3818, 3819, 3820, 3821, 1302, 1303, 1304,
1305, 1306, 1307, 1308, 3822, 3823, 3824, 3825, 3826, 3827, 3828,
1309, 1310, 1311, 1312, 1313, 1314, 1315, 3829, 3830, 3831, 3832,
3833, 3834, 3835, 1316, 1317, 1318, 1319, 1320, 1321, 1322, 3836,
3837, 3838, 3839, 3840, 3841, 3842, 1323, 1324, 1325, 1326, 1327,
1328, 1329, 3843, 3844, 3845, 3846, 3847, 3848, 3849, 1330, 1331,
1332, 1333, 1334, 1335, 1336, 3850, 3851, 3852, 3853, 3854, 3855,
3856, 1337, 1338, 1339, 1340, 1341, 1342, 1343, 3857, 3858, 3859,
3860, 3861, 3862, 3863, 1344, 1345, 1346, 1347, 1348, 1349, 1350,
3864, 3865, 3866, 3867, 3868, 3869, 3870, 1351, 1352, 1353, 1354,
1355, 1356, 1357, 3871, 3872, 3873, 3874, 3875, 3876, 3877, 1358,
1359, 1360, 1361, 1362, 1363, 1364, 3878, 3879, 3880, 3881, 3882,
3883, 3884, 1365, 1366, 1367, 1368, 1369, 1370, 1371, 3885, 3886,
3887, 3888, 3889, 3890, 3891, 1372, 1373, 1374, 1375, 1376, 1377,
1378, 3892, 3893, 3894, 3895, 3896, 3897, 3898, 1379, 1380, 1381,
1382, 1383, 1384, 1385, 3899, 3900, 3901, 3902, 3903, 3904, 3905,
1386, 1387, 1388, 1389, 1390, 1391, 1392, 3906, 3907, 3908, 3909,
3910, 3911, 3912, 1393, 1394, 1395, 1396, 1397, 1398, 1399, 3913,
3914, 3915, 3916, 3917, 3918, 3919, 1400, 1401, 1402, 1403, 1404,
1405, 1406, 3920, 3921, 3922, 3923, 3924, 3925, 3926, 1407, 1408,
1409, 1410, 1411, 1412, 1413, 3927, 3928, 3929, 3930, 3931, 3932,
3933, 1414, 1415, 1416, 1417, 1418, 1419, 1420, 3934, 3935, 3936,
3937, 3938, 3939, 3940, 1421, 1422, 1423, 1424, 1425, 1426, 1427,
3941, 3942, 3943, 3944, 3945, 3946, 3947, 1428, 1429, 1430, 1431,
1432, 1433, 1434, 3948, 3949, 3950, 3951, 3952, 3953, 3954, 1435,
1436, 1437, 1438, 1439, 1440, 1441, 3955, 3956, 3957, 3958, 3959,
3960, 3961, 1442, 1443, 1444, 1445, 1446, 1447, 1448, 3962, 3963,
3964, 3965, 3966, 3967, 3968, 1449, 1450, 1451, 1452, 1453, 1454,
1455, 3969, 3970, 3971, 3972, 3973, 3974, 3975, 1456, 1457, 1458,
1459, 1460, 1461, 1462, 3976, 3977, 3978, 3979, 3980, 3981, 3982,
1463, 1464, 1465, 1466, 1467, 1468, 1469, 3983, 3984, 3985, 3986,
3987, 3988, 3989, 1470, 1471, 1472, 1473, 1474, 1475, 1476, 3990,
3991, 3992, 3993, 3994, 3995, 3996, 1477, 1478, 1479, 1480, 1481,
1482, 1483, 3997, 3998, 3999, 4000, 4001, 4002, 4003, 1484, 1485,
1486, 1487, 1488, 1489, 1490, 4004, 4005, 4006, 4007, 4008, 4009,
4010, 1491, 1492, 1493, 1494, 1495, 1496, 1497, 4011, 4012, 4013,
4014, 4015, 4016, 4017, 1498, 1499, 1500, 1501, 1502, 1503, 1504,
4018, 4019, 4020, 4021, 4022, 4023, 4024, 1505, 1506, 1507, 1508,
1509, 1510, 1511, 4025, 4026, 4027, 4028, 4029, 4030, 4031, 1512,
1513, 1514, 1515, 1516, 1517, 1518, 4032, 4033, 4034, 4035, 4036,
4037, 4038, 1519, 1520, 1521, 1522, 1523, 1524, 1525, 4039, 4040,
4041, 4042, 4043, 4044, 4045, 1526, 1527, 1528, 1529, 1530, 1531,
1532, 4046, 4047, 4048, 4049, 4050, 4051, 4052, 1533, 1534, 1535,
1536, 1537, 1538, 1539, 4053, 4054, 4055, 4056, 4057, 4058, 4059,
1540, 1541, 1542, 1543, 1544, 1545, 1546, 4060, 4061, 4062, 4063,
4064, 4065, 4066, 1547, 1548, 1549, 1550, 1551, 1552, 1553, 4067,
4068, 4069, 4070, 4071, 4072, 4073, 1554, 1555, 1556, 1557, 1558,
1559, 1560, 4074, 4075, 4076, 4077, 4078, 4079, 4080, 1561, 1562,
1563, 1564, 1565, 1566, 1567, 4081, 4082, 4083, 4084, 4085, 4086,
4087, 1568, 1569, 1570, 1571, 1572, 1573, 1574, 4088, 4089, 4090,
4091, 4092, 4093, 4094, 1575, 1576, 1577, 1578, 1579, 1580, 1581,
4095, 4096, 4097, 4098, 4099, 4100, 4101, 1582, 1583, 1584, 1585,
1586, 1587, 1588, 4102, 4103, 4104, 4105, 4106, 4107, 4108, 1589,
1590, 1591, 1592, 1593, 1594, 1595, 4109, 4110, 4111, 4112, 4113,
4114, 4115, 1596, 1597, 1598, 1599, 1600, 1601, 1602, 4116, 4117,
4118, 4119, 4120, 4121, 4122, 1603, 1604, 1605, 1606, 1607, 1608,
1609, 4123, 4124, 4125, 4126, 4127, 4128, 4129, 1610, 1611, 1612,
1613, 1614, 1615, 1616, 4130, 4131, 4132, 4133, 4134, 4135, 4136,
1617, 1618, 1619, 1620, 1621, 1622, 1623, 4137, 4138, 4139, 4140,
4141, 4142, 4143, 1624, 1625, 1626, 1627, 1628, 1629, 1630, 4144,
4145, 4146, 4147, 4148, 4149, 4150, 1631, 1632, 1633, 1634, 1635,
1636, 1637, 4151, 4152, 4153, 4154, 4155, 4156, 4157, 1638, 1639,
1640, 1641, 1642, 1643, 1644, 4158, 4159, 4160, 4161, 4162, 4163,
4164, 1645, 1646, 1647, 1648, 1649, 1650, 1651, 4165, 4166, 4167,
4168, 4169, 4170, 4171, 1652, 1653, 1654, 1655, 1656, 1657, 1658,
4172, 4173, 4174, 4175, 4176, 4177, 4178, 1659, 1660, 1661, 1662,
1663, 1664, 1665, 4179, 4180, 4181, 4182, 4183, 4184, 4185, 1666,
1667, 1668, 1669, 1670, 1671, 1672, 4186, 4187, 4188, 4189, 4190,
4191, 4192, 1673, 1674, 1675, 1676, 1677, 1678, 1679, 4193, 4194,
4195, 4196, 4197, 4198, 4199, 1680, 1681, 1682, 1683, 1684, 1685,
1686, 4200, 4201, 4202, 4203, 4204, 4205, 4206, 1687, 1688, 1689,
1690, 1691, 1692, 1693, 4207, 4208, 4209, 4210, 4211, 4212, 4213,
1694, 1695, 1696, 1697, 1698, 1699, 1700, 4214, 4215, 4216, 4217,
4218, 4219, 4220, 1701, 1702, 1703, 1704, 1705, 1706, 1707, 4221,
4222, 4223, 4224, 4225, 4226, 4227, 1708, 1709, 1710, 1711, 1712,
1713, 1714, 4228, 4229, 4230, 4231, 4232, 4233, 4234, 1715, 1716,
1717, 1718, 1719, 1720, 1721, 4235, 4236, 4237, 4238, 4239, 4240,
4241, 1722, 1723, 1724, 1725, 1726, 1727, 1728, 4242, 4243, 4244,
4245, 4246, 4247, 4248, 1729, 1730, 1731, 1732, 1733, 1734, 1735,
4249, 4250, 4251, 4252, 4253, 4254, 4255, 1736, 1737, 1738, 1739,
1740, 1741, 1742, 4256, 4257, 4258, 4259, 4260, 4261, 4262, 1743,
1744, 1745, 1746, 1747, 1748, 1749, 4263, 4264, 4265, 4266, 4267,
4268, 4269, 1750, 1751, 1752, 1753, 1754, 1755, 1756, 4270, 4271,
4272, 4273, 4274, 4275, 4276, 1757, 1758, 1759, 1760, 1761, 1762,
1763, 4277, 4278, 4279, 4280, 4281, 4282, 4283, 1764, 1765, 1766,
1767, 1768, 1769, 1770, 4284, 4285, 4286, 4287, 4288, 4289, 4290,
1771, 1772, 1773, 1774, 1775, 1776, 1777, 4291, 4292, 4293, 4294,
4295, 4296, 4297, 1778, 1779, 1780, 1781, 1782, 1783, 1784, 4298,
4299, 4300, 4301, 4302, 4303, 4304, 1785, 1786, 1787, 1788, 1789,
1790, 1791, 4305, 4306, 4307, 4308, 4309, 4310, 4311, 1792, 1793,
1794, 1795, 1796, 1797, 1798, 4312, 4313, 4314, 4315, 4316, 4317,
4318, 1799, 1800, 1801, 1802, 1803, 1804, 1805, 4319, 4320, 4321,
4322, 4323, 4324, 4325, 1806, 1807, 1808, 1809, 1810, 1811, 1812,
4326, 4327, 4328, 4329, 4330, 4331, 4332, 1813, 1814, 1815, 1816,
1817, 1818, 1819, 4333, 4334, 4335, 4336, 4337, 4338, 4339, 1820,
1821, 1822, 1823, 1824, 1825, 1826, 4340, 4341, 4342, 4343, 4344,
4345, 4346, 1827, 1828, 1829, 1830, 1831, 1832, 1833, 4347, 4348,
4349, 4350, 4351, 4352, 4353, 1834, 1835, 1836, 1837, 1838, 1839,
1840, 4354, 4355, 4356, 4357, 4358, 4359, 4360, 1841, 1842, 1843,
1844, 1845, 1846, 1847, 4361, 4362, 4363, 4364, 4365, 4366, 4367,
1848, 1849, 1850, 1851, 1852, 1853, 1854, 4368, 4369, 4370, 4371,
4372, 4373, 4374, 1855, 1856, 1857, 1858, 1859, 1860, 1861, 4375,
4376, 4377, 4378, 4379, 4380, 4381, 1862, 1863, 1864, 1865, 1866,
1867, 1868, 4382, 4383, 4384, 4385, 4386, 4387, 4388, 1869, 1870,
1871, 1872, 1873, 1874, 1875, 4389, 4390, 4391, 4392, 4393, 4394,
4395, 1876, 1877, 1878, 1879, 1880, 1881, 1882, 4396, 4397, 4398,
4399, 4400, 4401, 4402, 1883, 1884, 1885, 1886, 1887, 1888, 1889,
4403, 4404, 4405, 4406, 4407, 4408, 4409, 1890, 1891, 1892, 1893,
1894, 1895, 1896, 4410, 4411, 4412, 4413, 4414, 4415, 4416, 1897,
1898, 1899, 1900, 1901, 1902, 1903, 4417, 4418, 4419, 4420, 4421,
4422, 4423, 1904, 1905, 1906, 1907, 1908, 1909, 1910, 4424, 4425,
4426, 4427, 4428, 4429, 4430, 1911, 1912, 1913, 1914, 1915, 1916,
1917, 4431, 4432, 4433, 4434, 4435, 4436, 4437, 1918, 1919, 1920,
1921, 1922, 1923, 1924, 4438, 4439, 4440, 4441, 4442, 4443, 4444,
1925, 1926, 1927, 1928, 1929, 1930, 1931, 4445, 4446, 4447, 4448,
4449, 4450, 4451, 1932, 1933, 1934, 1935, 1936, 1937, 1938, 4452,
4453, 4454, 4455, 4456, 4457, 4458, 1939, 1940, 1941, 1942, 1943,
1944, 1945, 4459, 4460, 4461, 4462, 4463, 4464, 4465, 1946, 1947,
1948, 1949, 1950, 1951, 1952, 4466, 4467, 4468, 4469, 4470, 4471,
4472, 1953, 1954, 1955, 1956, 1957, 1958, 1959, 4473, 4474, 4475,
4476, 4477, 4478, 4479, 1960, 1961, 1962, 1963, 1964, 1965, 1966,
4480, 4481, 4482, 4483, 4484, 4485, 4486, 1967, 1968, 1969, 1970,
1971, 1972, 1973, 4487, 4488, 4489, 4490, 4491, 4492, 4493, 1974,
1975, 1976, 1977, 1978, 1979, 1980, 4494, 4495, 4496, 4497, 4498,
4499, 4500, 1981, 1982, 1983, 1984, 1985, 1986, 1987, 4501, 4502,
4503, 4504, 4505, 4506, 4507, 1988, 1989, 1990, 1991, 1992, 1993,
1994, 4508, 4509, 4510, 4511, 4512, 4513, 4514, 1995, 1996, 1997,
1998, 1999, 2000, 2001, 4515, 4516, 4517, 4518, 4519, 4520, 4521,
2002, 2003, 2004, 2005, 2006, 2007, 2008, 4522, 4523, 4524, 4525,
4526, 4527, 4528, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 4529,
4530, 4531, 4532, 4533, 4534, 4535, 2016, 2017, 2018, 2019, 2020,
2021, 2022, 4536, 4537, 4538, 4539, 4540, 4541, 4542, 2023, 2024,
2025, 2026, 2027, 2028, 2029, 4543, 4544, 4545, 4546, 4547, 4548,
4549, 2030, 2031, 2032, 2033, 2034, 2035, 2036, 4550, 4551, 4552,
4553, 4554, 4555, 4556, 2037, 2038, 2039, 2040, 2041, 2042, 2043,
4557, 4558, 4559, 4560, 4561, 4562, 4563, 2044, 2045, 2046, 2047,
2048, 2049, 2050, 4564, 4565, 4566, 4567, 4568, 4569, 4570, 2051,
2052, 2053, 2054, 2055, 2056, 2057, 4571, 4572, 4573, 4574, 4575,
4576, 4577, 2058, 2059, 2060, 2061, 2062, 2063, 2064, 4578, 4579,
4580, 4581, 4582, 4583, 4584, 2065, 2066, 2067, 2068, 2069, 2070,
2071, 4585, 4586, 4587, 4588, 4589, 4590, 4591, 2072, 2073, 2074,
2075, 2076, 2077, 2078, 4592, 4593, 4594, 4595, 4596, 4597, 4598,
2079, 2080, 2081, 2082, 2083, 2084, 2085, 4599, 4600, 4601, 4602,
4603, 4604, 4605, 2086, 2087, 2088, 2089, 2090, 2091, 2092, 4606,
4607, 4608, 4609, 4610, 4611, 4612, 2093, 2094, 2095, 2096, 2097,
2098, 2099, 4613, 4614, 4615, 4616, 4617, 4618, 4619, 2100, 2101,
2102, 2103, 2104, 2105, 2106, 4620, 4621, 4622, 4623, 4624, 4625,
4626, 2107, 2108, 2109, 2110, 2111, 2112, 2113, 4627, 4628, 4629,
4630, 4631, 4632, 4633, 2114, 2115, 2116, 2117, 2118, 2119, 2120,
4634, 4635, 4636, 4637, 4638, 4639, 4640, 2121, 2122, 2123, 2124,
2125, 2126, 2127, 4641, 4642, 4643, 4644, 4645, 4646, 4647, 2128,
2129, 2130, 2131, 2132, 2133, 2134, 4648, 4649, 4650, 4651, 4652,
4653, 4654, 2135, 2136, 2137, 2138, 2139, 2140, 2141, 4655, 4656,
4657, 4658, 4659, 4660, 4661, 2142, 2143, 2144, 2145, 2146, 2147,
2148, 4662, 4663, 4664, 4665, 4666, 4667, 4668, 2149, 2150, 2151,
2152, 2153, 2154, 2155, 4669, 4670, 4671, 4672, 4673, 4674, 4675,
2156, 2157, 2158, 2159, 2160, 2161, 2162, 4676, 4677, 4678, 4679,
4680, 4681, 4682, 2163, 2164, 2165, 2166, 2167, 2168, 2169, 4683,
4684, 4685, 4686, 4687, 4688, 4689, 2170, 2171, 2172, 2173, 2174,
2175, 2176, 4690, 4691, 4692, 4693, 4694, 4695, 4696, 2177, 2178,
2179, 2180, 2181, 2182, 2183, 4697, 4698, 4699, 4700, 4701, 4702,
4703, 2184, 2185, 2186, 2187, 2188, 2189, 2190, 4704, 4705, 4706,
4707, 4708, 4709, 4710, 2191, 2192, 2193, 2194, 2195, 2196, 2197,
4711, 4712, 4713, 4714, 4715, 4716, 4717, 2198, 2199, 2200, 2201,
2202, 2203, 2204, 4718, 4719, 4720, 4721, 4722, 4723, 4724, 2205,
2206, 2207, 2208, 2209, 2210, 2211, 4725, 4726, 4727, 4728, 4729,
4730, 4731, 2212, 2213, 2214, 2215, 2216, 2217, 2218, 4732, 4733,
4734, 4735, 4736, 4737, 4738, 2219, 2220, 2221, 2222, 2223, 2224,
2225, 4739, 4740, 4741, 4742, 4743, 4744, 4745, 2226, 2227, 2228,
2229, 2230, 2231, 2232, 4746, 4747, 4748, 4749, 4750, 4751, 4752,
2233, 2234, 2235, 2236, 2237, 2238, 2239, 4753, 4754, 4755, 4756,
4757, 4758, 4759, 2240, 2241, 2242, 2243, 2244, 2245, 2246, 4760,
4761, 4762, 4763, 4764, 4765, 4766, 2247, 2248, 2249, 2250, 2251,
2252, 2253, 4767, 4768, 4769, 4770, 4771, 4772, 4773, 2254, 2255,
2256, 2257, 2258, 2259, 2260, 4774, 4775, 4776, 4777, 4778, 4779,
4780, 2261, 2262, 2263, 2264, 2265, 2266, 2267, 4781, 4782, 4783,
4784, 4785, 4786, 4787, 2268, 2269, 2270, 2271, 2272, 2273, 2274,
4788, 4789, 4790, 4791, 4792, 4793, 4794, 2275, 2276, 2277, 2278,
2279, 2280, 2281, 4795, 4796, 4797, 4798, 4799, 4800, 4801, 2282,
2283, 2284, 2285, 2286, 2287, 2288, 4802, 4803, 4804, 4805, 4806,
4807, 4808, 2289, 2290, 2291, 2292, 2293, 2294, 2295, 4809, 4810,
4811, 4812, 4813, 4814, 4815, 2296, 2297, 2298, 2299, 2300, 2301,
2302, 4816, 4817, 4818, 4819, 4820, 4821, 4822, 2303, 2304, 2305,
2306, 2307, 2308, 2309, 4823, 4824, 4825, 4826, 4827, 4828, 4829,
2310, 2311, 2312, 2313, 2314, 2315, 2316, 4830, 4831, 4832, 4833,
4834, 4835, 4836, 2317, 2318, 2319, 2320, 2321, 2322, 2323, 4837,
4838, 4839, 4840, 4841, 4842, 4843, 2324, 2325, 2326, 2327, 2328,
2329, 2330, 4844, 4845, 4846, 4847, 4848, 4849, 4850, 2331, 2332,
2333, 2334, 2335, 2336, 2337, 4851, 4852, 4853, 4854, 4855, 4856,
4857, 2338, 2339, 2340, 2341, 2342, 2343, 2344, 4858, 4859, 4860,
4861, 4862, 4863, 4864, 2345, 2346, 2347, 2348, 2349, 2350, 2351,
4865, 4866, 4867, 4868, 4869, 4870, 4871, 2352, 2353, 2354, 2355,
2356, 2357, 2358, 4872, 4873, 4874, 4875, 4876, 4877, 4878, 2359,
2360, 2361, 2362, 2363, 2364, 2365, 4879, 4880, 4881, 4882, 4883,
4884, 4885, 2366, 2367, 2368, 2369, 2370, 2371, 2372, 4886, 4887,
4888, 4889, 4890, 4891, 4892, 2373, 2374, 2375, 2376, 2377, 2378,
2379, 4893, 4894, 4895, 4896, 4897, 4898, 4899, 2380, 2381, 2382,
2383, 2384, 2385, 2386, 4900, 4901, 4902, 4903, 4904, 4905, 4906,
2387, 2388, 2389, 2390, 2391, 2392, 2393, 4907, 4908, 4909, 4910,
4911, 4912, 4913, 2394, 2395, 2396, 2397, 2398, 2399, 2400, 4914,
4915, 4916, 4917, 4918, 4919, 4920, 2401, 2402, 2403, 2404, 2405,
2406, 2407, 4921, 4922, 4923, 4924, 4925, 4926, 4927, 2408, 2409,
2410, 2411, 2412, 2413, 2414, 4928, 4929, 4930, 4931, 4932, 4933,
4934, 2415, 2416, 2417, 2418, 2419, 2420, 2421, 4935, 4936, 4937,
4938, 4939, 4940, 4941, 2422, 2423, 2424, 2425, 2426, 2427, 2428,
4942, 4943, 4944, 4945, 4946, 4947, 4948, 2429, 2430, 2431, 2432,
2433, 2434, 2435, 4949, 4950, 4951, 4952, 4953, 4954, 4955, 2436,
2437, 2438, 2439, 2440, 2441, 2442, 4956, 4957, 4958, 4959, 4960,
4961, 4962, 2443, 2444, 2445, 2446, 2447, 2448, 2449, 4963, 4964,
4965, 4966, 4967, 4968, 4969, 2450, 2451, 2452, 2453, 2454, 2455,
2456, 4970, 4971, 4972, 4973, 4974, 4975, 4976, 2457, 2458, 2459,
2460, 2461, 2462, 2463, 4977, 4978, 4979, 4980, 4981, 4982, 4983,
2464, 2465, 2466, 2467, 2468, 2469, 2470, 4984, 4985, 4986, 4987,
4988, 4989, 4990, 2471, 2472, 2473, 2474, 2475, 2476, 2477, 4991,
4992, 4993, 4994, 4995, 4996, 4997, 2478, 2479, 2480, 2481, 2482,
2483, 2484, 4998, 4999, 5000, 5001, 5002, 5003, 5004, 2485, 2486,
2487, 2488, 2489, 2490, 2491, 5005, 5006, 5007, 5008, 5009, 5010,
5011, 2492, 2493, 2494, 2495, 2496, 2497, 2498, 5012, 5013, 5014,
5015, 5016, 5017, 5018, 2499, 2500, 2501, 2502, 2503, 2504, 2505,
5019, 5020, 5021, 5022, 5023, 5024, 5025, 2506, 2507, 2508, 2509,
2510, 2511, 2512, 5026, 5027, 5028, 5029, 5030, 5031, 5032, 2513,
2514, 2515, 2516, 2517, 2518, 2519, 5033, 5034, 5035, 5036, 5037,
5038, 5039}));
}
TEST(TransposeTest, TestRefOps6D6) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5, 6, 7},
{1, 2, 3, 4, 5, 0}),
ElementsAreArray(
{0, 2520, 1, 2521, 2, 2522, 3, 2523, 4, 2524, 5,
2525, 6, 2526, 7, 2527, 8, 2528, 9, 2529, 10, 2530,
11, 2531, 12, 2532, 13, 2533, 14, 2534, 15, 2535, 16,
2536, 17, 2537, 18, 2538, 19, 2539, 20, 2540, 21, 2541,
22, 2542, 23, 2543, 24, 2544, 25, 2545, 26, 2546, 27,
2547, 28, 2548, 29, 2549, 30, 2550, 31, 2551, 32, 2552,
33, 2553, 34, 2554, 35, 2555, 36, 2556, 37, 2557, 38,
2558, 39, 2559, 40, 2560, 41, 2561, 42, 2562, 43, 2563,
44, 2564, 45, 2565, 46, 2566, 47, 2567, 48, 2568, 49,
2569, 50, 2570, 51, 2571, 52, 2572, 53, 2573, 54, 2574,
55, 2575, 56, 2576, 57, 2577, 58, 2578, 59, 2579, 60,
2580, 61, 2581, 62, 2582, 63, 2583, 64, 2584, 65, 2585,
66, 2586, 67, 2587, 68, 2588, 69, 2589, 70, 2590, 71,
2591, 72, 2592, 73, 2593, 74, 2594, 75, 2595, 76, 2596,
77, 2597, 78, 2598, 79, 2599, 80, 2600, 81, 2601, 82,
2602, 83, 2603, 84, 2604, 85, 2605, 86, 2606, 87, 2607,
88, 2608, 89, 2609, 90, 2610, 91, 2611, 92, 2612, 93,
2613, 94, 2614, 95, 2615, 96, 2616, 97, 2617, 98, 2618,
99, 2619, 100, 2620, 101, 2621, 102, 2622, 103, 2623, 104,
2624, 105, 2625, 106, 2626, 107, 2627, 108, 2628, 109, 2629,
110, 2630, 111, 2631, 112, 2632, 113, 2633, 114, 2634, 115,
2635, 116, 2636, 117, 2637, 118, 2638, 119, 2639, 120, 2640,
121, 2641, 122, 2642, 123, 2643, 124, 2644, 125, 2645, 126,
2646, 127, 2647, 128, 2648, 129, 2649, 130, 2650, 131, 2651,
132, 2652, 133, 2653, 134, 2654, 135, 2655, 136, 2656, 137,
2657, 138, 2658, 139, 2659, 140, 2660, 141, 2661, 142, 2662,
143, 2663, 144, 2664, 145, 2665, 146, 2666, 147, 2667, 148,
2668, 149, 2669, 150, 2670, 151, 2671, 152, 2672, 153, 2673,
154, 2674, 155, 2675, 156, 2676, 157, 2677, 158, 2678, 159,
2679, 160, 2680, 161, 2681, 162, 2682, 163, 2683, 164, 2684,
165, 2685, 166, 2686, 167, 2687, 168, 2688, 169, 2689, 170,
2690, 171, 2691, 172, 2692, 173, 2693, 174, 2694, 175, 2695,
176, 2696, 177, 2697, 178, 2698, 179, 2699, 180, 2700, 181,
2701, 182, 2702, 183, 2703, 184, 2704, 185, 2705, 186, 2706,
187, 2707, 188, 2708, 189, 2709, 190, 2710, 191, 2711, 192,
2712, 193, 2713, 194, 2714, 195, 2715, 196, 2716, 197, 2717,
198, 2718, 199, 2719, 200, 2720, 201, 2721, 202, 2722, 203,
2723, 204, 2724, 205, 2725, 206, 2726, 207, 2727, 208, 2728,
209, 2729, 210, 2730, 211, 2731, 212, 2732, 213, 2733, 214,
2734, 215, 2735, 216, 2736, 217, 2737, 218, 2738, 219, 2739,
220, 2740, 221, 2741, 222, 2742, 223, 2743, 224, 2744, 225,
2745, 226, 2746, 227, 2747, 228, 2748, 229, 2749, 230, 2750,
231, 2751, 232, 2752, 233, 2753, 234, 2754, 235, 2755, 236,
2756, 237, 2757, 238, 2758, 239, 2759, 240, 2760, 241, 2761,
242, 2762, 243, 2763, 244, 2764, 245, 2765, 246, 2766, 247,
2767, 248, 2768, 249, 2769, 250, 2770, 251, 2771, 252, 2772,
253, 2773, 254, 2774, 255, 2775, 256, 2776, 257, 2777, 258,
2778, 259, 2779, 260, 2780, 261, 2781, 262, 2782, 263, 2783,
264, 2784, 265, 2785, 266, 2786, 267, 2787, 268, 2788, 269,
2789, 270, 2790, 271, 2791, 272, 2792, 273, 2793, 274, 2794,
275, 2795, 276, 2796, 277, 2797, 278, 2798, 279, 2799, 280,
2800, 281, 2801, 282, 2802, 283, 2803, 284, 2804, 285, 2805,
286, 2806, 287, 2807, 288, 2808, 289, 2809, 290, 2810, 291,
2811, 292, 2812, 293, 2813, 294, 2814, 295, 2815, 296, 2816,
297, 2817, 298, 2818, 299, 2819, 300, 2820, 301, 2821, 302,
2822, 303, 2823, 304, 2824, 305, 2825, 306, 2826, 307, 2827,
308, 2828, 309, 2829, 310, 2830, 311, 2831, 312, 2832, 313,
2833, 314, 2834, 315, 2835, 316, 2836, 317, 2837, 318, 2838,
319, 2839, 320, 2840, 321, 2841, 322, 2842, 323, 2843, 324,
2844, 325, 2845, 326, 2846, 327, 2847, 328, 2848, 329, 2849,
330, 2850, 331, 2851, 332, 2852, 333, 2853, 334, 2854, 335,
2855, 336, 2856, 337, 2857, 338, 2858, 339, 2859, 340, 2860,
341, 2861, 342, 2862, 343, 2863, 344, 2864, 345, 2865, 346,
2866, 347, 2867, 348, 2868, 349, 2869, 350, 2870, 351, 2871,
352, 2872, 353, 2873, 354, 2874, 355, 2875, 356, 2876, 357,
2877, 358, 2878, 359, 2879, 360, 2880, 361, 2881, 362, 2882,
363, 2883, 364, 2884, 365, 2885, 366, 2886, 367, 2887, 368,
2888, 369, 2889, 370, 2890, 371, 2891, 372, 2892, 373, 2893,
374, 2894, 375, 2895, 376, 2896, 377, 2897, 378, 2898, 379,
2899, 380, 2900, 381, 2901, 382, 2902, 383, 2903, 384, 2904,
385, 2905, 386, 2906, 387, 2907, 388, 2908, 389, 2909, 390,
2910, 391, 2911, 392, 2912, 393, 2913, 394, 2914, 395, 2915,
396, 2916, 397, 2917, 398, 2918, 399, 2919, 400, 2920, 401,
2921, 402, 2922, 403, 2923, 404, 2924, 405, 2925, 406, 2926,
407, 2927, 408, 2928, 409, 2929, 410, 2930, 411, 2931, 412,
2932, 413, 2933, 414, 2934, 415, 2935, 416, 2936, 417, 2937,
418, 2938, 419, 2939, 420, 2940, 421, 2941, 422, 2942, 423,
2943, 424, 2944, 425, 2945, 426, 2946, 427, 2947, 428, 2948,
429, 2949, 430, 2950, 431, 2951, 432, 2952, 433, 2953, 434,
2954, 435, 2955, 436, 2956, 437, 2957, 438, 2958, 439, 2959,
440, 2960, 441, 2961, 442, 2962, 443, 2963, 444, 2964, 445,
2965, 446, 2966, 447, 2967, 448, 2968, 449, 2969, 450, 2970,
451, 2971, 452, 2972, 453, 2973, 454, 2974, 455, 2975, 456,
2976, 457, 2977, 458, 2978, 459, 2979, 460, 2980, 461, 2981,
462, 2982, 463, 2983, 464, 2984, 465, 2985, 466, 2986, 467,
2987, 468, 2988, 469, 2989, 470, 2990, 471, 2991, 472, 2992,
473, 2993, 474, 2994, 475, 2995, 476, 2996, 477, 2997, 478,
2998, 479, 2999, 480, 3000, 481, 3001, 482, 3002, 483, 3003,
484, 3004, 485, 3005, 486, 3006, 487, 3007, 488, 3008, 489,
3009, 490, 3010, 491, 3011, 492, 3012, 493, 3013, 494, 3014,
495, 3015, 496, 3016, 497, 3017, 498, 3018, 499, 3019, 500,
3020, 501, 3021, 502, 3022, 503, 3023, 504, 3024, 505, 3025,
506, 3026, 507, 3027, 508, 3028, 509, 3029, 510, 3030, 511,
3031, 512, 3032, 513, 3033, 514, 3034, 515, 3035, 516, 3036,
517, 3037, 518, 3038, 519, 3039, 520, 3040, 521, 3041, 522,
3042, 523, 3043, 524, 3044, 525, 3045, 526, 3046, 527, 3047,
528, 3048, 529, 3049, 530, 3050, 531, 3051, 532, 3052, 533,
3053, 534, 3054, 535, 3055, 536, 3056, 537, 3057, 538, 3058,
539, 3059, 540, 3060, 541, 3061, 542, 3062, 543, 3063, 544,
3064, 545, 3065, 546, 3066, 547, 3067, 548, 3068, 549, 3069,
550, 3070, 551, 3071, 552, 3072, 553, 3073, 554, 3074, 555,
3075, 556, 3076, 557, 3077, 558, 3078, 559, 3079, 560, 3080,
561, 3081, 562, 3082, 563, 3083, 564, 3084, 565, 3085, 566,
3086, 567, 3087, 568, 3088, 569, 3089, 570, 3090, 571, 3091,
572, 3092, 573, 3093, 574, 3094, 575, 3095, 576, 3096, 577,
3097, 578, 3098, 579, 3099, 580, 3100, 581, 3101, 582, 3102,
583, 3103, 584, 3104, 585, 3105, 586, 3106, 587, 3107, 588,
3108, 589, 3109, 590, 3110, 591, 3111, 592, 3112, 593, 3113,
594, 3114, 595, 3115, 596, 3116, 597, 3117, 598, 3118, 599,
3119, 600, 3120, 601, 3121, 602, 3122, 603, 3123, 604, 3124,
605, 3125, 606, 3126, 607, 3127, 608, 3128, 609, 3129, 610,
3130, 611, 3131, 612, 3132, 613, 3133, 614, 3134, 615, 3135,
616, 3136, 617, 3137, 618, 3138, 619, 3139, 620, 3140, 621,
3141, 622, 3142, 623, 3143, 624, 3144, 625, 3145, 626, 3146,
627, 3147, 628, 3148, 629, 3149, 630, 3150, 631, 3151, 632,
3152, 633, 3153, 634, 3154, 635, 3155, 636, 3156, 637, 3157,
638, 3158, 639, 3159, 640, 3160, 641, 3161, 642, 3162, 643,
3163, 644, 3164, 645, 3165, 646, 3166, 647, 3167, 648, 3168,
649, 3169, 650, 3170, 651, 3171, 652, 3172, 653, 3173, 654,
3174, 655, 3175, 656, 3176, 657, 3177, 658, 3178, 659, 3179,
660, 3180, 661, 3181, 662, 3182, 663, 3183, 664, 3184, 665,
3185, 666, 3186, 667, 3187, 668, 3188, 669, 3189, 670, 3190,
671, 3191, 672, 3192, 673, 3193, 674, 3194, 675, 3195, 676,
3196, 677, 3197, 678, 3198, 679, 3199, 680, 3200, 681, 3201,
682, 3202, 683, 3203, 684, 3204, 685, 3205, 686, 3206, 687,
3207, 688, 3208, 689, 3209, 690, 3210, 691, 3211, 692, 3212,
693, 3213, 694, 3214, 695, 3215, 696, 3216, 697, 3217, 698,
3218, 699, 3219, 700, 3220, 701, 3221, 702, 3222, 703, 3223,
704, 3224, 705, 3225, 706, 3226, 707, 3227, 708, 3228, 709,
3229, 710, 3230, 711, 3231, 712, 3232, 713, 3233, 714, 3234,
715, 3235, 716, 3236, 717, 3237, 718, 3238, 719, 3239, 720,
3240, 721, 3241, 722, 3242, 723, 3243, 724, 3244, 725, 3245,
726, 3246, 727, 3247, 728, 3248, 729, 3249, 730, 3250, 731,
3251, 732, 3252, 733, 3253, 734, 3254, 735, 3255, 736, 3256,
737, 3257, 738, 3258, 739, 3259, 740, 3260, 741, 3261, 742,
3262, 743, 3263, 744, 3264, 745, 3265, 746, 3266, 747, 3267,
748, 3268, 749, 3269, 750, 3270, 751, 3271, 752, 3272, 753,
3273, 754, 3274, 755, 3275, 756, 3276, 757, 3277, 758, 3278,
759, 3279, 760, 3280, 761, 3281, 762, 3282, 763, 3283, 764,
3284, 765, 3285, 766, 3286, 767, 3287, 768, 3288, 769, 3289,
770, 3290, 771, 3291, 772, 3292, 773, 3293, 774, 3294, 775,
3295, 776, 3296, 777, 3297, 778, 3298, 779, 3299, 780, 3300,
781, 3301, 782, 3302, 783, 3303, 784, 3304, 785, 3305, 786,
3306, 787, 3307, 788, 3308, 789, 3309, 790, 3310, 791, 3311,
792, 3312, 793, 3313, 794, 3314, 795, 3315, 796, 3316, 797,
3317, 798, 3318, 799, 3319, 800, 3320, 801, 3321, 802, 3322,
803, 3323, 804, 3324, 805, 3325, 806, 3326, 807, 3327, 808,
3328, 809, 3329, 810, 3330, 811, 3331, 812, 3332, 813, 3333,
814, 3334, 815, 3335, 816, 3336, 817, 3337, 818, 3338, 819,
3339, 820, 3340, 821, 3341, 822, 3342, 823, 3343, 824, 3344,
825, 3345, 826, 3346, 827, 3347, 828, 3348, 829, 3349, 830,
3350, 831, 3351, 832, 3352, 833, 3353, 834, 3354, 835, 3355,
836, 3356, 837, 3357, 838, 3358, 839, 3359, 840, 3360, 841,
3361, 842, 3362, 843, 3363, 844, 3364, 845, 3365, 846, 3366,
847, 3367, 848, 3368, 849, 3369, 850, 3370, 851, 3371, 852,
3372, 853, 3373, 854, 3374, 855, 3375, 856, 3376, 857, 3377,
858, 3378, 859, 3379, 860, 3380, 861, 3381, 862, 3382, 863,
3383, 864, 3384, 865, 3385, 866, 3386, 867, 3387, 868, 3388,
869, 3389, 870, 3390, 871, 3391, 872, 3392, 873, 3393, 874,
3394, 875, 3395, 876, 3396, 877, 3397, 878, 3398, 879, 3399,
880, 3400, 881, 3401, 882, 3402, 883, 3403, 884, 3404, 885,
3405, 886, 3406, 887, 3407, 888, 3408, 889, 3409, 890, 3410,
891, 3411, 892, 3412, 893, 3413, 894, 3414, 895, 3415, 896,
3416, 897, 3417, 898, 3418, 899, 3419, 900, 3420, 901, 3421,
902, 3422, 903, 3423, 904, 3424, 905, 3425, 906, 3426, 907,
3427, 908, 3428, 909, 3429, 910, 3430, 911, 3431, 912, 3432,
913, 3433, 914, 3434, 915, 3435, 916, 3436, 917, 3437, 918,
3438, 919, 3439, 920, 3440, 921, 3441, 922, 3442, 923, 3443,
924, 3444, 925, 3445, 926, 3446, 927, 3447, 928, 3448, 929,
3449, 930, 3450, 931, 3451, 932, 3452, 933, 3453, 934, 3454,
935, 3455, 936, 3456, 937, 3457, 938, 3458, 939, 3459, 940,
3460, 941, 3461, 942, 3462, 943, 3463, 944, 3464, 945, 3465,
946, 3466, 947, 3467, 948, 3468, 949, 3469, 950, 3470, 951,
3471, 952, 3472, 953, 3473, 954, 3474, 955, 3475, 956, 3476,
957, 3477, 958, 3478, 959, 3479, 960, 3480, 961, 3481, 962,
3482, 963, 3483, 964, 3484, 965, 3485, 966, 3486, 967, 3487,
968, 3488, 969, 3489, 970, 3490, 971, 3491, 972, 3492, 973,
3493, 974, 3494, 975, 3495, 976, 3496, 977, 3497, 978, 3498,
979, 3499, 980, 3500, 981, 3501, 982, 3502, 983, 3503, 984,
3504, 985, 3505, 986, 3506, 987, 3507, 988, 3508, 989, 3509,
990, 3510, 991, 3511, 992, 3512, 993, 3513, 994, 3514, 995,
3515, 996, 3516, 997, 3517, 998, 3518, 999, 3519, 1000, 3520,
1001, 3521, 1002, 3522, 1003, 3523, 1004, 3524, 1005, 3525, 1006,
3526, 1007, 3527, 1008, 3528, 1009, 3529, 1010, 3530, 1011, 3531,
1012, 3532, 1013, 3533, 1014, 3534, 1015, 3535, 1016, 3536, 1017,
3537, 1018, 3538, 1019, 3539, 1020, 3540, 1021, 3541, 1022, 3542,
1023, 3543, 1024, 3544, 1025, 3545, 1026, 3546, 1027, 3547, 1028,
3548, 1029, 3549, 1030, 3550, 1031, 3551, 1032, 3552, 1033, 3553,
1034, 3554, 1035, 3555, 1036, 3556, 1037, 3557, 1038, 3558, 1039,
3559, 1040, 3560, 1041, 3561, 1042, 3562, 1043, 3563, 1044, 3564,
1045, 3565, 1046, 3566, 1047, 3567, 1048, 3568, 1049, 3569, 1050,
3570, 1051, 3571, 1052, 3572, 1053, 3573, 1054, 3574, 1055, 3575,
1056, 3576, 1057, 3577, 1058, 3578, 1059, 3579, 1060, 3580, 1061,
3581, 1062, 3582, 1063, 3583, 1064, 3584, 1065, 3585, 1066, 3586,
1067, 3587, 1068, 3588, 1069, 3589, 1070, 3590, 1071, 3591, 1072,
3592, 1073, 3593, 1074, 3594, 1075, 3595, 1076, 3596, 1077, 3597,
1078, 3598, 1079, 3599, 1080, 3600, 1081, 3601, 1082, 3602, 1083,
3603, 1084, 3604, 1085, 3605, 1086, 3606, 1087, 3607, 1088, 3608,
1089, 3609, 1090, 3610, 1091, 3611, 1092, 3612, 1093, 3613, 1094,
3614, 1095, 3615, 1096, 3616, 1097, 3617, 1098, 3618, 1099, 3619,
1100, 3620, 1101, 3621, 1102, 3622, 1103, 3623, 1104, 3624, 1105,
3625, 1106, 3626, 1107, 3627, 1108, 3628, 1109, 3629, 1110, 3630,
1111, 3631, 1112, 3632, 1113, 3633, 1114, 3634, 1115, 3635, 1116,
3636, 1117, 3637, 1118, 3638, 1119, 3639, 1120, 3640, 1121, 3641,
1122, 3642, 1123, 3643, 1124, 3644, 1125, 3645, 1126, 3646, 1127,
3647, 1128, 3648, 1129, 3649, 1130, 3650, 1131, 3651, 1132, 3652,
1133, 3653, 1134, 3654, 1135, 3655, 1136, 3656, 1137, 3657, 1138,
3658, 1139, 3659, 1140, 3660, 1141, 3661, 1142, 3662, 1143, 3663,
1144, 3664, 1145, 3665, 1146, 3666, 1147, 3667, 1148, 3668, 1149,
3669, 1150, 3670, 1151, 3671, 1152, 3672, 1153, 3673, 1154, 3674,
1155, 3675, 1156, 3676, 1157, 3677, 1158, 3678, 1159, 3679, 1160,
3680, 1161, 3681, 1162, 3682, 1163, 3683, 1164, 3684, 1165, 3685,
1166, 3686, 1167, 3687, 1168, 3688, 1169, 3689, 1170, 3690, 1171,
3691, 1172, 3692, 1173, 3693, 1174, 3694, 1175, 3695, 1176, 3696,
1177, 3697, 1178, 3698, 1179, 3699, 1180, 3700, 1181, 3701, 1182,
3702, 1183, 3703, 1184, 3704, 1185, 3705, 1186, 3706, 1187, 3707,
1188, 3708, 1189, 3709, 1190, 3710, 1191, 3711, 1192, 3712, 1193,
3713, 1194, 3714, 1195, 3715, 1196, 3716, 1197, 3717, 1198, 3718,
1199, 3719, 1200, 3720, 1201, 3721, 1202, 3722, 1203, 3723, 1204,
3724, 1205, 3725, 1206, 3726, 1207, 3727, 1208, 3728, 1209, 3729,
1210, 3730, 1211, 3731, 1212, 3732, 1213, 3733, 1214, 3734, 1215,
3735, 1216, 3736, 1217, 3737, 1218, 3738, 1219, 3739, 1220, 3740,
1221, 3741, 1222, 3742, 1223, 3743, 1224, 3744, 1225, 3745, 1226,
3746, 1227, 3747, 1228, 3748, 1229, 3749, 1230, 3750, 1231, 3751,
1232, 3752, 1233, 3753, 1234, 3754, 1235, 3755, 1236, 3756, 1237,
3757, 1238, 3758, 1239, 3759, 1240, 3760, 1241, 3761, 1242, 3762,
1243, 3763, 1244, 3764, 1245, 3765, 1246, 3766, 1247, 3767, 1248,
3768, 1249, 3769, 1250, 3770, 1251, 3771, 1252, 3772, 1253, 3773,
1254, 3774, 1255, 3775, 1256, 3776, 1257, 3777, 1258, 3778, 1259,
3779, 1260, 3780, 1261, 3781, 1262, 3782, 1263, 3783, 1264, 3784,
1265, 3785, 1266, 3786, 1267, 3787, 1268, 3788, 1269, 3789, 1270,
3790, 1271, 3791, 1272, 3792, 1273, 3793, 1274, 3794, 1275, 3795,
1276, 3796, 1277, 3797, 1278, 3798, 1279, 3799, 1280, 3800, 1281,
3801, 1282, 3802, 1283, 3803, 1284, 3804, 1285, 3805, 1286, 3806,
1287, 3807, 1288, 3808, 1289, 3809, 1290, 3810, 1291, 3811, 1292,
3812, 1293, 3813, 1294, 3814, 1295, 3815, 1296, 3816, 1297, 3817,
1298, 3818, 1299, 3819, 1300, 3820, 1301, 3821, 1302, 3822, 1303,
3823, 1304, 3824, 1305, 3825, 1306, 3826, 1307, 3827, 1308, 3828,
1309, 3829, 1310, 3830, 1311, 3831, 1312, 3832, 1313, 3833, 1314,
3834, 1315, 3835, 1316, 3836, 1317, 3837, 1318, 3838, 1319, 3839,
1320, 3840, 1321, 3841, 1322, 3842, 1323, 3843, 1324, 3844, 1325,
3845, 1326, 3846, 1327, 3847, 1328, 3848, 1329, 3849, 1330, 3850,
1331, 3851, 1332, 3852, 1333, 3853, 1334, 3854, 1335, 3855, 1336,
3856, 1337, 3857, 1338, 3858, 1339, 3859, 1340, 3860, 1341, 3861,
1342, 3862, 1343, 3863, 1344, 3864, 1345, 3865, 1346, 3866, 1347,
3867, 1348, 3868, 1349, 3869, 1350, 3870, 1351, 3871, 1352, 3872,
1353, 3873, 1354, 3874, 1355, 3875, 1356, 3876, 1357, 3877, 1358,
3878, 1359, 3879, 1360, 3880, 1361, 3881, 1362, 3882, 1363, 3883,
1364, 3884, 1365, 3885, 1366, 3886, 1367, 3887, 1368, 3888, 1369,
3889, 1370, 3890, 1371, 3891, 1372, 3892, 1373, 3893, 1374, 3894,
1375, 3895, 1376, 3896, 1377, 3897, 1378, 3898, 1379, 3899, 1380,
3900, 1381, 3901, 1382, 3902, 1383, 3903, 1384, 3904, 1385, 3905,
1386, 3906, 1387, 3907, 1388, 3908, 1389, 3909, 1390, 3910, 1391,
3911, 1392, 3912, 1393, 3913, 1394, 3914, 1395, 3915, 1396, 3916,
1397, 3917, 1398, 3918, 1399, 3919, 1400, 3920, 1401, 3921, 1402,
3922, 1403, 3923, 1404, 3924, 1405, 3925, 1406, 3926, 1407, 3927,
1408, 3928, 1409, 3929, 1410, 3930, 1411, 3931, 1412, 3932, 1413,
3933, 1414, 3934, 1415, 3935, 1416, 3936, 1417, 3937, 1418, 3938,
1419, 3939, 1420, 3940, 1421, 3941, 1422, 3942, 1423, 3943, 1424,
3944, 1425, 3945, 1426, 3946, 1427, 3947, 1428, 3948, 1429, 3949,
1430, 3950, 1431, 3951, 1432, 3952, 1433, 3953, 1434, 3954, 1435,
3955, 1436, 3956, 1437, 3957, 1438, 3958, 1439, 3959, 1440, 3960,
1441, 3961, 1442, 3962, 1443, 3963, 1444, 3964, 1445, 3965, 1446,
3966, 1447, 3967, 1448, 3968, 1449, 3969, 1450, 3970, 1451, 3971,
1452, 3972, 1453, 3973, 1454, 3974, 1455, 3975, 1456, 3976, 1457,
3977, 1458, 3978, 1459, 3979, 1460, 3980, 1461, 3981, 1462, 3982,
1463, 3983, 1464, 3984, 1465, 3985, 1466, 3986, 1467, 3987, 1468,
3988, 1469, 3989, 1470, 3990, 1471, 3991, 1472, 3992, 1473, 3993,
1474, 3994, 1475, 3995, 1476, 3996, 1477, 3997, 1478, 3998, 1479,
3999, 1480, 4000, 1481, 4001, 1482, 4002, 1483, 4003, 1484, 4004,
1485, 4005, 1486, 4006, 1487, 4007, 1488, 4008, 1489, 4009, 1490,
4010, 1491, 4011, 1492, 4012, 1493, 4013, 1494, 4014, 1495, 4015,
1496, 4016, 1497, 4017, 1498, 4018, 1499, 4019, 1500, 4020, 1501,
4021, 1502, 4022, 1503, 4023, 1504, 4024, 1505, 4025, 1506, 4026,
1507, 4027, 1508, 4028, 1509, 4029, 1510, 4030, 1511, 4031, 1512,
4032, 1513, 4033, 1514, 4034, 1515, 4035, 1516, 4036, 1517, 4037,
1518, 4038, 1519, 4039, 1520, 4040, 1521, 4041, 1522, 4042, 1523,
4043, 1524, 4044, 1525, 4045, 1526, 4046, 1527, 4047, 1528, 4048,
1529, 4049, 1530, 4050, 1531, 4051, 1532, 4052, 1533, 4053, 1534,
4054, 1535, 4055, 1536, 4056, 1537, 4057, 1538, 4058, 1539, 4059,
1540, 4060, 1541, 4061, 1542, 4062, 1543, 4063, 1544, 4064, 1545,
4065, 1546, 4066, 1547, 4067, 1548, 4068, 1549, 4069, 1550, 4070,
1551, 4071, 1552, 4072, 1553, 4073, 1554, 4074, 1555, 4075, 1556,
4076, 1557, 4077, 1558, 4078, 1559, 4079, 1560, 4080, 1561, 4081,
1562, 4082, 1563, 4083, 1564, 4084, 1565, 4085, 1566, 4086, 1567,
4087, 1568, 4088, 1569, 4089, 1570, 4090, 1571, 4091, 1572, 4092,
1573, 4093, 1574, 4094, 1575, 4095, 1576, 4096, 1577, 4097, 1578,
4098, 1579, 4099, 1580, 4100, 1581, 4101, 1582, 4102, 1583, 4103,
1584, 4104, 1585, 4105, 1586, 4106, 1587, 4107, 1588, 4108, 1589,
4109, 1590, 4110, 1591, 4111, 1592, 4112, 1593, 4113, 1594, 4114,
1595, 4115, 1596, 4116, 1597, 4117, 1598, 4118, 1599, 4119, 1600,
4120, 1601, 4121, 1602, 4122, 1603, 4123, 1604, 4124, 1605, 4125,
1606, 4126, 1607, 4127, 1608, 4128, 1609, 4129, 1610, 4130, 1611,
4131, 1612, 4132, 1613, 4133, 1614, 4134, 1615, 4135, 1616, 4136,
1617, 4137, 1618, 4138, 1619, 4139, 1620, 4140, 1621, 4141, 1622,
4142, 1623, 4143, 1624, 4144, 1625, 4145, 1626, 4146, 1627, 4147,
1628, 4148, 1629, 4149, 1630, 4150, 1631, 4151, 1632, 4152, 1633,
4153, 1634, 4154, 1635, 4155, 1636, 4156, 1637, 4157, 1638, 4158,
1639, 4159, 1640, 4160, 1641, 4161, 1642, 4162, 1643, 4163, 1644,
4164, 1645, 4165, 1646, 4166, 1647, 4167, 1648, 4168, 1649, 4169,
1650, 4170, 1651, 4171, 1652, 4172, 1653, 4173, 1654, 4174, 1655,
4175, 1656, 4176, 1657, 4177, 1658, 4178, 1659, 4179, 1660, 4180,
1661, 4181, 1662, 4182, 1663, 4183, 1664, 4184, 1665, 4185, 1666,
4186, 1667, 4187, 1668, 4188, 1669, 4189, 1670, 4190, 1671, 4191,
1672, 4192, 1673, 4193, 1674, 4194, 1675, 4195, 1676, 4196, 1677,
4197, 1678, 4198, 1679, 4199, 1680, 4200, 1681, 4201, 1682, 4202,
1683, 4203, 1684, 4204, 1685, 4205, 1686, 4206, 1687, 4207, 1688,
4208, 1689, 4209, 1690, 4210, 1691, 4211, 1692, 4212, 1693, 4213,
1694, 4214, 1695, 4215, 1696, 4216, 1697, 4217, 1698, 4218, 1699,
4219, 1700, 4220, 1701, 4221, 1702, 4222, 1703, 4223, 1704, 4224,
1705, 4225, 1706, 4226, 1707, 4227, 1708, 4228, 1709, 4229, 1710,
4230, 1711, 4231, 1712, 4232, 1713, 4233, 1714, 4234, 1715, 4235,
1716, 4236, 1717, 4237, 1718, 4238, 1719, 4239, 1720, 4240, 1721,
4241, 1722, 4242, 1723, 4243, 1724, 4244, 1725, 4245, 1726, 4246,
1727, 4247, 1728, 4248, 1729, 4249, 1730, 4250, 1731, 4251, 1732,
4252, 1733, 4253, 1734, 4254, 1735, 4255, 1736, 4256, 1737, 4257,
1738, 4258, 1739, 4259, 1740, 4260, 1741, 4261, 1742, 4262, 1743,
4263, 1744, 4264, 1745, 4265, 1746, 4266, 1747, 4267, 1748, 4268,
1749, 4269, 1750, 4270, 1751, 4271, 1752, 4272, 1753, 4273, 1754,
4274, 1755, 4275, 1756, 4276, 1757, 4277, 1758, 4278, 1759, 4279,
1760, 4280, 1761, 4281, 1762, 4282, 1763, 4283, 1764, 4284, 1765,
4285, 1766, 4286, 1767, 4287, 1768, 4288, 1769, 4289, 1770, 4290,
1771, 4291, 1772, 4292, 1773, 4293, 1774, 4294, 1775, 4295, 1776,
4296, 1777, 4297, 1778, 4298, 1779, 4299, 1780, 4300, 1781, 4301,
1782, 4302, 1783, 4303, 1784, 4304, 1785, 4305, 1786, 4306, 1787,
4307, 1788, 4308, 1789, 4309, 1790, 4310, 1791, 4311, 1792, 4312,
1793, 4313, 1794, 4314, 1795, 4315, 1796, 4316, 1797, 4317, 1798,
4318, 1799, 4319, 1800, 4320, 1801, 4321, 1802, 4322, 1803, 4323,
1804, 4324, 1805, 4325, 1806, 4326, 1807, 4327, 1808, 4328, 1809,
4329, 1810, 4330, 1811, 4331, 1812, 4332, 1813, 4333, 1814, 4334,
1815, 4335, 1816, 4336, 1817, 4337, 1818, 4338, 1819, 4339, 1820,
4340, 1821, 4341, 1822, 4342, 1823, 4343, 1824, 4344, 1825, 4345,
1826, 4346, 1827, 4347, 1828, 4348, 1829, 4349, 1830, 4350, 1831,
4351, 1832, 4352, 1833, 4353, 1834, 4354, 1835, 4355, 1836, 4356,
1837, 4357, 1838, 4358, 1839, 4359, 1840, 4360, 1841, 4361, 1842,
4362, 1843, 4363, 1844, 4364, 1845, 4365, 1846, 4366, 1847, 4367,
1848, 4368, 1849, 4369, 1850, 4370, 1851, 4371, 1852, 4372, 1853,
4373, 1854, 4374, 1855, 4375, 1856, 4376, 1857, 4377, 1858, 4378,
1859, 4379, 1860, 4380, 1861, 4381, 1862, 4382, 1863, 4383, 1864,
4384, 1865, 4385, 1866, 4386, 1867, 4387, 1868, 4388, 1869, 4389,
1870, 4390, 1871, 4391, 1872, 4392, 1873, 4393, 1874, 4394, 1875,
4395, 1876, 4396, 1877, 4397, 1878, 4398, 1879, 4399, 1880, 4400,
1881, 4401, 1882, 4402, 1883, 4403, 1884, 4404, 1885, 4405, 1886,
4406, 1887, 4407, 1888, 4408, 1889, 4409, 1890, 4410, 1891, 4411,
1892, 4412, 1893, 4413, 1894, 4414, 1895, 4415, 1896, 4416, 1897,
4417, 1898, 4418, 1899, 4419, 1900, 4420, 1901, 4421, 1902, 4422,
1903, 4423, 1904, 4424, 1905, 4425, 1906, 4426, 1907, 4427, 1908,
4428, 1909, 4429, 1910, 4430, 1911, 4431, 1912, 4432, 1913, 4433,
1914, 4434, 1915, 4435, 1916, 4436, 1917, 4437, 1918, 4438, 1919,
4439, 1920, 4440, 1921, 4441, 1922, 4442, 1923, 4443, 1924, 4444,
1925, 4445, 1926, 4446, 1927, 4447, 1928, 4448, 1929, 4449, 1930,
4450, 1931, 4451, 1932, 4452, 1933, 4453, 1934, 4454, 1935, 4455,
1936, 4456, 1937, 4457, 1938, 4458, 1939, 4459, 1940, 4460, 1941,
4461, 1942, 4462, 1943, 4463, 1944, 4464, 1945, 4465, 1946, 4466,
1947, 4467, 1948, 4468, 1949, 4469, 1950, 4470, 1951, 4471, 1952,
4472, 1953, 4473, 1954, 4474, 1955, 4475, 1956, 4476, 1957, 4477,
1958, 4478, 1959, 4479, 1960, 4480, 1961, 4481, 1962, 4482, 1963,
4483, 1964, 4484, 1965, 4485, 1966, 4486, 1967, 4487, 1968, 4488,
1969, 4489, 1970, 4490, 1971, 4491, 1972, 4492, 1973, 4493, 1974,
4494, 1975, 4495, 1976, 4496, 1977, 4497, 1978, 4498, 1979, 4499,
1980, 4500, 1981, 4501, 1982, 4502, 1983, 4503, 1984, 4504, 1985,
4505, 1986, 4506, 1987, 4507, 1988, 4508, 1989, 4509, 1990, 4510,
1991, 4511, 1992, 4512, 1993, 4513, 1994, 4514, 1995, 4515, 1996,
4516, 1997, 4517, 1998, 4518, 1999, 4519, 2000, 4520, 2001, 4521,
2002, 4522, 2003, 4523, 2004, 4524, 2005, 4525, 2006, 4526, 2007,
4527, 2008, 4528, 2009, 4529, 2010, 4530, 2011, 4531, 2012, 4532,
2013, 4533, 2014, 4534, 2015, 4535, 2016, 4536, 2017, 4537, 2018,
4538, 2019, 4539, 2020, 4540, 2021, 4541, 2022, 4542, 2023, 4543,
2024, 4544, 2025, 4545, 2026, 4546, 2027, 4547, 2028, 4548, 2029,
4549, 2030, 4550, 2031, 4551, 2032, 4552, 2033, 4553, 2034, 4554,
2035, 4555, 2036, 4556, 2037, 4557, 2038, 4558, 2039, 4559, 2040,
4560, 2041, 4561, 2042, 4562, 2043, 4563, 2044, 4564, 2045, 4565,
2046, 4566, 2047, 4567, 2048, 4568, 2049, 4569, 2050, 4570, 2051,
4571, 2052, 4572, 2053, 4573, 2054, 4574, 2055, 4575, 2056, 4576,
2057, 4577, 2058, 4578, 2059, 4579, 2060, 4580, 2061, 4581, 2062,
4582, 2063, 4583, 2064, 4584, 2065, 4585, 2066, 4586, 2067, 4587,
2068, 4588, 2069, 4589, 2070, 4590, 2071, 4591, 2072, 4592, 2073,
4593, 2074, 4594, 2075, 4595, 2076, 4596, 2077, 4597, 2078, 4598,
2079, 4599, 2080, 4600, 2081, 4601, 2082, 4602, 2083, 4603, 2084,
4604, 2085, 4605, 2086, 4606, 2087, 4607, 2088, 4608, 2089, 4609,
2090, 4610, 2091, 4611, 2092, 4612, 2093, 4613, 2094, 4614, 2095,
4615, 2096, 4616, 2097, 4617, 2098, 4618, 2099, 4619, 2100, 4620,
2101, 4621, 2102, 4622, 2103, 4623, 2104, 4624, 2105, 4625, 2106,
4626, 2107, 4627, 2108, 4628, 2109, 4629, 2110, 4630, 2111, 4631,
2112, 4632, 2113, 4633, 2114, 4634, 2115, 4635, 2116, 4636, 2117,
4637, 2118, 4638, 2119, 4639, 2120, 4640, 2121, 4641, 2122, 4642,
2123, 4643, 2124, 4644, 2125, 4645, 2126, 4646, 2127, 4647, 2128,
4648, 2129, 4649, 2130, 4650, 2131, 4651, 2132, 4652, 2133, 4653,
2134, 4654, 2135, 4655, 2136, 4656, 2137, 4657, 2138, 4658, 2139,
4659, 2140, 4660, 2141, 4661, 2142, 4662, 2143, 4663, 2144, 4664,
2145, 4665, 2146, 4666, 2147, 4667, 2148, 4668, 2149, 4669, 2150,
4670, 2151, 4671, 2152, 4672, 2153, 4673, 2154, 4674, 2155, 4675,
2156, 4676, 2157, 4677, 2158, 4678, 2159, 4679, 2160, 4680, 2161,
4681, 2162, 4682, 2163, 4683, 2164, 4684, 2165, 4685, 2166, 4686,
2167, 4687, 2168, 4688, 2169, 4689, 2170, 4690, 2171, 4691, 2172,
4692, 2173, 4693, 2174, 4694, 2175, 4695, 2176, 4696, 2177, 4697,
2178, 4698, 2179, 4699, 2180, 4700, 2181, 4701, 2182, 4702, 2183,
4703, 2184, 4704, 2185, 4705, 2186, 4706, 2187, 4707, 2188, 4708,
2189, 4709, 2190, 4710, 2191, 4711, 2192, 4712, 2193, 4713, 2194,
4714, 2195, 4715, 2196, 4716, 2197, 4717, 2198, 4718, 2199, 4719,
2200, 4720, 2201, 4721, 2202, 4722, 2203, 4723, 2204, 4724, 2205,
4725, 2206, 4726, 2207, 4727, 2208, 4728, 2209, 4729, 2210, 4730,
2211, 4731, 2212, 4732, 2213, 4733, 2214, 4734, 2215, 4735, 2216,
4736, 2217, 4737, 2218, 4738, 2219, 4739, 2220, 4740, 2221, 4741,
2222, 4742, 2223, 4743, 2224, 4744, 2225, 4745, 2226, 4746, 2227,
4747, 2228, 4748, 2229, 4749, 2230, 4750, 2231, 4751, 2232, 4752,
2233, 4753, 2234, 4754, 2235, 4755, 2236, 4756, 2237, 4757, 2238,
4758, 2239, 4759, 2240, 4760, 2241, 4761, 2242, 4762, 2243, 4763,
2244, 4764, 2245, 4765, 2246, 4766, 2247, 4767, 2248, 4768, 2249,
4769, 2250, 4770, 2251, 4771, 2252, 4772, 2253, 4773, 2254, 4774,
2255, 4775, 2256, 4776, 2257, 4777, 2258, 4778, 2259, 4779, 2260,
4780, 2261, 4781, 2262, 4782, 2263, 4783, 2264, 4784, 2265, 4785,
2266, 4786, 2267, 4787, 2268, 4788, 2269, 4789, 2270, 4790, 2271,
4791, 2272, 4792, 2273, 4793, 2274, 4794, 2275, 4795, 2276, 4796,
2277, 4797, 2278, 4798, 2279, 4799, 2280, 4800, 2281, 4801, 2282,
4802, 2283, 4803, 2284, 4804, 2285, 4805, 2286, 4806, 2287, 4807,
2288, 4808, 2289, 4809, 2290, 4810, 2291, 4811, 2292, 4812, 2293,
4813, 2294, 4814, 2295, 4815, 2296, 4816, 2297, 4817, 2298, 4818,
2299, 4819, 2300, 4820, 2301, 4821, 2302, 4822, 2303, 4823, 2304,
4824, 2305, 4825, 2306, 4826, 2307, 4827, 2308, 4828, 2309, 4829,
2310, 4830, 2311, 4831, 2312, 4832, 2313, 4833, 2314, 4834, 2315,
4835, 2316, 4836, 2317, 4837, 2318, 4838, 2319, 4839, 2320, 4840,
2321, 4841, 2322, 4842, 2323, 4843, 2324, 4844, 2325, 4845, 2326,
4846, 2327, 4847, 2328, 4848, 2329, 4849, 2330, 4850, 2331, 4851,
2332, 4852, 2333, 4853, 2334, 4854, 2335, 4855, 2336, 4856, 2337,
4857, 2338, 4858, 2339, 4859, 2340, 4860, 2341, 4861, 2342, 4862,
2343, 4863, 2344, 4864, 2345, 4865, 2346, 4866, 2347, 4867, 2348,
4868, 2349, 4869, 2350, 4870, 2351, 4871, 2352, 4872, 2353, 4873,
2354, 4874, 2355, 4875, 2356, 4876, 2357, 4877, 2358, 4878, 2359,
4879, 2360, 4880, 2361, 4881, 2362, 4882, 2363, 4883, 2364, 4884,
2365, 4885, 2366, 4886, 2367, 4887, 2368, 4888, 2369, 4889, 2370,
4890, 2371, 4891, 2372, 4892, 2373, 4893, 2374, 4894, 2375, 4895,
2376, 4896, 2377, 4897, 2378, 4898, 2379, 4899, 2380, 4900, 2381,
4901, 2382, 4902, 2383, 4903, 2384, 4904, 2385, 4905, 2386, 4906,
2387, 4907, 2388, 4908, 2389, 4909, 2390, 4910, 2391, 4911, 2392,
4912, 2393, 4913, 2394, 4914, 2395, 4915, 2396, 4916, 2397, 4917,
2398, 4918, 2399, 4919, 2400, 4920, 2401, 4921, 2402, 4922, 2403,
4923, 2404, 4924, 2405, 4925, 2406, 4926, 2407, 4927, 2408, 4928,
2409, 4929, 2410, 4930, 2411, 4931, 2412, 4932, 2413, 4933, 2414,
4934, 2415, 4935, 2416, 4936, 2417, 4937, 2418, 4938, 2419, 4939,
2420, 4940, 2421, 4941, 2422, 4942, 2423, 4943, 2424, 4944, 2425,
4945, 2426, 4946, 2427, 4947, 2428, 4948, 2429, 4949, 2430, 4950,
2431, 4951, 2432, 4952, 2433, 4953, 2434, 4954, 2435, 4955, 2436,
4956, 2437, 4957, 2438, 4958, 2439, 4959, 2440, 4960, 2441, 4961,
2442, 4962, 2443, 4963, 2444, 4964, 2445, 4965, 2446, 4966, 2447,
4967, 2448, 4968, 2449, 4969, 2450, 4970, 2451, 4971, 2452, 4972,
2453, 4973, 2454, 4974, 2455, 4975, 2456, 4976, 2457, 4977, 2458,
4978, 2459, 4979, 2460, 4980, 2461, 4981, 2462, 4982, 2463, 4983,
2464, 4984, 2465, 4985, 2466, 4986, 2467, 4987, 2468, 4988, 2469,
4989, 2470, 4990, 2471, 4991, 2472, 4992, 2473, 4993, 2474, 4994,
2475, 4995, 2476, 4996, 2477, 4997, 2478, 4998, 2479, 4999, 2480,
5000, 2481, 5001, 2482, 5002, 2483, 5003, 2484, 5004, 2485, 5005,
2486, 5006, 2487, 5007, 2488, 5008, 2489, 5009, 2490, 5010, 2491,
5011, 2492, 5012, 2493, 5013, 2494, 5014, 2495, 5015, 2496, 5016,
2497, 5017, 2498, 5018, 2499, 5019, 2500, 5020, 2501, 5021, 2502,
5022, 2503, 5023, 2504, 5024, 2505, 5025, 2506, 5026, 2507, 5027,
2508, 5028, 2509, 5029, 2510, 5030, 2511, 5031, 2512, 5032, 2513,
5033, 2514, 5034, 2515, 5035, 2516, 5036, 2517, 5037, 2518, 5038,
2519, 5039}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/transpose_test_utils.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/transpose_test_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
82311f9f-96cf-4304-b0ea-6fd681e6896c | cpp | tensorflow/tensorflow | cpu_backend_threadpool | tensorflow/lite/kernels/cpu_backend_threadpool.h | tensorflow/lite/kernels/cpu_backend_threadpool_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_CPU_BACKEND_THREADPOOL_H_
#define TENSORFLOW_LITE_KERNELS_CPU_BACKEND_THREADPOOL_H_
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#ifdef TFLITE_WITH_RUY
#include "ruy/context.h"
#include "ruy/thread_pool.h"
#else
#include "public/gemmlowp.h"
#endif
namespace tflite {
namespace cpu_backend_threadpool {
#ifdef TFLITE_WITH_RUY
using Task = ruy::Task;
// Runs `tasks_count` tasks from `tasks` on the ruy thread pool owned by
// `cpu_backend_context`. Blocks until all tasks have completed.
template <typename TaskType>
void Execute(int tasks_count, TaskType* tasks,
             CpuBackendContext* cpu_backend_context) {
  // Never request more concurrent tasks than the context was configured for.
  TFLITE_DCHECK_LE(tasks_count, cpu_backend_context->max_num_threads());
  auto* thread_pool =
      cpu_backend_context->ruy_context()->mutable_thread_pool();
  thread_pool->Execute(tasks_count, tasks);
}
#else
using Task = gemmlowp::Task;
// Runs `tasks_count` tasks from `tasks` on the gemmlowp workers pool owned
// by `cpu_backend_context`. Blocks until all tasks have completed.
template <typename TaskType>
void Execute(int tasks_count, TaskType* tasks,
             CpuBackendContext* cpu_backend_context) {
  // Never request more concurrent tasks than the context was configured for.
  TFLITE_DCHECK_LE(tasks_count, cpu_backend_context->max_num_threads());
  auto* workers_pool = cpu_backend_context->gemmlowp_context()->workers_pool();
  workers_pool->Execute(tasks_count, tasks);
}
#endif
}
}
#endif | #include "tensorflow/lite/kernels/cpu_backend_threadpool.h"
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/cpu_backend_context.h"
namespace tflite {
namespace {
// Thread-pool task that writes the values [start, end) into
// buffer[start..end), i.e. each slot receives its own index.
class TestGenerateArrayOfIncrementingIntsTask
    : public cpu_backend_threadpool::Task {
 public:
  TestGenerateArrayOfIncrementingIntsTask(int* buffer, int start, int end)
      : buffer_(buffer), start_(start), end_(end) {}
  // Invoked by the thread pool on some worker thread.
  void Run() override {
    int index = start_;
    while (index < end_) {
      buffer_[index] = index;
      ++index;
    }
  }

 private:
  int* buffer_;  // Not owned; must outlive the task.
  int start_;
  int end_;
};
// Splits a buffer of `size` ints into `num_threads` contiguous chunks,
// fills each chunk on the thread pool with its own indices (buffer[i] == i),
// then verifies the whole buffer.
void TestGenerateArrayOfIncrementingInts(int num_threads, int size) {
  std::vector<int> buffer(size);
  std::vector<TestGenerateArrayOfIncrementingIntsTask> tasks;
  const int chunk = size / num_threads;
  int begin = 0;
  for (int thread = 0; thread < num_threads; thread++) {
    // The last task absorbs the remainder when size % num_threads != 0.
    const int finish = (thread == num_threads - 1) ? size : begin + chunk;
    tasks.emplace_back(buffer.data(), begin, finish);
    begin = finish;
  }
  ASSERT_EQ(num_threads, tasks.size());
  CpuBackendContext context;
  context.SetMaxNumThreads(num_threads);
  cpu_backend_threadpool::Execute(tasks.size(), tasks.data(), &context);
  for (int i = 0; i < size; i++) {
    ASSERT_EQ(buffer[i], i);
  }
}
// Degenerate case: a single thread runs the whole buffer in one task.
TEST(CpuBackendThreadpoolTest, OneThreadSize100) {
  TestGenerateArrayOfIncrementingInts(1, 100);
}
// 1000000 % 3 != 0, so this also exercises the last task absorbing the
// remainder of an uneven split.
TEST(CpuBackendThreadpoolTest, ThreeThreadsSize1000000) {
  TestGenerateArrayOfIncrementingInts(3, 1000000);
}
// Many threads with a size that does not divide evenly across them.
TEST(CpuBackendThreadpoolTest, TenThreadsSize1234567) {
  TestGenerateArrayOfIncrementingInts(10, 1234567);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/cpu_backend_threadpool.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/cpu_backend_threadpool_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
fc8bfaa4-1e18-4a7e-9928-d841f1cf4f11 | cpp | tensorflow/tensorflow | stablehlo_reduce_window_test_util | tensorflow/lite/kernels/stablehlo_reduce_window_test_util.h | tensorflow/lite/kernels/stablehlo_reduce_window_test_util_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_STABLEHLO_REDUCE_WINDOW_TEST_UTIL_H_
#define TENSORFLOW_LITE_KERNELS_STABLEHLO_REDUCE_WINDOW_TEST_UTIL_H_
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <initializer_list>
#include <numeric>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
namespace tflite {
namespace reduce_window {
namespace reference {
constexpr int kMaxDims = 6;
// Minimal dense tensor used by the reduce_window reference implementation:
// a shape vector plus row-major data, supporting at most kMaxDims dims.
template <class T>
struct Tensor {
  std::vector<int64_t> shape;
  std::vector<T> data;
  // Builds a tensor of the given shape with every element set to init_value.
  static Tensor<T> FromShape(std::vector<int64_t> shape,
                             const T init_value = 0) {
    // Aggregate init fills `shape`; `data` stays empty until resized below.
    Tensor tensor{std::move(shape)};
    tensor.data.resize(tensor.size(), init_value);
    return tensor;
  }
  // Builds a tensor of the given shape filled with 1, 2, 3, ... in row-major
  // order.
  template <class I>
  static Tensor<T> iota(std::initializer_list<I> shape) {
    Tensor<T> tensor;
    tensor.shape.assign(shape.begin(), shape.end());
    tensor.data.resize(absl::c_accumulate(shape, 1, std::multiplies<>()));
    absl::c_iota(tensor.data, 1);
    return tensor;
  }
  // Total number of elements: the product of all dimensions (1 for rank 0).
  int64_t size() const {
    return absl::c_accumulate(shape, 1, std::multiplies<>());
  }
  // Row-major strides padded out to kMaxDims entries; entries past the
  // tensor's rank stay 0.
  std::vector<int64_t> Strides() const {
    std::vector<int64_t> strides(kMaxDims, 0);
    if (!shape.empty()) {
      strides[shape.size() - 1] = 1;
      for (size_t i = shape.size() - 1; i > 0; --i) {
        strides[i - 1] = shape[i] * strides[i];
      }
    }
    return strides;
  }
};
// Returns `vec` resized to exactly kMaxDims entries, padding with `val`.
// Note that a vector longer than kMaxDims is truncated by resize().
inline std::vector<int64_t> ExtendToMaxDim(std::vector<int64_t> vec,
                                           int64_t val = 0) {
  vec.resize(kMaxDims, val);
  return vec;
}
// Returns the shape of a tensor after interior dilation: a dimension of
// extent s dilated by factor d spans (s - 1) * d + 1 elements. If any
// resulting extent is non-positive the tensor is empty, signalled by an
// all-zero shape.
inline std::vector<int64_t> DilateShape(std::vector<int64_t> shape,
                                        const std::vector<int64_t> dilations) {
  for (size_t dim = 0; dim < shape.size(); ++dim) {
    shape[dim] = (shape[dim] - 1) * dilations[dim] + 1;
  }
  const bool degenerate = std::any_of(shape.begin(), shape.end(),
                                      [](int64_t s) { return s <= 0; });
  if (degenerate) {
    std::fill(shape.begin(), shape.end(), 0);
  }
  return shape;
}
// Expands `input` by inserting `dilations[i] - 1` copies of `padding_value`
// between consecutive elements along dimension i (interior/base dilation).
// Returns an all-zero-shape tensor when the dilated shape is degenerate.
template <class T>
Tensor<T> Dilate(const Tensor<T>& input, const std::vector<int64_t>& dilations,
                 const T padding_value) {
  Tensor<T> output =
      Tensor<T>::FromShape(DilateShape(input.shape, dilations), padding_value);
  // DilateShape collapsed a degenerate shape to all-zero: nothing to copy.
  if (absl::c_all_of(output.shape, [](auto s) { return s == 0; })) {
    return output;
  }
  const std::vector<int64_t> strides = input.Strides();
  const std::vector<int64_t> output_strides = output.Strides();
  // Pad to kMaxDims so the fixed six-deep loop nest below can index every
  // dimension. Missing dimensions get extent 0 / dilation 0 but still
  // iterate exactly once because of the do/while structure, and index 0
  // times dilation 0 contributes nothing to the offsets.
  const std::vector<int64_t> safe_dilations = ExtendToMaxDim(dilations);
  const std::vector<int64_t> safe_input_shape = ExtendToMaxDim(input.shape);
  // Walk every input coordinate (a..f) and scatter each element to the
  // output coordinate scaled by the per-dimension dilation factor.
  int a = 0;
  do {
    int b = 0;
    do {
      int c = 0;
      do {
        int d = 0;
        do {
          int e = 0;
          do {
            int f = 0;
            do {
              const int i_idx = a * strides[0] + b * strides[1] +
                                c * strides[2] + d * strides[3] +
                                e * strides[4] + f * strides[5];
              const int o_idx = a * safe_dilations[0] * output_strides[0] +
                                b * safe_dilations[1] * output_strides[1] +
                                c * safe_dilations[2] * output_strides[2] +
                                d * safe_dilations[3] * output_strides[3] +
                                e * safe_dilations[4] * output_strides[4] +
                                f * safe_dilations[5] * output_strides[5];
              output.data[o_idx] = input.data[i_idx];
            } while (++f < safe_input_shape[5]);
          } while (++e < safe_input_shape[4]);
        } while (++d < safe_input_shape[3]);
      } while (++c < safe_input_shape[2]);
    } while (++b < safe_input_shape[1]);
  } while (++a < safe_input_shape[0]);
  return output;
}
// Returns the shape after applying edge padding/cropping: dimension i grows
// by its low (padding[2*i]) and high (padding[2*i + 1]) amounts; negative
// amounts shrink it. If any resulting extent is non-positive the tensor is
// empty, signalled by an all-zero shape.
inline std::vector<int64_t> PadCropShape(std::vector<int64_t> shape,
                                         const std::vector<int64_t> padding) {
  for (size_t dim = 0; dim < shape.size(); ++dim) {
    shape[dim] += padding[2 * dim] + padding[2 * dim + 1];
  }
  const bool degenerate = std::any_of(shape.begin(), shape.end(),
                                      [](int64_t s) { return s <= 0; });
  if (degenerate) {
    std::fill(shape.begin(), shape.end(), 0);
  }
  return shape;
}
// Pads `input` with `padding_value` using only the non-negative entries of
// `padding` ({lo, hi} per dimension). Negative entries (cropping) are
// clamped to 0 here; Crop() handles them separately.
template <class T>
Tensor<T> Pad(const Tensor<T>& input, const std::vector<int64_t>& padding,
              const T padding_value) {
  // Clamp every amount to >= 0 and extend to kMaxDims {lo, hi} pairs.
  std::vector<int64_t> safe_padding(kMaxDims * 2, 0);
  absl::c_transform(padding, safe_padding.begin(),
                    [](int64_t p) { return std::max<int64_t>(p, 0); });
  Tensor<T> output = Tensor<T>::FromShape(
      PadCropShape(input.shape, safe_padding), padding_value);
  if (absl::c_all_of(output.shape, [](auto s) { return s == 0; })) {
    return output;
  }
  const std::vector<int64_t> strides = input.Strides();
  const std::vector<int64_t> output_strides = output.Strides();
  const std::vector<int64_t> safe_input_shape = ExtendToMaxDim(input.shape);
  // Copy every input element into the output, shifted by each dimension's
  // low padding (the even-indexed entries of safe_padding).
  int a = 0;
  do {
    int b = 0;
    do {
      int c = 0;
      do {
        int d = 0;
        do {
          int e = 0;
          do {
            int f = 0;
            do {
              const int i_idx = a * strides[0] + b * strides[1] +
                                c * strides[2] + d * strides[3] +
                                e * strides[4] + f * strides[5];
              const int o_idx = (a + safe_padding[0]) * output_strides[0] +
                                (b + safe_padding[2]) * output_strides[1] +
                                (c + safe_padding[4]) * output_strides[2] +
                                (d + safe_padding[6]) * output_strides[3] +
                                (e + safe_padding[8]) * output_strides[4] +
                                (f + safe_padding[10]) * output_strides[5];
              output.data[o_idx] = input.data[i_idx];
            } while (++f < safe_input_shape[5]);
          } while (++e < safe_input_shape[4]);
        } while (++d < safe_input_shape[3]);
      } while (++c < safe_input_shape[2]);
    } while (++b < safe_input_shape[1]);
  } while (++a < safe_input_shape[0]);
  return output;
}
// Crops `input` using only the negative entries of `cropping` ({lo, hi} per
// dimension). Positive entries (padding) are clamped to 0 here; Pad()
// handles them separately.
template <class T>
Tensor<T> Crop(const Tensor<T>& input, const std::vector<int64_t>& cropping) {
  // Clamp every amount to <= 0 and extend to kMaxDims {lo, hi} pairs.
  std::vector<int64_t> safe_cropping(kMaxDims * 2, 0);
  absl::c_transform(cropping, safe_cropping.begin(),
                    [](int64_t p) { return std::min<int64_t>(p, 0); });
  Tensor<T> output =
      Tensor<T>::FromShape(PadCropShape(input.shape, safe_cropping));
  if (absl::c_all_of(output.shape, [](auto s) { return s == 0; })) {
    return output;
  }
  const std::vector<int64_t> strides = input.Strides();
  const std::vector<int64_t> output_strides = output.Strides();
  const std::vector<int64_t> safe_output_shape = ExtendToMaxDim(output.shape);
  // Iterate output coordinates; the source index is offset by each
  // dimension's low cropping. safe_cropping values are <= 0, hence the
  // subtraction yields a non-negative shift into the input.
  int a = 0;
  do {
    int b = 0;
    do {
      int c = 0;
      do {
        int d = 0;
        do {
          int e = 0;
          do {
            int f = 0;
            do {
              const int i_idx = (a - safe_cropping[0]) * strides[0] +
                                (b - safe_cropping[2]) * strides[1] +
                                (c - safe_cropping[4]) * strides[2] +
                                (d - safe_cropping[6]) * strides[3] +
                                (e - safe_cropping[8]) * strides[4] +
                                (f - safe_cropping[10]) * strides[5];
              const int o_idx = a * output_strides[0] + b * output_strides[1] +
                                c * output_strides[2] + d * output_strides[3] +
                                e * output_strides[4] + f * output_strides[5];
              output.data[o_idx] = input.data[i_idx];
            } while (++f < safe_output_shape[5]);
          } while (++e < safe_output_shape[4]);
        } while (++d < safe_output_shape[3]);
      } while (++c < safe_output_shape[2]);
    } while (++b < safe_output_shape[1]);
  } while (++a < safe_output_shape[0]);
  return output;
}
// Extracts one reduction window from `input` into a fresh tensor of shape
// `window_dimensions`. `window_offset` is the window's start coordinate in
// the input and `window_dilations` the per-dimension element spacing.
template <class T>
Tensor<T> WindowCopy(const Tensor<T>& input,
                     const std::vector<int64_t>& window_dimensions,
                     const std::vector<int64_t>& window_dilations,
                     const std::vector<int64_t>& window_offset) {
  Tensor<T> output = Tensor<T>::FromShape(window_dimensions);
  // Normalize every descriptor to the maximum rank so all six dimensions can
  // be traversed unconditionally; missing dilation entries default to 1.
  const std::vector<int64_t> dims = ExtendToMaxDim(window_dimensions);
  const std::vector<int64_t> dilations = ExtendToMaxDim(window_dilations, 1);
  const std::vector<int64_t> offsets = ExtendToMaxDim(window_offset);
  const std::vector<int64_t> in_strides = input.Strides();
  const std::vector<int64_t> out_strides = output.Strides();
  // Single flattened counter over the six window dimensions; every dimension
  // contributes at least one iteration, matching the do/while nest this
  // implementation replaces.
  int64_t limits[6];
  int64_t total = 1;
  for (int d = 0; d < 6; ++d) {
    limits[d] = dims[d] > 0 ? dims[d] : 1;
    total *= limits[d];
  }
  for (int64_t lin = 0; lin < total; ++lin) {
    int64_t coord[6];
    int64_t rest = lin;
    for (int d = 5; d >= 0; --d) {
      coord[d] = rest % limits[d];
      rest /= limits[d];
    }
    int64_t i_idx = 0;
    int64_t o_idx = 0;
    for (int d = 0; d < 6; ++d) {
      // A window element maps back to the input through the dilation factor
      // plus the window's starting offset.
      i_idx += (coord[d] * dilations[d] + offsets[d]) * in_strides[d];
      o_idx += coord[d] * out_strides[d];
    }
    output.data[o_idx] = input.data[i_idx];
  }
  return output;
}
inline std::vector<int64_t> ReduceWindowShape(
std::vector<int64_t> shape, const std::vector<int64_t>& base_dilations,
const std::vector<int64_t>& padding,
const std::vector<int64_t>& window_dimensions,
const std::vector<int64_t>& window_dilations,
const std::vector<int64_t>& window_strides) {
const std::vector<int64_t> base_shape =
PadCropShape(DilateShape(shape, base_dilations), padding);
const std::vector<int64_t> dilated_window_dimensions =
DilateShape(window_dimensions, window_dilations);
shape.assign(base_shape.size(), 0);
for (int i = 0; i < base_shape.size(); ++i) {
if (base_shape[i] >= dilated_window_dimensions[i]) {
shape[i] =
(base_shape[i] - dilated_window_dimensions[i]) / window_strides[i] +
1;
}
}
return shape;
}
// Reference implementation of stablehlo.reduce_window.
//
// The input is base-dilated, padded (positive entries) and cropped (negative
// entries), then a window of `window_dimensions` elements spaced by
// `window_dilations` is slid across the result with `window_strides`; each
// window is reduced with `body` starting from `init_value`.
//
// - input: source tensor.
// - base_dilations: per-dimension dilation applied to the input itself.
// - padding: interleaved (low, high) padding pairs per dimension.
// - init_value: identity element of the reduction (also fills dilation gaps).
// - body: binary reduction functor, e.g. std::plus<>().
//
// Returns a tensor whose shape is given by ReduceWindowShape.
template <class T, class F>
Tensor<T> ReduceWindow(const Tensor<T>& input,
                       const std::vector<int64_t>& base_dilations,
                       const std::vector<int64_t>& padding, const T& init_value,
                       const std::vector<int64_t>& window_dimensions,
                       const std::vector<int64_t>& window_dilations,
                       const std::vector<int64_t>& window_strides, F&& body) {
  Tensor<T> output = Tensor<T>::FromShape(
      ReduceWindowShape(input.shape, base_dilations, padding, window_dimensions,
                        window_dilations, window_strides),
      init_value);
  // Nothing to compute when no window position is valid.
  if (output.data.empty()) {
    return output;
  }
  const std::vector<int64_t> safe_output_shape = ExtendToMaxDim(output.shape);
  const std::vector<int64_t> safe_window_strides =
      ExtendToMaxDim(window_strides);
  const std::vector<int64_t> output_strides = output.Strides();
  // Materialize the "base" tensor the window slides over: dilate the input,
  // apply the positive padding entries, then the negative (cropping) ones.
  const Tensor<T> dilated = Dilate<T>(input, base_dilations, init_value);
  const Tensor<T> padded = Pad<T>(dilated, padding, init_value);
  const Tensor<T> base = Crop<T>(padded, padding);
  // output_offsets is the coordinate of the output element being produced;
  // window_offsets is the window's start inside `base`, advanced by the
  // stride each time the corresponding output coordinate advances.
  std::vector<int64_t> output_offsets(6, 0);
  std::vector<int64_t> window_offsets(6, 0);
  do {
    output_offsets[1] = 0;
    window_offsets[1] = 0;
    do {
      output_offsets[2] = 0;
      window_offsets[2] = 0;
      do {
        output_offsets[3] = 0;
        window_offsets[3] = 0;
        do {
          output_offsets[4] = 0;
          window_offsets[4] = 0;
          do {
            output_offsets[5] = 0;
            window_offsets[5] = 0;
            do {
              // Linear index of the current output element.
              const int64_t o_idx = output_offsets[0] * output_strides[0] +
                                    output_offsets[1] * output_strides[1] +
                                    output_offsets[2] * output_strides[2] +
                                    output_offsets[3] * output_strides[3] +
                                    output_offsets[4] * output_strides[4] +
                                    output_offsets[5] * output_strides[5];
              // Gather the window's elements, then fold them with `body`.
              // An empty window reduces to the identity value.
              const Tensor<T> window = WindowCopy(
                  base, window_dimensions, window_dilations, window_offsets);
              if (window.data.empty()) {
                output.data[o_idx] = init_value;
              } else {
                output.data[o_idx] = std::accumulate(
                    window.data.begin(), window.data.end(), init_value, body);
              }
              window_offsets[5] += safe_window_strides[5];
            } while (++output_offsets[5] < safe_output_shape[5]);
            window_offsets[4] += safe_window_strides[4];
          } while (++output_offsets[4] < safe_output_shape[4]);
          window_offsets[3] += safe_window_strides[3];
        } while (++output_offsets[3] < safe_output_shape[3]);
        window_offsets[2] += safe_window_strides[2];
      } while (++output_offsets[2] < safe_output_shape[2]);
      window_offsets[1] += safe_window_strides[1];
    } while (++output_offsets[1] < safe_output_shape[1]);
    window_offsets[0] += safe_window_strides[0];
  } while (++output_offsets[0] < safe_output_shape[0]);
  return output;
}
}
}
}
#endif | #include "tensorflow/lite/kernels/stablehlo_reduce_window_test_util.h"
#include <functional>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace tflite::reduce_window::reference {
namespace {
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
// Dilating a 3x3 iota by factors {2, 3} interleaves one padding row and two
// padding columns (value -1) between neighbouring input elements.
TEST(ReferenceTest, DilateWorks) {
  const reference::Tensor<int> input = reference::Tensor<int>::iota({3, 3});
  const reference::Tensor<int> output = reference::Dilate(input, {2, 3}, -1);
  EXPECT_THAT(output.data,
              ElementsAreArray({1, -1, -1, 2, -1, -1, 3,
                                -1, -1, -1, -1, -1, -1, -1,
                                4, -1, -1, 5, -1, -1, 6,
                                -1, -1, -1, -1, -1, -1, -1,
                                7, -1, -1, 8, -1, -1, 9}));
}
// Padding {1, 2, 3, 4} adds 1 row before / 2 after and 3 columns before /
// 4 after, filled with -1.
TEST(ReferenceTest, PadWorks) {
  const reference::Tensor<int> input = reference::Tensor<int>::iota({3, 3});
  const reference::Tensor<int> output =
      reference::Pad(input, {1, 2, 3, 4}, -1);
  EXPECT_THAT(output.data,
              ElementsAreArray({-1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
                                -1, -1, -1, 1, 2, 3, -1, -1, -1, -1,
                                -1, -1, -1, 4, 5, 6, -1, -1, -1, -1,
                                -1, -1, -1, 7, 8, 9, -1, -1, -1, -1,
                                -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
                                -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}));
}
// Negative padding entries are cropping, not padding; Pad must leave the
// tensor untouched when all entries are negative.
TEST(ReferenceTest, PadIgnoresNegativeValues) {
  const reference::Tensor<int> input = reference::Tensor<int>::iota({3, 3});
  const reference::Tensor<int> output =
      reference::Pad(input, {-1, -1, -1, -1}, -1);
  EXPECT_THAT(output.data, ElementsAreArray({1, 2, 3, 4, 5, 6, 7, 8, 9}));
}
// Cropping {-4, -1, -2, -3} removes 4 leading / 1 trailing rows and
// 2 leading / 3 trailing columns, leaving a single row of the 6x10 iota.
TEST(ReferenceTest, CropWorks) {
  const reference::Tensor<int> input = reference::Tensor<int>::iota({6, 10});
  const reference::Tensor<int> output =
      reference::Crop(input, {-4, -1, -2, -3});
  EXPECT_THAT(output.data, ElementsAreArray({43, 44, 45, 46, 47}));
}
// Non-negative cropping entries are ignored; an all-zero crop is a no-op.
TEST(ReferenceTest, CropIgnoresPositiveValues) {
  const reference::Tensor<int> input = reference::Tensor<int>::iota({3, 3});
  const reference::Tensor<int> output = reference::Crop(input, {0, 0, 0, 0});
  EXPECT_THAT(output.data, ElementsAreArray({1, 2, 3, 4, 5, 6, 7, 8, 9}));
}
// A 2x2 window with dilation {2, 2} starting at offset {2, 1} of a 6x4 iota
// picks elements two rows/columns apart.
TEST(ReferenceTest, WindowCopyWorks) {
  const reference::Tensor<int> input = reference::Tensor<int>::iota({6, 4});
  const reference::Tensor<int> window =
      reference::WindowCopy(input, {2, 2}, {2, 2}, {2, 1});
  EXPECT_THAT(window.data, ElementsAreArray({10, 12, 18, 20}));
}
// The RandomJaxReference* cases below appear to be machine-generated: the
// names suggest the expected arrays were captured from a JAX reference run
// of reduce_window on a 10x10 iota input (TODO confirm the generator).
// Treat the constants as golden data; regenerate rather than hand-edit.
// Argument order: input, base_dilations, padding, init_value,
// window_dimensions, window_dilations, window_strides, body.
TEST(ReferenceTest, RandomJaxReference0) {
  // Sum-reduction.
  const Tensor<int> res = ReduceWindow<int>(
      /*input=*/Tensor<int>::iota({10, 10}), /*base_dilations=*/{2, 1},
      /*padding=*/{1, -1, 0, 0}, /*init_value=*/0,
      /*window_dimensions=*/{1, 2}, /*window_dilations=*/{2, 2},
      /*window_strides=*/{1, 1}, /*body=*/std::plus<>());
  EXPECT_THAT(res.shape, ElementsAre(19, 8));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {0, 0, 0, 0, 0, 0, 0, 0, 4, 6, 8, 10, 12, 14, 16, 18,
           0, 0, 0, 0, 0, 0, 0, 0, 24, 26, 28, 30, 32, 34, 36, 38,
           0, 0, 0, 0, 0, 0, 0, 0, 44, 46, 48, 50, 52, 54, 56, 58,
           0, 0, 0, 0, 0, 0, 0, 0, 64, 66, 68, 70, 72, 74, 76, 78,
           0, 0, 0, 0, 0, 0, 0, 0, 84, 86, 88, 90, 92, 94, 96, 98,
           0, 0, 0, 0, 0, 0, 0, 0, 104, 106, 108, 110, 112, 114, 116, 118,
           0, 0, 0, 0, 0, 0, 0, 0, 124, 126, 128, 130, 132, 134, 136, 138,
           0, 0, 0, 0, 0, 0, 0, 0, 144, 146, 148, 150, 152, 154, 156, 158,
           0, 0, 0, 0, 0, 0, 0, 0, 164, 166, 168, 170, 172, 174, 176, 178,
           0, 0, 0, 0, 0, 0, 0, 0}));
}
TEST(ReferenceTest, RandomJaxReference1) {
  // Sum-reduction.
  const Tensor<int> res = ReduceWindow<int>(
      /*input=*/Tensor<int>::iota({10, 10}), /*base_dilations=*/{1, 2},
      /*padding=*/{2, -1, 1, 0}, /*init_value=*/0,
      /*window_dimensions=*/{1, 2}, /*window_dilations=*/{1, 2},
      /*window_strides=*/{2, 1}, /*body=*/std::plus<>());
  EXPECT_THAT(res.shape, ElementsAre(6, 18));
  EXPECT_THAT(
      res.data,
      ElementsAreArray({0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                        0, 0, 0, 0, 0, 3, 0, 5, 0, 7, 0, 9, 0, 11,
                        0, 13, 0, 15, 0, 17, 0, 19, 0, 43, 0, 45, 0, 47,
                        0, 49, 0, 51, 0, 53, 0, 55, 0, 57, 0, 59, 0, 83,
                        0, 85, 0, 87, 0, 89, 0, 91, 0, 93, 0, 95, 0, 97,
                        0, 99, 0, 123, 0, 125, 0, 127, 0, 129, 0, 131, 0, 133,
                        0, 135, 0, 137, 0, 139, 0, 163, 0, 165, 0, 167, 0, 169,
                        0, 171, 0, 173, 0, 175, 0, 177, 0, 179}));
}
TEST(ReferenceTest, RandomJaxReference2) {
  // Max-reduction.
  const Tensor<int> res = ReduceWindow<int>(
      /*input=*/Tensor<int>::iota({10, 10}), /*base_dilations=*/{1, 1},
      /*padding=*/{2, -2, -2, 2}, /*init_value=*/-2147483647,
      /*window_dimensions=*/{2, 2}, /*window_dilations=*/{2, 2},
      /*window_strides=*/{1, 2},
      /*body=*/[](auto a, auto b) { return a >= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(8, 4));
  EXPECT_THAT(res.data,
              ElementsAreArray({5, 7, 9, 9, 15, 17, 19, 19, 25, 27, 29,
                                29, 35, 37, 39, 39, 45, 47, 49, 49, 55, 57,
                                59, 59, 65, 67, 69, 69, 75, 77, 79, 79}));
}
TEST(ReferenceTest, RandomJaxReference3) {
  // Max-reduction.
  const Tensor<int> res = ReduceWindow<int>(
      /*input=*/Tensor<int>::iota({10, 10}), /*base_dilations=*/{1, 2},
      /*padding=*/{0, 1, -1, 1}, /*init_value=*/-2147483647,
      /*window_dimensions=*/{1, 1}, /*window_dilations=*/{1, 2},
      /*window_strides=*/{2, 1},
      /*body=*/[](auto a, auto b) { return a >= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(6, 19));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {-2147483647, 2, -2147483647, 3, -2147483647,
           4, -2147483647, 5, -2147483647, 6,
           -2147483647, 7, -2147483647, 8, -2147483647,
           9, -2147483647, 10, -2147483647, -2147483647,
           22, -2147483647, 23, -2147483647, 24,
           -2147483647, 25, -2147483647, 26, -2147483647,
           27, -2147483647, 28, -2147483647, 29,
           -2147483647, 30, -2147483647, -2147483647, 42,
           -2147483647, 43, -2147483647, 44, -2147483647,
           45, -2147483647, 46, -2147483647, 47,
           -2147483647, 48, -2147483647, 49, -2147483647,
           50, -2147483647, -2147483647, 62, -2147483647,
           63, -2147483647, 64, -2147483647, 65,
           -2147483647, 66, -2147483647, 67, -2147483647,
           68, -2147483647, 69, -2147483647, 70,
           -2147483647, -2147483647, 82, -2147483647, 83,
           -2147483647, 84, -2147483647, 85, -2147483647,
           86, -2147483647, 87, -2147483647, 88,
           -2147483647, 89, -2147483647, 90, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647}));
}
TEST(ReferenceTest, RandomJaxReference4) {
  // Sum-reduction.
  const Tensor<int> res = ReduceWindow<int>(
      /*input=*/Tensor<int>::iota({10, 10}), /*base_dilations=*/{1, 1},
      /*padding=*/{-2, -2, -1, -2}, /*init_value=*/0,
      /*window_dimensions=*/{1, 2}, /*window_dilations=*/{1, 2},
      /*window_strides=*/{2, 2}, /*body=*/std::plus<>());
  EXPECT_THAT(res.shape, ElementsAre(3, 3));
  EXPECT_THAT(res.data,
              ElementsAreArray({46, 50, 54, 86, 90, 94, 126, 130, 134}));
}
TEST(ReferenceTest, RandomJaxReference5) {
  // Product-reduction.
  const Tensor<int> res = ReduceWindow<int>(
      /*input=*/Tensor<int>::iota({10, 10}), /*base_dilations=*/{1, 1},
      /*padding=*/{1, 2, 1, 1}, /*init_value=*/1,
      /*window_dimensions=*/{2, 1}, /*window_dilations=*/{2, 2},
      /*window_strides=*/{1, 2}, /*body=*/std::multiplies<>());
  EXPECT_THAT(res.shape, ElementsAre(11, 6));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {1, 12, 14, 16, 18, 20, 1, 44, 96, 156, 224,
           300, 1, 384, 476, 576, 684, 800, 1, 924, 1056, 1196,
           1344, 1500, 1, 1664, 1836, 2016, 2204, 2400, 1, 2604, 2816,
           3036, 3264, 3500, 1, 3744, 3996, 4256, 4524, 4800, 1, 5084,
           5376, 5676, 5984, 6300, 1, 6624, 6956, 7296, 7644, 8000, 1,
           82, 84, 86, 88, 90, 1, 92, 94, 96, 98, 100}));
}
TEST(ReferenceTest, RandomJaxReference6) {
  // Max-reduction.
  const Tensor<int> res = ReduceWindow<int>(
      /*input=*/Tensor<int>::iota({10, 10}), /*base_dilations=*/{2, 2},
      /*padding=*/{2, -1, 0, -2}, /*init_value=*/-2147483647,
      /*window_dimensions=*/{2, 1}, /*window_dilations=*/{2, 1},
      /*window_strides=*/{2, 1},
      /*body=*/[](auto a, auto b) { return a >= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(9, 17));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {1, -2147483647, 2, -2147483647, 3,
           -2147483647, 4, -2147483647, 5, -2147483647,
           6, -2147483647, 7, -2147483647, 8,
           -2147483647, 9, 11, -2147483647, 12,
           -2147483647, 13, -2147483647, 14, -2147483647,
           15, -2147483647, 16, -2147483647, 17,
           -2147483647, 18, -2147483647, 19, 21,
           -2147483647, 22, -2147483647, 23, -2147483647,
           24, -2147483647, 25, -2147483647, 26,
           -2147483647, 27, -2147483647, 28, -2147483647,
           29, 31, -2147483647, 32, -2147483647,
           33, -2147483647, 34, -2147483647, 35,
           -2147483647, 36, -2147483647, 37, -2147483647,
           38, -2147483647, 39, 41, -2147483647,
           42, -2147483647, 43, -2147483647, 44,
           -2147483647, 45, -2147483647, 46, -2147483647,
           47, -2147483647, 48, -2147483647, 49,
           51, -2147483647, 52, -2147483647, 53,
           -2147483647, 54, -2147483647, 55, -2147483647,
           56, -2147483647, 57, -2147483647, 58,
           -2147483647, 59, 61, -2147483647, 62,
           -2147483647, 63, -2147483647, 64, -2147483647,
           65, -2147483647, 66, -2147483647, 67,
           -2147483647, 68, -2147483647, 69, 71,
           -2147483647, 72, -2147483647, 73, -2147483647,
           74, -2147483647, 75, -2147483647, 76,
           -2147483647, 77, -2147483647, 78, -2147483647,
           79, 81, -2147483647, 82, -2147483647,
           83, -2147483647, 84, -2147483647, 85,
           -2147483647, 86, -2147483647, 87, -2147483647,
           88, -2147483647, 89}));
}
TEST(ReferenceTest, RandomJaxReference7) {
  // Sum-reduction.
  const Tensor<int> res = ReduceWindow<int>(
      /*input=*/Tensor<int>::iota({10, 10}), /*base_dilations=*/{1, 1},
      /*padding=*/{-2, -2, 1, 0}, /*init_value=*/0,
      /*window_dimensions=*/{1, 1}, /*window_dilations=*/{1, 1},
      /*window_strides=*/{2, 1}, /*body=*/std::plus<>());
  EXPECT_THAT(res.shape, ElementsAre(3, 11));
  EXPECT_THAT(res.data,
              ElementsAreArray({0, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
                                0, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
                                0, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70}));
}
TEST(ReferenceTest, RandomJaxReference8) {
  // Max-reduction.
  const Tensor<int> res = ReduceWindow<int>(
      /*input=*/Tensor<int>::iota({10, 10}), /*base_dilations=*/{1, 1},
      /*padding=*/{2, 1, -2, -2}, /*init_value=*/-2147483647,
      /*window_dimensions=*/{1, 2}, /*window_dilations=*/{1, 1},
      /*window_strides=*/{1, 2},
      /*body=*/[](auto a, auto b) { return a >= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(13, 3));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, 4, 6, 8, 14,
           16, 18, 24, 26, 28,
           34, 36, 38, 44, 46,
           48, 54, 56, 58, 64,
           66, 68, 74, 76, 78,
           84, 86, 88, 94, 96,
           98, -2147483647, -2147483647, -2147483647}));
}
TEST(ReferenceTest, RandomJaxReference9) {
  // Max-reduction.
  const Tensor<int> res = ReduceWindow<int>(
      /*input=*/Tensor<int>::iota({10, 10}), /*base_dilations=*/{1, 2},
      /*padding=*/{-1, 2, -2, -2}, /*init_value=*/-2147483647,
      /*window_dimensions=*/{2, 2}, /*window_dilations=*/{2, 1},
      /*window_strides=*/{1, 2},
      /*body=*/[](auto a, auto b) { return a >= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(9, 7));
  EXPECT_THAT(res.data,
              ElementsAreArray(
                  {32, 33, 34, 35, 36, 37, 38, 42, 43, 44, 45, 46, 47,
                   48, 52, 53, 54, 55, 56, 57, 58, 62, 63, 64, 65, 66,
                   67, 68, 72, 73, 74, 75, 76, 77, 78, 82, 83, 84, 85,
                   86, 87, 88, 92, 93, 94, 95, 96, 97, 98, 82, 83, 84,
                   85, 86, 87, 88, 92, 93, 94, 95, 96, 97, 98}));
}
// Generated golden-data cases (see the RandomJaxReference naming: expected
// arrays appear to come from a JAX reference run of reduce_window on a
// 10x10 iota input — TODO confirm). Regenerate rather than hand-edit.
// Argument order: input, base_dilations, padding, init_value,
// window_dimensions, window_dilations, window_strides, body.
TEST(ReferenceTest, RandomJaxReference10) {
  // Sum-reduction.
  const Tensor<int> res = ReduceWindow<int>(
      /*input=*/Tensor<int>::iota({10, 10}), /*base_dilations=*/{2, 2},
      /*padding=*/{0, -1, 0, 2}, /*init_value=*/0,
      /*window_dimensions=*/{2, 2}, /*window_dilations=*/{1, 1},
      /*window_strides=*/{1, 2}, /*body=*/std::plus<>());
  EXPECT_THAT(res.shape, ElementsAre(17, 10));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
           17, 18, 19, 20, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
           23, 24, 25, 26, 27, 28, 29, 30, 21, 22, 23, 24, 25, 26, 27, 28,
           29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 31, 32, 33, 34,
           35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
           41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
           57, 58, 59, 60, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62,
           63, 64, 65, 66, 67, 68, 69, 70, 61, 62, 63, 64, 65, 66, 67, 68,
           69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 71, 72, 73, 74,
           75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
           81, 82, 83, 84, 85, 86, 87, 88, 89, 90}));
}
TEST(ReferenceTest, RandomJaxReference11) {
  // Sum-reduction.
  const Tensor<int> res = ReduceWindow<int>(
      /*input=*/Tensor<int>::iota({10, 10}), /*base_dilations=*/{2, 1},
      /*padding=*/{0, 0, 2, 0}, /*init_value=*/0,
      /*window_dimensions=*/{2, 1}, /*window_dilations=*/{2, 1},
      /*window_strides=*/{2, 2}, /*body=*/std::plus<>());
  EXPECT_THAT(res.shape, ElementsAre(4, 6));
  EXPECT_THAT(res.data,
              ElementsAreArray({0, 22, 26, 30, 34, 38, 0, 62,
                                66, 70, 74, 78, 0, 102, 106, 110,
                                114, 118, 0, 142, 146, 150, 154, 158}));
}
TEST(ReferenceTest, RandomJaxReference12) {
  // Max-reduction.
  const Tensor<int> res = ReduceWindow<int>(
      /*input=*/Tensor<int>::iota({10, 10}), /*base_dilations=*/{2, 1},
      /*padding=*/{1, -2, 1, -2}, /*init_value=*/-2147483647,
      /*window_dimensions=*/{1, 1}, /*window_dilations=*/{2, 2},
      /*window_strides=*/{2, 2},
      /*body=*/[](auto a, auto b) { return a >= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(9, 5));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647}));
}
TEST(ReferenceTest, RandomJaxReference13) {
  // Max-reduction.
  const Tensor<int> res = ReduceWindow<int>(
      /*input=*/Tensor<int>::iota({10, 10}), /*base_dilations=*/{1, 1},
      /*padding=*/{1, 2, 1, -2}, /*init_value=*/-2147483647,
      /*window_dimensions=*/{1, 1}, /*window_dilations=*/{1, 2},
      /*window_strides=*/{1, 2},
      /*body=*/[](auto a, auto b) { return a >= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(13, 5));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, 2, 4, 6, 8,
           -2147483647, 12, 14, 16, 18,
           -2147483647, 22, 24, 26, 28,
           -2147483647, 32, 34, 36, 38,
           -2147483647, 42, 44, 46, 48,
           -2147483647, 52, 54, 56, 58,
           -2147483647, 62, 64, 66, 68,
           -2147483647, 72, 74, 76, 78,
           -2147483647, 82, 84, 86, 88,
           -2147483647, 92, 94, 96, 98,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647}));
}
TEST(ReferenceTest, RandomJaxReference14) {
  // Product-reduction.
  const Tensor<int> res = ReduceWindow<int>(
      /*input=*/Tensor<int>::iota({10, 10}), /*base_dilations=*/{2, 1},
      /*padding=*/{1, 2, 1, -1}, /*init_value=*/1,
      /*window_dimensions=*/{1, 2}, /*window_dilations=*/{2, 1},
      /*window_strides=*/{2, 1}, /*body=*/std::multiplies<>());
  EXPECT_THAT(res.shape, ElementsAre(11, 9));
  EXPECT_THAT(res.data,
              ElementsAreArray(
                  {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                   1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                   1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                   1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                   1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}));
}
TEST(ReferenceTest, RandomJaxReference15) {
  // Min-reduction.
  const Tensor<int> res = ReduceWindow<int>(
      /*input=*/Tensor<int>::iota({10, 10}), /*base_dilations=*/{1, 2},
      /*padding=*/{-2, -2, 1, 2}, /*init_value=*/2147483646,
      /*window_dimensions=*/{1, 2}, /*window_dilations=*/{2, 1},
      /*window_strides=*/{2, 2},
      /*body=*/[](auto a, auto b) { return a <= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(3, 11));
  EXPECT_THAT(
      res.data,
      ElementsAreArray({21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 2147483646,
                        41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 2147483646,
                        61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 2147483646}));
}
TEST(ReferenceTest, RandomJaxReference16) {
  // Min-reduction.
  const Tensor<int> res = ReduceWindow<int>(
      /*input=*/Tensor<int>::iota({10, 10}), /*base_dilations=*/{1, 2},
      /*padding=*/{0, 0, 0, 0}, /*init_value=*/2147483646,
      /*window_dimensions=*/{2, 1}, /*window_dilations=*/{1, 1},
      /*window_strides=*/{2, 1},
      /*body=*/[](auto a, auto b) { return a <= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(5, 19));
  EXPECT_THAT(res.data,
              ElementsAreArray(
                  {1, 2147483646, 2, 2147483646, 3,
                   2147483646, 4, 2147483646, 5, 2147483646,
                   6, 2147483646, 7, 2147483646, 8,
                   2147483646, 9, 2147483646, 10, 21,
                   2147483646, 22, 2147483646, 23, 2147483646,
                   24, 2147483646, 25, 2147483646, 26,
                   2147483646, 27, 2147483646, 28, 2147483646,
                   29, 2147483646, 30, 41, 2147483646,
                   42, 2147483646, 43, 2147483646, 44,
                   2147483646, 45, 2147483646, 46, 2147483646,
                   47, 2147483646, 48, 2147483646, 49,
                   2147483646, 50, 61, 2147483646, 62,
                   2147483646, 63, 2147483646, 64, 2147483646,
                   65, 2147483646, 66, 2147483646, 67,
                   2147483646, 68, 2147483646, 69, 2147483646,
                   70, 81, 2147483646, 82, 2147483646,
                   83, 2147483646, 84, 2147483646, 85,
                   2147483646, 86, 2147483646, 87, 2147483646,
                   88, 2147483646, 89, 2147483646, 90}));
}
TEST(ReferenceTest, RandomJaxReference17) {
  // Min-reduction.
  const Tensor<int> res = ReduceWindow<int>(
      /*input=*/Tensor<int>::iota({10, 10}), /*base_dilations=*/{1, 2},
      /*padding=*/{2, -1, 2, 1}, /*init_value=*/2147483646,
      /*window_dimensions=*/{2, 2}, /*window_dilations=*/{1, 2},
      /*window_strides=*/{1, 1},
      /*body=*/[](auto a, auto b) { return a <= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(10, 20));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           1, 2147483646, 1, 2147483646, 2,
           2147483646, 3, 2147483646, 4, 2147483646,
           5, 2147483646, 6, 2147483646, 7,
           2147483646, 8, 2147483646, 9, 2147483646,
           1, 2147483646, 1, 2147483646, 2,
           2147483646, 3, 2147483646, 4, 2147483646,
           5, 2147483646, 6, 2147483646, 7,
           2147483646, 8, 2147483646, 9, 2147483646,
           11, 2147483646, 11, 2147483646, 12,
           2147483646, 13, 2147483646, 14, 2147483646,
           15, 2147483646, 16, 2147483646, 17,
           2147483646, 18, 2147483646, 19, 2147483646,
           21, 2147483646, 21, 2147483646, 22,
           2147483646, 23, 2147483646, 24, 2147483646,
           25, 2147483646, 26, 2147483646, 27,
           2147483646, 28, 2147483646, 29, 2147483646,
           31, 2147483646, 31, 2147483646, 32,
           2147483646, 33, 2147483646, 34, 2147483646,
           35, 2147483646, 36, 2147483646, 37,
           2147483646, 38, 2147483646, 39, 2147483646,
           41, 2147483646, 41, 2147483646, 42,
           2147483646, 43, 2147483646, 44, 2147483646,
           45, 2147483646, 46, 2147483646, 47,
           2147483646, 48, 2147483646, 49, 2147483646,
           51, 2147483646, 51, 2147483646, 52,
           2147483646, 53, 2147483646, 54, 2147483646,
           55, 2147483646, 56, 2147483646, 57,
           2147483646, 58, 2147483646, 59, 2147483646,
           61, 2147483646, 61, 2147483646, 62,
           2147483646, 63, 2147483646, 64, 2147483646,
           65, 2147483646, 66, 2147483646, 67,
           2147483646, 68, 2147483646, 69, 2147483646,
           71, 2147483646, 71, 2147483646, 72,
           2147483646, 73, 2147483646, 74, 2147483646,
           75, 2147483646, 76, 2147483646, 77,
           2147483646, 78, 2147483646, 79, 2147483646}));
}
TEST(ReferenceTest, RandomJaxReference18) {
  // Product-reduction.
  const Tensor<int> res = ReduceWindow<int>(
      /*input=*/Tensor<int>::iota({10, 10}), /*base_dilations=*/{1, 2},
      /*padding=*/{1, -2, -1, 0}, /*init_value=*/1,
      /*window_dimensions=*/{1, 1}, /*window_dilations=*/{1, 2},
      /*window_strides=*/{1, 1}, /*body=*/std::multiplies<>());
  EXPECT_THAT(res.shape, ElementsAre(9, 18));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
           1, 2, 1, 3, 1, 4, 1, 5, 1, 6, 1, 7, 1, 8, 1, 9, 1, 10,
           1, 12, 1, 13, 1, 14, 1, 15, 1, 16, 1, 17, 1, 18, 1, 19, 1, 20,
           1, 22, 1, 23, 1, 24, 1, 25, 1, 26, 1, 27, 1, 28, 1, 29, 1, 30,
           1, 32, 1, 33, 1, 34, 1, 35, 1, 36, 1, 37, 1, 38, 1, 39, 1, 40,
           1, 42, 1, 43, 1, 44, 1, 45, 1, 46, 1, 47, 1, 48, 1, 49, 1, 50,
           1, 52, 1, 53, 1, 54, 1, 55, 1, 56, 1, 57, 1, 58, 1, 59, 1, 60,
           1, 62, 1, 63, 1, 64, 1, 65, 1, 66, 1, 67, 1, 68, 1, 69, 1, 70,
           1, 72, 1, 73, 1, 74, 1, 75, 1, 76, 1, 77, 1, 78, 1, 79, 1, 80}));
}
TEST(ReferenceTest, RandomJaxReference19) {
  // Min-reduction.
  const Tensor<int> res = ReduceWindow<int>(
      /*input=*/Tensor<int>::iota({10, 10}), /*base_dilations=*/{1, 1},
      /*padding=*/{1, 0, 0, -1}, /*init_value=*/2147483646,
      /*window_dimensions=*/{2, 1}, /*window_dilations=*/{1, 1},
      /*window_strides=*/{1, 1},
      /*body=*/[](auto a, auto b) { return a <= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(10, 9));
  EXPECT_THAT(res.data,
              ElementsAreArray(
                  {1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4,
                   5, 6, 7, 8, 9, 11, 12, 13, 14, 15, 16, 17, 18,
                   19, 21, 22, 23, 24, 25, 26, 27, 28, 29, 31, 32, 33,
                   34, 35, 36, 37, 38, 39, 41, 42, 43, 44, 45, 46, 47,
                   48, 49, 51, 52, 53, 54, 55, 56, 57, 58, 59, 61, 62,
                   63, 64, 65, 66, 67, 68, 69, 71, 72, 73, 74, 75, 76,
                   77, 78, 79, 81, 82, 83, 84, 85, 86, 87, 88, 89}));
}
// Generated golden-data cases (see the RandomJaxReference naming: expected
// arrays appear to come from a JAX reference run of reduce_window on a
// 10x10 iota input — TODO confirm). Regenerate rather than hand-edit.
// Argument order: input, base_dilations, padding, init_value,
// window_dimensions, window_dilations, window_strides, body.
TEST(ReferenceTest, RandomJaxReference20) {
  // Min-reduction.
  const Tensor<int> res = ReduceWindow<int>(
      /*input=*/Tensor<int>::iota({10, 10}), /*base_dilations=*/{2, 1},
      /*padding=*/{1, 2, 1, -1}, /*init_value=*/2147483646,
      /*window_dimensions=*/{2, 2}, /*window_dilations=*/{1, 1},
      /*window_strides=*/{2, 2},
      /*body=*/[](auto a, auto b) { return a <= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(11, 5));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {1, 2, 4, 6, 8, 11, 12,
           14, 16, 18, 21, 22, 24, 26,
           28, 31, 32, 34, 36, 38, 41,
           42, 44, 46, 48, 51, 52, 54,
           56, 58, 61, 62, 64, 66, 68,
           71, 72, 74, 76, 78, 81, 82,
           84, 86, 88, 91, 92, 94, 96,
           98, 2147483646, 2147483646, 2147483646, 2147483646, 2147483646}));
}
TEST(ReferenceTest, RandomJaxReference21) {
  // Sum-reduction.
  const Tensor<int> res = ReduceWindow<int>(
      /*input=*/Tensor<int>::iota({10, 10}), /*base_dilations=*/{1, 2},
      /*padding=*/{1, 0, 1, -1}, /*init_value=*/0,
      /*window_dimensions=*/{2, 2}, /*window_dilations=*/{1, 1},
      /*window_strides=*/{2, 2}, /*body=*/std::plus<>());
  EXPECT_THAT(res.shape, ElementsAre(5, 9));
  EXPECT_THAT(res.data,
              ElementsAreArray({1, 2, 3, 4, 5, 6, 7, 8, 9,
                                32, 34, 36, 38, 40, 42, 44, 46, 48,
                                72, 74, 76, 78, 80, 82, 84, 86, 88,
                                112, 114, 116, 118, 120, 122, 124, 126, 128,
                                152, 154, 156, 158, 160, 162, 164, 166, 168}));
}
TEST(ReferenceTest, RandomJaxReference22) {
  // Max-reduction.
  const Tensor<int> res = ReduceWindow<int>(
      /*input=*/Tensor<int>::iota({10, 10}), /*base_dilations=*/{1, 2},
      /*padding=*/{-2, 2, -2, -2}, /*init_value=*/-2147483647,
      /*window_dimensions=*/{1, 2}, /*window_dilations=*/{1, 2},
      /*window_strides=*/{1, 2},
      /*body=*/[](auto a, auto b) { return a >= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(10, 7));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {23, 24, 25, 26, 27,
           28, 29, 33, 34, 35,
           36, 37, 38, 39, 43,
           44, 45, 46, 47, 48,
           49, 53, 54, 55, 56,
           57, 58, 59, 63, 64,
           65, 66, 67, 68, 69,
           73, 74, 75, 76, 77,
           78, 79, 83, 84, 85,
           86, 87, 88, 89, 93,
           94, 95, 96, 97, 98,
           99, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647}));
}
TEST(ReferenceTest, RandomJaxReference23) {
  // Sum-reduction.
  const Tensor<int> res = ReduceWindow<int>(
      /*input=*/Tensor<int>::iota({10, 10}), /*base_dilations=*/{1, 1},
      /*padding=*/{2, -2, 2, 0}, /*init_value=*/0,
      /*window_dimensions=*/{1, 2}, /*window_dilations=*/{1, 1},
      /*window_strides=*/{1, 1}, /*body=*/std::plus<>());
  EXPECT_THAT(res.shape, ElementsAre(10, 11));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
           0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 3, 5, 7, 9,
           11, 13, 15, 17, 19, 0, 11, 23, 25, 27, 29, 31, 33, 35,
           37, 39, 0, 21, 43, 45, 47, 49, 51, 53, 55, 57, 59, 0,
           31, 63, 65, 67, 69, 71, 73, 75, 77, 79, 0, 41, 83, 85,
           87, 89, 91, 93, 95, 97, 99, 0, 51, 103, 105, 107, 109, 111,
           113, 115, 117, 119, 0, 61, 123, 125, 127, 129, 131, 133, 135, 137,
           139, 0, 71, 143, 145, 147, 149, 151, 153, 155, 157, 159}));
}
TEST(ReferenceTest, RandomJaxReference24) {
  // Sum-reduction.
  const Tensor<int> res = ReduceWindow<int>(
      /*input=*/Tensor<int>::iota({10, 10}), /*base_dilations=*/{2, 1},
      /*padding=*/{2, 2, -2, -2}, /*init_value=*/0,
      /*window_dimensions=*/{2, 1}, /*window_dilations=*/{2, 2},
      /*window_strides=*/{2, 1}, /*body=*/std::plus<>());
  EXPECT_THAT(res.shape, ElementsAre(11, 6));
  EXPECT_THAT(
      res.data,
      ElementsAreArray({3, 4, 5, 6, 7, 8, 16, 18, 20, 22, 24,
                        26, 36, 38, 40, 42, 44, 46, 56, 58, 60, 62,
                        64, 66, 76, 78, 80, 82, 84, 86, 96, 98, 100,
                        102, 104, 106, 116, 118, 120, 122, 124, 126, 136, 138,
                        140, 142, 144, 146, 156, 158, 160, 162, 164, 166, 176,
                        178, 180, 182, 184, 186, 93, 94, 95, 96, 97, 98}));
}
TEST(ReferenceTest, RandomJaxReference25) {
  // Product-reduction.
  const Tensor<int> res = ReduceWindow<int>(
      /*input=*/Tensor<int>::iota({10, 10}), /*base_dilations=*/{2, 1},
      /*padding=*/{2, -1, 2, 2}, /*init_value=*/1,
      /*window_dimensions=*/{2, 1}, /*window_dilations=*/{1, 1},
      /*window_strides=*/{2, 1}, /*body=*/std::multiplies<>());
  EXPECT_THAT(res.shape, ElementsAre(10, 14));
  EXPECT_THAT(
      res.data,
      ElementsAreArray({1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                        1, 1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 1,
                        1, 1, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 1, 1,
                        1, 1, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 1, 1,
                        1, 1, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 1, 1,
                        1, 1, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 1, 1,
                        1, 1, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 1, 1,
                        1, 1, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 1, 1,
                        1, 1, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 1, 1,
                        1, 1, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 1, 1}));
}
TEST(ReferenceTest, RandomJaxReference26) {
  // Max-reduction.
  const Tensor<int> res = ReduceWindow<int>(
      /*input=*/Tensor<int>::iota({10, 10}), /*base_dilations=*/{2, 2},
      /*padding=*/{-1, 1, -1, -2}, /*init_value=*/-2147483647,
      /*window_dimensions=*/{2, 2}, /*window_dilations=*/{2, 2},
      /*window_strides=*/{1, 2},
      /*body=*/[](auto a, auto b) { return a >= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(17, 7));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647}));
}
TEST(ReferenceTest, RandomJaxReference27) {
  // Sum-reduction.
  const Tensor<int> res = ReduceWindow<int>(
      /*input=*/Tensor<int>::iota({10, 10}), /*base_dilations=*/{1, 1},
      /*padding=*/{1, -2, 2, -2}, /*init_value=*/0,
      /*window_dimensions=*/{2, 1}, /*window_dilations=*/{1, 2},
      /*window_strides=*/{1, 2}, /*body=*/std::plus<>());
  EXPECT_THAT(res.shape, ElementsAre(8, 5));
  EXPECT_THAT(res.data,
              ElementsAreArray({0, 1, 3, 5, 7, 0, 12, 16, 20, 24,
                                0, 32, 36, 40, 44, 0, 52, 56, 60, 64,
                                0, 72, 76, 80, 84, 0, 92, 96, 100, 104,
                                0, 112, 116, 120, 124, 0, 132, 136, 140, 144}));
}
TEST(ReferenceTest, RandomJaxReference28) {
  // Product-reduction.
  const Tensor<int> res = ReduceWindow<int>(
      /*input=*/Tensor<int>::iota({10, 10}), /*base_dilations=*/{1, 1},
      /*padding=*/{-2, -2, 0, -2}, /*init_value=*/1,
      /*window_dimensions=*/{1, 1}, /*window_dilations=*/{2, 1},
      /*window_strides=*/{1, 1}, /*body=*/std::multiplies<>());
  EXPECT_THAT(res.shape, ElementsAre(6, 8));
  EXPECT_THAT(res.data,
              ElementsAreArray(
                  {21, 22, 23, 24, 25, 26, 27, 28, 31, 32, 33, 34,
                   35, 36, 37, 38, 41, 42, 43, 44, 45, 46, 47, 48,
                   51, 52, 53, 54, 55, 56, 57, 58, 61, 62, 63, 64,
                   65, 66, 67, 68, 71, 72, 73, 74, 75, 76, 77, 78}));
}
TEST(ReferenceTest, RandomJaxReference29) {
  // Min-reduction.
  const Tensor<int> res = ReduceWindow<int>(
      /*input=*/Tensor<int>::iota({10, 10}), /*base_dilations=*/{1, 2},
      /*padding=*/{-1, -1, 2, 0}, /*init_value=*/2147483646,
      /*window_dimensions=*/{1, 1}, /*window_dilations=*/{1, 1},
      /*window_strides=*/{2, 1},
      /*body=*/[](auto a, auto b) { return a <= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(4, 21));
  EXPECT_THAT(res.data,
              ElementsAreArray(
                  {2147483646, 2147483646, 11, 2147483646, 12,
                   2147483646, 13, 2147483646, 14, 2147483646,
                   15, 2147483646, 16, 2147483646, 17,
                   2147483646, 18, 2147483646, 19, 2147483646,
                   20, 2147483646, 2147483646, 31, 2147483646,
                   32, 2147483646, 33, 2147483646, 34,
                   2147483646, 35, 2147483646, 36, 2147483646,
                   37, 2147483646, 38, 2147483646, 39,
                   2147483646, 40, 2147483646, 2147483646, 51,
                   2147483646, 52, 2147483646, 53, 2147483646,
                   54, 2147483646, 55, 2147483646, 56,
                   2147483646, 57, 2147483646, 58, 2147483646,
                   59, 2147483646, 60, 2147483646, 2147483646,
                   71, 2147483646, 72, 2147483646, 73,
                   2147483646, 74, 2147483646, 75, 2147483646,
                   76, 2147483646, 77, 2147483646, 78,
                   2147483646, 79, 2147483646, 80}));
}
TEST(ReferenceTest, RandomJaxReference30) {
  // Sum-reduction.
  const Tensor<int> res = ReduceWindow<int>(
      /*input=*/Tensor<int>::iota({10, 10}), /*base_dilations=*/{2, 1},
      /*padding=*/{-1, 1, -2, -1}, /*init_value=*/0,
      /*window_dimensions=*/{1, 1}, /*window_dilations=*/{1, 2},
      /*window_strides=*/{2, 2}, /*body=*/std::plus<>());
  EXPECT_THAT(res.shape, ElementsAre(10, 4));
  EXPECT_THAT(res.data,
              ElementsAreArray({0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}));
}
TEST(ReferenceTest, RandomJaxReference31) {
  // Max-reduction.
  const Tensor<int> res = ReduceWindow<int>(
      /*input=*/Tensor<int>::iota({10, 10}), /*base_dilations=*/{1, 2},
      /*padding=*/{-2, 1, -1, -2}, /*init_value=*/-2147483647,
      /*window_dimensions=*/{1, 1}, /*window_dilations=*/{2, 2},
      /*window_strides=*/{2, 1},
      /*body=*/[](auto a, auto b) { return a >= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(5, 16));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {-2147483647, 22, -2147483647, 23, -2147483647,
           24, -2147483647, 25, -2147483647, 26,
           -2147483647, 27, -2147483647, 28, -2147483647,
           29, -2147483647, 42, -2147483647, 43,
           -2147483647, 44, -2147483647, 45, -2147483647,
           46, -2147483647, 47, -2147483647, 48,
           -2147483647, 49, -2147483647, 62, -2147483647,
           63, -2147483647, 64, -2147483647, 65,
           -2147483647, 66, -2147483647, 67, -2147483647,
           68, -2147483647, 69, -2147483647, 82,
           -2147483647, 83, -2147483647, 84, -2147483647,
           85, -2147483647, 86, -2147483647, 87,
           -2147483647, 88, -2147483647, 89, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647}));
}
TEST(ReferenceTest, RandomJaxReference32) {
  // Sum-reduction.
  const Tensor<int> res = ReduceWindow<int>(
      /*input=*/Tensor<int>::iota({10, 10}), /*base_dilations=*/{2, 1},
      /*padding=*/{-1, 2, -1, 0}, /*init_value=*/0,
      /*window_dimensions=*/{2, 1}, /*window_dilations=*/{2, 2},
      /*window_strides=*/{2, 2}, /*body=*/std::plus<>());
  EXPECT_THAT(res.shape, ElementsAre(9, 5));
  EXPECT_THAT(res.data,
              ElementsAreArray({0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}));
}
// Auto-generated golden test: ReduceWindow with max-reduction (a >= b ? a : b,
// init -2147483647) over a 10x10 iota input; rows of -2147483647 are windows
// that cover only padding.
TEST(ReferenceTest, RandomJaxReference33) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 2},
      {1, -1, 2, 1},
      -2147483647,
      {2, 2},
      {2, 2},
      {1, 2},
      [](auto a, auto b) { return a >= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(17, 10));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           11,          12,          13,          14,          15,
           16,          17,          18,          19,          20,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           21,          22,          23,          24,          25,
           26,          27,          28,          29,          30,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           31,          32,          33,          34,          35,
           36,          37,          38,          39,          40,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           41,          42,          43,          44,          45,
           46,          47,          48,          49,          50,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           51,          52,          53,          54,          55,
           56,          57,          58,          59,          60,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           61,          62,          63,          64,          65,
           66,          67,          68,          69,          70,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           71,          72,          73,          74,          75,
           76,          77,          78,          79,          80,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           81,          82,          83,          84,          85,
           86,          87,          88,          89,          90,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647}));
}
// Auto-generated golden test: ReduceWindow with sum-reduction (std::plus,
// init 0) over a 10x10 iota input; asserts exact 12x12 output against the
// JAX reference values.
TEST(ReferenceTest, RandomJaxReference34) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {1, 1},
      {0, 2, 2, 2},
      0,
      {1, 2},
      {1, 2},
      {1, 1},
      std::plus<>());
  EXPECT_THAT(res.shape, ElementsAre(12, 12));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {1,   2,   4,   6,   8,   10,  12,  14,  16,  18,  9,   10,  11,  12,
           24,  26,  28,  30,  32,  34,  36,  38,  19,  20,  21,  22,  44,  46,
           48,  50,  52,  54,  56,  58,  29,  30,  31,  32,  64,  66,  68,  70,
           72,  74,  76,  78,  39,  40,  41,  42,  84,  86,  88,  90,  92,  94,
           96,  98,  49,  50,  51,  52,  104, 106, 108, 110, 112, 114, 116, 118,
           59,  60,  61,  62,  124, 126, 128, 130, 132, 134, 136, 138, 69,  70,
           71,  72,  144, 146, 148, 150, 152, 154, 156, 158, 79,  80,  81,  82,
           164, 166, 168, 170, 172, 174, 176, 178, 89,  90,  91,  92,  184, 186,
           188, 190, 192, 194, 196, 198, 99,  100, 0,   0,   0,   0,   0,   0,
           0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
           0,   0,   0,   0}));
}
// Auto-generated golden test: ReduceWindow with sum-reduction (std::plus,
// init 0) over a 10x10 iota input; asserts exact 6x9 output values.
TEST(ReferenceTest, RandomJaxReference35) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {1, 2},
      {1, 2, 1, -1},
      0,
      {2, 2},
      {2, 1},
      {2, 2},
      std::plus<>());
  EXPECT_THAT(res.shape, ElementsAre(6, 9));
  EXPECT_THAT(
      res.data,
      ElementsAreArray({11,  12,  13,  14,  15,  16,  17,  18,  19,  42,  44,
                        46,  48,  50,  52,  54,  56,  58,  82,  84,  86,  88,
                        90,  92,  94,  96,  98,  122, 124, 126, 128, 130, 132,
                        134, 136, 138, 162, 164, 166, 168, 170, 172, 174, 176,
                        178, 91,  92,  93,  94,  95,  96,  97,  98,  99}));
}
// Auto-generated golden test: ReduceWindow with max-reduction (a >= b ? a : b,
// init -2147483647) over a 10x10 iota input; interleaved -2147483647 entries
// correspond to windows containing only padding.
TEST(ReferenceTest, RandomJaxReference36) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 2},
      {2, 2, 2, 1},
      -2147483647,
      {2, 1},
      {2, 1},
      {2, 1},
      [](auto a, auto b) { return a >= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(11, 22));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {-2147483647, -2147483647, 1,           -2147483647, 2,
           -2147483647, 3,           -2147483647, 4,           -2147483647,
           5,           -2147483647, 6,           -2147483647, 7,
           -2147483647, 8,           -2147483647, 9,           -2147483647,
           10,          -2147483647, -2147483647, -2147483647, 11,
           -2147483647, 12,          -2147483647, 13,          -2147483647,
           14,          -2147483647, 15,          -2147483647, 16,
           -2147483647, 17,          -2147483647, 18,          -2147483647,
           19,          -2147483647, 20,          -2147483647, -2147483647,
           -2147483647, 21,          -2147483647, 22,          -2147483647,
           23,          -2147483647, 24,          -2147483647, 25,
           -2147483647, 26,          -2147483647, 27,          -2147483647,
           28,          -2147483647, 29,          -2147483647, 30,
           -2147483647, -2147483647, -2147483647, 31,          -2147483647,
           32,          -2147483647, 33,          -2147483647, 34,
           -2147483647, 35,          -2147483647, 36,          -2147483647,
           37,          -2147483647, 38,          -2147483647, 39,
           -2147483647, 40,          -2147483647, -2147483647, -2147483647,
           41,          -2147483647, 42,          -2147483647, 43,
           -2147483647, 44,          -2147483647, 45,          -2147483647,
           46,          -2147483647, 47,          -2147483647, 48,
           -2147483647, 49,          -2147483647, 50,          -2147483647,
           -2147483647, -2147483647, 51,          -2147483647, 52,
           -2147483647, 53,          -2147483647, 54,          -2147483647,
           55,          -2147483647, 56,          -2147483647, 57,
           -2147483647, 58,          -2147483647, 59,          -2147483647,
           60,          -2147483647, -2147483647, -2147483647, 61,
           -2147483647, 62,          -2147483647, 63,          -2147483647,
           64,          -2147483647, 65,          -2147483647, 66,
           -2147483647, 67,          -2147483647, 68,          -2147483647,
           69,          -2147483647, 70,          -2147483647, -2147483647,
           -2147483647, 71,          -2147483647, 72,          -2147483647,
           73,          -2147483647, 74,          -2147483647, 75,
           -2147483647, 76,          -2147483647, 77,          -2147483647,
           78,          -2147483647, 79,          -2147483647, 80,
           -2147483647, -2147483647, -2147483647, 81,          -2147483647,
           82,          -2147483647, 83,          -2147483647, 84,
           -2147483647, 85,          -2147483647, 86,          -2147483647,
           87,          -2147483647, 88,          -2147483647, 89,
           -2147483647, 90,          -2147483647, -2147483647, -2147483647,
           91,          -2147483647, 92,          -2147483647, 93,
           -2147483647, 94,          -2147483647, 95,          -2147483647,
           96,          -2147483647, 97,          -2147483647, 98,
           -2147483647, 99,          -2147483647, 100,         -2147483647,
           -2147483647, -2147483647, 91,          -2147483647, 92,
           -2147483647, 93,          -2147483647, 94,          -2147483647,
           95,          -2147483647, 96,          -2147483647, 97,
           -2147483647, 98,          -2147483647, 99,          -2147483647,
           100,         -2147483647}));
}
// Auto-generated golden test: ReduceWindow with max-reduction (a >= b ? a : b,
// init -2147483647) over a 10x10 iota input; asserts exact 18x6 output.
TEST(ReferenceTest, RandomJaxReference37) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 1},
      {-2, 2, 1, 2},
      -2147483647,
      {2, 2},
      {1, 2},
      {1, 2},
      [](auto a, auto b) { return a >= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(18, 6));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {12,          14,          16,          18,          20,
           20,          22,          24,          26,          28,
           30,          30,          22,          24,          26,
           28,          30,          30,          32,          34,
           36,          38,          40,          40,          32,
           34,          36,          38,          40,          40,
           42,          44,          46,          48,          50,
           50,          42,          44,          46,          48,
           50,          50,          52,          54,          56,
           58,          60,          60,          52,          54,
           56,          58,          60,          60,          62,
           64,          66,          68,          70,          70,
           62,          64,          66,          68,          70,
           70,          72,          74,          76,          78,
           80,          80,          72,          74,          76,
           78,          80,          80,          82,          84,
           86,          88,          90,          90,          82,
           84,          86,          88,          90,          90,
           92,          94,          96,          98,          100,
           100,         92,          94,          96,          98,
           100,         100,         -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647}));
}
// Auto-generated golden test: ReduceWindow with max-reduction (a >= b ? a : b,
// init -2147483647) over a 10x10 iota input; asserts exact 17x11 output.
TEST(ReferenceTest, RandomJaxReference38) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 1},
      {0, -2, 1, 1},
      -2147483647,
      {1, 2},
      {1, 1},
      {1, 1},
      [](auto a, auto b) { return a >= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(17, 11));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {1,           2,           3,           4,           5,
           6,           7,           8,           9,           10,
           10,          -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, 11,          12,          13,
           14,          15,          16,          17,          18,
           19,          20,          20,          -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, 21,
           22,          23,          24,          25,          26,
           27,          28,          29,          30,          30,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, 31,          32,          33,          34,
           35,          36,          37,          38,          39,
           40,          40,          -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, 41,          42,
           43,          44,          45,          46,          47,
           48,          49,          50,          50,          -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           51,          52,          53,          54,          55,
           56,          57,          58,          59,          60,
           60,          -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, 61,          62,          63,
           64,          65,          66,          67,          68,
           69,          70,          70,          -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, 71,
           72,          73,          74,          75,          76,
           77,          78,          79,          80,          80,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, 81,          82,          83,          84,
           85,          86,          87,          88,          89,
           90,          90}));
}
// Auto-generated golden test: ReduceWindow with sum-reduction (std::plus,
// init 0) over a 10x10 iota input; asserts exact 15x8 output.
TEST(ReferenceTest, RandomJaxReference39) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 1},
      {-1, -1, -2, 0},
      0,
      {2, 1},
      {2, 1},
      {1, 1},
      std::plus<>());
  EXPECT_THAT(res.shape, ElementsAre(15, 8));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {0,   0,   0,   0,   0,   0,   0,   0,   36,  38,  40,  42,  44,  46,  48,
           50,  0,   0,   0,   0,   0,   0,   0,   0,   56,  58,  60,  62,  64,  66,
           68,  70,  0,   0,   0,   0,   0,   0,   0,   0,   76,  78,  80,  82,  84,
           86,  88,  90,  0,   0,   0,   0,   0,   0,   0,   0,   96,  98,  100, 102,
           104, 106, 108, 110, 0,   0,   0,   0,   0,   0,   0,   0,   116, 118, 120,
           122, 124, 126, 128, 130, 0,   0,   0,   0,   0,   0,   0,   0,   136, 138,
           140, 142, 144, 146, 148, 150, 0,   0,   0,   0,   0,   0,   0,   0,   156,
           158, 160, 162, 164, 166, 168, 170, 0,   0,   0,   0,   0,   0,   0,   0}));
}
// Auto-generated golden test: ReduceWindow with product-reduction
// (std::multiplies, init 1) over a 10x10 iota input; asserts exact 19x5
// output (1s where windows cover only padding).
TEST(ReferenceTest, RandomJaxReference40) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 1},
      {2, -1, -2, 2},
      1,
      {2, 1},
      {1, 1},
      {1, 2},
      std::multiplies<>());
  EXPECT_THAT(res.shape, ElementsAre(19, 5));
  EXPECT_THAT(
      res.data,
      ElementsAreArray({1,  1,  1,  1,  1,  3,  5,  7,  9,  1,  3,  5,  7,  9,
                        1,  13, 15, 17, 19, 1,  13, 15, 17, 19, 1,  23, 25, 27,
                        29, 1,  23, 25, 27, 29, 1,  33, 35, 37, 39, 1,  33, 35,
                        37, 39, 1,  43, 45, 47, 49, 1,  43, 45, 47, 49, 1,  53,
                        55, 57, 59, 1,  53, 55, 57, 59, 1,  63, 65, 67, 69, 1,
                        63, 65, 67, 69, 1,  73, 75, 77, 79, 1,  73, 75, 77, 79,
                        1,  83, 85, 87, 89, 1,  83, 85, 87, 89, 1}));
}
// Auto-generated golden test: ReduceWindow with max-reduction (a >= b ? a : b,
// init -2147483647) over a 10x10 iota input; asserts exact 18x8 output.
TEST(ReferenceTest, RandomJaxReference41) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 2},
      {-1, 2, -2, 0},
      -2147483647,
      {2, 2},
      {2, 2},
      {1, 2},
      [](auto a, auto b) { return a >= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(18, 8));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, 23,          24,
           25,          26,          27,          28,          29,
           30,          -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, 33,
           34,          35,          36,          37,          38,
           39,          40,          -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           43,          44,          45,          46,          47,
           48,          49,          50,          -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, 53,          54,          55,          56,
           57,          58,          59,          60,          -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, 63,          64,          65,
           66,          67,          68,          69,          70,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, 73,          74,
           75,          76,          77,          78,          79,
           80,          -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, 83,
           84,          85,          86,          87,          88,
           89,          90,          -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           93,          94,          95,          96,          97,
           98,          99,          100,         -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, 93,          94,          95,          96,
           97,          98,          99,          100}));
}
// Auto-generated golden test: ReduceWindow with product-reduction
// (std::multiplies, init 1) over a 10x10 iota input; asserts exact 15x9
// output values.
TEST(ReferenceTest, RandomJaxReference42) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 1},
      {-2, -1, -1, 1},
      1,
      {2, 2},
      {1, 1},
      {1, 1},
      std::multiplies<>());
  EXPECT_THAT(res.shape, ElementsAre(15, 9));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {156,  182,  210,  240,  272,  306,  342,  380,  20,   506,  552,
           600,  650,  702,  756,  812,  870,  30,   506,  552,  600,  650,
           702,  756,  812,  870,  30,   1056, 1122, 1190, 1260, 1332, 1406,
           1482, 1560, 40,   1056, 1122, 1190, 1260, 1332, 1406, 1482, 1560,
           40,   1806, 1892, 1980, 2070, 2162, 2256, 2352, 2450, 50,   1806,
           1892, 1980, 2070, 2162, 2256, 2352, 2450, 50,   2756, 2862, 2970,
           3080, 3192, 3306, 3422, 3540, 60,   2756, 2862, 2970, 3080, 3192,
           3306, 3422, 3540, 60,   3906, 4032, 4160, 4290, 4422, 4556, 4692,
           4830, 70,   3906, 4032, 4160, 4290, 4422, 4556, 4692, 4830, 70,
           5256, 5402, 5550, 5700, 5852, 6006, 6162, 6320, 80,   5256, 5402,
           5550, 5700, 5852, 6006, 6162, 6320, 80,   6806, 6972, 7140, 7310,
           7482, 7656, 7832, 8010, 90,   6806, 6972, 7140, 7310, 7482, 7656,
           7832, 8010, 90}));
}
// Auto-generated golden test: ReduceWindow with max-reduction (a >= b ? a : b,
// init -2147483647) over a 10x10 iota input; asserts exact 19x18 output.
TEST(ReferenceTest, RandomJaxReference43) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 2},
      {1, 0, -2, 1},
      -2147483647,
      {2, 1},
      {1, 1},
      {1, 1},
      [](auto a, auto b) { return a >= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(19, 18));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {2,   -2147483647, 3,   -2147483647, 4,   -2147483647, 5,   -2147483647,
           6,   -2147483647, 7,   -2147483647, 8,   -2147483647, 9,   -2147483647,
           10,  -2147483647, 2,   -2147483647, 3,   -2147483647, 4,   -2147483647,
           5,   -2147483647, 6,   -2147483647, 7,   -2147483647, 8,   -2147483647,
           9,   -2147483647, 10,  -2147483647, 12,  -2147483647, 13,  -2147483647,
           14,  -2147483647, 15,  -2147483647, 16,  -2147483647, 17,  -2147483647,
           18,  -2147483647, 19,  -2147483647, 20,  -2147483647, 12,  -2147483647,
           13,  -2147483647, 14,  -2147483647, 15,  -2147483647, 16,  -2147483647,
           17,  -2147483647, 18,  -2147483647, 19,  -2147483647, 20,  -2147483647,
           22,  -2147483647, 23,  -2147483647, 24,  -2147483647, 25,  -2147483647,
           26,  -2147483647, 27,  -2147483647, 28,  -2147483647, 29,  -2147483647,
           30,  -2147483647, 22,  -2147483647, 23,  -2147483647, 24,  -2147483647,
           25,  -2147483647, 26,  -2147483647, 27,  -2147483647, 28,  -2147483647,
           29,  -2147483647, 30,  -2147483647, 32,  -2147483647, 33,  -2147483647,
           34,  -2147483647, 35,  -2147483647, 36,  -2147483647, 37,  -2147483647,
           38,  -2147483647, 39,  -2147483647, 40,  -2147483647, 32,  -2147483647,
           33,  -2147483647, 34,  -2147483647, 35,  -2147483647, 36,  -2147483647,
           37,  -2147483647, 38,  -2147483647, 39,  -2147483647, 40,  -2147483647,
           42,  -2147483647, 43,  -2147483647, 44,  -2147483647, 45,  -2147483647,
           46,  -2147483647, 47,  -2147483647, 48,  -2147483647, 49,  -2147483647,
           50,  -2147483647, 42,  -2147483647, 43,  -2147483647, 44,  -2147483647,
           45,  -2147483647, 46,  -2147483647, 47,  -2147483647, 48,  -2147483647,
           49,  -2147483647, 50,  -2147483647, 52,  -2147483647, 53,  -2147483647,
           54,  -2147483647, 55,  -2147483647, 56,  -2147483647, 57,  -2147483647,
           58,  -2147483647, 59,  -2147483647, 60,  -2147483647, 52,  -2147483647,
           53,  -2147483647, 54,  -2147483647, 55,  -2147483647, 56,  -2147483647,
           57,  -2147483647, 58,  -2147483647, 59,  -2147483647, 60,  -2147483647,
           62,  -2147483647, 63,  -2147483647, 64,  -2147483647, 65,  -2147483647,
           66,  -2147483647, 67,  -2147483647, 68,  -2147483647, 69,  -2147483647,
           70,  -2147483647, 62,  -2147483647, 63,  -2147483647, 64,  -2147483647,
           65,  -2147483647, 66,  -2147483647, 67,  -2147483647, 68,  -2147483647,
           69,  -2147483647, 70,  -2147483647, 72,  -2147483647, 73,  -2147483647,
           74,  -2147483647, 75,  -2147483647, 76,  -2147483647, 77,  -2147483647,
           78,  -2147483647, 79,  -2147483647, 80,  -2147483647, 72,  -2147483647,
           73,  -2147483647, 74,  -2147483647, 75,  -2147483647, 76,  -2147483647,
           77,  -2147483647, 78,  -2147483647, 79,  -2147483647, 80,  -2147483647,
           82,  -2147483647, 83,  -2147483647, 84,  -2147483647, 85,  -2147483647,
           86,  -2147483647, 87,  -2147483647, 88,  -2147483647, 89,  -2147483647,
           90,  -2147483647, 82,  -2147483647, 83,  -2147483647, 84,  -2147483647,
           85,  -2147483647, 86,  -2147483647, 87,  -2147483647, 88,  -2147483647,
           89,  -2147483647, 90,  -2147483647, 92,  -2147483647, 93,  -2147483647,
           94,  -2147483647, 95,  -2147483647, 96,  -2147483647, 97,  -2147483647,
           98,  -2147483647, 99,  -2147483647, 100, -2147483647}));
}
// Auto-generated golden test: ReduceWindow with max-reduction (a >= b ? a : b,
// init -2147483647) over a 10x10 iota input; asserts exact 17x11 output.
TEST(ReferenceTest, RandomJaxReference44) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 1},
      {0, -2, 2, -1},
      -2147483647,
      {1, 1},
      {2, 2},
      {1, 1},
      [](auto a, auto b) { return a >= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(17, 11));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {-2147483647, -2147483647, 1,           2,           3,
           4,           5,           6,           7,           8,
           9,           -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, 11,
           12,          13,          14,          15,          16,
           17,          18,          19,          -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, 21,          22,          23,          24,
           25,          26,          27,          28,          29,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, 31,          32,
           33,          34,          35,          36,          37,
           38,          39,          -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           41,          42,          43,          44,          45,
           46,          47,          48,          49,          -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, 51,          52,          53,
           54,          55,          56,          57,          58,
           59,          -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, 61,
           62,          63,          64,          65,          66,
           67,          68,          69,          -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, 71,          72,          73,          74,
           75,          76,          77,          78,          79,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, 81,          82,
           83,          84,          85,          86,          87,
           88,          89}));
}
// Auto-generated golden test: ReduceWindow with product-reduction
// (std::multiplies, init 1) over a 10x10 iota input; asserts exact 18x6
// output (1s where windows cover only padding).
TEST(ReferenceTest, RandomJaxReference45) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 1},
      {0, -1, -2, -2},
      1,
      {1, 1},
      {2, 1},
      {1, 1},
      std::multiplies<>());
  EXPECT_THAT(res.shape, ElementsAre(18, 6));
  EXPECT_THAT(
      res.data,
      ElementsAreArray({3,  4,  5,  6,  7,  8,  1,  1,  1,  1,  1,  1,  13, 14,
                        15, 16, 17, 18, 1,  1,  1,  1,  1,  1,  23, 24, 25, 26,
                        27, 28, 1,  1,  1,  1,  1,  1,  33, 34, 35, 36, 37, 38,
                        1,  1,  1,  1,  1,  1,  43, 44, 45, 46, 47, 48, 1,  1,
                        1,  1,  1,  1,  53, 54, 55, 56, 57, 58, 1,  1,  1,  1,
                        1,  1,  63, 64, 65, 66, 67, 68, 1,  1,  1,  1,  1,  1,
                        73, 74, 75, 76, 77, 78, 1,  1,  1,  1,  1,  1,  83, 84,
                        85, 86, 87, 88, 1,  1,  1,  1,  1,  1}));
}
// Auto-generated golden test: ReduceWindow with product-reduction
// (std::multiplies, init 1) over a 10x10 iota input; asserts exact 10x17
// output values.
TEST(ReferenceTest, RandomJaxReference46) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 2},
      {-1, 2, 0, -1},
      1,
      {2, 2},
      {1, 1},
      {2, 1},
      std::multiplies<>());
  EXPECT_THAT(res.shape, ElementsAre(10, 17));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19,
           21, 22, 22, 23, 23, 24, 24, 25, 25, 26, 26, 27, 27, 28, 28, 29, 29,
           31, 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 37, 37, 38, 38, 39, 39,
           41, 42, 42, 43, 43, 44, 44, 45, 45, 46, 46, 47, 47, 48, 48, 49, 49,
           51, 52, 52, 53, 53, 54, 54, 55, 55, 56, 56, 57, 57, 58, 58, 59, 59,
           61, 62, 62, 63, 63, 64, 64, 65, 65, 66, 66, 67, 67, 68, 68, 69, 69,
           71, 72, 72, 73, 73, 74, 74, 75, 75, 76, 76, 77, 77, 78, 78, 79, 79,
           81, 82, 82, 83, 83, 84, 84, 85, 85, 86, 86, 87, 87, 88, 88, 89, 89,
           91, 92, 92, 93, 93, 94, 94, 95, 95, 96, 96, 97, 97, 98, 98, 99, 99,
           1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1}));
}
// Auto-generated golden test: ReduceWindow with sum-reduction (std::plus,
// init 0) over a 10x10 iota input; asserts exact 18x10 output.
TEST(ReferenceTest, RandomJaxReference47) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 1},
      {0, -1, 0, 0},
      0,
      {1, 1},
      {1, 1},
      {1, 1},
      std::plus<>());
  EXPECT_THAT(res.shape, ElementsAre(18, 10));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 0,  0,  0,  0,  0,  0,  0,
           0,  0,  0,  11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 0,  0,  0,  0,
           0,  0,  0,  0,  0,  0,  21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 0,
           0,  0,  0,  0,  0,  0,  0,  0,  0,  31, 32, 33, 34, 35, 36, 37, 38,
           39, 40, 0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  41, 42, 43, 44, 45,
           46, 47, 48, 49, 50, 0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  51, 52,
           53, 54, 55, 56, 57, 58, 59, 60, 0,  0,  0,  0,  0,  0,  0,  0,  0,
           0,  61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 0,  0,  0,  0,  0,  0,
           0,  0,  0,  0,  71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 0,  0,  0,
           0,  0,  0,  0,  0,  0,  0,  81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
           0,  0,  0,  0,  0,  0,  0,  0,  0,  0}));
}
// Auto-generated golden test: ReduceWindow with product-reduction
// (std::multiplies, init 1) over a 10x10 iota input; asserts exact 16x6
// output values.
TEST(ReferenceTest, RandomJaxReference48) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 1},
      {-2, -1, 1, 2},
      1,
      {1, 2},
      {2, 1},
      {1, 2},
      std::multiplies<>());
  EXPECT_THAT(res.shape, ElementsAre(16, 6));
  EXPECT_THAT(
      res.data,
      ElementsAreArray({11, 156,  210,  272,  342,  20, 1, 1, 1, 1, 1, 1,
                        21, 506,  600,  702,  812,  30, 1, 1, 1, 1, 1, 1,
                        31, 1056, 1190, 1332, 1482, 40, 1, 1, 1, 1, 1, 1,
                        41, 1806, 1980, 2162, 2352, 50, 1, 1, 1, 1, 1, 1,
                        51, 2756, 2970, 3192, 3422, 60, 1, 1, 1, 1, 1, 1,
                        61, 3906, 4160, 4422, 4692, 70, 1, 1, 1, 1, 1, 1,
                        71, 5256, 5550, 5852, 6162, 80, 1, 1, 1, 1, 1, 1,
                        81, 6806, 7140, 7482, 7832, 90, 1, 1, 1, 1, 1, 1}));
}
// Auto-generated golden test: ReduceWindow with min-reduction (a <= b ? a : b,
// init 2147483646 as the identity) over a 10x10 iota input; asserts exact
// 10x17 output.
TEST(ReferenceTest, RandomJaxReference49) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 2},
      {0, 1, -2, 0},
      2147483646,
      {1, 1},
      {2, 2},
      {2, 1},
      [](auto a, auto b) { return a <= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(10, 17));
  EXPECT_THAT(res.data,
              ElementsAreArray(
                  {2,          2147483646, 3,          2147483646, 4,
                   2147483646, 5,          2147483646, 6,          2147483646,
                   7,          2147483646, 8,          2147483646, 9,
                   2147483646, 10,         12,         2147483646, 13,
                   2147483646, 14,         2147483646, 15,         2147483646,
                   16,         2147483646, 17,         2147483646, 18,
                   2147483646, 19,         2147483646, 20,         22,
                   2147483646, 23,         2147483646, 24,         2147483646,
                   25,         2147483646, 26,         2147483646, 27,
                   2147483646, 28,         2147483646, 29,         2147483646,
                   30,         32,         2147483646, 33,         2147483646,
                   34,         2147483646, 35,         2147483646, 36,
                   2147483646, 37,         2147483646, 38,         2147483646,
                   39,         2147483646, 40,         42,         2147483646,
                   43,         2147483646, 44,         2147483646, 45,
                   2147483646, 46,         2147483646, 47,         2147483646,
                   48,         2147483646, 49,         2147483646, 50,
                   52,         2147483646, 53,         2147483646, 54,
                   2147483646, 55,         2147483646, 56,         2147483646,
                   57,         2147483646, 58,         2147483646, 59,
                   2147483646, 60,         62,         2147483646, 63,
                   2147483646, 64,         2147483646, 65,         2147483646,
                   66,         2147483646, 67,         2147483646, 68,
                   2147483646, 69,         2147483646, 70,         72,
                   2147483646, 73,         2147483646, 74,         2147483646,
                   75,         2147483646, 76,         2147483646, 77,
                   2147483646, 78,         2147483646, 79,         2147483646,
                   80,         82,         2147483646, 83,         2147483646,
                   84,         2147483646, 85,         2147483646, 86,
                   2147483646, 87,         2147483646, 88,         2147483646,
                   89,         2147483646, 90,         92,         2147483646,
                   93,         2147483646, 94,         2147483646, 95,
                   2147483646, 96,         2147483646, 97,         2147483646,
                   98,         2147483646, 99,         2147483646, 100}));
}
// Auto-generated golden test: ReduceWindow with min-reduction (a <= b ? a : b,
// init 2147483646) over a 10x10 iota input; for these generated parameters
// every window reduces to the init value.
TEST(ReferenceTest, RandomJaxReference50) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 2},
      {-1, -1, 1, 0},
      2147483646,
      {2, 1},
      {1, 1},
      {1, 2},
      [](auto a, auto b) { return a <= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(16, 10));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646}));
}
// Auto-generated golden test: ReduceWindow with min-reduction (a <= b ? a : b,
// init 2147483646) over a 10x10 iota input; asserts exact 19x7 output.
TEST(ReferenceTest, RandomJaxReference51) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 2},
      {0, 2, -2, -1},
      2147483646,
      {2, 2},
      {2, 2},
      {1, 2},
      [](auto a, auto b) { return a <= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(19, 7));
  EXPECT_THAT(res.data,
              ElementsAreArray(
                  {2,          3,          4,          5,          6,
                   7,          8,          2147483646, 2147483646, 2147483646,
                   2147483646, 2147483646, 2147483646, 2147483646, 12,
                   13,         14,         15,         16,         17,
                   18,         2147483646, 2147483646, 2147483646, 2147483646,
                   2147483646, 2147483646, 2147483646, 22,         23,
                   24,         25,         26,         27,         28,
                   2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
                   2147483646, 2147483646, 32,         33,         34,
                   35,         36,         37,         38,         2147483646,
                   2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
                   2147483646, 42,         43,         44,         45,
                   46,         47,         48,         2147483646, 2147483646,
                   2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
                   52,         53,         54,         55,         56,
                   57,         58,         2147483646, 2147483646, 2147483646,
                   2147483646, 2147483646, 2147483646, 2147483646, 62,
                   63,         64,         65,         66,         67,
                   68,         2147483646, 2147483646, 2147483646, 2147483646,
                   2147483646, 2147483646, 2147483646, 72,         73,
                   74,         75,         76,         77,         78,
                   2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
                   2147483646, 2147483646, 82,         83,         84,
                   85,         86,         87,         88,         2147483646,
                   2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
                   2147483646, 92,         93,         94,         95,
                   96,         97,         98}));
}
// Auto-generated golden test: ReduceWindow with sum-reduction (std::plus,
// init 0) over a 10x10 iota input; asserts exact 8x11 output.
TEST(ReferenceTest, RandomJaxReference52) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {1, 2},
      {-2, 0, 1, 2},
      0,
      {1, 2},
      {1, 1},
      {1, 2},
      std::plus<>());
  EXPECT_THAT(res.shape, ElementsAre(8, 11));
  EXPECT_THAT(res.data,
              ElementsAreArray(
                  {21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 0,  31, 32, 33, 34,
                   35, 36, 37, 38, 39, 40, 0,  41, 42, 43, 44, 45, 46, 47, 48,
                   49, 50, 0,  51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 0,  61,
                   62, 63, 64, 65, 66, 67, 68, 69, 70, 0,  71, 72, 73, 74, 75,
                   76, 77, 78, 79, 80, 0,  81, 82, 83, 84, 85, 86, 87, 88, 89,
                   90, 0,  91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 0}));
}
// Auto-generated golden test: ReduceWindow with min-reduction (a <= b ? a : b,
// init 2147483646) over a 10x10 iota input; asserts exact 11x10 output.
TEST(ReferenceTest, RandomJaxReference53) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 2},
      {2, 1, 0, 2},
      2147483646,
      {2, 2},
      {1, 1},
      {2, 2},
      [](auto a, auto b) { return a <= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(11, 10));
  EXPECT_THAT(res.data,
              ElementsAreArray(
                  {2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
                   2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
                   1,          2,          3,          4,          5,
                   6,          7,          8,          9,          10,
                   11,         12,         13,         14,         15,
                   16,         17,         18,         19,         20,
                   21,         22,         23,         24,         25,
                   26,         27,         28,         29,         30,
                   31,         32,         33,         34,         35,
                   36,         37,         38,         39,         40,
                   41,         42,         43,         44,         45,
                   46,         47,         48,         49,         50,
                   51,         52,         53,         54,         55,
                   56,         57,         58,         59,         60,
                   61,         62,         63,         64,         65,
                   66,         67,         68,         69,         70,
                   71,         72,         73,         74,         75,
                   76,         77,         78,         79,         80,
                   81,         82,         83,         84,         85,
                   86,         87,         88,         89,         90,
                   91,         92,         93,         94,         95,
                   96,         97,         98,         99,         100}));
}
// Auto-generated golden test: ReduceWindow with max-reduction (a >= b ? a : b,
// init -2147483647) over a 10x10 iota input; asserts exact 9x12 output.
TEST(ReferenceTest, RandomJaxReference54) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 1},
      {-2, 0, 0, 2},
      -2147483647,
      {1, 1},
      {2, 2},
      {2, 1},
      [](auto a, auto b) { return a >= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(9, 12));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {11, 12, 13, 14, 15, 16, 17, 18, 19, 20, -2147483647, -2147483647,
           21, 22, 23, 24, 25, 26, 27, 28, 29, 30, -2147483647, -2147483647,
           31, 32, 33, 34, 35, 36, 37, 38, 39, 40, -2147483647, -2147483647,
           41, 42, 43, 44, 45, 46, 47, 48, 49, 50, -2147483647, -2147483647,
           51, 52, 53, 54, 55, 56, 57, 58, 59, 60, -2147483647, -2147483647,
           61, 62, 63, 64, 65, 66, 67, 68, 69, 70, -2147483647, -2147483647,
           71, 72, 73, 74, 75, 76, 77, 78, 79, 80, -2147483647, -2147483647,
           81, 82, 83, 84, 85, 86, 87, 88, 89, 90, -2147483647, -2147483647,
           91, 92, 93, 94, 95, 96, 97, 98, 99, 100, -2147483647, -2147483647}));
}
// Auto-generated golden test: ReduceWindow with max-reduction (a >= b ? a : b,
// init -2147483647) over a 10x10 iota input; asserts exact 20x5 output.
TEST(ReferenceTest, RandomJaxReference55) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 1},
      {2, 1, -2, 2},
      -2147483647,
      {2, 1},
      {2, 2},
      {1, 2},
      [](auto a, auto b) { return a >= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(20, 5));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {3,           5,           7,           9,           -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           13,          15,          17,          19,          -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           23,          25,          27,          29,          -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           33,          35,          37,          39,          -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           43,          45,          47,          49,          -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           53,          55,          57,          59,          -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           63,          65,          67,          69,          -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           73,          75,          77,          79,          -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           83,          85,          87,          89,          -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           93,          95,          97,          99,          -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647}));
}
// Auto-generated golden test: ReduceWindow with product-reduction
// (std::multiplies, init 1) over a 10x10 iota input; asserts exact 18x11
// output values.
TEST(ReferenceTest, RandomJaxReference56) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 1},
      {0, 0, 0, 1},
      1,
      {2, 1},
      {1, 2},
      {1, 1},
      std::multiplies<>());
  EXPECT_THAT(res.shape, ElementsAre(18, 11));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 1,  11, 12, 13, 14, 15, 16,
           17, 18, 19, 20, 1,  11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 1,  21,
           22, 23, 24, 25, 26, 27, 28, 29, 30, 1,  21, 22, 23, 24, 25, 26, 27,
           28, 29, 30, 1,  31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 1,  31, 32,
           33, 34, 35, 36, 37, 38, 39, 40, 1,  41, 42, 43, 44, 45, 46, 47, 48,
           49, 50, 1,  41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 1,  51, 52, 53,
           54, 55, 56, 57, 58, 59, 60, 1,  51, 52, 53, 54, 55, 56, 57, 58, 59,
           60, 1,  61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 1,  61, 62, 63, 64,
           65, 66, 67, 68, 69, 70, 1,  71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
           1,  71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 1,  81, 82, 83, 84, 85,
           86, 87, 88, 89, 90, 1,  81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 1,
           91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 1}));
}
// Auto-generated golden test: ReduceWindow with max-reduction (a >= b ? a : b,
// init -2147483647) over a 10x10 iota input; asserts exact 10x9 output.
TEST(ReferenceTest, RandomJaxReference57) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 2},
      {0, 0, -2, 2},
      -2147483647,
      {1, 2},
      {1, 2},
      {2, 2},
      [](auto a, auto b) { return a >= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(10, 9));
  EXPECT_THAT(
      res.data,
      ElementsAreArray({3,  4,  5,  6,  7,  8,  9,  10, 10, 13, 14, 15, 16,
                        17, 18, 19, 20, 20, 23, 24, 25, 26, 27, 28, 29, 30,
                        30, 33, 34, 35, 36, 37, 38, 39, 40, 40, 43, 44, 45,
                        46, 47, 48, 49, 50, 50, 53, 54, 55, 56, 57, 58, 59,
                        60, 60, 63, 64, 65, 66, 67, 68, 69, 70, 70, 73, 74,
                        75, 76, 77, 78, 79, 80, 80, 83, 84, 85, 86, 87, 88,
                        89, 90, 90, 93, 94, 95, 96, 97, 98, 99, 100, 100}));
}
// Auto-generated golden test: ReduceWindow with sum-reduction (std::plus,
// init 0) over a 10x10 iota input; all expected values are zero for these
// generated window/padding parameters.
TEST(ReferenceTest, RandomJaxReference58) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {1, 2},
      {-1, 2, 1, -2},
      0,
      {1, 1},
      {2, 2},
      {1, 2},
      std::plus<>());
  EXPECT_THAT(res.shape, ElementsAre(11, 9));
  EXPECT_THAT(res.data,
              ElementsAreArray(
                  {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                   0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                   0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                   0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                   0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}));
}
// Auto-generated golden test: ReduceWindow with min-reduction (a <= b ? a : b,
// init 2147483646) over a 10x10 iota input; asserts exact 18x11 output.
TEST(ReferenceTest, RandomJaxReference59) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 2},
      {2, -2, 2, 2},
      2147483646,
      {2, 2},
      {1, 2},
      {1, 2},
      [](auto a, auto b) { return a <= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(18, 11));
  EXPECT_THAT(res.data,
              ElementsAreArray(
                  {2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
                   2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
                   2147483646, 1,          1,          2,          3,
                   4,          5,          6,          7,          8,
                   9,          10,         1,          1,          2,
                   3,          4,          5,          6,          7,
                   8,          9,          10,         11,         11,
                   12,         13,         14,         15,         16,
                   17,         18,         19,         20,         11,
                   11,         12,         13,         14,         15,
                   16,         17,         18,         19,         20,
                   21,         21,         22,         23,         24,
                   25,         26,         27,         28,         29,
                   30,         21,         21,         22,         23,
                   24,         25,         26,         27,         28,
                   29,         30,         31,         31,         32,
                   33,         34,         35,         36,         37,
                   38,         39,         40,         31,         31,
                   32,         33,         34,         35,         36,
                   37,         38,         39,         40,         41,
                   41,         42,         43,         44,         45,
                   46,         47,         48,         49,         50,
                   41,         41,         42,         43,         44,
                   45,         46,         47,         48,         49,
                   50,         51,         51,         52,         53,
                   54,         55,         56,         57,         58,
                   59,         60,         51,         51,         52,
                   53,         54,         55,         56,         57,
                   58,         59,         60,         61,         61,
                   62,         63,         64,         65,         66,
                   67,         68,         69,         70,         61,
                   61,         62,         63,         64,         65,
                   66,         67,         68,         69,         70,
                   71,         71,         72,         73,         74,
                   75,         76,         77,         78,         79,
                   80,         71,         71,         72,         73,
                   74,         75,         76,         77,         78,
                   79,         80,         81,         81,         82,
                   83,         84,         85,         86,         87,
                   88,         89,         90}));
}
TEST(ReferenceTest, RandomJaxReference60) {
  // Golden test: sum reduce-window over a 10x10 iota input (init/padding 0).
  // Expected values are machine-generated -- do not hand-edit.
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {1, 1},
      {0, 2, -1, 0},
      0,
      {1, 2},
      {2, 1},
      {2, 2},
      std::plus<>());
  EXPECT_THAT(res.shape, ElementsAre(6, 4));
  EXPECT_THAT(res.data,
              ElementsAreArray({5, 9, 13, 17, 45, 49, 53, 57,
                                85, 89, 93, 97, 125, 129, 133, 137,
                                165, 169, 173, 177, 0, 0, 0, 0}));
}
TEST(ReferenceTest, RandomJaxReference61) {
  // Golden test: sum reduce-window over a 10x10 iota input (init/padding 0).
  // Expected values are machine-generated -- do not hand-edit.
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 2},
      {0, -1, 2, -1},
      0,
      {2, 1},
      {1, 1},
      {1, 1},
      std::plus<>());
  EXPECT_THAT(res.shape, ElementsAre(17, 20));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {0, 0, 1, 0, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0, 8, 0,
           9, 0, 0, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 0, 16, 0, 17, 0,
           18, 0, 19, 0, 0, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 0, 16, 0,
           17, 0, 18, 0, 19, 0, 0, 0, 21, 0, 22, 0, 23, 0, 24, 0, 25, 0,
           26, 0, 27, 0, 28, 0, 29, 0, 0, 0, 21, 0, 22, 0, 23, 0, 24, 0,
           25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 0, 0, 31, 0, 32, 0, 33, 0,
           34, 0, 35, 0, 36, 0, 37, 0, 38, 0, 39, 0, 0, 0, 31, 0, 32, 0,
           33, 0, 34, 0, 35, 0, 36, 0, 37, 0, 38, 0, 39, 0, 0, 0, 41, 0,
           42, 0, 43, 0, 44, 0, 45, 0, 46, 0, 47, 0, 48, 0, 49, 0, 0, 0,
           41, 0, 42, 0, 43, 0, 44, 0, 45, 0, 46, 0, 47, 0, 48, 0, 49, 0,
           0, 0, 51, 0, 52, 0, 53, 0, 54, 0, 55, 0, 56, 0, 57, 0, 58, 0,
           59, 0, 0, 0, 51, 0, 52, 0, 53, 0, 54, 0, 55, 0, 56, 0, 57, 0,
           58, 0, 59, 0, 0, 0, 61, 0, 62, 0, 63, 0, 64, 0, 65, 0, 66, 0,
           67, 0, 68, 0, 69, 0, 0, 0, 61, 0, 62, 0, 63, 0, 64, 0, 65, 0,
           66, 0, 67, 0, 68, 0, 69, 0, 0, 0, 71, 0, 72, 0, 73, 0, 74, 0,
           75, 0, 76, 0, 77, 0, 78, 0, 79, 0, 0, 0, 71, 0, 72, 0, 73, 0,
           74, 0, 75, 0, 76, 0, 77, 0, 78, 0, 79, 0, 0, 0, 81, 0, 82, 0,
           83, 0, 84, 0, 85, 0, 86, 0, 87, 0, 88, 0, 89, 0, 0, 0, 81, 0,
           82, 0, 83, 0, 84, 0, 85, 0, 86, 0, 87, 0, 88, 0, 89, 0}));
}
TEST(ReferenceTest, RandomJaxReference62) {
  // Golden test: max reduce-window over a 10x10 iota input (init/padding value
  // -2147483647). Expected values are machine-generated -- do not hand-edit.
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {1, 1},
      {-2, -1, 2, 0},
      -2147483647,
      {2, 1},
      {2, 2},
      {2, 1},
      [](auto a, auto b) { return a >= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(3, 12));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {-2147483647, -2147483647, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
           -2147483647, -2147483647, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
           -2147483647, -2147483647, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90}));
}
TEST(ReferenceTest, RandomJaxReference63) {
  // Golden test: product reduce-window over a 10x10 iota input (init/padding
  // 1). Expected values are machine-generated -- do not hand-edit.
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 1},
      {-1, 0, 2, -2},
      1,
      {2, 1},
      {2, 2},
      {1, 1},
      std::multiplies<>());
  EXPECT_THAT(res.shape, ElementsAre(16, 10));
  EXPECT_THAT(
      res.data,
      ElementsAreArray({1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                        1, 1, 231, 264, 299, 336, 375, 416, 459, 504,
                        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                        1, 1, 651, 704, 759, 816, 875, 936, 999, 1064,
                        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                        1, 1, 1271, 1344, 1419, 1496, 1575, 1656, 1739, 1824,
                        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                        1, 1, 2091, 2184, 2279, 2376, 2475, 2576, 2679, 2784,
                        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                        1, 1, 3111, 3224, 3339, 3456, 3575, 3696, 3819, 3944,
                        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                        1, 1, 4331, 4464, 4599, 4736, 4875, 5016, 5159, 5304,
                        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                        1, 1, 5751, 5904, 6059, 6216, 6375, 6536, 6699, 6864,
                        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                        1, 1, 7371, 7544, 7719, 7896, 8075, 8256, 8439, 8624}));
}
TEST(ReferenceTest, RandomJaxReference64) {
  // Golden test: max reduce-window over a 10x10 iota input (init/padding value
  // -2147483647). Expected values are machine-generated -- do not hand-edit.
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 1},
      {1, 2, 0, -2},
      -2147483647,
      {2, 2},
      {1, 2},
      {2, 2},
      [](auto a, auto b) { return a >= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(11, 3));
  EXPECT_THAT(res.data,
              ElementsAreArray(
                  {3, 5, 7, 13, 15, 17, 23, 25, 27,
                   33, 35, 37, 43, 45, 47, 53, 55, 57,
                   63, 65, 67, 73, 75, 77, 83, 85, 87,
                   93, 95, 97, -2147483647, -2147483647, -2147483647}));
}
TEST(ReferenceTest, RandomJaxReference65) {
  // Golden test: sum reduce-window over a 10x10 iota input (init/padding 0).
  // Expected values are machine-generated -- do not hand-edit.
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {1, 2},
      {-1, 0, 2, 0},
      0,
      {2, 1},
      {1, 2},
      {2, 2},
      std::plus<>());
  EXPECT_THAT(res.shape, ElementsAre(4, 11));
  EXPECT_THAT(
      res.data,
      ElementsAreArray({0, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50,
                        0, 72, 74, 76, 78, 80, 82, 84, 86, 88, 90,
                        0, 112, 114, 116, 118, 120, 122, 124, 126, 128, 130,
                        0, 152, 154, 156, 158, 160, 162, 164, 166, 168, 170}));
}
TEST(ReferenceTest, RandomJaxReference66) {
  // Golden test: sum reduce-window over a 10x10 iota input (init/padding 0).
  // Expected values are machine-generated -- do not hand-edit.
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {1, 2},
      {0, 0, -1, -1},
      0,
      {2, 2},
      {1, 1},
      {2, 2},
      std::plus<>());
  EXPECT_THAT(res.shape, ElementsAre(5, 8));
  EXPECT_THAT(
      res.data,
      ElementsAreArray({14, 16, 18, 20, 22, 24, 26, 28, 54, 56,
                        58, 60, 62, 64, 66, 68, 94, 96, 98, 100,
                        102, 104, 106, 108, 134, 136, 138, 140, 142, 144,
                        146, 148, 174, 176, 178, 180, 182, 184, 186, 188}));
}
TEST(ReferenceTest, RandomJaxReference67) {
  // Golden test: sum reduce-window over a 10x10 iota input (init/padding 0).
  // Expected values are machine-generated -- do not hand-edit.
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {1, 1},
      {1, 0, 2, 2},
      0,
      {1, 2},
      {1, 1},
      {2, 1},
      std::plus<>());
  EXPECT_THAT(res.shape, ElementsAre(6, 13));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
           0, 11, 23, 25, 27, 29, 31, 33, 35, 37, 39, 20, 0,
           0, 31, 63, 65, 67, 69, 71, 73, 75, 77, 79, 40, 0,
           0, 51, 103, 105, 107, 109, 111, 113, 115, 117, 119, 60, 0,
           0, 71, 143, 145, 147, 149, 151, 153, 155, 157, 159, 80, 0,
           0, 91, 183, 185, 187, 189, 191, 193, 195, 197, 199, 100, 0}));
}
TEST(ReferenceTest, RandomJaxReference68) {
  // Golden test: sum reduce-window over a 10x10 iota input (init/padding 0).
  // Expected values are machine-generated -- do not hand-edit.
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {1, 2},
      {2, 2, 1, -2},
      0,
      {2, 1},
      {1, 2},
      {1, 2},
      std::plus<>());
  EXPECT_THAT(res.shape, ElementsAre(13, 9));
  EXPECT_THAT(res.data,
              ElementsAreArray(
                  {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                   0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                   0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                   0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                   0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                   0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}));
}
TEST(ReferenceTest, RandomJaxReference69) {
  // Golden test: max reduce-window over a 10x10 iota input (init/padding value
  // -2147483647). Expected values are machine-generated -- do not hand-edit.
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 1},
      {-2, 1, -2, -1},
      -2147483647,
      {2, 2},
      {2, 2},
      {2, 1},
      [](auto a, auto b) { return a >= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(8, 5));
  EXPECT_THAT(
      res.data,
      ElementsAreArray({25, 26, 27, 28, 29, 35, 36, 37, 38, 39, 45, 46, 47, 48,
                        49, 55, 56, 57, 58, 59, 65, 66, 67, 68, 69, 75, 76, 77,
                        78, 79, 85, 86, 87, 88, 89, 95, 96, 97, 98, 99}));
}
TEST(ReferenceTest, RandomJaxReference70) {
  // Golden test: min reduce-window over a 10x10 iota input (init/padding value
  // 2147483646). Expected values are machine-generated -- do not hand-edit.
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {1, 1},
      {-1, -2, 0, 2},
      2147483646,
      {1, 2},
      {1, 1},
      {2, 2},
      [](auto a, auto b) { return a <= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(4, 6));
  EXPECT_THAT(res.data, ElementsAreArray({11, 13, 15, 17, 19, 2147483646,
                                          31, 33, 35, 37, 39, 2147483646,
                                          51, 53, 55, 57, 59, 2147483646,
                                          71, 73, 75, 77, 79, 2147483646}));
}
TEST(ReferenceTest, RandomJaxReference71) {
  // Golden test: max reduce-window over a 10x10 iota input (init/padding value
  // -2147483647). Expected values are machine-generated -- do not hand-edit.
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 1},
      {1, 2, -2, 2},
      -2147483647,
      {2, 1},
      {1, 1},
      {1, 1},
      [](auto a, auto b) { return a >= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(21, 10));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {3, 4, 5, 6, 7, 8, 9, 10, -2147483647, -2147483647,
           3, 4, 5, 6, 7, 8, 9, 10, -2147483647, -2147483647,
           13, 14, 15, 16, 17, 18, 19, 20, -2147483647, -2147483647,
           13, 14, 15, 16, 17, 18, 19, 20, -2147483647, -2147483647,
           23, 24, 25, 26, 27, 28, 29, 30, -2147483647, -2147483647,
           23, 24, 25, 26, 27, 28, 29, 30, -2147483647, -2147483647,
           33, 34, 35, 36, 37, 38, 39, 40, -2147483647, -2147483647,
           33, 34, 35, 36, 37, 38, 39, 40, -2147483647, -2147483647,
           43, 44, 45, 46, 47, 48, 49, 50, -2147483647, -2147483647,
           43, 44, 45, 46, 47, 48, 49, 50, -2147483647, -2147483647,
           53, 54, 55, 56, 57, 58, 59, 60, -2147483647, -2147483647,
           53, 54, 55, 56, 57, 58, 59, 60, -2147483647, -2147483647,
           63, 64, 65, 66, 67, 68, 69, 70, -2147483647, -2147483647,
           63, 64, 65, 66, 67, 68, 69, 70, -2147483647, -2147483647,
           73, 74, 75, 76, 77, 78, 79, 80, -2147483647, -2147483647,
           73, 74, 75, 76, 77, 78, 79, 80, -2147483647, -2147483647,
           83, 84, 85, 86, 87, 88, 89, 90, -2147483647, -2147483647,
           83, 84, 85, 86, 87, 88, 89, 90, -2147483647, -2147483647,
           93, 94, 95, 96, 97, 98, 99, 100, -2147483647, -2147483647,
           93, 94, 95, 96, 97, 98, 99, 100, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647}));
}
TEST(ReferenceTest, RandomJaxReference72) {
  // Golden test: sum reduce-window over a 10x10 iota input (init/padding 0).
  // Expected values are machine-generated -- do not hand-edit.
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {1, 1},
      {0, -1, 2, 0},
      0,
      {1, 2},
      {2, 2},
      {2, 2},
      std::plus<>());
  EXPECT_THAT(res.shape, ElementsAre(5, 5));
  EXPECT_THAT(res.data,
              ElementsAreArray({1, 4, 8, 12, 16, 21, 44, 48, 52,
                                56, 41, 84, 88, 92, 96, 61, 124, 128,
                                132, 136, 81, 164, 168, 172, 176}));
}
TEST(ReferenceTest, RandomJaxReference73) {
  // Golden test: max reduce-window over a 10x10 iota input (init/padding value
  // -2147483647). Expected values are machine-generated -- do not hand-edit.
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {1, 1},
      {0, 0, 0, 0},
      -2147483647,
      {1, 2},
      {1, 2},
      {1, 1},
      [](auto a, auto b) { return a >= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(10, 8));
  EXPECT_THAT(
      res.data,
      ElementsAreArray({3, 4, 5, 6, 7, 8, 9, 10, 13, 14, 15, 16, 17, 18,
                        19, 20, 23, 24, 25, 26, 27, 28, 29, 30, 33, 34, 35, 36,
                        37, 38, 39, 40, 43, 44, 45, 46, 47, 48, 49, 50, 53, 54,
                        55, 56, 57, 58, 59, 60, 63, 64, 65, 66, 67, 68, 69, 70,
                        73, 74, 75, 76, 77, 78, 79, 80, 83, 84, 85, 86, 87, 88,
                        89, 90, 93, 94, 95, 96, 97, 98, 99, 100}));
}
TEST(ReferenceTest, RandomJaxReference74) {
  // Golden test: sum reduce-window over a 10x10 iota input (init/padding 0).
  // Expected values are machine-generated -- do not hand-edit.
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {1, 1},
      {0, -2, -2, -1},
      0,
      {2, 2},
      {1, 2},
      {1, 1},
      std::plus<>());
  EXPECT_THAT(res.shape, ElementsAre(7, 5));
  EXPECT_THAT(res.data,
              ElementsAreArray({36, 40, 44, 48, 52, 76, 80, 84, 88,
                                92, 116, 120, 124, 128, 132, 156, 160, 164,
                                168, 172, 196, 200, 204, 208, 212, 236, 240,
                                244, 248, 252, 276, 280, 284, 288, 292}));
}
TEST(ReferenceTest, RandomJaxReference75) {
  // Golden test: sum reduce-window over a 10x10 iota input (init/padding 0).
  // Expected values are machine-generated -- do not hand-edit.
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 1},
      {0, 1, -2, 1},
      0,
      {2, 1},
      {2, 1},
      {2, 2},
      std::plus<>());
  EXPECT_THAT(res.shape, ElementsAre(9, 5));
  EXPECT_THAT(res.data,
              ElementsAreArray({16, 20, 24, 28, 0, 36, 40, 44, 48,
                                0, 56, 60, 64, 68, 0, 76, 80, 84,
                                88, 0, 96, 100, 104, 108, 0, 116, 120,
                                124, 128, 0, 136, 140, 144, 148, 0, 156,
                                160, 164, 168, 0, 176, 180, 184, 188, 0}));
}
TEST(ReferenceTest, RandomJaxReference76) {
  // Golden test: sum reduce-window over a 10x10 iota input (init/padding 0).
  // Expected values are machine-generated -- do not hand-edit.
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {1, 2},
      {2, -1, -1, 0},
      0,
      {1, 1},
      {1, 2},
      {2, 1},
      std::plus<>());
  EXPECT_THAT(res.shape, ElementsAre(6, 18));
  EXPECT_THAT(
      res.data,
      ElementsAreArray({0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                        0, 0, 0, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0, 8,
                        0, 9, 0, 10, 0, 22, 0, 23, 0, 24, 0, 25, 0, 26, 0, 27,
                        0, 28, 0, 29, 0, 30, 0, 42, 0, 43, 0, 44, 0, 45, 0, 46,
                        0, 47, 0, 48, 0, 49, 0, 50, 0, 62, 0, 63, 0, 64, 0, 65,
                        0, 66, 0, 67, 0, 68, 0, 69, 0, 70, 0, 82, 0, 83, 0, 84,
                        0, 85, 0, 86, 0, 87, 0, 88, 0, 89, 0, 90}));
}
TEST(ReferenceTest, RandomJaxReference77) {
  // Golden test: max reduce-window over a 10x10 iota input (init/padding value
  // -2147483647); every output window is empty, so the result is all-identity.
  // Expected values are machine-generated -- do not hand-edit.
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 1},
      {-1, 2, -1, -2},
      -2147483647,
      {1, 2},
      {2, 2},
      {2, 1},
      [](auto a, auto b) { return a >= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(10, 5));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647}));
}
TEST(ReferenceTest, RandomJaxReference78) {
  // Golden test: sum reduce-window over a 10x10 iota input (init/padding 0).
  // Expected values are machine-generated -- do not hand-edit.
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 1},
      {-2, 1, 2, -1},
      0,
      {1, 1},
      {2, 1},
      {1, 2},
      std::plus<>());
  EXPECT_THAT(res.shape, ElementsAre(18, 6));
  EXPECT_THAT(
      res.data,
      ElementsAreArray({0, 11, 13, 15, 17, 19, 0, 0, 0, 0, 0, 0, 0, 21,
                        23, 25, 27, 29, 0, 0, 0, 0, 0, 0, 0, 31, 33, 35,
                        37, 39, 0, 0, 0, 0, 0, 0, 0, 41, 43, 45, 47, 49,
                        0, 0, 0, 0, 0, 0, 0, 51, 53, 55, 57, 59, 0, 0,
                        0, 0, 0, 0, 0, 61, 63, 65, 67, 69, 0, 0, 0, 0,
                        0, 0, 0, 71, 73, 75, 77, 79, 0, 0, 0, 0, 0, 0,
                        0, 81, 83, 85, 87, 89, 0, 0, 0, 0, 0, 0, 0, 91,
                        93, 95, 97, 99, 0, 0, 0, 0, 0, 0}));
}
TEST(ReferenceTest, RandomJaxReference79) {
  // Golden test: min reduce-window over a 10x10 iota input (init/padding value
  // 2147483646). Expected values are machine-generated -- do not hand-edit.
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {1, 2},
      {-1, -1, -2, 1},
      2147483646,
      {1, 1},
      {1, 1},
      {1, 2},
      [](auto a, auto b) { return a <= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(8, 9));
  EXPECT_THAT(res.data,
              ElementsAreArray(
                  {12, 13, 14, 15, 16, 17, 18, 19, 20, 22, 23, 24, 25, 26, 27,
                   28, 29, 30, 32, 33, 34, 35, 36, 37, 38, 39, 40, 42, 43, 44,
                   45, 46, 47, 48, 49, 50, 52, 53, 54, 55, 56, 57, 58, 59, 60,
                   62, 63, 64, 65, 66, 67, 68, 69, 70, 72, 73, 74, 75, 76, 77,
                   78, 79, 80, 82, 83, 84, 85, 86, 87, 88, 89, 90}));
}
TEST(ReferenceTest, RandomJaxReference80) {
  // Golden test: product reduce-window over a 10x10 iota input (init/padding
  // 1). Expected values are machine-generated -- do not hand-edit.
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 1},
      {0, 2, 1, -1},
      1,
      {2, 1},
      {2, 2},
      {2, 2},
      std::multiplies<>());
  EXPECT_THAT(res.shape, ElementsAre(10, 5));
  EXPECT_THAT(
      res.data,
      ElementsAreArray({1, 24, 56, 96, 144, 1, 264, 336, 416, 504,
                        1, 704, 816, 936, 1064, 1, 1344, 1496, 1656, 1824,
                        1, 2184, 2376, 2576, 2784, 1, 3224, 3456, 3696, 3944,
                        1, 4464, 4736, 5016, 5304, 1, 5904, 6216, 6536, 6864,
                        1, 7544, 7896, 8256, 8624, 1, 92, 94, 96, 98}));
}
TEST(ReferenceTest, RandomJaxReference81) {
  // Golden test: product reduce-window over a 10x10 iota input (init/padding
  // 1). Expected values are machine-generated -- do not hand-edit.
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {1, 1},
      {0, -1, 0, 2},
      1,
      {1, 1},
      {2, 1},
      {2, 2},
      std::multiplies<>());
  EXPECT_THAT(res.shape, ElementsAre(5, 6));
  EXPECT_THAT(res.data,
              ElementsAreArray({1, 3, 5, 7, 9, 1, 21, 23, 25, 27,
                                29, 1, 41, 43, 45, 47, 49, 1, 61, 63,
                                65, 67, 69, 1, 81, 83, 85, 87, 89, 1}));
}
TEST(ReferenceTest, RandomJaxReference82) {
  // Golden test: product reduce-window over a 10x10 iota input (init/padding
  // 1). Expected values are machine-generated -- do not hand-edit.
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {1, 1},
      {0, 2, 0, 2},
      1,
      {2, 2},
      {1, 2},
      {1, 2},
      std::multiplies<>());
  EXPECT_THAT(res.shape, ElementsAre(11, 5));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {429, 2925, 8925, 20349, 171, 69069, 112125,
           172125, 252909, 551, 494109, 664125, 874125, 1129869,
           1131, 1803549, 2234925, 2738925, 3323229, 1911, 4765389,
           5640525, 6630525, 7744989, 2891, 10387629, 11936925, 13652925,
           15547149, 4071, 19918269, 22420125, 25150125, 28121709, 5451,
           34845309, 38626125, 42706125, 47100669, 7031, 56896749, 62330925,
           68144925, 74356029, 8811, 8463, 8835, 9215, 9603,
           99, 1, 1, 1, 1, 1}));
}
TEST(ReferenceTest, RandomJaxReference83) {
  // Golden test: max reduce-window over a 10x10 iota input (init/padding value
  // -2147483647). Expected values are machine-generated -- do not hand-edit.
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {1, 2},
      {2, -1, -2, -2},
      -2147483647,
      {2, 1},
      {1, 1},
      {1, 2},
      [](auto a, auto b) { return a >= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(10, 8));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647,
           2, 3, 4, 5, 6, 7, 8, 9,
           12, 13, 14, 15, 16, 17, 18, 19,
           22, 23, 24, 25, 26, 27, 28, 29,
           32, 33, 34, 35, 36, 37, 38, 39,
           42, 43, 44, 45, 46, 47, 48, 49,
           52, 53, 54, 55, 56, 57, 58, 59,
           62, 63, 64, 65, 66, 67, 68, 69,
           72, 73, 74, 75, 76, 77, 78, 79,
           82, 83, 84, 85, 86, 87, 88, 89}));
}
TEST(ReferenceTest, RandomJaxReference84) {
  // Golden test: min reduce-window over a 10x10 iota input (init/padding value
  // 2147483646). Expected values are machine-generated -- do not hand-edit.
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 2},
      {2, -2, -2, 2},
      2147483646,
      {1, 1},
      {2, 2},
      {1, 2},
      [](auto a, auto b) { return a <= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(19, 10));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2, 3, 4, 5, 6, 7, 8, 9, 10, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           12, 13, 14, 15, 16, 17, 18, 19, 20, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           22, 23, 24, 25, 26, 27, 28, 29, 30, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           32, 33, 34, 35, 36, 37, 38, 39, 40, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           42, 43, 44, 45, 46, 47, 48, 49, 50, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           52, 53, 54, 55, 56, 57, 58, 59, 60, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           62, 63, 64, 65, 66, 67, 68, 69, 70, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           72, 73, 74, 75, 76, 77, 78, 79, 80, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           82, 83, 84, 85, 86, 87, 88, 89, 90, 2147483646}));
}
TEST(ReferenceTest, RandomJaxReference85) {
  // Golden test: max reduce-window over a 10x10 iota input (init/padding value
  // -2147483647); every output window is empty, so the result is all-identity.
  // Expected values are machine-generated -- do not hand-edit.
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 1},
      {1, 2, -2, -2},
      -2147483647,
      {1, 2},
      {2, 2},
      {2, 2},
      [](auto a, auto b) { return a >= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(11, 2));
  EXPECT_THAT(res.data, ElementsAreArray(
                            {-2147483647, -2147483647, -2147483647, -2147483647,
                             -2147483647, -2147483647, -2147483647, -2147483647,
                             -2147483647, -2147483647, -2147483647, -2147483647,
                             -2147483647, -2147483647, -2147483647, -2147483647,
                             -2147483647, -2147483647, -2147483647, -2147483647,
                             -2147483647, -2147483647}));
}
TEST(ReferenceTest, RandomJaxReference86) {
  // Golden test: max reduce-window over a 10x10 iota input (init/padding value
  // -2147483647). Expected values are machine-generated -- do not hand-edit.
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 1},
      {-2, -1, 2, -2},
      -2147483647,
      {1, 2},
      {2, 1},
      {2, 2},
      [](auto a, auto b) { return a >= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(8, 5));
  EXPECT_THAT(res.data,
              ElementsAreArray(
                  {-2147483647, 12, 14, 16, 18, -2147483647, 22, 24, 26, 28,
                   -2147483647, 32, 34, 36, 38, -2147483647, 42, 44, 46, 48,
                   -2147483647, 52, 54, 56, 58, -2147483647, 62, 64, 66, 68,
                   -2147483647, 72, 74, 76, 78, -2147483647, 82, 84, 86, 88}));
}
TEST(ReferenceTest, RandomJaxReference87) {
  // Golden test: max reduce-window over a 10x10 iota input (init/padding value
  // -2147483647). Expected values are machine-generated -- do not hand-edit.
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {1, 2},
      {-2, 0, 2, -1},
      -2147483647,
      {1, 2},
      {1, 1},
      {1, 2},
      [](auto a, auto b) { return a >= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(8, 10));
  EXPECT_THAT(res.data, ElementsAreArray(
                            {-2147483647, 21, 22, 23, 24, 25, 26, 27, 28, 29,
                             -2147483647, 31, 32, 33, 34, 35, 36, 37, 38, 39,
                             -2147483647, 41, 42, 43, 44, 45, 46, 47, 48, 49,
                             -2147483647, 51, 52, 53, 54, 55, 56, 57, 58, 59,
                             -2147483647, 61, 62, 63, 64, 65, 66, 67, 68, 69,
                             -2147483647, 71, 72, 73, 74, 75, 76, 77, 78, 79,
                             -2147483647, 81, 82, 83, 84, 85, 86, 87, 88, 89,
                             -2147483647, 91, 92, 93, 94, 95, 96, 97, 98, 99}));
}
TEST(ReferenceTest, RandomJaxReference88) {
  // Golden test: max reduce-window over a 10x10 iota input (init/padding value
  // -2147483647). Expected values are machine-generated -- do not hand-edit.
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {1, 1},
      {-2, 1, 2, 0},
      -2147483647,
      {2, 2},
      {2, 1},
      {2, 1},
      [](auto a, auto b) { return a >= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(4, 11));
  EXPECT_THAT(
      res.data,
      ElementsAreArray({-2147483647, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
                        -2147483647, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
                        -2147483647, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
                        -2147483647, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90}));
}
TEST(ReferenceTest, RandomJaxReference89) {
  // Golden test: sum reduce-window over a 10x10 iota input (init/padding 0);
  // every output window is empty, so the result is all zeros. Expected values
  // are machine-generated -- do not hand-edit.
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 1},
      {1, -2, 2, 2},
      0,
      {1, 1},
      {2, 1},
      {2, 1},
      std::plus<>());
  EXPECT_THAT(res.shape, ElementsAre(9, 14));
  EXPECT_THAT(
      res.data,
      ElementsAreArray({0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}));
}
TEST(ReferenceTest, RandomJaxReference90) {
  // Golden test: sum reduce-window over a 10x10 iota input (init/padding 0);
  // every output window is empty, so the result is all zeros. Expected values
  // are machine-generated -- do not hand-edit.
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {1, 2},
      {-2, 0, 1, 1},
      0,
      {1, 1},
      {1, 1},
      {2, 2},
      std::plus<>());
  EXPECT_THAT(res.shape, ElementsAre(4, 11));
  EXPECT_THAT(res.data,
              ElementsAreArray({0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}));
}
TEST(ReferenceTest, RandomJaxReference91) {
  // Golden test: product reduce-window over a 10x10 iota input (init/padding
  // 1). Expected values are machine-generated -- do not hand-edit.
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {1, 1},
      {-2, -2, 1, 2},
      1,
      {2, 2},
      {1, 2},
      {1, 2},
      std::multiplies<>());
  EXPECT_THAT(res.shape, ElementsAre(5, 6));
  EXPECT_THAT(
      res.data,
      ElementsAreArray({704, 574464, 763776, 995904, 1276800, 1200,
                        1344, 2010624, 2477376, 3020544, 3648000, 2000,
                        2184, 5189184, 6120576, 7171584, 8352000, 3000,
                        3224, 11142144, 12773376, 14577024, 16564800, 4200,
                        4464, 21141504, 23755776, 26604864, 29702400, 5600}));
}
TEST(ReferenceTest, RandomJaxReference92) {
  // Golden test: min reduce-window over a 10x10 iota input (init/padding value
  // 2147483646). Expected values are machine-generated -- do not hand-edit.
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 2},
      {0, 0, 0, 2},
      2147483646,
      {2, 2},
      {1, 2},
      {2, 2},
      [](auto a, auto b) { return a <= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(9, 10));
  EXPECT_THAT(res.data, ElementsAreArray(
                            {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
                             14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
                             27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
                             40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52,
                             53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65,
                             66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78,
                             79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90}));
}
TEST(ReferenceTest, RandomJaxReference93) {
  // Golden test: min reduce-window over a 10x10 iota input (init/padding value
  // 2147483646). Expected values are machine-generated -- do not hand-edit.
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {1, 2},
      {0, -1, 0, -2},
      2147483646,
      {1, 1},
      {1, 2},
      {1, 1},
      [](auto a, auto b) { return a <= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(9, 17));
  EXPECT_THAT(res.data,
              ElementsAreArray(
                  {1, 2147483646, 2, 2147483646, 3, 2147483646, 4, 2147483646,
                   5, 2147483646, 6, 2147483646, 7, 2147483646, 8, 2147483646,
                   9,
                   11, 2147483646, 12, 2147483646, 13, 2147483646, 14,
                   2147483646, 15, 2147483646, 16, 2147483646, 17, 2147483646,
                   18, 2147483646, 19,
                   21, 2147483646, 22, 2147483646, 23, 2147483646, 24,
                   2147483646, 25, 2147483646, 26, 2147483646, 27, 2147483646,
                   28, 2147483646, 29,
                   31, 2147483646, 32, 2147483646, 33, 2147483646, 34,
                   2147483646, 35, 2147483646, 36, 2147483646, 37, 2147483646,
                   38, 2147483646, 39,
                   41, 2147483646, 42, 2147483646, 43, 2147483646, 44,
                   2147483646, 45, 2147483646, 46, 2147483646, 47, 2147483646,
                   48, 2147483646, 49,
                   51, 2147483646, 52, 2147483646, 53, 2147483646, 54,
                   2147483646, 55, 2147483646, 56, 2147483646, 57, 2147483646,
                   58, 2147483646, 59,
                   61, 2147483646, 62, 2147483646, 63, 2147483646, 64,
                   2147483646, 65, 2147483646, 66, 2147483646, 67, 2147483646,
                   68, 2147483646, 69,
                   71, 2147483646, 72, 2147483646, 73, 2147483646, 74,
                   2147483646, 75, 2147483646, 76, 2147483646, 77, 2147483646,
                   78, 2147483646, 79,
                   81, 2147483646, 82, 2147483646, 83, 2147483646, 84,
                   2147483646, 85, 2147483646, 86, 2147483646, 87, 2147483646,
                   88, 2147483646, 89}));
}
TEST(ReferenceTest, RandomJaxReference94) {
  // Golden test: max reduce-window over a 10x10 iota input (init/padding value
  // -2147483647). Expected values are machine-generated -- do not hand-edit.
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {1, 1},
      {-2, 0, -1, -2},
      -2147483647,
      {1, 2},
      {2, 1},
      {1, 2},
      [](auto a, auto b) { return a >= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(8, 3));
  EXPECT_THAT(res.data, ElementsAreArray({23, 25, 27, 33, 35, 37, 43, 45,
                                          47, 53, 55, 57, 63, 65, 67, 73,
                                          75, 77, 83, 85, 87, 93, 95, 97}));
}
TEST(ReferenceTest, RandomJaxReference95) {
  // Golden test: product reduce-window over a 10x10 iota input (init/padding
  // 1). Expected values are machine-generated -- do not hand-edit.
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {1, 2},
      {0, 0, 2, 2},
      1,
      {1, 1},
      {1, 1},
      {1, 1},
      std::multiplies<>());
  EXPECT_THAT(res.shape, ElementsAre(10, 23));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {1, 1, 1, 1, 2, 1, 3, 1, 4, 1, 5, 1, 6, 1, 7, 1, 8,
           1, 9, 1, 10, 1, 1, 1, 1, 11, 1, 12, 1, 13, 1, 14, 1, 15,
           1, 16, 1, 17, 1, 18, 1, 19, 1, 20, 1, 1, 1, 1, 21, 1, 22,
           1, 23, 1, 24, 1, 25, 1, 26, 1, 27, 1, 28, 1, 29, 1, 30, 1,
           1, 1, 1, 31, 1, 32, 1, 33, 1, 34, 1, 35, 1, 36, 1, 37, 1,
           38, 1, 39, 1, 40, 1, 1, 1, 1, 41, 1, 42, 1, 43, 1, 44, 1,
           45, 1, 46, 1, 47, 1, 48, 1, 49, 1, 50, 1, 1, 1, 1, 51, 1,
           52, 1, 53, 1, 54, 1, 55, 1, 56, 1, 57, 1, 58, 1, 59, 1, 60,
           1, 1, 1, 1, 61, 1, 62, 1, 63, 1, 64, 1, 65, 1, 66, 1, 67,
           1, 68, 1, 69, 1, 70, 1, 1, 1, 1, 71, 1, 72, 1, 73, 1, 74,
           1, 75, 1, 76, 1, 77, 1, 78, 1, 79, 1, 80, 1, 1, 1, 1, 81,
           1, 82, 1, 83, 1, 84, 1, 85, 1, 86, 1, 87, 1, 88, 1, 89, 1,
           90, 1, 1, 1, 1, 91, 1, 92, 1, 93, 1, 94, 1, 95, 1, 96, 1,
           97, 1, 98, 1, 99, 1, 100, 1, 1}));
}
TEST(ReferenceTest, RandomJaxReference96) {
  // Golden test: sum reduce-window over a 10x10 iota input (init/padding 0).
  // Expected values are machine-generated -- do not hand-edit.
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {1, 1},
      {2, -1, -1, 2},
      0,
      {2, 2},
      {1, 1},
      {1, 1},
      std::plus<>());
  EXPECT_THAT(res.shape, ElementsAre(10, 10));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 7, 9,
           11, 13, 15, 17, 19, 10, 0, 30, 34, 38, 42, 46, 50,
           54, 58, 30, 0, 70, 74, 78, 82, 86, 90, 94, 98, 50,
           0, 110, 114, 118, 122, 126, 130, 134, 138, 70, 0, 150, 154,
           158, 162, 166, 170, 174, 178, 90, 0, 190, 194, 198, 202, 206,
           210, 214, 218, 110, 0, 230, 234, 238, 242, 246, 250, 254, 258,
           130, 0, 270, 274, 278, 282, 286, 290, 294, 298, 150, 0, 310,
           314, 318, 322, 326, 330, 334, 338, 170, 0}));
}
TEST(ReferenceTest, RandomJaxReference97) {
  // Golden test: sum reduce-window over a 10x10 iota input (init/padding 0).
  // Expected values are machine-generated -- do not hand-edit.
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {1, 1},
      {2, 2, -1, 1},
      0,
      {2, 2},
      {2, 1},
      {1, 2},
      std::plus<>());
  EXPECT_THAT(res.shape, ElementsAre(12, 5));
  EXPECT_THAT(
      res.data,
      ElementsAreArray({5, 9, 13, 17, 10, 25, 29, 33, 37, 20,
                        50, 58, 66, 74, 40, 90, 98, 106, 114, 60,
                        130, 138, 146, 154, 80, 170, 178, 186, 194, 100,
                        210, 218, 226, 234, 120, 250, 258, 266, 274, 140,
                        290, 298, 306, 314, 160, 330, 338, 346, 354, 180,
                        165, 169, 173, 177, 90, 185, 189, 193, 197, 100}));
}
TEST(ReferenceTest, RandomJaxReference98) {
  // Golden test: min reduce-window over a 10x10 iota input (init/padding value
  // 2147483646). Expected values are machine-generated -- do not hand-edit.
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 2},
      {2, -2, -1, 0},
      2147483646,
      {2, 2},
      {1, 1},
      {1, 1},
      [](auto a, auto b) { return a <= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(18, 17));
  EXPECT_THAT(res.data,
              ElementsAreArray(
                  {2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
                   2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
                   2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
                   2147483646, 2147483646,
                   2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10,
                   2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10,
                   12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 17, 17, 18, 18,
                   19, 19, 20,
                   12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 17, 17, 18, 18,
                   19, 19, 20,
                   22, 22, 23, 23, 24, 24, 25, 25, 26, 26, 27, 27, 28, 28,
                   29, 29, 30,
                   22, 22, 23, 23, 24, 24, 25, 25, 26, 26, 27, 27, 28, 28,
                   29, 29, 30,
                   32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 37, 37, 38, 38,
                   39, 39, 40,
                   32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 37, 37, 38, 38,
                   39, 39, 40,
                   42, 42, 43, 43, 44, 44, 45, 45, 46, 46, 47, 47, 48, 48,
                   49, 49, 50,
                   42, 42, 43, 43, 44, 44, 45, 45, 46, 46, 47, 47, 48, 48,
                   49, 49, 50,
                   52, 52, 53, 53, 54, 54, 55, 55, 56, 56, 57, 57, 58, 58,
                   59, 59, 60,
                   52, 52, 53, 53, 54, 54, 55, 55, 56, 56, 57, 57, 58, 58,
                   59, 59, 60,
                   62, 62, 63, 63, 64, 64, 65, 65, 66, 66, 67, 67, 68, 68,
                   69, 69, 70,
                   62, 62, 63, 63, 64, 64, 65, 65, 66, 66, 67, 67, 68, 68,
                   69, 69, 70,
                   72, 72, 73, 73, 74, 74, 75, 75, 76, 76, 77, 77, 78, 78,
                   79, 79, 80,
                   72, 72, 73, 73, 74, 74, 75, 75, 76, 76, 77, 77, 78, 78,
                   79, 79, 80,
                   82, 82, 83, 83, 84, 84, 85, 85, 86, 86, 87, 87, 88, 88,
                   89, 89, 90}));
}
TEST(ReferenceTest, RandomJaxReference99) {
  // Golden test: product reduce-window over a 10x10 iota input (init/padding
  // 1). Expected values are machine-generated -- do not hand-edit.
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {1, 2},
      {-1, -1, -2, 1},
      1,
      {1, 1},
      {2, 1},
      {2, 2},
      std::multiplies<>());
  EXPECT_THAT(res.shape, ElementsAre(4, 9));
  EXPECT_THAT(res.data, ElementsAreArray({12, 13, 14, 15, 16, 17, 18, 19, 20,
                                          32, 33, 34, 35, 36, 37, 38, 39, 40,
                                          52, 53, 54, 55, 56, 57, 58, 59, 60,
                                          72, 73, 74, 75, 76, 77, 78, 79, 80}));
}
TEST(ReferenceTest, RandomJaxReference100) {
  // Golden test: min reduce-window over a 10x10 iota input (init/padding value
  // 2147483646). Expected values are machine-generated -- do not hand-edit.
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 2},
      {0, 1, 1, 1},
      2147483646,
      {1, 2},
      {1, 1},
      {2, 1},
      [](auto a, auto b) { return a <= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(10, 20));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9,
           9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 17, 17,
           18, 18, 19, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 24, 25, 25, 26,
           26, 27, 27, 28, 28, 29, 29, 30, 30, 31, 31, 32, 32, 33, 33, 34, 34,
           35, 35, 36, 36, 37, 37, 38, 38, 39, 39, 40, 40, 41, 41, 42, 42, 43,
           43, 44, 44, 45, 45, 46, 46, 47, 47, 48, 48, 49, 49, 50, 50, 51, 51,
           52, 52, 53, 53, 54, 54, 55, 55, 56, 56, 57, 57, 58, 58, 59, 59, 60,
           60, 61, 61, 62, 62, 63, 63, 64, 64, 65, 65, 66, 66, 67, 67, 68, 68,
           69, 69, 70, 70, 71, 71, 72, 72, 73, 73, 74, 74, 75, 75, 76, 76, 77,
           77, 78, 78, 79, 79, 80, 80, 81, 81, 82, 82, 83, 83, 84, 84, 85, 85,
           86, 86, 87, 87, 88, 88, 89, 89, 90, 90, 91, 91, 92, 92, 93, 93, 94,
           94, 95, 95, 96, 96, 97, 97, 98, 98, 99, 99, 100, 100}));
}
TEST(ReferenceTest, RandomJaxReference101) {
  // Golden test: product reduce-window over a 10x10 iota input (init/padding
  // 1). Expected values are machine-generated -- do not hand-edit.
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 1},
      {-2, 2, 2, 0},
      1,
      {2, 1},
      {2, 1},
      {1, 1},
      std::multiplies<>());
  EXPECT_THAT(res.shape, ElementsAre(17, 12));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {1, 1, 231, 264, 299, 336, 375, 416, 459, 504, 551, 600,
           1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
           1, 1, 651, 704, 759, 816, 875, 936, 999, 1064, 1131, 1200,
           1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
           1, 1, 1271, 1344, 1419, 1496, 1575, 1656, 1739, 1824, 1911, 2000,
           1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
           1, 1, 2091, 2184, 2279, 2376, 2475, 2576, 2679, 2784, 2891, 3000,
           1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
           1, 1, 3111, 3224, 3339, 3456, 3575, 3696, 3819, 3944, 4071, 4200,
           1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
           1, 1, 4331, 4464, 4599, 4736, 4875, 5016, 5159, 5304, 5451, 5600,
           1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
           1, 1, 5751, 5904, 6059, 6216, 6375, 6536, 6699, 6864, 7031, 7200,
           1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
           1, 1, 7371, 7544, 7719, 7896, 8075, 8256, 8439, 8624, 8811, 9000,
           1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
           1, 1, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100}));
}
// Golden-data test: max-reduce (init -2147483647 acts as -inf) of a 10x10
// iota tensor; expects shape (11, 16). With these window/padding/dilation
// args no input element lands in any window, so every output is the init.
// Bracketed args follow ReduceWindow's parameter order (declared elsewhere).
TEST(ReferenceTest, RandomJaxReference102) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 2},
      {1, 1, -2, 1},
      -2147483647,
      {1, 2},
      {2, 2},
      {2, 1},
      [](auto a, auto b) { return a >= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(11, 16));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647}));
}
// Golden-data test: product-reduce (identity init 1) of a 10x10 iota tensor;
// expects shape (11, 8). Bracketed args follow ReduceWindow's parameter
// order — see its declaration elsewhere in this file.
TEST(ReferenceTest, RandomJaxReference103) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {1, 1},
      {0, 1, 1, -1},
      1,
      {1, 2},
      {2, 2},
      {1, 1},
      std::multiplies<>());
  EXPECT_THAT(res.shape, ElementsAre(11, 8));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {2, 3, 8, 15, 24, 35, 48, 63, 12, 143, 168,
           195, 224, 255, 288, 323, 22, 483, 528, 575, 624, 675,
           728, 783, 32, 1023, 1088, 1155, 1224, 1295, 1368, 1443, 42,
           1763, 1848, 1935, 2024, 2115, 2208, 2303, 52, 2703, 2808, 2915,
           3024, 3135, 3248, 3363, 62, 3843, 3968, 4095, 4224, 4355, 4488,
           4623, 72, 5183, 5328, 5475, 5624, 5775, 5928, 6083, 82, 6723,
           6888, 7055, 7224, 7395, 7568, 7743, 92, 8463, 8648, 8835, 9024,
           9215, 9408, 9603, 1, 1, 1, 1, 1, 1, 1, 1}));
}
// Golden-data test: sum-reduce (identity init 0) of a 10x10 iota tensor;
// expects shape (18, 9). Bracketed args follow ReduceWindow's parameter
// order — see its declaration elsewhere in this file.
TEST(ReferenceTest, RandomJaxReference104) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 1},
      {2, -1, 1, -1},
      0,
      {2, 2},
      {2, 1},
      {1, 1},
      std::plus<>());
  EXPECT_THAT(res.shape, ElementsAre(18, 9));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {1, 3, 5, 7, 9, 11, 13, 15, 17, 0, 0, 0, 0, 0,
           0, 0, 0, 0, 12, 26, 30, 34, 38, 42, 46, 50, 54, 0,
           0, 0, 0, 0, 0, 0, 0, 0, 32, 66, 70, 74, 78, 82,
           86, 90, 94, 0, 0, 0, 0, 0, 0, 0, 0, 0, 52, 106,
           110, 114, 118, 122, 126, 130, 134, 0, 0, 0, 0, 0, 0, 0,
           0, 0, 72, 146, 150, 154, 158, 162, 166, 170, 174, 0, 0, 0,
           0, 0, 0, 0, 0, 0, 92, 186, 190, 194, 198, 202, 206, 210,
           214, 0, 0, 0, 0, 0, 0, 0, 0, 0, 112, 226, 230, 234,
           238, 242, 246, 250, 254, 0, 0, 0, 0, 0, 0, 0, 0, 0,
           132, 266, 270, 274, 278, 282, 286, 290, 294, 0, 0, 0, 0, 0,
           0, 0, 0, 0, 152, 306, 310, 314, 318, 322, 326, 330, 334, 0,
           0, 0, 0, 0, 0, 0, 0, 0}));
}
// Golden-data test: product-reduce (identity init 1) of a 10x10 iota tensor;
// expects shape (18, 10). Bracketed args follow ReduceWindow's parameter
// order — see its declaration elsewhere in this file.
TEST(ReferenceTest, RandomJaxReference105) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 1},
      {-1, 2, 1, -1},
      1,
      {2, 1},
      {2, 1},
      {1, 1},
      std::multiplies<>());
  EXPECT_THAT(res.shape, ElementsAre(18, 10));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
           231, 264, 299, 336, 375, 416, 459, 504, 551, 1, 1,
           1, 1, 1, 1, 1, 1, 1, 1, 1, 651, 704,
           759, 816, 875, 936, 999, 1064, 1131, 1, 1, 1, 1,
           1, 1, 1, 1, 1, 1, 1, 1271, 1344, 1419, 1496,
           1575, 1656, 1739, 1824, 1911, 1, 1, 1, 1, 1, 1,
           1, 1, 1, 1, 1, 2091, 2184, 2279, 2376, 2475, 2576,
           2679, 2784, 2891, 1, 1, 1, 1, 1, 1, 1, 1,
           1, 1, 1, 3111, 3224, 3339, 3456, 3575, 3696, 3819, 3944,
           4071, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
           1, 4331, 4464, 4599, 4736, 4875, 5016, 5159, 5304, 5451, 1,
           1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 5751,
           5904, 6059, 6216, 6375, 6536, 6699, 6864, 7031, 1, 1, 1,
           1, 1, 1, 1, 1, 1, 1, 1, 7371, 7544, 7719,
           7896, 8075, 8256, 8439, 8624, 8811, 1, 1, 1, 1, 1,
           1, 1, 1, 1, 1, 1, 91, 92, 93, 94, 95,
           96, 97, 98, 99}));
}
// Golden-data test: product-reduce (identity init 1) of a 10x10 iota tensor;
// expects shape (7, 18). Bracketed args follow ReduceWindow's parameter
// order — see its declaration elsewhere in this file.
TEST(ReferenceTest, RandomJaxReference106) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {1, 2},
      {2, 2, -2, 2},
      1,
      {1, 2},
      {2, 1},
      {2, 1},
      std::multiplies<>());
  EXPECT_THAT(res.shape, ElementsAre(7, 18));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
           1, 1, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9,
           9, 10, 10, 1, 22, 23, 23, 24, 24, 25, 25, 26, 26, 27, 27, 28,
           28, 29, 29, 30, 30, 1, 42, 43, 43, 44, 44, 45, 45, 46, 46, 47,
           47, 48, 48, 49, 49, 50, 50, 1, 62, 63, 63, 64, 64, 65, 65, 66,
           66, 67, 67, 68, 68, 69, 69, 70, 70, 1, 82, 83, 83, 84, 84, 85,
           85, 86, 86, 87, 87, 88, 88, 89, 89, 90, 90, 1, 1, 1, 1, 1,
           1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}));
}
// Golden-data test: min-reduce (init 2147483646 acts as +inf) of a 10x10
// iota tensor; expects shape (10, 11). Bracketed args follow ReduceWindow's
// parameter order — see its declaration elsewhere in this file.
TEST(ReferenceTest, RandomJaxReference107) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 2},
      {0, 1, 2, 0},
      2147483646,
      {1, 1},
      {1, 2},
      {2, 2},
      [](auto a, auto b) { return a <= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(10, 11));
  EXPECT_THAT(
      res.data,
      ElementsAreArray({2147483646, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
                        2147483646, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
                        2147483646, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
                        2147483646, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
                        2147483646, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
                        2147483646, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
                        2147483646, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
                        2147483646, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
                        2147483646, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
                        2147483646, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100}));
}
// Golden-data test: max-reduce (init -2147483647 acts as -inf) of a 10x10
// iota tensor; expects shape (11, 20). Positions whose (dilated) window
// covers only padding stay at the init value. Bracketed args follow
// ReduceWindow's parameter order (declared elsewhere in this file).
TEST(ReferenceTest, RandomJaxReference108) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {1, 2},
      {2, -1, 2, -1},
      -2147483647,
      {1, 1},
      {2, 1},
      {1, 1},
      [](auto a, auto b) { return a >= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(11, 20));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, 1, -2147483647, 2,
           -2147483647, 3, -2147483647, 4, -2147483647,
           5, -2147483647, 6, -2147483647, 7,
           -2147483647, 8, -2147483647, 9, -2147483647,
           -2147483647, -2147483647, 11, -2147483647, 12,
           -2147483647, 13, -2147483647, 14, -2147483647,
           15, -2147483647, 16, -2147483647, 17,
           -2147483647, 18, -2147483647, 19, -2147483647,
           -2147483647, -2147483647, 21, -2147483647, 22,
           -2147483647, 23, -2147483647, 24, -2147483647,
           25, -2147483647, 26, -2147483647, 27,
           -2147483647, 28, -2147483647, 29, -2147483647,
           -2147483647, -2147483647, 31, -2147483647, 32,
           -2147483647, 33, -2147483647, 34, -2147483647,
           35, -2147483647, 36, -2147483647, 37,
           -2147483647, 38, -2147483647, 39, -2147483647,
           -2147483647, -2147483647, 41, -2147483647, 42,
           -2147483647, 43, -2147483647, 44, -2147483647,
           45, -2147483647, 46, -2147483647, 47,
           -2147483647, 48, -2147483647, 49, -2147483647,
           -2147483647, -2147483647, 51, -2147483647, 52,
           -2147483647, 53, -2147483647, 54, -2147483647,
           55, -2147483647, 56, -2147483647, 57,
           -2147483647, 58, -2147483647, 59, -2147483647,
           -2147483647, -2147483647, 61, -2147483647, 62,
           -2147483647, 63, -2147483647, 64, -2147483647,
           65, -2147483647, 66, -2147483647, 67,
           -2147483647, 68, -2147483647, 69, -2147483647,
           -2147483647, -2147483647, 71, -2147483647, 72,
           -2147483647, 73, -2147483647, 74, -2147483647,
           75, -2147483647, 76, -2147483647, 77,
           -2147483647, 78, -2147483647, 79, -2147483647,
           -2147483647, -2147483647, 81, -2147483647, 82,
           -2147483647, 83, -2147483647, 84, -2147483647,
           85, -2147483647, 86, -2147483647, 87,
           -2147483647, 88, -2147483647, 89, -2147483647}));
}
// Golden-data test: min-reduce (init 2147483646 acts as +inf) of a 10x10
// iota tensor; expects shape (8, 5). Bracketed args follow ReduceWindow's
// parameter order — see its declaration elsewhere in this file.
TEST(ReferenceTest, RandomJaxReference109) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 1},
      {0, -2, 0, 0},
      2147483646,
      {2, 2},
      {1, 1},
      {2, 2},
      [](auto a, auto b) { return a <= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(8, 5));
  EXPECT_THAT(
      res.data,
      ElementsAreArray({1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27,
                        29, 31, 33, 35, 37, 39, 41, 43, 45, 47, 49, 51, 53, 55,
                        57, 59, 61, 63, 65, 67, 69, 71, 73, 75, 77, 79}));
}
// Golden-data test: product-reduce (identity init 1) of a 10x10 iota tensor;
// expects shape (17, 20). Bracketed args follow ReduceWindow's parameter
// order — see its declaration elsewhere in this file.
TEST(ReferenceTest, RandomJaxReference110) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 2},
      {-1, -1, 2, 0},
      1,
      {1, 2},
      {1, 1},
      {1, 1},
      std::multiplies<>());
  EXPECT_THAT(res.shape, ElementsAre(17, 20));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
           1, 1, 1, 1, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 17,
           17, 18, 18, 19, 19, 20, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
           1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 21, 21, 22, 22, 23, 23, 24,
           24, 25, 25, 26, 26, 27, 27, 28, 28, 29, 29, 30, 1, 1, 1, 1, 1,
           1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 31,
           31, 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 37, 37, 38, 38, 39, 39,
           40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
           1, 1, 1, 1, 1, 41, 41, 42, 42, 43, 43, 44, 44, 45, 45, 46, 46,
           47, 47, 48, 48, 49, 49, 50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
           1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 51, 51, 52, 52, 53, 53,
           54, 54, 55, 55, 56, 56, 57, 57, 58, 58, 59, 59, 60, 1, 1, 1, 1,
           1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
           61, 61, 62, 62, 63, 63, 64, 64, 65, 65, 66, 66, 67, 67, 68, 68, 69,
           69, 70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
           1, 1, 1, 1, 1, 1, 71, 71, 72, 72, 73, 73, 74, 74, 75, 75, 76,
           76, 77, 77, 78, 78, 79, 79, 80, 1, 1, 1, 1, 1, 1, 1, 1, 1,
           1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 81, 81, 82, 82, 83,
           83, 84, 84, 85, 85, 86, 86, 87, 87, 88, 88, 89, 89, 90, 1, 1, 1,
           1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}));
}
// Golden-data test: min-reduce (init 2147483646 acts as +inf) of a 10x10
// iota tensor; expects shape (8, 11). Bracketed args follow ReduceWindow's
// parameter order — see its declaration elsewhere in this file.
TEST(ReferenceTest, RandomJaxReference111) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {1, 1},
      {-1, 0, 2, -1},
      2147483646,
      {2, 1},
      {1, 2},
      {1, 1},
      [](auto a, auto b) { return a <= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(8, 11));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {2147483646, 2147483646, 11, 12, 13, 14, 15, 16, 17, 18, 19,
           2147483646, 2147483646, 21, 22, 23, 24, 25, 26, 27, 28, 29,
           2147483646, 2147483646, 31, 32, 33, 34, 35, 36, 37, 38, 39,
           2147483646, 2147483646, 41, 42, 43, 44, 45, 46, 47, 48, 49,
           2147483646, 2147483646, 51, 52, 53, 54, 55, 56, 57, 58, 59,
           2147483646, 2147483646, 61, 62, 63, 64, 65, 66, 67, 68, 69,
           2147483646, 2147483646, 71, 72, 73, 74, 75, 76, 77, 78, 79,
           2147483646, 2147483646, 81, 82, 83, 84, 85, 86, 87, 88, 89}));
}
// Golden-data test: min-reduce (init 2147483646 acts as +inf) of a 10x10
// iota tensor; expects shape (20, 13). Bracketed args follow ReduceWindow's
// parameter order — see its declaration elsewhere in this file.
TEST(ReferenceTest, RandomJaxReference112) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 1},
      {1, 1, 1, 2},
      2147483646,
      {2, 1},
      {1, 2},
      {1, 1},
      [](auto a, auto b) { return a <= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(20, 13));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {2147483646, 1, 2, 3, 4,
           5, 6, 7, 8, 9,
           10, 2147483646, 2147483646, 2147483646, 1,
           2, 3, 4, 5, 6,
           7, 8, 9, 10, 2147483646,
           2147483646, 2147483646, 11, 12, 13,
           14, 15, 16, 17, 18,
           19, 20, 2147483646, 2147483646, 2147483646,
           11, 12, 13, 14, 15,
           16, 17, 18, 19, 20,
           2147483646, 2147483646, 2147483646, 21, 22,
           23, 24, 25, 26, 27,
           28, 29, 30, 2147483646, 2147483646,
           2147483646, 21, 22, 23, 24,
           25, 26, 27, 28, 29,
           30, 2147483646, 2147483646, 2147483646, 31,
           32, 33, 34, 35, 36,
           37, 38, 39, 40, 2147483646,
           2147483646, 2147483646, 31, 32, 33,
           34, 35, 36, 37, 38,
           39, 40, 2147483646, 2147483646, 2147483646,
           41, 42, 43, 44, 45,
           46, 47, 48, 49, 50,
           2147483646, 2147483646, 2147483646, 41, 42,
           43, 44, 45, 46, 47,
           48, 49, 50, 2147483646, 2147483646,
           2147483646, 51, 52, 53, 54,
           55, 56, 57, 58, 59,
           60, 2147483646, 2147483646, 2147483646, 51,
           52, 53, 54, 55, 56,
           57, 58, 59, 60, 2147483646,
           2147483646, 2147483646, 61, 62, 63,
           64, 65, 66, 67, 68,
           69, 70, 2147483646, 2147483646, 2147483646,
           61, 62, 63, 64, 65,
           66, 67, 68, 69, 70,
           2147483646, 2147483646, 2147483646, 71, 72,
           73, 74, 75, 76, 77,
           78, 79, 80, 2147483646, 2147483646,
           2147483646, 71, 72, 73, 74,
           75, 76, 77, 78, 79,
           80, 2147483646, 2147483646, 2147483646, 81,
           82, 83, 84, 85, 86,
           87, 88, 89, 90, 2147483646,
           2147483646, 2147483646, 81, 82, 83,
           84, 85, 86, 87, 88,
           89, 90, 2147483646, 2147483646, 2147483646,
           91, 92, 93, 94, 95,
           96, 97, 98, 99, 100,
           2147483646, 2147483646, 2147483646, 91, 92,
           93, 94, 95, 96, 97,
           98, 99, 100, 2147483646, 2147483646}));
}
// Golden-data test: max-reduce (init -2147483647 acts as -inf) of a 10x10
// iota tensor; expects shape (5, 10). Bracketed args follow ReduceWindow's
// parameter order — see its declaration elsewhere in this file.
TEST(ReferenceTest, RandomJaxReference113) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {1, 1},
      {2, -2, 1, 0},
      -2147483647,
      {1, 2},
      {2, 1},
      {2, 1},
      [](auto a, auto b) { return a >= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(5, 10));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           1, 2, 3, 4, 5,
           6, 7, 8, 9, 10,
           21, 22, 23, 24, 25,
           26, 27, 28, 29, 30,
           41, 42, 43, 44, 45,
           46, 47, 48, 49, 50,
           61, 62, 63, 64, 65,
           66, 67, 68, 69, 70}));
}
// Golden-data test: sum-reduce (identity init 0) of a 10x10 iota tensor;
// expects shape (8, 10). Bracketed args follow ReduceWindow's parameter
// order — see its declaration elsewhere in this file.
TEST(ReferenceTest, RandomJaxReference114) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 1},
      {-2, -1, 1, 1},
      0,
      {1, 2},
      {1, 2},
      {2, 1},
      std::plus<>());
  EXPECT_THAT(res.shape, ElementsAre(8, 10));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {12, 24, 26, 28, 30, 32, 34, 36, 38, 19, 22, 44, 46, 48,
           50, 52, 54, 56, 58, 29, 32, 64, 66, 68, 70, 72, 74, 76,
           78, 39, 42, 84, 86, 88, 90, 92, 94, 96, 98, 49, 52, 104,
           106, 108, 110, 112, 114, 116, 118, 59, 62, 124, 126, 128, 130, 132,
           134, 136, 138, 69, 72, 144, 146, 148, 150, 152, 154, 156, 158, 79,
           82, 164, 166, 168, 170, 172, 174, 176, 178, 89}));
}
// Golden-data test: sum-reduce (identity init 0) of a 10x10 iota tensor;
// expects shape (7, 4). Bracketed args follow ReduceWindow's parameter
// order — see its declaration elsewhere in this file.
TEST(ReferenceTest, RandomJaxReference115) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 1},
      {-2, -2, -2, 1},
      0,
      {2, 2},
      {2, 1},
      {2, 2},
      std::plus<>());
  EXPECT_THAT(res.shape, ElementsAre(7, 4));
  EXPECT_THAT(res.data, ElementsAreArray({74, 82, 90, 98, 114, 122, 130,
                                          138, 154, 162, 170, 178, 194, 202,
                                          210, 218, 234, 242, 250, 258, 274,
                                          282, 290, 298, 314, 322, 330, 338}));
}
// Golden-data test: max-reduce (init -2147483647 acts as -inf) of a 10x10
// iota tensor; expects shape (16, 21). Interleaved init values come from
// output positions whose window covers only padding/dilation gaps.
// Bracketed args follow ReduceWindow's parameter order (declared elsewhere).
TEST(ReferenceTest, RandomJaxReference116) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 2},
      {0, -2, 1, 1},
      -2147483647,
      {2, 1},
      {1, 2},
      {1, 1},
      [](auto a, auto b) { return a >= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(16, 21));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {-2147483647, 1, -2147483647, 2, -2147483647,
           3, -2147483647, 4, -2147483647, 5,
           -2147483647, 6, -2147483647, 7, -2147483647,
           8, -2147483647, 9, -2147483647, 10,
           -2147483647, -2147483647, 11, -2147483647, 12,
           -2147483647, 13, -2147483647, 14, -2147483647,
           15, -2147483647, 16, -2147483647, 17,
           -2147483647, 18, -2147483647, 19, -2147483647,
           20, -2147483647, -2147483647, 11, -2147483647,
           12, -2147483647, 13, -2147483647, 14,
           -2147483647, 15, -2147483647, 16, -2147483647,
           17, -2147483647, 18, -2147483647, 19,
           -2147483647, 20, -2147483647, -2147483647, 21,
           -2147483647, 22, -2147483647, 23, -2147483647,
           24, -2147483647, 25, -2147483647, 26,
           -2147483647, 27, -2147483647, 28, -2147483647,
           29, -2147483647, 30, -2147483647, -2147483647,
           21, -2147483647, 22, -2147483647, 23,
           -2147483647, 24, -2147483647, 25, -2147483647,
           26, -2147483647, 27, -2147483647, 28,
           -2147483647, 29, -2147483647, 30, -2147483647,
           -2147483647, 31, -2147483647, 32, -2147483647,
           33, -2147483647, 34, -2147483647, 35,
           -2147483647, 36, -2147483647, 37, -2147483647,
           38, -2147483647, 39, -2147483647, 40,
           -2147483647, -2147483647, 31, -2147483647, 32,
           -2147483647, 33, -2147483647, 34, -2147483647,
           35, -2147483647, 36, -2147483647, 37,
           -2147483647, 38, -2147483647, 39, -2147483647,
           40, -2147483647, -2147483647, 41, -2147483647,
           42, -2147483647, 43, -2147483647, 44,
           -2147483647, 45, -2147483647, 46, -2147483647,
           47, -2147483647, 48, -2147483647, 49,
           -2147483647, 50, -2147483647, -2147483647, 41,
           -2147483647, 42, -2147483647, 43, -2147483647,
           44, -2147483647, 45, -2147483647, 46,
           -2147483647, 47, -2147483647, 48, -2147483647,
           49, -2147483647, 50, -2147483647, -2147483647,
           51, -2147483647, 52, -2147483647, 53,
           -2147483647, 54, -2147483647, 55, -2147483647,
           56, -2147483647, 57, -2147483647, 58,
           -2147483647, 59, -2147483647, 60, -2147483647,
           -2147483647, 51, -2147483647, 52, -2147483647,
           53, -2147483647, 54, -2147483647, 55,
           -2147483647, 56, -2147483647, 57, -2147483647,
           58, -2147483647, 59, -2147483647, 60,
           -2147483647, -2147483647, 61, -2147483647, 62,
           -2147483647, 63, -2147483647, 64, -2147483647,
           65, -2147483647, 66, -2147483647, 67,
           -2147483647, 68, -2147483647, 69, -2147483647,
           70, -2147483647, -2147483647, 61, -2147483647,
           62, -2147483647, 63, -2147483647, 64,
           -2147483647, 65, -2147483647, 66, -2147483647,
           67, -2147483647, 68, -2147483647, 69,
           -2147483647, 70, -2147483647, -2147483647, 71,
           -2147483647, 72, -2147483647, 73, -2147483647,
           74, -2147483647, 75, -2147483647, 76,
           -2147483647, 77, -2147483647, 78, -2147483647,
           79, -2147483647, 80, -2147483647, -2147483647,
           71, -2147483647, 72, -2147483647, 73,
           -2147483647, 74, -2147483647, 75, -2147483647,
           76, -2147483647, 77, -2147483647, 78,
           -2147483647, 79, -2147483647, 80, -2147483647,
           -2147483647, 81, -2147483647, 82, -2147483647,
           83, -2147483647, 84, -2147483647, 85,
           -2147483647, 86, -2147483647, 87, -2147483647,
           88, -2147483647, 89, -2147483647, 90,
           -2147483647}));
}
// Golden-data test: product-reduce (identity init 1) of a 10x10 iota tensor;
// expects shape (8, 8). Bracketed args follow ReduceWindow's parameter
// order — see its declaration elsewhere in this file.
TEST(ReferenceTest, RandomJaxReference117) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 1},
      {-2, -2, -1, 0},
      1,
      {1, 2},
      {2, 1},
      {2, 1},
      std::multiplies<>());
  EXPECT_THAT(res.shape, ElementsAre(8, 8));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {156, 182, 210, 240, 272, 306, 342, 380, 506, 552, 600,
           650, 702, 756, 812, 870, 1056, 1122, 1190, 1260, 1332, 1406,
           1482, 1560, 1806, 1892, 1980, 2070, 2162, 2256, 2352, 2450, 2756,
           2862, 2970, 3080, 3192, 3306, 3422, 3540, 3906, 4032, 4160, 4290,
           4422, 4556, 4692, 4830, 5256, 5402, 5550, 5700, 5852, 6006, 6162,
           6320, 6806, 6972, 7140, 7310, 7482, 7656, 7832, 8010}));
}
// Golden-data test: sum-reduce (identity init 0) of a 10x10 iota tensor;
// expects shape (8, 8). Bracketed args follow ReduceWindow's parameter
// order — see its declaration elsewhere in this file.
TEST(ReferenceTest, RandomJaxReference118) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 2},
      {-2, -1, -2, 1},
      0,
      {1, 2},
      {2, 2},
      {2, 2},
      std::plus<>());
  EXPECT_THAT(res.shape, ElementsAre(8, 8));
  EXPECT_THAT(
      res.data,
      ElementsAreArray({25, 27, 29, 31, 33, 35, 37, 39, 45, 47, 49,
                        51, 53, 55, 57, 59, 65, 67, 69, 71, 73, 75,
                        77, 79, 85, 87, 89, 91, 93, 95, 97, 99, 105,
                        107, 109, 111, 113, 115, 117, 119, 125, 127, 129, 131,
                        133, 135, 137, 139, 145, 147, 149, 151, 153, 155, 157,
                        159, 165, 167, 169, 171, 173, 175, 177, 179}));
}
// Golden-data test: product-reduce (identity init 1) of a 10x10 iota tensor;
// expects shape (6, 22). Bracketed args follow ReduceWindow's parameter
// order — see its declaration elsewhere in this file.
TEST(ReferenceTest, RandomJaxReference119) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {1, 2},
      {-2, 0, 1, 2},
      1,
      {2, 1},
      {2, 2},
      {1, 1},
      std::multiplies<>());
  EXPECT_THAT(res.shape, ElementsAre(6, 22));
  EXPECT_THAT(
      res.data,
      ElementsAreArray({1, 861, 1, 924, 1, 989, 1, 1056, 1, 1125, 1, 1196,
                        1, 1269, 1, 1344, 1, 1421, 1, 1500, 1, 1, 1, 1581,
                        1, 1664, 1, 1749, 1, 1836, 1, 1925, 1, 2016, 1, 2109,
                        1, 2204, 1, 2301, 1, 2400, 1, 1, 1, 2501, 1, 2604,
                        1, 2709, 1, 2816, 1, 2925, 1, 3036, 1, 3149, 1, 3264,
                        1, 3381, 1, 3500, 1, 1, 1, 3621, 1, 3744, 1, 3869,
                        1, 3996, 1, 4125, 1, 4256, 1, 4389, 1, 4524, 1, 4661,
                        1, 4800, 1, 1, 1, 4941, 1, 5084, 1, 5229, 1, 5376,
                        1, 5525, 1, 5676, 1, 5829, 1, 5984, 1, 6141, 1, 6300,
                        1, 1, 1, 6461, 1, 6624, 1, 6789, 1, 6956, 1, 7125,
                        1, 7296, 1, 7469, 1, 7644, 1, 7821, 1, 8000, 1, 1}));
}
// Golden-data test: min-reduce (init 2147483646 acts as +inf) of a 10x10
// iota tensor; expects shape (9, 21). Bracketed args follow ReduceWindow's
// parameter order — see its declaration elsewhere in this file.
TEST(ReferenceTest, RandomJaxReference120) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 2},
      {-2, 1, 2, 0},
      2147483646,
      {2, 1},
      {1, 1},
      {2, 1},
      [](auto a, auto b) { return a <= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(9, 21));
  EXPECT_THAT(res.data,
              ElementsAreArray(
                  {2147483646, 2147483646, 11, 2147483646, 12,
                   2147483646, 13, 2147483646, 14, 2147483646,
                   15, 2147483646, 16, 2147483646, 17,
                   2147483646, 18, 2147483646, 19, 2147483646,
                   20, 2147483646, 2147483646, 21, 2147483646,
                   22, 2147483646, 23, 2147483646, 24,
                   2147483646, 25, 2147483646, 26, 2147483646,
                   27, 2147483646, 28, 2147483646, 29,
                   2147483646, 30, 2147483646, 2147483646, 31,
                   2147483646, 32, 2147483646, 33, 2147483646,
                   34, 2147483646, 35, 2147483646, 36,
                   2147483646, 37, 2147483646, 38, 2147483646,
                   39, 2147483646, 40, 2147483646, 2147483646,
                   41, 2147483646, 42, 2147483646, 43,
                   2147483646, 44, 2147483646, 45, 2147483646,
                   46, 2147483646, 47, 2147483646, 48,
                   2147483646, 49, 2147483646, 50, 2147483646,
                   2147483646, 51, 2147483646, 52, 2147483646,
                   53, 2147483646, 54, 2147483646, 55,
                   2147483646, 56, 2147483646, 57, 2147483646,
                   58, 2147483646, 59, 2147483646, 60,
                   2147483646, 2147483646, 61, 2147483646, 62,
                   2147483646, 63, 2147483646, 64, 2147483646,
                   65, 2147483646, 66, 2147483646, 67,
                   2147483646, 68, 2147483646, 69, 2147483646,
                   70, 2147483646, 2147483646, 71, 2147483646,
                   72, 2147483646, 73, 2147483646, 74,
                   2147483646, 75, 2147483646, 76, 2147483646,
                   77, 2147483646, 78, 2147483646, 79,
                   2147483646, 80, 2147483646, 2147483646, 81,
                   2147483646, 82, 2147483646, 83, 2147483646,
                   84, 2147483646, 85, 2147483646, 86,
                   2147483646, 87, 2147483646, 88, 2147483646,
                   89, 2147483646, 90, 2147483646, 2147483646,
                   91, 2147483646, 92, 2147483646, 93,
                   2147483646, 94, 2147483646, 95, 2147483646,
                   96, 2147483646, 97, 2147483646, 98,
                   2147483646, 99, 2147483646, 100}));
}
// Golden-data test: max-reduce (init -2147483647 acts as -inf) of a 10x10
// iota tensor; expects shape (20, 9). With these args no input element is
// covered by any window, so every output is the init value.
// Bracketed args follow ReduceWindow's parameter order (declared elsewhere).
TEST(ReferenceTest, RandomJaxReference121) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 2},
      {0, 2, -1, 1},
      -2147483647,
      {2, 2},
      {1, 2},
      {1, 2},
      [](auto a, auto b) { return a >= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(20, 9));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647}));
}
// Golden-data test: max-reduce (init -2147483647 acts as -inf) of a 10x10
// iota tensor; expects shape (5, 10), all init values (no window covers any
// input element with these args). Bracketed args follow ReduceWindow's
// parameter order — see its declaration elsewhere in this file.
TEST(ReferenceTest, RandomJaxReference122) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {1, 2},
      {0, 2, -1, 1},
      -2147483647,
      {2, 1},
      {2, 1},
      {2, 2},
      [](auto a, auto b) { return a >= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(5, 10));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647}));
}
// Golden-data test: sum-reduce (identity init 0) of a 10x10 iota tensor;
// expects shape (6, 6). Bracketed args follow ReduceWindow's parameter
// order — see its declaration elsewhere in this file.
TEST(ReferenceTest, RandomJaxReference123) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {1, 1},
      {-2, -2, 0, 2},
      0,
      {1, 2},
      {1, 1},
      {1, 2},
      std::plus<>());
  EXPECT_THAT(res.shape, ElementsAre(6, 6));
  EXPECT_THAT(res.data,
              ElementsAreArray({43, 47, 51, 55, 59, 0, 63, 67, 71,
                                75, 79, 0, 83, 87, 91, 95, 99, 0,
                                103, 107, 111, 115, 119, 0, 123, 127, 131,
                                135, 139, 0, 143, 147, 151, 155, 159, 0}));
}
// Golden-data test: product-reduce (identity init 1) of a 10x10 iota tensor;
// expects shape (10, 4). Bracketed args follow ReduceWindow's parameter
// order — see its declaration elsewhere in this file.
TEST(ReferenceTest, RandomJaxReference124) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {1, 1},
      {0, 2, -2, 0},
      1,
      {2, 1},
      {2, 1},
      {1, 2},
      std::multiplies<>());
  EXPECT_THAT(res.shape, ElementsAre(10, 4));
  EXPECT_THAT(res.data,
              ElementsAreArray({69, 125, 189, 261, 429, 525, 629, 741,
                                989, 1125, 1269, 1421, 1749, 1925, 2109, 2301,
                                2709, 2925, 3149, 3381, 3869, 4125, 4389, 4661,
                                5229, 5525, 5829, 6141, 6789, 7125, 7469, 7821,
                                83, 85, 87, 89, 93, 95, 97, 99}));
}
// Golden-data test: sum-reduce (identity init 0) of a 10x10 iota tensor;
// expects shape (9, 21), all zeros (no window covers any input element with
// these args). Bracketed args follow ReduceWindow's parameter order — see
// its declaration elsewhere in this file.
TEST(ReferenceTest, RandomJaxReference125) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 2},
      {-1, -1, 2, 1},
      0,
      {1, 2},
      {2, 1},
      {2, 1},
      std::plus<>());
  EXPECT_THAT(res.shape, ElementsAre(9, 21));
  EXPECT_THAT(
      res.data,
      ElementsAreArray({0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}));
}
// Golden-data test: max-reduce (init -2147483647 acts as -inf) of a 10x10
// iota tensor; expects shape (20, 5). Bracketed args follow ReduceWindow's
// parameter order — see its declaration elsewhere in this file.
TEST(ReferenceTest, RandomJaxReference126) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 1},
      {0, 1, 0, 0},
      -2147483647,
      {1, 1},
      {1, 2},
      {1, 2},
      [](auto a, auto b) { return a >= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(20, 5));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {1, 3, 5, 7, 9,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           11, 13, 15, 17, 19,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           21, 23, 25, 27, 29,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           31, 33, 35, 37, 39,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           41, 43, 45, 47, 49,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           51, 53, 55, 57, 59,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           61, 63, 65, 67, 69,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           71, 73, 75, 77, 79,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           81, 83, 85, 87, 89,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           91, 93, 95, 97, 99,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647}));
}
// Golden-data test: sum-reduce (identity init 0) of a 10x10 iota tensor;
// expects shape (16, 4). Bracketed args follow ReduceWindow's parameter
// order — see its declaration elsewhere in this file.
TEST(ReferenceTest, RandomJaxReference127) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 1},
      {1, -2, 0, -2},
      0,
      {2, 1},
      {2, 1},
      {1, 2},
      std::plus<>());
  EXPECT_THAT(res.shape, ElementsAre(16, 4));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {0, 0, 0, 0, 12, 16, 20, 24, 0, 0, 0, 0, 32, 36, 40, 44,
           0, 0, 0, 0, 52, 56, 60, 64, 0, 0, 0, 0, 72, 76, 80, 84,
           0, 0, 0, 0, 92, 96, 100, 104, 0, 0, 0, 0, 112, 116, 120, 124,
           0, 0, 0, 0, 132, 136, 140, 144, 0, 0, 0, 0, 152, 156, 160, 164}));
}
// Golden-data test: min-reduce (init 2147483646 acts as +inf) of a 10x10
// iota tensor; expects shape (7, 3). Bracketed args follow ReduceWindow's
// parameter order — see its declaration elsewhere in this file.
TEST(ReferenceTest, RandomJaxReference128) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {1, 1},
      {-1, -2, 0, -2},
      2147483646,
      {1, 2},
      {2, 2},
      {1, 2},
      [](auto a, auto b) { return a <= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(7, 3));
  EXPECT_THAT(res.data,
              ElementsAreArray({11, 13, 15, 21, 23, 25, 31, 33, 35, 41, 43,
                                45, 51, 53, 55, 61, 63, 65, 71, 73, 75}));
}
// Golden-data test: product-reduce (identity init 1) of a 10x10 iota tensor;
// expects shape (12, 9), all ones (no window covers any input element with
// these args, so the multiplicative identity is returned everywhere).
// Bracketed args follow ReduceWindow's parameter order (declared elsewhere).
TEST(ReferenceTest, RandomJaxReference129) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {1, 2},
      {1, 2, -1, 2},
      1,
      {2, 2},
      {1, 2},
      {1, 2},
      std::multiplies<>());
  EXPECT_THAT(res.shape, ElementsAre(12, 9));
  EXPECT_THAT(
      res.data,
      ElementsAreArray({1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}));
}
// Golden-data test: product-reduce (identity init 1) of a 10x10 iota tensor;
// expects shape (19, 6). Bracketed args follow ReduceWindow's parameter
// order — see its declaration elsewhere in this file.
TEST(ReferenceTest, RandomJaxReference130) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 1},
      {-1, 1, 1, 1},
      1,
      {1, 1},
      {2, 2},
      {1, 2},
      std::multiplies<>());
  EXPECT_THAT(res.shape, ElementsAre(19, 6));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {1, 1, 1, 1, 1, 1, 1, 12, 14, 16, 18, 20, 1, 1, 1, 1, 1,
           1, 1, 22, 24, 26, 28, 30, 1, 1, 1, 1, 1, 1, 1, 32, 34, 36,
           38, 40, 1, 1, 1, 1, 1, 1, 1, 42, 44, 46, 48, 50, 1, 1, 1,
           1, 1, 1, 1, 52, 54, 56, 58, 60, 1, 1, 1, 1, 1, 1, 1, 62,
           64, 66, 68, 70, 1, 1, 1, 1, 1, 1, 1, 72, 74, 76, 78, 80, 1,
           1, 1, 1, 1, 1, 1, 82, 84, 86, 88, 90, 1, 1, 1, 1, 1, 1,
           1, 92, 94, 96, 98, 100, 1, 1, 1, 1, 1, 1}));
}
// Golden-data test: max-reduce (init -2147483647 acts as -inf) of a 10x10
// iota tensor; expects shape (9, 16). Bracketed args follow ReduceWindow's
// parameter order — see its declaration elsewhere in this file.
TEST(ReferenceTest, RandomJaxReference131) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 2},
      {1, -1, -2, -1},
      -2147483647,
      {2, 1},
      {1, 2},
      {2, 1},
      [](auto a, auto b) { return a >= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(9, 16));
  EXPECT_THAT(
      res.data,
      ElementsAreArray({2, -2147483647, 3, -2147483647, 4, -2147483647,
                        5, -2147483647, 6, -2147483647, 7, -2147483647,
                        8, -2147483647, 9, -2147483647, 12, -2147483647,
                        13, -2147483647, 14, -2147483647, 15, -2147483647,
                        16, -2147483647, 17, -2147483647, 18, -2147483647,
                        19, -2147483647, 22, -2147483647, 23, -2147483647,
                        24, -2147483647, 25, -2147483647, 26, -2147483647,
                        27, -2147483647, 28, -2147483647, 29, -2147483647,
                        32, -2147483647, 33, -2147483647, 34, -2147483647,
                        35, -2147483647, 36, -2147483647, 37, -2147483647,
                        38, -2147483647, 39, -2147483647, 42, -2147483647,
                        43, -2147483647, 44, -2147483647, 45, -2147483647,
                        46, -2147483647, 47, -2147483647, 48, -2147483647,
                        49, -2147483647, 52, -2147483647, 53, -2147483647,
                        54, -2147483647, 55, -2147483647, 56, -2147483647,
                        57, -2147483647, 58, -2147483647, 59, -2147483647,
                        62, -2147483647, 63, -2147483647, 64, -2147483647,
                        65, -2147483647, 66, -2147483647, 67, -2147483647,
                        68, -2147483647, 69, -2147483647, 72, -2147483647,
                        73, -2147483647, 74, -2147483647, 75, -2147483647,
                        76, -2147483647, 77, -2147483647, 78, -2147483647,
                        79, -2147483647, 82, -2147483647, 83, -2147483647,
                        84, -2147483647, 85, -2147483647, 86, -2147483647,
                        87, -2147483647, 88, -2147483647, 89, -2147483647}));
}
// Golden-data test: max-reduce (init -2147483647 acts as -inf) of a 10x10
// iota tensor; expects shape (10, 10). Bracketed args follow ReduceWindow's
// parameter order — see its declaration elsewhere in this file.
TEST(ReferenceTest, RandomJaxReference132) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 1},
      {0, 2, 2, -1},
      -2147483647,
      {2, 2},
      {2, 1},
      {2, 1},
      [](auto a, auto b) { return a >= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(10, 10));
  EXPECT_THAT(res.data, ElementsAreArray(
                            {-2147483647, 11, 12, 13, 14, 15, 16, 17, 18, 19,
                             -2147483647, 21, 22, 23, 24, 25, 26, 27, 28, 29,
                             -2147483647, 31, 32, 33, 34, 35, 36, 37, 38, 39,
                             -2147483647, 41, 42, 43, 44, 45, 46, 47, 48, 49,
                             -2147483647, 51, 52, 53, 54, 55, 56, 57, 58, 59,
                             -2147483647, 61, 62, 63, 64, 65, 66, 67, 68, 69,
                             -2147483647, 71, 72, 73, 74, 75, 76, 77, 78, 79,
                             -2147483647, 81, 82, 83, 84, 85, 86, 87, 88, 89,
                             -2147483647, 91, 92, 93, 94, 95, 96, 97, 98, 99,
                             -2147483647, 91, 92, 93, 94, 95, 96, 97, 98, 99}));
}
// Machine-generated golden test: min-reduce (a <= b ? a : b) over iota(10,10);
// init 2147483646 is the identity (~+inf) sentinel. Config-arg order is
// presumed, not verified from this chunk.
TEST(ReferenceTest, RandomJaxReference133) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 1},
      {0, 2, 1, -1},
      2147483646,
      {2, 2},
      {2, 2},
      {2, 2},
      [](auto a, auto b) { return a <= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(10, 4));
  EXPECT_THAT(
      res.data,
      ElementsAreArray({2,  2,  4,  6,  12, 12, 14, 16, 22, 22, 24, 26, 32, 32,
                        34, 36, 42, 42, 44, 46, 52, 52, 54, 56, 62, 62, 64, 66,
                        72, 72, 74, 76, 82, 82, 84, 86, 92, 92, 94, 96}));
}
// Machine-generated golden test: max-reduce (a >= b ? a : b) over iota(10,10);
// init -2147483647 is the identity sentinel. Config-arg order presumed.
TEST(ReferenceTest, RandomJaxReference134) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {1, 2},
      {-2, 2, 2, 1},
      -2147483647,
      {2, 1},
      {1, 1},
      {2, 1},
      [](auto a, auto b) { return a >= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(5, 22));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {-2147483647, -2147483647, 31,          -2147483647, 32,
           -2147483647, 33,          -2147483647, 34,          -2147483647,
           35,          -2147483647, 36,          -2147483647, 37,
           -2147483647, 38,          -2147483647, 39,          -2147483647,
           40,          -2147483647, -2147483647, -2147483647, 51,
           -2147483647, 52,          -2147483647, 53,          -2147483647,
           54,          -2147483647, 55,          -2147483647, 56,
           -2147483647, 57,          -2147483647, 58,          -2147483647,
           59,          -2147483647, 60,          -2147483647, -2147483647,
           -2147483647, 71,          -2147483647, 72,          -2147483647,
           73,          -2147483647, 74,          -2147483647, 75,
           -2147483647, 76,          -2147483647, 77,          -2147483647,
           78,          -2147483647, 79,          -2147483647, 80,
           -2147483647, -2147483647, -2147483647, 91,          -2147483647,
           92,          -2147483647, 93,          -2147483647, 94,
           -2147483647, 95,          -2147483647, 96,          -2147483647,
           97,          -2147483647, 98,          -2147483647, 99,
           -2147483647, 100,         -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647}));
}
// Machine-generated golden test: min-reduce (a <= b ? a : b) over iota(10,10).
// Every output equals the init sentinel 2147483646, i.e. with this config no
// window overlaps any input element. Config-arg order presumed.
TEST(ReferenceTest, RandomJaxReference135) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 1},
      {1, 0, 0, 2},
      2147483646,
      {1, 1},
      {2, 1},
      {2, 1},
      [](auto a, auto b) { return a <= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(10, 12));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
           2147483646, 2147483646, 2147483646, 2147483646, 2147483646}));
}
// Machine-generated golden test: min-reduce (a <= b ? a : b) over iota(10,10);
// init 2147483646 is the identity sentinel (visible in the padded output rows).
// Config-arg order presumed.
TEST(ReferenceTest, RandomJaxReference136) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {1, 2},
      {0, 1, 0, 0},
      2147483646,
      {1, 2},
      {2, 1},
      {1, 2},
      [](auto a, auto b) { return a <= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(11, 9));
  EXPECT_THAT(res.data,
              ElementsAreArray(
                  {1,          2,          3,          4,          5,
                   6,          7,          8,          9,          11,
                   12,         13,         14,         15,         16,
                   17,         18,         19,         21,         22,
                   23,         24,         25,         26,         27,
                   28,         29,         31,         32,         33,
                   34,         35,         36,         37,         38,
                   39,         41,         42,         43,         44,
                   45,         46,         47,         48,         49,
                   51,         52,         53,         54,         55,
                   56,         57,         58,         59,         61,
                   62,         63,         64,         65,         66,
                   67,         68,         69,         71,         72,
                   73,         74,         75,         76,         77,
                   78,         79,         81,         82,         83,
                   84,         85,         86,         87,         88,
                   89,         91,         92,         93,         94,
                   95,         96,         97,         98,         99,
                   2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
                   2147483646, 2147483646, 2147483646, 2147483646}));
}
// Machine-generated golden test: min-reduce (a <= b ? a : b) over iota(10,10)
// with identity-size {1,1} windows; init 2147483646. Config-arg order presumed.
TEST(ReferenceTest, RandomJaxReference137) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {1, 1},
      {0, -1, 2, -1},
      2147483646,
      {2, 2},
      {2, 2},
      {2, 2},
      [](auto a, auto b) { return a <= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(4, 5));
  EXPECT_THAT(res.data,
              ElementsAreArray({1,  1,  3,  5,  7,  21, 21, 23, 25, 27,
                                41, 41, 43, 45, 47, 61, 61, 63, 65, 67}));
}
// Machine-generated golden test: max-reduce (a >= b ? a : b) over iota(10,10);
// init -2147483647 is the identity sentinel (fills non-overlapping positions).
// Config-arg order presumed; data below is golden output, do not hand-edit.
TEST(ReferenceTest, RandomJaxReference138) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 2},
      {0, -1, 1, 2},
      -2147483647,
      {1, 1},
      {2, 2},
      {1, 1},
      [](auto a, auto b) { return a >= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(18, 22));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {-2147483647, 1,           -2147483647, 2,           -2147483647,
           3,           -2147483647, 4,           -2147483647, 5,
           -2147483647, 6,           -2147483647, 7,           -2147483647,
           8,           -2147483647, 9,           -2147483647, 10,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           11,          -2147483647, 12,          -2147483647, 13,
           -2147483647, 14,          -2147483647, 15,          -2147483647,
           16,          -2147483647, 17,          -2147483647, 18,
           -2147483647, 19,          -2147483647, 20,          -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, 21,
           -2147483647, 22,          -2147483647, 23,          -2147483647,
           24,          -2147483647, 25,          -2147483647, 26,
           -2147483647, 27,          -2147483647, 28,          -2147483647,
           29,          -2147483647, 30,          -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, 31,          -2147483647,
           32,          -2147483647, 33,          -2147483647, 34,
           -2147483647, 35,          -2147483647, 36,          -2147483647,
           37,          -2147483647, 38,          -2147483647, 39,
           -2147483647, 40,          -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, 41,          -2147483647, 42,
           -2147483647, 43,          -2147483647, 44,          -2147483647,
           45,          -2147483647, 46,          -2147483647, 47,
           -2147483647, 48,          -2147483647, 49,          -2147483647,
           50,          -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, 51,          -2147483647, 52,          -2147483647,
           53,          -2147483647, 54,          -2147483647, 55,
           -2147483647, 56,          -2147483647, 57,          -2147483647,
           58,          -2147483647, 59,          -2147483647, 60,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           61,          -2147483647, 62,          -2147483647, 63,
           -2147483647, 64,          -2147483647, 65,          -2147483647,
           66,          -2147483647, 67,          -2147483647, 68,
           -2147483647, 69,          -2147483647, 70,          -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, 71,
           -2147483647, 72,          -2147483647, 73,          -2147483647,
           74,          -2147483647, 75,          -2147483647, 76,
           -2147483647, 77,          -2147483647, 78,          -2147483647,
           79,          -2147483647, 80,          -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, 81,          -2147483647,
           82,          -2147483647, 83,          -2147483647, 84,
           -2147483647, 85,          -2147483647, 86,          -2147483647,
           87,          -2147483647, 88,          -2147483647, 89,
           -2147483647, 90,          -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647}));
}
// Machine-generated golden test: product-reduce (std::multiplies) over
// iota(10,10); init 1 is the multiplicative identity. Config-arg order
// presumed. Large products fit in int for this fixed input.
TEST(ReferenceTest, RandomJaxReference139) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {1, 2},
      {1, 2, 0, 2},
      1,
      {2, 2},
      {2, 2},
      {2, 2},
      std::multiplies<>());
  EXPECT_THAT(res.shape, ElementsAre(6, 10));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {132,      156,      182,      210,      240,      272,      306,
           342,      380,      20,       130944,   164736,   204204,   249900,
           302400,   362304,   430236,   506844,   592800,   800,      2630784,
           2910336,  3211164,  3534300,  3880800,  4251744,  4648236,  5071404,
           5522400,  2400,     13557024, 14485536, 15460524, 16483500, 17556000,
           18679584, 19855836, 21086364, 22372800, 4800,     42797664, 44970336,
           47224284, 49561500, 51984000, 54493824, 57093036, 59783724, 62568000,
           8000,     8372,     8556,     8742,     8930,     9120,     9312,
           9506,     9702,     9900,     100}));
}
// Machine-generated golden test: product-reduce (std::multiplies, init 1) over
// iota(10,10). All outputs are 1, i.e. with this config no window overlaps any
// input element. Config-arg order presumed.
TEST(ReferenceTest, RandomJaxReference140) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 2},
      {-2, 0, -1, -2},
      1,
      {2, 1},
      {2, 1},
      {1, 2},
      std::multiplies<>());
  EXPECT_THAT(res.shape, ElementsAre(15, 8));
  EXPECT_THAT(
      res.data,
      ElementsAreArray({1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}));
}
// Machine-generated golden test: max-reduce (a >= b ? a : b) with {1,1}
// windows over iota(10,10); init -2147483647 sentinel. Config-arg order
// presumed.
TEST(ReferenceTest, RandomJaxReference141) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {1, 1},
      {-2, -2, 1, 1},
      -2147483647,
      {1, 1},
      {2, 1},
      {2, 2},
      [](auto a, auto b) { return a >= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(3, 6));
  EXPECT_THAT(res.data, ElementsAreArray({-2147483647, 22, 24, 26, 28, 30,
                                          -2147483647, 42, 44, 46, 48, 50,
                                          -2147483647, 62, 64, 66, 68, 70}));
}
// Machine-generated golden test: product-reduce (std::multiplies, init 1)
// over iota(10,10); rows of 1s come from window positions covering only
// padding. Config-arg order presumed.
TEST(ReferenceTest, RandomJaxReference142) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 1},
      {1, 0, 0, -1},
      1,
      {2, 1},
      {2, 2},
      {1, 1},
      std::multiplies<>());
  EXPECT_THAT(res.shape, ElementsAre(18, 9));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {1,    1,    1,    1,    1,    1,    1,    1,    1,    11,   24,
           39,   56,   75,   96,   119,  144,  171,  1,    1,    1,    1,
           1,    1,    1,    1,    1,    231,  264,  299,  336,  375,  416,
           459,  504,  551,  1,    1,    1,    1,    1,    1,    1,    1,
           1,    651,  704,  759,  816,  875,  936,  999,  1064, 1131, 1,
           1,    1,    1,    1,    1,    1,    1,    1,    1271, 1344, 1419,
           1496, 1575, 1656, 1739, 1824, 1911, 1,    1,    1,    1,    1,
           1,    1,    1,    1,    2091, 2184, 2279, 2376, 2475, 2576, 2679,
           2784, 2891, 1,    1,    1,    1,    1,    1,    1,    1,    1,
           3111, 3224, 3339, 3456, 3575, 3696, 3819, 3944, 4071, 1,    1,
           1,    1,    1,    1,    1,    1,    1,    4331, 4464, 4599, 4736,
           4875, 5016, 5159, 5304, 5451, 1,    1,    1,    1,    1,    1,
           1,    1,    1,    5751, 5904, 6059, 6216, 6375, 6536, 6699, 6864,
           7031, 1,    1,    1,    1,    1,    1,    1,    1,    1,    7371,
           7544, 7719, 7896, 8075, 8256, 8439, 8624, 8811}));
}
// Machine-generated golden test: max-reduce (a >= b ? a : b) over iota(10,10);
// init -2147483647 sentinel. Config-arg order presumed.
TEST(ReferenceTest, RandomJaxReference143) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {1, 2},
      {2, -1, -2, -1},
      -2147483647,
      {2, 1},
      {2, 2},
      {2, 2},
      [](auto a, auto b) { return a >= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(5, 8));
  EXPECT_THAT(
      res.data,
      ElementsAreArray({2,  3,  4,  5,  6,  7,  8,  9,  22, 23, 24, 25, 26, 27,
                        28, 29, 42, 43, 44, 45, 46, 47, 48, 49, 62, 63, 64, 65,
                        66, 67, 68, 69, 82, 83, 84, 85, 86, 87, 88, 89}));
}
// Machine-generated golden test: min-reduce (a <= b ? a : b) over iota(10,10);
// init 2147483646 sentinel fills positions whose window covers only padding.
// Config-arg order presumed.
TEST(ReferenceTest, RandomJaxReference144) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {1, 2},
      {0, 1, 2, 2},
      2147483646,
      {1, 1},
      {1, 1},
      {2, 1},
      [](auto a, auto b) { return a <= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(6, 23));
  EXPECT_THAT(res.data,
              ElementsAreArray(
                  {2147483646, 2147483646, 1,          2147483646, 2,
                   2147483646, 3,          2147483646, 4,          2147483646,
                   5,          2147483646, 6,          2147483646, 7,
                   2147483646, 8,          2147483646, 9,          2147483646,
                   10,         2147483646, 2147483646, 2147483646, 2147483646,
                   21,         2147483646, 22,         2147483646, 23,
                   2147483646, 24,         2147483646, 25,         2147483646,
                   26,         2147483646, 27,         2147483646, 28,
                   2147483646, 29,         2147483646, 30,         2147483646,
                   2147483646, 2147483646, 2147483646, 41,         2147483646,
                   42,         2147483646, 43,         2147483646, 44,
                   2147483646, 45,         2147483646, 46,         2147483646,
                   47,         2147483646, 48,         2147483646, 49,
                   2147483646, 50,         2147483646, 2147483646, 2147483646,
                   2147483646, 61,         2147483646, 62,         2147483646,
                   63,         2147483646, 64,         2147483646, 65,
                   2147483646, 66,         2147483646, 67,         2147483646,
                   68,         2147483646, 69,         2147483646, 70,
                   2147483646, 2147483646, 2147483646, 2147483646, 81,
                   2147483646, 82,         2147483646, 83,         2147483646,
                   84,         2147483646, 85,         2147483646, 86,
                   2147483646, 87,         2147483646, 88,         2147483646,
                   89,         2147483646, 90,         2147483646, 2147483646,
                   2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
                   2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
                   2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
                   2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
                   2147483646, 2147483646, 2147483646}));
}
// Machine-generated golden test: product-reduce (std::multiplies, init 1)
// over iota(10,10). Config-arg order presumed.
TEST(ReferenceTest, RandomJaxReference145) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 1},
      {2, -2, 2, -2},
      1,
      {1, 2},
      {2, 1},
      {2, 2},
      std::multiplies<>());
  EXPECT_THAT(res.shape, ElementsAre(10, 5));
  EXPECT_THAT(
      res.data,
      ElementsAreArray({1, 1,    1,    1,    1,    1, 2,    12,   30,   56,
                        1, 132,  182,  240,  306,  1, 462,  552,  650,  756,
                        1, 992,  1122, 1260, 1406, 1, 1722, 1892, 2070, 2256,
                        1, 2652, 2862, 3080, 3306, 1, 3782, 4032, 4290, 4556,
                        1, 5112, 5402, 5700, 6006, 1, 6642, 6972, 7310, 7656}));
}
// Machine-generated golden test: min-reduce (a <= b ? a : b) over iota(10,10);
// init 2147483646 sentinel. Config-arg order presumed.
TEST(ReferenceTest, RandomJaxReference146) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 1},
      {1, -2, 1, 0},
      2147483646,
      {2, 1},
      {1, 2},
      {1, 2},
      [](auto a, auto b) { return a <= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(17, 6));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {2147483646, 2,  4,  6,  8,  10, 2147483646, 2,  4,  6,  8,  10,
           2147483646, 12, 14, 16, 18, 20, 2147483646, 12, 14, 16, 18, 20,
           2147483646, 22, 24, 26, 28, 30, 2147483646, 22, 24, 26, 28, 30,
           2147483646, 32, 34, 36, 38, 40, 2147483646, 32, 34, 36, 38, 40,
           2147483646, 42, 44, 46, 48, 50, 2147483646, 42, 44, 46, 48, 50,
           2147483646, 52, 54, 56, 58, 60, 2147483646, 52, 54, 56, 58, 60,
           2147483646, 62, 64, 66, 68, 70, 2147483646, 62, 64, 66, 68, 70,
           2147483646, 72, 74, 76, 78, 80, 2147483646, 72, 74, 76, 78, 80,
           2147483646, 82, 84, 86, 88, 90}));
}
// Machine-generated golden test: sum-reduce (std::plus, init 0) over
// iota(10,10). Config-arg order presumed.
TEST(ReferenceTest, RandomJaxReference147) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 1},
      {-2, 0, 2, 0},
      0,
      {2, 2},
      {1, 2},
      {2, 2},
      std::plus<>());
  EXPECT_THAT(res.shape, ElementsAre(8, 5));
  EXPECT_THAT(res.data, ElementsAreArray(
                            {11, 24,  28,  32,  36,  21, 44,  48,  52,  56,
                             31, 64,  68,  72,  76,  41, 84,  88,  92,  96,
                             51, 104, 108, 112, 116, 61, 124, 128, 132, 136,
                             71, 144, 148, 152, 156, 81, 164, 168, 172, 176}));
}
// Machine-generated golden test: product-reduce (std::multiplies, init 1)
// over iota(10,10); interleaved 1s come from padding-only window columns.
// Config-arg order presumed.
TEST(ReferenceTest, RandomJaxReference148) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 2},
      {1, -2, 2, 1},
      1,
      {2, 1},
      {1, 1},
      {1, 1},
      std::multiplies<>());
  EXPECT_THAT(res.shape, ElementsAre(17, 22));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {1,  1, 1,  1, 2,  1, 3,  1, 4,  1, 5,  1, 6,  1, 7,  1, 8,  1, 9,  1,
           10, 1, 1,  1, 1,  1, 2,  1, 3,  1, 4,  1, 5,  1, 6,  1, 7,  1, 8,  1,
           9,  1, 10, 1, 1,  1, 11, 1, 12, 1, 13, 1, 14, 1, 15, 1, 16, 1, 17, 1,
           18, 1, 19, 1, 20, 1, 1,  1, 11, 1, 12, 1, 13, 1, 14, 1, 15, 1, 16, 1,
           17, 1, 18, 1, 19, 1, 20, 1, 1,  1, 21, 1, 22, 1, 23, 1, 24, 1, 25, 1,
           26, 1, 27, 1, 28, 1, 29, 1, 30, 1, 1,  1, 21, 1, 22, 1, 23, 1, 24, 1,
           25, 1, 26, 1, 27, 1, 28, 1, 29, 1, 30, 1, 1,  1, 31, 1, 32, 1, 33, 1,
           34, 1, 35, 1, 36, 1, 37, 1, 38, 1, 39, 1, 40, 1, 1,  1, 31, 1, 32, 1,
           33, 1, 34, 1, 35, 1, 36, 1, 37, 1, 38, 1, 39, 1, 40, 1, 1,  1, 41, 1,
           42, 1, 43, 1, 44, 1, 45, 1, 46, 1, 47, 1, 48, 1, 49, 1, 50, 1, 1,  1,
           41, 1, 42, 1, 43, 1, 44, 1, 45, 1, 46, 1, 47, 1, 48, 1, 49, 1, 50, 1,
           1,  1, 51, 1, 52, 1, 53, 1, 54, 1, 55, 1, 56, 1, 57, 1, 58, 1, 59, 1,
           60, 1, 1,  1, 51, 1, 52, 1, 53, 1, 54, 1, 55, 1, 56, 1, 57, 1, 58, 1,
           59, 1, 60, 1, 1,  1, 61, 1, 62, 1, 63, 1, 64, 1, 65, 1, 66, 1, 67, 1,
           68, 1, 69, 1, 70, 1, 1,  1, 61, 1, 62, 1, 63, 1, 64, 1, 65, 1, 66, 1,
           67, 1, 68, 1, 69, 1, 70, 1, 1,  1, 71, 1, 72, 1, 73, 1, 74, 1, 75, 1,
           76, 1, 77, 1, 78, 1, 79, 1, 80, 1, 1,  1, 71, 1, 72, 1, 73, 1, 74, 1,
           75, 1, 76, 1, 77, 1, 78, 1, 79, 1, 80, 1, 1,  1, 81, 1, 82, 1, 83, 1,
           84, 1, 85, 1, 86, 1, 87, 1, 88, 1, 89, 1, 90, 1}));
}
// Machine-generated golden test: max-reduce (a >= b ? a : b) with {1,1}
// windows over iota(10,10); init -2147483647 sentinel. Config-arg order
// presumed.
TEST(ReferenceTest, RandomJaxReference149) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {1, 1},
      {-1, -2, -2, 2},
      -2147483647,
      {2, 1},
      {1, 1},
      {1, 2},
      [](auto a, auto b) { return a >= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(6, 5));
  EXPECT_THAT(res.data,
              ElementsAreArray(
                  {23, 25, 27, 29, -2147483647, 33, 35, 37, 39, -2147483647,
                   43, 45, 47, 49, -2147483647, 53, 55, 57, 59, -2147483647,
                   63, 65, 67, 69, -2147483647, 73, 75, 77, 79, -2147483647}));
}
// Machine-generated golden test: sum-reduce (std::plus, init 0) over
// iota(10,10); zeros mark padding-only window positions. Config-arg order
// presumed.
TEST(ReferenceTest, RandomJaxReference150) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {1, 2},
      {2, -1, -2, 0},
      0,
      {1, 1},
      {2, 2},
      {2, 1},
      std::plus<>());
  EXPECT_THAT(res.shape, ElementsAre(6, 17));
  EXPECT_THAT(
      res.data,
      ElementsAreArray({0,  0, 0,  0, 0,  0, 0,  0, 0,  0, 0,  0, 0,  0, 0,
                        0,  0, 2,  0, 3,  0, 4,  0, 5,  0, 6,  0, 7,  0, 8,
                        0,  9, 0,  10, 22, 0, 23, 0, 24, 0, 25, 0, 26, 0, 27,
                        0,  28, 0, 29, 0, 30, 42, 0, 43, 0, 44, 0, 45, 0, 46,
                        0,  47, 0, 48, 0, 49, 0, 50, 62, 0, 63, 0, 64, 0, 65,
                        0,  66, 0, 67, 0, 68, 0, 69, 0, 70, 82, 0, 83, 0, 84,
                        0,  85, 0, 86, 0, 87, 0, 88, 0, 89, 0, 90}));
}
// Machine-generated golden test: product-reduce (std::multiplies, init 1)
// over iota(10,10). Config-arg order presumed; data is golden output.
TEST(ReferenceTest, RandomJaxReference151) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 2},
      {2, 1, 2, -1},
      1,
      {2, 2},
      {2, 1},
      {2, 1},
      std::multiplies<>());
  EXPECT_THAT(res.shape, ElementsAre(10, 19));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {1,    1,    1,    2,    2,    3,    3,    4,    4,    5,    5,
           6,    6,    7,    7,    8,    8,    9,    9,    1,    11,   11,
           24,   24,   39,   39,   56,   56,   75,   75,   96,   96,   119,
           119,  144,  144,  171,  171,  1,    231,  231,  264,  264,  299,
           299,  336,  336,  375,  375,  416,  416,  459,  459,  504,  504,
           551,  551,  1,    651,  651,  704,  704,  759,  759,  816,  816,
           875,  875,  936,  936,  999,  999,  1064, 1064, 1131, 1131, 1,
           1271, 1271, 1344, 1344, 1419, 1419, 1496, 1496, 1575, 1575, 1656,
           1656, 1739, 1739, 1824, 1824, 1911, 1911, 1,    2091, 2091, 2184,
           2184, 2279, 2279, 2376, 2376, 2475, 2475, 2576, 2576, 2679, 2679,
           2784, 2784, 2891, 2891, 1,    3111, 3111, 3224, 3224, 3339, 3339,
           3456, 3456, 3575, 3575, 3696, 3696, 3819, 3819, 3944, 3944, 4071,
           4071, 1,    4331, 4331, 4464, 4464, 4599, 4599, 4736, 4736, 4875,
           4875, 5016, 5016, 5159, 5159, 5304, 5304, 5451, 5451, 1,    5751,
           5751, 5904, 5904, 6059, 6059, 6216, 6216, 6375, 6375, 6536, 6536,
           6699, 6699, 6864, 6864, 7031, 7031, 1,    7371, 7371, 7544, 7544,
           7719, 7719, 7896, 7896, 8075, 8075, 8256, 8256, 8439, 8439, 8624,
           8624, 8811, 8811}));
}
// Machine-generated golden test: product-reduce (std::multiplies, init 1)
// over iota(10,10). All outputs are 1 (no window overlaps input with this
// config). Config-arg order presumed.
TEST(ReferenceTest, RandomJaxReference152) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 1},
      {-1, 2, -2, 1},
      1,
      {2, 2},
      {2, 2},
      {2, 1},
      std::multiplies<>());
  EXPECT_THAT(res.shape, ElementsAre(9, 7));
  EXPECT_THAT(res.data,
              ElementsAreArray({1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                                1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                                1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                                1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}));
}
// Machine-generated golden test: product-reduce (std::multiplies, init 1)
// over iota(10,10); large products fit in int for this fixed input.
// Config-arg order presumed.
TEST(ReferenceTest, RandomJaxReference153) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 1},
      {-2, 2, -1, 2},
      1,
      {2, 2},
      {2, 2},
      {2, 2},
      std::multiplies<>());
  EXPECT_THAT(res.shape, ElementsAre(9, 5));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {88704,    139776,   209664,   302400,   600,      574464,   763776,
           995904,   1276800,  1200,     2010624,  2477376,  3020544,  3648000,
           2000,     5189184,  6120576,  7171584,  8352000,  3000,     11142144,
           12773376, 14577024, 16564800, 4200,     21141504, 23755776, 26604864,
           29702400, 5600,     36699264, 40627776, 44863104, 49420800, 7200,
           59567424, 65189376, 71199744, 77616000, 9000,     8648,     9024,
           9408,     9800,     100}));
}
// Machine-generated golden test: max-reduce (a >= b ? a : b) over iota(10,10);
// init -2147483647 sentinel. Config-arg order presumed.
TEST(ReferenceTest, RandomJaxReference154) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {1, 2},
      {2, 2, -1, 0},
      -2147483647,
      {1, 1},
      {2, 2},
      {2, 1},
      [](auto a, auto b) { return a >= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(7, 18));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, 2,
           -2147483647, 3,           -2147483647, 4,           -2147483647,
           5,           -2147483647, 6,           -2147483647, 7,
           -2147483647, 8,           -2147483647, 9,           -2147483647,
           10,          -2147483647, 22,          -2147483647, 23,
           -2147483647, 24,          -2147483647, 25,          -2147483647,
           26,          -2147483647, 27,          -2147483647, 28,
           -2147483647, 29,          -2147483647, 30,          -2147483647,
           42,          -2147483647, 43,          -2147483647, 44,
           -2147483647, 45,          -2147483647, 46,          -2147483647,
           47,          -2147483647, 48,          -2147483647, 49,
           -2147483647, 50,          -2147483647, 62,          -2147483647,
           63,          -2147483647, 64,          -2147483647, 65,
           -2147483647, 66,          -2147483647, 67,          -2147483647,
           68,          -2147483647, 69,          -2147483647, 70,
           -2147483647, 82,          -2147483647, 83,          -2147483647,
           84,          -2147483647, 85,          -2147483647, 86,
           -2147483647, 87,          -2147483647, 88,          -2147483647,
           89,          -2147483647, 90,          -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647}));
}
// Machine-generated golden test: sum-reduce (std::plus, init 0) over
// iota(10,10). Config-arg order presumed.
TEST(ReferenceTest, RandomJaxReference155) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 1},
      {0, 2, 2, 1},
      0,
      {1, 2},
      {1, 1},
      {2, 2},
      std::plus<>());
  EXPECT_THAT(res.shape, ElementsAre(11, 6));
  EXPECT_THAT(
      res.data,
      ElementsAreArray({0, 3,   7,   11,  15,  19,  0, 23,  27,  31,  35,
                        39, 0,  43,  47,  51,  55,  59, 0,  63,  67,  71,
                        75, 79, 0,   83,  87,  91,  95, 99, 0,   103, 107,
                        111, 115, 119, 0,  123, 127, 131, 135, 139, 0,  143,
                        147, 151, 155, 159, 0,  163, 167, 171, 175, 179, 0,
                        183, 187, 191, 195, 199, 0,  0,   0,   0,   0,   0}));
}
// Machine-generated golden test: max-reduce (a >= b ? a : b) over iota(10,10);
// init -2147483647 sentinel. Config-arg order presumed.
TEST(ReferenceTest, RandomJaxReference156) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {1, 1},
      {2, -1, -1, -1},
      -2147483647,
      {1, 2},
      {1, 2},
      {1, 2},
      [](auto a, auto b) { return a >= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(11, 3));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
           -2147483647, 4,           6,           8,           14,
           16,          18,          24,          26,          28,
           34,          36,          38,          44,          46,
           48,          54,          56,          58,          64,
           66,          68,          74,          76,          78,
           84,          86,          88}));
}
// Machine-generated golden test: max-reduce (a >= b ? a : b) over iota(10,10);
// init -2147483647. Config-arg order presumed.
TEST(ReferenceTest, RandomJaxReference157) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 1},
      {-1, -1, -2, -1},
      -2147483647,
      {2, 1},
      {1, 2},
      {1, 1},
      [](auto a, auto b) { return a >= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(16, 7));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {13, 14, 15, 16, 17, 18, 19, 13, 14, 15, 16, 17, 18, 19, 23, 24,
           25, 26, 27, 28, 29, 23, 24, 25, 26, 27, 28, 29, 33, 34, 35, 36,
           37, 38, 39, 33, 34, 35, 36, 37, 38, 39, 43, 44, 45, 46, 47, 48,
           49, 43, 44, 45, 46, 47, 48, 49, 53, 54, 55, 56, 57, 58, 59, 53,
           54, 55, 56, 57, 58, 59, 63, 64, 65, 66, 67, 68, 69, 63, 64, 65,
           66, 67, 68, 69, 73, 74, 75, 76, 77, 78, 79, 73, 74, 75, 76, 77,
           78, 79, 83, 84, 85, 86, 87, 88, 89, 83, 84, 85, 86, 87, 88, 89}));
}
// Machine-generated golden test: sum-reduce (std::plus, init 0) over
// iota(10,10); zeros mark padding-only window positions. Config-arg order
// presumed.
TEST(ReferenceTest, RandomJaxReference158) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 1},
      {2, -2, 2, 0},
      0,
      {2, 1},
      {1, 1},
      {2, 2},
      std::plus<>());
  EXPECT_THAT(res.shape, ElementsAre(9, 6));
  EXPECT_THAT(
      res.data,
      ElementsAreArray({0,  0,  0,  0,  0,  0,  0,  1,  3,  5,  7,  9,  0,  11,
                        13, 15, 17, 19, 0,  21, 23, 25, 27, 29, 0,  31, 33, 35,
                        37, 39, 0,  41, 43, 45, 47, 49, 0,  51, 53, 55, 57, 59,
                        0,  61, 63, 65, 67, 69, 0,  71, 73, 75, 77, 79}));
}
// Machine-generated golden test: min-reduce (a <= b ? a : b) over iota(10,10);
// init 2147483646 sentinel fills padding-only rows. Config-arg order presumed.
TEST(ReferenceTest, RandomJaxReference159) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {1, 1},
      {1, 2, -1, 1},
      2147483646,
      {1, 2},
      {2, 1},
      {1, 1},
      [](auto a, auto b) { return a <= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(13, 9));
  EXPECT_THAT(res.data,
              ElementsAreArray(
                  {2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
                   2147483646, 2147483646, 2147483646, 2147483646, 2,
                   3,          4,          5,          6,          7,
                   8,          9,          10,         12,         13,
                   14,         15,         16,         17,         18,
                   19,         20,         22,         23,         24,
                   25,         26,         27,         28,         29,
                   30,         32,         33,         34,         35,
                   36,         37,         38,         39,         40,
                   42,         43,         44,         45,         46,
                   47,         48,         49,         50,         52,
                   53,         54,         55,         56,         57,
                   58,         59,         60,         62,         63,
                   64,         65,         66,         67,         68,
                   69,         70,         72,         73,         74,
                   75,         76,         77,         78,         79,
                   80,         82,         83,         84,         85,
                   86,         87,         88,         89,         90,
                   92,         93,         94,         95,         96,
                   97,         98,         99,         100,        2147483646,
                   2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
                   2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
                   2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
                   2147483646, 2147483646}));
}
// Machine-generated golden test: sum-reduce (std::plus, init 0) with {1,1}
// windows over iota(10,10). Config-arg order presumed.
TEST(ReferenceTest, RandomJaxReference160) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {1, 1},
      {-1, 0, -2, 1},
      0,
      {2, 2},
      {1, 1},
      {2, 2},
      std::plus<>());
  EXPECT_THAT(res.shape, ElementsAre(4, 4));
  EXPECT_THAT(res.data,
              ElementsAreArray({74, 82, 90, 98, 154, 162, 170, 178, 234, 242,
                                250, 258, 314, 322, 330, 338}));
}
// Machine-generated golden test: sum-reduce (std::plus, init 0) over
// iota(10,10). Config-arg order presumed.
TEST(ReferenceTest, RandomJaxReference161) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 1},
      {0, 2, -1, 1},
      0,
      {1, 2},
      {1, 1},
      {2, 2},
      std::plus<>());
  EXPECT_THAT(res.shape, ElementsAre(11, 5));
  EXPECT_THAT(
      res.data,
      ElementsAreArray({5,   9,   13,  17,  10,  25,  29,  33,  37,  20,  45,
                        49,  53,  57,  30,  65,  69,  73,  77,  40,  85,  89,
                        93,  97,  50,  105, 109, 113, 117, 60,  125, 129, 133,
                        137, 70,  145, 149, 153, 157, 80,  165, 169, 173, 177,
                        90,  185, 189, 193, 197, 100, 0,   0,   0,   0,   0}));
}
// Machine-generated golden test: sum-reduce (std::plus, init 0) over
// iota(10,10). Config-arg order presumed.
TEST(ReferenceTest, RandomJaxReference162) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {1, 2},
      {2, -1, -1, 0},
      0,
      {2, 2},
      {1, 1},
      {1, 1},
      std::plus<>());
  EXPECT_THAT(res.shape, ElementsAre(10, 17));
  EXPECT_THAT(
      res.data,
      ElementsAreArray(
          {0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
           0,   0,   0,   2,   2,   3,   3,   4,   4,   5,   5,   6,   6,   7,
           7,   8,   8,   9,   9,   10,  14,  14,  16,  16,  18,  18,  20,  20,
           22,  22,  24,  24,  26,  26,  28,  28,  30,  34,  34,  36,  36,  38,
           38,  40,  40,  42,  42,  44,  44,  46,  46,  48,  48,  50,  54,  54,
           56,  56,  58,  58,  60,  60,  62,  62,  64,  64,  66,  66,  68,  68,
           70,  74,  74,  76,  76,  78,  78,  80,  80,  82,  82,  84,  84,  86,
           86,  88,  88,  90,  94,  94,  96,  96,  98,  98,  100, 100, 102, 102,
           104, 104, 106, 106, 108, 108, 110, 114, 114, 116, 116, 118, 118, 120,
           120, 122, 122, 124, 124, 126, 126, 128, 128, 130, 134, 134, 136, 136,
           138, 138, 140, 140, 142, 142, 144, 144, 146, 146, 148, 148, 150, 154,
           154, 156, 156, 158, 158, 160, 160, 162, 162, 164, 164, 166, 166, 168,
           168, 170}));
}
// Machine-generated golden test: sum-reduce (std::plus, init 0) with {1,1}
// windows and unit strides — output is the input plus two zero-padded
// columns on the right. Config-arg order presumed.
TEST(ReferenceTest, RandomJaxReference163) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {1, 1},
      {0, 0, 0, 2},
      0,
      {1, 1},
      {1, 1},
      {1, 1},
      std::plus<>());
  EXPECT_THAT(res.shape, ElementsAre(10, 12));
  EXPECT_THAT(
      res.data,
      ElementsAreArray({1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 0,  0,  11, 12,
                        13, 14, 15, 16, 17, 18, 19, 20, 0,  0,  21, 22, 23, 24,
                        25, 26, 27, 28, 29, 30, 0,  0,  31, 32, 33, 34, 35, 36,
                        37, 38, 39, 40, 0,  0,  41, 42, 43, 44, 45, 46, 47, 48,
                        49, 50, 0,  0,  51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
                        0,  0,  61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 0,  0,
                        71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 0,  0,  81, 82,
                        83, 84, 85, 86, 87, 88, 89, 90, 0,  0,  91, 92, 93, 94,
                        95, 96, 97, 98, 99, 100, 0,  0}));
}
// Machine-generated golden test: max-reduce (a >= b ? a : b) with {1,1}
// windows over iota(10,10); init -2147483647 sentinel. Config-arg order
// presumed.
TEST(ReferenceTest, RandomJaxReference164) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {1, 1},
      {-1, 0, 2, 1},
      -2147483647,
      {1, 1},
      {1, 2},
      {1, 2},
      [](auto a, auto b) { return a >= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(9, 7));
  EXPECT_THAT(res.data,
              ElementsAreArray({-2147483647, 11, 13, 15, 17, 19, -2147483647,
                                -2147483647, 21, 23, 25, 27, 29, -2147483647,
                                -2147483647, 31, 33, 35, 37, 39, -2147483647,
                                -2147483647, 41, 43, 45, 47, 49, -2147483647,
                                -2147483647, 51, 53, 55, 57, 59, -2147483647,
                                -2147483647, 61, 63, 65, 67, 69, -2147483647,
                                -2147483647, 71, 73, 75, 77, 79, -2147483647,
                                -2147483647, 81, 83, 85, 87, 89, -2147483647,
                                -2147483647, 91, 93, 95, 97, 99, -2147483647}));
}
// Machine-generated golden test: product-reduce (std::multiplies, init 1)
// over iota(10,10). Config-arg order presumed.
TEST(ReferenceTest, RandomJaxReference165) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {1, 2},
      {2, -2, 2, -1},
      1,
      {1, 2},
      {1, 2},
      {2, 1},
      std::multiplies<>());
  EXPECT_THAT(res.shape, ElementsAre(5, 18));
  EXPECT_THAT(
      res.data,
      ElementsAreArray({1,    1, 1,    1, 1,    1, 1,    1, 1,    1, 1,    1,
                        1,    1, 1,    1, 1,    1, 1,    1, 2,    1, 6,    1,
                        12,   1, 20,   1, 30,   1, 42,   1, 56,   1, 72,   1,
                        21,   1, 462,  1, 506,  1, 552,  1, 600,  1, 650,  1,
                        702,  1, 756,  1, 812,  1, 41,   1, 1722, 1, 1806, 1,
                        1892, 1, 1980, 1, 2070, 1, 2162, 1, 2256, 1, 2352, 1,
                        61,   1, 3782, 1, 3906, 1, 4032, 1, 4160, 1, 4290, 1,
                        4422, 1, 4556, 1, 4692, 1}));
}
// Machine-generated golden test: max-reduce (a >= b ? a : b) with {1,1}
// windows over iota(10,10); init -2147483647. Config-arg order presumed.
TEST(ReferenceTest, RandomJaxReference166) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {1, 1},
      {1, -1, 0, -2},
      -2147483647,
      {2, 2},
      {2, 2},
      {1, 2},
      [](auto a, auto b) { return a >= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(8, 3));
  EXPECT_THAT(res.data, ElementsAreArray({13, 15, 17, 23, 25, 27, 33, 35,
                                          37, 43, 45, 47, 53, 55, 57, 63,
                                          65, 67, 73, 75, 77, 83, 85, 87}));
}
// Machine-generated golden test: sum-reduce (std::plus, init 0) over
// iota(10,10). Config-arg order presumed.
TEST(ReferenceTest, RandomJaxReference167) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {2, 1},
      {-2, 2, 0, 1},
      0,
      {2, 2},
      {2, 1},
      {2, 2},
      std::plus<>());
  EXPECT_THAT(res.shape, ElementsAre(9, 5));
  EXPECT_THAT(res.data,
              ElementsAreArray({66,  74,  82,  90,  98,  106, 114, 122, 130,
                                138, 146, 154, 162, 170, 178, 186, 194, 202,
                                210, 218, 226, 234, 242, 250, 258, 266, 274,
                                282, 290, 298, 306, 314, 322, 330, 338, 346,
                                354, 362, 370, 378, 183, 187, 191, 195, 199}));
}
// Machine-generated golden test: max-reduce (a >= b ? a : b) over iota(10,10).
// Output is entirely the -2147483647 init sentinel (no window overlaps the
// input with this config). Config-arg order presumed.
TEST(ReferenceTest, RandomJaxReference168) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {1, 2},
      {0, -1, -1, -2},
      -2147483647,
      {2, 2},
      {2, 2},
      {2, 2},
      [](auto a, auto b) { return a >= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(4, 7));
  EXPECT_THAT(
      res.data,
      ElementsAreArray({-2147483647, -2147483647, -2147483647, -2147483647,
                        -2147483647, -2147483647, -2147483647, -2147483647,
                        -2147483647, -2147483647, -2147483647, -2147483647,
                        -2147483647, -2147483647, -2147483647, -2147483647,
                        -2147483647, -2147483647, -2147483647, -2147483647,
                        -2147483647, -2147483647, -2147483647, -2147483647,
                        -2147483647, -2147483647, -2147483647, -2147483647}));
}
// Machine-generated golden test: product-reduce (std::multiplies, init 1)
// over iota(10,10). Config-arg order presumed.
TEST(ReferenceTest, RandomJaxReference169) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {1, 2},
      {1, -2, 0, 1},
      1,
      {1, 2},
      {2, 2},
      {1, 2},
      std::multiplies<>());
  EXPECT_THAT(res.shape, ElementsAre(9, 9));
  EXPECT_THAT(
      res.data,
      ElementsAreArray({1,    1,    1,    1,    1,    1,    1,    1,    1,
                        2,    6,    12,   20,   30,   42,   56,   72,   90,
                        132,  156,  182,  210,  240,  272,  306,  342,  380,
                        462,  506,  552,  600,  650,  702,  756,  812,  870,
                        992,  1056, 1122, 1190, 1260, 1332, 1406, 1482, 1560,
                        1722, 1806, 1892, 1980, 2070, 2162, 2256, 2352, 2450,
                        2652, 2756, 2862, 2970, 3080, 3192, 3306, 3422, 3540,
                        3782, 3906, 4032, 4160, 4290, 4422, 4556, 4692, 4830,
                        5112, 5256, 5402, 5550, 5700, 5852, 6006, 6162, 6320}));
}
TEST(ReferenceTest, RandomJaxReference170) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 2},
{2, -1, 0, -1},
1,
{1, 1},
{2, 1},
{2, 1},
std::multiplies<>());
EXPECT_THAT(res.shape, ElementsAre(6, 18));
EXPECT_THAT(
res.data,
ElementsAreArray({1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 2, 1, 3, 1, 4, 1, 5, 1, 6, 1, 7, 1,
8, 1, 9, 1, 21, 1, 22, 1, 23, 1, 24, 1, 25, 1, 26, 1,
27, 1, 28, 1, 29, 1, 41, 1, 42, 1, 43, 1, 44, 1, 45, 1,
46, 1, 47, 1, 48, 1, 49, 1, 61, 1, 62, 1, 63, 1, 64, 1,
65, 1, 66, 1, 67, 1, 68, 1, 69, 1, 81, 1, 82, 1, 83, 1,
84, 1, 85, 1, 86, 1, 87, 1, 88, 1, 89, 1}));
}
TEST(ReferenceTest, RandomJaxReference171) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 2},
{0, -2, 2, 0},
1,
{2, 2},
{1, 1},
{1, 2},
std::multiplies<>());
EXPECT_THAT(res.shape, ElementsAre(7, 10));
EXPECT_THAT(res.data,
ElementsAreArray(
{1, 11, 24, 39, 56, 75, 96, 119, 144, 171,
1, 231, 264, 299, 336, 375, 416, 459, 504, 551,
1, 651, 704, 759, 816, 875, 936, 999, 1064, 1131,
1, 1271, 1344, 1419, 1496, 1575, 1656, 1739, 1824, 1911,
1, 2091, 2184, 2279, 2376, 2475, 2576, 2679, 2784, 2891,
1, 3111, 3224, 3339, 3456, 3575, 3696, 3819, 3944, 4071,
1, 4331, 4464, 4599, 4736, 4875, 5016, 5159, 5304, 5451}));
}
TEST(ReferenceTest, RandomJaxReference172) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 2},
{-1, 1, 2, 2},
0,
{1, 1},
{2, 1},
{2, 2},
std::plus<>());
EXPECT_THAT(res.shape, ElementsAre(10, 12));
EXPECT_THAT(
res.data,
ElementsAreArray({0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}));
}
TEST(ReferenceTest, RandomJaxReference173) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 2},
{1, 1, 1, 0},
-2147483647,
{1, 2},
{1, 1},
{1, 2},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(21, 10));
EXPECT_THAT(
res.data,
ElementsAreArray(
{-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
1, 2, 3, 4, 5,
6, 7, 8, 9, 10,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
11, 12, 13, 14, 15,
16, 17, 18, 19, 20,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
21, 22, 23, 24, 25,
26, 27, 28, 29, 30,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
31, 32, 33, 34, 35,
36, 37, 38, 39, 40,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
41, 42, 43, 44, 45,
46, 47, 48, 49, 50,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
51, 52, 53, 54, 55,
56, 57, 58, 59, 60,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
61, 62, 63, 64, 65,
66, 67, 68, 69, 70,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
71, 72, 73, 74, 75,
76, 77, 78, 79, 80,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
81, 82, 83, 84, 85,
86, 87, 88, 89, 90,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
91, 92, 93, 94, 95,
96, 97, 98, 99, 100,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647}));
}
TEST(ReferenceTest, RandomJaxReference174) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 2},
{0, -1, -2, -1},
2147483646,
{1, 2},
{1, 2},
{1, 2},
[](auto a, auto b) { return a <= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(18, 7));
EXPECT_THAT(res.data,
ElementsAreArray(
{2, 3, 4, 5, 6,
7, 8, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 12,
13, 14, 15, 16, 17,
18, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 22, 23,
24, 25, 26, 27, 28,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 32, 33, 34,
35, 36, 37, 38, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 42, 43, 44, 45,
46, 47, 48, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
52, 53, 54, 55, 56,
57, 58, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 62,
63, 64, 65, 66, 67,
68, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 72, 73,
74, 75, 76, 77, 78,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 82, 83, 84,
85, 86, 87, 88, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646}));
}
TEST(ReferenceTest, RandomJaxReference175) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 2},
{-2, 2, 0, 0},
1,
{2, 2},
{1, 2},
{1, 2},
std::multiplies<>());
EXPECT_THAT(res.shape, ElementsAre(9, 9));
EXPECT_THAT(
res.data,
ElementsAreArray(
{458304, 534336, 619344, 714000, 819000, 935064, 1062936,
1203384, 1357200, 1708224, 1907136, 2122824, 2356200, 2608200,
2879784, 3171936, 3485664, 3822000, 4566744, 4977336, 5414904,
5880600, 6375600, 6901104, 7458336, 8048544, 8673000, 10029864,
10764936, 11539584, 12355200, 13213200, 14115024, 15062136, 16056024,
17098200, 19333584, 20529936, 21780864, 23088000, 24453000, 25877544,
27363336, 28912104, 30525600, 33953904, 35772336, 37662744, 39627000,
41667000, 43784664, 45981936, 48260784, 50623200, 55606824, 58232136,
60949224, 63760200, 66667200, 69672384, 72777936, 75986064, 79299000,
8372, 8556, 8742, 8930, 9120, 9312, 9506,
9702, 9900, 1, 1, 1, 1, 1,
1, 1, 1, 1}));
}
TEST(ReferenceTest, RandomJaxReference176) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{1, -2, -1, 2},
2147483646,
{2, 2},
{2, 1},
{2, 1},
[](auto a, auto b) { return a <= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(8, 10));
EXPECT_THAT(
res.data,
ElementsAreArray(
{2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646}));
}
TEST(ReferenceTest, RandomJaxReference177) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{0, 1, 0, 2},
0,
{1, 2},
{2, 2},
{2, 2},
std::plus<>());
EXPECT_THAT(res.shape, ElementsAre(10, 5));
EXPECT_THAT(res.data, ElementsAreArray(
{4, 8, 12, 16, 9, 24, 28, 32, 36, 19,
44, 48, 52, 56, 29, 64, 68, 72, 76, 39,
84, 88, 92, 96, 49, 104, 108, 112, 116, 59,
124, 128, 132, 136, 69, 144, 148, 152, 156, 79,
164, 168, 172, 176, 89, 184, 188, 192, 196, 99}));
}
TEST(ReferenceTest, RandomJaxReference178) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 1},
{2, 1, 2, 1},
2147483646,
{1, 2},
{2, 2},
{2, 1},
[](auto a, auto b) { return a <= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(7, 11));
EXPECT_THAT(res.data,
ElementsAreArray(
{2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 1, 2, 1, 2,
3, 4, 5, 6, 7,
8, 9, 21, 22, 21,
22, 23, 24, 25, 26,
27, 28, 29, 41, 42,
41, 42, 43, 44, 45,
46, 47, 48, 49, 61,
62, 61, 62, 63, 64,
65, 66, 67, 68, 69,
81, 82, 81, 82, 83,
84, 85, 86, 87, 88,
89, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646}));
}
// Auto-generated golden test: product reduction (std::multiplies with
// identity init 1) over a 10x10 iota tensor; expected values were
// precomputed by the JAX reference implementation.
// NOTE(review): argument roles (window dims/padding/strides/dilations) come
// from ReduceWindow's signature, which is not visible here — verify before
// changing any of the positional {..} lists.
TEST(ReferenceTest, RandomJaxReference179) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {1, 2},
      {-2, -2, 2, 0},
      1,
      {1, 1},
      {1, 2},
      {2, 2},
      std::multiplies<>());
  EXPECT_THAT(res.shape, ElementsAre(3, 11));
  EXPECT_THAT(res.data,
              ElementsAreArray({1, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
                                1, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
                                1, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70}));
}
TEST(ReferenceTest, RandomJaxReference180) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 1},
{1, -2, 1, 0},
-2147483647,
{1, 1},
{1, 1},
{1, 2},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(9, 6));
EXPECT_THAT(
res.data,
ElementsAreArray(
{-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, 2, 4, 6,
8, 10, -2147483647, 12, 14,
16, 18, 20, -2147483647, 22,
24, 26, 28, 30, -2147483647,
32, 34, 36, 38, 40,
-2147483647, 42, 44, 46, 48,
50, -2147483647, 52, 54, 56,
58, 60, -2147483647, 62, 64,
66, 68, 70, -2147483647, 72,
74, 76, 78, 80}));
}
TEST(ReferenceTest, RandomJaxReference181) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 2},
{-2, -1, -1, -2},
2147483646,
{1, 1},
{1, 1},
{1, 2},
[](auto a, auto b) { return a <= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(7, 8));
EXPECT_THAT(res.data,
ElementsAreArray(
{2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646}));
}
TEST(ReferenceTest, RandomJaxReference182) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 2},
{-1, -1, 2, -1},
0,
{2, 1},
{2, 1},
{2, 1},
std::plus<>());
EXPECT_THAT(res.shape, ElementsAre(3, 20));
EXPECT_THAT(res.data, ElementsAreArray(
{0, 0, 42, 0, 44, 0, 46, 0, 48, 0, 50, 0,
52, 0, 54, 0, 56, 0, 58, 0, 0, 0, 82, 0,
84, 0, 86, 0, 88, 0, 90, 0, 92, 0, 94, 0,
96, 0, 98, 0, 0, 0, 122, 0, 124, 0, 126, 0,
128, 0, 130, 0, 132, 0, 134, 0, 136, 0, 138, 0}));
}
TEST(ReferenceTest, RandomJaxReference183) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 2},
{1, -1, -2, -1},
1,
{2, 2},
{2, 1},
{1, 2},
std::multiplies<>());
EXPECT_THAT(res.shape, ElementsAre(17, 8));
EXPECT_THAT(
res.data,
ElementsAreArray(
{1, 1, 1, 1, 1, 1, 1, 1, 24, 39, 56,
75, 96, 119, 144, 171, 1, 1, 1, 1, 1, 1,
1, 1, 264, 299, 336, 375, 416, 459, 504, 551, 1,
1, 1, 1, 1, 1, 1, 1, 704, 759, 816, 875,
936, 999, 1064, 1131, 1, 1, 1, 1, 1, 1, 1,
1, 1344, 1419, 1496, 1575, 1656, 1739, 1824, 1911, 1, 1,
1, 1, 1, 1, 1, 1, 2184, 2279, 2376, 2475, 2576,
2679, 2784, 2891, 1, 1, 1, 1, 1, 1, 1, 1,
3224, 3339, 3456, 3575, 3696, 3819, 3944, 4071, 1, 1, 1,
1, 1, 1, 1, 1, 4464, 4599, 4736, 4875, 5016, 5159,
5304, 5451, 1, 1, 1, 1, 1, 1, 1, 1, 5904,
6059, 6216, 6375, 6536, 6699, 6864, 7031, 1, 1, 1, 1,
1, 1, 1, 1}));
}
TEST(ReferenceTest, RandomJaxReference184) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{-1, 1, 2, -1},
2147483646,
{2, 2},
{2, 2},
{2, 2},
[](auto a, auto b) { return a <= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(9, 5));
EXPECT_THAT(
res.data,
ElementsAreArray(
{2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646}));
}
TEST(ReferenceTest, RandomJaxReference185) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 2},
{2, -1, -2, 0},
0,
{1, 1},
{2, 2},
{1, 2},
std::plus<>());
EXPECT_THAT(res.shape, ElementsAre(11, 9));
EXPECT_THAT(
res.data,
ElementsAreArray(
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17, 18,
19, 20, 22, 23, 24, 25, 26, 27, 28, 29, 30, 32, 33, 34, 35, 36, 37,
38, 39, 40, 42, 43, 44, 45, 46, 47, 48, 49, 50, 52, 53, 54, 55, 56,
57, 58, 59, 60, 62, 63, 64, 65, 66, 67, 68, 69, 70, 72, 73, 74, 75,
76, 77, 78, 79, 80, 82, 83, 84, 85, 86, 87, 88, 89, 90}));
}
TEST(ReferenceTest, RandomJaxReference186) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 1},
{0, 0, 0, -2},
-2147483647,
{2, 2},
{1, 1},
{1, 1},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(9, 7));
EXPECT_THAT(res.data, ElementsAreArray(
{12, 13, 14, 15, 16, 17, 18, 22, 23, 24, 25, 26, 27,
28, 32, 33, 34, 35, 36, 37, 38, 42, 43, 44, 45, 46,
47, 48, 52, 53, 54, 55, 56, 57, 58, 62, 63, 64, 65,
66, 67, 68, 72, 73, 74, 75, 76, 77, 78, 82, 83, 84,
85, 86, 87, 88, 92, 93, 94, 95, 96, 97, 98}));
}
// Auto-generated golden test: min-reduction (reducer keeps the smaller value,
// init INT_MAX-1) over a 10x10 iota tensor; the expected 4x4 result was
// precomputed by the JAX reference implementation.
// NOTE(review): the positional {..} arguments map to window/padding/stride
// parameters per ReduceWindow's signature (declared elsewhere) — confirm
// before editing.
TEST(ReferenceTest, RandomJaxReference187) {
  const Tensor<int> res = ReduceWindow<int>(
      Tensor<int>::iota({10, 10}),
      {1, 1},
      {0, 0, 0, -2},
      2147483646,
      {2, 1},
      {2, 1},
      {2, 2},
      [](auto a, auto b) { return a <= b ? a : b; });
  EXPECT_THAT(res.shape, ElementsAre(4, 4));
  EXPECT_THAT(res.data, ElementsAreArray({1, 3, 5, 7, 21, 23, 25, 27, 41, 43,
                                          45, 47, 61, 63, 65, 67}));
}
TEST(ReferenceTest, RandomJaxReference188) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 2},
{2, 2, -1, -1},
-2147483647,
{2, 1},
{2, 1},
{2, 1},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(11, 17));
EXPECT_THAT(
res.data,
ElementsAreArray(
{-2147483647, 2, -2147483647, 3, -2147483647,
4, -2147483647, 5, -2147483647, 6,
-2147483647, 7, -2147483647, 8, -2147483647,
9, -2147483647, -2147483647, 12, -2147483647,
13, -2147483647, 14, -2147483647, 15,
-2147483647, 16, -2147483647, 17, -2147483647,
18, -2147483647, 19, -2147483647, -2147483647,
22, -2147483647, 23, -2147483647, 24,
-2147483647, 25, -2147483647, 26, -2147483647,
27, -2147483647, 28, -2147483647, 29,
-2147483647, -2147483647, 32, -2147483647, 33,
-2147483647, 34, -2147483647, 35, -2147483647,
36, -2147483647, 37, -2147483647, 38,
-2147483647, 39, -2147483647, -2147483647, 42,
-2147483647, 43, -2147483647, 44, -2147483647,
45, -2147483647, 46, -2147483647, 47,
-2147483647, 48, -2147483647, 49, -2147483647,
-2147483647, 52, -2147483647, 53, -2147483647,
54, -2147483647, 55, -2147483647, 56,
-2147483647, 57, -2147483647, 58, -2147483647,
59, -2147483647, -2147483647, 62, -2147483647,
63, -2147483647, 64, -2147483647, 65,
-2147483647, 66, -2147483647, 67, -2147483647,
68, -2147483647, 69, -2147483647, -2147483647,
72, -2147483647, 73, -2147483647, 74,
-2147483647, 75, -2147483647, 76, -2147483647,
77, -2147483647, 78, -2147483647, 79,
-2147483647, -2147483647, 82, -2147483647, 83,
-2147483647, 84, -2147483647, 85, -2147483647,
86, -2147483647, 87, -2147483647, 88,
-2147483647, 89, -2147483647, -2147483647, 92,
-2147483647, 93, -2147483647, 94, -2147483647,
95, -2147483647, 96, -2147483647, 97,
-2147483647, 98, -2147483647, 99, -2147483647,
-2147483647, 92, -2147483647, 93, -2147483647,
94, -2147483647, 95, -2147483647, 96,
-2147483647, 97, -2147483647, 98, -2147483647,
99, -2147483647}));
}
TEST(ReferenceTest, RandomJaxReference189) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 1},
{1, 0, -2, 2},
0,
{2, 2},
{1, 1},
{2, 2},
std::plus<>());
EXPECT_THAT(res.shape, ElementsAre(5, 5));
EXPECT_THAT(res.data,
ElementsAreArray({7, 11, 15, 19, 0, 74, 82, 90, 98,
0, 154, 162, 170, 178, 0, 234, 242, 250,
258, 0, 314, 322, 330, 338, 0}));
}
TEST(ReferenceTest, RandomJaxReference190) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{1, -1, 2, 0},
2147483646,
{2, 1},
{2, 1},
{2, 2},
[](auto a, auto b) { return a <= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(9, 6));
EXPECT_THAT(res.data,
ElementsAreArray(
{2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646}));
}
TEST(ReferenceTest, RandomJaxReference191) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 1},
{1, 0, 2, -2},
0,
{1, 2},
{2, 1},
{1, 2},
std::plus<>());
EXPECT_THAT(res.shape, ElementsAre(11, 5));
EXPECT_THAT(
res.data,
ElementsAreArray(
{0, 0, 0, 0, 0, 0, 3, 7, 11, 15, 0, 23, 27, 31,
35, 0, 43, 47, 51, 55, 0, 63, 67, 71, 75, 0, 83, 87,
91, 95, 0, 103, 107, 111, 115, 0, 123, 127, 131, 135, 0, 143,
147, 151, 155, 0, 163, 167, 171, 175, 0, 183, 187, 191, 195}));
}
TEST(ReferenceTest, RandomJaxReference192) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 1},
{-1, 2, 0, -1},
2147483646,
{1, 2},
{2, 1},
{1, 2},
[](auto a, auto b) { return a <= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(11, 4));
EXPECT_THAT(res.data,
ElementsAreArray(
{11, 13, 15, 17, 21,
23, 25, 27, 31, 33,
35, 37, 41, 43, 45,
47, 51, 53, 55, 57,
61, 63, 65, 67, 71,
73, 75, 77, 81, 83,
85, 87, 91, 93, 95,
97, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646}));
}
TEST(ReferenceTest, RandomJaxReference193) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{2, 2, 1, 0},
1,
{2, 1},
{2, 2},
{1, 2},
std::multiplies<>());
EXPECT_THAT(res.shape, ElementsAre(21, 6));
EXPECT_THAT(res.data, ElementsAreArray(
{1, 2, 4, 6, 8, 10, 1, 1, 1, 1, 1, 1,
1, 24, 56, 96, 144, 200, 1, 1, 1, 1, 1, 1,
1, 264, 336, 416, 504, 600, 1, 1, 1, 1, 1, 1,
1, 704, 816, 936, 1064, 1200, 1, 1, 1, 1, 1, 1,
1, 1344, 1496, 1656, 1824, 2000, 1, 1, 1, 1, 1, 1,
1, 2184, 2376, 2576, 2784, 3000, 1, 1, 1, 1, 1, 1,
1, 3224, 3456, 3696, 3944, 4200, 1, 1, 1, 1, 1, 1,
1, 4464, 4736, 5016, 5304, 5600, 1, 1, 1, 1, 1, 1,
1, 5904, 6216, 6536, 6864, 7200, 1, 1, 1, 1, 1, 1,
1, 7544, 7896, 8256, 8624, 9000, 1, 1, 1, 1, 1, 1,
1, 92, 94, 96, 98, 100}));
}
TEST(ReferenceTest, RandomJaxReference194) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 2},
{-2, -1, -2, -1},
-2147483647,
{2, 2},
{2, 1},
{2, 2},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(7, 8));
EXPECT_THAT(res.data,
ElementsAreArray({22, 23, 24, 25, 26, 27, 28, 29, 32, 33, 34, 35,
36, 37, 38, 39, 42, 43, 44, 45, 46, 47, 48, 49,
52, 53, 54, 55, 56, 57, 58, 59, 62, 63, 64, 65,
66, 67, 68, 69, 72, 73, 74, 75, 76, 77, 78, 79,
82, 83, 84, 85, 86, 87, 88, 89}));
}
TEST(ReferenceTest, RandomJaxReference195) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 2},
{-2, 1, -2, 2},
-2147483647,
{1, 2},
{2, 2},
{2, 2},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(5, 9));
EXPECT_THAT(
res.data,
ElementsAreArray(
{23, 24, 25, 26, 27,
28, 29, 30, 30, 43,
44, 45, 46, 47, 48,
49, 50, 50, 63, 64,
65, 66, 67, 68, 69,
70, 70, 83, 84, 85,
86, 87, 88, 89, 90,
90, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647}));
}
TEST(ReferenceTest, RandomJaxReference196) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 1},
{1, 1, 1, -1},
1,
{1, 2},
{2, 1},
{2, 2},
std::multiplies<>());
EXPECT_THAT(res.shape, ElementsAre(6, 5));
EXPECT_THAT(res.data,
ElementsAreArray({1, 1, 1, 1, 1, 11, 156, 210,
272, 342, 31, 1056, 1190, 1332, 1482, 51,
2756, 2970, 3192, 3422, 71, 5256, 5550, 5852,
6162, 91, 8556, 8930, 9312, 9702}));
}
TEST(ReferenceTest, RandomJaxReference197) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 1},
{-2, -2, -2, -2},
-2147483647,
{2, 1},
{2, 2},
{2, 2},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(7, 3));
EXPECT_THAT(res.data,
ElementsAreArray({23, 25, 27, 33, 35, 37, 43, 45, 47, 53, 55,
57, 63, 65, 67, 73, 75, 77, 83, 85, 87}));
}
TEST(ReferenceTest, RandomJaxReference198) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{1, 2},
{1, 1, -2, 0},
2147483646,
{1, 1},
{2, 1},
{1, 2},
[](auto a, auto b) { return a <= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(12, 9));
EXPECT_THAT(res.data,
ElementsAreArray(
{2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2,
3, 4, 5, 6, 7,
8, 9, 10, 12, 13,
14, 15, 16, 17, 18,
19, 20, 22, 23, 24,
25, 26, 27, 28, 29,
30, 32, 33, 34, 35,
36, 37, 38, 39, 40,
42, 43, 44, 45, 46,
47, 48, 49, 50, 52,
53, 54, 55, 56, 57,
58, 59, 60, 62, 63,
64, 65, 66, 67, 68,
69, 70, 72, 73, 74,
75, 76, 77, 78, 79,
80, 82, 83, 84, 85,
86, 87, 88, 89, 90,
92, 93, 94, 95, 96,
97, 98, 99, 100, 2147483646,
2147483646, 2147483646, 2147483646, 2147483646, 2147483646,
2147483646, 2147483646, 2147483646}));
}
TEST(ReferenceTest, RandomJaxReference199) {
const Tensor<int> res = ReduceWindow<int>(
Tensor<int>::iota({10, 10}),
{2, 2},
{-1, 1, -1, -2},
-2147483647,
{2, 1},
{2, 1},
{1, 2},
[](auto a, auto b) { return a >= b ? a : b; });
EXPECT_THAT(res.shape, ElementsAre(17, 8));
EXPECT_THAT(
res.data,
ElementsAreArray(
{-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647, -2147483647, -2147483647, -2147483647, -2147483647,
-2147483647}));
}
}
}
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_STRIDED_SLICE_LOGIC_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_STRIDED_SLICE_LOGIC_H_
#include <limits>
#include <vector>
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace strided_slice {
// Restricts `v` to the closed interval [lo, hi].  The interval must be
// well-formed (lo <= hi), which is asserted in debug builds.
inline int Clamp(const int v, const int lo, const int hi) {
  TFLITE_DCHECK(!(hi < lo));
  return v < lo ? lo : (hi < v ? hi : v);
}
// Pads `p` in place so that it describes exactly `dim_count` dimensions by
// prepending `dim_count - start_indices_count` unit dimensions at the front.
// Each padded axis gets start 0, stop 1, stride 1; all masks are shifted left
// by the pad count, and the begin/end mask bits of the padded axes are set.
// Requires dim_count <= 5 and equal start/stop/stride counts.
inline void StridedSlicePadIndices(tflite::StridedSliceParams* p,
                                   int dim_count) {
  TFLITE_CHECK_LE(dim_count, 5);
  TFLITE_CHECK_GE(dim_count, p->start_indices_count);
  TFLITE_CHECK_EQ(p->start_indices_count, p->stop_indices_count);
  TFLITE_CHECK_EQ(p->stop_indices_count, p->strides_count);
  const int pad_count = dim_count - p->start_indices_count;
  // Shift the existing per-axis specs toward the back.  Iterating in reverse
  // ensures a slot is copied out before it can be overwritten.
  for (int i = p->start_indices_count - 1; i >= 0; --i) {
    p->strides[i + pad_count] = p->strides[i];
    p->start_indices[i + pad_count] = p->start_indices[i];
    p->stop_indices[i + pad_count] = p->stop_indices[i];
  }
  // Fill the freed front slots with a trivial full-range spec for a
  // size-1 dimension.
  for (int i = 0; i < pad_count; ++i) {
    p->start_indices[i] = 0;
    p->stop_indices[i] = 1;
    p->strides[i] = 1;
  }
  // Re-align every mask with the shifted axis numbering.
  p->shrink_axis_mask <<= pad_count;
  p->ellipsis_mask <<= pad_count;
  p->new_axis_mask <<= pad_count;
  p->begin_mask <<= pad_count;
  p->end_mask <<= pad_count;
  // Mark the padded axes as full-range via begin/end masks.
  p->begin_mask |= (1 << pad_count) - 1;
  p->end_mask |= (1 << pad_count) - 1;
  p->start_indices_count = dim_count;
  p->stop_indices_count = dim_count;
  p->strides_count = dim_count;
}
// Resolves the (possibly negative or out-of-range) begin index for `axis`
// into a concrete start position within the axis extent.
inline int StridedSliceStartForAxis(const tflite::StridedSliceParams& params,
                                    const RuntimeShape& input_shape,
                                    int32_t axis) {
  const int32_t dim = input_shape.Dims(axis);
  const int32_t stride = params.strides[axis];
  // begin_mask overrides the explicit index: start from the first element in
  // iteration order (front for a positive stride, back for a negative one).
  if (params.begin_mask & (1 << axis)) {
    return stride > 0 ? 0 : dim - 1;
  }
  int32_t pos = params.start_indices[axis];
  if (pos < 0) {
    pos += dim;  // Negative indices count from the end of the axis.
  }
  // Positive strides may start anywhere in [0, dim]; negative strides use
  // [-1, dim - 1] so that -1 (one before the first element) is reachable.
  return stride > 0 ? Clamp(pos, 0, dim) : Clamp(pos, -1, dim - 1);
}
// Computes the exclusive stop position for `axis`, given the already-resolved
// `start` position.  Handles shrink_axis (single-element slices), the
// `offset` mode in which stop_indices holds a length relative to `start`,
// negative index wrap-around, clamping, and end_mask overrides.
inline int StridedSliceEndForAxis(const tflite::StridedSliceParams& params,
                                  const RuntimeShape& input_shape, int axis,
                                  int start) {
  const auto shrink_axis_mask = params.shrink_axis_mask;
  const bool shrink_axis = shrink_axis_mask & (1 << axis);
  const int axis_size = input_shape.Dims(axis);
  const bool offset = params.offset;
  if (shrink_axis) {
    // A shrunk axis yields exactly one element; if `start` is already past
    // the end, return it unchanged so the slice comes out empty.
    if (start >= axis_size) {
      return start;
    } else {
      return start + 1;
    }
  }
  const auto* indices = params.stop_indices;
  int end = indices[axis];
  if (offset) {
    // In offset mode stop_indices stores a length, not a position.
    end += start;
  }
  const int32_t stride = params.strides[axis];
  const int32_t end_mask = (params.end_mask & 1 << axis);
  if (end < 0) {
    end += axis_size;  // Negative positions count from the end of the axis.
  }
  // Positive strides stop within [0, axis_size]; negative strides within
  // [-1, axis_size - 1] so that -1 can act as the exclusive lower bound.
  if (stride > 0) {
    end = Clamp(end, 0, axis_size);
  } else {
    end = Clamp(end, -1, axis_size - 1);
  }
  // end_mask overrides everything above: run to the last element in
  // iteration order.
  if (end_mask) {
    if (stride > 0) {
      end = axis_size;
    } else {
      end = -1;
    }
  }
  return end;
}
// Resolves the begin index for `axis` (legacy variant used alongside
// StopForAxis): honors begin_mask by saturating the index, applies negative
// wrap-around, then clamps to the range valid for the stride direction.
inline int StartForAxis(const tflite::StridedSliceParams& params,
                        const RuntimeShape& input_shape, int axis) {
  const int extent = input_shape.Dims(axis);
  if (extent == 0) {
    return 0;  // Empty axes always start at 0.
  }
  const int stride = params.strides[axis];
  const bool forward = stride > 0;
  int begin = params.start_indices[axis];
  // begin_mask: request the earliest position in iteration order by pushing
  // the index to a sentinel extreme; the clamp below maps it into range.
  if (params.begin_mask & (1 << axis)) {
    begin = forward ? std::numeric_limits<int>::lowest()
                    : std::numeric_limits<int>::max();
  }
  if (begin < 0) {
    begin += extent;  // Negative indices count from the end of the axis.
  }
  // Forward slices start in [0, extent]; backward ones in [-1, extent - 1].
  return forward ? Clamp(begin, 0, extent) : Clamp(begin, -1, extent - 1);
}
// Resolves the exclusive stop index for `axis` (legacy variant paired with
// StartForAxis).  A shrunk axis stops one past `start_for_axis`; end_mask is
// honored by saturating the index to a sentinel before the clamp; negative
// positions wrap from the end of the axis.
inline int StopForAxis(const tflite::StridedSliceParams& params,
                       const RuntimeShape& input_shape, int axis,
                       int start_for_axis) {
  const auto end_mask = params.end_mask;
  const auto shrink_axis_mask = params.shrink_axis_mask;
  const auto* stop_indices = params.stop_indices;
  const auto* strides = params.strides;
  const int axis_size = input_shape.Dims(axis);
  if (axis_size == 0) {
    return 0;
  }
  const bool shrink_axis = shrink_axis_mask & (1 << axis);
  int stop = stop_indices[axis];
  if (shrink_axis) {
    // Shrunk axes select exactly the single element at start_for_axis.
    return start_for_axis + 1;
  }
  // end_mask: run to the last element in iteration order; the sentinel
  // extreme is mapped into range by the clamp below.
  if (end_mask & (1 << axis)) {
    if (strides[axis] > 0) {
      stop = std::numeric_limits<int>::max();
    } else {
      stop = std::numeric_limits<int>::lowest();
    }
  }
  if (stop < 0) {
    stop += axis_size;  // Negative positions count from the end of the axis.
  }
  // Forward slices stop within [0, axis_size]; backward slices within
  // [-1, axis_size - 1] so -1 can serve as the exclusive lower bound.
  if (strides[axis] > 0) {
    stop = Clamp(stop, 0, axis_size);
  } else {
    stop = Clamp(stop, -1, axis_size - 1);
  }
  return stop;
}
// Returns true when the slice loop at `index` is finished: a forward
// (positive-stride) walk terminates once `index` reaches `stop`, a backward
// walk once `index` falls to or below `stop`.
inline bool LoopCondition(int index, int stop, int stride) {
  if (stride > 0) {
    return index >= stop;
  }
  return index <= stop;
}
// Assembles a StridedSliceParams from per-axis begin/end/stride vectors and
// the three mask words.  ellipsis_mask and new_axis_mask are always zero.
// The three vectors must have equal length (one entry per dimension).
inline tflite::StridedSliceParams BuildStridedSliceParams(
    int begin_mask, int end_mask, int shrink_axis_mask,
    const std::vector<int>& start_indices, const std::vector<int>& stop_indices,
    const std::vector<int>& strides) {
  tflite::StridedSliceParams op_params{};
  // A length mismatch would silently read past the end of the shorter
  // vectors in the copy loop below, so fail fast instead.
  TFLITE_CHECK_EQ(start_indices.size(), stop_indices.size());
  TFLITE_CHECK_EQ(stop_indices.size(), strides.size());
  const int dims_count = static_cast<int>(start_indices.size());
  op_params.start_indices_count = dims_count;
  op_params.stop_indices_count = dims_count;
  op_params.strides_count = dims_count;
  for (int i = 0; i < dims_count; ++i) {
    op_params.start_indices[i] = start_indices[i];
    op_params.stop_indices[i] = stop_indices[i];
    op_params.strides[i] = strides[i];
  }
  op_params.begin_mask = begin_mask;
  op_params.ellipsis_mask = 0;
  op_params.end_mask = end_mask;
  op_params.new_axis_mask = 0;
  op_params.shrink_axis_mask = shrink_axis_mask;
  return op_params;
}
}
}
#endif
#include "tensorflow/lite/kernels/internal/strided_slice_logic.h"
#include <initializer_list>
#include <gtest/gtest.h>
namespace tflite {
namespace {
// Builds a StridedSliceParams from the given per-axis begin/end/stride specs,
// pads it to four dimensions via StridedSlicePadIndices, and checks the
// padded begin/end/stride values against the expectations.
void RunStridedSlicePadIndices(std::initializer_list<int> begin,
                               std::initializer_list<int> end,
                               std::initializer_list<int> stride,
                               std::initializer_list<int> expected_begin,
                               std::initializer_list<int> expected_end,
                               std::initializer_list<int> expected_stride) {
  StridedSliceParams op_params;
  const int dims = static_cast<int>(begin.size());
  op_params.start_indices_count = dims;
  op_params.stop_indices_count = dims;
  op_params.strides_count = dims;
  const int* begin_it = begin.begin();
  const int* end_it = end.begin();
  const int* stride_it = stride.begin();
  for (int i = 0; i < dims; ++i) {
    op_params.start_indices[i] = begin_it[i];
    op_params.stop_indices[i] = end_it[i];
    op_params.strides[i] = stride_it[i];
  }
  strided_slice::StridedSlicePadIndices(&op_params, 4);
  const int* want_begin = expected_begin.begin();
  const int* want_end = expected_end.begin();
  const int* want_stride = expected_stride.begin();
  for (int i = 0; i < 4; ++i) {
    EXPECT_EQ(op_params.start_indices[i], want_begin[i]);
    EXPECT_EQ(op_params.stop_indices[i], want_end[i]);
    EXPECT_EQ(op_params.strides[i], want_stride[i]);
  }
}
// Pad1..Pad3: padding a 3D/2D/1D slice spec to 4D prepends size-1 dims
// (begin 0, end 1, stride 1) on the left.
TEST(RunStridedSlicePadIndices, Pad1) {
  RunStridedSlicePadIndices({1, 2, 3},
                            {4, 5, 6},
                            {2, 2, 2},
                            {0, 1, 2, 3},
                            {1, 4, 5, 6},
                            {1, 2, 2, 2}
  );
}
TEST(RunStridedSlicePadIndices, Pad2) {
  RunStridedSlicePadIndices({1, 2},
                            {4, 5},
                            {2, 2},
                            {0, 0, 1, 2},
                            {1, 1, 4, 5},
                            {1, 1, 2, 2}
  );
}
TEST(RunStridedSlicePadIndices, Pad3) {
  RunStridedSlicePadIndices({1},
                            {4},
                            {2},
                            {0, 0, 0, 1},
                            {1, 1, 1, 4},
                            {1, 1, 1, 2}
  );
}
// The following tests exercise StridedSliceStartForAxis on a length-10 axis
// with stride 1, covering every clamping case of the start index: negative
// out-of-bounds, exactly -size, negative in-bounds, -1, 0, positive
// in-bounds, the boundary size-1, and positive out-of-bounds.
TEST(StridedSliceStartForAxis, NegativeOOBIndex) {
  StridedSliceParams params{};
  params.begin_mask = 0;
  params.end_mask = 0;
  params.start_indices[0] = -11;
  params.strides[0] = 1;
  int start = strided_slice::StridedSliceStartForAxis(
      params, RuntimeShape({10}), 0);
  EXPECT_EQ(start, 0);
}
TEST(StridedSliceStartForAxis, NegativeOneTheBoundaryIndex) {
  StridedSliceParams params{};
  params.begin_mask = 0;
  params.end_mask = 0;
  params.start_indices[0] = -10;
  params.strides[0] = 1;
  int start = strided_slice::StridedSliceStartForAxis(
      params, RuntimeShape({10}), 0);
  EXPECT_EQ(start, 0);
}
TEST(StridedSliceStartForAxis, NegativeWithinBoundsIndex) {
  StridedSliceParams params{};
  params.begin_mask = 0;
  params.end_mask = 0;
  params.start_indices[0] = -9;
  params.strides[0] = 1;
  int start = strided_slice::StridedSliceStartForAxis(
      params, RuntimeShape({10}), 0);
  EXPECT_EQ(start, 1);
}
TEST(StridedSliceStartForAxis, MinusOneIndex) {
  StridedSliceParams params{};
  params.begin_mask = 0;
  params.end_mask = 0;
  params.start_indices[0] = -1;
  params.strides[0] = 1;
  int start = strided_slice::StridedSliceStartForAxis(
      params, RuntimeShape({10}), 0);
  EXPECT_EQ(start, 9);
}
TEST(StridedSliceStartForAxis, ZeroIndex) {
  StridedSliceParams params{};
  params.begin_mask = 0;
  params.end_mask = 0;
  params.start_indices[0] = 0;
  params.strides[0] = 1;
  int start = strided_slice::StridedSliceStartForAxis(
      params, RuntimeShape({10}), 0);
  EXPECT_EQ(start, 0);
}
TEST(StridedSliceStartForAxis, OneIndex) {
  StridedSliceParams params{};
  params.begin_mask = 0;
  params.end_mask = 0;
  params.start_indices[0] = 1;
  params.strides[0] = 1;
  int start = strided_slice::StridedSliceStartForAxis(
      params, RuntimeShape({10}), 0);
  EXPECT_EQ(start, 1);
}
TEST(StridedSliceStartForAxis, PositiveBoundaryIndex) {
  StridedSliceParams params{};
  params.begin_mask = 0;
  params.end_mask = 0;
  params.start_indices[0] = 9;
  params.strides[0] = 1;
  int start = strided_slice::StridedSliceStartForAxis(
      params, RuntimeShape({10}), 0);
  EXPECT_EQ(start, 9);
}
TEST(StridedSliceStartForAxis, PositiveOOBIndexSizeofArray) {
  StridedSliceParams params{};
  params.begin_mask = 0;
  params.end_mask = 0;
  params.start_indices[0] = 10;
  params.strides[0] = 1;
  int start = strided_slice::StridedSliceStartForAxis(
      params, RuntimeShape({10}), 0);
  EXPECT_EQ(start, 10);
}
TEST(StridedSliceStartForAxis, PositiveOOBIndex) {
  StridedSliceParams params{};
  params.begin_mask = 0;
  params.end_mask = 0;
  params.start_indices[0] = 11;
  params.strides[0] = 1;
  int start = strided_slice::StridedSliceStartForAxis(
      params, RuntimeShape({10}), 0);
  EXPECT_EQ(start, 10);
}
// Negative stride: start 5 on a length-4 axis clamps to 3 (last element),
// and the end index stays at the requested 2.
TEST(StridedSliceStartForAxis, TenFourMinus1) {
  StridedSliceParams params{};
  params.begin_mask = 0;
  params.end_mask = 0;
  params.start_indices[0] = 5;
  params.stop_indices[0] = 2;
  params.strides[0] = -1;
  int start = strided_slice::StridedSliceStartForAxis(params, RuntimeShape({4}),
                                                      0);
  int stop = strided_slice::StridedSliceEndForAxis(params, RuntimeShape({4}),
                                                   0, start);
  EXPECT_EQ(start, 3);
  EXPECT_EQ(stop, 2);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/internal/strided_slice_logic.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/internal/strided_slice_logic_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6c3acfcc-9911-4a1a-ab26-87375436a5b8 | cpp | tensorflow/tensorflow | avx2_quantization_utils | tensorflow/lite/kernels/internal/optimized/avx2_quantization_utils.h | tensorflow/lite/kernels/internal/optimized/avx2_quantization_utils_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_AVX2_QUANTIZATION_UTILS_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_AVX2_QUANTIZATION_UTILS_H_
#ifdef __AVX2__
#include <immintrin.h>
#include <limits>
#include "tensorflow/lite/kernels/internal/compatibility.h"
namespace tflite {
namespace avx2_utils {
// Stores the low 64 bits of `v` to `dst`. clang and MSVC expose the
// _mm_storeu_si64 intrinsic directly; other compilers (gcc) fall back to
// extracting the low qword and writing it through an int64 lvalue.
// NOTE(review): the fallback path assumes `dst` can be accessed as an
// int64_t (alignment/aliasing) — confirm all call sites satisfy this.
static inline void mm_storeu_si64(void *dst, __m128i v) {
#if (defined __clang__) || (defined _MSC_VER)
  _mm_storeu_si64(dst, v);
#else
  *static_cast<std::int64_t *>(dst) = _mm_extract_epi64(v, 0);
#endif
}
// Per-32-bit-lane blend: for each lane the result takes `b` when the most
// significant bit of the corresponding `mask` lane is set, otherwise `a`.
// AVX2 has no variable epi32 blend, so the integers are reinterpreted as
// floats to use _mm256_blendv_ps (which selects on the per-lane sign bit)
// and cast back; no value conversion happens.
static inline __m256i mm256_blendv_epi32(const __m256i &a, const __m256i &b,
                                         const __m256i &mask) {
  __m256 result =
      _mm256_blendv_ps(_mm256_castsi256_ps(a), _mm256_castsi256_ps(b),
                       _mm256_castsi256_ps(mask));
  return _mm256_castps_si256(result);
}
// Rounding arithmetic right shift of each int32 lane by the scalar
// `right_shift` (must be > 0): adds a nudge of 2^(right_shift - 1) before
// shifting, i.e. rounds half upward. Lanes where value + nudge would
// overflow int32 are saturated to INT32_MAX instead.
static inline __m256i rounding_right_shift(const __m256i &value,
                                           int32_t right_shift) {
  TFLITE_DCHECK_GT(right_shift, 0);
  // Rounding nudge: half of the divisor 2^right_shift.
  const int32_t one_shift_exp_minus1 = 1 << (right_shift - 1);
  __m256i nudge = _mm256_set1_epi32(one_shift_exp_minus1);
  const __m256i r_plus_nudge = _mm256_add_epi32(value, nudge);
  const __m256i shifted_sum =
      _mm256_srav_epi32(r_plus_nudge, _mm256_set1_epi32(right_shift));
  // Lanes whose value + nudge wrapped past INT32_MAX get clamped to the max.
  const __m256i mask_num_plus_nudge_overflow = _mm256_cmpgt_epi32(
      value, _mm256_set1_epi32(0x7fffffff - one_shift_exp_minus1));
  return mm256_blendv_epi32(
      shifted_sum, _mm256_set1_epi32(std::numeric_limits<std::int32_t>::max()),
      mask_num_plus_nudge_overflow);
}
// Per-lane variant of rounding_right_shift: each int32 lane of `value` is
// shifted by the matching lane of `right_shift`. Lanes with a non-positive
// shift amount get a zero nudge (no rounding adjustment); lanes where
// value + nudge would overflow int32 saturate to INT32_MAX.
static inline __m256i rounding_right_shift(const __m256i &value,
                                           const __m256i right_shift) {
  const __m256i zeros = _mm256_setzero_si256();
  // Which lanes actually have a positive shift (and thus need a nudge).
  const __m256i mask_rightshift_gtz = _mm256_cmpgt_epi32(right_shift, zeros);
  const __m256i one_shift_exp_minus1 =
      _mm256_sllv_epi32(_mm256_set1_epi32(1),
                        _mm256_sub_epi32(right_shift, _mm256_set1_epi32(1)));
  // Per-lane nudge: 2^(shift-1) where shift > 0, else 0.
  __m256i nudge =
      mm256_blendv_epi32(zeros, one_shift_exp_minus1, mask_rightshift_gtz);
  const __m256i r_plus_nudge = _mm256_add_epi32(value, nudge)
;
  const __m256i shifted_sum = _mm256_srav_epi32(r_plus_nudge, right_shift);
  // Overflow lanes (value + nudge > INT32_MAX) are clamped to the max.
  const __m256i mask_num_plus_nudge_overflow = _mm256_cmpgt_epi32(
      value, _mm256_sub_epi32(_mm256_set1_epi32(0x7fffffff), nudge));
  return mm256_blendv_epi32(
      shifted_sum, _mm256_set1_epi32(std::numeric_limits<std::int32_t>::max()),
      mask_num_plus_nudge_overflow);
}
// Narrows the eight int32 lanes of `v` to int16 and stores them contiguously
// at `dst`. The byte shuffle keeps only the low two bytes of each 32-bit
// lane, i.e. plain truncation (no saturation); callers must ensure the
// values already fit in int16.
inline void CastInt32ToInt16AndStore(int16 *dst, const __m256i v) {
  // 0x0d0c090805040100 selects bytes {0,1, 4,5, 8,9, 12,13} within each
  // 128-bit half, packing four int16 into the low 8 bytes of each half.
  const __m256i repack_perm = _mm256_set1_epi64x(0x0d0c090805040100);
  const __m256i shuffled_v = _mm256_shuffle_epi8(v, repack_perm);
  mm_storeu_si64(dst, _mm256_extracti128_si256(shuffled_v, 0));
  mm_storeu_si64(dst + 4, _mm256_extracti128_si256(shuffled_v, 1));
}
// Vector analogue of tflite::MultiplyByQuantizedMultiplier with a single
// Q31 `multiplier` and shift for all lanes: computes
//   ((value << left_shift) * multiplier) >> 31            if left_shift >= 0
//   rounding_right_shift((value * multiplier) >> 31, -left_shift)  otherwise.
// The >> 31 step here takes bits [31..62] of each 64-bit product without a
// rounding nudge; unit tests compare against the scalar reference with a
// tolerance of 1.
inline __m256i MultiplyByQuantizedMultiplier(const __m256i &value,
                                             const int32_t multiplier,
                                             const int32_t left_shift) {
  // Restores original lane order after the low/high product interleave below.
  const __m256i repack_perm = _mm256_setr_epi32(0, 2, 4, 6, 1, 3, 5, 7);
  const __m256i shifted_value =
      left_shift > 0 ? _mm256_sllv_epi32(value, _mm256_set1_epi32(left_shift))
                     : value;
  // Widen each 128-bit half to four 64-bit lanes and multiply by `multiplier`.
  __m256i scaled_v_low = _mm256_mul_epi32(
      _mm256_cvtepi32_epi64(_mm256_extracti128_si256(shifted_value, 0)),
      _mm256_set1_epi64x(multiplier));
  __m256i scaled_v_high = _mm256_mul_epi32(
      _mm256_cvtepi32_epi64(_mm256_extracti128_si256(shifted_value, 1)),
      _mm256_set1_epi64x(multiplier));
  // Keep bits [31..62] of every 64-bit product; the logical shift is fine
  // because only the low 32 bits of each shifted product are used below.
  scaled_v_low = _mm256_srlv_epi64(scaled_v_low, _mm256_set1_epi64x(31));
  scaled_v_high = _mm256_srlv_epi64(scaled_v_high, _mm256_set1_epi64x(31));
  scaled_v_high = _mm256_slli_epi64(scaled_v_high, 32);
  // Merge the halves back into eight int32 lanes and restore lane order.
  __m256i result = _mm256_blend_epi32(scaled_v_low, scaled_v_high, 0xaa);
  result = _mm256_permutevar8x32_epi32(result, repack_perm);
  if (left_shift >= 0) {
    return result;
  }
  return rounding_right_shift(result, -left_shift);
}
// Per-lane variant: each int32 lane of `value` has its own Q31 multiplier
// and (possibly negative) left shift. Positive shifts are applied before
// the multiply (via max(left_shift, 0)); negative shifts become a rounding
// right shift of the Q31 high product afterwards (via max(-left_shift, 0)).
inline __m256i MultiplyByQuantizedMultiplier(const __m256i &value,
                                             const __m256i multiplier,
                                             const __m256i left_shift) {
  const __m256i zero_vector = _mm256_setzero_si256();
  // Split the signed shift into its left and right components per lane.
  const __m256i positive_left_shift = _mm256_max_epi32(left_shift, zero_vector);
  const __m256i positive_right_shift =
      _mm256_max_epi32(_mm256_sub_epi32(zero_vector, left_shift), zero_vector);
  // Restores original lane order after the low/high product interleave below.
  const __m256i repack_perm = _mm256_setr_epi32(0, 2, 4, 6, 1, 3, 5, 7);
  const __m256i shifted_value = _mm256_sllv_epi32(value, positive_left_shift);
  // Widen the per-lane multipliers to 64 bits, one half at a time.
  const __m256i multiplier_low =
      _mm256_cvtepi32_epi64(_mm256_extracti128_si256(multiplier, 0));
  const __m256i multiplier_high =
      _mm256_cvtepi32_epi64(_mm256_extracti128_si256(multiplier, 1));
  __m256i scaled_v_low = _mm256_mul_epi32(
      _mm256_cvtepi32_epi64(_mm256_extracti128_si256(shifted_value, 0)),
      multiplier_low);
  __m256i scaled_v_high = _mm256_mul_epi32(
      _mm256_cvtepi32_epi64(_mm256_extracti128_si256(shifted_value, 1)),
      multiplier_high);
  // Keep bits [31..62] of each 64-bit product (Q31 high part, truncating).
  scaled_v_low = _mm256_srlv_epi64(scaled_v_low, _mm256_set1_epi64x(31))
;
  scaled_v_high = _mm256_srlv_epi64(scaled_v_high, _mm256_set1_epi64x(31));
  scaled_v_high = _mm256_slli_epi64(scaled_v_high, 32);
  // Merge halves back into eight int32 lanes, restore order, then apply the
  // per-lane rounding right shift for lanes with negative left_shift.
  __m256i result = _mm256_blend_epi32(scaled_v_low, scaled_v_high, 0xaa);
  result = _mm256_permutevar8x32_epi32(result, repack_perm);
  return rounding_right_shift(result, positive_right_shift);
}
}
}
#endif
#endif | #include "tensorflow/lite/kernels/internal/optimized/avx2_quantization_utils.h"
#include <gmock/gmock.h>
#include "tensorflow/lite/kernels/internal/common.h"
#ifdef __AVX2__
namespace tflite {
namespace avx2_utils {
namespace {
using ::testing::ElementsAreArray;
// Loads eight int32 values from `src` into an AVX2 register with src[0] in
// lane 0; _mm256_set_epi32 takes its arguments from high lane to low lane,
// hence the reversed argument order.
__m256i FillVectorWithInt32(const std::vector<int32_t>& src) {
  return _mm256_set_epi32(src[7], src[6], src[5], src[4], src[3], src[2],
                          src[1], src[0]);
}
// Compares every lane of `result` against the matching reference value,
// allowing an off-by-one difference (the SIMD path truncates the Q31 high
// product where the scalar reference rounds).
void CompareWithReferenceValue(std::vector<int32_t>& reference_values,
                               const __m256i& result) {
  EXPECT_NEAR(reference_values[0], _mm256_extract_epi32(result, 0), 1);
  EXPECT_NEAR(reference_values[1], _mm256_extract_epi32(result, 1), 1);
  EXPECT_NEAR(reference_values[2], _mm256_extract_epi32(result, 2), 1);
  EXPECT_NEAR(reference_values[3], _mm256_extract_epi32(result, 3), 1);
  EXPECT_NEAR(reference_values[4], _mm256_extract_epi32(result, 4), 1);
  EXPECT_NEAR(reference_values[5], _mm256_extract_epi32(result, 5), 1);
  EXPECT_NEAR(reference_values[6], _mm256_extract_epi32(result, 6), 1);
  EXPECT_NEAR(reference_values[7], _mm256_extract_epi32(result, 7), 1);
}
// Round-trips eight small int16 values through the int32->int16 narrowing
// store and checks they come back unchanged.
TEST(CastInt32ToInt16AndStoreTest, CastInt32ToInt16AndStoreTest) {
  const std::vector<int16_t> src = {1, 2, 3, 4, 5, 6, 7, 8};
  int16_t dst[8];
  const __m256i src_vector = _mm256_set_epi32(src[7], src[6], src[5], src[4],
                                              src[3], src[2], src[1], src[0]);
  CastInt32ToInt16AndStore(dst, src_vector);
  EXPECT_THAT(src, ElementsAreArray(dst));
}
// The four tests below check MultiplyByQuantizedMultiplier (scalar and
// per-lane shift/multiplier variants, positive and negative shifts) against
// the scalar reference implementation.
TEST(MultiplyByQuantizedMultiplierTest, PositiveLeftShiftTest) {
  std::vector<int32_t> values = {100, 200, 300, 400, 500, 600, 700, 800};
  const __m256i src_vector = FillVectorWithInt32(values);
  const int32_t left_shift = 20;
  const int32_t multiplier = 12345;
  const __m256i result =
      MultiplyByQuantizedMultiplier(src_vector, multiplier, left_shift);
  for (int i = 0; i < values.size(); i++) {
    values[i] = tflite::MultiplyByQuantizedMultiplier(values[i], multiplier,
                                                      left_shift);
  }
  CompareWithReferenceValue(values, result);
}
TEST(MultiplyByQuantizedMultiplierTest, NegativeLeftShiftTest) {
  std::vector<int32_t> values = {1000, 2000, 3000, 4000,
                                 5000, 6000, 7000, 8000};
  const __m256i src_vector = FillVectorWithInt32(values);
  const int32_t left_shift = -3;
  const int32_t multiplier = 1234567890;
  const __m256i result =
      MultiplyByQuantizedMultiplier(src_vector, multiplier, left_shift);
  for (int i = 0; i < values.size(); i++) {
    values[i] = tflite::MultiplyByQuantizedMultiplier(values[i], multiplier,
                                                      left_shift);
  }
  CompareWithReferenceValue(values, result);
}
TEST(MultiplyByQuantizedMultiplierTest, VectorPositiveLeftShiftTest) {
  std::vector<int32_t> values = {100, 200, 300, 400, 500, 600, 700, 800};
  const std::vector<int32_t> left_shifts = {20, 19, 18, 17, 16, 15, 14, 13};
  const std::vector<int32_t> multipliers = {10000, 20000, 30000, 40000,
                                            50000, 60000, 70000, 80000};
  const __m256i src_vector = FillVectorWithInt32(values);
  const __m256i left_shifts_vector = FillVectorWithInt32(left_shifts);
  const __m256i multipliers_vector = FillVectorWithInt32(multipliers);
  const __m256i result = MultiplyByQuantizedMultiplier(
      src_vector, multipliers_vector, left_shifts_vector);
  for (int i = 0; i < values.size(); i++) {
    values[i] = tflite::MultiplyByQuantizedMultiplier(values[i], multipliers[i],
                                                      left_shifts[i]);
  }
  CompareWithReferenceValue(values, result);
}
TEST(MultiplyByQuantizedMultiplierTest, VectorNegativeLeftShiftTest) {
  std::vector<int32_t> values = {1000, 2000, 3000, 4000,
                                 5000, 6000, 7000, 8000};
  const std::vector<int32_t> left_shifts = {-3, -4, -5, -6, -7, -8, -9, -10};
  const std::vector<int32_t> multipliers = {1000000000, 1100000000, 1200000000,
                                            1300000000, 1400000000, 1500000000,
                                            1600000000, 1700000000};
  const __m256i src_vector = FillVectorWithInt32(values);
  const __m256i left_shifts_vector = FillVectorWithInt32(left_shifts);
  const __m256i multipliers_vector = FillVectorWithInt32(multipliers);
  const __m256i result = MultiplyByQuantizedMultiplier(
      src_vector, multipliers_vector, left_shifts_vector);
  for (int i = 0; i < values.size(); i++) {
    values[i] = tflite::MultiplyByQuantizedMultiplier(values[i], multipliers[i],
                                                      left_shifts[i]);
  }
  CompareWithReferenceValue(values, result);
}
}
}
}
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/internal/optimized/avx2_quantization_utils.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/internal/optimized/avx2_quantization_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ebc77db0-35d1-443b-8e86-281f12c77876 | cpp | tensorflow/tensorflow | fully_connected_4bit | tensorflow/lite/kernels/internal/optimized/fully_connected_4bit.h | tensorflow/lite/kernels/fully_connected_4bit_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_FULLY_CONNECTED_4BIT_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_FULLY_CONNECTED_4BIT_H_
#include <stdint.h>
#ifndef TFLITE_MMAP_DISABLED
#include <sys/mman.h>
#endif
#include <cstdlib>
#include <memory>
#if defined(FC_4BIT_SSE) && defined(__SSSE3__)
#include "tensorflow/lite/kernels/internal/optimized/4bit/sse_fully_connected.h"
#elif defined(FC_4BIT_NEON) && (defined(__ARM_NEON__) || defined(__ARM_NEON))
#include "tensorflow/lite/kernels/internal/optimized/4bit/neon_fully_connected.h"
#else
#include "tensorflow/lite/kernels/internal/optimized/4bit/fully_connected_reference.h"
#endif
namespace tflite {
namespace optimized_4bit {
constexpr int FilterWidth = 4;
constexpr int FilterDepth = 32;
constexpr int kDefaultAlignmentPadding = 63;
// Custom deleter for the prepacked-weights buffer. In the default build the
// buffer is an anonymous mmap region, so releasing it requires the mapped
// length, which `size` carries. With TFLITE_MMAP_DISABLED the memory comes
// from new[] and `size` is unused (default 0).
struct Deleter {
  explicit Deleter(size_t size = 0) : size(size) {}
  void operator()(uint8_t* memory) {
    // unique_ptr may invoke the deleter with a null pointer.
    if (!memory) {
      return;
    }
#ifdef TFLITE_MMAP_DISABLED
    delete[] memory;
#else
    munmap(memory, size);
#endif
  }
  size_t size;
};
// Per-node state for the hybrid 4-bit fully-connected kernel: owns the
// lazily prepacked weight cache plus batch bookkeeping.
struct OpData4Bit {
  int rows_right = 1;
  int batch_size = 0;
  // True until the filter has been prepacked into prepacked_cache.
  bool needs_prepack = true;
  // 64-byte-aligned pointer into prepacked_cache_buffer; not separately owned.
  uint8_t* prepacked_cache = nullptr;
  std::unique_ptr<uint8_t[], Deleter> prepacked_cache_buffer;
  size_t prepacked_cache_buffer_size = 0;
  // Allocates `required_size` bytes for the prepacked weights (anonymous
  // private mmap, or new[] when TFLITE_MMAP_DISABLED) and sets
  // prepacked_cache to the first 64-byte boundary inside the region.
  // NOTE(review): rounding the base pointer up can consume up to
  // kDefaultAlignmentPadding bytes — confirm callers include that headroom
  // in required_size.
  void AllocatePackedRegion(size_t required_size) {
#ifdef TFLITE_MMAP_DISABLED
    uint8_t* region = new uint8_t[required_size];
    prepacked_cache_buffer =
        std::unique_ptr<uint8_t[], Deleter>(region, Deleter());
#else
    uint8_t* region = reinterpret_cast<uint8_t*>(
        mmap(nullptr, required_size, PROT_READ | PROT_WRITE,
             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
    // Deleter(required_size) remembers the mapped length for munmap.
    prepacked_cache_buffer =
        std::unique_ptr<uint8_t[], Deleter>(region, Deleter(required_size));
#ifdef MADV_MERGEABLE
    // Allow the kernel to merge identical pages (KSM) across instances.
    madvise(region, required_size, MADV_MERGEABLE);
#endif
#endif
    // Round the base pointer up to the next 64-byte boundary
    // (kDefaultAlignmentPadding == 63).
    prepacked_cache = reinterpret_cast<uint8_t*>(
        (reinterpret_cast<uintptr_t>(prepacked_cache_buffer.get()) +
         kDefaultAlignmentPadding) &
        ~kDefaultAlignmentPadding);
    prepacked_cache_buffer_size = required_size;
  }
};
namespace api {
// The functions below are thin forwarders to the architecture-specific
// optimized_4bit implementation selected by the FC_4BIT_SSE / FC_4BIT_NEON /
// reference includes at the top of this header.

// Forwards to optimized_4bit::Prepack: repacks the int8 filter `tensor`
// into `dest` using the (layout_rows x layout_cols) blocked 4-bit layout.
inline void Prepack(uint8_t* dest, const int8_t* tensor, int layout_rows,
                    int layout_cols, int src_rows, int src_cols, int width,
                    int depth) {
  optimized_4bit::Prepack(dest, tensor, layout_rows, layout_cols, src_rows,
                          src_cols, width, depth);
}
// Forwards to optimized_4bit::BatchQuantizeFloats4Bit: quantizes a batch of
// float inputs to int8, producing per-batch scaling factors and offsets.
inline void BatchQuantizeFloats4Bit(const float* float_data_ptr, int n_batch,
                                    int n_data, int8_t* quantized_data_ptr,
                                    float* scaling_factors, int width,
                                    int depth, int32_t* input_offsets) {
  optimized_4bit::BatchQuantizeFloats4Bit(float_data_ptr, n_batch, n_data,
                                          quantized_data_ptr, scaling_factors,
                                          width, depth, input_offsets);
}
// Forwards to optimized_4bit::AssignBiasAndComputeOffsets: seeds the output
// buffer with bias/offset terms before the matmul accumulation.
inline void AssignBiasAndComputeOffsets(const int32_t* input_offsets,
                                        const float* batch_scales,
                                        float* filter_scales,
                                        const float* bias_ptr,
                                        float* output_ptr, int output_depth,
                                        int batch_size) {
  optimized_4bit::AssignBiasAndComputeOffsets(
      input_offsets, batch_scales, filter_scales, bias_ptr, output_ptr,
      output_depth, batch_size);
}
// Forwards to optimized_4bit::RunAndUnpack: runs the packed 4-bit matmul and
// dequantizes/unpacks the int32 accumulators into float outputs.
inline void RunAndUnpack(int rhs_width, const uint8_t* lhs, const int8_t* rhs,
                         int32_t* dst, int output_depth, int batch_size,
                         int lhs_layout_rows, int lhs_layout_cols,
                         int rhs_layout_rows, int rhs_layout_cols,
                         int dst_layout_rows, int dst_layout_cols,
                         float* output_ptr, const float* scaling_factors,
                         const float* filter_scales) {
  optimized_4bit::RunAndUnpack(
      rhs_width, lhs, rhs, dst, output_depth, batch_size, lhs_layout_rows,
      lhs_layout_cols, rhs_layout_rows, rhs_layout_cols, dst_layout_rows,
      dst_layout_cols, output_ptr, scaling_factors, filter_scales);
}
}
}
}
#endif | #include <cstdlib>
#include <memory>
#include <random>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/fully_connected.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
// Test harness for the 4-bit hybrid FULLY_CONNECTED kernel: builds a
// single-op model with a packed int4 constant weight tensor, a float input
// and bias, and forces all filter scales to 1.0 so expected outputs can be
// computed by hand.
class FullyConnected4BitOpModel : public SingleOpModel {
 public:
  FullyConnected4BitOpModel(
      int units, int batches, const TensorData& input,
      const TensorData& weights, const TensorData& output,
      std::vector<int8_t> weights_initializer, TfLiteRegistration* registration,
      ActivationFunctionType activation_func = ActivationFunctionType_RELU)
      : batches_(batches), units_(units) {
    int total_input_size = 1;
    for (size_t i = 0; i < input.shape.size(); ++i) {
      total_input_size *= input.shape[i];
    }
    input_size_ = total_input_size / batches_;
    input_ = AddInput(input);
    // Pack two 4-bit values per int8 byte: even indices go to the low
    // nibble, odd indices to the high nibble.
    const std::vector<int8_t> quantized_data(weights_initializer);
    std::vector<int8_t> weight_data(quantized_data.size() / 2);
    for (int i = 0; i < quantized_data.size(); i++) {
      uint8_t val = quantized_data[i] & UINT8_C(15);
      if ((i % 2) == 0) {
        weight_data[i / 2] = val & INT8_C(15);
      } else {
        weight_data[i / 2] |= (val << 4);
      }
    }
    weights_ =
        AddConstInput<int8_t>(weights, weight_data.data(), weight_data.size());
    bias_ = AddInput({TensorType_FLOAT32, {units_}});
    output_ = AddOutput(output);
    FullyConnectedOptionsWeightsFormat weights_format =
        FullyConnectedOptionsWeightsFormat_DEFAULT;
    SetBuiltinOp(BuiltinOperator_FULLY_CONNECTED,
                 BuiltinOptions_FullyConnectedOptions,
                 CreateFullyConnectedOptions(builder_, activation_func,
                                             weights_format, true)
                     .Union());
    resolver_ = std::make_unique<SingleOpResolver>(
        BuiltinOperator_FULLY_CONNECTED, registration);
    BuildInterpreter({GetShape(input_), GetShape(weights_), GetShape(bias_)});
    SetUnitScale();
  }
  // Marks the weight tensor as kTfLiteInt4 and resets every per-channel
  // quantization scale to 1.0, making the dequantized weights equal to the
  // raw 4-bit values.
  void SetUnitScale() {
    TfLiteTensor* t = interpreter_->tensor(weights_);
    t->type = kTfLiteInt4;
    t->params.scale = 1.0;
    auto filter_params =
        reinterpret_cast<TfLiteAffineQuantization*>(t->quantization.params);
    if (filter_params && filter_params->scale &&
        filter_params->scale->size > 0) {
      for (int i = 0; i < filter_params->scale->size; i++) {
        filter_params->scale->data[i] = 1.0;
      }
    }
  }
  void SetInput(const std::vector<float>& f) { PopulateTensor(input_, f); }
  std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
  void SetBias(const std::vector<float>& f) { PopulateTensor(bias_, f); }
  int input_size() { return input_size_; }
  int num_units() { return units_; }
  int num_batches() { return batches_; }
 protected:
  // Tensor indices within the interpreter.
  int input_;
  int weights_;
  int bias_;
  int output_;
  int batches_;
  int units_;
  int input_size_;
  bool use_native_int4_ = false;
};
// Runs the optimized hybrid int4 kernel on a 4x40 batch against hand-checked
// expected outputs (tolerance 1.3 absorbs hybrid quantization error).
TEST(Hybrid4BitFullyConnectedOpTest, SimpleTestHybridInt4) {
  int units = 5;
  int batches = 4;
  int cols = 40;
  FullyConnected4BitOpModel m(
      units, batches,
      {TensorType_FLOAT32, {batches, cols}},
      {TensorType_INT4, {units, cols}, 0.0, 0.0, 1.0},
      {TensorType_FLOAT32, {units, batches}},
      {
          -1, 2, 3, 4, 5, 6, 7, 1, 2, 3, -1, 2, 3, 4, 5, 6, 7, 1, 2, 3,
          -1, 2, 3, 4, 5, 6, 7, 1, 2, 3, -1, 2, 3, 4, 5, 6, 7, 1, 2, 3,
          1, 2, 3, 4, 5, 6, 7, 1, 2, -3, -1, 2, 3, 4, 5, 6, 7, 1, 2, 3,
          -1, 2, 3, 4, 5, 6, 7, 1, 2, 3, -1, 2, 3, 4, 5, 6, 7, 1, 2, 3,
          1, 2, 3, 4, 5, 6, 7, 1, 2, -3, -1, 2, 3, 4, 5, 6, 7, 1, 2, 3,
          -1, 2, 3, 4, 5, 6, 7, 1, 2, 3, -1, 2, 3, 4, 5, 6, 7, 1, 2, 3,
          1, 2, 3, 4, 5, 6, 7, 1, 2, -3, -1, 2, 3, 4, 5, 6, 7, 1, 2, 3,
          -1, 2, 3, 4, 5, 6, 7, 1, 2, 3, -1, 2, 3, 4, 5, 6, 7, 1, 2, 3,
          -1, 2, 3, 4, 5, 6, 7, 1, 2, 3, -1, 2, 3, 4, 5, 6, 7, 1, 2, 3,
          -1, 2, 3, 4, 5, 6, 7, 1, 2, 3, -1, 2, 3, 4, 5, 6, 7, 1, 2, 3,
      },
      ops::builtin::Register_FULLY_CONNECTED_GENERIC_OPT(),
      ActivationFunctionType_RELU);
  m.SetBias({1, 2, 3, 1, 2});
  m.SetInput({
      1, 2, 3, 4, 5, 6, 7, 8, -9, -10, 1, 2, 3, 4, 5, 6, 7, 8, -9, -10,
      1, 2, 3, 4, 5, 6, 7, 8, -9, -10, 1, 2, 3, 4, 5, 6, 7, 8, -9, -10,
      1, 2, 3, 4, 5, 6, 7, -8, 9, -10, 1, 2, 3, 4, 5, 6, 7, 8, -9, -10,
      1, 2, 3, 4, 5, 6, 7, 8, -9, -10, 1, 2, 3, 4, 5, 6, 7, 8, -9, -10,
      1, 2, 3, 4, 5, 6, 7, 8, -9, -10, 1, 2, 3, 4, 5, 6, 7, 8, -9, -10,
      1, 2, 3, 4, 5, 6, 7, 8, -9, -10, 1, 2, 3, 4, 5, 6, 7, 8, -9, -10,
      1, 2, 3, 4, 5, 6, 7, 8, -9, -10, 1, 2, 3, 4, 5, 6, 7, 8, -9, -10,
      1, 2, 3, 4, 5, 6, 7, 8, -9, -10, 1, 2, 3, 4, 5, 6, 7, 8, -9, -10,
  });
  m.Invoke();
  EXPECT_THAT(m.GetOutput(),
              ElementsAreArray(ArrayFloatNear(
                  {393., 456., 457., 455., 394., 413., 476., 477., 475., 414.,
                   393., 456., 457., 455., 394., 393., 456., 457., 455., 394},
                  1.3f)));
}
// Same model, but the last batch row is all zeros: its outputs must reduce
// to just the bias values {1, 2, 3, 1, 2}.
TEST(Hybrid4BitFullyConnectedOpTest, TestHybridInt4AllZeroBatch) {
  int units = 5;
  int batches = 4;
  int cols = 40;
  FullyConnected4BitOpModel m(
      units, batches,
      {TensorType_FLOAT32, {batches, cols}},
      {TensorType_INT4, {units, cols}, 0.0, 0.0, 1.0},
      {TensorType_FLOAT32, {units, batches}},
      {
          -1, 2, 3, 4, 5, 6, 7, 1, 2, 3, -1, 2, 3, 4, 5, 6, 7, 1, 2, 3,
          -1, 2, 3, 4, 5, 6, 7, 1, 2, 3, -1, 2, 3, 4, 5, 6, 7, 1, 2, 3,
          1, 2, 3, 4, 5, 6, 7, 1, 2, -3, -1, 2, 3, 4, 5, 6, 7, 1, 2, 3,
          -1, 2, 3, 4, 5, 6, 7, 1, 2, 3, -1, 2, 3, 4, 5, 6, 7, 1, 2, 3,
          1, 2, 3, 4, 5, 6, 7, 1, 2, -3, -1, 2, 3, 4, 5, 6, 7, 1, 2, 3,
          -1, 2, 3, 4, 5, 6, 7, 1, 2, 3, -1, 2, 3, 4, 5, 6, 7, 1, 2, 3,
          1, 2, 3, 4, 5, 6, 7, 1, 2, -3, -1, 2, 3, 4, 5, 6, 7, 1, 2, 3,
          -1, 2, 3, 4, 5, 6, 7, 1, 2, 3, -1, 2, 3, 4, 5, 6, 7, 1, 2, 3,
          -1, 2, 3, 4, 5, 6, 7, 1, 2, 3, -1, 2, 3, 4, 5, 6, 7, 1, 2, 3,
          -1, 2, 3, 4, 5, 6, 7, 1, 2, 3, -1, 2, 3, 4, 5, 6, 7, 1, 2, 3,
      },
      ops::builtin::Register_FULLY_CONNECTED_GENERIC_OPT(),
      ActivationFunctionType_RELU);
  m.SetBias({1, 2, 3, 1, 2});
  m.SetInput({
      1, 2, 3, 4, 5, 6, 7, 8, -9, -10, 1, 2, 3, 4, 5, 6, 7, 8, -9, -10,
      1, 2, 3, 4, 5, 6, 7, 8, -9, -10, 1, 2, 3, 4, 5, 6, 7, 8, -9, -10,
      1, 2, 3, 4, 5, 6, 7, -8, 9, -10, 1, 2, 3, 4, 5, 6, 7, 8, -9, -10,
      1, 2, 3, 4, 5, 6, 7, 8, -9, -10, 1, 2, 3, 4, 5, 6, 7, 8, -9, -10,
      1, 2, 3, 4, 5, 6, 7, 8, -9, -10, 1, 2, 3, 4, 5, 6, 7, 8, -9, -10,
      1, 2, 3, 4, 5, 6, 7, 8, -9, -10, 1, 2, 3, 4, 5, 6, 7, 8, -9, -10,
      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  });
  m.Invoke();
  EXPECT_THAT(m.GetOutput(),
              ElementsAreArray(ArrayFloatNear(
                  {393., 456., 457., 455., 394., 413., 476., 477., 475., 414.,
                   393., 456., 457., 455., 394., 1, 2, 3, 1, 2},
                  1.3f)));
}
// Fixed-seed RNG so the randomized comparison tests are reproducible.
std::mt19937 random_engine(2023);
std::uniform_real_distribution<float> real_dist(0.f, 1.f);
// Weight values are drawn from the signed int4 range used here, [-7, 7].
std::uniform_int_distribution<int32_t> int_dist(-7, 7);
// Parameterized over (units, batches, cols).
class Hybrid4BitFullyConnectedVsReferenceOpTests
    : public ::testing::TestWithParam<::testing::tuple<int, int, int>> {};
// Runs identical random weights/inputs/bias through the optimized kernel and
// the reference kernel and requires the outputs to match within 1e-3.
TEST_P(Hybrid4BitFullyConnectedVsReferenceOpTests, TestHybridInt4) {
  auto params = GetParam();
  int units = std::get<0>(params);
  int batches = std::get<1>(params);
  int cols = std::get<2>(params);
  std::vector<int8_t> weight_data(units * cols, 0);
  std::vector<float> input_data(batches * cols, 0);
  std::vector<float> bias_data(units, 0);
  for (int i = 0; i < units * cols; ++i) {
    weight_data[i] = int_dist(random_engine);
  }
  for (int i = 0; i < batches * cols; ++i) {
    input_data[i] = real_dist(random_engine);
  }
  for (int i = 0; i < units; ++i) {
    bias_data[i] = real_dist(random_engine);
  }
  FullyConnected4BitOpModel test(
      units, batches,
      {TensorType_FLOAT32, {batches, cols}},
      {TensorType_INT4, {units, cols}, 0.0, 0.0, 1.0},
      {TensorType_FLOAT32, {units, batches}}, weight_data,
      ops::builtin::Register_FULLY_CONNECTED_GENERIC_OPT(),
      ActivationFunctionType_RELU);
  test.SetBias(bias_data);
  test.SetInput(input_data);
  test.Invoke();
  std::vector<float> test_data = test.GetOutput();
  FullyConnected4BitOpModel expected(
      units, batches,
      {TensorType_FLOAT32, {batches, cols}},
      {TensorType_INT4, {units, cols}, 0.0, 0.0, 1.0},
      {TensorType_FLOAT32, {units, batches}}, weight_data,
      ops::builtin::Register_FULLY_CONNECTED_REF(),
      ActivationFunctionType_RELU);
  expected.SetBias(bias_data);
  expected.SetInput(input_data);
  expected.Invoke();
  std::vector<float> expected_data = expected.GetOutput();
  EXPECT_THAT(test_data, ElementsAreArray(ArrayFloatNear(
                             expected_data, 1e-3f)));
}
// Shapes cover multiples of 32 as well as odd column counts and batches.
INSTANTIATE_TEST_SUITE_P(Hybrid4BitFullyConnectedVsReferenceOpTests,
                         Hybrid4BitFullyConnectedVsReferenceOpTests,
                         ::testing::ValuesIn({
                             std::make_tuple(4, 1, 32),
                             std::make_tuple(4, 1, 64),
                             std::make_tuple(5, 1, 128),
                             std::make_tuple(5, 4, 128),
                             std::make_tuple(5, 6, 128),
                             std::make_tuple(5, 1, 38),
                             std::make_tuple(5, 4, 72),
                             std::make_tuple(5, 6, 130),
                             std::make_tuple(4, 1, 56),
                             std::make_tuple(4, 1, 48),
                             std::make_tuple(4, 1, 120),
                         }));
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/internal/optimized/fully_connected_4bit.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/fully_connected_4bit_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ecaeac1a-3764-4ad2-9601-bb802ba6ec39 | cpp | tensorflow/tensorflow | reduce_utils | tensorflow/lite/kernels/internal/optimized/reduce_utils.h | tensorflow/lite/kernels/internal/optimized/reduce_utils_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_REDUCE_UTILS_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_REDUCE_UTILS_H_
#include <stdint.h>
#include <algorithm>
#include <cstring>
namespace tflite {
namespace reduce_utils {
// Removes every size-1 dimension from `shape_out` (in place), keeping the
// reduction axis list `axis_out` consistent: an axis that referred to a
// removed dimension is dropped, and axes past it are re-indexed down by one.
// `out_num_dims` / `out_num_axis` are updated to the new counts.
inline void RemoveSize1Dims(int* shape_out, int& out_num_dims, int* axis_out,
                            int& out_num_axis) {
  int64_t dim = 0;
  while (dim < out_num_dims) {
    if (shape_out[dim] != 1) {
      ++dim;
      continue;
    }
    // Drop this dimension from the shape by shifting the tail left.
    std::copy(shape_out + dim + 1, shape_out + out_num_dims, shape_out + dim);
    // If this dimension was being reduced, drop it from the axis list too.
    for (int64_t a = 0; a < out_num_axis; ++a) {
      if (axis_out[a] == dim) {
        std::copy(axis_out + a + 1, axis_out + out_num_axis, axis_out + a);
        out_num_axis -= 1;
        break;
      }
    }
    // Re-index every axis that pointed past the removed dimension.
    for (int64_t a = 0; a < out_num_axis; ++a) {
      if (axis_out[a] > dim) {
        axis_out[a] -= 1;
      }
    }
    --out_num_dims;
    // Do not advance `dim`: the next dimension slid into this slot.
  }
}
// Canonicalizes a reduction spec: resolves negative axes, rejects
// out-of-range axes (returns false), removes duplicates, sorts the axes,
// drops size-1 dimensions, and finally collapses runs of adjacent
// dimensions that are either all reduced or all kept into single
// dimensions. E.g. shape {2,3,4,5} with axes {0,1,2} becomes shape {24,5}
// with axis {0}. Outputs go to shape_out/out_num_dims and
// axis_out/out_num_axis; inputs are not modified. Returns true on success.
inline bool ResolveAxis(const int num_dims, const int* axis,
                        const int64_t num_axis, int* axis_out,
                        int& out_num_axis, const int* shape_in, int* shape_out,
                        int& out_num_dims) {
  // A scalar has nothing to reduce.
  if (num_dims == 0) {
    out_num_axis = 0;
    out_num_dims = 0;
    return true;
  }
  out_num_axis = 0;
  out_num_dims = num_dims;
  // Resolve negative axes, validate, and deduplicate.
  for (int64_t idx = 0; idx < num_axis; ++idx) {
    int current = axis[idx] < 0 ? (axis[idx] + num_dims) : axis[idx];
    if (current < 0 || current >= num_dims) {
      return false;
    }
    bool is_dup = false;
    for (int j = 0; j < out_num_axis; ++j) {
      if (axis_out[j] == current) {
        is_dup = true;
        break;
      }
    }
    if (!is_dup) {
      axis_out[out_num_axis] = current;
      out_num_axis += 1;
    }
  }
  memcpy(shape_out, shape_in, num_dims * sizeof(int));
  std::sort(&axis_out[0], &axis_out[out_num_axis]);
  RemoveSize1Dims(shape_out, out_num_dims, axis_out, out_num_axis);
  // Fold adjacent dimensions that share the same reduced/kept status,
  // walking from the innermost dimension outward. `j` tracks the sorted
  // axis entry that could match the dimension currently being examined.
  if (out_num_axis > 0) {
    int64_t j = out_num_axis - 1;
    bool previous_here = (axis_out[j] == out_num_dims - 1);
    if (previous_here) {
      j -= 1;
    }
    for (int64_t i = out_num_dims - 2; i >= 0; --i) {
      bool current_here = j >= 0 ? (axis_out[j] == i) : false;
      if (current_here == previous_here) {
        // Dims i and i+1 are both reduced or both kept: merge them.
        shape_out[i] *= shape_out[i + 1];
        for (int64_t k = i + 1; k + 1 < out_num_dims; ++k) {
          shape_out[k] = shape_out[k + 1];
        }
        // Re-index axes past the merged dimension.
        for (int64_t k = 0; k < out_num_axis; ++k) {
          if (axis_out[k] > i) {
            axis_out[k] -= 1;
          }
        }
        if (current_here) {
          // Both dims were reduced; one axis entry now covers the merge.
          for (int64_t k = j + 1; k + 1 < out_num_axis; ++k) {
            axis_out[k] = axis_out[k + 1];
          }
          out_num_axis -= 1;
        }
        out_num_dims -= 1;
      }
      if (current_here) {
        j -= 1;
      }
      previous_here = current_here;
    }
  }
  return true;
}
}
}
#endif | #include "tensorflow/lite/kernels/internal/optimized/reduce_utils.h"
#include <gmock/gmock.h>
namespace tflite {
namespace reduce_utils {
namespace {
using ::testing::ElementsAreArray;
void TestFunction(const std::vector<int>& axis_in,
const std::vector<int>& shape_in,
const std::vector<int>& expected_axis_out,
const std::vector<int>& expected_shape_out) {
int num_dims = shape_in.size();
int expected_out_num_dims = expected_shape_out.size();
int actual_out_num_dims;
int expected_out_num_axis = expected_axis_out.size();
int actual_out_num_axis;
std::vector<int> actual_shape_out(num_dims);
std::vector<int> actual_axis_out(num_dims);
ResolveAxis(shape_in.size(), axis_in.data(), axis_in.size(),
actual_axis_out.data(), actual_out_num_axis, shape_in.data(),
actual_shape_out.data(), actual_out_num_dims);
EXPECT_EQ(expected_out_num_dims, actual_out_num_dims);
EXPECT_EQ(expected_out_num_axis, actual_out_num_axis);
EXPECT_THAT(expected_shape_out,
ElementsAreArray(actual_shape_out.data(), expected_out_num_dims));
EXPECT_THAT(expected_axis_out,
ElementsAreArray(actual_axis_out.data(), expected_out_num_axis));
}
TEST(ResolveAxisTest, Flatten_0_1_2) {
const std::vector<int> axis_in = {0, 1, 2};
const std::vector<int> shape_in = {2, 3, 4, 5};
const std::vector<int> expected_shape_out{24, 5};
const std::vector<int> expected_axis_out{0};
TestFunction(axis_in, shape_in, expected_axis_out, expected_shape_out);
}
TEST(ResolveAxisTest, Flatten_0_1_2_3) {
const std::vector<int> axis_in = {3, 2};
const std::vector<int> shape_in = {2, 3, 4, 5};
const std::vector<int> expected_shape_out{6, 20};
const std::vector<int> expected_axis_out{1};
TestFunction(axis_in, shape_in, expected_axis_out, expected_shape_out);
}
TEST(ResolveAxisTest, ZeroDims) {
const std::vector<int> axis_in = {};
const std::vector<int> shape_in = {};
const std::vector<int> expected_shape_out{};
const std::vector<int> expected_axis_out{};
TestFunction(axis_in, shape_in, expected_axis_out, expected_shape_out);
}
TEST(ResolveAxisTest, DoNothing) {
const std::vector<int> axis_in = {0};
const std::vector<int> shape_in = {4, 5};
const std::vector<int> expected_shape_out{4, 5};
const std::vector<int> expected_axis_out{0};
TestFunction(axis_in, shape_in, expected_axis_out, expected_shape_out);
}
TEST(ResolveAxisTest, NegativeAxis) {
const std::vector<int> axis_in = {-2};
const std::vector<int> shape_in = {4, 3};
const std::vector<int> expected_shape_out{4, 3};
const std::vector<int> expected_axis_out{0};
TestFunction(axis_in, shape_in, expected_axis_out, expected_shape_out);
}
TEST(ResolveAxisTest, NegativeAxisFold) {
const std::vector<int> axis_in = {-1};
const std::vector<int> shape_in = {4, 3, 5};
const std::vector<int> expected_shape_out{12, 5};
const std::vector<int> expected_axis_out{1};
TestFunction(axis_in, shape_in, expected_axis_out, expected_shape_out);
}
TEST(ResolveAxisTest, DuplicateAxis) {
  // Repeated axes {2, 1, 2, 1, 2, 1} deduplicate to {1, 2}; dims 1 and 2 of
  // {4, 3, 2} fold into 6, giving shape {4, 6} with resolved axis {1}.
  const std::vector<int> input_axes = {2, 1, 2, 1, 2, 1};
  const std::vector<int> input_shape = {4, 3, 2};
  const std::vector<int> resolved_shape = {4, 6};
  const std::vector<int> resolved_axes = {1};
  TestFunction(input_axes, input_shape, resolved_axes, resolved_shape);
}
TEST(ResolveAxisTest, DuplicateNegativeAxis) {
  // Mixed positive and negative duplicates {2, -1, -2, -1, 2, 1} all
  // normalize to {1, 2}; same folding as the DuplicateAxis case.
  const std::vector<int> input_axes = {2, -1, -2, -1, 2, 1};
  const std::vector<int> input_shape = {4, 3, 2};
  const std::vector<int> resolved_shape = {4, 6};
  const std::vector<int> resolved_axes = {1};
  TestFunction(input_axes, input_shape, resolved_axes, resolved_shape);
}
TEST(ResolveAxisTest, RemoveSize1Dim) {
  // Reducing over a size-1 dim (axis 0 of {1, 4, 3, 1}) is a no-op on the
  // data; size-1 dims drop out, leaving {4, 3} and no remaining axes.
  const std::vector<int> input_axes = {0};
  const std::vector<int> input_shape = {1, 4, 3, 1};
  const std::vector<int> resolved_shape = {4, 3};
  const std::vector<int> resolved_axes = {};
  TestFunction(input_axes, input_shape, resolved_axes, resolved_shape);
}
TEST(ResolveAxisTest, OneSize1DimToScalar) {
  // A single size-1 dim reduced over axis 0 collapses to a scalar.
  const std::vector<int> input_axes = {0};
  const std::vector<int> input_shape = {1};
  const std::vector<int> resolved_shape = {};
  const std::vector<int> resolved_axes = {};
  TestFunction(input_axes, input_shape, resolved_axes, resolved_shape);
}
TEST(ResolveAxisTest, InterleavedSize1Dim) {
  // Size-1 dims interleaved with real ones: axes {1, 3} of
  // {1, 2, 1, 4, 1, 7} resolve to shape {8, 7} with axis {0}.
  const std::vector<int> input_axes = {1, 3};
  const std::vector<int> input_shape = {1, 2, 1, 4, 1, 7};
  const std::vector<int> resolved_shape = {8, 7};
  const std::vector<int> resolved_axes = {0};
  TestFunction(input_axes, input_shape, resolved_axes, resolved_shape);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/internal/optimized/reduce_utils.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/internal/optimized/reduce_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c7cec2c6-4c63-4839-b12d-e64a94d8516d | cpp | tensorflow/tensorflow | depthwiseconv_float | tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h | tensorflow/lite/kernels/internal/depthwiseconv_float_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DEPTHWISECONV_FLOAT_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DEPTHWISECONV_FLOAT_H_
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
// Reference (scalar, unoptimized) float depthwise convolution.
//
// Tensors are rank-4 and indexed as (batch, y, x, channel) via Offset().
// Each input channel `ic` produces `depth_multiplier` output channels
// oc = ic * depth_multiplier + m.  Out-of-bounds filter taps (padding) are
// skipped, i.e. treated as zero.  The result plus per-channel bias (if any)
// is clamped to [float_activation_min, float_activation_max].
inline void DepthwiseConv(
    const DepthwiseParams& params, const RuntimeShape& input_shape,
    const float* input_data, const RuntimeShape& filter_shape,
    const float* filter_data, const RuntimeShape& bias_shape,
    const float* bias_data, const RuntimeShape& output_shape,
    float* output_data) {
  const int stride_width = params.stride_width;
  const int stride_height = params.stride_height;
  const int dilation_width_factor = params.dilation_width_factor;
  const int dilation_height_factor = params.dilation_height_factor;
  const int pad_width = params.padding_values.width;
  const int pad_height = params.padding_values.height;
  const int depth_multiplier = params.depth_multiplier;
  const float output_activation_min = params.float_activation_min;
  const float output_activation_max = params.float_activation_max;
  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
  const int batches = MatchingDim(input_shape, 0, output_shape, 0);
  const int output_depth = MatchingDim(filter_shape, 3, output_shape, 3);
  const int input_height = input_shape.Dims(1);
  const int input_width = input_shape.Dims(2);
  const int input_depth = input_shape.Dims(3);
  const int filter_height = filter_shape.Dims(1);
  const int filter_width = filter_shape.Dims(2);
  const int output_height = output_shape.Dims(1);
  const int output_width = output_shape.Dims(2);
  // Depthwise invariant: every input channel fans out to depth_multiplier
  // output channels, and the bias (when present) is per output channel.
  TFLITE_DCHECK_EQ(output_depth, input_depth * depth_multiplier);
  TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth);
  for (int b = 0; b < batches; ++b) {
    for (int out_y = 0; out_y < output_height; ++out_y) {
      for (int out_x = 0; out_x < output_width; ++out_x) {
        for (int ic = 0; ic < input_depth; ++ic) {
          for (int m = 0; m < depth_multiplier; m++) {
            const int oc = m + ic * depth_multiplier;
            // Top-left corner of the filter window in input coordinates;
            // may be negative when padding applies.
            const int in_x_origin = (out_x * stride_width) - pad_width;
            const int in_y_origin = (out_y * stride_height) - pad_height;
            float total = 0.f;
            for (int filter_y = 0; filter_y < filter_height; ++filter_y) {
              for (int filter_x = 0; filter_x < filter_width; ++filter_x) {
                const int in_x = in_x_origin + dilation_width_factor * filter_x;
                const int in_y =
                    in_y_origin + dilation_height_factor * filter_y;
                // Skip taps that fall outside the input (zero padding).
                if ((in_x >= 0) && (in_x < input_width) && (in_y >= 0) &&
                    (in_y < input_height)) {
                  float input_value =
                      input_data[Offset(input_shape, b, in_y, in_x, ic)];
                  float filter_value = filter_data[Offset(
                      filter_shape, 0, filter_y, filter_x, oc)];
                  total += (input_value * filter_value);
                }
              }
            }
            float bias_value = 0.0f;
            if (bias_data) {
              bias_value = bias_data[oc];
            }
            output_data[Offset(output_shape, b, out_y, out_x, oc)] =
                ActivationFunctionWithMinMax(total + bias_value,
                                             output_activation_min,
                                             output_activation_max);
          }
        }
      }
    }
  }
}
}
}
#endif | #include <algorithm>
#include <cmath>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/test_util.h"
#include "tensorflow/lite/kernels/internal/types.h"
#define ALLOW_SLOW_GENERIC_DEPTHWISECONV_FALLBACK
#include "tensorflow/lite/kernels/internal/optimized/cpu_check.h"
#include "tensorflow/lite/kernels/internal/optimized/depthwiseconv_float.h"
#include "tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h"
namespace tflite {
namespace {
// Runs both the reference and the optimized depthwise conv on the same
// inputs and asserts the mean absolute difference, relative to the largest
// reference magnitude, stays below 1e-5.
void TestOneDepthwiseConv(
    const DepthwiseParams& params, const RuntimeShape& input_shape,
    const float* input_data, const RuntimeShape& filter_shape,
    const float* filter_data, const RuntimeShape& bias_shape,
    const float* bias_data, const RuntimeShape& output_shape) {
  const int output_size = output_shape.FlatSize();
  std::vector<float> optimized_output(output_size);
  std::vector<float> reference_output(output_size);
  // Golden result from the reference kernel.
  reference_ops::DepthwiseConv(params, input_shape, input_data, filter_shape,
                               filter_data, bias_shape, bias_data, output_shape,
                               reference_output.data());
  // Result under test: single-threaded slice covering all output rows.
  optimized_ops::DepthwiseConvImpl(
      params, input_shape, input_data, filter_shape, filter_data, bias_shape,
      bias_data, output_shape, optimized_output.data(), CpuFlags(),
      0,
      output_shape.Dims(1), 1);
  double abs_diff_sum = 0;
  float max_ref_magnitude = 0;
  for (int i = 0; i < output_size; i++) {
    abs_diff_sum += std::abs(optimized_output[i] - reference_output[i]);
    max_ref_magnitude =
        std::max(max_ref_magnitude, std::abs(reference_output[i]));
  }
  // Only compute the relative error when there is any difference at all
  // (also avoids a 0/0 when the reference output is identically zero).
  if (abs_diff_sum != 0.f) {
    const float mean_diff = static_cast<float>(abs_diff_sum / output_size);
    const float relative_error = std::abs(mean_diff) / max_ref_magnitude;
    ASSERT_LT(relative_error, 1e-5f);
  }
}
// Draws one random depthwise-conv configuration, builds random inputs for it
// and runs the reference-vs-optimized comparison.  Returns false (without
// running anything) when the drawn configuration is unusable: output depth
// above the supported cap, or no valid output size for the drawn
// shape/stride/dilation/padding combination.
bool TryTestOneDepthwiseConv() {
  // Random geometry.  The exponential distributions bias draws toward small
  // sizes while still occasionally exercising large ones.
  const int batch = UniformRandomInt(1, 2);
  const int input_depth = ExponentialRandomPositiveInt(0.9f, 6, 50);
  const int input_width = ExponentialRandomPositiveInt(0.9f, 20, 200);
  const int input_height = ExponentialRandomPositiveInt(0.9f, 20, 200);
  const int filter_width = ExponentialRandomPositiveInt(0.9f, 4, 10);
  const int filter_height = ExponentialRandomPositiveInt(0.9f, 4, 10);
  const int depth_multiplier = ExponentialRandomPositiveInt(0.8f, 6, 50);
  const int stride = ExponentialRandomPositiveInt(0.9f, 3, 8);
  const int output_depth = input_depth * depth_multiplier;
  const int dilation_width_factor = RandomElement(std::vector<int>({1, 2, 4}));
  const int dilation_height_factor = RandomElement(std::vector<int>({1, 2, 4}));
  // Random fused activation; converted to a [min, max] clamp.
  float output_activation_min, output_activation_max;
  FusedActivationFunctionType ac =
      RandomElement(std::vector<FusedActivationFunctionType>(
          {FusedActivationFunctionType::kNone,
           FusedActivationFunctionType::kRelu,
           FusedActivationFunctionType::kRelu1,
           FusedActivationFunctionType::kRelu6}));
  GetActivationMinMax(ac, &output_activation_min, &output_activation_max);
  // Reject configurations beyond the kernels' supported channel count.
  const int kMaxSupportedOutputDepth = 1024;
  if (output_depth > kMaxSupportedOutputDepth) {
    return false;
  }
  RuntimeShape input_shape_inference(
      {batch, input_height, input_width, input_depth});
  RuntimeShape output_shape_inference;
  int pad_width, pad_height;
  const auto padding_type =
      UniformRandomInt(0, 1) ? PaddingType::kSame : PaddingType::kValid;
  // Derive output shape and padding offsets; bail out when the combination
  // yields no valid output.
  if (!ComputeConvSizes(input_shape_inference, output_depth, filter_width,
                        filter_height, stride, dilation_width_factor,
                        dilation_height_factor, padding_type,
                        &output_shape_inference, &pad_width, &pad_height)) {
    return false;
  }
  RuntimeShape filter_shape_inference(
      {1, filter_height, filter_width, output_depth});
  RuntimeShape bias_shape_inference({1, 1, 1, output_depth});
  const int input_buffer_size = input_shape_inference.FlatSize();
  const int filter_buffer_size = filter_shape_inference.FlatSize();
  std::vector<float> input_data(input_buffer_size);
  std::vector<float> filter_data(filter_buffer_size);
  std::vector<float> bias_data(output_depth);
  // Bias amplitude is scaled to the maximum possible accumulator magnitude
  // so it stays comparable to the convolution sum.
  const float input_amplitude = 1.f;
  const float filter_amplitude = 1.f;
  const float bias_amplitude =
      filter_width * filter_height * input_amplitude * filter_amplitude;
  FillRandom(&input_data, -input_amplitude, input_amplitude);
  FillRandom(&filter_data, -filter_amplitude, filter_amplitude);
  FillRandom(&bias_data, -bias_amplitude, bias_amplitude);
  DepthwiseParams op_params;
  // NOTE(review): padding_type is set to kSame unconditionally even though
  // the sizes above may have been computed for kValid; the explicit
  // width/height offsets below are what carry the padding — confirm the
  // type field is not consumed by the kernels under test.
  op_params.padding_type = PaddingType::kSame;
  op_params.padding_values.width = pad_width;
  op_params.padding_values.height = pad_height;
  op_params.stride_width = stride;
  op_params.stride_height = stride;
  op_params.dilation_width_factor = dilation_width_factor;
  op_params.dilation_height_factor = dilation_height_factor;
  op_params.depth_multiplier = depth_multiplier;
  op_params.float_activation_min = output_activation_min;
  op_params.float_activation_max = output_activation_max;
  TestOneDepthwiseConv(op_params, input_shape_inference, input_data.data(),
                       filter_shape_inference, filter_data.data(),
                       bias_shape_inference, bias_data.data(),
                       output_shape_inference);
  return true;
}
// Keeps drawing random configurations until one is valid and has been run.
void TestOneDepthwiseConv() {
  for (;;) {
    if (TryTestOneDepthwiseConv()) {
      return;
    }
  }
}
TEST(TestDepthwiseConv, TestDepthwiseConv) {
  // Fuzz the optimized kernel against the reference over many random shapes.
  constexpr int kTestsToRun = 10 * 1000;
  for (int iteration = 0; iteration < kTestsToRun; ++iteration) {
    TestOneDepthwiseConv();
  }
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/internal/depthwiseconv_float_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a0b74122-fd6d-4716-ae45-35839fa3648e | cpp | tensorflow/tensorflow | leaky_relu | tensorflow/lite/kernels/internal/reference/leaky_relu.h | tensorflow/lite/delegates/xnnpack/leaky_relu_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_LEAKY_RELU_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_LEAKY_RELU_H_
#include <algorithm>
#include <limits>
#include "tensorflow/lite/kernels/internal/common.h"
namespace tflite {
namespace reference_ops {
// Elementwise float LeakyRelu: y = x for x > 0, y = alpha * x otherwise.
inline void LeakyRelu(const tflite::LeakyReluParams& params,
                      const RuntimeShape& input_shape, const float* input_data,
                      const RuntimeShape& output_shape, float* output_data) {
  const int size = MatchingFlatSize(input_shape, output_shape);
  for (int idx = 0; idx < size; ++idx) {
    const float x = input_data[idx];
    output_data[idx] = (x > 0) ? x : x * params.alpha;
  }
}
// Quantized LeakyRelu.  Each element is re-centered on the input zero point,
// rescaled through either the identity multiplier/shift (non-negative values)
// or the alpha multiplier/shift (negative values), re-centered on the output
// zero point, and clamped to the representable range of T.
template <typename T>
inline void QuantizeLeakyRelu(const LeakyReluParams& params,
                              const RuntimeShape& input_shape,
                              const T* input_data,
                              const RuntimeShape& output_shape,
                              T* output_data) {
  const int flat_size = MatchingFlatSize(input_shape, output_shape);
  static const int32_t kQuantizedMin = std::numeric_limits<T>::min();
  static const int32_t kQuantizedMax = std::numeric_limits<T>::max();
  for (int i = 0; i < flat_size; ++i) {
    const int32_t centered_input = input_data[i] - params.input_offset;
    // Pick the rescaling parameters according to the sign of the input.
    const bool negative = centered_input < 0;
    const int32_t multiplier = negative ? params.output_multiplier_alpha
                                        : params.output_multiplier_identity;
    const int32_t shift = negative ? params.output_shift_alpha
                                   : params.output_shift_identity;
    const int32_t unclamped =
        params.output_offset +
        MultiplyByQuantizedMultiplier(centered_input, multiplier, shift);
    output_data[i] = static_cast<T>(
        std::min(kQuantizedMax, std::max(kQuantizedMin, unclamped)));
  }
}
}
}
#endif | #include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/delegates/xnnpack/leaky_relu_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
namespace tflite {
namespace xnnpack {
TEST(LeakyRelu, 4D) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  // Each tensor dimension is drawn uniformly from [2, 5].
  std::random_device random_device;
  auto prng = std::mt19937(random_device());
  auto random_dim =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(prng));
  const auto batch = random_dim();
  const auto height = random_dim();
  const auto width = random_dim();
  const auto channels = random_dim();
  LeakyReluTester()
      .Shape({batch, height, width, channels})
      .Test(xnnpack_delegate.get());
}
TEST(LeakyRelu, 3D) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  // Each tensor dimension is drawn uniformly from [2, 5].
  std::random_device random_device;
  auto prng = std::mt19937(random_device());
  auto random_dim =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(prng));
  const auto batch = random_dim();
  const auto width = random_dim();
  const auto channels = random_dim();
  LeakyReluTester()
      .Shape({batch, width, channels})
      .Test(xnnpack_delegate.get());
}
TEST(LeakyRelu, 2D) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  // Each tensor dimension is drawn uniformly from [2, 5].
  std::random_device random_device;
  auto prng = std::mt19937(random_device());
  auto random_dim =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(prng));
  const auto batch = random_dim();
  const auto channels = random_dim();
  LeakyReluTester().Shape({batch, channels}).Test(xnnpack_delegate.get());
}
TEST(LeakyRelu, 1D) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  // The single dimension is drawn uniformly from [2, 5].
  std::random_device random_device;
  auto prng = std::mt19937(random_device());
  auto random_dim =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(prng));
  const auto batch = random_dim();
  LeakyReluTester().Shape({batch}).Test(xnnpack_delegate.get());
}
TEST(LeakyRelu, NegativeSlope) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  // Each tensor dimension is drawn uniformly from [2, 5].
  std::random_device random_device;
  auto prng = std::mt19937(random_device());
  auto random_dim =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(prng));
  const auto batch = random_dim();
  const auto height = random_dim();
  const auto width = random_dim();
  const auto channels = random_dim();
  // Exercise a non-default (negative) slope coefficient.
  LeakyReluTester()
      .Shape({batch, height, width, channels})
      .NegativeSlope(-0.75f)
      .Test(xnnpack_delegate.get());
}
TEST(LeakyRelu, MultiThreading) {
  // Run the delegate with two worker threads instead of the default.
  TfLiteXNNPackDelegateOptions delegate_options =
      TfLiteXNNPackDelegateOptionsDefault();
  delegate_options.num_threads = 2;
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
                       TfLiteXNNPackDelegateDelete);
  // Each tensor dimension is drawn uniformly from [2, 5].
  std::random_device random_device;
  auto prng = std::mt19937(random_device());
  auto random_dim =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(prng));
  const auto batch = random_dim();
  const auto height = random_dim();
  const auto width = random_dim();
  const auto channels = random_dim();
  LeakyReluTester()
      .Shape({batch, height, width, channels})
      .Test(xnnpack_delegate.get());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/internal/reference/leaky_relu.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/leaky_relu_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9662c336-34ba-4275-bdd6-126ea9e0e928 | cpp | tensorflow/tensorflow | depthwise_conv_hybrid | tensorflow/lite/kernels/internal/optimized/integer_ops/depthwise_conv_hybrid.h | tensorflow/lite/kernels/depthwise_conv_hybrid_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_INTEGER_OPS_DEPTHWISE_CONV_HYBRID_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_INTEGER_OPS_DEPTHWISE_CONV_HYBRID_H_
#include <algorithm>
#include <memory>
#include "ruy/profiler/instrumentation.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/cpu_backend_threadpool.h"
#include "tensorflow/lite/kernels/internal/optimized/cpu_check.h"
#include "tensorflow/lite/kernels/internal/optimized/depthwiseconv_3x3_filter_common.h"
#include "tensorflow/lite/kernels/internal/optimized/integer_ops/depthwise_conv.h"
#include "tensorflow/lite/kernels/internal/optimized/integer_ops/depthwise_conv_hybrid_3x3_filter.h"
#include "tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace optimized_integer_ops {
namespace depthwise_conv {
// Zeroes the accumulator scratch covering `num_output_pixels` output pixels
// of `output_depth` channels each.
inline void DepthwiseConvInitAccBuffer(int num_output_pixels, int output_depth,
                                       int32* acc_buffer) {
  std::fill_n(acc_buffer, num_output_pixels * output_depth, 0);
}
// Generic hybrid (int8 input/filter -> float output) depthwise convolution
// worker.  Integer products are accumulated row by row into `acc_buffer`,
// then dequantized with the per-batch input scale (`input_scales`) and
// per-channel filter scales, offset by the float bias, and clamped to the
// fused-activation range.
//
// `thread_start`/`thread_end` delimit this worker's slice along
// `thread_dim` (0 = batch dimension, 1 = output-row dimension).
// `acc_buffer` must hold at least `output_depth` int32 entries;
// `acc_buffer_size` is its total capacity.
static void DoDepthwiseConvHybridGeneral(
    const DepthwiseParams& params, const float* input_scales,
    const RuntimeShape& input_shape, const int8* input_data,
    const RuntimeShape& filter_shape, const int8* filter_data,
    const RuntimeShape& bias_shape, const float* bias_data,
    const RuntimeShape& output_shape, float* output_data,
    const float* per_channel_scales, const int32_t* input_offsets,
    int thread_start, int thread_end, int thread_dim, int32* acc_buffer,
    int32 acc_buffer_size) {
  const int stride_width = params.stride_width;
  const int stride_height = params.stride_height;
  const int pad_width = params.padding_values.width;
  const int pad_height = params.padding_values.height;
  const int depth_multiplier = params.depth_multiplier;
  const float output_activation_min = params.float_activation_min;
  const float output_activation_max = params.float_activation_max;
  const int dilation_width_factor = params.dilation_width_factor;
  const int dilation_height_factor = params.dilation_height_factor;
  const int batches = MatchingDim(input_shape, 0, output_shape, 0);
  const int output_depth = MatchingDim(filter_shape, 3, output_shape, 3);
  const int input_height = input_shape.Dims(1);
  const int input_width = input_shape.Dims(2);
  const int input_depth = input_shape.Dims(3);
  const int filter_height = filter_shape.Dims(1);
  const int filter_width = filter_shape.Dims(2);
  const int output_rows = output_shape.Dims(1);
  const int output_width = output_shape.Dims(2);
  TFLITE_DCHECK_GE(acc_buffer_size, output_depth);
  // Number of whole output pixels the scratch buffer can accumulate at once.
  const int kOutputPixelsInAccBuffer = acc_buffer_size / output_depth;
  const int kAccBufferActualSize = kOutputPixelsInAccBuffer * output_depth;
  TFLITE_DCHECK_LE(kOutputPixelsInAccBuffer * output_depth,
                   kAccBufferActualSize);
  TFLITE_DCHECK_LE(kAccBufferActualSize, acc_buffer_size);
  TFLITE_DCHECK_GE(kOutputPixelsInAccBuffer, 1);
  TFLITE_DCHECK(thread_dim == 0 || thread_dim == 1);
  // Select a specialized row-accumulation kernel for common
  // (stride, input_depth, depth_multiplier) combinations; FIXED_INPUT_DEPTH
  // of 0 means "any input depth".  Falls back to the fully generic kernel.
  using row_accum_func_t = decltype(&QuantizedDepthwiseConvAccumRowGeneric);
  row_accum_func_t row_accum_func = nullptr;
#define TFMINI_USE_DEPTHWISECONV_KERNEL(ALLOW_STRIDED, FIXED_INPUT_DEPTH, \
                                        FIXED_DEPTH_MULTIPLIER) \
  if (!row_accum_func && (stride_width == 1 || ALLOW_STRIDED) && \
      (input_depth == FIXED_INPUT_DEPTH || FIXED_INPUT_DEPTH == 0) && \
      depth_multiplier == FIXED_DEPTH_MULTIPLIER) { \
    row_accum_func = \
        QuantizedDepthwiseConvAccumRow<ALLOW_STRIDED, FIXED_INPUT_DEPTH, \
                                       FIXED_DEPTH_MULTIPLIER>; \
  }
#ifdef USE_NEON
  TFMINI_USE_DEPTHWISECONV_KERNEL(false, 1, 2)
  TFMINI_USE_DEPTHWISECONV_KERNEL(false, 2, 2)
  TFMINI_USE_DEPTHWISECONV_KERNEL(false, 4, 2)
  TFMINI_USE_DEPTHWISECONV_KERNEL(false, 1, 4)
  TFMINI_USE_DEPTHWISECONV_KERNEL(false, 4, 1)
  TFMINI_USE_DEPTHWISECONV_KERNEL(false, 4, 4)
  TFMINI_USE_DEPTHWISECONV_KERNEL(false, 8, 1)
  TFMINI_USE_DEPTHWISECONV_KERNEL(false, 2, 8)
  TFMINI_USE_DEPTHWISECONV_KERNEL(false, 2, 1)
  TFMINI_USE_DEPTHWISECONV_KERNEL(false, 12, 1)
  TFMINI_USE_DEPTHWISECONV_KERNEL(true, 8, 2)
  TFMINI_USE_DEPTHWISECONV_KERNEL(true, 16, 1)
  TFMINI_USE_DEPTHWISECONV_KERNEL(true, 1, 16)
  TFMINI_USE_DEPTHWISECONV_KERNEL(true, 1, 20)
  TFMINI_USE_DEPTHWISECONV_KERNEL(true, 1, 32)
  TFMINI_USE_DEPTHWISECONV_KERNEL(true, 1, 8)
  TFMINI_USE_DEPTHWISECONV_KERNEL(true, 8, 1)
  TFMINI_USE_DEPTHWISECONV_KERNEL(true, 2, 1)
  TFMINI_USE_DEPTHWISECONV_KERNEL(true, 4, 1)
  TFMINI_USE_DEPTHWISECONV_KERNEL(true, 0, 1)
  TFMINI_USE_DEPTHWISECONV_KERNEL(true, 0, 2)
  TFMINI_USE_DEPTHWISECONV_KERNEL(true, 0, 3)
#endif
  if (!row_accum_func) {
    row_accum_func = QuantizedDepthwiseConvAccumRowGeneric;
  }
#undef TFMINI_USE_DEPTHWISECONV_KERNEL
  const int input_height_stride = input_shape.Dims(3) * input_shape.Dims(2);
  const int input_batch_stride = input_height_stride * input_shape.Dims(1);
  const int filter_height_stride = filter_shape.Dims(3) * filter_shape.Dims(2);
  // Restrict the work range to this worker's slice along thread_dim and
  // advance the output pointer to the slice's first element.
  int batch_start = 0;
  int batch_end = batches;
  int row_start = 0;
  int row_end = output_rows;
  int output_ptr_offset = 0;
  switch (thread_dim) {
    case 0:
      TFLITE_DCHECK_GE(thread_start, 0);
      TFLITE_DCHECK_LE(thread_end, batches);
      batch_start = thread_start;
      batch_end = thread_end;
      output_ptr_offset = batch_start * FlatSizeSkipDim(output_shape, 0);
      break;
    case 1:
      TFLITE_DCHECK_GE(thread_start, 0);
      TFLITE_DCHECK_LE(thread_end, output_rows);
      row_start = thread_start;
      row_end = thread_end;
      output_ptr_offset = row_start * output_width * output_depth;
      break;
  }
  float* output_ptr = output_data + output_ptr_offset;
  // When row-sliced, skip the rows outside [row_start, row_end) while
  // stepping from one batch to the next.
  int batch_step =
      (output_rows + row_start - row_end) * output_width * output_depth;
  for (int b = batch_start; b < batch_end; ++b) {
    // Quantization parameters are per batch for hybrid inputs.
    float input_scale = input_scales[b];
    int32_t input_offset = input_offsets[b];
    for (int out_y = row_start; out_y < row_end; ++out_y) {
      const int in_y_origin = (out_y * stride_height) - pad_height;
      // Clip the filter window to the rows that land inside the input.
      const int filter_y_start =
          std::max(0, (-in_y_origin + dilation_height_factor - 1) /
                          dilation_height_factor);
      const int filter_y_end =
          std::min(filter_height,
                   (input_height - in_y_origin + dilation_height_factor - 1) /
                       dilation_height_factor);
      // Process the output row in chunks of kOutputPixelsInAccBuffer pixels.
      for (int out_x_buffer_start = 0; out_x_buffer_start < output_width;
           out_x_buffer_start += kOutputPixelsInAccBuffer) {
        const int out_x_buffer_end = std::min(
            output_width, out_x_buffer_start + kOutputPixelsInAccBuffer);
        const int num_output_pixels = out_x_buffer_end - out_x_buffer_start;
        DepthwiseConvInitAccBuffer(num_output_pixels, output_depth, acc_buffer);
        // Accumulate one filter row at a time; note the input offset is
        // negated for the row kernels.
        for (int filter_y = filter_y_start; filter_y < filter_y_end;
             ++filter_y) {
          const int in_y = in_y_origin + dilation_height_factor * filter_y;
          row_accum_func(
              stride_width, dilation_width_factor, input_depth, input_width,
              input_data + in_y * input_height_stride + b * input_batch_stride,
              -input_offset, pad_width, depth_multiplier, filter_width,
              filter_data + filter_y * filter_height_stride, out_x_buffer_start,
              out_x_buffer_end, output_depth, acc_buffer);
        }
        // Dequantize the accumulators (scale + bias + clamp) and store.
        gemmlowp::ScopedProfilingLabel label("store");
        const int num_output_values = output_depth * num_output_pixels;
        int c = 0;
        // The NEON path below handles channels in groups of four; the
        // scalar loop afterwards covers the remainder (or everything when
        // NEON is unavailable).
        while (c < output_depth) {
          int target_output_depth = output_depth;
#ifdef USE_NEON
          const float32x4_t output_activation_min_vec =
              vdupq_n_f32(output_activation_min);
          const float32x4_t output_activation_max_vec =
              vdupq_n_f32(output_activation_max);
          const float32x4_t input_scale_32x4 = vdupq_n_f32(input_scale);
          for (; c <= output_depth - 4; c += 4) {
            if ((c + 4) > output_depth) {
              break;
            }
            const float32x4_t channel_scale_32x4 =
                vld1q_f32(per_channel_scales + c);
            const float32x4_t bias_32x4 = vld1q_f32(bias_data + c);
            for (int n = 0; n < num_output_pixels; ++n) {
              int loc = n * output_depth + c;
              int32x4_t acc = vld1q_s32(acc_buffer + loc);
              // acc * channel_scale * input_scale + bias, then clamp.
              float32x4_t float_acc = vcvtq_f32_s32(acc);
              float_acc = vmulq_f32(float_acc, channel_scale_32x4);
              float_acc = vmulq_f32(float_acc, input_scale_32x4);
              float_acc = vaddq_f32(float_acc, bias_32x4);
              float_acc = vmaxq_f32(float_acc, output_activation_min_vec);
              float_acc = vminq_f32(float_acc, output_activation_max_vec);
              vst1q_f32(output_ptr + loc, float_acc);
            }
          }
#endif
          for (; c < target_output_depth; c++) {
            for (int n = 0; n < num_output_pixels; ++n) {
              int loc = n * output_depth + c;
              int32 acc = acc_buffer[loc];
              float float_acc = acc * input_scale * per_channel_scales[c];
              float_acc += bias_data[c];
              float_acc = std::max(float_acc, output_activation_min);
              float_acc = std::min(float_acc, output_activation_max);
              output_ptr[loc] = float_acc;
            }
          }
        }
        output_ptr += num_output_values;
      }
    }
    output_ptr += batch_step;
  }
}
static void DoDepthwiseConvHybridGeneralStatic(
const DepthwiseParams& params, const float* input_scales,
const RuntimeShape& input_shape, const int8* input_data,
const RuntimeShape& filter_shape, const int8* filter_data,
const RuntimeShape& bias_shape, const float* bias_data,
const RuntimeShape& output_shape, float* output_data,
const float* per_channel_scales, const int32_t* input_offsets,
int thread_start, int thread_end, int thread_dim) {
static const int kStaticAccBufferMaxSize = 2048;
int32 stack_acc_buffer[kStaticAccBufferMaxSize];
DoDepthwiseConvHybridGeneral(
params, input_scales, input_shape, input_data, filter_shape, filter_data,
bias_shape, bias_data, output_shape, output_data, per_channel_scales,
input_offsets, thread_start, thread_end, thread_dim, stack_acc_buffer,
kStaticAccBufferMaxSize);
}
inline void DepthwiseConvHybridGeneral(
const DepthwiseParams& params, const float* input_scales,
const RuntimeShape& input_shape, const int8* input_data,
const RuntimeShape& filter_shape, const int8* filter_data,
const RuntimeShape& bias_shape, const float* bias_data,
const RuntimeShape& output_shape, float* output_data,
const float* per_channel_scales, const int32_t* input_offsets,
int thread_start, int thread_end, int thread_dim) {
#ifndef TF_LITE_STATIC_MEMORY
static const int kStaticAccBufferMaxSize = 2048;
const int output_depth = MatchingDim(filter_shape, 3, output_shape, 3);
if (kStaticAccBufferMaxSize < output_depth) {
std::unique_ptr<int32[]> heap_acc_buffer(new int32[output_depth]);
DoDepthwiseConvHybridGeneral(
params, input_scales, input_shape, input_data, filter_shape,
filter_data, bias_shape, bias_data, output_shape, output_data,
per_channel_scales, input_offsets, thread_start, thread_end, thread_dim,
heap_acc_buffer.get(), output_depth);
return;
}
#endif
DoDepthwiseConvHybridGeneralStatic(
params, input_scales, input_shape, input_data, filter_shape, filter_data,
bias_shape, bias_data, output_shape, output_data, per_channel_scales,
input_offsets, thread_start, thread_end, thread_dim);
}
}
// Single-thread-slice entry for the hybrid depthwise convolution.  On
// aarch64 (non-L4T) builds, dispatches to the specialized 3x3 filter kernel
// when the geometry qualifies; otherwise falls through to the generic path.
template <DepthwiseConvOutputRounding kOutputRounding>
inline void DepthwiseConvHybridWithRounding(
    const DepthwiseParams& params, const float* input_scales,
    const RuntimeShape& input_shape, const int8* input_data,
    const RuntimeShape& filter_shape, const int8* filter_data,
    const RuntimeShape& bias_shape, const float* bias_data,
    const RuntimeShape& output_shape, float* output_data,
    const float* per_channel_scales, const int32_t* input_offsets,
    int thread_start, int thread_end, int thread_dim) {
  gemmlowp::ScopedProfilingLabel label("DepthwiseConvHybridInt8/8bit");
  const int depth_multiplier = params.depth_multiplier;
  const int dilation_width_factor = params.dilation_width_factor;
  const int dilation_height_factor = params.dilation_height_factor;
  TFLITE_DCHECK_GE(dilation_width_factor, 1);
  TFLITE_DCHECK_GE(dilation_height_factor, 1);
  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
  const int output_depth = MatchingDim(filter_shape, 3, output_shape, 3);
  const int input_depth = input_shape.Dims(3);
  TFLITE_DCHECK_EQ(output_depth, input_depth * depth_multiplier);
  TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth);
#if defined(__aarch64__) && !defined(GOOGLE_L4T)
  const int stride_width = params.stride_width;
  const int stride_height = params.stride_height;
  const int pad_width = params.padding_values.width;
  const int pad_height = params.padding_values.height;
  if (optimized_ops::depthwise_conv::Fast3x3FilterKernelSupported<
          optimized_ops::depthwise_conv::QuantizationType::kNonPerChannelUint8>(
          input_shape, filter_shape, stride_width, stride_height,
          dilation_width_factor, dilation_height_factor, pad_width, pad_height,
          depth_multiplier, output_shape, 0, nullptr)) {
    gemmlowp::ScopedProfilingLabel specialized_label(
        "DepthwiseConvHybridInt8/8bit/3x3");
    // NOTE(review): the 3x3 fast path is instantiated with kUpward rounding
    // regardless of the kOutputRounding template argument — confirm this is
    // intended.
    optimized_ops::depthwise_conv::DepthwiseConvHybrid3x3FilterPerChannel<
        DepthwiseConvOutputRounding::kUpward>(
        params, input_scales, input_shape, input_data,
        filter_shape, filter_data, bias_shape, bias_data, output_shape,
        output_data, per_channel_scales, input_offsets,
        thread_start, thread_end, thread_dim);
    return;
  }
#endif
  gemmlowp::ScopedProfilingLabel specialized_label(
      "DepthwiseConvHybridInt8/8bit/General");
  depthwise_conv::DepthwiseConvHybridGeneral(
      params, input_scales, input_shape, input_data,
      filter_shape, filter_data, bias_shape, bias_data, output_shape,
      output_data, per_channel_scales, input_offsets,
      thread_start, thread_end, thread_dim);
}
// Per-worker entry point for the hybrid depthwise convolution; forwards to
// the rounding-specialized implementation with away-from-zero rounding.
inline void DepthwiseConvHybridImpl(
    const DepthwiseParams& params, const float* input_scales,
    const RuntimeShape& input_shape, const int8* input_data,
    const RuntimeShape& filter_shape, const int8* filter_data,
    const RuntimeShape& bias_shape, const float* bias_data,
    const RuntimeShape& output_shape, float* output_data,
    const float* per_channel_scales, const int32_t* input_offsets,
    int thread_start, int thread_end, int thread_dim) {
  return DepthwiseConvHybridWithRounding<
      DepthwiseConvOutputRounding::kAwayFromZero>(
      params, input_scales, input_shape, input_data, filter_shape, filter_data,
      bias_shape, bias_data, output_shape, output_data, per_channel_scales,
      input_offsets, thread_start, thread_end, thread_dim);
}
// Threadpool task adapter: runs DepthwiseConvHybridImpl over one slice
// ([thread_start, thread_end) along thread_dim) on a worker thread.
// Holds references/pointers to caller-owned tensors and parameters, so all
// of them must outlive the task's execution.
template <typename T, typename TS>
struct DepthwiseConvHybridWorkerTask : cpu_backend_threadpool::Task {
  DepthwiseConvHybridWorkerTask(const DepthwiseParams& params,
                                const float* input_scales,
                                const RuntimeShape& input_shape,
                                const T* input_data,
                                const RuntimeShape& filter_shape,
                                const T* filter_data,
                                const RuntimeShape& bias_shape,
                                const TS* bias_data,
                                const RuntimeShape& output_shape,
                                float* output_data,
                                const float* per_channel_scales,
                                const int32_t* input_offsets,
                                int thread_start, int thread_end,
                                int thread_dim)
      : params(params),
        input_scales(input_scales),
        input_shape(input_shape),
        input_data(input_data),
        filter_shape(filter_shape),
        filter_data(filter_data),
        bias_shape(bias_shape),
        bias_data(bias_data),
        output_shape(output_shape),
        output_data(output_data),
        per_channel_scales(per_channel_scales),
        input_offsets(input_offsets),
        thread_start(thread_start),
        thread_end(thread_end),
        thread_dim(thread_dim) {}
  // Invoked by the threadpool; computes this task's output slice.
  void Run() override {
    DepthwiseConvHybridImpl(params, input_scales, input_shape,
                            input_data, filter_shape, filter_data,
                            bias_shape, bias_data, output_shape,
                            output_data, per_channel_scales, input_offsets,
                            thread_start, thread_end, thread_dim);
  }
 private:
  const DepthwiseParams& params;
  const float* input_scales;
  const RuntimeShape& input_shape;
  const T* input_data;
  const RuntimeShape& filter_shape;
  const T* filter_data;
  const RuntimeShape& bias_shape;
  const TS* bias_data;
  const RuntimeShape& output_shape;
  float* output_data;
  const float* per_channel_scales;
  const int32_t* input_offsets;
  int thread_start;
  int thread_end;
  int thread_dim;
};
// Top-level hybrid per-channel depthwise convolution.  Chooses whether to
// split the work across the batch dimension or the output-row dimension
// (whichever heuristic yields more parallelism), caps the worker count at
// the backend's thread limit, and either runs inline (single worker) or
// fans the slices out to the cpu_backend threadpool.
inline void DepthwiseConvHybridPerChannel(
    const DepthwiseParams& params, const float* input_scales,
    const RuntimeShape& input_shape, const int8* input_data,
    const RuntimeShape& filter_shape, const int8* filter_data,
    const RuntimeShape& bias_shape, const float* bias_data,
    const RuntimeShape& output_shape, float* output_data,
    const float* per_channel_scales, int32_t* input_offsets,
    CpuBackendContext* cpu_backend_context) {
  gemmlowp::ScopedProfilingLabel label("DepthwiseConvHybridInt8");
  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
  const int output_batches = output_shape.Dims(0);
  const int output_rows = output_shape.Dims(1);
  // Estimate the useful degree of parallelism along each candidate split
  // dimension and pick the better one (0 = batches, 1 = rows).
  int thread_count_batch = HowManyConvThreads(output_shape, filter_shape, 0);
  int thread_count_row = HowManyConvThreads(output_shape, filter_shape, 1);
  int thread_dim, thread_count, thread_dim_size;
  if (thread_count_batch > thread_count_row) {
    thread_dim = 0;
    thread_dim_size = output_batches;
    thread_count = thread_count_batch;
  } else {
    thread_dim = 1;
    thread_dim_size = output_rows;
    thread_count = thread_count_row;
  }
  const int max_threads = cpu_backend_context->max_num_threads();
  thread_count = std::max(1, std::min(thread_count, max_threads));
  if (thread_count == 1) {
    // Single worker: run inline over all output rows.
    DepthwiseConvHybridImpl(params, input_scales, input_shape,
                            input_data, filter_shape, filter_data, bias_shape,
                            bias_data, output_shape, output_data,
                            per_channel_scales, input_offsets,
                            0, output_rows,
                            1);
  } else {
    // Multiple workers: partition [0, thread_dim_size) into contiguous
    // near-equal slices, one task per worker.
    std::vector<DepthwiseConvHybridWorkerTask<int8, float>> tasks;
    tasks.reserve(thread_count);
    int thread_start = 0;
    for (int i = 0; i < thread_count; ++i) {
      int thread_end =
          thread_start + (thread_dim_size - thread_start) / (thread_count - i);
      tasks.emplace_back(params, input_scales, input_shape,
                         input_data, filter_shape, filter_data, bias_shape,
                         bias_data, output_shape, output_data,
                         per_channel_scales, input_offsets, thread_start,
                         thread_end, thread_dim);
      thread_start = thread_end;
    }
    cpu_backend_threadpool::Execute(tasks.size(), tasks.data(),
                                    cpu_backend_context);
  }
}
}
}
#endif | #include <stddef.h>
#include <cstdint>
#include <initializer_list>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/memory/memory.h"
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/core/api/op_resolver.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/kernels/internal/test_util.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/string_type.h"
namespace tflite {
namespace ops {
namespace builtin {
TfLiteRegistration* Register_DEPTHWISE_CONVOLUTION_REF();
TfLiteRegistration* Register_DEPTHWISE_CONVOLUTION_GENERIC_OPT();
TfLiteRegistration* Register_DEPTHWISE_CONVOLUTION_NEON_OPT();
}
}
namespace {
using ::testing::ElementsAreArray;
// Shared test harness for DEPTHWISE_CONV_2D: wires input/filter/bias/output
// tensors, derives the bias tensor's type and quantization from the input
// and filter, and builds a single-op interpreter bound to the given kernel
// registration.
class BaseDepthwiseConvolutionOpModel : public SingleOpModel {
 public:
  BaseDepthwiseConvolutionOpModel(
      TfLiteRegistration* registration, const TensorData& input,
      const TensorData& filter, const TensorData& output, Padding padding_type,
      int dilation_factor = 1, int stride_width = 1, int stride_height = 1,
      ActivationFunctionType fused_activation_function =
          ActivationFunctionType_NONE) {
    input_ = AddInput(input);
    filter_ = AddInput(filter);
    // Bias length equals the filter's output-channel dimension (dim 3).
    int bias_size = GetShape(filter_)[3];
    if (input.type == TensorType_FLOAT32) {
      // Float model: float bias, no quantization parameters needed.
      bias_ = AddInput({TensorType_FLOAT32, {bias_size}});
    } else {
      // Quantized model: int32 bias whose scale is
      // input_scale * filter_scale, per channel when the filter carries
      // per-channel quantization.
      if (filter.per_channel_quantization) {
        std::vector<float> bias_scale(
            filter.per_channel_quantization_scales.size());
        std::vector<int64_t> bias_zero_points(
            filter.per_channel_quantization_scales.size());
        for (size_t i = 0; i < filter.per_channel_quantization_scales.size();
             ++i) {
          bias_scale[i] =
              input.scale * filter.per_channel_quantization_scales[i];
          bias_zero_points[i] = 0;
        }
        TensorData bias{TensorType_INT32,
                        {bias_size},
                        0,
                        0,
                        0,
                        0,
                        true,
                        bias_scale,
                        bias_zero_points,
                        0};
        bias_ = AddInput(bias);
      } else {
        auto bias_scale = GetScale(input_) * GetScale(filter_);
        TensorData bias{TensorType_INT32, {bias_size}, 0, 0, bias_scale};
        bias_ = AddInput(bias);
      }
    }
    output_ = AddOutput(output);
    // depth_multiplier = output_depth / input_depth, per the op definition.
    int input_depth = GetShape(input_)[3];
    int output_depth = GetShape(filter_)[3];
    int depth_mul = output_depth / input_depth;
    SetBuiltinOp(
        BuiltinOperator_DEPTHWISE_CONV_2D,
        BuiltinOptions_DepthwiseConv2DOptions,
        CreateDepthwiseConv2DOptions(
            builder_, padding_type, stride_width, stride_height, depth_mul,
            fused_activation_function, dilation_factor, dilation_factor)
            .Union());
    resolver_ = std::make_unique<SingleOpResolver>(
        BuiltinOperator_DEPTHWISE_CONV_2D, registration);
    BuildInterpreter({GetShape(input_), GetShape(filter_), GetShape(bias_)});
  }
 protected:
  // Tensor indices within the interpreter.
  int input_;
  int filter_;
  int bias_;
  int output_;
};
// Model for the per-channel hybrid variant: float input/bias, int8 filter
// quantized symmetrically per output channel, float output.
class PerChannelHybridDepthwiseConvolutionOpModel
    : public BaseDepthwiseConvolutionOpModel {
 public:
  using BaseDepthwiseConvolutionOpModel::BaseDepthwiseConvolutionOpModel;
  void SetInput(std::initializer_list<float> data) {
    PopulateTensor(input_, data);
  }
  // Quantizes the float filter values per output channel before populating.
  void SetFilter(std::initializer_list<float> data) {
    PerChannelSymmetricQuantizeAndPopulate(filter_, data);
  }
  void SetBias(std::initializer_list<float> data) {
    PopulateTensor(bias_, data);
  }
  // std::vector overloads of the setters above.
  void SetInput(const std::vector<float>& data) {
    PopulateTensor(input_, data);
  }
  void SetFilter(const std::vector<float>& data) {
    PerChannelSymmetricQuantizeAndPopulate(filter_, data);
  }
  void SetBias(const std::vector<float>& data) { PopulateTensor(bias_, data); }
  std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
};
// Kernel registrations under test, keyed by tag name. Heap-allocated and
// never deleted here — presumably to sidestep static-destruction ordering;
// it lives for the duration of the test binary.
const auto kKernelMap = new std::map<string, TfLiteRegistration*>({
    {"Reference", ops::builtin::Register_DEPTHWISE_CONVOLUTION_REF()},
    {"GenericOptimized",
     ops::builtin::Register_DEPTHWISE_CONVOLUTION_GENERIC_OPT()},
    {"NeonOptimized", ops::builtin::Register_DEPTHWISE_CONVOLUTION_NEON_OPT()},
});
// Fixture for the TEST_F accuracy tests below, which compare two specific
// registrations directly (see RandomTest).
class PerChannelHybridDepthwiseConvolutionOptimizedOpTest
    : public SingleOpTest {
 protected:
  const std::map<string, TfLiteRegistration*>& GetKernelMap() override {
    return *kKernelMap;
  }
};
// Parameterized fixture: each TEST_P runs once per kernel tag in kKernelMap
// (see INSTANTIATE_TEST_SUITE_P at the bottom of the file).
class PerChannelHybridDepthwiseConvolutionOpTest : public SingleOpTest {
 protected:
  const std::map<string, TfLiteRegistration*>& GetKernelMap() override {
    return *kKernelMap;
  }
};
void RandomTest(int b, int h, int w, int c, int fs, bool padding, int sw) {
const float element_max = 1.0;
const int input_size = b * h * w * c;
const int filter_size = 1 * fs * fs * c;
const int bias_size = c;
std::vector<float> input_data(input_size);
std::vector<float> filter_data(filter_size);
std::vector<float> bias_data(bias_size);
for (int i = 0; i < input_size; ++i) {
input_data[i] = UniformRandomFloat(-element_max, element_max);
}
for (int i = 0; i < filter_size; ++i) {
filter_data[i] = UniformRandomFloat(-element_max, element_max);
}
for (int i = 0; i < bias_size; ++i) {
bias_data[i] = UniformRandomFloat(-element_max, element_max);
}
const TensorData input({TensorType_FLOAT32, {b, h, w, c}});
const TensorData output({TensorType_FLOAT32, {}});
std::vector<float> scales;
std::vector<int64_t> offsets;
for (int i = 0; i < c; i++) {
scales.push_back(1.0 / 127.0);
offsets.push_back(0.0);
}
const TensorData filter({TensorType_INT8,
{1, fs, fs, c},
0,
0,
0,
0,
true,
scales,
offsets,
3});
PerChannelHybridDepthwiseConvolutionOpModel hybrid_generic(
ops::builtin::Register_DEPTHWISE_CONVOLUTION_REF(), input, filter, output,
padding ? Padding_SAME : Padding_VALID,
1,
sw,
sw);
hybrid_generic.SetInput(input_data);
hybrid_generic.SetFilter(filter_data);
hybrid_generic.SetBias(bias_data);
ASSERT_EQ(hybrid_generic.Invoke(), kTfLiteOk);
std::vector<float> hybrid_generic_output = hybrid_generic.GetOutput();
PerChannelHybridDepthwiseConvolutionOpModel hybrid_optimized(
ops::builtin::Register_DEPTHWISE_CONVOLUTION_NEON_OPT(), input, filter,
output, padding ? Padding_SAME : Padding_VALID,
1,
sw,
sw);
hybrid_optimized.SetInput(input_data);
hybrid_optimized.SetFilter(filter_data);
hybrid_optimized.SetBias(bias_data);
ASSERT_EQ(hybrid_optimized.Invoke(), kTfLiteOk);
std::vector<float> hybrid_optimized_output = hybrid_optimized.GetOutput();
EXPECT_THAT(hybrid_generic_output,
ElementsAreArray(ArrayFloatNear(hybrid_optimized_output)));
}
// Convenience overload: VALID padding, stride 1.
// Fix: the parameters were previously named (b, w, h, c, fs) even though
// they bind positionally to the primary overload's (b, h, w, c, fs) — the
// names now match the callee so height/width are no longer mislabeled.
// Behavior is unchanged for all (positional) callers.
void RandomTest(int b, int h, int w, int c, int fs) {
  RandomTest(b, h, w, c, fs, false, 1);
}
// Reference-vs-NEON parity on random data: VALID padding, stride 1,
// increasing channel counts / decreasing spatial sizes.
TEST_F(PerChannelHybridDepthwiseConvolutionOptimizedOpTest, AccuracyTest32) {
  RandomTest(1, 10, 10, 8, 3);
}
TEST_F(PerChannelHybridDepthwiseConvolutionOptimizedOpTest, AccuracyTest64) {
  RandomTest(1, 112, 112, 64, 3);
}
TEST_F(PerChannelHybridDepthwiseConvolutionOptimizedOpTest, AccuracyTest128) {
  RandomTest(1, 56, 56, 128, 3);
}
TEST_F(PerChannelHybridDepthwiseConvolutionOptimizedOpTest, AccuracyTest256) {
  RandomTest(1, 28, 28, 256, 3);
}
TEST_F(PerChannelHybridDepthwiseConvolutionOptimizedOpTest, AccuracyTest512) {
  RandomTest(1, 14, 14, 512, 3);
}
TEST_F(PerChannelHybridDepthwiseConvolutionOptimizedOpTest, AccuracyTest1024) {
  RandomTest(1, 3, 3, 1024, 3);
}
// Same parity checks as above but with SAME padding (stride 1).
TEST_F(PerChannelHybridDepthwiseConvolutionOptimizedOpTest,
       AccuracyPaddingTest32) {
  RandomTest(1, 112, 112, 32, 3, true, 1);
}
TEST_F(PerChannelHybridDepthwiseConvolutionOptimizedOpTest,
       AccuracyPaddingTest64) {
  RandomTest(1, 112, 112, 64, 3, true, 1);
}
TEST_F(PerChannelHybridDepthwiseConvolutionOptimizedOpTest,
       AccuracyPaddingTest128) {
  RandomTest(1, 56, 56, 128, 3, true, 1);
}
TEST_F(PerChannelHybridDepthwiseConvolutionOptimizedOpTest,
       AccuracyPaddingTest256) {
  RandomTest(1, 28, 28, 256, 3, true, 1);
}
TEST_F(PerChannelHybridDepthwiseConvolutionOptimizedOpTest,
       AccuracyPaddingTest512) {
  RandomTest(1, 14, 14, 512, 3, true, 1);
}
TEST_F(PerChannelHybridDepthwiseConvolutionOptimizedOpTest,
       AccuracyPaddingTest1024) {
  RandomTest(1, 3, 3, 1024, 3, true, 1);
}
// SAME-padding parity check at 4096 channels.
// Fix: the test name was garbled to "AccuracyPaddiacc_buffer_sizengTest4096"
// (the token "acc_buffer_size" was pasted into the middle of
// "AccuracyPaddingTest4096"); restored the intended name. The test body is
// unchanged.
TEST_F(PerChannelHybridDepthwiseConvolutionOptimizedOpTest,
       AccuracyPaddingTest4096) {
  RandomTest(1, 3, 3, 4096, 3, true, 1);
}
// Parity checks with 2x2 stride, VALID padding.
TEST_F(PerChannelHybridDepthwiseConvolutionOptimizedOpTest,
       Accuracy2x2StrideTest32) {
  RandomTest(1, 112, 112, 32, 3, false, 2);
}
TEST_F(PerChannelHybridDepthwiseConvolutionOptimizedOpTest,
       Accuracy2x2StrideTest64) {
  RandomTest(1, 112, 112, 64, 3, false, 2);
}
TEST_F(PerChannelHybridDepthwiseConvolutionOptimizedOpTest,
       Accuracy2x2StrideTest128) {
  RandomTest(1, 56, 56, 128, 3, false, 2);
}
TEST_F(PerChannelHybridDepthwiseConvolutionOptimizedOpTest,
       Accuracy2x2StrideTest256) {
  RandomTest(1, 28, 28, 256, 3, false, 2);
}
TEST_F(PerChannelHybridDepthwiseConvolutionOptimizedOpTest,
       Accuracy2x2StrideTest512) {
  RandomTest(1, 14, 14, 512, 3, false, 2);
}
// NOTE(review): despite the "2x2Stride" name, this case passes stride 1
// (last argument) unlike its siblings above — confirm whether stride 2 was
// intended here.
TEST_F(PerChannelHybridDepthwiseConvolutionOptimizedOpTest,
       Accuracy2x2StrideTest1024) {
  RandomTest(1, 3, 3, 1024, 3, false, 1);
}
// Fixed-value check: 1x2x3x2 float input, 2x2 filter with 4 output channels
// quantized per channel along dim 3 with scales {1, 2, 3, 4}; expected
// values are checked within an absolute tolerance of 0.16.
TEST_P(PerChannelHybridDepthwiseConvolutionOpTest, SimpleTest) {
  PerChannelHybridDepthwiseConvolutionOpModel m(
      GetRegistration(), {TensorType_FLOAT32, {1, 2, 3, 2}},
      {TensorType_INT8,
       {1, 2, 2, 4},
       0,
       0,
       0,
       0,
       true,
       {1, 2, 3, 4},
       {0, 0, 0, 0},
       3},
      {TensorType_FLOAT32, {}}, Padding_VALID);
  m.SetInput({
      3, 2,
      1, -1,
      -2, -3,
      4, 3,
      2, -2,
      -3, -4,
  });
  m.SetFilter(
      {
          1, 2, 3, 4,
          3, 4, 5, 6,
          7, 8, 5, 6,
          3, 4, 1, 2,
      });
  m.SetBias({3, -2, 4, 6});
  ASSERT_EQ(m.Invoke(), kTfLiteOk);
  EXPECT_THAT(
      m.GetOutput(),
      ElementsAreArray(ArrayFloatNear(
          {42.9373, 47.9451, 22.0706, 22.0627, 3, -4.00784, -29.1294, -54.1098},
          0.16)));
}
// 3x3 input and 3x3 filter with 8 channels (VALID padding -> single output
// position); per-channel scales {1,2,3,4,4,3,2,1} along dim 3, zero bias.
TEST_P(PerChannelHybridDepthwiseConvolutionOpTest, Simple3x3FilterTest) {
  PerChannelHybridDepthwiseConvolutionOpModel m(
      GetRegistration(), {TensorType_FLOAT32, {1, 3, 3, 8}},
      {TensorType_INT8,
       {1, 3, 3, 8},
       0,
       0,
       0,
       0,
       true,
       {1, 2, 3, 4, 4, 3, 2, 1},
       {0, 0, 0, 0, 0, 0, 0, 0},
       3},
      {TensorType_FLOAT32, {}}, Padding_VALID);
  m.SetInput({
      1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1,
      0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0,
      1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1,
      0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0});
  m.SetFilter(
      {
          1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8,
          1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8,
          1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8});
  m.SetBias({0, 0, 0, 0, 0, 0, 0, 0});
  ASSERT_EQ(m.Invoke(), kTfLiteOk);
  EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear(
                                 {9, 18, 0, 0, 36, 54, 0, 0}, 0.16)));
}
// Same setup as Simple3x3FilterTest but with SAME padding, so the output
// covers all 3x3 positions (9 positions x 8 channels expected values).
TEST_P(PerChannelHybridDepthwiseConvolutionOpTest,
       Simple3x3FilterPaddingSameTest) {
  PerChannelHybridDepthwiseConvolutionOpModel m(
      GetRegistration(), {TensorType_FLOAT32, {1, 3, 3, 8}},
      {TensorType_INT8,
       {1, 3, 3, 8},
       0,
       0,
       0,
       0,
       true,
       {1, 2, 3, 4, 4, 3, 2, 1},
       {0, 0, 0, 0, 0, 0, 0, 0},
       3},
      {TensorType_FLOAT32, {}}, Padding_SAME);
  m.SetInput({
      1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1,
      0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0,
      1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1,
      0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0});
  m.SetFilter(
      {
          1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8,
          1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8,
          1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8});
  m.SetBias({0, 0, 0, 0, 0, 0, 0, 0});
  ASSERT_EQ(m.Invoke(), kTfLiteOk);
  EXPECT_THAT(m.GetOutput(),
              ElementsAreArray(ArrayFloatNear(
                  {
                      4, 8, 0, 0, 16, 24, 0, 0, 6, 12, 0, 0, 24, 36, 0,
                      0, 4, 8, 0, 0, 16, 24, 0, 0, 6, 12, 0, 0, 24, 36,
                      0, 0, 9, 18, 0, 0, 36, 54, 0, 0, 6, 12, 0, 0, 24,
                      36, 0, 0, 4, 8, 0, 0, 16, 24, 0, 0, 6, 12, 0, 0,
                      24, 36, 0, 0, 4, 8, 0, 0, 16, 24, 0, 0,
                  },
                  0.16)));
}
// Instantiates every TEST_P above once per kernel tag in kKernelMap.
INSTANTIATE_TEST_SUITE_P(
    PerChannelHybridDepthwiseConvolutionOpTest,
    PerChannelHybridDepthwiseConvolutionOpTest,
    ::testing::ValuesIn(SingleOpTest::GetKernelTags(*kKernelMap)));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/internal/optimized/integer_ops/depthwise_conv_hybrid.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/depthwise_conv_hybrid_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
cb11a735-6e22-4839-a3bd-054b7b308c9b | cpp | tensorflow/tensorflow | hard_swish | tensorflow/lite/kernels/internal/reference/hard_swish.h | tensorflow/lite/delegates/xnnpack/hard_swish_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_HARD_SWISH_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_HARD_SWISH_H_
#include <algorithm>
#include "ruy/profiler/instrumentation.h"
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
// Shifts `value` left by `amount` bits, saturating to the int16 range
// instead of overflowing. `amount` must be small enough that (1 << amount)
// fits in an int.
inline int16_t SaturatingLeftShift(int16_t value, int amount) {
  const int64_t shifted = static_cast<int64_t>(value) * (1 << amount);
  const int64_t lo = std::numeric_limits<int16_t>::min();
  const int64_t hi = std::numeric_limits<int16_t>::max();
  return static_cast<int16_t>(std::clamp(shifted, lo, hi));
}
// Q0.15 fixed-point multiply: returns (a * b) / 2^15 truncated toward zero
// (note: division, not an arithmetic shift, so negative products round
// toward zero). The single overflowing case, min * min, saturates to the
// int16 maximum.
inline std::int16_t SaturatingDoublingHighMul(std::int16_t a, std::int16_t b) {
  constexpr std::int16_t kInt16Min = std::numeric_limits<std::int16_t>::min();
  constexpr std::int16_t kInt16Max = std::numeric_limits<std::int16_t>::max();
  if (a == kInt16Min && b == kInt16Min) {
    // (-2^15) * (-2^15) / 2^15 = 2^15 does not fit in int16; saturate.
    return kInt16Max;
  }
  const std::int32_t product = static_cast<std::int32_t>(a) * b;
  return static_cast<std::int16_t>(product / (1 << 15));
}
// Float hard-swish applied elementwise: out = x * relu6(x + 3) / 6.
// Input and output shapes must have matching flat sizes.
template <typename T>
inline void HardSwish(const RuntimeShape& input_shape, const T* input_data,
                      const RuntimeShape& output_shape, T* output_data) {
  ruy::profiler::ScopeLabel label("ReferenceHardSwish/Float");
  const int flat_size = MatchingFlatSize(input_shape, output_shape);
  for (int i = 0; i < flat_size; ++i) {
    // The intermediate is deliberately computed in float, as before.
    const float in = input_data[i];
    output_data[i] =
        in * std::min(static_cast<T>(6), std::max(static_cast<T>(0), in + 3)) /
        6;
  }
}
// Quantized hard-swish. Computes out = x * relu6(x + 3) / 6 entirely in
// int16 fixed-point arithmetic, using multipliers/exponents precomputed in
// HardSwishParams. T is the storage type (the final value is clamped to
// T's numeric range).
template <typename T>
inline void HardSwish(const HardSwishParams& params,
                      const RuntimeShape& input_shape, const T* input_data,
                      const RuntimeShape& output_shape, T* output_data) {
  ruy::profiler::ScopeLabel label("ReferenceHardSwish/Quantized");
  const int flat_size = MatchingFlatSize(input_shape, output_shape);
  for (int i = 0; i < flat_size; i++) {
    // Re-center the input around its zero point.
    const int16_t input_value = input_data[i] - params.input_zero_point;
    // Promote to a higher-resolution fixed-point scale (x * 2^7).
    const int16_t input_value_on_hires_input_scale = input_value * (1 << 7);
    // Rescale the input onto the (pre-shift) output scale.
    const int16_t input_value_on_preshift_output_scale =
        gemmlowp::SaturatingRoundingDoublingHighMul(
            input_value_on_hires_input_scale,
            params.output_multiplier_fixedpoint_int16);
    // "reluish" accumulates the clamp factor relu6(x + 3) / 6 in fixed
    // point, via the reluish multiplier split into fixed-point and
    // exponent parts.
    int16_t reluish_value = input_value_on_hires_input_scale;
    if (params.reluish_multiplier_exponent > 0) {
      // Apply all but one bit of the positive exponent before the
      // fixed-point multiply (the last bit is applied after it, below).
      reluish_value = SaturatingLeftShift(
          reluish_value, params.reluish_multiplier_exponent - 1);
    }
    reluish_value = gemmlowp::SaturatingRoundingDoublingHighMul(
        reluish_value, params.reluish_multiplier_fixedpoint_int16);
    if (params.reluish_multiplier_exponent > 0) {
      reluish_value = SaturatingLeftShift(reluish_value, 1);
    }
    if (params.reluish_multiplier_exponent < 0) {
      // Negative exponent: rounding right shift instead.
      reluish_value = gemmlowp::RoundingDivideByPOT(
          reluish_value, -params.reluish_multiplier_exponent);
    }
    // Affine remap (v + 2^15) / 2 — takes the saturated value from a
    // [-1, 1]-style range into a [0, 1]-style range in fixed point.
    reluish_value = (reluish_value + (1 << 15)) >> 1;
    // Multiply the rescaled input by the clamp factor.
    const int16_t preshift_output_value = SaturatingDoublingHighMul(
        reluish_value, input_value_on_preshift_output_scale);
    // Undo the output multiplier's (negative) exponent with rounding.
    int16_t output_value = gemmlowp::RoundingDivideByPOT(
        preshift_output_value, -params.output_multiplier_exponent);
    output_value += params.output_zero_point;
    // Clamp into the storage type's representable range.
    output_value =
        std::min<int16_t>(output_value, std::numeric_limits<T>::max());
    output_value =
        std::max<int16_t>(output_value, std::numeric_limits<T>::min());
    output_data[i] = output_value;
  }
}
}
}
#endif | #include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/delegates/xnnpack/unary_elementwise_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace xnnpack {
// Runs a HARD_SWISH op over randomly sized inputs of each supported rank
// through the XNNPACK delegate via UnaryElementwiseTester.
TEST(HardSwish, 4D) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  // Random dimension sizes in [2, 5].
  auto shape_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  const auto height = shape_rng();
  const auto width = shape_rng();
  const auto channels = shape_rng();
  UnaryElementwiseTester()
      .Shape({batch, height, width, channels})
      .Test(BuiltinOperator_HARD_SWISH, xnnpack_delegate.get());
}
TEST(HardSwish, 3D) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  const auto width = shape_rng();
  const auto channels = shape_rng();
  UnaryElementwiseTester()
      .Shape({batch, width, channels})
      .Test(BuiltinOperator_HARD_SWISH, xnnpack_delegate.get());
}
TEST(HardSwish, 2D) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  const auto channels = shape_rng();
  UnaryElementwiseTester()
      .Shape({batch, channels})
      .Test(BuiltinOperator_HARD_SWISH, xnnpack_delegate.get());
}
TEST(HardSwish, 1D) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  UnaryElementwiseTester().Shape({batch}).Test(BuiltinOperator_HARD_SWISH,
                                               xnnpack_delegate.get());
}
// Same as the 4D case but with the delegate configured to use two threads.
TEST(HardSwish, MultiThreading) {
  TfLiteXNNPackDelegateOptions delegate_options =
      TfLiteXNNPackDelegateOptionsDefault();
  delegate_options.num_threads = 2;
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  const auto height = shape_rng();
  const auto width = shape_rng();
  const auto channels = shape_rng();
  UnaryElementwiseTester()
      .Shape({batch, height, width, channels})
      .Test(BuiltinOperator_HARD_SWISH, xnnpack_delegate.get());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/internal/reference/hard_swish.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/hard_swish_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
41037164-37cc-4768-8671-a4dc89ea1e73 | cpp | tensorflow/tensorflow | elu | tensorflow/lite/kernels/internal/reference/elu.h | tensorflow/lite/delegates/xnnpack/elu_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ELU_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ELU_H_
#include "tensorflow/lite/kernels/internal/cppmath.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
// Elementwise ELU: identity for x >= 0, expm1(x) = e^x - 1 for x < 0.
// Input and output shapes must have matching flat sizes.
inline void Elu(const RuntimeShape& input_shape, const float* input_data,
                const RuntimeShape& output_shape, float* output_data) {
  const int size = MatchingFlatSize(input_shape, output_shape);
  for (int idx = 0; idx < size; ++idx) {
    const float x = input_data[idx];
    if (x < 0.0f) {
      output_data[idx] = TfLiteExpm1(x);
    } else {
      output_data[idx] = x;
    }
  }
}
}
}
#endif | #include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/delegates/xnnpack/unary_elementwise_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace xnnpack {
// Runs an ELU op over randomly sized inputs of each supported rank through
// the XNNPACK delegate via UnaryElementwiseTester.
TEST(Elu, 4D) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  // Random dimension sizes in [2, 5].
  auto shape_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  const auto height = shape_rng();
  const auto width = shape_rng();
  const auto channels = shape_rng();
  UnaryElementwiseTester()
      .Shape({batch, height, width, channels})
      .Test(BuiltinOperator_ELU, xnnpack_delegate.get());
}
TEST(Elu, 3D) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  const auto width = shape_rng();
  const auto channels = shape_rng();
  UnaryElementwiseTester()
      .Shape({batch, width, channels})
      .Test(BuiltinOperator_ELU, xnnpack_delegate.get());
}
TEST(Elu, 2D) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  const auto channels = shape_rng();
  UnaryElementwiseTester()
      .Shape({batch, channels})
      .Test(BuiltinOperator_ELU, xnnpack_delegate.get());
}
TEST(Elu, 1D) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  UnaryElementwiseTester().Shape({batch}).Test(BuiltinOperator_ELU,
                                               xnnpack_delegate.get());
}
// Same as the 4D case but with the delegate configured to use two threads.
TEST(Elu, MultiThreading) {
  TfLiteXNNPackDelegateOptions delegate_options =
      TfLiteXNNPackDelegateOptionsDefault();
  delegate_options.num_threads = 2;
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  const auto height = shape_rng();
  const auto width = shape_rng();
  const auto channels = shape_rng();
  UnaryElementwiseTester()
      .Shape({batch, height, width, channels})
      .Test(BuiltinOperator_ELU, xnnpack_delegate.get());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/internal/reference/elu.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/elu_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ef42979d-a408-4ce0-8289-0539d58d3676 | cpp | tensorflow/tensorflow | tflite_op_wrapper | tensorflow/lite/kernels/shim/tflite_op_wrapper.h | tensorflow/lite/kernels/shim/tflite_op_wrapper_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_SHIM_TFLITE_OP_WRAPPER_H_
#define TENSORFLOW_LITE_KERNELS_SHIM_TFLITE_OP_WRAPPER_H_
#include <cstdint>
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <variant>
#include <vector>
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/shim/op_kernel.h"
#include "tensorflow/lite/kernels/shim/status_macros.h"
#include "tensorflow/lite/portable_type_to_tflitetype.h"
namespace tflite {
namespace shim {
namespace op_wrapper {
using ::tflite::shim::OpKernelShim;
using ::tflite::shim::Runtime;
// Pairs an attribute name tag (N) with the list of candidate types (T...)
// the attribute may resolve to at runtime.
template <typename N, typename... T>
struct Attr {
  const char* Name() const { return N::Name(); }
};
// Binds a compile-time string so it can be used as a template argument
// (the name tag N above).
template <char const* str>
struct AttrName {
  static const char* Name() { return str; }
};
// Tag type carrying a single candidate attribute type.
template <typename T>
struct AttrType {
  using type = T;
};
// The helpers below are declaration-only metafunctions: they are never
// defined and are used purely inside decltype() to compute types.
// prependTypeInner: prepends T to a tuple of types.
template <typename T, typename... Us>
static constexpr std::tuple<T, Us...> prependTypeInner(T, std::tuple<Us...>);
// prependType: prepends T to every tuple inside a tuple-of-tuples.
template <typename T, typename... Us>
static constexpr auto prependType(T, std::tuple<Us...>)
    -> std::tuple<decltype(prependTypeInner(std::declval<T>(),
                                            std::declval<Us>()))...>;
// getCombinations: cartesian product of the attributes' candidate type
// lists, expressed as a tuple of type-tuples.
// Base case: a single attribute yields one single-type tuple per candidate.
template <typename Name, typename... Ts>
static constexpr std::tuple<std::tuple<Ts>...> getCombinations(
    Attr<Name, Ts...>);
// One candidate left in the head attribute: prepend it to every
// combination of the remaining attributes.
template <typename Name, typename Head, typename... Attrs>
static constexpr auto getCombinations(Attr<Name, Head>, Attrs...)
    -> decltype(prependType(std::declval<Head>(),
                            getCombinations(std::declval<Attrs>()...)));
// General case: handle the head candidate, then recurse on the tail.
template <typename Name, typename Head, typename... Tail, typename... Attrs>
static constexpr auto getCombinations(Attr<Name, Head, Tail...>, Attrs...)
    -> decltype(std::tuple_cat(
        prependType(std::declval<Head>(),
                    getCombinations(std::declval<Attrs>()...)),
        getCombinations(std::declval<Attr<Name, Tail...>>(),
                        std::declval<Attrs>()...)));
// convertTuplesToOps: maps each type-tuple to an Op instantiation.
template <Runtime Rt, template <Runtime, typename...> typename Op,
          typename... Ts>
static constexpr Op<Rt, Ts...> convertTuplesToOpsInner(std::tuple<Ts...>);
template <Runtime Rt, template <Runtime, typename...> typename Op,
          typename... Ts>
static constexpr auto convertTuplesToOps(std::tuple<Ts...>) -> std::tuple<
    decltype(convertTuplesToOpsInner<Rt, Op>(std::declval<Ts>()))...>;
// convertTupleToVariant: turns a tuple of op types into a std::variant.
template <typename... Ts>
static constexpr std::variant<Ts...> convertTupleToVariant(std::tuple<Ts...>);
// VariantOp::type is the std::variant over every Op instantiation produced
// from all combinations of the attributes' candidate types.
template <Runtime Rt, template <Runtime, typename...> typename Op,
          typename FirstAttr, typename... OtherAttrs>
struct VariantOp {
  using type =
      decltype(convertTupleToVariant(convertTuplesToOps<Rt, Op>(getCombinations(
          std::declval<FirstAttr>(), std::declval<OtherAttrs>()...))));
};
// Intermediate CRTP base so OpWrapper can reuse OpKernelShim's context
// typedefs (InitContext et al.) below.
template <Runtime Rt>
class OpWrapperExtension : public OpKernelShim<OpWrapperExtension, Rt> {};
// Wraps a templated op (Op) whose concrete template arguments depend on
// runtime attribute values. At Init() time the attribute values are read
// from the context and the matching Op instantiation is selected from a
// std::variant covering every combination of the attributes' (As...)
// candidate types.
template <Runtime Rt, template <Runtime, typename...> typename Op,
          typename... As>
class OpWrapper : public OpWrapperExtension<Rt> {
 public:
  // Variant over all possible Op instantiations.
  using TmplOpType = typename VariantOp<Rt, Op, As...>::type;
  // First variant alternative — used for the static metadata queries
  // below, which are presumably identical across all instantiations.
  using TmplOpType0 = typename std::variant_alternative<0, TmplOpType>::type;
  using typename OpKernelShim<OpWrapperExtension, Rt>::InitContext;
  using typename OpKernelShim<OpWrapperExtension, Rt>::InvokeContext;
  using typename OpKernelShim<OpWrapperExtension, Rt>::ShapeInferenceContext;
  OpWrapper() = default;
  // Static metadata is forwarded to the first instantiation.
  static const char* OpName() { return TmplOpType0::OpName(); }
  static const char* Doc() { return TmplOpType0::Doc(); }
  static std::vector<std::string> Attrs() { return TmplOpType0::Attrs(); }
  static std::vector<std::string> Inputs() { return TmplOpType0::Inputs(); }
  static std::vector<std::string> Outputs() { return TmplOpType0::Outputs(); }
  static absl::Status ShapeInference(ShapeInferenceContext* context) {
    return TmplOpType0::ShapeInference(context);
  }
  // Selects the concrete op from the attribute values, then initializes it.
  absl::Status Init(InitContext* context) {
    SH_RETURN_IF_ERROR(SetVariantOp<As...>(context));
    return std::visit(
        [context](auto&& op) -> absl::Status { return op.Init(context); },
        *op_);
  }
  // Dispatches Invoke to whichever instantiation Init() selected.
  absl::Status Invoke(InvokeContext* context) {
    return std::visit(
        [context](auto&& op) -> absl::Status { return op.Invoke(context); },
        *op_);
  }
 private:
  // Entry point for the compile-time expansion over all attributes.
  template <typename FirstAttr, typename... Attrs>
  absl::Status SetVariantOp(InitContext* c) {
    return CombineAttributeTypes(this, c, FirstAttr{}, Attrs{}...);
  }
  // Captures one resolved (name, type) pair; each later SetOpCombination
  // call through this object prepends that pair before forwarding to F.
  template <typename F, typename Name, typename T>
  struct Forwarder {
   public:
    explicit Forwarder(F* f) : inner(f) {}
    template <typename... Args>
    absl::Status SetOpCombination(Args... args) {
      return inner->SetOpCombination(Name::Name(), AttrType<T>{}, args...);
    }
   private:
    F* inner;
  };
  // Expands each candidate type of the head attribute in turn, recursing
  // over the remaining attributes; each full combination ends in one of
  // the SetOpCombination overloads below.
  template <typename F, typename Name, typename Head, typename... Tail,
            typename... Attrs>
  absl::Status CombineAttributeTypes(F* obj, InitContext* c,
                                     Attr<Name, Head, Tail...>, Attrs... rest) {
    SH_RETURN_IF_ERROR(
        ApplyAttrType(obj, c, Name{}, AttrType<Head>{}, rest...));
    return CombineAttributeTypes(obj, c, Attr<Name, Tail...>{}, rest...);
  }
  // Recursion terminator: no candidate types left for this attribute.
  template <typename F, typename Name, typename... Attrs>
  absl::Status CombineAttributeTypes(F*, InitContext*, Attr<Name>, Attrs...) {
    return absl::OkStatus();
  }
  // More attributes remain: bind this (name, type) into a Forwarder and
  // continue expanding the rest.
  template <typename F, typename Name, typename T, typename Attr,
            typename... Attrs>
  absl::Status ApplyAttrType(F* obj, InitContext* c, Name, AttrType<T>, Attr a,
                             Attrs... rest) {
    Forwarder<F, Name, T> forwarder(obj);
    return CombineAttributeTypes(&forwarder, c, a, rest...);
  }
  // All attributes resolved: attempt this concrete combination.
  template <typename F, typename Name, typename T>
  absl::Status ApplyAttrType(F* obj, InitContext* c, Name, AttrType<T> t) {
    return obj->SetOpCombination(Name::Name(), t, c);
  }
  // The overloads below (1 to 5 attributes) read the actual attribute
  // values from the init context and, when every value matches this
  // combination's types, instantiate the corresponding Op into op_.
  // A non-matching combination is a no-op returning OkStatus.
  template <typename T>
  absl::Status SetOpCombination(std::string Name1, AttrType<T>,
                                InitContext* context) {
    int64_t datatype_1;
    SH_RETURN_IF_ERROR(context->GetAttr(Name1, &datatype_1));
    if (datatype_1 == typeToTfLiteType<T>()) {
      this->op_ = std::make_unique<TmplOpType>(Op<Rt, T>());
    }
    return absl::OkStatus();
  }
  template <typename T, typename U>
  absl::Status SetOpCombination(std::string Name1, AttrType<T>,
                                std::string Name2, AttrType<U>,
                                InitContext* context) {
    int64_t datatype_1, datatype_2;
    SH_RETURN_IF_ERROR(context->GetAttr(Name1, &datatype_1));
    SH_RETURN_IF_ERROR(context->GetAttr(Name2, &datatype_2));
    if (datatype_1 == typeToTfLiteType<T>() &&
        datatype_2 == typeToTfLiteType<U>()) {
      this->op_ = std::make_unique<TmplOpType>(Op<Rt, T, U>());
    }
    return absl::OkStatus();
  }
  template <typename T, typename U, typename V>
  absl::Status SetOpCombination(std::string Name1, AttrType<T>,
                                std::string Name2, AttrType<U>,
                                std::string Name3, AttrType<V>,
                                InitContext* context) {
    int64_t datatype_1, datatype_2, datatype_3;
    SH_RETURN_IF_ERROR(context->GetAttr(Name1, &datatype_1));
    SH_RETURN_IF_ERROR(context->GetAttr(Name2, &datatype_2));
    SH_RETURN_IF_ERROR(context->GetAttr(Name3, &datatype_3));
    if (datatype_1 == typeToTfLiteType<T>() &&
        datatype_2 == typeToTfLiteType<U>() &&
        datatype_3 == typeToTfLiteType<V>()) {
      this->op_ = std::make_unique<TmplOpType>(Op<Rt, T, U, V>());
    }
    return absl::OkStatus();
  }
  template <typename T, typename U, typename V, typename W>
  absl::Status SetOpCombination(std::string Name1, AttrType<T>,
                                std::string Name2, AttrType<U>,
                                std::string Name3, AttrType<V>,
                                std::string Name4, AttrType<W>,
                                InitContext* context) {
    int64_t datatype_1, datatype_2, datatype_3, datatype_4;
    SH_RETURN_IF_ERROR(context->GetAttr(Name1, &datatype_1));
    SH_RETURN_IF_ERROR(context->GetAttr(Name2, &datatype_2));
    SH_RETURN_IF_ERROR(context->GetAttr(Name3, &datatype_3));
    SH_RETURN_IF_ERROR(context->GetAttr(Name4, &datatype_4));
    if (datatype_1 == typeToTfLiteType<T>() &&
        datatype_2 == typeToTfLiteType<U>() &&
        datatype_3 == typeToTfLiteType<V>() &&
        datatype_4 == typeToTfLiteType<W>()) {
      this->op_ = std::make_unique<TmplOpType>(Op<Rt, T, U, V, W>());
    }
    return absl::OkStatus();
  }
  template <typename T, typename U, typename V, typename W, typename X>
  absl::Status SetOpCombination(std::string Name1, AttrType<T>,
                                std::string Name2, AttrType<U>,
                                std::string Name3, AttrType<V>,
                                std::string Name4, AttrType<W>,
                                std::string Name5, AttrType<X>,
                                InitContext* context) {
    int64_t datatype_1, datatype_2, datatype_3, datatype_4, datatype_5;
    SH_RETURN_IF_ERROR(context->GetAttr(Name1, &datatype_1));
    SH_RETURN_IF_ERROR(context->GetAttr(Name2, &datatype_2));
    SH_RETURN_IF_ERROR(context->GetAttr(Name3, &datatype_3));
    SH_RETURN_IF_ERROR(context->GetAttr(Name4, &datatype_4));
    SH_RETURN_IF_ERROR(context->GetAttr(Name5, &datatype_5));
    if (datatype_1 == typeToTfLiteType<T>() &&
        datatype_2 == typeToTfLiteType<U>() &&
        datatype_3 == typeToTfLiteType<V>() &&
        datatype_4 == typeToTfLiteType<W>() &&
        datatype_5 == typeToTfLiteType<X>()) {
      this->op_ = std::make_unique<TmplOpType>(Op<Rt, T, U, V, W, X>());
    }
    return absl::OkStatus();
  }
 protected:
  // Holds the instantiation selected during Init().
  std::unique_ptr<TmplOpType> op_;
};
}
}
}
#endif | #include "tensorflow/lite/kernels/shim/tflite_op_wrapper.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/platform/tstring.h"
#include "tensorflow/lite/kernels/shim/op_kernel.h"
#include "tensorflow/lite/kernels/shim/tflite_op_shim.h"
namespace tflite {
namespace shim {
namespace op_wrapper {
namespace {
#ifndef EXPECT_OK
#define EXPECT_OK(x) EXPECT_TRUE(x.ok());
#endif
class VariantOpTest : public ::testing::Test {
public:
template <shim::Runtime Rt, typename... Ts>
class TmplOp {};
template <typename T, typename VARIANT_T>
struct isVariantMember;
template <typename T, typename... ALL_T>
struct isVariantMember<T, std::variant<ALL_T...>>
: public std::disjunction<std::is_same<T, ALL_T>...> {};
static constexpr char kAttrName[] = "AttrName";
};
TEST_F(VariantOpTest, TestVariantOpCreation_1) {
using VOp = VariantOp<Runtime::kTfLite, TmplOp,
Attr<AttrName<kAttrName>, int64_t>>::type;
EXPECT_EQ(std::variant_size_v<VOp>, 1);
bool b;
b = isVariantMember<TmplOp<Runtime::kTfLite, int64_t>, VOp>::value;
EXPECT_TRUE(b);
}
TEST_F(VariantOpTest, TestVariantOpCreation_2) {
using VOp = VariantOp<Runtime::kTfLite, TmplOp,
Attr<AttrName<kAttrName>, int64_t, bool>>::type;
EXPECT_EQ(std::variant_size_v<VOp>, 2);
bool b;
b = isVariantMember<TmplOp<Runtime::kTfLite, int64_t>, VOp>::value;
EXPECT_TRUE(b);
b = isVariantMember<TmplOp<Runtime::kTfLite, bool>, VOp>::value;
EXPECT_TRUE(b);
}
TEST_F(VariantOpTest, TestVariantOpCreation_1x1) {
using VOp =
VariantOp<Runtime::kTfLite, TmplOp, Attr<AttrName<kAttrName>, int64_t>,
Attr<AttrName<kAttrName>, bool>>::type;
EXPECT_EQ(std::variant_size_v<VOp>, 1);
bool b;
b = isVariantMember<TmplOp<Runtime::kTfLite, int64_t, bool>, VOp>::value;
EXPECT_TRUE(b);
}
TEST_F(VariantOpTest, TestVariantOpCreation_1x1x1) {
using VOp =
VariantOp<Runtime::kTfLite, TmplOp, Attr<AttrName<kAttrName>, int64_t>,
Attr<AttrName<kAttrName>, bool>,
Attr<AttrName<kAttrName>, bool>>::type;
EXPECT_EQ(std::variant_size_v<VOp>, 1);
bool b;
b = isVariantMember<TmplOp<Runtime::kTfLite, int64_t, bool, bool>,
VOp>::value;
EXPECT_TRUE(b);
}
TEST_F(VariantOpTest, TestVariantOpCreation_2x1) {
using VOp = VariantOp<Runtime::kTfLite, TmplOp,
Attr<AttrName<kAttrName>, int64_t, float>,
Attr<AttrName<kAttrName>, bool>>::type;
EXPECT_EQ(std::variant_size_v<VOp>, 2);
bool b;
b = isVariantMember<TmplOp<Runtime::kTfLite, int64_t, bool>, VOp>::value;
EXPECT_TRUE(b);
b = isVariantMember<TmplOp<Runtime::kTfLite, float, bool>, VOp>::value;
EXPECT_TRUE(b);
}
TEST_F(VariantOpTest, TestVariantOpCreation_1x2) {
using VOp =
VariantOp<Runtime::kTfLite, TmplOp, Attr<AttrName<kAttrName>, int64_t>,
Attr<AttrName<kAttrName>, bool, float>>::type;
EXPECT_EQ(std::variant_size_v<VOp>, 2);
bool b;
b = isVariantMember<TmplOp<Runtime::kTfLite, int64_t, bool>, VOp>::value;
EXPECT_TRUE(b);
b = isVariantMember<TmplOp<Runtime::kTfLite, int64_t, float>, VOp>::value;
EXPECT_TRUE(b);
}
TEST_F(VariantOpTest, TestVariantOpCreation_2x2) {
using VOp = VariantOp<Runtime::kTfLite, TmplOp,
Attr<AttrName<kAttrName>, int64_t, int32_t>,
Attr<AttrName<kAttrName>, bool, float>>::type;
EXPECT_EQ(std::variant_size_v<VOp>, 4);
bool b;
b = isVariantMember<TmplOp<Runtime::kTfLite, int64_t, bool>, VOp>::value;
EXPECT_TRUE(b);
b = isVariantMember<TmplOp<Runtime::kTfLite, int64_t, float>, VOp>::value;
EXPECT_TRUE(b);
b = isVariantMember<TmplOp<Runtime::kTfLite, int32_t, bool>, VOp>::value;
EXPECT_TRUE(b);
b = isVariantMember<TmplOp<Runtime::kTfLite, int32_t, float>, VOp>::value;
EXPECT_TRUE(b);
}
TEST_F(VariantOpTest, TestVariantOpCreation_3x3) {
using VOp = VariantOp<Runtime::kTfLite, TmplOp,
Attr<AttrName<kAttrName>, int64_t, int32_t, int8_t>,
Attr<AttrName<kAttrName>, bool, float, char>>::type;
EXPECT_EQ(std::variant_size_v<VOp>, 9);
bool b;
b = isVariantMember<TmplOp<Runtime::kTfLite, int64_t, bool>, VOp>::value;
EXPECT_TRUE(b);
b = isVariantMember<TmplOp<Runtime::kTfLite, int64_t, float>, VOp>::value;
EXPECT_TRUE(b);
b = isVariantMember<TmplOp<Runtime::kTfLite, int64_t, char>, VOp>::value;
EXPECT_TRUE(b);
b = isVariantMember<TmplOp<Runtime::kTfLite, int32_t, bool>, VOp>::value;
EXPECT_TRUE(b);
b = isVariantMember<TmplOp<Runtime::kTfLite, int32_t, float>, VOp>::value;
EXPECT_TRUE(b);
b = isVariantMember<TmplOp<Runtime::kTfLite, int32_t, char>, VOp>::value;
EXPECT_TRUE(b);
b = isVariantMember<TmplOp<Runtime::kTfLite, int8_t, bool>, VOp>::value;
EXPECT_TRUE(b);
b = isVariantMember<TmplOp<Runtime::kTfLite, int8_t, float>, VOp>::value;
EXPECT_TRUE(b);
b = isVariantMember<TmplOp<Runtime::kTfLite, int8_t, char>, VOp>::value;
EXPECT_TRUE(b);
}
TEST_F(VariantOpTest, TestVariantOpCreation_2x2x2) {
using VOp = VariantOp<Runtime::kTfLite, TmplOp,
Attr<AttrName<kAttrName>, int64_t, int32_t>,
Attr<AttrName<kAttrName>, bool, float>,
Attr<AttrName<kAttrName>, char, int8_t>>::type;
EXPECT_EQ(std::variant_size_v<VOp>, 8);
bool b;
b = isVariantMember<TmplOp<Runtime::kTfLite, int64_t, bool, char>,
VOp>::value;
EXPECT_TRUE(b);
b = isVariantMember<TmplOp<Runtime::kTfLite, int64_t, bool, int8_t>,
VOp>::value;
EXPECT_TRUE(b);
b = isVariantMember<TmplOp<Runtime::kTfLite, int64_t, float, char>,
VOp>::value;
EXPECT_TRUE(b);
b = isVariantMember<TmplOp<Runtime::kTfLite, int64_t, float, int8_t>,
VOp>::value;
EXPECT_TRUE(b);
b = isVariantMember<TmplOp<Runtime::kTfLite, int32_t, bool, char>,
VOp>::value;
EXPECT_TRUE(b);
b = isVariantMember<TmplOp<Runtime::kTfLite, int32_t, bool, int8_t>,
VOp>::value;
EXPECT_TRUE(b);
b = isVariantMember<TmplOp<Runtime::kTfLite, int32_t, float, char>,
VOp>::value;
EXPECT_TRUE(b);
b = isVariantMember<TmplOp<Runtime::kTfLite, int32_t, float, int8_t>,
VOp>::value;
EXPECT_TRUE(b);
}
TEST_F(VariantOpTest, TestVariantOpCreation_2x1x3x1) {
using VOp = VariantOp<Runtime::kTfLite, TmplOp,
Attr<AttrName<kAttrName>, int64_t, int32_t>,
Attr<AttrName<kAttrName>, bool>,
Attr<AttrName<kAttrName>, char, int8_t, float>,
Attr<AttrName<kAttrName>, uint16_t>>::type;
EXPECT_EQ(std::variant_size_v<VOp>, 6);
bool b;
b = isVariantMember<TmplOp<Runtime::kTfLite, int64_t, bool, char, uint16_t>,
VOp>::value;
EXPECT_TRUE(b);
b = isVariantMember<TmplOp<Runtime::kTfLite, int64_t, bool, int8_t, uint16_t>,
VOp>::value;
EXPECT_TRUE(b);
b = isVariantMember<TmplOp<Runtime::kTfLite, int64_t, bool, float, uint16_t>,
VOp>::value;
EXPECT_TRUE(b);
b = isVariantMember<TmplOp<Runtime::kTfLite, int32_t, bool, char, uint16_t>,
VOp>::value;
EXPECT_TRUE(b);
b = isVariantMember<TmplOp<Runtime::kTfLite, int32_t, bool, int8_t, uint16_t>,
VOp>::value;
EXPECT_TRUE(b);
b = isVariantMember<TmplOp<Runtime::kTfLite, int32_t, bool, float, uint16_t>,
VOp>::value;
EXPECT_TRUE(b);
}
TEST_F(VariantOpTest, TestVariantOpCreation_4x4x6) {
using VOp =
VariantOp<Runtime::kTfLite, TmplOp,
Attr<AttrName<kAttrName>, int64_t, int32_t, int16_t, int8_t>,
Attr<AttrName<kAttrName>, int64_t, int32_t, int16_t, int8_t>,
Attr<AttrName<kAttrName>, int64_t, int32_t, int16_t, int8_t,
bool, float>>::type;
EXPECT_EQ(std::variant_size_v<VOp>, 96);
}
class SetVariantOpTest : public ::testing::Test {
public:
template <Runtime Rt, template <Runtime, typename...> typename Op,
typename... As>
class OpWrapperFriend : public OpWrapper<Rt, Op, As...> {
public:
using TmplOpType = typename VariantOp<Rt, Op, As...>::type;
TmplOpType* GetOp() { return this->op_.get(); }
};
template <Runtime Rt, typename... Ts>
class TmplOp : public OpKernelShim<TmplOp, Rt, Ts...> {
public:
using typename OpKernelShim<TmplOp, Rt, Ts...>::InitContext;
absl::Status Init(InitContext* ctx) { return absl::OkStatus(); }
};
class FakeInitContext : public TfLiteInitContext {
public:
explicit FakeInitContext(const flexbuffers::Map* m)
: TfLiteInitContext(nullptr, m) {}
};
template <typename T>
flexbuffers::Map CreateAttrMap() {
fbb_ = std::make_unique<flexbuffers::Builder>();
fbb_->Map([&]() {
fbb_->Int(kAttrName1, static_cast<int>(typeToTfLiteType<T>()));
});
fbb_->Finish();
return flexbuffers::GetRoot(fbb_->GetBuffer()).AsMap();
}
template <typename T, typename U>
flexbuffers::Map CreateAttrMap() {
fbb_ = std::make_unique<flexbuffers::Builder>();
fbb_->Map([&]() {
fbb_->Int(kAttrName1, static_cast<int>(typeToTfLiteType<T>()));
fbb_->Int(kAttrName2, static_cast<int>(typeToTfLiteType<U>()));
});
fbb_->Finish();
return flexbuffers::GetRoot(fbb_->GetBuffer()).AsMap();
}
template <typename T, typename U, typename V>
flexbuffers::Map CreateAttrMap() {
fbb_ = std::make_unique<flexbuffers::Builder>();
fbb_->Map([&]() {
fbb_->Int(kAttrName1, static_cast<int>(typeToTfLiteType<T>()));
fbb_->Int(kAttrName2, static_cast<int>(typeToTfLiteType<U>()));
fbb_->Int(kAttrName3, static_cast<int>(typeToTfLiteType<V>()));
});
fbb_->Finish();
return flexbuffers::GetRoot(fbb_->GetBuffer()).AsMap();
}
static constexpr char kAttrName1[] = "AttrName1";
static constexpr char kAttrName2[] = "AttrName2";
static constexpr char kAttrName3[] = "AttrName3";
private:
std::unique_ptr<flexbuffers::Builder> fbb_;
};
TEST_F(SetVariantOpTest, TestSetVariantOp_1) {
auto op_wrapper = OpWrapperFriend<Runtime::kTfLite, TmplOp,
Attr<AttrName<kAttrName1>, bool>>();
auto map = CreateAttrMap<bool>();
auto context = FakeInitContext(&map);
EXPECT_OK(op_wrapper.Init(&context));
bool b = std::holds_alternative<TmplOp<Runtime::kTfLite, bool>>(
*op_wrapper.GetOp());
EXPECT_TRUE(b);
}
TEST_F(SetVariantOpTest, TestSetVariantOp_1x1) {
auto op_wrapper = OpWrapperFriend<Runtime::kTfLite, TmplOp,
Attr<AttrName<kAttrName1>, bool>,
Attr<AttrName<kAttrName2>, int32_t>>();
auto map = CreateAttrMap<bool, int32_t>();
auto context = FakeInitContext(&map);
EXPECT_OK(op_wrapper.Init(&context));
bool b = std::holds_alternative<TmplOp<Runtime::kTfLite, bool, int32_t>>(
*op_wrapper.GetOp());
EXPECT_TRUE(b);
}
TEST_F(SetVariantOpTest, TestSetVariantOp_1x1x1) {
auto op_wrapper = OpWrapperFriend<
Runtime::kTfLite, TmplOp, Attr<AttrName<kAttrName1>, bool>,
Attr<AttrName<kAttrName2>, int32_t>, Attr<AttrName<kAttrName3>, float>>();
auto map = CreateAttrMap<bool, int32_t, float>();
auto context = FakeInitContext(&map);
EXPECT_OK(op_wrapper.Init(&context));
bool b =
std::holds_alternative<TmplOp<Runtime::kTfLite, bool, int32_t, float>>(
*op_wrapper.GetOp());
EXPECT_TRUE(b);
}
TEST_F(SetVariantOpTest, TestSetVariantOp_2) {
auto op_wrapper =
OpWrapperFriend<Runtime::kTfLite, TmplOp,
Attr<AttrName<kAttrName1>, bool, int32_t>>();
auto map = CreateAttrMap<bool>();
auto context = FakeInitContext(&map);
EXPECT_OK(op_wrapper.Init(&context));
bool b;
b = std::holds_alternative<TmplOp<Runtime::kTfLite, bool>>(
*op_wrapper.GetOp());
EXPECT_TRUE(b);
b = std::holds_alternative<TmplOp<Runtime::kTfLite, int32_t>>(
*op_wrapper.GetOp());
EXPECT_FALSE(b);
}
TEST_F(SetVariantOpTest, TestSetVariantOp_2x1) {
auto op_wrapper = OpWrapperFriend<Runtime::kTfLite, TmplOp,
Attr<AttrName<kAttrName1>, bool, int32_t>,
Attr<AttrName<kAttrName2>, float>>();
auto map = CreateAttrMap<int32_t, float>();
auto context = FakeInitContext(&map);
EXPECT_OK(op_wrapper.Init(&context));
bool b;
b = std::holds_alternative<TmplOp<Runtime::kTfLite, int32_t, float>>(
*op_wrapper.GetOp());
EXPECT_TRUE(b);
b = std::holds_alternative<TmplOp<Runtime::kTfLite, bool, float>>(
*op_wrapper.GetOp());
EXPECT_FALSE(b);
}
TEST_F(SetVariantOpTest, TestSetVariantOp_1x2) {
auto op_wrapper =
OpWrapperFriend<Runtime::kTfLite, TmplOp,
Attr<AttrName<kAttrName1>, bool>,
Attr<AttrName<kAttrName2>, float, int32_t>>();
auto map = CreateAttrMap<bool, float>();
auto context = FakeInitContext(&map);
EXPECT_OK(op_wrapper.Init(&context));
bool b;
b = std::holds_alternative<TmplOp<Runtime::kTfLite, bool, float>>(
*op_wrapper.GetOp());
EXPECT_TRUE(b);
b = std::holds_alternative<TmplOp<Runtime::kTfLite, bool, int32_t>>(
*op_wrapper.GetOp());
EXPECT_FALSE(b);
}
TEST_F(SetVariantOpTest, TestSetVariantOp_2x2) {
auto op_wrapper =
OpWrapperFriend<Runtime::kTfLite, TmplOp,
Attr<AttrName<kAttrName1>, bool, int64_t>,
Attr<AttrName<kAttrName2>, float, int32_t>>();
auto map = CreateAttrMap<bool, float>();
auto context = FakeInitContext(&map);
EXPECT_OK(op_wrapper.Init(&context));
bool b;
b = std::holds_alternative<TmplOp<Runtime::kTfLite, bool, float>>(
*op_wrapper.GetOp());
EXPECT_TRUE(b);
b = std::holds_alternative<TmplOp<Runtime::kTfLite, bool, int32_t>>(
*op_wrapper.GetOp());
EXPECT_FALSE(b);
b = std::holds_alternative<TmplOp<Runtime::kTfLite, int64_t, float>>(
*op_wrapper.GetOp());
EXPECT_FALSE(b);
b = std::holds_alternative<TmplOp<Runtime::kTfLite, int64_t, int32_t>>(
*op_wrapper.GetOp());
EXPECT_FALSE(b);
map = CreateAttrMap<bool, int32_t>();
context = FakeInitContext(&map);
EXPECT_OK(op_wrapper.Init(&context));
b = std::holds_alternative<TmplOp<Runtime::kTfLite, bool, float>>(
*op_wrapper.GetOp());
EXPECT_FALSE(b);
b = std::holds_alternative<TmplOp<Runtime::kTfLite, bool, int32_t>>(
*op_wrapper.GetOp());
EXPECT_TRUE(b);
b = std::holds_alternative<TmplOp<Runtime::kTfLite, int64_t, float>>(
*op_wrapper.GetOp());
EXPECT_FALSE(b);
b = std::holds_alternative<TmplOp<Runtime::kTfLite, int64_t, int32_t>>(
*op_wrapper.GetOp());
EXPECT_FALSE(b);
map = CreateAttrMap<int64_t, float>();
context = FakeInitContext(&map);
EXPECT_OK(op_wrapper.Init(&context));
b = std::holds_alternative<TmplOp<Runtime::kTfLite, bool, float>>(
*op_wrapper.GetOp());
EXPECT_FALSE(b);
b = std::holds_alternative<TmplOp<Runtime::kTfLite, bool, int32_t>>(
*op_wrapper.GetOp());
EXPECT_FALSE(b);
b = std::holds_alternative<TmplOp<Runtime::kTfLite, int64_t, float>>(
*op_wrapper.GetOp());
EXPECT_TRUE(b);
b = std::holds_alternative<TmplOp<Runtime::kTfLite, int64_t, int32_t>>(
*op_wrapper.GetOp());
EXPECT_FALSE(b);
map = CreateAttrMap<int64_t, int32_t>();
context = FakeInitContext(&map);
EXPECT_OK(op_wrapper.Init(&context));
b = std::holds_alternative<TmplOp<Runtime::kTfLite, bool, float>>(
*op_wrapper.GetOp());
EXPECT_FALSE(b);
b = std::holds_alternative<TmplOp<Runtime::kTfLite, bool, int32_t>>(
*op_wrapper.GetOp());
EXPECT_FALSE(b);
b = std::holds_alternative<TmplOp<Runtime::kTfLite, int64_t, float>>(
*op_wrapper.GetOp());
EXPECT_FALSE(b);
b = std::holds_alternative<TmplOp<Runtime::kTfLite, int64_t, int32_t>>(
*op_wrapper.GetOp());
EXPECT_TRUE(b);
}
TEST_F(SetVariantOpTest, TestSetVariantOp_3x3) {
auto op_wrapper = OpWrapperFriend<
Runtime::kTfLite, TmplOp,
Attr<AttrName<kAttrName1>, bool, int64_t, ::tensorflow::tstring>,
Attr<AttrName<kAttrName2>, float, int32_t, uint32_t>>();
auto map = CreateAttrMap<::tensorflow::tstring, int32_t>();
auto context = FakeInitContext(&map);
EXPECT_OK(op_wrapper.Init(&context));
bool b;
b = std::holds_alternative<
TmplOp<Runtime::kTfLite, ::tensorflow::tstring, int32_t>>(
*op_wrapper.GetOp());
EXPECT_TRUE(b);
}
TEST_F(SetVariantOpTest, TestSetVariantOp_2x2x2) {
auto op_wrapper = OpWrapperFriend<
Runtime::kTfLite, TmplOp, Attr<AttrName<kAttrName1>, bool, int32_t>,
Attr<AttrName<kAttrName2>, float, uint32_t>,
Attr<AttrName<kAttrName3>, ::tensorflow::tstring, int64_t>>();
auto map = CreateAttrMap<int32_t, uint32_t, ::tensorflow::tstring>();
auto context = FakeInitContext(&map);
EXPECT_OK(op_wrapper.Init(&context));
bool b = std::holds_alternative<
TmplOp<Runtime::kTfLite, int32_t, uint32_t, ::tensorflow::tstring>>(
*op_wrapper.GetOp());
EXPECT_TRUE(b);
}
TEST_F(SetVariantOpTest, TestSetVariantOp_2x1x3) {
auto op_wrapper = OpWrapperFriend<
Runtime::kTfLite, TmplOp, Attr<AttrName<kAttrName1>, bool, int32_t>,
Attr<AttrName<kAttrName2>, float>,
Attr<AttrName<kAttrName3>, ::tensorflow::tstring, int64_t, uint32_t>>();
auto map = CreateAttrMap<int32_t, float, ::tensorflow::tstring>();
auto context = FakeInitContext(&map);
EXPECT_OK(op_wrapper.Init(&context));
bool b = std::holds_alternative<
TmplOp<Runtime::kTfLite, int32_t, float, ::tensorflow::tstring>>(
*op_wrapper.GetOp());
EXPECT_TRUE(b);
}
TEST_F(SetVariantOpTest, TestSetVariantOp_4x4x6) {
auto op_wrapper = OpWrapperFriend<
Runtime::kTfLite, TmplOp,
Attr<AttrName<kAttrName1>, bool, int32_t, uint32_t, int8_t>,
Attr<AttrName<kAttrName2>, float, int16_t, int32_t, uint32_t>,
Attr<AttrName<kAttrName3>, int8_t, uint8_t, int64_t, uint64_t, int32_t,
uint32_t>>();
auto map = CreateAttrMap<int32_t, float, uint32_t>();
auto context = FakeInitContext(&map);
EXPECT_OK(op_wrapper.Init(&context));
bool b = std::holds_alternative<
TmplOp<Runtime::kTfLite, int32_t, float, uint32_t>>(*op_wrapper.GetOp());
EXPECT_TRUE(b);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/shim/tflite_op_wrapper.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/shim/tflite_op_wrapper_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
478e159d-c7e6-4af3-b9bc-6be8519c096c | cpp | tensorflow/tensorflow | libjpeg_handle | tensorflow/lite/experimental/acceleration/mini_benchmark/libjpeg_handle.h | tensorflow/lite/experimental/acceleration/mini_benchmark/libjpeg_handle_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_MINI_BENCHMARK_LIBJPEG_HANDLE_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_MINI_BENCHMARK_LIBJPEG_HANDLE_H_
#include <stddef.h>
#include <stdio.h>
#include <memory>
#include <string>
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/decode_jpeg_status.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/libjpeg.h"
namespace tflite {
namespace acceleration {
namespace decode_jpeg_kernel {
// Handle to a dynamically resolved libjpeg: Create() populates the function
// pointers below and reports failures through `status`; the destructor
// releases the underlying library. Non-copyable and non-movable.
class LibjpegHandle {
 public:
  // Loads libjpeg and resolves the decompression entry points. On failure
  // returns nullptr and fills `status` with the error.
  static std::unique_ptr<LibjpegHandle> Create(Status& status);
  ~LibjpegHandle();
  LibjpegHandle(LibjpegHandle const&) = delete;
  LibjpegHandle& operator=(const LibjpegHandle&) = delete;
  LibjpegHandle(LibjpegHandle&& LibjpegHandle) = delete;
  LibjpegHandle& operator=(LibjpegHandle&& other) = delete;
  // Expected libjpeg ABI version (62 == classic v6b layout).
  // NOTE(review): presumably validated against the loaded library inside
  // Create() — confirm in the .cc.
  static const int kLibjpegVersion = 62;
  // Resolved libjpeg entry points, mirroring the jpeglib.h API.
  struct jpeg_error_mgr* (*jpeg_std_error_)(struct jpeg_error_mgr*);
  void (*jpeg_destroy_decompress_)(j_decompress_ptr);
  // Matches jpeg_CreateDecompress(cinfo, version, structsize).
  void (*jpeg_create_decompress_)(j_decompress_ptr, int, size_t);
  void (*jpeg_stdio_src_)(j_decompress_ptr, FILE*);
  int (*jpeg_read_header_)(j_decompress_ptr, boolean);
  boolean (*jpeg_start_decompress_)(j_decompress_ptr);
  unsigned int (*jpeg_read_scanlines_)(j_decompress_ptr, JSAMPARRAY,
                                       JDIMENSION);
  boolean (*jpeg_finish_decompress_)(j_decompress_ptr);
 private:
  LibjpegHandle() {}
  // Opaque library handle — presumably a dlopen() result; Create()/dtor are
  // defined in the .cc and not visible here.
  void* libjpeg_ = nullptr;
};
}
}
}
#endif | #include "tensorflow/lite/experimental/acceleration/mini_benchmark/libjpeg_handle.h"
#include <memory>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/decode_jpeg_status.h"
namespace tflite {
namespace acceleration {
namespace decode_jpeg_kernel {
namespace {
TEST(LibjpegHandleTest, LoadingSucceeds) {
Status status;
std::unique_ptr<LibjpegHandle> handle = LibjpegHandle::Create(status);
EXPECT_TRUE(handle != nullptr);
EXPECT_EQ(status.error_message, "");
EXPECT_EQ(status.code, kTfLiteOk);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/mini_benchmark/libjpeg_handle.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/mini_benchmark/libjpeg_handle_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b1eaa4f7-89a4-4061-89a3-e9e5d277c703 | cpp | tensorflow/tensorflow | jpeg_decompress_buffered_struct | tensorflow/lite/experimental/acceleration/mini_benchmark/jpeg_decompress_buffered_struct.h | tensorflow/lite/experimental/acceleration/mini_benchmark/jpeg_decompress_buffered_struct_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_MINI_BENCHMARK_JPEG_DECOMPRESS_BUFFERED_STRUCT_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_MINI_BENCHMARK_JPEG_DECOMPRESS_BUFFERED_STRUCT_H_
#include <algorithm>
#include <cstddef>
#include <cstdlib>
#include <vector>
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/libjpeg.h"
namespace tflite {
namespace acceleration {
namespace decode_jpeg_kernel {
// Owns a heap buffer holding a jpeg_decompress_struct that may need to be
// larger than the compiled-in layout (e.g. when the runtime libjpeg was built
// with a bigger struct). Bytes past sizeof(jpeg_decompress_struct) are
// zero-initialized so the larger runtime struct starts from a clean state.
class JpegDecompressBufferedStruct {
 public:
  explicit JpegDecompressBufferedStruct(std::size_t expected_size)
      : resized_size_(std::max(sizeof(jpeg_decompress_struct), expected_size)),
        buffer_(reinterpret_cast<char*>(malloc(resized_size_))) {
    // Zero only the tail beyond the compiled struct. A forward loop replaces
    // the previous `while (--expected_size >= sizeof(...))` countdown, whose
    // unsigned counter wrapped around when expected_size == 0 and wrote far
    // out of bounds of the allocation.
    for (std::size_t i = sizeof(jpeg_decompress_struct);
         i < static_cast<std::size_t>(resized_size_); ++i) {
      buffer_[i] = 0;
    }
  }
  ~JpegDecompressBufferedStruct() { std::free(buffer_); }
  JpegDecompressBufferedStruct(const JpegDecompressBufferedStruct&) = delete;
  JpegDecompressBufferedStruct& operator=(const JpegDecompressBufferedStruct&) =
      delete;
  // Typed view of the underlying storage.
  jpeg_decompress_struct* get() const {
    return reinterpret_cast<jpeg_decompress_struct*>(buffer_);
  }
  // Total allocated size in bytes; never smaller than the compiled struct.
  int const size() { return resized_size_; }
  const char* buffer() { return buffer_; }
 private:
  int resized_size_;
  char* const buffer_;
};
}
}
}
#endif | #include "tensorflow/lite/experimental/acceleration/mini_benchmark/jpeg_decompress_buffered_struct.h"
#include <cstddef>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace tflite {
namespace acceleration {
namespace decode_jpeg_kernel {
namespace {
const int kSizeOfJpegDecompressStruct = sizeof(jpeg_decompress_struct);
TEST(JpegDecompressBufferedStructTest,
ExpectInitializationSizeMatchesStructSize) {
JpegDecompressBufferedStruct buffered_struct(kSizeOfJpegDecompressStruct);
EXPECT_EQ(buffered_struct.size(), kSizeOfJpegDecompressStruct);
}
TEST(JpegDecompressBufferedStructTest,
StructWithSizeGreaterThanCompiledStruct) {
int excess_bytes = 16;
JpegDecompressBufferedStruct buffered_struct(kSizeOfJpegDecompressStruct +
excess_bytes);
EXPECT_EQ(buffered_struct.size(), kSizeOfJpegDecompressStruct + excess_bytes);
const char* buffer = buffered_struct.buffer();
ASSERT_NE(buffer, nullptr);
while (excess_bytes--) {
EXPECT_EQ(
(unsigned char)(buffer[kSizeOfJpegDecompressStruct + excess_bytes]),
'\0');
}
}
TEST(JpegDecompressBufferedStructTest, StructWithSizeLessThanCompiledStruct) {
JpegDecompressBufferedStruct buffered_struct(kSizeOfJpegDecompressStruct -
16);
EXPECT_EQ(buffered_struct.size(), kSizeOfJpegDecompressStruct);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/mini_benchmark/jpeg_decompress_buffered_struct.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/mini_benchmark/jpeg_decompress_buffered_struct_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
de5331d4-fad9-4f40-929c-87bc3b1f7279 | cpp | tensorflow/tensorflow | algo | tensorflow/lite/experimental/lrt/core/algo.h | tensorflow/lite/experimental/lrt/core/algo_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_LRT_CORE_ALGO_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_LRT_CORE_ALGO_H_
#include <algorithm>
#include <memory>
#include <optional>
#include <unordered_map>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "llvm/ADT/MapVector.h"
#include "tensorflow/lite/experimental/lrt/c/lite_rt_model.h"
#include "tensorflow/lite/experimental/lrt/c/lite_rt_op_code.h"
#include "tensorflow/lite/experimental/lrt/cc/lite_rt_support.h"
#include "tensorflow/lite/experimental/lrt/core/model.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace algo {
// Union-find (disjoint-set forest) over op pointers, used to group a flat
// list of ops into connected partitions.
class DisjointSets {
 public:
  // Buckets `flat_op_list` into connected components: two ops end up in the
  // same component when one consumes the other's output and both are in the
  // list.
  static std::vector<std::vector<LrtOp>> GetPartitionsFromFlatList(
      const std::vector<LrtOp>& flat_op_list);
 private:
  // Unions the sets containing `op` and `parent`.
  void Insert(LrtOp op, LrtOp parent);
  // Materializes each set as a vector of its members.
  std::vector<std::vector<LrtOp>> GetBuckets();
  // Root representative of `op`'s set, with path compression.
  LrtOp GetBucket(LrtOp op);
  // op -> parent link; a root maps to itself. MapVector gives deterministic
  // (insertion-order) iteration.
  llvm::MapVector<LrtOp, LrtOp> map_;
};
// Partitions `flat_op_list` into connected components of the dataflow graph.
inline std::vector<std::vector<LrtOp>> DisjointSets::GetPartitionsFromFlatList(
    const std::vector<LrtOp>& flat_op_list) {
  DisjointSets disjoint_sets;
  // Every op starts in its own singleton set.
  for (auto* op : flat_op_list) {
    disjoint_sets.map_[op] = op;
  }
  // Union each op with every user of its outputs — but only users that are
  // themselves part of the candidate list.
  for (auto* op : flat_op_list) {
    for (auto* output : op->outputs) {
      for (auto* user : output->users) {
        if (disjoint_sets.map_.count(user) == 0) {
          continue;
        }
        disjoint_sets.Insert(op, user);
      }
    }
  }
  return disjoint_sets.GetBuckets();
}
// Unions the sets containing `op` and `parent` by linking root to root; a
// no-op when both already share a representative.
inline void DisjointSets::Insert(LrtOp op, LrtOp parent) {
  LrtOp parent_root = GetBucket(parent);
  LrtOp op_root = GetBucket(op);
  if (op_root != parent_root) {
    map_[op_root] = parent_root;
  }
}
// Materializes the disjoint sets: groups every tracked op under its root
// representative and returns the groups.
inline std::vector<std::vector<LrtOp>> DisjointSets::GetBuckets() {
  std::unordered_map<LrtOp, std::vector<LrtOp>> invert_map;
  for (const auto& entry : map_) {
    // operator[] default-constructs the vector on first access, so the
    // previous find/insert_or_assign pre-pass was redundant (three lookups
    // collapsed into one).
    invert_map[GetBucket(entry.first)].push_back(entry.first);
  }
  std::vector<std::vector<LrtOp>> res;
  res.reserve(invert_map.size());
  for (auto& entry : invert_map) {
    res.push_back(std::move(entry.second));
  }
  return res;
}
// Returns the root representative of `op`'s set, rewriting each link visited
// along the way to point directly at the root (recursive path compression).
inline LrtOp DisjointSets::GetBucket(LrtOp op) {
  auto* parent = map_[op];
  if (op != parent) {
    // Not a root: resolve transitively and cache the answer for next time.
    parent = GetBucket(parent);
    map_[op] = parent;
  }
  return parent;
}
// Copies op-level metadata when cloning an op into a new subgraph. Only the
// op code is carried over; input/output wiring is done separately.
inline void CloneOpData(const LrtOpT& old_op, LrtOpT& new_op) {
  new_op.op_code = old_op.op_code;
}
// Copies tensor-level metadata (type id and detail) and gives the clone a
// fresh, empty flatbuffer weights buffer; user/defining-op links are not
// copied.
inline void CloneTensorData(const LrtTensorT& old_tensor,
                            LrtTensorT& new_tensor) {
  new_tensor.type_id = old_tensor.type_id;
  new_tensor.type_detail = old_tensor.type_detail;
  new_tensor.weights.fb_buffer = std::make_unique<tflite::BufferT>();
}
// Returns the position of `user` in `tensor`'s user list (first match), or
// nullopt when `user` does not reference `tensor`.
inline std::optional<lrt_param_index_t> FindUseInd(LrtTensor tensor,
                                                   LrtOp user) {
  const auto& users = tensor->users;
  const auto it = std::find(users.begin(), users.end(), user);
  if (it == users.end()) {
    return std::nullopt;
  }
  return static_cast<lrt_param_index_t>(it - users.begin());
}
// Removes the use at `use_ind` via swap-with-last, keeping `users` and
// `user_arg_inds` parallel. Ordering of the remaining uses is NOT preserved.
// Out-of-range indices are silently ignored.
inline void EraseUse(LrtTensor tensor, lrt_param_index_t use_ind) {
  if (use_ind < 0 || use_ind >= tensor->users.size()) {
    return;
  }
  tensor->users[use_ind] = tensor->users.back();
  tensor->users.pop_back();
  tensor->user_arg_inds[use_ind] = tensor->user_arg_inds.back();
  tensor->user_arg_inds.pop_back();
}
// Removes `user` from `tensor`'s use list; if `user` does not actually use
// `tensor`, logs (debug builds) and returns without changes.
inline void EraseUse(LrtTensor tensor, LrtOp user) {
  auto use_ind = FindUseInd(tensor, user);
  if (!use_ind.has_value()) {
    _LRT_D_MSG("Trying to erase from tensor that doesn't use.")
    return;
  }
  EraseUse(tensor, use_ind.value());
}
// Appends `tensor` as the next input of `op` and records the reciprocal use
// (user pointer plus argument index) on the tensor.
inline void AddUse(LrtTensorT& tensor, LrtOpT& op) {
  op.inputs.push_back(&tensor);
  tensor.users.push_back(&op);
  tensor.user_arg_inds.push_back(op.inputs.size() - 1);
}
// Appends `tensor` as the next output of `op`. The tensor must not already
// have a defining op.
inline void AddOutput(LrtOpT& op, LrtTensorT& tensor) {
  DCHECK(tensor.defining_op == nullptr);
  op.outputs.push_back(&tensor);
  tensor.defining_op = &op;
  tensor.defining_op_out_ind = op.outputs.size() - 1;
}
// Creates a new tensor in `subgraph` typed like `like` (weights start empty).
// NOTE(review): returning the address of an emplaced element assumes
// tensors_storage has stable element addresses (e.g. list/deque) — confirm in
// model.h.
inline LrtTensor RequestNewTensor(LrtSubgraph subgraph,
                                  const LrtTensorT& like) {
  auto& new_tensor = subgraph->tensors_storage.emplace_back();
  CloneTensorData(like, new_tensor);
  return &new_tensor;
}
// Same as RequestNewTensor, but also registers the tensor as a graph input.
inline LrtTensor RequestNewInput(LrtSubgraph subgraph, const LrtTensorT& like) {
  auto new_tensor = RequestNewTensor(subgraph, like);
  subgraph->inputs.push_back(new_tensor);
  return new_tensor;
}
// Creates a new op in `subgraph`, cloning only the op code from `like`.
inline LrtOp RequestNewOp(LrtSubgraph subgraph, const LrtOpT& like) {
  auto& new_op = subgraph->ops_storage.emplace_back();
  CloneOpData(like, new_op);
  return &new_op;
}
// Registers `tensor` as a graph-level output of `subgraph`.
inline void AddOutput(LrtSubgraph subgraph, LrtTensor tensor) {
  subgraph->outputs.push_back(tensor);
}
// True iff `tensor` appears in `subgraph`'s output list.
inline bool IsOutput(const LrtSubgraphT& subgraph, LrtTensor tensor) {
  const auto& outs = subgraph.outputs;
  return std::find(outs.begin(), outs.end(), tensor) != outs.end();
}
// Rebuilds the subgraph's pointer views (`tensors`, `ops`) from the owning
// storage containers. Call after the storage has been mutated.
inline void UpdateReferences(LrtSubgraphT& subgraph) {
  subgraph.tensors.clear();
  subgraph.ops.clear();
  for (auto& tensor : subgraph.tensors_storage) {
    subgraph.tensors.push_back(&tensor);
  }
  for (auto& op : subgraph.ops_storage) {
    subgraph.ops.push_back(&op);
  }
}
// Detaches `op` from the graph: unregisters it from every input tensor's use
// list and clears the defining-op link of each output, leaving the op with no
// edges (a candidate for DCE).
inline void Drop(LrtOpT& op) {
  for (auto tensor : op.inputs) {
    EraseUse(tensor, &op);
  }
  op.inputs.clear();
  for (auto tensor : op.outputs) {
    tensor->defining_op = nullptr;
  }
  op.outputs.clear();
}
// Dead-code elimination: removes ops with no edges and tensors that are
// neither graph IO nor referenced by any op, then refreshes the subgraph's
// pointer views.
inline void DCE(LrtSubgraphT& subgraph) {
  auto& ops = subgraph.ops_storage;
  for (auto it = ops.begin(); it != ops.end();) {
    // An op fully detached by Drop() has no inputs and no outputs.
    if (it->inputs.empty() && it->outputs.empty()) {
      it = ops.erase(it);
    } else {
      ++it;
    }
  }
  // Membership in the (small) graph-IO lists via linear search; this avoids
  // the std::set dependency the previous code had (<set> is not in this
  // header's include list — <algorithm> is).
  const auto contains = [](const auto& io_list, LrtTensor t) {
    return std::find(io_list.begin(), io_list.end(), t) != io_list.end();
  };
  auto& tensors = subgraph.tensors_storage;
  for (auto it = tensors.begin(); it != tensors.end();) {
    auto* tensor = &*it;
    const bool not_in = !contains(subgraph.inputs, tensor);
    const bool not_out = !contains(subgraph.outputs, tensor);
    const bool dead = tensor->defining_op == nullptr && tensor->users.empty();
    if (not_in && not_out && dead) {
      it = tensors.erase(it);
    } else {
      ++it;
    }
  }
  UpdateReferences(subgraph);
}
// Moves a partition of ops out of a root subgraph into a separate "slice"
// subgraph, replacing the partition in the root with a single custom op that
// consumes the partition's external inputs and produces its external outputs.
class GraphSlicer {
 public:
  // Slices `partition` (ops belonging to `root`) into `slice` and returns the
  // custom op left behind in `root` in the partition's place.
  static LrtOp SlicePartitionFromGraph(LrtSubgraphT& root, LrtSubgraph slice,
                                       std::vector<LrtOp>& partition);
 private:
  explicit GraphSlicer(LrtSubgraph slice) : slice_(slice) {}
  // Clones `op` (and any not-yet-mapped operand/result tensors) into slice_.
  void CloneInto(const LrtOpT& op);
  // Rewires the partition's boundary tensors through hal_cal_op_.
  void RerouteTensorsThroughCustomOp(const LrtSubgraphT& root);
  LrtSubgraph slice_;
  // Original tensor in root -> cloned tensor in slice_ (insertion-ordered).
  llvm::MapVector<LrtTensor, LrtTensor> tensor_map_;
  // The custom op substituted for the partition in the root graph.
  LrtOp hal_cal_op_ = nullptr;
};
// Orchestrates the slicing. Step order matters: clone everything first, then
// detach the originals, then repurpose one detached op as the custom call and
// rewire the boundary tensors before cleaning up the root graph.
inline LrtOp GraphSlicer::SlicePartitionFromGraph(
    LrtSubgraphT& root, LrtSubgraph slice, std::vector<LrtOp>& partition) {
  GraphSlicer slicer(slice);
  for (auto* op : partition) {
    slicer.CloneInto(*op);
  }
  for (auto* op : partition) {
    Drop(*op);
  }
  // Reuse the last partition op (already detached) as the stand-in custom op,
  // so no new op needs to be allocated in root.
  slicer.hal_cal_op_ = partition.back();
  slicer.hal_cal_op_->op_code = kLrtOpCodeTflCustom;
  UpdateReferences(*slicer.slice_);
  slicer.RerouteTensorsThroughCustomOp(root);
  // Removes the now edge-less partition ops and orphaned tensors from root.
  DCE(root);
  return slicer.hal_cal_op_;
}
// Connects the custom op to the partition's boundary tensors. For each
// original/clone pair:
//  - a clone with no defining op was created as a slice input, so the
//    original tensor is an external input: feed it into the custom op;
//  - an original that still has users (outside the partition) or is a root
//    output is an external output: produce it from the custom op and mark the
//    clone as a slice output.
inline void GraphSlicer::RerouteTensorsThroughCustomOp(
    const LrtSubgraphT& root) {
  for (auto& [old_tensor, new_tensor] : tensor_map_) {
    if (new_tensor->defining_op == nullptr) {
      AddUse(*old_tensor, *hal_cal_op_);
      continue;
    }
    if (!old_tensor->users.empty() || IsOutput(root, old_tensor)) {
      DCHECK(old_tensor->defining_op == nullptr)
          << "Defining op should have been removed from the graph";
      AddOutput(*hal_cal_op_, *old_tensor);
      AddOutput(slice_, new_tensor);
    }
  }
}
// Clones one op into slice_. Inputs not yet in tensor_map_ become slice
// inputs; outputs are always fresh slice tensors registered in the map.
// NOTE(review): this relies on `partition` being topologically ordered — a
// tensor produced inside the partition must be cloned (as an output) before
// any consumer visits it, or it would be misclassified as a slice input.
// Confirm the ordering guarantee at the call site.
inline void GraphSlicer::CloneInto(const LrtOpT& old_op) {
  auto& new_op = *RequestNewOp(slice_, old_op);
  for (int i = 0; i < old_op.inputs.size(); ++i) {
    auto old_input = old_op.inputs[i];
    LrtTensor new_input;
    if (tensor_map_.contains(old_input)) {
      // Already cloned (produced by an earlier partition op, or shared).
      new_input = tensor_map_[old_input];
    } else {
      // First sighting: treat as an external value entering the slice.
      new_input = RequestNewInput(slice_, *old_input);
      tensor_map_.insert({old_input, new_input});
    }
    AddUse(*new_input, new_op);
  }
  for (int i = 0; i < old_op.outputs.size(); ++i) {
    auto old_output = old_op.outputs[i];
    auto new_output = RequestNewTensor(slice_, *old_output);
    AddOutput(new_op, *new_output);
    tensor_map_.insert({old_output, new_output});
  }
}
}
#endif | #include "tensorflow/lite/experimental/lrt/core/algo.h"
#include <memory>
#include <unordered_set>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/lrt/c/lite_rt_model.h"
#include "tensorflow/lite/experimental/lrt/c/lite_rt_op_code.h"
#include "tensorflow/lite/experimental/lrt/cc/lite_rt_support.h"
#include "tensorflow/lite/experimental/lrt/core/graph_tools.h"
#include "tensorflow/lite/experimental/lrt/core/model.h"
#include "tensorflow/lite/experimental/lrt/test_data/test_data_util.h"
namespace {
using ::algo::DisjointSets;
using ::algo::GraphSlicer;
bool HasValidGeneralTopology(LrtSubgraph subgraph) {
if (!::graph_tools::ValidateTopology(subgraph->ops)) {
_LRT_D_MSG("Failed validate op tolopology");
return false;
}
std::unordered_set<LrtTensor> implied_subgraph_outs;
for (auto tensor : subgraph->tensors) {
if (tensor->users.empty()) {
implied_subgraph_outs.insert(tensor);
}
}
if (implied_subgraph_outs.size() != subgraph->outputs.size()) {
_LRT_D_MSG("Outs not same size");
return false;
}
for (auto tensor : subgraph->outputs) {
if (implied_subgraph_outs.find(tensor) == implied_subgraph_outs.end()) {
_LRT_D_MSG("Mismatched subgraph outs");
return false;
}
}
std::unordered_set<LrtTensor> implied_subgraph_ins;
for (auto tensor : subgraph->tensors) {
if (tensor->defining_op == nullptr &&
tensor->weights.fb_buffer->data.empty()) {
implied_subgraph_ins.insert(tensor);
}
}
if (implied_subgraph_ins.size() != subgraph->inputs.size()) {
_LRT_D_MSG("Ins not same size");
return false;
}
for (auto tensor : subgraph->inputs) {
if (implied_subgraph_ins.find(tensor) == implied_subgraph_ins.end()) {
_LRT_D_MSG("Mismatched subgraph ins");
return false;
}
}
return true;
}
TEST(TestPartitionsFromFlatList, SimpleMultiOp) {
auto model = LoadTestFileModel("simple_multi_op.tflite");
ASSERT_RESULT_OK_ASSIGN(auto subgraph, graph_tools::GetSubgraph(model.get()));
ASSERT_RESULT_OK_ASSIGN(auto ops, graph_tools::GetSubgraphOps(subgraph));
{
std::vector<LrtOp> partition;
partition.push_back(ops[1]);
partition.push_back(ops[2]);
auto partitions = DisjointSets::GetPartitionsFromFlatList(partition);
ASSERT_EQ(partitions.size(), 1);
ASSERT_EQ(partitions.front().size(), 2);
EXPECT_EQ(partitions.front().at(0), partition.at(0));
EXPECT_EQ(partitions.front().at(1), partition.at(1));
}
{
std::vector<LrtOp> partition;
partition.push_back(ops[1]);
partition.push_back(ops[3]);
auto partitions = DisjointSets::GetPartitionsFromFlatList(partition);
ASSERT_EQ(partitions.size(), 2);
ASSERT_EQ(partitions.front().size(), 1);
ASSERT_EQ(partitions.back().size(), 1);
auto p1_op_code = partitions.front().front()->op_code;
auto p2_op_code = partitions.back().front()->op_code;
ASSERT_TRUE(
(p1_op_code == kLrtOpCodeTflMul && p2_op_code == kLrtOpCodeTflAdd) ||
(p1_op_code == kLrtOpCodeTflAdd && p2_op_code == kLrtOpCodeTflMul));
}
{
std::vector<LrtOp> partition;
auto partitions = DisjointSets::GetPartitionsFromFlatList(partition);
ASSERT_EQ(partitions.size(), 0);
}
{
std::vector<LrtOp> partition;
partition.push_back(ops[0]);
partition.push_back(ops[1]);
partition.push_back(ops[2]);
partition.push_back(ops[3]);
auto partitions = DisjointSets::GetPartitionsFromFlatList(partition);
ASSERT_EQ(partitions.size(), 1);
ASSERT_EQ(partitions.front().size(), 4);
EXPECT_EQ(partitions.front().at(0), partition.at(0));
EXPECT_EQ(partitions.front().at(1), partition.at(1));
EXPECT_EQ(partitions.front().at(2), partition.at(2));
EXPECT_EQ(partitions.front().at(3), partition.at(3));
}
}
TEST(TestSliceSubgraphSimpleMultiOp, OnePartition) {
auto model = LoadTestFileModel("simple_multi_op.tflite");
ASSERT_RESULT_OK_ASSIGN(auto subgraph, graph_tools::GetSubgraph(model.get()));
ASSERT_RESULT_OK_ASSIGN(auto ops, graph_tools::GetSubgraphOps(subgraph));
std::vector<LrtOp> partition;
partition.push_back(ops[1]);
partition.push_back(ops[2]);
LrtSubgraph sliced_graph = &model->subgraphs.emplace_back();
auto* hal_cal_op =
GraphSlicer::SlicePartitionFromGraph(*subgraph, sliced_graph, partition);
ASSERT_TRUE(HasValidGeneralTopology(sliced_graph));
ASSERT_TRUE(HasValidGeneralTopology(subgraph));
ASSERT_RESULT_OK_ASSIGN(auto edited_subgraph_ops,
graph_tools::GetSubgraphOps(subgraph));
ASSERT_EQ(edited_subgraph_ops.size(), 3);
ASSERT_EQ(edited_subgraph_ops[0]->op_code, kLrtOpCodeTflAdd);
ASSERT_EQ(edited_subgraph_ops[1]->op_code, kLrtOpCodeTflCustom);
ASSERT_EQ(edited_subgraph_ops[2]->op_code, kLrtOpCodeTflAdd);
ASSERT_RESULT_OK_ASSIGN(auto sliced_subgraph_ops,
graph_tools::GetSubgraphOps(sliced_graph));
ASSERT_EQ(sliced_subgraph_ops.size(), 2);
ASSERT_EQ(sliced_subgraph_ops[0]->op_code, kLrtOpCodeTflMul);
ASSERT_EQ(sliced_subgraph_ops[1]->op_code, kLrtOpCodeTflMul);
ASSERT_EQ(hal_cal_op, edited_subgraph_ops[1]);
{
ASSERT_RESULT_OK_ASSIGN(auto hal_cal_op_ins,
graph_tools::GetOpIns(hal_cal_op));
ASSERT_EQ(hal_cal_op_ins.size(), 1);
ASSERT_TRUE(graph_tools::MatchTensorDefiningOp(hal_cal_op_ins[0], 0,
edited_subgraph_ops[0]));
ASSERT_RESULT_OK_ASSIGN(auto sliced_subgraph_inputs,
graph_tools::GetSubgraphInputs(sliced_graph));
ASSERT_EQ(sliced_subgraph_inputs.size(), 1);
ASSERT_TRUE(graph_tools::MatchTensorHasUses(
sliced_subgraph_inputs[0],
{{sliced_subgraph_ops[0], 0}, {sliced_subgraph_ops[0], 1}}));
ASSERT_TRUE(
graph_tools::MatchTensorNoDefiningOp(sliced_subgraph_inputs[0]));
}
{
ASSERT_RESULT_OK_ASSIGN(auto hal_cal_op_out,
graph_tools::GetOnlyOpOut(hal_cal_op));
ASSERT_TRUE(graph_tools::MatchTensorHasUses(
hal_cal_op_out,
{{edited_subgraph_ops.back(), 0}, {edited_subgraph_ops.back(), 1}}));
ASSERT_RESULT_OK_ASSIGN(auto sliced_subgraph_outputs,
graph_tools::GetSubgraphOutputs(sliced_graph));
ASSERT_EQ(sliced_subgraph_outputs.size(), 1);
ASSERT_TRUE(graph_tools::MatchTensorDefiningOp(
sliced_subgraph_outputs[0], 0, sliced_subgraph_ops.back()));
ASSERT_TRUE(graph_tools::MatchkTensorNoUses(sliced_subgraph_outputs[0]));
}
}
TEST(TestSliceSubgraphSimpleMultiOp, TwoPartitions) {
auto model = LoadTestFileModel("simple_multi_op.tflite");
ASSERT_RESULT_OK_ASSIGN(auto subgraph, graph_tools::GetSubgraph(model.get()));
ASSERT_RESULT_OK_ASSIGN(auto ops, graph_tools::GetSubgraphOps(subgraph));
std::vector<LrtOp> partition_1;
partition_1.push_back(ops[0]);
LrtSubgraph sliced_graph_1 = &model->subgraphs.emplace_back();
GraphSlicer::SlicePartitionFromGraph(*subgraph, sliced_graph_1, partition_1);
ASSERT_TRUE(HasValidGeneralTopology(sliced_graph_1));
ASSERT_TRUE(HasValidGeneralTopology(subgraph));
std::vector<LrtOp> partition_2;
partition_2.push_back(ops[2]);
partition_2.push_back(ops[3]);
LrtSubgraph sliced_graph_2 = &model->subgraphs.emplace_back();
GraphSlicer::SlicePartitionFromGraph(*subgraph, sliced_graph_2, partition_2);
ASSERT_TRUE(HasValidGeneralTopology(sliced_graph_2));
ASSERT_TRUE(HasValidGeneralTopology(subgraph));
ASSERT_RESULT_OK_ASSIGN(auto edited_subgraph_ops,
graph_tools::GetSubgraphOps(subgraph));
ASSERT_EQ(edited_subgraph_ops.size(), 3);
ASSERT_EQ(edited_subgraph_ops[0]->op_code, kLrtOpCodeTflCustom);
ASSERT_EQ(edited_subgraph_ops[1]->op_code, kLrtOpCodeTflMul);
ASSERT_EQ(edited_subgraph_ops[2]->op_code, kLrtOpCodeTflCustom);
{
ASSERT_RESULT_OK_ASSIGN(auto sliced_ops,
graph_tools::GetSubgraphOps(sliced_graph_1));
ASSERT_EQ(sliced_ops.size(), 1);
ASSERT_EQ(sliced_ops[0]->op_code, kLrtOpCodeTflAdd);
}
{
ASSERT_RESULT_OK_ASSIGN(auto sliced_ops,
graph_tools::GetSubgraphOps(sliced_graph_2));
ASSERT_EQ(sliced_ops.size(), 2);
ASSERT_EQ(sliced_ops[0]->op_code, kLrtOpCodeTflMul);
ASSERT_EQ(sliced_ops[1]->op_code, kLrtOpCodeTflAdd);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/lrt/core/algo.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/lrt/core/algo_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
813f4cdc-b499-41a1-9d6d-a182f641d0ef | cpp | tensorflow/tensorflow | bf16 | tensorflow/lite/experimental/shlo/legacy/src/bf16.h | tensorflow/lite/experimental/shlo/bf16_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_SHLO_LEGACY_SRC_BF16_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_SHLO_LEGACY_SRC_BF16_H_
#include "tensorflow/lite/experimental/shlo/legacy/src/has_keyword.h"
#if defined(__STDCPP_BFLOAT16_T__)
#include <stdfloat>
namespace stablehlo {
using BF16 = bfloat16_t;
}
#elif __has_keyword(__bf16) && __x86_64__
namespace stablehlo {
using BF16 = __bf16;
}
#elif __has_keyword(__bf16) && __aarch64__
#include <cmath>
#include <cstdint>
namespace stablehlo {
class BF16 {
public:
BF16(float f = 0.0f) {
if (std::isnan(f)) {
value_ = std::signbit(f) ? 0xFFC0 : 0x7FC0;
} else {
uint32_t input = *reinterpret_cast<const uint32_t*>(&f);
uint32_t lsb = (input >> 16) & 1;
uint32_t rounding_bias = 0x7fff + lsb;
input += rounding_bias;
value_ = static_cast<uint16_t>(input >> 16u);
}
}
BF16& operator=(BF16 other) {
value_ = other.value_;
return *this;
}
bool operator==(BF16 other) const { return value_ == other.value_; }
bool operator!=(BF16 other) const { return !(*this == other); }
operator float() const {
uint32_t tmp = value_ << 16;
return *reinterpret_cast<float*>(&tmp);
}
BF16 operator-() const { return BF16(-static_cast<float>(*this)); }
BF16& operator+=(BF16 other) {
value_ = BF16(static_cast<float>(*this) + static_cast<float>(other)).value_;
return *this;
}
BF16& operator-=(BF16 other) {
value_ = BF16(static_cast<float>(*this) - static_cast<float>(other)).value_;
return *this;
}
BF16& operator*=(BF16 other) {
value_ = BF16(static_cast<float>(*this) * static_cast<float>(other)).value_;
return *this;
}
BF16& operator/=(BF16 other) {
value_ = BF16(static_cast<float>(*this) / static_cast<float>(other)).value_;
return *this;
}
private:
uint16_t value_;
};
inline BF16 operator+(BF16 x, BF16 y) {
x += y;
return x;
}
inline BF16 operator-(BF16 x, BF16 y) {
x -= y;
return x;
}
inline BF16 operator*(BF16 x, BF16 y) {
x *= y;
return x;
}
inline BF16 operator/(BF16 x, BF16 y) {
x /= y;
return x;
}
}
#else
#error Type BF16 is not available
#endif
#endif | #include "tensorflow/lite/experimental/shlo/bf16.h"
#include <cmath>
#include <cstdint>
#include <cstring>
#include <limits>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/base/casts.h"
namespace shlo_ref {
namespace {
::testing::Matcher<BF16> MatchesBits(uint16_t bits) {
return ::testing::ResultOf([](BF16 y) { return absl::bit_cast<uint16_t>(y); },
::testing::Eq(bits));
}
::testing::Matcher<float> NearFloat(float x, float relative_error = 1e-3) {
return ::testing::FloatNear(x, std::abs(x) * relative_error);
}
float BinaryToFloat(uint32_t sign, uint32_t exponent, uint32_t high_mantissa,
uint32_t low_mantissa) {
float dest;
uint32_t src =
(sign << 31) + (exponent << 23) + (high_mantissa << 16) + low_mantissa;
memcpy(static_cast<void*>(&dest), static_cast<const void*>(&src),
sizeof(dest));
return dest;
}
template <typename T>
void TestRoundtrips() {
for (T value : {
-std::numeric_limits<T>::infinity(),
std::numeric_limits<T>::infinity(),
T(-1.0),
T(-0.5),
T(-0.0),
T(1.0),
T(0.5),
T(0.0),
}) {
EXPECT_EQ(value, static_cast<T>(static_cast<BF16>(value)));
}
}
TEST(BF16Test, FloatRoundtrips) { TestRoundtrips<float>(); }
TEST(BF16Test, DoubleRoundtrips) { TestRoundtrips<double>(); }
TEST(BF16Test, Float16Roundtrips) { TestRoundtrips<BF16>(); }
TEST(BF16Test, ConversionFromFloat) {
EXPECT_THAT(BF16(1.0f), MatchesBits(0x3f80));
EXPECT_THAT(BF16(0.5f), MatchesBits(0x3f00));
EXPECT_THAT(BF16(0.33333f), MatchesBits(0x3eab));
EXPECT_THAT(BF16(3.38e38f), MatchesBits(0x7f7e));
EXPECT_THAT(BF16(3.40e38f), MatchesBits(0x7f80));
}
TEST(BF16Test, RoundToNearestEven) {
float val1 = static_cast<float>(absl::bit_cast<BF16>(uint16_t{0x3c00}));
float val2 = static_cast<float>(absl::bit_cast<BF16>(uint16_t{0x3c01}));
float val3 = static_cast<float>(absl::bit_cast<BF16>(uint16_t{0x3c02}));
EXPECT_THAT(BF16(0.5f * (val1 + val2)), MatchesBits(0x3c00));
EXPECT_THAT(BF16(0.5f * (val2 + val3)), MatchesBits(0x3c02));
}
TEST(BF16Test, ConversionFromInt) {
EXPECT_THAT(BF16(-1), MatchesBits(0xbf80));
EXPECT_THAT(BF16(0), MatchesBits(0x0000));
EXPECT_THAT(BF16(1), MatchesBits(0x3f80));
EXPECT_THAT(BF16(2), MatchesBits(0x4000));
EXPECT_THAT(BF16(3), MatchesBits(0x4040));
EXPECT_THAT(BF16(12), MatchesBits(0x4140));
}
TEST(BF16Test, ConversionFromBool) {
EXPECT_THAT(BF16(false), MatchesBits(0x0000));
EXPECT_THAT(BF16(true), MatchesBits(0x3f80));
}
TEST(BF16Test, ConversionToBool) {
EXPECT_EQ(static_cast<bool>(BF16(3)), true);
EXPECT_EQ(static_cast<bool>(BF16(0.33333f)), true);
EXPECT_EQ(BF16(-0.0), false);
EXPECT_EQ(static_cast<bool>(BF16(0.0)), false);
}
TEST(BF16Test, ExplicitConversionToFloat) {
EXPECT_EQ(static_cast<float>(absl::bit_cast<BF16, uint16_t>(0x0000)), 0.0f);
EXPECT_EQ(static_cast<float>(absl::bit_cast<BF16, uint16_t>(0x3f80)), 1.0f);
}
TEST(BF16Test, ImplicitConversionToFloat) {
EXPECT_EQ((absl::bit_cast<BF16, uint16_t>(0x0000)), 0.0f);
EXPECT_EQ((absl::bit_cast<BF16, uint16_t>(0x3f80)), 1.0f);
}
TEST(BF16Test, Zero) {
EXPECT_EQ(BF16(0.0f), BF16(0.0f));
EXPECT_EQ(BF16(-0.0f), BF16(0.0f));
EXPECT_EQ(BF16(-0.0f), BF16(-0.0f));
EXPECT_THAT(BF16(0.0f), MatchesBits(0x0000));
EXPECT_THAT(BF16(-0.0f), MatchesBits(0x8000));
}
TEST(BF16Test, DefaultConstruct) {
EXPECT_EQ(static_cast<float>(BF16()), 0.0f);
}
TEST(BF16Test, Conversion) {
for (int i = 0; i < 100; ++i) {
float a = i + 1.25;
BF16 b = static_cast<BF16>(a);
float c = static_cast<float>(b);
EXPECT_LE(std::abs(c - a), a / 128);
}
}
TEST(BF16Test, Epsilon) {
EXPECT_LE(1.0f, static_cast<float>(std::numeric_limits<BF16>::epsilon() +
BF16(1.0f)));
EXPECT_EQ(1.0f, static_cast<float>(std::numeric_limits<BF16>::epsilon() /
BF16(2.0f) +
BF16(1.0f)));
}
TEST(BF16Test, Negate) {
EXPECT_EQ(static_cast<float>(-BF16(3.0f)), -3.0f);
EXPECT_EQ(static_cast<float>(-BF16(-4.5f)), 4.5f);
}
TEST(BF16Test, DivisionByZero) {
EXPECT_TRUE(std::isnan(static_cast<float>(BF16(0.0 / 0.0))));
EXPECT_TRUE(std::isinf(static_cast<float>(BF16(1.0 / 0.0))));
EXPECT_TRUE(std::isinf(static_cast<float>(BF16(-1.0 / 0.0))));
EXPECT_TRUE(std::isnan(BF16(0.0 / 0.0)));
EXPECT_TRUE(std::isinf(BF16(1.0 / 0.0)));
EXPECT_TRUE(std::isinf(BF16(-1.0 / 0.0)));
}
TEST(BF16Test, NonFinite) {
EXPECT_FALSE(std::isinf(
static_cast<float>(BF16(3.38e38f))));
EXPECT_FALSE(std::isnan(static_cast<float>(BF16(0.0f))));
EXPECT_TRUE(
std::isinf(static_cast<float>(absl::bit_cast<BF16, uint16_t>(0xff80))));
EXPECT_TRUE(
std::isnan(static_cast<float>(absl::bit_cast<BF16, uint16_t>(0xffc0))));
EXPECT_TRUE(
std::isinf(static_cast<float>(absl::bit_cast<BF16, uint16_t>(0x7f80))));
EXPECT_TRUE(
std::isnan(static_cast<float>(absl::bit_cast<BF16, uint16_t>(0x7fc0))));
EXPECT_FALSE(isinf(absl::bit_cast<BF16, uint16_t>(0x7bff)));
EXPECT_FALSE(isnan(absl::bit_cast<BF16, uint16_t>(0x0000)));
EXPECT_TRUE(isinf(absl::bit_cast<BF16, uint16_t>(0xff80)));
EXPECT_TRUE(isnan(absl::bit_cast<BF16, uint16_t>(0xffc0)));
EXPECT_TRUE(isinf(absl::bit_cast<BF16, uint16_t>(0x7f80)));
EXPECT_TRUE(isnan(absl::bit_cast<BF16, uint16_t>(0x7fc0)));
EXPECT_THAT(BF16(BinaryToFloat(0x0, 0xff, 0x40, 0x0)),
MatchesBits(0x7fe0));
EXPECT_THAT(BF16(BinaryToFloat(0x1, 0xff, 0x40, 0x0)),
MatchesBits(0xffe0));
}
TEST(BF16Test, NumericLimits) {
static_assert(std::numeric_limits<BF16>::is_signed);
EXPECT_EQ(
absl::bit_cast<uint16_t>(std::numeric_limits<BF16>::infinity()),
absl::bit_cast<uint16_t>(BF16(std::numeric_limits<float>::infinity())));
constexpr uint16_t BFLOAT16_QUIET_BIT = 0x0040;
EXPECT_TRUE(isnan(std::numeric_limits<BF16>::quiet_NaN()));
EXPECT_TRUE(isnan(BF16(std::numeric_limits<float>::quiet_NaN())));
EXPECT_GT((absl::bit_cast<uint16_t>(std::numeric_limits<BF16>::quiet_NaN()) &
BFLOAT16_QUIET_BIT),
0);
EXPECT_GT(
(absl::bit_cast<uint16_t>(BF16(std::numeric_limits<float>::quiet_NaN())) &
BFLOAT16_QUIET_BIT),
0);
EXPECT_TRUE(isnan(std::numeric_limits<BF16>::signaling_NaN()));
EXPECT_TRUE(isnan(BF16(std::numeric_limits<float>::signaling_NaN())));
EXPECT_EQ(
0, (absl::bit_cast<uint16_t>(std::numeric_limits<BF16>::signaling_NaN()) &
BFLOAT16_QUIET_BIT));
EXPECT_EQ(0, (absl::bit_cast<uint16_t>(
BF16(std::numeric_limits<float>::signaling_NaN())) &
BFLOAT16_QUIET_BIT));
EXPECT_GT(std::numeric_limits<BF16>::min(), BF16(0.f));
EXPECT_GT(std::numeric_limits<BF16>::denorm_min(), BF16(0.f));
EXPECT_EQ(std::numeric_limits<BF16>::denorm_min() / BF16(2), BF16(0.f));
}
TEST(BF16Test, Arithmetic) {
EXPECT_EQ(static_cast<float>(BF16(2) + BF16(2)), 4);
EXPECT_EQ(static_cast<float>(BF16(2) + BF16(-2)), 0);
EXPECT_THAT(static_cast<float>(BF16(0.33333f) + BF16(0.66667f)),
NearFloat(1.0f));
EXPECT_EQ(static_cast<float>(BF16(2.0f) * BF16(-5.5f)), -11.0f);
EXPECT_THAT(static_cast<float>(BF16(1.0f) / BF16(3.0f)), NearFloat(0.3339f));
EXPECT_EQ(static_cast<float>(-BF16(4096.0f)), -4096.0f);
EXPECT_EQ(static_cast<float>(-BF16(-4096.0f)), 4096.0f);
}
TEST(BF16Test, Comparison) {
EXPECT_TRUE(BF16(1.0f) > BF16(0.5f));
EXPECT_TRUE(BF16(0.5f) < BF16(1.0f));
EXPECT_FALSE((BF16(1.0f) < BF16(0.5f)));
EXPECT_FALSE((BF16(0.5f) > BF16(1.0f)));
EXPECT_FALSE((BF16(4.0f) > BF16(4.0f)));
EXPECT_FALSE((BF16(4.0f) < BF16(4.0f)));
EXPECT_FALSE((BF16(0.0f) < BF16(-0.0f)));
EXPECT_FALSE((BF16(-0.0f) < BF16(0.0f)));
EXPECT_FALSE((BF16(0.0f) > BF16(-0.0f)));
EXPECT_FALSE((BF16(-0.0f) > BF16(0.0f)));
EXPECT_TRUE(BF16(0.2f) > BF16(-1.0f));
EXPECT_TRUE(BF16(-1.0f) < BF16(0.2f));
EXPECT_TRUE(BF16(-16.0f) < BF16(-15.0f));
EXPECT_TRUE(BF16(1.0f) == BF16(1.0f));
EXPECT_TRUE(BF16(1.0f) != BF16(2.0f));
EXPECT_FALSE((BF16(0.0 / 0.0) == BF16(0.0 / 0.0)));
EXPECT_TRUE(BF16(0.0 / 0.0) != BF16(0.0 / 0.0));
EXPECT_FALSE((BF16(1.0) == BF16(0.0 / 0.0)));
EXPECT_FALSE((BF16(1.0) < BF16(0.0 / 0.0)));
EXPECT_FALSE((BF16(1.0) > BF16(0.0 / 0.0)));
EXPECT_TRUE(BF16(1.0) != BF16(0.0 / 0.0));
EXPECT_TRUE(BF16(1.0) < BF16(1.0 / 0.0));
EXPECT_TRUE(BF16(1.0) > BF16(-1.0 / 0.0));
}
constexpr float PI = 3.14159265358979323846f;
TEST(BF16Test, BasicFunctions) {
EXPECT_EQ(static_cast<float>(abs(BF16(3.5f))), 3.5f);
EXPECT_EQ(static_cast<float>(abs(BF16(3.5f))), 3.5f);
EXPECT_EQ(static_cast<float>(abs(BF16(-3.5f))), 3.5f);
EXPECT_EQ(static_cast<float>(abs(BF16(-3.5f))), 3.5f);
EXPECT_EQ(static_cast<float>(floor(BF16(3.5f))), 3.0f);
EXPECT_EQ(static_cast<float>(floor(BF16(3.5f))), 3.0f);
EXPECT_EQ(static_cast<float>(floor(BF16(-3.5f))), -4.0f);
EXPECT_EQ(static_cast<float>(floor(BF16(-3.5f))), -4.0f);
EXPECT_EQ(static_cast<float>(ceil(BF16(3.5f))), 4.0f);
EXPECT_EQ(static_cast<float>(ceil(BF16(3.5f))), 4.0f);
EXPECT_EQ(static_cast<float>(ceil(BF16(-3.5f))), -3.0f);
EXPECT_EQ(static_cast<float>(ceil(BF16(-3.5f))), -3.0f);
EXPECT_FLOAT_EQ(static_cast<float>(sqrt(BF16(0.0f))), 0.0f);
EXPECT_FLOAT_EQ(static_cast<float>(sqrt(BF16(0.0f))), 0.0f);
EXPECT_FLOAT_EQ(static_cast<float>(sqrt(BF16(4.0f))), 2.0f);
EXPECT_FLOAT_EQ(static_cast<float>(sqrt(BF16(4.0f))), 2.0f);
EXPECT_FLOAT_EQ(static_cast<float>(pow(BF16(0.0f), BF16(1.0f))), 0.0f);
EXPECT_FLOAT_EQ(static_cast<float>(pow(BF16(0.0f), BF16(1.0f))), 0.0f);
EXPECT_FLOAT_EQ(static_cast<float>(pow(BF16(2.0f), BF16(2.0f))), 4.0f);
EXPECT_FLOAT_EQ(static_cast<float>(pow(BF16(2.0f), BF16(2.0f))), 4.0f);
EXPECT_EQ(static_cast<float>(exp(BF16(0.0f))), 1.0f);
EXPECT_EQ(static_cast<float>(exp(BF16(0.0f))), 1.0f);
EXPECT_THAT(static_cast<float>(exp(BF16(PI))),
NearFloat(20.f + static_cast<float>(PI)));
EXPECT_THAT(static_cast<float>(exp(BF16(PI))),
NearFloat(20.f + static_cast<float>(PI)));
EXPECT_EQ(static_cast<float>(expm1(BF16(0.0f))), 0.0f);
EXPECT_EQ(static_cast<float>(expm1(BF16(0.0f))), 0.0f);
EXPECT_THAT(static_cast<float>(expm1(BF16(2.0f))), NearFloat(6.375f));
EXPECT_THAT(static_cast<float>(expm1(BF16(2.0f))), NearFloat(6.375f));
EXPECT_EQ(static_cast<float>(log(BF16(1.0f))), 0.0f);
EXPECT_EQ(static_cast<float>(log(BF16(1.0f))), 0.0f);
EXPECT_THAT(static_cast<float>(log(BF16(10.0f))), NearFloat(2.296875f));
EXPECT_THAT(static_cast<float>(log(BF16(10.0f))), NearFloat(2.296875f));
EXPECT_EQ(static_cast<float>(log1p(BF16(0.0f))), 0.0f);
EXPECT_EQ(static_cast<float>(log1p(BF16(0.0f))), 0.0f);
EXPECT_THAT(static_cast<float>(log1p(BF16(10.0f))), NearFloat(2.390625f));
EXPECT_THAT(static_cast<float>(log1p(BF16(10.0f))), NearFloat(2.390625f));
}
TEST(BF16Test, TrigonometricFunctions) {
EXPECT_THAT(cos(BF16(0.0f)), NearFloat(BF16(std::cos(0.0f))));
EXPECT_THAT(cos(BF16(0.0f)), NearFloat(BF16(std::cos(0.0f))));
EXPECT_FLOAT_EQ(cos(BF16(PI)), BF16(std::cos(PI)));
EXPECT_NEAR(cos(BF16(PI / 2)), BF16(std::cos(PI / 2)), 1e-3);
EXPECT_NEAR(cos(BF16(3 * PI / 2)), BF16(std::cos(3 * PI / 2)), 1e-2);
EXPECT_THAT(cos(BF16(3.5f)), NearFloat(BF16(std::cos(3.5f))));
EXPECT_FLOAT_EQ(sin(BF16(0.0f)), BF16(std::sin(0.0f)));
EXPECT_FLOAT_EQ(sin(BF16(0.0f)), BF16(std::sin(0.0f)));
EXPECT_NEAR(sin(BF16(PI)), BF16(std::sin(PI)), 1e-3);
EXPECT_THAT(sin(BF16(PI / 2)), NearFloat(BF16(std::sin(PI / 2))));
EXPECT_THAT(sin(BF16(3 * PI / 2)), NearFloat(BF16(std::sin(3 * PI / 2))));
EXPECT_THAT(sin(BF16(3.5f)), NearFloat(BF16(std::sin(3.5f))));
EXPECT_FLOAT_EQ(tan(BF16(0.0f)), BF16(std::tan(0.0f)));
EXPECT_FLOAT_EQ(tan(BF16(0.0f)), BF16(std::tan(0.0f)));
EXPECT_NEAR(tan(BF16(PI)), BF16(std::tan(PI)), 1e-3);
EXPECT_THAT(tan(BF16(3.5f)), NearFloat(BF16(std::tan(3.5f))));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/legacy/src/bf16.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/bf16_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7af3d205-5d2b-4bdd-a060-042589eba699 | cpp | tensorflow/tensorflow | f16 | tensorflow/lite/experimental/shlo/legacy/src/f16.h | tensorflow/lite/experimental/shlo/f16_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_SHLO_LEGACY_SRC_F16_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_SHLO_LEGACY_SRC_F16_H_
#include "tensorflow/lite/experimental/shlo/legacy/src/has_keyword.h"
#if defined(__STDCPP_FLOAT16_T__)
#include <stdfloat>
namespace stablehlo {
using F16 = float16_t;
}
#elif __has_keyword(_Float16)
namespace stablehlo {
using F16 = _Float16;
}
#else
#error Type F16 is not available
#endif
#endif | #include "tensorflow/lite/experimental/shlo/f16.h"
#include <cstdint>
#include <limits>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/base/casts.h"
namespace shlo_ref {
namespace {
using ::testing::FloatNear;
using RoundtripTypeList = ::testing::Types<float, double>;
template <class T>
struct RoundtripF16Test : testing::Test {};
TYPED_TEST_SUITE(RoundtripF16Test, RoundtripTypeList);
TYPED_TEST(RoundtripF16Test, RoundtripConversions) {
for (TypeParam value : {
-std::numeric_limits<TypeParam>::infinity(),
std::numeric_limits<TypeParam>::infinity(),
TypeParam(-1.0),
TypeParam(-0.5),
TypeParam(-0.0),
TypeParam(1.0),
TypeParam(0.5),
TypeParam(0.0),
}) {
EXPECT_EQ(value, static_cast<TypeParam>(static_cast<F16>(value)));
}
}
TEST(F16Test, Arithmetic) {
EXPECT_EQ(static_cast<float>(F16(2) + F16(2)), 4);
EXPECT_EQ(static_cast<float>(F16(2) + F16(-2)), 0);
EXPECT_THAT(static_cast<float>(F16(0.33333f) + F16(0.66667f)),
FloatNear(1.0f, 1e-3));
EXPECT_EQ(static_cast<float>(F16(2.0f) * F16(-5.5f)), -11.0f);
EXPECT_THAT(static_cast<float>(F16(1.0f) / F16(3.0f)),
FloatNear(0.3339f, 1e-3));
EXPECT_EQ(static_cast<float>(-F16(4096.0f)), -4096.0f);
EXPECT_EQ(static_cast<float>(-F16(-4096.0f)), 4096.0f);
}
TEST(F16Test, DefaultConstruct) { EXPECT_EQ(static_cast<float>(F16()), 0.0f); }
TEST(F16Test, ImplicitConversionToFloat) {
EXPECT_EQ((absl::bit_cast<F16, uint16_t>(0x0000)), 0.0f);
EXPECT_EQ((absl::bit_cast<F16, uint16_t>(0x3C00)), 1.0f);
}
TEST(F16Test, ConstructFromArithmeticType) {
const F16 from_int8(static_cast<int8_t>(1));
EXPECT_EQ(static_cast<float>(from_int8), 1);
const F16 from_int16(static_cast<int16_t>(1));
EXPECT_EQ(static_cast<float>(from_int16), 1);
const F16 from_int32(static_cast<int32_t>(1));
EXPECT_EQ(static_cast<float>(from_int32), 1);
const F16 from_int64(static_cast<int64_t>(1));
EXPECT_EQ(static_cast<float>(from_int64), 1);
const F16 from_float(static_cast<float>(1));
EXPECT_EQ(static_cast<float>(from_float), 1);
const F16 from_double(static_cast<double>(1));
EXPECT_EQ(static_cast<float>(from_double), 1);
}
template <class T>
T ImplicitConversion(T v) {
return v;
}
TEST(F16Test, ConvertToArithmeticType) {
const F16 ref(-1);
EXPECT_EQ(ImplicitConversion<int8_t>(ref), -1);
EXPECT_EQ(ImplicitConversion<int16_t>(ref), -1);
EXPECT_EQ(ImplicitConversion<int32_t>(ref), -1);
EXPECT_EQ(ImplicitConversion<int64_t>(ref), -1);
EXPECT_EQ(ImplicitConversion<float>(ref), -1);
EXPECT_EQ(ImplicitConversion<double>(ref), -1);
}
TEST(F16Test, ArithmeticOperations) {
for (int i = -8; i < 8; ++i) {
for (int j = -8; j < 8; ++j) {
EXPECT_EQ(F16(i) == F16(j), i == j);
EXPECT_EQ(F16(i) != F16(j), i != j);
EXPECT_EQ(F16(i) > F16(j), i > j);
EXPECT_EQ(F16(i) >= F16(j), i >= j);
EXPECT_EQ(F16(i) < F16(j), i < j);
EXPECT_EQ(F16(i) <= F16(j), i <= j);
}
}
F16 val(0);
EXPECT_EQ(++val, 1);
EXPECT_EQ(val++, 1);
EXPECT_EQ(val, 2);
EXPECT_EQ(val--, 2);
EXPECT_EQ(val, 1);
EXPECT_EQ(--val, 0);
EXPECT_EQ(val += F16(1), 1);
EXPECT_EQ(val, 1);
EXPECT_EQ(val *= F16(2), 2);
EXPECT_EQ(val, 2);
EXPECT_EQ(val /= F16(2), 1);
EXPECT_EQ(val, 1);
EXPECT_EQ(val -= F16(4), -3);
EXPECT_EQ(val, -3);
EXPECT_EQ(val = F16(7), 7);
EXPECT_EQ(val, 7);
EXPECT_EQ(+val, 7);
EXPECT_EQ(-val, -7);
EXPECT_EQ(static_cast<bool>(val), true);
EXPECT_EQ(!val, false);
EXPECT_EQ(val && F16(2), true);
EXPECT_EQ(val && F16(0), false);
EXPECT_EQ(val || F16(0), true);
EXPECT_EQ(F16(0) || F16(0), false);
}
using ArithmeticTypeList =
::testing::Types<int8_t, int16_t, int32_t, int64_t, float, double>;
template <class T>
struct ArithmeticTypeF16Test : testing::Test {};
TYPED_TEST_SUITE(ArithmeticTypeF16Test, ArithmeticTypeList);
TYPED_TEST(ArithmeticTypeF16Test, InPlaceArithmetic) {
for (TypeParam i = -8; i < 8; ++i) {
for (TypeParam j = -8; j < 8; ++j) {
EXPECT_EQ(F16(i) == j, i == j);
EXPECT_EQ(i == F16(j), i == j);
EXPECT_EQ(F16(i) != j, i != j);
EXPECT_EQ(i != F16(j), i != j);
EXPECT_EQ(F16(i) > j, i > j);
EXPECT_EQ(i > F16(j), i > j);
EXPECT_EQ(F16(i) >= j, i >= j);
EXPECT_EQ(i >= F16(j), i >= j);
EXPECT_EQ(F16(i) < j, i < j);
EXPECT_EQ(i < F16(j), i < j);
EXPECT_EQ(F16(i) <= j, i <= j);
EXPECT_EQ(i <= F16(j), i <= j);
}
}
const TypeParam one = TypeParam(1);
const TypeParam two = TypeParam(2);
const TypeParam four = TypeParam(4);
F16 val(0);
EXPECT_EQ(val += one, 1);
EXPECT_EQ(val, 1);
EXPECT_EQ(val *= two, 2);
EXPECT_EQ(val, 2);
EXPECT_EQ(val /= two, 1);
EXPECT_EQ(val, 1);
EXPECT_EQ(val -= four, -3);
EXPECT_EQ(val, -3);
const F16 f16_three(3);
EXPECT_EQ(f16_three + one, 4.);
EXPECT_EQ(f16_three - one, 2.);
EXPECT_EQ(f16_three * two, 3. * two);
EXPECT_EQ(f16_three / two, 3. / two);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/legacy/src/f16.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/f16_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d91e02de-81f7-406d-b4a6-caa95336163b | cpp | tensorflow/tensorflow | overload | tensorflow/lite/experimental/shlo/overload.h | tensorflow/lite/experimental/shlo/overload_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OVERLOAD_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OVERLOAD_H_
namespace shlo_ref {
template <class... Ts>
class Overload : public Ts... {
public:
explicit Overload(Ts&&... ts) : Ts(static_cast<Ts&&>(ts))... {}
using Ts::operator()...;
};
template <class... Ts>
Overload(Ts&&...) -> Overload<Ts...>;
}
#endif | #include "tensorflow/lite/experimental/shlo/overload.h"
#include <cstdint>
#include <string>
#include <type_traits>
#include <variant>
#include <gtest/gtest.h>
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
namespace {
TEST(OverloadTest, DispatchConsidersTypeWithAutoFallback) {
auto overloaded = shlo_ref::Overload(
[](int v) -> std::string { return absl::StrCat("int ", v); },
[](double v) -> std::string { return absl::StrCat("double ", v); },
[](const char* v) -> std::string {
return absl::StrCat("const char* ", v);
},
[](auto v) -> std::string { return absl::StrCat("auto ", v); }
);
EXPECT_EQ("int 1", overloaded(1));
EXPECT_EQ("double 2.5", overloaded(2.5));
EXPECT_EQ("const char* hello", overloaded("hello"));
EXPECT_EQ("auto 1.5", overloaded(1.5f));
}
TEST(OverloadTest, DispatchConsidersNumberOfArguments) {
auto overloaded = shlo_ref::Overload(
[](int a) { return a + 1; },
[](int a, int b) { return a * b; },
[]() -> absl::string_view { return "none"; }
);
EXPECT_EQ(3, overloaded(2));
EXPECT_EQ(21, overloaded(3, 7));
EXPECT_EQ("none", overloaded());
}
TEST(OverloadTest, SupportsConstantEvaluation) {
auto overloaded = shlo_ref::Overload(
[](int a) { return a + 1; },
[](int a, int b) { return a * b; },
[]() -> absl::string_view { return "none"; }
);
static_assert(overloaded() == "none");
static_assert(overloaded(2) == 3);
static_assert(overloaded(3, 7) == 21);
}
TEST(OverloadTest, PropogatesDefaults) {
auto overloaded = shlo_ref::Overload(
[](int a, int b = 5) { return a * b; },
[](double c) { return c; }
);
EXPECT_EQ(21, overloaded(3, 7));
EXPECT_EQ(35, overloaded(7));
EXPECT_EQ(2.5, overloaded(2.5));
}
TEST(OverloadTest, AmbiguousWithDefaultsNotInvocable) {
auto overloaded = shlo_ref::Overload(
[](int a, int b = 5) { return a * b; },
[](int c) { return c; }
);
static_assert(!std::is_invocable_v<decltype(overloaded), int>);
static_assert(std::is_invocable_v<decltype(overloaded), int, int>);
}
TEST(OverloadTest, AmbiguousDuplicatesNotInvocable) {
auto overloaded = shlo_ref::Overload(
[](int a) { return a; },
[](int c) { return c; }
);
static_assert(!std::is_invocable_v<decltype(overloaded), int>);
}
TEST(OverloadTest, AmbiguousConversionNotInvocable) {
auto overloaded = shlo_ref::Overload(
[](uint16_t a) { return a; },
[](uint64_t c) { return c; }
);
static_assert(!std::is_invocable_v<decltype(overloaded), int>);
}
TEST(OverloadTest, AmbiguousConversionWithAutoNotInvocable) {
auto overloaded = shlo_ref::Overload(
[](auto a) { return a; },
[](auto c) { return c; }
);
static_assert(!std::is_invocable_v<decltype(overloaded), int>);
}
TEST(OverloadTest, DispatchConsidersSfinae) {
auto overloaded = shlo_ref::Overload(
[](auto a) -> decltype(a + 1) { return a + 1; }
);
static_assert(std::is_invocable_v<decltype(overloaded), int>);
static_assert(!std::is_invocable_v<decltype(overloaded), std::string>);
}
TEST(OverloadTest, VariantVisitDispatchesCorrectly) {
std::variant<int, double, std::string> v(1);
auto overloaded = shlo_ref::Overload(
[](int) -> absl::string_view { return "int"; },
[](double) -> absl::string_view { return "double"; },
[](const std::string&) -> absl::string_view { return "string"; }
);
EXPECT_EQ("int", std::visit(overloaded, v));
v = 1.1;
EXPECT_EQ("double", std::visit(overloaded, v));
v = "hello";
EXPECT_EQ("string", std::visit(overloaded, v));
}
TEST(OverloadTest, VariantVisitWithAutoFallbackDispatchesCorrectly) {
std::variant<std::string, int32_t, int64_t> v(int32_t{1});
auto overloaded =
shlo_ref::Overload([](const std::string& s) { return s.size(); },
[](const auto& s) { return sizeof(s); }
);
EXPECT_EQ(4, std::visit(overloaded, v));
v = int64_t{1};
EXPECT_EQ(8, std::visit(overloaded, v));
v = std::string("hello");
EXPECT_EQ(5, std::visit(overloaded, v));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/overload.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/overload_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5ac5caa6-167b-4fcd-a4de-786282f38404 | cpp | tensorflow/tensorflow | i4 | tensorflow/lite/experimental/shlo/i4.h | tensorflow/lite/experimental/shlo/i4_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_SHLO_I4_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_SHLO_I4_H_
#include <cstdint>
#include <limits>
#include <ostream>
#include <type_traits>
namespace shlo_ref {
// Emulation of a 4-bit signed integer ("i4") backed by an int8_t.
//
// NOTE(review): the type does not clamp or mask values to the 4-bit range
// [-8, 7]; `data` may hold any int8_t value and arithmetic is performed in
// the promoted int domain — range enforcement is presumably the caller's
// responsibility. Confirm against callers.
// Heterogeneous operators accept any arithmetic type T and compute in
// std::common_type_t<T, int>.
struct I4 {
  // Underlying storage for the 4-bit value.
  int8_t data = 0;
  constexpr I4() = default;
  constexpr I4(const I4&) = default;
  constexpr I4& operator=(const I4&) = default;
  // Implicit construction from any type; the value is converted to int8_t
  // (paren member-init, so narrowing conversions are permitted).
  template <class T>
  constexpr I4(T v) : data(v) {}
  // Implicit conversion to any type constructible from int8_t.
  template <class T>
  constexpr operator T() const {
    return static_cast<T>(data);
  }
  // Pre/post increment and decrement, applied to the underlying int8_t.
  friend I4& operator++(I4& lhs) {
    ++lhs.data;
    return lhs;
  }
  friend I4& operator--(I4& lhs) {
    --lhs.data;
    return lhs;
  }
  friend I4 operator++(I4& lhs, int) {
    I4 ret = lhs;
    ++lhs.data;
    return ret;
  }
  friend I4 operator--(I4& lhs, int) {
    I4 ret = lhs;
    --lhs.data;
    return ret;
  }
  // Compound assignment: one overload for an I4 rhs, one for any arithmetic
  // rhs T (computed in std::common_type_t<T, int>).
  // NOTE(review): the templated bitwise/modulo compound assignments below
  // (%=, &=, |=, ^=, <<=, >>=) are enabled for *arithmetic* T, whereas the
  // corresponding binary operators further down require *integral* T; e.g.
  // `i4 %= 1.0f` would fail to compile inside the body rather than SFINAE
  // away. Confirm whether std::is_integral_v was intended here as well.
  friend I4& operator+=(I4& lhs, I4 rhs) {
    lhs.data += rhs.data;
    return lhs;
  }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend I4& operator+=(I4& lhs, T rhs) {
    using C = std::common_type_t<T, int>;
    lhs.data += static_cast<C>(rhs);
    return lhs;
  }
  friend I4& operator-=(I4& lhs, I4 rhs) {
    lhs.data -= rhs.data;
    return lhs;
  }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend I4& operator-=(I4& lhs, T rhs) {
    using C = std::common_type_t<T, int>;
    lhs.data -= static_cast<C>(rhs);
    return lhs;
  }
  friend I4& operator*=(I4& lhs, I4 rhs) {
    lhs.data *= rhs.data;
    return lhs;
  }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend I4& operator*=(I4& lhs, T rhs) {
    using C = std::common_type_t<T, int>;
    lhs.data *= static_cast<C>(rhs);
    return lhs;
  }
  friend I4& operator/=(I4& lhs, I4 rhs) {
    lhs.data /= rhs.data;
    return lhs;
  }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend I4& operator/=(I4& lhs, T rhs) {
    using C = std::common_type_t<T, int>;
    lhs.data /= static_cast<C>(rhs);
    return lhs;
  }
  friend I4& operator%=(I4& lhs, I4 rhs) {
    lhs.data %= rhs.data;
    return lhs;
  }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend I4& operator%=(I4& lhs, T rhs) {
    using C = std::common_type_t<T, int>;
    lhs.data %= static_cast<C>(rhs);
    return lhs;
  }
  friend I4& operator&=(I4& lhs, I4 rhs) {
    lhs.data &= rhs.data;
    return lhs;
  }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend I4& operator&=(I4& lhs, T rhs) {
    using C = std::common_type_t<T, int>;
    lhs.data &= static_cast<C>(rhs);
    return lhs;
  }
  friend I4& operator|=(I4& lhs, I4 rhs) {
    lhs.data |= rhs.data;
    return lhs;
  }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend I4& operator|=(I4& lhs, T rhs) {
    using C = std::common_type_t<T, int>;
    lhs.data |= static_cast<C>(rhs);
    return lhs;
  }
  friend I4& operator^=(I4& lhs, I4 rhs) {
    lhs.data ^= rhs.data;
    return lhs;
  }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend I4& operator^=(I4& lhs, T rhs) {
    using C = std::common_type_t<T, int>;
    lhs.data ^= static_cast<C>(rhs);
    return lhs;
  }
  friend I4& operator<<=(I4& lhs, I4 rhs) {
    lhs.data <<= rhs.data;
    return lhs;
  }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend I4& operator<<=(I4& lhs, T rhs) {
    using C = std::common_type_t<T, int>;
    lhs.data <<= static_cast<C>(rhs);
    return lhs;
  }
  friend I4& operator>>=(I4& lhs, I4 rhs) {
    lhs.data >>= rhs.data;
    return lhs;
  }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend I4& operator>>=(I4& lhs, T rhs) {
    using C = std::common_type_t<T, int>;
    lhs.data >>= static_cast<C>(rhs);
    return lhs;
  }
  // Unary arithmetic: results are promoted (int), not I4.
  friend auto operator+(I4 lhs) { return +lhs.data; }
  friend auto operator-(I4 lhs) { return -lhs.data; }
  // Binary arithmetic (+, -, *, /): defined for I4/I4 and mixed
  // I4/arithmetic operands; results use the promoted common type, not I4.
  friend auto operator+(I4 lhs, I4 rhs) { return lhs.data + rhs.data; }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend auto operator+(I4 lhs, T rhs) {
    using C = std::common_type_t<T, int>;
    return lhs.data + static_cast<C>(rhs);
  }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend auto operator+(T lhs, I4 rhs) {
    using C = std::common_type_t<T, int>;
    return static_cast<C>(lhs) + rhs.data;
  }
  friend auto operator-(I4 lhs, I4 rhs) { return lhs.data - rhs.data; }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend auto operator-(I4 lhs, T rhs) {
    using C = std::common_type_t<T, int>;
    return lhs.data - static_cast<C>(rhs);
  }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend auto operator-(T lhs, I4 rhs) {
    using C = std::common_type_t<T, int>;
    return static_cast<C>(lhs) - rhs.data;
  }
  friend auto operator*(I4 lhs, I4 rhs) { return lhs.data * rhs.data; }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend auto operator*(I4 lhs, T rhs) {
    using C = std::common_type_t<T, int>;
    return lhs.data * static_cast<C>(rhs);
  }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend auto operator*(T lhs, I4 rhs) {
    using C = std::common_type_t<T, int>;
    return static_cast<C>(lhs) * rhs.data;
  }
  friend auto operator/(I4 lhs, I4 rhs) { return lhs.data / rhs.data; }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend auto operator/(I4 lhs, T rhs) {
    using C = std::common_type_t<T, int>;
    return lhs.data / static_cast<C>(rhs);
  }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend auto operator/(T lhs, I4 rhs) {
    using C = std::common_type_t<T, int>;
    return static_cast<C>(lhs) / rhs.data;
  }
  // Modulo and the bitwise operators (%, &, |, ^, <<, >>) are restricted to
  // integral mixed operands via std::is_integral_v.
  friend auto operator%(I4 lhs, I4 rhs) { return lhs.data % rhs.data; }
  template <class T, class = std::enable_if_t<std::is_integral_v<T>>>
  friend auto operator%(I4 lhs, T rhs) {
    using C = std::common_type_t<T, int>;
    return lhs.data % static_cast<C>(rhs);
  }
  template <class T, class = std::enable_if_t<std::is_integral_v<T>>>
  friend auto operator%(T lhs, I4 rhs) {
    using C = std::common_type_t<T, int>;
    return static_cast<C>(lhs) % rhs.data;
  }
  friend auto operator~(I4 lhs) { return ~lhs.data; }
  friend auto operator&(I4 lhs, I4 rhs) { return lhs.data & rhs.data; }
  template <class T, class = std::enable_if_t<std::is_integral_v<T>>>
  friend auto operator&(I4 lhs, T rhs) {
    using C = std::common_type_t<T, int>;
    return lhs.data & static_cast<C>(rhs);
  }
  template <class T, class = std::enable_if_t<std::is_integral_v<T>>>
  friend auto operator&(T lhs, I4 rhs) {
    using C = std::common_type_t<T, int>;
    return static_cast<C>(lhs) & rhs.data;
  }
  friend auto operator|(I4 lhs, I4 rhs) { return lhs.data | rhs.data; }
  template <class T, class = std::enable_if_t<std::is_integral_v<T>>>
  friend auto operator|(I4 lhs, T rhs) {
    using C = std::common_type_t<T, int>;
    return lhs.data | static_cast<C>(rhs);
  }
  template <class T, class = std::enable_if_t<std::is_integral_v<T>>>
  friend auto operator|(T lhs, I4 rhs) {
    using C = std::common_type_t<T, int>;
    return static_cast<C>(lhs) | rhs.data;
  }
  friend auto operator^(I4 lhs, I4 rhs) { return lhs.data ^ rhs.data; }
  template <class T, class = std::enable_if_t<std::is_integral_v<T>>>
  friend auto operator^(I4 lhs, T rhs) {
    using C = std::common_type_t<T, int>;
    return lhs.data ^ static_cast<C>(rhs);
  }
  template <class T, class = std::enable_if_t<std::is_integral_v<T>>>
  friend auto operator^(T lhs, I4 rhs) {
    using C = std::common_type_t<T, int>;
    return static_cast<C>(lhs) ^ rhs.data;
  }
  friend auto operator<<(I4 lhs, I4 rhs) { return lhs.data << rhs.data; }
  template <class T, class = std::enable_if_t<std::is_integral_v<T>>>
  friend auto operator<<(I4 lhs, T rhs) {
    using C = std::common_type_t<T, int>;
    return lhs.data << static_cast<C>(rhs);
  }
  template <class T, class = std::enable_if_t<std::is_integral_v<T>>>
  friend auto operator<<(T lhs, I4 rhs) {
    using C = std::common_type_t<T, int>;
    return static_cast<C>(lhs) << rhs.data;
  }
  friend auto operator>>(I4 lhs, I4 rhs) { return lhs.data >> rhs.data; }
  template <class T, class = std::enable_if_t<std::is_integral_v<T>>>
  friend auto operator>>(I4 lhs, T rhs) {
    using C = std::common_type_t<T, int>;
    return lhs.data >> static_cast<C>(rhs);
  }
  template <class T, class = std::enable_if_t<std::is_integral_v<T>>>
  friend auto operator>>(T lhs, I4 rhs) {
    using C = std::common_type_t<T, int>;
    return static_cast<C>(lhs) >> rhs.data;
  }
  // Logical operators (no short-circuit across the user-defined overloads).
  friend bool operator!(I4 v) { return !v.data; }
  friend auto operator&&(I4 lhs, I4 rhs) { return lhs.data && rhs.data; }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend auto operator&&(I4 lhs, T rhs) {
    using C = std::common_type_t<T, int>;
    return lhs.data && static_cast<C>(rhs);
  }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend auto operator&&(T lhs, I4 rhs) {
    using C = std::common_type_t<T, int>;
    return static_cast<C>(lhs) && rhs.data;
  }
  friend auto operator||(I4 lhs, I4 rhs) { return lhs.data || rhs.data; }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend auto operator||(I4 lhs, T rhs) {
    using C = std::common_type_t<T, int>;
    return lhs.data || static_cast<C>(rhs);
  }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend auto operator||(T lhs, I4 rhs) {
    using C = std::common_type_t<T, int>;
    return static_cast<C>(lhs) || rhs.data;
  }
  // Comparisons against I4 and against any arithmetic type.
  friend bool operator==(I4 lhs, I4 rhs) { return lhs.data == rhs.data; }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend bool operator==(I4 lhs, T rhs) {
    using C = std::common_type_t<T, int>;
    return lhs.data == static_cast<C>(rhs);
  }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend bool operator==(T lhs, I4 rhs) {
    using C = std::common_type_t<T, int>;
    return static_cast<C>(lhs) == rhs.data;
  }
  friend bool operator!=(I4 lhs, I4 rhs) { return lhs.data != rhs.data; }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend bool operator!=(I4 lhs, T rhs) {
    using C = std::common_type_t<T, int>;
    return lhs.data != static_cast<C>(rhs);
  }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend bool operator!=(T lhs, I4 rhs) {
    using C = std::common_type_t<T, int>;
    return static_cast<C>(lhs) != rhs.data;
  }
  friend bool operator<(I4 lhs, I4 rhs) { return lhs.data < rhs.data; }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend bool operator<(I4 lhs, T rhs) {
    using C = std::common_type_t<T, int>;
    return lhs.data < static_cast<C>(rhs);
  }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend bool operator<(T lhs, I4 rhs) {
    using C = std::common_type_t<T, int>;
    return static_cast<C>(lhs) < rhs.data;
  }
  friend bool operator>(I4 lhs, I4 rhs) { return lhs.data > rhs.data; }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend bool operator>(I4 lhs, T rhs) {
    using C = std::common_type_t<T, int>;
    return lhs.data > static_cast<C>(rhs);
  }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend bool operator>(T lhs, I4 rhs) {
    using C = std::common_type_t<T, int>;
    return static_cast<C>(lhs) > rhs.data;
  }
  friend bool operator<=(I4 lhs, I4 rhs) { return lhs.data <= rhs.data; }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend bool operator<=(I4 lhs, T rhs) {
    using C = std::common_type_t<T, int>;
    return lhs.data <= static_cast<C>(rhs);
  }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend bool operator<=(T lhs, I4 rhs) {
    using C = std::common_type_t<T, int>;
    return static_cast<C>(lhs) <= rhs.data;
  }
  friend bool operator>=(I4 lhs, I4 rhs) { return lhs.data >= rhs.data; }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend bool operator>=(I4 lhs, T rhs) {
    using C = std::common_type_t<T, int>;
    return lhs.data >= static_cast<C>(rhs);
  }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend bool operator>=(T lhs, I4 rhs) {
    using C = std::common_type_t<T, int>;
    return static_cast<C>(lhs) >= rhs.data;
  }
  // Prints the numeric value; unary + promotes the int8_t so it is not
  // rendered as a character.
  friend std::ostream& operator<<(std::ostream& os, I4 v) { return os << +v; }
};
}
namespace std {
// numeric_limits for the 4-bit I4 type. Inherits the boolean/trait members
// from int8_t but overrides the value bounds and the bit-width queries,
// which differ for a 4-bit type.
template <>
struct numeric_limits<shlo_ref::I4> : std::numeric_limits<int8_t> {
  // 3 value bits + 1 sign bit. The inherited int8_t values (digits = 7,
  // digits10 = 2) would be wrong for a 4-bit type.
  static constexpr int digits = 3;
  static constexpr int digits10 = 0;
  static constexpr shlo_ref::I4 min() noexcept { return shlo_ref::I4(-8); }
  static constexpr shlo_ref::I4 lowest() noexcept { return min(); }
  static constexpr shlo_ref::I4 max() noexcept { return shlo_ref::I4(7); }
};
}
#endif | #include "tensorflow/lite/experimental/shlo/i4.h"
#include <cstdint>
#include <gtest/gtest.h>
namespace shlo_ref {
namespace {
// I4 must be constructible from every standard arithmetic type, storing the
// value in its int8_t payload.
TEST(I4Test, ConstructFromArithmeticType) {
  const I4 from_int8(static_cast<int8_t>(1));
  EXPECT_EQ(from_int8.data, 1);
  const I4 from_int16(static_cast<int16_t>(1));
  EXPECT_EQ(from_int16.data, 1);
  const I4 from_int32(static_cast<int32_t>(1));
  EXPECT_EQ(from_int32.data, 1);
  const I4 from_int64(static_cast<int64_t>(1));
  EXPECT_EQ(from_int64.data, 1);
  const I4 from_float(static_cast<float>(1));
  EXPECT_EQ(from_float.data, 1);
  const I4 from_double(static_cast<double>(1));
  EXPECT_EQ(from_double.data, 1);
}
// Identity helper: forces an implicit conversion of the call-site argument
// to T, letting the tests exercise I4's templated conversion operator.
template <class T>
T ImplicitConversion(T value) {
  return value;
}
// I4 must implicitly convert to every standard arithmetic type, preserving
// the stored (signed) value.
TEST(I4Test, ConvertToArithmeticType) {
  const I4 ref(-1);
  EXPECT_EQ(ImplicitConversion<int8_t>(ref), -1);
  EXPECT_EQ(ImplicitConversion<int16_t>(ref), -1);
  EXPECT_EQ(ImplicitConversion<int32_t>(ref), -1);
  EXPECT_EQ(ImplicitConversion<int64_t>(ref), -1);
  EXPECT_EQ(ImplicitConversion<float>(ref), -1);
  EXPECT_EQ(ImplicitConversion<double>(ref), -1);
}
// Exhaustively checks I4/I4 comparisons over the full 4-bit range, then
// walks through the mutating and non-mutating operators.
// NOTE: the EXPECT_EQ sequence below is a state machine — each expectation
// depends on the value `val` was left with by the previous line.
TEST(I4Test, Arithmetic) {
  for (int i = -8; i < 8; ++i) {
    for (int j = -8; j < 8; ++j) {
      EXPECT_EQ(I4(i) == I4(j), i == j);
      EXPECT_EQ(I4(i) != I4(j), i != j);
      EXPECT_EQ(I4(i) > I4(j), i > j);
      EXPECT_EQ(I4(i) >= I4(j), i >= j);
      EXPECT_EQ(I4(i) < I4(j), i < j);
      EXPECT_EQ(I4(i) <= I4(j), i <= j);
    }
  }
  I4 val(0);
  EXPECT_EQ(++val, 1);
  EXPECT_EQ(val++, 1);
  EXPECT_EQ(val, 2);
  EXPECT_EQ(val--, 2);
  EXPECT_EQ(val, 1);
  EXPECT_EQ(--val, 0);
  EXPECT_EQ(val += I4(1), 1);
  EXPECT_EQ(val, 1);
  EXPECT_EQ(val *= I4(2), 2);
  EXPECT_EQ(val, 2);
  EXPECT_EQ(val /= I4(2), 1);
  EXPECT_EQ(val, 1);
  EXPECT_EQ(val -= I4(4), -3);
  EXPECT_EQ(val, -3);
  EXPECT_EQ(val %= I4(2), -1);
  EXPECT_EQ(val, -1);
  EXPECT_EQ(val = I4(7), 7);
  EXPECT_EQ(val, 7);
  EXPECT_EQ(val &= I4(2), 2);
  EXPECT_EQ(val, 2);
  EXPECT_EQ(val |= I4(1), 3);
  EXPECT_EQ(val, 3);
  EXPECT_EQ(val ^= I4(7), 4);
  EXPECT_EQ(val, 4);
  EXPECT_EQ(val >>= I4(1), 2);
  EXPECT_EQ(val, 2);
  EXPECT_EQ(val <<= I4(1), 4);
  EXPECT_EQ(val, 4);
  EXPECT_EQ(val >>= I4(1), 2);
  EXPECT_EQ(val, 2);
  EXPECT_EQ(val <<= I4(1), 4);
  EXPECT_EQ(val, 4);
  EXPECT_EQ(+val, 4);
  EXPECT_EQ(-val, -4);
  EXPECT_EQ(!val, false);
  EXPECT_EQ(~val, ~4);
  EXPECT_EQ(val && I4(2), true);
  EXPECT_EQ(val && I4(0), false);
  EXPECT_EQ(val || I4(0), true);
  EXPECT_EQ(I4(0) || I4(0), false);
}
// Element type lists used to instantiate the typed test suites below.
using IntegralTypeList = ::testing::Types<int8_t, int16_t, int32_t, int64_t>;
using ArithmeticTypeList =
    ::testing::Types<int8_t, int16_t, int32_t, int64_t, float, double>;
// Typed fixture for operations valid on any arithmetic type.
template <class T>
struct ArithmeticTypeI4Test : testing::Test {};
TYPED_TEST_SUITE(ArithmeticTypeI4Test, ArithmeticTypeList);
// Mixed I4 <op> T comparisons and arithmetic must agree with the same
// operations performed on plain values of TypeParam.
TYPED_TEST(ArithmeticTypeI4Test, Arithmetic) {
  for (TypeParam i = -8; i < 8; ++i) {
    for (TypeParam j = -8; j < 8; ++j) {
      EXPECT_EQ(I4(i) == j, i == j);
      EXPECT_EQ(i == I4(j), i == j);
      EXPECT_EQ(I4(i) != j, i != j);
      EXPECT_EQ(i != I4(j), i != j);
      EXPECT_EQ(I4(i) > j, i > j);
      EXPECT_EQ(i > I4(j), i > j);
      EXPECT_EQ(I4(i) >= j, i >= j);
      EXPECT_EQ(i >= I4(j), i >= j);
      EXPECT_EQ(I4(i) < j, i < j);
      EXPECT_EQ(i < I4(j), i < j);
      EXPECT_EQ(I4(i) <= j, i <= j);
      EXPECT_EQ(i <= I4(j), i <= j);
    }
  }
  // Sequential checks: each line depends on the previous value of `val`.
  I4 val(0);
  const TypeParam one = TypeParam(1);
  const TypeParam two = TypeParam(2);
  const TypeParam three = TypeParam(3);
  const TypeParam four = TypeParam(4);
  EXPECT_EQ(val += one, 1);
  EXPECT_EQ(val, 1);
  EXPECT_EQ(val *= two, 2);
  EXPECT_EQ(val, 2);
  EXPECT_EQ(val /= two, 1);
  EXPECT_EQ(val, 1);
  EXPECT_EQ(val -= four, -3);
  EXPECT_EQ(val, -3);
  const I4 i4_three(3);
  EXPECT_EQ(i4_three + one, four);
  EXPECT_EQ(i4_three - one, two);
  EXPECT_EQ(i4_three * two, three * two);
  EXPECT_EQ(i4_three / two, three / two);
}
// Typed fixture for operations valid only on integral types (modulo,
// bitwise, shifts).
template <class T>
struct IntegralTypeI4Test : testing::Test {};
TYPED_TEST_SUITE(IntegralTypeI4Test, IntegralTypeList);
// Mixed I4 <op> T integral operations and their compound-assignment forms.
// The EXPECT_EQ sequence on `val` below is order-dependent.
TYPED_TEST(IntegralTypeI4Test, Arithmetic) {
  const TypeParam minus_one = TypeParam(-1);
  const TypeParam one = TypeParam(1);
  const TypeParam two = TypeParam(2);
  const TypeParam three = TypeParam(3);
  const TypeParam four = TypeParam(4);
  const TypeParam six = TypeParam(6);
  const TypeParam seven = TypeParam(7);
  const I4 i4_three(3);
  EXPECT_EQ(i4_three % two, one);
  EXPECT_EQ(i4_three & two, two);
  EXPECT_EQ(i4_three | four, seven);
  EXPECT_EQ(i4_three ^ four, seven);
  EXPECT_EQ(i4_three << one, six);
  EXPECT_EQ(i4_three >> one, one);
  I4 val(-3);
  EXPECT_EQ(val %= two, minus_one);
  EXPECT_EQ(val, -1);
  EXPECT_EQ(val = I4(7), seven);
  EXPECT_EQ(val, 7);
  EXPECT_EQ(val &= two, two);
  EXPECT_EQ(val, 2);
  EXPECT_EQ(val |= one, three);
  EXPECT_EQ(val, 3);
  EXPECT_EQ(val ^= seven, four);
  EXPECT_EQ(val, 4);
  EXPECT_EQ(val >>= one, two);
  EXPECT_EQ(val, 2);
  EXPECT_EQ(val <<= one, four);
  EXPECT_EQ(val, 4);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/i4.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/i4_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a7fcf972-41f2-48c3-b0a6-e320192e62bb | cpp | tensorflow/tensorflow | dispatch | tensorflow/lite/experimental/shlo/legacy/src/dispatch.h | tensorflow/lite/experimental/shlo/dispatch_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_SHLO_LEGACY_SRC_DISPATCH_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_SHLO_LEGACY_SRC_DISPATCH_H_
namespace stablehlo {
// Each DISPATCH_* macro expands to a switch over the runtime element type
// and calls `name<StorageT, ExpressedT>(...)`, returning its result.
// Unsupported types return absl::InvalidArgumentError. Comments live above
// the macros because // comments cannot appear on backslash-continued lines.
//
// Dispatches for the signed integer element types (kSI8/kSI16/kSI32).
#define DISPATCH_INT(name, element_type, ...)                             \
  {                                                                       \
    switch (element_type) {                                               \
      case ElementType::kSI8:                                             \
        return name<ElementType::kSI8, ElementType::kSI8>(__VA_ARGS__);   \
      case ElementType::kSI16:                                            \
        return name<ElementType::kSI16, ElementType::kSI16>(__VA_ARGS__); \
      case ElementType::kSI32:                                            \
        return name<ElementType::kSI32, ElementType::kSI32>(__VA_ARGS__); \
      default:                                                            \
        return absl::InvalidArgumentError("Unsupported element type");    \
    }                                                                     \
  }
// Dispatches for the floating-point element types (kBF16/kF16/kF32).
#define DISPATCH_FLOAT(name, element_type, ...)                           \
  {                                                                       \
    switch (element_type) {                                               \
      case ElementType::kBF16:                                            \
        return name<ElementType::kBF16, ElementType::kBF16>(__VA_ARGS__); \
      case ElementType::kF16:                                             \
        return name<ElementType::kF16, ElementType::kF16>(__VA_ARGS__);   \
      case ElementType::kF32:                                             \
        return name<ElementType::kF32, ElementType::kF32>(__VA_ARGS__);   \
      default:                                                            \
        return absl::InvalidArgumentError("Unsupported element type");    \
    }                                                                     \
  }
// Dispatches for both the signed integer and floating-point element types.
#define DISPATCH_INT_FLOAT(name, element_type, ...)                       \
  {                                                                       \
    switch (element_type) {                                               \
      case ElementType::kSI8:                                             \
        return name<ElementType::kSI8, ElementType::kSI8>(__VA_ARGS__);   \
      case ElementType::kSI16:                                            \
        return name<ElementType::kSI16, ElementType::kSI16>(__VA_ARGS__); \
      case ElementType::kSI32:                                            \
        return name<ElementType::kSI32, ElementType::kSI32>(__VA_ARGS__); \
      case ElementType::kBF16:                                            \
        return name<ElementType::kBF16, ElementType::kBF16>(__VA_ARGS__); \
      case ElementType::kF16:                                             \
        return name<ElementType::kF16, ElementType::kF16>(__VA_ARGS__);   \
      case ElementType::kF32:                                             \
        return name<ElementType::kF32, ElementType::kF32>(__VA_ARGS__);   \
      default:                                                            \
        return absl::InvalidArgumentError("Unsupported element type");    \
    }                                                                     \
  }
// Dispatches for boolean (kI1), signed integer, and floating-point types.
#define DISPATCH_BOOL_INT_FLOAT(name, element_type, ...)                  \
  {                                                                       \
    switch (element_type) {                                               \
      case ElementType::kI1:                                              \
        return name<ElementType::kI1, ElementType::kI1>(__VA_ARGS__);     \
      case ElementType::kSI8:                                             \
        return name<ElementType::kSI8, ElementType::kSI8>(__VA_ARGS__);   \
      case ElementType::kSI16:                                            \
        return name<ElementType::kSI16, ElementType::kSI16>(__VA_ARGS__); \
      case ElementType::kSI32:                                            \
        return name<ElementType::kSI32, ElementType::kSI32>(__VA_ARGS__); \
      case ElementType::kBF16:                                            \
        return name<ElementType::kBF16, ElementType::kBF16>(__VA_ARGS__); \
      case ElementType::kF16:                                             \
        return name<ElementType::kF16, ElementType::kF16>(__VA_ARGS__);   \
      case ElementType::kF32:                                             \
        return name<ElementType::kF32, ElementType::kF32>(__VA_ARGS__);   \
      default:                                                            \
        return absl::InvalidArgumentError("Unsupported element type");    \
    }                                                                     \
  }
// Dispatches for quantized tensors: the outer switch selects the integral
// storage type, the inner switch the floating-point expressed type. The
// break statements after the inner switches are unreachable (every inner
// path returns) and are kept only to satisfy fall-through diagnostics.
#define DISPATCH_QUANTIZED(name, storage_type, expressed_type, ...)         \
  {                                                                         \
    switch (storage_type) {                                                 \
      case ElementType::kSI8:                                               \
        switch (expressed_type) {                                           \
          case ElementType::kBF16:                                          \
            return name<ElementType::kSI8, ElementType::kBF16>(__VA_ARGS__); \
          case ElementType::kF16:                                           \
            return name<ElementType::kSI8, ElementType::kF16>(__VA_ARGS__); \
          case ElementType::kF32:                                           \
            return name<ElementType::kSI8, ElementType::kF32>(__VA_ARGS__); \
          default:                                                          \
            return absl::InvalidArgumentError("Unsupported expressed type"); \
        }                                                                   \
        break;                                                              \
      case ElementType::kSI16:                                              \
        switch (expressed_type) {                                           \
          case ElementType::kBF16:                                          \
            return name<ElementType::kSI16, ElementType::kBF16>(__VA_ARGS__); \
          case ElementType::kF16:                                           \
            return name<ElementType::kSI16, ElementType::kF16>(__VA_ARGS__); \
          case ElementType::kF32:                                           \
            return name<ElementType::kSI16, ElementType::kF32>(__VA_ARGS__); \
          default:                                                          \
            return absl::InvalidArgumentError("Unsupported expressed type"); \
        }                                                                   \
        break;                                                              \
      case ElementType::kSI32:                                              \
        switch (expressed_type) {                                           \
          case ElementType::kBF16:                                          \
            return name<ElementType::kSI32, ElementType::kBF16>(__VA_ARGS__); \
          case ElementType::kF16:                                           \
            return name<ElementType::kSI32, ElementType::kF16>(__VA_ARGS__); \
          case ElementType::kF32:                                           \
            return name<ElementType::kSI32, ElementType::kF32>(__VA_ARGS__); \
          default:                                                          \
            return absl::InvalidArgumentError("Unsupported expressed type"); \
        }                                                                   \
        break;                                                              \
      default:                                                              \
        return absl::InvalidArgumentError("Unsupported storage type");      \
    }                                                                       \
  }
}
#endif | #include "tensorflow/lite/experimental/shlo/dispatch.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/status_matcher.h"
namespace {
// Function with no return value, used to exercise RETURN_OK_STATUS_IF_VOID
// on a void expression.
void VoidFunction() {}
// RETURN_OK_STATUS_IF_VOID must turn a void call into absl::OkStatus().
TEST(DispatchTest, ReturnAbslOkIfVoidCompiles) {
  auto f = []() -> absl::Status { RETURN_OK_STATUS_IF_VOID(VoidFunction()); };
  EXPECT_OK(f());
}
// A non-void absl::Status expression must be forwarded unchanged (OK case).
TEST(DispatchTest, AbslOkStatusCompiles) {
  auto f = []() -> absl::Status { RETURN_OK_STATUS_IF_VOID(absl::OkStatus()); };
  EXPECT_OK(f())
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/legacy/src/dispatch.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/dispatch_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9686cbe2-1b68-4171-9e71-4db9ddcde1c8 | cpp | tensorflow/tensorflow | matchers | tensorflow/lite/testing/matchers.h | tensorflow/lite/testing/matchers_test.cc | #ifndef TENSORFLOW_LITE_TESTING_MATCHERS_H_
#define TENSORFLOW_LITE_TESTING_MATCHERS_H_
#include <algorithm>
#include <cfloat>
#include <cmath>
#include <cstring>
#include <iomanip>
#include <iostream>
#include <limits>
#include <sstream>
#include <string>
#include <string_view>
#include <vector>
#include <gtest/gtest.h>
#include "absl/base/casts.h"
#include "absl/log/absl_check.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/types/span.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/kernel_util.h"
// GoogleTest printer for TfLiteTensor. Defined at global scope (the
// namespace of the C struct TfLiteTensor) so the framework finds it via
// argument-dependent lookup; delegates to TFLite's tensor debug string.
inline void PrintTo(const TfLiteTensor& tensor, std::ostream* os) {
  *os << "\n" << ::tflite::GetTensorDebugString(&tensor);
}
namespace testing {
namespace tflite {
namespace internal {
// Whether float tensor data is compared bit-exactly or within a tolerance.
enum class FloatComparison { kExact, kApproximate };
// Comparison configuration assembled by EqualsTensor()/Approximately().
struct TensorComparison {
  FloatComparison float_comp = FloatComparison::kExact;
  bool custom_margin = false;    // true when the caller supplied a margin
  bool custom_fraction = false;  // true when the caller supplied a fraction
  double margin = 0.0;           // maximum allowed absolute error
  double fraction = 0.0;         // maximum allowed relative error
};
// Polymorphic GoogleMock matcher comparing a TfLiteTensor against an
// expected tensor: dtype, rank, shape, byte count, and data must all agree.
// The expected tensor is stored by value, which shallow-copies its dims and
// data pointers — the matcher does not own them, so the expected tensor's
// buffers must outlive the matcher.
class TensorMatcher {
 public:
  TensorMatcher(const TensorComparison& comp, const TfLiteTensor& expected)
      : comp_(comp), expected_(expected) {}
  // gMock entry point: returns whether `actual` matches and, on mismatch,
  // streams a human-readable diff to the listener.
  bool MatchAndExplain(const TfLiteTensor& actual,
                       MatchResultListener* listener) const {
    const bool match = Match(actual)
    if (listener->IsInterested() && !match) *listener << DescribeDiff(actual);
    return match;
  }
  void DescribeTo(std::ostream* os) const { Describe(os, "is "); }
  void DescribeNegationTo(std::ostream* os) const { Describe(os, "is not "); }
  // Switches float comparison from bit-exact to tolerance-based.
  void SetCompareApproximately() {
    comp_.float_comp = FloatComparison::kApproximate;
  }
  // Sets a custom absolute error bound; must be non-negative.
  void SetMargin(double margin) {
    ABSL_QCHECK_GE(margin, 0.0)
        << "Using a negative margin for Approximately";
    comp_.custom_margin = true;
    comp_.margin = margin;
  }
  // Sets a custom relative error bound; must be in [0, 1).
  void SetFraction(double fraction) {
    ABSL_QCHECK(0.0 <= fraction && fraction < 1.0)
        << "Fraction for Approximately must be >= 0.0 and < 1.0";
    comp_.custom_fraction = true;
    comp_.fraction = fraction;
  }

 private:
  // Converts a flat element index into an "[i][j]..." string using the
  // tensor's dims (row-major order); empty string for rank-0 tensors.
  static std::string TensorIndex(int index, const TfLiteIntArray* dims) {
    if (!dims->size) return "";
    std::vector<int> index_nd(dims->size);
    for (int i = dims->size - 1; i >= 0; --i) {
      index_nd[i] = index % dims->data[i];
      index /= dims->data[i];
    }
    return absl::StrCat("[", absl::StrJoin(index_nd, "]["), "]");
  }
  // Compares two floats under the configured policy. In approximate mode
  // with no custom bounds, values within 32 * FLT_EPSILON (absolute, or
  // relative to the larger magnitude) are considered equal; non-finite
  // values only match when bitwise equal via the x == y early-out.
  bool CompareFloat(float x, float y) const {
    switch (comp_.float_comp) {
      case FloatComparison::kExact:
        return x == y;
      case FloatComparison::kApproximate:
        if (x == y) return true;
        float fraction, margin;
        if (comp_.custom_margin || comp_.custom_fraction) {
          fraction = comp_.fraction;
          margin = comp_.margin;
        } else {
          constexpr float kEpsilon = 32 * FLT_EPSILON;
          if (std::fabs(x) <= kEpsilon && std::fabs(y) <= kEpsilon) return true;
          fraction = kEpsilon;
          margin = kEpsilon;
        }
        if (!std::isfinite(x) || !std::isfinite(y)) return false;
        float relative_margin = fraction * std::max(std::fabs(x), std::fabs(y));
        return std::fabs(x - y) <= std::max(margin, relative_margin);
    }
    return false;
  }
  // Writes the matcher description ("is [approximately] equal to <tensor>"),
  // including any custom margin/fraction at full double precision.
  void Describe(std::ostream* os, std::string_view prefix) const {
    *os << prefix;
    if (comp_.float_comp == FloatComparison::kApproximate) {
      *os << "approximately ";
      if (comp_.custom_margin || comp_.custom_fraction) {
        *os << "(";
        if (comp_.custom_margin) {
          std::stringstream ss;
          ss << std::setprecision(std::numeric_limits<double>::digits10 + 2)
             << comp_.margin;
          *os << "absolute error of float values <= " << ss.str();
        }
        if (comp_.custom_margin && comp_.custom_fraction) {
          *os << " or ";
        }
        if (comp_.custom_fraction) {
          std::stringstream ss;
          ss << std::setprecision(std::numeric_limits<double>::digits10 + 2)
             << comp_.fraction;
          *os << "relative error of float values <= " << ss.str();
        }
        *os << ") ";
      }
    }
    *os << "equal to ";
    PrintTo(expected_, os);
  }
  // Builds a mismatch explanation: reports the first differing metadata
  // field, or up to kMaxMismatches differing elements.
  // NOTE(review): the per-element loop reads data.f, i.e. it assumes a
  // float32 tensor regardless of `type` — confirm for other dtypes.
  std::string DescribeDiff(const TfLiteTensor& actual) const {
    if (actual.type != expected_.type) {
      return absl::StrCat(
          "dtypes don't match: ", TfLiteTypeGetName(actual.type), " vs ",
          TfLiteTypeGetName(expected_.type));
    }
    if (!actual.dims) return "actual.dims is null.";
    if (!expected_.dims) return "expected.dims is null.";
    if (actual.dims->size != expected_.dims->size) {
      return absl::StrCat("dims don't match: ", actual.dims->size, "D vs ",
                          expected_.dims->size, "D");
    }
    if (int n = actual.dims->size;
        std::memcmp(actual.dims->data, expected_.dims->data, n * sizeof(int))) {
      return absl::StrCat(
          "shapes don't match: ", ::tflite::GetShapeDebugString(actual.dims),
          " vs ", ::tflite::GetShapeDebugString(expected_.dims));
    }
    if (!actual.data.raw) return "actual.data is null.";
    if (!expected_.data.raw) return "expected.data is null.";
    if (actual.bytes != expected_.bytes) {
      return absl::StrCat("bytes don't match: ", actual.bytes, " vs ",
                          expected_.bytes);
    }
    std::string error = "\n";
    TfLiteIntArray* dims = actual.dims;
    int n = ::tflite::NumElements(dims);
    constexpr int kMaxMismatches = 20;
    for (int i = 0, j = 0; i < n; ++i) {
      if (!CompareFloat(actual.data.f[i], expected_.data.f[i])) {
        absl::StrAppend(&error, "data", TensorIndex(i, dims),
                        " don't match: ", actual.data.f[i], " vs ",
                        expected_.data.f[i], "\n");
        ++j;
      }
      if (j == kMaxMismatches) {
        absl::StrAppend(&error, "Too many mismatches; stopping after ", j,
                        ".\n");
        break;
      }
    }
    return error;
  }
  // Full match check. Exact mode compares the raw bytes (dtype-agnostic);
  // approximate mode iterates elements as float32 via CompareFloat.
  bool Match(const TfLiteTensor& actual) const {
    if (actual.type != expected_.type) return false;
    if (!actual.dims) return false;
    if (!expected_.dims) return false;
    if (actual.dims->size != expected_.dims->size) return false;
    if (int n = actual.dims->size;
        std::memcmp(actual.dims->data, expected_.dims->data, n * sizeof(int))) {
      return false;
    }
    if (!actual.data.raw) return false;
    if (!expected_.data.raw) return false;
    if (actual.bytes != expected_.bytes) return false;
    switch (comp_.float_comp) {
      case FloatComparison::kExact:
        if (int n = actual.bytes;
            std::memcmp(actual.data.raw, expected_.data.raw, n)) {
          return false;
        }
        break;
      case FloatComparison::kApproximate:
        for (int i = 0, n = ::tflite::NumElements(actual.dims); i < n; ++i) {
          if (!CompareFloat(actual.data.f[i], expected_.data.f[i])) {
            return false;
          }
        }
        break;
    };
    return true;
  }
  TensorComparison comp_;
  // Shallow copy of the expected tensor; dims/data are not owned.
  TfLiteTensor expected_;
};
}
// Test helper that builds a constant TfLiteTensor over caller-owned data.
// The tensor owns only its `dims` array (freed in the destructor); `buf`
// must outlive the tensor. Copying is deleted: the implicitly generated
// copy would alias `dims` and cause a double free in ~SimpleConstTensor
// (rule of five).
struct SimpleConstTensor : public TfLiteTensor {
  template <typename T>
  SimpleConstTensor(TfLiteType dtype, const std::vector<int>& shape,
                    absl::Span<T> buf) {
    type = dtype;
    dims = TfLiteIntArrayCreate(shape.size());
    std::memcpy(dims->data, shape.data(), shape.size() * sizeof(int));
    data = {.data = buf.data()};
    bytes = buf.size() * sizeof(T);
    sparsity = nullptr;
  }
  SimpleConstTensor(const SimpleConstTensor&) = delete;
  SimpleConstTensor& operator=(const SimpleConstTensor&) = delete;
  ~SimpleConstTensor() { TfLiteIntArrayFree(dims); }
};
// GoogleTest printer for SimpleConstTensor: forwards to the TfLiteTensor
// printer so both types render identically in failure messages.
inline void PrintTo(const SimpleConstTensor& tensor,
                    std::ostream* os) {
  PrintTo(absl::implicit_cast<const TfLiteTensor&>(tensor), os);
}
// Creates a matcher comparing a TfLiteTensor for exact equality (dtype,
// shape, byte count, and bit-exact data) with `expected`. The matcher keeps
// a shallow copy of `expected`; its dims/data buffers must outlive the
// matcher.
inline PolymorphicMatcher<internal::TensorMatcher> EqualsTensor(
    const TfLiteTensor& expected) {
  internal::TensorComparison comp;
  return MakePolymorphicMatcher(internal::TensorMatcher(comp, expected));
}
// Relaxes an EqualsTensor matcher to approximate float comparison with the
// default tolerance (32 * FLT_EPSILON, absolute or relative).
template <class InnerTensorMatcherT>
inline InnerTensorMatcherT Approximately(InnerTensorMatcherT m) {
  m.mutable_impl().SetCompareApproximately();
  return m;
}
// Approximate comparison with a custom absolute error bound `margin` (>= 0).
template <class InnerTensorMatcherT>
inline InnerTensorMatcherT Approximately(InnerTensorMatcherT m, double margin) {
  m.mutable_impl().SetCompareApproximately();
  m.mutable_impl().SetMargin(margin);
  return m;
}
// Approximate comparison accepting values within `margin` absolute error or
// `fraction` relative error (0 <= fraction < 1), whichever bound is larger.
template <class InnerTensorMatcherT>
inline InnerTensorMatcherT Approximately(InnerTensorMatcherT m, double margin,
                                         double fraction) {
  m.mutable_impl().SetCompareApproximately();
  m.mutable_impl().SetMargin(margin);
  m.mutable_impl().SetFraction(fraction);
  return m;
}
}
}
#endif | #include "tensorflow/lite/testing/matchers.h"
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/types/span.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
namespace tflite {
namespace {
using ::testing::tflite::Approximately;
using ::testing::tflite::EqualsTensor;
using ::testing::tflite::SimpleConstTensor;
TEST(TensorMatcherTest, ExactlyEqualsSelf) {
float data[] = {2.71828f, 3.14159f};
SimpleConstTensor a(TfLiteType::kTfLiteFloat32, {1, 2}, absl::MakeSpan(data));
EXPECT_THAT(a, EqualsTensor(a));
}
TEST(TensorMatcherTest, ExactlyEqualsSame) {
float a_data[] = {2.71828f, 3.14159f};
SimpleConstTensor a(TfLiteType::kTfLiteFloat32, {1, 2},
absl::MakeSpan(a_data));
float b_data[] = {2.71828f, 3.14159f};
SimpleConstTensor b(TfLiteType::kTfLiteFloat32, {1, 2},
absl::MakeSpan(b_data));
EXPECT_THAT(a, EqualsTensor(b));
}
TEST(TensorMatcherTest, DoesNotExactlyEqualDifferentType) {
float data[] = {2.71828f, 3.14159f};
SimpleConstTensor a(TfLiteType::kTfLiteFloat32, {1, 2}, absl::MakeSpan(data));
SimpleConstTensor b(TfLiteType::kTfLiteInt32, {1, 2}, absl::MakeSpan(data));
EXPECT_THAT(a, Not(EqualsTensor(b)));
}
TEST(TensorMatcherTest, DoesNotExactlyEqualDifferentDims) {
float data[] = {2.71828f, 3.14159f};
SimpleConstTensor a(TfLiteType::kTfLiteFloat32, {1, 2}, absl::MakeSpan(data));
SimpleConstTensor b(TfLiteType::kTfLiteFloat32, {2, 1}, absl::MakeSpan(data));
EXPECT_THAT(a, Not(EqualsTensor(b)));
}
TEST(TensorMatcherTest, DoesNotExactlyEqualDifferentData) {
float a_data[] = {2.71828f, 3.14159f};
SimpleConstTensor a(TfLiteType::kTfLiteFloat32, {1, 2},
absl::MakeSpan(a_data));
float b_data[] = {3.14159f, 2.71828f};
SimpleConstTensor b(TfLiteType::kTfLiteFloat32, {1, 2},
absl::MakeSpan(b_data));
EXPECT_THAT(a, Not(EqualsTensor(b)));
}
// Values differing only in the 6th decimal place pass Approximately() with
// its default tolerance.
TEST(TensorMatcherTest, ApproximatelyEqualsDefaultMargin) {
  float a_data[] = {2.71828f, 3.14159f};
  SimpleConstTensor a(TfLiteType::kTfLiteFloat32, {1, 2},
                      absl::MakeSpan(a_data));
  float b_data[] = {2.718277f, 3.141593f};
  SimpleConstTensor b(TfLiteType::kTfLiteFloat32, {1, 2},
                      absl::MakeSpan(b_data));
  EXPECT_THAT(a, Approximately(EqualsTensor(b)));
}
// A 0.01 absolute margin absorbs differences in the 3rd decimal place.
TEST(TensorMatcherTest, ApproximatelyEqualsWithLooseMargin) {
  float a_data[] = {2.71828f, 3.14159f};
  SimpleConstTensor a(TfLiteType::kTfLiteFloat32, {1, 2},
                      absl::MakeSpan(a_data));
  float b_data[] = {2.72f, 3.14f};
  SimpleConstTensor b(TfLiteType::kTfLiteFloat32, {1, 2},
                      absl::MakeSpan(b_data));
  EXPECT_THAT(a, Approximately(EqualsTensor(b), 0.01));
}
// The same value pair fails when the absolute margin is tightened to 0.001.
TEST(TensorMatcherTest, DoesNotApproximatelyEqualWithTightMargin) {
  float a_data[] = {2.71828f, 3.14159f};
  SimpleConstTensor a(TfLiteType::kTfLiteFloat32, {1, 2},
                      absl::MakeSpan(a_data));
  float b_data[] = {2.72f, 3.14f};
  SimpleConstTensor b(TfLiteType::kTfLiteFloat32, {1, 2},
                      absl::MakeSpan(b_data));
  EXPECT_THAT(a, Not(Approximately(EqualsTensor(b), 0.001)));
}
// With zero absolute margin, a loose relative fraction (0.999) still accepts
// the small difference between the tensors.
TEST(TensorMatcherTest, ApproximatelyEqualsWithLooseFraction) {
  float a_data[] = {2.71828f, 3.14159f};
  SimpleConstTensor a(TfLiteType::kTfLiteFloat32, {1, 2},
                      absl::MakeSpan(a_data));
  float b_data[] = {2.72f, 3.14f};
  SimpleConstTensor b(TfLiteType::kTfLiteFloat32, {1, 2},
                      absl::MakeSpan(b_data));
  EXPECT_THAT(
      a, Approximately(EqualsTensor(b), 0.0, 0.999));
}
// A very tight relative fraction (0.0001) rejects the same difference.
TEST(TensorMatcherTest, DoesNotApproximatelyEqualWithTightFraction) {
  float a_data[] = {2.71828f, 3.14159f};
  SimpleConstTensor a(TfLiteType::kTfLiteFloat32, {1, 2},
                      absl::MakeSpan(a_data));
  float b_data[] = {2.72f, 3.14f};
  SimpleConstTensor b(TfLiteType::kTfLiteFloat32, {1, 2},
                      absl::MakeSpan(b_data));
  EXPECT_THAT(a, Not(Approximately(EqualsTensor(b), 0.0,
                                   0.0001)));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/testing/matchers.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/testing/matchers_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8dd52495-17d6-4d03-9b40-62c146124490 | cpp | tensorflow/tensorflow | binary_elementwise | tensorflow/lite/experimental/shlo/ops/binary_elementwise.h | tensorflow/lite/experimental/shlo/ops/binary_elementwise_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OPS_BINARY_ELEMENTWISE_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OPS_BINARY_ELEMENTWISE_H_
#include "tensorflow/lite/experimental/shlo/data_type.h"
#include "tensorflow/lite/experimental/shlo/quantize.h"
#include "tensorflow/lite/experimental/shlo/quantized_tensor_element_type.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
namespace detail {
// Applies binary `func` element-wise to two per-tensor-quantized operands:
// each (lhs, rhs) pair is dequantized to the expressed type, combined with
// `func`, and the result is re-quantized into `output` using output's
// per-tensor parameters.
// Assumes lhs, rhs and output share the same element count and the given
// storage/expressed types -- TODO confirm callers validate this upstream.
template <DataType storage_type, DataType expressed_type, typename F>
void DequantizeOpQuantizePerTensor(F&& func, const Tensor& lhs,
                                   const Tensor& rhs, Tensor& output) {
  using StorageT = StorageType<storage_type>;
  using ExpressedT = StorageType<expressed_type>;
  const DimensionSize num_elements = lhs.NumElements();
  // Scalar quantization parameters for each tensor (per-tensor scheme).
  const StorageT lhs_zero_point =
      lhs.quantized_per_tensor_element_type().ZeroPointAs<storage_type>();
  const ExpressedT lhs_scale =
      lhs.quantized_per_tensor_element_type().ScaleAs<expressed_type>();
  const StorageT rhs_zero_point =
      rhs.quantized_per_tensor_element_type().ZeroPointAs<storage_type>();
  const ExpressedT rhs_scale =
      rhs.quantized_per_tensor_element_type().ScaleAs<expressed_type>();
  const StorageT output_zero_point =
      output.quantized_per_tensor_element_type().ZeroPointAs<storage_type>();
  const ExpressedT output_scale =
      output.quantized_per_tensor_element_type().ScaleAs<expressed_type>();
  const StorageT* lhs_data = lhs.GetDataAs<storage_type>();
  const StorageT* rhs_data = rhs.GetDataAs<storage_type>();
  StorageT* output_data = output.GetDataAs<storage_type>();
  // Hoist the division out of the loop: Quantize multiplies by 1/scale.
  const ExpressedT inv_scale = static_cast<ExpressedT>(1) / output_scale;
  for (DimensionSize i = 0; i < num_elements;
       ++i, ++lhs_data, ++rhs_data, ++output_data) {
    const ExpressedT dequantized_lhs =
        Dequantize(*lhs_data, lhs_zero_point, lhs_scale);
    const ExpressedT dequantized_rhs =
        Dequantize(*rhs_data, rhs_zero_point, rhs_scale);
    const ExpressedT dequantized_res = func(dequantized_lhs, dequantized_rhs);
    *output_data = Quantize<storage_type, expressed_type>(
        dequantized_res, output_zero_point, inv_scale);
  }
}
// Applies binary `func` element-wise to two unquantized tensors, writing the
// result (cast back to the storage type) into `output`.
// Assumes lhs, rhs and output have the same element count -- TODO confirm.
template <DataType data_type, class F>
void EvaluateNoQuantization(F&& func, const Tensor& lhs, const Tensor& rhs,
                            Tensor& output) {
  using T = StorageType<data_type>;
  const T* lhs_data = lhs.GetDataAs<data_type>();
  const T* rhs_data = rhs.GetDataAs<data_type>();
  T* output_data = output.GetDataAs<data_type>();
  const DimensionSize num_elements = lhs.NumElements();
  for (DimensionSize i = 0; i < num_elements;
       ++i, ++output_data, ++lhs_data, ++rhs_data) {
    *output_data = static_cast<T>(func(*lhs_data, *rhs_data));
  }
}
}
}
#endif | #include "tensorflow/lite/experimental/shlo/ops/binary_elementwise.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/shlo/ops/test_util.h"
#include "tensorflow/lite/experimental/shlo/quantized_tensor_element_type.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
using testing::ElementsAreArray;
namespace shlo_ref {
namespace {
// Binary functor used by the tests below: element-wise addition.
struct TestOp {
  template <typename T>
  T operator()(const T& a, const T& b) {
    const T result = a + b;
    return result;
  }
};
// Typed test fixture for detail::EvaluateNoQuantization, instantiated for
// every arithmetic storage type.
template <class T>
struct EvaluateNoQuantizationTest : ::testing::Test {};
TYPED_TEST_SUITE(EvaluateNoQuantizationTest, ArithmeticTestTypes,
                 TestParamNames);
// EvaluateNoQuantization must match a plain element-wise transform of the
// raw buffers with the same functor.
TYPED_TEST(EvaluateNoQuantizationTest, ArithmeticTensorsWithTestOp) {
  using StorageT = typename TypeParam::StorageT;
  const Shape shape({2, 3, 4});
  Vector<StorageT> lhs_data =
      RandomBuffer<TypeParam::kStorage>(shape, -5, 5);
  Vector<StorageT> rhs_data =
      RandomBuffer<TypeParam::kStorage>(shape, -5, 5);
  Vector<StorageT> output_data(shape.NumElements());
  Tensor lhs_tensor{
      .type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
      .data = lhs_data.data()};
  Tensor rhs_tensor{
      .type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
      .data = rhs_data.data()};
  Tensor output_tensor{
      .type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
      .data = output_data.data()};
  // Reference result computed directly on the flat buffers.
  Vector<StorageT> expected_data(shape.NumElements());
  absl::c_transform(lhs_data, rhs_data, expected_data.begin(), TestOp());
  detail::EvaluateNoQuantization<TypeParam::kStorage>(
      TestOp(), lhs_tensor, rhs_tensor, output_tensor);
  EXPECT_THAT(output_data, ElementsAreArray(expected_data));
}
// Typed test fixture for detail::DequantizeOpQuantizePerTensor, instantiated
// for every quantized (storage, expressed) type combination.
template <class T>
struct DequantizeOpQuantizePerTensor : ::testing::Test {};
TYPED_TEST_SUITE(DequantizeOpQuantizePerTensor, QuantizedTestTypes,
                 TestParamNames);
// DequantizeOpQuantizePerTensor must match a reference computed by manually
// dequantizing both operands, applying the functor, and re-quantizing with
// the output's parameters.
TYPED_TEST(DequantizeOpQuantizePerTensor, QuantizedPerTensorWithTestOp) {
  using StorageT = typename TypeParam::StorageT;
  using ExpressedT = typename TypeParam::ExpressedT;
  const Shape shape({2, 3, 4});
  Vector<StorageT> lhs_data =
      RandomBuffer<TypeParam::kStorage>(shape, -5, 5);
  Vector<StorageT> rhs_data =
      RandomBuffer<TypeParam::kStorage>(shape, -5, 5);
  Vector<StorageT> output_data(shape.NumElements());
  // Distinct quantization parameters per tensor to exercise all three sets.
  const ExpressedT lhs_scale = static_cast<ExpressedT>(1.3);
  const StorageT lhs_zero_point = static_cast<StorageT>(4);
  const ExpressedT rhs_scale = static_cast<ExpressedT>(1.2);
  const StorageT rhs_zero_point = static_cast<StorageT>(5);
  const ExpressedT output_scale = static_cast<ExpressedT>(1.5);
  const StorageT output_zero_point = static_cast<StorageT>(3);
  Tensor lhs_tensor{.type =
                        QuantizedPerTensorTensorType{
                            .shape = shape,
                            .element_type = QuantizedElementTypePerTensor(
                                TypeParam::kStorage, lhs_zero_point,
                                TypeParam::kExpressed, lhs_scale)},
                    .data = lhs_data.data()};
  Tensor rhs_tensor{.type =
                        QuantizedPerTensorTensorType{
                            .shape = shape,
                            .element_type = QuantizedElementTypePerTensor(
                                TypeParam::kStorage, rhs_zero_point,
                                TypeParam::kExpressed, rhs_scale)},
                    .data = rhs_data.data()};
  Tensor output_tensor{.type =
                           QuantizedPerTensorTensorType{
                               .shape = shape,
                               .element_type = QuantizedElementTypePerTensor(
                                   TypeParam::kStorage, output_zero_point,
                                   TypeParam::kExpressed, output_scale)},
                       .data = output_data.data()};
  Vector<StorageT> expected_data(shape.NumElements());
  absl::c_transform(
      lhs_data, rhs_data, expected_data.begin(),
      [lhs_zero_point, lhs_scale, rhs_zero_point, rhs_scale, output_zero_point,
       output_scale](auto lhs, auto rhs) {
        const ExpressedT dequantized_lhs =
            Dequantize(lhs, lhs_zero_point, lhs_scale);
        const ExpressedT dequantized_rhs =
            Dequantize(rhs, rhs_zero_point, rhs_scale);
        const ExpressedT dequantized_res =
            TestOp()(dequantized_lhs, dequantized_rhs);
        return Quantize<TypeParam::kStorage, TypeParam::kExpressed>(
            dequantized_res, output_zero_point,
            static_cast<ExpressedT>(1.) / output_scale);
      });
  detail::DequantizeOpQuantizePerTensor<TypeParam::kStorage,
                                        TypeParam::kExpressed>(
      TestOp(), lhs_tensor, rhs_tensor, output_tensor);
  EXPECT_THAT(output_data, ElementsAreArray(expected_data));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/binary_elementwise.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/binary_elementwise_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ed9a82cd-053d-4367-8614-919d3297f750 | cpp | tensorflow/tensorflow | unary_elementwise | tensorflow/lite/experimental/shlo/ops/unary_elementwise.h | tensorflow/lite/experimental/shlo/ops/unary_elementwise_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OPS_UNARY_ELEMENTWISE_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OPS_UNARY_ELEMENTWISE_H_
#include <cstddef>
#include "absl/algorithm/container.h"
#include "absl/status/status.h"
#include "absl/types/span.h"
#include "tensorflow/lite/experimental/shlo/data_type.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/quantize.h"
#include "tensorflow/lite/experimental/shlo/quantized_tensor_element_type.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
namespace detail {
// Recursive worker for per-axis (per-channel) quantized unary evaluation.
// Walks the tensor dimension by dimension (`depth` is the current axis);
// `quantization_index` selects the zero-point/scale pair and is updated to
// the loop counter whenever the walk is on `quantization_dimension`.  At the
// innermost axis each element is dequantized, transformed by `op`, and
// re-quantized (clamped to [quantization_min, quantization_max]).
// `strides` gives the element step per axis; input and output are assumed to
// share the same layout -- TODO confirm with callers.
template <typename StorageT, typename ExpressedT, typename F>
void DequantizeOpQuantizePerAxisImpl(
    F& op, const Shape& shape, const Axis quantization_dimension,
    const StorageT quantization_min, const StorageT quantization_max,
    const absl::Span<const StorageT> input_zero_points,
    const absl::Span<const ExpressedT> input_scales,
    const absl::Span<const StorageT> output_zero_points,
    const absl::Span<const ExpressedT> output_scales, const Strides& strides,
    const StorageT* input_data, StorageT* output_data, const size_t depth,
    size_t quantization_index) {
  const DimensionSize dim = shape.Dim(depth);
  if (depth + 1 >= shape.Rank()) {
    // Base case: innermost axis -- process elements.
    for (DimensionSize i = 0; i < dim; ++i) {
      if (depth == quantization_dimension) {
        quantization_index = i;
      }
      const ExpressedT dequantized_input =
          Dequantize(*input_data, input_zero_points[quantization_index],
                     input_scales[quantization_index]);
      const ExpressedT dequantized_res = op(dequantized_input);
      *output_data = Quantize<StorageT, ExpressedT>(
          dequantized_res, output_zero_points[quantization_index],
          static_cast<ExpressedT>(1) / output_scales[quantization_index],
          quantization_min, quantization_max);
      output_data += strides[depth];
      input_data += strides[depth];
    }
  } else {
    // Recursive case: descend into the next axis for each index of this one.
    for (DimensionSize i = 0; i < dim; ++i) {
      if (depth == quantization_dimension) {
        quantization_index = i;
      }
      DequantizeOpQuantizePerAxisImpl(
          op, shape, quantization_dimension, quantization_min, quantization_max,
          input_zero_points, input_scales, output_zero_points, output_scales,
          strides, input_data, output_data, depth + 1, quantization_index);
      output_data += strides[depth];
      input_data += strides[depth];
    }
  }
}
// Entry point for per-axis quantized unary evaluation: gathers the per-axis
// parameters from the input/output tensors and delegates to the recursive
// DequantizeOpQuantizePerAxisImpl starting at depth 0.
template <DataType storage_type, DataType expressed_type, typename F>
void DequantizeOpQuantizePerAxis(F&& func, const Tensor& input,
                                 Tensor& output) {
  using StorageT = StorageType<storage_type>;
  using ExpressedT = StorageType<expressed_type>;
  const Shape& shape = input.shape();
  const Axis quantization_dimension =
      input.quantized_per_axis_element_type().QuantizedDimension();
  const absl::Span<const StorageT> input_zero_points =
      input.quantized_per_axis_element_type().ZeroPointsAs<storage_type>();
  const absl::Span<const ExpressedT> input_scales =
      input.quantized_per_axis_element_type().ScalesAs<expressed_type>();
  const absl::Span<const StorageT> output_zero_points =
      output.quantized_per_axis_element_type().ZeroPointsAs<storage_type>();
  const absl::Span<const ExpressedT> output_scales =
      output.quantized_per_axis_element_type().ScalesAs<expressed_type>();
  // Lifetime of the ComputeStrides temporary is extended by the const ref.
  const Strides& strides = ComputeStrides(shape);
  const StorageT* input_data = input.GetDataAs<storage_type>();
  StorageT* output_data = output.GetDataAs<storage_type>();
  DequantizeOpQuantizePerAxisImpl(
      func, shape, quantization_dimension, Storage<storage_type>::kMinValue,
      Storage<storage_type>::kMaxValue, input_zero_points, input_scales,
      output_zero_points, output_scales, strides, input_data, output_data,
      0, 0);
}
// Per-tensor quantized unary evaluation: dequantize each element with the
// input's scalar parameters, apply `func`, and re-quantize with the output's
// scalar parameters.  Input and output are assumed to have equal element
// counts -- TODO confirm callers validate this.
template <DataType storage_type, DataType expressed_type, typename F>
void DequantizeOpQuantizePerTensor(F& func, const Tensor& input,
                                   Tensor& output) {
  using StorageT = StorageType<storage_type>;
  using ExpressedT = StorageType<expressed_type>;
  const DimensionSize num_elements = input.NumElements();
  const StorageT input_zero_point =
      input.quantized_per_tensor_element_type().ZeroPointAs<storage_type>();
  const ExpressedT input_scale =
      input.quantized_per_tensor_element_type().ScaleAs<expressed_type>();
  const StorageT output_zero_point =
      output.quantized_per_tensor_element_type().ZeroPointAs<storage_type>();
  const ExpressedT output_scale =
      output.quantized_per_tensor_element_type().ScaleAs<expressed_type>();
  const StorageT* input_data = input.GetDataAs<storage_type>();
  StorageT* output_data = output.GetDataAs<storage_type>();
  // Hoist the division out of the loop: Quantize multiplies by 1/scale.
  const ExpressedT inv_scale = static_cast<ExpressedT>(1) / output_scale;
  for (DimensionSize i = 0; i < num_elements;
       ++i, ++input_data, ++output_data) {
    const ExpressedT dequantized_input =
        Dequantize(*input_data, input_zero_point, input_scale);
    const ExpressedT dequantized_res = func(dequantized_input);
    *output_data = Quantize<storage_type, expressed_type>(
        dequantized_res, output_zero_point, inv_scale);
  }
}
// Unquantized unary evaluation: applies `func` to every element of `input`,
// writing results into `output`'s buffer via a flat transform.
template <DataType data_type, class F>
void EvaluateNoQuantization(F&& func, const Tensor& input, Tensor& output) {
  absl::c_transform(input.Flat<data_type>(), output.GetDataAs<data_type>(),
                    static_cast<F&&>(func));
}
}
// Generic unary element-wise op: wraps a callable `func` that maps one
// element to one element.  Has no attributes of its own.
template <class F>
struct UnaryElementwiseOp {
  struct Attributes {};
  F func;  // The per-element transformation.
};
// Builds a UnaryElementwiseOp from an lvalue callable (copied in).
template <class F>
UnaryElementwiseOp<F> Create(typename UnaryElementwiseOp<F>::Attributes,
                             const F& func) {
  return UnaryElementwiseOp<F>{func};
}
// Builds a UnaryElementwiseOp from an rvalue callable (moved in).
template <class F>
UnaryElementwiseOp<F> Create(typename UnaryElementwiseOp<F>::Attributes,
                             F&& func) {
  return UnaryElementwiseOp<F>{static_cast<F&&>(func)};
}
// Shape propagation/validation step: checks that the input shape can be
// propagated to the output shape.  Returns an error status on mismatch.
template <class F>
absl::Status Prepare(UnaryElementwiseOp<F>& op, const Tensor& input,
                     Tensor& output) {
  return Propagate(input.shape(), output.shape());
}
// Runs the unary op element-wise over `input` into `output`, dispatching on
// the input's quantization mode:
//   - per-axis quantized: dequantize/requantize with per-channel parameters;
//   - per-tensor quantized: dequantize/requantize with scalar parameters;
//   - unquantized: apply op.func directly on the storage type.
// Shapes are assumed compatible (validated earlier by Prepare).
template <class F>
absl::Status Evaluate(UnaryElementwiseOp<F>& op, const Tensor& input,
                      Tensor& output) {
  if (input.IsPerAxisQuantized()) {
    DISPATCH_QUANTIZED(detail::DequantizeOpQuantizePerAxis,
                       input.quantized_per_axis_element_type().StorageType(),
                       input.quantized_per_axis_element_type().ExpressedType(),
                       op.func, input, output);
  } else if (input.IsPerTensorQuantized()) {
    // Terminate the macro invocation with ';' like the sibling branches do
    // (the semicolon was previously missing here, inconsistently).
    DISPATCH_QUANTIZED(
        detail::DequantizeOpQuantizePerTensor,
        input.quantized_per_tensor_element_type().StorageType(),
        input.quantized_per_tensor_element_type().ExpressedType(), op.func,
        input, output);
  } else {
    DISPATCH_BOOL_INT_FLOAT(detail::EvaluateNoQuantization,
                            input.tensor_element_type(), op.func, input,
                            output);
  }
  return absl::OkStatus();
}
}
#endif | #include "tensorflow/lite/experimental/shlo/ops/unary_elementwise.h"
#include <cstddef>
#include <cstdint>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "tensorflow/lite/experimental/shlo/data_type.h"
#include "tensorflow/lite/experimental/shlo/ops/test_util.h"
#include "tensorflow/lite/experimental/shlo/quantized_tensor_element_type.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/status_matcher.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
using testing::ElementsAreArray;
namespace shlo_ref {
namespace {
// Unary functor used by the tests below: absolute value.
struct Abs {
  template <class T>
  T operator()(const T& x) {
    if (x < static_cast<T>(0)) {
      return static_cast<T>(-x);
    }
    return x;
  }
};
// Bundles a (storage, expressed) type pair for typed tests, exposing both
// the DataType enum values and the corresponding C++ storage types.
template <DataType storage_type, DataType expressed_type = DataType::kF32>
struct TestParam {
  static constexpr DataType kStorage = storage_type;
  static constexpr DataType kExpressed = expressed_type;
  using StorageT = StorageType<storage_type>;
  using ExpressedT = StorageType<expressed_type>;
};
// Typed fixture covering unquantized arithmetic types.
template <class T>
struct UnaryElementWiseTest : ::testing::Test {};
TYPED_TEST_SUITE(UnaryElementWiseTest, ArithmeticTestTypes);
// Unquantized path: the op must match a direct element-wise Abs transform.
TYPED_TEST(UnaryElementWiseTest, NonQuantizedWithAbs) {
  using StorageT = typename TypeParam::StorageT;
  const Shape shape({2, 3, 4});
  Vector<StorageT> input_data = RandomBuffer<TypeParam::kStorage>(shape);
  Vector<StorageT> output_data(shape.NumElements());
  Tensor input_tensor{
      .type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
      .data = input_data.data()};
  Tensor output_tensor{
      .type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
      .data = output_data.data()};
  Vector<StorageT> expected_data(shape.NumElements());
  absl::c_transform(input_data, expected_data.begin(), Abs());
  auto op = Create(UnaryElementwiseOp<Abs>::Attributes{}, Abs());
  ASSERT_OK(Prepare(op, input_tensor, output_tensor));
  ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
  EXPECT_THAT(output_data, ElementsAreArray(expected_data));
}
// Typed fixture covering quantized (storage, expressed) type combinations.
template <class T>
struct QuantizedUnaryElementWiseTest : ::testing::Test {};
TYPED_TEST_SUITE(QuantizedUnaryElementWiseTest, QuantizedTestTypes);
// Per-tensor quantized path: the op must match a reference that manually
// dequantizes, applies Abs, and re-quantizes with the same scalar params.
TYPED_TEST(QuantizedUnaryElementWiseTest, QuantizedPerTensorWithAbs) {
  using StorageT = typename TypeParam::StorageT;
  using ExpressedT = typename TypeParam::ExpressedT;
  const Shape shape({2, 3, 4});
  Vector<StorageT> input_data = RandomBuffer<TypeParam::kStorage>(shape);
  Vector<StorageT> output_data(shape.NumElements());
  const ExpressedT scale = static_cast<ExpressedT>(1.5);
  const StorageT zero_point = static_cast<StorageT>(5);
  // Input and output share the same quantization parameters in this test.
  const QuantizedElementTypePerTensor tensor_type =
      QuantizedElementTypePerTensor(TypeParam::kStorage, zero_point,
                                    TypeParam::kExpressed, scale);
  Tensor input_tensor{
      .type = QuantizedPerTensorTensorType{.shape = shape,
                                           .element_type = tensor_type},
      .data = input_data.data()};
  Tensor output_tensor{
      .type = QuantizedPerTensorTensorType{.shape = shape,
                                           .element_type = tensor_type},
      .data = output_data.data()};
  Vector<StorageT> expected_data(shape.NumElements());
  absl::c_transform(
      input_data, expected_data.begin(), [zero_point, scale](auto v) {
        const ExpressedT dequantized_input = Dequantize(v, zero_point, scale);
        const ExpressedT dequantized_res = Abs()(dequantized_input);
        return Quantize<TypeParam::kStorage, TypeParam::kExpressed>(
            dequantized_res, zero_point, static_cast<ExpressedT>(1.) / scale);
      });
  auto op = Create(UnaryElementwiseOp<Abs>::Attributes{}, Abs());
  ASSERT_OK(Prepare(op, input_tensor, output_tensor));
  ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
  EXPECT_THAT(output_data, ElementsAreArray(expected_data));
}
// Per-axis quantized path: per-channel zero points/scales on the innermost
// dimension; the reference walks the flat buffer tracking which channel's
// parameters apply to each element.
TYPED_TEST(QuantizedUnaryElementWiseTest, QuantizedPerAxisWithAbs) {
  using StorageT = typename TypeParam::StorageT;
  using ExpressedT = typename TypeParam::ExpressedT;
  const Shape shape({4, 3, 2});
  const int quantized_dimension = 2;
  const size_t rank = shape.Rank();
  const Axis quantized_dimension_size = shape.Dim(quantized_dimension);
  // Number of consecutive flat elements sharing one channel's parameters.
  const size_t quantization_stride = [&] {
    size_t res = 1;
    for (int64_t i = rank - 1; i > quantized_dimension; --i) {
      res *= shape.Dim(i);
    }
    return res;
  }();
  Vector<StorageT> input_data = IotaBuffer<TypeParam::kStorage>(shape);
  Vector<StorageT> output_data(shape.NumElements());
  // One (zero_point, scale) pair per channel of the quantized dimension.
  Vector<StorageT> zero_points_data = RandomBuffer<TypeParam::kStorage>(
      Shape({shape.Dim(2)}), static_cast<StorageT>(-5),
      static_cast<StorageT>(5));
  Vector<ExpressedT> scales_data = RandomBuffer<TypeParam::kExpressed>(
      Shape({shape.Dim(2)}), static_cast<ExpressedT>(1),
      static_cast<ExpressedT>(3));
  const QuantizedElementTypePerAxis tensor_type = QuantizedElementTypePerAxis(
      TypeParam::kStorage, zero_points_data, TypeParam::kExpressed, scales_data,
      quantized_dimension);
  Tensor input_tensor{
      .type = QuantizedPerAxisTensorType{.shape = shape,
                                         .element_type = tensor_type},
      .data = input_data.data()};
  Tensor output_tensor{
      .type = QuantizedPerAxisTensorType{.shape = shape,
                                         .element_type = tensor_type},
      .data = output_data.data()};
  Vector<StorageT> expected_data(shape.NumElements());
  absl::c_transform(
      input_data, expected_data.begin(),
      [&, element_index = 0ull, quantization_index = 0ull](auto v) mutable {
        const StorageT zero_point = zero_points_data[quantization_index];
        const ExpressedT scale = scales_data[quantization_index];
        // Advance to the next channel once a full stride has been consumed.
        if (++element_index >= quantization_stride) {
          element_index = 0;
          if (++quantization_index >= quantized_dimension_size) {
            quantization_index = 0;
          }
        }
        const ExpressedT dequantized_input = Dequantize(v, zero_point, scale);
        const ExpressedT dequantized_res = Abs()(dequantized_input);
        return Quantize<TypeParam::kStorage, TypeParam::kExpressed>(
            dequantized_res, zero_point, ExpressedT(1) / scale);
      });
  auto op = Create(UnaryElementwiseOp<Abs>::Attributes{}, Abs());
  ASSERT_OK(Prepare(op, input_tensor, output_tensor));
  ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
  EXPECT_THAT(output_data, ElementsAreArray(expected_data));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/unary_elementwise.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/unary_elementwise_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
34deddd6-48d3-4706-9c67-4fd08209682b | cpp | tensorflow/tensorflow | reduced_precision_support | tensorflow/lite/tools/optimize/reduced_precision_support.h | tensorflow/lite/tools/optimize/reduced_precision_support_test.cc | #ifndef TENSORFLOW_LITE_TOOLS_OPTIMIZE_REDUCED_PRECISION_SUPPORT_H_
#define TENSORFLOW_LITE_TOOLS_OPTIMIZE_REDUCED_PRECISION_SUPPORT_H_
#include <string>
#include "tensorflow/compiler/mlir/lite/tools/optimize/reduced_precision_metadata.h"
namespace tflite {
namespace optimize {
// Parses one inference-type token at *idx.  On a match the index is advanced
// past the token, the corresponding inference bit is OR-ed into *mask, and
// true is returned; otherwise nothing is modified and false is returned.
// NOTE(review): the hard-coded 4 assumes both token constants ("fp16",
// "bf16"-style) are exactly four characters -- confirm against their
// definitions in reduced_precision_metadata.h.
inline bool ReadInferenceType(const std::string& metadata, size_t* idx,
                              ReducedPrecisionSupport* mask) {
  if (metadata.substr(*idx, 4) == kTfLiteFloat16String) {
    *idx += 4;
    *mask = *mask | ReducedPrecisionSupport::Float16Inference;
    return true;
  } else if (metadata.substr(*idx, 4) == kTfLiteBfloat16String) {
    *idx += 4;
    *mask = *mask | ReducedPrecisionSupport::Bfloat16Inference;
    return true;
  }
  return false;
}
// Parses one accumulation-type token at *idx ("fp16" or "fp32").  Advances
// the index and sets the matching accumulation bit on success; returns false
// without modification otherwise.
// NOTE(review): like ReadInferenceType, assumes four-character tokens.
inline bool ReadAccumulationType(const std::string& metadata, size_t* idx,
                                 ReducedPrecisionSupport* mask) {
  if (metadata.substr(*idx, 4) == kTfLiteFloat16String) {
    *idx += 4;
    *mask = *mask | ReducedPrecisionSupport::Float16Accumulation;
    return true;
  } else if (metadata.substr(*idx, 4) == kTfLiteFloat32String) {
    *idx += 4;
    *mask = *mask | ReducedPrecisionSupport::Float32Accumulation;
    return true;
  }
  return false;
}
// Parses a full reduced-precision metadata string of the form
// "<inference-types...><accumulation-marker><accumulation-type>", e.g.
// "fp16bf16accfp32".  On success writes the parsed bits into *mask and
// returns true; on any malformed input returns false and leaves *mask
// untouched.
inline bool SetMaskFromReducedPrecisionMetadata(const std::string& metadata,
                                                ReducedPrecisionSupport* mask) {
  size_t idx = 0;
  ReducedPrecisionSupport rsp = ReducedPrecisionSupport::None;
  // Consume one or more leading inference-type tokens.
  while (ReadInferenceType(metadata, &idx, &rsp)) {
  }
  if (idx == 0) {
    // No inference type present at the start of the string.
    return false;
  }
  // The accumulation marker must follow.  Use the constant's own length
  // instead of a hard-coded 3 so this check stays in sync with
  // kTfLiteAccumulationString, and build the std::string only once.
  const std::string accumulation_marker(kTfLiteAccumulationString);
  if (metadata.substr(idx, accumulation_marker.size()) != accumulation_marker) {
    return false;
  }
  idx += accumulation_marker.size();
  if (!ReadAccumulationType(metadata, &idx, &rsp)) {
    return false;
  }
  // The whole string must have been consumed; trailing characters are invalid.
  if (idx != metadata.length()) {
    return false;
  }
  *mask = rsp;
  return true;
}
}
}
#endif | #include "tensorflow/lite/tools/optimize/reduced_precision_support.h"
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/memory/memory.h"
#include "tensorflow/core/platform/init_main.h"
#include "tensorflow/lite/core/model.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/schema/schema_utils.h"
#include "tensorflow/lite/testing/util.h"
namespace tflite {
namespace optimize {
namespace utils {
namespace {
// Shared fixture for the reduced-precision metadata tests; provides an error
// reporter for test infrastructure that needs one.
class ReducedPrecisionSupportTest : public testing::Test {
 protected:
  tflite::TestErrorReporter error_reporter_;
};
// The mask enum must behave like a bit set: &, |, |=, &= all work.
TEST_F(ReducedPrecisionSupportTest, BitwiseOps) {
  ReducedPrecisionSupport mask0 = ReducedPrecisionSupport::None;
  ReducedPrecisionSupport mask1 = ReducedPrecisionSupport::Float16Inference;
  ReducedPrecisionSupport bf16 = ReducedPrecisionSupport::Bfloat16Inference;
  ReducedPrecisionSupport fp16 = ReducedPrecisionSupport::Float16Inference;
  EXPECT_EQ(mask0, mask0 & mask1);
  EXPECT_EQ(mask1, mask0 | mask1);
  mask0 |= fp16;
  EXPECT_EQ(true, SupportsFP16Inference(mask0));
  mask0 |= bf16;
  EXPECT_EQ(true, SupportsBfloat16Inference(mask0));
  ReducedPrecisionSupport mask2 = ReducedPrecisionSupport::Float16Accumulation;
  mask2 &= fp16;
  EXPECT_EQ(mask2, ReducedPrecisionSupport::None);
}
// The Supports*() predicates must reflect exactly the bits that are set.
TEST_F(ReducedPrecisionSupportTest, SupportTests) {
  ReducedPrecisionSupport bf16 = ReducedPrecisionSupport::Bfloat16Inference;
  ReducedPrecisionSupport fp16 = ReducedPrecisionSupport::Float16Inference;
  ReducedPrecisionSupport mask = bf16 | fp16;
  EXPECT_EQ(true, SupportsFP16Inference(mask));
  EXPECT_EQ(true, SupportsBfloat16Inference(mask));
  EXPECT_EQ(false, SupportsFP16Accumulation(mask));
  EXPECT_EQ(false, SupportsFP32Accumulation(mask));
  EXPECT_EQ(true, SupportsReducedPrecisionInference(mask));
  // NOTE(review): duplicated assertion -- the second check was possibly
  // intended to exercise a different predicate; confirm intent.
  EXPECT_EQ(true, SupportsReducedPrecisionInference(mask));
  EXPECT_EQ(false, SupportsEitherFP16OrFP32Accumulation(mask));
  mask = mask | ReducedPrecisionSupport::Float16Accumulation;
  EXPECT_EQ(true, SupportsFP16Accumulation(mask));
  EXPECT_EQ(false, SupportsFP32Accumulation(mask));
  EXPECT_EQ(true, SupportsEitherFP16OrFP32Accumulation(mask));
}
// MetadataForReducedPrecisionSupport must serialize a mask to the expected
// (key, value) metadata pair.
TEST_F(ReducedPrecisionSupportTest, MetadataStrings) {
  ReducedPrecisionSupport bf16 = ReducedPrecisionSupport::Bfloat16Inference;
  ReducedPrecisionSupport fp16 = ReducedPrecisionSupport::Float16Inference;
  ReducedPrecisionSupport accfp32 =
      ReducedPrecisionSupport::Float32Accumulation;
  ReducedPrecisionSupport accfp16 =
      ReducedPrecisionSupport::Float16Accumulation;
  ReducedPrecisionSupport maskA = bf16 | fp16 | accfp32;
  std::pair<std::string, std::string> ans =
      MetadataForReducedPrecisionSupport(maskA);
  EXPECT_EQ("fp16bf16accfp32", ans.second);
  EXPECT_EQ("reduced_precision_support", ans.first);
  ReducedPrecisionSupport maskB = fp16 | accfp16;
  EXPECT_EQ("fp16accfp16", MetadataForReducedPrecisionSupport(maskB).second);
}
// The token parsers must advance the index past each 4-char token and
// accumulate the corresponding bits.
TEST_F(ReducedPrecisionSupportTest, ReadStringsIntoMasks) {
  ReducedPrecisionSupport fp16 = ReducedPrecisionSupport::Float16Inference;
  ReducedPrecisionSupport accfp16 =
      ReducedPrecisionSupport::Float16Accumulation;
  ReducedPrecisionSupport maskfp16 = fp16;
  ReducedPrecisionSupport maskfp16accfp16 = fp16 | accfp16;
  ReducedPrecisionSupport mask = ReducedPrecisionSupport::None;
  size_t idx = 0;
  std::string metadata = "fp16accfp16";
  EXPECT_EQ(true, ReadInferenceType(metadata, &idx, &mask));
  EXPECT_EQ(maskfp16, mask);
  EXPECT_EQ(idx, 4);
  // Skip the "acc" marker (3 chars) and parse the accumulation token.
  idx = 7;
  EXPECT_EQ(true, ReadAccumulationType(metadata, &idx, &mask));
  EXPECT_EQ(maskfp16accfp16, mask);
  EXPECT_EQ(idx, 11);
}
// End-to-end parsing: valid metadata strings set the mask; malformed ones
// (missing inference type, garbage, trailing tokens, bad accumulation type)
// must fail and leave the mask at None.
TEST_F(ReducedPrecisionSupportTest, SetMasks) {
  ReducedPrecisionSupport fp16 = ReducedPrecisionSupport::Float16Inference;
  ReducedPrecisionSupport bf16 = ReducedPrecisionSupport::Bfloat16Inference;
  ReducedPrecisionSupport accfp16 =
      ReducedPrecisionSupport::Float16Accumulation;
  ReducedPrecisionSupport accfp32 =
      ReducedPrecisionSupport::Float32Accumulation;
  ReducedPrecisionSupport mask = ReducedPrecisionSupport::None;
  EXPECT_EQ(true, SetMaskFromReducedPrecisionMetadata("bf16accfp32", &mask));
  EXPECT_EQ(mask, bf16 | accfp32);
  mask = ReducedPrecisionSupport::None;
  EXPECT_EQ(true, SetMaskFromReducedPrecisionMetadata("fp16accfp16", &mask));
  EXPECT_EQ(mask, fp16 | accfp16);
  mask = ReducedPrecisionSupport::None;
  EXPECT_EQ(true,
            SetMaskFromReducedPrecisionMetadata("fp16bf16accfp32", &mask));
  EXPECT_EQ(mask, fp16 | bf16 | accfp32);
  mask = ReducedPrecisionSupport::None;
  EXPECT_EQ(false, SetMaskFromReducedPrecisionMetadata("accfp32", &mask));
  EXPECT_EQ(mask, ReducedPrecisionSupport::None);
  EXPECT_EQ(false, SetMaskFromReducedPrecisionMetadata("qwerwer", &mask));
  EXPECT_EQ(mask, ReducedPrecisionSupport::None);
  EXPECT_EQ(false,
            SetMaskFromReducedPrecisionMetadata("fp16accfp32fp16", &mask));
  EXPECT_EQ(mask, ReducedPrecisionSupport::None);
  EXPECT_EQ(false, SetMaskFromReducedPrecisionMetadata("fp16accbf16", &mask));
  EXPECT_EQ(mask, ReducedPrecisionSupport::None);
}
}
}
}
}
// Test entry point: initializes TensorFlow's flag/logging machinery, then
// runs all registered gtest cases.
int main(int argc, char** argv) {
  ::tensorflow::port::InitMain(argv[0], &argc, &argv);
  return RUN_ALL_TESTS();
}
8244d340-5db9-4480-9ac7-8d78b3bb8bef | cpp | tensorflow/tensorflow | op_resolver_internal | tensorflow/lite/core/api/op_resolver_internal.h | tensorflow/lite/core/api/op_resolver_internal_test.cc | #ifndef TENSORFLOW_LITE_CORE_API_OP_RESOLVER_INTERNAL_H_
#define TENSORFLOW_LITE_CORE_API_OP_RESOLVER_INTERNAL_H_
#include <memory>
#include "tensorflow/lite/core/api/op_resolver.h"
namespace tflite {
// Static accessor class that exposes OpResolver's non-public internals to
// tests and internal code (presumably declared a friend of OpResolver --
// verify in op_resolver.h) without widening the public API.
class OpResolverInternal {
 public:
  // Not instantiable: all accessors are static.
  OpResolverInternal() = delete;
  // Forwards to OpResolver::MayContainUserDefinedOps().
  static bool MayContainUserDefinedOps(const OpResolver& op_resolver) {
    return op_resolver.MayContainUserDefinedOps();
  }
  // Returns the resolver's shared operator-registration cache.
  static std::shared_ptr<::tflite::internal::OperatorsCache> GetSharedCache(
      const ::tflite::OpResolver& op_resolver) {
    return op_resolver.registration_externals_cache_;
  }
};
}
#endif | #include "tensorflow/lite/core/api/op_resolver_internal.h"
#include <gtest/gtest.h>
#include "tensorflow/lite/core/api/op_resolver.h"
#include "tensorflow/lite/core/kernels/builtin_op_kernels.h"
#include "tensorflow/lite/core/kernels/register.h"
#include "tensorflow/lite/mutable_op_resolver.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
using ops::builtin::BuiltinOpResolver;
using ops::builtin::BuiltinOpResolverWithoutDefaultDelegates;
namespace {
// Copy-constructing resolvers (including slicing into MutableOpResolver)
// must preserve the presence/absence of default delegate creators.
TEST(OpResolverInternal, ObjectSlicing) {
  BuiltinOpResolver op_resolver1;
  EXPECT_FALSE(op_resolver1.GetDelegateCreators().empty());
  BuiltinOpResolverWithoutDefaultDelegates op_resolver2;
  EXPECT_TRUE(op_resolver2.GetDelegateCreators().empty());
  BuiltinOpResolver op_resolver3(op_resolver2);
  EXPECT_TRUE(op_resolver3.GetDelegateCreators().empty());
  MutableOpResolver op_resolver4(op_resolver1);
  EXPECT_FALSE(op_resolver4.GetDelegateCreators().empty());
  MutableOpResolver op_resolver5(op_resolver2);
  EXPECT_TRUE(op_resolver5.GetDelegateCreators().empty());
}
// A stock BuiltinOpResolver contains no user-defined ops.
TEST(OpResolverInternal, BuiltinOpResolverContainsOnlyPredefinedOps) {
  BuiltinOpResolver builtin_op_resolver;
  EXPECT_EQ(OpResolverInternal::MayContainUserDefinedOps(builtin_op_resolver),
            false);
}
// An empty MutableOpResolver contains no user-defined ops either.
TEST(OpResolverInternal, EmptyMutableOpResolverContainsOnlyPredefinedOps) {
  MutableOpResolver empty_mutable_op_resolver;
  EXPECT_EQ(
      OpResolverInternal::MayContainUserDefinedOps(empty_mutable_op_resolver),
      false);
}
// Registering a nullptr builtin does not count as a user-defined op.
TEST(OpResolverInternal,
     MutableOpResolverAddBuiltinNullptrContainsOnlyPredefinedOps) {
  MutableOpResolver mutable_op_resolver;
  mutable_op_resolver.AddBuiltin(BuiltinOperator_ADD, nullptr, 1);
  EXPECT_EQ(OpResolverInternal::MayContainUserDefinedOps(mutable_op_resolver),
            false);
}
// Remapping a builtin to a different kernel (ADD -> MUL implementation)
// makes the resolver report possible user-defined ops.
TEST(OpResolverInternal,
     MutableOpResolverRedefineBuiltinDoesNotContainOnlyPredefinedOps) {
  MutableOpResolver mutable_op_resolver;
  mutable_op_resolver.AddBuiltin(BuiltinOperator_ADD,
                                 tflite::ops::builtin::Register_MUL(), 1);
  EXPECT_EQ(OpResolverInternal::MayContainUserDefinedOps(mutable_op_resolver),
            true);
}
// Adding any custom op makes the resolver report possible user-defined ops.
TEST(OpResolverInternal,
     MutableOpResolverAddCustomDoesNotContainOnlyPredefinedOps) {
  MutableOpResolver mutable_op_resolver;
  mutable_op_resolver.AddCustom("my_custom_op",
                                tflite::ops::builtin::Register_ADD(), 1);
  EXPECT_EQ(OpResolverInternal::MayContainUserDefinedOps(mutable_op_resolver),
            true);
}
class ChainableOpResolver : public MutableOpResolver {
public:
using MutableOpResolver::ChainOpResolver;
};
TEST(OpResolverInternal, ChainedBuiltinOpResolverContainOnlyPredefinedOps) {
BuiltinOpResolver builtin_op_resolver;
ChainableOpResolver chainable_op_resolver;
chainable_op_resolver.ChainOpResolver(&builtin_op_resolver);
EXPECT_EQ(OpResolverInternal::MayContainUserDefinedOps(chainable_op_resolver),
false);
}
TEST(OpResolverInternal,
ChainedCustomOpResolverDoesNotContainOnlyPredefinedOps) {
MutableOpResolver mutable_op_resolver;
mutable_op_resolver.AddCustom("my_custom_op",
tflite::ops::builtin::Register_ADD(), 1);
ChainableOpResolver chainable_op_resolver;
chainable_op_resolver.ChainOpResolver(&mutable_op_resolver);
EXPECT_EQ(OpResolverInternal::MayContainUserDefinedOps(chainable_op_resolver),
true);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/api/op_resolver_internal.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/api/op_resolver_internal_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
62baec0c-be0a-40db-ac9e-428679188958 | cpp | tensorflow/tensorflow | test_runner | tensorflow/lite/testing/test_runner.h | tensorflow/lite/testing/test_runner_test.cc | #ifndef TENSORFLOW_LITE_TESTING_TEST_RUNNER_H_
#define TENSORFLOW_LITE_TESTING_TEST_RUNNER_H_
#include <iostream>
#include <memory>
#include <string>
#include <vector>
#include "tensorflow/lite/string_type.h"
namespace tflite {
namespace testing {
class TestRunner {
public:
TestRunner() {}
virtual ~TestRunner() {}
virtual void LoadModel(const string& bin_file_path) = 0;
virtual void LoadModel(const string& bin_file_path,
const string& signature) = 0;
virtual void ReshapeTensor(const string& name, const string& csv_values) = 0;
virtual void ResetTensor(const std::string& name) = 0;
virtual string ReadOutput(const string& name) = 0;
virtual void Invoke(const std::vector<std::pair<string, string>>& inputs) = 0;
virtual bool CheckResults(
const std::vector<std::pair<string, string>>& expected_outputs,
const std::vector<std::pair<string, string>>& expected_output_shapes) = 0;
virtual std::vector<string> GetOutputNames() = 0;
virtual void AllocateTensors() = 0;
void SetModelBaseDir(const string& path) {
model_base_dir_ = path;
if (path[path.length() - 1] != '/') {
model_base_dir_ += "/";
}
}
string GetFullPath(const string& path) { return model_base_dir_ + path; }
void SetInvocationId(const string& id) { invocation_id_ = id; }
const string& GetInvocationId() const { return invocation_id_; }
void Invalidate(const string& error_message) {
std::cerr << error_message << std::endl;
error_message_ = error_message;
}
bool IsValid() const { return error_message_.empty(); }
const string& GetErrorMessage() const { return error_message_; }
void SetOverallSuccess(bool value) { overall_success_ = value; }
bool GetOverallSuccess() const { return overall_success_; }
protected:
template <typename T>
bool CheckSizes(size_t tensor_bytes, size_t num_values) {
size_t num_tensor_elements = tensor_bytes / sizeof(T);
if (num_tensor_elements != num_values) {
Invalidate("Expected '" + std::to_string(num_tensor_elements) +
"' elements for a tensor, but only got '" +
std::to_string(num_values) + "'");
return false;
}
return true;
}
private:
string model_base_dir_;
string invocation_id_;
bool overall_success_ = true;
string error_message_;
};
}
}
#endif | #include "tensorflow/lite/testing/test_runner.h"
#include <string>
#include <gtest/gtest.h>
#include "tensorflow/lite/string_type.h"
namespace tflite {
namespace testing {
namespace {
class ConcreteTestRunner : public TestRunner {
public:
void LoadModel(const string& bin_file_path) override {}
void AllocateTensors() override {}
bool CheckFloatSizes(size_t bytes, size_t values) {
return CheckSizes<float>(bytes, values);
}
void LoadModel(const string& bin_file_path,
const string& signature) override {}
void ReshapeTensor(const string& name, const string& csv_values) override {}
void ResetTensor(const std::string& name) override {}
string ReadOutput(const string& name) override { return ""; }
void Invoke(const std::vector<std::pair<string, string>>& inputs) override {}
bool CheckResults(
const std::vector<std::pair<string, string>>& expected_outputs,
const std::vector<std::pair<string, string>>& expected_output_shapes)
override {
return true;
}
std::vector<string> GetOutputNames() override { return {}; }
private:
std::vector<int> ids_;
};
TEST(TestRunner, ModelPath) {
ConcreteTestRunner runner;
EXPECT_EQ(runner.GetFullPath("test.bin"), "test.bin");
runner.SetModelBaseDir("/tmp");
EXPECT_EQ(runner.GetFullPath("test.bin"), "/tmp/test.bin");
}
TEST(TestRunner, InvocationId) {
ConcreteTestRunner runner;
EXPECT_EQ(runner.GetInvocationId(), "");
runner.SetInvocationId("X");
EXPECT_EQ(runner.GetInvocationId(), "X");
}
TEST(TestRunner, Invalidation) {
ConcreteTestRunner runner;
EXPECT_TRUE(runner.IsValid());
EXPECT_EQ(runner.GetErrorMessage(), "");
runner.Invalidate("Some Error");
EXPECT_FALSE(runner.IsValid());
EXPECT_EQ(runner.GetErrorMessage(), "Some Error");
}
TEST(TestRunner, OverallSuccess) {
ConcreteTestRunner runner;
EXPECT_TRUE(runner.GetOverallSuccess());
runner.SetOverallSuccess(false);
EXPECT_FALSE(runner.GetOverallSuccess());
}
TEST(TestRunner, CheckSizes) {
ConcreteTestRunner runner;
EXPECT_TRUE(runner.CheckFloatSizes(16, 4));
EXPECT_FALSE(runner.CheckFloatSizes(16, 2));
EXPECT_EQ(runner.GetErrorMessage(),
"Expected '4' elements for a tensor, but only got '2'");
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/testing/test_runner.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/testing/test_runner_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ae8ab56f-beae-43b8-9489-c0242595dbcc | cpp | tensorflow/tensorflow | join | tensorflow/lite/testing/join.h | tensorflow/lite/testing/join_test.cc | #ifndef TENSORFLOW_LITE_TESTING_JOIN_H_
#define TENSORFLOW_LITE_TESTING_JOIN_H_
#include <cstdint>
#include <cstdlib>
#include <iomanip>
#include <sstream>
#include "tensorflow/lite/string_type.h"
namespace tflite {
namespace testing {
template <typename T>
string JoinDefault(T* data, size_t len, const string& delimiter) {
if (len == 0 || data == nullptr) {
return "";
}
std::stringstream result;
result << data[0];
for (int i = 1; i < len; i++) {
result << delimiter << data[i];
}
return result.str();
}
template <typename T>
string Join(T* data, size_t len, const string& delimiter) {
if (len == 0 || data == nullptr) {
return "";
}
std::stringstream result;
result << std::setprecision(9) << data[0];
for (int i = 1; i < len; i++) {
result << std::setprecision(9) << delimiter << data[i];
}
return result.str();
}
template <>
inline string Join<uint8_t>(uint8_t* data, size_t len,
const string& delimiter) {
if (len == 0 || data == nullptr) {
return "";
}
std::stringstream result;
result << static_cast<int>(data[0]);
for (int i = 1; i < len; i++) {
result << delimiter << static_cast<int>(data[i]);
}
return result.str();
}
template <>
inline string Join<int8_t>(int8_t* data, size_t len, const string& delimiter) {
if (len == 0 || data == nullptr) {
return "";
}
std::stringstream result;
result << static_cast<int>(data[0]);
for (int i = 1; i < len; i++) {
result << delimiter << static_cast<int>(data[i]);
}
return result.str();
}
}
}
#endif | #include "tensorflow/lite/testing/join.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace tflite {
namespace testing {
namespace {
TEST(JoinTest, JoinInt) {
std::vector<int> data = {1, 2, 3};
EXPECT_EQ(Join(data.data(), data.size(), ","), "1,2,3");
}
TEST(JoinDefaultTest, JoinFloat) {
float data[] = {1.0, -3, 2.3, 1e-5};
EXPECT_EQ(JoinDefault(data, 4, " "), "1 -3 2.3 1e-05");
}
TEST(JoinTest, JoinFloat) {
float data[] = {1.0, -3, 2.3, 1e-5};
EXPECT_EQ(Join(data, 4, " "), "1 -3 2.29999995 9.99999975e-06");
}
TEST(JoinTest, JoinNullData) { EXPECT_THAT(Join<int>(nullptr, 3, ","), ""); }
TEST(JoinTest, JoinZeroData) {
std::vector<int> data;
EXPECT_THAT(Join(data.data(), 0, ","), "");
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/testing/join.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/testing/join_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7a29b9d1-1578-444f-adeb-b1e9b5c2583e | cpp | tensorflow/tensorflow | flexbuffers_util | tensorflow/lite/delegates/xnnpack/flexbuffers_util.h | tensorflow/lite/delegates/xnnpack/flexbuffers_util_test.cc | #ifndef TENSORFLOW_LITE_DELEGATES_XNNPACK_FLEXBUFFERS_UTIL_H_
#define TENSORFLOW_LITE_DELEGATES_XNNPACK_FLEXBUFFERS_UTIL_H_
#include "flatbuffers/base.h"
#include "flatbuffers/flexbuffers.h"
namespace tflite::xnnpack {
struct FloatPointer {
const float* ptr = nullptr;
};
}
namespace flexbuffers {
template <>
tflite::xnnpack::FloatPointer inline flexbuffers::Reference::As<
tflite::xnnpack::FloatPointer>() const {
#if !FLATBUFFERS_LITTLEENDIAN
return nullptr;
#else
return {IsFloat() ? reinterpret_cast<const float*>(data_) : nullptr};
#endif
}
}
#endif | #include "tensorflow/lite/delegates/xnnpack/flexbuffers_util.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flexbuffers.h"
namespace tflite::xnnpack {
namespace {
using ::testing::Pointee;
TEST(FlexbuffersUtilTest, FloatPointer) {
constexpr float kAValue = 3.14;
constexpr float kBValue = 56;
flexbuffers::Builder fbb;
fbb.Map([&] {
fbb.Float("a", kAValue);
fbb.Float("b", kBValue);
});
fbb.Finish();
const flexbuffers::Map map = flexbuffers::GetRoot(fbb.GetBuffer()).AsMap();
const flexbuffers::Reference a = map["a"];
EXPECT_TRUE(a.IsFloat());
EXPECT_THAT(a.As<FloatPointer>().ptr, Pointee(kAValue));
const flexbuffers::Reference b = map["b"];
EXPECT_TRUE(b.IsFloat());
EXPECT_THAT(b.As<FloatPointer>().ptr, Pointee(kBValue));
const flexbuffers::Reference c = map["c"];
ASSERT_TRUE(c.IsNull());
EXPECT_EQ(c.As<FloatPointer>().ptr, nullptr);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/flexbuffers_util.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/flexbuffers_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9f5bec3b-5ad6-4383-b363-31a826e9cb9f | cpp | tensorflow/tensorflow | tensorhandle | tensorflow/cc/experimental/base/public/tensorhandle.h | tensorflow/cc/experimental/base/tests/tensorhandle_test.cc | #ifndef TENSORFLOW_CC_EXPERIMENTAL_BASE_PUBLIC_TENSORHANDLE_H_
#define TENSORFLOW_CC_EXPERIMENTAL_BASE_PUBLIC_TENSORHANDLE_H_
#include <memory>
#include <vector>
#include "tensorflow/c/eager/c_api.h"
#include "tensorflow/c/eager/c_api_experimental.h"
#include "tensorflow/cc/experimental/base/public/runtime.h"
#include "tensorflow/cc/experimental/base/public/status.h"
#include "tensorflow/cc/experimental/base/public/tensor.h"
namespace tensorflow {
namespace experimental {
namespace cc {
class TensorHandle {
public:
Tensor Resolve(Status* status);
static TensorHandle FromTensor(const Tensor& tensor, const Runtime& runtime,
Status* status);
TensorHandle(TensorHandle&&) = default;
TensorHandle& operator=(TensorHandle&&) = default;
private:
explicit TensorHandle(TFE_TensorHandle* handle) : handle_(handle) {}
TensorHandle(const TensorHandle&) = delete;
TensorHandle& operator=(const TensorHandle&) = delete;
TFE_TensorHandle* GetTFETensorHandle() const { return handle_.get(); }
void Reset(TFE_TensorHandle* handle) { handle_.reset(handle); }
struct TFETensorHandleDeleter {
void operator()(TFE_TensorHandle* p) const { TFE_DeleteTensorHandle(p); }
};
std::unique_ptr<TFE_TensorHandle, TFETensorHandleDeleter> handle_;
};
inline Tensor TensorHandle::Resolve(Status* status) {
TF_Tensor* tensor =
TFE_TensorHandleResolve(handle_.get(), status->GetTFStatus());
if (!status->ok()) {
return Tensor(nullptr);
}
return Tensor(tensor);
}
inline TensorHandle TensorHandle::FromTensor(const Tensor& tensor,
const Runtime& runtime,
Status* status) {
TFE_TensorHandle* tensor_handle = TFE_NewTensorHandleFromTensor(
runtime.GetTFEContext(), tensor.GetTFTensor(), status->GetTFStatus());
if (!status->ok()) {
return TensorHandle(nullptr);
}
return TensorHandle(tensor_handle);
}
}
}
}
#endif | #include "tensorflow/cc/experimental/base/public/tensorhandle.h"
#include <stddef.h>
#include <stdint.h>
#include <memory>
#include <gtest/gtest.h>
#include "absl/types/span.h"
#include "tensorflow/c/tf_datatype.h"
#include "tensorflow/cc/experimental/base/public/runtime.h"
#include "tensorflow/cc/experimental/base/public/runtime_builder.h"
#include "tensorflow/cc/experimental/base/public/status.h"
#include "tensorflow/cc/experimental/base/public/tensor.h"
#include "tensorflow/cc/experimental/base/tests/tensor_types_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
using tensorflow::experimental::cc::Runtime;
using tensorflow::experimental::cc::RuntimeBuilder;
using tensorflow::experimental::cc::Status;
using tensorflow::experimental::cc::Tensor;
using tensorflow::experimental::cc::TensorHandle;
using SimpleTypes = ::testing::Types<
tensorflow::FloatType, tensorflow::DoubleType, tensorflow::Int32Type,
tensorflow::UINT8Type, tensorflow::INT8Type, tensorflow::INT64Type,
tensorflow::UINT16Type, tensorflow::UINT32Type, tensorflow::UINT64Type>;
template <typename T>
class ConstructScalarTensorHandleTest : public ::testing::Test {};
TYPED_TEST_SUITE(ConstructScalarTensorHandleTest, SimpleTypes);
TYPED_TEST(ConstructScalarTensorHandleTest,
ValidTensorAttributesAfterConstruction) {
Status status;
RuntimeBuilder runtime_builder;
std::unique_ptr<Runtime> runtime = runtime_builder.Build(&status);
ASSERT_TRUE(status.ok()) << status.message();
TF_DataType dtype = TypeParam::kDType;
typename TypeParam::type value = 42;
Tensor original_tensor =
Tensor::FromBuffer(dtype, {},
&value,
sizeof(value),
[](void*, size_t) {}, &status);
ASSERT_TRUE(status.ok()) << status.message();
TensorHandle handle =
TensorHandle::FromTensor(original_tensor, *runtime, &status);
ASSERT_TRUE(status.ok()) << status.message();
Tensor tensor = handle.Resolve(&status);
ASSERT_TRUE(status.ok()) << status.message();
EXPECT_EQ(tensor.dims(), 0);
EXPECT_EQ(tensor.dtype(), dtype);
EXPECT_EQ(*reinterpret_cast<typename TypeParam::type*>(tensor.data()), 42);
EXPECT_EQ(tensor.num_bytes(), sizeof(typename TypeParam::type));
EXPECT_EQ(tensor.num_elements(), 1);
}
template <typename T>
class Construct1DTensorHandleTest : public ::testing::Test {};
TYPED_TEST_SUITE(Construct1DTensorHandleTest, SimpleTypes);
TYPED_TEST(Construct1DTensorHandleTest,
ValidTensorAttributesAfterConstruction) {
Status status;
RuntimeBuilder runtime_builder;
std::unique_ptr<Runtime> runtime = runtime_builder.Build(&status);
ASSERT_TRUE(status.ok()) << status.message();
TF_DataType dtype = TypeParam::kDType;
std::vector<typename TypeParam::type> value = {42, 100, 0, 1, 4, 29};
std::vector<int64_t> shape;
shape.push_back(value.size());
Tensor original_tensor = Tensor::FromBuffer(
dtype, shape,
value.data(),
value.size() * sizeof(typename TypeParam::type),
[](void*, size_t) {}, &status);
ASSERT_TRUE(status.ok()) << status.message();
TensorHandle handle =
TensorHandle::FromTensor(original_tensor, *runtime, &status);
ASSERT_TRUE(status.ok()) << status.message();
Tensor tensor = handle.Resolve(&status);
ASSERT_TRUE(status.ok()) << status.message();
EXPECT_EQ(tensor.dims(), 1);
EXPECT_EQ(tensor.dtype(), dtype);
absl::Span<const typename TypeParam::type> tensor_view(
reinterpret_cast<typename TypeParam::type*>(tensor.data()), value.size());
EXPECT_EQ(tensor_view[0], 42);
EXPECT_EQ(tensor_view[1], 100);
EXPECT_EQ(tensor_view[2], 0);
EXPECT_EQ(tensor_view[3], 1);
EXPECT_EQ(tensor_view[4], 4);
EXPECT_EQ(tensor_view[5], 29);
EXPECT_EQ(tensor.num_bytes(),
value.size() * sizeof(typename TypeParam::type));
EXPECT_EQ(tensor.num_elements(), value.size());
}
template <typename T>
class Construct2DTensorHandleTest : public ::testing::Test {};
TYPED_TEST_SUITE(Construct2DTensorHandleTest, SimpleTypes);
TYPED_TEST(Construct2DTensorHandleTest,
ValidTensorAttributesAfterConstruction) {
Status status;
RuntimeBuilder runtime_builder;
std::unique_ptr<Runtime> runtime = runtime_builder.Build(&status);
ASSERT_TRUE(status.ok()) << status.message();
TF_DataType dtype = TypeParam::kDType;
std::vector<typename TypeParam::type> value = {42, 100, 0, 1, 4, 29};
std::vector<int64_t> shape({2, 3});
Tensor original_tensor = Tensor::FromBuffer(
dtype, shape,
value.data(),
value.size() * sizeof(typename TypeParam::type),
[](void*, size_t) {}, &status);
ASSERT_TRUE(status.ok()) << status.message();
TensorHandle handle =
TensorHandle::FromTensor(original_tensor, *runtime, &status);
ASSERT_TRUE(status.ok()) << status.message();
Tensor tensor = handle.Resolve(&status);
ASSERT_TRUE(status.ok()) << status.message();
EXPECT_EQ(tensor.dims(), 2);
EXPECT_EQ(tensor.dtype(), dtype);
absl::Span<const typename TypeParam::type> tensor_view(
reinterpret_cast<typename TypeParam::type*>(tensor.data()), value.size());
EXPECT_EQ(tensor_view[0], 42);
EXPECT_EQ(tensor_view[1], 100);
EXPECT_EQ(tensor_view[2], 0);
EXPECT_EQ(tensor_view[3], 1);
EXPECT_EQ(tensor_view[4], 4);
EXPECT_EQ(tensor_view[5], 29);
EXPECT_EQ(tensor.num_bytes(),
value.size() * sizeof(typename TypeParam::type));
EXPECT_EQ(tensor.num_elements(), value.size());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/experimental/base/public/tensorhandle.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/experimental/base/tests/tensorhandle_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c98c390d-dd6d-4bfc-9171-631eabaf0891 | cpp | tensorflow/tensorflow | op_converter | tensorflow/compiler/tf2tensorrt/convert/op_converter.h | tensorflow/compiler/tf2tensorrt/convert/op_converter_test.cc | #ifndef TENSORFLOW_COMPILER_TF2TENSORRT_CONVERT_OP_CONVERTER_H_
#define TENSORFLOW_COMPILER_TF2TENSORRT_CONVERT_OP_CONVERTER_H_
#if GOOGLE_CUDA && GOOGLE_TENSORRT
#include <memory>
#include <vector>
#include "absl/strings/str_format.h"
#include "tensorflow/compiler/tf2tensorrt/convert/trt_parameters.h"
#include "tensorflow/compiler/tf2tensorrt/convert/weights.h"
namespace tensorflow {
namespace tensorrt {
namespace convert {
class Converter;
enum class TrtInputArg { kTensor = 1, kWeight = 2, kBoth = 3, kResource = 4 };
struct OpConverterParams {
OpConverterParams(const NodeDef& node_def,
const std::vector<TRT_TensorOrWeights>& inputs,
std::vector<TRT_TensorOrWeights>* outputs,
TrtWeightStore* weight_store,
TrtPrecisionMode precision_mode, bool use_calibration,
bool use_implicit_batch, bool use_explicit_precision);
OpConverterParams(Converter* converter, const NodeDef& node_def,
const std::vector<TRT_TensorOrWeights>& inputs,
std::vector<TRT_TensorOrWeights>* outputs,
TrtWeightStore* weight_store);
Converter* converter = nullptr;
const NodeDef& node_def;
const std::vector<TRT_TensorOrWeights>& inputs;
std::vector<TRT_TensorOrWeights>* outputs;
const bool validation_only;
TrtWeightStore* weight_store;
const TrtPrecisionMode precision_mode;
const bool use_calibration;
const bool use_implicit_batch;
const bool use_explicit_precision;
};
using OpConverter = std::function<Status(const OpConverterParams*)>;
struct InputArgSpec {
absl::string_view name;
TrtInputArg allowed_roles;
static constexpr InputArgSpec Create(absl::string_view n, TrtInputArg role) {
return InputArgSpec{n, role};
}
};
template <typename T>
std::string convert_not_supported_dtype_msg(const T& allowed_types,
DataType tf_type,
const NodeDef& node) {
string allowed_types_string =
absl::StrJoin(allowed_types, ", ", [](string* out, const DataType& type) {
absl::StrAppendFormat(out, "%s", DataTypeString(type));
});
return absl::StrCat("Data type ", DataTypeString(tf_type),
" is not supported for ", node.op(), ", must be one of [",
allowed_types_string, "]");
}
std::string convert_not_supported_implicit(const std::string& pOpName,
const std::string& pNodeName,
const char* pOpType = NULL);
template <typename Impl>
class OpConverterBase {
public:
explicit OpConverterBase(const OpConverterParams* params,
const std::vector<DataType>& data_types =
{DataType::DT_FLOAT, DataType::DT_HALF})
: params_(params),
node_def_attrs_(params->node_def),
allowed_dtypes_(data_types) {}
static constexpr const char* NodeDefDataTypeAttributeName() { return "T"; }
Status ValidateNodeDefDataType() {
if (absl::string_view(Impl::NodeDefDataTypeAttributeName()).empty()) {
return OkStatus();
}
auto dtype = GetAttrValue<DataType>(Impl::NodeDefDataTypeAttributeName());
if (!dtype.ok()) {
return errors::InvalidArgument("Attribute with name ",
Impl::NodeDefDataTypeAttributeName(),
" not found.");
}
if (std::find(allowed_dtypes_.begin(), allowed_dtypes_.end(), *dtype) ==
allowed_dtypes_.end()) {
return errors::Unimplemented(convert_not_supported_dtype_msg(
allowed_dtypes_, *dtype, params_->node_def));
}
return OkStatus();
}
static constexpr bool HasFixNumberOfInputs() { return true; }
Status ValidateInputs() {
const NodeDef& node_def = params_->node_def;
const auto& inputs = params_->inputs;
if (Impl::HasFixNumberOfInputs()) {
TRT_ENSURE(inputs.size() == Impl::InputSpec().size());
} else {
TRT_ENSURE(inputs.size() <= Impl::InputSpec().size());
}
for (int i = 0; i < inputs.size(); i++) {
const InputArgSpec arg_spec = Impl::InputSpec()[i];
if (arg_spec.allowed_roles == TrtInputArg::kWeight &&
inputs.at(i).is_tensor()) {
return errors::Unimplemented("The input \"", arg_spec.name, "\" for ",
node_def.op(), " must be a constant, at ",
node_def.name());
}
if (arg_spec.allowed_roles == TrtInputArg::kTensor &&
inputs.at(i).is_weights()) {
return errors::Unimplemented("The input \"", arg_spec.name, "\" for ",
node_def.op(), " must be a tensor, at ",
node_def.name());
}
}
return OkStatus();
}
Status operator()() {
TF_RETURN_IF_ERROR(this->ValidateNodeDefDataType());
TF_RETURN_IF_ERROR(this->ValidateInputs());
TF_RETURN_IF_ERROR(reinterpret_cast<Impl*>(this)->Validate());
if (params_->validation_only) {
return OkStatus();
}
return reinterpret_cast<Impl*>(this)->Convert();
}
protected:
Status NotSupportedInImplicitBatch(const char* pOpType = nullptr) {
if (params_->use_implicit_batch) {
const auto& op = params_->node_def.op();
const auto& nodeName = params_->node_def.name();
const auto& error = convert_not_supported_implicit(op, nodeName, pOpType);
return errors::Unimplemented(error);
}
return OkStatus();
}
void AddOutput(const TRT_TensorOrWeights& out) {
params_->outputs->push_back(out);
}
template <typename T>
StatusOr<T> GetAttrValue(absl::string_view key) const {
T result;
TF_RETURN_IF_ERROR(GetNodeAttr(node_def_attrs_, key, &result));
return result;
}
const OpConverterParams* const params_;
const AttrSlice node_def_attrs_;
const std::vector<DataType> allowed_dtypes_;
};
template <typename T>
OpConverter MakeConverterFunction() {
return [](const OpConverterParams* params) -> Status {
T converter(params);
return converter();
};
}
}
}
}
#endif
#endif | #if GOOGLE_CUDA && GOOGLE_TENSORRT
#include "tensorflow/compiler/tf2tensorrt/convert/op_converter.h"
#include <gtest/gtest.h>
#include "tensorflow/compiler/tf2tensorrt/convert/convert_nodes.h"
#include "tensorflow/compiler/tf2tensorrt/convert/op_converter_registry.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace tensorrt {
namespace convert {
using ::tensorflow::testing::IsOk;
using ::tensorflow::testing::StatusIs;
using ::testing::HasSubstr;
class ExampleOpConverter : public OpConverterBase<ExampleOpConverter> {
public:
explicit ExampleOpConverter(const OpConverterParams* params)
: OpConverterBase<ExampleOpConverter>(params, {DataType::DT_FLOAT}) {}
static constexpr const char* NodeDefDataTypeAttributeName() {
return "data_type";
}
static constexpr std::array<InputArgSpec, 2> InputSpec() {
return std::array<InputArgSpec, 2>{
InputArgSpec::Create("input_tensor", TrtInputArg::kTensor),
InputArgSpec::Create("weight", TrtInputArg::kWeight)};
}
Status Validate() { return OkStatus(); }
Status Convert() {
AddOutput(TRT_TensorOrWeights(nvinfer1::DataType::kFLOAT,
nvinfer1::Dims{1, {1, 1, 1}}, 1));
return OkStatus();
}
};
TEST(TestOpConverterBase, TestOpConverterBase) {
GetOpConverterRegistry()->Register(
"FakeFunc", 1, MakeConverterFunction<ExampleOpConverter>());
NodeDef def;
def.set_op("FakeFunc");
auto converter = Converter::Create(TrtPrecisionMode::FP32, false,
Logger::GetLogger(), false, "test_engine");
EXPECT_THAT(converter, IsOk());
Status conversion_status = (*converter)->ConvertNode(def);
EXPECT_THAT(conversion_status,
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Attribute with name data_type not found")));
def.mutable_input()->Add("input1");
conversion_status = (*converter)
->AddInputTensor("input1", nvinfer1::DataType::kFLOAT,
nvinfer1::Dims{4, {1, 1, 1, 1}}, 1);
EXPECT_THAT(conversion_status, IsOk());
AddNodeAttr("data_type", DT_FLOAT, &def);
conversion_status = (*converter)->ConvertNode(def);
EXPECT_THAT(conversion_status, StatusIs(error::INTERNAL));
def.mutable_input()->Add("input2");
conversion_status = (*converter)
->AddInputTensor("input2", nvinfer1::DataType::kFLOAT,
nvinfer1::Dims{4, {1, 1, 1, 1}}, 1);
EXPECT_THAT(conversion_status, IsOk());
conversion_status = (*converter)->ConvertNode(def);
EXPECT_THAT(
conversion_status,
StatusIs(error::UNIMPLEMENTED,
HasSubstr("input \"weight\" for FakeFunc must be a constant")));
(*converter)->TensorsMap().erase("input2");
(*converter)
->TensorsMap()
.insert(std::make_pair("input2", TRT_TensorOrWeights(TRT_ShapedWeights(
nvinfer1::DataType::kFLOAT))));
conversion_status = (*converter)->ConvertNode(def);
EXPECT_THAT(conversion_status, IsOk());
EXPECT_EQ((*converter)->TensorsMap().size(), 3U);
GetOpConverterRegistry()->Clear("FakeFunc");
}
}
}
}
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2tensorrt/convert/op_converter.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2tensorrt/convert/op_converter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
fc9125ea-a219-4e03-801b-8bfe86092d36 | cpp | tensorflow/tensorflow | offset_buffer | tensorflow/compiler/mlir/lite/offset_buffer.h | tensorflow/compiler/mlir/lite/offset_buffer_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_LITE_OFFSET_BUFFER_H_
#define TENSORFLOW_COMPILER_MLIR_LITE_OFFSET_BUFFER_H_
#include <cstdint>
namespace tflite {
inline bool IsValidBufferOffset(const int64_t offset) { return offset > 1; }
}
#endif | #include "tensorflow/compiler/mlir/lite/offset_buffer.h"
#include "tensorflow/core/platform/test.h"
namespace tflite {
namespace {
TEST(OffsetBufferTest, IsValidBufferOffsetTrueGreaterThan1) {
EXPECT_TRUE(IsValidBufferOffset(2));
}
TEST(OffsetBufferTest, IsValidBufferOffsetFalseForLessThanOrEqualTo1) {
EXPECT_FALSE(IsValidBufferOffset(1));
EXPECT_FALSE(IsValidBufferOffset(0));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/offset_buffer.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/offset_buffer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
be58cc48-cadb-4691-a6e1-36be63a06a34 | cpp | tensorflow/tensorflow | test_matchers | tensorflow/compiler/mlir/tf2xla/internal/test_matchers.h | tensorflow/compiler/mlir/tf2xla/internal/test_matchers_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_TF2XLA_INTERNAL_TEST_MATCHERS_H_
#define TENSORFLOW_COMPILER_MLIR_TF2XLA_INTERNAL_TEST_MATCHERS_H_
#include <gmock/gmock.h>
#include "absl/status/statusor.h"
#include "tensorflow/compiler/mlir/tf2xla/api/v1/compile_mlir_util.h"
#include "tsl/platform/statusor.h"
template <typename T>
bool WasGraphAnalysisFailure(const absl::StatusOr<T>& status) {
return (status.status() ==
tensorflow::CompileToHloGraphAnalysisFailedError());
}
MATCHER(IsOkOrFiltered,
"Status was OK or equal to the Graph Analysis failure") {
bool is_ok = arg.ok();
auto graph_analysis_failure = WasGraphAnalysisFailure(arg);
return testing::ExplainMatchResult(
testing::IsTrue(), is_ok || graph_analysis_failure, result_listener);
}
MATCHER_P2(IncrementedOrFiltered, metric, value,
"Metric was incremented by value or Status equal to the Graph "
"Analysis failure") {
auto graph_analysis_failure = WasGraphAnalysisFailure(arg);
if (graph_analysis_failure) {
return testing::ExplainMatchResult(testing::IsTrue(),
graph_analysis_failure, result_listener);
}
return testing::ExplainMatchResult(testing::Eq(metric), value,
result_listener);
}
MATCHER_P(ComputationProtoContains, regex,
"If not a Graph Analysis failure then matches the computation result "
"with the regex") {
auto graph_analysis_failure = WasGraphAnalysisFailure(arg);
if (graph_analysis_failure) {
return testing::ExplainMatchResult(testing::IsTrue(),
graph_analysis_failure, result_listener);
}
auto proto = arg.value().computation->proto().DebugString();
return testing::ExplainMatchResult(testing::ContainsRegex(regex), proto,
result_listener);
}
MATCHER_P(XlaComputationProtoContains, regex,
"If not a Graph Analysis failure then matches the computation result "
"with the regex") {
auto graph_analysis_failure = WasGraphAnalysisFailure(arg);
if (graph_analysis_failure) {
return testing::ExplainMatchResult(testing::IsTrue(),
graph_analysis_failure, result_listener);
}
auto proto = arg.value().proto().DebugString();
return testing::ExplainMatchResult(testing::ContainsRegex(regex), proto,
result_listener);
}
MATCHER_P(
HasMlirModuleWith, expected,
"If not a Graph Analysis failure then matches the mlir module result") {
auto graph_analysis_failure = WasGraphAnalysisFailure(arg);
if (graph_analysis_failure) {
return testing::ExplainMatchResult(testing::IsTrue(),
graph_analysis_failure, result_listener);
}
auto actual = arg.value();
return testing::ExplainMatchResult(testing::ContainsRegex(expected), actual,
result_listener);
}
#endif | #include "tensorflow/compiler/mlir/tf2xla/internal/test_matchers.h"
#include <cstdint>
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorflow/compiler/mlir/tf2xla/api/v1/compile_mlir_util.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "xla/client/xla_computation.h"
#include "xla/service/hlo.pb.h"
#include "tensorflow/core/lib/monitoring/cell_reader.h"
#include "tensorflow/core/lib/monitoring/counter.h"
#include "tsl/platform/statusor.h"
namespace {
using ::tensorflow::monitoring::testing::CellReader;
using ::testing::Not;
constexpr char kMetric[] = "/tensorflow/metric";
auto* counter =
tensorflow::monitoring::Counter<1>::New(kMetric, "description", "status");
constexpr char kOkStatus[] = "ok";
const int kArbitraryIntResult = 37;
template <typename T>
tsl::StatusOr<T> success(T t) {
return t;
}
absl::StatusOr<int> success() { return kArbitraryIntResult; }
template <typename T>
tsl::StatusOr<T> filtered(T t) {
return tsl::StatusOr<T>(tensorflow::CompileToHloGraphAnalysisFailedError());
}
absl::StatusOr<int> filtered() { return filtered(kArbitraryIntResult); }
absl::StatusOr<int> failed() {
return absl::StatusOr<int>(absl::InternalError("fail"));
}
TEST(TestUtil, MatchesOk) { ASSERT_THAT(success(), IsOkOrFiltered()); }
TEST(TestUtil, DoesntMatchesFailure) {
ASSERT_THAT(failed(), Not(IsOkOrFiltered()));
}
TEST(TestUtil, MatchesFiltered) { ASSERT_THAT(filtered(), IsOkOrFiltered()); }
TEST(TestUtil, IncrementsOk) {
CellReader<int64_t> reader(kMetric);
counter->GetCell(kOkStatus)->IncrementBy(1);
ASSERT_THAT(success(), IncrementedOrFiltered(reader.Delta(kOkStatus), 1));
}
TEST(TestUtil, FilteredDoesntIncrementsOk) {
CellReader<int64_t> reader(kMetric);
ASSERT_THAT(filtered(), IncrementedOrFiltered(reader.Delta(kOkStatus), 1));
}
TEST(TestUtil, FailureDoesntMatchIncrement) {
CellReader<int64_t> reader(kMetric);
ASSERT_THAT(failed(), Not(IncrementedOrFiltered(reader.Delta(kOkStatus), 1)));
}
tensorflow::XlaCompilationResult CreateXlaComputationResult(
const char* hlo_name) {
auto result = tensorflow::XlaCompilationResult();
xla::HloModuleProto hlo;
hlo.set_name(hlo_name);
result.computation = std::make_shared<xla::XlaComputation>(hlo);
return result;
}
TEST(TestUtil, ComputationContainsOk) {
constexpr char arbitrary_hlo[] = "arbitrary_hlo";
auto result = CreateXlaComputationResult(arbitrary_hlo);
ASSERT_THAT(success(result), ComputationProtoContains(arbitrary_hlo));
}
TEST(TestUtil, ComputationDoesNotContain) {
constexpr char arbitrary_hlo[] = "arbitrary_hlo";
constexpr char bad_hlo[] = "bad_hlo";
auto result = CreateXlaComputationResult(arbitrary_hlo);
ASSERT_THAT(success(result), Not(ComputationProtoContains(bad_hlo)));
}
TEST(TestUtil, ComputationDoesNotContainFiltered) {
constexpr char arbitrary_hlo[] = "arbitrary_hlo";
constexpr char bad_hlo[] = "bad_hlo";
auto result = CreateXlaComputationResult(arbitrary_hlo);
ASSERT_THAT(filtered(result), ComputationProtoContains(bad_hlo));
}
TEST(TestUtil, MlirModuleHas) {
constexpr char arbirary_mlir[] = "arbirary_mlir";
ASSERT_THAT(success(arbirary_mlir), HasMlirModuleWith(arbirary_mlir));
}
TEST(TestUtil, MlirModuleDoesNotHave) {
constexpr char arbirary_mlir[] = "arbirary_mlir";
constexpr char bad_mlir[] = "bad_mlir";
ASSERT_THAT(success(arbirary_mlir), Not(HasMlirModuleWith(bad_mlir)));
}
TEST(TestUtil, MlirModuleDoesNotHaveFiltered) {
constexpr char arbirary_mlir[] = "arbirary_mlir";
constexpr char bad_mlir[] = "bad_mlir";
ASSERT_THAT(filtered(arbirary_mlir), HasMlirModuleWith(bad_mlir));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/internal/test_matchers.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/internal/test_matchers_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
faa2d16e-aafb-4563-bd36-573f59beadb8 | cpp | tensorflow/tensorflow | compilation_timer | tensorflow/compiler/mlir/tf2xla/internal/compilation_timer.h | tensorflow/compiler/mlir/tf2xla/internal/compilation_timer_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_TF2XLA_INTERNAL_COMPILATION_TIMER_H_
#define TENSORFLOW_COMPILER_MLIR_TF2XLA_INTERNAL_COMPILATION_TIMER_H_
#include <chrono>
#include "tensorflow/core/platform/profile_utils/cpu_utils.h"
struct CompilationTimer {
uint64_t start_cycles =
tensorflow::profile_utils::CpuUtils::GetCurrentClockCycle();
uint64_t ElapsedCycles() {
return tensorflow::profile_utils::CpuUtils::GetCurrentClockCycle() -
start_cycles;
}
int64_t ElapsedCyclesInMilliseconds() {
std::chrono::duration<double> duration =
tensorflow::profile_utils::CpuUtils::ConvertClockCycleToTime(
ElapsedCycles());
return std::chrono::duration_cast<std::chrono::milliseconds>(duration)
.count();
}
};
#endif | #include "tensorflow/compiler/mlir/tf2xla/internal/compilation_timer.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/time/clock.h"
#include "absl/time/time.h"
namespace {
TEST(CompilationTimer, MeasuresElapsedTime) {
uint64_t timer_result_in_milliseconds;
{
CompilationTimer timer;
absl::SleepFor(absl::Milliseconds(100));
timer_result_in_milliseconds = timer.ElapsedCyclesInMilliseconds();
}
ASSERT_THAT(timer_result_in_milliseconds, testing::Ne(0));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/internal/compilation_timer.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/internal/compilation_timer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
732356cd-3b81-4dfa-8545-9e40ed39711b | cpp | tensorflow/tensorflow | stablehlo_type_utils | tensorflow/compiler/mlir/quantization/stablehlo/utils/stablehlo_type_utils.h | tensorflow/compiler/mlir/quantization/stablehlo/utils/stablehlo_type_utils_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_QUANTIZATION_STABLEHLO_UTILS_STABLEHLO_TYPE_UTILS_H_
#define TENSORFLOW_COMPILER_MLIR_QUANTIZATION_STABLEHLO_UTILS_STABLEHLO_TYPE_UTILS_H_
#include "llvm/ADT/StringRef.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/Transforms/DialectConversion.h"
#include "stablehlo/dialect/StablehloOps.h"
namespace mlir::quant::stablehlo {
inline bool IsStablehloOp(Operation* op) {
return op->getDialect()->getNamespace() ==
mlir::stablehlo::StablehloDialect::getDialectNamespace();
}
}
#endif | #include "tensorflow/compiler/mlir/quantization/stablehlo/utils/stablehlo_type_utils.h"
#include <gtest/gtest.h>
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "stablehlo/dialect/StablehloOps.h"
namespace mlir::quant::stablehlo {
namespace {
using ::testing::Test;
class StablehloTypeUtilsTest : public Test {
protected:
StablehloTypeUtilsTest() {
ctx_.loadDialect<mlir::stablehlo::StablehloDialect,
mlir::arith::ArithDialect, mlir::func::FuncDialect>();
}
MLIRContext ctx_;
OpBuilder builder_{&ctx_};
};
TEST_F(StablehloTypeUtilsTest, IsStablehloOpSucceedsWithStablehloOp) {
const OwningOpRef<mlir::stablehlo::ConstantOp> constant_op =
builder_.create<mlir::stablehlo::ConstantOp>(
builder_.getUnknownLoc(), builder_.getI32IntegerAttr(0));
EXPECT_TRUE(IsStablehloOp(*constant_op));
}
TEST_F(StablehloTypeUtilsTest, IsStablehloOpFailsWithArithOp) {
const OwningOpRef<mlir::arith::ConstantOp> constant_op =
builder_.create<mlir::arith::ConstantOp>(builder_.getUnknownLoc(),
builder_.getI32IntegerAttr(0));
EXPECT_FALSE(IsStablehloOp(*constant_op));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/utils/stablehlo_type_utils.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/utils/stablehlo_type_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2856eed2-70da-4d70-ac4a-3065123d029d | cpp | tensorflow/tensorflow | permutation | tensorflow/compiler/mlir/quantization/stablehlo/cc/permutation.h | tensorflow/compiler/mlir/quantization/stablehlo/cc/permutation_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_QUANTIZATION_STABLEHLO_CC_PERMUTATION_H_
#define TENSORFLOW_COMPILER_MLIR_QUANTIZATION_STABLEHLO_CC_PERMUTATION_H_
#include <cstdint>
#include <type_traits>
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/Support/LLVM.h"
namespace mlir::quant {
template <typename T,
typename = std::enable_if_t<std::is_default_constructible_v<T>, void>>
SmallVector<T> Permute(const ArrayRef<T> values,
const ArrayRef<int64_t> permutation) {
SmallVector<T> permuted_values(values.size(), T{});
for (auto [i, permutation_idx] : llvm::enumerate(permutation)) {
permuted_values[i] = std::move(values[permutation_idx]);
}
return permuted_values;
}
}
#endif | #include "tensorflow/compiler/mlir/quantization/stablehlo/cc/permutation.h"
#include <cstdint>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "mlir/Support/LLVM.h"
namespace mlir::quant {
namespace {
using testing::ElementsAre;
using testing::IsEmpty;
TEST(PermutationTest, PermuteEmptyArray) {
const SmallVector<int> permutation_result =
Permute<int>(SmallVector<int>{}, SmallVector<int64_t>{});
EXPECT_THAT(permutation_result, IsEmpty());
}
TEST(PermutationTest, PermuteOneElement) {
const SmallVector<int> single_element_array = {8};
const SmallVector<int64_t> permutation = {0};
const SmallVector<int> permutation_result =
Permute<int>(single_element_array, permutation);
EXPECT_THAT(permutation_result, ElementsAre(8));
}
TEST(PermutationTest, PermuteFourElements) {
const SmallVector<int> arr = {0, 3, 1, 2};
const SmallVector<int64_t> permutation = {0, 2, 3, 1};
const SmallVector<int> permutation_result = Permute<int>(arr, permutation);
EXPECT_THAT(permutation_result, ElementsAre(0, 1, 2, 3));
}
TEST(PermutationTest, PermuteFourStringElements) {
const SmallVector<std::string> arr = {"a", "b", "c", "d"};
const SmallVector<int64_t> permutation = {0, 2, 3, 1};
const SmallVector<std::string> permutation_result =
Permute<std::string>(arr, permutation);
EXPECT_THAT(permutation_result, ElementsAre("a", "c", "d", "b"));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/cc/permutation.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/cc/permutation_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
22542dd5-4db0-4aa6-a96e-2d2b524faeed | cpp | tensorflow/tensorflow | graph_def | tensorflow/compiler/mlir/quantization/stablehlo/cc/graph_def.h | tensorflow/compiler/mlir/quantization/stablehlo/cc/graph_def_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_QUANTIZATION_STABLEHLO_CC_GRAPH_DEF_H_
#define TENSORFLOW_COMPILER_MLIR_QUANTIZATION_STABLEHLO_CC_GRAPH_DEF_H_
#include <type_traits>
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
namespace stablehlo::quantization {
template <typename FuncT, typename = std::enable_if_t<std::is_invocable_r_v<
void, FuncT, tensorflow::NodeDef&>>>
void MutateNodeDefs(tensorflow::GraphDef& graph_def, FuncT&& func) {
for (tensorflow::NodeDef& node_def : *graph_def.mutable_node()) {
func(node_def);
}
for (tensorflow::FunctionDef& function_def :
*graph_def.mutable_library()->mutable_function()) {
for (tensorflow::NodeDef& node_def : *function_def.mutable_node_def()) {
func(node_def);
}
}
}
}
#endif | #include "tensorflow/compiler/mlir/quantization/stablehlo/cc/graph_def.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tsl/platform/protobuf.h"
namespace stablehlo::quantization {
namespace {
using ::tensorflow::GraphDef;
using ::tensorflow::NodeDef;
using ::testing::SizeIs;
using ::testing::StrEq;
using ::tsl::protobuf::TextFormat;
TEST(GraphDefTest, MutateNodeDefsMutatesTopLevelNodeDefs) {
GraphDef graph_def;
ASSERT_TRUE(TextFormat::ParseFromString(R"pb(
node { name: "foo" }
)pb",
&graph_def));
MutateNodeDefs(graph_def,
[](NodeDef& node_def) { node_def.set_name("bar"); });
ASSERT_THAT(graph_def.node(), SizeIs(1));
EXPECT_THAT(graph_def.node()[0].name(), StrEq("bar"));
}
TEST(GraphDefTest, MutateNodeDefsMutatesFunctionNodeDefs) {
GraphDef graph_def;
ASSERT_TRUE(TextFormat::ParseFromString(
R"pb(
library { function { node_def { name: "foo" } } }
)pb",
&graph_def));
MutateNodeDefs(graph_def,
[](NodeDef& node_def) { node_def.set_name("bar"); });
ASSERT_THAT(graph_def.library().function(), SizeIs(1));
ASSERT_THAT(graph_def.library().function()[0].node_def(), SizeIs(1));
EXPECT_THAT(graph_def.library().function()[0].node_def()[0].name(),
StrEq("bar"));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/cc/graph_def.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/cc/graph_def_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
77752fe8-7d7c-4e1f-b5a5-4603ebf65abd | cpp | tensorflow/tensorflow | calibration_parameters | tensorflow/compiler/mlir/quantization/stablehlo/cc/calibration/calibration_parameters.h | tensorflow/compiler/mlir/quantization/stablehlo/cc/calibration/calibration_parameters_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_QUANTIZATION_STABLEHLO_CC_CALIBRATION_CALIBRATION_PARAMETERS_H_
#define TENSORFLOW_COMPILER_MLIR_QUANTIZATION_STABLEHLO_CC_CALIBRATION_CALIBRATION_PARAMETERS_H_
#include <algorithm>
#include <cmath>
#include <cstdint>
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h"
namespace stablehlo::quantization {
inline float CalculateBinWidth(const float min_value, const float max_value,
const int32_t num_bins) {
const float raw_bin_width = (max_value - min_value) / num_bins;
return std::pow(2, std::ceil(std::log2(raw_bin_width)));
}
inline float CalculateLowerBound(const float min_value, const float bin_width) {
return std::floor(min_value / bin_width) * bin_width;
}
inline int32_t CalculateBinIndex(const float value, const float lower_bound,
const float bin_width) {
return std::floor((value - lower_bound) / bin_width);
}
inline int32_t CalculateBinIndexSafe(const float value, const float lower_bound,
const float bin_width,
const int32_t num_bins) {
const int32_t bin_index = CalculateBinIndex(value, lower_bound, bin_width);
return std::clamp(bin_index, 0, num_bins - 1);
}
inline bool IsHistogramCalibration(
const CalibrationOptions::CalibrationMethod method) {
return method ==
CalibrationOptions::CALIBRATION_METHOD_HISTOGRAM_PERCENTILE ||
method ==
CalibrationOptions::CALIBRATION_METHOD_HISTOGRAM_MSE_BRUTEFORCE ||
method == CalibrationOptions::
CALIBRATION_METHOD_HISTOGRAM_MSE_MAX_FREQUENCY ||
method ==
CalibrationOptions::CALIBRATION_METHOD_HISTOGRAM_MSE_SYMMETRIC;
}
inline int32_t GetNumBins(const CalibrationOptions& calib_opts) {
return IsHistogramCalibration(calib_opts.calibration_method())
? calib_opts.calibration_parameters().num_bins()
: 0;
}
}
#endif | #include "tensorflow/compiler/mlir/quantization/stablehlo/cc/calibration/calibration_parameters.h"
#include <cmath>
#include <cstdint>
#include <gtest/gtest.h>
namespace stablehlo::quantization {
namespace {
inline int32_t CalculateActualNumBins(const float min_value,
const float max_value,
const float bin_width) {
const float lower_bound = CalculateLowerBound(min_value, bin_width);
return std::ceil((max_value - lower_bound) / bin_width);
}
TEST(CalibrationParametersTest, CalculateBinWidthSmallerThanOne) {
float bin_width = CalculateBinWidth(0.0, 25.0,
256);
EXPECT_FLOAT_EQ(bin_width, 0.125);
int32_t actual_num_bins =
CalculateActualNumBins(0.0, 25.0, bin_width);
EXPECT_EQ(actual_num_bins, 200);
float raw_bin_width = 25.0 / actual_num_bins;
EXPECT_FLOAT_EQ(bin_width, raw_bin_width);
}
TEST(CalibrationParametersTest, CalculateBinWidthLargerThanOne) {
float bin_width = CalculateBinWidth(0.0, 360.0,
256);
EXPECT_FLOAT_EQ(bin_width, 2.0);
int32_t actual_num_bins =
CalculateActualNumBins(0.0, 360.0, bin_width);
EXPECT_EQ(actual_num_bins, 180);
float raw_bin_width = 360.0 / actual_num_bins;
EXPECT_FLOAT_EQ(bin_width, raw_bin_width);
}
TEST(CalibrationParametersTest, CalculateBinWidthDivisible) {
float bin_width = CalculateBinWidth(0.0, 256.0,
256);
EXPECT_FLOAT_EQ(bin_width, 1.0);
int32_t actual_num_bins =
CalculateActualNumBins(0.0, 256.0, bin_width);
EXPECT_EQ(actual_num_bins, 256);
float raw_bin_width = 256.0 / actual_num_bins;
EXPECT_FLOAT_EQ(bin_width, raw_bin_width);
}
TEST(CalibrationParametersTest, CalculateNumBinsDivisible) {
int32_t num_bins = CalculateActualNumBins(
0.0, 4.0, 2.0);
EXPECT_EQ(num_bins, 2);
}
TEST(CalibrationParametersTest, CalculateNumBinsNotDivisible) {
int32_t num_bins = CalculateActualNumBins(
0.0, 5.0, 2.0);
EXPECT_EQ(num_bins, 3);
}
TEST(CalibrationParametersTest, CalculateBinIndex) {
int32_t bin_index = CalculateBinIndexSafe(3.0, 0.0,
2.0, 2);
EXPECT_EQ(bin_index, 1);
}
TEST(CalibrationParametersTest, CalculateBinIndexMaxValue) {
int32_t bin_index = CalculateBinIndexSafe(4.0, 0.0,
2.0, 2);
EXPECT_EQ(bin_index, 1);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/cc/calibration/calibration_parameters.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/cc/calibration/calibration_parameters_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b211b421-a59e-4aa5-9f5c-9e7ba5fcbefa | cpp | tensorflow/tensorflow | device_compilation_cache | tensorflow/compiler/jit/device_compilation_cache.h | tensorflow/compiler/jit/device_compilation_cache_test.cc | #ifndef TENSORFLOW_COMPILER_JIT_DEVICE_COMPILATION_CACHE_H_
#define TENSORFLOW_COMPILER_JIT_DEVICE_COMPILATION_CACHE_H_
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <variant>
#include "absl/strings/str_cat.h"
#include "tensorflow/compiler/jit/device_compilation_cluster_signature.h"
#include "tensorflow/compiler/jit/xla_compile_util.h"
#include "tensorflow/compiler/tf2xla/xla_compiler.h"
#include "xla/client/local_client.h"
#include "xla/pjrt/pjrt_client.h"
#include "tensorflow/core/platform/mutex.h"
namespace tensorflow {
namespace device_compilation_cache_internal {
template <typename ExecutableType>
int64_t ExecutableSize(const ExecutableType* executable) {
return 0;
}
template <>
inline int64_t ExecutableSize<xla::LocalExecutable>(
const xla::LocalExecutable* executable) {
if (executable != nullptr && executable->executable() != nullptr) {
return executable->executable()->SizeOfGeneratedCodeInBytes();
}
return 0;
}
template <>
inline int64_t ExecutableSize<xla::PjRtLoadedExecutable>(
const xla::PjRtLoadedExecutable* executable) {
if (executable != nullptr) {
return executable->SizeOfGeneratedCodeInBytes();
}
return 0;
}
}
template <typename ExecutableType>
class DeviceCompilationCache {
public:
DeviceCompilationCache() = default;
~DeviceCompilationCache() = default;
using Key = DeviceCompilationClusterSignature;
struct Value {
DeviceCompileState compile_state = DeviceCompileState::kUncompiled;
Status compilation_status;
int64_t request_count = 0;
const XlaCompiler::CompilationResult* compilation_result = nullptr;
ExecutableType* executable = nullptr;
};
std::optional<Value> Lookup(const Key& key) const;
Value LookupOrCreate(const Key& key);
void Store(const Key& key, std::optional<DeviceCompileState> compile_state,
std::optional<Status> compilation_status,
std::optional<std::unique_ptr<XlaCompiler::CompilationResult>>
compilation_result,
std::optional<std::unique_ptr<ExecutableType>> executable);
std::string DebugString() const;
private:
struct Entry {
mutable mutex mu;
DeviceCompileState compile_state TF_GUARDED_BY(mu) =
DeviceCompileState::kUncompiled;
int64_t request_count TF_GUARDED_BY(mu) = 0;
Status compilation_status TF_GUARDED_BY(mu);
std::unique_ptr<XlaCompiler::CompilationResult> compilation_result
TF_GUARDED_BY(mu);
std::unique_ptr<ExecutableType> executable TF_GUARDED_BY(mu);
std::string DebugString() const {
mutex_lock lock(mu);
int64_t executable_size =
device_compilation_cache_internal::ExecutableSize<ExecutableType>(
executable.get());
int64_t hlo_module_size = 0;
if (compilation_result != nullptr &&
compilation_result->computation != nullptr) {
hlo_module_size =
compilation_result->computation->proto().ByteSizeLong();
}
return absl::StrCat(
"{compile_state: ", compile_state, ", request_count: ", request_count,
", compilation_status: ", compilation_status.ToString(),
", compilation_result?: ", compilation_result != nullptr,
", hlo_module_size: ", hlo_module_size, " bytes",
", executable?: ", executable != nullptr,
", executable_size: ", executable_size, " bytes}");
}
};
mutable mutex compile_cache_mu_;
absl::flat_hash_map<Key, std::unique_ptr<Entry>, Key::Hash> cache_
TF_GUARDED_BY(compile_cache_mu_);
DeviceCompilationCache(const DeviceCompilationCache&) = delete;
void operator=(const DeviceCompilationCache&) = delete;
};
template <typename ExecutableType>
std::optional<typename DeviceCompilationCache<ExecutableType>::Value>
DeviceCompilationCache<ExecutableType>::Lookup(const Key& key) const {
Entry* entry;
{
mutex_lock lock(compile_cache_mu_);
auto it = cache_.find(key);
if (it == cache_.cend()) {
return std::nullopt;
}
entry = it->second.get();
}
mutex_lock lock(entry->mu);
Value value = {entry->compile_state,
entry->compilation_status,
++entry->request_count,
entry->compilation_result.get(),
entry->executable.get()};
return value;
}
template <typename ExecutableType>
typename DeviceCompilationCache<ExecutableType>::Value
DeviceCompilationCache<ExecutableType>::LookupOrCreate(const Key& key) {
Entry* entry;
{
mutex_lock lock(compile_cache_mu_);
auto it = cache_.emplace(key, std::make_unique<Entry>()).first;
entry = it->second.get();
}
mutex_lock lock(entry->mu);
Value value = {entry->compile_state,
entry->compilation_status,
++entry->request_count,
entry->compilation_result.get(),
entry->executable.get()};
return value;
}
template <typename ExecutableType>
void DeviceCompilationCache<ExecutableType>::Store(
const Key& key, std::optional<DeviceCompileState> compile_state,
std::optional<Status> compilation_status,
std::optional<std::unique_ptr<XlaCompiler::CompilationResult>>
compilation_result,
std::optional<std::unique_ptr<ExecutableType>> executable) {
Entry* entry;
{
mutex_lock lock(compile_cache_mu_);
auto it = cache_.emplace(key, std::make_unique<Entry>()).first;
entry = it->second.get();
}
{
mutex_lock lock(entry->mu);
if (compile_state.has_value()) {
entry->compile_state = *compile_state;
}
if (compilation_status.has_value()) {
entry->compilation_status = *compilation_status;
}
if (compilation_result.has_value()) {
entry->compilation_result = std::move(*compilation_result);
}
if (executable.has_value()) {
entry->executable = std::move(*executable);
}
}
VLOG(4) << "Added/updated cache entry: key=" << key.HumanString()
<< ", entry=" << entry->DebugString();
}
template <typename ExecutableType>
std::string DeviceCompilationCache<ExecutableType>::DebugString() const {
std::string s = "DeviceCompilationCache<ExecutableType> {\n";
{
mutex_lock lock(compile_cache_mu_);
for (const auto& [key, entry] : cache_) {
absl::StrAppend(&s, key.HumanString(), " : ", entry->DebugString(),
",\n");
}
}
absl::StrAppend(&s, "}");
return s;
}
}
#endif | #include "tensorflow/compiler/jit/device_compilation_cache.h"
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/test.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace {
struct FakeExecutable {
std::string data;
explicit FakeExecutable(const std::string& s) : data(s) {}
};
using Cache = DeviceCompilationCache<FakeExecutable>;
using Signature = DeviceCompilationClusterSignature;
absl::StatusOr<Signature> BuildSampleSignature(const std::string& fn_name) {
NameAttrList fn;
fn.set_name(fn_name);
std::vector<XlaCompiler::Argument> args(1);
args[0].kind = XlaCompiler::Argument::kConstant;
args[0].type = DT_INT32;
args[0].shape = TensorShape({4, 0});
args[0].constant_value = Tensor(DT_INT32, {4, 0});
return Signature::Build(fn, args);
}
TEST(DeviceCompilationCacheTest, LookupEntryDoesntExist) {
auto cache = std::make_unique<Cache>();
TF_ASSERT_OK_AND_ASSIGN(auto key, BuildSampleSignature("foo"));
auto cache_value = cache->Lookup(key);
EXPECT_FALSE(cache_value.has_value());
}
TEST(DeviceCompilationCacheTest, LookupOrCreateEntryDoesntExist) {
auto cache = std::make_unique<Cache>();
TF_ASSERT_OK_AND_ASSIGN(auto key, BuildSampleSignature("foo"));
Cache::Value cache_value = cache->LookupOrCreate(key);
EXPECT_EQ(cache_value.compile_state, DeviceCompileState::kUncompiled);
EXPECT_EQ(cache_value.request_count, 1);
EXPECT_EQ(cache_value.compilation_result, nullptr);
EXPECT_EQ(cache_value.executable, nullptr);
}
TEST(DeviceCompilationCacheTest, IncrementRequestCountOnLookup) {
auto cache = std::make_unique<Cache>();
TF_ASSERT_OK_AND_ASSIGN(auto key, BuildSampleSignature("foo"));
Cache::Value cache_value = cache->LookupOrCreate(key);
EXPECT_EQ(cache_value.request_count, 1);
cache_value = cache->LookupOrCreate(key);
EXPECT_EQ(cache_value.request_count, 2);
cache_value = cache->LookupOrCreate(key);
EXPECT_EQ(cache_value.request_count, 3);
}
TEST(DeviceCompilationCacheTest, RequestCountUnchangedOnStore) {
auto cache = std::make_unique<Cache>();
TF_ASSERT_OK_AND_ASSIGN(auto key, BuildSampleSignature("foo"));
Cache::Value cache_value = cache->LookupOrCreate(key);
EXPECT_EQ(cache_value.request_count, 1);
cache_value = cache->LookupOrCreate(key);
EXPECT_EQ(cache_value.request_count, 2);
cache_value = cache->LookupOrCreate(key);
EXPECT_EQ(cache_value.request_count, 3);
auto compilation_result = std::make_unique<XlaCompiler::CompilationResult>();
cache->Store(key, DeviceCompileState::kCompiled, absl::OkStatus(),
std::move(compilation_result), std::nullopt);
cache_value = cache->LookupOrCreate(key);
EXPECT_EQ(cache_value.request_count, 4);
}
TEST(DeviceCompilationCacheTest, StoreLookup) {
auto cache = std::make_unique<Cache>();
TF_ASSERT_OK_AND_ASSIGN(auto key, BuildSampleSignature("foo"));
auto compilation_result = std::make_unique<XlaCompiler::CompilationResult>();
auto executable = std::make_unique<FakeExecutable>("foo_exe");
cache->Store(key, DeviceCompileState::kCompiled, absl::OkStatus(),
std::move(compilation_result), std::move(executable));
auto cache_value = cache->Lookup(key);
EXPECT_EQ(cache_value->compile_state, DeviceCompileState::kCompiled);
EXPECT_EQ(cache_value->request_count, 1);
EXPECT_TRUE(cache_value->compilation_status.ok());
EXPECT_TRUE(cache_value->compilation_result != nullptr);
EXPECT_TRUE(cache_value->executable != nullptr);
EXPECT_EQ(cache_value->executable->data, "foo_exe");
}
TEST(DeviceCompilationCacheTest, StoreLookupOrCreate) {
auto cache = std::make_unique<Cache>();
TF_ASSERT_OK_AND_ASSIGN(auto key, BuildSampleSignature("foo"));
auto compilation_result = std::make_unique<XlaCompiler::CompilationResult>();
auto executable = std::make_unique<FakeExecutable>("foo_exe");
cache->Store(key, DeviceCompileState::kCompiled, absl::OkStatus(),
std::move(compilation_result), std::move(executable));
auto cache_value = cache->LookupOrCreate(key);
EXPECT_EQ(cache_value.compile_state, DeviceCompileState::kCompiled);
EXPECT_EQ(cache_value.request_count, 1);
EXPECT_TRUE(cache_value.compilation_status.ok());
EXPECT_TRUE(cache_value.compilation_result != nullptr);
EXPECT_TRUE(cache_value.executable != nullptr);
EXPECT_EQ(cache_value.executable->data, "foo_exe");
}
TEST(DeviceCompilationCacheTest, StoreOptionalArgs) {
auto cache = std::make_unique<Cache>();
TF_ASSERT_OK_AND_ASSIGN(auto key, BuildSampleSignature("foo"));
auto compilation_result = std::make_unique<XlaCompiler::CompilationResult>();
auto executable = std::make_unique<FakeExecutable>("foo_exe");
cache->Store(key, DeviceCompileState::kCompiled, std::nullopt, std::nullopt,
std::nullopt);
auto cache_value = cache->Lookup(key);
EXPECT_EQ(cache_value->compile_state, DeviceCompileState::kCompiled);
EXPECT_TRUE(cache_value->compilation_status.ok());
EXPECT_TRUE(cache_value->compilation_result == nullptr);
EXPECT_TRUE(cache_value->executable == nullptr);
cache->Store(key, std::nullopt, errors::InvalidArgument("Couldn't compile."),
std::nullopt, std::nullopt);
cache_value = cache->Lookup(key);
EXPECT_EQ(cache_value->compile_state, DeviceCompileState::kCompiled);
EXPECT_EQ(cache_value->compilation_status.code(), error::INVALID_ARGUMENT);
EXPECT_TRUE(cache_value->compilation_result == nullptr);
EXPECT_TRUE(cache_value->executable == nullptr);
cache->Store(key, std::nullopt, std::nullopt, std::move(compilation_result),
std::nullopt);
cache_value = cache->Lookup(key);
EXPECT_EQ(cache_value->compile_state, DeviceCompileState::kCompiled);
EXPECT_EQ(cache_value->compilation_status.code(), error::INVALID_ARGUMENT);
EXPECT_TRUE(cache_value->compilation_result != nullptr);
EXPECT_TRUE(cache_value->executable == nullptr);
cache->Store(key, std::nullopt, std::nullopt, std::nullopt,
std::move(executable));
cache_value = cache->Lookup(key);
EXPECT_EQ(cache_value->compile_state, DeviceCompileState::kCompiled);
EXPECT_EQ(cache_value->compilation_status.code(), error::INVALID_ARGUMENT);
EXPECT_TRUE(cache_value->compilation_result != nullptr);
EXPECT_TRUE(cache_value->executable != nullptr);
EXPECT_EQ(cache_value->executable->data, "foo_exe");
}
TEST(DeviceCompilationCacheTest, StoreMultipleEntries) {
auto cache = std::make_unique<Cache>();
TF_ASSERT_OK_AND_ASSIGN(auto key1, BuildSampleSignature("foo"));
TF_ASSERT_OK_AND_ASSIGN(auto key2, BuildSampleSignature("bar"));
auto compilation_result1 = std::make_unique<XlaCompiler::CompilationResult>();
auto compilation_result2 = std::make_unique<XlaCompiler::CompilationResult>();
auto executable1 = std::make_unique<FakeExecutable>("foo_exe");
auto executable2 = std::make_unique<FakeExecutable>("bar_exe");
cache->Store(key1, DeviceCompileState::kCompiled,
errors::InvalidArgument("Invalid argument."),
std::move(compilation_result1), std::move(executable1));
cache->Store(key2, DeviceCompileState::kCompiling, absl::OkStatus(),
std::move(compilation_result2), std::move(executable2));
auto cache_value_1 = cache->Lookup(key1);
auto cache_value_2 = cache->Lookup(key2);
EXPECT_EQ(cache_value_1->compile_state, DeviceCompileState::kCompiled);
EXPECT_EQ(cache_value_1->compilation_status.code(), error::INVALID_ARGUMENT);
EXPECT_TRUE(cache_value_1->compilation_result != nullptr);
EXPECT_TRUE(cache_value_1->executable != nullptr);
EXPECT_EQ(cache_value_1->executable->data, "foo_exe");
EXPECT_EQ(cache_value_2->compile_state, DeviceCompileState::kCompiling);
EXPECT_TRUE(cache_value_2->compilation_status.ok());
EXPECT_TRUE(cache_value_2->compilation_result != nullptr);
EXPECT_TRUE(cache_value_2->executable != nullptr);
EXPECT_EQ(cache_value_2->executable->data, "bar_exe");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/device_compilation_cache.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/device_compilation_cache_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7d28bedf-da1c-40ae-8533-c0db0131ec7d | cpp | tensorflow/tensorflow | device_compiler | tensorflow/compiler/jit/device_compiler.h | tensorflow/compiler/jit/device_compiler_test.cc | #ifndef TENSORFLOW_COMPILER_JIT_DEVICE_COMPILER_H_
#define TENSORFLOW_COMPILER_JIT_DEVICE_COMPILER_H_
#include <memory>
#include <numeric>
#include <optional>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/base/call_once.h"
#include "absl/container/flat_hash_map.h"
#include "absl/types/optional.h"
#include "absl/types/span.h"
#include "tensorflow/compiler/jit/device_compilation_cache.h"
#include "tensorflow/compiler/jit/device_compilation_cluster_signature.h"
#include "tensorflow/compiler/jit/device_compilation_profiler.h"
#include "tensorflow/compiler/jit/device_compiler_client.h"
#include "tensorflow/compiler/jit/device_executable_persistor.h"
#include "tensorflow/compiler/jit/flags.h"
#include "tensorflow/compiler/jit/tf_graph_to_hlo_compiler.h"
#include "tensorflow/compiler/jit/xla_compile_util.h"
#include "tensorflow/compiler/tf2xla/xla_compiler.h"
#include "xla/client/local_client.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/thread_annotations.h"
namespace tensorflow {
template <typename ExecutableType, typename ClientType>
class DeviceCompiler : public ResourceBase {
public:
DeviceCompiler(
std::unique_ptr<DeviceExecutablePersistor<ExecutableType, ClientType>>
persistor,
std::unique_ptr<DeviceCompilerClient<ExecutableType, ClientType>>
compiler_client);
~DeviceCompiler() override;
enum class CompileScope {
kOp,
kFunction,
};
Status CompileIfNeeded(
const XlaCompiler::Options& options, const NameAttrList& function,
const std::vector<XlaCompiler::Argument>& args,
const XlaCompiler::CompileOptions& compile_options,
DeviceCompileMode compile_mode, DeviceCompilationProfiler* profiler,
const XlaCompiler::CompilationResult** out_compilation_result,
ExecutableType** out_executable);
Status CompileSingleOpIfNeeded(
const XlaCompiler::Options& options,
const std::vector<XlaCompiler::Argument>& args,
const XlaCompiler::CompileOptions& compile_options, OpKernelContext* ctx,
DeviceCompilationProfiler* profiler,
const XlaCompiler::CompilationResult** out_compilation_result,
ExecutableType** out_executable);
ClientType* client() const { return compiler_client_->client(); }
const DeviceType& device_type() const { return persistor_->device_type(); }
DeviceCompilationCache<ExecutableType>* cache() { return cache_.get(); }
DeviceExecutablePersistor<ExecutableType, ClientType>* persistor() {
return persistor_.get();
}
DeviceCompilerClient<ExecutableType, ClientType>* compiler_client() {
return compiler_client_.get();
}
string DebugString() const override;
private:
Status CompileImpl(
const XlaCompiler::CompileOptions& compile_options,
const XlaCompiler::Options& options, const NameAttrList& function,
const std::vector<XlaCompiler::Argument>& args, CompileScope scope,
DeviceCompileMode compile_mode, OpKernelContext* ctx,
DeviceCompilationProfiler* profiler,
const XlaCompiler::CompilationResult** out_compilation_result,
ExecutableType** out_executable);
StatusOr<typename DeviceCompilationCache<ExecutableType>::Value>
CompileStrict(
const DeviceCompilationClusterSignature& sig,
const XlaCompiler::CompileOptions& compile_options,
const XlaCompiler::Options& options,
const std::vector<XlaCompiler::Argument>& args,
const NameAttrList& function,
typename DeviceCompilationCache<ExecutableType>::Value cache_value,
CompileScope scope, OpKernelContext* ctx,
DeviceCompilationProfiler* profiler, mutex* mu)
TF_EXCLUSIVE_LOCKS_REQUIRED(*mu);
Status CompileAsynchronous(const DeviceCompilationClusterSignature& sig,
const XlaCompiler::CompileOptions& compile_options,
const XlaCompiler::Options& options,
const std::vector<XlaCompiler::Argument>& args,
const NameAttrList& function, CompileScope scope,
OpKernelContext* ctx,
DeviceCompilationProfiler* profiler);
std::unique_ptr<DeviceExecutablePersistor<ExecutableType, ClientType>>
persistor_;
std::unique_ptr<DeviceCompilerClient<ExecutableType, ClientType>>
compiler_client_;
std::unique_ptr<DeviceCompilationCache<ExecutableType>> cache_;
std::unique_ptr<thread::ThreadPool> async_compiler_threads_;
mutex cluster_mutexes_mu_;
absl::flat_hash_map<DeviceCompilationClusterSignature, std::unique_ptr<mutex>,
DeviceCompilationClusterSignature::Hash>
cluster_mutexes_ TF_GUARDED_BY(cluster_mutexes_mu_);
DeviceCompiler(const DeviceCompiler&) = delete;
void operator=(const DeviceCompiler&) = delete;
};
namespace device_compiler_internal {
inline void LogOnceXlaCompiledFirstCluster() {
static absl::once_flag log_once;
absl::call_once(log_once, [] {
LOG(INFO) << "Compiled cluster using XLA! This line is logged at most "
"once for the lifetime of the process.";
});
}
template <typename ExecutableType>
inline Status EligibleToPersist(DeviceCompileState compile_state,
const ExecutableType* executable) {
if (compile_state != DeviceCompileState::kCompiled) {
return errors::FailedPrecondition(
"Cache entry to serialize is not compiled.");
}
if (executable == nullptr) {
return errors::FailedPrecondition(
"LocalExecutable not found for cache entry to serialize.");
}
return absl::OkStatus();
}
}
template <typename ExecutableType, typename ClientType>
DeviceCompiler<ExecutableType, ClientType>::DeviceCompiler(
std::unique_ptr<DeviceExecutablePersistor<ExecutableType, ClientType>>
persistor,
std::unique_ptr<DeviceCompilerClient<ExecutableType, ClientType>>
compiler_client)
: persistor_(std::move(persistor)),
compiler_client_(std::move(compiler_client)) {
cache_ = std::make_unique<DeviceCompilationCache<ExecutableType>>();
async_compiler_threads_ = std::make_unique<tensorflow::thread::ThreadPool>(
tensorflow::Env::Default(), "async_compiler_threads",
kNumAsyncDeviceCompilerThreads);
}
template <typename ExecutableType, typename ClientType>
DeviceCompiler<ExecutableType, ClientType>::~DeviceCompiler() {
compiler_client_->WaitForProgramsToFinish();
async_compiler_threads_.reset();
}
template <typename ExecutableType, typename ClientType>
string DeviceCompiler<ExecutableType, ClientType>::DebugString() const {
return "DeviceCompiler";
}
template <typename ExecutableType, typename ClientType>
Status DeviceCompiler<ExecutableType, ClientType>::CompileIfNeeded(
const XlaCompiler::Options& options, const NameAttrList& function,
const std::vector<XlaCompiler::Argument>& args,
const XlaCompiler::CompileOptions& compile_options,
DeviceCompileMode compile_mode, DeviceCompilationProfiler* profiler,
const XlaCompiler::CompilationResult** out_compilation_result,
ExecutableType** out_executable) {
return CompileImpl(compile_options, options, function, args,
CompileScope::kFunction, compile_mode, nullptr,
profiler, out_compilation_result, out_executable);
}
template <typename ExecutableType, typename ClientType>
Status DeviceCompiler<ExecutableType, ClientType>::CompileSingleOpIfNeeded(
const XlaCompiler::Options& options,
const std::vector<XlaCompiler::Argument>& args,
const XlaCompiler::CompileOptions& compile_options, OpKernelContext* ctx,
DeviceCompilationProfiler* profiler,
const XlaCompiler::CompilationResult** out_compilation_result,
ExecutableType** out_executable) {
const NodeDef& def = ctx->op_kernel().def();
NameAttrList name;
name.set_name(def.op());
*name.mutable_attr() = def.attr();
name.mutable_attr()->erase("_class");
return CompileImpl(compile_options, options, name, args, CompileScope::kOp,
DeviceCompileMode::kStrict, ctx, profiler,
out_compilation_result, out_executable);
}
template <typename ExecutableType, typename ClientType>
StatusOr<typename DeviceCompilationCache<ExecutableType>::Value>
DeviceCompiler<ExecutableType, ClientType>::CompileStrict(
const DeviceCompilationClusterSignature& sig,
const XlaCompiler::CompileOptions& compile_options,
const XlaCompiler::Options& options,
const std::vector<XlaCompiler::Argument>& args,
const NameAttrList& function,
typename DeviceCompilationCache<ExecutableType>::Value cache_value,
CompileScope scope, OpKernelContext* ctx,
DeviceCompilationProfiler* profiler, mutex* mu) {
tensorflow::Env* env = tensorflow::Env::Default();
const uint64 compile_start_us = env->NowMicros();
TfGraphToHloCompiler compiler(options);
cache_value.compile_state = DeviceCompileState::kCompiled;
std::unique_ptr<ExecutableType> out_executable;
auto out_compilation_result =
std::make_unique<XlaCompiler::CompilationResult>();
if (scope == CompileScope::kOp) {
cache_value.compilation_status = compiler.CompileSingleOp(
compile_options, ctx, args, out_compilation_result.get());
} else {
CHECK(scope == CompileScope::kFunction);
cache_value.compilation_status = compiler.Compile(
compile_options, function, args, out_compilation_result.get());
}
TF_RETURN_IF_ERROR(cache_value.compilation_status);
TF_RET_CHECK(cache_value.executable == nullptr);
TF_RET_CHECK(out_compilation_result->computation != nullptr);
auto loaded_executable = persistor_->TryToLoadExecutable(
DeviceCompilationClusterSignature::Hash()(sig), sig.HumanString(),
options, *out_compilation_result, compiler_client_.get());
if (loaded_executable.has_value()) {
cache_value.compilation_status = loaded_executable->status();
if (loaded_executable->ok()) {
out_executable = *std::move(*loaded_executable);
metrics::UpdatePersistentCacheLoadCount();
}
} else {
auto built_executable =
compiler_client_->BuildExecutable(options, *out_compilation_result);
TF_RETURN_IF_ERROR(built_executable.status());
out_executable = *std::move(built_executable);
TF_RETURN_IF_ERROR(
device_compiler_internal::EligibleToPersist<ExecutableType>(
cache_value.compile_state, out_executable.get()));
TF_RETURN_IF_ERROR(persistor_->TryToPersistExecutable(
DeviceCompilationClusterSignature::Hash()(sig), sig.HumanString(),
options, *out_compilation_result, *out_executable,
compiler_client_.get()));
}
cache_value.compilation_result = out_compilation_result.get();
cache_value.executable = out_executable.get();
cache_->Store(sig, cache_value.compile_state, cache_value.compilation_status,
std::move(out_compilation_result), std::move(out_executable));
const uint64 compile_end_us = env->NowMicros();
const uint64 compile_time_us = compile_end_us - compile_start_us;
device_compiler_internal::LogOnceXlaCompiledFirstCluster();
TF_RETURN_IF_ERROR(profiler->RegisterCompilation(
function, compile_time_us, loaded_executable.has_value()));
return cache_value;
}
template <typename ExecutableType, typename ClientType>
Status DeviceCompiler<ExecutableType, ClientType>::CompileAsynchronous(
const DeviceCompilationClusterSignature& signature,
const XlaCompiler::CompileOptions& compile_options,
const XlaCompiler::Options& options,
const std::vector<XlaCompiler::Argument>& args,
const NameAttrList& function, CompileScope scope, OpKernelContext* ctx,
DeviceCompilationProfiler* profiler) {
cache_->Store(signature, DeviceCompileState::kCompiling, std::nullopt,
std::nullopt, std::nullopt);
profiler->IncrementOngoingAsyncCompilations();
const std::string& function_name = function.name();
async_compiler_threads_->Schedule([=] {
VLOG(2) << "Starting asynchronous compilation of cluster " << function_name
<< '.';
mutex mu;
mutex_lock lock(mu);
auto cache_value = typename DeviceCompilationCache<ExecutableType>::Value();
auto s = CompileStrict(signature, compile_options, options, args, function,
cache_value, scope, ctx, profiler, &mu);
VLOG(2) << "Finished asynchronous compililation of cluster "
<< function_name << '.';
profiler->DecrementOngoingAsyncCompilations();
if (!s.ok()) {
cache_->Store(signature, std::nullopt, s.status(), std::nullopt,
std::nullopt);
}
});
return absl::OkStatus();
}
template <typename ExecutableType, typename ClientType>
Status DeviceCompiler<ExecutableType, ClientType>::CompileImpl(
const XlaCompiler::CompileOptions& compile_options,
const XlaCompiler::Options& options, const NameAttrList& function,
const std::vector<XlaCompiler::Argument>& args, CompileScope scope,
DeviceCompileMode compile_mode, OpKernelContext* ctx,
DeviceCompilationProfiler* profiler,
const XlaCompiler::CompilationResult** out_compilation_result,
ExecutableType** out_executable) {
DCHECK_NE(out_executable, nullptr);
VLOG(2) << "DeviceCompiler::Compile " << DebugString();
if (VLOG_IS_ON(2)) {
VLOG(2) << "num_inputs=" << args.size();
for (int i = 0, end = args.size(); i < end; i++) {
VLOG(3) << i << ": " << args[i].HumanString();
}
}
TF_ASSIGN_OR_RETURN(auto signature,
DeviceCompilationClusterSignature::Build(function, args));
mutex* cluster_mutex;
{
mutex_lock lock(cluster_mutexes_mu_);
auto it =
cluster_mutexes_.emplace(signature, std::make_unique<mutex>()).first;
cluster_mutex = it->second.get();
}
profiler->RegisterExecution(function);
string human_signature;
if (VLOG_IS_ON(2)) {
human_signature = VLOG_IS_ON(3) ? signature.HumanString() : function.name();
VLOG(2) << "DeviceCompilationClusterSignature: " << human_signature;
}
mutex_lock cluster_compile_lock(*cluster_mutex);
auto cache_value = cache_->LookupOrCreate(signature);
int64_t current_request_count = cache_value.request_count;
VLOG(2) << "Compilation cache entry hit: "
<< static_cast<int>(cache_value.compile_state)
<< " signature: " << human_signature << " with request count "
<< current_request_count;
DeviceCompileState state = cache_value.compile_state;
*out_compilation_result = nullptr;
*out_executable = nullptr;
if (state == DeviceCompileState::kUncompiled && FailOnXlaCompilation()) {
VLOG(1) << "XLA compilation disabled: " << function.name() << "\n"
<< absl::StrJoin(
args, "\n",
[](std::string* out, const XlaCompiler::Argument& arg) {
absl::StrAppend(out, " arg: ", arg.HumanString());
});
return errors::Internal("XLA compilation disabled");
}
if (state == DeviceCompileState::kUncompiled) {
XLA_SCOPED_LOGGING_TIMER("Compilation of XLA executable");
if (!profiler->ShouldCompileCluster(function, compile_mode,
current_request_count)) {
VLOG(2) << "Not compiling for signature: " << human_signature;
return absl::OkStatus();
} else if (compile_mode == DeviceCompileMode::kAsync) {
VLOG(2) << "Queueing asynchronous compilation for signature: "
<< human_signature;
TF_RETURN_IF_ERROR(CompileAsynchronous(signature, compile_options,
options, args, function, scope,
ctx, profiler));
return absl::OkStatus();
} else {
VLOG(2) << "Instantly compiling for signature: " << human_signature;
TF_ASSIGN_OR_RETURN(
cache_value,
CompileStrict(signature, compile_options, options, args, function,
cache_value, scope, ctx, profiler, cluster_mutex));
}
} else if (state == DeviceCompileState::kCompiling) {
VLOG(2) << "Ongoing asynchronous compilation for signature: "
<< human_signature;
return absl::OkStatus();
} else if (state == DeviceCompileState::kCompiled) {
VLOG(2) << "Already Compiled for signature: " << human_signature;
}
TF_RETURN_IF_ERROR(cache_value.compilation_status);
*out_compilation_result = cache_value.compilation_result;
*out_executable = cache_value.executable;
return absl::OkStatus();
}
}
#endif | #include "tensorflow/compiler/jit/device_compiler.h"
#include <iostream>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/math_ops.h"
#include "tensorflow/compiler/jit/device_compilation_cluster_signature.h"
#include "tensorflow/compiler/jit/device_compiler_client.h"
#include "tensorflow/compiler/jit/tests/device_compiler_test_helper.h"
#include "tensorflow/compiler/jit/xla_device_compiler_client.h"
#include "xla/client/client_library.h"
#include "xla/stream_executor/platform_manager.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/notification.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/statusor.h"
namespace tensorflow {
namespace {
using ::testing::_;
using ::testing::Return;
using XlaDeviceCompiler =
DeviceCompiler<xla::LocalExecutable, xla::LocalClient>;
using XlaDeviceExecutablePersistor =
DeviceExecutablePersistor<xla::LocalExecutable, xla::LocalClient>;
using Signature = DeviceCompilationClusterSignature;
xla::LocalClient* GetLocalClient() {
auto platform = se::PlatformManager::PlatformWithName("cuda").value();
return xla::ClientLibrary::GetOrCreateLocalClient(platform).value();
}
XlaDeviceCompiler* CreateXlaDeviceCompiler(bool enable_persistence = false) {
auto xla_compiler_client =
std::make_unique<XlaDeviceCompilerClient>(GetLocalClient());
auto xla_persistor = std::make_unique<XlaDeviceExecutablePersistor>(
XlaDeviceExecutablePersistor::Config{
enable_persistence ? testing::TmpDir() : "", false, "xla"},
DeviceType(DEVICE_GPU_XLA_JIT));
return new XlaDeviceCompiler(std::move(xla_persistor),
std::move(xla_compiler_client));
}
absl::StatusOr<std::unique_ptr<Graph>> SampleGraphAddXY() {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
Scope scope = Scope::NewRootScope().ExitOnError();
auto a = ops::_Arg(scope.WithOpName("A"), DT_INT32, 0);
auto b = ops::_Arg(scope.WithOpName("B"), DT_INT32, 1);
auto c = ops::Add(scope.WithOpName("C"), a, b);
auto d = ops::_Retval(scope.WithOpName("D"), c, 0);
TF_RETURN_IF_ERROR(scope.ToGraph(graph.get()));
return graph;
}
absl::StatusOr<FunctionDef> SampleFuntionAddXY(const std::string& name) {
TF_ASSIGN_OR_RETURN(auto graph, SampleGraphAddXY());
FunctionDef fdef;
TF_RETURN_IF_ERROR(GraphToFunctionDef(*graph, name, &fdef));
return fdef;
}
std::vector<XlaCompiler::Argument> SampleArgsForAddXY() {
std::vector<XlaCompiler::Argument> args(2);
args[0].kind = XlaCompiler::Argument::kParameter;
args[0].type = DT_INT32;
args[0].shape = TensorShape({2});
args[1].kind = XlaCompiler::Argument::kParameter;
args[1].type = DT_INT32;
args[1].shape = TensorShape({2});
return args;
}
class MockXlaDeviceExecutablePersistor
: public DeviceExecutablePersistor<xla::LocalExecutable, xla::LocalClient> {
public:
MockXlaDeviceExecutablePersistor()
: DeviceExecutablePersistor<xla::LocalExecutable, xla::LocalClient>(
Config{testing::TmpDir(), false, "xla"},
DeviceType(DEVICE_CPU_XLA_JIT)) {}
MOCK_METHOD(Status, TryToPersistExecutable,
(uint64, const std::string&, const XlaCompiler::Options&,
const XlaCompiler::CompilationResult&,
const xla::LocalExecutable&,
(DeviceCompilerClient<xla::LocalExecutable, xla::LocalClient>*)),
(const, override));
};
class MockDeviceCompilationProfiler : public DeviceCompilationProfiler {
public:
MOCK_METHOD(bool, ShouldCompileCluster,
(const NameAttrList& function, DeviceCompileMode compile_mode,
int64_t current_request_count),
(override));
MOCK_METHOD(Status, RegisterCompilation,
(const NameAttrList& function, int64_t compile_time_us,
bool used_persistent_cache),
(override));
};
class DeviceCompilerTest : public ::testing::Test {
protected:
void SetUp() override {
flib_def_ = std::make_unique<FunctionLibraryDefinition>(
OpRegistry::Global(), FunctionDefLibrary());
TF_ASSERT_OK_AND_ASSIGN(auto fdef, SampleFuntionAddXY("foo"));
TF_ASSERT_OK(flib_def_->AddFunctionDef(fdef));
profiler_ = new DeviceCompilationProfiler();
profiler_ref_ = std::make_unique<core::ScopedUnref>(profiler_);
mock_profiler_ = new MockDeviceCompilationProfiler();
mock_profiler_ref_ = std::make_unique<core::ScopedUnref>(mock_profiler_);
xla_device_compiler_ = CreateXlaDeviceCompiler();
xla_device_compiler_ref_ =
std::make_unique<core::ScopedUnref>(xla_device_compiler_);
auto listener = std::make_unique<JitCompilationListener>();
listener_ = listener.get();
RegisterXlaActivityListener(std::move(listener));
}
XlaCompiler::Options GetDefaultXlaOptions() {
XlaCompiler::Options options;
options.device_type = DeviceType(DEVICE_GPU_XLA_JIT);
options.client = xla_device_compiler_->client();
options.flib_def = flib_def_.get();
return options;
}
absl::StatusOr<std::unique_ptr<xla::LocalExecutable>>
BuildSampleXlaExecutable() {
TF_ASSIGN_OR_RETURN(auto graph, SampleGraphAddXY());
auto args = SampleArgsForAddXY();
XlaCompiler compiler(GetDefaultXlaOptions());
XlaCompiler::CompilationResult compilation_result;
TF_RETURN_IF_ERROR(compiler.CompileGraph(XlaCompiler::CompileOptions(),
"graph", std::move(graph), args,
&compilation_result));
return xla_device_compiler_->compiler_client()->BuildExecutable(
GetDefaultXlaOptions(), compilation_result);
}
std::unique_ptr<FunctionLibraryDefinition> flib_def_;
JitCompilationListener* listener_;
DeviceCompilationProfiler* profiler_;
std::unique_ptr<core::ScopedUnref> profiler_ref_;
MockDeviceCompilationProfiler* mock_profiler_;
std::unique_ptr<core::ScopedUnref> mock_profiler_ref_;
XlaDeviceCompiler* xla_device_compiler_;
std::unique_ptr<core::ScopedUnref> xla_device_compiler_ref_;
};
TEST_F(DeviceCompilerTest, CompileStrictSuccess) {
const XlaCompiler::CompilationResult* compilation_result = nullptr;
xla::LocalExecutable* xla_executable = nullptr;
XlaCompiler::Options options = GetDefaultXlaOptions();
NameAttrList fn;
fn.set_name("foo");
TF_EXPECT_OK(xla_device_compiler_->CompileIfNeeded(
options, fn, SampleArgsForAddXY(), XlaCompiler::CompileOptions{},
DeviceCompileMode::kStrict, profiler_, &compilation_result,
&xla_executable));
EXPECT_TRUE(compilation_result != nullptr);
EXPECT_TRUE(xla_executable != nullptr);
}
TEST_F(DeviceCompilerTest, CompileShouldCompileClusterFalse) {
const XlaCompiler::CompilationResult* compilation_result = nullptr;
xla::LocalExecutable* xla_executable = nullptr;
XlaCompiler::Options options = GetDefaultXlaOptions();
NameAttrList fn;
fn.set_name("foo");
EXPECT_CALL(*mock_profiler_,
ShouldCompileCluster(_, DeviceCompileMode::kLazy, 1))
.WillOnce(Return(false));
TF_EXPECT_OK(xla_device_compiler_->CompileIfNeeded(
options, fn, SampleArgsForAddXY(), XlaCompiler::CompileOptions{},
DeviceCompileMode::kLazy, mock_profiler_, &compilation_result,
&xla_executable));
EXPECT_TRUE(compilation_result == nullptr);
EXPECT_TRUE(xla_executable == nullptr);
}
TEST_F(DeviceCompilerTest, CompileCacheHit) {
const XlaCompiler::CompilationResult* compilation_result = nullptr;
xla::LocalExecutable* xla_executable = nullptr;
XlaCompiler::Options options = GetDefaultXlaOptions();
NameAttrList fn;
fn.set_name("foo");
TF_EXPECT_OK(xla_device_compiler_->CompileIfNeeded(
options, fn, SampleArgsForAddXY(), XlaCompiler::CompileOptions{},
DeviceCompileMode::kStrict, profiler_, &compilation_result,
&xla_executable));
EXPECT_TRUE(compilation_result != nullptr);
EXPECT_TRUE(xla_executable != nullptr);
const XlaCompiler::CompilationResult* new_compilation_result = nullptr;
xla::LocalExecutable* new_xla_executable = nullptr;
TF_EXPECT_OK(xla_device_compiler_->CompileIfNeeded(
options, fn, SampleArgsForAddXY(), XlaCompiler::CompileOptions{},
DeviceCompileMode::kStrict, profiler_, &new_compilation_result,
&new_xla_executable));
EXPECT_EQ(compilation_result, new_compilation_result);
EXPECT_EQ(xla_executable, new_xla_executable);
}
TEST_F(DeviceCompilerTest, CompileAsyncSuccess) {
const XlaCompiler::CompilationResult* compilation_result = nullptr;
xla::LocalExecutable* xla_executable = nullptr;
XlaCompiler::Options options = GetDefaultXlaOptions();
NameAttrList fn;
fn.set_name("foo");
Notification done;
EXPECT_CALL(*mock_profiler_,
ShouldCompileCluster(_, DeviceCompileMode::kAsync, 1))
.WillOnce(Return(true));
EXPECT_CALL(*mock_profiler_, RegisterCompilation(_, _, false))
.WillOnce([&done] {
done.Notify();
return absl::OkStatus();
});
auto args = SampleArgsForAddXY();
TF_EXPECT_OK(xla_device_compiler_->CompileIfNeeded(
options, fn, args, XlaCompiler::CompileOptions{},
DeviceCompileMode::kAsync, mock_profiler_, &compilation_result,
&xla_executable));
EXPECT_TRUE(compilation_result == nullptr);
EXPECT_TRUE(xla_executable == nullptr);
auto xla_cache = xla_device_compiler_->cache();
TF_ASSERT_OK_AND_ASSIGN(auto signature, Signature::Build(fn, args));
auto cache_value = xla_cache->Lookup(signature);
EXPECT_TRUE(cache_value);
EXPECT_TRUE(cache_value->compile_state != DeviceCompileState::kUncompiled);
done.WaitForNotification();
cache_value = xla_cache->Lookup(signature);
EXPECT_TRUE(cache_value);
EXPECT_TRUE(cache_value->compile_state == DeviceCompileState::kCompiled);
EXPECT_TRUE(cache_value->compilation_result != nullptr);
EXPECT_TRUE(cache_value->executable != nullptr);
EXPECT_TRUE(cache_value->compilation_status.ok());
}
TEST_F(DeviceCompilerTest, CompilePersistentCacheEnabled) {
auto xla_device_compiler =
CreateXlaDeviceCompiler(true);
core::ScopedUnref xla_device_compiler_ref(xla_device_compiler);
NameAttrList fn;
fn.set_name("foo");
auto args = SampleArgsForAddXY();
XlaCompiler::Options options = GetDefaultXlaOptions();
const XlaCompiler::CompilationResult* compilation_result = nullptr;
xla::LocalExecutable* xla_executable = nullptr;
TF_EXPECT_OK(xla_device_compiler->CompileIfNeeded(
options, fn, args, XlaCompiler::CompileOptions{},
DeviceCompileMode::kStrict, profiler_, &compilation_result,
&xla_executable));
EXPECT_TRUE(compilation_result != nullptr);
EXPECT_TRUE(xla_executable != nullptr);
std::vector<XlaJitCompilationActivity> activity_history =
listener_->GetListenerHistory();
EXPECT_EQ(activity_history.size(), 1);
EXPECT_EQ(activity_history[0].cluster_name(), fn.name());
EXPECT_EQ(activity_history[0].compile_count(), 1);
EXPECT_FALSE(activity_history[0].used_persistent_cache());
listener_->ClearListenerHistory();
auto xla_device_compiler_2 =
CreateXlaDeviceCompiler(true);
core::ScopedUnref xla_device_compiler_ref_2(xla_device_compiler_2);
auto profiler = new DeviceCompilationProfiler();
core::ScopedUnref profiler_ref(profiler);
const XlaCompiler::CompilationResult* compilation_result_2 = nullptr;
xla::LocalExecutable* xla_executable_2 = nullptr;
TF_EXPECT_OK(xla_device_compiler_2->CompileIfNeeded(
options, fn, args, XlaCompiler::CompileOptions{},
DeviceCompileMode::kStrict, profiler, &compilation_result_2,
&xla_executable_2));
EXPECT_TRUE(compilation_result_2 != nullptr);
EXPECT_TRUE(xla_executable_2 != nullptr);
activity_history = listener_->GetListenerHistory();
EXPECT_EQ(activity_history.size(), 1);
EXPECT_EQ(activity_history[0].cluster_name(), fn.name());
EXPECT_EQ(activity_history[0].compile_count(), 1);
EXPECT_TRUE(activity_history[0].used_persistent_cache());
}
TEST_F(DeviceCompilerTest, CompileFailedToLoadFromPersistentCache) {
auto xla_device_compiler =
CreateXlaDeviceCompiler(true);
core::ScopedUnref xla_device_compiler_ref(xla_device_compiler);
NameAttrList fn;
fn.set_name("foo");
auto args = SampleArgsForAddXY();
XlaCompiler::Options options = GetDefaultXlaOptions();
const XlaCompiler::CompilationResult* compilation_result = nullptr;
xla::LocalExecutable* xla_executable = nullptr;
TF_EXPECT_OK(xla_device_compiler->CompileIfNeeded(
options, fn, args, XlaCompiler::CompileOptions{},
DeviceCompileMode::kStrict, profiler_, &compilation_result,
&xla_executable));
std::vector<string> files;
TF_ASSERT_OK(Env::Default()->GetChildren(testing::TmpDir(), &files));
std::string const* serialized_executable_filename = nullptr;
for (const auto& file : files) {
if (absl::StartsWith(file, "xla__")) {
serialized_executable_filename = &file;
break;
}
}
EXPECT_TRUE(serialized_executable_filename != nullptr);
std::string serialized_executable_filepath =
io::JoinPath(testing::TmpDir(), *serialized_executable_filename);
std::unique_ptr<WritableFile> serialized_executable_file;
TF_ASSERT_OK(Env::Default()->NewWritableFile(serialized_executable_filepath,
&serialized_executable_file));
TF_ASSERT_OK(serialized_executable_file->Append("Garbage."));
TF_ASSERT_OK(serialized_executable_file->Close());
auto xla_device_compiler_2 =
CreateXlaDeviceCompiler(true);
core::ScopedUnref xla_device_compiler_ref_2(xla_device_compiler_2);
const XlaCompiler::CompilationResult* compilation_result_2 = nullptr;
xla::LocalExecutable* xla_executable_2 = nullptr;
EXPECT_FALSE(xla_device_compiler_2
->CompileIfNeeded(options, fn, args,
XlaCompiler::CompileOptions{},
DeviceCompileMode::kStrict, profiler_,
&compilation_result_2, &xla_executable_2)
.ok());
EXPECT_TRUE(compilation_result_2 == nullptr);
EXPECT_TRUE(xla_executable_2 == nullptr);
}
TEST_F(DeviceCompilerTest, CompileStrictPersistentCacheFailedToPersist) {
auto xla_compiler_client =
std::make_unique<XlaDeviceCompilerClient>(GetLocalClient());
auto xla_persistor = std::make_unique<MockXlaDeviceExecutablePersistor>();
auto xla_device_compiler = new XlaDeviceCompiler(
std::move(xla_persistor), std::move(xla_compiler_client));
core::ScopedUnref xla_device_compiler_ref(xla_device_compiler);
NameAttrList fn;
fn.set_name("foo");
auto args = SampleArgsForAddXY();
XlaCompiler::Options options = GetDefaultXlaOptions();
const XlaCompiler::CompilationResult* compilation_result = nullptr;
xla::LocalExecutable* xla_executable = nullptr;
auto persistor = down_cast<MockXlaDeviceExecutablePersistor*>(
xla_device_compiler->persistor());
TF_ASSERT_OK_AND_ASSIGN(auto signature, Signature::Build(fn, args));
EXPECT_CALL(*persistor,
TryToPersistExecutable(Signature::Hash()(signature),
signature.HumanString(), _, _, _, _))
.WillOnce(Return(errors::FailedPrecondition("Random error.")));
EXPECT_THAT(xla_device_compiler->CompileIfNeeded(
options, fn, args, XlaCompiler::CompileOptions{},
DeviceCompileMode::kStrict, profiler_, &compilation_result,
&xla_executable),
testing::StatusIs(error::FAILED_PRECONDITION,
::testing::HasSubstr("Random error.")));
EXPECT_TRUE(compilation_result == nullptr);
EXPECT_TRUE(xla_executable == nullptr);
}
TEST_F(OpsTestBase, CompileSingleOpSuccess) {
TF_EXPECT_OK(NodeDefBuilder("identity_op", "Identity")
.Input(FakeInput(DT_FLOAT))
.Attr("T", DT_FLOAT)
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
AddInputFromArray<float>(TensorShape({1, 2}), {6.9, 4.2});
TF_EXPECT_OK(RunOpKernel());
auto xla_device_compiler = CreateXlaDeviceCompiler();
core::ScopedUnref xla_device_compiler_ref(xla_device_compiler);
auto profiler = new DeviceCompilationProfiler();
core::ScopedUnref profiler_ref(profiler);
const XlaCompiler::CompilationResult* compilation_result = nullptr;
xla::LocalExecutable* xla_executable = nullptr;
XlaOpRegistry::RegisterCompilationKernels();
auto flib_def = std::make_unique<FunctionLibraryDefinition>(
OpRegistry::Global(), FunctionDefLibrary());
XlaCompiler::Options options;
options.device_type = DeviceType(DEVICE_GPU_XLA_JIT);
options.client = GetLocalClient();
options.flib_def = flib_def.get();
std::vector<XlaCompiler::Argument> args(1);
args[0].kind = XlaCompiler::Argument::kConstant;
args[0].type = DT_FLOAT;
args[0].shape = TensorShape({1, 2});
args[0].constant_value = GetInput(0);
args[0].initialized = true;
NameAttrList fn;
fn.set_name("foo");
TF_EXPECT_OK(xla_device_compiler->CompileSingleOpIfNeeded(
options, args, XlaCompiler::CompileOptions{}, context_.get(), profiler,
&compilation_result, &xla_executable));
EXPECT_TRUE(compilation_result != nullptr);
EXPECT_TRUE(xla_executable != nullptr);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/device_compiler.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/device_compiler_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
86b2b763-0abd-4bf5-99c5-c1caff3cceac | cpp | google/glog | utilities | src/utilities.cc | src/utilities_unittest.cc | #define _GNU_SOURCE 1
#include "utilities.h"
#include <atomic>
#include <cerrno>
#include <csignal>
#include <cstdio>
#include <cstdlib>
#include "base/googleinit.h"
#include "config.h"
#include "glog/flags.h"
#include "glog/logging.h"
#include "stacktrace.h"
#include "symbolize.h"
#ifdef GLOG_OS_ANDROID
# include <android/log.h>
#endif
#ifdef HAVE_SYS_TIME_H
# include <sys/time.h>
#endif
#if defined(HAVE_SYSCALL_H)
# include <syscall.h>
#elif defined(HAVE_SYS_SYSCALL_H)
# include <sys/syscall.h>
#endif
#ifdef HAVE_SYSLOG_H
# include <syslog.h>
#endif
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif
#ifdef HAVE_PWD_H
# include <pwd.h>
#endif
#if defined(HAVE___PROGNAME)
extern char* __progname;
#endif
using std::string;
namespace google {
// Short name of the running program.  Remains nullptr until logging has been
// initialized; presumably assigned by initialization code elsewhere in this
// file (the setter is not visible in this chunk) — see
// IsGoogleLoggingInitialized(), which keys off this pointer.
static const char* g_program_invocation_short_name = nullptr;
// Reports whether logging has been initialized.  The program short name is
// recorded during initialization, so a non-null value means "initialized".
bool IsGoogleLoggingInitialized() {
  const bool has_program_name = (g_program_invocation_short_name != nullptr);
  return has_program_name;
}
inline namespace glog_internal_namespace_ {
// Out-of-class definition of the static constexpr member so it can be
// ODR-used (needed prior to C++17, where such members became implicitly
// inline).
constexpr int FileDescriptor::InvalidHandle;
// Mirrors an error message to a platform-specific secondary sink in addition
// to the normal log destinations: the debugger output window on Windows and
// the system log on Android.  On all other platforms this is a no-op.
void AlsoErrorWrite(LogSeverity severity, const char* tag,
                    const char* message) noexcept {
#if defined(GLOG_OS_WINDOWS)
  // OutputDebugStringA carries no severity/tag channel; only the message is
  // forwarded.
  (void)severity;
  (void)tag;
  ::OutputDebugStringA(message);
#elif defined(GLOG_OS_ANDROID)
  // Maps glog severities (INFO..FATAL) onto Android log priorities.
  // NOTE(review): the indexing is unchecked — assumes 0 <= severity <= 3.
  constexpr int android_log_levels[] = {
      ANDROID_LOG_INFO,
      ANDROID_LOG_WARN,
      ANDROID_LOG_ERROR,
      ANDROID_LOG_FATAL,
  };
  __android_log_write(android_log_levels[severity], tag, message);
#else
  // No secondary sink on this platform; silence unused-parameter warnings.
  (void)severity;
  (void)tag;
  (void)message;
#endif
}
}
}
#ifdef HAVE_STACKTRACE
# include "base/commandlineflags.h"
# include "stacktrace.h"
# include "symbolize.h"
namespace google {
// Callback type used by the stack-dump helpers below: receives a chunk of
// formatted text plus an opaque argument (e.g. a string* accumulator).
using DebugWriter = void(const char*, void*);
// Field width for printing a pointer in hex: "0x" prefix (2 chars) plus two
// hex digits per byte of a pointer.
static const int kPrintfPointerFieldWidth = 2 + 2 * sizeof(void*);
// DebugWriter that sends `data` to stderr (best effort) and mirrors it to
// the platform's secondary error sink; the opaque argument is unused.
static void DebugWriteToStderr(const char* data, void*) {
  // The return value of write() is inspected only to silence
  // warn_unused_result; during a crash dump there is nothing useful to do
  // when the write fails.
  const size_t length = strlen(data);
  if (write(fileno(stderr), data, length) < 0) {
    // Ignore errors.
  }
  AlsoErrorWrite(GLOG_FATAL,
                 glog_internal_namespace_::ProgramInvocationShortName(), data);
}
// DebugWriter that appends `data` to the std::string passed (via void*) as
// `arg`.
static void DebugWriteToString(const char* data, void* arg) {
  // static_cast is the correct named cast for converting a void* back to the
  // pointer type it originated from; reinterpret_cast is unnecessary here.
  static_cast<std::string*>(arg)->append(data);
}
# ifdef HAVE_SYMBOLIZE
// Prints program counter `pc` and, when it can be symbolized, its symbol
// name through `writerfn`.  Output line: "<prefix>@ <addr> <symbol>".
static void DumpPCAndSymbol(DebugWriter* writerfn, void* arg, void* pc,
                            const char* const prefix) {
  char tmp[1024];
  const char* symbol = "(unknown)";
  // `pc` is typically a return address, which points one past the call
  // instruction; subtract 1 so the lookup resolves to the calling function
  // rather than (possibly) the one that follows it.
  if (Symbolize(reinterpret_cast<char*>(pc) - 1, tmp, sizeof(tmp))) {
    symbol = tmp;
  }
  char buf[1024];
  std::snprintf(buf, sizeof(buf), "%s@ %*p %s\n", prefix,
                kPrintfPointerFieldWidth, pc, symbol);
  writerfn(buf, arg);
}
# endif
// Prints program counter `pc` (address only, no symbol lookup) through
// `writerfn`, prefixed by `prefix`.
static void DumpPC(DebugWriter* writerfn, void* arg, void* pc,
                   const char* const prefix) {
  char line[100];
  std::snprintf(line, sizeof(line), "%s@ %*p\n", prefix,
                kPrintfPointerFieldWidth, pc);
  writerfn(line, arg);
}
static void DumpStackTrace(int skip_count, DebugWriter* writerfn, void* arg) {
void* stack[32];
int depth = GetStackTrace(stack, ARRAYSIZE(stack), skip_count + 1);
for (int i = 0; i < depth; i++) {
# if defined(HAVE_SYMBOLIZE)
if (FLAGS_symbolize_stacktrace) {
DumpPCAndSymbol(writerfn, arg, stack[i], " ");
} else {
DumpPC(writerfn, arg, stack[i], " ");
}
# else
DumpPC(writerfn, arg, stack[i], " ");
# endif
}
}
# ifdef __GNUC__
__attribute__((noreturn))
# endif
static void
DumpStackTraceAndExit() {
DumpStackTrace(1, DebugWriteToStderr, nullptr);
if (IsFailureSignalHandlerInstalled()) {
# ifdef HAVE_SIGACTION
struct sigaction sig_action;
memset(&sig_action, 0, sizeof(sig_action));
sigemptyset(&sig_action.sa_mask);
sig_action.sa_handler = SIG_DFL;
sigaction(SIGABRT, &sig_action, nullptr);
# elif defined(GLOG_OS_WINDOWS)
signal(SIGABRT, SIG_DFL);
# endif
}
abort();
}
}
#endif
namespace google {
inline namespace glog_internal_namespace_ {
const char* const_basename(const char* filepath) {
const char* base = strrchr(filepath, '/');
#ifdef GLOG_OS_WINDOWS
if (!base) base = strrchr(filepath, '\\');
#endif
return base ? (base + 1) : filepath;
}
const char* ProgramInvocationShortName() {
if (g_program_invocation_short_name != nullptr) {
return g_program_invocation_short_name;
}
#if defined(HAVE_PROGRAM_INVOCATION_SHORT_NAME)
return program_invocation_short_name;
#elif defined(HAVE_GETPROGNAME)
return getprogname();
#elif defined(HAVE___PROGNAME)
return __progname;
#elif defined(HAVE___ARGV)
return const_basename(__argv[0]);
#else
return "UNKNOWN";
#endif
}
static int32 g_main_thread_pid = getpid();
int32 GetMainThreadPid() { return g_main_thread_pid; }
bool PidHasChanged() {
int32 pid = getpid();
if (g_main_thread_pid == pid) {
return false;
}
g_main_thread_pid = pid;
return true;
}
static string g_my_user_name;
const string& MyUserName() { return g_my_user_name; }
static void MyUserNameInitializer() {
#if defined(GLOG_OS_WINDOWS)
const char* user = getenv("USERNAME");
#else
const char* user = getenv("USER");
#endif
if (user != nullptr) {
g_my_user_name = user;
} else {
#if defined(HAVE_PWD_H) && defined(HAVE_UNISTD_H)
struct passwd pwd;
struct passwd* result = nullptr;
char buffer[1024] = {'\0'};
uid_t uid = geteuid();
int pwuid_res = getpwuid_r(uid, &pwd, buffer, sizeof(buffer), &result);
if (pwuid_res == 0 && result) {
g_my_user_name = pwd.pw_name;
} else {
std::snprintf(buffer, sizeof(buffer), "uid%d", uid);
g_my_user_name = buffer;
}
#endif
if (g_my_user_name.empty()) {
g_my_user_name = "invalid-user";
}
}
}
REGISTER_MODULE_INITIALIZER(utilities, MyUserNameInitializer())
static std::atomic<const logging::internal::CrashReason*> g_reason{nullptr};
void SetCrashReason(const logging::internal::CrashReason* r) {
const logging::internal::CrashReason* expected = nullptr;
g_reason.compare_exchange_strong(expected, r);
}
void InitGoogleLoggingUtilities(const char* argv0) {
CHECK(!IsGoogleLoggingInitialized())
<< "You called InitGoogleLogging() twice!";
g_program_invocation_short_name = const_basename(argv0);
#ifdef HAVE_STACKTRACE
InstallFailureFunction(&DumpStackTraceAndExit);
#endif
}
void ShutdownGoogleLoggingUtilities() {
CHECK(IsGoogleLoggingInitialized())
<< "You called ShutdownGoogleLogging() without calling "
"InitGoogleLogging() first!";
g_program_invocation_short_name = nullptr;
#ifdef HAVE_SYSLOG_H
closelog();
#endif
}
}
#ifdef HAVE_STACKTRACE
std::string GetStackTrace() {
std::string stacktrace;
DumpStackTrace(1, DebugWriteToString, &stacktrace);
return stacktrace;
}
#endif
} | #include "utilities.h"
#include "glog/logging.h"
#include "googletest.h"
#ifdef GLOG_USE_GFLAGS
# include <gflags/gflags.h>
using namespace GFLAGS_NAMESPACE;
#endif
using namespace google;
TEST(utilities, InitGoogleLoggingDeathTest) {
ASSERT_DEATH(InitGoogleLogging("foobar"), "");
}
int main(int argc, char** argv) {
InitGoogleLogging(argv[0]);
InitGoogleTest(&argc, argv);
CHECK_EQ(RUN_ALL_TESTS(), 0);
} | https://github.com/google/glog/blob/de309c08c05382fee0792380de7df1bd65332da2/src/utilities.cc | https://github.com/google/glog/blob/de309c08c05382fee0792380de7df1bd65332da2/src/utilities_unittest.cc | de309c08c05382fee0792380de7df1bd65332da2 |
cfee79c9-b07d-4a03-86dd-5907d4ab53fe | cpp | google/glog | symbolize | src/symbolize.cc | src/symbolize_unittest.cc | #ifdef GLOG_BUILD_CONFIG_INCLUDE
# include GLOG_BUILD_CONFIG_INCLUDE
#endif
#include "symbolize.h"
#include "utilities.h"
#if defined(HAVE_SYMBOLIZE)
# include <algorithm>
# include <cstdlib>
# include <cstring>
# include <limits>
# include "demangle.h"
# define GLOG_SAFE_ASSERT(expr) ((expr) ? 0 : (std::abort(), 0))
namespace google {
inline namespace glog_internal_namespace_ {
namespace {
SymbolizeCallback g_symbolize_callback = nullptr;
SymbolizeOpenObjectFileCallback g_symbolize_open_object_file_callback = nullptr;
ATTRIBUTE_NOINLINE
void DemangleInplace(char* out, size_t out_size) {
char demangled[256];
if (Demangle(out, demangled, sizeof(demangled))) {
size_t len = strlen(demangled);
if (len + 1 <= out_size) {
GLOG_SAFE_ASSERT(len < sizeof(demangled));
memmove(out, demangled, len + 1);
}
}
}
}
void InstallSymbolizeCallback(SymbolizeCallback callback) {
g_symbolize_callback = callback;
}
void InstallSymbolizeOpenObjectFileCallback(
SymbolizeOpenObjectFileCallback callback) {
g_symbolize_open_object_file_callback = callback;
}
}
}
# if defined(HAVE_LINK_H)
# if defined(HAVE_DLFCN_H)
# include <dlfcn.h>
# endif
# include <fcntl.h>
# include <sys/stat.h>
# include <sys/types.h>
# include <unistd.h>
# include <cerrno>
# include <climits>
# include <cstddef>
# include <cstdint>
# include <cstdio>
# include <cstdlib>
# include <cstring>
# include "config.h"
# include "glog/raw_logging.h"
# include "symbolize.h"
namespace google {
inline namespace glog_internal_namespace_ {
namespace {
template <class Functor>
auto FailureRetry(Functor run, int error = EINTR) noexcept(noexcept(run())) {
decltype(run()) result;
while ((result = run()) == -1 && errno == error) {
}
return result;
}
}
static ssize_t ReadFromOffset(const int fd, void* buf, const size_t count,
const size_t offset) {
GLOG_SAFE_ASSERT(fd >= 0);
GLOG_SAFE_ASSERT(count <=
static_cast<size_t>(std::numeric_limits<ssize_t>::max()));
char* buf0 = reinterpret_cast<char*>(buf);
size_t num_bytes = 0;
while (num_bytes < count) {
ssize_t len = FailureRetry([fd, p = buf0 + num_bytes, n = count - num_bytes,
m = static_cast<off_t>(offset + num_bytes)] {
return pread(fd, p, n, m);
});
if (len < 0) {
return -1;
}
if (len == 0) {
break;
}
num_bytes += static_cast<size_t>(len);
}
GLOG_SAFE_ASSERT(num_bytes <= count);
return static_cast<ssize_t>(num_bytes);
}
static bool ReadFromOffsetExact(const int fd, void* buf, const size_t count,
const size_t offset) {
ssize_t len = ReadFromOffset(fd, buf, count, offset);
return static_cast<size_t>(len) == count;
}
static int FileGetElfType(const int fd) {
ElfW(Ehdr) elf_header;
if (!ReadFromOffsetExact(fd, &elf_header, sizeof(elf_header), 0)) {
return -1;
}
if (memcmp(elf_header.e_ident, ELFMAG, SELFMAG) != 0) {
return -1;
}
return elf_header.e_type;
}
static ATTRIBUTE_NOINLINE bool GetSectionHeaderByType(const int fd,
ElfW(Half) sh_num,
const size_t sh_offset,
ElfW(Word) type,
ElfW(Shdr) * out) {
ElfW(Shdr) buf[16];
for (size_t i = 0; i < sh_num;) {
const size_t num_bytes_left = (sh_num - i) * sizeof(buf[0]);
const size_t num_bytes_to_read =
(sizeof(buf) > num_bytes_left) ? num_bytes_left : sizeof(buf);
const ssize_t len = ReadFromOffset(fd, buf, num_bytes_to_read,
sh_offset + i * sizeof(buf[0]));
if (len == -1) {
return false;
}
GLOG_SAFE_ASSERT(static_cast<size_t>(len) % sizeof(buf[0]) == 0);
const size_t num_headers_in_buf = static_cast<size_t>(len) / sizeof(buf[0]);
GLOG_SAFE_ASSERT(num_headers_in_buf <= sizeof(buf) / sizeof(buf[0]));
for (size_t j = 0; j < num_headers_in_buf; ++j) {
if (buf[j].sh_type == type) {
*out = buf[j];
return true;
}
}
i += num_headers_in_buf;
}
return false;
}
const int kMaxSectionNameLen = 64;
bool GetSectionHeaderByName(int fd, const char* name, size_t name_len,
ElfW(Shdr) * out) {
ElfW(Ehdr) elf_header;
if (!ReadFromOffsetExact(fd, &elf_header, sizeof(elf_header), 0)) {
return false;
}
ElfW(Shdr) shstrtab;
size_t shstrtab_offset =
(elf_header.e_shoff + static_cast<size_t>(elf_header.e_shentsize) *
static_cast<size_t>(elf_header.e_shstrndx));
if (!ReadFromOffsetExact(fd, &shstrtab, sizeof(shstrtab), shstrtab_offset)) {
return false;
}
for (size_t i = 0; i < elf_header.e_shnum; ++i) {
size_t section_header_offset =
(elf_header.e_shoff + elf_header.e_shentsize * i);
if (!ReadFromOffsetExact(fd, out, sizeof(*out), section_header_offset)) {
return false;
}
char header_name[kMaxSectionNameLen];
if (sizeof(header_name) < name_len) {
RAW_LOG(WARNING,
"Section name '%s' is too long (%zu); "
"section will not be found (even if present).",
name, name_len);
return false;
}
size_t name_offset = shstrtab.sh_offset + out->sh_name;
ssize_t n_read = ReadFromOffset(fd, &header_name, name_len, name_offset);
if (n_read == -1) {
return false;
} else if (static_cast<size_t>(n_read) != name_len) {
continue;
}
if (memcmp(header_name, name, name_len) == 0) {
return true;
}
}
return false;
}
static ATTRIBUTE_NOINLINE bool FindSymbol(uint64_t pc, const int fd, char* out,
size_t out_size,
uint64_t symbol_offset,
const ElfW(Shdr) * strtab,
const ElfW(Shdr) * symtab) {
if (symtab == nullptr) {
return false;
}
const size_t num_symbols = symtab->sh_size / symtab->sh_entsize;
for (unsigned i = 0; i < num_symbols;) {
size_t offset = symtab->sh_offset + i * symtab->sh_entsize;
# if defined(__WORDSIZE) && __WORDSIZE == 64
const size_t NUM_SYMBOLS = 32U;
# else
const size_t NUM_SYMBOLS = 64U;
# endif
ElfW(Sym) buf[NUM_SYMBOLS];
size_t num_symbols_to_read = std::min(NUM_SYMBOLS, num_symbols - i);
const ssize_t len =
ReadFromOffset(fd, &buf, sizeof(buf[0]) * num_symbols_to_read, offset);
GLOG_SAFE_ASSERT(static_cast<size_t>(len) % sizeof(buf[0]) == 0);
const size_t num_symbols_in_buf = static_cast<size_t>(len) / sizeof(buf[0]);
GLOG_SAFE_ASSERT(num_symbols_in_buf <= num_symbols_to_read);
for (unsigned j = 0; j < num_symbols_in_buf; ++j) {
const ElfW(Sym)& symbol = buf[j];
uint64_t start_address = symbol.st_value;
start_address += symbol_offset;
uint64_t end_address = start_address + symbol.st_size;
if (symbol.st_value != 0 &&
symbol.st_shndx != 0 &&
start_address <= pc && pc < end_address) {
ssize_t len1 = ReadFromOffset(fd, out, out_size,
strtab->sh_offset + symbol.st_name);
if (len1 <= 0 || memchr(out, '\0', out_size) == nullptr) {
memset(out, 0, out_size);
return false;
}
return true;
}
}
i += num_symbols_in_buf;
}
return false;
}
static bool GetSymbolFromObjectFile(const int fd, uint64_t pc, char* out,
size_t out_size, uint64_t base_address) {
ElfW(Ehdr) elf_header;
if (!ReadFromOffsetExact(fd, &elf_header, sizeof(elf_header), 0)) {
return false;
}
ElfW(Shdr) symtab, strtab;
if (GetSectionHeaderByType(fd, elf_header.e_shnum, elf_header.e_shoff,
SHT_SYMTAB, &symtab)) {
if (!ReadFromOffsetExact(
fd, &strtab, sizeof(strtab),
elf_header.e_shoff + symtab.sh_link * sizeof(symtab))) {
return false;
}
if (FindSymbol(pc, fd, out, out_size, base_address, &strtab, &symtab)) {
return true;
}
}
if (GetSectionHeaderByType(fd, elf_header.e_shnum, elf_header.e_shoff,
SHT_DYNSYM, &symtab)) {
if (!ReadFromOffsetExact(
fd, &strtab, sizeof(strtab),
elf_header.e_shoff + symtab.sh_link * sizeof(symtab))) {
return false;
}
if (FindSymbol(pc, fd, out, out_size, base_address, &strtab, &symtab)) {
return true;
}
}
return false;
}
namespace {
class LineReader {
public:
explicit LineReader(int fd, char* buf, size_t buf_len, size_t offset)
: fd_(fd),
buf_(buf),
buf_len_(buf_len),
offset_(offset),
bol_(buf),
eol_(buf),
eod_(buf) {}
bool ReadLine(const char** bol, const char** eol) {
if (BufferIsEmpty()) {
const ssize_t num_bytes = ReadFromOffset(fd_, buf_, buf_len_, offset_);
if (num_bytes <= 0) {
return false;
}
offset_ += static_cast<size_t>(num_bytes);
eod_ = buf_ + num_bytes;
bol_ = buf_;
} else {
bol_ = eol_ + 1;
GLOG_SAFE_ASSERT(bol_ <= eod_);
if (!HasCompleteLine()) {
const auto incomplete_line_length = static_cast<size_t>(eod_ - bol_);
memmove(buf_, bol_, incomplete_line_length);
char* const append_pos = buf_ + incomplete_line_length;
const size_t capacity_left = buf_len_ - incomplete_line_length;
const ssize_t num_bytes =
ReadFromOffset(fd_, append_pos, capacity_left, offset_);
if (num_bytes <= 0) {
return false;
}
offset_ += static_cast<size_t>(num_bytes);
eod_ = append_pos + num_bytes;
bol_ = buf_;
}
}
eol_ = FindLineFeed();
if (eol_ == nullptr) {
return false;
}
*eol_ = '\0';
*bol = bol_;
*eol = eol_;
return true;
}
const char* bol() { return bol_; }
const char* eol() { return eol_; }
private:
LineReader(const LineReader&) = delete;
void operator=(const LineReader&) = delete;
char* FindLineFeed() {
return reinterpret_cast<char*>(
memchr(bol_, '\n', static_cast<size_t>(eod_ - bol_)));
}
bool BufferIsEmpty() { return buf_ == eod_; }
bool HasCompleteLine() {
return !BufferIsEmpty() && FindLineFeed() != nullptr;
}
const int fd_;
char* const buf_;
const size_t buf_len_;
size_t offset_;
char* bol_;
char* eol_;
const char* eod_;
};
}
static char* GetHex(const char* start, const char* end, uint64_t* hex) {
*hex = 0;
const char* p;
for (p = start; p < end; ++p) {
int ch = *p;
if ((ch >= '0' && ch <= '9') || (ch >= 'A' && ch <= 'F') ||
(ch >= 'a' && ch <= 'f')) {
*hex = (*hex << 4U) |
(ch < 'A' ? static_cast<uint64_t>(ch - '0') : (ch & 0xF) + 9U);
} else {
break;
}
}
GLOG_SAFE_ASSERT(p <= end);
return const_cast<char*>(p);
}
static ATTRIBUTE_NOINLINE FileDescriptor
OpenObjectFileContainingPcAndGetStartAddress(uint64_t pc,
uint64_t& start_address,
uint64_t& base_address,
char* out_file_name,
size_t out_file_name_size) {
FileDescriptor maps_fd{
FailureRetry([] { return open("/proc/self/maps", O_RDONLY); })};
if (!maps_fd) {
return nullptr;
}
FileDescriptor mem_fd{
FailureRetry([] { return open("/proc/self/mem", O_RDONLY); })};
if (!mem_fd) {
return nullptr;
}
char buf[1024];
LineReader reader(maps_fd.get(), buf, sizeof(buf), 0);
while (true) {
const char* cursor;
const char* eol;
if (!reader.ReadLine(&cursor, &eol)) {
return nullptr;
}
cursor = GetHex(cursor, eol, &start_address);
if (cursor == eol || *cursor != '-') {
return nullptr;
}
++cursor;
uint64_t end_address;
cursor = GetHex(cursor, eol, &end_address);
if (cursor == eol || *cursor != ' ') {
return nullptr;
}
++cursor;
const char* const flags_start = cursor;
while (cursor < eol && *cursor != ' ') {
++cursor;
}
if (cursor == eol || cursor < flags_start + 4) {
return nullptr;
}
ElfW(Ehdr) ehdr;
if (flags_start[0] == 'r' &&
ReadFromOffsetExact(mem_fd.get(), &ehdr, sizeof(ElfW(Ehdr)),
start_address) &&
memcmp(ehdr.e_ident, ELFMAG, SELFMAG) == 0) {
switch (ehdr.e_type) {
case ET_EXEC:
base_address = 0;
break;
case ET_DYN:
base_address = start_address;
for (unsigned i = 0; i != ehdr.e_phnum; ++i) {
ElfW(Phdr) phdr;
if (ReadFromOffsetExact(
mem_fd.get(), &phdr, sizeof(phdr),
start_address + ehdr.e_phoff + i * sizeof(phdr)) &&
phdr.p_type == PT_LOAD && phdr.p_offset == 0) {
base_address = start_address - phdr.p_vaddr;
break;
}
}
break;
default:
break;
}
}
if (start_address > pc || pc >= end_address) {
continue;
}
if (flags_start[0] != 'r' || flags_start[2] != 'x') {
continue;
}
++cursor;
uint64_t file_offset;
cursor = GetHex(cursor, eol, &file_offset);
if (cursor == eol || *cursor != ' ') {
return nullptr;
}
++cursor;
int num_spaces = 0;
while (cursor < eol) {
if (*cursor == ' ') {
++num_spaces;
} else if (num_spaces >= 2) {
break;
}
++cursor;
}
if (cursor == eol) {
return nullptr;
}
strncpy(out_file_name, cursor, out_file_name_size);
out_file_name[out_file_name_size - 1] = '\0';
return FileDescriptor{
FailureRetry([cursor] { return open(cursor, O_RDONLY); })};
}
}
static char* itoa_r(uintptr_t i, char* buf, size_t sz, unsigned base,
size_t padding) {
size_t n = 1;
if (n > sz) {
return nullptr;
}
if (base < 2 || base > 16) {
buf[0] = '\000';
return nullptr;
}
char* start = buf;
char* ptr = start;
do {
if (++n > sz) {
buf[0] = '\000';
return nullptr;
}
*ptr++ = "0123456789abcdef"[i % base];
i /= base;
if (padding > 0) {
padding--;
}
} while (i > 0 || padding > 0);
*ptr = '\000';
while (--ptr > start) {
char ch = *ptr;
*ptr = *start;
*start++ = ch;
}
return buf;
}
static void SafeAppendString(const char* source, char* dest, size_t dest_size) {
size_t dest_string_length = strlen(dest);
GLOG_SAFE_ASSERT(dest_string_length < dest_size);
dest += dest_string_length;
dest_size -= dest_string_length;
strncpy(dest, source, dest_size);
dest[dest_size - 1] = '\0';
}
static void SafeAppendHexNumber(uint64_t value, char* dest, size_t dest_size) {
char buf[17] = {'\0'};
SafeAppendString(itoa_r(value, buf, sizeof(buf), 16, 0), dest, dest_size);
}
static ATTRIBUTE_NOINLINE bool SymbolizeAndDemangle(
void* pc, char* out, size_t out_size, SymbolizeOptions ) {
auto pc0 = reinterpret_cast<uintptr_t>(pc);
uint64_t start_address = 0;
uint64_t base_address = 0;
FileDescriptor object_fd;
if (out_size < 1) {
return false;
}
out[0] = '\0';
SafeAppendString("(", out, out_size);
if (g_symbolize_open_object_file_callback) {
object_fd.reset(g_symbolize_open_object_file_callback(
pc0, start_address, base_address, out + 1, out_size - 1));
} else {
object_fd = OpenObjectFileContainingPcAndGetStartAddress(
pc0, start_address, base_address, out + 1, out_size - 1);
}
# if defined(PRINT_UNSYMBOLIZED_STACK_TRACES)
{
# else
if (!object_fd) {
# endif
if (out[1]) {
out[out_size - 1] = '\0';
SafeAppendString("+0x", out, out_size);
SafeAppendHexNumber(pc0 - base_address, out, out_size);
SafeAppendString(")", out, out_size);
return true;
}
return false;
}
int elf_type = FileGetElfType(object_fd.get());
if (elf_type == -1) {
return false;
}
if (g_symbolize_callback) {
uint64_t relocation = (elf_type == ET_DYN) ? start_address : 0;
int num_bytes_written =
g_symbolize_callback(object_fd.get(), pc, out, out_size, relocation);
if (num_bytes_written > 0) {
out += static_cast<size_t>(num_bytes_written);
out_size -= static_cast<size_t>(num_bytes_written);
}
}
if (!GetSymbolFromObjectFile(object_fd.get(), pc0, out, out_size,
base_address)) {
if (out[1] && !g_symbolize_callback) {
out[out_size - 1] = '\0';
SafeAppendString("+0x", out, out_size);
SafeAppendHexNumber(pc0 - base_address, out, out_size);
SafeAppendString(")", out, out_size);
return true;
}
return false;
}
DemangleInplace(out, out_size);
return true;
}
}
}
# elif defined(GLOG_OS_MACOSX) && defined(HAVE_DLADDR)
# include <dlfcn.h>
# include <cstring>
namespace google {
inline namespace glog_internal_namespace_ {
static ATTRIBUTE_NOINLINE bool SymbolizeAndDemangle(
void* pc, char* out, size_t out_size, SymbolizeOptions ) {
Dl_info info;
if (dladdr(pc, &info)) {
if (info.dli_sname) {
if (strlen(info.dli_sname) < out_size) {
strcpy(out, info.dli_sname);
DemangleInplace(out, out_size);
return true;
}
}
}
return false;
}
}
}
# elif defined(GLOG_OS_WINDOWS) || defined(GLOG_OS_CYGWIN)
# include <dbghelp.h>
# include <windows.h>
namespace google {
inline namespace glog_internal_namespace_ {
namespace {
class SymInitializer final {
public:
HANDLE process;
bool ready;
SymInitializer() : process(GetCurrentProcess()), ready(false) {
SymSetOptions(SYMOPT_DEFERRED_LOADS | SYMOPT_LOAD_LINES);
if (SymInitialize(process, nullptr, true)) {
ready = true;
}
}
~SymInitializer() {
SymCleanup(process);
}
SymInitializer(const SymInitializer&) = delete;
SymInitializer& operator=(const SymInitializer&) = delete;
SymInitializer(SymInitializer&&) = delete;
SymInitializer& operator=(SymInitializer&&) = delete;
};
}
static ATTRIBUTE_NOINLINE bool SymbolizeAndDemangle(void* pc, char* out,
size_t out_size,
SymbolizeOptions options) {
const static SymInitializer symInitializer;
if (!symInitializer.ready) {
return false;
}
char buf[sizeof(SYMBOL_INFO) + MAX_SYM_NAME];
SYMBOL_INFO* symbol = reinterpret_cast<SYMBOL_INFO*>(buf);
symbol->SizeOfStruct = sizeof(SYMBOL_INFO);
symbol->MaxNameLen = MAX_SYM_NAME;
BOOL ret = SymFromAddr(symInitializer.process, reinterpret_cast<DWORD64>(pc),
0, symbol);
std::size_t namelen = static_cast<size_t>(symbol->NameLen);
if (ret && namelen < out_size) {
std::strncpy(out, symbol->Name, namelen);
out[namelen] = '\0';
DWORD displacement;
IMAGEHLP_LINE64 line{sizeof(IMAGEHLP_LINE64)};
BOOL found = FALSE;
if ((options & SymbolizeOptions::kNoLineNumbers) !=
SymbolizeOptions::kNoLineNumbers) {
found = SymGetLineFromAddr64(symInitializer.process,
reinterpret_cast<DWORD64>(pc), &displacement,
&line);
}
DemangleInplace(out, out_size);
out_size -= std::strlen(out);
if (found) {
std::size_t fnlen = std::strlen(line.FileName);
std::size_t digits = 1;
for (DWORD value = line.LineNumber; (value /= 10) != 0; ++digits) {
}
constexpr std::size_t extralen = 4;
const std::size_t suffixlen = fnlen + extralen + fnlen + digits;
if (suffixlen < out_size) {
out_size -= std::snprintf(out + namelen, out_size, " (%s:%lu)",
line.FileName, line.LineNumber);
}
}
return true;
}
return false;
}
}
}
# else
# error BUG: HAVE_SYMBOLIZE was wrongly set
# endif
namespace google {
inline namespace glog_internal_namespace_ {
bool Symbolize(void* pc, char* out, size_t out_size, SymbolizeOptions options) {
return SymbolizeAndDemangle(pc, out, out_size, options);
}
}
}
#endif | #include "symbolize.h"
#include <csignal>
#include <iostream>
#include "config.h"
#include "glog/logging.h"
#include "googletest.h"
#include "utilities.h"
#include "stacktrace.h"
#ifdef GLOG_USE_GFLAGS
# include <gflags/gflags.h>
using namespace GFLAGS_NAMESPACE;
#endif
using namespace std;
using namespace google;
#if defined(__GNUG__)
# pragma GCC diagnostic push
# pragma GCC diagnostic ignored "-Wpedantic"
#endif
#if defined(HAVE_STACKTRACE)
# define always_inline
# if defined(HAVE_ELF_H) || defined(HAVE_SYS_EXEC_ELF_H) || \
defined(GLOG_OS_WINDOWS) || defined(GLOG_OS_CYGWIN)
static const char* TrySymbolize(void* pc, google::SymbolizeOptions options =
google::SymbolizeOptions::kNone) {
static char symbol[4096];
if (Symbolize(pc, symbol, sizeof(symbol), options)) {
return symbol;
} else {
return nullptr;
}
}
# endif
# if defined(HAVE_ELF_H) || defined(HAVE_SYS_EXEC_ELF_H)
# if defined(__GNUC__) && !defined(__OPENCC__)
# if __GNUC__ >= 4
# define TEST_WITH_MODERN_GCC
# if defined(__i386__) && __i386__
# undef always_inline
# define always_inline __attribute__((always_inline))
# define HAVE_ALWAYS_INLINE
# endif
# else
# endif
# define TEST_WITH_LABEL_ADDRESSES
# endif
extern "C" {
void nonstatic_func();
void nonstatic_func() {
volatile int a = 0;
a = a + 1;
}
static void static_func() {
volatile int a = 0;
a = a + 1;
}
}
TEST(Symbolize, Symbolize) {
EXPECT_STREQ("nonstatic_func", TrySymbolize((void*)(&nonstatic_func)));
const char* static_func_symbol =
TrySymbolize(reinterpret_cast<void*>(&static_func));
# if !defined(_MSC_VER) || !defined(NDEBUG)
CHECK(nullptr != static_func_symbol);
EXPECT_TRUE(strcmp("static_func", static_func_symbol) == 0 ||
strcmp("static_func()", static_func_symbol) == 0);
# endif
EXPECT_TRUE(nullptr == TrySymbolize(nullptr));
}
struct Foo {
static void func(int x);
};
void ATTRIBUTE_NOINLINE Foo::func(int x) {
volatile int a = x;
a = a + 1;
}
# ifdef TEST_WITH_MODERN_GCC
TEST(Symbolize, SymbolizeWithDemangling) {
Foo::func(100);
# if !defined(_MSC_VER) || !defined(NDEBUG)
# if defined(HAVE___CXA_DEMANGLE)
EXPECT_STREQ("Foo::func(int)", TrySymbolize((void*)(&Foo::func)));
# else
EXPECT_STREQ("Foo::func()", TrySymbolize((void*)(&Foo::func)));
# endif
# endif
}
# endif
static void* g_pc_to_symbolize;
static char g_symbolize_buffer[4096];
static char* g_symbolize_result;
static void EmptySignalHandler(int ) {}
static void SymbolizeSignalHandler(int ) {
if (Symbolize(g_pc_to_symbolize, g_symbolize_buffer,
sizeof(g_symbolize_buffer))) {
g_symbolize_result = g_symbolize_buffer;
} else {
g_symbolize_result = nullptr;
}
}
const int kAlternateStackSize = 8096;
const char kAlternateStackFillValue = 0x55;
static ATTRIBUTE_NOINLINE bool StackGrowsDown(int* x) {
int y;
return &y < x;
}
static int GetStackConsumption(const char* alt_stack) {
int x;
if (StackGrowsDown(&x)) {
for (int i = 0; i < kAlternateStackSize; i++) {
if (alt_stack[i] != kAlternateStackFillValue) {
return (kAlternateStackSize - i);
}
}
} else {
for (int i = (kAlternateStackSize - 1); i >= 0; i--) {
if (alt_stack[i] != kAlternateStackFillValue) {
return i;
}
}
}
return -1;
}
# ifdef HAVE_SIGALTSTACK
static const char* SymbolizeStackConsumption(void* pc, int* stack_consumed) {
g_pc_to_symbolize = pc;
char altstack[kAlternateStackSize];
memset(altstack, kAlternateStackFillValue, kAlternateStackSize);
stack_t sigstk;
memset(&sigstk, 0, sizeof(stack_t));
stack_t old_sigstk;
sigstk.ss_sp = altstack;
sigstk.ss_size = kAlternateStackSize;
sigstk.ss_flags = 0;
CHECK_ERR(sigaltstack(&sigstk, &old_sigstk));
struct sigaction sa;
memset(&sa, 0, sizeof(struct sigaction));
struct sigaction old_sa1, old_sa2;
sigemptyset(&sa.sa_mask);
sa.sa_flags = SA_ONSTACK;
sa.sa_handler = EmptySignalHandler;
CHECK_ERR(sigaction(SIGUSR1, &sa, &old_sa1));
sa.sa_handler = SymbolizeSignalHandler;
CHECK_ERR(sigaction(SIGUSR2, &sa, &old_sa2));
CHECK_ERR(kill(getpid(), SIGUSR1));
int stack_consumption1 = GetStackConsumption(altstack);
CHECK_ERR(kill(getpid(), SIGUSR2));
int stack_consumption2 = GetStackConsumption(altstack);
if (stack_consumption1 != -1 && stack_consumption2 != -1) {
*stack_consumed = stack_consumption2 - stack_consumption1;
} else {
*stack_consumed = -1;
}
LOG(INFO) << "Stack consumption of empty signal handler: "
<< stack_consumption1;
LOG(INFO) << "Stack consumption of symbolize signal handler: "
<< stack_consumption2;
LOG(INFO) << "Stack consumption of Symbolize: " << *stack_consumed;
CHECK_ERR(sigaltstack(&old_sigstk, nullptr));
CHECK_ERR(sigaction(SIGUSR1, &old_sa1, nullptr));
CHECK_ERR(sigaction(SIGUSR2, &old_sa2, nullptr));
return g_symbolize_result;
}
# if !defined(HAVE___CXA_DEMANGLE)
# ifdef __ppc64__
constexpr int kStackConsumptionUpperLimit = 4096;
# else
constexpr int kStackConsumptionUpperLimit = 2048;
# endif
# endif
TEST(Symbolize, SymbolizeStackConsumption) {
int stack_consumed;
const char* symbol;
symbol = SymbolizeStackConsumption(reinterpret_cast<void*>(&nonstatic_func),
&stack_consumed);
EXPECT_STREQ("nonstatic_func", symbol);
EXPECT_GT(stack_consumed, 0);
# if !defined(HAVE___CXA_DEMANGLE)
EXPECT_LT(stack_consumed, kStackConsumptionUpperLimit);
# endif
symbol = SymbolizeStackConsumption(reinterpret_cast<void*>(&static_func),
&stack_consumed);
CHECK(nullptr != symbol);
EXPECT_TRUE(strcmp("static_func", symbol) == 0 ||
strcmp("static_func()", symbol) == 0);
EXPECT_GT(stack_consumed, 0);
# if !defined(HAVE___CXA_DEMANGLE)
EXPECT_LT(stack_consumed, kStackConsumptionUpperLimit);
# endif
}
# if defined(TEST_WITH_MODERN_GCC) && !defined(HAVE___CXA_DEMANGLE)
TEST(Symbolize, SymbolizeWithDemanglingStackConsumption) {
Foo::func(100);
int stack_consumed;
const char* symbol;
symbol = SymbolizeStackConsumption(reinterpret_cast<void*>(&Foo::func),
&stack_consumed);
# if defined(HAVE___CXA_DEMANGLE)
EXPECT_STREQ("Foo::func(int)", symbol);
# else
EXPECT_STREQ("Foo::func()", symbol);
# endif
EXPECT_GT(stack_consumed, 0);
EXPECT_LT(stack_consumed, kStackConsumptionUpperLimit);
}
# endif
# endif
extern "C" {
inline void* always_inline inline_func() {
void* pc = nullptr;
# ifdef TEST_WITH_LABEL_ADDRESSES
pc = &&curr_pc;
curr_pc:
# endif
return pc;
}
void* ATTRIBUTE_NOINLINE non_inline_func();
void* ATTRIBUTE_NOINLINE non_inline_func() {
void* pc = nullptr;
# ifdef TEST_WITH_LABEL_ADDRESSES
pc = &&curr_pc;
curr_pc:
# endif
return pc;
}
static void ATTRIBUTE_NOINLINE TestWithPCInsideNonInlineFunction() {
# if defined(TEST_WITH_LABEL_ADDRESSES) && defined(HAVE_ATTRIBUTE_NOINLINE)
void* pc = non_inline_func();
const char* symbol = TrySymbolize(pc);
# if !defined(_MSC_VER) || !defined(NDEBUG)
CHECK(symbol != nullptr);
CHECK_STREQ(symbol, "non_inline_func");
# endif
cout << "Test case TestWithPCInsideNonInlineFunction passed." << endl;
# endif
}
static void ATTRIBUTE_NOINLINE TestWithPCInsideInlineFunction() {
# if defined(TEST_WITH_LABEL_ADDRESSES) && defined(HAVE_ALWAYS_INLINE)
void* pc = inline_func();
const char* symbol = TrySymbolize(pc);
# if !defined(_MSC_VER) || !defined(NDEBUG)
CHECK(symbol != nullptr);
CHECK_STREQ(symbol, __FUNCTION__);
# endif
cout << "Test case TestWithPCInsideInlineFunction passed." << endl;
# endif
}
}
static void ATTRIBUTE_NOINLINE TestWithReturnAddress() {
# if defined(HAVE_ATTRIBUTE_NOINLINE)
void* return_address = __builtin_return_address(0);
const char* symbol =
TrySymbolize(return_address, google::SymbolizeOptions::kNoLineNumbers);
# if !defined(_MSC_VER) || !defined(NDEBUG)
CHECK(symbol != nullptr);
CHECK_STREQ(symbol, "main");
# endif
cout << "Test case TestWithReturnAddress passed." << endl;
# endif
}
# elif defined(GLOG_OS_WINDOWS) || defined(GLOG_OS_CYGWIN)
# ifdef _MSC_VER
# include <intrin.h>
# pragma intrinsic(_ReturnAddress)
# endif
struct Foo {
static void func(int x);
};
__declspec(noinline) void Foo::func(int x) {
volatile int a = x;
a = a + 1;
}
TEST(Symbolize, SymbolizeWithDemangling) {
Foo::func(100);
const char* ret = TrySymbolize((void*)(&Foo::func));
# if defined(HAVE_DBGHELP) && !defined(NDEBUG)
EXPECT_STREQ("public: static void __cdecl Foo::func(int)", ret);
# endif
}
__declspec(noinline) void TestWithReturnAddress() {
void* return_address =
# ifdef __GNUC__
__builtin_return_address(0)
# else
_ReturnAddress()
# endif
;
const char* symbol =
TrySymbolize(return_address, google::SymbolizeOptions::kNoLineNumbers);
# if !defined(_MSC_VER) || !defined(NDEBUG)
CHECK(symbol != nullptr);
CHECK_STREQ(symbol, "main");
# endif
cout << "Test case TestWithReturnAddress passed." << endl;
}
# endif
#endif
int main(int argc, char** argv) {
FLAGS_logtostderr = true;
InitGoogleLogging(argv[0]);
InitGoogleTest(&argc, argv);
#if defined(HAVE_SYMBOLIZE) && defined(HAVE_STACKTRACE)
# if defined(HAVE_ELF_H) || defined(HAVE_SYS_EXEC_ELF_H)
InstallSymbolizeCallback(nullptr);
TestWithPCInsideInlineFunction();
TestWithPCInsideNonInlineFunction();
TestWithReturnAddress();
return RUN_ALL_TESTS();
# elif defined(GLOG_OS_WINDOWS) || defined(GLOG_OS_CYGWIN)
TestWithReturnAddress();
return RUN_ALL_TESTS();
# else
printf("PASS (no symbolize_unittest support)\n");
return 0;
# endif
#else
printf("PASS (no symbolize support)\n");
return 0;
#endif
}
#if defined(__GNUG__)
# pragma GCC diagnostic pop
#endif | https://github.com/google/glog/blob/de309c08c05382fee0792380de7df1bd65332da2/src/symbolize.cc | https://github.com/google/glog/blob/de309c08c05382fee0792380de7df1bd65332da2/src/symbolize_unittest.cc | de309c08c05382fee0792380de7df1bd65332da2 |
8239efd6-6647-44aa-ab01-c4a65b5b5ec6 | cpp | google/glog | signalhandler | src/signalhandler.cc | src/signalhandler_unittest.cc | #include <algorithm>
#include <csignal>
#include <cstring>
#include <ctime>
#include <mutex>
#include <sstream>
#include <thread>
#include "config.h"
#include "glog/logging.h"
#include "glog/platform.h"
#include "stacktrace.h"
#include "symbolize.h"
#include "utilities.h"
#ifdef HAVE_UCONTEXT_H
# include <ucontext.h>
#endif
#ifdef HAVE_SYS_UCONTEXT_H
# include <sys/ucontext.h>
#endif
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif
namespace google {
namespace {
// Table of signals the failure signal handler intercepts, paired with
// their symbolic names for human-readable dump output. SIGBUS is omitted
// on Windows, which does not define it.
const struct {
  int number;
  const char* name;
} kFailureSignals[] = {
    {SIGSEGV, "SIGSEGV"}, {SIGILL, "SIGILL"},
    {SIGFPE, "SIGFPE"}, {SIGABRT, "SIGABRT"},
#if !defined(GLOG_OS_WINDOWS)
    {SIGBUS, "SIGBUS"},
#endif
    {SIGTERM, "SIGTERM"},
};
static bool kFailureSignalHandlerInstalled = false;
#if !defined(GLOG_OS_WINDOWS)
// Extracts the faulting program counter from the ucontext pointer handed
// to a signal handler. PC_FROM_UCONTEXT is a configuration macro naming
// the PC field inside ucontext_t; when it (or a ucontext header) is
// unavailable, the PC cannot be recovered and nullptr is returned.
void* GetPC(void* ucontext_in_void) {
# if (defined(HAVE_UCONTEXT_H) || defined(HAVE_SYS_UCONTEXT_H)) && \
     defined(PC_FROM_UCONTEXT)
  if (ucontext_in_void != nullptr) {
    ucontext_t* context = reinterpret_cast<ucontext_t*>(ucontext_in_void);
    return (void*)context->PC_FROM_UCONTEXT;
  }
# else
  // Parameter is deliberately unused on platforms without PC access.
  (void)ucontext_in_void;
# endif
  return nullptr;
}
#endif
class MinimalFormatter {
public:
MinimalFormatter(char* buffer, size_t size)
: buffer_(buffer), cursor_(buffer), end_(buffer + size) {}
std::size_t num_bytes_written() const {
return static_cast<std::size_t>(cursor_ - buffer_);
}
void AppendString(const char* str) {
ptrdiff_t i = 0;
while (str[i] != '\0' && cursor_ + i < end_) {
cursor_[i] = str[i];
++i;
}
cursor_ += i;
}
void AppendUint64(uint64 number, unsigned radix) {
unsigned i = 0;
while (cursor_ + i < end_) {
const uint64 tmp = number % radix;
number /= radix;
cursor_[i] = static_cast<char>(tmp < 10 ? '0' + tmp : 'a' + tmp - 10);
++i;
if (number == 0) {
break;
}
}
std::reverse(cursor_, cursor_ + i);
cursor_ += i;
}
void AppendHexWithPadding(uint64 number, int width) {
char* start = cursor_;
AppendString("0x");
AppendUint64(number, 16);
if (cursor_ < start + width) {
const int64 delta = start + width - cursor_;
std::copy(start, cursor_, start + delta);
std::fill(start, start + delta, ' ');
cursor_ = start + width;
}
}
private:
char* buffer_;
char* cursor_;
const char* const end_;
};
void WriteToStderr(const char* data, size_t size) {
if (write(fileno(stderr), data, size) < 0) {
}
}
void (*g_failure_writer)(const char* data, size_t size) = WriteToStderr;
void DumpTimeInfo() {
time_t time_in_sec = time(nullptr);
char buf[256];
MinimalFormatter formatter(buf, sizeof(buf));
formatter.AppendString("*** Aborted at ");
formatter.AppendUint64(static_cast<uint64>(time_in_sec), 10);
formatter.AppendString(" (unix time)");
formatter.AppendString(" try \"date -d @");
formatter.AppendUint64(static_cast<uint64>(time_in_sec), 10);
formatter.AppendString("\" if you are using GNU date ***\n");
g_failure_writer(buf, formatter.num_bytes_written());
}
#if defined(HAVE_STACKTRACE) && defined(HAVE_SIGACTION)
void DumpSignalInfo(int signal_number, siginfo_t* siginfo) {
const char* signal_name = nullptr;
for (auto kFailureSignal : kFailureSignals) {
if (signal_number == kFailureSignal.number) {
signal_name = kFailureSignal.name;
}
}
char buf[256];
MinimalFormatter formatter(buf, sizeof(buf));
formatter.AppendString("*** ");
if (signal_name) {
formatter.AppendString(signal_name);
} else {
formatter.AppendString("Signal ");
formatter.AppendUint64(static_cast<uint64>(signal_number), 10);
}
formatter.AppendString(" (@0x");
formatter.AppendUint64(reinterpret_cast<uintptr_t>(siginfo->si_addr), 16);
formatter.AppendString(")");
formatter.AppendString(" received by PID ");
formatter.AppendUint64(static_cast<uint64>(getpid()), 10);
formatter.AppendString(" (TID ");
std::ostringstream oss;
oss << std::showbase << std::hex << std::this_thread::get_id();
formatter.AppendString(oss.str().c_str());
formatter.AppendString(") ");
# ifdef GLOG_OS_LINUX
formatter.AppendString("from PID ");
formatter.AppendUint64(static_cast<uint64>(siginfo->si_pid), 10);
formatter.AppendString("; ");
# endif
formatter.AppendString("stack trace: ***\n");
g_failure_writer(buf, formatter.num_bytes_written());
}
#endif
void DumpStackFrameInfo(const char* prefix, void* pc) {
const char* symbol = "(unknown)";
#if defined(HAVE_SYMBOLIZE)
char symbolized[1024];
if (Symbolize(reinterpret_cast<char*>(pc) - 1, symbolized,
sizeof(symbolized))) {
symbol = symbolized;
}
#else
# pragma message( \
"Symbolize functionality is not available for target platform: stack dump will contain empty frames.")
#endif
char buf[1024];
MinimalFormatter formatter(buf, sizeof(buf));
formatter.AppendString(prefix);
formatter.AppendString("@ ");
const int width = 2 * sizeof(void*) + 2;
formatter.AppendHexWithPadding(reinterpret_cast<uintptr_t>(pc), width);
formatter.AppendString(" ");
formatter.AppendString(symbol);
formatter.AppendString("\n");
g_failure_writer(buf, formatter.num_bytes_written());
}
void InvokeDefaultSignalHandler(int signal_number) {
#ifdef HAVE_SIGACTION
struct sigaction sig_action;
memset(&sig_action, 0, sizeof(sig_action));
sigemptyset(&sig_action.sa_mask);
sig_action.sa_handler = SIG_DFL;
sigaction(signal_number, &sig_action, nullptr);
kill(getpid(), signal_number);
#elif defined(GLOG_OS_WINDOWS)
signal(signal_number, SIG_DFL);
raise(signal_number);
#endif
}
static std::once_flag signaled;
static void HandleSignal(int signal_number
#if !defined(GLOG_OS_WINDOWS)
,
siginfo_t* signal_info, void* ucontext
#endif
) {
DumpTimeInfo();
#if !defined(GLOG_OS_WINDOWS)
void* pc = GetPC(ucontext);
DumpStackFrameInfo("PC: ", pc);
#endif
#ifdef HAVE_STACKTRACE
void* stack[32];
const int depth = GetStackTrace(stack, ARRAYSIZE(stack), 1);
# ifdef HAVE_SIGACTION
DumpSignalInfo(signal_number, signal_info);
# elif !defined(GLOG_OS_WINDOWS)
(void)signal_info;
# endif
for (int i = 0; i < depth; ++i) {
DumpStackFrameInfo(" ", stack[i]);
}
#elif !defined(GLOG_OS_WINDOWS)
(void)signal_info;
#endif
FlushLogFilesUnsafe(GLOG_INFO);
InvokeDefaultSignalHandler(signal_number);
}
#if defined(GLOG_OS_WINDOWS)
void FailureSignalHandler(int signal_number)
#else
void FailureSignalHandler(int signal_number, siginfo_t* signal_info,
void* ucontext)
#endif
{
std::call_once(signaled, &HandleSignal, signal_number
#if !defined(GLOG_OS_WINDOWS)
,
signal_info, ucontext
#endif
);
}
}
bool IsFailureSignalHandlerInstalled() {
#ifdef HAVE_SIGACTION
struct sigaction sig_action;
memset(&sig_action, 0, sizeof(sig_action));
sigemptyset(&sig_action.sa_mask);
sigaction(SIGABRT, nullptr, &sig_action);
if (sig_action.sa_sigaction == &FailureSignalHandler) {
return true;
}
#elif defined(GLOG_OS_WINDOWS)
return kFailureSignalHandlerInstalled;
#endif
return false;
}
void InstallFailureSignalHandler() {
#ifdef HAVE_SIGACTION
struct sigaction sig_action;
memset(&sig_action, 0, sizeof(sig_action));
sigemptyset(&sig_action.sa_mask);
sig_action.sa_flags |= SA_SIGINFO;
sig_action.sa_sigaction = &FailureSignalHandler;
for (auto kFailureSignal : kFailureSignals) {
CHECK_ERR(sigaction(kFailureSignal.number, &sig_action, nullptr));
}
kFailureSignalHandlerInstalled = true;
#elif defined(GLOG_OS_WINDOWS)
for (size_t i = 0; i < ARRAYSIZE(kFailureSignals); ++i) {
CHECK_NE(signal(kFailureSignals[i].number, &FailureSignalHandler), SIG_ERR);
}
kFailureSignalHandlerInstalled = true;
#endif
}
void InstallFailureWriter(void (*writer)(const char* data, size_t size)) {
#if defined(HAVE_SIGACTION) || defined(GLOG_OS_WINDOWS)
g_failure_writer = writer;
#endif
}
} | #include <csignal>
#include <cstdio>
#include <cstdlib>
#include <sstream>
#include <string>
#include <thread>
#include "config.h"
#include "glog/logging.h"
#include "stacktrace.h"
#include "symbolize.h"
#if defined(HAVE_UNISTD_H)
# include <unistd.h>
#endif
#ifdef GLOG_USE_GFLAGS
# include <gflags/gflags.h>
using namespace GFLAGS_NAMESPACE;
#endif
#if defined(_MSC_VER)
# include <io.h>
#endif
using namespace google;
static void DieInThread(int* a) {
std::ostringstream oss;
oss << std::showbase << std::hex << std::this_thread::get_id();
fprintf(stderr, "%s is dying\n", oss.str().c_str());
int b = 1 / *a;
fprintf(stderr, "We should have died: b=%d\n", b);
}
static void WriteToStdout(const char* data, size_t size) {
if (write(fileno(stdout), data, size) < 0) {
}
}
int main(int argc, char** argv) {
#if defined(HAVE_STACKTRACE) && defined(HAVE_SYMBOLIZE)
InitGoogleLogging(argv[0]);
# ifdef GLOG_USE_GFLAGS
ParseCommandLineFlags(&argc, &argv, true);
# endif
InstallFailureSignalHandler();
const std::string command = argc > 1 ? argv[1] : "none";
if (command == "segv") {
LOG(INFO) << "create the log file";
LOG(INFO) << "a message before segv";
int* a = (int*)0xDEAD;
*a = 0;
} else if (command == "loop") {
fprintf(stderr, "looping\n");
while (true)
;
} else if (command == "die_in_thread") {
std::thread t{&DieInThread, nullptr};
t.join();
} else if (command == "dump_to_stdout") {
InstallFailureWriter(WriteToStdout);
abort();
} else if (command == "installed") {
fprintf(stderr, "signal handler installed: %s\n",
IsFailureSignalHandlerInstalled() ? "true" : "false");
} else {
puts("OK");
}
#endif
return 0;
} | https://github.com/google/glog/blob/de309c08c05382fee0792380de7df1bd65332da2/src/signalhandler.cc | https://github.com/google/glog/blob/de309c08c05382fee0792380de7df1bd65332da2/src/signalhandler_unittest.cc | de309c08c05382fee0792380de7df1bd65332da2 |
c75468e1-4f67-47b9-82b2-6d1dc12cb6bb | cpp | google/glog | logging | src/logging.cc | src/logging_unittest.cc | #define _GNU_SOURCE 1
#include "glog/logging.h"
#include <algorithm>
#include <cassert>
#include <chrono>
#include <cstddef>
#include <cstdint>
#include <iomanip>
#include <iterator>
#include <memory>
#include <mutex>
#include <shared_mutex>
#include <string>
#include <thread>
#include <tuple>
#include <type_traits>
#include <utility>
#include "config.h"
#include "glog/platform.h"
#include "glog/raw_logging.h"
#include "stacktrace.h"
#include "utilities.h"
#ifdef GLOG_OS_WINDOWS
# include "windows/dirent.h"
#else
# include <dirent.h>
#endif
#include <fcntl.h>
#include <sys/stat.h>
#include <cctype>
#include <cerrno>
#include <climits>
#include <cstdarg>
#include <cstdio>
#include <cstdlib>
#include <ctime>
#include <regex>
#include <sstream>
#include <vector>
#ifdef HAVE__CHSIZE_S
# include <io.h>
#endif
#ifdef HAVE_PWD_H
# include <pwd.h>
#endif
#ifdef HAVE_SYS_UTSNAME_H
# include <sys/utsname.h>
#endif
#ifdef HAVE_SYSLOG_H
# include <syslog.h>
#endif
#ifdef HAVE_SYS_TYPES_H
# include <sys/types.h>
#endif
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif
#ifndef HAVE_MODE_T
typedef int mode_t;
#endif
using std::dec;
using std::hex;
using std::min;
using std::ostream;
using std::ostringstream;
using std::setfill;
using std::setw;
using std::string;
using std::vector;
using std::fclose;
using std::fflush;
using std::FILE;
using std::fprintf;
using std::fwrite;
using std::perror;
#ifdef __QNX__
using std::fdopen;
#endif
#define EXCLUSIVE_LOCKS_REQUIRED(mu)
enum { PATH_SEPARATOR = '/' };
#ifndef HAVE_PREAD
// Fallback implementation of POSIX pread(2) for platforms without it: read
// `count` bytes at absolute file `offset`, leaving the file position where it
// was.  Returns the number of bytes read, or -1 on error.
static ssize_t pread(int fd, void* buf, size_t count, off_t offset) {
  off_t orig_offset = lseek(fd, 0, SEEK_CUR);
  if (orig_offset == (off_t)-1) return -1;
  // POSIX pread interprets `offset` relative to the start of the file, so seek
  // with SEEK_SET.  (The previous SEEK_CUR made the offset relative to the
  // current position, which only matched POSIX when that position was 0.)
  if (lseek(fd, offset, SEEK_SET) == (off_t)-1) return -1;
  ssize_t len = read(fd, buf, count);
  if (len < 0) return len;
  // Restore the original position so callers observe no net seek.
  if (lseek(fd, orig_offset, SEEK_SET) == (off_t)-1) return -1;
  return len;
}
#ifndef HAVE_PWRITE
// Fallback implementation of POSIX pwrite(2) for platforms without it: write
// `count` bytes at absolute file `offset`, leaving the file position where it
// was.  Returns the number of bytes written, or -1 on error.
static ssize_t pwrite(int fd, void* buf, size_t count, off_t offset) {
  off_t orig_offset = lseek(fd, 0, SEEK_CUR);
  if (orig_offset == (off_t)-1) return -1;
  // POSIX pwrite interprets `offset` relative to the start of the file, so
  // seek with SEEK_SET.  (SEEK_CUR was only equivalent when the current
  // position happened to be 0.)
  if (lseek(fd, offset, SEEK_SET) == (off_t)-1) return -1;
  ssize_t len = write(fd, buf, count);
  if (len < 0) return len;
  // Restore the original position so callers observe no net seek.
  if (lseek(fd, orig_offset, SEEK_SET) == (off_t)-1) return -1;
  return len;
}
// Fills *hostname with this machine's host name.  On platforms with neither
// uname() nor the Windows API, "(unknown)" is returned; on a Windows API
// failure the string is cleared.
static void GetHostName(string* hostname) {
#if defined(HAVE_SYS_UTSNAME_H)
  struct utsname buf;
  if (uname(&buf) < 0) {
    // Ensure we return an empty string when uname() fails.
    *buf.nodename = '\0';
  }
  *hostname = buf.nodename;
#elif defined(GLOG_OS_WINDOWS)
  char buf[MAX_COMPUTERNAME_LENGTH + 1];
  DWORD len = MAX_COMPUTERNAME_LENGTH + 1;
  if (GetComputerNameA(buf, &len)) {
    *hostname = buf;
  } else {
    hostname->clear();
  }
#else
# warning There is no way to retrieve the host name.
  *hostname = "(unknown)";
#endif
}
// Returns true when the attached terminal is expected to honor color output:
// always assumed on Windows (console attributes are used there); elsewhere
// decided from a whitelist of $TERM values known to support ANSI colors.
static bool TerminalSupportsColor() {
  bool term_supports_color = false;
#ifdef GLOG_OS_WINDOWS
  term_supports_color = true;
#else
  // Rely on the TERM environment variable on POSIX-like systems.
  const char* const term = getenv("TERM");
  if (term != nullptr && term[0] != '\0') {
    term_supports_color =
        !strcmp(term, "xterm") || !strcmp(term, "xterm-color") ||
        !strcmp(term, "xterm-256color") || !strcmp(term, "screen-256color") ||
        !strcmp(term, "konsole") || !strcmp(term, "konsole-16color") ||
        !strcmp(term, "konsole-256color") || !strcmp(term, "screen") ||
        !strcmp(term, "linux") || !strcmp(term, "cygwin");
  }
#endif
  return term_supports_color;
}
#if defined(__cpp_lib_unreachable) && (__cpp_lib_unreachable >= 202202L)
# define GLOG_UNREACHABLE std::unreachable()
#elif !defined(NDEBUG)
# define GLOG_UNREACHABLE assert(false)
#else
# if defined(_MSC_VER)
# define GLOG_UNREACHABLE __assume(false)
# elif defined(__has_builtin)
# if __has_builtin(unreachable)
# define GLOG_UNREACHABLE __builtin_unreachable()
# endif
# endif
# if !defined(GLOG_UNREACHABLE) && defined(__GNUG__)
# define GLOG_UNREACHABLE __builtin_unreachable()
# endif
# if !defined(GLOG_UNREACHABLE)
# define GLOG_UNREACHABLE
# endif
#endif
namespace google {
GLOG_NO_EXPORT
std::string StrError(int err);
enum GLogColor { COLOR_DEFAULT, COLOR_RED, COLOR_GREEN, COLOR_YELLOW };
// Maps a log severity to the color it is rendered in when colored output is
// enabled: INFO is uncolored, WARNING yellow, ERROR/FATAL red.
static GLogColor SeverityToColor(LogSeverity severity) {
  switch (severity) {
    case GLOG_INFO:
      return COLOR_DEFAULT;
    case GLOG_WARNING:
      return COLOR_YELLOW;
    case GLOG_ERROR:
    case GLOG_FATAL:
      return COLOR_RED;
  }
  // All enumerators are handled above.
  GLOG_UNREACHABLE;
}
#ifdef GLOG_OS_WINDOWS
// Returns the Windows console text attribute for the given color
// (0 for COLOR_DEFAULT, i.e. leave the console attributes unchanged).
static WORD GetColorAttribute(GLogColor color) {
  switch (color) {
    case COLOR_RED:
      return FOREGROUND_RED;
    case COLOR_GREEN:
      return FOREGROUND_GREEN;
    case COLOR_YELLOW:
      return FOREGROUND_RED | FOREGROUND_GREEN;
    case COLOR_DEFAULT:
      break;
  }
  return 0;
}
#else
// Returns the ANSI color digit for the given color, to be embedded in an
// "\033[0;3Xm" escape.  COLOR_DEFAULT maps to "" (no color).
static const char* GetAnsiColorCode(GLogColor color) {
  switch (color) {
    case COLOR_RED:
      return "1";
    case COLOR_GREEN:
      return "2";
    case COLOR_YELLOW:
      return "3";
    case COLOR_DEFAULT:
      return "";
  };
  return nullptr;
}
#endif
// Returns the maximum log file size in MB.  Values of FLAGS_max_log_size
// outside (0, 4096) fall back to 1 MB.
static uint32 MaxLogSize() {
  return (FLAGS_max_log_size > 0 && FLAGS_max_log_size < 4096
              ? FLAGS_max_log_size
              : 1);
}
const size_t LogMessage::kMaxLogMessageLen = 30000;
namespace logging {
namespace internal {
// Per-message scratch state for one in-flight LogMessage; not copyable.
struct LogMessageData {
  LogMessageData();
  int preserved_errno_;  // errno to restore around message handling
  // Buffer space; contains the complete message text.
  char message_text_[LogMessage::kMaxLogMessageLen + 1];
  LogMessage::LogStream stream_;
  LogSeverity severity_;  // severity level of this message
  int line_;              // line number of the logging call site
  void (LogMessage::*send_method_)();  // dispatch routine selected at creation
  // At most one of the following is used per message, depending on
  // send_method_; a union keeps the footprint down.
  union {
    LogSink* sink_;
    std::vector<std::string>*
        outvec_;
    std::string* message_;
  };
  size_t num_prefix_chars_;     // # of chars of prefix in this message
  size_t num_chars_to_log_;     // # of chars of msg to send to the log
  size_t num_chars_to_syslog_;  // # of chars of msg to send to syslog
  const char* basename_;  // basename of the file that called LOG
  const char* fullname_;  // full name of the file that called LOG
  bool has_been_flushed_;  // false => data has not yet been flushed
  bool first_fatal_;       // true => this was the first fatal message
  std::thread::id thread_id_;
  LogMessageData(const LogMessageData&) = delete;
  LogMessageData& operator=(const LogMessageData&) = delete;
};
}
}
static std::mutex log_mutex;
int64 LogMessage::num_messages_[NUM_SEVERITIES] = {0, 0, 0, 0};
static bool stop_writing = false;
const char* const LogSeverityNames[] = {"INFO", "WARNING", "ERROR", "FATAL"};
static bool exit_on_dfatal = true;
// Returns the printable name of a severity ("INFO", "WARNING", ...).
const char* GetLogSeverityName(LogSeverity severity) {
  return LogSeverityNames[severity];
}
static bool SendEmailInternal(const char* dest, const char* subject,
const char* body, bool use_logging);
base::Logger::~Logger() = default;
namespace {
constexpr std::intmax_t kSecondsInDay = 60 * 60 * 24;
constexpr std::intmax_t kSecondsInWeek = kSecondsInDay * 7;
// Holds a user-supplied log-prefix formatting callback together with its
// opaque user-data pointer.  The Version tag and union leave room for
// additional callback signatures.  Not copyable.
class PrefixFormatter {
 public:
  PrefixFormatter(PrefixFormatterCallback callback, void* data) noexcept
      : version{V2}, callback_v2{callback}, data{data} {}
  // Writes the prefix for `message` to `s` via the stored callback.
  void operator()(std::ostream& s, const LogMessage& message) const {
    switch (version) {
      case V2:
        callback_v2(s, message, data);
        break;
    }
  }
  PrefixFormatter(const PrefixFormatter& other) = delete;
  PrefixFormatter& operator=(const PrefixFormatter& other) = delete;
 private:
  enum Version { V2 } version;
  union {
    PrefixFormatterCallback callback_v2;
  };
  // Opaque pointer passed through to the callback unchanged.
  void* data;
};
std::unique_ptr<PrefixFormatter> g_prefix_formatter;
// Encapsulates all file-system state for one severity's log file: owns the
// FILE*, constructs filenames, performs size-based rollover and flushing.
class LogFileObject : public base::Logger {
 public:
  LogFileObject(LogSeverity severity, const char* base_filename);
  ~LogFileObject() override;
  void Write(bool force_flush,  // whether to flush after this entry
             const std::chrono::system_clock::time_point&
                 timestamp,  // timestamp of this entry
             const char* message, size_t message_len) override;
  // Configuration options; each forces re-creation of the log file.
  void SetBasename(const char* basename);
  void SetExtension(const char* ext);
  void SetSymlinkBasename(const char* symlink_basename);
  // Normal flushing routine (takes mutex_).
  void Flush() override;
  // Bytes written to the current log file.
  uint32 LogSize() override {
    std::lock_guard<std::mutex> l{mutex_};
    return file_length_;
  }
  // Internal flush routine exposed so callers that must not block on
  // mutex_ (see LogDestination::FlushLogFilesUnsafe) can flush directly.
  void FlushUnlocked(const std::chrono::system_clock::time_point& now);
 private:
  // After a failed file creation, retry only every this-many writes.
  static const uint32 kRolloverAttemptFrequency = 0x20;
  std::mutex mutex_;  // guards all mutable state below
  bool base_filename_selected_;
  string base_filename_;
  string symlink_basename_;
  string filename_extension_;  // appended to the generated file name
  std::unique_ptr<FILE> file_;
  LogSeverity severity_;
  uint32 bytes_since_flush_{0};
  uint32 dropped_mem_length_{0};
  uint32 file_length_{0};
  unsigned int rollover_attempt_;  // writes since the last creation attempt
  std::chrono::system_clock::time_point
      next_flush_time_;  // earliest time of the next periodic flush
  std::chrono::system_clock::time_point start_time_;
  // Creates the log file from base_filename_ plus (optionally)
  // time_pid_string and the extension.  Callers hold mutex_.
  bool CreateLogfile(const string& time_pid_string);
};
// Removes old log files from the logging directories once they exceed a
// configurable age.  Invoked opportunistically from LogFileObject::Write.
class LogCleaner {
 public:
  LogCleaner();
  // Enables cleanup with the given retention period.
  void Enable(const std::chrono::minutes& overdue);
  void Disable();
  // Performs a cleanup pass (rate-limited via next_cleanup_time_ --
  // TODO confirm against the definition, which is outside this view).
  void Run(const std::chrono::system_clock::time_point& current_time,
           bool base_filename_selected, const string& base_filename,
           const string& filename_extension);
  bool enabled() const { return enabled_; }
 private:
  // Lists files in log_directory that belong to this program and are older
  // than the retention period.
  vector<string> GetOverdueLogNames(
      string log_directory,
      const std::chrono::system_clock::time_point& current_time,
      const string& base_filename, const string& filename_extension) const;
  bool IsLogFromCurrentProject(const string& filepath,
                               const string& base_filename,
                               const string& filename_extension) const;
  bool IsLogLastModifiedOver(
      const string& filepath,
      const std::chrono::system_clock::time_point& current_time) const;
  bool enabled_{false};
  // Default retention: one week.
  std::chrono::minutes overdue_{
      std::chrono::duration<int, std::ratio<kSecondsInWeek>>{1}};
  std::chrono::system_clock::time_point
      next_cleanup_time_;  // earliest time of the next cleanup pass
};
LogCleaner log_cleaner;
}
// One per severity: couples a LogFileObject with an optional user-supplied
// Logger, and hosts the static routing machinery (stderr/email/sinks) used
// by LogMessage.
class LogDestination {
 public:
  friend class LogMessage;
  friend void ReprintFatalMessage();
  friend base::Logger* base::GetLogger(LogSeverity);
  friend void base::SetLogger(LogSeverity, base::Logger*);
  // These methods are forwarded to by their global namespace-level versions.
  static void SetLogDestination(LogSeverity severity,
                                const char* base_filename);
  static void SetLogSymlink(LogSeverity severity, const char* symlink_basename);
  static void AddLogSink(LogSink* destination);
  static void RemoveLogSink(LogSink* destination);
  static void SetLogFilenameExtension(const char* filename_extension);
  static void SetStderrLogging(LogSeverity min_severity);
  static void SetEmailLogging(LogSeverity min_severity, const char* addresses);
  static void LogToStderr();
  // Flush log files of at least the specified severity.
  static void FlushLogFiles(int min_severity);
  static void FlushLogFilesUnsafe(int min_severity);
  // Maximum bytes per emitted chunk; presumably sized to avoid network
  // packet fragmentation -- TODO confirm.
  static const int kNetworkBytes = 1400;
  static const string& hostname();
  static const bool& terminal_supports_color() {
    return terminal_supports_color_;
  }
  static void DeleteLogDestinations();
  LogDestination(LogSeverity severity, const char* base_filename);
 private:
#if defined(__cpp_lib_shared_mutex) && (__cpp_lib_shared_mutex >= 201505L)
  using SinkMutex = std::shared_mutex;
  using SinkLock = std::lock_guard<SinkMutex>;
#else
  // Pre-C++17 fallback: shared_timed_mutex with a unique_lock.
  using SinkMutex = std::shared_timed_mutex;
  using SinkLock = std::unique_lock<SinkMutex>;
#endif
  friend std::default_delete<LogDestination>;
  ~LogDestination();
  // Conditional routing helpers consulted for every message.
  static void MaybeLogToStderr(LogSeverity severity, const char* message,
                               size_t message_len, size_t prefix_len);
  static void MaybeLogToEmail(LogSeverity severity, const char* message,
                              size_t len);
  static void MaybeLogToLogfile(
      LogSeverity severity,
      const std::chrono::system_clock::time_point& timestamp,
      const char* message, size_t len);
  static void LogToAllLogfiles(
      LogSeverity severity,
      const std::chrono::system_clock::time_point& timestamp,
      const char* message, size_t len);
  static void LogToSinks(LogSeverity severity, const char* full_filename,
                         const char* base_filename, int line,
                         const LogMessageTime& time, const char* message,
                         size_t message_len);
  static void WaitForSinks(logging::internal::LogMessageData* data);
  static LogDestination* log_destination(LogSeverity severity);
  base::Logger* GetLoggerImpl() const { return logger_; }
  void SetLoggerImpl(base::Logger* logger);
  void ResetLoggerImpl() { SetLoggerImpl(&fileobject_); }
  LogFileObject fileobject_;
  base::Logger* logger_;  // either &fileobject_ or a user-provided Logger
  // Lazily created per-severity singletons.
  static std::unique_ptr<LogDestination> log_destinations_[NUM_SEVERITIES];
  static std::underlying_type_t<LogSeverity> email_logging_severity_;
  static string addresses_;  // extra email recipients (comma-joined)
  static string hostname_;   // cached host name; see hostname()
  static bool terminal_supports_color_;
  static std::unique_ptr<vector<LogSink*>> sinks_;
  static SinkMutex sink_mutex_;  // guards sinks_
  LogDestination(const LogDestination&) = delete;
  LogDestination& operator=(const LogDestination&) = delete;
};
std::underlying_type_t<LogSeverity> LogDestination::email_logging_severity_ =
99999;
string LogDestination::addresses_;
string LogDestination::hostname_;
std::unique_ptr<vector<LogSink*>> LogDestination::sinks_;
LogDestination::SinkMutex LogDestination::sink_mutex_;
bool LogDestination::terminal_supports_color_ = TerminalSupportsColor();
// Returns the cached host name, resolving it lazily on first use.  If the
// name cannot be determined, the cache is filled with "(unknown)" so the
// lookup is attempted only once.
const string& LogDestination::hostname() {
  if (!hostname_.empty()) {
    return hostname_;
  }
  GetHostName(&hostname_);
  if (hostname_.empty()) {
    hostname_ = "(unknown)";
  }
  return hostname_;
}
// Starts with the file logger as the active Logger implementation.
LogDestination::LogDestination(LogSeverity severity, const char* base_filename)
    : fileobject_(severity, base_filename), logger_(&fileobject_) {}
LogDestination::~LogDestination() { ResetLoggerImpl(); }
// Installs `logger` as the active Logger, deleting a previously installed
// user logger (but never the built-in fileobject_).
void LogDestination::SetLoggerImpl(base::Logger* logger) {
  if (logger_ == logger) {
    return;
  }
  if (logger_ && logger_ != &fileobject_) {
    // Delete the previous user-specified logger.
    delete logger_;
  }
  logger_ = logger;
}
// Flushes log files of at least min_severity WITHOUT taking log_mutex;
// contrast with FlushLogFiles().  Flushes the built-in fileobject_ directly
// rather than logger_, which may be a user Logger with locks of its own.
inline void LogDestination::FlushLogFilesUnsafe(int min_severity) {
  std::for_each(std::next(std::begin(log_destinations_), min_severity),
                std::end(log_destinations_),
                [now = std::chrono::system_clock::now()](
                    std::unique_ptr<LogDestination>& log) {
                  if (log != nullptr) {
                    log->fileobject_.FlushUnlocked(now);
                  }
                });
}
// Flushes the active Logger of every severity at or above min_severity,
// holding log_mutex for the duration.
inline void LogDestination::FlushLogFiles(int min_severity) {
  std::lock_guard<std::mutex> l{log_mutex};
  for (int i = min_severity; i < NUM_SEVERITIES; i++) {
    LogDestination* log = log_destination(static_cast<LogSeverity>(i));
    if (log != nullptr) {
      log->logger_->Flush();
    }
  }
}
// Points the given severity's log file at base_filename ("" disables it).
inline void LogDestination::SetLogDestination(LogSeverity severity,
                                              const char* base_filename) {
  std::lock_guard<std::mutex> l{log_mutex};
  log_destination(severity)->fileobject_.SetBasename(base_filename);
}
// Sets the basename used for the per-severity "latest log" symlink.
inline void LogDestination::SetLogSymlink(LogSeverity severity,
                                          const char* symlink_basename) {
  CHECK_GE(severity, 0);
  CHECK_LT(severity, NUM_SEVERITIES);
  std::lock_guard<std::mutex> l{log_mutex};
  log_destination(severity)->fileobject_.SetSymlinkBasename(symlink_basename);
}
// Registers a LogSink; the vector is created lazily on first registration.
inline void LogDestination::AddLogSink(LogSink* destination) {
  SinkLock l{sink_mutex_};
  if (sinks_ == nullptr) sinks_ = std::make_unique<std::vector<LogSink*>>();
  sinks_->push_back(destination);
}
// Unregisters a LogSink; a no-op if it was never registered.
inline void LogDestination::RemoveLogSink(LogSink* destination) {
  SinkLock l{sink_mutex_};
  if (sinks_) {
    sinks_->erase(std::remove(sinks_->begin(), sinks_->end(), destination),
                  sinks_->end());
  }
}
// Applies the filename extension to every severity's log file.
inline void LogDestination::SetLogFilenameExtension(const char* ext) {
  std::lock_guard<std::mutex> l{log_mutex};
  for (int severity = 0; severity < NUM_SEVERITIES; ++severity) {
    log_destination(static_cast<LogSeverity>(severity))
        ->fileobject_.SetExtension(ext);
  }
}
// Sets the minimum severity that is copied to stderr.
inline void LogDestination::SetStderrLogging(LogSeverity min_severity) {
  std::lock_guard<std::mutex> l{log_mutex};
  FLAGS_stderrthreshold = min_severity;
}
// Routes all logging to stderr only.  Deliberately takes no lock itself:
// SetStderrLogging and SetLogDestination each acquire log_mutex.
inline void LogDestination::LogToStderr() {
  SetStderrLogging(GLOG_INFO);
  for (int i = 0; i < NUM_SEVERITIES; ++i) {
    SetLogDestination(static_cast<LogSeverity>(i),
                      "");  // "" disables logging to a file
  }
}
// Enables emailing of messages at or above min_severity to `addresses`.
inline void LogDestination::SetEmailLogging(LogSeverity min_severity,
                                            const char* addresses) {
  std::lock_guard<std::mutex> l{log_mutex};
  LogDestination::email_logging_severity_ = min_severity;
  LogDestination::addresses_ = addresses;
}
// Writes `message` to `output` (stdout or stderr), colorized by severity
// when the relevant FLAGS_colorlogto* flag is set and the terminal supports
// it.  Uses console attributes on Windows, ANSI escapes elsewhere.
static void ColoredWriteToStderrOrStdout(FILE* output, LogSeverity severity,
                                         const char* message, size_t len) {
  bool is_stdout = (output == stdout);
  const GLogColor color = (LogDestination::terminal_supports_color() &&
                           ((!is_stdout && FLAGS_colorlogtostderr) ||
                            (is_stdout && FLAGS_colorlogtostdout)))
                              ? SeverityToColor(severity)
                              : COLOR_DEFAULT;
  // Fast path: no coloring requested or needed.
  if (COLOR_DEFAULT == color) {
    fwrite(message, len, 1, output);
    return;
  }
#ifdef GLOG_OS_WINDOWS
  const HANDLE output_handle =
      GetStdHandle(is_stdout ? STD_OUTPUT_HANDLE : STD_ERROR_HANDLE);
  // Save the current console attributes so they can be restored below.
  CONSOLE_SCREEN_BUFFER_INFO buffer_info;
  GetConsoleScreenBufferInfo(output_handle, &buffer_info);
  const WORD old_color_attrs = buffer_info.wAttributes;
  // Flush around each attribute change so buffered text already written
  // is not recolored.
  fflush(output);
  SetConsoleTextAttribute(output_handle,
                          GetColorAttribute(color) | FOREGROUND_INTENSITY);
  fwrite(message, len, 1, output);
  fflush(output);
  SetConsoleTextAttribute(output_handle, old_color_attrs);
#else
  fprintf(output, "\033[0;3%sm", GetAnsiColorCode(color));
  fwrite(message, len, 1, output);
  fprintf(output, "\033[m");  // reset terminal colors
#endif
}
// Writes a (possibly colorized) message to stdout, except that messages at
// or above the stderr threshold are routed to stderr instead.
static void ColoredWriteToStdout(LogSeverity severity, const char* message,
                                 size_t len) {
  FILE* const target = (severity >= FLAGS_stderrthreshold) ? stderr : stdout;
  ColoredWriteToStderrOrStdout(target, severity, message, len);
}
// Writes a (possibly colorized) message to stderr.
static void ColoredWriteToStderr(LogSeverity severity, const char* message,
                                 size_t len) {
  ColoredWriteToStderrOrStdout(stderr, severity, message, len);
}
// Writes the raw message bytes to stderr via C stdio (no iostreams).
static void WriteToStderr(const char* message, size_t len) {
  fwrite(message, len, 1, stderr);
}
// Copies the message to stderr when its severity meets FLAGS_stderrthreshold
// or FLAGS_alsologtostderr is set.  The prefix-stripped text is also passed
// to AlsoErrorWrite (platform-specific secondary error channel -- defined
// elsewhere; exact behavior not visible here).
inline void LogDestination::MaybeLogToStderr(LogSeverity severity,
                                             const char* message,
                                             size_t message_len,
                                             size_t prefix_len) {
  if ((severity >= FLAGS_stderrthreshold) || FLAGS_alsologtostderr) {
    ColoredWriteToStderr(severity, message, message_len);
    AlsoErrorWrite(severity,
                   glog_internal_namespace_::ProgramInvocationShortName(),
                   message + prefix_len);
  }
}
// Emails the message when its severity meets the email threshold set via
// SetEmailLogging or FLAGS_logemaillevel.  Recipients are the union of
// FLAGS_alsologtoemail and the addresses registered with SetEmailLogging.
inline void LogDestination::MaybeLogToEmail(LogSeverity severity,
                                            const char* message, size_t len) {
  if (severity >= email_logging_severity_ || severity >= FLAGS_logemaillevel) {
    string to(FLAGS_alsologtoemail);
    if (!addresses_.empty()) {
      if (!to.empty()) {
        to += ",";  // comma-join the two recipient lists
      }
      to += addresses_;
    }
    const string subject(
        string("[LOG] ") + LogSeverityNames[severity] + ": " +
        glog_internal_namespace_::ProgramInvocationShortName());
    string body(hostname());
    body += "\n\n";
    body.append(message, len);
    // use_logging = false: avoid recursing into the logging machinery.
    SendEmailInternal(to.c_str(), subject.c_str(), body.c_str(), false);
  }
}
// Writes the message to the log file of `severity`, flushing immediately
// when the severity exceeds FLAGS_logbuflevel.
inline void LogDestination::MaybeLogToLogfile(
    LogSeverity severity,
    const std::chrono::system_clock::time_point& timestamp, const char* message,
    size_t len) {
  const bool should_flush = severity > FLAGS_logbuflevel;
  LogDestination* destination = log_destination(severity);
  destination->logger_->Write(should_flush, timestamp, message, len);
}
// Routes the message: to stdout or stderr when the corresponding flag is
// set; otherwise to the log file of `severity` and of every lower severity
// (e.g. an ERROR message also lands in the WARNING and INFO logs).
inline void LogDestination::LogToAllLogfiles(
    LogSeverity severity,
    const std::chrono::system_clock::time_point& timestamp, const char* message,
    size_t len) {
  if (FLAGS_logtostdout) {  // takes precedence over FLAGS_logtostderr
    ColoredWriteToStdout(severity, message, len);
  } else if (FLAGS_logtostderr) {
    ColoredWriteToStderr(severity, message, len);
  } else {
    for (int i = severity; i >= 0; --i) {
      LogDestination::MaybeLogToLogfile(static_cast<LogSeverity>(i), timestamp,
                                        message, len);
    }
  }
}
// Delivers the message to every registered LogSink, holding the sink mutex
// in shared mode (sends may run concurrently with other readers).
inline void LogDestination::LogToSinks(LogSeverity severity,
                                       const char* full_filename,
                                       const char* base_filename, int line,
                                       const LogMessageTime& time,
                                       const char* message,
                                       size_t message_len) {
  std::shared_lock<SinkMutex> l{sink_mutex_};
  if (sinks_) {
    // Iterate in reverse registration order.
    for (size_t i = sinks_->size(); i-- > 0;) {
      (*sinks_)[i]->send(severity, full_filename, base_filename, line, time,
                         message, message_len);
    }
  }
}
// Blocks until every registered sink -- and, for messages dispatched via
// SendToSink/SendToSinkAndLog, the message's own sink -- reports the
// message as sent.
inline void LogDestination::WaitForSinks(
    logging::internal::LogMessageData* data) {
  std::shared_lock<SinkMutex> l{sink_mutex_};
  if (sinks_) {
    for (size_t i = sinks_->size(); i-- > 0;) {
      (*sinks_)[i]->WaitTillSent();
    }
  }
  const bool send_to_sink =
      (data->send_method_ == &LogMessage::SendToSink) ||
      (data->send_method_ == &LogMessage::SendToSinkAndLog);
  if (send_to_sink && data->sink_ != nullptr) {
    data->sink_->WaitTillSent();
  }
}
std::unique_ptr<LogDestination>
LogDestination::log_destinations_[NUM_SEVERITIES];
// Returns the per-severity LogDestination singleton, creating it on first
// use with no explicit base filename.
inline LogDestination* LogDestination::log_destination(LogSeverity severity) {
  if (log_destinations_[severity] == nullptr) {
    log_destinations_[severity] =
        std::make_unique<LogDestination>(severity, nullptr);
  }
  return log_destinations_[severity].get();
}
// Destroys every per-severity destination and drops the sink list; used
// during shutdown.
void LogDestination::DeleteLogDestinations() {
  for (auto& log_destination : log_destinations_) {
    log_destination.reset();
  }
  SinkLock l{sink_mutex_};
  sinks_.reset();
}
namespace {
std::string g_application_fingerprint;
}
// Records an application fingerprint that is later included in the log file
// header (see the "Application fingerprint:" line written on file creation).
void SetApplicationFingerprint(const std::string& fingerprint) {
  g_application_fingerprint = fingerprint;
}
namespace {
#ifdef GLOG_OS_WINDOWS
const char possible_dir_delim[] = {'\\', '/'};
#else
const char possible_dir_delim[] = {'/'};
#endif
// Formats a duration as "H:MM:SS": hours unpadded, minutes and seconds
// zero-padded to two digits.
std::string PrettyDuration(const std::chrono::duration<int>& secs) {
  const int total = secs.count();
  const int hours = total / 3600;
  const int minutes = (total / 60) % 60;
  const int seconds = total % 60;
  std::ostringstream out;
  // setfill persists across insertions; each setw applies to the next field.
  out << hours << ':' << std::setfill('0') << std::setw(2) << minutes << ':'
      << std::setw(2) << seconds;
  return out.str();
}
// A null base_filename means "not selected yet": Write() will then derive a
// default name in one of the logging directories.  rollover_attempt_ starts
// one short of the retry period so the first Write() creates the file.
LogFileObject::LogFileObject(LogSeverity severity, const char* base_filename)
    : base_filename_selected_(base_filename != nullptr),
      base_filename_((base_filename != nullptr) ? base_filename : ""),
      symlink_basename_(glog_internal_namespace_::ProgramInvocationShortName()),
      filename_extension_(),
      severity_(severity),
      rollover_attempt_(kRolloverAttemptFrequency - 1),
      start_time_(std::chrono::system_clock::now()) {}
// Releases the file handle (the unique_ptr's deleter closes it).
LogFileObject::~LogFileObject() {
  std::lock_guard<std::mutex> l{mutex_};
  file_ = nullptr;
}
// Changes the base filename; closes the current file so the next Write()
// re-creates it under the new name.
void LogFileObject::SetBasename(const char* basename) {
  std::lock_guard<std::mutex> l{mutex_};
  base_filename_selected_ = true;
  if (base_filename_ != basename) {
    if (file_ != nullptr) {
      file_ = nullptr;
      // Force an immediate creation attempt on the next Write().
      rollover_attempt_ = kRolloverAttemptFrequency - 1;
    }
    base_filename_ = basename;
  }
}
// Changes the filename extension; closes the current file so the next
// Write() re-creates it with the new extension.
void LogFileObject::SetExtension(const char* ext) {
  std::lock_guard<std::mutex> l{mutex_};
  if (filename_extension_ != ext) {
    if (file_ != nullptr) {
      file_ = nullptr;
      // Force an immediate creation attempt on the next Write().
      rollover_attempt_ = kRolloverAttemptFrequency - 1;
    }
    filename_extension_ = ext;
  }
}
// Sets the basename of the "latest log" symlink created by CreateLogfile.
void LogFileObject::SetSymlinkBasename(const char* symlink_basename) {
  std::lock_guard<std::mutex> l{mutex_};
  symlink_basename_ = symlink_basename;
}
// Thread-safe flush: takes the object mutex, then delegates.
void LogFileObject::Flush() {
  std::lock_guard<std::mutex> l{mutex_};
  FlushUnlocked(std::chrono::system_clock::now());
}
// Flushes the underlying FILE* (if open) and schedules the next periodic
// flush FLAGS_logbufsecs from `now`.  Callers normally hold mutex_; the
// lock-free variant exists for crash-time flushing.
void LogFileObject::FlushUnlocked(
    const std::chrono::system_clock::time_point& now) {
  if (file_ != nullptr) {
    fflush(file_.get());
    bytes_since_flush_ = 0;
  }
  next_flush_time_ =
      now + std::chrono::duration_cast<std::chrono::system_clock::duration>(
                std::chrono::duration<int32>{FLAGS_logbufsecs});
}
// Creates the log file named base_filename_ (+ time_pid_string when
// FLAGS_timestamp_in_logfile_name) + filename_extension_, acquires a write
// lock on it where fcntl is available, and refreshes the "latest log"
// symlinks.  Returns false on any failure.  Callers hold mutex_.
bool LogFileObject::CreateLogfile(const string& time_pid_string) {
  string string_filename = base_filename_;
  if (FLAGS_timestamp_in_logfile_name) {
    string_filename += time_pid_string;
  }
  string_filename += filename_extension_;
  const char* filename = string_filename.c_str();
  int flags = O_WRONLY | O_CREAT;
  if (FLAGS_timestamp_in_logfile_name) {
    // The timestamped name must be fresh; demand exclusive creation.
    flags = flags | O_EXCL;
  }
  FileDescriptor fd{
      open(filename, flags, static_cast<mode_t>(FLAGS_logfile_mode))};
  if (!fd) return false;
#ifdef HAVE_FCNTL
  // Mark the descriptor close-on-exec so children do not inherit it.
  fcntl(fd.get(), F_SETFD, FD_CLOEXEC);
  // Take a whole-file write lock; bail out if another process holds one.
  static struct flock w_lock;
  w_lock.l_type = F_WRLCK;
  w_lock.l_start = 0;
  w_lock.l_whence = SEEK_SET;
  w_lock.l_len = 0;
  int wlock_ret = fcntl(fd.get(), F_SETLK, &w_lock);
  if (wlock_ret == -1) {
    return false;
  }
#endif
  // Wrap the descriptor in a FILE* in append mode; fd ownership transfers.
  file_.reset(fdopen(fd.release(), "a"));
  if (file_ == nullptr) {
    if (FLAGS_timestamp_in_logfile_name) {
      // We exclusively created this file above; remove the empty leftover.
      unlink(filename);
    }
    return false;
  }
#ifdef GLOG_OS_WINDOWS
  if (!FLAGS_timestamp_in_logfile_name) {
    // Reusing a fixed name: position at the end so we append, not overwrite.
    if (fseek(file_.get(), 0, SEEK_END) != 0) {
      return false;
    }
  }
#endif
  // Refresh the "<symlink_basename_>.<SEVERITY>" convenience symlink(s).
  if (!symlink_basename_.empty()) {
    const char* slash = strrchr(filename, PATH_SEPARATOR);
    const string linkname =
        symlink_basename_ + '.' + LogSeverityNames[severity_];
    string linkpath;
    if (slash)
      linkpath = string(
          filename, static_cast<size_t>(slash - filename + 1));  // dir part
    linkpath += linkname;
    unlink(linkpath.c_str());  // remove any stale symlink first
#if defined(GLOG_OS_WINDOWS)
    // TODO(hamaji): Create lnk file on Windows?
#elif defined(HAVE_UNISTD_H)
    // Link relative to the log directory so the symlink survives renames of
    // that directory; symlink failures are deliberately ignored.
    const char* linkdest = slash ? (slash + 1) : filename;
    if (symlink(linkdest, linkpath.c_str()) != 0) {
    }
    // Optionally mirror the symlink into FLAGS_log_link.
    if (!FLAGS_log_link.empty()) {
      linkpath = FLAGS_log_link + "/" + linkname;
      unlink(linkpath.c_str());
      if (symlink(filename, linkpath.c_str()) != 0) {
      }
    }
#endif
  }
  return true;
}
// Appends |message_len| bytes of |message| to the log file, creating or
// rolling over the file as needed.  |force_flush| additionally flushes
// the stream via FlushUnlocked.  Thread-safe: serialized on mutex_.
void LogFileObject::Write(
    bool force_flush, const std::chrono::system_clock::time_point& timestamp,
    const char* message, size_t message_len) {
  std::lock_guard<std::mutex> l{mutex_};
  // An explicitly selected empty base filename means "do not log".
  if (base_filename_selected_ && base_filename_.empty()) {
    return;
  }
  // Remove overdue logs (if the cleaner is enabled) when this call
  // returns, whatever the exit path.
  auto cleanupLogs = [this, current_time = timestamp] {
    if (log_cleaner.enabled()) {
      log_cleaner.Run(current_time, base_filename_selected_, base_filename_,
                      filename_extension_);
    }
  };
  ScopedExit<decltype(cleanupLogs)> cleanupAtEnd{cleanupLogs};
  // Roll over when the file exceeds the size limit (>> 20U converts
  // bytes to MiB) or the pid changed (i.e. after a fork).
  if (file_length_ >> 20U >= MaxLogSize() || PidHasChanged()) {
    file_ = nullptr;
    file_length_ = bytes_since_flush_ = dropped_mem_length_ = 0;
    rollover_attempt_ = kRolloverAttemptFrequency - 1;
  }
  if (file_ == nullptr) {
    // Attempt file creation only every kRolloverAttemptFrequency calls,
    // so a full or unwritable disk is not hammered on every message.
    if (++rollover_attempt_ != kRolloverAttemptFrequency) return;
    rollover_attempt_ = 0;
    struct ::tm tm_time;
    std::time_t t = std::chrono::system_clock::to_time_t(timestamp);
    if (FLAGS_log_utc_time) {
      gmtime_r(&t, &tm_time);
    } else {
      localtime_r(&t, &tm_time);
    }
    // Build the "YYYYMMDD-HHMMSS.<pid>" suffix for the new file name.
    ostringstream time_pid_stream;
    time_pid_stream.fill('0');
    time_pid_stream << 1900 + tm_time.tm_year << setw(2) << 1 + tm_time.tm_mon
                    << setw(2) << tm_time.tm_mday << '-' << setw(2)
                    << tm_time.tm_hour << setw(2) << tm_time.tm_min << setw(2)
                    << tm_time.tm_sec << '.' << GetMainThreadPid();
    const string& time_pid_string = time_pid_stream.str();
    if (base_filename_selected_) {
      // The user chose a base name; fail loudly if it cannot be created.
      if (!CreateLogfile(time_pid_string)) {
        perror("Could not create log file");
        fprintf(stderr, "COULD NOT CREATE LOGFILE '%s'!\n",
                time_pid_string.c_str());
        return;
      }
    } else {
      // Derive a default "<program>.<host>.<user>.log.<severity>." base
      // name and try each candidate logging directory in order.
      string stripped_filename(
          glog_internal_namespace_::ProgramInvocationShortName());
      string hostname;
      GetHostName(&hostname);
      string uidname = MyUserName();
      if (uidname.empty()) uidname = "invalid-user";
      stripped_filename = stripped_filename + '.' + hostname + '.' + uidname +
                          ".log." + LogSeverityNames[severity_] + '.';
      const vector<string>& log_dirs = GetLoggingDirectories();
      bool success = false;
      for (const auto& log_dir : log_dirs) {
        base_filename_ = log_dir + "/" + stripped_filename;
        if (CreateLogfile(time_pid_string)) {
          success = true;
          break;
        }
      }
      if (success == false) {
        perror("Could not create logging file");
        fprintf(stderr, "COULD NOT CREATE A LOGGINGFILE %s!",
                time_pid_string.c_str());
        return;
      }
    }
    // Optionally write a banner describing the new file.
    if (FLAGS_log_file_header) {
      ostringstream file_header_stream;
      file_header_stream.fill('0');
      file_header_stream << "Log file created at: " << 1900 + tm_time.tm_year
                         << '/' << setw(2) << 1 + tm_time.tm_mon << '/'
                         << setw(2) << tm_time.tm_mday << ' ' << setw(2)
                         << tm_time.tm_hour << ':' << setw(2) << tm_time.tm_min
                         << ':' << setw(2) << tm_time.tm_sec
                         << (FLAGS_log_utc_time ? " UTC\n" : "\n")
                         << "Running on machine: " << LogDestination::hostname()
                         << '\n';
      if (!g_application_fingerprint.empty()) {
        file_header_stream << "Application fingerprint: "
                           << g_application_fingerprint << '\n';
      }
      const char* const date_time_format = FLAGS_log_year_in_prefix
                                               ? "yyyymmdd hh:mm:ss.uuuuuu"
                                               : "mmdd hh:mm:ss.uuuuuu";
      file_header_stream
          << "Running duration (h:mm:ss): "
          << PrettyDuration(
                 std::chrono::duration_cast<std::chrono::duration<int>>(
                     timestamp - start_time_))
          << '\n'
          << "Log line format: [IWEF]" << date_time_format << " "
          << "threadid file:line] msg" << '\n';
      const string& file_header_string = file_header_stream.str();
      const size_t header_len = file_header_string.size();
      fwrite(file_header_string.data(), 1, header_len, file_.get());
      file_length_ += header_len;
      bytes_since_flush_ += header_len;
    }
  }
  if (!stop_writing) {
    // Detect disk-full via errno after fwrite (fwrite itself may not
    // report a short count reliably through stdio buffering).
    errno = 0;
    fwrite(message, 1, message_len, file_.get());
    if (FLAGS_stop_logging_if_full_disk &&
        errno == ENOSPC) {  // disk full, stop writing to disk
      stop_writing = true;
      return;
    } else {
      file_length_ += message_len;
      bytes_since_flush_ += message_len;
    }
  } else {
    // Writing is suspended; periodically re-enable it to probe whether
    // disk space came back.
    if (timestamp >= next_flush_time_) {
      stop_writing = false;
    }
    return;
  }
  // Flush on demand, after ~1MB of buffered data, or on the schedule.
  if (force_flush || (bytes_since_flush_ >= 1000000) ||
      (timestamp >= next_flush_time_)) {
    FlushUnlocked(timestamp);
#ifdef GLOG_OS_LINUX
    // Drop flushed log data from the page cache in 1MiB-aligned chunks
    // so long-running processes do not pin log pages in memory.
    if (FLAGS_drop_log_memory && file_length_ >= (3U << 20U)) {
      uint32 total_drop_length =
          (file_length_ & ~((1U << 20U) - 1U)) - (1U << 20U);
      uint32 this_drop_length = total_drop_length - dropped_mem_length_;
      if (this_drop_length >= (2U << 20U)) {
# if defined(HAVE_POSIX_FADVISE)
        posix_fadvise(
            fileno(file_.get()), static_cast<off_t>(dropped_mem_length_),
            static_cast<off_t>(this_drop_length), POSIX_FADV_DONTNEED);
# endif
        dropped_mem_length_ = total_drop_length;
      }
    }
#endif
  }
}
LogCleaner::LogCleaner() = default;
// Enables periodic removal of log files older than |overdue|.
void LogCleaner::Enable(const std::chrono::minutes& overdue) {
  enabled_ = true;
  overdue_ = overdue;
}
void LogCleaner::Disable() { enabled_ = false; }
// Deletes overdue log files in the relevant directories.  Invoked from
// LogFileObject::Write on every message, but rate-limited so the actual
// directory scan runs at most once per FLAGS_logcleansecs seconds.
void LogCleaner::Run(const std::chrono::system_clock::time_point& current_time,
                     bool base_filename_selected, const string& base_filename,
                     const string& filename_extension) {
  assert(enabled_);
  assert(!base_filename_selected || !base_filename.empty());
  // Avoid scanning logs too frequently.
  if (current_time < next_cleanup_time_) {
    return;
  }
  next_cleanup_time_ =
      current_time +
      std::chrono::duration_cast<std::chrono::system_clock::duration>(
          std::chrono::duration<int32>{FLAGS_logcleansecs});
  vector<string> dirs;
  if (!base_filename_selected) {
    // No explicit destination chosen: scan every candidate directory.
    dirs = GetLoggingDirectories();
  } else {
    // Scan only the directory component of the selected base filename.
    size_t pos = base_filename.find_last_of(possible_dir_delim, string::npos,
                                            sizeof(possible_dir_delim));
    if (pos != string::npos) {
      string dir = base_filename.substr(0, pos + 1);
      dirs.push_back(dir);
    } else {
      dirs.emplace_back(".");
    }
  }
  for (const std::string& dir : dirs) {
    vector<string> logs = GetOverdueLogNames(dir, current_time, base_filename,
                                             filename_extension);
    for (const std::string& log : logs) {
      int result = unlink(log.c_str());
      if (result != 0) {
        perror(("Could not remove overdue log " + log).c_str());
      }
    }
  }
}
// Scans |log_directory| and returns the paths of log files that belong
// to this program (per IsLogFromCurrentProject) and whose last
// modification is older than the configured overdue threshold.
vector<string> LogCleaner::GetOverdueLogNames(
    string log_directory,
    const std::chrono::system_clock::time_point& current_time,
    const string& base_filename, const string& filename_extension) const {
  vector<string> overdue_log_names;
  DIR* dir;
  struct dirent* ent;
  if ((dir = opendir(log_directory.c_str()))) {
    while ((ent = readdir(dir))) {
      // Skip the "." and ".." directory entries.
      if (strcmp(ent->d_name, ".") == 0 || strcmp(ent->d_name, "..") == 0) {
        continue;
      }
      string filepath = ent->d_name;
      const char* const dir_delim_end =
          possible_dir_delim + sizeof(possible_dir_delim);
      // Prepend the directory only when it ends with a path delimiter.
      // NOTE(review): when |log_directory| lacks a trailing delimiter
      // the entry name stays relative to the CWD -- confirm callers
      // always pass a delimiter-terminated directory.
      if (!log_directory.empty() &&
          std::find(possible_dir_delim, dir_delim_end,
                    log_directory[log_directory.size() - 1]) != dir_delim_end) {
        filepath = log_directory + filepath;
      }
      if (IsLogFromCurrentProject(filepath, base_filename,
                                  filename_extension) &&
          IsLogLastModifiedOver(filepath, current_time)) {
        overdue_log_names.push_back(filepath);
      }
    }
    closedir(dir);
  }
  return overdue_log_names;
}
// Returns true if |filepath| looks like a log file created by this
// program: it starts with |base_filename| (with runs of repeated path
// delimiters collapsed), optionally followed by |filename_extension|,
// and ends with the "YYYYMMDD-HHMMSS.<pid>" suffix that
// LogFileObject::Write appends when creating a file.
bool LogCleaner::IsLogFromCurrentProject(
    const string& filepath, const string& base_filename,
    const string& filename_extension) const {
  // Collapse consecutive duplicate path delimiters (e.g. "//") in
  // base_filename so the prefix comparison below is not thrown off.
  string cleaned_base_filename;
  const char* const dir_delim_end =
      possible_dir_delim + sizeof(possible_dir_delim);
  size_t real_filepath_size = filepath.size();
  for (char c : base_filename) {
    if (cleaned_base_filename.empty()) {
      cleaned_base_filename += c;
    } else if (std::find(possible_dir_delim, dir_delim_end, c) ==
                   dir_delim_end ||
               (!cleaned_base_filename.empty() &&
                c != cleaned_base_filename[cleaned_base_filename.size() - 1])) {
      // Keep the char when it is not a delimiter, or when it differs
      // from the previous char (i.e. not a repeated delimiter).
      cleaned_base_filename += c;
    }
  }
  // The candidate must start with the (cleaned) base name.
  if (filepath.find(cleaned_base_filename) != 0) {
    return false;
  }
  if (!filename_extension.empty()) {
    if (cleaned_base_filename.size() >= real_filepath_size) {
      return false;
    }
    // The extension may follow the base name directly, or sit at the
    // very end of the path; accept either placement.
    string ext = filepath.substr(cleaned_base_filename.size(),
                                 filename_extension.size());
    if (ext == filename_extension) {
      cleaned_base_filename += filename_extension;
    } else {
      if (filename_extension.size() >= real_filepath_size) {
        return false;
      }
      real_filepath_size = filepath.size() - filename_extension.size();
      if (filepath.substr(real_filepath_size) != filename_extension) {
        return false;
      }
    }
  }
  // Validate the remaining characters against the time/pid suffix
  // written by LogFileObject::Write: 8 date digits, '-', 6 time digits,
  // '.', then the numeric pid.
  for (size_t i = cleaned_base_filename.size(); i < real_filepath_size; i++) {
    const char& c = filepath[i];
    if (i <= cleaned_base_filename.size() + 7) {  // YYYYMMDD
      if (c < '0' || c > '9') {
        return false;
      }
    } else if (i == cleaned_base_filename.size() + 8) {  // '-'
      if (c != '-') {
        return false;
      }
    } else if (i <= cleaned_base_filename.size() + 14) {  // HHMMSS
      if (c < '0' || c > '9') {
        return false;
      }
    } else if (i == cleaned_base_filename.size() + 15) {  // '.'
      if (c != '.') {
        return false;
      }
    } else if (i >= cleaned_base_filename.size() + 16) {  // pid digits
      if (c < '0' || c > '9') {
        return false;
      }
    }
  }
  return true;
}
// Returns true when |filepath| has not been modified for at least the
// configured |overdue_| duration.  Files that cannot be stat()ed are
// never considered overdue.
bool LogCleaner::IsLogLastModifiedOver(
    const string& filepath,
    const std::chrono::system_clock::time_point& current_time) const {
  struct stat info;
  if (stat(filepath.c_str(), &info) != 0) {
    // Unable to inspect the file; play it safe and keep it.
    return false;
  }
  const auto mtime = std::chrono::system_clock::from_time_t(info.st_mtime);
  return current_time - mtime >= overdue_;
}
}
// Guards the fatal-message bookkeeping below.
static std::mutex fatal_msg_lock;
static logging::internal::CrashReason crash_reason;
// The first LOG(FATAL) in the process claims fatal_msg_data_exclusive;
// fatal messages racing in from other threads fall back to
// fatal_msg_data_shared (see LogMessage::Init).
static bool fatal_msg_exclusive = true;
static logging::internal::LogMessageData fatal_msg_data_exclusive;
static logging::internal::LogMessageData fatal_msg_data_shared;
#ifdef GLOG_THREAD_LOCAL_STORAGE
// Per-thread static storage for one LogMessageData, so the common case
// (a single live, non-fatal message per thread) needs no heap
// allocation.
static thread_local bool thread_data_available = true;
# if defined(__cpp_lib_byte) && __cpp_lib_byte >= 201603L
alignas(logging::internal::LogMessageData) static thread_local std::byte
    thread_msg_data[sizeof(logging::internal::LogMessageData)];
# else
static thread_local std::aligned_storage<
    sizeof(logging::internal::LogMessageData),
    alignof(logging::internal::LogMessageData)>::type thread_msg_data;
# endif
#endif
// Binds the message stream to the fixed-size message_text_ buffer.
logging::internal::LogMessageData::LogMessageData()
    : stream_(message_text_, LogMessage::kMaxLogMessageLen, 0) {}
// Each constructor below selects the "send method" that determines
// where the finished message goes when it is flushed.
LogMessage::LogMessage(const char* file, int line, LogSeverity severity,
                       int64 ctr, void (LogMessage::*send_method)())
    : allocated_(nullptr) {
  Init(file, line, severity, send_method);
  data_->stream_.set_ctr(ctr);
}
// Used by CHECK() failures: logs at FATAL, prefixed with the failed
// condition text.
LogMessage::LogMessage(const char* file, int line,
                       const logging::internal::CheckOpString& result)
    : allocated_(nullptr) {
  Init(file, line, GLOG_FATAL, &LogMessage::SendToLog);
  stream() << "Check failed: " << (*result.str_) << " ";
}
// Default severity is INFO.
LogMessage::LogMessage(const char* file, int line) : allocated_(nullptr) {
  Init(file, line, GLOG_INFO, &LogMessage::SendToLog);
}
LogMessage::LogMessage(const char* file, int line, LogSeverity severity)
    : allocated_(nullptr) {
  Init(file, line, severity, &LogMessage::SendToLog);
}
// Sends to |sink|, optionally also to the normal log destinations.
LogMessage::LogMessage(const char* file, int line, LogSeverity severity,
                       LogSink* sink, bool also_send_to_log)
    : allocated_(nullptr) {
  Init(file, line, severity,
       also_send_to_log ? &LogMessage::SendToSinkAndLog
                        : &LogMessage::SendToSink);
  data_->sink_ = sink;
}
// Captures the message into |outvec| instead of logging it.
LogMessage::LogMessage(const char* file, int line, LogSeverity severity,
                       vector<string>* outvec)
    : allocated_(nullptr) {
  Init(file, line, severity, &LogMessage::SaveOrSendToLog);
  data_->outvec_ = outvec;
}
// Copies the message into |message| and also logs it.
LogMessage::LogMessage(const char* file, int line, LogSeverity severity,
                       string* message)
    : allocated_(nullptr) {
  Init(file, line, severity, &LogMessage::WriteToStringAndLog);
  data_->message_ = message;
}
// Shared constructor body: acquires a LogMessageData (thread-local
// buffer, heap, or one of the static fatal buffers), records the
// message metadata, and writes the log-line prefix into the stream.
void LogMessage::Init(const char* file, int line, LogSeverity severity,
                      void (LogMessage::*send_method)()) {
  allocated_ = nullptr;
  if (severity != GLOG_FATAL || !exit_on_dfatal) {
#ifdef GLOG_THREAD_LOCAL_STORAGE
    // Prefer the per-thread static buffer; fall back to the heap when a
    // message is already live on this thread (nested logging).
    if (thread_data_available) {
      thread_data_available = false;
      data_ = new (&thread_msg_data) logging::internal::LogMessageData;
    } else {
      allocated_ = new logging::internal::LogMessageData();
      data_ = allocated_;
    }
#else
    allocated_ = new logging::internal::LogMessageData();
    data_ = allocated_;
#endif
    data_->first_fatal_ = false;
  } else {
    // Fatal messages use static buffers so the crash path does not
    // allocate; only the first fatal message gets the exclusive buffer.
    std::lock_guard<std::mutex> l{fatal_msg_lock};
    if (fatal_msg_exclusive) {
      fatal_msg_exclusive = false;
      data_ = &fatal_msg_data_exclusive;
      data_->first_fatal_ = true;
    } else {
      data_ = &fatal_msg_data_shared;
      data_->first_fatal_ = false;
    }
  }
  // Save errno so logging does not disturb the caller (restored in
  // Flush; also reported by ErrnoLogMessage/PLOG).
  data_->preserved_errno_ = errno;
  data_->severity_ = severity;
  data_->line_ = line;
  data_->send_method_ = send_method;
  data_->sink_ = nullptr;
  data_->outvec_ = nullptr;
  const auto now = std::chrono::system_clock::now();
  time_ = LogMessageTime(now);
  data_->num_chars_to_log_ = 0;
  data_->num_chars_to_syslog_ = 0;
  data_->basename_ = const_basename(file);
  data_->fullname_ = file;
  data_->has_been_flushed_ = false;
  data_->thread_id_ = std::this_thread::get_id();
  // Emit the "[IWEF](yyyy)mmdd hh:mm:ss.uuuuuu threadid file:line] "
  // prefix unless disabled or a custom formatter is installed.
  if (FLAGS_log_prefix && (line != kNoLogPrefix)) {
    std::ios saved_fmt(nullptr);
    saved_fmt.copyfmt(stream());
    stream().fill('0');
    if (g_prefix_formatter == nullptr) {
      stream() << LogSeverityNames[severity][0];
      if (FLAGS_log_year_in_prefix) {
        stream() << setw(4) << 1900 + time_.year();
      }
      stream() << setw(2) << 1 + time_.month() << setw(2) << time_.day() << ' '
               << setw(2) << time_.hour() << ':' << setw(2) << time_.min()
               << ':' << setw(2) << time_.sec() << "." << setw(6)
               << time_.usec() << ' ' << setfill(' ') << setw(5)
               << data_->thread_id_ << setfill('0') << ' ' << data_->basename_
               << ':' << data_->line_ << "] ";
    } else {
      (*g_prefix_formatter)(stream(), *this);
      stream() << " ";
    }
    // Restore the caller-visible stream formatting state.
    stream().copyfmt(saved_fmt);
  }
  data_->num_prefix_chars_ = data_->stream_.pcount();
  // Optionally attach a stack trace when this file:line matches
  // FLAGS_log_backtrace_at.
  if (!FLAGS_log_backtrace_at.empty()) {
    char fileline[128];
    std::snprintf(fileline, sizeof(fileline), "%s:%d", data_->basename_, line);
#ifdef HAVE_STACKTRACE
    if (FLAGS_log_backtrace_at == fileline) {
      string stacktrace = GetStackTrace();
      stream() << " (stacktrace:\n" << stacktrace << ") ";
    }
#endif
  }
}
// Trivial accessors over the per-message data block.
LogSeverity LogMessage::severity() const noexcept { return data_->severity_; }
int LogMessage::line() const noexcept { return data_->line_; }
const std::thread::id& LogMessage::thread_id() const noexcept {
  return data_->thread_id_;
}
const char* LogMessage::fullname() const noexcept { return data_->fullname_; }
const char* LogMessage::basename() const noexcept { return data_->basename_; }
const LogMessageTime& LogMessage::time() const noexcept { return time_; }
// Flushes the message, releases the per-message data, and -- for fatal
// messages -- writes a stack-trace marker and terminates via Fail().
LogMessage::~LogMessage() noexcept(false) {
  Flush();
  bool fail = data_->severity_ == GLOG_FATAL && exit_on_dfatal;
#ifdef GLOG_THREAD_LOCAL_STORAGE
  if (data_ == static_cast<void*>(&thread_msg_data)) {
    // Data lives in the thread-local buffer: destroy in place and mark
    // the buffer free for the next message on this thread.
    data_->~LogMessageData();
    thread_data_available = true;
  } else {
    delete allocated_;
  }
#else
  delete allocated_;
#endif
  if (fail) {
    const char* message = "*** Check failure stack trace: ***\n";
    if (write(fileno(stderr), message, strlen(message)) < 0) {
      // Ignore errors.
    }
    AlsoErrorWrite(GLOG_FATAL,
                   glog_internal_namespace_::ProgramInvocationShortName(),
                   message);
    // Do not abort while an exception is still unwinding the stack.
#if defined(__cpp_lib_uncaught_exceptions) && \
    (__cpp_lib_uncaught_exceptions >= 201411L)
    if (std::uncaught_exceptions() == 0)
#else
    if (!std::uncaught_exception())
#endif
    {
      Fail();
    }
  }
}
// errno as captured when the message was constructed (see
// ErrnoLogMessage, which appends its text).
int LogMessage::preserved_errno() const { return data_->preserved_errno_; }
// The stream that user code inserts the message text into.
ostream& LogMessage::stream() { return data_->stream_; }
// Finalizes the message: ensures a trailing newline, dispatches it via
// the selected send method under log_mutex, waits for sinks, and
// restores errno.  Idempotent via has_been_flushed_.
void LogMessage::Flush() {
  if (data_->has_been_flushed_ || data_->severity_ < FLAGS_minloglevel) {
    return;
  }
  data_->num_chars_to_log_ = data_->stream_.pcount();
  data_->num_chars_to_syslog_ =
      data_->num_chars_to_log_ - data_->num_prefix_chars_;
  // Append '\n' if not already present; remember the byte we overwrite
  // so the buffer can be restored afterwards.
  bool append_newline =
      (data_->message_text_[data_->num_chars_to_log_ - 1] != '\n');
  char original_final_char = '\0';
  if (append_newline) {
    original_final_char = data_->message_text_[data_->num_chars_to_log_];
    data_->message_text_[data_->num_chars_to_log_++] = '\n';
  }
  data_->message_text_[data_->num_chars_to_log_] = '\0';
  {
    std::lock_guard<std::mutex> l{log_mutex};
    (this->*(data_->send_method_))();
    ++num_messages_[static_cast<int>(data_->severity_)];
  }
  LogDestination::WaitForSinks(data_);
  if (append_newline) {
    // Restore the buffer byte that was replaced by the '\n' above.
    data_->message_text_[data_->num_chars_to_log_ - 1] = original_final_char;
  }
  // Logging must not clobber the caller's errno.
  if (data_->preserved_errno_ != 0) {
    errno = data_->preserved_errno_;
  }
  data_->has_been_flushed_ = true;
}
// Copy of the first fatal message's text and timestamp, saved in
// SendToLog so the message can be repeated at exit.
static std::chrono::system_clock::time_point fatal_time;
static char fatal_message[256];
// Re-emits the saved fatal message (if any) to stderr and to all log
// files, so it also appears at the end of the logs.
void ReprintFatalMessage() {
  if (fatal_message[0]) {
    const size_t n = strlen(fatal_message);
    if (!FLAGS_logtostderr) {
      // With --logtostderr the message already went to stderr once.
      WriteToStderr(fatal_message, n);
    }
    LogDestination::LogToAllLogfiles(GLOG_ERROR, fatal_time, fatal_message, n);
  }
}
// Dispatches the finished message to log files, stderr/stdout, email
// and sinks, honoring FLAGS_logtostderr / FLAGS_logtostdout.  For fatal
// messages, additionally records the crash reason and flushes all
// destinations.  Must be called with log_mutex held.
void LogMessage::SendToLog() EXCLUSIVE_LOCKS_REQUIRED(log_mutex) {
  static bool already_warned_before_initgoogle = false;
  RAW_DCHECK(data_->num_chars_to_log_ > 0 &&
                 data_->message_text_[data_->num_chars_to_log_ - 1] == '\n',
             "");
  // Messages logged before InitGoogleLogging() go to stderr; warn once.
  if (!already_warned_before_initgoogle && !IsGoogleLoggingInitialized()) {
    const char w[] =
        "WARNING: Logging before InitGoogleLogging() is "
        "written to STDERR\n";
    WriteToStderr(w, strlen(w));
    already_warned_before_initgoogle = true;
  }
  if (FLAGS_logtostderr || FLAGS_logtostdout || !IsGoogleLoggingInitialized()) {
    if (FLAGS_logtostdout) {
      ColoredWriteToStdout(data_->severity_, data_->message_text_,
                           data_->num_chars_to_log_);
    } else {
      ColoredWriteToStderr(data_->severity_, data_->message_text_,
                           data_->num_chars_to_log_);
    }
    // Sinks receive the text without the prefix and without the
    // trailing '\n' (hence the "- 1").
    LogDestination::LogToSinks(
        data_->severity_, data_->fullname_, data_->basename_, data_->line_,
        time_, data_->message_text_ + data_->num_prefix_chars_,
        (data_->num_chars_to_log_ - data_->num_prefix_chars_ - 1));
  } else {
    LogDestination::LogToAllLogfiles(data_->severity_, time_.when(),
                                     data_->message_text_,
                                     data_->num_chars_to_log_);
    LogDestination::MaybeLogToStderr(data_->severity_, data_->message_text_,
                                     data_->num_chars_to_log_,
                                     data_->num_prefix_chars_);
    LogDestination::MaybeLogToEmail(data_->severity_, data_->message_text_,
                                    data_->num_chars_to_log_);
    LogDestination::LogToSinks(
        data_->severity_, data_->fullname_, data_->basename_, data_->line_,
        time_, data_->message_text_ + data_->num_prefix_chars_,
        (data_->num_chars_to_log_ - data_->num_prefix_chars_ - 1));
  }
  if (data_->severity_ == GLOG_FATAL && exit_on_dfatal) {
    if (data_->first_fatal_) {
      // Save the crash reason and a truncated copy of the message so
      // they can be reported (and reprinted) after termination.
      RecordCrashReason(&crash_reason);
      SetCrashReason(&crash_reason);
      const size_t copy =
          min(data_->num_chars_to_log_, sizeof(fatal_message) - 1);
      memcpy(fatal_message, data_->message_text_, copy);
      fatal_message[copy] = '\0';
      fatal_time = time_.when();
    }
    if (!FLAGS_logtostderr && !FLAGS_logtostdout) {
      // Force-flush every log destination before dying.
      for (auto& log_destination : LogDestination::log_destinations_) {
        if (log_destination) {
          log_destination->logger_->Write(
              true, std::chrono::system_clock::time_point{}, "", 0);
        }
      }
    }
    LogDestination::WaitForSinks(data_);
  }
}
// Captures the location, text and (if available) stack trace of the
// first fatal message into |reason| for crash reporting.
void LogMessage::RecordCrashReason(logging::internal::CrashReason* reason) {
  reason->filename = fatal_msg_data_exclusive.fullname_;
  reason->line_number = fatal_msg_data_exclusive.line_;
  // Skip the prefix so only the user-supplied text is reported.
  reason->message = fatal_msg_data_exclusive.message_text_ +
                    fatal_msg_data_exclusive.num_prefix_chars_;
#ifdef HAVE_STACKTRACE
  // Skip the top 4 frames -- they belong to the logging machinery.
  reason->depth = GetStackTrace(reason->stack, ARRAYSIZE(reason->stack), 4);
#else
  reason->depth = 0;
#endif
}
// Handler invoked after a fatal message is sent; defaults to abort()
// and can be replaced via InstallFailureFunction.
GLOG_NO_EXPORT logging_fail_func_t g_logging_fail_func =
    reinterpret_cast<logging_fail_func_t>(&abort);
// NullStream discards everything written to it; a 2-byte buffer is all
// that is needed since the contents are never sent anywhere.
NullStream::NullStream() : LogMessage::LogStream(message_buffer_, 2, 0) {}
NullStream::NullStream(const char* , int ,
                       const logging::internal::CheckOpString& )
    : LogMessage::LogStream(message_buffer_, 2, 0) {}
NullStream& NullStream::stream() { return *this; }
// A fatal NullStream still terminates the program even though the
// message text itself is discarded.
NullStreamFatal::~NullStreamFatal() {
  std::abort();
}
// Replaces the failure handler and returns the previous one.
logging_fail_func_t InstallFailureFunction(logging_fail_func_t fail_func) {
  return std::exchange(g_logging_fail_func, fail_func);
}
// Invokes the installed failure handler (abort() by default).
void LogMessage::Fail() { g_logging_fail_func(); }
// Delivers the finished message to the attached LogSink, if any.
void LogMessage::SendToSink() EXCLUSIVE_LOCKS_REQUIRED(log_mutex) {
  if (data_->sink_ != nullptr) {
    RAW_DCHECK(data_->num_chars_to_log_ > 0 &&
               data_->message_text_[data_->num_chars_to_log_ - 1] == '\n',
               "");
    // The sink receives the text without prefix and trailing newline.
    data_->sink_->send(
        data_->severity_, data_->fullname_, data_->basename_, data_->line_,
        time_, data_->message_text_ + data_->num_prefix_chars_,
        (data_->num_chars_to_log_ - data_->num_prefix_chars_ - 1));
  }
}
// Sends to the sink first, then to the normal log destinations.
void LogMessage::SendToSinkAndLog() EXCLUSIVE_LOCKS_REQUIRED(log_mutex) {
  SendToSink();
  SendToLog();
}
// Appends the message (without prefix/newline) to the capture vector
// when one was supplied; otherwise logs normally.
void LogMessage::SaveOrSendToLog() EXCLUSIVE_LOCKS_REQUIRED(log_mutex) {
  if (data_->outvec_ != nullptr) {
    RAW_DCHECK(data_->num_chars_to_log_ > 0 &&
               data_->message_text_[data_->num_chars_to_log_ - 1] == '\n',
               "");
    const char* start = data_->message_text_ + data_->num_prefix_chars_;
    size_t len = data_->num_chars_to_log_ - data_->num_prefix_chars_ - 1;
    data_->outvec_->push_back(string(start, len));
  } else {
    SendToLog();
  }
}
// Copies the message (without prefix/newline) into the capture string,
// if any, and always logs it as well.
void LogMessage::WriteToStringAndLog() EXCLUSIVE_LOCKS_REQUIRED(log_mutex) {
  if (data_->message_ != nullptr) {
    RAW_DCHECK(data_->num_chars_to_log_ > 0 &&
               data_->message_text_[data_->num_chars_to_log_ - 1] == '\n',
               "");
    const char* start = data_->message_text_ + data_->num_prefix_chars_;
    size_t len = data_->num_chars_to_log_ - data_->num_prefix_chars_ - 1;
    data_->message_->assign(start, len);
  }
  SendToLog();
}
// Sends the message to syslog(3) (when available) and then on to the
// regular log destinations.
void LogMessage::SendToSyslogAndLog() {
#ifdef HAVE_SYSLOG_H
  // Call openlog() exactly once before the first syslog() call.
  static bool openlog_already_called = false;
  if (!openlog_already_called) {
    openlog(glog_internal_namespace_::ProgramInvocationShortName(),
            LOG_CONS | LOG_NDELAY | LOG_PID, LOG_USER);
    openlog_already_called = true;
  }
  // Maps glog severities (by index) to syslog priority levels.
  const int SEVERITY_TO_LEVEL[] = {LOG_INFO, LOG_WARNING, LOG_ERR, LOG_EMERG};
  syslog(LOG_USER | SEVERITY_TO_LEVEL[static_cast<int>(data_->severity_)],
         "%.*s", static_cast<int>(data_->num_chars_to_syslog_),
         data_->message_text_ + data_->num_prefix_chars_);
  SendToLog();
#else
  LOG(ERROR) << "No syslog support: message=" << data_->message_text_;
#endif
}
// Returns the Logger currently handling |severity|; guarded by
// log_mutex.
base::Logger* base::GetLogger(LogSeverity severity) {
  std::lock_guard<std::mutex> l{log_mutex};
  return LogDestination::log_destination(severity)->GetLoggerImpl();
}
// Replaces the Logger that handles |severity|.
void base::SetLogger(LogSeverity severity, base::Logger* logger) {
  std::lock_guard<std::mutex> l{log_mutex};
  LogDestination::log_destination(severity)->SetLoggerImpl(logger);
}
// Number of messages sent at the given severity (counted in Flush).
int64 LogMessage::num_messages(int severity) {
  std::lock_guard<std::mutex> l{log_mutex};
  return num_messages_[severity];
}
// Streams the per-call-site occurrence counter (COUNTER).  Valid only
// when |os| really is a LogMessage::LogStream -- enforced via the
// self() check below (and dynamic_cast when RTTI is available).
ostream& operator<<(ostream& os, const Counter_t&) {
#ifdef DISABLE_RTTI
  LogMessage::LogStream* log = static_cast<LogMessage::LogStream*>(&os);
#else
  auto* log = dynamic_cast<LogMessage::LogStream*>(&os);
#endif
  CHECK(log && log == log->self())
      << "You must not use COUNTER with non-glog ostream";
  os << log->ctr();
  return os;
}
// Appends the strerror() text for the errno value captured at message
// construction (used by PLOG-style macros).
ErrnoLogMessage::ErrnoLogMessage(const char* file, int line,
                                 LogSeverity severity, int64 ctr,
                                 void (LogMessage::*send_method)())
    : LogMessage(file, line, severity, ctr, send_method) {}
ErrnoLogMessage::~ErrnoLogMessage() {
  stream() << ": " << StrError(preserved_errno()) << " [" << preserved_errno()
           << "]";
}
// Thin public wrappers forwarding to LogDestination's static interface.
void FlushLogFiles(LogSeverity min_severity) {
  LogDestination::FlushLogFiles(min_severity);
}
void FlushLogFilesUnsafe(LogSeverity min_severity) {
  LogDestination::FlushLogFilesUnsafe(min_severity);
}
void SetLogDestination(LogSeverity severity, const char* base_filename) {
  LogDestination::SetLogDestination(severity, base_filename);
}
void SetLogSymlink(LogSeverity severity, const char* symlink_basename) {
  LogDestination::SetLogSymlink(severity, symlink_basename);
}
LogSink::~LogSink() = default;
// Default is a no-op; sinks that deliver messages after send() returns
// are expected to override this (see LogDestination::WaitForSinks).
void LogSink::WaitTillSent() {
}
// Formats a message the same way the file logger does: severity letter,
// (optional year,) date/time, thread id, "file:line] ", then the body.
string LogSink::ToString(LogSeverity severity, const char* file, int line,
                         const LogMessageTime& time, const char* message,
                         size_t message_len) {
  ostringstream stream;
  stream.fill('0');
  stream << LogSeverityNames[severity][0];
  if (FLAGS_log_year_in_prefix) {
    stream << setw(4) << 1900 + time.year();
  }
  stream << setw(2) << 1 + time.month() << setw(2) << time.day() << ' '
         << setw(2) << time.hour() << ':' << setw(2) << time.min() << ':'
         << setw(2) << time.sec() << '.' << setw(6) << time.usec() << ' '
         << setfill(' ') << setw(5) << std::this_thread::get_id()
         << setfill('0') << ' ' << file << ':' << line << "] ";
  // Write the raw message bytes as-is (no newline manipulation).
  (stream.write)(message, static_cast<std::streamsize>(message_len));
  return stream.str();
}
// Public configuration API; all forward to LogDestination statics.
void AddLogSink(LogSink* destination) {
  LogDestination::AddLogSink(destination);
}
void RemoveLogSink(LogSink* destination) {
  LogDestination::RemoveLogSink(destination);
}
void SetLogFilenameExtension(const char* ext) {
  LogDestination::SetLogFilenameExtension(ext);
}
void SetStderrLogging(LogSeverity min_severity) {
  LogDestination::SetStderrLogging(min_severity);
}
void SetEmailLogging(LogSeverity min_severity, const char* addresses) {
  LogDestination::SetEmailLogging(min_severity, addresses);
}
void LogToStderr() { LogDestination::LogToStderr(); }
namespace base {
namespace internal {
// Accessors for exit_on_dfatal -- whether a FATAL-severity message
// terminates the process (checked in LogMessage).  Guarded by
// log_mutex.
bool GetExitOnDFatal();
bool GetExitOnDFatal() {
  std::lock_guard<std::mutex> l{log_mutex};
  return exit_on_dfatal;
}
void SetExitOnDFatal(bool value);
void SetExitOnDFatal(bool value) {
  std::lock_guard<std::mutex> l{log_mutex};
  exit_on_dfatal = value;
}
}  // namespace internal
}  // namespace base
#ifndef GLOG_OS_EMSCRIPTEN
// Characters that may appear unquoted on a shell command line.
static const char kDontNeedShellEscapeChars[] =
    "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    "abcdefghijklmnopqrstuvwxyz"
    "0123456789+-_.=/:,@";
// Quotes |src| so it can be safely embedded in a shell command.  Three
// strategies, cheapest first: no quoting when every character is
// shell-safe; single quotes when the string contains no single quote;
// otherwise double quotes with backslash escapes for the characters
// the shell still interprets inside them (\ $ " `).
static string ShellEscape(const string& src) {
  const bool needs_no_quoting =
      !src.empty() &&
      src.find_first_not_of(kDontNeedShellEscapeChars) == string::npos;
  if (needs_no_quoting) {
    return src;
  }
  if (src.find_first_of('\'') == string::npos) {
    // No single quote present: wrap the whole string in single quotes.
    return "'" + src + "'";
  }
  // Fall back to double quotes, escaping the remaining specials.
  string escaped = "\"";
  for (char c : src) {
    if (c == '\\' || c == '$' || c == '"' || c == '`') {
      escaped += '\\';
    }
    escaped += c;
  }
  escaped += '"';
  return escaped;
}
// Removes leading and trailing whitespace from |s| in place.
// The character is cast to unsigned char before calling std::isspace:
// passing a negative char (e.g. a high-bit byte on platforms where
// char is signed) to std::isspace is undefined behavior.
static inline void trim(std::string& s) {
  const auto toRemove = [](char ch) {
    return std::isspace(static_cast<unsigned char>(ch)) == 0;
  };
  // Erase the leading run of whitespace.
  s.erase(s.begin(), std::find_if(s.begin(), s.end(), toRemove));
  // Erase the trailing run of whitespace.
  s.erase(std::find_if(s.rbegin(), s.rend(), toRemove).base(), s.end());
}
#endif
// Sends an email via the configured mailer (FLAGS_logmailer, defaulting
// to /bin/mail).  |dest| is a comma-separated recipient list; every
// address is validated against a regex before being passed to the
// shell.  When |use_logging| is false, diagnostics go to stderr instead
// of the log (needed while the process is dying).  Returns true on
// success.
static bool SendEmailInternal(const char* dest, const char* subject,
                              const char* body, bool use_logging) {
#ifndef GLOG_OS_EMSCRIPTEN
  if (dest && *dest) {
    // Split the recipient list, trim whitespace, and validate each
    // address so no shell metacharacters can sneak into the command.
    std::istringstream ss(dest);
    std::ostringstream sanitized_dests;
    std::string s;
    while (std::getline(ss, s, ',')) {
      trim(s);
      if (s.empty()) {
        continue;
      }
      if (!std::regex_match(
              s,
              std::regex("^[a-zA-Z0-9]"
                         "[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]*@[a-zA-Z0-9]"
                         "(?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\\.[a-zA-Z0-9]"
                         "(?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$"))) {
        if (use_logging) {
          VLOG(1) << "Invalid destination email address:" << s;
        } else {
          fprintf(stderr, "Invalid destination email address: %s\n", s.c_str());
        }
        return false;
      }
      if (!sanitized_dests.str().empty()) {
        sanitized_dests << ",";
      }
      sanitized_dests << s;
    }
    // Point |dest| at the sanitized list for the rest of the function.
    const std::string& tmp = sanitized_dests.str();
    dest = tmp.c_str();
    if (use_logging) {
      VLOG(1) << "Trying to send TITLE:" << subject << " BODY:" << body
              << " to " << dest;
    } else {
      fprintf(stderr, "Trying to send TITLE: %s BODY: %s to %s\n", subject,
              body, dest);
    }
    string logmailer;
    if (FLAGS_logmailer.empty()) {
      logmailer = "/bin/mail";
    } else {
      logmailer = ShellEscape(FLAGS_logmailer);
    }
    // Subject and recipients are shell-escaped before being embedded in
    // the command line.
    string cmd =
        logmailer + " -s" + ShellEscape(subject) + " " + ShellEscape(dest);
    if (use_logging) {
      VLOG(4) << "Mailing command: " << cmd;
    }
    FILE* pipe = popen(cmd.c_str(), "w");
    if (pipe != nullptr) {
      // The message body is fed to the mailer through its stdin.
      if (body) {
        fwrite(body, sizeof(char), strlen(body), pipe);
      }
      bool ok = pclose(pipe) != -1;
      if (!ok) {
        if (use_logging) {
          LOG(ERROR) << "Problems sending mail to " << dest << ": "
                     << StrError(errno);
        } else {
          fprintf(stderr, "Problems sending mail to %s: %s\n", dest,
                  StrError(errno).c_str());
        }
      }
      return ok;
    } else {
      if (use_logging) {
        LOG(ERROR) << "Unable to send mail to " << dest;
      } else {
        fprintf(stderr, "Unable to send mail to %s\n", dest);
      }
    }
  }
#else
  (void)dest;
  (void)subject;
  (void)body;
  (void)use_logging;
  LOG(WARNING) << "Email support not available; not sending message";
#endif
  return false;
}
// Public entry point; diagnostics are reported through the log.
bool SendEmail(const char* dest, const char* subject, const char* body) {
  return SendEmailInternal(dest, subject, body, true);
}
// Fills |list| with candidate temporary directories, in preference
// order.  On POSIX, entries are normalized to end with '/'.
static void GetTempDirectories(vector<string>& list) {
  list.clear();
#ifdef GLOG_OS_WINDOWS
  // Windows: the system temp path, then fixed fallbacks.
  char tmp[MAX_PATH];
  if (GetTempPathA(MAX_PATH, tmp)) list.push_back(tmp);
  list.push_back("C:\\TMP\\");
  list.push_back("C:\\TEMP\\");
#else
  // POSIX: environment overrides first, then /tmp.
  const char* candidates[] = {
      getenv("TEST_TMPDIR"),
      getenv("TMPDIR"),
      getenv("TMP"),
      "/tmp",
  };
  for (auto d : candidates) {
    if (!d) continue;  // Environment variable not set.
    string dstr = d;
    if (dstr[dstr.size() - 1] != '/') {
      dstr += "/";
    }
    list.push_back(dstr);
    // Stop at the first candidate that exists and is a directory.
    struct stat statbuf;
    if (!stat(d, &statbuf) && S_ISDIR(statbuf.st_mode)) {
      return;
    }
  }
#endif
}
// Lazily-built cache for GetLoggingDirectories; reset to nullptr by
// ShutdownGoogleLogging.
static std::unique_ptr<std::vector<std::string>> logging_directories_list;
// Returns the directories to try when creating a log file:
// FLAGS_log_dir if set (normalized to end with a delimiter), otherwise
// the temp directories followed by the current directory.
const vector<string>& GetLoggingDirectories() {
  if (logging_directories_list == nullptr) {
    logging_directories_list = std::make_unique<std::vector<std::string>>();
    if (!FLAGS_log_dir.empty()) {
      // Ensure the specified path ends with a directory delimiter.
      if (std::find(std::begin(possible_dir_delim),
                    std::end(possible_dir_delim),
                    FLAGS_log_dir.back()) == std::end(possible_dir_delim)) {
        logging_directories_list->push_back(FLAGS_log_dir + "/");
      } else {
        logging_directories_list->push_back(FLAGS_log_dir);
      }
    } else {
      GetTempDirectories(*logging_directories_list);
#ifdef GLOG_OS_WINDOWS
      char tmp[MAX_PATH];
      if (GetWindowsDirectoryA(tmp, MAX_PATH))
        logging_directories_list->push_back(tmp);
      logging_directories_list->push_back(".\\");
#else
      logging_directories_list->push_back("./");
#endif
    }
  }
  return *logging_directories_list;
}
GLOG_NO_EXPORT
// Like GetTempDirectories, but keeps only the directories that actually
// exist (i.e. for which access() succeeds).
void GetExistingTempDirectories(vector<string>& list) {
  GetTempDirectories(list);
  // Drop every candidate that access() cannot reach; std::remove_if
  // preserves the relative order of the survivors.
  list.erase(std::remove_if(list.begin(), list.end(),
                            [](const string& dir) {
                              return access(dir.c_str(), 0) != 0;
                            }),
             list.end());
}
// Shrinks the log file at |path| down to roughly |keep| bytes once it
// grows past |limit| bytes, by copying the tail of the file to the
// front in place and truncating the remainder.  Used for descriptors
// (stdout/stderr) that cannot simply be reopened.
void TruncateLogFile(const char* path, uint64 limit, uint64 keep) {
#if defined(HAVE_UNISTD_H) || defined(HAVE__CHSIZE_S)
  struct stat statbuf;
  const int kCopyBlockSize = 8 << 10;
  char copybuf[kCopyBlockSize];
  off_t read_offset, write_offset;
  // Refuse to follow symlinks, except for our own /proc/self/fd/ paths.
  int flags = O_RDWR;
# ifdef GLOG_OS_LINUX
  const char* procfd_prefix = "/proc/self/fd/";
  if (strncmp(procfd_prefix, path, strlen(procfd_prefix))) flags |= O_NOFOLLOW;
# endif
  FileDescriptor fd{open(path, flags)};
  if (!fd) {
    if (errno == EFBIG) {
      // open() refused because the file is too large for our off_t;
      // fall back to truncating it to zero length.
      // NOTE(review): the HAVE__CHSIZE_S branch calls _chsize_s on the
      // invalid descriptor from the failed open() -- confirm intended.
# ifdef HAVE__CHSIZE_S
      if (_chsize_s(fd.get(), 0) != 0) {
# else
      if (truncate(path, 0) == -1) {
# endif
        PLOG(ERROR) << "Unable to truncate " << path;
      } else {
        LOG(ERROR) << "Truncated " << path << " due to EFBIG error";
      }
    } else {
      PLOG(ERROR) << "Unable to open " << path;
    }
    return;
  }
  if (fstat(fd.get(), &statbuf) == -1) {
    PLOG(ERROR) << "Unable to fstat()";
    return;
  }
  // Only regular files over both thresholds need any work.
  if (!S_ISREG(statbuf.st_mode)) return;
  if (statbuf.st_size <= static_cast<off_t>(limit)) return;
  if (statbuf.st_size <= static_cast<off_t>(keep)) return;
  LOG(INFO) << "Truncating " << path << " to " << keep << " bytes";
  // Copy the last |keep| bytes of the file to its beginning.
  read_offset = statbuf.st_size - static_cast<off_t>(keep);
  write_offset = 0;
  ssize_t bytesin, bytesout;
  while ((bytesin = pread(fd.get(), copybuf, sizeof(copybuf), read_offset)) >
         0) {
    bytesout =
        pwrite(fd.get(), copybuf, static_cast<size_t>(bytesin), write_offset);
    if (bytesout == -1) {
      PLOG(ERROR) << "Unable to write to " << path;
      break;
    } else if (bytesout != bytesin) {
      LOG(ERROR) << "Expected to write " << bytesin << ", wrote " << bytesout;
    }
    read_offset += bytesin;
    write_offset += bytesout;
  }
  if (bytesin == -1) PLOG(ERROR) << "Unable to read from " << path;
  // Cut the file off after the last byte copied forward.
# ifdef HAVE__CHSIZE_S
  if (_chsize_s(fd.get(), write_offset) != 0) {
# else
  if (ftruncate(fd.get(), write_offset) == -1) {
# endif
    PLOG(ERROR) << "Unable to truncate " << path;
  }
#else
  LOG(ERROR) << "No log truncation support.";
#endif
}
// Truncates stdout and stderr (via their /proc fd paths) once they
// exceed the MaxLogSize limit, keeping the final 1 MiB of each.
void TruncateStdoutStderr() {
#ifdef HAVE_UNISTD_H
  uint64 limit = MaxLogSize() << 20U;  // MiB -> bytes.
  uint64 keep = 1U << 20U;             // Preserve the last 1 MiB.
  TruncateLogFile("/proc/self/fd/1", limit, keep);
  TruncateLogFile("/proc/self/fd/2", limit, keep);
#else
  LOG(ERROR) << "No log truncation support.";
#endif
}
namespace logging {
namespace internal {
// Helper for the CHECK_STR* macros: compares two C strings with |func|
// (strcmp or strcasecmp) and, when the (in)equality does not match
// |expected|, returns a message describing the failure.  nullptr
// arguments are permitted; two strings compare equal when both are
// nullptr or |func| reports them equal.  (Comments cannot go inside
// the macro body -- they would swallow the line continuations.)
#define DEFINE_CHECK_STROP_IMPL(name, func, expected) \
  std::unique_ptr<string> Check##func##expected##Impl( \
      const char* s1, const char* s2, const char* names) { \
    bool equal = s1 == s2 || (s1 && s2 && !func(s1, s2)); \
    if (equal == (expected)) \
      return nullptr; \
    else { \
      ostringstream ss; \
      if (!s1) s1 = ""; \
      if (!s2) s2 = ""; \
      ss << #name " failed: " << names << " (" << s1 << " vs. " << s2 << ")"; \
      return std::make_unique<std::string>(ss.str()); \
    } \
  }
DEFINE_CHECK_STROP_IMPL(CHECK_STREQ, strcmp, true)
DEFINE_CHECK_STROP_IMPL(CHECK_STRNE, strcmp, false)
DEFINE_CHECK_STROP_IMPL(CHECK_STRCASEEQ, strcasecmp, true)
DEFINE_CHECK_STROP_IMPL(CHECK_STRCASENE, strcasecmp, false)
// The macro is only needed for the four instantiations above.
#undef DEFINE_CHECK_STROP_IMPL
}  // namespace internal
}  // namespace logging
// A portable, thread-safe wrapper that papers over the two incompatible
// strerror_r() variants: the POSIX version (returns int, fills |buf|)
// and the GNU version (returns char*, may ignore |buf|).  On success
// the message is left NUL-terminated in |buf| and 0 is returned; on
// failure buf[0] is set to '\0' and -1 is returned.  The caller's
// errno is preserved.
GLOG_NO_EXPORT
int posix_strerror_r(int err, char* buf, size_t len) {
  // Sanity-check the output buffer.
  if (buf == nullptr || len <= 0) {
    errno = EINVAL;
    return -1;
  }
  buf[0] = '\000';
  int old_errno = errno;
  errno = 0;
  // The cast is a no-op for the GNU variant and converts the POSIX
  // variant's int result into a (non-meaningful) pointer value.
  char* rc = reinterpret_cast<char*>(strerror_r(err, buf, len));
  if (errno) {
    // Either variant reported failure through errno.
    buf[0] = '\000';
    return -1;
  }
  errno = old_errno;
  // Ensure NUL termination even if strerror_r truncated the message.
  buf[len - 1] = '\000';
  if (!rc) {
    // POSIX variant succeeded: the message is already in |buf|.
    return 0;
  } else {
    if (rc == buf) {
      // GNU variant wrote into our buffer.
      return 0;
    } else {
      // GNU variant returned a pointer to a static string; copy it.
      buf[0] = '\000';
#if defined(GLOG_OS_MACOSX) || defined(GLOG_OS_FREEBSD) || \
    defined(GLOG_OS_OPENBSD)
      // NOTE(review): on these systems small pointer values appear to
      // encode error codes rather than strings -- confirm against the
      // platform's strerror_r documentation.
      if (reinterpret_cast<intptr_t>(rc) < sys_nerr) {
        return -1;
      }
#endif
      strncat(buf, rc, len - 1);
      return 0;
    }
  }
}
// Returns a human-readable message for |err|, falling back to
// "Error number N" when no text is available.
string StrError(int err) {
  char message[100];
  if (posix_strerror_r(err, message, sizeof(message)) < 0 ||
      message[0] == '\000') {
    // Lookup failed or produced an empty string; synthesize a message.
    std::snprintf(message, sizeof(message), "Error number %d", err);
  }
  return message;
}
// LOG(FATAL) helper: like LogMessage, but the destructor always
// flushes and then invokes the installed failure handler via Fail().
LogMessageFatal::LogMessageFatal(const char* file, int line)
    : LogMessage(file, line, GLOG_FATAL) {}
LogMessageFatal::LogMessageFatal(const char* file, int line,
                                 const logging::internal::CheckOpString& result)
    : LogMessage(file, line, result) {}
LogMessageFatal::~LogMessageFatal() noexcept(false) {
  Flush();
  LogMessage::Fail();
}
namespace logging {
namespace internal {
// Builds the "expr (v1 vs. v2)" text used by failed CHECK_EQ-style
// comparisons.  stream_ is a raw owning pointer because its type is
// fixed by the class declaration in the header; it is freed below.
CheckOpMessageBuilder::CheckOpMessageBuilder(const char* exprtext)
    : stream_(new ostringstream) {
  *stream_ << exprtext << " (";
}
CheckOpMessageBuilder::~CheckOpMessageBuilder() { delete stream_; }
// Returns the stream positioned after the " vs. " separator, ready to
// receive the second operand.
ostream* CheckOpMessageBuilder::ForVar2() {
  *stream_ << " vs. ";
  return stream_;
}
// Closes the parenthesized operand list and returns the full message.
std::unique_ptr<string> CheckOpMessageBuilder::NewString() {
  *stream_ << ")";
  return std::make_unique<std::string>(stream_->str());
}
// For char-like operands: printable ASCII (32..126) is shown as a quoted
// character, anything else as its numeric value so control bytes remain
// visible in CHECK failure output.
template <>
void MakeCheckOpValueString(std::ostream* os, const char& v) {
  if (v >= 32 && v <= 126) {
    (*os) << "'" << v << "'";
  } else {
    (*os) << "char value " << static_cast<short>(v);
  }
}
template <>
void MakeCheckOpValueString(std::ostream* os, const signed char& v) {
  if (v >= 32 && v <= 126) {
    (*os) << "'" << v << "'";
  } else {
    (*os) << "signed char value " << static_cast<short>(v);
  }
}
template <>
void MakeCheckOpValueString(std::ostream* os, const unsigned char& v) {
  if (v >= 32 && v <= 126) {
    (*os) << "'" << v << "'";
  } else {
    (*os) << "unsigned char value " << static_cast<unsigned short>(v);
  }
}
// nullptr operands are spelled out rather than streamed as a pointer.
template <>
void MakeCheckOpValueString(std::ostream* os, const std::nullptr_t& ) {
  (*os) << "nullptr";
}
}  // namespace internal
}  // namespace logging
// Initializes glog; argv0 is the program name used in log file names.
void InitGoogleLogging(const char* argv0) { InitGoogleLoggingUtilities(argv0); }
// Installs (or, when callback == nullptr, removes) a user-supplied
// formatter for the per-line log prefix.  `data` is handed back verbatim
// to `callback` on every invocation.
void InstallPrefixFormatter(PrefixFormatterCallback callback, void* data) {
  if (callback == nullptr) {
    g_prefix_formatter = nullptr;
    return;
  }
  g_prefix_formatter = std::make_unique<PrefixFormatter>(callback, data);
}
// Tears glog down: destroys log destinations and clears cached global
// state so InitGoogleLogging() can be called again.
void ShutdownGoogleLogging() {
  ShutdownGoogleLoggingUtilities();
  LogDestination::DeleteLogDestinations();
  logging_directories_list = nullptr;
  g_prefix_formatter = nullptr;
}
// Enables deletion of log files older than `overdue_days`; the day count
// is expressed as a std::ratio<kSecondsInDay> duration and converted to
// the cleaner's minute-based clock.
void EnableLogCleaner(unsigned int overdue_days) {
  log_cleaner.Enable(std::chrono::duration_cast<std::chrono::minutes>(
      std::chrono::duration<unsigned, std::ratio<kSecondsInDay>>{
          overdue_days}));
}
// Overload taking the overdue age directly in minutes.
void EnableLogCleaner(const std::chrono::minutes& overdue) {
  log_cleaner.Enable(overdue);
}
void DisableLogCleaner() { log_cleaner.Disable(); }
LogMessageTime::LogMessageTime() = default;
namespace {
// Minimal void_t backport used by the tm_gmtoff detector below.
template <class... Args>
struct void_impl {
  using type = void;
};
template <class... Args>
using void_t = typename void_impl<Args...>::type;
// Detects whether std::tm carries the nonstandard tm_gmtoff member
// (glibc/BSD extension); selects the Breakdown() overload below.
template <class T, class E = void>
struct has_member_tm_gmtoff : std::false_type {};
template <class T>
struct has_member_tm_gmtoff<T, void_t<decltype(&T::tm_gmtoff)>>
    : std::true_type {};
// Breakdown(): splits a system_clock time point into (broken-down time,
// time_t, UTC offset in hours).  Overload for platforms WITHOUT
// tm_gmtoff: the offset is reconstructed by comparing local and UTC
// renderings of the same instant.
// Fix: the gmtime_r/localtime_r/mktime arguments below previously read
// "×tamp" — HTML-entity corruption ("&times;") of "&timestamp" —
// which did not compile; the original identifier is restored.
// NOTE(review): when FLAGS_log_utc_time is set, tm_local/tm_utc
// intentionally end up holding the opposite of what their names suggest
// (tm_local receives the UTC breakdown) — confirm before refactoring.
template <class T = std::tm>
auto Breakdown(const std::chrono::system_clock::time_point& now)
    -> std::enable_if_t<!has_member_tm_gmtoff<T>::value,
                        std::tuple<std::tm, std::time_t, std::chrono::hours>> {
  std::time_t timestamp = std::chrono::system_clock::to_time_t(now);
  std::tm tm_local;
  std::tm tm_utc;
  int isdst = 0;
  if (FLAGS_log_utc_time) {
    gmtime_r(&timestamp, &tm_local);
    localtime_r(&timestamp, &tm_utc);
    isdst = tm_utc.tm_isdst;
    tm_utc = tm_local;
  } else {
    localtime_r(&timestamp, &tm_local);
    isdst = tm_local.tm_isdst;
    gmtime_r(&timestamp, &tm_utc);
  }
  // mktime() interprets tm_utc as local time, so its distance from `now`
  // approximates the local UTC offset; add an hour while DST is active.
  std::time_t gmt_sec = std::mktime(&tm_utc);
  using namespace std::chrono_literals;
  const auto gmtoffset = std::chrono::duration_cast<std::chrono::hours>(
      now - std::chrono::system_clock::from_time_t(gmt_sec) +
      (isdst ? 1h : 0h));
  return std::make_tuple(tm_local, timestamp, gmtoffset);
}
// Overload for platforms WITH tm_gmtoff: read the offset directly from
// the broken-down time.
template <class T = std::tm>
auto Breakdown(const std::chrono::system_clock::time_point& now)
    -> std::enable_if_t<has_member_tm_gmtoff<T>::value,
                        std::tuple<std::tm, std::time_t, std::chrono::hours>> {
  std::time_t timestamp = std::chrono::system_clock::to_time_t(now);
  T tm;
  if (FLAGS_log_utc_time) {
    gmtime_r(&timestamp, &tm);
  } else {
    localtime_r(&timestamp, &tm);
  }
  const auto gmtoffset = std::chrono::duration_cast<std::chrono::hours>(
      std::chrono::seconds{tm.tm_gmtoff});
  return std::make_tuple(tm, timestamp, gmtoffset);
}
}  // namespace
// Captures `now` together with its broken-down form, UTC offset, and the
// sub-second microsecond remainder that time_t cannot represent.
LogMessageTime::LogMessageTime(std::chrono::system_clock::time_point now)
    : timestamp_{now} {
  std::time_t timestamp;
  std::tie(tm_, timestamp, gmtoffset_) = Breakdown(now);
  usecs_ = std::chrono::duration_cast<std::chrono::microseconds>(
      now - std::chrono::system_clock::from_time_t(timestamp));
}
} | #include <fcntl.h>
#include <chrono>
#include <cstdio>
#include <cstdlib>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <memory>
#include <mutex>
#include <queue>
#include <sstream>
#include <stdexcept>
#include <string>
#include <thread>
#include <vector>
#include "config.h"
#ifdef HAVE_GLOB_H
# include <glob.h>
#endif
#include <sys/stat.h>
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif
#ifdef HAVE_SYS_WAIT_H
# include <sys/wait.h>
#endif
#include "base/commandlineflags.h"
#include "glog/logging.h"
#include "glog/raw_logging.h"
#include "googletest.h"
#include "stacktrace.h"
#include "utilities.h"
#ifdef GLOG_USE_GFLAGS
# include <gflags/gflags.h>
using namespace GFLAGS_NAMESPACE;
#endif
#ifdef HAVE_LIB_GMOCK
# include <gmock/gmock.h>
# include "mock-log.h"
using google::glog_testing::ScopedMockLog;
using testing::_;
using testing::AllOf;
using testing::AnyNumber;
using testing::HasSubstr;
using testing::InitGoogleMock;
using testing::StrictMock;
using testing::StrNe;
#endif
using namespace std;
using namespace google;
namespace google {
namespace base {
namespace internal {
// Test-only hooks into glog internals (defined inside the library).
bool GetExitOnDFatal();
void SetExitOnDFatal(bool value);
}  // namespace internal
}  // namespace base
}  // namespace google
// Forward declarations for the ad-hoc test drivers invoked from main().
static void TestLogging(bool check_counts);
static void TestRawLogging();
static void LogWithLevels(int v, int severity, bool err, bool alsoerr);
static void TestLoggingLevels();
static void TestVLogModule();
static void TestLogString();
static void TestLogSink();
static void TestLogToString();
static void TestLogSinkWaitTillSent();
static void TestCHECK();
static void TestDCHECK();
static void TestSTREQ();
static void TestBasename();
static void TestBasenameAppendWhenNoTimestamp();
static void TestTwoProcessesWrite();
static void TestSymlink();
static void TestExtension();
static void TestWrapper();
static void TestErrno();
static void TestTruncate();
static void TestCustomLoggerDeletionOnShutdown();
static void TestLogPeriodically();
// Comparison operand for the CHECK benchmarks below; kept at file scope
// so the optimizer cannot fold the checks away.
static int x = -1;
// Micro-benchmark: cost of a passing CHECK_GE (8 checks per iteration).
static void BM_Check1(int n) {
  while (n-- > 0) {
    CHECK_GE(n, x);
    CHECK_GE(n, x);
    CHECK_GE(n, x);
    CHECK_GE(n, x);
    CHECK_GE(n, x);
    CHECK_GE(n, x);
    CHECK_GE(n, x);
    CHECK_GE(n, x);
  }
}
BENCHMARK(BM_Check1)
static void CheckFailure(int a, int b, const char* file, int line,
                         const char* msg);
// Baseline for BM_Check1: hand-written compare plus call-on-failure,
// i.e. what the CHECK macro would cost without its streaming machinery.
static void BM_Check3(int n) {
  while (n-- > 0) {
    if (n < x) CheckFailure(n, x, __FILE__, __LINE__, "n < x");
    if (n < x) CheckFailure(n, x, __FILE__, __LINE__, "n < x");
    if (n < x) CheckFailure(n, x, __FILE__, __LINE__, "n < x");
    if (n < x) CheckFailure(n, x, __FILE__, __LINE__, "n < x");
    if (n < x) CheckFailure(n, x, __FILE__, __LINE__, "n < x");
    if (n < x) CheckFailure(n, x, __FILE__, __LINE__, "n < x");
    if (n < x) CheckFailure(n, x, __FILE__, __LINE__, "n < x");
    if (n < x) CheckFailure(n, x, __FILE__, __LINE__, "n < x");
  }
}
BENCHMARK(BM_Check3)
// Cost of CHECK() with a plain boolean expression.
static void BM_Check2(int n) {
  if (n == 17) {
    x = 5;
  }
  while (n-- > 0) {
    CHECK(n >= x);
    CHECK(n >= x);
    CHECK(n >= x);
    CHECK(n >= x);
    CHECK(n >= x);
    CHECK(n >= x);
    CHECK(n >= x);
    CHECK(n >= x);
  }
}
BENCHMARK(BM_Check2)
// Failure sink for BM_Check3; intentionally empty.
static void CheckFailure(int, int, const char* , int ,
                         const char* ) {}
// Throughput of a plain LOG(INFO) statement.
static void BM_logspeed(int n) {
  while (n-- > 0) {
    LOG(INFO) << "test message";
  }
}
BENCHMARK(BM_logspeed)
// Throughput of VLOG(1) (normally disabled, so mostly the guard cost).
static void BM_vlog(int n) {
  while (n-- > 0) {
    VLOG(1) << "test message";
  }
}
BENCHMARK(BM_vlog)
namespace {
// Custom log-prefix formatter installed from main().  It reproduces the
// default glog prefix layout, but writes nothing unless `data` is the
// expected sentinel string — which doubles as a check that the callback
// receives its user data pointer intact.
void PrefixAttacher(std::ostream& s, const LogMessage& m, void* data) {
  // Assert the user data pointer is still OK
  if (data == nullptr || *static_cast<string*>(data) != "good data") {
    return;
  }
  s << GetLogSeverityName(m.severity())[0] << setw(4) << 1900 + m.time().year()
    << setw(2) << 1 + m.time().month() << setw(2) << m.time().day() << ' '
    << setw(2) << m.time().hour() << ':' << setw(2) << m.time().min() << ':'
    << setw(2) << m.time().sec() << "." << setw(6) << m.time().usec() << ' '
    << setfill(' ') << setw(5) << m.thread_id() << setfill('0') << ' '
    << m.basename() << ':' << m.line() << "]";
}
}  // namespace
// Test harness entry point.  Order matters throughout: stderr/stdout are
// captured and diffed against golden files, so every logging statement
// below contributes to the expected output.
int main(int argc, char** argv) {
  // Keep output byte-stable for the golden-file diffs.
  FLAGS_colorlogtostderr = false;
  FLAGS_timestamp_in_logfile_name = true;
  // Ensure stderr is unbuffered so captures see every byte immediately.
  setbuf(stderr, nullptr);
  // Exercise logging before InitGoogleLogging() is called.
  CaptureTestStderr();
  LogWithLevels(FLAGS_v, FLAGS_stderrthreshold, FLAGS_logtostderr,
                FLAGS_alsologtostderr);
  LogWithLevels(0, 0, false, false);
  const string early_stderr = GetCapturedTestStderr();
  EXPECT_FALSE(IsGoogleLoggingInitialized());
  string prefix_attacher_data = "good data";
  InitGoogleLogging(argv[0]);
  InstallPrefixFormatter(&PrefixAttacher, &prefix_attacher_data);
  EXPECT_TRUE(IsGoogleLoggingInitialized());
  RunSpecifiedBenchmarks();
  FLAGS_logtostderr = true;
  InitGoogleTest(&argc, argv);
#ifdef HAVE_LIB_GMOCK
  InitGoogleMock(&argc, argv);
#endif
#ifdef GLOG_USE_GFLAGS
  ParseCommandLineFlags(&argc, &argv, true);
#endif
  // Run the gtest cases first; the capture-based checks below depend on
  // a clean capture buffer.
  CHECK_EQ(RUN_ALL_TESTS(), 0);
  CaptureTestStderr();
  // Re-emit the pre-init log lines so they take part in the comparison.
  LogMessage("dummy", LogMessage::kNoLogPrefix, GLOG_INFO).stream()
      << early_stderr;
  TestLogging(true);
  TestRawLogging();
  TestLoggingLevels();
  TestVLogModule();
  TestLogString();
  TestLogSink();
  TestLogToString();
  TestLogSinkWaitTillSent();
  TestCHECK();
  TestDCHECK();
  TestSTREQ();
  // Compare captured stderr against the golden file.
  EXPECT_TRUE(
      MungeAndDiffTestStderr(FLAGS_test_srcdir + "/src/logging_unittest.err"));
  // Repeat the capture-based tests with logging routed to stdout.
  FLAGS_logtostderr = false;
  FLAGS_logtostdout = true;
  FLAGS_stderrthreshold = NUM_SEVERITIES;
  CaptureTestStdout();
  TestRawLogging();
  TestLoggingLevels();
  TestLogString();
  TestLogSink();
  TestLogToString();
  TestLogSinkWaitTillSent();
  TestCHECK();
  TestDCHECK();
  TestSTREQ();
  EXPECT_TRUE(
      MungeAndDiffTestStdout(FLAGS_test_srcdir + "/src/logging_unittest.out"));
  FLAGS_logtostdout = false;
  // File-based and miscellaneous tests (not golden-file driven).
  TestBasename();
  TestBasenameAppendWhenNoTimestamp();
  TestTwoProcessesWrite();
  TestSymlink();
  TestExtension();
  TestWrapper();
  TestErrno();
  TestTruncate();
  TestCustomLoggerDeletionOnShutdown();
  TestLogPeriodically();
  fprintf(stdout, "PASS\n");
  return 0;
}
// Emits a fixed battery of LOG / LOG_IF / *_EVERY_N statements whose
// output is validated by the golden-file diff in main().  When
// check_counts is true, also verifies the per-severity message counters
// advanced by exactly the number of messages emitted here.
void TestLogging(bool check_counts) {
  int64 base_num_infos = LogMessage::num_messages(GLOG_INFO);
  int64 base_num_warning = LogMessage::num_messages(GLOG_WARNING);
  int64 base_num_errors = LogMessage::num_messages(GLOG_ERROR);
  LOG(INFO) << string("foo ") << "bar " << 10 << ' ' << 3.4;
  for (int i = 0; i < 10; ++i) {
    int old_errno = errno;
    errno = i;
    // PLOG must format the current errno, then restore the caller's.
    PLOG_EVERY_N(ERROR, 2) << "Plog every 2, iteration " << COUNTER;
    errno = old_errno;
    LOG_EVERY_N(ERROR, 3) << "Log every 3, iteration " << COUNTER << endl;
    LOG_EVERY_N(ERROR, 4) << "Log every 4, iteration " << COUNTER << endl;
    LOG_IF_EVERY_N(WARNING, true, 5) << "Log if every 5, iteration " << COUNTER;
    LOG_IF_EVERY_N(WARNING, false, 3)
        << "Log if every 3, iteration " << COUNTER;
    LOG_IF_EVERY_N(INFO, true, 1) << "Log if every 1, iteration " << COUNTER;
    LOG_IF_EVERY_N(ERROR, (i < 3), 2)
        << "Log if less than 3 every 2, iteration " << COUNTER;
  }
  LOG_IF(WARNING, true) << "log_if this";
  LOG_IF(WARNING, false) << "don't log_if this";
  // Array and const-array operands must stream as strings.
  char s[] = "array";
  LOG(INFO) << s;
  const char const_s[] = "const array";
  LOG(INFO) << const_s;
  // Stream manipulators (setw/hex) must pass through unchanged.
  int j = 1000;
  LOG(ERROR) << string("foo") << ' ' << j << ' ' << setw(10) << j << " "
             << setw(1) << hex << j;
  LOG(INFO) << "foo " << std::setw(10) << 1.0;
  {
    // A nested LOG while another LogMessage is alive must not deadlock.
    google::LogMessage outer(__FILE__, __LINE__, GLOG_ERROR);
    outer.stream() << "outer";
    LOG(ERROR) << "inner";
  }
  LogMessage("foo", LogMessage::kNoLogPrefix, GLOG_INFO).stream()
      << "no prefix";
  if (check_counts) {
    // Expected deltas match the exact statement mix emitted above.
    CHECK_EQ(base_num_infos + 15, LogMessage::num_messages(GLOG_INFO));
    CHECK_EQ(base_num_warning + 3, LogMessage::num_messages(GLOG_WARNING));
    CHECK_EQ(base_num_errors + 17, LogMessage::num_messages(GLOG_ERROR));
  }
}
// Hook installed while a NewHook is alive: any heap allocation aborts.
static void NoAllocNewHook() { LOG(FATAL) << "unexpected new"; }
// RAII guard asserting that the enclosed scope performs no operator new.
struct NewHook {
  NewHook() { g_new_hook = &NoAllocNewHook; }
  ~NewHook() { g_new_hook = nullptr; }
};
namespace {
int* allocInt() { return new int; }
}  // namespace
// Sanity-check the hook itself: an allocation under NewHook must die.
TEST(DeathNoAllocNewHook, logging) {
  // tests that NewHook used below works
  NewHook new_hook;
  (void)&allocInt;
  ASSERT_DEATH({ allocInt(); }, "unexpected new");
}
// Exercises RAW_LOG/RAW_VLOG/RAW_CHECK.  The strings allocated up front
// are deliberately created BEFORE NewHook is armed: RAW logging itself
// must never allocate, which the hook enforces by aborting on new.
void TestRawLogging() {
  auto* foo = new string("foo ");
  string huge_str(50000, 'a');
  FlagSaver saver;
  // Check that RAW logging does not use mallocs.
  NewHook new_hook;
  RAW_LOG(INFO, "%s%s%d%c%f", foo->c_str(), "bar ", 10, ' ', 3.4);
  char s[] = "array";
  RAW_LOG(WARNING, "%s", s);
  const char const_s[] = "const array";
  RAW_LOG(INFO, "%s", const_s);
  // %p formatting for both non-null and null pointers.
  void* p = reinterpret_cast<void*>(PTR_TEST_VALUE);
  RAW_LOG(INFO, "ptr %p", p);
  p = nullptr;
  RAW_LOG(INFO, "ptr %p", p);
  int j = 1000;
  RAW_LOG(ERROR, "%s%d%c%010d%s%1x", foo->c_str(), j, ' ', j, " ", j);
  RAW_VLOG(0, "foo %d", j);
#if defined(NDEBUG)
  RAW_LOG(INFO, "foo %d", j);  // so that have same stderr to compare
#else
  RAW_DLOG(INFO, "foo %d", j);  // test RAW_DLOG in debug mode
#endif
  // Messages longer than the RAW buffer must be truncated, not overflow.
  RAW_LOG(WARNING, "Huge string: %s", huge_str.c_str());
  RAW_VLOG(0, "Huge string: %s", huge_str.c_str());
  // RAW_VLOG must honor FLAGS_v at the time of the call.
  FLAGS_v = 0;
  RAW_LOG(INFO, "log");
  RAW_VLOG(0, "vlog 0 on");
  RAW_VLOG(1, "vlog 1 off");
  RAW_VLOG(2, "vlog 2 off");
  RAW_VLOG(3, "vlog 3 off");
  FLAGS_v = 2;
  RAW_LOG(INFO, "log");
  RAW_VLOG(1, "vlog 1 on");
  RAW_VLOG(2, "vlog 2 on");
  RAW_VLOG(3, "vlog 3 off");
#if defined(NDEBUG)
  RAW_DCHECK(1 == 2, " RAW_DCHECK's shouldn't be compiled in normal mode");
#endif
  RAW_CHECK(1 == 1, "should be ok");
  RAW_DCHECK(true, "should be ok");
  delete foo;
}
// Emits a fixed battery of RAW_LOG / LOG / VLOG / *_IF / *_EVERY_N
// statements under the given verbosity, stderr threshold, and
// destination flags; output is validated against golden files.  The
// tail also verifies the macro contract that every conditional
// expression is evaluated exactly once, logged or not.
void LogWithLevels(int v, int severity, bool err, bool alsoerr) {
  RAW_LOG(INFO,
          "Test: v=%d stderrthreshold=%d logtostderr=%d alsologtostderr=%d", v,
          severity, err, alsoerr);
  FlagSaver saver;  // restores all flags on scope exit
  FLAGS_v = v;
  FLAGS_stderrthreshold = severity;
  FLAGS_logtostderr = err;
  FLAGS_alsologtostderr = alsoerr;
  RAW_VLOG(-1, "vlog -1");
  RAW_VLOG(0, "vlog 0");
  RAW_VLOG(1, "vlog 1");
  RAW_LOG(INFO, "log info");
  RAW_LOG(WARNING, "log warning");
  RAW_LOG(ERROR, "log error");
  VLOG(-1) << "vlog -1";
  VLOG(0) << "vlog 0";
  VLOG(1) << "vlog 1";
  LOG(INFO) << "log info";
  LOG(WARNING) << "log warning";
  LOG(ERROR) << "log error";
  VLOG_IF(-1, true) << "vlog_if -1";
  VLOG_IF(-1, false) << "don't vlog_if -1";
  VLOG_IF(0, true) << "vlog_if 0";
  VLOG_IF(0, false) << "don't vlog_if 0";
  VLOG_IF(1, true) << "vlog_if 1";
  VLOG_IF(1, false) << "don't vlog_if 1";
  LOG_IF(INFO, true) << "log_if info";
  LOG_IF(INFO, false) << "don't log_if info";
  LOG_IF(WARNING, true) << "log_if warning";
  LOG_IF(WARNING, false) << "don't log_if warning";
  LOG_IF(ERROR, true) << "log_if error";
  LOG_IF(ERROR, false) << "don't log_if error";
  // From here on, `c -= N` side effects prove the condition expression
  // runs exactly once regardless of whether the message is emitted.
  int c;
  c = 1;
  VLOG_IF(100, c -= 2) << "vlog_if 100 expr";
  EXPECT_EQ(c, -1);
  c = 1;
  VLOG_IF(0, c -= 2) << "vlog_if 0 expr";
  EXPECT_EQ(c, -1);
  c = 1;
  LOG_IF(INFO, c -= 2) << "log_if info expr";
  EXPECT_EQ(c, -1);
  c = 1;
  LOG_IF(ERROR, c -= 2) << "log_if error expr";
  EXPECT_EQ(c, -1);
  c = 2;
  VLOG_IF(0, c -= 2) << "don't vlog_if 0 expr";
  EXPECT_EQ(c, 0);
  c = 2;
  LOG_IF(ERROR, c -= 2) << "don't log_if error expr";
  EXPECT_EQ(c, 0);
  c = 3;
  LOG_IF_EVERY_N(INFO, c -= 4, 1) << "log_if info every 1 expr";
  EXPECT_EQ(c, -1);
  c = 3;
  LOG_IF_EVERY_N(ERROR, c -= 4, 1) << "log_if error every 1 expr";
  EXPECT_EQ(c, -1);
  c = 4;
  LOG_IF_EVERY_N(ERROR, c -= 4, 3) << "don't log_if info every 3 expr";
  EXPECT_EQ(c, 0);
  c = 4;
  LOG_IF_EVERY_N(ERROR, c -= 4, 3) << "don't log_if error every 3 expr";
  EXPECT_EQ(c, 0);
  c = 5;
  VLOG_IF_EVERY_N(0, c -= 4, 1) << "vlog_if 0 every 1 expr";
  EXPECT_EQ(c, 1);
  c = 5;
  VLOG_IF_EVERY_N(100, c -= 4, 3) << "vlog_if 100 every 3 expr";
  EXPECT_EQ(c, 1);
  c = 6;
  VLOG_IF_EVERY_N(0, c -= 6, 1) << "don't vlog_if 0 every 1 expr";
  EXPECT_EQ(c, 0);
  c = 6;
  VLOG_IF_EVERY_N(100, c -= 6, 3) << "don't vlog_if 100 every 1 expr";
  EXPECT_EQ(c, 0);
}
// Runs LogWithLevels() over a matrix of verbosity / threshold /
// destination settings; output is validated via the golden-file diff.
void TestLoggingLevels() {
  LogWithLevels(0, GLOG_INFO, false, false);
  LogWithLevels(1, GLOG_INFO, false, false);
  LogWithLevels(-1, GLOG_INFO, false, false);
  LogWithLevels(0, GLOG_WARNING, false, false);
  LogWithLevels(0, GLOG_ERROR, false, false);
  LogWithLevels(0, GLOG_FATAL, false, false);
  LogWithLevels(0, GLOG_FATAL, true, false);
  LogWithLevels(0, GLOG_FATAL, false, true);
  LogWithLevels(1, GLOG_WARNING, false, false);
  LogWithLevels(1, GLOG_FATAL, false, true);
}
// Returns 1 iff VLOG(1) is currently enabled for this module.
int TestVlogHelper() {
  if (VLOG_IS_ON(1)) {
    return 1;
  }
  return 0;
}
// SetVLOGLevel() for this module must change what VLOG_IS_ON reports.
void TestVLogModule() {
  int c = TestVlogHelper();
  EXPECT_EQ(0, c);
#if defined(__GNUC__)
  // NOTE(review): guarded to GCC-compatible compilers — presumably
  // per-module vlog levels are unsupported elsewhere; confirm.
  EXPECT_EQ(0, SetVLOGLevel("logging_unittest", 1));
  c = TestVlogHelper();
  EXPECT_EQ(1, c);
#endif
}
// RAW_CHECK failures must abort with the expected raw-mode message;
// RAW_DCHECK does so only in debug builds.
TEST(DeathRawCHECK, logging) {
  ASSERT_DEATH(RAW_CHECK(false, "failure 1"),
               "RAW: Check false failed: failure 1");
  ASSERT_DEBUG_DEATH(RAW_DCHECK(1 == 2, "failure 2"),
                     "RAW: Check 1 == 2 failed: failure 2");
}
// LOG_STRING with a non-null vector collects messages instead of (only)
// reporting them; with nullptr it logs normally.
void TestLogString() {
  vector<string> errors;
  vector<string>* no_errors = nullptr;
  LOG_STRING(INFO, &errors) << "LOG_STRING: "
                            << "collected info";
  LOG_STRING(WARNING, &errors) << "LOG_STRING: "
                               << "collected warning";
  LOG_STRING(ERROR, &errors) << "LOG_STRING: "
                             << "collected error";
  LOG_STRING(INFO, no_errors) << "LOG_STRING: "
                              << "reported info";
  LOG_STRING(WARNING, no_errors) << "LOG_STRING: "
                                 << "reported warning";
  LOG_STRING(ERROR, nullptr) << "LOG_STRING: "
                             << "reported error";
  for (auto& error : errors) {
    LOG(INFO) << "Captured by LOG_STRING:  " << error;
  }
}
// LOG_TO_STRING captures a single message into a string (and still logs
// it); with nullptr it logs normally.
void TestLogToString() {
  string error;
  string* no_error = nullptr;
  LOG_TO_STRING(INFO, &error) << "LOG_TO_STRING: "
                              << "collected info";
  LOG(INFO) << "Captured by LOG_TO_STRING:  " << error;
  LOG_TO_STRING(WARNING, &error) << "LOG_TO_STRING: "
                                 << "collected warning";
  LOG(INFO) << "Captured by LOG_TO_STRING:  " << error;
  LOG_TO_STRING(ERROR, &error) << "LOG_TO_STRING: "
                               << "collected error";
  LOG(INFO) << "Captured by LOG_TO_STRING:  " << error;
  LOG_TO_STRING(INFO, no_error) << "LOG_TO_STRING: "
                                << "reported info";
  LOG_TO_STRING(WARNING, no_error) << "LOG_TO_STRING: "
                                   << "reported warning";
  LOG_TO_STRING(ERROR, nullptr) << "LOG_TO_STRING: "
                                << "reported error";
}
// LogSink that records each delivered message (rendered via ToString)
// for later inspection.
class TestLogSinkImpl : public LogSink {
 public:
  vector<string> errors;
  void send(LogSeverity severity, const char* ,
            const char* base_filename, int line,
            const LogMessageTime& logmsgtime, const char* message,
            size_t message_len) override {
    errors.push_back(ToString(severity, base_filename, line, logmsgtime,
                              message, message_len));
  }
};
// LOG_TO_SINK with a non-null sink must deliver to the sink (and, for
// the plain variant, also to the log files); a null sink logs normally.
void TestLogSink() {
  TestLogSinkImpl sink;
  LogSink* no_sink = nullptr;
  LOG_TO_SINK(&sink, INFO) << "LOG_TO_SINK: "
                           << "collected info";
  LOG_TO_SINK(&sink, WARNING) << "LOG_TO_SINK: "
                              << "collected warning";
  LOG_TO_SINK(&sink, ERROR) << "LOG_TO_SINK: "
                            << "collected error";
  LOG_TO_SINK(no_sink, INFO) << "LOG_TO_SINK: "
                             << "reported info";
  LOG_TO_SINK(no_sink, WARNING) << "LOG_TO_SINK: "
                                << "reported warning";
  LOG_TO_SINK(nullptr, ERROR) << "LOG_TO_SINK: "
                              << "reported error";
  LOG_TO_SINK_BUT_NOT_TO_LOGFILE(&sink, INFO)
      << "LOG_TO_SINK_BUT_NOT_TO_LOGFILE: "
      << "collected info";
  LOG_TO_SINK_BUT_NOT_TO_LOGFILE(&sink, WARNING)
      << "LOG_TO_SINK_BUT_NOT_TO_LOGFILE: "
      << "collected warning";
  LOG_TO_SINK_BUT_NOT_TO_LOGFILE(&sink, ERROR)
      << "LOG_TO_SINK_BUT_NOT_TO_LOGFILE: "
      << "collected error";
  LOG_TO_SINK_BUT_NOT_TO_LOGFILE(no_sink, INFO)
      << "LOG_TO_SINK_BUT_NOT_TO_LOGFILE: "
      << "thrashed info";
  LOG_TO_SINK_BUT_NOT_TO_LOGFILE(no_sink, WARNING)
      << "LOG_TO_SINK_BUT_NOT_TO_LOGFILE: "
      << "thrashed warning";
  LOG_TO_SINK_BUT_NOT_TO_LOGFILE(nullptr, ERROR)
      << "LOG_TO_SINK_BUT_NOT_TO_LOGFILE: "
      << "thrashed error";
  // Echo everything the sink collected into the captured output.
  LOG(INFO) << "Captured by LOG_TO_SINK:";
  for (auto& error : sink.errors) {
    LogMessage("foo", LogMessage::kNoLogPrefix, GLOG_INFO).stream() << error;
  }
}
// Anonymous-enum values for checking CHECK_* comparisons on enums.
enum { CASE_A, CASE_B };
// All passing forms of the CHECK family must compile and succeed for
// integers and (except on macOS) anonymous-enum operands.
void TestCHECK() {
  // Tests using CHECK*()s.
  CHECK(1 == 1);
  CHECK_EQ(1, 1);
  CHECK_NE(1, 2);
  CHECK_GE(1, 1);
  CHECK_GE(2, 1);
  CHECK_LE(1, 1);
  CHECK_LE(1, 2);
  CHECK_GT(2, 1);
  CHECK_LT(1, 2);
#if !defined(GLOG_OS_MACOSX)
  // NOTE(review): enum comparisons are skipped on macOS — presumably a
  // platform/toolchain limitation; confirm before removing the guard.
  CHECK_EQ(CASE_A, CASE_A);
  CHECK_NE(CASE_A, CASE_B);
  CHECK_GE(CASE_A, CASE_A);
  CHECK_GE(CASE_B, CASE_A);
  CHECK_LE(CASE_A, CASE_A);
  CHECK_LE(CASE_A, CASE_B);
  CHECK_GT(CASE_B, CASE_A);
  CHECK_LT(CASE_A, CASE_B);
#endif
}
void TestDCHECK() {
#if defined(NDEBUG)
DCHECK(1 == 2) << " DCHECK's shouldn't be compiled in normal mode";
#endif
DCHECK(1 == 1);
DCHECK_EQ(1, 1);
DCHECK_NE(1, 2);
DCHECK_GE(1, 1);
DCHECK_GE(2, 1);
DCHECK_LE(1, 1);
DCHECK_LE(1, 2);
DCHECK_GT(2, 1);
DCHECK_LT(1, 2);
auto* orig_ptr = new int64;
int64* ptr = DCHECK_NOTNULL(orig_ptr);
CHECK_EQ(ptr, orig_ptr);
delete orig_ptr;
}
// Passing forms of the CHECK_STR* family, including nullptr pairs; the
// last two statements also verify that temporaries created inside the
// macro arguments stay alive for the duration of the comparison.
void TestSTREQ() {
  CHECK_STREQ("this", "this");
  CHECK_STREQ(nullptr, nullptr);
  CHECK_STRCASEEQ("this", "tHiS");
  CHECK_STRCASEEQ(nullptr, nullptr);
  CHECK_STRNE("this", "tHiS");
  CHECK_STRNE("this", nullptr);
  CHECK_STRCASENE("this", "that");
  CHECK_STRCASENE(nullptr, "that");
  CHECK_STREQ((string("a") + "b").c_str(), "ab");
  CHECK_STREQ(string("test").c_str(), (string("te") + string("st")).c_str());
}
// Mismatched CHECK_STR* operands must be fatal.
TEST(DeathSTREQ, logging) {
  ASSERT_DEATH(CHECK_STREQ(nullptr, "this"), "");
  ASSERT_DEATH(CHECK_STREQ("this", "siht"), "");
  ASSERT_DEATH(CHECK_STRCASEEQ(nullptr, "siht"), "");
  ASSERT_DEATH(CHECK_STRCASEEQ("this", "siht"), "");
  ASSERT_DEATH(CHECK_STRNE(nullptr, nullptr), "");
  ASSERT_DEATH(CHECK_STRNE("this", "this"), "");
  ASSERT_DEATH(CHECK_STREQ((string("a") + "b").c_str(), "abc"), "");
}
// CHECK_NOTNULL must return its argument, for any pointer type.
TEST(CheckNOTNULL, Simple) {
  int64 t;
  void* ptr = static_cast<void*>(&t);
  void* ref = CHECK_NOTNULL(ptr);
  EXPECT_EQ(ptr, ref);
  CHECK_NOTNULL(reinterpret_cast<char*>(ptr));
  CHECK_NOTNULL(reinterpret_cast<unsigned char*>(ptr));
  CHECK_NOTNULL(reinterpret_cast<int*>(ptr));
  CHECK_NOTNULL(reinterpret_cast<int64*>(ptr));
}
// ... and must be fatal for a null argument.
TEST(DeathCheckNN, Simple) {
  ASSERT_DEATH(CHECK_NOTNULL(static_cast<void*>(nullptr)), "");
}
// Expands the glob-style `pattern` into `files` (clearing it first):
// via glob() where available, via FindFirstFile on Windows.  Dies on
// glob errors other than "no match"; an empty result is not an error.
static void GetFiles(const string& pattern, vector<string>* files) {
  files->clear();
#if defined(HAVE_GLOB_H)
  glob_t g;
  const int r = glob(pattern.c_str(), 0, nullptr, &g);
  CHECK((r == 0) || (r == GLOB_NOMATCH)) << ": error matching " << pattern;
  for (size_t i = 0; i < g.gl_pathc; i++) {
    files->push_back(string(g.gl_pathv[i]));
  }
  globfree(&g);
#elif defined(GLOG_OS_WINDOWS)
  WIN32_FIND_DATAA data;
  HANDLE handle = FindFirstFileA(pattern.c_str(), &data);
  // FindFirstFile yields bare file names; recover the directory part of
  // the pattern so results are full paths like the glob() branch.
  size_t index = pattern.rfind('\\');
  if (index == string::npos) {
    LOG(FATAL) << "No directory separator.";
  }
  const string dirname = pattern.substr(0, index + 1);
  if (handle == INVALID_HANDLE_VALUE) {
    // Finding no files is OK.
    return;
  }
  do {
    files->push_back(dirname + data.cFileName);
  } while (FindNextFileA(handle, &data));
  if (!FindClose(handle)) {
    LOG_SYSRESULT(GetLastError());
  }
#else
# error There is no way to do glob.
#endif
}
static void DeleteFiles(const string& pattern) {
vector<string> files;
GetFiles(pattern, &files);
for (auto& file : files) {
CHECK(unlink(file.c_str()) == 0) << ": " << strerror(errno);
}
}
static void CheckFile(const string& name, const string& expected_string,
const bool checkInFileOrNot = true) {
vector<string> files;
GetFiles(name + "*", &files);
CHECK_EQ(files.size(), 1UL);
std::unique_ptr<std::FILE> file{fopen(files[0].c_str(), "r")};
CHECK(file != nullptr) << ": could not open " << files[0];
char buf[1000];
while (fgets(buf, sizeof(buf), file.get()) != nullptr) {
char* first = strstr(buf, expected_string.c_str());
if (checkInFileOrNot != (first == nullptr)) {
return;
}
}
LOG(FATAL) << "Did " << (checkInFileOrNot ? "not " : "") << "find "
<< expected_string << " in " << files[0];
}
// SetLogDestination() with a fresh basename must create a log file with
// that prefix and receive subsequent messages.
static void TestBasename() {
  fprintf(stderr, "==== Test setting log file basename\n");
  const string dest = FLAGS_test_tmpdir + "/logging_test_basename";
  DeleteFiles(dest + "*");
  SetLogDestination(GLOG_INFO, dest.c_str());
  LOG(INFO) << "message to new base";
  FlushLogFiles(GLOG_INFO);
  CheckFile(dest, "message to new base");
  // Release file handle for the destination file to unlock the file in
  // Windows.
  LogToStderr();
  DeleteFiles(dest + "*");
}
// With FLAGS_timestamp_in_logfile_name off, the destination is a fixed
// name and glog must APPEND to an existing file rather than replace it.
static void TestBasenameAppendWhenNoTimestamp() {
  fprintf(stderr,
          "==== Test setting log file basename without timestamp and appending "
          "properly\n");
  const string dest =
      FLAGS_test_tmpdir + "/logging_test_basename_append_when_no_timestamp";
  DeleteFiles(dest + "*");
  // Seed the file with preexisting content that must survive.
  ofstream out(dest.c_str());
  out << "test preexisting content" << endl;
  out.close();
  CheckFile(dest, "test preexisting content");
  FLAGS_timestamp_in_logfile_name = false;
  SetLogDestination(GLOG_INFO, dest.c_str());
  LOG(INFO) << "message to new base, appending to preexisting file";
  FlushLogFiles(GLOG_INFO);
  FLAGS_timestamp_in_logfile_name = true;
  // If the logging overwrites the file instead of appending it will fail.
  CheckFile(dest, "test preexisting content");
  CheckFile(dest, "message to new base, appending to preexisting file");
  LogToStderr();
  DeleteFiles(dest + "*");
}
// With a fixed (timestamp-free) log name, a second process writing to
// the same destination must NOT end up in the file: the forked child's
// message goes to stderr only.
static void TestTwoProcessesWrite() {
// test only implemented for platforms with fork & wait; the exact
// feature macros mirror what the implementation needs.
#if defined(HAVE_SYS_WAIT_H) && defined(HAVE_UNISTD_H) && defined(HAVE_FCNTL)
  fprintf(stderr,
          "==== Test setting log file basename and two processes writing - "
          "second should fail\n");
  const string dest =
      FLAGS_test_tmpdir + "/logging_test_basename_two_processes_writing";
  DeleteFiles(dest + "*");
  // Parent claims the destination file first.
  FLAGS_timestamp_in_logfile_name = false;
  SetLogDestination(GLOG_INFO, dest.c_str());
  LOG(INFO) << "message to new base, parent";
  FlushLogFiles(GLOG_INFO);
  pid_t pid = fork();
  CHECK_ERR(pid);
  if (pid == 0) {
    LOG(INFO) << "message to new base, child - should only appear on STDERR "
                 "not on the file";
    ShutdownGoogleLogging();  // for children proc
    exit(EXIT_SUCCESS);
  } else if (pid > 0) {
    wait(nullptr);
  }
  FLAGS_timestamp_in_logfile_name = true;
  CheckFile(dest, "message to new base, parent");
  CheckFile(dest,
            "message to new base, child - should only appear on STDERR not on "
            "the file",
            false);
  // Release file handle for the destination file to unlock the file.
  LogToStderr();
  DeleteFiles(dest + "*");
#endif
}
// SetLogSymlink() must create a symlink that resolves to the current
// log file (not applicable on Windows).
static void TestSymlink() {
#ifndef GLOG_OS_WINDOWS
  fprintf(stderr, "==== Test setting log file symlink\n");
  string dest = FLAGS_test_tmpdir + "/logging_test_symlink";
  string sym = FLAGS_test_tmpdir + "/symlinkbase";
  DeleteFiles(dest + "*");
  DeleteFiles(sym + "*");
  SetLogSymlink(GLOG_INFO, "symlinkbase");
  SetLogDestination(GLOG_INFO, dest.c_str());
  LOG(INFO) << "message to new symlink";
  FlushLogFiles(GLOG_INFO);
  CheckFile(sym, "message to new symlink");
  DeleteFiles(dest + "*");
  DeleteFiles(sym + "*");
#endif
}
// SetLogFilenameExtension() must be reflected in the created file name.
static void TestExtension() {
  fprintf(stderr, "==== Test setting log file extension\n");
  string dest = FLAGS_test_tmpdir + "/logging_test_extension";
  DeleteFiles(dest + "*");
  SetLogDestination(GLOG_INFO, dest.c_str());
  SetLogFilenameExtension("specialextension");
  LOG(INFO) << "message to new extension";
  FlushLogFiles(GLOG_INFO);
  CheckFile(dest, "message to new extension");
  // Check that file name ends with extension
  vector<string> filenames;
  GetFiles(dest + "*", &filenames);
  CHECK_EQ(filenames.size(), 1UL);
  CHECK(strstr(filenames[0].c_str(), "specialextension") != nullptr);
  // Release file handle for the destination file to unlock the file.
  LogToStderr();
  DeleteFiles(dest + "*");
}
// In-memory Logger used to verify SetLogger() routing; flags a bool on
// destruction so deletion can be observed.
struct MyLogger : public base::Logger {
  string data;
  explicit MyLogger(bool* set_on_destruction)
      : set_on_destruction_(set_on_destruction) {}
  ~MyLogger() override { *set_on_destruction_ = true; }
  void Write(bool ,
             const std::chrono::system_clock::time_point& ,
             const char* message, size_t length) override {
    data.append(message, length);
  }
  void Flush() override {}
  uint32 LogSize() override { return static_cast<uint32>(data.length()); }
 private:
  bool* set_on_destruction_;
};
// A logger installed via SetLogger() must receive messages; restoring
// the old logger must delete the replacement.
static void TestWrapper() {
  fprintf(stderr, "==== Test log wrapper\n");
  bool custom_logger_deleted = false;
  auto* my_logger = new MyLogger(&custom_logger_deleted);
  base::Logger* old_logger = base::GetLogger(GLOG_INFO);
  base::SetLogger(GLOG_INFO, my_logger);
  LOG(INFO) << "Send to wrapped logger";
  CHECK(strstr(my_logger->data.c_str(), "Send to wrapped logger") != nullptr);
  FlushLogFiles(GLOG_INFO);
  EXPECT_FALSE(custom_logger_deleted);
  base::SetLogger(GLOG_INFO, old_logger);
  EXPECT_TRUE(custom_logger_deleted);
}
// Logging must preserve the caller's errno.
static void TestErrno() {
  fprintf(stderr, "==== Test errno preservation\n");
  errno = ENOENT;
  TestLogging(false);
  CHECK_EQ(errno, ENOENT);
}
// Creates `path` containing `dsize` bytes of "DISCARDME!" filler
// followed by `ksize` bytes of "KEEPME!", runs
// TruncateLogFile(path, limit, keep), then verifies the surviving file
// is exactly `expect` bytes and consists only of the KEEPME! tail.
static void TestOneTruncate(const char* path, uint64 limit, uint64 keep,
                            size_t dsize, size_t ksize, size_t expect) {
  FileDescriptor fd{open(path, O_RDWR | O_CREAT | O_TRUNC, 0600)};
  CHECK_ERR(fd);
  const char *discardstr = "DISCARDME!", *keepstr = "KEEPME!";
  const size_t discard_size = strlen(discardstr), keep_size = strlen(keepstr);
  // Fill the file with the requested byte patterns.
  size_t written = 0;
  while (written < dsize) {
    size_t bytes = min(dsize - written, discard_size);
    CHECK_ERR(write(fd.get(), discardstr, bytes));
    written += bytes;
  }
  written = 0;
  while (written < ksize) {
    size_t bytes = min(ksize - written, keep_size);
    CHECK_ERR(write(fd.get(), keepstr, bytes));
    written += bytes;
  }
  TruncateLogFile(path, limit, keep);
  // Check the resulting size of this file.
  struct stat statbuf;
  CHECK_ERR(fstat(fd.get(), &statbuf));
  CHECK_EQ(static_cast<size_t>(statbuf.st_size), expect);
  CHECK_ERR(lseek(fd.get(), 0, SEEK_SET));
  // The remaining content must be repetitions of keepstr only.
  const size_t buf_size = static_cast<size_t>(statbuf.st_size) + 1;
  std::vector<char> buf(buf_size);
  CHECK_ERR(read(fd.get(), buf.data(), buf_size));
  const char* p = buf.data();
  size_t checked = 0;
  while (checked < expect) {
    size_t bytes = min(expect - checked, keep_size);
    CHECK(!memcmp(p, keepstr, bytes));
    checked += bytes;
  }
}
// Drives TestOneTruncate over representative (limit, keep, discard,
// keep-bytes, expected-size) cases, plus symlink and /proc/self/fd
// indirection where the platform supports them.
static void TestTruncate() {
#ifdef HAVE_UNISTD_H
  fprintf(stderr, "==== Test log truncation\n");
  string path = FLAGS_test_tmpdir + "/truncatefile";
  // Test on a small file.
  TestOneTruncate(path.c_str(), 10, 10, 10, 10, 10);
  // And a big file (multiple blocks to copy).
  TestOneTruncate(path.c_str(), 2U << 20U, 4U << 10U, 3U << 20U, 4U << 10U,
                  4U << 10U);
  // Check edge-case limits (limit size larger than file, etc.).
  TestOneTruncate(path.c_str(), 10, 20, 0, 20, 20);
  TestOneTruncate(path.c_str(), 10, 0, 0, 0, 0);
  TestOneTruncate(path.c_str(), 10, 50, 0, 10, 10);
  TestOneTruncate(path.c_str(), 50, 100, 0, 30, 30);
# if !defined(GLOG_OS_MACOSX) && !defined(GLOG_OS_WINDOWS)
  // Truncation must follow symlinks to the real file.
  string linkname = path + ".link";
  unlink(linkname.c_str());
  CHECK_ERR(symlink(path.c_str(), linkname.c_str()));
  TestOneTruncate(linkname.c_str(), 10, 10, 0, 30, 30);
# endif
# if defined(GLOG_OS_LINUX)
  // Truncation must also work through /proc/self/fd/N paths.
  int fd;
  CHECK_ERR(fd = open(path.c_str(), O_APPEND | O_WRONLY));
  char fdpath[64];
  std::snprintf(fdpath, sizeof(fdpath), "/proc/self/fd/%d", fd);
  TestOneTruncate(fdpath, 10, 10, 10, 10, 10);
  // Fix: the descriptor was previously leaked for the remainder of the
  // test run.
  close(fd);
# endif
#endif
}
// Logger decorator that forwards everything to a wrapped logger and
// flags a bool when destroyed, so logger deletion can be observed.
struct RecordDeletionLogger : public base::Logger {
  RecordDeletionLogger(bool* set_on_destruction, base::Logger* wrapped_logger)
      : set_on_destruction_(set_on_destruction),
        wrapped_logger_(wrapped_logger) {
    *set_on_destruction_ = false;
  }
  ~RecordDeletionLogger() override { *set_on_destruction_ = true; }
  void Write(bool force_flush,
             const std::chrono::system_clock::time_point& timestamp,
             const char* message, size_t length) override {
    wrapped_logger_->Write(force_flush, timestamp, message, length);
  }
  void Flush() override { wrapped_logger_->Flush(); }
  uint32 LogSize() override { return wrapped_logger_->LogSize(); }
 private:
  bool* set_on_destruction_;
  base::Logger* wrapped_logger_;
};
// ShutdownGoogleLogging() must delete a user logger installed through
// base::SetLogger() and leave glog uninitialized.
static void TestCustomLoggerDeletionOnShutdown() {
  bool custom_logger_deleted = false;
  base::SetLogger(GLOG_INFO,
                  new RecordDeletionLogger(&custom_logger_deleted,
                                           base::GetLogger(GLOG_INFO)));
  EXPECT_TRUE(IsGoogleLoggingInitialized());
  ShutdownGoogleLogging();
  EXPECT_TRUE(custom_logger_deleted);
  EXPECT_FALSE(IsGoogleLoggingInitialized());
}
namespace LogTimes {
// LOG_EVERY_T period under test (10 ms) and the jitter tolerance
// allowed per gap (0.5 ms).
constexpr int64_t LOG_PERIOD_NS = 10000000;
constexpr int64_t LOG_PERIOD_TOL_NS = 500000;
// Number of timed messages to collect.
constexpr size_t MAX_CALLS = 10;
}  // namespace LogTimes
// Records the instant at which it is streamed into a log message; used
// below to measure the actual spacing of LOG_EVERY_T emissions.
struct LogTimeRecorder {
  LogTimeRecorder() = default;
  size_t m_streamTimes{0};
  std::chrono::steady_clock::time_point m_callTimes[LogTimes::MAX_CALLS];
};
// Streaming a recorder stamps the current time; nothing is written to
// the stream itself.
std::ostream& operator<<(std::ostream& stream, LogTimeRecorder& t) {
  t.m_callTimes[t.m_streamTimes++] = std::chrono::steady_clock::now();
  return stream;
}
// Returns end - begin in nanoseconds (int64 is the glog typedef).
int64 elapsedTime_ns(const std::chrono::steady_clock::time_point& begin,
                     const std::chrono::steady_clock::time_point& end) {
  return std::chrono::duration_cast<std::chrono::nanoseconds>((end - begin))
      .count();
}
// LOG_EVERY_T(INFO, 10ms) must emit messages spaced approximately one
// period apart; the streamed LogTimeRecorder timestamps each emission.
static void TestLogPeriodically() {
  fprintf(stderr, "==== Test log periodically\n");
  LogTimeRecorder timeLogger;
  constexpr double LOG_PERIOD_SEC = LogTimes::LOG_PERIOD_NS * 1e-9;
  // Spin until MAX_CALLS messages have actually been emitted.
  while (timeLogger.m_streamTimes < LogTimes::MAX_CALLS) {
    LOG_EVERY_T(INFO, LOG_PERIOD_SEC)
        << timeLogger << "Timed Message #" << timeLogger.m_streamTimes;
  }
  // Compare each successive emission gap against the expected period.
  int64 nsBetweenCalls[LogTimes::MAX_CALLS - 1];
  for (size_t i = 1; i < LogTimes::MAX_CALLS; ++i) {
    nsBetweenCalls[i - 1] = elapsedTime_ns(timeLogger.m_callTimes[i - 1],
                                           timeLogger.m_callTimes[i]);
  }
  for (long time_ns : nsBetweenCalls) {
    EXPECT_NEAR(time_ns, LogTimes::LOG_PERIOD_NS, LogTimes::LOG_PERIOD_TOL_NS);
  }
}
namespace google {
inline namespace glog_internal_namespace_ {
// Glob-style matcher from the glog implementation, re-declared here for
// direct testing.
extern bool SafeFNMatch_(const char* pattern, size_t patt_len, const char* str,
                         size_t str_len);
}  // namespace glog_internal_namespace_
}  // namespace google
// Calls SafeFNMatch_ with deliberately oversized backing strings to
// verify that the explicit length arguments — not NUL terminators —
// bound the comparison.
static bool WrapSafeFNMatch(string pattern, string str) {
  pattern += "abc";
  str += "defgh";
  return SafeFNMatch_(pattern.data(), pattern.size() - 3, str.data(),
                      str.size() - 5);
}
TEST(SafeFNMatch, logging) {
CHECK(WrapSafeFNMatch("foo", "foo"));
CHECK(!WrapSafeFNMatch("foo", "bar"));
CHECK(!WrapSafeFNMatch("foo", "fo"));
CHECK(!WrapSafeFNMatch("foo", "foo2"));
CHECK(WrapSafeFNMatch("bar/foo.ext", "bar/foo.ext"));
CHECK(WrapSafeFNMatch("*ba*r/fo*o.ext*", "bar/foo.ext"));
CHECK(!WrapSafeFNMatch("bar/foo.ext", "bar/baz.ext"));
CHECK(!WrapSafeFNMatch("bar/foo.ext", "bar/foo"));
CHECK(!WrapSafeFNMatch("bar/foo.ext", "bar/foo.ext.zip"));
CHECK(WrapSafeFNMatch("ba?,
const char* base_filename, int line,
const LogMessageTime& logmsgtime, const char* message,
size_t message_len) override {
if (tid_ == std::this_thread::get_id()) {
writer_.Buffer(ToString(severity, base_filename, line, logmsgtime,
message, message_len));
}
}
void WaitTillSent() override {
if (tid_ == std::this_thread::get_id()) writer_.Wait();
}
private:
std::thread::id tid_;
TestLogSinkWriter writer_;
};
static void TestLogSinkWaitTillSent() {
global_messages.clear();
{
using namespace std::chrono_literals;
TestWaitingLogSink sink;
LOG(INFO) << "Message 1";
std::this_thread::sleep_for(60ms);
LOG(ERROR) << "Message 2";
std::this_thread::sleep_for(60ms);
LOG(WARNING) << "Message 3";
std::this_thread::sleep_for(60ms);
}
for (auto& global_message : global_messages) {
LOG(INFO) << "Sink capture: " << global_message;
}
CHECK_EQ(global_messages.size(), 3UL);
}
TEST(Strerror, logging) {
int errcode = EINTR;
std::string msg = strerror(errcode);
const size_t buf_size = msg.size() + 1;
std::vector<char> buf(buf_size);
CHECK_EQ(posix_strerror_r(errcode, nullptr, 0), -1);
buf[0] = 'A';
CHECK_EQ(posix_strerror_r(errcode, buf.data(), 0), -1);
CHECK_EQ(buf[0], 'A');
CHECK_EQ(posix_strerror_r(errcode, nullptr, buf_size), -1);
#if defined(GLOG_OS_MACOSX) || defined(GLOG_OS_FREEBSD) || \
defined(GLOG_OS_OPENBSD)
CHECK_EQ(posix_strerror_r(errcode, buf.data(), 1), -1);
#else
CHECK_EQ(posix_strerror_r(errcode, buf.data(), 1), 0);
#endif
CHECK_STREQ(buf.data(), "");
CHECK_EQ(posix_strerror_r(errcode, buf.data(), buf_size), 0);
CHECK_STREQ(buf.data(), msg.c_str());
CHECK_EQ(msg, StrError(errcode));
}
#ifdef HAVE_LIB_GMOCK
TEST(DVLog, Basic) {
ScopedMockLog log;
# if defined(NDEBUG)
EXPECT_CALL(log, Log(_, _, _)).Times(0);
# else
EXPECT_CALL(log, Log(GLOG_INFO, __FILE__, "debug log"));
# endif
FLAGS_v = 1;
DVLOG(1) << "debug log";
}
TEST(DVLog, V0) {
ScopedMockLog log;
EXPECT_CALL(log, Log(_, _, _)).Times(0);
FLAGS_v = 0;
DVLOG(1) << "debug log";
}
TEST(LogAtLevel, Basic) {
ScopedMockLog log;
EXPECT_CALL(log, Log(GLOG_WARNING, StrNe(__FILE__), "function version"));
EXPECT_CALL(log, Log(GLOG_INFO, __FILE__, "macro version"));
LogSeverity severity = GLOG_WARNING;
LogAtLevel(severity, "function version");
severity = GLOG_INFO;
LOG_AT_LEVEL(severity) << "macro" << ' ' << "version";
}
TEST(TestExitOnDFatal, ToBeOrNotToBe) {
EXPECT_TRUE(base::internal::GetExitOnDFatal());
base::internal::SetExitOnDFatal(false);
EXPECT_FALSE(base::internal::GetExitOnDFatal());
{
ScopedMockLog log;
const LogSeverity severity =
# if defined(NDEBUG)
GLOG_ERROR;
# else
GLOG_FATAL;
# endif
EXPECT_CALL(log, Log(severity, __FILE__, "This should not be fatal"));
LOG(DFATAL) << "This should not be fatal";
}
base::internal::SetExitOnDFatal(true);
EXPECT_TRUE(base::internal::GetExitOnDFatal());
# ifdef GTEST_HAS_DEATH_TEST
EXPECT_DEBUG_DEATH({ LOG(DFATAL) << "This should be fatal in debug mode"; },
"This should be fatal in debug mode");
# endif
}
# ifdef HAVE_STACKTRACE
static void BacktraceAtHelper() {
LOG(INFO) << "Not me";
LOG(INFO) << "Backtrace me";
}
static int kBacktraceAtLine = __LINE__ - 2;
TEST(LogBacktraceAt, DoesNotBacktraceWhenDisabled) {
StrictMock<ScopedMockLog> log;
FLAGS_log_backtrace_at = "";
EXPECT_CALL(log, Log(_, _, "Backtrace me"));
EXPECT_CALL(log, Log(_, _, "Not me"));
BacktraceAtHelper();
}
TEST(LogBacktraceAt, DoesBacktraceAtRightLineWhenEnabled) {
StrictMock<ScopedMockLog> log;
char where[100];
std::snprintf(where, 100, "%s:%d", const_basename(__FILE__),
kBacktraceAtLine);
FLAGS_log_backtrace_at = where;
EXPECT_CALL(
log, Log(_, _,
AllOf(HasSubstr("stacktrace:"), HasSubstr("BacktraceAtHelper"),
HasSubstr("main"), HasSubstr("Backtrace me"))));
EXPECT_CALL(log, Log(_, _, "Not me"));
BacktraceAtHelper();
}
# endif
#endif
struct UserDefinedClass {
bool operator==(const UserDefinedClass&) const { return true; }
};
inline ostream& operator<<(ostream& out, const UserDefinedClass&) {
out << "OK";
return out;
}
TEST(UserDefinedClass, logging) {
UserDefinedClass u;
vector<string> buf;
LOG_STRING(INFO, &buf) << u;
CHECK_EQ(1UL, buf.size());
CHECK(buf[0].find("OK") != string::npos);
CHECK_EQ(u, u);
}
TEST(LogMsgTime, gmtoff) {
google::LogMessage log_obj(__FILE__, __LINE__);
std::chrono::seconds gmtoff = log_obj.time().gmtoffset();
using namespace std::chrono_literals;
constexpr std::chrono::hours utc_min_offset = -12h;
constexpr std::chrono::hours utc_max_offset = +14h;
EXPECT_TRUE((gmtoff >= utc_min_offset) && (gmtoff <= utc_max_offset));
}
TEST(EmailLogging, ValidAddress) {
FlagSaver saver;
FLAGS_logmailer = "/usr/bin/true";
EXPECT_TRUE(
SendEmail("[email protected]", "Example subject", "Example body"));
}
TEST(EmailLogging, MultipleAddresses) {
FlagSaver saver;
FLAGS_logmailer = "/usr/bin/true";
EXPECT_TRUE(SendEmail("[email protected],[email protected]", "Example subject",
"Example body"));
}
TEST(EmailLogging, InvalidAddress) {
FlagSaver saver;
FLAGS_logmailer = "/usr/bin/true";
EXPECT_FALSE(SendEmail("hello world@foo", "Example subject", "Example body"));
}
TEST(EmailLogging, MaliciousAddress) {
FlagSaver saver;
FLAGS_logmailer = "/usr/bin/true";
EXPECT_FALSE(
SendEmail("!/bin/[email protected]", "Example subject", "Example body"));
}
TEST(Logging, FatalThrow) {
auto const fail_func =
InstallFailureFunction(+[]()
#if defined(__has_attribute)
# if __has_attribute(noreturn)
__attribute__((noreturn))
# endif
#endif
{ throw std::logic_error{"fail"}; });
auto restore_fail = [fail_func] { InstallFailureFunction(fail_func); };
ScopedExit<decltype(restore_fail)> restore{restore_fail};
EXPECT_THROW({ LOG(FATAL) << "must throw to fail"; }, std::logic_error);
} | https://github.com/google/glog/blob/de309c08c05382fee0792380de7df1bd65332da2/src/logging.cc | https://github.com/google/glog/blob/de309c08c05382fee0792380de7df1bd65332da2/src/logging_unittest.cc | de309c08c05382fee0792380de7df1bd65332da2 |
4319d63b-c78f-484c-8fe8-e6acfcb63ac0 | cpp | google/glog | demangle | src/demangle.cc | src/demangle_unittest.cc | #include "demangle.h"
#include <algorithm>
#include <cstdlib>
#include <limits>
#include "utilities.h"
#if defined(HAVE___CXA_DEMANGLE)
# include <cxxabi.h>
#endif
#if defined(GLOG_OS_WINDOWS)
# include <dbghelp.h>
#endif
namespace google {
inline namespace glog_internal_namespace_ {
#if !defined(GLOG_OS_WINDOWS) && !defined(HAVE___CXA_DEMANGLE)
namespace {
struct AbbrevPair {
const char* const abbrev;
const char* const real_name;
};
const AbbrevPair kOperatorList[] = {
{"nw", "new"}, {"na", "new[]"}, {"dl", "delete"}, {"da", "delete[]"},
{"ps", "+"}, {"ng", "-"}, {"ad", "&"}, {"de", "*"},
{"co", "~"}, {"pl", "+"}, {"mi", "-"}, {"ml", "*"},
{"dv", "/"}, {"rm", "%"}, {"an", "&"}, {"or", "|"},
{"eo", "^"}, {"aS", "="}, {"pL", "+="}, {"mI", "-="},
{"mL", "*="}, {"dV", "/="}, {"rM", "%="}, {"aN", "&="},
{"oR", "|="}, {"eO", "^="}, {"ls", "<<"}, {"rs", ">>"},
{"lS", "<<="}, {"rS", ">>="}, {"eq", "=="}, {"ne", "!="},
{"lt", "<"}, {"gt", ">"}, {"le", "<="}, {"ge", ">="},
{"nt", "!"}, {"aa", "&&"}, {"oo", "||"}, {"pp", "++"},
{"mm", "--"}, {"cm", ","}, {"pm", "->*"}, {"pt", "->"},
{"cl", "()"}, {"ix", "[]"}, {"qu", "?"}, {"st", "sizeof"},
{"sz", "sizeof"}, {nullptr, nullptr},
};
const AbbrevPair kBuiltinTypeList[] = {
{"v", "void"}, {"w", "wchar_t"},
{"b", "bool"}, {"c", "char"},
{"a", "signed char"}, {"h", "unsigned char"},
{"s", "short"}, {"t", "unsigned short"},
{"i", "int"}, {"j", "unsigned int"},
{"l", "long"}, {"m", "unsigned long"},
{"x", "long long"}, {"y", "unsigned long long"},
{"n", "__int128"}, {"o", "unsigned __int128"},
{"f", "float"}, {"d", "double"},
{"e", "long double"}, {"g", "__float128"},
{"z", "ellipsis"}, {"Dn", "decltype(nullptr)"},
{nullptr, nullptr}};
const AbbrevPair kSubstitutionList[] = {
{"St", ""},
{"Sa", "allocator"},
{"Sb", "basic_string"},
{"Ss", "string"},
{"Si", "istream"},
{"So", "ostream"},
{"Sd", "iostream"},
{nullptr, nullptr}};
struct State {
const char* mangled_cur;
char* out_cur;
const char* out_begin;
const char* out_end;
const char* prev_name;
ssize_t prev_name_length;
short nest_level;
bool append;
bool overflowed;
uint32 local_level;
uint32 expr_level;
uint32 arg_level;
};
size_t StrLen(const char* str) {
size_t len = 0;
while (*str != '\0') {
++str;
++len;
}
return len;
}
bool AtLeastNumCharsRemaining(const char* str, ssize_t n) {
for (ssize_t i = 0; i < n; ++i) {
if (str[i] == '\0') {
return false;
}
}
return true;
}
bool StrPrefix(const char* str, const char* prefix) {
size_t i = 0;
while (str[i] != '\0' && prefix[i] != '\0' && str[i] == prefix[i]) {
++i;
}
return prefix[i] == '\0';
}
void InitState(State* state, const char* mangled, char* out, size_t out_size) {
state->mangled_cur = mangled;
state->out_cur = out;
state->out_begin = out;
state->out_end = out + out_size;
state->prev_name = nullptr;
state->prev_name_length = -1;
state->nest_level = -1;
state->append = true;
state->overflowed = false;
state->local_level = 0;
state->expr_level = 0;
state->arg_level = 0;
}
bool ParseOneCharToken(State* state, const char one_char_token) {
if (state->mangled_cur[0] == one_char_token) {
++state->mangled_cur;
return true;
}
return false;
}
bool ParseTwoCharToken(State* state, const char* two_char_token) {
if (state->mangled_cur[0] == two_char_token[0] &&
state->mangled_cur[1] == two_char_token[1]) {
state->mangled_cur += 2;
return true;
}
return false;
}
bool ParseCharClass(State* state, const char* char_class) {
const char* p = char_class;
for (; *p != '\0'; ++p) {
if (state->mangled_cur[0] == *p) {
++state->mangled_cur;
return true;
}
}
return false;
}
bool Optional(bool) { return true; }
using ParseFunc = bool (*)(State*);
bool OneOrMore(ParseFunc parse_func, State* state) {
if (parse_func(state)) {
while (parse_func(state)) {
}
return true;
}
return false;
}
bool ZeroOrMore(ParseFunc parse_func, State* state) {
while (parse_func(state)) {
}
return true;
}
void Append(State* state, const char* const str, ssize_t length) {
if (state->out_cur == nullptr) {
state->overflowed = true;
return;
}
for (ssize_t i = 0; i < length; ++i) {
if (state->out_cur + 1 < state->out_end) {
*state->out_cur = str[i];
++state->out_cur;
} else {
state->overflowed = true;
break;
}
}
if (!state->overflowed) {
*state->out_cur = '\0';
}
}
bool IsLower(char c) { return c >= 'a' && c <= 'z'; }
bool IsAlpha(char c) {
return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z');
}
bool IsDigit(char c) { return c >= '0' && c <= '9'; }
bool IsFunctionCloneSuffix(const char* str) {
size_t i = 0;
while (str[i] != '\0') {
if (str[i] != '.' || !IsAlpha(str[i + 1])) {
return false;
}
i += 2;
while (IsAlpha(str[i])) {
++i;
}
if (str[i] != '.' || !IsDigit(str[i + 1])) {
return false;
}
i += 2;
while (IsDigit(str[i])) {
++i;
}
}
return true;
}
void MaybeAppendWithLength(State* state, const char* const str,
ssize_t length) {
if (state->append && length > 0) {
if (str[0] == '<' && state->out_begin < state->out_cur &&
state->out_cur[-1] == '<') {
Append(state, " ", 1);
}
if (IsAlpha(str[0]) || str[0] == '_') {
state->prev_name = state->out_cur;
state->prev_name_length = length;
}
Append(state, str, length);
}
}
bool MaybeAppend(State* state, const char* const str) {
if (state->append) {
size_t length = StrLen(str);
MaybeAppendWithLength(state, str, static_cast<ssize_t>(length));
}
return true;
}
bool EnterNestedName(State* state) {
state->nest_level = 0;
return true;
}
bool LeaveNestedName(State* state, short prev_value) {
state->nest_level = prev_value;
return true;
}
bool DisableAppend(State* state) {
state->append = false;
return true;
}
bool RestoreAppend(State* state, bool prev_value) {
state->append = prev_value;
return true;
}
void MaybeIncreaseNestLevel(State* state) {
if (state->nest_level > -1) {
++state->nest_level;
}
}
void MaybeAppendSeparator(State* state) {
if (state->nest_level >= 1) {
MaybeAppend(state, "::");
}
}
void MaybeCancelLastSeparator(State* state) {
if (state->nest_level >= 1 && state->append &&
state->out_begin <= state->out_cur - 2) {
state->out_cur -= 2;
*state->out_cur = '\0';
}
}
bool IdentifierIsAnonymousNamespace(State* state, ssize_t length) {
const char anon_prefix[] = "_GLOBAL__N_";
return (length > static_cast<ssize_t>(sizeof(anon_prefix)) -
1 &&
StrPrefix(state->mangled_cur, anon_prefix));
}
bool ParseMangledName(State* state);
bool ParseEncoding(State* state);
bool ParseName(State* state);
bool ParseUnscopedName(State* state);
bool ParseUnscopedTemplateName(State* state);
bool ParseNestedName(State* state);
bool ParsePrefix(State* state);
bool ParseUnqualifiedName(State* state);
bool ParseSourceName(State* state);
bool ParseLocalSourceName(State* state);
bool ParseNumber(State* state, int* number_out);
bool ParseFloatNumber(State* state);
bool ParseSeqId(State* state);
bool ParseIdentifier(State* state, ssize_t length);
bool ParseAbiTags(State* state);
bool ParseAbiTag(State* state);
bool ParseOperatorName(State* state);
bool ParseSpecialName(State* state);
bool ParseCallOffset(State* state);
bool ParseNVOffset(State* state);
bool ParseVOffset(State* state);
bool ParseCtorDtorName(State* state);
bool ParseType(State* state);
bool ParseCVQualifiers(State* state);
bool ParseBuiltinType(State* state);
bool ParseFunctionType(State* state);
bool ParseBareFunctionType(State* state);
bool ParseClassEnumType(State* state);
bool ParseArrayType(State* state);
bool ParsePointerToMemberType(State* state);
bool ParseTemplateParam(State* state);
bool ParseTemplateTemplateParam(State* state);
bool ParseTemplateArgs(State* state);
bool ParseTemplateArg(State* state);
bool ParseExpression(State* state);
bool ParseExprPrimary(State* state);
bool ParseLocalName(State* state);
bool ParseDiscriminator(State* state);
bool ParseSubstitution(State* state);
bool ParseMangledName(State* state) {
return ParseTwoCharToken(state, "_Z") && ParseEncoding(state);
}
bool ParseEncoding(State* state) {
State copy = *state;
if (ParseName(state) && ParseBareFunctionType(state)) {
return true;
}
*state = copy;
if (ParseName(state) || ParseSpecialName(state)) {
return true;
}
return false;
}
bool ParseName(State* state) {
if (ParseNestedName(state) || ParseLocalName(state)) {
return true;
}
State copy = *state;
if (ParseUnscopedTemplateName(state) && ParseTemplateArgs(state)) {
return true;
}
*state = copy;
if (ParseUnscopedName(state)) {
return true;
}
return false;
}
bool ParseUnscopedName(State* state) {
if (ParseUnqualifiedName(state)) {
return true;
}
State copy = *state;
if (ParseTwoCharToken(state, "St") && MaybeAppend(state, "std::") &&
ParseUnqualifiedName(state)) {
return true;
}
*state = copy;
return false;
}
bool ParseUnscopedTemplateName(State* state) {
return ParseUnscopedName(state) || ParseSubstitution(state);
}
bool ParseNestedName(State* state) {
State copy = *state;
if (ParseOneCharToken(state, 'N') && EnterNestedName(state) &&
Optional(ParseCVQualifiers(state)) && ParsePrefix(state) &&
LeaveNestedName(state, copy.nest_level) &&
ParseOneCharToken(state, 'E')) {
return true;
}
*state = copy;
return false;
}
bool ParsePrefix(State* state) {
bool has_something = false;
while (true) {
MaybeAppendSeparator(state);
if (ParseTemplateParam(state) || ParseSubstitution(state) ||
ParseUnscopedName(state)) {
has_something = true;
MaybeIncreaseNestLevel(state);
continue;
}
MaybeCancelLastSeparator(state);
if (has_something && ParseTemplateArgs(state)) {
return ParsePrefix(state);
} else {
break;
}
}
return true;
}
bool ParseUnqualifiedName(State* state) {
return (ParseOperatorName(state) || ParseCtorDtorName(state) ||
(ParseSourceName(state) && Optional(ParseAbiTags(state))) ||
(ParseLocalSourceName(state) && Optional(ParseAbiTags(state))));
}
bool ParseSourceName(State* state) {
State copy = *state;
int length = -1;
if (ParseNumber(state, &length) && ParseIdentifier(state, length)) {
return true;
}
*state = copy;
return false;
}
bool ParseLocalSourceName(State* state) {
State copy = *state;
if (ParseOneCharToken(state, 'L') && ParseSourceName(state) &&
Optional(ParseDiscriminator(state))) {
return true;
}
*state = copy;
return false;
}
bool ParseNumber(State* state, int* number_out) {
int sign = 1;
if (ParseOneCharToken(state, 'n')) {
sign = -1;
}
const char* p = state->mangled_cur;
int number = 0;
constexpr int int_max_by_10 = std::numeric_limits<int>::max() / 10;
for (; *p != '\0'; ++p) {
if (IsDigit(*p)) {
if (number > int_max_by_10) {
return false;
}
const int digit = *p - '0';
const int shifted = number * 10;
if (digit > std::numeric_limits<int>::max() - shifted) {
return false;
}
number = shifted + digit;
} else {
break;
}
}
if (p != state->mangled_cur) {
state->mangled_cur = p;
if (number_out != nullptr) {
*number_out = number * sign;
}
return true;
}
return false;
}
bool ParseFloatNumber(State* state) {
const char* p = state->mangled_cur;
for (; *p != '\0'; ++p) {
if (!IsDigit(*p) && !(*p >= 'a' && *p <= 'f')) {
break;
}
}
if (p != state->mangled_cur) {
state->mangled_cur = p;
return true;
}
return false;
}
bool ParseSeqId(State* state) {
const char* p = state->mangled_cur;
for (; *p != '\0'; ++p) {
if (!IsDigit(*p) && !(*p >= 'A' && *p <= 'Z')) {
break;
}
}
if (p != state->mangled_cur) {
state->mangled_cur = p;
return true;
}
return false;
}
bool ParseIdentifier(State* state, ssize_t length) {
if (length == -1 || !AtLeastNumCharsRemaining(state->mangled_cur, length)) {
return false;
}
if (IdentifierIsAnonymousNamespace(state, length)) {
MaybeAppend(state, "(anonymous namespace)");
} else {
MaybeAppendWithLength(state, state->mangled_cur, length);
}
if (length < 0 ||
static_cast<std::size_t>(length) > StrLen(state->mangled_cur)) {
return false;
}
state->mangled_cur += length;
return true;
}
bool ParseAbiTags(State* state) {
State copy = *state;
DisableAppend(state);
if (OneOrMore(ParseAbiTag, state)) {
RestoreAppend(state, copy.append);
return true;
}
*state = copy;
return false;
}
bool ParseAbiTag(State* state) {
return ParseOneCharToken(state, 'B') && ParseSourceName(state);
}
bool ParseOperatorName(State* state) {
if (!AtLeastNumCharsRemaining(state->mangled_cur, 2)) {
return false;
}
State copy = *state;
if (ParseTwoCharToken(state, "cv") && MaybeAppend(state, "operator ") &&
EnterNestedName(state) && ParseType(state) &&
LeaveNestedName(state, copy.nest_level)) {
return true;
}
*state = copy;
if (ParseOneCharToken(state, 'v') && ParseCharClass(state, "0123456789") &&
ParseSourceName(state)) {
return true;
}
*state = copy;
if (!(IsLower(state->mangled_cur[0]) && IsAlpha(state->mangled_cur[1]))) {
return false;
}
const AbbrevPair* p;
for (p = kOperatorList; p->abbrev != nullptr; ++p) {
if (state->mangled_cur[0] == p->abbrev[0] &&
state->mangled_cur[1] == p->abbrev[1]) {
MaybeAppend(state, "operator");
if (IsLower(*p->real_name)) {
MaybeAppend(state, " ");
}
MaybeAppend(state, p->real_name);
state->mangled_cur += 2;
return true;
}
}
return false;
}
bool ParseSpecialName(State* state) {
State copy = *state;
if (ParseOneCharToken(state, 'T') && ParseCharClass(state, "VTIS") &&
ParseType(state)) {
return true;
}
*state = copy;
if (ParseTwoCharToken(state, "Tc") && ParseCallOffset(state) &&
ParseCallOffset(state) && ParseEncoding(state)) {
return true;
}
*state = copy;
if (ParseTwoCharToken(state, "GV") && ParseName(state)) {
return true;
}
*state = copy;
if (ParseOneCharToken(state, 'T') && ParseCallOffset(state) &&
ParseEncoding(state)) {
return true;
}
*state = copy;
if (ParseTwoCharToken(state, "TC") && ParseType(state) &&
ParseNumber(state, nullptr) && ParseOneCharToken(state, '_') &&
DisableAppend(state) && ParseType(state)) {
RestoreAppend(state, copy.append);
return true;
}
*state = copy;
if (ParseOneCharToken(state, 'T') && ParseCharClass(state, "FJ") &&
ParseType(state)) {
return true;
}
*state = copy;
if (ParseTwoCharToken(state, "GR") && ParseName(state)) {
return true;
}
*state = copy;
if (ParseTwoCharToken(state, "GA") && ParseEncoding(state)) {
return true;
}
*state = copy;
if (ParseOneCharToken(state, 'T') && ParseCharClass(state, "hv") &&
ParseCallOffset(state) && ParseEncoding(state)) {
return true;
}
*state = copy;
return false;
}
bool ParseCallOffset(State* state) {
State copy = *state;
if (ParseOneCharToken(state, 'h') && ParseNVOffset(state) &&
ParseOneCharToken(state, '_')) {
return true;
}
*state = copy;
if (ParseOneCharToken(state, 'v') && ParseVOffset(state) &&
ParseOneCharToken(state, '_')) {
return true;
}
*state = copy;
return false;
}
bool ParseNVOffset(State* state) { return ParseNumber(state, nullptr); }
bool ParseVOffset(State* state) {
State copy = *state;
if (ParseNumber(state, nullptr) && ParseOneCharToken(state, '_') &&
ParseNumber(state, nullptr)) {
return true;
}
*state = copy;
return false;
}
bool ParseCtorDtorName(State* state) {
State copy = *state;
if (ParseOneCharToken(state, 'C') && ParseCharClass(state, "123")) {
const char* const prev_name = state->prev_name;
const ssize_t prev_name_length = state->prev_name_length;
MaybeAppendWithLength(state, prev_name, prev_name_length);
return true;
}
*state = copy;
if (ParseOneCharToken(state, 'D') && ParseCharClass(state, "012")) {
const char* const prev_name = state->prev_name;
const ssize_t prev_name_length = state->prev_name_length;
MaybeAppend(state, "~");
MaybeAppendWithLength(state, prev_name, prev_name_length);
return true;
}
*state = copy;
return false;
}
bool ParseType(State* state) {
State copy = *state;
if (ParseCVQualifiers(state) && ParseType(state)) {
return true;
}
*state = copy;
if (ParseCharClass(state, "OPRCG") && ParseType(state)) {
return true;
}
*state = copy;
if (ParseTwoCharToken(state, "Dp") && ParseType(state)) {
return true;
}
*state = copy;
if (ParseOneCharToken(state, 'D') && ParseCharClass(state, "tT") &&
ParseExpression(state) && ParseOneCharToken(state, 'E')) {
return true;
}
*state = copy;
if (ParseOneCharToken(state, 'U') && ParseSourceName(state) &&
ParseType(state)) {
return true;
}
*state = copy;
if (ParseBuiltinType(state) || ParseFunctionType(state) ||
ParseClassEnumType(state) || ParseArrayType(state) ||
ParsePointerToMemberType(state) || ParseSubstitution(state)) {
return true;
}
if (ParseTemplateTemplateParam(state) && ParseTemplateArgs(state)) {
return true;
}
*state = copy;
if (ParseTemplateParam(state)) {
return true;
}
return false;
}
bool ParseCVQualifiers(State* state) {
int num_cv_qualifiers = 0;
num_cv_qualifiers += ParseOneCharToken(state, 'r');
num_cv_qualifiers += ParseOneCharToken(state, 'V');
num_cv_qualifiers += ParseOneCharToken(state, 'K');
return num_cv_qualifiers > 0;
}
bool ParseBuiltinType(State* state) {
const AbbrevPair* p;
for (p = kBuiltinTypeList; p->abbrev != nullptr; ++p) {
if (state->mangled_cur[0] == p->abbrev[0]) {
MaybeAppend(state, p->real_name);
++state->mangled_cur;
return true;
}
}
State copy = *state;
if (ParseOneCharToken(state, 'u') && ParseSourceName(state)) {
return true;
}
*state = copy;
return false;
}
bool ParseFunctionType(State* state) {
State copy = *state;
if (ParseOneCharToken(state, 'F') &&
Optional(ParseOneCharToken(state, 'Y')) && ParseBareFunctionType(state) &&
ParseOneCharToken(state, 'E')) {
return true;
}
*state = copy;
return false;
}
bool ParseBareFunctionType(State* state) {
State copy = *state;
DisableAppend(state);
if (OneOrMore(ParseType, state)) {
RestoreAppend(state, copy.append);
MaybeAppend(state, "()");
return true;
}
*state = copy;
return false;
}
bool ParseClassEnumType(State* state) { return ParseName(state); }
bool ParseArrayType(State* state) {
State copy = *state;
if (ParseOneCharToken(state, 'A') && ParseNumber(state, nullptr) &&
ParseOneCharToken(state, '_') && ParseType(state)) {
return true;
}
*state = copy;
if (ParseOneCharToken(state, 'A') && Optional(ParseExpression(state)) &&
ParseOneCharToken(state, '_') && ParseType(state)) {
return true;
}
*state = copy;
return false;
}
bool ParsePointerToMemberType(State* state) {
State copy = *state;
if (ParseOneCharToken(state, 'M') && ParseType(state) && ParseType(state)) {
return true;
}
*state = copy;
return false;
}
bool ParseTemplateParam(State* state) {
if (ParseTwoCharToken(state, "T_")) {
MaybeAppend(state, "?");
return true;
}
State copy = *state;
if (ParseOneCharToken(state, 'T') && ParseNumber(state, nullptr) &&
ParseOneCharToken(state, '_')) {
MaybeAppend(state, "?");
return true;
}
*state = copy;
return false;
}
bool ParseTemplateTemplateParam(State* state) {
return (ParseTemplateParam(state) || ParseSubstitution(state));
}
bool ParseTemplateArgs(State* state) {
State copy = *state;
DisableAppend(state);
if (ParseOneCharToken(state, 'I') && OneOrMore(ParseTemplateArg, state) &&
ParseOneCharToken(state, 'E')) {
RestoreAppend(state, copy.append);
MaybeAppend(state, "<>");
return true;
}
*state = copy;
return false;
}
bool ParseTemplateArg(State* state) {
constexpr uint32 max_levels = 6;
if (state->arg_level > max_levels) {
return false;
}
++state->arg_level;
State copy = *state;
if ((ParseOneCharToken(state, 'I') || ParseOneCharToken(state, 'J')) &&
ZeroOrMore(ParseTemplateArg, state) && ParseOneCharToken(state, 'E')) {
--state->arg_level;
return true;
}
*state = copy;
if (ParseType(state) || ParseExprPrimary(state)) {
--state->arg_level;
return true;
}
*state = copy;
if (ParseOneCharToken(state, 'X') && ParseExpression(state) &&
ParseOneCharToken(state, 'E')) {
--state->arg_level;
return true;
}
*state = copy;
return false;
}
bool ParseExpression(State* state) {
if (ParseTemplateParam(state) || ParseExprPrimary(state)) {
return true;
}
constexpr uint32 max_levels = 5;
if (state->expr_level > max_levels) {
return false;
}
++state->expr_level;
State copy = *state;
if (ParseOperatorName(state) && ParseExpression(state) &&
ParseExpression(state) && ParseExpression(state)) {
--state->expr_level;
return true;
}
*state = copy;
if (ParseOperatorName(state) && ParseExpression(state) &&
ParseExpression(state)) {
--state->expr_level;
return true;
}
*state = copy;
if (ParseOperatorName(state) && ParseExpression(state)) {
--state->expr_level;
return true;
}
*state = copy;
if (ParseTwoCharToken(state, "st") && ParseType(state)) {
return true;
--state->expr_level;
}
*state = copy;
if (ParseTwoCharToken(state, "sr") && ParseType(state) &&
ParseUnqualifiedName(state) && ParseTemplateArgs(state)) {
--state->expr_level;
return true;
}
*state = copy;
if (ParseTwoCharToken(state, "sr") && ParseType(state) &&
ParseUnqualifiedName(state)) {
--state->expr_level;
return true;
}
*state = copy;
if (ParseTwoCharToken(state, "sp") && ParseType(state)) {
--state->expr_level;
return true;
}
*state = copy;
return false;
}
bool ParseExprPrimary(State* state) {
State copy = *state;
if (ParseOneCharToken(state, 'L') && ParseType(state) &&
ParseNumber(state, nullptr) && ParseOneCharToken(state, 'E')) {
return true;
}
*state = copy;
if (ParseOneCharToken(state, 'L') && ParseType(state) &&
ParseFloatNumber(state) && ParseOneCharToken(state, 'E')) {
return true;
}
*state = copy;
if (ParseOneCharToken(state, 'L') && ParseMangledName(state) &&
ParseOneCharToken(state, 'E')) {
return true;
}
*state = copy;
if (ParseTwoCharToken(state, "LZ") && ParseEncoding(state) &&
ParseOneCharToken(state, 'E')) {
return true;
}
*state = copy;
return false;
}
bool ParseLocalName(State* state) {
constexpr uint32 max_levels = 5;
if (state->local_level > max_levels) {
return false;
}
++state->local_level;
State copy = *state;
if (ParseOneCharToken(state, 'Z') && ParseEncoding(state) &&
ParseOneCharToken(state, 'E') && MaybeAppend(state, "::") &&
ParseName(state) && Optional(ParseDiscriminator(state))) {
--state->local_level;
return true;
}
*state = copy;
if (ParseOneCharToken(state, 'Z') && ParseEncoding(state) &&
ParseTwoCharToken(state, "Es") && Optional(ParseDiscriminator(state))) {
--state->local_level;
return true;
}
*state = copy;
return false;
}
bool ParseDiscriminator(State* state) {
State copy = *state;
if (ParseOneCharToken(state, '_') && ParseNumber(state, nullptr)) {
return true;
}
*state = copy;
return false;
}
bool ParseSubstitution(State* state) {
if (ParseTwoCharToken(state, "S_")) {
MaybeAppend(state, "?");
return true;
}
State copy = *state;
if (ParseOneCharToken(state, 'S') && ParseSeqId(state) &&
ParseOneCharToken(state, '_')) {
MaybeAppend(state, "?");
return true;
}
*state = copy;
if (ParseOneCharToken(state, 'S')) {
const AbbrevPair* p;
for (p = kSubstitutionList; p->abbrev != nullptr; ++p) {
if (state->mangled_cur[0] == p->abbrev[1]) {
MaybeAppend(state, "std");
if (p->real_name[0] != '\0') {
MaybeAppend(state, "::");
MaybeAppend(state, p->real_name);
}
++state->mangled_cur;
return true;
}
}
}
*state = copy;
return false;
}
bool ParseTopLevelMangledName(State* state) {
if (ParseMangledName(state)) {
if (state->mangled_cur[0] != '\0') {
if (IsFunctionCloneSuffix(state->mangled_cur)) {
return true;
}
if (state->mangled_cur[0] == '@') {
MaybeAppend(state, state->mangled_cur);
return true;
}
return ParseName(state);
}
return true;
}
return false;
}
}
#endif
bool Demangle(const char* mangled, char* out, size_t out_size) {
#if defined(GLOG_OS_WINDOWS)
# if defined(HAVE_DBGHELP)
char buffer[1024];
const char* lparen = strchr(mangled, '(');
if (lparen) {
const char* rparen = strchr(lparen, ')');
size_t length = static_cast<size_t>(rparen - lparen) - 1;
strncpy(buffer, lparen + 1, length);
buffer[length] = '\0';
mangled = buffer;
}
return UnDecorateSymbolName(mangled, out, out_size, UNDNAME_COMPLETE);
# else
(void)mangled;
(void)out;
(void)out_size;
return false;
# endif
#elif defined(HAVE___CXA_DEMANGLE)
int status = -1;
std::size_t n = 0;
std::unique_ptr<char, decltype(&std::free)> unmangled{
abi::__cxa_demangle(mangled, nullptr, &n, &status), &std::free};
if (!unmangled) {
return false;
}
std::copy_n(unmangled.get(), std::min(n, out_size), out);
return status == 0;
#else
State state;
InitState(&state, mangled, out, out_size);
return ParseTopLevelMangledName(&state) && !state.overflowed;
#endif
}
}
} | #include "demangle.h"
#include <fstream>
#include <iostream>
#include <string>
#include "config.h"
#include "glog/logging.h"
#include "googletest.h"
#include "utilities.h"
#ifdef GLOG_USE_GFLAGS
# include <gflags/gflags.h>
using namespace GFLAGS_NAMESPACE;
#endif
GLOG_DEFINE_bool(demangle_filter, false,
"Run demangle_unittest in filter mode");
using namespace std;
using namespace google;
static const char* DemangleIt(const char* const mangled) {
static char demangled[4096];
if (Demangle(mangled, demangled, sizeof(demangled))) {
return demangled;
} else {
return mangled;
}
}
#if defined(GLOG_OS_WINDOWS)
# if defined(HAVE_DBGHELP) && !defined(NDEBUG)
TEST(Demangle, Windows) {
EXPECT_STREQ("public: static void __cdecl Foo::func(int)",
DemangleIt("?func@Foo@@SAXH@Z"));
EXPECT_STREQ("public: static void __cdecl Foo::func(int)",
DemangleIt("@ILT+1105(?func@Foo@@SAXH@Z)"));
EXPECT_STREQ("int __cdecl foobarArray(int * const)",
DemangleIt("?foobarArray@@YAHQAH@Z"));
}
# endif
#else
TEST(Demangle, CornerCases) {
const size_t size = 10;
char tmp[size] = {0};
const char* demangled = "foobar()";
const char* mangled = "_Z6foobarv";
EXPECT_TRUE(Demangle(mangled, tmp, sizeof(tmp)));
EXPECT_STREQ(demangled, tmp);
EXPECT_TRUE(Demangle(mangled, tmp, size - 1));
EXPECT_STREQ(demangled, tmp);
EXPECT_FALSE(Demangle(mangled, tmp, size - 2));
EXPECT_FALSE(Demangle(mangled, tmp, 1));
EXPECT_FALSE(Demangle(mangled, tmp, 0));
EXPECT_FALSE(Demangle(mangled, nullptr, 0));
}
TEST(Demangle, Clones) {
char tmp[20];
EXPECT_TRUE(Demangle("_ZL3Foov", tmp, sizeof(tmp)));
EXPECT_STREQ("Foo()", tmp);
EXPECT_TRUE(Demangle("_ZL3Foov.clone.3", tmp, sizeof(tmp)));
EXPECT_STREQ("Foo()", tmp);
EXPECT_TRUE(Demangle("_ZL3Foov.constprop.80", tmp, sizeof(tmp)));
EXPECT_STREQ("Foo()", tmp);
EXPECT_TRUE(Demangle("_ZL3Foov.isra.18", tmp, sizeof(tmp)));
EXPECT_STREQ("Foo()", tmp);
EXPECT_TRUE(Demangle("_ZL3Foov.isra.2.constprop.18", tmp, sizeof(tmp)));
EXPECT_STREQ("Foo()", tmp);
EXPECT_FALSE(Demangle("_ZL3Foov.clo", tmp, sizeof(tmp)));
EXPECT_FALSE(Demangle("_ZL3Foov.clone.", tmp, sizeof(tmp)));
EXPECT_FALSE(Demangle("_ZL3Foov.clone.foo", tmp, sizeof(tmp)));
EXPECT_FALSE(Demangle("_ZL3Foov.isra.2.constprop.", tmp, sizeof(tmp)));
}
TEST(Demangle, FromFile) {
string test_file = FLAGS_test_srcdir + "/src/demangle_unittest.txt";
ifstream f(test_file.c_str());
EXPECT_FALSE(f.fail());
string line;
while (getline(f, line)) {
if (line.empty() || line[0] == '#') {
continue;
}
string::size_type tab_pos = line.find('\t');
EXPECT_NE(string::npos, tab_pos);
string mangled = line.substr(0, tab_pos);
string demangled = line.substr(tab_pos + 1);
EXPECT_EQ(demangled, DemangleIt(mangled.c_str()));
}
}
#endif
int main(int argc, char** argv) {
InitGoogleTest(&argc, argv);
#ifdef GLOG_USE_GFLAGS
ParseCommandLineFlags(&argc, &argv, true);
#endif
FLAGS_logtostderr = true;
InitGoogleLogging(argv[0]);
if (FLAGS_demangle_filter) {
string line;
while (getline(cin, line, '\n')) {
cout << DemangleIt(line.c_str()) << endl;
}
return 0;
} else if (argc > 1) {
cout << DemangleIt(argv[1]) << endl;
return 0;
} else {
return RUN_ALL_TESTS();
}
} | https://github.com/google/glog/blob/de309c08c05382fee0792380de7df1bd65332da2/src/demangle.cc | https://github.com/google/glog/blob/de309c08c05382fee0792380de7df1bd65332da2/src/demangle_unittest.cc | de309c08c05382fee0792380de7df1bd65332da2 |
6abeb031-0f9b-4d31-b673-75daf31fd982 | cpp | google/glog | stacktrace | src/stacktrace.cc | src/stacktrace_unittest.cc | #include "stacktrace.h"
#if defined(STACKTRACE_H)
# include STACKTRACE_H
#endif | #include "stacktrace.h"
#include <cstdio>
#include <cstdlib>
#include "base/commandlineflags.h"
#include "config.h"
#include "glog/logging.h"
#include "utilities.h"
#ifdef HAVE_EXECINFO_BACKTRACE_SYMBOLS
# include <execinfo.h>
#endif
#ifdef HAVE_STACKTRACE
const int BACKTRACE_STEPS = 6;
struct AddressRange {
const void *start, *end;
};
AddressRange expected_range[BACKTRACE_STEPS];
# if __GNUC__
# define INIT_ADDRESS_RANGE(fn, start_label, end_label, prange) \
do { \
(prange)->start = &&start_label; \
(prange)->end = &&end_label; \
CHECK_LT((prange)->start, (prange)->end); \
} while (0)
# define DECLARE_ADDRESS_LABEL(a_label) \
a_label: \
do { \
__asm__ __volatile__(""); \
} while (0)
# define ADJUST_ADDRESS_RANGE_FROM_RA(prange) \
do { \
void* ra = __builtin_return_address(0); \
CHECK_LT((prange)->start, ra); \
if (ra > (prange)->end) { \
printf("Adjusting range from %p..%p to %p..%p\n", (prange)->start, \
(prange)->end, (prange)->start, ra); \
(prange)->end = ra; \
} \
} while (0)
# else
# define INIT_ADDRESS_RANGE(fn, start_label, end_label, prange) \
do { \
(prange)->start = reinterpret_cast<const void*>(&fn); \
(prange)->end = reinterpret_cast<const char*>(&fn) + 256; \
} while (0)
# define DECLARE_ADDRESS_LABEL(a_label) \
do { \
} while (0)
# define ADJUST_ADDRESS_RANGE_FROM_RA(prange) \
do { \
} while (0)
# endif
static void CheckRetAddrIsInFunction(void* ret_addr,
const AddressRange& range) {
CHECK_GE(ret_addr, range.start);
CHECK_LE(ret_addr, range.end);
}
# if defined(__clang__)
# pragma clang diagnostic push
# pragma clang diagnostic ignored "-Wgnu-label-as-value"
# endif
void ATTRIBUTE_NOINLINE CheckStackTrace(int);
static void ATTRIBUTE_NOINLINE CheckStackTraceLeaf() {
const int STACK_LEN = 10;
void* stack[STACK_LEN];
int size;
ADJUST_ADDRESS_RANGE_FROM_RA(&expected_range[1]);
INIT_ADDRESS_RANGE(CheckStackTraceLeaf, start, end, &expected_range[0]);
DECLARE_ADDRESS_LABEL(start);
size = google::GetStackTrace(stack, STACK_LEN, 0);
printf("Obtained %d stack frames.\n", size);
CHECK_GE(size, 1);
CHECK_LE(size, STACK_LEN);
if (true) {
# ifdef HAVE_EXECINFO_BACKTRACE_SYMBOLS
char** strings = backtrace_symbols(stack, size);
printf("Obtained %d stack frames.\n", size);
for (int i = 0; i < size; i++) {
printf("%s %p\n", strings[i], stack[i]);
}
union {
void (*p1)(int);
void* p2;
} p = {&CheckStackTrace};
printf("CheckStackTrace() addr: %p\n", p.p2);
free(strings);
# endif
}
for (int i = 0; i < BACKTRACE_STEPS; i++) {
printf("Backtrace %d: expected: %p..%p actual: %p ... ", i,
expected_range[i].start, expected_range[i].end, stack[i]);
fflush(stdout);
CheckRetAddrIsInFunction(stack[i], expected_range[i]);
printf("OK\n");
}
DECLARE_ADDRESS_LABEL(end);
}
static void ATTRIBUTE_NOINLINE CheckStackTrace4(int i) {
ADJUST_ADDRESS_RANGE_FROM_RA(&expected_range[2]);
INIT_ADDRESS_RANGE(CheckStackTrace4, start, end, &expected_range[1]);
DECLARE_ADDRESS_LABEL(start);
for (int j = i; j >= 0; j--) {
CheckStackTraceLeaf();
}
DECLARE_ADDRESS_LABEL(end);
}
static void ATTRIBUTE_NOINLINE CheckStackTrace3(int i) {
ADJUST_ADDRESS_RANGE_FROM_RA(&expected_range[3]);
INIT_ADDRESS_RANGE(CheckStackTrace3, start, end, &expected_range[2]);
DECLARE_ADDRESS_LABEL(start);
for (int j = i; j >= 0; j--) {
CheckStackTrace4(j);
}
DECLARE_ADDRESS_LABEL(end);
}
static void ATTRIBUTE_NOINLINE CheckStackTrace2(int i) {
ADJUST_ADDRESS_RANGE_FROM_RA(&expected_range[4]);
INIT_ADDRESS_RANGE(CheckStackTrace2, start, end, &expected_range[3]);
DECLARE_ADDRESS_LABEL(start);
for (int j = i; j >= 0; j--) {
CheckStackTrace3(j);
}
DECLARE_ADDRESS_LABEL(end);
}
static void ATTRIBUTE_NOINLINE CheckStackTrace1(int i) {
ADJUST_ADDRESS_RANGE_FROM_RA(&expected_range[5]);
INIT_ADDRESS_RANGE(CheckStackTrace1, start, end, &expected_range[4]);
DECLARE_ADDRESS_LABEL(start);
for (int j = i; j >= 0; j--) {
CheckStackTrace2(j);
}
DECLARE_ADDRESS_LABEL(end);
}
# ifndef __GNUC__
static
# endif
void ATTRIBUTE_NOINLINE
CheckStackTrace(int i) {
INIT_ADDRESS_RANGE(CheckStackTrace, start, end, &expected_range[5]);
DECLARE_ADDRESS_LABEL(start);
for (int j = i; j >= 0; j--) {
CheckStackTrace1(j);
}
DECLARE_ADDRESS_LABEL(end);
}
# if defined(__clang__)
# pragma clang diagnostic pop
# endif
int main(int, char** argv) {
FLAGS_logtostderr = true;
google::InitGoogleLogging(argv[0]);
CheckStackTrace(0);
printf("PASS\n");
return 0;
}
#else
int main() {
# ifdef GLOG_BAZEL_BUILD
printf("HAVE_STACKTRACE is expected to be defined in Bazel tests\n");
exit(EXIT_FAILURE);
# endif
printf("PASS (no stacktrace support)\n");
return 0;
}
#endif | https://github.com/google/glog/blob/de309c08c05382fee0792380de7df1bd65332da2/src/stacktrace.cc | https://github.com/google/glog/blob/de309c08c05382fee0792380de7df1bd65332da2/src/stacktrace_unittest.cc | de309c08c05382fee0792380de7df1bd65332da2 |
511d6642-4f1a-4dc8-bc5f-e3d3f7835350 | cpp | google/glog | mock-log | src/mock-log.h | src/mock-log_unittest.cc | #ifndef GLOG_SRC_MOCK_LOG_H_
#define GLOG_SRC_MOCK_LOG_H_
#include <gmock/gmock.h>
#include <string>
#include "glog/logging.h"
#include "utilities.h"
namespace google {
namespace glog_testing {
class ScopedMockLog : public google::LogSink {
public:
ScopedMockLog() { AddLogSink(this); }
~ScopedMockLog() override { RemoveLogSink(this); }
MOCK_METHOD3(Log,
void(google::LogSeverity severity, const std::string& file_path,
const std::string& message));
private:
void send(google::LogSeverity severity, const char* full_filename,
const char* , int ,
const LogMessageTime& , const char* message,
size_t message_len) override {
message_info_.severity = severity;
message_info_.file_path = full_filename;
message_info_.message = std::string(message, message_len);
}
void WaitTillSent() override {
MessageInfo message_info = message_info_;
Log(message_info.severity, message_info.file_path, message_info.message);
}
struct MessageInfo {
google::LogSeverity severity;
std::string file_path;
std::string message;
};
MessageInfo message_info_;
};
}
}
#endif | #include "mock-log.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <string>
namespace {
using google::GLOG_ERROR;
using google::GLOG_INFO;
using google::GLOG_WARNING;
using google::glog_testing::ScopedMockLog;
using std::string;
using testing::_;
using testing::EndsWith;
using testing::InSequence;
using testing::InvokeWithoutArgs;
TEST(ScopedMockLogTest, InterceptsLog) {
ScopedMockLog log;
InSequence s;
EXPECT_CALL(log,
Log(GLOG_WARNING, EndsWith("mock-log_unittest.cc"), "Fishy."));
EXPECT_CALL(log, Log(GLOG_INFO, _, "Working...")).Times(2);
EXPECT_CALL(log, Log(GLOG_ERROR, _, "Bad!!"));
LOG(WARNING) << "Fishy.";
LOG(INFO) << "Working...";
LOG(INFO) << "Working...";
LOG(ERROR) << "Bad!!";
}
void LogBranch() { LOG(INFO) << "Logging a branch..."; }
void LogTree() { LOG(INFO) << "Logging the whole tree..."; }
void LogForest() {
LOG(INFO) << "Logging the entire forest.";
LOG(INFO) << "Logging the entire forest..";
LOG(INFO) << "Logging the entire forest...";
}
TEST(ScopedMockLogTest, LogDuringIntercept) {
ScopedMockLog log;
InSequence s;
EXPECT_CALL(log, Log(GLOG_INFO, __FILE__, "Logging a branch..."))
.WillOnce(InvokeWithoutArgs(LogTree));
EXPECT_CALL(log, Log(GLOG_INFO, __FILE__, "Logging the whole tree..."))
.WillOnce(InvokeWithoutArgs(LogForest));
EXPECT_CALL(log, Log(GLOG_INFO, __FILE__, "Logging the entire forest."));
EXPECT_CALL(log, Log(GLOG_INFO, __FILE__, "Logging the entire forest.."));
EXPECT_CALL(log, Log(GLOG_INFO, __FILE__, "Logging the entire forest..."));
LogBranch();
}
}
int main(int argc, char** argv) {
google::InitGoogleLogging(argv[0]);
testing::InitGoogleTest(&argc, argv);
testing::InitGoogleMock(&argc, argv);
return RUN_ALL_TESTS();
} | https://github.com/google/glog/blob/de309c08c05382fee0792380de7df1bd65332da2/src/mock-log.h | https://github.com/google/glog/blob/de309c08c05382fee0792380de7df1bd65332da2/src/mock-log_unittest.cc | de309c08c05382fee0792380de7df1bd65332da2 |
9855c1c7-1951-4105-a993-5090a6604b71 | cpp | google/glog | stl_logging | src/glog/stl_logging.h | src/stl_logging_unittest.cc | #ifndef GLOG_STL_LOGGING_H
#define GLOG_STL_LOGGING_H
#include <deque>
#include <list>
#include <map>
#include <ostream>
#include <set>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
template <class First, class Second>
std::ostream& operator<<(std::ostream& out, const std::pair<First, Second>& p);
namespace google {
template <class Iter>
void PrintSequence(std::ostream& out, Iter begin, Iter end);
}
#define OUTPUT_TWO_ARG_CONTAINER(Sequence) \
template <class T1, class T2> \
inline std::ostream& operator<<(std::ostream& out, \
const Sequence<T1, T2>& seq) { \
google::PrintSequence(out, seq.begin(), seq.end()); \
return out; \
}
OUTPUT_TWO_ARG_CONTAINER(std::vector)
OUTPUT_TWO_ARG_CONTAINER(std::deque)
OUTPUT_TWO_ARG_CONTAINER(std::list)
#undef OUTPUT_TWO_ARG_CONTAINER
#define OUTPUT_THREE_ARG_CONTAINER(Sequence) \
template <class T1, class T2, class T3> \
inline std::ostream& operator<<(std::ostream& out, \
const Sequence<T1, T2, T3>& seq) { \
google::PrintSequence(out, seq.begin(), seq.end()); \
return out; \
}
OUTPUT_THREE_ARG_CONTAINER(std::set)
OUTPUT_THREE_ARG_CONTAINER(std::multiset)
#undef OUTPUT_THREE_ARG_CONTAINER
#define OUTPUT_FOUR_ARG_CONTAINER(Sequence) \
template <class T1, class T2, class T3, class T4> \
inline std::ostream& operator<<(std::ostream& out, \
const Sequence<T1, T2, T3, T4>& seq) { \
google::PrintSequence(out, seq.begin(), seq.end()); \
return out; \
}
OUTPUT_FOUR_ARG_CONTAINER(std::map)
OUTPUT_FOUR_ARG_CONTAINER(std::multimap)
OUTPUT_FOUR_ARG_CONTAINER(std::unordered_set)
OUTPUT_FOUR_ARG_CONTAINER(std::unordered_multiset)
#undef OUTPUT_FOUR_ARG_CONTAINER
#define OUTPUT_FIVE_ARG_CONTAINER(Sequence) \
template <class T1, class T2, class T3, class T4, class T5> \
inline std::ostream& operator<<(std::ostream& out, \
const Sequence<T1, T2, T3, T4, T5>& seq) { \
google::PrintSequence(out, seq.begin(), seq.end()); \
return out; \
}
OUTPUT_FIVE_ARG_CONTAINER(std::unordered_map)
OUTPUT_FIVE_ARG_CONTAINER(std::unordered_multimap)
#undef OUTPUT_FIVE_ARG_CONTAINER
template <class First, class Second>
inline std::ostream& operator<<(std::ostream& out,
const std::pair<First, Second>& p) {
out << '(' << p.first << ", " << p.second << ')';
return out;
}
namespace google {
template <class Iter>
inline void PrintSequence(std::ostream& out, Iter begin, Iter end) {
for (int i = 0; begin != end && i < 100; ++i, ++begin) {
if (i > 0) out << ' ';
out << *begin;
}
if (begin != end) {
out << " ...";
}
}
}
namespace std {
using ::operator<<;
}
#endif | #include "glog/stl_logging.h"
#include <functional>
#include <iostream>
#include <map>
#include <ostream>
#include <string>
#include <vector>
#include "config.h"
#include "glog/logging.h"
#include "googletest.h"
using namespace std;
struct user_hash {
size_t operator()(int x) const { return static_cast<size_t>(x); }
};
static void TestSTLLogging() {
{
vector<int> v;
v.push_back(10);
v.push_back(20);
v.push_back(30);
ostringstream ss;
ss << v;
EXPECT_EQ(ss.str(), "10 20 30");
vector<int> copied_v(v);
CHECK_EQ(v, copied_v);
}
{
map<int, string> m;
m[20] = "twenty";
m[10] = "ten";
m[30] = "thirty";
ostringstream ss;
ss << m;
EXPECT_EQ(ss.str(), "(10, ten) (20, twenty) (30, thirty)");
map<int, string> copied_m(m);
CHECK_EQ(m, copied_m);
}
{
vector<int> v;
string expected;
for (int i = 0; i < 100; i++) {
v.push_back(i);
if (i > 0) expected += ' ';
const size_t buf_size = 256;
char buf[buf_size];
std::snprintf(buf, buf_size, "%d", i);
expected += buf;
}
v.push_back(100);
expected += " ...";
ostringstream ss;
ss << v;
CHECK_EQ(ss.str(), expected.c_str());
}
{
map<int, string, greater<>> m;
m[20] = "twenty";
m[10] = "ten";
m[30] = "thirty";
ostringstream ss;
ss << m;
EXPECT_EQ(ss.str(), "(30, thirty) (20, twenty) (10, ten)");
map<int, string, greater<>> copied_m(m);
CHECK_EQ(m, copied_m);
}
}
int main(int, char**) {
TestSTLLogging();
std::cout << "PASS\n";
return 0;
} | https://github.com/google/glog/blob/de309c08c05382fee0792380de7df1bd65332da2/src/glog/stl_logging.h | https://github.com/google/glog/blob/de309c08c05382fee0792380de7df1bd65332da2/src/stl_logging_unittest.cc | de309c08c05382fee0792380de7df1bd65332da2 |
f7d4da3e-9d3d-48d1-90e0-2c36aa6ca297 | cpp | google/leveldb | status | util/status.cc | util/status_test.cc | #include "leveldb/status.h"
#include <cstdio>
#include "port/port.h"
namespace leveldb {
const char* Status::CopyState(const char* state) {
uint32_t size;
std::memcpy(&size, state, sizeof(size));
char* result = new char[size + 5];
std::memcpy(result, state, size + 5);
return result;
}
Status::Status(Code code, const Slice& msg, const Slice& msg2) {
assert(code != kOk);
const uint32_t len1 = static_cast<uint32_t>(msg.size());
const uint32_t len2 = static_cast<uint32_t>(msg2.size());
const uint32_t size = len1 + (len2 ? (2 + len2) : 0);
char* result = new char[size + 5];
std::memcpy(result, &size, sizeof(size));
result[4] = static_cast<char>(code);
std::memcpy(result + 5, msg.data(), len1);
if (len2) {
result[5 + len1] = ':';
result[6 + len1] = ' ';
std::memcpy(result + 7 + len1, msg2.data(), len2);
}
state_ = result;
}
std::string Status::ToString() const {
if (state_ == nullptr) {
return "OK";
} else {
char tmp[30];
const char* type;
switch (code()) {
case kOk:
type = "OK";
break;
case kNotFound:
type = "NotFound: ";
break;
case kCorruption:
type = "Corruption: ";
break;
case kNotSupported:
type = "Not implemented: ";
break;
case kInvalidArgument:
type = "Invalid argument: ";
break;
case kIOError:
type = "IO error: ";
break;
default:
std::snprintf(tmp, sizeof(tmp),
"Unknown code(%d): ", static_cast<int>(code()));
type = tmp;
break;
}
std::string result(type);
uint32_t length;
std::memcpy(&length, state_, sizeof(length));
result.append(state_ + 5, length);
return result;
}
}
} | #include "leveldb/status.h"
#include <utility>
#include "gtest/gtest.h"
#include "leveldb/slice.h"
namespace leveldb {
TEST(Status, MoveConstructor) {
{
Status ok = Status::OK();
Status ok2 = std::move(ok);
ASSERT_TRUE(ok2.ok());
}
{
Status status = Status::NotFound("custom NotFound status message");
Status status2 = std::move(status);
ASSERT_TRUE(status2.IsNotFound());
ASSERT_EQ("NotFound: custom NotFound status message", status2.ToString());
}
{
Status self_moved = Status::IOError("custom IOError status message");
Status& self_moved_reference = self_moved;
self_moved_reference = std::move(self_moved);
}
}
} | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/util/status.cc | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/util/status_test.cc | 23e35d792b9154f922b8b575b12596a4d8664c65 |
70594a59-cbf4-48c5-b398-788d2ee64d20 | cpp | google/leveldb | arena | util/arena.cc | util/arena_test.cc | #include "util/arena.h"
namespace leveldb {
static const int kBlockSize = 4096;
Arena::Arena()
: alloc_ptr_(nullptr), alloc_bytes_remaining_(0), memory_usage_(0) {}
Arena::~Arena() {
for (size_t i = 0; i < blocks_.size(); i++) {
delete[] blocks_[i];
}
}
char* Arena::AllocateFallback(size_t bytes) {
if (bytes > kBlockSize / 4) {
char* result = AllocateNewBlock(bytes);
return result;
}
alloc_ptr_ = AllocateNewBlock(kBlockSize);
alloc_bytes_remaining_ = kBlockSize;
char* result = alloc_ptr_;
alloc_ptr_ += bytes;
alloc_bytes_remaining_ -= bytes;
return result;
}
char* Arena::AllocateAligned(size_t bytes) {
const int align = (sizeof(void*) > 8) ? sizeof(void*) : 8;
static_assert((align & (align - 1)) == 0,
"Pointer size should be a power of 2");
size_t current_mod = reinterpret_cast<uintptr_t>(alloc_ptr_) & (align - 1);
size_t slop = (current_mod == 0 ? 0 : align - current_mod);
size_t needed = bytes + slop;
char* result;
if (needed <= alloc_bytes_remaining_) {
result = alloc_ptr_ + slop;
alloc_ptr_ += needed;
alloc_bytes_remaining_ -= needed;
} else {
result = AllocateFallback(bytes);
}
assert((reinterpret_cast<uintptr_t>(result) & (align - 1)) == 0);
return result;
}
char* Arena::AllocateNewBlock(size_t block_bytes) {
char* result = new char[block_bytes];
blocks_.push_back(result);
memory_usage_.fetch_add(block_bytes + sizeof(char*),
std::memory_order_relaxed);
return result;
}
} | #include "util/arena.h"
#include "gtest/gtest.h"
#include "util/random.h"
namespace leveldb {
TEST(ArenaTest, Empty) { Arena arena; }
TEST(ArenaTest, Simple) {
std::vector<std::pair<size_t, char*>> allocated;
Arena arena;
const int N = 100000;
size_t bytes = 0;
Random rnd(301);
for (int i = 0; i < N; i++) {
size_t s;
if (i % (N / 10) == 0) {
s = i;
} else {
s = rnd.OneIn(4000)
? rnd.Uniform(6000)
: (rnd.OneIn(10) ? rnd.Uniform(100) : rnd.Uniform(20));
}
if (s == 0) {
s = 1;
}
char* r;
if (rnd.OneIn(10)) {
r = arena.AllocateAligned(s);
} else {
r = arena.Allocate(s);
}
for (size_t b = 0; b < s; b++) {
r[b] = i % 256;
}
bytes += s;
allocated.push_back(std::make_pair(s, r));
ASSERT_GE(arena.MemoryUsage(), bytes);
if (i > N / 10) {
ASSERT_LE(arena.MemoryUsage(), bytes * 1.10);
}
}
for (size_t i = 0; i < allocated.size(); i++) {
size_t num_bytes = allocated[i].first;
const char* p = allocated[i].second;
for (size_t b = 0; b < num_bytes; b++) {
ASSERT_EQ(int(p[b]) & 0xff, i % 256);
}
}
}
} | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/util/arena.cc | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/util/arena_test.cc | 23e35d792b9154f922b8b575b12596a4d8664c65 |
868102d5-3fce-4df6-ae58-7cc321b9294f | cpp | google/leveldb | bloom | util/bloom.cc | util/bloom_test.cc | #include "leveldb/filter_policy.h"
#include "leveldb/slice.h"
#include "util/hash.h"
namespace leveldb {
namespace {
static uint32_t BloomHash(const Slice& key) {
return Hash(key.data(), key.size(), 0xbc9f1d34);
}
class BloomFilterPolicy : public FilterPolicy {
public:
explicit BloomFilterPolicy(int bits_per_key) : bits_per_key_(bits_per_key) {
k_ = static_cast<size_t>(bits_per_key * 0.69);
if (k_ < 1) k_ = 1;
if (k_ > 30) k_ = 30;
}
const char* Name() const override { return "leveldb.BuiltinBloomFilter2"; }
void CreateFilter(const Slice* keys, int n, std::string* dst) const override {
size_t bits = n * bits_per_key_;
if (bits < 64) bits = 64;
size_t bytes = (bits + 7) / 8;
bits = bytes * 8;
const size_t init_size = dst->size();
dst->resize(init_size + bytes, 0);
dst->push_back(static_cast<char>(k_));
char* array = &(*dst)[init_size];
for (int i = 0; i < n; i++) {
uint32_t h = BloomHash(keys[i]);
const uint32_t delta = (h >> 17) | (h << 15);
for (size_t j = 0; j < k_; j++) {
const uint32_t bitpos = h % bits;
array[bitpos / 8] |= (1 << (bitpos % 8));
h += delta;
}
}
}
bool KeyMayMatch(const Slice& key, const Slice& bloom_filter) const override {
const size_t len = bloom_filter.size();
if (len < 2) return false;
const char* array = bloom_filter.data();
const size_t bits = (len - 1) * 8;
const size_t k = array[len - 1];
if (k > 30) {
return true;
}
uint32_t h = BloomHash(key);
const uint32_t delta = (h >> 17) | (h << 15);
for (size_t j = 0; j < k; j++) {
const uint32_t bitpos = h % bits;
if ((array[bitpos / 8] & (1 << (bitpos % 8))) == 0) return false;
h += delta;
}
return true;
}
private:
size_t bits_per_key_;
size_t k_;
};
}
const FilterPolicy* NewBloomFilterPolicy(int bits_per_key) {
return new BloomFilterPolicy(bits_per_key);
}
} | #include "gtest/gtest.h"
#include "leveldb/filter_policy.h"
#include "util/coding.h"
#include "util/logging.h"
#include "util/testutil.h"
namespace leveldb {
static const int kVerbose = 1;
static Slice Key(int i, char* buffer) {
EncodeFixed32(buffer, i);
return Slice(buffer, sizeof(uint32_t));
}
class BloomTest : public testing::Test {
public:
BloomTest() : policy_(NewBloomFilterPolicy(10)) {}
~BloomTest() { delete policy_; }
void Reset() {
keys_.clear();
filter_.clear();
}
void Add(const Slice& s) { keys_.push_back(s.ToString()); }
void Build() {
std::vector<Slice> key_slices;
for (size_t i = 0; i < keys_.size(); i++) {
key_slices.push_back(Slice(keys_[i]));
}
filter_.clear();
policy_->CreateFilter(&key_slices[0], static_cast<int>(key_slices.size()),
&filter_);
keys_.clear();
if (kVerbose >= 2) DumpFilter();
}
size_t FilterSize() const { return filter_.size(); }
void DumpFilter() {
std::fprintf(stderr, "F(");
for (size_t i = 0; i + 1 < filter_.size(); i++) {
const unsigned int c = static_cast<unsigned int>(filter_[i]);
for (int j = 0; j < 8; j++) {
std::fprintf(stderr, "%c", (c & (1 << j)) ? '1' : '.');
}
}
std::fprintf(stderr, ")\n");
}
bool Matches(const Slice& s) {
if (!keys_.empty()) {
Build();
}
return policy_->KeyMayMatch(s, filter_);
}
double FalsePositiveRate() {
char buffer[sizeof(int)];
int result = 0;
for (int i = 0; i < 10000; i++) {
if (Matches(Key(i + 1000000000, buffer))) {
result++;
}
}
return result / 10000.0;
}
private:
const FilterPolicy* policy_;
std::string filter_;
std::vector<std::string> keys_;
};
TEST_F(BloomTest, EmptyFilter) {
ASSERT_TRUE(!Matches("hello"));
ASSERT_TRUE(!Matches("world"));
}
TEST_F(BloomTest, Small) {
Add("hello");
Add("world");
ASSERT_TRUE(Matches("hello"));
ASSERT_TRUE(Matches("world"));
ASSERT_TRUE(!Matches("x"));
ASSERT_TRUE(!Matches("foo"));
}
static int NextLength(int length) {
if (length < 10) {
length += 1;
} else if (length < 100) {
length += 10;
} else if (length < 1000) {
length += 100;
} else {
length += 1000;
}
return length;
}
TEST_F(BloomTest, VaryingLengths) {
char buffer[sizeof(int)];
int mediocre_filters = 0;
int good_filters = 0;
for (int length = 1; length <= 10000; length = NextLength(length)) {
Reset();
for (int i = 0; i < length; i++) {
Add(Key(i, buffer));
}
Build();
ASSERT_LE(FilterSize(), static_cast<size_t>((length * 10 / 8) + 40))
<< length;
for (int i = 0; i < length; i++) {
ASSERT_TRUE(Matches(Key(i, buffer)))
<< "Length " << length << "; key " << i;
}
double rate = FalsePositiveRate();
if (kVerbose >= 1) {
std::fprintf(stderr,
"False positives: %5.2f%% @ length = %6d ; bytes = %6d\n",
rate * 100.0, length, static_cast<int>(FilterSize()));
}
ASSERT_LE(rate, 0.02);
if (rate > 0.0125)
mediocre_filters++;
else
good_filters++;
}
if (kVerbose >= 1) {
std::fprintf(stderr, "Filters: %d good, %d mediocre\n", good_filters,
mediocre_filters);
}
ASSERT_LE(mediocre_filters, good_filters / 5);
}
} | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/util/bloom.cc | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/util/bloom_test.cc | 23e35d792b9154f922b8b575b12596a4d8664c65 |
0851d7ea-8a5b-4636-b24c-ea8f34dd89eb | cpp | google/leveldb | hash | util/hash.cc | util/hash_test.cc | #include "util/hash.h"
#include <cstring>
#include "util/coding.h"
#ifndef FALLTHROUGH_INTENDED
#define FALLTHROUGH_INTENDED \
do { \
} while (0)
#endif
namespace leveldb {
uint32_t Hash(const char* data, size_t n, uint32_t seed) {
const uint32_t m = 0xc6a4a793;
const uint32_t r = 24;
const char* limit = data + n;
uint32_t h = seed ^ (n * m);
while (data + 4 <= limit) {
uint32_t w = DecodeFixed32(data);
data += 4;
h += w;
h *= m;
h ^= (h >> 16);
}
switch (limit - data) {
case 3:
h += static_cast<uint8_t>(data[2]) << 16;
FALLTHROUGH_INTENDED;
case 2:
h += static_cast<uint8_t>(data[1]) << 8;
FALLTHROUGH_INTENDED;
case 1:
h += static_cast<uint8_t>(data[0]);
h *= m;
h ^= (h >> r);
break;
}
return h;
}
} | #include "util/hash.h"
#include "gtest/gtest.h"
namespace leveldb {
TEST(HASH, SignedUnsignedIssue) {
const uint8_t data1[1] = {0x62};
const uint8_t data2[2] = {0xc3, 0x97};
const uint8_t data3[3] = {0xe2, 0x99, 0xa5};
const uint8_t data4[4] = {0xe1, 0x80, 0xb9, 0x32};
const uint8_t data5[48] = {
0x01, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x18, 0x28, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
ASSERT_EQ(Hash(0, 0, 0xbc9f1d34), 0xbc9f1d34);
ASSERT_EQ(
Hash(reinterpret_cast<const char*>(data1), sizeof(data1), 0xbc9f1d34),
0xef1345c4);
ASSERT_EQ(
Hash(reinterpret_cast<const char*>(data2), sizeof(data2), 0xbc9f1d34),
0x5b663814);
ASSERT_EQ(
Hash(reinterpret_cast<const char*>(data3), sizeof(data3), 0xbc9f1d34),
0x323c078f);
ASSERT_EQ(
Hash(reinterpret_cast<const char*>(data4), sizeof(data4), 0xbc9f1d34),
0xed21633a);
ASSERT_EQ(
Hash(reinterpret_cast<const char*>(data5), sizeof(data5), 0x12345678),
0xf333dabb);
}
} | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/util/hash.cc | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/util/hash_test.cc | 23e35d792b9154f922b8b575b12596a4d8664c65 |
7ebac96d-5373-4efe-9bc1-d3e56b54150e | cpp | google/leveldb | crc32c | util/crc32c.cc | util/crc32c_test.cc | #include "util/crc32c.h"
#include <cstddef>
#include <cstdint>
#include "port/port.h"
#include "util/coding.h"
namespace leveldb {
namespace crc32c {
namespace {
const uint32_t kByteExtensionTable[256] = {
0x00000000, 0xf26b8303, 0xe13b70f7, 0x1350f3f4, 0xc79a971f, 0x35f1141c,
0x26a1e7e8, 0xd4ca64eb, 0x8ad958cf, 0x78b2dbcc, 0x6be22838, 0x9989ab3b,
0x4d43cfd0, 0xbf284cd3, 0xac78bf27, 0x5e133c24, 0x105ec76f, 0xe235446c,
0xf165b798, 0x030e349b, 0xd7c45070, 0x25afd373, 0x36ff2087, 0xc494a384,
0x9a879fa0, 0x68ec1ca3, 0x7bbcef57, 0x89d76c54, 0x5d1d08bf, 0xaf768bbc,
0xbc267848, 0x4e4dfb4b, 0x20bd8ede, 0xd2d60ddd, 0xc186fe29, 0x33ed7d2a,
0xe72719c1, 0x154c9ac2, 0x061c6936, 0xf477ea35, 0xaa64d611, 0x580f5512,
0x4b5fa6e6, 0xb93425e5, 0x6dfe410e, 0x9f95c20d, 0x8cc531f9, 0x7eaeb2fa,
0x30e349b1, 0xc288cab2, 0xd1d83946, 0x23b3ba45, 0xf779deae, 0x05125dad,
0x1642ae59, 0xe4292d5a, 0xba3a117e, 0x4851927d, 0x5b016189, 0xa96ae28a,
0x7da08661, 0x8fcb0562, 0x9c9bf696, 0x6ef07595, 0x417b1dbc, 0xb3109ebf,
0xa0406d4b, 0x522bee48, 0x86e18aa3, 0x748a09a0, 0x67dafa54, 0x95b17957,
0xcba24573, 0x39c9c670, 0x2a993584, 0xd8f2b687, 0x0c38d26c, 0xfe53516f,
0xed03a29b, 0x1f682198, 0x5125dad3, 0xa34e59d0, 0xb01eaa24, 0x42752927,
0x96bf4dcc, 0x64d4cecf, 0x77843d3b, 0x85efbe38, 0xdbfc821c, 0x2997011f,
0x3ac7f2eb, 0xc8ac71e8, 0x1c661503, 0xee0d9600, 0xfd5d65f4, 0x0f36e6f7,
0x61c69362, 0x93ad1061, 0x80fde395, 0x72966096, 0xa65c047d, 0x5437877e,
0x4767748a, 0xb50cf789, 0xeb1fcbad, 0x197448ae, 0x0a24bb5a, 0xf84f3859,
0x2c855cb2, 0xdeeedfb1, 0xcdbe2c45, 0x3fd5af46, 0x7198540d, 0x83f3d70e,
0x90a324fa, 0x62c8a7f9, 0xb602c312, 0x44694011, 0x5739b3e5, 0xa55230e6,
0xfb410cc2, 0x092a8fc1, 0x1a7a7c35, 0xe811ff36, 0x3cdb9bdd, 0xceb018de,
0xdde0eb2a, 0x2f8b6829, 0x82f63b78, 0x709db87b, 0x63cd4b8f, 0x91a6c88c,
0x456cac67, 0xb7072f64, 0xa457dc90, 0x563c5f93, 0x082f63b7, 0xfa44e0b4,
0xe9141340, 0x1b7f9043, 0xcfb5f4a8, 0x3dde77ab, 0x2e8e845f, 0xdce5075c,
0x92a8fc17, 0x60c37f14, 0x73938ce0, 0x81f80fe3, 0x55326b08, 0xa759e80b,
0xb4091bff, 0x466298fc, 0x1871a4d8, 0xea1a27db, 0xf94ad42f, 0x0b21572c,
0xdfeb33c7, 0x2d80b0c4, 0x3ed04330, 0xccbbc033, 0xa24bb5a6, 0x502036a5,
0x4370c551, 0xb11b4652, 0x65d122b9, 0x97baa1ba, 0x84ea524e, 0x7681d14d,
0x2892ed69, 0xdaf96e6a, 0xc9a99d9e, 0x3bc21e9d, 0xef087a76, 0x1d63f975,
0x0e330a81, 0xfc588982, 0xb21572c9, 0x407ef1ca, 0x532e023e, 0xa145813d,
0x758fe5d6, 0x87e466d5, 0x94b49521, 0x66df1622, 0x38cc2a06, 0xcaa7a905,
0xd9f75af1, 0x2b9cd9f2, 0xff56bd19, 0x0d3d3e1a, 0x1e6dcdee, 0xec064eed,
0xc38d26c4, 0x31e6a5c7, 0x22b65633, 0xd0ddd530, 0x0417b1db, 0xf67c32d8,
0xe52cc12c, 0x1747422f, 0x49547e0b, 0xbb3ffd08, 0xa86f0efc, 0x5a048dff,
0x8ecee914, 0x7ca56a17, 0x6ff599e3, 0x9d9e1ae0, 0xd3d3e1ab, 0x21b862a8,
0x32e8915c, 0xc083125f, 0x144976b4, 0xe622f5b7, 0xf5720643, 0x07198540,
0x590ab964, 0xab613a67, 0xb831c993, 0x4a5a4a90, 0x9e902e7b, 0x6cfbad78,
0x7fab5e8c, 0x8dc0dd8f, 0xe330a81a, 0x115b2b19, 0x020bd8ed, 0xf0605bee,
0x24aa3f05, 0xd6c1bc06, 0xc5914ff2, 0x37faccf1, 0x69e9f0d5, 0x9b8273d6,
0x88d28022, 0x7ab90321, 0xae7367ca, 0x5c18e4c9, 0x4f48173d, 0xbd23943e,
0xf36e6f75, 0x0105ec76, 0x12551f82, 0xe03e9c81, 0x34f4f86a, 0xc69f7b69,
0xd5cf889d, 0x27a40b9e, 0x79b737ba, 0x8bdcb4b9, 0x988c474d, 0x6ae7c44e,
0xbe2da0a5, 0x4c4623a6, 0x5f16d052, 0xad7d5351};
const uint32_t kStrideExtensionTable0[256] = {
0x00000000, 0x30d23865, 0x61a470ca, 0x517648af, 0xc348e194, 0xf39ad9f1,
0xa2ec915e, 0x923ea93b, 0x837db5d9, 0xb3af8dbc, 0xe2d9c513, 0xd20bfd76,
0x4035544d, 0x70e76c28, 0x21912487, 0x11431ce2, 0x03171d43, 0x33c52526,
0x62b36d89, 0x526155ec, 0xc05ffcd7, 0xf08dc4b2, 0xa1fb8c1d, 0x9129b478,
0x806aa89a, 0xb0b890ff, 0xe1ced850, 0xd11ce035, 0x4322490e, 0x73f0716b,
0x228639c4, 0x125401a1, 0x062e3a86, 0x36fc02e3, 0x678a4a4c, 0x57587229,
0xc566db12, 0xf5b4e377, 0xa4c2abd8, 0x941093bd, 0x85538f5f, 0xb581b73a,
0xe4f7ff95, 0xd425c7f0, 0x461b6ecb, 0x76c956ae, 0x27bf1e01, 0x176d2664,
0x053927c5, 0x35eb1fa0, 0x649d570f, 0x544f6f6a, 0xc671c651, 0xf6a3fe34,
0xa7d5b69b, 0x97078efe, 0x8644921c, 0xb696aa79, 0xe7e0e2d6, 0xd732dab3,
0x450c7388, 0x75de4bed, 0x24a80342, 0x147a3b27, 0x0c5c750c, 0x3c8e4d69,
0x6df805c6, 0x5d2a3da3, 0xcf149498, 0xffc6acfd, 0xaeb0e452, 0x9e62dc37,
0x8f21c0d5, 0xbff3f8b0, 0xee85b01f, 0xde57887a, 0x4c692141, 0x7cbb1924,
0x2dcd518b, 0x1d1f69ee, 0x0f4b684f, 0x3f99502a, 0x6eef1885, 0x5e3d20e0,
0xcc0389db, 0xfcd1b1be, 0xada7f911, 0x9d75c174, 0x8c36dd96, 0xbce4e5f3,
0xed92ad5c, 0xdd409539, 0x4f7e3c02, 0x7fac0467, 0x2eda4cc8, 0x1e0874ad,
0x0a724f8a, 0x3aa077ef, 0x6bd63f40, 0x5b040725, 0xc93aae1e, 0xf9e8967b,
0xa89eded4, 0x984ce6b1, 0x890ffa53, 0xb9ddc236, 0xe8ab8a99, 0xd879b2fc,
0x4a471bc7, 0x7a9523a2, 0x2be36b0d, 0x1b315368, 0x096552c9, 0x39b76aac,
0x68c12203, 0x58131a66, 0xca2db35d, 0xfaff8b38, 0xab89c397, 0x9b5bfbf2,
0x8a18e710, 0xbacadf75, 0xebbc97da, 0xdb6eafbf, 0x49500684, 0x79823ee1,
0x28f4764e, 0x18264e2b, 0x18b8ea18, 0x286ad27d, 0x791c9ad2, 0x49cea2b7,
0xdbf00b8c, 0xeb2233e9, 0xba547b46, 0x8a864323, 0x9bc55fc1, 0xab1767a4,
0xfa612f0b, 0xcab3176e, 0x588dbe55, 0x685f8630, 0x3929ce9f, 0x09fbf6fa,
0x1baff75b, 0x2b7dcf3e, 0x7a0b8791, 0x4ad9bff4, 0xd8e716cf, 0xe8352eaa,
0xb9436605, 0x89915e60, 0x98d24282, 0xa8007ae7, 0xf9763248, 0xc9a40a2d,
0x5b9aa316, 0x6b489b73, 0x3a3ed3dc, 0x0aecebb9, 0x1e96d09e, 0x2e44e8fb,
0x7f32a054, 0x4fe09831, 0xddde310a, 0xed0c096f, 0xbc7a41c0, 0x8ca879a5,
0x9deb6547, 0xad395d22, 0xfc4f158d, 0xcc9d2de8, 0x5ea384d3, 0x6e71bcb6,
0x3f07f419, 0x0fd5cc7c, 0x1d81cddd, 0x2d53f5b8, 0x7c25bd17, 0x4cf78572,
0xdec92c49, 0xee1b142c, 0xbf6d5c83, 0x8fbf64e6, 0x9efc7804, 0xae2e4061,
0xff5808ce, 0xcf8a30ab, 0x5db49990, 0x6d66a1f5, 0x3c10e95a, 0x0cc2d13f,
0x14e49f14, 0x2436a771, 0x7540efde, 0x4592d7bb, 0xd7ac7e80, 0xe77e46e5,
0xb6080e4a, 0x86da362f, 0x97992acd, 0xa74b12a8, 0xf63d5a07, 0xc6ef6262,
0x54d1cb59, 0x6403f33c, 0x3575bb93, 0x05a783f6, 0x17f38257, 0x2721ba32,
0x7657f29d, 0x4685caf8, 0xd4bb63c3, 0xe4695ba6, 0xb51f1309, 0x85cd2b6c,
0x948e378e, 0xa45c0feb, 0xf52a4744, 0xc5f87f21, 0x57c6d61a, 0x6714ee7f,
0x3662a6d0, 0x06b09eb5, 0x12caa592, 0x22189df7, 0x736ed558, 0x43bced3d,
0xd1824406, 0xe1507c63, 0xb02634cc, 0x80f40ca9, 0x91b7104b, 0xa165282e,
0xf0136081, 0xc0c158e4, 0x52fff1df, 0x622dc9ba, 0x335b8115, 0x0389b970,
0x11ddb8d1, 0x210f80b4, 0x7079c81b, 0x40abf07e, 0xd2955945, 0xe2476120,
0xb331298f, 0x83e311ea, 0x92a00d08, 0xa272356d, 0xf3047dc2, 0xc3d645a7,
0x51e8ec9c, 0x613ad4f9, 0x304c9c56, 0x009ea433};
const uint32_t kStrideExtensionTable1[256] = {
0x00000000, 0x54075546, 0xa80eaa8c, 0xfc09ffca, 0x55f123e9, 0x01f676af,
0xfdff8965, 0xa9f8dc23, 0xabe247d2, 0xffe51294, 0x03eced5e, 0x57ebb818,
0xfe13643b, 0xaa14317d, 0x561dceb7, 0x021a9bf1, 0x5228f955, 0x062fac13,
0xfa2653d9, 0xae21069f, 0x07d9dabc, 0x53de8ffa, 0xafd77030, 0xfbd02576,
0xf9cabe87, 0xadcdebc1, 0x51c4140b, 0x05c3414d, 0xac3b9d6e, 0xf83cc828,
0x043537e2, 0x503262a4, 0xa451f2aa, 0xf056a7ec, 0x0c5f5826, 0x58580d60,
0xf1a0d143, 0xa5a78405, 0x59ae7bcf, 0x0da92e89, 0x0fb3b578, 0x5bb4e03e,
0xa7bd1ff4, 0xf3ba4ab2, 0x5a429691, 0x0e45c3d7, 0xf24c3c1d, 0xa64b695b,
0xf6790bff, 0xa27e5eb9, 0x5e77a173, 0x0a70f435, 0xa3882816, 0xf78f7d50,
0x0b86829a, 0x5f81d7dc, 0x5d9b4c2d, 0x099c196b, 0xf595e6a1, 0xa192b3e7,
0x086a6fc4, 0x5c6d3a82, 0xa064c548, 0xf463900e, 0x4d4f93a5, 0x1948c6e3,
0xe5413929, 0xb1466c6f, 0x18beb04c, 0x4cb9e50a, 0xb0b01ac0, 0xe4b74f86,
0xe6add477, 0xb2aa8131, 0x4ea37efb, 0x1aa42bbd, 0xb35cf79e, 0xe75ba2d8,
0x1b525d12, 0x4f550854, 0x1f676af0, 0x4b603fb6, 0xb769c07c, 0xe36e953a,
0x4a964919, 0x1e911c5f, 0xe298e395, 0xb69fb6d3, 0xb4852d22, 0xe0827864,
0x1c8b87ae, 0x488cd2e8, 0xe1740ecb, 0xb5735b8d, 0x497aa447, 0x1d7df101,
0xe91e610f, 0xbd193449, 0x4110cb83, 0x15179ec5, 0xbcef42e6, 0xe8e817a0,
0x14e1e86a, 0x40e6bd2c, 0x42fc26dd, 0x16fb739b, 0xeaf28c51, 0xbef5d917,
0x170d0534, 0x430a5072, 0xbf03afb8, 0xeb04fafe, 0xbb36985a, 0xef31cd1c,
0x133832d6, 0x473f6790, 0xeec7bbb3, 0xbac0eef5, 0x46c9113f, 0x12ce4479,
0x10d4df88, 0x44d38ace, 0xb8da7504, 0xecdd2042, 0x4525fc61, 0x1122a927,
0xed2b56ed, 0xb92c03ab, 0x9a9f274a, 0xce98720c, 0x32918dc6, 0x6696d880,
0xcf6e04a3, 0x9b6951e5, 0x6760ae2f, 0x3367fb69, 0x317d6098, 0x657a35de,
0x9973ca14, 0xcd749f52, 0x648c4371, 0x308b1637, 0xcc82e9fd, 0x9885bcbb,
0xc8b7de1f, 0x9cb08b59, 0x60b97493, 0x34be21d5, 0x9d46fdf6, 0xc941a8b0,
0x3548577a, 0x614f023c, 0x635599cd, 0x3752cc8b, 0xcb5b3341, 0x9f5c6607,
0x36a4ba24, 0x62a3ef62, 0x9eaa10a8, 0xcaad45ee, 0x3eced5e0, 0x6ac980a6,
0x96c07f6c, 0xc2c72a2a, 0x6b3ff609, 0x3f38a34f, 0xc3315c85, 0x973609c3,
0x952c9232, 0xc12bc774, 0x3d2238be, 0x69256df8, 0xc0ddb1db, 0x94dae49d,
0x68d31b57, 0x3cd44e11, 0x6ce62cb5, 0x38e179f3, 0xc4e88639, 0x90efd37f,
0x39170f5c, 0x6d105a1a, 0x9119a5d0, 0xc51ef096, 0xc7046b67, 0x93033e21,
0x6f0ac1eb, 0x3b0d94ad, 0x92f5488e, 0xc6f21dc8, 0x3afbe202, 0x6efcb744,
0xd7d0b4ef, 0x83d7e1a9, 0x7fde1e63, 0x2bd94b25, 0x82219706, 0xd626c240,
0x2a2f3d8a, 0x7e2868cc, 0x7c32f33d, 0x2835a67b, 0xd43c59b1, 0x803b0cf7,
0x29c3d0d4, 0x7dc48592, 0x81cd7a58, 0xd5ca2f1e, 0x85f84dba, 0xd1ff18fc,
0x2df6e736, 0x79f1b270, 0xd0096e53, 0x840e3b15, 0x7807c4df, 0x2c009199,
0x2e1a0a68, 0x7a1d5f2e, 0x8614a0e4, 0xd213f5a2, 0x7beb2981, 0x2fec7cc7,
0xd3e5830d, 0x87e2d64b, 0x73814645, 0x27861303, 0xdb8fecc9, 0x8f88b98f,
0x267065ac, 0x727730ea, 0x8e7ecf20, 0xda799a66, 0xd8630197, 0x8c6454d1,
0x706dab1b, 0x246afe5d, 0x8d92227e, 0xd9957738, 0x259c88f2, 0x719bddb4,
0x21a9bf10, 0x75aeea56, 0x89a7159c, 0xdda040da, 0x74589cf9, 0x205fc9bf,
0xdc563675, 0x88516333, 0x8a4bf8c2, 0xde4cad84, 0x2245524e, 0x76420708,
0xdfbadb2b, 0x8bbd8e6d, 0x77b471a7, 0x23b324e1};
const uint32_t kStrideExtensionTable2[256] = {
0x00000000, 0x678efd01, 0xcf1dfa02, 0xa8930703, 0x9bd782f5, 0xfc597ff4,
0x54ca78f7, 0x334485f6, 0x3243731b, 0x55cd8e1a, 0xfd5e8919, 0x9ad07418,
0xa994f1ee, 0xce1a0cef, 0x66890bec, 0x0107f6ed, 0x6486e636, 0x03081b37,
0xab9b1c34, 0xcc15e135, 0xff5164c3, 0x98df99c2, 0x304c9ec1, 0x57c263c0,
0x56c5952d, 0x314b682c, 0x99d86f2f, 0xfe56922e, 0xcd1217d8, 0xaa9cead9,
0x020fedda, 0x658110db, 0xc90dcc6c, 0xae83316d, 0x0610366e, 0x619ecb6f,
0x52da4e99, 0x3554b398, 0x9dc7b49b, 0xfa49499a, 0xfb4ebf77, 0x9cc04276,
0x34534575, 0x53ddb874, 0x60993d82, 0x0717c083, 0xaf84c780, 0xc80a3a81,
0xad8b2a5a, 0xca05d75b, 0x6296d058, 0x05182d59, 0x365ca8af, 0x51d255ae,
0xf94152ad, 0x9ecfafac, 0x9fc85941, 0xf846a440, 0x50d5a343, 0x375b5e42,
0x041fdbb4, 0x639126b5, 0xcb0221b6, 0xac8cdcb7, 0x97f7ee29, 0xf0791328,
0x58ea142b, 0x3f64e92a, 0x0c206cdc, 0x6bae91dd, 0xc33d96de, 0xa4b36bdf,
0xa5b49d32, 0xc23a6033, 0x6aa96730, 0x0d279a31, 0x3e631fc7, 0x59ede2c6,
0xf17ee5c5, 0x96f018c4, 0xf371081f, 0x94fff51e, 0x3c6cf21d, 0x5be20f1c,
0x68a68aea, 0x0f2877eb, 0xa7bb70e8, 0xc0358de9, 0xc1327b04, 0xa6bc8605,
0x0e2f8106, 0x69a17c07, 0x5ae5f9f1, 0x3d6b04f0, 0x95f803f3, 0xf276fef2,
0x5efa2245, 0x3974df44, 0x91e7d847, 0xf6692546, 0xc52da0b0, 0xa2a35db1,
0x0a305ab2, 0x6dbea7b3, 0x6cb9515e, 0x0b37ac5f, 0xa3a4ab5c, 0xc42a565d,
0xf76ed3ab, 0x90e02eaa, 0x387329a9, 0x5ffdd4a8, 0x3a7cc473, 0x5df23972,
0xf5613e71, 0x92efc370, 0xa1ab4686, 0xc625bb87, 0x6eb6bc84, 0x09384185,
0x083fb768, 0x6fb14a69, 0xc7224d6a, 0xa0acb06b, 0x93e8359d, 0xf466c89c,
0x5cf5cf9f, 0x3b7b329e, 0x2a03aaa3, 0x4d8d57a2, 0xe51e50a1, 0x8290ada0,
0xb1d42856, 0xd65ad557, 0x7ec9d254, 0x19472f55, 0x1840d9b8, 0x7fce24b9,
0xd75d23ba, 0xb0d3debb, 0x83975b4d, 0xe419a64c, 0x4c8aa14f, 0x2b045c4e,
0x4e854c95, 0x290bb194, 0x8198b697, 0xe6164b96, 0xd552ce60, 0xb2dc3361,
0x1a4f3462, 0x7dc1c963, 0x7cc63f8e, 0x1b48c28f, 0xb3dbc58c, 0xd455388d,
0xe711bd7b, 0x809f407a, 0x280c4779, 0x4f82ba78, 0xe30e66cf, 0x84809bce,
0x2c139ccd, 0x4b9d61cc, 0x78d9e43a, 0x1f57193b, 0xb7c41e38, 0xd04ae339,
0xd14d15d4, 0xb6c3e8d5, 0x1e50efd6, 0x79de12d7, 0x4a9a9721, 0x2d146a20,
0x85876d23, 0xe2099022, 0x878880f9, 0xe0067df8, 0x48957afb, 0x2f1b87fa,
0x1c5f020c, 0x7bd1ff0d, 0xd342f80e, 0xb4cc050f, 0xb5cbf3e2, 0xd2450ee3,
0x7ad609e0, 0x1d58f4e1, 0x2e1c7117, 0x49928c16, 0xe1018b15, 0x868f7614,
0xbdf4448a, 0xda7ab98b, 0x72e9be88, 0x15674389, 0x2623c67f, 0x41ad3b7e,
0xe93e3c7d, 0x8eb0c17c, 0x8fb73791, 0xe839ca90, 0x40aacd93, 0x27243092,
0x1460b564, 0x73ee4865, 0xdb7d4f66, 0xbcf3b267, 0xd972a2bc, 0xbefc5fbd,
0x166f58be, 0x71e1a5bf, 0x42a52049, 0x252bdd48, 0x8db8da4b, 0xea36274a,
0xeb31d1a7, 0x8cbf2ca6, 0x242c2ba5, 0x43a2d6a4, 0x70e65352, 0x1768ae53,
0xbffba950, 0xd8755451, 0x74f988e6, 0x137775e7, 0xbbe472e4, 0xdc6a8fe5,
0xef2e0a13, 0x88a0f712, 0x2033f011, 0x47bd0d10, 0x46bafbfd, 0x213406fc,
0x89a701ff, 0xee29fcfe, 0xdd6d7908, 0xbae38409, 0x1270830a, 0x75fe7e0b,
0x107f6ed0, 0x77f193d1, 0xdf6294d2, 0xb8ec69d3, 0x8ba8ec25, 0xec261124,
0x44b51627, 0x233beb26, 0x223c1dcb, 0x45b2e0ca, 0xed21e7c9, 0x8aaf1ac8,
0xb9eb9f3e, 0xde65623f, 0x76f6653c, 0x1178983d};
const uint32_t kStrideExtensionTable3[256] = {
0x00000000, 0xf20c0dfe, 0xe1f46d0d, 0x13f860f3, 0xc604aceb, 0x3408a115,
0x27f0c1e6, 0xd5fccc18, 0x89e52f27, 0x7be922d9, 0x6811422a, 0x9a1d4fd4,
0x4fe183cc, 0xbded8e32, 0xae15eec1, 0x5c19e33f, 0x162628bf, 0xe42a2541,
0xf7d245b2, 0x05de484c, 0xd0228454, 0x222e89aa, 0x31d6e959, 0xc3dae4a7,
0x9fc30798, 0x6dcf0a66, 0x7e376a95, 0x8c3b676b, 0x59c7ab73, 0xabcba68d,
0xb833c67e, 0x4a3fcb80, 0x2c4c517e, 0xde405c80, 0xcdb83c73, 0x3fb4318d,
0xea48fd95, 0x1844f06b, 0x0bbc9098, 0xf9b09d66, 0xa5a97e59, 0x57a573a7,
0x445d1354, 0xb6511eaa, 0x63add2b2, 0x91a1df4c, 0x8259bfbf, 0x7055b241,
0x3a6a79c1, 0xc866743f, 0xdb9e14cc, 0x29921932, 0xfc6ed52a, 0x0e62d8d4,
0x1d9ab827, 0xef96b5d9, 0xb38f56e6, 0x41835b18, 0x527b3beb, 0xa0773615,
0x758bfa0d, 0x8787f7f3, 0x947f9700, 0x66739afe, 0x5898a2fc, 0xaa94af02,
0xb96ccff1, 0x4b60c20f, 0x9e9c0e17, 0x6c9003e9, 0x7f68631a, 0x8d646ee4,
0xd17d8ddb, 0x23718025, 0x3089e0d6, 0xc285ed28, 0x17792130, 0xe5752cce,
0xf68d4c3d, 0x048141c3, 0x4ebe8a43, 0xbcb287bd, 0xaf4ae74e, 0x5d46eab0,
0x88ba26a8, 0x7ab62b56, 0x694e4ba5, 0x9b42465b, 0xc75ba564, 0x3557a89a,
0x26afc869, 0xd4a3c597, 0x015f098f, 0xf3530471, 0xe0ab6482, 0x12a7697c,
0x74d4f382, 0x86d8fe7c, 0x95209e8f, 0x672c9371, 0xb2d05f69, 0x40dc5297,
0x53243264, 0xa1283f9a, 0xfd31dca5, 0x0f3dd15b, 0x1cc5b1a8, 0xeec9bc56,
0x3b35704e, 0xc9397db0, 0xdac11d43, 0x28cd10bd, 0x62f2db3d, 0x90fed6c3,
0x8306b630, 0x710abbce, 0xa4f677d6, 0x56fa7a28, 0x45021adb, 0xb70e1725,
0xeb17f41a, 0x191bf9e4, 0x0ae39917, 0xf8ef94e9, 0x2d1358f1, 0xdf1f550f,
0xcce735fc, 0x3eeb3802, 0xb13145f8, 0x433d4806, 0x50c528f5, 0xa2c9250b,
0x7735e913, 0x8539e4ed, 0x96c1841e, 0x64cd89e0, 0x38d46adf, 0xcad86721,
0xd92007d2, 0x2b2c0a2c, 0xfed0c634, 0x0cdccbca, 0x1f24ab39, 0xed28a6c7,
0xa7176d47, 0x551b60b9, 0x46e3004a, 0xb4ef0db4, 0x6113c1ac, 0x931fcc52,
0x80e7aca1, 0x72eba15f, 0x2ef24260, 0xdcfe4f9e, 0xcf062f6d, 0x3d0a2293,
0xe8f6ee8b, 0x1afae375, 0x09028386, 0xfb0e8e78, 0x9d7d1486, 0x6f711978,
0x7c89798b, 0x8e857475, 0x5b79b86d, 0xa975b593, 0xba8dd560, 0x4881d89e,
0x14983ba1, 0xe694365f, 0xf56c56ac, 0x07605b52, 0xd29c974a, 0x20909ab4,
0x3368fa47, 0xc164f7b9, 0x8b5b3c39, 0x795731c7, 0x6aaf5134, 0x98a35cca,
0x4d5f90d2, 0xbf539d2c, 0xacabfddf, 0x5ea7f021, 0x02be131e, 0xf0b21ee0,
0xe34a7e13, 0x114673ed, 0xc4babff5, 0x36b6b20b, 0x254ed2f8, 0xd742df06,
0xe9a9e704, 0x1ba5eafa, 0x085d8a09, 0xfa5187f7, 0x2fad4bef, 0xdda14611,
0xce5926e2, 0x3c552b1c, 0x604cc823, 0x9240c5dd, 0x81b8a52e, 0x73b4a8d0,
0xa64864c8, 0x54446936, 0x47bc09c5, 0xb5b0043b, 0xff8fcfbb, 0x0d83c245,
0x1e7ba2b6, 0xec77af48, 0x398b6350, 0xcb876eae, 0xd87f0e5d, 0x2a7303a3,
0x766ae09c, 0x8466ed62, 0x979e8d91, 0x6592806f, 0xb06e4c77, 0x42624189,
0x519a217a, 0xa3962c84, 0xc5e5b67a, 0x37e9bb84, 0x2411db77, 0xd61dd689,
0x03e11a91, 0xf1ed176f, 0xe215779c, 0x10197a62, 0x4c00995d, 0xbe0c94a3,
0xadf4f450, 0x5ff8f9ae, 0x8a0435b6, 0x78083848, 0x6bf058bb, 0x99fc5545,
0xd3c39ec5, 0x21cf933b, 0x3237f3c8, 0xc03bfe36, 0x15c7322e, 0xe7cb3fd0,
0xf4335f23, 0x063f52dd, 0x5a26b1e2, 0xa82abc1c, 0xbbd2dcef, 0x49ded111,
0x9c221d09, 0x6e2e10f7, 0x7dd67004, 0x8fda7dfa};
// CRC32C pre/post-conditioning mask: the running state is XORed with
// all-ones on entry to and exit from Extend().
static constexpr const uint32_t kCRC32Xor = static_cast<uint32_t>(0xffffffffU);
// Loads a little-endian 32-bit value from `buffer`.
inline uint32_t ReadUint32LE(const uint8_t* buffer) {
  return DecodeFixed32(reinterpret_cast<const char*>(buffer));
}
// Returns `pointer` rounded up to the next multiple of N.
// N must be a power of two.
template <int N>
constexpr inline const uint8_t* RoundUp(const uint8_t* pointer) {
  const uintptr_t address = reinterpret_cast<uintptr_t>(pointer);
  const uintptr_t mask = static_cast<uintptr_t>(N - 1);
  const uintptr_t aligned = (address + mask) & ~mask;
  return reinterpret_cast<const uint8_t*>(aligned);
}
}
// Probes the port layer's hardware CRC32C path with a buffer whose CRC32C
// is known; acceleration is used only if the probe returns the expected
// value.
static bool CanAccelerateCRC32C() {
  static const char kTestCRCBuffer[] = "TestCRCBuffer";
  // sizeof includes the trailing NUL; exclude it from the CRC input.
  // Fix: a byte count should be size_t, not char.
  static const size_t kBufSize = sizeof(kTestCRCBuffer) - 1;
  static const uint32_t kTestCRCValue = 0xdcbc59fa;

  return port::AcceleratedCRC32C(0, kTestCRCBuffer, kBufSize) == kTestCRCValue;
}
// Computes the CRC32C of crc concatenated with data[0, n-1].
//
// Fast path: hardware CRC via the port layer when the probe succeeds.
// Software path: byte-at-a-time until `p` is 4-byte aligned, then a
// slicing-style main loop that advances four interleaved 32-bit CRC streams
// (crc0..crc3) over 16 bytes per iteration using the stride tables, folds
// the streams back into one CRC, and finishes the tail byte-at-a-time.
uint32_t Extend(uint32_t crc, const char* data, size_t n) {
  // Probed once; result cached for the process lifetime.
  static bool accelerate = CanAccelerateCRC32C();
  if (accelerate) {
    return port::AcceleratedCRC32C(crc, data, n);
  }

  const uint8_t* p = reinterpret_cast<const uint8_t*>(data);
  const uint8_t* e = p + n;
  // Pre-condition the state (see kCRC32Xor).
  uint32_t l = crc ^ kCRC32Xor;

// Process one input byte through the byte-indexed extension table.
#define STEP1                              \
  do {                                     \
    int c = (l & 0xff) ^ *p++;             \
    l = kByteExtensionTable[c] ^ (l >> 8); \
  } while (0)
// Advance CRC stream `s` by 4 input bytes using the stride tables.
#define STEP4(s)                                                               \
  do {                                                                         \
    crc##s = ReadUint32LE(p + s * 4) ^ kStrideExtensionTable3[crc##s & 0xff] ^ \
             kStrideExtensionTable2[(crc##s >> 8) & 0xff] ^                    \
             kStrideExtensionTable1[(crc##s >> 16) & 0xff] ^                   \
             kStrideExtensionTable0[crc##s >> 24];                             \
  } while (0)
// Advance all four interleaved streams (16 input bytes total).
#define STEP16 \
  do {         \
    STEP4(0);  \
    STEP4(1);  \
    STEP4(2);  \
    STEP4(3);  \
    p += 16;   \
  } while (0)
// Fold one stream value w into the single running CRC l, byte by byte.
#define STEP4W(w)                                   \
  do {                                              \
    w ^= l;                                         \
    for (size_t i = 0; i < 4; ++i) {                \
      w = (w >> 8) ^ kByteExtensionTable[w & 0xff]; \
    }                                               \
    l = w;                                          \
  } while (0)

  // Point x at the first 4-byte-aligned address at or after p; it may lie
  // past the end of the buffer, in which case alignment is skipped.
  const uint8_t* x = RoundUp<4>(p);
  if (x <= e) {
    // Consume bytes until p is 4-byte aligned.
    while (p != x) {
      STEP1;
    }
  }

  if ((e - p) >= 16) {
    // Seed the four streams from the first 16-byte swath; the current CRC
    // is folded into stream 0 only.
    uint32_t crc0 = ReadUint32LE(p + 0 * 4) ^ l;
    uint32_t crc1 = ReadUint32LE(p + 1 * 4);
    uint32_t crc2 = ReadUint32LE(p + 2 * 4);
    uint32_t crc3 = ReadUint32LE(p + 3 * 4);
    p += 16;

    // Main loop: 16 bytes per iteration.
    while ((e - p) >= 16) {
      STEP16;
    }

    // Remaining full words: advance stream 0 and rotate the streams so the
    // relative ordering is preserved for the final fold.
    while ((e - p) >= 4) {
      STEP4(0);
      uint32_t tmp = crc0;
      crc0 = crc1;
      crc1 = crc2;
      crc2 = crc3;
      crc3 = tmp;
      p += 4;
    }

    // Recombine the four streams into a single CRC.
    l = 0;
    STEP4W(crc0);
    STEP4W(crc1);
    STEP4W(crc2);
    STEP4W(crc3);
  }

  // Process the last few (unaligned tail) bytes.
  while (p != e) {
    STEP1;
  }
#undef STEP4W
#undef STEP16
#undef STEP4
#undef STEP1
  // Post-condition the state before returning.
  return l ^ kCRC32Xor;
}
}
} | #include "util/crc32c.h"
#include "gtest/gtest.h"
namespace leveldb {
namespace crc32c {
TEST(CRC, StandardResults) {
char buf[32];
memset(buf, 0, sizeof(buf));
ASSERT_EQ(0x8a9136aa, Value(buf, sizeof(buf)));
memset(buf, 0xff, sizeof(buf));
ASSERT_EQ(0x62a8ab43, Value(buf, sizeof(buf)));
for (int i = 0; i < 32; i++) {
buf[i] = i;
}
ASSERT_EQ(0x46dd794e, Value(buf, sizeof(buf)));
for (int i = 0; i < 32; i++) {
buf[i] = 31 - i;
}
ASSERT_EQ(0x113fdb5c, Value(buf, sizeof(buf)));
uint8_t data[48] = {
0x01, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x18, 0x28, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
ASSERT_EQ(0xd9963a56, Value(reinterpret_cast<char*>(data), sizeof(data)));
}
TEST(CRC, Values) { ASSERT_NE(Value("a", 1), Value("foo", 3)); }
TEST(CRC, Extend) {
ASSERT_EQ(Value("hello world", 11), Extend(Value("hello ", 6), "world", 5));
}
TEST(CRC, Mask) {
uint32_t crc = Value("foo", 3);
ASSERT_NE(crc, Mask(crc));
ASSERT_NE(crc, Mask(Mask(crc)));
ASSERT_EQ(crc, Unmask(Mask(crc)));
ASSERT_EQ(crc, Unmask(Unmask(Mask(Mask(crc)))));
}
}
} | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/util/crc32c.cc | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/util/crc32c_test.cc | 23e35d792b9154f922b8b575b12596a4d8664c65 |
f048a1a5-6f7c-4d47-b8b9-224c351315fd | cpp | google/leveldb | cache | util/cache.cc | util/cache_test.cc | #include "leveldb/cache.h"
#include <cassert>
#include <cstdio>
#include <cstdlib>
#include "port/port.h"
#include "port/thread_annotations.h"
#include "util/hash.h"
#include "util/mutexlock.h"
namespace leveldb {
Cache::~Cache() {}
namespace {
// A cache entry: a variable-length heap-allocated structure (see the
// malloc(sizeof(LRUHandle) - 1 + key.size()) in LRUCache::Insert).  Entries
// live in a circular doubly linked list (next/prev) and, when cached, in a
// HandleTable bucket chain (next_hash).
struct LRUHandle {
  void* value;
  void (*deleter)(const Slice&, void* value);  // called when refs drops to 0
  LRUHandle* next_hash;  // hash-bucket chain link (HandleTable)
  LRUHandle* next;       // circular LRU-list links
  LRUHandle* prev;
  size_t charge;         // cost counted against the cache capacity
  size_t key_length;
  bool in_cache;         // whether the entry is in the cache's hash table
  uint32_t refs;         // references, including the cache's own if in_cache
  uint32_t hash;         // hash of key(); used for sharding and fast compares
  char key_data[1];      // beginning of key; actual allocation is larger

  Slice key() const {
    // next == this only for a list's dummy head, which never has a key.
    assert(next != this);
    return Slice(key_data, key_length);
  }
};
// A simple chained hash table of LRUHandle*.  Buckets are singly linked
// through LRUHandle::next_hash.  The table doubles whenever the element
// count exceeds the bucket count, keeping the average chain length <= 1.
class HandleTable {
 public:
  HandleTable() : length_(0), elems_(0), list_(nullptr) { Resize(); }
  ~HandleTable() { delete[] list_; }

  LRUHandle* Lookup(const Slice& key, uint32_t hash) {
    return *FindPointer(key, hash);
  }

  // Inserts h.  Returns the previous entry with the same key (now unlinked
  // from the table), or nullptr if there was none.
  LRUHandle* Insert(LRUHandle* h) {
    LRUHandle** ptr = FindPointer(h->key(), h->hash);
    LRUHandle* old = *ptr;
    h->next_hash = (old == nullptr ? nullptr : old->next_hash);
    *ptr = h;
    if (old == nullptr) {
      ++elems_;
      if (elems_ > length_) {
        // Keep average bucket occupancy at or below one entry.
        Resize();
      }
    }
    return old;
  }

  // Unlinks and returns the entry matching (key, hash), or nullptr.
  LRUHandle* Remove(const Slice& key, uint32_t hash) {
    LRUHandle** ptr = FindPointer(key, hash);
    LRUHandle* result = *ptr;
    if (result != nullptr) {
      *ptr = result->next_hash;
      --elems_;
    }
    return result;
  }

 private:
  uint32_t length_;   // number of buckets (a power of two; see Resize)
  uint32_t elems_;    // number of stored entries
  LRUHandle** list_;  // bucket array

  // Returns a pointer to the slot that points at the matching entry, or to
  // the trailing nullptr slot of the chain if absent.  Returning a slot
  // (rather than the entry) lets Insert/Remove splice in place.
  LRUHandle** FindPointer(const Slice& key, uint32_t hash) {
    LRUHandle** ptr = &list_[hash & (length_ - 1)];
    while (*ptr != nullptr && ((*ptr)->hash != hash || key != (*ptr)->key())) {
      ptr = &(*ptr)->next_hash;
    }
    return ptr;
  }

  // Grows the bucket array (minimum 4, doubling until >= elems_) and
  // rehashes every entry into the new array.
  void Resize() {
    uint32_t new_length = 4;
    while (new_length < elems_) {
      new_length *= 2;
    }
    LRUHandle** new_list = new LRUHandle*[new_length];
    memset(new_list, 0, sizeof(new_list[0]) * new_length);
    uint32_t count = 0;
    for (uint32_t i = 0; i < length_; i++) {
      LRUHandle* h = list_[i];
      while (h != nullptr) {
        LRUHandle* next = h->next_hash;
        uint32_t hash = h->hash;
        LRUHandle** ptr = &new_list[hash & (new_length - 1)];
        // Prepend to the new bucket's chain.
        h->next_hash = *ptr;
        *ptr = h;
        h = next;
        count++;
      }
    }
    assert(elems_ == count);
    delete[] list_;
    list_ = new_list;
    length_ = new_length;
  }
};
// A single shard of the sharded LRU cache.  Thread-safe: all operations
// that touch shared state take mutex_.
class LRUCache {
 public:
  LRUCache();
  ~LRUCache();

  // Separate from the constructor so the caller can create an array of
  // shards and size them afterwards (see ShardedLRUCache).
  void SetCapacity(size_t capacity) { capacity_ = capacity; }

  // Like the Cache interface methods, but taking a precomputed key hash.
  Cache::Handle* Insert(const Slice& key, uint32_t hash, void* value,
                        size_t charge,
                        void (*deleter)(const Slice& key, void* value));
  Cache::Handle* Lookup(const Slice& key, uint32_t hash);
  void Release(Cache::Handle* handle);
  void Erase(const Slice& key, uint32_t hash);
  void Prune();
  size_t TotalCharge() const {
    MutexLock l(&mutex_);
    return usage_;
  }

 private:
  void LRU_Remove(LRUHandle* e);
  void LRU_Append(LRUHandle* list, LRUHandle* e);
  void Ref(LRUHandle* e);
  void Unref(LRUHandle* e);
  bool FinishErase(LRUHandle* e) EXCLUSIVE_LOCKS_REQUIRED(mutex_);

  // Set via SetCapacity before use.
  size_t capacity_;

  // mutex_ protects the state below.
  mutable port::Mutex mutex_;
  size_t usage_ GUARDED_BY(mutex_);

  // Dummy head of the LRU list: entries cached but not in active client
  // use (refs == 1, in_cache == true; see Unref).  lru_.next is oldest.
  LRUHandle lru_ GUARDED_BY(mutex_);

  // Dummy head of the in-use list: entries currently referenced by clients
  // in addition to the cache itself (refs >= 2, in_cache == true; see Ref).
  LRUHandle in_use_ GUARDED_BY(mutex_);

  HandleTable table_ GUARDED_BY(mutex_);
};
// Both list heads start as empty circular lists pointing at themselves.
LRUCache::LRUCache() : capacity_(0), usage_(0) {
  lru_.next = &lru_;
  lru_.prev = &lru_;
  in_use_.next = &in_use_;
  in_use_.prev = &in_use_;
}
LRUCache::~LRUCache() {
  // Error to destroy the cache while a caller still holds a handle.
  assert(in_use_.next == &in_use_);
  for (LRUHandle* e = lru_.next; e != &lru_;) {
    LRUHandle* next = e->next;
    assert(e->in_cache);
    e->in_cache = false;
    // Invariant of the lru_ list: only the cache's own reference remains.
    assert(e->refs == 1);
    Unref(e);
    e = next;
  }
}
// Adds a reference to e.  If the entry was idle on the lru_ list (held only
// by the cache), promote it to the in_use_ list first.
void LRUCache::Ref(LRUHandle* e) {
  const bool idle_in_cache = (e->refs == 1) && e->in_cache;
  if (idle_in_cache) {
    LRU_Remove(e);
    LRU_Append(&in_use_, e);
  }
  ++e->refs;
}
// Drops one reference to e.  Frees the entry when no references remain;
// demotes it from in_use_ back to lru_ when only the cache's reference
// is left.
void LRUCache::Unref(LRUHandle* e) {
  assert(e->refs > 0);
  e->refs--;
  if (e->refs == 0) {
    // Fully released entries must already have been removed from the table.
    assert(!e->in_cache);
    (*e->deleter)(e->key(), e->value);
    free(e);  // entries are malloc()ed in Insert()
  } else if (e->in_cache && e->refs == 1) {
    // No longer in active use; becomes eligible for eviction.
    LRU_Remove(e);
    LRU_Append(&lru_, e);
  }
}
// Unlinks e from whichever circular list it is currently on.
void LRUCache::LRU_Remove(LRUHandle* e) {
  LRUHandle* const before = e->prev;
  LRUHandle* const after = e->next;
  before->next = after;
  after->prev = before;
}
// Links e into the circular list headed by `list`, immediately before the
// dummy head — i.e. as the newest entry.
void LRUCache::LRU_Append(LRUHandle* list, LRUHandle* e) {
  LRUHandle* const newest = list->prev;
  e->next = list;
  e->prev = newest;
  newest->next = e;
  list->prev = e;
}
// Looks up (key, hash).  On a hit, takes a reference on behalf of the
// caller, who must later call Release().  Returns nullptr on a miss.
Cache::Handle* LRUCache::Lookup(const Slice& key, uint32_t hash) {
  MutexLock l(&mutex_);
  LRUHandle* const entry = table_.Lookup(key, hash);
  if (entry == nullptr) {
    return nullptr;
  }
  Ref(entry);
  return reinterpret_cast<Cache::Handle*>(entry);
}
// Releases a handle previously returned by Lookup() or Insert().
void LRUCache::Release(Cache::Handle* handle) {
  LRUHandle* const entry = reinterpret_cast<LRUHandle*>(handle);
  MutexLock l(&mutex_);
  Unref(entry);
}
// Inserts (key, value) with the given charge and returns a handle the
// caller must Release().  Any previous entry with the same key is evicted.
// If the cache exceeds capacity afterwards, idle entries are evicted in
// LRU order; in-use entries are never evicted.
Cache::Handle* LRUCache::Insert(const Slice& key, uint32_t hash, void* value,
                                size_t charge,
                                void (*deleter)(const Slice& key,
                                                void* value)) {
  MutexLock l(&mutex_);

  // -1 because LRUHandle::key_data[1] already accounts for one key byte.
  LRUHandle* e =
      reinterpret_cast<LRUHandle*>(malloc(sizeof(LRUHandle) - 1 + key.size()));
  e->value = value;
  e->deleter = deleter;
  e->charge = charge;
  e->key_length = key.size();
  e->hash = hash;
  e->in_cache = false;
  e->refs = 1;  // for the returned handle.
  std::memcpy(e->key_data, key.data(), key.size());

  if (capacity_ > 0) {
    e->refs++;  // for the cache's own reference.
    e->in_cache = true;
    LRU_Append(&in_use_, e);
    usage_ += charge;
    // Displace any previous entry with the same key.
    FinishErase(table_.Insert(e));
  } else {
    // capacity_ == 0 turns caching off; the entry is only handed to the
    // caller.  next must still be initialized: key() asserts on it.
    e->next = nullptr;
  }
  // Evict idle entries (oldest first) until within capacity.
  while (usage_ > capacity_ && lru_.next != &lru_) {
    LRUHandle* old = lru_.next;
    assert(old->refs == 1);
    bool erased = FinishErase(table_.Remove(old->key(), old->hash));
    if (!erased) {  // avoids an unused-variable warning under NDEBUG
      assert(erased);
    }
  }

  return reinterpret_cast<Cache::Handle*>(e);
}
// Finishes removing *e from the cache after it has already been removed
// from the hash table; a no-op for nullptr.  Returns e != nullptr.
bool LRUCache::FinishErase(LRUHandle* e) {
  if (e != nullptr) {
    assert(e->in_cache);
    LRU_Remove(e);
    e->in_cache = false;
    usage_ -= e->charge;
    Unref(e);  // drops the cache's own reference
  }
  return e != nullptr;
}
void LRUCache::Erase(const Slice& key, uint32_t hash) {
MutexLock l(&mutex_);
FinishErase(table_.Remove(key, hash));
}
// Evicts every idle entry (everything on the lru_ list); in-use entries
// are untouched.
void LRUCache::Prune() {
  MutexLock l(&mutex_);
  while (lru_.next != &lru_) {
    LRUHandle* e = lru_.next;
    assert(e->refs == 1);
    bool erased = FinishErase(table_.Remove(e->key(), e->hash));
    if (!erased) {  // avoids an unused-variable warning under NDEBUG
      assert(erased);
    }
  }
}
// The cache is split into 1 << kNumShardBits independently locked shards
// to reduce mutex contention; see ShardedLRUCache::Shard.
static const int kNumShardBits = 4;
static const int kNumShards = 1 << kNumShardBits;
// Cache implementation that routes each key to one of kNumShards LRUCache
// shards by the top bits of the key hash, so concurrent operations on
// different shards never contend on the same mutex.
class ShardedLRUCache : public Cache {
 private:
  LRUCache shard_[kNumShards];
  port::Mutex id_mutex_;  // serializes NewId()
  uint64_t last_id_;

  static inline uint32_t HashSlice(const Slice& s) {
    return Hash(s.data(), s.size(), 0);
  }

  // Shard index is the top kNumShardBits bits of the hash.
  static uint32_t Shard(uint32_t hash) { return hash >> (32 - kNumShardBits); }

 public:
  explicit ShardedLRUCache(size_t capacity) : last_id_(0) {
    // Round up so the shards' combined capacity is >= the requested total.
    const size_t per_shard = (capacity + (kNumShards - 1)) / kNumShards;
    for (int s = 0; s < kNumShards; s++) {
      shard_[s].SetCapacity(per_shard);
    }
  }
  ~ShardedLRUCache() override {}
  Handle* Insert(const Slice& key, void* value, size_t charge,
                 void (*deleter)(const Slice& key, void* value)) override {
    const uint32_t hash = HashSlice(key);
    return shard_[Shard(hash)].Insert(key, hash, value, charge, deleter);
  }
  Handle* Lookup(const Slice& key) override {
    const uint32_t hash = HashSlice(key);
    return shard_[Shard(hash)].Lookup(key, hash);
  }
  void Release(Handle* handle) override {
    // The stored hash avoids rehashing the key to find the owning shard.
    LRUHandle* h = reinterpret_cast<LRUHandle*>(handle);
    shard_[Shard(h->hash)].Release(handle);
  }
  void Erase(const Slice& key) override {
    const uint32_t hash = HashSlice(key);
    shard_[Shard(hash)].Erase(key, hash);
  }
  void* Value(Handle* handle) override {
    return reinterpret_cast<LRUHandle*>(handle)->value;
  }
  // Returns a numeric id unique across this cache instance.
  uint64_t NewId() override {
    MutexLock l(&id_mutex_);
    return ++(last_id_);
  }
  void Prune() override {
    for (int s = 0; s < kNumShards; s++) {
      shard_[s].Prune();
    }
  }
  size_t TotalCharge() const override {
    size_t total = 0;
    for (int s = 0; s < kNumShards; s++) {
      total += shard_[s].TotalCharge();
    }
    return total;
  }
};
}
// Public factory: a sharded LRU cache with the given total capacity.
Cache* NewLRUCache(size_t capacity) { return new ShardedLRUCache(capacity); }
} | #include "leveldb/cache.h"
#include <vector>
#include "gtest/gtest.h"
#include "util/coding.h"
namespace leveldb {
static std::string EncodeKey(int k) {
std::string result;
PutFixed32(&result, k);
return result;
}
static int DecodeKey(const Slice& k) {
assert(k.size() == 4);
return DecodeFixed32(k.data());
}
static void* EncodeValue(uintptr_t v) { return reinterpret_cast<void*>(v); }
static int DecodeValue(void* v) { return reinterpret_cast<uintptr_t>(v); }
class CacheTest : public testing::Test {
public:
static void Deleter(const Slice& key, void* v) {
current_->deleted_keys_.push_back(DecodeKey(key));
current_->deleted_values_.push_back(DecodeValue(v));
}
static constexpr int kCacheSize = 1000;
std::vector<int> deleted_keys_;
std::vector<int> deleted_values_;
Cache* cache_;
CacheTest() : cache_(NewLRUCache(kCacheSize)) { current_ = this; }
~CacheTest() { delete cache_; }
int Lookup(int key) {
Cache::Handle* handle = cache_->Lookup(EncodeKey(key));
const int r = (handle == nullptr) ? -1 : DecodeValue(cache_->Value(handle));
if (handle != nullptr) {
cache_->Release(handle);
}
return r;
}
void Insert(int key, int value, int charge = 1) {
cache_->Release(cache_->Insert(EncodeKey(key), EncodeValue(value), charge,
&CacheTest::Deleter));
}
Cache::Handle* InsertAndReturnHandle(int key, int value, int charge = 1) {
return cache_->Insert(EncodeKey(key), EncodeValue(value), charge,
&CacheTest::Deleter);
}
void Erase(int key) { cache_->Erase(EncodeKey(key)); }
static CacheTest* current_;
};
CacheTest* CacheTest::current_;
TEST_F(CacheTest, HitAndMiss) {
ASSERT_EQ(-1, Lookup(100));
Insert(100, 101);
ASSERT_EQ(101, Lookup(100));
ASSERT_EQ(-1, Lookup(200));
ASSERT_EQ(-1, Lookup(300));
Insert(200, 201);
ASSERT_EQ(101, Lookup(100));
ASSERT_EQ(201, Lookup(200));
ASSERT_EQ(-1, Lookup(300));
Insert(100, 102);
ASSERT_EQ(102, Lookup(100));
ASSERT_EQ(201, Lookup(200));
ASSERT_EQ(-1, Lookup(300));
ASSERT_EQ(1, deleted_keys_.size());
ASSERT_EQ(100, deleted_keys_[0]);
ASSERT_EQ(101, deleted_values_[0]);
}
TEST_F(CacheTest, Erase) {
Erase(200);
ASSERT_EQ(0, deleted_keys_.size());
Insert(100, 101);
Insert(200, 201);
Erase(100);
ASSERT_EQ(-1, Lookup(100));
ASSERT_EQ(201, Lookup(200));
ASSERT_EQ(1, deleted_keys_.size());
ASSERT_EQ(100, deleted_keys_[0]);
ASSERT_EQ(101, deleted_values_[0]);
Erase(100);
ASSERT_EQ(-1, Lookup(100));
ASSERT_EQ(201, Lookup(200));
ASSERT_EQ(1, deleted_keys_.size());
}
TEST_F(CacheTest, EntriesArePinned) {
Insert(100, 101);
Cache::Handle* h1 = cache_->Lookup(EncodeKey(100));
ASSERT_EQ(101, DecodeValue(cache_->Value(h1)));
Insert(100, 102);
Cache::Handle* h2 = cache_->Lookup(EncodeKey(100));
ASSERT_EQ(102, DecodeValue(cache_->Value(h2)));
ASSERT_EQ(0, deleted_keys_.size());
cache_->Release(h1);
ASSERT_EQ(1, deleted_keys_.size());
ASSERT_EQ(100, deleted_keys_[0]);
ASSERT_EQ(101, deleted_values_[0]);
Erase(100);
ASSERT_EQ(-1, Lookup(100));
ASSERT_EQ(1, deleted_keys_.size());
cache_->Release(h2);
ASSERT_EQ(2, deleted_keys_.size());
ASSERT_EQ(100, deleted_keys_[1]);
ASSERT_EQ(102, deleted_values_[1]);
}
TEST_F(CacheTest, EvictionPolicy) {
Insert(100, 101);
Insert(200, 201);
Insert(300, 301);
Cache::Handle* h = cache_->Lookup(EncodeKey(300));
for (int i = 0; i < kCacheSize + 100; i++) {
Insert(1000 + i, 2000 + i);
ASSERT_EQ(2000 + i, Lookup(1000 + i));
ASSERT_EQ(101, Lookup(100));
}
ASSERT_EQ(101, Lookup(100));
ASSERT_EQ(-1, Lookup(200));
ASSERT_EQ(301, Lookup(300));
cache_->Release(h);
}
TEST_F(CacheTest, UseExceedsCacheSize) {
std::vector<Cache::Handle*> h;
for (int i = 0; i < kCacheSize + 100; i++) {
h.push_back(InsertAndReturnHandle(1000 + i, 2000 + i));
}
for (int i = 0; i < h.size(); i++) {
ASSERT_EQ(2000 + i, Lookup(1000 + i));
}
for (int i = 0; i < h.size(); i++) {
cache_->Release(h[i]);
}
}
TEST_F(CacheTest, HeavyEntries) {
const int kLight = 1;
const int kHeavy = 10;
int added = 0;
int index = 0;
while (added < 2 * kCacheSize) {
const int weight = (index & 1) ? kLight : kHeavy;
Insert(index, 1000 + index, weight);
added += weight;
index++;
}
int cached_weight = 0;
for (int i = 0; i < index; i++) {
const int weight = (i & 1 ? kLight : kHeavy);
int r = Lookup(i);
if (r >= 0) {
cached_weight += weight;
ASSERT_EQ(1000 + i, r);
}
}
ASSERT_LE(cached_weight, kCacheSize + kCacheSize / 10);
}
TEST_F(CacheTest, NewId) {
uint64_t a = cache_->NewId();
uint64_t b = cache_->NewId();
ASSERT_NE(a, b);
}
TEST_F(CacheTest, Prune) {
Insert(1, 100);
Insert(2, 200);
Cache::Handle* handle = cache_->Lookup(EncodeKey(1));
ASSERT_TRUE(handle);
cache_->Prune();
cache_->Release(handle);
ASSERT_EQ(100, Lookup(1));
ASSERT_EQ(-1, Lookup(2));
}
TEST_F(CacheTest, ZeroSizeCache) {
delete cache_;
cache_ = NewLRUCache(0);
Insert(1, 100);
ASSERT_EQ(-1, Lookup(1));
}
} | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/util/cache.cc | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/util/cache_test.cc | 23e35d792b9154f922b8b575b12596a4d8664c65 |
90432f51-de56-4e88-bba2-65270ed31614 | cpp | google/leveldb | env_windows | util/env_windows.cc | util/env_windows_test.cc | #ifndef NOMINMAX
#define NOMINMAX
#endif
#include <windows.h>
#include <algorithm>
#include <atomic>
#include <chrono>
#include <condition_variable>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <memory>
#include <mutex>
#include <queue>
#include <sstream>
#include <string>
#include <vector>
#include "leveldb/env.h"
#include "leveldb/slice.h"
#include "port/port.h"
#include "port/thread_annotations.h"
#include "util/env_windows_test_helper.h"
#include "util/logging.h"
#include "util/mutexlock.h"
#include "util/windows_logger.h"
namespace leveldb {
namespace {
// Size of the application-side write buffer used by WindowsWritableFile.
constexpr const size_t kWritableFileBufferSize = 65536;

// Default cap on concurrently mmapped regions: up to 1000 on 64-bit builds,
// 0 (mmap disabled) on 32-bit builds where address space is scarce.
constexpr int kDefaultMmapLimit = (sizeof(void*) >= 8) ? 1000 : 0;

// Effective mmap limit.  NOTE(review): presumably overridable by tests via
// EnvWindowsTestHelper (see include above) — confirm against that header.
int g_mmap_limit = kDefaultMmapLimit;
// Returns a human-readable message for a Windows API error code, or an
// empty string if FormatMessage itself fails.
std::string GetWindowsErrorMessage(DWORD error_code) {
  std::string message;
  char* error_text = nullptr;
  // Let FormatMessage allocate the buffer; it must be freed with LocalFree.
  size_t error_text_size = ::FormatMessageA(
      FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_ALLOCATE_BUFFER |
          FORMAT_MESSAGE_IGNORE_INSERTS,
      nullptr, error_code, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
      reinterpret_cast<char*>(&error_text), 0, nullptr);
  if (!error_text) {
    return message;
  }
  message.assign(error_text, error_text_size);
  ::LocalFree(error_text);
  return message;
}
// Maps a Windows error code to a leveldb Status: file/path-not-found codes
// become NotFound, everything else IOError.
Status WindowsError(const std::string& context, DWORD error_code) {
  const bool not_found = (error_code == ERROR_FILE_NOT_FOUND) ||
                         (error_code == ERROR_PATH_NOT_FOUND);
  const std::string message = GetWindowsErrorMessage(error_code);
  if (not_found) {
    return Status::NotFound(context, message);
  }
  return Status::IOError(context, message);
}
// RAII wrapper around a Windows HANDLE.  Movable, not copyable; the owned
// handle is closed on destruction unless released with Release().
class ScopedHandle {
 public:
  // Intentionally implicit: allows `ScopedHandle h = ::CreateFileA(...);`.
  ScopedHandle(HANDLE handle) : handle_(handle) {}

  ScopedHandle(const ScopedHandle&) = delete;
  ScopedHandle(ScopedHandle&& other) noexcept : handle_(other.Release()) {}

  ~ScopedHandle() { Close(); }

  ScopedHandle& operator=(const ScopedHandle&) = delete;

  ScopedHandle& operator=(ScopedHandle&& rhs) noexcept {
    if (this != &rhs) {
      // Fix: close the currently owned handle before adopting rhs's;
      // overwriting handle_ directly would leak an open HANDLE.
      Close();
      handle_ = rhs.Release();
    }
    return *this;
  }

  // Closes the owned handle, if any.  Returns false only if ::CloseHandle
  // fails on a valid handle.
  bool Close() {
    if (!is_valid()) {
      return true;
    }
    HANDLE h = handle_;
    handle_ = INVALID_HANDLE_VALUE;
    return ::CloseHandle(h);
  }

  // True iff this object currently owns an open handle.
  bool is_valid() const {
    return handle_ != INVALID_HANDLE_VALUE && handle_ != nullptr;
  }

  HANDLE get() const { return handle_; }

  // Relinquishes ownership; the caller becomes responsible for closing.
  HANDLE Release() {
    HANDLE h = handle_;
    handle_ = INVALID_HANDLE_VALUE;
    return h;
  }

 private:
  HANDLE handle_;
};
// Bounds resource usage (e.g. open read-only file descriptors or mmap
// regions) so the process does not run out.  Thread-safe via a single
// atomic counter of remaining permits.
class Limiter {
 public:
  // Up to |max_acquires| permits may be outstanding at once.
  Limiter(int max_acquires)
      :
#if !defined(NDEBUG)
        max_acquires_(max_acquires),
#endif
        acquires_allowed_(max_acquires) {
    assert(max_acquires >= 0);
  }

  Limiter(const Limiter&) = delete;
  Limiter operator=(const Limiter&) = delete;

  // Takes a permit if one is available; returns whether it succeeded.
  bool Acquire() {
    const int prior = acquires_allowed_.fetch_sub(1, std::memory_order_relaxed);
    if (prior > 0) {
      return true;
    }
    // Over-subscribed: undo the optimistic decrement.
    acquires_allowed_.fetch_add(1, std::memory_order_relaxed);
    return false;
  }

  // Returns a permit obtained from a previous successful Acquire().
  void Release() {
    const int prior = acquires_allowed_.fetch_add(1, std::memory_order_relaxed);
    (void)prior;
    // Releasing more than was acquired is a caller bug.
    assert(prior < max_acquires_);
  }

 private:
#if !defined(NDEBUG)
  // Only used to sanity-check Release() in debug builds.
  const int max_acquires_;
#endif

  // Number of permits still available; may transiently dip below zero
  // inside Acquire() before the failed decrement is undone.
  std::atomic<int> acquires_allowed_;
};
// SequentialFile backed by a Windows HANDLE; owns (and closes) the handle.
class WindowsSequentialFile : public SequentialFile {
 public:
  WindowsSequentialFile(std::string filename, ScopedHandle handle)
      : handle_(std::move(handle)), filename_(std::move(filename)) {}
  ~WindowsSequentialFile() override {}

  // Reads up to n bytes into scratch; *result aliases scratch.  A short
  // read (including 0 bytes at EOF) is not reported as an error.
  Status Read(size_t n, Slice* result, char* scratch) override {
    DWORD bytes_read;
    // DWORD is 32-bit while size_t may be wider.
    assert(n <= std::numeric_limits<DWORD>::max());
    if (!::ReadFile(handle_.get(), scratch, static_cast<DWORD>(n), &bytes_read,
                    nullptr)) {
      return WindowsError(filename_, ::GetLastError());
    }
    *result = Slice(scratch, bytes_read);
    return Status::OK();
  }

  // Advances the file pointer by n bytes.
  Status Skip(uint64_t n) override {
    LARGE_INTEGER distance;
    distance.QuadPart = n;
    if (!::SetFilePointerEx(handle_.get(), distance, nullptr, FILE_CURRENT)) {
      return WindowsError(filename_, ::GetLastError());
    }
    return Status::OK();
  }

 private:
  const ScopedHandle handle_;
  const std::string filename_;  // retained for error messages
};
// RandomAccessFile using positional reads: the offset is passed through the
// OVERLAPPED structure, so concurrent Read() calls do not share a file
// pointer.
class WindowsRandomAccessFile : public RandomAccessFile {
 public:
  WindowsRandomAccessFile(std::string filename, ScopedHandle handle)
      : handle_(std::move(handle)), filename_(std::move(filename)) {}

  ~WindowsRandomAccessFile() override = default;

  Status Read(uint64_t offset, size_t n, Slice* result,
              char* scratch) const override {
    DWORD bytes_read = 0;
    OVERLAPPED overlapped = {0};

    // Split the 64-bit offset across the OVERLAPPED fields.
    overlapped.OffsetHigh = static_cast<DWORD>(offset >> 32);
    overlapped.Offset = static_cast<DWORD>(offset);
    if (!::ReadFile(handle_.get(), scratch, static_cast<DWORD>(n), &bytes_read,
                    &overlapped)) {
      DWORD error_code = ::GetLastError();
      // Reading at/past EOF is not an error; fall through and return the
      // (possibly zero) bytes actually read.
      if (error_code != ERROR_HANDLE_EOF) {
        *result = Slice(scratch, 0);
        return Status::IOError(filename_, GetWindowsErrorMessage(error_code));
      }
    }

    *result = Slice(scratch, bytes_read);
    return Status::OK();
  }

 private:
  const ScopedHandle handle_;
  const std::string filename_;  // retained for error messages
};
// RandomAccessFile that serves reads directly out of a memory-mapped view;
// the returned Slice aliases the mapping, not the caller's scratch buffer.
// Unmaps the view and returns its Limiter permit on destruction.
class WindowsMmapReadableFile : public RandomAccessFile {
 public:
  // mmap_base[0, length-1] is the file's mapped contents.
  // NOTE(review): presumably the result of a successful ::MapViewOfFile
  // (the destructor calls ::UnmapViewOfFile) — confirm at the call site.
  WindowsMmapReadableFile(std::string filename, char* mmap_base, size_t length,
                          Limiter* mmap_limiter)
      : mmap_base_(mmap_base),
        length_(length),
        mmap_limiter_(mmap_limiter),
        filename_(std::move(filename)) {}

  ~WindowsMmapReadableFile() override {
    ::UnmapViewOfFile(mmap_base_);
    mmap_limiter_->Release();
  }

  Status Read(uint64_t offset, size_t n, Slice* result,
              char* scratch) const override {
    // Bounds check.  NOTE(review): offset + n could wrap for offsets near
    // UINT64_MAX; assumes callers pass in-file offsets — confirm.
    if (offset + n > length_) {
      *result = Slice();
      return WindowsError(filename_, ERROR_INVALID_PARAMETER);
    }

    // Zero-copy read from the mapping.
    *result = Slice(mmap_base_ + offset, n);
    return Status::OK();
  }

 private:
  char* const mmap_base_;
  const size_t length_;
  Limiter* const mmap_limiter_;
  const std::string filename_;  // retained for error messages
};
// WritableFile with an application-side buffer (kWritableFileBufferSize
// bytes) in front of WriteFile, so many small appends become one syscall.
class WindowsWritableFile : public WritableFile {
 public:
  WindowsWritableFile(std::string filename, ScopedHandle handle)
      : pos_(0), handle_(std::move(handle)), filename_(std::move(filename)) {}

  ~WindowsWritableFile() override = default;

  Status Append(const Slice& data) override {
    size_t write_size = data.size();
    const char* write_data = data.data();

    // Fit as much as possible into the buffer.
    size_t copy_size = std::min(write_size, kWritableFileBufferSize - pos_);
    std::memcpy(buf_ + pos_, write_data, copy_size);
    write_data += copy_size;
    write_size -= copy_size;
    pos_ += copy_size;
    if (write_size == 0) {
      return Status::OK();
    }

    // Can't fit everything; flush the buffer to make room.
    Status status = FlushBuffer();
    if (!status.ok()) {
      return status;
    }

    // Small remainders go back into the buffer; large ones are written
    // directly to avoid an extra copy.
    if (write_size < kWritableFileBufferSize) {
      std::memcpy(buf_, write_data, write_size);
      pos_ = write_size;
      return Status::OK();
    }
    return WriteUnbuffered(write_data, write_size);
  }

  Status Close() override {
    Status status = FlushBuffer();
    // Keep the first error: only report the CloseHandle failure if the
    // flush succeeded.
    if (!handle_.Close() && status.ok()) {
      status = WindowsError(filename_, ::GetLastError());
    }
    return status;
  }

  Status Flush() override { return FlushBuffer(); }

  // Flushes buffered data and asks the OS to commit it to the device.
  Status Sync() override {
    Status status = FlushBuffer();
    if (!status.ok()) {
      return status;
    }
    if (!::FlushFileBuffers(handle_.get())) {
      return Status::IOError(filename_,
                             GetWindowsErrorMessage(::GetLastError()));
    }
    return Status::OK();
  }

 private:
  // Writes the buffered bytes; pos_ is reset even if the write failed.
  Status FlushBuffer() {
    Status status = WriteUnbuffered(buf_, pos_);
    pos_ = 0;
    return status;
  }

  // Writes `size` bytes from `data` directly through WriteFile.
  Status WriteUnbuffered(const char* data, size_t size) {
    DWORD bytes_written;
    if (!::WriteFile(handle_.get(), data, static_cast<DWORD>(size),
                     &bytes_written, nullptr)) {
      return Status::IOError(filename_,
                             GetWindowsErrorMessage(::GetLastError()));
    }
    return Status::OK();
  }

  // buf_[0, pos_-1] holds data not yet handed to the OS.
  char buf_[kWritableFileBufferSize];
  size_t pos_;

  ScopedHandle handle_;
  const std::string filename_;  // retained for error messages
};
// Locks (lock == true) or unlocks (lock == false) the file behind
// |handle| via the Win32 byte-range locking API. Returns true on success.
bool LockOrUnlock(HANDLE handle, bool lock) {
  // Use a zero offset and the maximal MAXDWORD x MAXDWORD range so the
  // entire file is covered by the (un)lock request.
  const DWORD offset_low = 0;
  const DWORD offset_high = 0;
  BOOL ok = lock
                ? ::LockFile(handle, offset_low, offset_high,
                             /*nNumberOfBytesToLockLow=*/MAXDWORD,
                             /*nNumberOfBytesToLockHigh=*/MAXDWORD)
                : ::UnlockFile(handle, offset_low, offset_high,
                               /*nNumberOfBytesToUnlockLow=*/MAXDWORD,
                               /*nNumberOfBytesToUnlockHigh=*/MAXDWORD);
  return ok != 0;
}
// FileLock token returned by WindowsEnv::LockFile(). Holds the locked
// file's handle open for the lifetime of the lock; UnlockFile() reads the
// handle and filename back through the accessors below.
class WindowsFileLock : public FileLock {
 public:
  WindowsFileLock(ScopedHandle handle, std::string filename)
      : handle_(std::move(handle)), filename_(std::move(filename)) {}

  const ScopedHandle& handle() const { return handle_; }
  const std::string& filename() const { return filename_; }

 private:
  const ScopedHandle handle_;   // Owns the locked file's handle.
  const std::string filename_;  // For error messages in UnlockFile().
};
// Env implementation built on the Win32 file and threading APIs. A single
// process-wide instance is created by Env::Default() and never destroyed.
class WindowsEnv : public Env {
 public:
  WindowsEnv();
  // The singleton Env is never supposed to be destroyed; dying here makes
  // an accidental destruction loud instead of silently UB.
  ~WindowsEnv() override {
    static const char msg[] =
        "WindowsEnv singleton destroyed. Unsupported behavior!\n";
    std::fwrite(msg, 1, sizeof(msg), stderr);
    std::abort();
  }

  // Opens |filename| for forward-only reads.
  Status NewSequentialFile(const std::string& filename,
                           SequentialFile** result) override {
    *result = nullptr;
    DWORD desired_access = GENERIC_READ;
    // Other processes may read the file concurrently.
    DWORD share_mode = FILE_SHARE_READ;
    ScopedHandle handle = ::CreateFileA(
        filename.c_str(), desired_access, share_mode,
        /*lpSecurityAttributes=*/nullptr, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL,
        /*hTemplateFile=*/nullptr);
    if (!handle.is_valid()) {
      return WindowsError(filename, ::GetLastError());
    }

    *result = new WindowsSequentialFile(filename, std::move(handle));
    return Status::OK();
  }

  // Opens |filename| for random reads. Prefers a memory-mapped file while
  // mmap_limiter_ still has budget; otherwise (or if mapping fails) falls
  // back to a handle-based WindowsRandomAccessFile.
  Status NewRandomAccessFile(const std::string& filename,
                             RandomAccessFile** result) override {
    *result = nullptr;
    DWORD desired_access = GENERIC_READ;
    DWORD share_mode = FILE_SHARE_READ;
    ScopedHandle handle =
        ::CreateFileA(filename.c_str(), desired_access, share_mode,
                      /*lpSecurityAttributes=*/nullptr, OPEN_EXISTING,
                      FILE_ATTRIBUTE_READONLY,
                      /*hTemplateFile=*/nullptr);
    if (!handle.is_valid()) {
      return WindowsError(filename, ::GetLastError());
    }
    // No mmap budget left: serve reads through the plain handle.
    if (!mmap_limiter_.Acquire()) {
      *result = new WindowsRandomAccessFile(filename, std::move(handle));
      return Status::OK();
    }

    LARGE_INTEGER file_size;
    Status status;
    if (!::GetFileSizeEx(handle.get(), &file_size)) {
      mmap_limiter_.Release();
      return WindowsError(filename, ::GetLastError());
    }

    // Map the whole file read-only (zero max-size means "entire file").
    ScopedHandle mapping =
        ::CreateFileMappingA(handle.get(),
                             /*security attributes=*/nullptr, PAGE_READONLY,
                             /*dwMaximumSizeHigh=*/0,
                             /*dwMaximumSizeLow=*/0,
                             /*lpName=*/nullptr);
    if (mapping.is_valid()) {
      void* mmap_base = ::MapViewOfFile(mapping.get(), FILE_MAP_READ,
                                        /*dwFileOffsetHigh=*/0,
                                        /*dwFileOffsetLow=*/0,
                                        /*dwNumberOfBytesToMap=*/0);
      if (mmap_base) {
        *result = new WindowsMmapReadableFile(
            filename, reinterpret_cast<char*>(mmap_base),
            static_cast<size_t>(file_size.QuadPart), &mmap_limiter_);
        return Status::OK();
      }
    }
    // Mapping failed; give the mmap slot back and report the error.
    mmap_limiter_.Release();
    return WindowsError(filename, ::GetLastError());
  }

  // Creates (truncating any existing file) |filename| for writing.
  Status NewWritableFile(const std::string& filename,
                         WritableFile** result) override {
    DWORD desired_access = GENERIC_WRITE;
    DWORD share_mode = 0;  // Exclusive access.
    ScopedHandle handle = ::CreateFileA(
        filename.c_str(), desired_access, share_mode,
        /*lpSecurityAttributes=*/nullptr, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL,
        /*hTemplateFile=*/nullptr);
    if (!handle.is_valid()) {
      *result = nullptr;
      return WindowsError(filename, ::GetLastError());
    }

    *result = new WindowsWritableFile(filename, std::move(handle));
    return Status::OK();
  }

  // Opens (creating if needed, never truncating) |filename| so writes are
  // appended at the end of the file.
  Status NewAppendableFile(const std::string& filename,
                           WritableFile** result) override {
    DWORD desired_access = FILE_APPEND_DATA;
    DWORD share_mode = 0;  // Exclusive access.
    ScopedHandle handle = ::CreateFileA(
        filename.c_str(), desired_access, share_mode,
        /*lpSecurityAttributes=*/nullptr, OPEN_ALWAYS, FILE_ATTRIBUTE_NORMAL,
        /*hTemplateFile=*/nullptr);
    if (!handle.is_valid()) {
      *result = nullptr;
      return WindowsError(filename, ::GetLastError());
    }

    *result = new WindowsWritableFile(filename, std::move(handle));
    return Status::OK();
  }

  bool FileExists(const std::string& filename) override {
    return GetFileAttributesA(filename.c_str()) != INVALID_FILE_ATTRIBUTES;
  }

  // Appends to |*result| the base names of all entries in
  // |directory_path|. A missing directory yields OK with no entries.
  Status GetChildren(const std::string& directory_path,
                     std::vector<std::string>* result) override {
    const std::string find_pattern = directory_path + "\\*";
    WIN32_FIND_DATAA find_data;
    HANDLE dir_handle = ::FindFirstFileA(find_pattern.c_str(), &find_data);
    if (dir_handle == INVALID_HANDLE_VALUE) {
      DWORD last_error = ::GetLastError();
      if (last_error == ERROR_FILE_NOT_FOUND) {
        return Status::OK();
      }
      return WindowsError(directory_path, last_error);
    }
    do {
      char base_name[_MAX_FNAME];
      char ext[_MAX_EXT];

      // _splitpath_s returns zero on success, so the negation means
      // "splitting succeeded"; entries that fail to split are skipped.
      if (!_splitpath_s(find_data.cFileName, nullptr, 0, nullptr, 0, base_name,
                        ARRAYSIZE(base_name), ext, ARRAYSIZE(ext))) {
        result->emplace_back(std::string(base_name) + ext);
      }
    } while (::FindNextFileA(dir_handle, &find_data));
    // Capture the enumeration-end error before FindClose can clobber it.
    DWORD last_error = ::GetLastError();
    ::FindClose(dir_handle);
    if (last_error != ERROR_NO_MORE_FILES) {
      return WindowsError(directory_path, last_error);
    }
    return Status::OK();
  }

  Status RemoveFile(const std::string& filename) override {
    if (!::DeleteFileA(filename.c_str())) {
      return WindowsError(filename, ::GetLastError());
    }
    return Status::OK();
  }

  Status CreateDir(const std::string& dirname) override {
    if (!::CreateDirectoryA(dirname.c_str(), nullptr)) {
      return WindowsError(dirname, ::GetLastError());
    }
    return Status::OK();
  }

  Status RemoveDir(const std::string& dirname) override {
    if (!::RemoveDirectoryA(dirname.c_str())) {
      return WindowsError(dirname, ::GetLastError());
    }
    return Status::OK();
  }

  // Reads the file size from the directory metadata, without opening the
  // file.
  Status GetFileSize(const std::string& filename, uint64_t* size) override {
    WIN32_FILE_ATTRIBUTE_DATA file_attributes;
    if (!::GetFileAttributesExA(filename.c_str(), GetFileExInfoStandard,
                                &file_attributes)) {
      return WindowsError(filename, ::GetLastError());
    }
    // Reassemble the 64-bit size from its two 32-bit halves.
    ULARGE_INTEGER file_size;
    file_size.HighPart = file_attributes.nFileSizeHigh;
    file_size.LowPart = file_attributes.nFileSizeLow;
    *size = file_size.QuadPart;
    return Status::OK();
  }

  // Renames |from| to |to|. MoveFileA fails if |to| exists, so on failure
  // a ReplaceFileA (which overwrites the destination) is attempted; the
  // error reported depends on which of the two calls was the real cause.
  Status RenameFile(const std::string& from, const std::string& to) override {
    if (::MoveFileA(from.c_str(), to.c_str())) {
      return Status::OK();
    }
    DWORD move_error = ::GetLastError();

    if (::ReplaceFileA(to.c_str(), from.c_str(), /*lpBackupFileName=*/nullptr,
                       REPLACEFILE_IGNORE_MERGE_ERRORS,
                       /*lpExclude=*/nullptr, /*lpReserved=*/nullptr)) {
      return Status::OK();
    }
    DWORD replace_error = ::GetLastError();
    // ReplaceFile failing because |to| is missing means the original
    // MoveFile error is the interesting one.
    if (replace_error == ERROR_FILE_NOT_FOUND ||
        replace_error == ERROR_PATH_NOT_FOUND) {
      return WindowsError(from, move_error);
    } else {
      return WindowsError(from, replace_error);
    }
  }

  // Acquires an advisory whole-file lock on |filename|, creating the file
  // if needed. On success |*lock| owns both the handle and the lock.
  Status LockFile(const std::string& filename, FileLock** lock) override {
    *lock = nullptr;
    Status result;
    ScopedHandle handle = ::CreateFileA(
        filename.c_str(), GENERIC_READ | GENERIC_WRITE, FILE_SHARE_READ,
        /*lpSecurityAttributes=*/nullptr, OPEN_ALWAYS, FILE_ATTRIBUTE_NORMAL,
        nullptr);
    if (!handle.is_valid()) {
      result = WindowsError(filename, ::GetLastError());
    } else if (!LockOrUnlock(handle.get(), /*lock=*/true)) {
      result = WindowsError("lock " + filename, ::GetLastError());
    } else {
      *lock = new WindowsFileLock(std::move(handle), filename);
    }
    return result;
  }

  // Releases a lock obtained from LockFile() and deletes the token.
  Status UnlockFile(FileLock* lock) override {
    WindowsFileLock* windows_file_lock =
        reinterpret_cast<WindowsFileLock*>(lock);
    if (!LockOrUnlock(windows_file_lock->handle().get(), /*lock=*/false)) {
      return WindowsError("unlock " + windows_file_lock->filename(),
                          ::GetLastError());
    }
    delete windows_file_lock;
    return Status::OK();
  }

  void Schedule(void (*background_work_function)(void* background_work_arg),
                void* background_work_arg) override;

  // Runs |thread_main(thread_main_arg)| on a new, detached thread.
  void StartThread(void (*thread_main)(void* thread_main_arg),
                   void* thread_main_arg) override {
    std::thread new_thread(thread_main, thread_main_arg);
    new_thread.detach();
  }

  // Picks a per-thread scratch directory: TEST_TMPDIR if set, otherwise a
  // leveldbtest-<thread-id> directory under the system temp path. The
  // CreateDir result is deliberately ignored (the directory may exist).
  Status GetTestDirectory(std::string* result) override {
    const char* env = getenv("TEST_TMPDIR");
    if (env && env[0] != '\0') {
      *result = env;
      return Status::OK();
    }

    char tmp_path[MAX_PATH];
    if (!GetTempPathA(ARRAYSIZE(tmp_path), tmp_path)) {
      return WindowsError("GetTempPath", ::GetLastError());
    }
    std::stringstream ss;
    ss << tmp_path << "leveldbtest-" << std::this_thread::get_id();
    *result = ss.str();

    // Directory may already exist
    CreateDir(*result);
    return Status::OK();
  }

  // Opens |filename| for logging. Mode "wN": create/truncate; "N" is the
  // MSVC extension making the FILE's handle non-inheritable.
  Status NewLogger(const std::string& filename, Logger** result) override {
    std::FILE* fp = std::fopen(filename.c_str(), "wN");
    if (fp == nullptr) {
      *result = nullptr;
      return WindowsError(filename, ::GetLastError());
    } else {
      *result = new WindowsLogger(fp);
      return Status::OK();
    }
  }

  // Current wall-clock time. FILETIME counts 100-nanosecond ticks, so
  // dividing by 10 yields microseconds (epoch is the FILETIME epoch, not
  // Unix time; callers only use this for relative measurements).
  uint64_t NowMicros() override {
    FILETIME ft;
    ::GetSystemTimeAsFileTime(&ft);
    uint64_t num_ticks =
        (static_cast<uint64_t>(ft.dwHighDateTime) << 32) + ft.dwLowDateTime;
    return num_ticks / 10;
  }

  void SleepForMicroseconds(int micros) override {
    std::this_thread::sleep_for(std::chrono::microseconds(micros));
  }

 private:
  void BackgroundThreadMain();

  static void BackgroundThreadEntryPoint(WindowsEnv* env) {
    env->BackgroundThreadMain();
  }

  // One queued Schedule() request: constructed on the scheduling thread,
  // consumed on the background thread.
  struct BackgroundWorkItem {
    explicit BackgroundWorkItem(void (*function)(void* arg), void* arg)
        : function(function), arg(arg) {}

    void (*const function)(void*);
    void* const arg;
  };

  port::Mutex background_work_mutex_;
  port::CondVar background_work_cv_ GUARDED_BY(background_work_mutex_);
  bool started_background_thread_ GUARDED_BY(background_work_mutex_);

  std::queue<BackgroundWorkItem> background_work_queue_
      GUARDED_BY(background_work_mutex_);

  Limiter mmap_limiter_;  // Budget for WindowsMmapReadableFile instances.
};
int MaxMmaps() { return g_mmap_limit; }
// The condition variable is bound to the mutex at construction; the
// background worker thread itself is started lazily by Schedule().
WindowsEnv::WindowsEnv()
    : background_work_cv_(&background_work_mutex_),
      started_background_thread_(false),
      mmap_limiter_(MaxMmaps()) {}
// Queues background_work_function(background_work_arg) for execution on
// the single shared background thread, starting that thread on first use.
void WindowsEnv::Schedule(
    void (*background_work_function)(void* background_work_arg),
    void* background_work_arg) {
  background_work_mutex_.Lock();

  // Start the background thread, if we haven't done so already.
  if (!started_background_thread_) {
    started_background_thread_ = true;
    std::thread background_thread(WindowsEnv::BackgroundThreadEntryPoint, this);
    background_thread.detach();
  }

  // An empty queue means the background thread may be blocked in Wait();
  // wake it so it sees the item pushed below. Signaling while still
  // holding the mutex is safe: the waiter re-acquires it before running.
  if (background_work_queue_.empty()) {
    background_work_cv_.Signal();
  }

  background_work_queue_.emplace(background_work_function, background_work_arg);
  background_work_mutex_.Unlock();
}
// Body of the detached background thread: loops forever, waiting for
// queued work items and executing them one at a time.
void WindowsEnv::BackgroundThreadMain() {
  while (true) {
    background_work_mutex_.Lock();

    // Wait until there is work to be done.
    while (background_work_queue_.empty()) {
      background_work_cv_.Wait();
    }

    assert(!background_work_queue_.empty());
    auto background_work_function = background_work_queue_.front().function;
    void* background_work_arg = background_work_queue_.front().arg;
    background_work_queue_.pop();

    // Run the work item with the mutex released so callers can keep
    // queuing new work (and the work item itself may call Schedule()).
    background_work_mutex_.Unlock();
    background_work_function(background_work_arg);
  }
}
// Wraps an EnvType instance that is constructed on first use and is
// intentionally never destroyed: the object lives in raw aligned storage
// and no destructor ever runs, which avoids shutdown-order problems for
// the process-wide Env singleton.
template <typename EnvType>
class SingletonEnv {
 public:
  SingletonEnv() {
#if !defined(NDEBUG)
    // Record construction so AssertEnvNotInitialized() can catch
    // configuration changes made too late (debug builds only).
    env_initialized_.store(true, std::memory_order_relaxed);
#endif
    static_assert(sizeof(env_storage_) >= sizeof(EnvType),
                  "env_storage_ will not fit the Env");
    static_assert(alignof(decltype(env_storage_)) >= alignof(EnvType),
                  "env_storage_ does not meet the Env's alignment needs");
    // Placement-new into the raw buffer; there is no matching destructor
    // call anywhere, by design.
    new (&env_storage_) EnvType();
  }
  ~SingletonEnv() = default;

  SingletonEnv(const SingletonEnv&) = delete;
  SingletonEnv& operator=(const SingletonEnv&) = delete;

  Env* env() { return reinterpret_cast<Env*>(&env_storage_); }

  // Debug check used by configuration hooks that must run before the
  // singleton is first constructed.
  static void AssertEnvNotInitialized() {
#if !defined(NDEBUG)
    assert(!env_initialized_.load(std::memory_order_relaxed));
#endif
  }

 private:
  // Raw, suitably sized and aligned storage for the EnvType instance.
  typename std::aligned_storage<sizeof(EnvType), alignof(EnvType)>::type
      env_storage_;
#if !defined(NDEBUG)
  static std::atomic<bool> env_initialized_;
#endif
};
#if !defined(NDEBUG)
// Out-of-class definition of the debug-only initialization flag checked
// by SingletonEnv::AssertEnvNotInitialized().
template <typename EnvType>
std::atomic<bool> SingletonEnv<EnvType>::env_initialized_;
#endif
using WindowsDefaultEnv = SingletonEnv<WindowsEnv>;
}
// Test hook: overrides the mmap budget read by MaxMmaps(). Must be called
// before Env::Default() first constructs the Env (enforced in debug
// builds by AssertEnvNotInitialized()).
void EnvWindowsTestHelper::SetReadOnlyMMapLimit(int limit) {
  WindowsDefaultEnv::AssertEnvNotInitialized();
  g_mmap_limit = limit;
}
// Returns the process-wide Env. The function-local static is constructed
// on first use and never destroyed (see SingletonEnv).
Env* Env::Default() {
  static WindowsDefaultEnv env_container;
  return env_container.env();
}
} | #include "gtest/gtest.h"
#include "leveldb/env.h"
#include "port/port.h"
#include "util/env_windows_test_helper.h"
#include "util/testutil.h"
namespace leveldb {
static const int kMMapLimit = 4;
// Test fixture that binds each test to the process-wide default Env.
class EnvWindowsTest : public testing::Test {
 public:
  // Configures the mmap limit; must run before the Env singleton is first
  // constructed (it is invoked from main(), before RUN_ALL_TESTS).
  static void SetFileLimits(int mmap_limit) {
    EnvWindowsTestHelper::SetReadOnlyMMapLimit(mmap_limit);
  }

  EnvWindowsTest() : env_(Env::Default()) {}

  Env* env_;  // Not owned; points at the Env::Default() singleton.
};
// Opens more RandomAccessFile instances than the mmap budget allows and
// verifies reads still succeed once the Env falls back to non-mmap files.
TEST_F(EnvWindowsTest, TestOpenOnRead) {
  // Write a small test file via stdio.
  std::string test_dir;
  ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir));
  std::string test_file = test_dir + "/open_on_read.txt";

  FILE* f = std::fopen(test_file.c_str(), "w");
  ASSERT_TRUE(f != nullptr);
  const char kFileData[] = "abcdefghijklmnopqrstuvwxyz";
  fputs(kFileData, f);
  std::fclose(f);

  // Open the file more times than kMMapLimit permits, exhausting the mmap
  // budget and forcing later instances onto the fallback path.
  const int kNumFiles = kMMapLimit + 5;
  leveldb::RandomAccessFile* files[kNumFiles] = {0};
  for (int i = 0; i < kNumFiles; i++) {
    ASSERT_LEVELDB_OK(env_->NewRandomAccessFile(test_file, &files[i]));
  }

  // Each instance must read the correct byte at its own offset.
  char scratch;
  Slice read_result;
  for (int i = 0; i < kNumFiles; i++) {
    ASSERT_LEVELDB_OK(files[i]->Read(i, 1, &read_result, &scratch));
    ASSERT_EQ(kFileData[i], read_result[0]);
  }
  for (int i = 0; i < kNumFiles; i++) {
    delete files[i];
  }
  ASSERT_LEVELDB_OK(env_->RemoveFile(test_file));
}
}
int main(int argc, char** argv) {
leveldb::EnvWindowsTest::SetFileLimits(leveldb::kMMapLimit);
testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
} | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/util/env_windows.cc | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/util/env_windows_test.cc | 23e35d792b9154f922b8b575b12596a4d8664c65 |
f53ee2d1-19b2-4c39-b70c-c7eeea5c2694 | cpp | google/leveldb | logging | util/logging.cc | util/logging_test.cc | #include "util/logging.h"
#include <cstdarg>
#include <cstdio>
#include <cstdlib>
#include <limits>
#include "leveldb/env.h"
#include "leveldb/slice.h"
namespace leveldb {
// Appends the decimal representation of |num| to |*str|.
void AppendNumberTo(std::string* str, uint64_t num) {
  // std::to_string on an unsigned long long produces exactly the same
  // digits as the previous snprintf("%llu") into a fixed stack buffer,
  // without the hand-managed buffer.
  str->append(std::to_string(static_cast<unsigned long long>(num)));
}
// Appends a human-readable rendering of |value| to |*str|: printable
// ASCII bytes are copied through, everything else becomes a "\xNN" hex
// escape.
void AppendEscapedStringTo(std::string* str, const Slice& value) {
  for (size_t pos = 0; pos < value.size(); pos++) {
    const char ch = value[pos];
    // Bytes outside [' ', '~'] (including those that read as negative
    // char values) are escaped.
    const bool printable = (ch >= ' ') && (ch <= '~');
    if (printable) {
      str->push_back(ch);
    } else {
      char escape[10];
      std::snprintf(escape, sizeof(escape), "\\x%02x",
                    static_cast<unsigned int>(ch) & 0xff);
      str->append(escape);
    }
  }
}
// Returns the decimal representation of |num|, delegating to
// AppendNumberTo so both helpers share one formatting path.
std::string NumberToString(uint64_t num) {
  std::string result;
  AppendNumberTo(&result, num);
  return result;
}
// Returns a human-readable rendering of |value|; non-printable bytes are
// shown as "\xNN" escapes (see AppendEscapedStringTo).
std::string EscapeString(const Slice& value) {
  std::string escaped;
  AppendEscapedStringTo(&escaped, value);
  return escaped;
}
// Parses a leading run of decimal digits from |*in| into |*val|.
// On success removes the digits from the front of |*in| and returns true.
// Returns false, leaving |*in| unmodified, if the value would overflow a
// uint64_t; also returns false if there are no leading digits (in which
// case |*val| is still set to 0).
bool ConsumeDecimalNumber(Slice* in, uint64_t* val) {
  // Precomputed pieces of the overflow test: value*10 + digit must not
  // exceed kMaxUint64.
  constexpr const uint64_t kMaxUint64 = std::numeric_limits<uint64_t>::max();
  constexpr const char kLastDigitOfMaxUint64 =
      '0' + static_cast<char>(kMaxUint64 % 10);

  uint64_t value = 0;

  // Work on unsigned bytes so comparisons behave for bytes above 0x7F on
  // platforms where char is signed.
  const uint8_t* start = reinterpret_cast<const uint8_t*>(in->data());

  const uint8_t* end = start + in->size();
  const uint8_t* current = start;
  for (; current != end; ++current) {
    const uint8_t ch = *current;
    if (ch < '0' || ch > '9') break;

    // Overflow check before the multiply-add: would value*10 + (ch-'0')
    // exceed kMaxUint64? The early return leaves |*in| untouched.
    if (value > kMaxUint64 / 10 ||
        (value == kMaxUint64 / 10 && ch > kLastDigitOfMaxUint64)) {
      return false;
    }

    value = (value * 10) + (ch - '0');
  }

  *val = value;
  const size_t digits_consumed = current - start;
  in->remove_prefix(digits_consumed);
  // Success only if at least one digit was consumed.
  return digits_consumed != 0;
}
} | #include "util/logging.h"
#include <limits>
#include <string>
#include "gtest/gtest.h"
#include "leveldb/slice.h"
namespace leveldb {
TEST(Logging, NumberToString) {
ASSERT_EQ("0", NumberToString(0));
ASSERT_EQ("1", NumberToString(1));
ASSERT_EQ("9", NumberToString(9));
ASSERT_EQ("10", NumberToString(10));
ASSERT_EQ("11", NumberToString(11));
ASSERT_EQ("19", NumberToString(19));
ASSERT_EQ("99", NumberToString(99));
ASSERT_EQ("100", NumberToString(100));
ASSERT_EQ("109", NumberToString(109));
ASSERT_EQ("190", NumberToString(190));
ASSERT_EQ("123", NumberToString(123));
ASSERT_EQ("12345678", NumberToString(12345678));
static_assert(std::numeric_limits<uint64_t>::max() == 18446744073709551615U,
"Test consistency check");
ASSERT_EQ("18446744073709551000", NumberToString(18446744073709551000U));
ASSERT_EQ("18446744073709551600", NumberToString(18446744073709551600U));
ASSERT_EQ("18446744073709551610", NumberToString(18446744073709551610U));
ASSERT_EQ("18446744073709551614", NumberToString(18446744073709551614U));
ASSERT_EQ("18446744073709551615", NumberToString(18446744073709551615U));
}
// Formats |number|, appends |padding|, and checks that
// ConsumeDecimalNumber parses the value back exactly while consuming only
// the digit characters.
void ConsumeDecimalNumberRoundtripTest(uint64_t number,
                                       const std::string& padding = "") {
  std::string decimal_number = NumberToString(number);
  std::string input_string = decimal_number + padding;
  Slice input(input_string);
  Slice output = input;
  uint64_t result;
  ASSERT_TRUE(ConsumeDecimalNumber(&output, &result));
  ASSERT_EQ(number, result);
  // Only the digits may be consumed; |output| must begin at the padding.
  ASSERT_EQ(decimal_number.size(), output.data() - input.data());
  ASSERT_EQ(padding.size(), output.size());
}
TEST(Logging, ConsumeDecimalNumberRoundtrip) {
ConsumeDecimalNumberRoundtripTest(0);
ConsumeDecimalNumberRoundtripTest(1);
ConsumeDecimalNumberRoundtripTest(9);
ConsumeDecimalNumberRoundtripTest(10);
ConsumeDecimalNumberRoundtripTest(11);
ConsumeDecimalNumberRoundtripTest(19);
ConsumeDecimalNumberRoundtripTest(99);
ConsumeDecimalNumberRoundtripTest(100);
ConsumeDecimalNumberRoundtripTest(109);
ConsumeDecimalNumberRoundtripTest(190);
ConsumeDecimalNumberRoundtripTest(123);
ASSERT_EQ("12345678", NumberToString(12345678));
for (uint64_t i = 0; i < 100; ++i) {
uint64_t large_number = std::numeric_limits<uint64_t>::max() - i;
ConsumeDecimalNumberRoundtripTest(large_number);
}
}
TEST(Logging, ConsumeDecimalNumberRoundtripWithPadding) {
ConsumeDecimalNumberRoundtripTest(0, " ");
ConsumeDecimalNumberRoundtripTest(1, "abc");
ConsumeDecimalNumberRoundtripTest(9, "x");
ConsumeDecimalNumberRoundtripTest(10, "_");
ConsumeDecimalNumberRoundtripTest(11, std::string("\0\0\0", 3));
ConsumeDecimalNumberRoundtripTest(19, "abc");
ConsumeDecimalNumberRoundtripTest(99, "padding");
ConsumeDecimalNumberRoundtripTest(100, " ");
for (uint64_t i = 0; i < 100; ++i) {
uint64_t large_number = std::numeric_limits<uint64_t>::max() - i;
ConsumeDecimalNumberRoundtripTest(large_number, "pad");
}
}
// Checks that ConsumeDecimalNumber rejects |input_string|, whose numeric
// value does not fit in a uint64_t.
void ConsumeDecimalNumberOverflowTest(const std::string& input_string) {
  Slice input(input_string);
  Slice output = input;
  uint64_t result;
  ASSERT_EQ(false, ConsumeDecimalNumber(&output, &result));
}
TEST(Logging, ConsumeDecimalNumberOverflow) {
static_assert(std::numeric_limits<uint64_t>::max() == 18446744073709551615U,
"Test consistency check");
ConsumeDecimalNumberOverflowTest("18446744073709551616");
ConsumeDecimalNumberOverflowTest("18446744073709551617");
ConsumeDecimalNumberOverflowTest("18446744073709551618");
ConsumeDecimalNumberOverflowTest("18446744073709551619");
ConsumeDecimalNumberOverflowTest("18446744073709551620");
ConsumeDecimalNumberOverflowTest("18446744073709551621");
ConsumeDecimalNumberOverflowTest("18446744073709551622");
ConsumeDecimalNumberOverflowTest("18446744073709551623");
ConsumeDecimalNumberOverflowTest("18446744073709551624");
ConsumeDecimalNumberOverflowTest("18446744073709551625");
ConsumeDecimalNumberOverflowTest("18446744073709551626");
ConsumeDecimalNumberOverflowTest("18446744073709551700");
ConsumeDecimalNumberOverflowTest("99999999999999999999");
}
// Checks that ConsumeDecimalNumber fails on input with no leading digits
// and leaves the slice completely untouched.
void ConsumeDecimalNumberNoDigitsTest(const std::string& input_string) {
  Slice input(input_string);
  Slice output = input;
  uint64_t result;
  ASSERT_EQ(false, ConsumeDecimalNumber(&output, &result));
  ASSERT_EQ(input.data(), output.data());
  ASSERT_EQ(input.size(), output.size());
}
TEST(Logging, ConsumeDecimalNumberNoDigits) {
ConsumeDecimalNumberNoDigitsTest("");
ConsumeDecimalNumberNoDigitsTest(" ");
ConsumeDecimalNumberNoDigitsTest("a");
ConsumeDecimalNumberNoDigitsTest(" 123");
ConsumeDecimalNumberNoDigitsTest("a123");
ConsumeDecimalNumberNoDigitsTest(std::string("\000123", 4));
ConsumeDecimalNumberNoDigitsTest(std::string("\177123", 4));
ConsumeDecimalNumberNoDigitsTest(std::string("\377123", 4));
}
} | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/util/logging.cc | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/util/logging_test.cc | 23e35d792b9154f922b8b575b12596a4d8664c65 |
4ecfefe6-3b23-43e8-a12b-076143182fdb | cpp | google/leveldb | coding | util/coding.cc | util/coding_test.cc | #include "util/coding.h"
namespace leveldb {
// Appends |value| to |*dst| as a fixed-width 4-byte encoding (the test
// suite pins this codec to little-endian byte order).
void PutFixed32(std::string* dst, uint32_t value) {
  char encoded[sizeof(uint32_t)];
  EncodeFixed32(encoded, value);
  dst->append(encoded, sizeof(encoded));
}
// Appends |value| to |*dst| as a fixed-width 8-byte encoding (the test
// suite pins this codec to little-endian byte order).
void PutFixed64(std::string* dst, uint64_t value) {
  char encoded[sizeof(uint64_t)];
  EncodeFixed64(encoded, value);
  dst->append(encoded, sizeof(encoded));
}
// Encodes |v| at |dst| as a varint: seven payload bits per byte, least
// significant group first, high bit set on every byte except the last.
// Writes at most five bytes and returns a pointer one past the output.
char* EncodeVarint32(char* dst, uint32_t v) {
  uint8_t* out = reinterpret_cast<uint8_t*>(dst);
  // Emit continuation bytes while more than seven bits remain; this loop
  // produces exactly the same bytes as an unrolled per-range version.
  while (v >= 128) {
    *(out++) = static_cast<uint8_t>(v | 128);
    v >>= 7;
  }
  *(out++) = static_cast<uint8_t>(v);
  return reinterpret_cast<char*>(out);
}
// Appends the varint encoding of |v| to |*dst|.
void PutVarint32(std::string* dst, uint32_t v) {
  // A 32-bit varint occupies at most five bytes.
  char scratch[5];
  const char* end = EncodeVarint32(scratch, v);
  dst->append(scratch, end - scratch);
}
// Encodes |v| at |dst| as a varint: seven payload bits per byte, least
// significant group first, high bit set on every byte except the last.
// Writes at most ten bytes and returns a pointer one past the output.
char* EncodeVarint64(char* dst, uint64_t v) {
  uint8_t* out = reinterpret_cast<uint8_t*>(dst);
  do {
    uint8_t byte = static_cast<uint8_t>(v & 127);
    v >>= 7;
    if (v != 0) {
      byte |= 128;  // More payload groups follow.
    }
    *(out++) = byte;
  } while (v != 0);
  return reinterpret_cast<char*>(out);
}
// Appends the varint encoding of |v| to |*dst|.
void PutVarint64(std::string* dst, uint64_t v) {
  // A 64-bit varint occupies at most ten bytes.
  char scratch[10];
  const char* end = EncodeVarint64(scratch, v);
  dst->append(scratch, end - scratch);
}
// Appends |value| to |*dst| as a varint32 length prefix followed by the
// raw bytes; the inverse of GetLengthPrefixedSlice().
void PutLengthPrefixedSlice(std::string* dst, const Slice& value) {
  PutVarint32(dst, value.size());
  dst->append(value.data(), value.size());
}
// Returns the number of bytes the varint encoding of |v| occupies
// (1..10); each encoded byte carries seven payload bits.
int VarintLength(uint64_t v) {
  int count = 1;
  for (uint64_t rest = v >> 7; rest != 0; rest >>= 7) {
    ++count;
  }
  return count;
}
// Slow path of GetVarint32Ptr: decodes a multi-byte varint32 from
// [p, limit). On success stores the value in |*value| and returns the
// position just past the last byte; returns nullptr if the input is
// truncated or uses more than five bytes.
const char* GetVarint32PtrFallback(const char* p, const char* limit,
                                   uint32_t* value) {
  uint32_t result = 0;
  uint32_t shift = 0;
  while (p < limit && shift <= 28) {
    const uint32_t byte = static_cast<uint8_t>(*p);
    ++p;
    result |= (byte & 127) << shift;
    if ((byte & 128) == 0) {  // High bit clear: this was the last byte.
      *value = result;
      return p;
    }
    shift += 7;
  }
  return nullptr;
}
// Decodes a varint32 from the front of |*input| into |*value|. On
// success advances |*input| past the encoded bytes and returns true; on
// truncated/over-long input returns false and leaves |*input| unchanged.
bool GetVarint32(Slice* input, uint32_t* value) {
  const char* p = input->data();
  const char* limit = p + input->size();
  const char* q = GetVarint32Ptr(p, limit, value);
  if (q == nullptr) {
    return false;
  } else {
    // Rebuild the slice to cover only the unconsumed remainder.
    *input = Slice(q, limit - q);
    return true;
  }
}
// Decodes a varint64 from [p, limit). On success stores the value in
// |*value| and returns the position just past the last byte; returns
// nullptr if the input is truncated or uses more than ten bytes.
const char* GetVarint64Ptr(const char* p, const char* limit, uint64_t* value) {
  uint64_t result = 0;
  uint32_t shift = 0;
  while (p < limit && shift <= 63) {
    const uint64_t byte = static_cast<uint8_t>(*p);
    ++p;
    result |= (byte & 127) << shift;
    if ((byte & 128) == 0) {  // High bit clear: this was the last byte.
      *value = result;
      return p;
    }
    shift += 7;
  }
  return nullptr;
}
// Decodes a varint64 from the front of |*input| into |*value|. On
// success advances |*input| past the encoded bytes and returns true; on
// truncated/over-long input returns false and leaves |*input| unchanged.
bool GetVarint64(Slice* input, uint64_t* value) {
  const char* p = input->data();
  const char* limit = p + input->size();
  const char* q = GetVarint64Ptr(p, limit, value);
  if (q == nullptr) {
    return false;
  } else {
    // Rebuild the slice to cover only the unconsumed remainder.
    *input = Slice(q, limit - q);
    return true;
  }
}
// Decodes a varint32 length followed by that many bytes from |*input|.
// On success |*result| aliases the bytes inside |*input|'s underlying
// buffer (no copy) and |*input| is advanced past them.
// NOTE: if the length prefix parses but the payload is truncated, the
// prefix has already been consumed from |*input| when false is returned.
bool GetLengthPrefixedSlice(Slice* input, Slice* result) {
  uint32_t len;
  if (GetVarint32(input, &len) && input->size() >= len) {
    *result = Slice(input->data(), len);
    input->remove_prefix(len);
    return true;
  } else {
    return false;
  }
}
} | #include "util/coding.h"
#include <vector>
#include "gtest/gtest.h"
namespace leveldb {
TEST(Coding, Fixed32) {
std::string s;
for (uint32_t v = 0; v < 100000; v++) {
PutFixed32(&s, v);
}
const char* p = s.data();
for (uint32_t v = 0; v < 100000; v++) {
uint32_t actual = DecodeFixed32(p);
ASSERT_EQ(v, actual);
p += sizeof(uint32_t);
}
}
TEST(Coding, Fixed64) {
std::string s;
for (int power = 0; power <= 63; power++) {
uint64_t v = static_cast<uint64_t>(1) << power;
PutFixed64(&s, v - 1);
PutFixed64(&s, v + 0);
PutFixed64(&s, v + 1);
}
const char* p = s.data();
for (int power = 0; power <= 63; power++) {
uint64_t v = static_cast<uint64_t>(1) << power;
uint64_t actual;
actual = DecodeFixed64(p);
ASSERT_EQ(v - 1, actual);
p += sizeof(uint64_t);
actual = DecodeFixed64(p);
ASSERT_EQ(v + 0, actual);
p += sizeof(uint64_t);
actual = DecodeFixed64(p);
ASSERT_EQ(v + 1, actual);
p += sizeof(uint64_t);
}
}
TEST(Coding, EncodingOutput) {
std::string dst;
PutFixed32(&dst, 0x04030201);
ASSERT_EQ(4, dst.size());
ASSERT_EQ(0x01, static_cast<int>(dst[0]));
ASSERT_EQ(0x02, static_cast<int>(dst[1]));
ASSERT_EQ(0x03, static_cast<int>(dst[2]));
ASSERT_EQ(0x04, static_cast<int>(dst[3]));
dst.clear();
PutFixed64(&dst, 0x0807060504030201ull);
ASSERT_EQ(8, dst.size());
ASSERT_EQ(0x01, static_cast<int>(dst[0]));
ASSERT_EQ(0x02, static_cast<int>(dst[1]));
ASSERT_EQ(0x03, static_cast<int>(dst[2]));
ASSERT_EQ(0x04, static_cast<int>(dst[3]));
ASSERT_EQ(0x05, static_cast<int>(dst[4]));
ASSERT_EQ(0x06, static_cast<int>(dst[5]));
ASSERT_EQ(0x07, static_cast<int>(dst[6]));
ASSERT_EQ(0x08, static_cast<int>(dst[7]));
}
TEST(Coding, Varint32) {
std::string s;
for (uint32_t i = 0; i < (32 * 32); i++) {
uint32_t v = (i / 32) << (i % 32);
PutVarint32(&s, v);
}
const char* p = s.data();
const char* limit = p + s.size();
for (uint32_t i = 0; i < (32 * 32); i++) {
uint32_t expected = (i / 32) << (i % 32);
uint32_t actual;
const char* start = p;
p = GetVarint32Ptr(p, limit, &actual);
ASSERT_TRUE(p != nullptr);
ASSERT_EQ(expected, actual);
ASSERT_EQ(VarintLength(actual), p - start);
}
ASSERT_EQ(p, s.data() + s.size());
}
TEST(Coding, Varint64) {
std::vector<uint64_t> values;
values.push_back(0);
values.push_back(100);
values.push_back(~static_cast<uint64_t>(0));
values.push_back(~static_cast<uint64_t>(0) - 1);
for (uint32_t k = 0; k < 64; k++) {
const uint64_t power = 1ull << k;
values.push_back(power);
values.push_back(power - 1);
values.push_back(power + 1);
}
std::string s;
for (size_t i = 0; i < values.size(); i++) {
PutVarint64(&s, values[i]);
}
const char* p = s.data();
const char* limit = p + s.size();
for (size_t i = 0; i < values.size(); i++) {
ASSERT_TRUE(p < limit);
uint64_t actual;
const char* start = p;
p = GetVarint64Ptr(p, limit, &actual);
ASSERT_TRUE(p != nullptr);
ASSERT_EQ(values[i], actual);
ASSERT_EQ(VarintLength(actual), p - start);
}
ASSERT_EQ(p, limit);
}
TEST(Coding, Varint32Overflow) {
uint32_t result;
std::string input("\x81\x82\x83\x84\x85\x11");
ASSERT_TRUE(GetVarint32Ptr(input.data(), input.data() + input.size(),
&result) == nullptr);
}
TEST(Coding, Varint32Truncation) {
uint32_t large_value = (1u << 31) + 100;
std::string s;
PutVarint32(&s, large_value);
uint32_t result;
for (size_t len = 0; len < s.size() - 1; len++) {
ASSERT_TRUE(GetVarint32Ptr(s.data(), s.data() + len, &result) == nullptr);
}
ASSERT_TRUE(GetVarint32Ptr(s.data(), s.data() + s.size(), &result) !=
nullptr);
ASSERT_EQ(large_value, result);
}
TEST(Coding, Varint64Overflow) {
uint64_t result;
std::string input("\x81\x82\x83\x84\x85\x81\x82\x83\x84\x85\x11");
ASSERT_TRUE(GetVarint64Ptr(input.data(), input.data() + input.size(),
&result) == nullptr);
}
TEST(Coding, Varint64Truncation) {
uint64_t large_value = (1ull << 63) + 100ull;
std::string s;
PutVarint64(&s, large_value);
uint64_t result;
for (size_t len = 0; len < s.size() - 1; len++) {
ASSERT_TRUE(GetVarint64Ptr(s.data(), s.data() + len, &result) == nullptr);
}
ASSERT_TRUE(GetVarint64Ptr(s.data(), s.data() + s.size(), &result) !=
nullptr);
ASSERT_EQ(large_value, result);
}
TEST(Coding, Strings) {
std::string s;
PutLengthPrefixedSlice(&s, Slice(""));
PutLengthPrefixedSlice(&s, Slice("foo"));
PutLengthPrefixedSlice(&s, Slice("bar"));
PutLengthPrefixedSlice(&s, Slice(std::string(200, 'x')));
Slice input(s);
Slice v;
ASSERT_TRUE(GetLengthPrefixedSlice(&input, &v));
ASSERT_EQ("", v.ToString());
ASSERT_TRUE(GetLengthPrefixedSlice(&input, &v));
ASSERT_EQ("foo", v.ToString());
ASSERT_TRUE(GetLengthPrefixedSlice(&input, &v));
ASSERT_EQ("bar", v.ToString());
ASSERT_TRUE(GetLengthPrefixedSlice(&input, &v));
ASSERT_EQ(std::string(200, 'x'), v.ToString());
ASSERT_EQ("", input.ToString());
}
} | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/util/coding.cc | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/util/coding_test.cc | 23e35d792b9154f922b8b575b12596a4d8664c65 |
df1d4917-d2e6-47fd-912a-265947ba8239 | cpp | google/leveldb | env_posix | util/env_posix.cc | util/env_posix_test.cc | #include <dirent.h>
#include <fcntl.h>
#include <sys/mman.h>
#ifndef __Fuchsia__
#include <sys/resource.h>
#endif
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
#include <unistd.h>
#include <atomic>
#include <cerrno>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <limits>
#include <queue>
#include <set>
#include <string>
#include <thread>
#include <type_traits>
#include <utility>
#include "leveldb/env.h"
#include "leveldb/slice.h"
#include "leveldb/status.h"
#include "port/port.h"
#include "port/thread_annotations.h"
#include "util/env_posix_test_helper.h"
#include "util/posix_logger.h"
namespace leveldb {
namespace {
// Read-only file descriptor budget; -1 means "not explicitly configured"
// (presumably overridable via EnvPosixTestHelper — confirm against
// util/env_posix_test_helper.h).
int g_open_read_only_file_limit = -1;

// Up to 1000 mmap regions on 64-bit builds; none on 32-bit builds, where
// address space is scarce.
constexpr const int kDefaultMmapLimit = (sizeof(void*) >= 8) ? 1000 : 0;

// mmap budget actually used; initialized from the default above.
int g_mmap_limit = kDefaultMmapLimit;

// Flag OR-ed into every open(): close-on-exec where available.
#if defined(HAVE_O_CLOEXEC)
constexpr const int kOpenBaseFlags = O_CLOEXEC;
#else
constexpr const int kOpenBaseFlags = 0;
#endif

// Size of PosixWritableFile's in-memory write buffer (64 KiB).
constexpr const size_t kWritableFileBufferSize = 65536;
// Maps an errno value to a leveldb Status: ENOENT becomes NotFound,
// everything else IOError; |context| (typically a filename) is included
// in the message.
Status PosixError(const std::string& context, int error_number) {
  const char* message = std::strerror(error_number);
  return (error_number == ENOENT) ? Status::NotFound(context, message)
                                  : Status::IOError(context, message);
}
// Bounds the number of simultaneously held resources (used below for
// read-only fds and mmap regions). Thread-safe: the budget is one atomic
// counter, manipulated with a speculative-decrement protocol.
class Limiter {
 public:
  // Allows at most |max_acquires| concurrent acquisitions.
  Limiter(int max_acquires)
      :
#if !defined(NDEBUG)
        max_acquires_(max_acquires),
#endif
        acquires_allowed_(max_acquires) {
    assert(max_acquires >= 0);
  }

  Limiter(const Limiter&) = delete;
  Limiter operator=(const Limiter&) = delete;

  // If a slot is available, takes it and returns true; otherwise returns
  // false. Decrements speculatively, then undoes the decrement when the
  // budget was already exhausted — so the counter can transiently dip
  // below zero under contention, which is why Acquire checks the *old*
  // value rather than the new one.
  bool Acquire() {
    int old_acquires_allowed =
        acquires_allowed_.fetch_sub(1, std::memory_order_relaxed);

    if (old_acquires_allowed > 0) return true;

    int pre_increment_acquires_allowed =
        acquires_allowed_.fetch_add(1, std::memory_order_relaxed);

    // Silence unused-variable warnings in NDEBUG builds.
    (void)pre_increment_acquires_allowed;
    assert(pre_increment_acquires_allowed < max_acquires_);

    return false;
  }

  // Returns a slot taken by a previous successful Acquire().
  void Release() {
    int old_acquires_allowed =
        acquires_allowed_.fetch_add(1, std::memory_order_relaxed);

    (void)old_acquires_allowed;
    // Catches more Release() calls than successful Acquire() calls.
    assert(old_acquires_allowed < max_acquires_);
  }

 private:
#if !defined(NDEBUG)
  // Debug-only copy of the budget, used solely in the assertions above.
  const int max_acquires_;
#endif

  // Number of available slots; may be transiently negative (see above).
  std::atomic<int> acquires_allowed_;
};
// Sequential file reader built on read()/lseek(); owns the descriptor and
// closes it on destruction.
class PosixSequentialFile final : public SequentialFile {
 public:
  PosixSequentialFile(std::string filename, int fd)
      : fd_(fd), filename_(std::move(filename)) {}
  ~PosixSequentialFile() override { close(fd_); }

  // Reads up to |n| bytes into |scratch|; |*result| covers the bytes
  // actually read (possibly fewer than |n|, empty at EOF). Reads
  // interrupted by a signal (EINTR) are retried transparently.
  Status Read(size_t n, Slice* result, char* scratch) override {
    Status status;
    while (true) {
      ::ssize_t read_size = ::read(fd_, scratch, n);
      if (read_size < 0) {  // Read error.
        if (errno == EINTR) {
          continue;  // Retry
        }
        status = PosixError(filename_, errno);
        break;
      }
      *result = Slice(scratch, read_size);
      break;
    }
    return status;
  }

  // Skips |n| bytes by seeking forward relative to the current offset.
  Status Skip(uint64_t n) override {
    if (::lseek(fd_, n, SEEK_CUR) == static_cast<off_t>(-1)) {
      return PosixError(filename_, errno);
    }
    return Status::OK();
  }

 private:
  const int fd_;                // Owned; closed in the destructor.
  const std::string filename_;  // Retained only for error messages.
};
// Implements random read access to a file using pread().
//
// If the process-wide descriptor budget (|fd_limiter|) has room, the
// descriptor stays open for the object's lifetime; otherwise the file is
// re-opened (and closed again) on every Read() call.
class PosixRandomAccessFile final : public RandomAccessFile {
 public:
  // Takes ownership of |fd|: it is either kept (when the limiter grants a
  // slot) or closed immediately, in which case each Read() re-opens the file
  // on demand.  |fd_limiter| must outlive this instance.
  PosixRandomAccessFile(std::string filename, int fd, Limiter* fd_limiter)
      : has_permanent_fd_(fd_limiter->Acquire()),
        fd_(has_permanent_fd_ ? fd : -1),
        fd_limiter_(fd_limiter),
        filename_(std::move(filename)) {
    if (!has_permanent_fd_) {
      assert(fd_ == -1);
      ::close(fd);  // The file will be opened on every read.
    }
  }

  ~PosixRandomAccessFile() override {
    if (has_permanent_fd_) {
      assert(fd_ != -1);
      ::close(fd_);
      fd_limiter_->Release();  // Give the descriptor slot back.
    }
  }

  // Reads up to |n| bytes at |offset|.  pread() does not move the file
  // offset, so concurrent reads on a permanent descriptor are safe.
  Status Read(uint64_t offset, size_t n, Slice* result,
              char* scratch) const override {
    int fd = fd_;
    if (!has_permanent_fd_) {
      fd = ::open(filename_.c_str(), O_RDONLY | kOpenBaseFlags);
      if (fd < 0) {
        return PosixError(filename_, errno);
      }
    }

    assert(fd != -1);

    Status status;
    ssize_t read_size = ::pread(fd, scratch, n, static_cast<off_t>(offset));
    // On error point *result at an empty slice rather than garbage.
    *result = Slice(scratch, (read_size < 0) ? 0 : read_size);
    if (read_size < 0) {
      status = PosixError(filename_, errno);
    }
    if (!has_permanent_fd_) {
      // Close the temporary file descriptor opened above.
      assert(fd != fd_);
      ::close(fd);
    }
    return status;
  }

 private:
  const bool has_permanent_fd_;  // If false, the file is opened on every read.
  const int fd_;                 // -1 when has_permanent_fd_ is false.
  Limiter* const fd_limiter_;
  const std::string filename_;
};
// Implements random read access to a file via a read-only mmap() region.
//
// Read() only inspects the immutable mapping, so concurrent reads are safe.
class PosixMmapReadableFile final : public RandomAccessFile {
 public:
  // mmap_base[0, length-1] is the memory-mapped contents of the file.  Takes
  // ownership of the mapping (munmap'ed on destruction) and of one limiter
  // slot (released on destruction).  |mmap_limiter| must outlive this
  // instance.
  PosixMmapReadableFile(std::string filename, char* mmap_base, size_t length,
                        Limiter* mmap_limiter)
      : mmap_base_(mmap_base),
        length_(length),
        mmap_limiter_(mmap_limiter),
        filename_(std::move(filename)) {}

  ~PosixMmapReadableFile() override {
    ::munmap(static_cast<void*>(mmap_base_), length_);
    mmap_limiter_->Release();
  }

  // Points *result into the mapping; |scratch| is unused.  Returns EINVAL
  // for out-of-range requests.
  Status Read(uint64_t offset, size_t n, Slice* result,
              char* scratch) const override {
    // Bounds check written as two comparisons (instead of the original
    // `offset + n > length_`) so a huge |offset| cannot wrap the unsigned
    // addition around and slip past the check, which would let the Slice
    // below point outside the mapping.
    if (offset > length_ || n > length_ - offset) {
      *result = Slice();
      return PosixError(filename_, EINVAL);
    }

    *result = Slice(mmap_base_ + offset, n);
    return Status::OK();
  }

 private:
  char* const mmap_base_;  // Owned mapping; unmapped by the destructor.
  const size_t length_;
  Limiter* const mmap_limiter_;
  const std::string filename_;  // Retained for error messages.
};
// Buffered writable file: small Append() calls are coalesced in an in-memory
// buffer of kWritableFileBufferSize bytes which is flushed to fd_ with
// write() when full, or on Flush()/Sync()/Close().
class PosixWritableFile final : public WritableFile {
 public:
  // Takes ownership of |fd|.
  PosixWritableFile(std::string filename, int fd)
      : pos_(0),
        fd_(fd),
        is_manifest_(IsManifest(filename)),
        filename_(std::move(filename)),
        dirname_(Dirname(filename_)) {}

  ~PosixWritableFile() override {
    if (fd_ >= 0) {
      // Ignoring any potential errors.
      Close();
    }
  }

  Status Append(const Slice& data) override {
    size_t write_size = data.size();
    const char* write_data = data.data();

    // Fit as much as possible into the buffer.
    size_t copy_size = std::min(write_size, kWritableFileBufferSize - pos_);
    std::memcpy(buf_ + pos_, write_data, copy_size);
    write_data += copy_size;
    write_size -= copy_size;
    pos_ += copy_size;
    if (write_size == 0) {
      return Status::OK();
    }

    // Can't fit in the buffer, so need to do at least one write.
    Status status = FlushBuffer();
    if (!status.ok()) {
      return status;
    }

    // Small writes go to the (now empty) buffer; large writes bypass it.
    if (write_size < kWritableFileBufferSize) {
      std::memcpy(buf_, write_data, write_size);
      pos_ = write_size;
      return Status::OK();
    }
    return WriteUnbuffered(write_data, write_size);
  }

  Status Close() override {
    Status status = FlushBuffer();
    const int close_result = ::close(fd_);
    if (close_result < 0 && status.ok()) {
      status = PosixError(filename_, errno);
    }
    fd_ = -1;  // Mark closed so the destructor does not Close() again.
    return status;
  }

  Status Flush() override { return FlushBuffer(); }

  Status Sync() override {
    // Ensure new files referred to by the manifest are in the filesystem:
    // sync the containing directory first so the file's directory entry is
    // durable before its contents are.
    Status status = SyncDirIfManifest();
    if (!status.ok()) {
      return status;
    }

    status = FlushBuffer();
    if (!status.ok()) {
      return status;
    }

    return SyncFd(fd_, filename_);
  }

 private:
  // Writes the buffered bytes [0, pos_) to fd_ and resets the buffer.
  Status FlushBuffer() {
    Status status = WriteUnbuffered(buf_, pos_);
    pos_ = 0;
    return status;
  }

  // Writes |size| bytes of |data| straight to fd_, retrying on EINTR and on
  // short writes until everything is written or a hard error occurs.
  Status WriteUnbuffered(const char* data, size_t size) {
    while (size > 0) {
      ssize_t write_result = ::write(fd_, data, size);
      if (write_result < 0) {
        if (errno == EINTR) {
          continue;  // Retry.
        }
        return PosixError(filename_, errno);
      }
      data += write_result;
      size -= write_result;
    }
    return Status::OK();
  }

  // If this file is a MANIFEST, fsync its parent directory so the directory
  // entry created for the file is durable.
  Status SyncDirIfManifest() {
    Status status;
    if (!is_manifest_) {
      return status;
    }

    int fd = ::open(dirname_.c_str(), O_RDONLY | kOpenBaseFlags);
    if (fd < 0) {
      status = PosixError(dirname_, errno);
    } else {
      status = SyncFd(fd, dirname_);
      ::close(fd);
    }
    return status;
  }

  // Flushes the caches associated with |fd|'s data all the way to durable
  // media.  |fd_path| is only used in the error message on failure.
  static Status SyncFd(int fd, const std::string& fd_path) {
#if HAVE_FULLFSYNC
    // On macOS/iOS, fcntl(F_FULLFSYNC) is required for durability past power
    // failures; plain fsync() does not guarantee it there.
    if (::fcntl(fd, F_FULLFSYNC) == 0) {
      return Status::OK();
    }
#endif

#if HAVE_FDATASYNC
    bool sync_success = ::fdatasync(fd) == 0;
#else
    bool sync_success = ::fsync(fd) == 0;
#endif

    if (sync_success) {
      return Status::OK();
    }
    return PosixError(fd_path, errno);
  }

  // Returns the directory portion of |filename|, or "." if it has none.
  static std::string Dirname(const std::string& filename) {
    std::string::size_type separator_pos = filename.rfind('/');
    if (separator_pos == std::string::npos) {
      return std::string(".");
    }
    // The filename component should not contain a path separator; if it
    // does, the splitting above was done incorrectly.
    assert(filename.find('/', separator_pos + 1) == std::string::npos);

    return filename.substr(0, separator_pos);
  }

  // Extracts the file-name component of |filename|.  The returned Slice
  // points into |filename|, which must outlive it.
  static Slice Basename(const std::string& filename) {
    std::string::size_type separator_pos = filename.rfind('/');
    if (separator_pos == std::string::npos) {
      return Slice(filename);
    }
    assert(filename.find('/', separator_pos + 1) == std::string::npos);

    return Slice(filename.data() + separator_pos + 1,
                 filename.length() - separator_pos - 1);
  }

  // True if the given file is a manifest file (name starts with "MANIFEST").
  static bool IsManifest(const std::string& filename) {
    return Basename(filename).starts_with("MANIFEST");
  }

  // buf_[0, pos_-1] contains data not yet written to fd_.
  char buf_[kWritableFileBufferSize];
  size_t pos_;
  int fd_;  // -1 once Close() has run.

  const bool is_manifest_;  // True if the file's name starts with MANIFEST.
  const std::string filename_;
  const std::string dirname_;  // The directory component of filename_.
};
// Places (lock == true) or removes (lock == false) an exclusive fcntl()
// write-lock covering the whole of |fd|'s file.  Returns the fcntl() result:
// 0 on success, -1 on failure with errno set.
int LockOrUnlock(int fd, bool lock) {
  errno = 0;
  struct ::flock lock_request;
  std::memset(&lock_request, 0, sizeof(lock_request));
  lock_request.l_whence = SEEK_SET;  // offsets are relative to file start
  lock_request.l_start = 0;
  lock_request.l_len = 0;  // zero length means "to the end of the file"
  lock_request.l_type = lock ? F_WRLCK : F_UNLCK;
  return ::fcntl(fd, F_SETLK, &lock_request);
}
// State of a lock handed out by PosixEnv::LockFile(): the open descriptor
// that holds the fcntl() lock plus the locked file's name.  Immutable after
// construction.
class PosixFileLock : public FileLock {
 public:
  PosixFileLock(int fd, std::string filename)
      : fd_(fd), filename_(std::move(filename)) {}

  int fd() const { return fd_; }
  const std::string& filename() const { return filename_; }

 private:
  const int fd_;  // Closed by PosixEnv::UnlockFile(), not here.
  const std::string filename_;
};
// Mutex-protected set of file names currently locked by this process.
// PosixEnv::LockFile() consults it so the same file cannot be locked twice
// from within one process.
class PosixLockTable {
 public:
  // Registers |fname|.  Returns false when the name is already registered.
  bool Insert(const std::string& fname) LOCKS_EXCLUDED(mu_) {
    mu_.Lock();
    const bool newly_inserted = locked_files_.insert(fname).second;
    mu_.Unlock();
    return newly_inserted;
  }

  // Unregisters |fname|.  Harmless when the name was never registered.
  void Remove(const std::string& fname) LOCKS_EXCLUDED(mu_) {
    mu_.Lock();
    locked_files_.erase(fname);
    mu_.Unlock();
  }

 private:
  port::Mutex mu_;
  std::set<std::string> locked_files_ GUARDED_BY(mu_);
};
// Env implementation backed by POSIX syscalls.  A single instance is created
// by Env::Default() and deliberately never destroyed.
class PosixEnv : public Env {
 public:
  PosixEnv();
  ~PosixEnv() override {
    // The singleton must live for the whole process; destroying it while
    // other threads may still be using it is unsupported, so fail loudly.
    static const char msg[] =
        "PosixEnv singleton destroyed. Unsupported behavior!\n";
    std::fwrite(msg, 1, sizeof(msg), stderr);
    std::abort();
  }

  Status NewSequentialFile(const std::string& filename,
                           SequentialFile** result) override {
    int fd = ::open(filename.c_str(), O_RDONLY | kOpenBaseFlags);
    if (fd < 0) {
      *result = nullptr;
      return PosixError(filename, errno);
    }

    *result = new PosixSequentialFile(filename, fd);
    return Status::OK();
  }

  // Prefers an mmap-backed reader while the mmap budget lasts; otherwise
  // falls back to a pread-based reader.
  Status NewRandomAccessFile(const std::string& filename,
                             RandomAccessFile** result) override {
    *result = nullptr;
    int fd = ::open(filename.c_str(), O_RDONLY | kOpenBaseFlags);
    if (fd < 0) {
      return PosixError(filename, errno);
    }

    if (!mmap_limiter_.Acquire()) {
      *result = new PosixRandomAccessFile(filename, fd, &fd_limiter_);
      return Status::OK();
    }

    uint64_t file_size;
    Status status = GetFileSize(filename, &file_size);
    if (status.ok()) {
      void* mmap_base =
          ::mmap(nullptr, file_size, PROT_READ, MAP_SHARED, fd, 0);
      if (mmap_base != MAP_FAILED) {
        *result = new PosixMmapReadableFile(filename,
                                            reinterpret_cast<char*>(mmap_base),
                                            file_size, &mmap_limiter_);
      } else {
        status = PosixError(filename, errno);
      }
    }
    // The mapping (if any) keeps the contents accessible; the fd itself is
    // no longer needed.
    ::close(fd);
    if (!status.ok()) {
      mmap_limiter_.Release();
    }
    return status;
  }

  // O_TRUNC: an existing file is replaced, not appended to.
  Status NewWritableFile(const std::string& filename,
                         WritableFile** result) override {
    int fd = ::open(filename.c_str(),
                    O_TRUNC | O_WRONLY | O_CREAT | kOpenBaseFlags, 0644);
    if (fd < 0) {
      *result = nullptr;
      return PosixError(filename, errno);
    }

    *result = new PosixWritableFile(filename, fd);
    return Status::OK();
  }

  Status NewAppendableFile(const std::string& filename,
                           WritableFile** result) override {
    int fd = ::open(filename.c_str(),
                    O_APPEND | O_WRONLY | O_CREAT | kOpenBaseFlags, 0644);
    if (fd < 0) {
      *result = nullptr;
      return PosixError(filename, errno);
    }

    *result = new PosixWritableFile(filename, fd);
    return Status::OK();
  }

  bool FileExists(const std::string& filename) override {
    return ::access(filename.c_str(), F_OK) == 0;
  }

  // Lists every entry readdir() reports, which includes "." and "..".
  Status GetChildren(const std::string& directory_path,
                     std::vector<std::string>* result) override {
    result->clear();
    ::DIR* dir = ::opendir(directory_path.c_str());
    if (dir == nullptr) {
      return PosixError(directory_path, errno);
    }
    struct ::dirent* entry;
    while ((entry = ::readdir(dir)) != nullptr) {
      result->emplace_back(entry->d_name);
    }
    ::closedir(dir);
    return Status::OK();
  }

  Status RemoveFile(const std::string& filename) override {
    if (::unlink(filename.c_str()) != 0) {
      return PosixError(filename, errno);
    }
    return Status::OK();
  }

  Status CreateDir(const std::string& dirname) override {
    if (::mkdir(dirname.c_str(), 0755) != 0) {
      return PosixError(dirname, errno);
    }
    return Status::OK();
  }

  Status RemoveDir(const std::string& dirname) override {
    if (::rmdir(dirname.c_str()) != 0) {
      return PosixError(dirname, errno);
    }
    return Status::OK();
  }

  Status GetFileSize(const std::string& filename, uint64_t* size) override {
    struct ::stat file_stat;
    if (::stat(filename.c_str(), &file_stat) != 0) {
      *size = 0;
      return PosixError(filename, errno);
    }
    *size = file_stat.st_size;
    return Status::OK();
  }

  Status RenameFile(const std::string& from, const std::string& to) override {
    if (std::rename(from.c_str(), to.c_str()) != 0) {
      return PosixError(from, errno);
    }
    return Status::OK();
  }

  // Acquires both the intra-process lock (locks_) and an inter-process
  // fcntl() lock; on any failure every acquired piece is rolled back.
  Status LockFile(const std::string& filename, FileLock** lock) override {
    *lock = nullptr;

    int fd = ::open(filename.c_str(), O_RDWR | O_CREAT | kOpenBaseFlags, 0644);
    if (fd < 0) {
      return PosixError(filename, errno);
    }

    // fcntl() locks do not prevent re-locking from the same process, hence
    // the separate lock table.
    if (!locks_.Insert(filename)) {
      ::close(fd);
      return Status::IOError("lock " + filename, "already held by process");
    }

    if (LockOrUnlock(fd, true) == -1) {
      int lock_errno = errno;  // close() below may clobber errno.
      ::close(fd);
      locks_.Remove(filename);
      return PosixError("lock " + filename, lock_errno);
    }

    *lock = new PosixFileLock(fd, filename);
    return Status::OK();
  }

  Status UnlockFile(FileLock* lock) override {
    PosixFileLock* posix_file_lock = static_cast<PosixFileLock*>(lock);
    if (LockOrUnlock(posix_file_lock->fd(), false) == -1) {
      return PosixError("unlock " + posix_file_lock->filename(), errno);
    }
    locks_.Remove(posix_file_lock->filename());
    ::close(posix_file_lock->fd());
    delete posix_file_lock;
    return Status::OK();
  }

  void Schedule(void (*background_work_function)(void* background_work_arg),
                void* background_work_arg) override;

  // Fire-and-forget: the detached thread cannot be joined or observed.
  void StartThread(void (*thread_main)(void* thread_main_arg),
                   void* thread_main_arg) override {
    std::thread new_thread(thread_main, thread_main_arg);
    new_thread.detach();
  }

  // $TEST_TMPDIR wins; otherwise a per-user /tmp directory is used.
  Status GetTestDirectory(std::string* result) override {
    const char* env = std::getenv("TEST_TMPDIR");
    if (env && env[0] != '\0') {
      *result = env;
    } else {
      char buf[100];
      std::snprintf(buf, sizeof(buf), "/tmp/leveldbtest-%d",
                    static_cast<int>(::geteuid()));
      *result = buf;
    }
    // CreateDir status intentionally ignored: the directory may already
    // exist.
    CreateDir(*result);
    return Status::OK();
  }

  Status NewLogger(const std::string& filename, Logger** result) override {
    int fd = ::open(filename.c_str(),
                    O_APPEND | O_WRONLY | O_CREAT | kOpenBaseFlags, 0644);
    if (fd < 0) {
      *result = nullptr;
      return PosixError(filename, errno);
    }

    // NOTE(review): PosixLogger presumably takes ownership of fp (and thus
    // fd) — confirm against its definition.
    std::FILE* fp = ::fdopen(fd, "w");
    if (fp == nullptr) {
      ::close(fd);
      *result = nullptr;
      return PosixError(filename, errno);
    } else {
      *result = new PosixLogger(fp);
      return Status::OK();
    }
  }

  // Wall-clock microseconds since the Unix epoch (gettimeofday).
  uint64_t NowMicros() override {
    static constexpr uint64_t kUsecondsPerSecond = 1000000;
    struct ::timeval tv;
    ::gettimeofday(&tv, nullptr);
    return static_cast<uint64_t>(tv.tv_sec) * kUsecondsPerSecond + tv.tv_usec;
  }

  void SleepForMicroseconds(int micros) override {
    std::this_thread::sleep_for(std::chrono::microseconds(micros));
  }

 private:
  void BackgroundThreadMain();

  static void BackgroundThreadEntryPoint(PosixEnv* env) {
    env->BackgroundThreadMain();
  }

  // Stores the work item data for a Schedule() call.  Constructed on the
  // scheduling thread, consumed on the background thread.
  struct BackgroundWorkItem {
    explicit BackgroundWorkItem(void (*function)(void* arg), void* arg)
        : function(function), arg(arg) {}

    void (*const function)(void*);
    void* const arg;
  };

  port::Mutex background_work_mutex_;
  port::CondVar background_work_cv_ GUARDED_BY(background_work_mutex_);
  bool started_background_thread_ GUARDED_BY(background_work_mutex_);

  std::queue<BackgroundWorkItem> background_work_queue_
      GUARDED_BY(background_work_mutex_);

  PosixLockTable locks_;  // Thread-safe (internally mutex-protected).
  Limiter mmap_limiter_;  // Thread-safe (atomic counter).
  Limiter fd_limiter_;    // Thread-safe (atomic counter).
};
// Returns the configured cap on concurrently mmap()ed read-only files.
int MaxMmaps() { return g_mmap_limit; }
// Returns the maximum number of read-only files to keep open, computing and
// caching the value in g_open_read_only_file_limit on first use (a test hook
// may have pre-set it to a non-negative value).
int MaxOpenFiles() {
  if (g_open_read_only_file_limit >= 0) {
    return g_open_read_only_file_limit;
  }
#ifdef __Fuchsia__
  // Fuchsia: no rlimit-based probing here; use a conservative default.
  g_open_read_only_file_limit = 50;
#else
  struct ::rlimit rlim;
  if (::getrlimit(RLIMIT_NOFILE, &rlim)) {
    // getrlimit failed; fall back to a hard-coded default.
    g_open_read_only_file_limit = 50;
  } else if (rlim.rlim_cur == RLIM_INFINITY) {
    g_open_read_only_file_limit = std::numeric_limits<int>::max();
  } else {
    // Allow use of 20% of available file descriptors for read-only files.
    g_open_read_only_file_limit = rlim.rlim_cur / 5;
  }
#endif
  return g_open_read_only_file_limit;
}
}
// The limiter budgets are sampled once here; later changes to the globals
// have no effect on an existing Env.
PosixEnv::PosixEnv()
    : background_work_cv_(&background_work_mutex_),
      started_background_thread_(false),
      mmap_limiter_(MaxMmaps()),
      fd_limiter_(MaxOpenFiles()) {}
// Queues background_work_function(background_work_arg) for execution on the
// single shared background thread, starting that thread lazily on first use.
void PosixEnv::Schedule(
    void (*background_work_function)(void* background_work_arg),
    void* background_work_arg) {
  background_work_mutex_.Lock();

  // Start the background thread, if we haven't done so already.
  if (!started_background_thread_) {
    started_background_thread_ = true;
    std::thread background_thread(PosixEnv::BackgroundThreadEntryPoint, this);
    background_thread.detach();
  }

  // If the queue is currently empty, the background thread may be blocked
  // waiting for work — wake it.
  if (background_work_queue_.empty()) {
    background_work_cv_.Signal();
  }

  background_work_queue_.emplace(background_work_function, background_work_arg);
  background_work_mutex_.Unlock();
}
// Body of the single background thread: loops forever, draining the work
// queue one item at a time.
void PosixEnv::BackgroundThreadMain() {
  while (true) {
    background_work_mutex_.Lock();

    // Wait until there is work to be done.
    while (background_work_queue_.empty()) {
      background_work_cv_.Wait();
    }

    assert(!background_work_queue_.empty());
    auto background_work_function = background_work_queue_.front().function;
    void* background_work_arg = background_work_queue_.front().arg;
    background_work_queue_.pop();

    background_work_mutex_.Unlock();
    // Run the callback without holding the mutex.
    background_work_function(background_work_arg);
  }
}
namespace {

// Wraps an Env of type EnvType whose destructor is never run: the instance
// is placement-new'ed into raw storage, so process teardown cannot race
// against threads still using the Env.
template <typename EnvType>
class SingletonEnv {
 public:
  SingletonEnv() {
#if !defined(NDEBUG)
    env_initialized_.store(true, std::memory_order_relaxed);
#endif
    static_assert(sizeof(env_storage_) >= sizeof(EnvType),
                  "env_storage_ will not fit the Env");
    static_assert(alignof(decltype(env_storage_)) >= alignof(EnvType),
                  "env_storage_ does not meet the Env's alignment needs");
    new (&env_storage_) EnvType();
  }
  // Intentionally does NOT destroy the placement-new'ed Env.
  ~SingletonEnv() = default;

  SingletonEnv(const SingletonEnv&) = delete;
  SingletonEnv& operator=(const SingletonEnv&) = delete;

  Env* env() { return reinterpret_cast<Env*>(&env_storage_); }

  // Debug-only check that configuration setters (see EnvPosixTestHelper)
  // run before the singleton is first constructed.
  static void AssertEnvNotInitialized() {
#if !defined(NDEBUG)
    assert(!env_initialized_.load(std::memory_order_relaxed));
#endif
  }

 private:
  typename std::aligned_storage<sizeof(EnvType), alignof(EnvType)>::type
      env_storage_;
#if !defined(NDEBUG)
  static std::atomic<bool> env_initialized_;
#endif
};

#if !defined(NDEBUG)
template <typename EnvType>
std::atomic<bool> SingletonEnv<EnvType>::env_initialized_;
#endif

using PosixDefaultEnv = SingletonEnv<PosixEnv>;

}  // namespace
// Test hook: overrides the read-only file-descriptor budget.  Must run
// before Env::Default() first constructs the singleton (debug-checked).
void EnvPosixTestHelper::SetReadOnlyFDLimit(int limit) {
  PosixDefaultEnv::AssertEnvNotInitialized();
  g_open_read_only_file_limit = limit;
}
// Test hook: overrides the mmap budget.  Must run before Env::Default()
// first constructs the singleton (debug-checked).
void EnvPosixTestHelper::SetReadOnlyMMapLimit(int limit) {
  PosixDefaultEnv::AssertEnvNotInitialized();
  g_mmap_limit = limit;
}
// Returns the process-wide POSIX Env, constructed on first call and never
// destroyed (see SingletonEnv).
Env* Env::Default() {
  static PosixDefaultEnv env_container;
  return env_container.env();
}
} | #include <sys/resource.h>
#include <sys/wait.h>
#include <unistd.h>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <string>
#include <unordered_set>
#include <vector>
#include "gtest/gtest.h"
#include "leveldb/env.h"
#include "port/port.h"
#include "util/env_posix_test_helper.h"
#include "util/testutil.h"
#if HAVE_O_CLOEXEC
namespace {
constexpr int kTextCloseOnExecHelperExecFailedCode = 61;
constexpr int kTextCloseOnExecHelperDup2FailedCode = 62;
constexpr int kTextCloseOnExecHelperFoundOpenFdCode = 63;
// Returns the singleton buffer holding this binary's argv[0] (populated in
// main()), used as the executable path when re-exec-ing the test binary as
// a helper child process.
std::vector<char>* GetArgvZero() {
  static std::vector<char> program_name;
  return &program_name;
}
static const char kTestCloseOnExecSwitch[] = "--test-close-on-exec-helper";
// Child-process entry point for the close-on-exec tests.
//
// Verifies that the file descriptor number passed in |pid_arg| (a decimal
// string; despite the parameter name it is an fd, not a pid) is closed in
// this post-exec process: dup2(fd, fd) succeeds on an open descriptor and
// fails with EBADF on a closed one.  Returns 0 when the fd is closed, a
// distinct non-zero code otherwise.
int TestCloseOnExecHelperMain(char* pid_arg) {
  int fd = std::atoi(pid_arg);
  // When given the same file descriptor twice, dup2() returns it if open.
  if (::dup2(fd, fd) == fd) {
    std::fprintf(stderr, "Unexpected open fd %d\n", fd);
    return kTextCloseOnExecHelperFoundOpenFdCode;
  }
  // Double-check that dup2() is actually reporting a closed descriptor.
  if (errno != EBADF) {
    std::fprintf(stderr, "Unexpected errno after calling dup2 on fd %d: %s\n",
                 fd, std::strerror(errno));
    return kTextCloseOnExecHelperDup2FailedCode;
  }
  return 0;
}
// Stores the process' soft limit on open file descriptors — an exclusive
// upper bound on valid fd numbers — into *result_fd.
void GetMaxFileDescriptor(int* result_fd) {
  ::rlimit fd_rlimit;
  ASSERT_EQ(0, ::getrlimit(RLIMIT_NOFILE, &fd_rlimit));
  *result_fd = fd_rlimit.rlim_cur;
}
// Inserts the number of every currently open file descriptor into *open_fds.
//
// Probes each candidate fd below the process limit with dup2(fd, fd), which
// returns fd for open descriptors and fails with EBADF for closed ones,
// without otherwise affecting the descriptor.
void GetOpenFileDescriptors(std::unordered_set<int>* open_fds) {
  int max_fd = 0;
  GetMaxFileDescriptor(&max_fd);

  for (int fd = 0; fd < max_fd; ++fd) {
    if (::dup2(fd, fd) != fd) {
      // The file descriptor is closed.
      ASSERT_EQ(EBADF, errno)
          << "dup2() should set errno to EBADF on closed file descriptors";
      continue;
    }
    open_fds->insert(fd);
  }
}
// Finds the single file descriptor opened since |baseline_open_fds| was
// captured and stores it in *result_fd.  Asserts that no baseline fd was
// closed in the meantime and that exactly one new fd exists.
void GetNewlyOpenedFileDescriptor(
    const std::unordered_set<int>& baseline_open_fds, int* result_fd) {
  std::unordered_set<int> open_fds;
  GetOpenFileDescriptors(&open_fds);
  for (int fd : baseline_open_fds) {
    ASSERT_EQ(1, open_fds.count(fd))
        << "Previously opened file descriptor was closed during test setup";
    open_fds.erase(fd);
  }
  ASSERT_EQ(1, open_fds.size())
      << "Expected exactly one newly opened file descriptor during test setup";
  *result_fd = *open_fds.begin();
}
// Forks and re-execs this test binary as a helper child, asserting that the
// one file descriptor opened since |baseline_open_fds| was captured is NOT
// inherited across exec (i.e. it was opened with O_CLOEXEC).
void CheckCloseOnExecDoesNotLeakFDs(
    const std::unordered_set<int>& baseline_open_fds) {
  // execv() wants mutable argument buffers, so copy the switch string.
  char switch_buffer[sizeof(kTestCloseOnExecSwitch)];
  std::memcpy(switch_buffer, kTestCloseOnExecSwitch,
              sizeof(kTestCloseOnExecSwitch));

  int probed_fd;
  GetNewlyOpenedFileDescriptor(baseline_open_fds, &probed_fd);
  std::string fd_string = std::to_string(probed_fd);
  std::vector<char> fd_buffer(fd_string.begin(), fd_string.end());
  fd_buffer.emplace_back('\0');

  // Child command line:  <argv0> --test-close-on-exec-helper <fd>
  char* child_argv[] = {GetArgvZero()->data(), switch_buffer, fd_buffer.data(),
                        nullptr};

  constexpr int kForkInChildProcessReturnValue = 0;
  int child_pid = fork();
  if (child_pid == kForkInChildProcessReturnValue) {
    // In the child: replace the image; on exec failure report and exit with
    // a code the parent can distinguish.
    ::execv(child_argv[0], child_argv);
    std::fprintf(stderr, "Error spawning child process: %s\n", strerror(errno));
    std::exit(kTextCloseOnExecHelperExecFailedCode);
  }

  // In the parent: reap the child and check its verdict.
  int child_status = 0;
  ASSERT_EQ(child_pid, ::waitpid(child_pid, &child_status, 0));
  ASSERT_TRUE(WIFEXITED(child_status))
      << "The helper process did not exit with an exit code";
  ASSERT_EQ(0, WEXITSTATUS(child_status))
      << "The helper process encountered an error";
}
}
#endif
namespace leveldb {
static const int kReadOnlyFileLimit = 4;
static const int kMMapLimit = 4;
class EnvPosixTest : public testing::Test {
public:
static void SetFileLimits(int read_only_file_limit, int mmap_limit) {
EnvPosixTestHelper::SetReadOnlyFDLimit(read_only_file_limit);
EnvPosixTestHelper::SetReadOnlyMMapLimit(mmap_limit);
}
EnvPosixTest() : env_(Env::Default()) {}
Env* env_;
};
TEST_F(EnvPosixTest, TestOpenOnRead) {
std::string test_dir;
ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir));
std::string test_file = test_dir + "/open_on_read.txt";
FILE* f = std::fopen(test_file.c_str(), "we");
ASSERT_TRUE(f != nullptr);
const char kFileData[] = "abcdefghijklmnopqrstuvwxyz";
fputs(kFileData, f);
std::fclose(f);
const int kNumFiles = kReadOnlyFileLimit + kMMapLimit + 5;
leveldb::RandomAccessFile* files[kNumFiles] = {0};
for (int i = 0; i < kNumFiles; i++) {
ASSERT_LEVELDB_OK(env_->NewRandomAccessFile(test_file, &files[i]));
}
char scratch;
Slice read_result;
for (int i = 0; i < kNumFiles; i++) {
ASSERT_LEVELDB_OK(files[i]->Read(i, 1, &read_result, &scratch));
ASSERT_EQ(kFileData[i], read_result[0]);
}
for (int i = 0; i < kNumFiles; i++) {
delete files[i];
}
ASSERT_LEVELDB_OK(env_->RemoveFile(test_file));
}
#if HAVE_O_CLOEXEC
TEST_F(EnvPosixTest, TestCloseOnExecSequentialFile) {
std::unordered_set<int> open_fds;
GetOpenFileDescriptors(&open_fds);
std::string test_dir;
ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir));
std::string file_path = test_dir + "/close_on_exec_sequential.txt";
ASSERT_LEVELDB_OK(WriteStringToFile(env_, "0123456789", file_path));
leveldb::SequentialFile* file = nullptr;
ASSERT_LEVELDB_OK(env_->NewSequentialFile(file_path, &file));
CheckCloseOnExecDoesNotLeakFDs(open_fds);
delete file;
ASSERT_LEVELDB_OK(env_->RemoveFile(file_path));
}
TEST_F(EnvPosixTest, TestCloseOnExecRandomAccessFile) {
std::unordered_set<int> open_fds;
GetOpenFileDescriptors(&open_fds);
std::string test_dir;
ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir));
std::string file_path = test_dir + "/close_on_exec_random_access.txt";
ASSERT_LEVELDB_OK(WriteStringToFile(env_, "0123456789", file_path));
leveldb::RandomAccessFile* mmapped_files[kMMapLimit];
for (int i = 0; i < kMMapLimit; i++) {
ASSERT_LEVELDB_OK(env_->NewRandomAccessFile(file_path, &mmapped_files[i]));
}
leveldb::RandomAccessFile* file = nullptr;
ASSERT_LEVELDB_OK(env_->NewRandomAccessFile(file_path, &file));
CheckCloseOnExecDoesNotLeakFDs(open_fds);
delete file;
for (int i = 0; i < kMMapLimit; i++) {
delete mmapped_files[i];
}
ASSERT_LEVELDB_OK(env_->RemoveFile(file_path));
}
TEST_F(EnvPosixTest, TestCloseOnExecWritableFile) {
std::unordered_set<int> open_fds;
GetOpenFileDescriptors(&open_fds);
std::string test_dir;
ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir));
std::string file_path = test_dir + "/close_on_exec_writable.txt";
ASSERT_LEVELDB_OK(WriteStringToFile(env_, "0123456789", file_path));
leveldb::WritableFile* file = nullptr;
ASSERT_LEVELDB_OK(env_->NewWritableFile(file_path, &file));
CheckCloseOnExecDoesNotLeakFDs(open_fds);
delete file;
ASSERT_LEVELDB_OK(env_->RemoveFile(file_path));
}
TEST_F(EnvPosixTest, TestCloseOnExecAppendableFile) {
std::unordered_set<int> open_fds;
GetOpenFileDescriptors(&open_fds);
std::string test_dir;
ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir));
std::string file_path = test_dir + "/close_on_exec_appendable.txt";
ASSERT_LEVELDB_OK(WriteStringToFile(env_, "0123456789", file_path));
leveldb::WritableFile* file = nullptr;
ASSERT_LEVELDB_OK(env_->NewAppendableFile(file_path, &file));
CheckCloseOnExecDoesNotLeakFDs(open_fds);
delete file;
ASSERT_LEVELDB_OK(env_->RemoveFile(file_path));
}
TEST_F(EnvPosixTest, TestCloseOnExecLockFile) {
std::unordered_set<int> open_fds;
GetOpenFileDescriptors(&open_fds);
std::string test_dir;
ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir));
std::string file_path = test_dir + "/close_on_exec_lock.txt";
ASSERT_LEVELDB_OK(WriteStringToFile(env_, "0123456789", file_path));
leveldb::FileLock* lock = nullptr;
ASSERT_LEVELDB_OK(env_->LockFile(file_path, &lock));
CheckCloseOnExecDoesNotLeakFDs(open_fds);
ASSERT_LEVELDB_OK(env_->UnlockFile(lock));
ASSERT_LEVELDB_OK(env_->RemoveFile(file_path));
}
TEST_F(EnvPosixTest, TestCloseOnExecLogger) {
std::unordered_set<int> open_fds;
GetOpenFileDescriptors(&open_fds);
std::string test_dir;
ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir));
std::string file_path = test_dir + "/close_on_exec_logger.txt";
ASSERT_LEVELDB_OK(WriteStringToFile(env_, "0123456789", file_path));
leveldb::Logger* file = nullptr;
ASSERT_LEVELDB_OK(env_->NewLogger(file_path, &file));
CheckCloseOnExecDoesNotLeakFDs(open_fds);
delete file;
ASSERT_LEVELDB_OK(env_->RemoveFile(file_path));
}
#endif
}
int main(int argc, char** argv) {
#if HAVE_O_CLOEXEC
for (int i = 1; i < argc; ++i) {
if (!std::strcmp(argv[i], kTestCloseOnExecSwitch)) {
return TestCloseOnExecHelperMain(argv[i + 1]);
}
}
GetArgvZero()->assign(argv[0], argv[0] + std::strlen(argv[0]) + 1);
#endif
leveldb::EnvPosixTest::SetFileLimits(leveldb::kReadOnlyFileLimit,
leveldb::kMMapLimit);
testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
} | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/util/env_posix.cc | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/util/env_posix_test.cc | 23e35d792b9154f922b8b575b12596a4d8664c65 |
419cad26-d16a-4110-b69d-61fb3bd29932 | cpp | google/leveldb | env | util/env.cc | util/env_test.cc | #include "leveldb/env.h"
#include <cstdarg>
#if defined(_WIN32) && defined(LEVELDB_DELETEFILE_UNDEFINED)
#undef DeleteFile
#endif
namespace leveldb {
Env::Env() = default;

Env::~Env() = default;

// Default implementation: appending is an optional Env capability.
Status Env::NewAppendableFile(const std::string& fname, WritableFile** result) {
  return Status::NotSupported("NewAppendableFile", fname);
}

// RemoveDir/DeleteDir (and RemoveFile/DeleteFile below) forward to each
// other, so a concrete Env may override either name.  Overriding neither
// leaves the pair mutually recursive — every subclass must implement one.
Status Env::RemoveDir(const std::string& dirname) { return DeleteDir(dirname); }
Status Env::DeleteDir(const std::string& dirname) { return RemoveDir(dirname); }

Status Env::RemoveFile(const std::string& fname) { return DeleteFile(fname); }
Status Env::DeleteFile(const std::string& fname) { return RemoveFile(fname); }

SequentialFile::~SequentialFile() = default;

RandomAccessFile::~RandomAccessFile() = default;

WritableFile::~WritableFile() = default;

Logger::~Logger() = default;

FileLock::~FileLock() = default;
// Formats the printf-style |format| arguments and writes them to |info_log|.
// A null |info_log| makes this a no-op, so call sites need not check.
void Log(Logger* info_log, const char* format, ...) {
  if (info_log != nullptr) {
    std::va_list ap;
    va_start(ap, format);
    info_log->Logv(format, ap);
    va_end(ap);
  }
}
// Writes |data| as the entire contents of |fname|, optionally fsync'ing
// before close.  On any failure the partially-written file is removed
// (best effort) so no truncated file is left behind.
static Status DoWriteStringToFile(Env* env, const Slice& data,
                                  const std::string& fname, bool should_sync) {
  WritableFile* file;
  Status status = env->NewWritableFile(fname, &file);
  if (!status.ok()) return status;

  status = file->Append(data);
  if (status.ok() && should_sync) status = file->Sync();
  if (status.ok()) status = file->Close();
  delete file;  // Will auto-close if we did not close above.

  if (!status.ok()) env->RemoveFile(fname);
  return status;
}
// Writes |data| as the contents of |fname| without forcing it to stable
// storage.
Status WriteStringToFile(Env* env, const Slice& data,
                         const std::string& fname) {
  return DoWriteStringToFile(env, data, fname, false);
}
// Like WriteStringToFile(), but Sync()s the file before closing it.
Status WriteStringToFileSync(Env* env, const Slice& data,
                             const std::string& fname) {
  return DoWriteStringToFile(env, data, fname, true);
}
// Reads the entire contents of |fname| into *data (cleared first).
// Returns the first error encountered; *data holds whatever was read up to
// that point.
Status ReadFileToString(Env* env, const std::string& fname, std::string* data) {
  data->clear();
  SequentialFile* file;
  Status s = env->NewSequentialFile(fname, &file);
  if (!s.ok()) {
    return s;
  }
  static const int kBufferSize = 8192;
  // RAII scratch buffer (replaces the original raw new[]/delete[] pair, so
  // no manual cleanup is needed on any exit path).
  std::string space(kBufferSize, '\0');
  while (true) {
    Slice fragment;
    s = file->Read(kBufferSize, &fragment, &space[0]);
    if (!s.ok()) {
      break;
    }
    data->append(fragment.data(), fragment.size());
    if (fragment.empty()) {
      // Zero-length read signals end of file.
      break;
    }
  }
  delete file;
  return s;
}
// Trivial out-of-line destructor for the forwarding Env wrapper.
EnvWrapper::~EnvWrapper() {}
} | #include "leveldb/env.h"
#include <algorithm>
#include "gtest/gtest.h"
#include "port/port.h"
#include "port/thread_annotations.h"
#include "util/mutexlock.h"
#include "util/testutil.h"
namespace leveldb {
class EnvTest : public testing::Test {
public:
EnvTest() : env_(Env::Default()) {}
Env* env_;
};
TEST_F(EnvTest, ReadWrite) {
Random rnd(test::RandomSeed());
std::string test_dir;
ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir));
std::string test_file_name = test_dir + "/open_on_read.txt";
WritableFile* writable_file;
ASSERT_LEVELDB_OK(env_->NewWritableFile(test_file_name, &writable_file));
static const size_t kDataSize = 10 * 1048576;
std::string data;
while (data.size() < kDataSize) {
int len = rnd.Skewed(18);
std::string r;
test::RandomString(&rnd, len, &r);
ASSERT_LEVELDB_OK(writable_file->Append(r));
data += r;
if (rnd.OneIn(10)) {
ASSERT_LEVELDB_OK(writable_file->Flush());
}
}
ASSERT_LEVELDB_OK(writable_file->Sync());
ASSERT_LEVELDB_OK(writable_file->Close());
delete writable_file;
SequentialFile* sequential_file;
ASSERT_LEVELDB_OK(env_->NewSequentialFile(test_file_name, &sequential_file));
std::string read_result;
std::string scratch;
while (read_result.size() < data.size()) {
int len = std::min<int>(rnd.Skewed(18), data.size() - read_result.size());
scratch.resize(std::max(len, 1));
Slice read;
ASSERT_LEVELDB_OK(sequential_file->Read(len, &read, &scratch[0]));
if (len > 0) {
ASSERT_GT(read.size(), 0);
}
ASSERT_LE(read.size(), len);
read_result.append(read.data(), read.size());
}
ASSERT_EQ(read_result, data);
delete sequential_file;
}
TEST_F(EnvTest, RunImmediately) {
struct RunState {
port::Mutex mu;
port::CondVar cvar{&mu};
bool called = false;
static void Run(void* arg) {
RunState* state = reinterpret_cast<RunState*>(arg);
MutexLock l(&state->mu);
ASSERT_EQ(state->called, false);
state->called = true;
state->cvar.Signal();
}
};
RunState state;
env_->Schedule(&RunState::Run, &state);
MutexLock l(&state.mu);
while (!state.called) {
state.cvar.Wait();
}
}
TEST_F(EnvTest, RunMany) {
struct RunState {
port::Mutex mu;
port::CondVar cvar{&mu};
int run_count = 0;
};
struct Callback {
RunState* const state_;
bool run = false;
Callback(RunState* s) : state_(s) {}
static void Run(void* arg) {
Callback* callback = reinterpret_cast<Callback*>(arg);
RunState* state = callback->state_;
MutexLock l(&state->mu);
state->run_count++;
callback->run = true;
state->cvar.Signal();
}
};
RunState state;
Callback callback1(&state);
Callback callback2(&state);
Callback callback3(&state);
Callback callback4(&state);
env_->Schedule(&Callback::Run, &callback1);
env_->Schedule(&Callback::Run, &callback2);
env_->Schedule(&Callback::Run, &callback3);
env_->Schedule(&Callback::Run, &callback4);
MutexLock l(&state.mu);
while (state.run_count != 4) {
state.cvar.Wait();
}
ASSERT_TRUE(callback1.run);
ASSERT_TRUE(callback2.run);
ASSERT_TRUE(callback3.run);
ASSERT_TRUE(callback4.run);
}
struct State {
port::Mutex mu;
port::CondVar cvar{&mu};
int val GUARDED_BY(mu);
int num_running GUARDED_BY(mu);
State(int val, int num_running) : val(val), num_running(num_running) {}
};
static void ThreadBody(void* arg) {
State* s = reinterpret_cast<State*>(arg);
s->mu.Lock();
s->val += 1;
s->num_running -= 1;
s->cvar.Signal();
s->mu.Unlock();
}
TEST_F(EnvTest, StartThread) {
State state(0, 3);
for (int i = 0; i < 3; i++) {
env_->StartThread(&ThreadBody, &state);
}
MutexLock l(&state.mu);
while (state.num_running != 0) {
state.cvar.Wait();
}
ASSERT_EQ(state.val, 3);
}
TEST_F(EnvTest, TestOpenNonExistentFile) {
std::string test_dir;
ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir));
std::string non_existent_file = test_dir + "/non_existent_file";
ASSERT_TRUE(!env_->FileExists(non_existent_file));
RandomAccessFile* random_access_file;
Status status =
env_->NewRandomAccessFile(non_existent_file, &random_access_file);
#if defined(LEVELDB_PLATFORM_CHROMIUM)
ASSERT_TRUE(status.IsIOError());
#else
ASSERT_TRUE(status.IsNotFound());
#endif
SequentialFile* sequential_file;
status = env_->NewSequentialFile(non_existent_file, &sequential_file);
#if defined(LEVELDB_PLATFORM_CHROMIUM)
ASSERT_TRUE(status.IsIOError());
#else
ASSERT_TRUE(status.IsNotFound());
#endif
}
TEST_F(EnvTest, ReopenWritableFile) {
std::string test_dir;
ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir));
std::string test_file_name = test_dir + "/reopen_writable_file.txt";
env_->RemoveFile(test_file_name);
WritableFile* writable_file;
ASSERT_LEVELDB_OK(env_->NewWritableFile(test_file_name, &writable_file));
std::string data("hello world!");
ASSERT_LEVELDB_OK(writable_file->Append(data));
ASSERT_LEVELDB_OK(writable_file->Close());
delete writable_file;
ASSERT_LEVELDB_OK(env_->NewWritableFile(test_file_name, &writable_file));
data = "42";
ASSERT_LEVELDB_OK(writable_file->Append(data));
ASSERT_LEVELDB_OK(writable_file->Close());
delete writable_file;
ASSERT_LEVELDB_OK(ReadFileToString(env_, test_file_name, &data));
ASSERT_EQ(std::string("42"), data);
env_->RemoveFile(test_file_name);
}
TEST_F(EnvTest, ReopenAppendableFile) {
std::string test_dir;
ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir));
std::string test_file_name = test_dir + "/reopen_appendable_file.txt";
env_->RemoveFile(test_file_name);
WritableFile* appendable_file;
ASSERT_LEVELDB_OK(env_->NewAppendableFile(test_file_name, &appendable_file));
std::string data("hello world!");
ASSERT_LEVELDB_OK(appendable_file->Append(data));
ASSERT_LEVELDB_OK(appendable_file->Close());
delete appendable_file;
ASSERT_LEVELDB_OK(env_->NewAppendableFile(test_file_name, &appendable_file));
data = "42";
ASSERT_LEVELDB_OK(appendable_file->Append(data));
ASSERT_LEVELDB_OK(appendable_file->Close());
delete appendable_file;
ASSERT_LEVELDB_OK(ReadFileToString(env_, test_file_name, &data));
ASSERT_EQ(std::string("hello world!42"), data);
env_->RemoveFile(test_file_name);
}
} | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/util/env.cc | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/util/env_test.cc | 23e35d792b9154f922b8b575b12596a4d8664c65 |
098ee8f8-91fa-497d-8970-0aa185f2ecb9 | cpp | google/leveldb | table | table/table.cc | table/table_test.cc | #include "leveldb/table.h"
#include "leveldb/cache.h"
#include "leveldb/comparator.h"
#include "leveldb/env.h"
#include "leveldb/filter_policy.h"
#include "leveldb/options.h"
#include "table/block.h"
#include "table/filter_block.h"
#include "table/format.h"
#include "table/two_level_iterator.h"
#include "util/coding.h"
namespace leveldb {
// Internal state of an open Table, kept behind a pointer so the public
// header does not expose implementation details.
struct Table::Rep {
  ~Rep() {
    delete filter;
    delete[] filter_data;  // Non-null only when the filter block was heap-allocated.
    delete index_block;
  }
  Options options;
  Status status;
  RandomAccessFile* file;        // Not owned (not deleted here).
  uint64_t cache_id;             // Prefix for block-cache keys; 0 when no cache.
  FilterBlockReader* filter;     // Owned; may be null when no filter policy.
  const char* filter_data;       // Owned backing bytes for "filter"; may be null.
  BlockHandle metaindex_handle;  // Handle to metaindex block, saved from footer.
  Block* index_block;            // Owned.
};
// Opens an sstable stored in "file" ("size" bytes long).  On success stores
// a heap-allocated Table in *table; on failure *table is left null.  Reads
// the footer, decodes it, and loads the index block; filter loading (via
// ReadMeta) is best-effort.
Status Table::Open(const Options& options, RandomAccessFile* file,
                   uint64_t size, Table** table) {
  *table = nullptr;
  if (size < Footer::kEncodedLength) {
    return Status::Corruption("file is too short to be an sstable");
  }
  // Read and decode the fixed-size footer at the end of the file.
  char footer_space[Footer::kEncodedLength];
  Slice footer_input;
  Status s = file->Read(size - Footer::kEncodedLength, Footer::kEncodedLength,
                        &footer_input, footer_space);
  if (!s.ok()) return s;
  Footer footer;
  s = footer.DecodeFrom(&footer_input);
  if (!s.ok()) return s;
  // Read the index block; checksums are verified only in paranoid mode.
  BlockContents index_block_contents;
  ReadOptions opt;
  if (options.paranoid_checks) {
    opt.verify_checksums = true;
  }
  s = ReadBlock(file, opt, footer.index_handle(), &index_block_contents);
  if (s.ok()) {
    // We have the footer and the index block; build the Table.
    Block* index_block = new Block(index_block_contents);
    Rep* rep = new Table::Rep;
    rep->options = options;
    rep->file = file;
    rep->metaindex_handle = footer.metaindex_handle();
    rep->index_block = index_block;
    rep->cache_id = (options.block_cache ? options.block_cache->NewId() : 0);
    rep->filter_data = nullptr;
    rep->filter = nullptr;
    *table = new Table(rep);
    (*table)->ReadMeta(footer);  // Errors here are ignored (see ReadMeta).
  }
  return s;
}
// Reads the metaindex block and, if a filter policy is configured, loads
// the matching filter block.  All errors are swallowed: a missing filter
// only costs extra block reads, it does not affect correctness.
void Table::ReadMeta(const Footer& footer) {
  if (rep_->options.filter_policy == nullptr) {
    return;  // No policy configured, so nothing to load.
  }
  ReadOptions opt;
  if (rep_->options.paranoid_checks) {
    opt.verify_checksums = true;
  }
  BlockContents contents;
  if (!ReadBlock(rep_->file, opt, footer.metaindex_handle(), &contents).ok()) {
    return;  // Best effort: continue without a filter.
  }
  Block* meta = new Block(contents);
  Iterator* iter = meta->NewIterator(BytewiseComparator());
  // The metaindex maps "filter.<policy name>" -> encoded filter BlockHandle.
  std::string key = "filter.";
  key.append(rep_->options.filter_policy->Name());
  iter->Seek(key);
  if (iter->Valid() && iter->key() == Slice(key)) {
    ReadFilter(iter->value());
  }
  delete iter;
  delete meta;
}
// Decodes "filter_handle_value" as a BlockHandle and reads the filter block
// it points to, installing a FilterBlockReader in rep_.  Failures are
// silently ignored (the table simply runs without a filter).
void Table::ReadFilter(const Slice& filter_handle_value) {
  Slice v = filter_handle_value;
  BlockHandle filter_handle;
  if (!filter_handle.DecodeFrom(&v).ok()) {
    return;
  }
  ReadOptions opt;
  if (rep_->options.paranoid_checks) {
    opt.verify_checksums = true;
  }
  BlockContents block;
  if (!ReadBlock(rep_->file, opt, filter_handle, &block).ok()) {
    return;
  }
  if (block.heap_allocated) {
    // Remember the allocation so Rep's destructor can delete[] it.
    rep_->filter_data = block.data.data();
  }
  rep_->filter = new FilterBlockReader(rep_->options.filter_policy, block.data);
}
Table::~Table() { delete rep_; }
// Iterator cleanup hook for blocks NOT held by the block cache: the
// iterator owns the block and deletes it on destruction.
static void DeleteBlock(void* arg, void* ignored) {
  delete reinterpret_cast<Block*>(arg);
}
// Deleter registered with the block cache; invoked when a cached block is
// evicted.
static void DeleteCachedBlock(const Slice& key, void* value) {
  Block* block = reinterpret_cast<Block*>(value);
  delete block;
}
// Iterator cleanup hook for cached blocks: releases the cache handle; the
// cache retains ownership of the block itself.
static void ReleaseBlock(void* arg, void* h) {
  Cache* cache = reinterpret_cast<Cache*>(arg);
  Cache::Handle* handle = reinterpret_cast<Cache::Handle*>(h);
  cache->Release(handle);
}
// Converts an index-block entry ("index_value", an encoded BlockHandle)
// into an iterator over the corresponding data block, consulting and
// filling the block cache when one is configured.  Used as the second-level
// block function for NewTwoLevelIterator.
Iterator* Table::BlockReader(void* arg, const ReadOptions& options,
                             const Slice& index_value) {
  Table* table = reinterpret_cast<Table*>(arg);
  Cache* block_cache = table->rep_->options.block_cache;
  Block* block = nullptr;
  Cache::Handle* cache_handle = nullptr;
  BlockHandle handle;
  Slice input = index_value;
  Status s = handle.DecodeFrom(&input);
  if (s.ok()) {
    BlockContents contents;
    if (block_cache != nullptr) {
      // Cache key is (table cache_id, block offset), both fixed64-encoded.
      char cache_key_buffer[16];
      EncodeFixed64(cache_key_buffer, table->rep_->cache_id);
      EncodeFixed64(cache_key_buffer + 8, handle.offset());
      Slice key(cache_key_buffer, sizeof(cache_key_buffer));
      cache_handle = block_cache->Lookup(key);
      if (cache_handle != nullptr) {
        block = reinterpret_cast<Block*>(block_cache->Value(cache_handle));
      } else {
        s = ReadBlock(table->rep_->file, options, handle, &contents);
        if (s.ok()) {
          block = new Block(contents);
          // Only insert blocks we own copies of, and only if caller allows.
          if (contents.cachable && options.fill_cache) {
            cache_handle = block_cache->Insert(key, block, block->size(),
                                               &DeleteCachedBlock);
          }
        }
      }
    } else {
      s = ReadBlock(table->rep_->file, options, handle, &contents);
      if (s.ok()) {
        block = new Block(contents);
      }
    }
  }
  Iterator* iter;
  if (block != nullptr) {
    iter = block->NewIterator(table->rep_->options.comparator);
    // Tie the block's (or cache handle's) lifetime to the iterator.
    if (cache_handle == nullptr) {
      iter->RegisterCleanup(&DeleteBlock, block, nullptr);
    } else {
      iter->RegisterCleanup(&ReleaseBlock, block_cache, cache_handle);
    }
  } else {
    iter = NewErrorIterator(s);
  }
  return iter;
}
// Returns a two-level iterator over the table: level one walks the index
// block; level two (BlockReader) opens each data block lazily.
Iterator* Table::NewIterator(const ReadOptions& options) const {
  return NewTwoLevelIterator(
      rep_->index_block->NewIterator(rep_->options.comparator),
      &Table::BlockReader, const_cast<Table*>(this), options);
}
// Point lookup.  Seeks the index for the block that may contain "k",
// consults the filter (when present) to possibly skip the block read
// entirely, and otherwise calls handle_result on the first entry at or
// after k in that block (if any).
Status Table::InternalGet(const ReadOptions& options, const Slice& k, void* arg,
                          void (*handle_result)(void*, const Slice&,
                                                const Slice&)) {
  Status s;
  Iterator* iiter = rep_->index_block->NewIterator(rep_->options.comparator);
  iiter->Seek(k);
  if (iiter->Valid()) {
    Slice handle_value = iiter->value();
    FilterBlockReader* filter = rep_->filter;
    BlockHandle handle;
    if (filter != nullptr && handle.DecodeFrom(&handle_value).ok() &&
        !filter->KeyMayMatch(handle.offset(), k)) {
      // Filter proves the key is absent from this block: not found.
    } else {
      Iterator* block_iter = BlockReader(this, options, iiter->value());
      block_iter->Seek(k);
      if (block_iter->Valid()) {
        (*handle_result)(arg, block_iter->key(), block_iter->value());
      }
      s = block_iter->status();
      delete block_iter;
    }
  }
  if (s.ok()) {
    s = iiter->status();
  }
  delete iiter;
  return s;
}
// Returns the approximate file offset at which data for "key" begins.  Keys
// past the last data block (or with undecodable index entries) map to the
// metaindex offset, which is close to the end of the data region.
uint64_t Table::ApproximateOffsetOf(const Slice& key) const {
  Iterator* index_iter =
      rep_->index_block->NewIterator(rep_->options.comparator);
  index_iter->Seek(key);
  uint64_t result;
  if (index_iter->Valid()) {
    BlockHandle handle;
    Slice input = index_iter->value();
    Status s = handle.DecodeFrom(&input);
    if (s.ok()) {
      result = handle.offset();
    } else {
      // Strange: the index entry failed to decode; fall back to the
      // metaindex offset as an approximation.
      result = rep_->metaindex_handle.offset();
    }
  } else {
    // key is past the last key in the file.
    result = rep_->metaindex_handle.offset();
  }
  delete index_iter;
  return result;
}
}  // namespace leveldb

// ---- table/table_test.cc ----
#include "leveldb/table.h"
#include <map>
#include <string>
#include "gtest/gtest.h"
#include "db/dbformat.h"
#include "db/memtable.h"
#include "db/write_batch_internal.h"
#include "leveldb/db.h"
#include "leveldb/env.h"
#include "leveldb/iterator.h"
#include "leveldb/options.h"
#include "leveldb/table_builder.h"
#include "table/block.h"
#include "table/block_builder.h"
#include "table/format.h"
#include "util/random.h"
#include "util/testutil.h"
namespace leveldb {
// Returns the bytes of "key" in reverse order.
static std::string Reverse(const Slice& key) {
  const std::string str(key.ToString());
  return std::string(str.rbegin(), str.rend());
}
namespace {
// Orders keys by their byte-reversed form; used to verify that blocks,
// tables, memtables and DBs honor a custom comparator.
class ReverseKeyComparator : public Comparator {
 public:
  const char* Name() const override {
    return "leveldb.ReverseBytewiseComparator";
  }
  int Compare(const Slice& a, const Slice& b) const override {
    return BytewiseComparator()->Compare(Reverse(a), Reverse(b));
  }
  // Both key-shortening hooks delegate to the bytewise comparator in
  // reversed space and map the result back.
  void FindShortestSeparator(std::string* start,
                             const Slice& limit) const override {
    std::string s = Reverse(*start);
    std::string l = Reverse(limit);
    BytewiseComparator()->FindShortestSeparator(&s, l);
    *start = Reverse(s);
  }
  void FindShortSuccessor(std::string* key) const override {
    std::string s = Reverse(*key);
    BytewiseComparator()->FindShortSuccessor(&s);
    *key = Reverse(s);
  }
};
}  // namespace
static ReverseKeyComparator reverse_key_comparator;
// Advances *key to a strictly larger key under "cmp": append a NUL for the
// bytewise comparator, or append it in reversed space for the reverse one.
static void Increment(const Comparator* cmp, std::string* key) {
  if (cmp == BytewiseComparator()) {
    key->push_back('\0');
  } else {
    assert(cmp == &reverse_key_comparator);
    std::string rev = Reverse(*key);
    rev.push_back('\0');
    *key = Reverse(rev);
  }
}
namespace {
// Adapts a leveldb Comparator to an STL strict-weak ordering so it can be
// used as the comparison functor of a std::map (see KVMap below).
struct STLLessThan {
  const Comparator* cmp;
  STLLessThan() : cmp(BytewiseComparator()) {}
  STLLessThan(const Comparator* c) : cmp(c) {}
  bool operator()(const std::string& a, const std::string& b) const {
    return cmp->Compare(Slice(a), Slice(b)) < 0;
  }
};
}  // namespace
// An in-memory WritableFile: everything appended is accumulated in a
// string, so tables can be built without touching the filesystem.
class StringSink : public WritableFile {
 public:
  ~StringSink() override = default;
  // Everything written so far, in order.
  const std::string& contents() const { return buffer_; }
  Status Append(const Slice& data) override {
    buffer_.append(data.data(), data.size());
    return Status::OK();
  }
  // There is no underlying device, so lifecycle operations always succeed.
  Status Close() override { return Status::OK(); }
  Status Flush() override { return Status::OK(); }
  Status Sync() override { return Status::OK(); }
 private:
  std::string buffer_;
};
// A RandomAccessFile backed by a copy of an in-memory string; used to read
// back tables written through StringSink.
class StringSource : public RandomAccessFile {
 public:
  StringSource(const Slice& contents)
      : contents_(contents.data(), contents.size()) {}
  ~StringSource() override = default;
  uint64_t Size() const { return contents_.size(); }
  Status Read(uint64_t offset, size_t n, Slice* result,
              char* scratch) const override {
    if (offset >= contents_.size()) {
      return Status::InvalidArgument("invalid Read offset");
    }
    // Clamp reads that run past the end of the data.
    if (offset + n > contents_.size()) {
      n = contents_.size() - offset;
    }
    std::memcpy(scratch, &contents_[offset], n);
    *result = Slice(scratch, n);
    return Status::OK();
  }
 private:
  std::string contents_;
};
typedef std::map<std::string, std::string, STLLessThan> KVMap;
// Abstract test helper: accumulates key/value pairs, then builds some
// representation (block, table, memtable, or DB) and exposes an iterator
// over it, so all representations can share one test driver (Harness).
class Constructor {
 public:
  explicit Constructor(const Comparator* cmp) : data_(STLLessThan(cmp)) {}
  virtual ~Constructor() = default;
  void Add(const std::string& key, const Slice& value) {
    data_[key] = value.ToString();
  }
  // Builds the representation from everything Add()ed so far; returns the
  // sorted keys in *keys and the full key/value map in *kvmap.  The
  // internal map is cleared afterwards.
  void Finish(const Options& options, std::vector<std::string>* keys,
              KVMap* kvmap) {
    *kvmap = data_;
    keys->clear();
    for (const auto& kvp : data_) {
      keys->push_back(kvp.first);
    }
    data_.clear();
    Status s = FinishImpl(options, *kvmap);
    ASSERT_TRUE(s.ok()) << s.ToString();
  }
  // Representation-specific build step.
  virtual Status FinishImpl(const Options& options, const KVMap& data) = 0;
  virtual Iterator* NewIterator() const = 0;
  const KVMap& data() const { return data_; }
  // Only DBConstructor has an underlying DB.
  virtual DB* db() const { return nullptr; }
 private:
  KVMap data_;
};
// Builds a single data block and iterates it directly.
class BlockConstructor : public Constructor {
 public:
  explicit BlockConstructor(const Comparator* cmp)
      : Constructor(cmp), comparator_(cmp), block_(nullptr) {}
  ~BlockConstructor() override { delete block_; }
  Status FinishImpl(const Options& options, const KVMap& data) override {
    delete block_;
    block_ = nullptr;
    BlockBuilder builder(&options);
    for (const auto& kvp : data) {
      builder.Add(kvp.first, kvp.second);
    }
    // Keep a copy of the block bytes alive in data_: the BlockContents
    // below does not own them (heap_allocated == false).
    data_ = builder.Finish().ToString();
    BlockContents contents;
    contents.data = data_;
    contents.cachable = false;
    contents.heap_allocated = false;
    block_ = new Block(contents);
    return Status::OK();
  }
  Iterator* NewIterator() const override {
    return block_->NewIterator(comparator_);
  }
 private:
  const Comparator* const comparator_;
  std::string data_;
  Block* block_;
  BlockConstructor();  // Default construction disallowed.
};
// Builds a whole sstable in memory (StringSink) and reads it back through
// Table (StringSource).
class TableConstructor : public Constructor {
 public:
  TableConstructor(const Comparator* cmp)
      : Constructor(cmp), source_(nullptr), table_(nullptr) {}
  ~TableConstructor() override { Reset(); }
  Status FinishImpl(const Options& options, const KVMap& data) override {
    Reset();
    StringSink sink;
    TableBuilder builder(options, &sink);
    for (const auto& kvp : data) {
      builder.Add(kvp.first, kvp.second);
      EXPECT_LEVELDB_OK(builder.status());
    }
    Status s = builder.Finish();
    EXPECT_LEVELDB_OK(s);
    EXPECT_EQ(sink.contents().size(), builder.FileSize());
    // Open the table from the generated bytes.
    source_ = new StringSource(sink.contents());
    Options table_options;
    table_options.comparator = options.comparator;
    return Table::Open(table_options, source_, sink.contents().size(), &table_);
  }
  Iterator* NewIterator() const override {
    return table_->NewIterator(ReadOptions());
  }
  uint64_t ApproximateOffsetOf(const Slice& key) const {
    return table_->ApproximateOffsetOf(key);
  }
 private:
  void Reset() {
    delete table_;
    delete source_;
    table_ = nullptr;
    source_ = nullptr;
  }
  StringSource* source_;
  Table* table_;
  TableConstructor();  // Default construction disallowed.
};
// Wraps a memtable iterator (which yields internal-format keys) so that it
// presents plain user keys, letting tests treat it like any other iterator.
class KeyConvertingIterator : public Iterator {
 public:
  explicit KeyConvertingIterator(Iterator* iter) : iter_(iter) {}
  KeyConvertingIterator(const KeyConvertingIterator&) = delete;
  KeyConvertingIterator& operator=(const KeyConvertingIterator&) = delete;
  ~KeyConvertingIterator() override { delete iter_; }
  bool Valid() const override { return iter_->Valid(); }
  void Seek(const Slice& target) override {
    // Wrap the user key in an internal key with the max sequence number so
    // the underlying seek lands on the first entry for that user key.
    ParsedInternalKey ikey(target, kMaxSequenceNumber, kTypeValue);
    std::string encoded;
    AppendInternalKey(&encoded, ikey);
    iter_->Seek(encoded);
  }
  void SeekToFirst() override { iter_->SeekToFirst(); }
  void SeekToLast() override { iter_->SeekToLast(); }
  void Next() override { iter_->Next(); }
  void Prev() override { iter_->Prev(); }
  Slice key() const override {
    assert(Valid());
    ParsedInternalKey key;
    if (!ParseInternalKey(iter_->key(), &key)) {
      status_ = Status::Corruption("malformed internal key");
      return Slice("corrupted key");
    }
    return key.user_key;
  }
  Slice value() const override { return iter_->value(); }
  Status status() const override {
    return status_.ok() ? iter_->status() : status_;
  }
 private:
  mutable Status status_;  // Records corruption seen while decoding keys.
  Iterator* iter_;
};
// Inserts the data into a MemTable and iterates it through the
// KeyConvertingIterator wrapper above.
class MemTableConstructor : public Constructor {
 public:
  explicit MemTableConstructor(const Comparator* cmp)
      : Constructor(cmp), internal_comparator_(cmp) {
    memtable_ = new MemTable(internal_comparator_);
    memtable_->Ref();
  }
  ~MemTableConstructor() override { memtable_->Unref(); }
  Status FinishImpl(const Options& options, const KVMap& data) override {
    // MemTables are reference counted; drop the old one, make a fresh one.
    memtable_->Unref();
    memtable_ = new MemTable(internal_comparator_);
    memtable_->Ref();
    int seq = 1;
    for (const auto& kvp : data) {
      memtable_->Add(seq, kTypeValue, kvp.first, kvp.second);
      seq++;
    }
    return Status::OK();
  }
  Iterator* NewIterator() const override {
    return new KeyConvertingIterator(memtable_->NewIterator());
  }
 private:
  const InternalKeyComparator internal_comparator_;
  MemTable* memtable_;
};
// Writes the data into a fresh on-disk DB and iterates it.
class DBConstructor : public Constructor {
 public:
  explicit DBConstructor(const Comparator* cmp)
      : Constructor(cmp), comparator_(cmp) {
    db_ = nullptr;
    NewDB();
  }
  ~DBConstructor() override { delete db_; }
  Status FinishImpl(const Options& options, const KVMap& data) override {
    delete db_;
    db_ = nullptr;
    NewDB();
    for (const auto& kvp : data) {
      WriteBatch batch;
      batch.Put(kvp.first, kvp.second);
      EXPECT_TRUE(db_->Write(WriteOptions(), &batch).ok());
    }
    return Status::OK();
  }
  Iterator* NewIterator() const override {
    return db_->NewIterator(ReadOptions());
  }
  DB* db() const override { return db_; }
 private:
  // Destroys any leftover DB at the test path and opens a brand-new one.
  void NewDB() {
    std::string name = testing::TempDir() + "table_testdb";
    Options options;
    options.comparator = comparator_;
    Status status = DestroyDB(name, options);
    ASSERT_TRUE(status.ok()) << status.ToString();
    options.create_if_missing = true;
    options.error_if_exists = true;
    options.write_buffer_size = 10000;  // Deliberately small write buffer.
    status = DB::Open(options, name, &db_);
    ASSERT_TRUE(status.ok()) << status.ToString();
  }
  const Comparator* const comparator_;
  DB* db_;
};
// Each Harness test below runs against every combination in kTestArgList:
// the representation under test, the comparator direction, and the block
// restart interval (which only matters for blocks and tables).
enum TestType { TABLE_TEST, BLOCK_TEST, MEMTABLE_TEST, DB_TEST };
struct TestArgs {
  TestType type;
  bool reverse_compare;
  int restart_interval;
};
static const TestArgs kTestArgList[] = {
    {TABLE_TEST, false, 16},
    {TABLE_TEST, false, 1},
    {TABLE_TEST, false, 1024},
    {TABLE_TEST, true, 16},
    {TABLE_TEST, true, 1},
    {TABLE_TEST, true, 1024},
    {BLOCK_TEST, false, 16},
    {BLOCK_TEST, false, 1},
    {BLOCK_TEST, false, 1024},
    {BLOCK_TEST, true, 16},
    {BLOCK_TEST, true, 1},
    {BLOCK_TEST, true, 1024},
    // Restart interval does not matter for memtables or DBs.
    {MEMTABLE_TEST, false, 16},
    {MEMTABLE_TEST, true, 16},
    {DB_TEST, false, 16},
    {DB_TEST, true, 16},
};
static const int kNumTestArgs = sizeof(kTestArgList) / sizeof(kTestArgList[0]);
// Test driver: builds a Constructor from TestArgs, then checks forward,
// backward, and randomized iterator access against an in-memory KVMap model.
class Harness : public testing::Test {
 public:
  Harness() : constructor_(nullptr) {}
  // (Re)configures the harness for one TestArgs combination.
  void Init(const TestArgs& args) {
    delete constructor_;
    constructor_ = nullptr;
    options_ = Options();
    options_.block_restart_interval = args.restart_interval;
    options_.block_size = 256;  // Small blocks so tests cross block boundaries.
    if (args.reverse_compare) {
      options_.comparator = &reverse_key_comparator;
    }
    switch (args.type) {
      case TABLE_TEST:
        constructor_ = new TableConstructor(options_.comparator);
        break;
      case BLOCK_TEST:
        constructor_ = new BlockConstructor(options_.comparator);
        break;
      case MEMTABLE_TEST:
        constructor_ = new MemTableConstructor(options_.comparator);
        break;
      case DB_TEST:
        constructor_ = new DBConstructor(options_.comparator);
        break;
    }
  }
  ~Harness() { delete constructor_; }
  void Add(const std::string& key, const std::string& value) {
    constructor_->Add(key, value);
  }
  // Builds the representation and runs all three scan checks.
  void Test(Random* rnd) {
    std::vector<std::string> keys;
    KVMap data;
    constructor_->Finish(options_, &keys, &data);
    TestForwardScan(keys, data);
    TestBackwardScan(keys, data);
    TestRandomAccess(rnd, keys, data);
  }
  // Full forward scan must visit exactly the model's entries in order.
  void TestForwardScan(const std::vector<std::string>& keys,
                       const KVMap& data) {
    Iterator* iter = constructor_->NewIterator();
    ASSERT_TRUE(!iter->Valid());
    iter->SeekToFirst();
    for (KVMap::const_iterator model_iter = data.begin();
         model_iter != data.end(); ++model_iter) {
      ASSERT_EQ(ToString(data, model_iter), ToString(iter));
      iter->Next();
    }
    ASSERT_TRUE(!iter->Valid());
    delete iter;
  }
  // Full backward scan must visit the model's entries in reverse order.
  void TestBackwardScan(const std::vector<std::string>& keys,
                        const KVMap& data) {
    Iterator* iter = constructor_->NewIterator();
    ASSERT_TRUE(!iter->Valid());
    iter->SeekToLast();
    for (KVMap::const_reverse_iterator model_iter = data.rbegin();
         model_iter != data.rend(); ++model_iter) {
      ASSERT_EQ(ToString(data, model_iter), ToString(iter));
      iter->Prev();
    }
    ASSERT_TRUE(!iter->Valid());
    delete iter;
  }
  // Applies 200 random operations (Next/Prev/Seek/SeekToFirst/SeekToLast)
  // to both the real iterator and the model, checking agreement each step.
  void TestRandomAccess(Random* rnd, const std::vector<std::string>& keys,
                        const KVMap& data) {
    static const bool kVerbose = false;
    Iterator* iter = constructor_->NewIterator();
    ASSERT_TRUE(!iter->Valid());
    KVMap::const_iterator model_iter = data.begin();
    if (kVerbose) std::fprintf(stderr, "---\n");
    for (int i = 0; i < 200; i++) {
      const int toss = rnd->Uniform(5);
      switch (toss) {
        case 0: {  // Next (only when positioned).
          if (iter->Valid()) {
            if (kVerbose) std::fprintf(stderr, "Next\n");
            iter->Next();
            ++model_iter;
            ASSERT_EQ(ToString(data, model_iter), ToString(iter));
          }
          break;
        }
        case 1: {  // SeekToFirst.
          if (kVerbose) std::fprintf(stderr, "SeekToFirst\n");
          iter->SeekToFirst();
          model_iter = data.begin();
          ASSERT_EQ(ToString(data, model_iter), ToString(iter));
          break;
        }
        case 2: {  // Seek to a randomly perturbed existing key.
          std::string key = PickRandomKey(rnd, keys);
          model_iter = data.lower_bound(key);
          if (kVerbose)
            std::fprintf(stderr, "Seek '%s'\n", EscapeString(key).c_str());
          iter->Seek(Slice(key));
          ASSERT_EQ(ToString(data, model_iter), ToString(iter));
          break;
        }
        case 3: {  // Prev (only when positioned).
          if (iter->Valid()) {
            if (kVerbose) std::fprintf(stderr, "Prev\n");
            iter->Prev();
            if (model_iter == data.begin()) {
              model_iter = data.end();  // Wrap around to invalid.
            } else {
              --model_iter;
            }
            ASSERT_EQ(ToString(data, model_iter), ToString(iter));
          }
          break;
        }
        case 4: {  // SeekToLast.
          if (kVerbose) std::fprintf(stderr, "SeekToLast\n");
          iter->SeekToLast();
          if (keys.empty()) {
            model_iter = data.end();
          } else {
            std::string last = data.rbegin()->first;
            model_iter = data.lower_bound(last);
          }
          ASSERT_EQ(ToString(data, model_iter), ToString(iter));
          break;
        }
      }
    }
    delete iter;
  }
  // The ToString helpers render a position as "'key->value'" or "END" so
  // model and iterator positions can be compared with a single ASSERT_EQ.
  std::string ToString(const KVMap& data, const KVMap::const_iterator& it) {
    if (it == data.end()) {
      return "END";
    } else {
      return "'" + it->first + "->" + it->second + "'";
    }
  }
  std::string ToString(const KVMap& data,
                       const KVMap::const_reverse_iterator& it) {
    if (it == data.rend()) {
      return "END";
    } else {
      return "'" + it->first + "->" + it->second + "'";
    }
  }
  std::string ToString(const Iterator* it) {
    if (!it->Valid()) {
      return "END";
    } else {
      return "'" + it->key().ToString() + "->" + it->value().ToString() + "'";
    }
  }
  // Picks an existing key, then randomly returns it unchanged, slightly
  // smaller (last byte decremented), or slightly larger (Increment).
  std::string PickRandomKey(Random* rnd, const std::vector<std::string>& keys) {
    if (keys.empty()) {
      return "foo";  // Any key will do for an empty data set.
    } else {
      const int index = rnd->Uniform(keys.size());
      std::string result = keys[index];
      switch (rnd->Uniform(3)) {
        case 0:
          // Return an existing key.
          break;
        case 1: {
          // Attempt to return something smaller than an existing key.
          if (!result.empty() && result[result.size() - 1] > '\0') {
            result[result.size() - 1]--;
          }
          break;
        }
        case 2: {
          // Return something larger than an existing key.
          Increment(options_.comparator, &result);
          break;
        }
      }
      return result;
    }
  }
  // Returns nullptr except for DB_TEST runs.
  DB* db() const { return constructor_->db(); }
 private:
  Options options_;
  Constructor* constructor_;
};
// An empty data set must yield iterators that are immediately invalid for
// every representation/comparator combination.
TEST_F(Harness, Empty) {
  for (int i = 0; i < kNumTestArgs; i++) {
    Init(kTestArgList[i]);
    Random rnd(test::RandomSeed() + 1);
    Test(&rnd);
  }
}
// A block whose trailer claims zero restart points must behave as an empty
// block rather than crash on Seek/SeekToFirst/SeekToLast.
TEST_F(Harness, ZeroRestartPointsInBlock) {
  char data[sizeof(uint32_t)];
  memset(data, 0, sizeof(data));
  BlockContents contents;
  contents.data = Slice(data, sizeof(data));
  contents.cachable = false;
  contents.heap_allocated = false;
  Block block(contents);
  Iterator* iter = block.NewIterator(BytewiseComparator());
  iter->SeekToFirst();
  ASSERT_TRUE(!iter->Valid());
  iter->SeekToLast();
  ASSERT_TRUE(!iter->Valid());
  iter->Seek("foo");
  ASSERT_TRUE(!iter->Valid());
  delete iter;
}
// Single entry whose key is the empty string.
TEST_F(Harness, SimpleEmptyKey) {
  for (int i = 0; i < kNumTestArgs; i++) {
    Init(kTestArgList[i]);
    Random rnd(test::RandomSeed() + 1);
    Add("", "v");
    Test(&rnd);
  }
}
// Single ordinary entry.
TEST_F(Harness, SimpleSingle) {
  for (int i = 0; i < kNumTestArgs; i++) {
    Init(kTestArgList[i]);
    Random rnd(test::RandomSeed() + 2);
    Add("abc", "v");
    Test(&rnd);
  }
}
// A few entries including keys that share prefixes.
TEST_F(Harness, SimpleMulti) {
  for (int i = 0; i < kNumTestArgs; i++) {
    Init(kTestArgList[i]);
    Random rnd(test::RandomSeed() + 3);
    Add("abc", "v");
    Add("abcd", "v");
    Add("ac", "v2");
    Test(&rnd);
  }
}
// A key consisting of maximal bytes (0xff 0xff).
TEST_F(Harness, SimpleSpecialKey) {
  for (int i = 0; i < kNumTestArgs; i++) {
    Init(kTestArgList[i]);
    Random rnd(test::RandomSeed() + 4);
    Add("\xff\xff", "v3");
    Test(&rnd);
  }
}
// Randomized workloads of growing size (0..2000 entries) against every
// representation/comparator combination.
TEST_F(Harness, Randomized) {
  for (int i = 0; i < kNumTestArgs; i++) {
    Init(kTestArgList[i]);
    Random rnd(test::RandomSeed() + 5);
    for (int num_entries = 0; num_entries < 2000;
         num_entries += (num_entries < 50 ? 1 : 200)) {
      if ((num_entries % 10) == 0) {
        // Progress output; the full sweep is slow.
        std::fprintf(stderr, "case %d of %d: num_entries = %d\n", (i + 1),
                     int(kNumTestArgs), num_entries);
      }
      for (int e = 0; e < num_entries; e++) {
        std::string v;
        Add(test::RandomKey(&rnd, rnd.Skewed(4)),
            test::RandomString(&rnd, rnd.Skewed(5), &v).ToString());
      }
      Test(&rnd);
    }
  }
}
// A large randomized run against a real DB; afterwards verifies via DB
// properties that at least one table file was produced on some level.
TEST_F(Harness, RandomizedLongDB) {
  Random rnd(test::RandomSeed());
  TestArgs args = {DB_TEST, false, 16};
  Init(args);
  int num_entries = 100000;
  for (int e = 0; e < num_entries; e++) {
    std::string v;
    Add(test::RandomKey(&rnd, rnd.Skewed(4)),
        test::RandomString(&rnd, rnd.Skewed(5), &v).ToString());
  }
  Test(&rnd);
  // We must have written enough data that the memtable spilled to files.
  int files = 0;
  for (int level = 0; level < config::kNumLevels; level++) {
    std::string value;
    char name[100];
    std::snprintf(name, sizeof(name), "leveldb.num-files-at-level%d", level);
    ASSERT_TRUE(db()->GetProperty(name, &value));
    files += atoi(value.c_str());
  }
  ASSERT_GT(files, 0);
}
// Smoke test: insert a few entries through a WriteBatch and dump them via a
// memtable iterator (the output is printed, not asserted).
TEST(MemTableTest, Simple) {
  InternalKeyComparator cmp(BytewiseComparator());
  MemTable* memtable = new MemTable(cmp);
  memtable->Ref();
  WriteBatch batch;
  WriteBatchInternal::SetSequence(&batch, 100);
  batch.Put(std::string("k1"), std::string("v1"));
  batch.Put(std::string("k2"), std::string("v2"));
  batch.Put(std::string("k3"), std::string("v3"));
  batch.Put(std::string("largekey"), std::string("vlarge"));
  ASSERT_TRUE(WriteBatchInternal::InsertInto(&batch, memtable).ok());
  Iterator* iter = memtable->NewIterator();
  iter->SeekToFirst();
  while (iter->Valid()) {
    std::fprintf(stderr, "key: '%s' -> '%s'\n", iter->key().ToString().c_str(),
                 iter->value().ToString().c_str());
    iter->Next();
  }
  delete iter;
  memtable->Unref();  // MemTables are reference counted, not deleted.
}
// Returns true iff low <= val <= high; on failure also logs the offending
// value and range to stderr so the enclosing ASSERT gives useful context.
static bool Between(uint64_t val, uint64_t low, uint64_t high) {
  if (val >= low && val <= high) {
    return true;
  }
  std::fprintf(stderr, "Value %llu is not in range [%llu, %llu]\n",
               (unsigned long long)(val), (unsigned long long)(low),
               (unsigned long long)(high));
  return false;
}
// With compression disabled, ApproximateOffsetOf should track the
// cumulative size of the values stored before each key.
TEST(TableTest, ApproximateOffsetOfPlain) {
  TableConstructor c(BytewiseComparator());
  c.Add("k01", "hello");
  c.Add("k02", "hello2");
  c.Add("k03", std::string(10000, 'x'));
  c.Add("k04", std::string(200000, 'x'));
  c.Add("k05", std::string(300000, 'x'));
  c.Add("k06", "hello3");
  c.Add("k07", std::string(100000, 'x'));
  std::vector<std::string> keys;
  KVMap kvmap;
  Options options;
  options.block_size = 1024;
  options.compression = kNoCompression;
  c.Finish(options, &keys, &kvmap);
  // Keys at or before k03 all live near offset 0; later keys sit after the
  // accumulated 10000/200000/300000-byte values.
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("abc"), 0, 0));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01"), 0, 0));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01a"), 0, 0));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k02"), 0, 0));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k03"), 0, 0));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k04"), 10000, 11000));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k04a"), 210000, 211000));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k05"), 210000, 211000));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k06"), 510000, 511000));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k07"), 510000, 511000));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("xyz"), 610000, 612000));
}
// Returns true iff this build can compress with "type", determined by
// actually compressing a small sample through the port layer.
static bool CompressionSupported(CompressionType type) {
  std::string out;
  Slice in = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
  if (type == kSnappyCompression) {
    return port::Snappy_Compress(in.data(), in.size(), &out);
  } else if (type == kZstdCompression) {
    return port::Zstd_Compress(1, in.data(), in.size(), &out);
  }
  return false;
}
// Value-parameterized fixture: runs once per compression algorithm.
class CompressionTableTest
    : public ::testing::TestWithParam<std::tuple<CompressionType>> {};
INSTANTIATE_TEST_SUITE_P(CompressionTests, CompressionTableTest,
                         ::testing::Values(kSnappyCompression,
                                           kZstdCompression));
// With 4:1-compressible values, each 10000-byte value should occupy roughly
// 2500 bytes on disk; offsets are checked within a generous slop because
// compressor effectiveness varies.
TEST_P(CompressionTableTest, ApproximateOffsetOfCompressed) {
  CompressionType type = ::testing::get<0>(GetParam());
  if (!CompressionSupported(type)) {
    GTEST_SKIP() << "skipping compression test: " << type;
  }
  Random rnd(301);
  TableConstructor c(BytewiseComparator());
  std::string tmp;
  c.Add("k01", "hello");
  c.Add("k02", test::CompressibleString(&rnd, 0.25, 10000, &tmp));
  c.Add("k03", "hello3");
  c.Add("k04", test::CompressibleString(&rnd, 0.25, 10000, &tmp));
  std::vector<std::string> keys;
  KVMap kvmap;
  Options options;
  options.block_size = 1024;
  options.compression = type;
  c.Finish(options, &keys, &kvmap);
  // Expected bounds on the space used by each compressible value.
  static const int kSlop = 1000;
  const int expected = 2500;  // 10000 bytes * compression ratio 0.25.
  const int min_z = expected - kSlop;
  const int max_z = expected + kSlop;
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("abc"), 0, kSlop));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01"), 0, kSlop));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k02"), 0, kSlop));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k03"), min_z, max_z));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k04"), min_z, max_z));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("xyz"), 2 * min_z, 2 * max_z));
}
}  // namespace leveldb

// ---- table/filter_block.cc ----
#include "table/filter_block.h"
#include "leveldb/filter_policy.h"
#include "util/coding.h"
namespace leveldb {
// Generate a new filter for every 2KB (1 << 11 bytes) of file data.
static const size_t kFilterBaseLg = 11;
static const size_t kFilterBase = 1 << kFilterBaseLg;
FilterBlockBuilder::FilterBlockBuilder(const FilterPolicy* policy)
    : policy_(policy) {}
// Called when a data block starts at "block_offset".  Emits one filter per
// kFilterBase bytes of file space, so it may generate several (possibly
// empty) filters to catch up to the new offset.
void FilterBlockBuilder::StartBlock(uint64_t block_offset) {
  uint64_t filter_index = (block_offset / kFilterBase);
  assert(filter_index >= filter_offsets_.size());  // Offsets never go back.
  while (filter_index > filter_offsets_.size()) {
    GenerateFilter();
  }
}
void FilterBlockBuilder::AddKey(const Slice& key) {
Slice k = key;
start_.push_back(keys_.size());
keys_.append(k.data(), k.size());
}
// Finalizes the filter block.  Layout: [filter data][fixed32 offset per
// filter][fixed32 start-of-offset-array][1 byte kFilterBaseLg].  The
// returned Slice points into result_ and stays valid while *this lives.
Slice FilterBlockBuilder::Finish() {
  if (!start_.empty()) {
    GenerateFilter();  // Flush keys buffered since the last StartBlock().
  }
  // Append the array of per-filter offsets.
  const uint32_t array_offset = result_.size();
  for (size_t i = 0; i < filter_offsets_.size(); i++) {
    PutFixed32(&result_, filter_offsets_[i]);
  }
  PutFixed32(&result_, array_offset);
  result_.push_back(kFilterBaseLg);  // Save encoding parameter in trailer.
  return Slice(result_);
}
// Emits one filter built from the keys buffered so far.  With no buffered
// keys it just records the current offset, producing an empty filter entry.
void FilterBlockBuilder::GenerateFilter() {
  const size_t num_keys = start_.size();
  if (num_keys == 0) {
    // Fast path: no keys for this filter range.
    filter_offsets_.push_back(result_.size());
    return;
  }
  // Reconstruct the list of keys from the flattened representation.
  start_.push_back(keys_.size());  // Sentinel simplifies length computation.
  tmp_keys_.resize(num_keys);
  for (size_t i = 0; i < num_keys; i++) {
    const char* base = keys_.data() + start_[i];
    size_t length = start_[i + 1] - start_[i];
    tmp_keys_[i] = Slice(base, length);
  }
  // Generate the filter for the current key set and append it to result_.
  filter_offsets_.push_back(result_.size());
  policy_->CreateFilter(&tmp_keys_[0], static_cast<int>(num_keys), &result_);
  tmp_keys_.clear();
  keys_.clear();
  start_.clear();
}
// Parses a filter block produced by FilterBlockBuilder.  On malformed input
// the reader keeps num_ == 0, so KeyMayMatch() conservatively reports every
// key as a potential match.
FilterBlockReader::FilterBlockReader(const FilterPolicy* policy,
                                     const Slice& contents)
    : policy_(policy), data_(nullptr), offset_(nullptr), num_(0), base_lg_(0) {
  size_t n = contents.size();
  if (n < 5) return;  // 1 byte base_lg_ + 4 bytes offset-array start.
  base_lg_ = contents[n - 1];
  uint32_t last_word = DecodeFixed32(contents.data() + n - 5);
  if (last_word > n - 5) return;  // Offset array start is out of range.
  data_ = contents.data();
  offset_ = data_ + last_word;
  num_ = (n - 5 - last_word) / 4;
}
// Returns false only when the filter covering the block at "block_offset"
// proves the key is absent; out-of-range indices and malformed offsets are
// conservatively treated as potential matches.
bool FilterBlockReader::KeyMayMatch(uint64_t block_offset, const Slice& key) {
  uint64_t index = block_offset >> base_lg_;
  if (index < num_) {
    uint32_t start = DecodeFixed32(offset_ + index * 4);
    uint32_t limit = DecodeFixed32(offset_ + index * 4 + 4);
    if (start <= limit && limit <= static_cast<size_t>(offset_ - data_)) {
      Slice filter = Slice(data_ + start, limit - start);
      return policy_->KeyMayMatch(key, filter);
    } else if (start == limit) {
      // Empty filters do not match any keys.
      return false;
    }
  }
  return true;  // Errors are treated as potential matches.
}
}  // namespace leveldb

// ---- table/filter_block_test.cc ----
#include "table/filter_block.h"
#include "gtest/gtest.h"
#include "leveldb/filter_policy.h"
#include "util/coding.h"
#include "util/hash.h"
#include "util/logging.h"
#include "util/testutil.h"
namespace leveldb {
// A trivial FilterPolicy for testing: the "filter" is just the list of
// fixed32 hashes of the keys, and a key may match iff its hash appears.
class TestHashFilter : public FilterPolicy {
 public:
  const char* Name() const override { return "TestHashFilter"; }
  void CreateFilter(const Slice* keys, int n, std::string* dst) const override {
    for (int i = 0; i < n; i++) {
      uint32_t h = Hash(keys[i].data(), keys[i].size(), 1);
      PutFixed32(dst, h);
    }
  }
  bool KeyMayMatch(const Slice& key, const Slice& filter) const override {
    uint32_t h = Hash(key.data(), key.size(), 1);
    // Linear scan over the stored fixed32 hashes.
    for (size_t i = 0; i + 4 <= filter.size(); i += 4) {
      if (h == DecodeFixed32(filter.data() + i)) {
        return true;
      }
    }
    return false;
  }
};
// Fixture providing a shared TestHashFilter policy.
class FilterBlockTest : public testing::Test {
 public:
  TestHashFilter policy_;
};
// An empty builder still produces a well-formed block: empty offset array,
// array-start word 0, and the base-lg trailer byte (0x0b == 11).
TEST_F(FilterBlockTest, EmptyBuilder) {
  FilterBlockBuilder builder(&policy_);
  Slice block = builder.Finish();
  ASSERT_EQ("\\x00\\x00\\x00\\x00\\x0b", EscapeString(block));
  FilterBlockReader reader(&policy_, block);
  // With no filters, every query conservatively "may match".
  ASSERT_TRUE(reader.KeyMayMatch(0, "foo"));
  ASSERT_TRUE(reader.KeyMayMatch(100000, "foo"));
}
// All block offsets (100, 200, 300) fall into the same 2KB filter range, so
// one filter covers every key added across the three StartBlock calls.
TEST_F(FilterBlockTest, SingleChunk) {
  FilterBlockBuilder builder(&policy_);
  builder.StartBlock(100);
  builder.AddKey("foo");
  builder.AddKey("bar");
  builder.AddKey("box");
  builder.StartBlock(200);
  builder.AddKey("box");
  builder.StartBlock(300);
  builder.AddKey("hello");
  Slice block = builder.Finish();
  FilterBlockReader reader(&policy_, block);
  ASSERT_TRUE(reader.KeyMayMatch(100, "foo"));
  ASSERT_TRUE(reader.KeyMayMatch(100, "bar"));
  ASSERT_TRUE(reader.KeyMayMatch(100, "box"));
  ASSERT_TRUE(reader.KeyMayMatch(100, "hello"));
  ASSERT_TRUE(reader.KeyMayMatch(100, "foo"));
  ASSERT_TRUE(!reader.KeyMayMatch(100, "missing"));
  ASSERT_TRUE(!reader.KeyMayMatch(100, "other"));
}
// Blocks at widely spaced offsets get distinct filters; queries must only
// match keys that were added to the filter covering the queried offset.
TEST_F(FilterBlockTest, MultiChunk) {
  FilterBlockBuilder builder(&policy_);
  // First filter (offsets [0, 2KB)).
  builder.StartBlock(0);
  builder.AddKey("foo");
  builder.StartBlock(2000);
  builder.AddKey("bar");
  // Second filter ([2KB, 4KB)).
  builder.StartBlock(3100);
  builder.AddKey("box");
  // Third filter range is empty; fourth filter ([8KB, 10KB)).
  builder.StartBlock(9000);
  builder.AddKey("box");
  builder.AddKey("hello");
  Slice block = builder.Finish();
  FilterBlockReader reader(&policy_, block);
  // Check first filter.
  ASSERT_TRUE(reader.KeyMayMatch(0, "foo"));
  ASSERT_TRUE(reader.KeyMayMatch(2000, "bar"));
  ASSERT_TRUE(!reader.KeyMayMatch(0, "box"));
  ASSERT_TRUE(!reader.KeyMayMatch(0, "hello"));
  // Check second filter.
  ASSERT_TRUE(reader.KeyMayMatch(3100, "box"));
  ASSERT_TRUE(!reader.KeyMayMatch(3100, "foo"));
  ASSERT_TRUE(!reader.KeyMayMatch(3100, "bar"));
  ASSERT_TRUE(!reader.KeyMayMatch(3100, "hello"));
  // Check third filter (empty).
  ASSERT_TRUE(!reader.KeyMayMatch(4100, "foo"));
  ASSERT_TRUE(!reader.KeyMayMatch(4100, "bar"));
  ASSERT_TRUE(!reader.KeyMayMatch(4100, "box"));
  ASSERT_TRUE(!reader.KeyMayMatch(4100, "hello"));
  // Check last filter.
  ASSERT_TRUE(reader.KeyMayMatch(9000, "box"));
  ASSERT_TRUE(reader.KeyMayMatch(9000, "hello"));
  ASSERT_TRUE(!reader.KeyMayMatch(9000, "foo"));
  ASSERT_TRUE(!reader.KeyMayMatch(9000, "bar"));
}
} | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/table/filter_block.cc | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/table/filter_block_test.cc | 23e35d792b9154f922b8b575b12596a4d8664c65 |
// ---- db/filename.cc ----
#include "db/filename.h"
#include <cassert>
#include <cstdio>
#include "db/dbformat.h"
#include "leveldb/env.h"
#include "util/logging.h"
namespace leveldb {
Status WriteStringToFileSync(Env* env, const Slice& data,
const std::string& fname);
static std::string MakeFileName(const std::string& dbname, uint64_t number,
const char* suffix) {
char buf[100];
std::snprintf(buf, sizeof(buf), "/%06llu.%s",
static_cast<unsigned long long>(number), suffix);
return dbname + buf;
}
std::string LogFileName(const std::string& dbname, uint64_t number) {
assert(number > 0);
return MakeFileName(dbname, number, "log");
}
std::string TableFileName(const std::string& dbname, uint64_t number) {
assert(number > 0);
return MakeFileName(dbname, number, "ldb");
}
std::string SSTTableFileName(const std::string& dbname, uint64_t number) {
assert(number > 0);
return MakeFileName(dbname, number, "sst");
}
std::string DescriptorFileName(const std::string& dbname, uint64_t number) {
assert(number > 0);
char buf[100];
std::snprintf(buf, sizeof(buf), "/MANIFEST-%06llu",
static_cast<unsigned long long>(number));
return dbname + buf;
}
std::string CurrentFileName(const std::string& dbname) {
return dbname + "/CURRENT";
}
std::string LockFileName(const std::string& dbname) { return dbname + "/LOCK"; }
std::string TempFileName(const std::string& dbname, uint64_t number) {
assert(number > 0);
return MakeFileName(dbname, number, "dbtmp");
}
std::string InfoLogFileName(const std::string& dbname) {
return dbname + "/LOG";
}
std::string OldInfoLogFileName(const std::string& dbname) {
return dbname + "/LOG.old";
}
bool ParseFileName(const std::string& filename, uint64_t* number,
FileType* type) {
Slice rest(filename);
if (rest == "CURRENT") {
*number = 0;
*type = kCurrentFile;
} else if (rest == "LOCK") {
*number = 0;
*type = kDBLockFile;
} else if (rest == "LOG" || rest == "LOG.old") {
*number = 0;
*type = kInfoLogFile;
} else if (rest.starts_with("MANIFEST-")) {
rest.remove_prefix(strlen("MANIFEST-"));
uint64_t num;
if (!ConsumeDecimalNumber(&rest, &num)) {
return false;
}
if (!rest.empty()) {
return false;
}
*type = kDescriptorFile;
*number = num;
} else {
uint64_t num;
if (!ConsumeDecimalNumber(&rest, &num)) {
return false;
}
Slice suffix = rest;
if (suffix == Slice(".log")) {
*type = kLogFile;
} else if (suffix == Slice(".sst") || suffix == Slice(".ldb")) {
*type = kTableFile;
} else if (suffix == Slice(".dbtmp")) {
*type = kTempFile;
} else {
return false;
}
*number = num;
}
return true;
}
Status SetCurrentFile(Env* env, const std::string& dbname,
uint64_t descriptor_number) {
std::string manifest = DescriptorFileName(dbname, descriptor_number);
Slice contents = manifest;
assert(contents.starts_with(dbname + "/"));
contents.remove_prefix(dbname.size() + 1);
std::string tmp = TempFileName(dbname, descriptor_number);
Status s = WriteStringToFileSync(env, contents.ToString() + "\n", tmp);
if (s.ok()) {
s = env->RenameFile(tmp, CurrentFileName(dbname));
}
if (!s.ok()) {
env->RemoveFile(tmp);
}
return s;
}
} | #include "db/filename.h"
#include "gtest/gtest.h"
#include "db/dbformat.h"
#include "port/port.h"
#include "util/logging.h"
namespace leveldb {
TEST(FileNameTest, Parse) {
Slice db;
FileType type;
uint64_t number;
static struct {
const char* fname;
uint64_t number;
FileType type;
} cases[] = {
{"100.log", 100, kLogFile},
{"0.log", 0, kLogFile},
{"0.sst", 0, kTableFile},
{"0.ldb", 0, kTableFile},
{"CURRENT", 0, kCurrentFile},
{"LOCK", 0, kDBLockFile},
{"MANIFEST-2", 2, kDescriptorFile},
{"MANIFEST-7", 7, kDescriptorFile},
{"LOG", 0, kInfoLogFile},
{"LOG.old", 0, kInfoLogFile},
{"18446744073709551615.log", 18446744073709551615ull, kLogFile},
};
for (int i = 0; i < sizeof(cases) / sizeof(cases[0]); i++) {
std::string f = cases[i].fname;
ASSERT_TRUE(ParseFileName(f, &number, &type)) << f;
ASSERT_EQ(cases[i].type, type) << f;
ASSERT_EQ(cases[i].number, number) << f;
}
static const char* errors[] = {"",
"foo",
"foo-dx-100.log",
".log",
"",
"manifest",
"CURREN",
"CURRENTX",
"MANIFES",
"MANIFEST",
"MANIFEST-",
"XMANIFEST-3",
"MANIFEST-3x",
"LOC",
"LOCKx",
"LO",
"LOGx",
"18446744073709551616.log",
"184467440737095516150.log",
"100",
"100.",
"100.lop"};
for (int i = 0; i < sizeof(errors) / sizeof(errors[0]); i++) {
std::string f = errors[i];
ASSERT_TRUE(!ParseFileName(f, &number, &type)) << f;
}
}
TEST(FileNameTest, Construction) {
uint64_t number;
FileType type;
std::string fname;
fname = CurrentFileName("foo");
ASSERT_EQ("foo/", std::string(fname.data(), 4));
ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type));
ASSERT_EQ(0, number);
ASSERT_EQ(kCurrentFile, type);
fname = LockFileName("foo");
ASSERT_EQ("foo/", std::string(fname.data(), 4));
ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type));
ASSERT_EQ(0, number);
ASSERT_EQ(kDBLockFile, type);
fname = LogFileName("foo", 192);
ASSERT_EQ("foo/", std::string(fname.data(), 4));
ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type));
ASSERT_EQ(192, number);
ASSERT_EQ(kLogFile, type);
fname = TableFileName("bar", 200);
ASSERT_EQ("bar/", std::string(fname.data(), 4));
ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type));
ASSERT_EQ(200, number);
ASSERT_EQ(kTableFile, type);
fname = DescriptorFileName("bar", 100);
ASSERT_EQ("bar/", std::string(fname.data(), 4));
ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type));
ASSERT_EQ(100, number);
ASSERT_EQ(kDescriptorFile, type);
fname = TempFileName("tmp", 999);
ASSERT_EQ("tmp/", std::string(fname.data(), 4));
ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type));
ASSERT_EQ(999, number);
ASSERT_EQ(kTempFile, type);
fname = InfoLogFileName("foo");
ASSERT_EQ("foo/", std::string(fname.data(), 4));
ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type));
ASSERT_EQ(0, number);
ASSERT_EQ(kInfoLogFile, type);
fname = OldInfoLogFileName("foo");
ASSERT_EQ("foo/", std::string(fname.data(), 4));
ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type));
ASSERT_EQ(0, number);
ASSERT_EQ(kInfoLogFile, type);
}
} | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/db/filename.cc | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/db/filename_test.cc | 23e35d792b9154f922b8b575b12596a4d8664c65 |
2557a474-7929-4212-8a22-7076742f216a | cpp | google/leveldb | write_batch | db/write_batch.cc | db/write_batch_test.cc | #include "leveldb/write_batch.h"
#include "db/dbformat.h"
#include "db/memtable.h"
#include "db/write_batch_internal.h"
#include "leveldb/db.h"
#include "util/coding.h"
namespace leveldb {
static const size_t kHeader = 12;
WriteBatch::WriteBatch() { Clear(); }
WriteBatch::~WriteBatch() = default;
WriteBatch::Handler::~Handler() = default;
void WriteBatch::Clear() {
rep_.clear();
rep_.resize(kHeader);
}
size_t WriteBatch::ApproximateSize() const { return rep_.size(); }
Status WriteBatch::Iterate(Handler* handler) const {
Slice input(rep_);
if (input.size() < kHeader) {
return Status::Corruption("malformed WriteBatch (too small)");
}
input.remove_prefix(kHeader);
Slice key, value;
int found = 0;
while (!input.empty()) {
found++;
char tag = input[0];
input.remove_prefix(1);
switch (tag) {
case kTypeValue:
if (GetLengthPrefixedSlice(&input, &key) &&
GetLengthPrefixedSlice(&input, &value)) {
handler->Put(key, value);
} else {
return Status::Corruption("bad WriteBatch Put");
}
break;
case kTypeDeletion:
if (GetLengthPrefixedSlice(&input, &key)) {
handler->Delete(key);
} else {
return Status::Corruption("bad WriteBatch Delete");
}
break;
default:
return Status::Corruption("unknown WriteBatch tag");
}
}
if (found != WriteBatchInternal::Count(this)) {
return Status::Corruption("WriteBatch has wrong count");
} else {
return Status::OK();
}
}
int WriteBatchInternal::Count(const WriteBatch* b) {
return DecodeFixed32(b->rep_.data() + 8);
}
void WriteBatchInternal::SetCount(WriteBatch* b, int n) {
EncodeFixed32(&b->rep_[8], n);
}
SequenceNumber WriteBatchInternal::Sequence(const WriteBatch* b) {
return SequenceNumber(DecodeFixed64(b->rep_.data()));
}
void WriteBatchInternal::SetSequence(WriteBatch* b, SequenceNumber seq) {
EncodeFixed64(&b->rep_[0], seq);
}
void WriteBatch::Put(const Slice& key, const Slice& value) {
WriteBatchInternal::SetCount(this, WriteBatchInternal::Count(this) + 1);
rep_.push_back(static_cast<char>(kTypeValue));
PutLengthPrefixedSlice(&rep_, key);
PutLengthPrefixedSlice(&rep_, value);
}
void WriteBatch::Delete(const Slice& key) {
WriteBatchInternal::SetCount(this, WriteBatchInternal::Count(this) + 1);
rep_.push_back(static_cast<char>(kTypeDeletion));
PutLengthPrefixedSlice(&rep_, key);
}
void WriteBatch::Append(const WriteBatch& source) {
WriteBatchInternal::Append(this, &source);
}
namespace {
class MemTableInserter : public WriteBatch::Handler {
public:
SequenceNumber sequence_;
MemTable* mem_;
void Put(const Slice& key, const Slice& value) override {
mem_->Add(sequence_, kTypeValue, key, value);
sequence_++;
}
void Delete(const Slice& key) override {
mem_->Add(sequence_, kTypeDeletion, key, Slice());
sequence_++;
}
};
}
Status WriteBatchInternal::InsertInto(const WriteBatch* b, MemTable* memtable) {
MemTableInserter inserter;
inserter.sequence_ = WriteBatchInternal::Sequence(b);
inserter.mem_ = memtable;
return b->Iterate(&inserter);
}
void WriteBatchInternal::SetContents(WriteBatch* b, const Slice& contents) {
assert(contents.size() >= kHeader);
b->rep_.assign(contents.data(), contents.size());
}
void WriteBatchInternal::Append(WriteBatch* dst, const WriteBatch* src) {
SetCount(dst, Count(dst) + Count(src));
assert(src->rep_.size() >= kHeader);
dst->rep_.append(src->rep_.data() + kHeader, src->rep_.size() - kHeader);
}
} | #include "gtest/gtest.h"
#include "db/memtable.h"
#include "db/write_batch_internal.h"
#include "leveldb/db.h"
#include "leveldb/env.h"
#include "util/logging.h"
namespace leveldb {
static std::string PrintContents(WriteBatch* b) {
InternalKeyComparator cmp(BytewiseComparator());
MemTable* mem = new MemTable(cmp);
mem->Ref();
std::string state;
Status s = WriteBatchInternal::InsertInto(b, mem);
int count = 0;
Iterator* iter = mem->NewIterator();
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
ParsedInternalKey ikey;
EXPECT_TRUE(ParseInternalKey(iter->key(), &ikey));
switch (ikey.type) {
case kTypeValue:
state.append("Put(");
state.append(ikey.user_key.ToString());
state.append(", ");
state.append(iter->value().ToString());
state.append(")");
count++;
break;
case kTypeDeletion:
state.append("Delete(");
state.append(ikey.user_key.ToString());
state.append(")");
count++;
break;
}
state.append("@");
state.append(NumberToString(ikey.sequence));
}
delete iter;
if (!s.ok()) {
state.append("ParseError()");
} else if (count != WriteBatchInternal::Count(b)) {
state.append("CountMismatch()");
}
mem->Unref();
return state;
}
TEST(WriteBatchTest, Empty) {
WriteBatch batch;
ASSERT_EQ("", PrintContents(&batch));
ASSERT_EQ(0, WriteBatchInternal::Count(&batch));
}
TEST(WriteBatchTest, Multiple) {
WriteBatch batch;
batch.Put(Slice("foo"), Slice("bar"));
batch.Delete(Slice("box"));
batch.Put(Slice("baz"), Slice("boo"));
WriteBatchInternal::SetSequence(&batch, 100);
ASSERT_EQ(100, WriteBatchInternal::Sequence(&batch));
ASSERT_EQ(3, WriteBatchInternal::Count(&batch));
ASSERT_EQ(
"Put(baz, boo)@102"
"Delete(box)@101"
"Put(foo, bar)@100",
PrintContents(&batch));
}
TEST(WriteBatchTest, Corruption) {
WriteBatch batch;
batch.Put(Slice("foo"), Slice("bar"));
batch.Delete(Slice("box"));
WriteBatchInternal::SetSequence(&batch, 200);
Slice contents = WriteBatchInternal::Contents(&batch);
WriteBatchInternal::SetContents(&batch,
Slice(contents.data(), contents.size() - 1));
ASSERT_EQ(
"Put(foo, bar)@200"
"ParseError()",
PrintContents(&batch));
}
TEST(WriteBatchTest, Append) {
WriteBatch b1, b2;
WriteBatchInternal::SetSequence(&b1, 200);
WriteBatchInternal::SetSequence(&b2, 300);
b1.Append(b2);
ASSERT_EQ("", PrintContents(&b1));
b2.Put("a", "va");
b1.Append(b2);
ASSERT_EQ("Put(a, va)@200", PrintContents(&b1));
b2.Clear();
b2.Put("b", "vb");
b1.Append(b2);
ASSERT_EQ(
"Put(a, va)@200"
"Put(b, vb)@201",
PrintContents(&b1));
b2.Delete("foo");
b1.Append(b2);
ASSERT_EQ(
"Put(a, va)@200"
"Put(b, vb)@202"
"Put(b, vb)@201"
"Delete(foo)@203",
PrintContents(&b1));
}
TEST(WriteBatchTest, ApproximateSize) {
WriteBatch batch;
size_t empty_size = batch.ApproximateSize();
batch.Put(Slice("foo"), Slice("bar"));
size_t one_key_size = batch.ApproximateSize();
ASSERT_LT(empty_size, one_key_size);
batch.Put(Slice("baz"), Slice("boo"));
size_t two_keys_size = batch.ApproximateSize();
ASSERT_LT(one_key_size, two_keys_size);
batch.Delete(Slice("box"));
size_t post_delete_size = batch.ApproximateSize();
ASSERT_LT(two_keys_size, post_delete_size);
}
} | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/db/write_batch.cc | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/db/write_batch_test.cc | 23e35d792b9154f922b8b575b12596a4d8664c65 |
300a714d-d3fa-4ac6-a895-c2b60a84b167 | cpp | google/leveldb | version_edit | db/version_edit.cc | db/version_edit_test.cc | #include "db/version_edit.h"
#include "db/version_set.h"
#include "util/coding.h"
namespace leveldb {
enum Tag {
kComparator = 1,
kLogNumber = 2,
kNextFileNumber = 3,
kLastSequence = 4,
kCompactPointer = 5,
kDeletedFile = 6,
kNewFile = 7,
kPrevLogNumber = 9
};
void VersionEdit::Clear() {
comparator_.clear();
log_number_ = 0;
prev_log_number_ = 0;
last_sequence_ = 0;
next_file_number_ = 0;
has_comparator_ = false;
has_log_number_ = false;
has_prev_log_number_ = false;
has_next_file_number_ = false;
has_last_sequence_ = false;
compact_pointers_.clear();
deleted_files_.clear();
new_files_.clear();
}
void VersionEdit::EncodeTo(std::string* dst) const {
if (has_comparator_) {
PutVarint32(dst, kComparator);
PutLengthPrefixedSlice(dst, comparator_);
}
if (has_log_number_) {
PutVarint32(dst, kLogNumber);
PutVarint64(dst, log_number_);
}
if (has_prev_log_number_) {
PutVarint32(dst, kPrevLogNumber);
PutVarint64(dst, prev_log_number_);
}
if (has_next_file_number_) {
PutVarint32(dst, kNextFileNumber);
PutVarint64(dst, next_file_number_);
}
if (has_last_sequence_) {
PutVarint32(dst, kLastSequence);
PutVarint64(dst, last_sequence_);
}
for (size_t i = 0; i < compact_pointers_.size(); i++) {
PutVarint32(dst, kCompactPointer);
PutVarint32(dst, compact_pointers_[i].first);
PutLengthPrefixedSlice(dst, compact_pointers_[i].second.Encode());
}
for (const auto& deleted_file_kvp : deleted_files_) {
PutVarint32(dst, kDeletedFile);
PutVarint32(dst, deleted_file_kvp.first);
PutVarint64(dst, deleted_file_kvp.second);
}
for (size_t i = 0; i < new_files_.size(); i++) {
const FileMetaData& f = new_files_[i].second;
PutVarint32(dst, kNewFile);
PutVarint32(dst, new_files_[i].first);
PutVarint64(dst, f.number);
PutVarint64(dst, f.file_size);
PutLengthPrefixedSlice(dst, f.smallest.Encode());
PutLengthPrefixedSlice(dst, f.largest.Encode());
}
}
static bool GetInternalKey(Slice* input, InternalKey* dst) {
Slice str;
if (GetLengthPrefixedSlice(input, &str)) {
return dst->DecodeFrom(str);
} else {
return false;
}
}
static bool GetLevel(Slice* input, int* level) {
uint32_t v;
if (GetVarint32(input, &v) && v < config::kNumLevels) {
*level = v;
return true;
} else {
return false;
}
}
Status VersionEdit::DecodeFrom(const Slice& src) {
Clear();
Slice input = src;
const char* msg = nullptr;
uint32_t tag;
int level;
uint64_t number;
FileMetaData f;
Slice str;
InternalKey key;
while (msg == nullptr && GetVarint32(&input, &tag)) {
switch (tag) {
case kComparator:
if (GetLengthPrefixedSlice(&input, &str)) {
comparator_ = str.ToString();
has_comparator_ = true;
} else {
msg = "comparator name";
}
break;
case kLogNumber:
if (GetVarint64(&input, &log_number_)) {
has_log_number_ = true;
} else {
msg = "log number";
}
break;
case kPrevLogNumber:
if (GetVarint64(&input, &prev_log_number_)) {
has_prev_log_number_ = true;
} else {
msg = "previous log number";
}
break;
case kNextFileNumber:
if (GetVarint64(&input, &next_file_number_)) {
has_next_file_number_ = true;
} else {
msg = "next file number";
}
break;
case kLastSequence:
if (GetVarint64(&input, &last_sequence_)) {
has_last_sequence_ = true;
} else {
msg = "last sequence number";
}
break;
case kCompactPointer:
if (GetLevel(&input, &level) && GetInternalKey(&input, &key)) {
compact_pointers_.push_back(std::make_pair(level, key));
} else {
msg = "compaction pointer";
}
break;
case kDeletedFile:
if (GetLevel(&input, &level) && GetVarint64(&input, &number)) {
deleted_files_.insert(std::make_pair(level, number));
} else {
msg = "deleted file";
}
break;
case kNewFile:
if (GetLevel(&input, &level) && GetVarint64(&input, &f.number) &&
GetVarint64(&input, &f.file_size) &&
GetInternalKey(&input, &f.smallest) &&
GetInternalKey(&input, &f.largest)) {
new_files_.push_back(std::make_pair(level, f));
} else {
msg = "new-file entry";
}
break;
default:
msg = "unknown tag";
break;
}
}
if (msg == nullptr && !input.empty()) {
msg = "invalid tag";
}
Status result;
if (msg != nullptr) {
result = Status::Corruption("VersionEdit", msg);
}
return result;
}
std::string VersionEdit::DebugString() const {
std::string r;
r.append("VersionEdit {");
if (has_comparator_) {
r.append("\n Comparator: ");
r.append(comparator_);
}
if (has_log_number_) {
r.append("\n LogNumber: ");
AppendNumberTo(&r, log_number_);
}
if (has_prev_log_number_) {
r.append("\n PrevLogNumber: ");
AppendNumberTo(&r, prev_log_number_);
}
if (has_next_file_number_) {
r.append("\n NextFile: ");
AppendNumberTo(&r, next_file_number_);
}
if (has_last_sequence_) {
r.append("\n LastSeq: ");
AppendNumberTo(&r, last_sequence_);
}
for (size_t i = 0; i < compact_pointers_.size(); i++) {
r.append("\n CompactPointer: ");
AppendNumberTo(&r, compact_pointers_[i].first);
r.append(" ");
r.append(compact_pointers_[i].second.DebugString());
}
for (const auto& deleted_files_kvp : deleted_files_) {
r.append("\n RemoveFile: ");
AppendNumberTo(&r, deleted_files_kvp.first);
r.append(" ");
AppendNumberTo(&r, deleted_files_kvp.second);
}
for (size_t i = 0; i < new_files_.size(); i++) {
const FileMetaData& f = new_files_[i].second;
r.append("\n AddFile: ");
AppendNumberTo(&r, new_files_[i].first);
r.append(" ");
AppendNumberTo(&r, f.number);
r.append(" ");
AppendNumberTo(&r, f.file_size);
r.append(" ");
r.append(f.smallest.DebugString());
r.append(" .. ");
r.append(f.largest.DebugString());
}
r.append("\n}\n");
return r;
}
} | #include "db/version_edit.h"
#include "gtest/gtest.h"
namespace leveldb {
static void TestEncodeDecode(const VersionEdit& edit) {
std::string encoded, encoded2;
edit.EncodeTo(&encoded);
VersionEdit parsed;
Status s = parsed.DecodeFrom(encoded);
ASSERT_TRUE(s.ok()) << s.ToString();
parsed.EncodeTo(&encoded2);
ASSERT_EQ(encoded, encoded2);
}
TEST(VersionEditTest, EncodeDecode) {
static const uint64_t kBig = 1ull << 50;
VersionEdit edit;
for (int i = 0; i < 4; i++) {
TestEncodeDecode(edit);
edit.AddFile(3, kBig + 300 + i, kBig + 400 + i,
InternalKey("foo", kBig + 500 + i, kTypeValue),
InternalKey("zoo", kBig + 600 + i, kTypeDeletion));
edit.RemoveFile(4, kBig + 700 + i);
edit.SetCompactPointer(i, InternalKey("x", kBig + 900 + i, kTypeValue));
}
edit.SetComparatorName("foo");
edit.SetLogNumber(kBig + 100);
edit.SetNextFile(kBig + 200);
edit.SetLastSequence(kBig + 1000);
TestEncodeDecode(edit);
}
} | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/db/version_edit.cc | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/db/version_edit_test.cc | 23e35d792b9154f922b8b575b12596a4d8664c65 |
b698b8f6-d114-4655-8a28-439344c2c25d | cpp | google/leveldb | dbformat | db/dbformat.cc | db/dbformat_test.cc | #include "db/dbformat.h"
#include <cstdio>
#include <sstream>
#include "port/port.h"
#include "util/coding.h"
namespace leveldb {
static uint64_t PackSequenceAndType(uint64_t seq, ValueType t) {
assert(seq <= kMaxSequenceNumber);
assert(t <= kValueTypeForSeek);
return (seq << 8) | t;
}
void AppendInternalKey(std::string* result, const ParsedInternalKey& key) {
result->append(key.user_key.data(), key.user_key.size());
PutFixed64(result, PackSequenceAndType(key.sequence, key.type));
}
std::string ParsedInternalKey::DebugString() const {
std::ostringstream ss;
ss << '\'' << EscapeString(user_key.ToString()) << "' @ " << sequence << " : "
<< static_cast<int>(type);
return ss.str();
}
std::string InternalKey::DebugString() const {
ParsedInternalKey parsed;
if (ParseInternalKey(rep_, &parsed)) {
return parsed.DebugString();
}
std::ostringstream ss;
ss << "(bad)" << EscapeString(rep_);
return ss.str();
}
const char* InternalKeyComparator::Name() const {
return "leveldb.InternalKeyComparator";
}
int InternalKeyComparator::Compare(const Slice& akey, const Slice& bkey) const {
int r = user_comparator_->Compare(ExtractUserKey(akey), ExtractUserKey(bkey));
if (r == 0) {
const uint64_t anum = DecodeFixed64(akey.data() + akey.size() - 8);
const uint64_t bnum = DecodeFixed64(bkey.data() + bkey.size() - 8);
if (anum > bnum) {
r = -1;
} else if (anum < bnum) {
r = +1;
}
}
return r;
}
void InternalKeyComparator::FindShortestSeparator(std::string* start,
const Slice& limit) const {
Slice user_start = ExtractUserKey(*start);
Slice user_limit = ExtractUserKey(limit);
std::string tmp(user_start.data(), user_start.size());
user_comparator_->FindShortestSeparator(&tmp, user_limit);
if (tmp.size() < user_start.size() &&
user_comparator_->Compare(user_start, tmp) < 0) {
PutFixed64(&tmp,
PackSequenceAndType(kMaxSequenceNumber, kValueTypeForSeek));
assert(this->Compare(*start, tmp) < 0);
assert(this->Compare(tmp, limit) < 0);
start->swap(tmp);
}
}
void InternalKeyComparator::FindShortSuccessor(std::string* key) const {
Slice user_key = ExtractUserKey(*key);
std::string tmp(user_key.data(), user_key.size());
user_comparator_->FindShortSuccessor(&tmp);
if (tmp.size() < user_key.size() &&
user_comparator_->Compare(user_key, tmp) < 0) {
PutFixed64(&tmp,
PackSequenceAndType(kMaxSequenceNumber, kValueTypeForSeek));
assert(this->Compare(*key, tmp) < 0);
key->swap(tmp);
}
}
const char* InternalFilterPolicy::Name() const { return user_policy_->Name(); }
void InternalFilterPolicy::CreateFilter(const Slice* keys, int n,
std::string* dst) const {
Slice* mkey = const_cast<Slice*>(keys);
for (int i = 0; i < n; i++) {
mkey[i] = ExtractUserKey(keys[i]);
}
user_policy_->CreateFilter(keys, n, dst);
}
bool InternalFilterPolicy::KeyMayMatch(const Slice& key, const Slice& f) const {
return user_policy_->KeyMayMatch(ExtractUserKey(key), f);
}
LookupKey::LookupKey(const Slice& user_key, SequenceNumber s) {
size_t usize = user_key.size();
size_t needed = usize + 13;
char* dst;
if (needed <= sizeof(space_)) {
dst = space_;
} else {
dst = new char[needed];
}
start_ = dst;
dst = EncodeVarint32(dst, usize + 8);
kstart_ = dst;
std::memcpy(dst, user_key.data(), usize);
dst += usize;
EncodeFixed64(dst, PackSequenceAndType(s, kValueTypeForSeek));
dst += 8;
end_ = dst;
}
} | #include "db/dbformat.h"
#include "gtest/gtest.h"
#include "util/logging.h"
namespace leveldb {
static std::string IKey(const std::string& user_key, uint64_t seq,
ValueType vt) {
std::string encoded;
AppendInternalKey(&encoded, ParsedInternalKey(user_key, seq, vt));
return encoded;
}
static std::string Shorten(const std::string& s, const std::string& l) {
std::string result = s;
InternalKeyComparator(BytewiseComparator()).FindShortestSeparator(&result, l);
return result;
}
static std::string ShortSuccessor(const std::string& s) {
std::string result = s;
InternalKeyComparator(BytewiseComparator()).FindShortSuccessor(&result);
return result;
}
static void TestKey(const std::string& key, uint64_t seq, ValueType vt) {
std::string encoded = IKey(key, seq, vt);
Slice in(encoded);
ParsedInternalKey decoded("", 0, kTypeValue);
ASSERT_TRUE(ParseInternalKey(in, &decoded));
ASSERT_EQ(key, decoded.user_key.ToString());
ASSERT_EQ(seq, decoded.sequence);
ASSERT_EQ(vt, decoded.type);
ASSERT_TRUE(!ParseInternalKey(Slice("bar"), &decoded));
}
TEST(FormatTest, InternalKey_EncodeDecode) {
const char* keys[] = {"", "k", "hello", "longggggggggggggggggggggg"};
const uint64_t seq[] = {1,
2,
3,
(1ull << 8) - 1,
1ull << 8,
(1ull << 8) + 1,
(1ull << 16) - 1,
1ull << 16,
(1ull << 16) + 1,
(1ull << 32) - 1,
1ull << 32,
(1ull << 32) + 1};
for (int k = 0; k < sizeof(keys) / sizeof(keys[0]); k++) {
for (int s = 0; s < sizeof(seq) / sizeof(seq[0]); s++) {
TestKey(keys[k], seq[s], kTypeValue);
TestKey("hello", 1, kTypeDeletion);
}
}
}
TEST(FormatTest, InternalKey_DecodeFromEmpty) {
InternalKey internal_key;
ASSERT_TRUE(!internal_key.DecodeFrom(""));
}
TEST(FormatTest, InternalKeyShortSeparator) {
ASSERT_EQ(IKey("foo", 100, kTypeValue),
Shorten(IKey("foo", 100, kTypeValue), IKey("foo", 99, kTypeValue)));
ASSERT_EQ(
IKey("foo", 100, kTypeValue),
Shorten(IKey("foo", 100, kTypeValue), IKey("foo", 101, kTypeValue)));
ASSERT_EQ(
IKey("foo", 100, kTypeValue),
Shorten(IKey("foo", 100, kTypeValue), IKey("foo", 100, kTypeValue)));
ASSERT_EQ(
IKey("foo", 100, kTypeValue),
Shorten(IKey("foo", 100, kTypeValue), IKey("foo", 100, kTypeDeletion)));
ASSERT_EQ(IKey("foo", 100, kTypeValue),
Shorten(IKey("foo", 100, kTypeValue), IKey("bar", 99, kTypeValue)));
ASSERT_EQ(
IKey("g", kMaxSequenceNumber, kValueTypeForSeek),
Shorten(IKey("foo", 100, kTypeValue), IKey("hello", 200, kTypeValue)));
ASSERT_EQ(
IKey("foo", 100, kTypeValue),
Shorten(IKey("foo", 100, kTypeValue), IKey("foobar", 200, kTypeValue)));
ASSERT_EQ(
IKey("foobar", 100, kTypeValue),
Shorten(IKey("foobar", 100, kTypeValue), IKey("foo", 200, kTypeValue)));
}
TEST(FormatTest, InternalKeyShortestSuccessor) {
ASSERT_EQ(IKey("g", kMaxSequenceNumber, kValueTypeForSeek),
ShortSuccessor(IKey("foo", 100, kTypeValue)));
ASSERT_EQ(IKey("\xff\xff", 100, kTypeValue),
ShortSuccessor(IKey("\xff\xff", 100, kTypeValue)));
}
TEST(FormatTest, ParsedInternalKeyDebugString) {
ParsedInternalKey key("The \"key\" in 'single quotes'", 42, kTypeValue);
ASSERT_EQ("'The \"key\" in 'single quotes'' @ 42 : 1", key.DebugString());
}
TEST(FormatTest, InternalKeyDebugString) {
InternalKey key("The \"key\" in 'single quotes'", 42, kTypeValue);
ASSERT_EQ("'The \"key\" in 'single quotes'' @ 42 : 1", key.DebugString());
InternalKey invalid_key;
ASSERT_EQ("(bad)", invalid_key.DebugString());
}
} | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/db/dbformat.cc | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/db/dbformat_test.cc | 23e35d792b9154f922b8b575b12596a4d8664c65 |
fe354f63-2eed-42f1-a42d-7aec1a0c3f03 | cpp | google/leveldb | version_set | db/version_set.cc | db/version_set_test.cc | #include "db/version_set.h"
#include <algorithm>
#include <cstdio>
#include "db/filename.h"
#include "db/log_reader.h"
#include "db/log_writer.h"
#include "db/memtable.h"
#include "db/table_cache.h"
#include "leveldb/env.h"
#include "leveldb/table_builder.h"
#include "table/merger.h"
#include "table/two_level_iterator.h"
#include "util/coding.h"
#include "util/logging.h"
namespace leveldb {
static size_t TargetFileSize(const Options* options) {
return options->max_file_size;
}
static int64_t MaxGrandParentOverlapBytes(const Options* options) {
return 10 * TargetFileSize(options);
}
static int64_t ExpandedCompactionByteSizeLimit(const Options* options) {
return 25 * TargetFileSize(options);
}
static double MaxBytesForLevel(const Options* options, int level) {
double result = 10. * 1048576.0;
while (level > 1) {
result *= 10;
level--;
}
return result;
}
static uint64_t MaxFileSizeForLevel(const Options* options, int level) {
return TargetFileSize(options);
}
static int64_t TotalFileSize(const std::vector<FileMetaData*>& files) {
int64_t sum = 0;
for (size_t i = 0; i < files.size(); i++) {
sum += files[i]->file_size;
}
return sum;
}
Version::~Version() {
assert(refs_ == 0);
prev_->next_ = next_;
next_->prev_ = prev_;
for (int level = 0; level < config::kNumLevels; level++) {
for (size_t i = 0; i < files_[level].size(); i++) {
FileMetaData* f = files_[level][i];
assert(f->refs > 0);
f->refs--;
if (f->refs <= 0) {
delete f;
}
}
}
}
int FindFile(const InternalKeyComparator& icmp,
const std::vector<FileMetaData*>& files, const Slice& key) {
uint32_t left = 0;
uint32_t right = files.size();
while (left < right) {
uint32_t mid = (left + right) / 2;
const FileMetaData* f = files[mid];
if (icmp.InternalKeyComparator::Compare(f->largest.Encode(), key) < 0) {
left = mid + 1;
} else {
right = mid;
}
}
return right;
}
static bool AfterFile(const Comparator* ucmp, const Slice* user_key,
const FileMetaData* f) {
return (user_key != nullptr &&
ucmp->Compare(*user_key, f->largest.user_key()) > 0);
}
static bool BeforeFile(const Comparator* ucmp, const Slice* user_key,
const FileMetaData* f) {
return (user_key != nullptr &&
ucmp->Compare(*user_key, f->smallest.user_key()) < 0);
}
bool SomeFileOverlapsRange(const InternalKeyComparator& icmp,
bool disjoint_sorted_files,
const std::vector<FileMetaData*>& files,
const Slice* smallest_user_key,
const Slice* largest_user_key) {
const Comparator* ucmp = icmp.user_comparator();
if (!disjoint_sorted_files) {
for (size_t i = 0; i < files.size(); i++) {
const FileMetaData* f = files[i];
if (AfterFile(ucmp, smallest_user_key, f) ||
BeforeFile(ucmp, largest_user_key, f)) {
} else {
return true;
}
}
return false;
}
uint32_t index = 0;
if (smallest_user_key != nullptr) {
InternalKey small_key(*smallest_user_key, kMaxSequenceNumber,
kValueTypeForSeek);
index = FindFile(icmp, files, small_key.Encode());
}
if (index >= files.size()) {
return false;
}
return !BeforeFile(ucmp, largest_user_key, files[index]);
}
class Version::LevelFileNumIterator : public Iterator {
public:
LevelFileNumIterator(const InternalKeyComparator& icmp,
const std::vector<FileMetaData*>* flist)
: icmp_(icmp), flist_(flist), index_(flist->size()) {
}
bool Valid() const override { return index_ < flist_->size(); }
void Seek(const Slice& target) override {
index_ = FindFile(icmp_, *flist_, target);
}
void SeekToFirst() override { index_ = 0; }
void SeekToLast() override {
index_ = flist_->empty() ? 0 : flist_->size() - 1;
}
void Next() override {
assert(Valid());
index_++;
}
void Prev() override {
assert(Valid());
if (index_ == 0) {
index_ = flist_->size();
} else {
index_--;
}
}
Slice key() const override {
assert(Valid());
return (*flist_)[index_]->largest.Encode();
}
Slice value() const override {
assert(Valid());
EncodeFixed64(value_buf_, (*flist_)[index_]->number);
EncodeFixed64(value_buf_ + 8, (*flist_)[index_]->file_size);
return Slice(value_buf_, sizeof(value_buf_));
}
Status status() const override { return Status::OK(); }
private:
const InternalKeyComparator icmp_;
const std::vector<FileMetaData*>* const flist_;
uint32_t index_;
mutable char value_buf_[16];
};
static Iterator* GetFileIterator(void* arg, const ReadOptions& options,
const Slice& file_value) {
TableCache* cache = reinterpret_cast<TableCache*>(arg);
if (file_value.size() != 16) {
return NewErrorIterator(
Status::Corruption("FileReader invoked with unexpected value"));
} else {
return cache->NewIterator(options, DecodeFixed64(file_value.data()),
DecodeFixed64(file_value.data() + 8));
}
}
Iterator* Version::NewConcatenatingIterator(const ReadOptions& options,
int level) const {
return NewTwoLevelIterator(
new LevelFileNumIterator(vset_->icmp_, &files_[level]), &GetFileIterator,
vset_->table_cache_, options);
}
void Version::AddIterators(const ReadOptions& options,
std::vector<Iterator*>* iters) {
for (size_t i = 0; i < files_[0].size(); i++) {
iters->push_back(vset_->table_cache_->NewIterator(
options, files_[0][i]->number, files_[0][i]->file_size));
}
for (int level = 1; level < config::kNumLevels; level++) {
if (!files_[level].empty()) {
iters->push_back(NewConcatenatingIterator(options, level));
}
}
}
namespace {
enum SaverState {
kNotFound,
kFound,
kDeleted,
kCorrupt,
};
struct Saver {
SaverState state;
const Comparator* ucmp;
Slice user_key;
std::string* value;
};
}
// TableCache::Get callback.  Parses the internal key and, when its user key
// matches the one being looked up, records found/deleted state in the Saver
// (copying the value out on a live match).  Non-matching keys are ignored.
static void SaveValue(void* arg, const Slice& ikey, const Slice& v) {
  Saver* s = reinterpret_cast<Saver*>(arg);
  ParsedInternalKey parsed_key;
  if (!ParseInternalKey(ikey, &parsed_key)) {
    s->state = kCorrupt;
    return;
  }
  if (s->ucmp->Compare(parsed_key.user_key, s->user_key) != 0) {
    return;  // Different user key; keep the current state.
  }
  s->state = (parsed_key.type == kTypeValue) ? kFound : kDeleted;
  if (s->state == kFound) {
    s->value->assign(v.data(), v.size());
  }
}
// Orders files so the most recently created (largest file number, hence
// newest data) sorts first.
static bool NewestFirst(FileMetaData* a, FileMetaData* b) {
  return b->number < a->number;
}
// Invokes func(arg, level, f) for every file that may contain user_key,
// from newest to oldest, stopping early when func returns false.
// Level 0 is scanned exhaustively (files overlap) and visited newest-first;
// higher levels use binary search since their files are disjoint and sorted.
void Version::ForEachOverlapping(Slice user_key, Slice internal_key, void* arg,
                                 bool (*func)(void*, int, FileMetaData*)) {
  const Comparator* ucmp = vset_->icmp_.user_comparator();
  // Collect all level-0 files whose key range covers user_key.
  std::vector<FileMetaData*> tmp;
  tmp.reserve(files_[0].size());
  for (uint32_t i = 0; i < files_[0].size(); i++) {
    FileMetaData* f = files_[0][i];
    if (ucmp->Compare(user_key, f->smallest.user_key()) >= 0 &&
        ucmp->Compare(user_key, f->largest.user_key()) <= 0) {
      tmp.push_back(f);
    }
  }
  if (!tmp.empty()) {
    // Newest file first so the most recent write for the key wins.
    std::sort(tmp.begin(), tmp.end(), NewestFirst);
    for (uint32_t i = 0; i < tmp.size(); i++) {
      if (!(*func)(arg, 0, tmp[i])) {
        return;
      }
    }
  }
  // Levels >= 1: at most one file per level can contain the key.
  for (int level = 1; level < config::kNumLevels; level++) {
    size_t num_files = files_[level].size();
    if (num_files == 0) continue;
    // Index of the first file whose largest key is >= internal_key.
    uint32_t index = FindFile(vset_->icmp_, files_[level], internal_key);
    if (index < num_files) {
      FileMetaData* f = files_[level][index];
      if (ucmp->Compare(user_key, f->smallest.user_key()) < 0) {
        // Key falls in the gap before this file; not present at this level.
      } else {
        if (!(*func)(arg, level, f)) {
          return;
        }
      }
    }
  }
}
// Looks up k in this version's files, newest level first.  On success the
// value is copied into *value.  *stats records the first file that was
// searched but did not terminate the lookup, so the caller can charge a
// "wasted seek" against it (see UpdateStats / seek-triggered compaction).
Status Version::Get(const ReadOptions& options, const LookupKey& k,
                    std::string* value, GetStats* stats) {
  stats->seek_file = nullptr;
  stats->seek_file_level = -1;
  // Per-lookup state shared with the Match callback below.
  struct State {
    Saver saver;
    GetStats* stats;
    const ReadOptions* options;
    Slice ikey;
    FileMetaData* last_file_read;
    int last_file_read_level;
    VersionSet* vset;
    Status s;
    bool found;
    // Called for each candidate file (newest first).  Returns true to keep
    // searching, false to stop.
    static bool Match(void* arg, int level, FileMetaData* f) {
      State* state = reinterpret_cast<State*>(arg);
      // If a previous file was read without resolving the key, remember it
      // as the file to charge the seek against.
      if (state->stats->seek_file == nullptr &&
          state->last_file_read != nullptr) {
        state->stats->seek_file = state->last_file_read;
        state->stats->seek_file_level = state->last_file_read_level;
      }
      state->last_file_read = f;
      state->last_file_read_level = level;
      state->s = state->vset->table_cache_->Get(*state->options, f->number,
                                                f->file_size, state->ikey,
                                                &state->saver, SaveValue);
      if (!state->s.ok()) {
        state->found = true;  // Propagate the I/O error as the final result.
        return false;
      }
      switch (state->saver.state) {
        case kNotFound:
          return true;  // Keep searching in older files/levels.
        case kFound:
          state->found = true;
          return false;
        case kDeleted:
          return false;  // Tombstone shadows any older value.
        case kCorrupt:
          state->s =
              Status::Corruption("corrupted key for ", state->saver.user_key);
          state->found = true;
          return false;
      }
      return false;  // Not reached; quiets compiler warnings.
    }
  };
  State state;
  state.found = false;
  state.stats = stats;
  state.last_file_read = nullptr;
  state.last_file_read_level = -1;
  state.options = &options;
  state.ikey = k.internal_key();
  state.vset = vset_;
  state.saver.state = kNotFound;
  state.saver.ucmp = vset_->icmp_.user_comparator();
  state.saver.user_key = k.user_key();
  state.saver.value = value;
  ForEachOverlapping(state.saver.user_key, state.ikey, &state, &State::Match);
  // found covers success, deletion-with-error, corruption and I/O failure;
  // otherwise the key simply is not present.
  return state.found ? state.s : Status::NotFound(Slice());
}
bool Version::UpdateStats(const GetStats& stats) {
FileMetaData* f = stats.seek_file;
if (f != nullptr) {
f->allowed_seeks--;
if (f->allowed_seeks <= 0 && file_to_compact_ == nullptr) {
file_to_compact_ = f;
file_to_compact_level_ = stats.seek_file_level;
return true;
}
}
return false;
}
// Called for sampled reads during iteration.  If at least two files overlap
// the sampled key, reading it repeatedly would touch multiple files, so a
// seek is charged against the first (newest) matching file.  Returns true
// if that charge scheduled a compaction.
bool Version::RecordReadSample(Slice internal_key) {
  ParsedInternalKey ikey;
  if (!ParseInternalKey(internal_key, &ikey)) {
    return false;
  }
  struct State {
    GetStats stats;  // Holds the first matching file.
    int matches;
    static bool Match(void* arg, int level, FileMetaData* f) {
      State* state = reinterpret_cast<State*>(arg);
      state->matches++;
      if (state->matches == 1) {
        // Remember the first (newest) file that overlaps the key.
        state->stats.seek_file = f;
        state->stats.seek_file_level = level;
      }
      // Two matches are enough to decide; stop early.
      return state->matches < 2;
    }
  };
  State state;
  state.matches = 0;
  ForEachOverlapping(ikey.user_key, internal_key, &state, &State::Match);
  if (state.matches >= 2) {
    return UpdateStats(state.stats);
  }
  return false;
}
// Increments the reference count; pair every Ref() with an Unref().
void Version::Ref() { ++refs_; }
// Drops one reference; deletes the version when the count reaches zero.
// The dummy list head is never refcounted and must never be released here.
void Version::Unref() {
  assert(this != &vset_->dummy_versions_);
  assert(refs_ >= 1);
  --refs_;
  if (refs_ == 0) {
    delete this;
  }
}
// Returns true if any file at the given level overlaps the user-key range
// [*smallest_user_key, *largest_user_key]; nullptr bounds mean unbounded.
// Level 0 requires an exhaustive check since its files may overlap.
bool Version::OverlapInLevel(int level, const Slice* smallest_user_key,
                             const Slice* largest_user_key) {
  return SomeFileOverlapsRange(vset_->icmp_, (level > 0), files_[level],
                               smallest_user_key, largest_user_key);
}
// Chooses the level at which a freshly flushed memtable output should be
// placed.  Pushing past level 0 is allowed only while the new file would
// not overlap the next level and would not overlap too much data two
// levels down (which would make a later compaction expensive).
int Version::PickLevelForMemTableOutput(const Slice& smallest_user_key,
                                        const Slice& largest_user_key) {
  int level = 0;
  if (!OverlapInLevel(0, &smallest_user_key, &largest_user_key)) {
    // Internal-key bounds covering the user-key range for overlap queries.
    InternalKey start(smallest_user_key, kMaxSequenceNumber, kValueTypeForSeek);
    InternalKey limit(largest_user_key, 0, static_cast<ValueType>(0));
    std::vector<FileMetaData*> overlaps;
    while (level < config::kMaxMemCompactLevel) {
      if (OverlapInLevel(level + 1, &smallest_user_key, &largest_user_key)) {
        break;  // Would overlap data in the next level; stop here.
      }
      if (level + 2 < config::kNumLevels) {
        // Avoid placement that creates a huge grandparent overlap.
        GetOverlappingInputs(level + 2, &start, &limit, &overlaps);
        const int64_t sum = TotalFileSize(overlaps);
        if (sum > MaxGrandParentOverlapBytes(vset_->options_)) {
          break;
        }
      }
      level++;
    }
  }
  return level;
}
// Stores in *inputs all files at the given level whose user-key range
// overlaps [begin, end]; nullptr bounds mean unbounded on that side.
// For level 0 the range is transitively expanded: when a matching file
// widens the search bounds, the scan restarts so that every file
// overlapping any file already chosen is also included.
void Version::GetOverlappingInputs(int level, const InternalKey* begin,
                                   const InternalKey* end,
                                   std::vector<FileMetaData*>* inputs) {
  assert(level >= 0);
  assert(level < config::kNumLevels);
  inputs->clear();
  Slice user_begin, user_end;
  if (begin != nullptr) {
    user_begin = begin->user_key();
  }
  if (end != nullptr) {
    user_end = end->user_key();
  }
  const Comparator* user_cmp = vset_->icmp_.user_comparator();
  for (size_t i = 0; i < files_[level].size();) {
    FileMetaData* f = files_[level][i++];
    const Slice file_start = f->smallest.user_key();
    const Slice file_limit = f->largest.user_key();
    if (begin != nullptr && user_cmp->Compare(file_limit, user_begin) < 0) {
      // Entirely before the range; skip.
    } else if (end != nullptr && user_cmp->Compare(file_start, user_end) > 0) {
      // Entirely after the range; skip.
    } else {
      inputs->push_back(f);
      if (level == 0) {
        // Level-0 files may overlap each other: if this file extends the
        // range, widen the bounds and restart the scan from the beginning.
        if (begin != nullptr && user_cmp->Compare(file_start, user_begin) < 0) {
          user_begin = file_start;
          inputs->clear();
          i = 0;
        } else if (end != nullptr &&
                   user_cmp->Compare(file_limit, user_end) > 0) {
          user_end = file_limit;
          inputs->clear();
          i = 0;
        }
      }
    }
  }
}
// Renders a human-readable listing of every level and its files in the form
//   "<number>:<size>[<smallest> .. <largest>]".
std::string Version::DebugString() const {
  std::string r;
  for (int level = 0; level < config::kNumLevels; level++) {
    r.append("--- level ");
    AppendNumberTo(&r, level);
    r.append(" ---\n");
    for (const FileMetaData* f : files_[level]) {
      r.push_back(' ');
      AppendNumberTo(&r, f->number);
      r.push_back(':');
      AppendNumberTo(&r, f->file_size);
      r.append("[");
      r.append(f->smallest.DebugString());
      r.append(" .. ");
      r.append(f->largest.DebugString());
      r.append("]\n");
    }
  }
  return r;
}
// Accumulates a sequence of VersionEdits on top of a base Version and
// materializes the result into a new Version.  Added files are kept in a
// per-level set ordered by smallest key so SaveTo can merge them with the
// base level contents in a single sorted pass.
class VersionSet::Builder {
 private:
  // Orders files by smallest internal key, breaking ties by file number so
  // the ordering is total.
  struct BySmallestKey {
    const InternalKeyComparator* internal_comparator;
    bool operator()(FileMetaData* f1, FileMetaData* f2) const {
      int r = internal_comparator->Compare(f1->smallest, f2->smallest);
      if (r != 0) {
        return (r < 0);
      } else {
        return (f1->number < f2->number);
      }
    }
  };
  typedef std::set<FileMetaData*, BySmallestKey> FileSet;
  // Pending changes for one level: files deleted (by number) and added.
  struct LevelState {
    std::set<uint64_t> deleted_files;
    FileSet* added_files;
  };
  VersionSet* vset_;
  Version* base_;  // Version the edits are applied on top of (ref held).
  LevelState levels_[config::kNumLevels];
 public:
  Builder(VersionSet* vset, Version* base) : vset_(vset), base_(base) {
    base_->Ref();
    BySmallestKey cmp;
    cmp.internal_comparator = &vset_->icmp_;
    for (int level = 0; level < config::kNumLevels; level++) {
      levels_[level].added_files = new FileSet(cmp);
    }
  }
  ~Builder() {
    // Drop the reference taken on every added file; delete files whose
    // count reaches zero (they were never published into a Version).
    for (int level = 0; level < config::kNumLevels; level++) {
      const FileSet* added = levels_[level].added_files;
      std::vector<FileMetaData*> to_unref;
      to_unref.reserve(added->size());
      for (FileSet::const_iterator it = added->begin(); it != added->end();
           ++it) {
        to_unref.push_back(*it);
      }
      delete added;
      for (uint32_t i = 0; i < to_unref.size(); i++) {
        FileMetaData* f = to_unref[i];
        f->refs--;
        if (f->refs <= 0) {
          delete f;
        }
      }
    }
    base_->Unref();
  }
  // Folds *edit into the pending state: compaction pointers, deletions and
  // new files.
  void Apply(const VersionEdit* edit) {
    for (size_t i = 0; i < edit->compact_pointers_.size(); i++) {
      const int level = edit->compact_pointers_[i].first;
      vset_->compact_pointer_[level] =
          edit->compact_pointers_[i].second.Encode().ToString();
    }
    for (const auto& deleted_file_set_kvp : edit->deleted_files_) {
      const int level = deleted_file_set_kvp.first;
      const uint64_t number = deleted_file_set_kvp.second;
      levels_[level].deleted_files.insert(number);
    }
    for (size_t i = 0; i < edit->new_files_.size(); i++) {
      const int level = edit->new_files_[i].first;
      FileMetaData* f = new FileMetaData(edit->new_files_[i].second);
      f->refs = 1;
      // Seek-compaction budget: roughly one allowed seek per 16KB of data,
      // with a floor of 100 so tiny files are not compacted too eagerly.
      f->allowed_seeks = static_cast<int>((f->file_size / 16384U));
      if (f->allowed_seeks < 100) f->allowed_seeks = 100;
      // A file added by this edit supersedes any earlier deletion of the
      // same number.
      levels_[level].deleted_files.erase(f->number);
      levels_[level].added_files->insert(f);
    }
  }
  // Writes base + pending edits into *v, keeping each level sorted by
  // smallest key via a merge of the base files and the added files.
  void SaveTo(Version* v) {
    BySmallestKey cmp;
    cmp.internal_comparator = &vset_->icmp_;
    for (int level = 0; level < config::kNumLevels; level++) {
      const std::vector<FileMetaData*>& base_files = base_->files_[level];
      std::vector<FileMetaData*>::const_iterator base_iter = base_files.begin();
      std::vector<FileMetaData*>::const_iterator base_end = base_files.end();
      const FileSet* added_files = levels_[level].added_files;
      v->files_[level].reserve(base_files.size() + added_files->size());
      for (const auto& added_file : *added_files) {
        // Emit all base files ordered before this added file, then the
        // added file itself.
        for (std::vector<FileMetaData*>::const_iterator bpos =
                 std::upper_bound(base_iter, base_end, added_file, cmp);
             base_iter != bpos; ++base_iter) {
          MaybeAddFile(v, level, *base_iter);
        }
        MaybeAddFile(v, level, added_file);
      }
      // Remaining base files after the last added file.
      for (; base_iter != base_end; ++base_iter) {
        MaybeAddFile(v, level, *base_iter);
      }
#ifndef NDEBUG
      // Debug builds verify that files in levels > 0 never overlap.
      if (level > 0) {
        for (uint32_t i = 1; i < v->files_[level].size(); i++) {
          const InternalKey& prev_end = v->files_[level][i - 1]->largest;
          const InternalKey& this_begin = v->files_[level][i]->smallest;
          if (vset_->icmp_.Compare(prev_end, this_begin) >= 0) {
            std::fprintf(stderr, "overlapping ranges in same level %s vs. %s\n",
                         prev_end.DebugString().c_str(),
                         this_begin.DebugString().c_str());
            std::abort();
          }
        }
      }
#endif
    }
  }
  // Appends f to v's level unless it is marked deleted; takes a reference.
  void MaybeAddFile(Version* v, int level, FileMetaData* f) {
    if (levels_[level].deleted_files.count(f->number) > 0) {
      // File is deleted: do not add.
    } else {
      std::vector<FileMetaData*>* files = &v->files_[level];
      if (level > 0 && !files->empty()) {
        // Must not overlap the previously appended file.
        assert(vset_->icmp_.Compare((*files)[files->size() - 1]->largest,
                                    f->smallest) < 0);
      }
      f->refs++;
      files->push_back(f);
    }
  }
};
// Constructs an empty VersionSet whose current version has no files.
// File numbers start at 2; number 1 is reserved for the initial manifest.
VersionSet::VersionSet(const std::string& dbname, const Options* options,
                       TableCache* table_cache,
                       const InternalKeyComparator* cmp)
    : env_(options->env),
      dbname_(dbname),
      options_(options),
      table_cache_(table_cache),
      icmp_(*cmp),
      next_file_number_(2),
      manifest_file_number_(0),  // Filled by Recover().
      last_sequence_(0),
      log_number_(0),
      prev_log_number_(0),
      descriptor_file_(nullptr),
      descriptor_log_(nullptr),
      dummy_versions_(this),
      current_(nullptr) {
  AppendVersion(new Version(this));
}
VersionSet::~VersionSet() {
  current_->Unref();
  // All versions must have been released by now; only the dummy head
  // should remain in the circular list.
  assert(dummy_versions_.next_ == &dummy_versions_);
  delete descriptor_log_;
  delete descriptor_file_;
}
// Installs v as the current version and links it at the tail of the
// circular version list.  Takes over the caller's (zero-ref) version.
void VersionSet::AppendVersion(Version* v) {
  assert(v->refs_ == 0);
  assert(v != current_);
  if (current_ != nullptr) {
    current_->Unref();
  }
  current_ = v;
  v->Ref();
  // Splice v in just before the dummy head (i.e. at the end of the list).
  v->prev_ = dummy_versions_.prev_;
  v->next_ = &dummy_versions_;
  v->prev_->next_ = v;
  v->next_->prev_ = v;
}
// Applies *edit to the current version, persists it to the MANIFEST (and a
// new CURRENT file if a new manifest was created), then installs the
// resulting version.  *mu must be held on entry; it is released around the
// expensive file I/O and re-acquired before returning.
Status VersionSet::LogAndApply(VersionEdit* edit, port::Mutex* mu) {
  if (edit->has_log_number_) {
    assert(edit->log_number_ >= log_number_);
    assert(edit->log_number_ < next_file_number_);
  } else {
    edit->SetLogNumber(log_number_);
  }
  if (!edit->has_prev_log_number_) {
    edit->SetPrevLogNumber(prev_log_number_);
  }
  edit->SetNextFile(next_file_number_);
  edit->SetLastSequence(last_sequence_);
  // Build the new version from current_ + edit.
  Version* v = new Version(this);
  {
    Builder builder(this, current_);
    builder.Apply(edit);
    builder.SaveTo(v);
  }
  Finalize(v);
  // Lazily open a new MANIFEST (only on first call / after Recover chose a
  // new manifest number) and seed it with a snapshot of the current state.
  std::string new_manifest_file;
  Status s;
  if (descriptor_log_ == nullptr) {
    assert(descriptor_file_ == nullptr);
    new_manifest_file = DescriptorFileName(dbname_, manifest_file_number_);
    s = env_->NewWritableFile(new_manifest_file, &descriptor_file_);
    if (s.ok()) {
      descriptor_log_ = new log::Writer(descriptor_file_);
      s = WriteSnapshot(descriptor_log_);
    }
  }
  {
    // Unlock during slow disk writes; safe because only one thread can be
    // in LogAndApply at a time (callers serialize via the write queue).
    mu->Unlock();
    if (s.ok()) {
      std::string record;
      edit->EncodeTo(&record);
      s = descriptor_log_->AddRecord(record);
      if (s.ok()) {
        s = descriptor_file_->Sync();
      }
      if (!s.ok()) {
        Log(options_->info_log, "MANIFEST write: %s\n", s.ToString().c_str());
      }
    }
    // Point CURRENT at the new manifest only after it is fully synced.
    if (s.ok() && !new_manifest_file.empty()) {
      s = SetCurrentFile(env_, dbname_, manifest_file_number_);
    }
    mu->Lock();
  }
  if (s.ok()) {
    AppendVersion(v);
    log_number_ = edit->log_number_;
    prev_log_number_ = edit->prev_log_number_;
  } else {
    // Roll back: discard the version and any half-created manifest.
    delete v;
    if (!new_manifest_file.empty()) {
      delete descriptor_log_;
      delete descriptor_file_;
      descriptor_log_ = nullptr;
      descriptor_file_ = nullptr;
      env_->RemoveFile(new_manifest_file);
    }
  }
  return s;
}
// Rebuilds the VersionSet state from the MANIFEST named by the CURRENT
// file: replays every VersionEdit record, installs the resulting version,
// and restores the file-number / sequence counters.  Sets *save_manifest
// when the recovered manifest cannot be appended to and a fresh snapshot
// must be written by the caller.
//
// Fix: the ReadFileToString call previously passed the mojibake token
// `¤t` (an HTML-entity corruption of `&current`), which does not
// compile; restored the intended `&current` argument.
Status VersionSet::Recover(bool* save_manifest) {
  struct LogReporter : public log::Reader::Reporter {
    Status* status;
    void Corruption(size_t bytes, const Status& s) override {
      // Keep only the first corruption encountered.
      if (this->status->ok()) *this->status = s;
    }
  };
  // CURRENT contains the name of the active MANIFEST, newline-terminated.
  std::string current;
  Status s = ReadFileToString(env_, CurrentFileName(dbname_), &current);
  if (!s.ok()) {
    return s;
  }
  if (current.empty() || current[current.size() - 1] != '\n') {
    return Status::Corruption("CURRENT file does not end with newline");
  }
  current.resize(current.size() - 1);
  std::string dscname = dbname_ + "/" + current;
  SequentialFile* file;
  s = env_->NewSequentialFile(dscname, &file);
  if (!s.ok()) {
    if (s.IsNotFound()) {
      return Status::Corruption("CURRENT points to a non-existent file",
                                s.ToString());
    }
    return s;
  }
  bool have_log_number = false;
  bool have_prev_log_number = false;
  bool have_next_file = false;
  bool have_last_sequence = false;
  uint64_t next_file = 0;
  uint64_t last_sequence = 0;
  uint64_t log_number = 0;
  uint64_t prev_log_number = 0;
  Builder builder(this, current_);
  int read_records = 0;
  {
    LogReporter reporter;
    reporter.status = &s;
    log::Reader reader(file, &reporter, true /*checksum*/,
                       0 /*initial_offset*/);
    Slice record;
    std::string scratch;
    // Replay each edit record; later records override earlier metadata.
    while (reader.ReadRecord(&record, &scratch) && s.ok()) {
      ++read_records;
      VersionEdit edit;
      s = edit.DecodeFrom(record);
      if (s.ok()) {
        if (edit.has_comparator_ &&
            edit.comparator_ != icmp_.user_comparator()->Name()) {
          s = Status::InvalidArgument(
              edit.comparator_ + " does not match existing comparator ",
              icmp_.user_comparator()->Name());
        }
      }
      if (s.ok()) {
        builder.Apply(&edit);
      }
      if (edit.has_log_number_) {
        log_number = edit.log_number_;
        have_log_number = true;
      }
      if (edit.has_prev_log_number_) {
        prev_log_number = edit.prev_log_number_;
        have_prev_log_number = true;
      }
      if (edit.has_next_file_number_) {
        next_file = edit.next_file_number_;
        have_next_file = true;
      }
      if (edit.has_last_sequence_) {
        last_sequence = edit.last_sequence_;
        have_last_sequence = true;
      }
    }
  }
  delete file;
  file = nullptr;
  if (s.ok()) {
    // The manifest must have supplied the core counters.
    if (!have_next_file) {
      s = Status::Corruption("no meta-nextfile entry in descriptor");
    } else if (!have_log_number) {
      s = Status::Corruption("no meta-lognumber entry in descriptor");
    } else if (!have_last_sequence) {
      s = Status::Corruption("no last-sequence-number entry in descriptor");
    }
    if (!have_prev_log_number) {
      prev_log_number = 0;
    }
    MarkFileNumberUsed(prev_log_number);
    MarkFileNumberUsed(log_number);
  }
  if (s.ok()) {
    Version* v = new Version(this);
    builder.SaveTo(v);
    Finalize(v);
    AppendVersion(v);
    manifest_file_number_ = next_file;
    next_file_number_ = next_file + 1;
    last_sequence_ = last_sequence;
    log_number_ = log_number;
    prev_log_number_ = prev_log_number;
    // If the existing manifest can be appended to, keep using it;
    // otherwise ask the caller to write a fresh snapshot.
    if (ReuseManifest(dscname, current)) {
      // No need to save a new manifest.
    } else {
      *save_manifest = true;
    }
  } else {
    std::string error = s.ToString();
    Log(options_->info_log, "Error recovering version set with %d records: %s",
        read_records, error.c_str());
  }
  return s;
}
// Attempts to keep appending to the manifest recovered from disk instead of
// writing a new one.  Only done when options_->reuse_logs is set, the file
// parses as a descriptor, and it has not yet grown past the target size.
// Returns true when the existing manifest was adopted.
bool VersionSet::ReuseManifest(const std::string& dscname,
                               const std::string& dscbase) {
  if (!options_->reuse_logs) {
    return false;
  }
  FileType manifest_type;
  uint64_t manifest_number;
  uint64_t manifest_size;
  if (!ParseFileName(dscbase, &manifest_number, &manifest_type) ||
      manifest_type != kDescriptorFile ||
      !env_->GetFileSize(dscname, &manifest_size).ok() ||
      // Make a new compacted MANIFEST if the old one is too big.
      manifest_size >= TargetFileSize(options_)) {
    return false;
  }
  assert(descriptor_file_ == nullptr);
  assert(descriptor_log_ == nullptr);
  Status r = env_->NewAppendableFile(dscname, &descriptor_file_);
  if (!r.ok()) {
    Log(options_->info_log, "Reuse MANIFEST: %s\n", r.ToString().c_str());
    assert(descriptor_file_ == nullptr);
    return false;
  }
  Log(options_->info_log, "Reusing MANIFEST %s\n", dscname.c_str());
  // Resume the log writer at the current end of the file.
  descriptor_log_ = new log::Writer(descriptor_file_, manifest_size);
  manifest_file_number_ = manifest_number;
  return true;
}
// Records that |number| has been handed out, so NewFileNumber() never
// returns it (or anything smaller) again.
void VersionSet::MarkFileNumberUsed(uint64_t number) {
  if (number >= next_file_number_) {
    next_file_number_ = number + 1;
  }
}
// Precomputes the best level for the next size compaction of *v.  Level 0
// is scored by file count (many small flushes), other levels by total bytes
// relative to their budget.  A score >= 1 means compaction is needed.
void VersionSet::Finalize(Version* v) {
  int best_level = -1;
  double best_score = -1;
  for (int level = 0; level < config::kNumLevels - 1; level++) {
    double score;
    if (level == 0) {
      // Count-based so that a burst of tiny flush files still triggers a
      // compaction before read amplification gets bad.
      score = v->files_[level].size() /
              static_cast<double>(config::kL0_CompactionTrigger);
    } else {
      const uint64_t level_bytes = TotalFileSize(v->files_[level]);
      score =
          static_cast<double>(level_bytes) / MaxBytesForLevel(options_, level);
    }
    if (score > best_score) {
      best_level = level;
      best_score = score;
    }
  }
  v->compaction_level_ = best_level;
  v->compaction_score_ = best_score;
}
// Writes a single VersionEdit record capturing the complete current state
// (comparator name, per-level compaction pointers, all live files) as the
// first record of a new MANIFEST.
Status VersionSet::WriteSnapshot(log::Writer* log) {
  VersionEdit edit;
  edit.SetComparatorName(icmp_.user_comparator()->Name());
  for (int level = 0; level < config::kNumLevels; level++) {
    if (!compact_pointer_[level].empty()) {
      InternalKey key;
      key.DecodeFrom(compact_pointer_[level]);
      edit.SetCompactPointer(level, key);
    }
  }
  for (int level = 0; level < config::kNumLevels; level++) {
    const std::vector<FileMetaData*>& files = current_->files_[level];
    for (size_t i = 0; i < files.size(); i++) {
      const FileMetaData* f = files[i];
      edit.AddFile(level, f->number, f->file_size, f->smallest, f->largest);
    }
  }
  std::string record;
  edit.EncodeTo(&record);
  return log->AddRecord(record);
}
// Number of files at the given level in the current version.
int VersionSet::NumLevelFiles(int level) const {
  assert(level >= 0);
  assert(level < config::kNumLevels);
  return current_->files_[level].size();
}
// Formats per-level file counts into the caller-provided scratch buffer,
// e.g. "files[ 3 5 0 0 0 0 0 ]".  The format string hard-codes seven
// levels, hence the static_assert.
const char* VersionSet::LevelSummary(LevelSummaryStorage* scratch) const {
  static_assert(config::kNumLevels == 7, "");
  std::snprintf(
      scratch->buffer, sizeof(scratch->buffer), "files[ %d %d %d %d %d %d %d ]",
      int(current_->files_[0].size()), int(current_->files_[1].size()),
      int(current_->files_[2].size()), int(current_->files_[3].size()),
      int(current_->files_[4].size()), int(current_->files_[5].size()),
      int(current_->files_[6].size()));
  return scratch->buffer;
}
// Estimates the byte offset of ikey within version *v: whole files ordered
// before the key contribute their full size; the file straddling the key
// contributes the table's own offset estimate for the key.
uint64_t VersionSet::ApproximateOffsetOf(Version* v, const InternalKey& ikey) {
  uint64_t result = 0;
  for (int level = 0; level < config::kNumLevels; level++) {
    const std::vector<FileMetaData*>& files = v->files_[level];
    for (size_t i = 0; i < files.size(); i++) {
      if (icmp_.Compare(files[i]->largest, ikey) <= 0) {
        // Entire file is before "ikey", so just add the file size.
        result += files[i]->file_size;
      } else if (icmp_.Compare(files[i]->smallest, ikey) > 0) {
        // Entire file is after "ikey"; on sorted levels (>0) no later file
        // can contain it either, so stop scanning this level.
        if (level > 0) {
          break;
        }
      } else {
        // "ikey" falls inside this file: ask the table for a finer estimate.
        Table* tableptr;
        Iterator* iter = table_cache_->NewIterator(
            ReadOptions(), files[i]->number, files[i]->file_size, &tableptr);
        if (tableptr != nullptr) {
          result += tableptr->ApproximateOffsetOf(ikey.Encode());
        }
        delete iter;
      }
    }
  }
  return result;
}
// Inserts into *live the number of every file referenced by any version in
// the set (all versions in the circular list, all levels).  Used by the DB
// to decide which table files are safe to delete.
void VersionSet::AddLiveFiles(std::set<uint64_t>* live) {
  for (Version* v = dummy_versions_.next_; v != &dummy_versions_;
       v = v->next_) {
    for (int level = 0; level < config::kNumLevels; level++) {
      for (const FileMetaData* f : v->files_[level]) {
        live->insert(f->number);
      }
    }
  }
}
// Total bytes stored at the given level in the current version.
int64_t VersionSet::NumLevelBytes(int level) const {
  assert(level >= 0);
  assert(level < config::kNumLevels);
  return TotalFileSize(current_->files_[level]);
}
// Returns the largest amount of next-level data that any single file in
// levels 1..kNumLevels-2 overlaps; a gauge of worst-case compaction cost.
int64_t VersionSet::MaxNextLevelOverlappingBytes() {
  int64_t result = 0;
  std::vector<FileMetaData*> overlaps;
  for (int level = 1; level < config::kNumLevels - 1; level++) {
    for (size_t i = 0; i < current_->files_[level].size(); i++) {
      const FileMetaData* f = current_->files_[level][i];
      current_->GetOverlappingInputs(level + 1, &f->smallest, &f->largest,
                                     &overlaps);
      const int64_t sum = TotalFileSize(overlaps);
      if (sum > result) {
        result = sum;
      }
    }
  }
  return result;
}
// Stores in *smallest/*largest the minimal internal-key range covering all
// of inputs, which must be non-empty.
void VersionSet::GetRange(const std::vector<FileMetaData*>& inputs,
                          InternalKey* smallest, InternalKey* largest) {
  assert(!inputs.empty());
  smallest->Clear();
  largest->Clear();
  // Seed the range from the first file, then widen it with the rest.
  *smallest = inputs[0]->smallest;
  *largest = inputs[0]->largest;
  for (size_t i = 1; i < inputs.size(); i++) {
    FileMetaData* f = inputs[i];
    if (icmp_.Compare(f->smallest, *smallest) < 0) {
      *smallest = f->smallest;
    }
    if (icmp_.Compare(f->largest, *largest) > 0) {
      *largest = f->largest;
    }
  }
}
// Like GetRange, but over the union of two file lists.
void VersionSet::GetRange2(const std::vector<FileMetaData*>& inputs1,
                           const std::vector<FileMetaData*>& inputs2,
                           InternalKey* smallest, InternalKey* largest) {
  std::vector<FileMetaData*> all;
  all.reserve(inputs1.size() + inputs2.size());
  all.insert(all.end(), inputs1.begin(), inputs1.end());
  all.insert(all.end(), inputs2.begin(), inputs2.end());
  GetRange(all, smallest, largest);
}
// Builds the merged iterator over all input files of compaction *c:
// level-0 inputs get one iterator per file (they may overlap), other levels
// a single concatenating iterator each; the results are merge-sorted.
Iterator* VersionSet::MakeInputIterator(Compaction* c) {
  ReadOptions options;
  options.verify_checksums = options_->paranoid_checks;
  options.fill_cache = false;  // Compaction reads should not evict hot data.
  // Level-0 files cannot share a concatenating iterator, so the worst case
  // is one iterator per level-0 file plus one for the next level.
  const int space = (c->level() == 0 ? c->inputs_[0].size() + 1 : 2);
  Iterator** list = new Iterator*[space];
  int num = 0;
  for (int which = 0; which < 2; which++) {
    if (!c->inputs_[which].empty()) {
      if (c->level() + which == 0) {
        const std::vector<FileMetaData*>& files = c->inputs_[which];
        for (size_t i = 0; i < files.size(); i++) {
          list[num++] = table_cache_->NewIterator(options, files[i]->number,
                                                  files[i]->file_size);
        }
      } else {
        // Create concatenating iterator for the files from this level.
        list[num++] = NewTwoLevelIterator(
            new Version::LevelFileNumIterator(icmp_, &c->inputs_[which]),
            &GetFileIterator, table_cache_, options);
      }
    }
  }
  assert(num <= space);
  Iterator* result = NewMergingIterator(&icmp_, list, num);
  delete[] list;
  return result;
}
// Selects the next compaction, or nullptr when none is needed.  Size-based
// compaction (level over its byte/count budget) takes precedence over
// seek-based compaction (a file that exhausted its allowed_seeks).
Compaction* VersionSet::PickCompaction() {
  Compaction* c;
  int level;
  const bool size_compaction = (current_->compaction_score_ >= 1);
  const bool seek_compaction = (current_->file_to_compact_ != nullptr);
  if (size_compaction) {
    level = current_->compaction_level_;
    assert(level >= 0);
    assert(level + 1 < config::kNumLevels);
    c = new Compaction(options_, level);
    // Pick the first file past compact_pointer_[level] so successive
    // compactions rotate through the level's key space.
    for (size_t i = 0; i < current_->files_[level].size(); i++) {
      FileMetaData* f = current_->files_[level][i];
      if (compact_pointer_[level].empty() ||
          icmp_.Compare(f->largest.Encode(), compact_pointer_[level]) > 0) {
        c->inputs_[0].push_back(f);
        break;
      }
    }
    if (c->inputs_[0].empty()) {
      // Wrap-around to the beginning of the key space.
      c->inputs_[0].push_back(current_->files_[level][0]);
    }
  } else if (seek_compaction) {
    level = current_->file_to_compact_level_;
    c = new Compaction(options_, level);
    c->inputs_[0].push_back(current_->file_to_compact_);
  } else {
    return nullptr;
  }
  c->input_version_ = current_;
  c->input_version_->Ref();
  // Level-0 files may overlap each other, so pull in every overlapping
  // level-0 file (note GetOverlappingInputs expands the range transitively).
  if (level == 0) {
    InternalKey smallest, largest;
    GetRange(c->inputs_[0], &smallest, &largest);
    current_->GetOverlappingInputs(0, &smallest, &largest, &c->inputs_[0]);
    assert(!c->inputs_[0].empty());
  }
  SetupOtherInputs(c);
  return c;
}
bool FindLargestKey(const InternalKeyComparator& icmp,
const std::vector<FileMetaData*>& files,
InternalKey* largest_key) {
if (files.empty()) {
return false;
}
*largest_key = files[0]->largest;
for (size_t i = 1; i < files.size(); ++i) {
FileMetaData* f = files[i];
if (icmp.Compare(f->largest, *largest_key) > 0) {
*largest_key = f->largest;
}
}
return true;
}
// Among level_files, returns the file with the smallest "smallest" key that
// is still a boundary of largest_key: its smallest key compares greater as
// an internal key yet has the same user key (i.e. an older entry for the
// same user key living in another file).  Returns nullptr if none exists.
FileMetaData* FindSmallestBoundaryFile(
    const InternalKeyComparator& icmp,
    const std::vector<FileMetaData*>& level_files,
    const InternalKey& largest_key) {
  const Comparator* user_cmp = icmp.user_comparator();
  FileMetaData* smallest_boundary_file = nullptr;
  for (FileMetaData* f : level_files) {
    const bool is_boundary =
        icmp.Compare(f->smallest, largest_key) > 0 &&
        user_cmp->Compare(f->smallest.user_key(), largest_key.user_key()) == 0;
    if (!is_boundary) {
      continue;
    }
    if (smallest_boundary_file == nullptr ||
        icmp.Compare(f->smallest, smallest_boundary_file->smallest) < 0) {
      smallest_boundary_file = f;
    }
  }
  return smallest_boundary_file;
}
// Extends *compaction_files so that no user key is split between the
// compaction and a file left behind: repeatedly pulls in the "boundary"
// file whose smallest entry shares the user key of the current largest
// compaction key (an older record for that key), until no such file
// remains.  Leaving such a file out would let a newer value be compacted
// below an older one, corrupting reads.
//
// Fix: replaced the lone `NULL` with `nullptr` for consistency with the
// rest of the file, and dropped the redundant continue_searching flag in
// favor of a direct loop exit.
void AddBoundaryInputs(const InternalKeyComparator& icmp,
                       const std::vector<FileMetaData*>& level_files,
                       std::vector<FileMetaData*>* compaction_files) {
  InternalKey largest_key;
  // Quick return if compaction_files is empty.
  if (!FindLargestKey(icmp, *compaction_files, &largest_key)) {
    return;
  }
  while (true) {
    FileMetaData* smallest_boundary_file =
        FindSmallestBoundaryFile(icmp, level_files, largest_key);
    if (smallest_boundary_file == nullptr) {
      break;  // No more boundary files; the input set is closed.
    }
    compaction_files->push_back(smallest_boundary_file);
    largest_key = smallest_boundary_file->largest;
  }
}
// Completes compaction *c after inputs_[0] is chosen: closes the input set
// over key boundaries, picks the overlapping next-level files, optionally
// grows the level-N inputs when that costs no extra level-N+1 files, and
// records grandparent overlaps plus the next compaction pointer.
void VersionSet::SetupOtherInputs(Compaction* c) {
  const int level = c->level();
  InternalKey smallest, largest;
  AddBoundaryInputs(icmp_, current_->files_[level], &c->inputs_[0]);
  GetRange(c->inputs_[0], &smallest, &largest);
  current_->GetOverlappingInputs(level + 1, &smallest, &largest,
                                 &c->inputs_[1]);
  AddBoundaryInputs(icmp_, current_->files_[level + 1], &c->inputs_[1]);
  // Range spanning both input levels.
  InternalKey all_start, all_limit;
  GetRange2(c->inputs_[0], c->inputs_[1], &all_start, &all_limit);
  if (!c->inputs_[1].empty()) {
    // Try to grow the level-N inputs without changing the level-N+1 set.
    std::vector<FileMetaData*> expanded0;
    current_->GetOverlappingInputs(level, &all_start, &all_limit, &expanded0);
    AddBoundaryInputs(icmp_, current_->files_[level], &expanded0);
    const int64_t inputs0_size = TotalFileSize(c->inputs_[0]);
    const int64_t inputs1_size = TotalFileSize(c->inputs_[1]);
    const int64_t expanded0_size = TotalFileSize(expanded0);
    if (expanded0.size() > c->inputs_[0].size() &&
        inputs1_size + expanded0_size <
            ExpandedCompactionByteSizeLimit(options_)) {
      InternalKey new_start, new_limit;
      GetRange(expanded0, &new_start, &new_limit);
      std::vector<FileMetaData*> expanded1;
      current_->GetOverlappingInputs(level + 1, &new_start, &new_limit,
                                     &expanded1);
      AddBoundaryInputs(icmp_, current_->files_[level + 1], &expanded1);
      // Only accept the expansion if it did not drag in more level-N+1
      // files than the original choice.
      if (expanded1.size() == c->inputs_[1].size()) {
        Log(options_->info_log,
            "Expanding@%d %d+%d (%ld+%ld bytes) to %d+%d (%ld+%ld bytes)\n",
            level, int(c->inputs_[0].size()), int(c->inputs_[1].size()),
            long(inputs0_size), long(inputs1_size), int(expanded0.size()),
            int(expanded1.size()), long(expanded0_size), long(inputs1_size));
        smallest = new_start;
        largest = new_limit;
        c->inputs_[0] = expanded0;
        c->inputs_[1] = expanded1;
        GetRange2(c->inputs_[0], c->inputs_[1], &all_start, &all_limit);
      }
    }
  }
  // Grandparent (level+2) files overlapping the compaction; used by
  // ShouldStopBefore to bound future compaction fan-out.
  if (level + 2 < config::kNumLevels) {
    current_->GetOverlappingInputs(level + 2, &all_start, &all_limit,
                                   &c->grandparents_);
  }
  // Persist the compaction pointer eagerly; if this compaction fails it is
  // simply retried at a later key on the next attempt.
  compact_pointer_[level] = largest.Encode().ToString();
  c->edit_.SetCompactPointer(level, largest);
}
// Builds a manual compaction for the files at `level` overlapping
// [begin, end]; nullptr bounds mean unbounded.  Returns nullptr when
// nothing overlaps.  For levels > 0 the input list is truncated once it
// exceeds one output-file budget, to keep a single compaction bounded
// (safe because those files are disjoint).
Compaction* VersionSet::CompactRange(int level, const InternalKey* begin,
                                     const InternalKey* end) {
  std::vector<FileMetaData*> inputs;
  current_->GetOverlappingInputs(level, begin, end, &inputs);
  if (inputs.empty()) {
    return nullptr;
  }
  if (level > 0) {
    const uint64_t limit = MaxFileSizeForLevel(options_, level);
    uint64_t total = 0;
    for (size_t i = 0; i < inputs.size(); i++) {
      uint64_t s = inputs[i]->file_size;
      total += s;
      if (total >= limit) {
        inputs.resize(i + 1);
        break;
      }
    }
  }
  Compaction* c = new Compaction(options_, level);
  c->input_version_ = current_;
  c->input_version_->Ref();
  c->inputs_[0] = inputs;
  SetupOtherInputs(c);
  return c;
}
// A compaction from `level` into level+1.  level_ptrs_ caches per-level
// scan positions for IsBaseLevelForKey and starts at zero.
Compaction::Compaction(const Options* options, int level)
    : level_(level),
      max_output_file_size_(MaxFileSizeForLevel(options, level)),
      input_version_(nullptr),
      grandparent_index_(0),
      seen_key_(false),
      overlapped_bytes_(0) {
  for (int i = 0; i < config::kNumLevels; i++) {
    level_ptrs_[i] = 0;
  }
}
// Releases the input version if ReleaseInputs() was not already called.
Compaction::~Compaction() {
  if (input_version_ != nullptr) {
    input_version_->Unref();
  }
}
// A compaction is a trivial move (just re-link the file into the next
// level, no rewrite) when it has a single input file, nothing to merge in
// the next level, and moving it would not create an oversized grandparent
// overlap for later compactions.
bool Compaction::IsTrivialMove() const {
  const VersionSet* vset = input_version_->vset_;
  return (num_input_files(0) == 1 && num_input_files(1) == 0 &&
          TotalFileSize(grandparents_) <=
              MaxGrandParentOverlapBytes(vset->options_));
}
// Records in *edit the deletion of every input file of this compaction
// (inputs_[0] at level_, inputs_[1] at level_+1).
void Compaction::AddInputDeletions(VersionEdit* edit) {
  for (int which = 0; which < 2; which++) {
    for (const FileMetaData* f : inputs_[which]) {
      edit->RemoveFile(level_ + which, f->number);
    }
  }
}
// Returns true when no level deeper than the compaction output could
// contain user_key, so a deletion tombstone for it can be dropped.
// level_ptrs_ remembers how far each level has been scanned; this is valid
// because the compaction visits keys in sorted order.
bool Compaction::IsBaseLevelForKey(const Slice& user_key) {
  const Comparator* user_cmp = input_version_->vset_->icmp_.user_comparator();
  for (int lvl = level_ + 2; lvl < config::kNumLevels; lvl++) {
    const std::vector<FileMetaData*>& files = input_version_->files_[lvl];
    while (level_ptrs_[lvl] < files.size()) {
      FileMetaData* f = files[level_ptrs_[lvl]];
      if (user_cmp->Compare(user_key, f->largest.user_key()) <= 0) {
        // Key may fall inside this file's range.
        if (user_cmp->Compare(user_key, f->smallest.user_key()) >= 0) {
          return false;
        }
        break;  // Key is in the gap before this file; check the next level.
      }
      // File is entirely before the key; never look at it again.
      level_ptrs_[lvl]++;
    }
  }
  return true;
}
// Returns true when the current output file should be closed before adding
// internal_key, because the output so far already overlaps too many bytes
// of grandparent (level+2) data — keeping outputs small there bounds the
// cost of compacting them later.  Must be called with keys in sorted order.
bool Compaction::ShouldStopBefore(const Slice& internal_key) {
  const VersionSet* vset = input_version_->vset_;
  const InternalKeyComparator* icmp = &vset->icmp_;
  // Advance past grandparent files fully before internal_key, accumulating
  // their sizes (only once the first key has been seen).
  while (grandparent_index_ < grandparents_.size() &&
         icmp->Compare(internal_key,
                       grandparents_[grandparent_index_]->largest.Encode()) >
             0) {
    if (seen_key_) {
      overlapped_bytes_ += grandparents_[grandparent_index_]->file_size;
    }
    grandparent_index_++;
  }
  seen_key_ = true;
  if (overlapped_bytes_ > MaxGrandParentOverlapBytes(vset->options_)) {
    // Too much overlap for current output; start a new output file and
    // reset the counter.
    overlapped_bytes_ = 0;
    return true;
  } else {
    return false;
  }
}
// Drops the reference on the input version early (idempotent; the
// destructor covers the case where this is never called).
void Compaction::ReleaseInputs() {
  if (input_version_ != nullptr) {
    input_version_->Unref();
    input_version_ = nullptr;
  }
}
} | #include "db/version_set.h"
#include "gtest/gtest.h"
#include "util/logging.h"
#include "util/testutil.h"
namespace leveldb {
// Test fixture for FindFile / SomeFileOverlapsRange: accumulates a sorted
// list of FileMetaData and exposes lookup/overlap helpers over it.
//
// Fix: the destructor's cleanup loop compared a signed int against
// files_.size() (size_t) — a signed/unsigned mismatch; it now uses size_t.
class FindFileTest : public testing::Test {
 public:
  FindFileTest() : disjoint_sorted_files_(true) {}
  ~FindFileTest() {
    for (size_t i = 0; i < files_.size(); i++) {
      delete files_[i];
    }
  }
  // Appends a file covering [smallest, largest]; file numbers are assigned
  // sequentially so Find() indices match insertion order.
  void Add(const char* smallest, const char* largest,
           SequenceNumber smallest_seq = 100,
           SequenceNumber largest_seq = 100) {
    FileMetaData* f = new FileMetaData;
    f->number = files_.size() + 1;
    f->smallest = InternalKey(smallest, smallest_seq, kTypeValue);
    f->largest = InternalKey(largest, largest_seq, kTypeValue);
    files_.push_back(f);
  }
  // Index of the first file whose largest key is >= key.
  int Find(const char* key) {
    InternalKey target(key, 100, kTypeValue);
    InternalKeyComparator cmp(BytewiseComparator());
    return FindFile(cmp, files_, target.Encode());
  }
  // Whether any file overlaps [smallest, largest]; nullptr = unbounded.
  bool Overlaps(const char* smallest, const char* largest) {
    InternalKeyComparator cmp(BytewiseComparator());
    Slice s(smallest != nullptr ? smallest : "");
    Slice l(largest != nullptr ? largest : "");
    return SomeFileOverlapsRange(cmp, disjoint_sorted_files_, files_,
                                 (smallest != nullptr ? &s : nullptr),
                                 (largest != nullptr ? &l : nullptr));
  }
  bool disjoint_sorted_files_;  // Passed through to SomeFileOverlapsRange.
 private:
  std::vector<FileMetaData*> files_;
};
TEST_F(FindFileTest, Empty) {
ASSERT_EQ(0, Find("foo"));
ASSERT_TRUE(!Overlaps("a", "z"));
ASSERT_TRUE(!Overlaps(nullptr, "z"));
ASSERT_TRUE(!Overlaps("a", nullptr));
ASSERT_TRUE(!Overlaps(nullptr, nullptr));
}
TEST_F(FindFileTest, Single) {
Add("p", "q");
ASSERT_EQ(0, Find("a"));
ASSERT_EQ(0, Find("p"));
ASSERT_EQ(0, Find("p1"));
ASSERT_EQ(0, Find("q"));
ASSERT_EQ(1, Find("q1"));
ASSERT_EQ(1, Find("z"));
ASSERT_TRUE(!Overlaps("a", "b"));
ASSERT_TRUE(!Overlaps("z1", "z2"));
ASSERT_TRUE(Overlaps("a", "p"));
ASSERT_TRUE(Overlaps("a", "q"));
ASSERT_TRUE(Overlaps("a", "z"));
ASSERT_TRUE(Overlaps("p", "p1"));
ASSERT_TRUE(Overlaps("p", "q"));
ASSERT_TRUE(Overlaps("p", "z"));
ASSERT_TRUE(Overlaps("p1", "p2"));
ASSERT_TRUE(Overlaps("p1", "z"));
ASSERT_TRUE(Overlaps("q", "q"));
ASSERT_TRUE(Overlaps("q", "q1"));
ASSERT_TRUE(!Overlaps(nullptr, "j"));
ASSERT_TRUE(!Overlaps("r", nullptr));
ASSERT_TRUE(Overlaps(nullptr, "p"));
ASSERT_TRUE(Overlaps(nullptr, "p1"));
ASSERT_TRUE(Overlaps("q", nullptr));
ASSERT_TRUE(Overlaps(nullptr, nullptr));
}
TEST_F(FindFileTest, Multiple) {
Add("150", "200");
Add("200", "250");
Add("300", "350");
Add("400", "450");
ASSERT_EQ(0, Find("100"));
ASSERT_EQ(0, Find("150"));
ASSERT_EQ(0, Find("151"));
ASSERT_EQ(0, Find("199"));
ASSERT_EQ(0, Find("200"));
ASSERT_EQ(1, Find("201"));
ASSERT_EQ(1, Find("249"));
ASSERT_EQ(1, Find("250"));
ASSERT_EQ(2, Find("251"));
ASSERT_EQ(2, Find("299"));
ASSERT_EQ(2, Find("300"));
ASSERT_EQ(2, Find("349"));
ASSERT_EQ(2, Find("350"));
ASSERT_EQ(3, Find("351"));
ASSERT_EQ(3, Find("400"));
ASSERT_EQ(3, Find("450"));
ASSERT_EQ(4, Find("451"));
ASSERT_TRUE(!Overlaps("100", "149"));
ASSERT_TRUE(!Overlaps("251", "299"));
ASSERT_TRUE(!Overlaps("451", "500"));
ASSERT_TRUE(!Overlaps("351", "399"));
ASSERT_TRUE(Overlaps("100", "150"));
ASSERT_TRUE(Overlaps("100", "200"));
ASSERT_TRUE(Overlaps("100", "300"));
ASSERT_TRUE(Overlaps("100", "400"));
ASSERT_TRUE(Overlaps("100", "500"));
ASSERT_TRUE(Overlaps("375", "400"));
ASSERT_TRUE(Overlaps("450", "450"));
ASSERT_TRUE(Overlaps("450", "500"));
}
TEST_F(FindFileTest, MultipleNullBoundaries) {
Add("150", "200");
Add("200", "250");
Add("300", "350");
Add("400", "450");
ASSERT_TRUE(!Overlaps(nullptr, "149"));
ASSERT_TRUE(!Overlaps("451", nullptr));
ASSERT_TRUE(Overlaps(nullptr, nullptr));
ASSERT_TRUE(Overlaps(nullptr, "150"));
ASSERT_TRUE(Overlaps(nullptr, "199"));
ASSERT_TRUE(Overlaps(nullptr, "200"));
ASSERT_TRUE(Overlaps(nullptr, "201"));
ASSERT_TRUE(Overlaps(nullptr, "400"));
ASSERT_TRUE(Overlaps(nullptr, "800"));
ASSERT_TRUE(Overlaps("100", nullptr));
ASSERT_TRUE(Overlaps("200", nullptr));
ASSERT_TRUE(Overlaps("449", nullptr));
ASSERT_TRUE(Overlaps("450", nullptr));
}
TEST_F(FindFileTest, OverlapSequenceChecks) {
Add("200", "200", 5000, 3000);
ASSERT_TRUE(!Overlaps("199", "199"));
ASSERT_TRUE(!Overlaps("201", "300"));
ASSERT_TRUE(Overlaps("200", "200"));
ASSERT_TRUE(Overlaps("190", "200"));
ASSERT_TRUE(Overlaps("200", "210"));
}
TEST_F(FindFileTest, OverlappingFiles) {
Add("150", "600");
Add("400", "500");
disjoint_sorted_files_ = false;
ASSERT_TRUE(!Overlaps("100", "149"));
ASSERT_TRUE(!Overlaps("601", "700"));
ASSERT_TRUE(Overlaps("100", "150"));
ASSERT_TRUE(Overlaps("100", "200"));
ASSERT_TRUE(Overlaps("100", "300"));
ASSERT_TRUE(Overlaps("100", "400"));
ASSERT_TRUE(Overlaps("100", "500"));
ASSERT_TRUE(Overlaps("375", "400"));
ASSERT_TRUE(Overlaps("450", "450"));
ASSERT_TRUE(Overlaps("450", "500"));
ASSERT_TRUE(Overlaps("450", "700"));
ASSERT_TRUE(Overlaps("600", "700"));
}
void AddBoundaryInputs(const InternalKeyComparator& icmp,
const std::vector<FileMetaData*>& level_files,
std::vector<FileMetaData*>* compaction_files);
// Fixture for AddBoundaryInputs(): owns every FileMetaData it creates so
// each test can build level/compaction file lists without leaking.
class AddBoundaryInputsTest : public testing::Test {
 public:
  std::vector<FileMetaData*> level_files_;
  std::vector<FileMetaData*> compaction_files_;
  std::vector<FileMetaData*> all_files_;
  InternalKeyComparator icmp_;

  AddBoundaryInputsTest() : icmp_(BytewiseComparator()) {}

  ~AddBoundaryInputsTest() {
    for (size_t i = 0; i < all_files_.size(); ++i) {
      delete all_files_[i];
    }
    all_files_.clear();
  }

  // Creates a FileMetaData with the given number and key range.  Ownership
  // stays with the fixture (tracked in all_files_ for cleanup).
  FileMetaData* CreateFileMetaData(uint64_t number, InternalKey smallest,
                                   InternalKey largest) {
    FileMetaData* f = new FileMetaData();
    f->number = number;
    f->smallest = smallest;
    f->largest = largest;
    all_files_.push_back(f);
    return f;
  }
};
TEST_F(AddBoundaryInputsTest, TestEmptyFileSets) {
AddBoundaryInputs(icmp_, level_files_, &compaction_files_);
ASSERT_TRUE(compaction_files_.empty());
ASSERT_TRUE(level_files_.empty());
}
TEST_F(AddBoundaryInputsTest, TestEmptyLevelFiles) {
FileMetaData* f1 =
CreateFileMetaData(1, InternalKey("100", 2, kTypeValue),
InternalKey(InternalKey("100", 1, kTypeValue)));
compaction_files_.push_back(f1);
AddBoundaryInputs(icmp_, level_files_, &compaction_files_);
ASSERT_EQ(1, compaction_files_.size());
ASSERT_EQ(f1, compaction_files_[0]);
ASSERT_TRUE(level_files_.empty());
}
TEST_F(AddBoundaryInputsTest, TestEmptyCompactionFiles) {
FileMetaData* f1 =
CreateFileMetaData(1, InternalKey("100", 2, kTypeValue),
InternalKey(InternalKey("100", 1, kTypeValue)));
level_files_.push_back(f1);
AddBoundaryInputs(icmp_, level_files_, &compaction_files_);
ASSERT_TRUE(compaction_files_.empty());
ASSERT_EQ(1, level_files_.size());
ASSERT_EQ(f1, level_files_[0]);
}
TEST_F(AddBoundaryInputsTest, TestNoBoundaryFiles) {
FileMetaData* f1 =
CreateFileMetaData(1, InternalKey("100", 2, kTypeValue),
InternalKey(InternalKey("100", 1, kTypeValue)));
FileMetaData* f2 =
CreateFileMetaData(1, InternalKey("200", 2, kTypeValue),
InternalKey(InternalKey("200", 1, kTypeValue)));
FileMetaData* f3 =
CreateFileMetaData(1, InternalKey("300", 2, kTypeValue),
InternalKey(InternalKey("300", 1, kTypeValue)));
level_files_.push_back(f3);
level_files_.push_back(f2);
level_files_.push_back(f1);
compaction_files_.push_back(f2);
compaction_files_.push_back(f3);
AddBoundaryInputs(icmp_, level_files_, &compaction_files_);
ASSERT_EQ(2, compaction_files_.size());
}
TEST_F(AddBoundaryInputsTest, TestOneBoundaryFiles) {
FileMetaData* f1 =
CreateFileMetaData(1, InternalKey("100", 3, kTypeValue),
InternalKey(InternalKey("100", 2, kTypeValue)));
FileMetaData* f2 =
CreateFileMetaData(1, InternalKey("100", 1, kTypeValue),
InternalKey(InternalKey("200", 3, kTypeValue)));
FileMetaData* f3 =
CreateFileMetaData(1, InternalKey("300", 2, kTypeValue),
InternalKey(InternalKey("300", 1, kTypeValue)));
level_files_.push_back(f3);
level_files_.push_back(f2);
level_files_.push_back(f1);
compaction_files_.push_back(f1);
AddBoundaryInputs(icmp_, level_files_, &compaction_files_);
ASSERT_EQ(2, compaction_files_.size());
ASSERT_EQ(f1, compaction_files_[0]);
ASSERT_EQ(f2, compaction_files_[1]);
}
TEST_F(AddBoundaryInputsTest, TestTwoBoundaryFiles) {
FileMetaData* f1 =
CreateFileMetaData(1, InternalKey("100", 6, kTypeValue),
InternalKey(InternalKey("100", 5, kTypeValue)));
FileMetaData* f2 =
CreateFileMetaData(1, InternalKey("100", 2, kTypeValue),
InternalKey(InternalKey("300", 1, kTypeValue)));
FileMetaData* f3 =
CreateFileMetaData(1, InternalKey("100", 4, kTypeValue),
InternalKey(InternalKey("100", 3, kTypeValue)));
level_files_.push_back(f2);
level_files_.push_back(f3);
level_files_.push_back(f1);
compaction_files_.push_back(f1);
AddBoundaryInputs(icmp_, level_files_, &compaction_files_);
ASSERT_EQ(3, compaction_files_.size());
ASSERT_EQ(f1, compaction_files_[0]);
ASSERT_EQ(f3, compaction_files_[1]);
ASSERT_EQ(f2, compaction_files_[2]);
}
TEST_F(AddBoundaryInputsTest, TestDisjoinFilePointers) {
FileMetaData* f1 =
CreateFileMetaData(1, InternalKey("100", 6, kTypeValue),
InternalKey(InternalKey("100", 5, kTypeValue)));
FileMetaData* f2 =
CreateFileMetaData(1, InternalKey("100", 6, kTypeValue),
InternalKey(InternalKey("100", 5, kTypeValue)));
FileMetaData* f3 =
CreateFileMetaData(1, InternalKey("100", 2, kTypeValue),
InternalKey(InternalKey("300", 1, kTypeValue)));
FileMetaData* f4 =
CreateFileMetaData(1, InternalKey("100", 4, kTypeValue),
InternalKey(InternalKey("100", 3, kTypeValue)));
level_files_.push_back(f2);
level_files_.push_back(f3);
level_files_.push_back(f4);
compaction_files_.push_back(f1);
AddBoundaryInputs(icmp_, level_files_, &compaction_files_);
ASSERT_EQ(3, compaction_files_.size());
ASSERT_EQ(f1, compaction_files_[0]);
ASSERT_EQ(f4, compaction_files_[1]);
ASSERT_EQ(f3, compaction_files_[2]);
}
} | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/db/version_set.cc | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/db/version_set_test.cc | 23e35d792b9154f922b8b575b12596a4d8664c65 |
d6039166-212e-4c5c-a02a-ee5b64913499 | cpp | google/leveldb | memenv | helpers/memenv/memenv.cc | helpers/memenv/memenv_test.cc | #include "helpers/memenv/memenv.h"
#include <cstring>
#include <limits>
#include <map>
#include <string>
#include <vector>
#include "leveldb/env.h"
#include "leveldb/status.h"
#include "port/port.h"
#include "port/thread_annotations.h"
#include "util/mutexlock.h"
namespace leveldb {
namespace {
// An in-memory file: a reference-counted list of heap-allocated fixed-size
// blocks plus a logical byte size.  Block contents/size are guarded by
// blocks_mutex_; the reference count by refs_mutex_.
class FileState {
 public:
  // FileStates are reference counted. The initial reference count is zero
  // and the caller must call Ref() at least once.
  FileState() : refs_(0), size_(0) {}

  // No copying allowed.
  FileState(const FileState&) = delete;
  FileState& operator=(const FileState&) = delete;

  // Increase the reference count.
  void Ref() {
    MutexLock lock(&refs_mutex_);
    ++refs_;
  }

  // Decrease the reference count; deletes this object when it reaches zero.
  void Unref() {
    bool do_delete = false;
    {
      MutexLock lock(&refs_mutex_);
      --refs_;
      assert(refs_ >= 0);
      if (refs_ <= 0) {
        do_delete = true;
      }
    }
    // Deletion happens outside the critical section so the destructor does
    // not run while refs_mutex_ is still held.
    if (do_delete) {
      delete this;
    }
  }

  // Current logical file size in bytes.
  uint64_t Size() const {
    MutexLock lock(&blocks_mutex_);
    return size_;
  }

  // Discards all contents: frees every block and resets the size to zero.
  void Truncate() {
    MutexLock lock(&blocks_mutex_);
    for (char*& block : blocks_) {
      delete[] block;
    }
    blocks_.clear();
    size_ = 0;
  }

  // Copies up to n bytes starting at offset into scratch and points *result
  // at the bytes read.  Reads past EOF are shortened; an offset strictly
  // beyond the file size is an error.
  Status Read(uint64_t offset, size_t n, Slice* result, char* scratch) const {
    MutexLock lock(&blocks_mutex_);
    if (offset > size_) {
      return Status::IOError("Offset greater than file size.");
    }
    const uint64_t available = size_ - offset;
    if (n > available) {
      n = static_cast<size_t>(available);
    }
    if (n == 0) {
      *result = Slice();
      return Status::OK();
    }
    assert(offset / kBlockSize <= std::numeric_limits<size_t>::max());
    size_t block = static_cast<size_t>(offset / kBlockSize);
    size_t block_offset = offset % kBlockSize;
    size_t bytes_to_copy = n;
    char* dst = scratch;
    // Copy block by block; only the first block uses a non-zero offset.
    while (bytes_to_copy > 0) {
      size_t avail = kBlockSize - block_offset;
      if (avail > bytes_to_copy) {
        avail = bytes_to_copy;
      }
      std::memcpy(dst, blocks_[block] + block_offset, avail);
      bytes_to_copy -= avail;
      dst += avail;
      block++;
      block_offset = 0;
    }
    *result = Slice(scratch, n);
    return Status::OK();
  }

  // Appends data at the end of the file, allocating new blocks as needed.
  Status Append(const Slice& data) {
    const char* src = data.data();
    size_t src_len = data.size();
    MutexLock lock(&blocks_mutex_);
    while (src_len > 0) {
      size_t avail;
      size_t offset = size_ % kBlockSize;
      if (offset != 0) {
        // There is some room in the last block.
        avail = kBlockSize - offset;
      } else {
        // The last block is full (or there are no blocks); add a new one.
        blocks_.push_back(new char[kBlockSize]);
        avail = kBlockSize;
      }
      if (avail > src_len) {
        avail = src_len;
      }
      std::memcpy(blocks_.back() + offset, src, avail);
      src_len -= avail;
      src += avail;
      size_ += avail;
    }
    return Status::OK();
  }

 private:
  enum { kBlockSize = 8 * 1024 };  // Allocation granularity for file data.

  // Private since only Unref() should be used to delete it.
  ~FileState() { Truncate(); }

  port::Mutex refs_mutex_;
  int refs_ GUARDED_BY(refs_mutex_);

  mutable port::Mutex blocks_mutex_;
  std::vector<char*> blocks_ GUARDED_BY(blocks_mutex_);
  uint64_t size_ GUARDED_BY(blocks_mutex_);
};
// SequentialFile backed by an in-memory FileState; maintains a read cursor.
class SequentialFileImpl : public SequentialFile {
 public:
  explicit SequentialFileImpl(FileState* file) : file_(file), pos_(0) {
    file_->Ref();
  }

  ~SequentialFileImpl() override { file_->Unref(); }

  // Reads up to |n| bytes at the cursor and advances it by the bytes read.
  Status Read(size_t n, Slice* result, char* scratch) override {
    const Status status = file_->Read(pos_, n, result, scratch);
    if (status.ok()) pos_ += result->size();
    return status;
  }

  // Advances the cursor by |n| bytes, clamped at end-of-file.
  Status Skip(uint64_t n) override {
    const uint64_t file_size = file_->Size();
    if (pos_ > file_size) {
      return Status::IOError("pos_ > file_->Size()");
    }
    const uint64_t remaining = file_size - pos_;
    pos_ += (n > remaining) ? remaining : n;
    return Status::OK();
  }

 private:
  FileState* file_;  // Ref-counted; released in the destructor.
  uint64_t pos_;     // Current read offset.
};
// RandomAccessFile backed by an in-memory FileState.
class RandomAccessFileImpl : public RandomAccessFile {
 public:
  explicit RandomAccessFileImpl(FileState* file) : file_(file) { file_->Ref(); }

  ~RandomAccessFileImpl() override { file_->Unref(); }

  // Reads are positional, so this simply forwards to the shared FileState.
  Status Read(uint64_t offset, size_t n, Slice* result,
              char* scratch) const override {
    return file_->Read(offset, n, result, scratch);
  }

 private:
  FileState* file_;  // Ref-counted; released in the destructor.
};
// WritableFile that appends to a shared, ref-counted in-memory FileState.
class WritableFileImpl : public WritableFile {
 public:
  // explicit prevents an accidental implicit FileState* -> WritableFileImpl
  // conversion, matching SequentialFileImpl and RandomAccessFileImpl.
  explicit WritableFileImpl(FileState* file) : file_(file) { file_->Ref(); }

  ~WritableFileImpl() override { file_->Unref(); }

  Status Append(const Slice& data) override { return file_->Append(data); }

  // Contents already live in memory, so Close/Flush/Sync have nothing to do.
  Status Close() override { return Status::OK(); }
  Status Flush() override { return Status::OK(); }
  Status Sync() override { return Status::OK(); }

 private:
  FileState* file_;  // Ref-counted; released in the destructor.
};
// Logger that discards every message; used when no log output is wanted.
class NoOpLogger : public Logger {
 public:
  void Logv(const char* format, std::va_list ap) override {}
};
// Env implementation that keeps all file contents in memory, delegating
// everything else (scheduling, timing, ...) to the wrapped base Env.
class InMemoryEnv : public EnvWrapper {
 public:
  explicit InMemoryEnv(Env* base_env) : EnvWrapper(base_env) {}

  ~InMemoryEnv() override {
    // Release this Env's reference on every live file.
    for (const auto& kvp : file_map_) {
      kvp.second->Unref();
    }
  }

  Status NewSequentialFile(const std::string& fname,
                           SequentialFile** result) override {
    MutexLock lock(&mutex_);
    if (file_map_.find(fname) == file_map_.end()) {
      *result = nullptr;
      return Status::IOError(fname, "File not found");
    }
    *result = new SequentialFileImpl(file_map_[fname]);
    return Status::OK();
  }

  Status NewRandomAccessFile(const std::string& fname,
                             RandomAccessFile** result) override {
    MutexLock lock(&mutex_);
    if (file_map_.find(fname) == file_map_.end()) {
      *result = nullptr;
      return Status::IOError(fname, "File not found");
    }
    *result = new RandomAccessFileImpl(file_map_[fname]);
    return Status::OK();
  }

  Status NewWritableFile(const std::string& fname,
                         WritableFile** result) override {
    MutexLock lock(&mutex_);
    FileSystem::iterator it = file_map_.find(fname);

    FileState* file;
    if (it == file_map_.end()) {
      // File is not currently open.
      file = new FileState();
      file->Ref();
      file_map_[fname] = file;
    } else {
      // Writable files are truncated on open.
      file = it->second;
      file->Truncate();
    }

    *result = new WritableFileImpl(file);
    return Status::OK();
  }

  Status NewAppendableFile(const std::string& fname,
                           WritableFile** result) override {
    MutexLock lock(&mutex_);
    FileState** sptr = &file_map_[fname];
    FileState* file = *sptr;
    if (file == nullptr) {
      file = new FileState();
      file->Ref();
      // BUGFIX: record the new file in the map.  Previously the map entry
      // was left as nullptr, so the appended data was unreachable and a
      // later GetFileSize()/open on the same name dereferenced nullptr.
      *sptr = file;
    }

    *result = new WritableFileImpl(file);
    return Status::OK();
  }

  bool FileExists(const std::string& fname) override {
    MutexLock lock(&mutex_);
    return file_map_.find(fname) != file_map_.end();
  }

  Status GetChildren(const std::string& dir,
                     std::vector<std::string>* result) override {
    MutexLock lock(&mutex_);
    result->clear();
    for (const auto& kvp : file_map_) {
      const std::string& filename = kvp.first;
      // A child is any filename of the form "<dir>/<basename>".
      if (filename.size() >= dir.size() + 1 && filename[dir.size()] == '/' &&
          Slice(filename).starts_with(Slice(dir))) {
        result->push_back(filename.substr(dir.size() + 1));
      }
    }
    return Status::OK();
  }

  // Removes fname from the map and drops its reference.  No-op when the
  // file does not exist.  Caller must hold mutex_.
  void RemoveFileInternal(const std::string& fname)
      EXCLUSIVE_LOCKS_REQUIRED(mutex_) {
    if (file_map_.find(fname) == file_map_.end()) {
      return;
    }
    file_map_[fname]->Unref();
    file_map_.erase(fname);
  }

  Status RemoveFile(const std::string& fname) override {
    MutexLock lock(&mutex_);
    if (file_map_.find(fname) == file_map_.end()) {
      return Status::IOError(fname, "File not found");
    }
    RemoveFileInternal(fname);
    return Status::OK();
  }

  // Directories are implicit in this Env; creation/removal are no-ops.
  Status CreateDir(const std::string& dirname) override { return Status::OK(); }

  Status RemoveDir(const std::string& dirname) override { return Status::OK(); }

  Status GetFileSize(const std::string& fname, uint64_t* file_size) override {
    MutexLock lock(&mutex_);
    if (file_map_.find(fname) == file_map_.end()) {
      return Status::IOError(fname, "File not found");
    }
    *file_size = file_map_[fname]->Size();
    return Status::OK();
  }

  Status RenameFile(const std::string& src,
                    const std::string& target) override {
    MutexLock lock(&mutex_);
    if (file_map_.find(src) == file_map_.end()) {
      return Status::IOError(src, "File not found");
    }
    // Rename silently replaces an existing target.
    RemoveFileInternal(target);
    file_map_[target] = file_map_[src];
    file_map_.erase(src);
    return Status::OK();
  }

  // File locks are process-local no-ops for an in-memory Env.
  Status LockFile(const std::string& fname, FileLock** lock) override {
    *lock = new FileLock;
    return Status::OK();
  }

  Status UnlockFile(FileLock* lock) override {
    delete lock;
    return Status::OK();
  }

  Status GetTestDirectory(std::string* path) override {
    *path = "/test";
    return Status::OK();
  }

  Status NewLogger(const std::string& fname, Logger** result) override {
    *result = new NoOpLogger;
    return Status::OK();
  }

 private:
  // Map from filenames to in-memory file contents.
  typedef std::map<std::string, FileState*> FileSystem;

  port::Mutex mutex_;
  FileSystem file_map_ GUARDED_BY(mutex_);
};
}
// Returns a new Env that stores its data in memory and delegates all other
// work to base_env.  The caller must delete the result and must ensure that
// base_env outlives it.
Env* NewMemEnv(Env* base_env) { return new InMemoryEnv(base_env); }
} | #include "helpers/memenv/memenv.h"
#include <string>
#include <vector>
#include "gtest/gtest.h"
#include "db/db_impl.h"
#include "leveldb/db.h"
#include "leveldb/env.h"
#include "util/testutil.h"
namespace leveldb {
// Fixture that creates a fresh in-memory Env for each test case and deletes
// it afterwards.
class MemEnvTest : public testing::Test {
 public:
  MemEnvTest() : env_(NewMemEnv(Env::Default())) {}
  ~MemEnvTest() { delete env_; }

  Env* env_;
};
TEST_F(MemEnvTest, Basics) {
uint64_t file_size;
WritableFile* writable_file;
std::vector<std::string> children;
ASSERT_LEVELDB_OK(env_->CreateDir("/dir"));
ASSERT_TRUE(!env_->FileExists("/dir/non_existent"));
ASSERT_TRUE(!env_->GetFileSize("/dir/non_existent", &file_size).ok());
ASSERT_LEVELDB_OK(env_->GetChildren("/dir", &children));
ASSERT_EQ(0, children.size());
ASSERT_LEVELDB_OK(env_->NewWritableFile("/dir/f", &writable_file));
ASSERT_LEVELDB_OK(env_->GetFileSize("/dir/f", &file_size));
ASSERT_EQ(0, file_size);
delete writable_file;
ASSERT_TRUE(env_->FileExists("/dir/f"));
ASSERT_LEVELDB_OK(env_->GetFileSize("/dir/f", &file_size));
ASSERT_EQ(0, file_size);
ASSERT_LEVELDB_OK(env_->GetChildren("/dir", &children));
ASSERT_EQ(1, children.size());
ASSERT_EQ("f", children[0]);
ASSERT_LEVELDB_OK(env_->NewWritableFile("/dir/f", &writable_file));
ASSERT_LEVELDB_OK(writable_file->Append("abc"));
delete writable_file;
ASSERT_LEVELDB_OK(env_->NewAppendableFile("/dir/f", &writable_file));
ASSERT_LEVELDB_OK(env_->GetFileSize("/dir/f", &file_size));
ASSERT_EQ(3, file_size);
ASSERT_LEVELDB_OK(writable_file->Append("hello"));
delete writable_file;
ASSERT_LEVELDB_OK(env_->GetFileSize("/dir/f", &file_size));
ASSERT_EQ(8, file_size);
ASSERT_TRUE(!env_->RenameFile("/dir/non_existent", "/dir/g").ok());
ASSERT_LEVELDB_OK(env_->RenameFile("/dir/f", "/dir/g"));
ASSERT_TRUE(!env_->FileExists("/dir/f"));
ASSERT_TRUE(env_->FileExists("/dir/g"));
ASSERT_LEVELDB_OK(env_->GetFileSize("/dir/g", &file_size));
ASSERT_EQ(8, file_size);
SequentialFile* seq_file;
RandomAccessFile* rand_file;
ASSERT_TRUE(!env_->NewSequentialFile("/dir/non_existent", &seq_file).ok());
ASSERT_TRUE(!seq_file);
ASSERT_TRUE(!env_->NewRandomAccessFile("/dir/non_existent", &rand_file).ok());
ASSERT_TRUE(!rand_file);
ASSERT_TRUE(!env_->RemoveFile("/dir/non_existent").ok());
ASSERT_LEVELDB_OK(env_->RemoveFile("/dir/g"));
ASSERT_TRUE(!env_->FileExists("/dir/g"));
ASSERT_LEVELDB_OK(env_->GetChildren("/dir", &children));
ASSERT_EQ(0, children.size());
ASSERT_LEVELDB_OK(env_->RemoveDir("/dir"));
}
TEST_F(MemEnvTest, ReadWrite) {
WritableFile* writable_file;
SequentialFile* seq_file;
RandomAccessFile* rand_file;
Slice result;
char scratch[100];
ASSERT_LEVELDB_OK(env_->CreateDir("/dir"));
ASSERT_LEVELDB_OK(env_->NewWritableFile("/dir/f", &writable_file));
ASSERT_LEVELDB_OK(writable_file->Append("hello "));
ASSERT_LEVELDB_OK(writable_file->Append("world"));
delete writable_file;
ASSERT_LEVELDB_OK(env_->NewSequentialFile("/dir/f", &seq_file));
ASSERT_LEVELDB_OK(seq_file->Read(5, &result, scratch));
ASSERT_EQ(0, result.compare("hello"));
ASSERT_LEVELDB_OK(seq_file->Skip(1));
ASSERT_LEVELDB_OK(seq_file->Read(1000, &result, scratch));
ASSERT_EQ(0, result.compare("world"));
ASSERT_LEVELDB_OK(
seq_file->Read(1000, &result, scratch));
ASSERT_EQ(0, result.size());
ASSERT_LEVELDB_OK(seq_file->Skip(100));
ASSERT_LEVELDB_OK(seq_file->Read(1000, &result, scratch));
ASSERT_EQ(0, result.size());
delete seq_file;
ASSERT_LEVELDB_OK(env_->NewRandomAccessFile("/dir/f", &rand_file));
ASSERT_LEVELDB_OK(rand_file->Read(6, 5, &result, scratch));
ASSERT_EQ(0, result.compare("world"));
ASSERT_LEVELDB_OK(rand_file->Read(0, 5, &result, scratch));
ASSERT_EQ(0, result.compare("hello"));
ASSERT_LEVELDB_OK(rand_file->Read(10, 100, &result, scratch));
ASSERT_EQ(0, result.compare("d"));
ASSERT_TRUE(!rand_file->Read(1000, 5, &result, scratch).ok());
delete rand_file;
}
TEST_F(MemEnvTest, Locks) {
FileLock* lock;
ASSERT_LEVELDB_OK(env_->LockFile("some file", &lock));
ASSERT_LEVELDB_OK(env_->UnlockFile(lock));
}
TEST_F(MemEnvTest, Misc) {
std::string test_dir;
ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir));
ASSERT_TRUE(!test_dir.empty());
WritableFile* writable_file;
ASSERT_LEVELDB_OK(env_->NewWritableFile("/a/b", &writable_file));
ASSERT_LEVELDB_OK(writable_file->Sync());
ASSERT_LEVELDB_OK(writable_file->Flush());
ASSERT_LEVELDB_OK(writable_file->Close());
delete writable_file;
}
TEST_F(MemEnvTest, LargeWrite) {
const size_t kWriteSize = 300 * 1024;
char* scratch = new char[kWriteSize * 2];
std::string write_data;
for (size_t i = 0; i < kWriteSize; ++i) {
write_data.append(1, static_cast<char>(i));
}
WritableFile* writable_file;
ASSERT_LEVELDB_OK(env_->NewWritableFile("/dir/f", &writable_file));
ASSERT_LEVELDB_OK(writable_file->Append("foo"));
ASSERT_LEVELDB_OK(writable_file->Append(write_data));
delete writable_file;
SequentialFile* seq_file;
Slice result;
ASSERT_LEVELDB_OK(env_->NewSequentialFile("/dir/f", &seq_file));
ASSERT_LEVELDB_OK(seq_file->Read(3, &result, scratch));
ASSERT_EQ(0, result.compare("foo"));
size_t read = 0;
std::string read_data;
while (read < kWriteSize) {
ASSERT_LEVELDB_OK(seq_file->Read(kWriteSize - read, &result, scratch));
read_data.append(result.data(), result.size());
read += result.size();
}
ASSERT_TRUE(write_data == read_data);
delete seq_file;
delete[] scratch;
}
TEST_F(MemEnvTest, OverwriteOpenFile) {
const char kWrite1Data[] = "Write #1 data";
const size_t kFileDataLen = sizeof(kWrite1Data) - 1;
const std::string kTestFileName = testing::TempDir() + "leveldb-TestFile.dat";
ASSERT_LEVELDB_OK(WriteStringToFile(env_, kWrite1Data, kTestFileName));
RandomAccessFile* rand_file;
ASSERT_LEVELDB_OK(env_->NewRandomAccessFile(kTestFileName, &rand_file));
const char kWrite2Data[] = "Write #2 data";
ASSERT_LEVELDB_OK(WriteStringToFile(env_, kWrite2Data, kTestFileName));
Slice result;
char scratch[kFileDataLen];
ASSERT_LEVELDB_OK(rand_file->Read(0, kFileDataLen, &result, scratch));
ASSERT_EQ(0, result.compare(kWrite2Data));
delete rand_file;
}
TEST_F(MemEnvTest, DBTest) {
Options options;
options.create_if_missing = true;
options.env = env_;
DB* db;
const Slice keys[] = {Slice("aaa"), Slice("bbb"), Slice("ccc")};
const Slice vals[] = {Slice("foo"), Slice("bar"), Slice("baz")};
ASSERT_LEVELDB_OK(DB::Open(options, "/dir/db", &db));
for (size_t i = 0; i < 3; ++i) {
ASSERT_LEVELDB_OK(db->Put(WriteOptions(), keys[i], vals[i]));
}
for (size_t i = 0; i < 3; ++i) {
std::string res;
ASSERT_LEVELDB_OK(db->Get(ReadOptions(), keys[i], &res));
ASSERT_TRUE(res == vals[i]);
}
Iterator* iterator = db->NewIterator(ReadOptions());
iterator->SeekToFirst();
for (size_t i = 0; i < 3; ++i) {
ASSERT_TRUE(iterator->Valid());
ASSERT_TRUE(keys[i] == iterator->key());
ASSERT_TRUE(vals[i] == iterator->value());
iterator->Next();
}
ASSERT_TRUE(!iterator->Valid());
delete iterator;
DBImpl* dbi = reinterpret_cast<DBImpl*>(db);
ASSERT_LEVELDB_OK(dbi->TEST_CompactMemTable());
for (size_t i = 0; i < 3; ++i) {
std::string res;
ASSERT_LEVELDB_OK(db->Get(ReadOptions(), keys[i], &res));
ASSERT_TRUE(res == vals[i]);
}
delete db;
}
} | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/helpers/memenv/memenv.cc | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/helpers/memenv/memenv_test.cc | 23e35d792b9154f922b8b575b12596a4d8664c65 |
03550b38-ce64-42b1-8eb3-be6022a8b556 | cpp | google/leveldb | no_destructor | util/no_destructor.h | util/no_destructor_test.cc | #ifndef STORAGE_LEVELDB_UTIL_NO_DESTRUCTOR_H_
#define STORAGE_LEVELDB_UTIL_NO_DESTRUCTOR_H_
#include <new>
#include <type_traits>
#include <utility>
namespace leveldb {
// Wraps an instance whose destructor is never called.
//
// This is intended for use with function-level static variables: the wrapped
// instance is constructed in place inside this object's storage and is never
// destroyed, so no destructor runs at program exit.
template <typename InstanceType>
class NoDestructor {
 public:
  // Constructs the wrapped instance in place, perfectly forwarding the
  // given constructor arguments.
  template <typename... ConstructorArgTypes>
  explicit NoDestructor(ConstructorArgTypes&&... constructor_args) {
    static_assert(sizeof(instance_storage_) >= sizeof(InstanceType),
                  "instance_storage_ is not large enough to hold the instance");
    new (&instance_storage_)
        InstanceType(std::forward<ConstructorArgTypes>(constructor_args)...);
  }

  ~NoDestructor() = default;

  NoDestructor(const NoDestructor&) = delete;
  NoDestructor& operator=(const NoDestructor&) = delete;

  // Returns a pointer to the wrapped instance.
  InstanceType* get() {
    return reinterpret_cast<InstanceType*>(&instance_storage_);
  }

 private:
  // Raw aligned byte buffer instead of std::aligned_storage, which is
  // deprecated in C++23.  alignas(InstanceType) guarantees the alignment
  // requirement the old static_assert used to check.
  alignas(InstanceType) char instance_storage_[sizeof(InstanceType)];
};
}
#endif | #include "util/no_destructor.h"
#include <cstdint>
#include <cstdlib>
#include <utility>
#include "gtest/gtest.h"
namespace leveldb {
namespace {
// Instrumented payload type: aborts the process if its destructor ever runs,
// so the tests below fail loudly if NoDestructor destroys its instance.
struct DoNotDestruct {
 public:
  DoNotDestruct(uint32_t a, uint64_t b) : a(a), b(b) {}
  ~DoNotDestruct() { std::abort(); }

  // Used to check that constructor arguments are forwarded correctly.
  uint32_t a;
  uint64_t b;
};
constexpr const uint32_t kGoldenA = 0xdeadbeef;
constexpr const uint64_t kGoldenB = 0xaabbccddeeffaabb;
}
// A stack-allocated NoDestructor must forward both constructor arguments and
// must not run the payload destructor at scope exit (which would abort).
TEST(NoDestructorTest, StackInstance) {
  NoDestructor<DoNotDestruct> instance(kGoldenA, kGoldenB);
  ASSERT_EQ(kGoldenA, instance.get()->a);
  ASSERT_EQ(kGoldenB, instance.get()->b);
}
// Same as StackInstance but for a function-level static, the intended usage:
// the payload destructor must never run, including at program exit.
TEST(NoDestructorTest, StaticInstance) {
  static NoDestructor<DoNotDestruct> instance(kGoldenA, kGoldenB);
  ASSERT_EQ(kGoldenA, instance.get()->a);
  ASSERT_EQ(kGoldenB, instance.get()->b);
}
} | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/util/no_destructor.h | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/util/no_destructor_test.cc | 23e35d792b9154f922b8b575b12596a4d8664c65 |
e10933de-725f-4497-af94-a8296041a38b | cpp | google/leveldb | db | include/leveldb/db.h | db/db_test.cc | #ifndef STORAGE_LEVELDB_INCLUDE_DB_H_
#define STORAGE_LEVELDB_INCLUDE_DB_H_
#include <cstdint>
#include <cstdio>
#include "leveldb/export.h"
#include "leveldb/iterator.h"
#include "leveldb/options.h"
namespace leveldb {
static const int kMajorVersion = 1;
static const int kMinorVersion = 23;
struct Options;
struct ReadOptions;
struct WriteOptions;
class WriteBatch;
// Abstract handle to a particular state of a DB.  Obtained from
// DB::GetSnapshot() and released with DB::ReleaseSnapshot(); never deleted
// directly by callers (the destructor is protected).
class LEVELDB_EXPORT Snapshot {
 protected:
  virtual ~Snapshot();
};
// A range of keys: [start, limit).  Used by DB::GetApproximateSizes().
struct LEVELDB_EXPORT Range {
  Range() = default;
  Range(const Slice& s, const Slice& l) : start(s), limit(l) {}

  Slice start;  // Included in the range
  Slice limit;  // Not included in the range
};
// A DB is a persistent ordered map from keys to values.
class LEVELDB_EXPORT DB {
 public:
  // Open the database with the specified "name".  Stores a pointer to a
  // heap-allocated database in *dbptr and returns OK on success.  The caller
  // owns *dbptr and must delete it when it is no longer needed.
  static Status Open(const Options& options, const std::string& name,
                     DB** dbptr);

  DB() = default;

  // DB handles are not copyable.
  DB(const DB&) = delete;
  DB& operator=(const DB&) = delete;

  virtual ~DB();

  // Set the database entry for "key" to "value".  Returns OK on success,
  // and a non-OK status on error.
  virtual Status Put(const WriteOptions& options, const Slice& key,
                     const Slice& value) = 0;

  // Remove the database entry (if any) for "key".
  virtual Status Delete(const WriteOptions& options, const Slice& key) = 0;

  // Apply the specified batch of updates to the database.
  virtual Status Write(const WriteOptions& options, WriteBatch* updates) = 0;

  // If the database contains an entry for "key", store the corresponding
  // value in *value and return OK; otherwise return a non-OK status.
  virtual Status Get(const ReadOptions& options, const Slice& key,
                     std::string* value) = 0;

  // Return a heap-allocated iterator over the contents of the database.
  // The caller must delete the iterator before this DB is deleted.
  virtual Iterator* NewIterator(const ReadOptions& options) = 0;

  // Return a handle to the current DB state; must be released with
  // ReleaseSnapshot() when no longer needed.
  virtual const Snapshot* GetSnapshot() = 0;

  // Release a previously acquired snapshot.
  virtual void ReleaseSnapshot(const Snapshot* snapshot) = 0;

  // DB implementations may export properties about their state via this
  // method.  Returns true and fills *value if "property" is recognized.
  virtual bool GetProperty(const Slice& property, std::string* value) = 0;

  // For each of the n ranges [range[i].start .. range[i].limit), store in
  // sizes[i] the approximate storage space used by keys in that range.
  virtual void GetApproximateSizes(const Range* range, int n,
                                   uint64_t* sizes) = 0;

  // Compact the underlying storage for the key range [*begin, *end].
  // begin == nullptr is treated as a key before all keys; end == nullptr
  // as a key after all keys.
  virtual void CompactRange(const Slice* begin, const Slice* end) = 0;
};
// Destroy the contents of the specified database.
// Be very careful using this method.
LEVELDB_EXPORT Status DestroyDB(const std::string& name,
                                const Options& options);
// If a DB cannot be opened, this may be called to attempt to recover as
// much of its contents as possible.  Some data may be lost; use with care
// on databases that contain important information.
LEVELDB_EXPORT Status RepairDB(const std::string& dbname,
                               const Options& options);
#endif | #include "leveldb/db.h"
#include <atomic>
#include <cinttypes>
#include <string>
#include "gtest/gtest.h"
#include "db/db_impl.h"
#include "db/filename.h"
#include "db/version_set.h"
#include "db/write_batch_internal.h"
#include "leveldb/cache.h"
#include "leveldb/env.h"
#include "leveldb/filter_policy.h"
#include "leveldb/table.h"
#include "port/port.h"
#include "port/thread_annotations.h"
#include "util/hash.h"
#include "util/logging.h"
#include "util/mutexlock.h"
#include "util/testutil.h"
namespace leveldb {
// Returns a random string of exactly len bytes generated from *rnd.
static std::string RandomString(Random* rnd, int len) {
  std::string r;
  test::RandomString(rnd, len, &r);
  return r;
}
// Returns a random key with a skewed length distribution: length 1 about a
// third of the time, rarely (1/100) drawn from Skewed(10), otherwise from
// Uniform(10).
static std::string RandomKey(Random* rnd) {
  int len =
      (rnd->OneIn(3) ? 1
                     : (rnd->OneIn(100) ? rnd->Skewed(10) : rnd->Uniform(10)));
  return test::RandomKey(rnd, len);
}
namespace {
class AtomicCounter {
public:
AtomicCounter() : count_(0) {}
void Increment() { IncrementBy(1); }
void IncrementBy(int count) LOCKS_EXCLUDED(mu_) {
MutexLock l(&mu_);
count_ += count;
}
int Read() LOCKS_EXCLUDED(mu_) {
MutexLock l(&mu_);
return count_;
}
void Reset() LOCKS_EXCLUDED(mu_) {
MutexLock l(&mu_);
count_ = 0;
}
private:
port::Mutex mu_;
int count_ GUARDED_BY(mu_);
};
// Sleeps the calling thread for approximately |millis| milliseconds.
void DelayMilliseconds(int millis) {
  Env::Default()->SleepForMicroseconds(millis * 1000);
}
// Returns true if |f| names an on-disk table file (contains ".ldb").
// std::string::find replaces strstr(), which needed <cstring> that this
// file does not include.
bool IsLdbFile(const std::string& f) {
  return f.find(".ldb") != std::string::npos;
}
// Returns true if |f| names a write-ahead log file (contains ".log").
// std::string::find replaces strstr(), which needed <cstring> that this
// file does not include.
bool IsLogFile(const std::string& f) {
  return f.find(".log") != std::string::npos;
}
// Returns true if |f| names a manifest file (contains "MANIFEST").
// std::string::find replaces strstr(), which needed <cstring> that this
// file does not include.
bool IsManifestFile(const std::string& f) {
  return f.find("MANIFEST") != std::string::npos;
}
}
// Env wrapper used by tests; can filter "." and ".." out of GetChildren()
// results to simulate file systems that do not report those entries.
class TestEnv : public EnvWrapper {
 public:
  explicit TestEnv(Env* base) : EnvWrapper(base), ignore_dot_files_(false) {}

  // When true, subsequent GetChildren() calls omit "." and "..".
  void SetIgnoreDotFiles(bool ignored) { ignore_dot_files_ = ignored; }

  Status GetChildren(const std::string& dir,
                     std::vector<std::string>* result) override {
    Status s = target()->GetChildren(dir, result);
    if (!s.ok() || !ignore_dot_files_) {
      return s;
    }
    // Strip "." and ".." entries in place.
    std::vector<std::string>::iterator it = result->begin();
    while (it != result->end()) {
      if ((*it == ".") || (*it == "..")) {
        it = result->erase(it);
      } else {
        ++it;
      }
    }
    return s;
  }

 private:
  bool ignore_dot_files_;
};
// Env wrapper that injects controllable faults (write/sync errors,
// delays, simulated "disk full") and instrumentation (random-read
// counting) so tests can exercise the DB's error-handling paths.
class SpecialEnv : public EnvWrapper {
 public:
  // The flags below are flipped by the test thread and polled by file
  // wrappers that may run on the background compaction thread, hence
  // the atomics with acquire/release ordering.
  std::atomic<bool> delay_data_sync_;      // Stall data-file Sync() while set.
  std::atomic<bool> data_sync_error_;      // Fail data-file Sync().
  std::atomic<bool> no_space_;             // Silently drop data-file Append()s.
  std::atomic<bool> non_writable_;         // Fail every NewWritableFile().
  std::atomic<bool> manifest_sync_error_;  // Fail MANIFEST Sync().
  std::atomic<bool> manifest_write_error_; // Fail MANIFEST Append().
  std::atomic<bool> log_file_close_;       // Fail log-file Close().
  bool count_random_reads_;                // Wrap random-access files in a counter.
  AtomicCounter random_read_counter_;      // Number of random reads issued.
  explicit SpecialEnv(Env* base)
      : EnvWrapper(base),
        delay_data_sync_(false),
        data_sync_error_(false),
        no_space_(false),
        non_writable_(false),
        manifest_sync_error_(false),
        manifest_write_error_(false),
        log_file_close_(false),
        count_random_reads_(false) {}
  // Creates a writable file, wrapping table/log files and MANIFEST
  // files in fault-injecting proxies driven by the flags above.
  Status NewWritableFile(const std::string& f, WritableFile** r) {
    // Proxy for ldb/log files: simulates sync errors/delays, "no
    // space" (writes dropped but reported OK), and log Close errors.
    class DataFile : public WritableFile {
     private:
      SpecialEnv* const env_;
      WritableFile* const base_;
      const std::string fname_;

     public:
      DataFile(SpecialEnv* env, WritableFile* base, const std::string& fname)
          : env_(env), base_(base), fname_(fname) {}
      ~DataFile() { delete base_; }  // Owns the wrapped file.
      Status Append(const Slice& data) {
        if (env_->no_space_.load(std::memory_order_acquire)) {
          // Drop writes on the floor: simulates a full disk without
          // surfacing an error to the caller.
          return Status::OK();
        } else {
          return base_->Append(data);
        }
      }
      Status Close() {
        Status s = base_->Close();
        if (s.ok() && IsLogFile(fname_) &&
            env_->log_file_close_.load(std::memory_order_acquire)) {
          s = Status::IOError("simulated log file Close error");
        }
        return s;
      }
      Status Flush() { return base_->Flush(); }
      Status Sync() {
        if (env_->data_sync_error_.load(std::memory_order_acquire)) {
          return Status::IOError("simulated data sync error");
        }
        // Busy-wait until the test clears the delay flag; used to pin
        // data in the immutable memtable during a read.
        while (env_->delay_data_sync_.load(std::memory_order_acquire)) {
          DelayMilliseconds(100);
        }
        return base_->Sync();
      }
    };
    // Proxy for MANIFEST files: simulates write and sync errors.
    class ManifestFile : public WritableFile {
     private:
      SpecialEnv* env_;
      WritableFile* base_;

     public:
      ManifestFile(SpecialEnv* env, WritableFile* b) : env_(env), base_(b) {}
      ~ManifestFile() { delete base_; }  // Owns the wrapped file.
      Status Append(const Slice& data) {
        if (env_->manifest_write_error_.load(std::memory_order_acquire)) {
          return Status::IOError("simulated writer error");
        } else {
          return base_->Append(data);
        }
      }
      Status Close() { return base_->Close(); }
      Status Flush() { return base_->Flush(); }
      Status Sync() {
        if (env_->manifest_sync_error_.load(std::memory_order_acquire)) {
          return Status::IOError("simulated sync error");
        } else {
          return base_->Sync();
        }
      }
    };
    if (non_writable_.load(std::memory_order_acquire)) {
      return Status::IOError("simulated write error");
    }
    Status s = target()->NewWritableFile(f, r);
    if (s.ok()) {
      // Only wrap file types the tests care about; others pass through.
      if (IsLdbFile(f) || IsLogFile(f)) {
        *r = new DataFile(this, *r, f);
      } else if (IsManifestFile(f)) {
        *r = new ManifestFile(this, *r);
      }
    }
    return s;
  }
  // Creates a random-access file, optionally counting every Read()
  // (used by tests that verify seek/read amplification).
  Status NewRandomAccessFile(const std::string& f, RandomAccessFile** r) {
    class CountingFile : public RandomAccessFile {
     private:
      RandomAccessFile* target_;
      AtomicCounter* counter_;

     public:
      CountingFile(RandomAccessFile* target, AtomicCounter* counter)
          : target_(target), counter_(counter) {}
      ~CountingFile() override { delete target_; }  // Owns the wrapped file.
      Status Read(uint64_t offset, size_t n, Slice* result,
                  char* scratch) const override {
        counter_->Increment();
        return target_->Read(offset, n, result, scratch);
      }
    };
    Status s = target()->NewRandomAccessFile(f, r);
    if (s.ok() && count_random_reads_) {
      *r = new CountingFile(*r, &random_read_counter_);
    }
    return s;
  }
};
// Test fixture: owns a scratch database (destroyed and recreated per
// test) backed by a SpecialEnv, and provides helpers for mutating and
// inspecting the DB.  ChangeOptions() lets a test body re-run under
// several option configurations.
class DBTest : public testing::Test {
 public:
  std::string dbname_;   // On-disk path of the scratch database.
  SpecialEnv* env_;      // Fault-injecting Env; owned by the fixture.
  DB* db_;               // Currently open database, or nullptr.
  Options last_options_; // Options used by the most recent (re)open.

  DBTest() : env_(new SpecialEnv(Env::Default())), option_config_(kDefault) {
    filter_policy_ = NewBloomFilterPolicy(10);
    dbname_ = testing::TempDir() + "db_test";
    DestroyDB(dbname_, Options());  // Start from a clean slate.
    db_ = nullptr;
    Reopen();
  }
  ~DBTest() {
    delete db_;
    DestroyDB(dbname_, Options());
    delete env_;
    delete filter_policy_;
  }
  // Advances to the next option configuration (reuse-logs, bloom
  // filter, no compression).  Returns false once all have been tried.
  bool ChangeOptions() {
    option_config_++;
    if (option_config_ >= kEnd) {
      return false;
    } else {
      DestroyAndReopen();
      return true;
    }
  }
  // Builds the Options corresponding to the current configuration.
  Options CurrentOptions() {
    Options options;
    options.reuse_logs = false;
    switch (option_config_) {
      case kReuse:
        options.reuse_logs = true;
        break;
      case kFilter:
        options.filter_policy = filter_policy_;
        break;
      case kUncompressed:
        options.compression = kNoCompression;
        break;
      default:
        break;
    }
    return options;
  }
  // Access to the concrete implementation for TEST_* hooks.
  DBImpl* dbfull() { return reinterpret_cast<DBImpl*>(db_); }
  void Reopen(Options* options = nullptr) {
    ASSERT_LEVELDB_OK(TryReopen(options));
  }
  void Close() {
    delete db_;
    db_ = nullptr;
  }
  // Wipes the on-disk state and reopens with the given options (or the
  // current configuration's defaults).
  void DestroyAndReopen(Options* options = nullptr) {
    delete db_;
    db_ = nullptr;
    DestroyDB(dbname_, Options());
    ASSERT_LEVELDB_OK(TryReopen(options));
  }
  Status TryReopen(Options* options) {
    delete db_;
    db_ = nullptr;
    Options opts;
    if (options != nullptr) {
      opts = *options;
    } else {
      opts = CurrentOptions();
      opts.create_if_missing = true;
    }
    last_options_ = opts;
    return DB::Open(opts, dbname_, &db_);
  }
  Status Put(const std::string& k, const std::string& v) {
    return db_->Put(WriteOptions(), k, v);
  }
  Status Delete(const std::string& k) { return db_->Delete(WriteOptions(), k); }
  // Reads `k` (optionally at a snapshot); maps a missing key to the
  // sentinel "NOT_FOUND" and other errors to their string form.
  std::string Get(const std::string& k, const Snapshot* snapshot = nullptr) {
    ReadOptions options;
    options.snapshot = snapshot;
    std::string result;
    Status s = db_->Get(options, k, &result);
    if (s.IsNotFound()) {
      result = "NOT_FOUND";
    } else if (!s.ok()) {
      result = s.ToString();
    }
    return result;
  }
  // Returns "(k1->v1)(k2->v2)..." for the whole DB, and additionally
  // cross-checks that backward iteration mirrors forward iteration.
  std::string Contents() {
    std::vector<std::string> forward;
    std::string result;
    Iterator* iter = db_->NewIterator(ReadOptions());
    for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
      std::string s = IterStatus(iter);
      result.push_back('(');
      result.append(s);
      result.push_back(')');
      forward.push_back(s);
    }
    size_t matched = 0;
    for (iter->SeekToLast(); iter->Valid(); iter->Prev()) {
      EXPECT_LT(matched, forward.size());
      EXPECT_EQ(IterStatus(iter), forward[forward.size() - matched - 1]);
      matched++;
    }
    EXPECT_EQ(matched, forward.size());
    delete iter;
    return result;
  }
  // Dumps every internal entry (live values, shadowed values, deletion
  // markers) for one user key, e.g. "[ v2, DEL, v1 ]".
  std::string AllEntriesFor(const Slice& user_key) {
    Iterator* iter = dbfull()->TEST_NewInternalIterator();
    InternalKey target(user_key, kMaxSequenceNumber, kTypeValue);
    iter->Seek(target.Encode());
    std::string result;
    if (!iter->status().ok()) {
      result = iter->status().ToString();
    } else {
      result = "[ ";
      bool first = true;
      while (iter->Valid()) {
        ParsedInternalKey ikey;
        if (!ParseInternalKey(iter->key(), &ikey)) {
          result += "CORRUPTED";
        } else {
          // Stop once we walk past the requested user key.
          if (last_options_.comparator->Compare(ikey.user_key, user_key) != 0) {
            break;
          }
          if (!first) {
            result += ", ";
          }
          first = false;
          switch (ikey.type) {
            case kTypeValue:
              result += iter->value().ToString();
              break;
            case kTypeDeletion:
              result += "DEL";
              break;
          }
        }
        iter->Next();
      }
      if (!first) {
        result += " ";
      }
      result += "]";
    }
    delete iter;
    return result;
  }
  int NumTableFilesAtLevel(int level) {
    std::string property;
    EXPECT_TRUE(db_->GetProperty(
        "leveldb.num-files-at-level" + NumberToString(level), &property));
    return std::stoi(property);
  }
  int TotalTableFiles() {
    int result = 0;
    for (int level = 0; level < config::kNumLevels; level++) {
      result += NumTableFilesAtLevel(level);
    }
    return result;
  }
  // Returns per-level file counts as "n0,n1,..." with trailing zero
  // levels trimmed (e.g. "0,1,1").
  std::string FilesPerLevel() {
    std::string result;
    int last_non_zero_offset = 0;
    for (int level = 0; level < config::kNumLevels; level++) {
      int f = NumTableFilesAtLevel(level);
      char buf[100];
      std::snprintf(buf, sizeof(buf), "%s%d", (level ? "," : ""), f);
      result += buf;
      if (f > 0) {
        last_non_zero_offset = result.size();
      }
    }
    result.resize(last_non_zero_offset);  // Drop trailing ",0,0..." suffix.
    return result;
  }
  int CountFiles() {
    std::vector<std::string> files;
    env_->GetChildren(dbname_, &files);
    return static_cast<int>(files.size());
  }
  // Approximate on-disk size of the key range [start, limit).
  uint64_t Size(const Slice& start, const Slice& limit) {
    Range r(start, limit);
    uint64_t size;
    db_->GetApproximateSizes(&r, 1, &size);
    return size;
  }
  void Compact(const Slice& start, const Slice& limit) {
    db_->CompactRange(&start, &limit);
  }
  // Creates n table files, each spanning [small_key, large_key], by
  // repeatedly writing both keys and flushing the memtable.
  void MakeTables(int n, const std::string& small_key,
                  const std::string& large_key) {
    for (int i = 0; i < n; i++) {
      Put(small_key, "begin");
      Put(large_key, "end");
      dbfull()->TEST_CompactMemTable();
    }
  }
  // Seeds every level with an overlapping table spanning the range.
  void FillLevels(const std::string& smallest, const std::string& largest) {
    MakeTables(config::kNumLevels, smallest, largest);
  }
  // Debug aid: prints per-level file counts and max overlap to stderr.
  void DumpFileCounts(const char* label) {
    std::fprintf(stderr, "---\n%s:\n", label);
    std::fprintf(
        stderr, "maxoverlap: %lld\n",
        static_cast<long long>(dbfull()->TEST_MaxNextLevelOverlappingBytes()));
    for (int level = 0; level < config::kNumLevels; level++) {
      int num = NumTableFilesAtLevel(level);
      if (num > 0) {
        std::fprintf(stderr, "  level %3d : %d files\n", level, num);
      }
    }
  }
  std::string DumpSSTableList() {
    std::string property;
    db_->GetProperty("leveldb.sstables", &property);
    return property;
  }
  // Renders an iterator position as "key->value" or "(invalid)".
  std::string IterStatus(Iterator* iter) {
    std::string result;
    if (iter->Valid()) {
      result = iter->key().ToString() + "->" + iter->value().ToString();
    } else {
      result = "(invalid)";
    }
    return result;
  }
  // Deletes one table file, if any exists; returns whether one was
  // found (used by corruption/recovery tests).
  bool DeleteAnSSTFile() {
    std::vector<std::string> filenames;
    EXPECT_LEVELDB_OK(env_->GetChildren(dbname_, &filenames));
    uint64_t number;
    FileType type;
    for (size_t i = 0; i < filenames.size(); i++) {
      if (ParseFileName(filenames[i], &number, &type) && type == kTableFile) {
        EXPECT_LEVELDB_OK(env_->RemoveFile(TableFileName(dbname_, number)));
        return true;
      }
    }
    return false;
  }
  // Renames every .ldb table file to the legacy .sst extension;
  // returns how many files were renamed.
  int RenameLDBToSST() {
    std::vector<std::string> filenames;
    EXPECT_LEVELDB_OK(env_->GetChildren(dbname_, &filenames));
    uint64_t number;
    FileType type;
    int files_renamed = 0;
    for (size_t i = 0; i < filenames.size(); i++) {
      if (ParseFileName(filenames[i], &number, &type) && type == kTableFile) {
        const std::string from = TableFileName(dbname_, number);
        const std::string to = SSTTableFileName(dbname_, number);
        EXPECT_LEVELDB_OK(env_->RenameFile(from, to));
        files_renamed++;
      }
    }
    return files_renamed;
  }

 private:
  // Sequence of configurations cycled through by ChangeOptions().
  enum OptionConfig { kDefault, kReuse, kFilter, kUncompressed, kEnd };
  const FilterPolicy* filter_policy_;
  int option_config_;  // Index into OptionConfig.
};
// A freshly created database opens successfully and reports NOT_FOUND
// for any key, under every option configuration.
TEST_F(DBTest, Empty) {
  do {
    ASSERT_TRUE(db_ != nullptr);
    ASSERT_EQ("NOT_FOUND", Get("foo"));
  } while (ChangeOptions());
}
// The empty string is a legal key: it can be written and overwritten.
TEST_F(DBTest, EmptyKey) {
  do {
    ASSERT_LEVELDB_OK(Put("", "v1"));
    ASSERT_EQ("v1", Get(""));
    ASSERT_LEVELDB_OK(Put("", "v2"));
    ASSERT_EQ("v2", Get(""));
  } while (ChangeOptions());
}
// The empty string is a legal value, distinguishable from "missing".
TEST_F(DBTest, EmptyValue) {
  do {
    ASSERT_LEVELDB_OK(Put("key", "v1"));
    ASSERT_EQ("v1", Get("key"));
    ASSERT_LEVELDB_OK(Put("key", ""));
    ASSERT_EQ("", Get("key"));
    ASSERT_LEVELDB_OK(Put("key", "v2"));
    ASSERT_EQ("v2", Get("key"));
  } while (ChangeOptions());
}
// Basic put/get round-trips, including overwriting an existing key.
TEST_F(DBTest, ReadWrite) {
  do {
    ASSERT_LEVELDB_OK(Put("foo", "v1"));
    ASSERT_EQ("v1", Get("foo"));
    ASSERT_LEVELDB_OK(Put("bar", "v2"));
    ASSERT_LEVELDB_OK(Put("foo", "v3"));
    ASSERT_EQ("v3", Get("foo"));
    ASSERT_EQ("v2", Get("bar"));
  } while (ChangeOptions());
}
// Put, overwrite, then delete: a deleted key reads as NOT_FOUND.
TEST_F(DBTest, PutDeleteGet) {
  do {
    ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), "foo", "v1"));
    ASSERT_EQ("v1", Get("foo"));
    ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), "foo", "v2"));
    ASSERT_EQ("v2", Get("foo"));
    ASSERT_LEVELDB_OK(db_->Delete(WriteOptions(), "foo"));
    ASSERT_EQ("NOT_FOUND", Get("foo"));
  } while (ChangeOptions());
}
// Reads must be served from the immutable memtable while its flush to
// disk is stalled (via SpecialEnv's delayed data sync).
TEST_F(DBTest, GetFromImmutableLayer) {
  do {
    Options options = CurrentOptions();
    options.env = env_;
    options.write_buffer_size = 100000;  // Small buffer: flush triggers quickly.
    Reopen(&options);
    ASSERT_LEVELDB_OK(Put("foo", "v1"));
    ASSERT_EQ("v1", Get("foo"));
    // Block background sync so the memtable stays immutable in memory.
    env_->delay_data_sync_.store(true, std::memory_order_release);
    Put("k1", std::string(100000, 'x'));  // Fill memtable, forcing a rotation.
    Put("k2", std::string(100000, 'y'));
    ASSERT_EQ("v1", Get("foo"));  // Served from the immutable memtable.
    env_->delay_data_sync_.store(false, std::memory_order_release);
  } while (ChangeOptions());
}
// Reads must find values that have been flushed to a table file.
TEST_F(DBTest, GetFromVersions) {
  do {
    ASSERT_LEVELDB_OK(Put("foo", "v1"));
    dbfull()->TEST_CompactMemTable();  // Push the value into an SSTable.
    ASSERT_EQ("v1", Get("foo"));
  } while (ChangeOptions());
}
// The approximate-memory-usage property reports a positive, sane value.
TEST_F(DBTest, GetMemUsage) {
  do {
    ASSERT_LEVELDB_OK(Put("foo", "v1"));
    std::string val;
    ASSERT_TRUE(db_->GetProperty("leveldb.approximate-memory-usage", &val));
    int mem_usage = std::stoi(val);
    ASSERT_GT(mem_usage, 0);
    ASSERT_LT(mem_usage, 5 * 1024 * 1024);  // Sanity upper bound.
  } while (ChangeOptions());
}
// A snapshot pins the value visible at its creation, both before and
// after the memtable is flushed; tested for short and long keys.
TEST_F(DBTest, GetSnapshot) {
  do {
    for (int i = 0; i < 2; i++) {
      std::string key = (i == 0) ? std::string("foo") : std::string(200, 'x');
      ASSERT_LEVELDB_OK(Put(key, "v1"));
      const Snapshot* s1 = db_->GetSnapshot();
      ASSERT_LEVELDB_OK(Put(key, "v2"));
      ASSERT_EQ("v2", Get(key));
      ASSERT_EQ("v1", Get(key, s1));
      dbfull()->TEST_CompactMemTable();  // Snapshot must survive a flush.
      ASSERT_EQ("v2", Get(key));
      ASSERT_EQ("v1", Get(key, s1));
      db_->ReleaseSnapshot(s1);
    }
  } while (ChangeOptions());
}
// Multiple snapshots taken at the same sequence number are independent:
// releasing one must not invalidate the others.
TEST_F(DBTest, GetIdenticalSnapshots) {
  do {
    for (int i = 0; i < 2; i++) {
      std::string key = (i == 0) ? std::string("foo") : std::string(200, 'x');
      ASSERT_LEVELDB_OK(Put(key, "v1"));
      const Snapshot* s1 = db_->GetSnapshot();
      const Snapshot* s2 = db_->GetSnapshot();
      const Snapshot* s3 = db_->GetSnapshot();
      ASSERT_LEVELDB_OK(Put(key, "v2"));
      ASSERT_EQ("v2", Get(key));
      ASSERT_EQ("v1", Get(key, s1));
      ASSERT_EQ("v1", Get(key, s2));
      ASSERT_EQ("v1", Get(key, s3));
      db_->ReleaseSnapshot(s1);
      dbfull()->TEST_CompactMemTable();  // Flush with s2/s3 still live.
      ASSERT_EQ("v2", Get(key));
      ASSERT_EQ("v1", Get(key, s2));
      db_->ReleaseSnapshot(s2);
      ASSERT_EQ("v1", Get(key, s3));
      db_->ReleaseSnapshot(s3);
    }
  } while (ChangeOptions());
}
// An iterator over a snapshot taken before any writes sees nothing,
// whether the later writes live in the memtable or in table files.
TEST_F(DBTest, IterateOverEmptySnapshot) {
  do {
    const Snapshot* snapshot = db_->GetSnapshot();
    ReadOptions read_options;
    read_options.snapshot = snapshot;
    ASSERT_LEVELDB_OK(Put("foo", "v1"));
    ASSERT_LEVELDB_OK(Put("foo", "v2"));
    Iterator* iterator1 = db_->NewIterator(read_options);
    iterator1->SeekToFirst();
    ASSERT_TRUE(!iterator1->Valid());  // Writes are after the snapshot.
    delete iterator1;
    dbfull()->TEST_CompactMemTable();  // Same check with data on disk.
    Iterator* iterator2 = db_->NewIterator(read_options);
    iterator2->SeekToFirst();
    ASSERT_TRUE(!iterator2->Valid());
    delete iterator2;
    db_->ReleaseSnapshot(snapshot);
  } while (ChangeOptions());
}
// With two overlapping level-0 files, Get must prefer the newer file
// (level-0 files must be searched newest-first).
TEST_F(DBTest, GetLevel0Ordering) {
  do {
    // The "bar" write keeps the first file's smallest key below "foo".
    ASSERT_LEVELDB_OK(Put("bar", "b"));
    ASSERT_LEVELDB_OK(Put("foo", "v1"));
    dbfull()->TEST_CompactMemTable();
    ASSERT_LEVELDB_OK(Put("foo", "v2"));
    dbfull()->TEST_CompactMemTable();
    ASSERT_EQ("v2", Get("foo"));
  } while (ChangeOptions());
}
// A value in a lower level must be shadowed by a newer value in the
// memtable and then in level-0 after a flush.
TEST_F(DBTest, GetOrderedByLevels) {
  do {
    ASSERT_LEVELDB_OK(Put("foo", "v1"));
    Compact("a", "z");  // Push v1 down to a deeper level.
    ASSERT_EQ("v1", Get("foo"));
    ASSERT_LEVELDB_OK(Put("foo", "v2"));
    ASSERT_EQ("v2", Get("foo"));
    dbfull()->TEST_CompactMemTable();
    ASSERT_EQ("v2", Get("foo"));
  } while (ChangeOptions());
}
// With several disjoint compacted files, Get must route each key to
// the file covering its range.
TEST_F(DBTest, GetPicksCorrectFile) {
  do {
    ASSERT_LEVELDB_OK(Put("a", "va"));
    Compact("a", "b");
    ASSERT_LEVELDB_OK(Put("x", "vx"));
    Compact("x", "y");
    ASSERT_LEVELDB_OK(Put("f", "vf"));
    Compact("f", "g");
    ASSERT_EQ("va", Get("a"));
    ASSERT_EQ("vf", Get("f"));
    ASSERT_EQ("vx", Get("x"));
  } while (ChangeOptions());
}
// Regression test: when reads repeatedly pass through an empty level
// (files at levels 0 and 2 but none at 1), seek-driven compaction
// should eventually move the level-0 file down.
TEST_F(DBTest, GetEncountersEmptyLevel) {
  do {
    // Step 1: arrange files at levels 0 and 2.
    int compaction_count = 0;
    while (NumTableFilesAtLevel(0) == 0 || NumTableFilesAtLevel(2) == 0) {
      ASSERT_LE(compaction_count, 100) << "could not fill levels 0 and 2";
      compaction_count++;
      Put("a", "begin");
      Put("z", "end");
      dbfull()->TEST_CompactMemTable();
    }
    // Step 2: clear level 1, leaving the empty gap under test.
    dbfull()->TEST_CompactRange(1, nullptr, nullptr);
    ASSERT_EQ(NumTableFilesAtLevel(0), 1);
    ASSERT_EQ(NumTableFilesAtLevel(1), 0);
    ASSERT_EQ(NumTableFilesAtLevel(2), 1);
    // Step 3: issue enough misses to trigger seek compaction.
    for (int i = 0; i < 1000; i++) {
      ASSERT_EQ("NOT_FOUND", Get("missing"));
    }
    DelayMilliseconds(1000);  // Give the background compaction time to run.
    ASSERT_EQ(NumTableFilesAtLevel(0), 0);
  } while (ChangeOptions());
}
// Every positioning operation on an iterator over an empty DB yields
// an invalid iterator.
TEST_F(DBTest, IterEmpty) {
  Iterator* iter = db_->NewIterator(ReadOptions());
  iter->SeekToFirst();
  ASSERT_EQ(IterStatus(iter), "(invalid)");
  iter->SeekToLast();
  ASSERT_EQ(IterStatus(iter), "(invalid)");
  iter->Seek("foo");
  ASSERT_EQ(IterStatus(iter), "(invalid)");
  delete iter;
}
// Full navigation matrix over a one-entry DB: every seek/next/prev
// combination lands either on the single entry or on "(invalid)".
TEST_F(DBTest, IterSingle) {
  ASSERT_LEVELDB_OK(Put("a", "va"));
  Iterator* iter = db_->NewIterator(ReadOptions());
  iter->SeekToFirst();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "(invalid)");
  iter->SeekToFirst();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "(invalid)");
  iter->SeekToLast();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "(invalid)");
  iter->SeekToLast();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "(invalid)");
  iter->Seek("");  // Seek to a target before the entry.
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "(invalid)");
  iter->Seek("a");  // Seek to the exact key.
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "(invalid)");
  iter->Seek("b");  // Seek past the last entry.
  ASSERT_EQ(IterStatus(iter), "(invalid)");
  delete iter;
}
// Navigation over a three-entry DB, plus a check that an open iterator
// is a stable snapshot: writes made after creation are not visible.
TEST_F(DBTest, IterMulti) {
  ASSERT_LEVELDB_OK(Put("a", "va"));
  ASSERT_LEVELDB_OK(Put("b", "vb"));
  ASSERT_LEVELDB_OK(Put("c", "vc"));
  Iterator* iter = db_->NewIterator(ReadOptions());
  iter->SeekToFirst();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "b->vb");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "c->vc");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "(invalid)");
  iter->SeekToFirst();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "(invalid)");
  iter->SeekToLast();
  ASSERT_EQ(IterStatus(iter), "c->vc");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "b->vb");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "(invalid)");
  iter->SeekToLast();
  ASSERT_EQ(IterStatus(iter), "c->vc");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "(invalid)");
  // Seek targets: before-first, exact, between entries, and past-last.
  iter->Seek("");
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Seek("a");
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Seek("ax");
  ASSERT_EQ(IterStatus(iter), "b->vb");
  iter->Seek("b");
  ASSERT_EQ(IterStatus(iter), "b->vb");
  iter->Seek("z");
  ASSERT_EQ(IterStatus(iter), "(invalid)");
  // Direction switches mid-stream.
  iter->SeekToLast();
  iter->Prev();
  iter->Prev();
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "b->vb");
  iter->SeekToFirst();
  iter->Next();
  iter->Next();
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "b->vb");
  // Mutate the DB; the existing iterator must keep its original view.
  ASSERT_LEVELDB_OK(Put("a", "va2"));
  ASSERT_LEVELDB_OK(Put("a2", "va3"));
  ASSERT_LEVELDB_OK(Put("b", "vb2"));
  ASSERT_LEVELDB_OK(Put("c", "vc2"));
  ASSERT_LEVELDB_OK(Delete("b"));
  iter->SeekToFirst();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "b->vb");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "c->vc");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "(invalid)");
  iter->SeekToLast();
  ASSERT_EQ(IterStatus(iter), "c->vc");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "b->vb");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "(invalid)");
  delete iter;
}
// Iteration in both directions over a mix of tiny and ~100KB values.
TEST_F(DBTest, IterSmallAndLargeMix) {
  ASSERT_LEVELDB_OK(Put("a", "va"));
  ASSERT_LEVELDB_OK(Put("b", std::string(100000, 'b')));
  ASSERT_LEVELDB_OK(Put("c", "vc"));
  ASSERT_LEVELDB_OK(Put("d", std::string(100000, 'd')));
  ASSERT_LEVELDB_OK(Put("e", std::string(100000, 'e')));
  Iterator* iter = db_->NewIterator(ReadOptions());
  iter->SeekToFirst();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "b->" + std::string(100000, 'b'));
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "c->vc");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "d->" + std::string(100000, 'd'));
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "e->" + std::string(100000, 'e'));
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "(invalid)");
  iter->SeekToLast();
  ASSERT_EQ(IterStatus(iter), "e->" + std::string(100000, 'e'));
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "d->" + std::string(100000, 'd'));
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "c->vc");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "b->" + std::string(100000, 'b'));
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "(invalid)");
  delete iter;
}
// Reverse iteration must skip over a deleted key that sits between
// two live entries (all data still in the memtable).
TEST_F(DBTest, IterMultiWithDelete) {
  do {
    ASSERT_LEVELDB_OK(Put("a", "va"));
    ASSERT_LEVELDB_OK(Put("b", "vb"));
    ASSERT_LEVELDB_OK(Put("c", "vc"));
    ASSERT_LEVELDB_OK(Delete("b"));
    ASSERT_EQ("NOT_FOUND", Get("b"));
    Iterator* iter = db_->NewIterator(ReadOptions());
    iter->Seek("c");
    ASSERT_EQ(IterStatus(iter), "c->vc");
    iter->Prev();
    ASSERT_EQ(IterStatus(iter), "a->va");  // "b" is skipped.
    delete iter;
  } while (ChangeOptions());
}
// Like IterMultiWithDelete, but the live values are flushed to a table
// file before the delete, so the deletion marker shadows on-disk data.
TEST_F(DBTest, IterMultiWithDeleteAndCompaction) {
  do {
    ASSERT_LEVELDB_OK(Put("b", "vb"));
    ASSERT_LEVELDB_OK(Put("c", "vc"));
    ASSERT_LEVELDB_OK(Put("a", "va"));
    dbfull()->TEST_CompactMemTable();
    ASSERT_LEVELDB_OK(Delete("b"));
    ASSERT_EQ("NOT_FOUND", Get("b"));
    Iterator* iter = db_->NewIterator(ReadOptions());
    iter->Seek("c");
    ASSERT_EQ(IterStatus(iter), "c->vc");
    iter->Prev();
    ASSERT_EQ(IterStatus(iter), "a->va");  // Deleted "b" is skipped.
    iter->Seek("b");
    ASSERT_EQ(IterStatus(iter), "c->vc");  // Seek lands past the tombstone.
    delete iter;
  } while (ChangeOptions());
}
// Data survives reopen: values written before each Reopen() remain
// readable, and newer writes correctly shadow older ones.
TEST_F(DBTest, Recover) {
  do {
    ASSERT_LEVELDB_OK(Put("foo", "v1"));
    ASSERT_LEVELDB_OK(Put("baz", "v5"));
    Reopen();
    ASSERT_EQ("v1", Get("foo"));
    ASSERT_EQ("v1", Get("foo"));
    ASSERT_EQ("v5", Get("baz"));
    ASSERT_LEVELDB_OK(Put("bar", "v2"));
    ASSERT_LEVELDB_OK(Put("foo", "v3"));
    Reopen();
    ASSERT_EQ("v3", Get("foo"));
    ASSERT_LEVELDB_OK(Put("foo", "v4"));
    ASSERT_EQ("v4", Get("foo"));
    ASSERT_EQ("v2", Get("bar"));
    ASSERT_EQ("v5", Get("baz"));
  } while (ChangeOptions());
}
// Recovery must handle a log file that contains no records (produced
// by the back-to-back Reopen() with no intervening writes).
TEST_F(DBTest, RecoveryWithEmptyLog) {
  do {
    ASSERT_LEVELDB_OK(Put("foo", "v1"));
    ASSERT_LEVELDB_OK(Put("foo", "v2"));
    Reopen();
    Reopen();  // Second reopen recovers from an empty log.
    ASSERT_LEVELDB_OK(Put("foo", "v3"));
    Reopen();
    ASSERT_EQ("v3", Get("foo"));
  } while (ChangeOptions());
}
// Reopening while a memtable compaction is in flight must not lose
// any writes, whether they landed in the old or the new log file.
TEST_F(DBTest, RecoverDuringMemtableCompaction) {
  do {
    Options options = CurrentOptions();
    options.env = env_;
    options.write_buffer_size = 1000000;
    Reopen(&options);
    // Trigger a long memtable compaction and reopen the database during it.
    ASSERT_LEVELDB_OK(Put("foo", "v1"));  // Goes to 1st log file.
    ASSERT_LEVELDB_OK(
        Put("big1", std::string(10000000, 'x')));  // Fills memtable.
    ASSERT_LEVELDB_OK(
        Put("big2", std::string(1000, 'y')));  // Triggers compaction.
    ASSERT_LEVELDB_OK(Put("bar", "v2"));       // Goes to new log file.
    Reopen(&options);
    ASSERT_EQ("v1", Get("foo"));
    ASSERT_EQ("v2", Get("bar"));
    ASSERT_EQ(std::string(10000000, 'x'), Get("big1"));
    ASSERT_EQ(std::string(1000, 'y'), Get("big2"));
  } while (ChangeOptions());
}
// Produces a fixed-width test key for index `i`, e.g. Key(7) ==
// "key000007"; zero-padding keeps keys in numeric == lexicographic order.
static std::string Key(int i) {
  char formatted[100];
  std::snprintf(formatted, sizeof(formatted), "key%06d", i);
  return formatted;  // Implicit conversion to std::string.
}
// With a tiny write buffer, writing N largish values must produce new
// table files (minor compactions), and all data stays readable both
// before and after a reopen.
TEST_F(DBTest, MinorCompactionsHappen) {
  Options options = CurrentOptions();
  options.write_buffer_size = 10000;  // Small buffer forces frequent flushes.
  Reopen(&options);
  const int N = 500;
  int starting_num_tables = TotalTableFiles();
  for (int i = 0; i < N; i++) {
    ASSERT_LEVELDB_OK(Put(Key(i), Key(i) + std::string(1000, 'v')));
  }
  int ending_num_tables = TotalTableFiles();
  ASSERT_GT(ending_num_tables, starting_num_tables);
  for (int i = 0; i < N; i++) {
    ASSERT_EQ(Key(i) + std::string(1000, 'v'), Get(Key(i)));
  }
  Reopen();
  for (int i = 0; i < N; i++) {
    ASSERT_EQ(Key(i) + std::string(1000, 'v'), Get(Key(i)));
  }
}
// Recovering a log much larger than the write buffer must split the
// recovered data into multiple level-0 table files.
TEST_F(DBTest, RecoverWithLargeLog) {
  {
    Options options = CurrentOptions();
    Reopen(&options);
    ASSERT_LEVELDB_OK(Put("big1", std::string(200000, '1')));
    ASSERT_LEVELDB_OK(Put("big2", std::string(200000, '2')));
    ASSERT_LEVELDB_OK(Put("small3", std::string(10, '3')));
    ASSERT_LEVELDB_OK(Put("small4", std::string(10, '4')));
    ASSERT_EQ(NumTableFilesAtLevel(0), 0);  // Everything still in the log.
  }
  // Make sure that if we re-open with a small write buffer size that
  // we flush table files in the middle of a large log file.
  Options options = CurrentOptions();
  options.write_buffer_size = 100000;
  Reopen(&options);
  ASSERT_EQ(NumTableFilesAtLevel(0), 3);
  ASSERT_EQ(std::string(200000, '1'), Get("big1"));
  ASSERT_EQ(std::string(200000, '2'), Get("big2"));
  ASSERT_EQ(std::string(10, '3'), Get("small3"));
  ASSERT_EQ(std::string(10, '4'), Get("small4"));
  ASSERT_GT(NumTableFilesAtLevel(0), 1);
}
// Compacting a single large level-0 file must split the output into
// multiple level-1 files, with all values preserved.
TEST_F(DBTest, CompactionsGenerateMultipleFiles) {
  Options options = CurrentOptions();
  options.write_buffer_size = 100000000;  // Large buffer: one big L0 file.
  Reopen(&options);
  Random rnd(301);
  ASSERT_EQ(NumTableFilesAtLevel(0), 0);
  std::vector<std::string> values;
  for (int i = 0; i < 80; i++) {
    values.push_back(RandomString(&rnd, 100000));
    ASSERT_LEVELDB_OK(Put(Key(i), values[i]));
  }
  // Reopening moves updates to level-0.
  Reopen(&options);
  dbfull()->TEST_CompactRange(0, nullptr, nullptr);
  ASSERT_EQ(NumTableFilesAtLevel(0), 0);
  ASSERT_GT(NumTableFilesAtLevel(1), 1);  // Output was split.
  for (int i = 0; i < 80; i++) {
    ASSERT_EQ(Get(Key(i)), values[i]);
  }
}
// Repeatedly overwriting one key must not accumulate table files
// beyond the expected bound (obsolete versions get compacted away).
TEST_F(DBTest, RepeatedWritesToSameKey) {
  Options options = CurrentOptions();
  options.env = env_;
  options.write_buffer_size = 100000;
  Reopen(&options);
  // We must have at most one file per level except for level-0,
  // which may have up to kL0_StopWritesTrigger files.
  const int kMaxFiles = config::kNumLevels + config::kL0_StopWritesTrigger;
  Random rnd(301);
  std::string value = RandomString(&rnd, 2 * options.write_buffer_size);
  for (int i = 0; i < 5 * kMaxFiles; i++) {
    Put("key", value);
    ASSERT_LE(TotalTableFiles(), kMaxFiles);
    std::fprintf(stderr, "after %d: %d files\n", i + 1, TotalTableFiles());
  }
}
// After seeding a sparse key space and compacting, subsequent
// compactions must keep the next-level overlap bounded (regression
// against compactions producing files with huge overlap).
TEST_F(DBTest, SparseMerge) {
  Options options = CurrentOptions();
  options.compression = kNoCompression;  // Sizes must be predictable.
  Reopen(&options);
  FillLevels("A", "Z");
  // Suppose there is:
  //    small amount of data with prefix A
  //    large amount of data with prefix B
  //    small amount of data with prefix C
  // and that recent updates have made small changes to all three prefixes.
  const std::string value(1000, 'x');
  Put("A", "va");
  for (int i = 0; i < 100000; i++) {
    char key[100];
    std::snprintf(key, sizeof(key), "B%010d", i);
    Put(key, value);
  }
  Put("C", "vc");
  dbfull()->TEST_CompactMemTable();
  dbfull()->TEST_CompactRange(0, nullptr, nullptr);
  // Make sparse update, then verify overlap stays bounded after each
  // compaction step.
  Put("A", "va2");
  Put("B100", "bvalue2");
  Put("C", "vc2");
  dbfull()->TEST_CompactMemTable();
  ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20 * 1048576);
  dbfull()->TEST_CompactRange(0, nullptr, nullptr);
  ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20 * 1048576);
  dbfull()->TEST_CompactRange(1, nullptr, nullptr);
  ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20 * 1048576);
}
// Returns true iff `val` lies within the inclusive range [low, high];
// on failure, logs the offending value and bounds to stderr to aid
// debugging of approximate-size assertions.
static bool Between(uint64_t val, uint64_t low, uint64_t high) {
  if (low <= val && val <= high) {
    return true;
  }
  std::fprintf(stderr, "Value %llu is not in range [%llu, %llu]\n",
               static_cast<unsigned long long>(val),
               static_cast<unsigned long long>(low),
               static_cast<unsigned long long>(high));
  return false;
}
// GetApproximateSizes must grow roughly linearly with the number of
// 100KB values in the queried range (compression disabled so on-disk
// size tracks logical size), across repeated reopens and partial
// compactions.
TEST_F(DBTest, ApproximateSizes) {
  do {
    Options options = CurrentOptions();
    options.write_buffer_size = 100000000;  // Large so data stays in memtable.
    options.compression = kNoCompression;
    DestroyAndReopen();
    ASSERT_TRUE(Between(Size("", "xyz"), 0, 0));  // Empty DB: size 0.
    Reopen(&options);
    ASSERT_TRUE(Between(Size("", "xyz"), 0, 0));
    // Write 8MB (80 values, each 100K).
    ASSERT_EQ(NumTableFilesAtLevel(0), 0);
    const int N = 80;
    static const int S1 = 100000;
    static const int S2 = 105000;  // Allow some expansion from metadata.
    Random rnd(301);
    for (int i = 0; i < N; i++) {
      ASSERT_LEVELDB_OK(Put(Key(i), RandomString(&rnd, S1)));
    }
    // 0 because GetApproximateSizes() does not account for memtable space.
    ASSERT_TRUE(Between(Size("", Key(50)), 0, 0));
    if (options.reuse_logs) {
      // Recovery will reuse memtable, and GetApproximateSizes() does not
      // account for memtable usage; skip the on-disk size checks.
      Reopen(&options);
      ASSERT_TRUE(Between(Size("", Key(50)), 0, 0));
      continue;
    }
    // Check sizes across recovery by reopening a few times, compacting
    // a different 10-key window each pass.
    for (int run = 0; run < 3; run++) {
      Reopen(&options);
      for (int compact_start = 0; compact_start < N; compact_start += 10) {
        for (int i = 0; i < N; i += 10) {
          ASSERT_TRUE(Between(Size("", Key(i)), S1 * i, S2 * i));
          ASSERT_TRUE(Between(Size("", Key(i) + ".suffix"), S1 * (i + 1),
                              S2 * (i + 1)));
          ASSERT_TRUE(Between(Size(Key(i), Key(i + 10)), S1 * 10, S2 * 10));
        }
        ASSERT_TRUE(Between(Size("", Key(50)), S1 * 50, S2 * 50));
        ASSERT_TRUE(Between(Size("", Key(50) + ".suffix"), S1 * 50, S2 * 50));
        std::string cstart_str = Key(compact_start);
        std::string cend_str = Key(compact_start + 9);
        Slice cstart = cstart_str;
        Slice cend = cend_str;
        dbfull()->TEST_CompactRange(0, &cstart, &cend);
      }
      ASSERT_EQ(NumTableFilesAtLevel(0), 0);
      ASSERT_GT(NumTableFilesAtLevel(1), 0);
    }
  } while (ChangeOptions());
}
// GetApproximateSizes must be accurate for cumulative prefixes over a
// mix of 10KB, 100KB and 300KB values, and remain stable across
// reopens and full level-0 compactions.
TEST_F(DBTest, ApproximateSizes_MixOfSmallAndLarge) {
  do {
    Options options = CurrentOptions();
    options.compression = kNoCompression;  // Keep on-disk sizes predictable.
    Reopen();
    Random rnd(301);
    std::string big1 = RandomString(&rnd, 100000);
    ASSERT_LEVELDB_OK(Put(Key(0), RandomString(&rnd, 10000)));
    ASSERT_LEVELDB_OK(Put(Key(1), RandomString(&rnd, 10000)));
    ASSERT_LEVELDB_OK(Put(Key(2), big1));
    ASSERT_LEVELDB_OK(Put(Key(3), RandomString(&rnd, 10000)));
    ASSERT_LEVELDB_OK(Put(Key(4), big1));
    ASSERT_LEVELDB_OK(Put(Key(5), RandomString(&rnd, 10000)));
    ASSERT_LEVELDB_OK(Put(Key(6), RandomString(&rnd, 300000)));
    ASSERT_LEVELDB_OK(Put(Key(7), RandomString(&rnd, 10000)));
    if (options.reuse_logs) {
      // Need to force a memtable compaction since recovery does not do so.
      ASSERT_LEVELDB_OK(dbfull()->TEST_CompactMemTable());
    }
    // Each bound below is the running sum of the value sizes written
    // above, with a small allowance for per-entry overhead.
    for (int run = 0; run < 3; run++) {
      Reopen(&options);
      ASSERT_TRUE(Between(Size("", Key(0)), 0, 0));
      ASSERT_TRUE(Between(Size("", Key(1)), 10000, 11000));
      ASSERT_TRUE(Between(Size("", Key(2)), 20000, 21000));
      ASSERT_TRUE(Between(Size("", Key(3)), 120000, 121000));
      ASSERT_TRUE(Between(Size("", Key(4)), 130000, 131000));
      ASSERT_TRUE(Between(Size("", Key(5)), 230000, 231000));
      ASSERT_TRUE(Between(Size("", Key(6)), 240000, 241000));
      ASSERT_TRUE(Between(Size("", Key(7)), 540000, 541000));
      ASSERT_TRUE(Between(Size("", Key(8)), 550000, 560000));
      ASSERT_TRUE(Between(Size(Key(3), Key(5)), 110000, 111000));
      dbfull()->TEST_CompactRange(0, nullptr, nullptr);
    }
  } while (ChangeOptions());
}
// An open iterator pins its version: it must keep returning the value
// current at creation time even after many writes and overwrites that
// trigger compactions.
TEST_F(DBTest, IteratorPinsRef) {
  Put("foo", "hello");
  // Get iterator that will yield the current contents of the DB.
  Iterator* iter = db_->NewIterator(ReadOptions());
  // Write to force compactions.
  Put("foo", "newvalue1");
  for (int i = 0; i < 100; i++) {
    ASSERT_LEVELDB_OK(
        Put(Key(i), Key(i) + std::string(100000, 'v')));  // 100K values.
  }
  Put("foo", "newvalue2");
  iter->SeekToFirst();
  ASSERT_TRUE(iter->Valid());
  ASSERT_EQ("foo", iter->key().ToString());
  ASSERT_EQ("hello", iter->value().ToString());  // Original value survives.
  iter->Next();
  ASSERT_TRUE(!iter->Valid());
  delete iter;
}
// Three stacked snapshots each see their own version of "foo";
// releasing snapshots out of order must not affect the remaining ones.
TEST_F(DBTest, Snapshot) {
  do {
    Put("foo", "v1");
    const Snapshot* s1 = db_->GetSnapshot();
    Put("foo", "v2");
    const Snapshot* s2 = db_->GetSnapshot();
    Put("foo", "v3");
    const Snapshot* s3 = db_->GetSnapshot();
    Put("foo", "v4");
    ASSERT_EQ("v1", Get("foo", s1));
    ASSERT_EQ("v2", Get("foo", s2));
    ASSERT_EQ("v3", Get("foo", s3));
    ASSERT_EQ("v4", Get("foo"));
    db_->ReleaseSnapshot(s3);  // Release the newest snapshot first.
    ASSERT_EQ("v1", Get("foo", s1));
    ASSERT_EQ("v2", Get("foo", s2));
    ASSERT_EQ("v4", Get("foo"));
    db_->ReleaseSnapshot(s1);
    ASSERT_EQ("v2", Get("foo", s2));
    ASSERT_EQ("v4", Get("foo"));
    db_->ReleaseSnapshot(s2);
    ASSERT_EQ("v4", Get("foo"));
  } while (ChangeOptions());
}
// Once the snapshot protecting an old, shadowed value is released,
// compaction must physically drop that value and reclaim its space.
TEST_F(DBTest, HiddenValuesAreRemoved) {
  do {
    Random rnd(301);
    FillLevels("a", "z");
    std::string big = RandomString(&rnd, 50000);
    Put("foo", big);
    Put("pastfoo", "v");
    const Snapshot* snapshot = db_->GetSnapshot();  // Pins `big`.
    Put("foo", "tiny");
    Put("pastfoo2", "v2");  // Advance sequence number one more time.
    ASSERT_LEVELDB_OK(dbfull()->TEST_CompactMemTable());
    ASSERT_GT(NumTableFilesAtLevel(0), 0);
    ASSERT_EQ(big, Get("foo", snapshot));
    ASSERT_TRUE(Between(Size("", "pastfoo"), 50000, 60000));
    db_->ReleaseSnapshot(snapshot);  // Nothing pins `big` any longer.
    ASSERT_EQ(AllEntriesFor("foo"), "[ tiny, " + big + " ]");
    Slice x("x");
    dbfull()->TEST_CompactRange(0, nullptr, &x);
    ASSERT_EQ(AllEntriesFor("foo"), "[ tiny ]");  // `big` is gone.
    ASSERT_EQ(NumTableFilesAtLevel(0), 0);
    ASSERT_GE(NumTableFilesAtLevel(1), 1);
    dbfull()->TEST_CompactRange(1, nullptr, &x);
    ASSERT_EQ(AllEntriesFor("foo"), "[ tiny ]");
    ASSERT_TRUE(Between(Size("", "pastfoo"), 0, 1000));  // Space reclaimed.
  } while (ChangeOptions());
}
// A deletion marker shadowed by a newer value: compaction may drop the
// DEL (nothing below needs it) but must keep dropping only what is safe
// at each level.
TEST_F(DBTest, DeletionMarkers1) {
  Put("foo", "v1");
  ASSERT_LEVELDB_OK(dbfull()->TEST_CompactMemTable());
  const int last = config::kMaxMemCompactLevel;
  ASSERT_EQ(NumTableFilesAtLevel(last), 1);  // foo => v1 is now in last level.
  // Place a table at level last-1 to prevent merging with preceding mutation.
  Put("a", "begin");
  Put("z", "end");
  dbfull()->TEST_CompactMemTable();
  ASSERT_EQ(NumTableFilesAtLevel(last), 1);
  ASSERT_EQ(NumTableFilesAtLevel(last - 1), 1);
  Delete("foo");
  Put("foo", "v2");
  ASSERT_EQ(AllEntriesFor("foo"), "[ v2, DEL, v1 ]");
  ASSERT_LEVELDB_OK(dbfull()->TEST_CompactMemTable());  // Moves to level last-2.
  ASSERT_EQ(AllEntriesFor("foo"), "[ v2, DEL, v1 ]");
  Slice z("z");
  dbfull()->TEST_CompactRange(last - 2, nullptr, &z);
  // DEL eliminated, but v1 remains because we aren't compacting that level
  // (DEL can be eliminated because v2 hides v1).
  ASSERT_EQ(AllEntriesFor("foo"), "[ v2, v1 ]");
  dbfull()->TEST_CompactRange(last - 1, nullptr, nullptr);
  // Merging last-1 with last, so we are the base level for "foo", so
  // DEL is removed.  (as is v1).
  ASSERT_EQ(AllEntriesFor("foo"), "[ v2 ]");
}
// A deletion marker with NO newer value must be retained until it is
// compacted into the base level for its key, at which point both the
// DEL and the value it covers disappear.
TEST_F(DBTest, DeletionMarkers2) {
  Put("foo", "v1");
  ASSERT_LEVELDB_OK(dbfull()->TEST_CompactMemTable());
  const int last = config::kMaxMemCompactLevel;
  ASSERT_EQ(NumTableFilesAtLevel(last), 1);  // foo => v1 is now in last level.
  // Place a table at level last-1 to prevent merging with preceding mutation.
  Put("a", "begin");
  Put("z", "end");
  dbfull()->TEST_CompactMemTable();
  ASSERT_EQ(NumTableFilesAtLevel(last), 1);
  ASSERT_EQ(NumTableFilesAtLevel(last - 1), 1);
  Delete("foo");
  ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]");
  ASSERT_LEVELDB_OK(dbfull()->TEST_CompactMemTable());  // Moves to level last-2.
  ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]");
  dbfull()->TEST_CompactRange(last - 2, nullptr, nullptr);
  // DEL kept: "last" file overlaps, so v1 would resurface if DEL dropped.
  ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]");
  dbfull()->TEST_CompactRange(last - 1, nullptr, nullptr);
  // Merging last-1 with last, so we are the base level for "foo", so
  // DEL is removed.  (as is v1).
  ASSERT_EQ(AllEntriesFor("foo"), "[ ]");
}
// Regression test for level-0 overlap handling: deletion-only files,
// overlapping level-0 files, and a read-after-compaction must all
// behave; final Get("600") checks the deletion is honored.
TEST_F(DBTest, OverlapInLevel0) {
  do {
    ASSERT_EQ(config::kMaxMemCompactLevel, 2) << "Fix test to match config";
    // Fill levels 1 and 2 to disable the pushing of new memtables to
    // levels > 0.
    ASSERT_LEVELDB_OK(Put("100", "v100"));
    ASSERT_LEVELDB_OK(Put("999", "v999"));
    dbfull()->TEST_CompactMemTable();
    ASSERT_LEVELDB_OK(Delete("100"));
    ASSERT_LEVELDB_OK(Delete("999"));
    dbfull()->TEST_CompactMemTable();
    ASSERT_EQ("0,1,1", FilesPerLevel());
    // Make files spanning the following ranges in level-0:
    //  files[0]  200 .. 900
    //  files[1]  300 .. 500
    // Note that files are sorted by smallest key.
    ASSERT_LEVELDB_OK(Put("300", "v300"));
    ASSERT_LEVELDB_OK(Put("500", "v500"));
    dbfull()->TEST_CompactMemTable();
    ASSERT_LEVELDB_OK(Put("200", "v200"));
    ASSERT_LEVELDB_OK(Put("600", "v600"));
    ASSERT_LEVELDB_OK(Put("900", "v900"));
    dbfull()->TEST_CompactMemTable();
    ASSERT_EQ("2,1,1", FilesPerLevel());
    // Compact away the placeholder files we created initially.
    dbfull()->TEST_CompactRange(1, nullptr, nullptr);
    dbfull()->TEST_CompactRange(2, nullptr, nullptr);
    ASSERT_EQ("2", FilesPerLevel());
    // Do a memtable compaction.  Before bug-fix, the compaction would
    // not detect the overlap with level-0 files and would incorrectly
    // place the deletion in a deeper level.
    ASSERT_LEVELDB_OK(Delete("600"));
    dbfull()->TEST_CompactMemTable();
    ASSERT_EQ("3", FilesPerLevel());
    ASSERT_EQ("NOT_FOUND", Get("600"));
  } while (ChangeOptions());
}
// Regression test for leveldb issue 44 (case a): a specific sequence
// of puts/deletes across reopens must not resurrect a deleted key
// after background compaction runs.
TEST_F(DBTest, L0_CompactionBug_Issue44_a) {
  Reopen();
  ASSERT_LEVELDB_OK(Put("b", "v"));
  Reopen();
  ASSERT_LEVELDB_OK(Delete("b"));
  ASSERT_LEVELDB_OK(Delete("a"));
  Reopen();
  ASSERT_LEVELDB_OK(Delete("a"));
  Reopen();
  ASSERT_LEVELDB_OK(Put("a", "v"));
  Reopen();
  Reopen();
  ASSERT_EQ("(a->v)", Contents());
  DelayMilliseconds(1000);  // Wait for background compaction to finish.
  ASSERT_EQ("(a->v)", Contents());
}
// Regression test for leveldb issue 44 (case b): empty-key writes and
// interleaved deletes across many reopens; final contents must be
// exactly "(->)(c->cv)" both before and after compaction settles.
TEST_F(DBTest, L0_CompactionBug_Issue44_b) {
  Reopen();
  Put("", "");
  Reopen();
  Delete("e");
  Put("", "");
  Reopen();
  Put("c", "cv");
  Reopen();
  Put("", "");
  Reopen();
  Put("", "");
  DelayMilliseconds(1000);  // Wait for background compaction to finish.
  Reopen();
  Put("d", "dv");
  Reopen();
  Put("", "");
  Reopen();
  Delete("d");
  Delete("b");
  Reopen();
  ASSERT_EQ("(->)(c->cv)", Contents());
  DelayMilliseconds(1000);  // Wait for background compaction to finish.
  ASSERT_EQ("(->)(c->cv)", Contents());
}
// Regression test for leveldb issue 474: interleaving fflush(nullptr)
// (flush all C streams) with writes must not corrupt or fail puts.
TEST_F(DBTest, Fflush_Issue474) {
  static const int kNum = 100000;
  Random rnd(test::RandomSeed());
  for (int i = 0; i < kNum; i++) {
    std::fflush(nullptr);
    ASSERT_LEVELDB_OK(Put(RandomKey(&rnd), RandomString(&rnd, 100)));
  }
}
// Opening an existing database with a comparator whose Name() differs
// from the one it was created with must fail, and the error message
// must mention the comparator mismatch.
TEST_F(DBTest, ComparatorCheck) {
  // Behaves exactly like the built-in comparator but reports a
  // different name, which is what triggers the mismatch.
  class NewComparator : public Comparator {
   public:
    const char* Name() const override { return "leveldb.NewComparator"; }
    int Compare(const Slice& a, const Slice& b) const override {
      return BytewiseComparator()->Compare(a, b);
    }
    void FindShortestSeparator(std::string* s, const Slice& l) const override {
      BytewiseComparator()->FindShortestSeparator(s, l);
    }
    void FindShortSuccessor(std::string* key) const override {
      BytewiseComparator()->FindShortSuccessor(key);
    }
  };
  NewComparator cmp;
  Options new_options = CurrentOptions();
  new_options.comparator = &cmp;
  Status s = TryReopen(&new_options);
  ASSERT_TRUE(!s.ok());
  ASSERT_TRUE(s.ToString().find("comparator") != std::string::npos)
      << s.ToString();
}
TEST_F(DBTest, CustomComparator) {
class NumberComparator : public Comparator {
public:
const char* Name() const override { return "test.NumberComparator"; }
int Compare(const Slice& a, const Slice& b) const override {
return ToNumber(a) - ToNumber(b);
}
void FindShortestSeparator(std::string* s, const Slice& l) const override {
ToNumber(*s);
ToNumber(l);
}
void FindShortSuccessor(std::string* key) const override {
ToNumber(*key);
}
private:
static int ToNumber(const Slice& x) {
EXPECT_TRUE(x.size() >= 2 && x[0] == '[' && x[x.size() - 1] == ']')
<< EscapeString(x);
int val;
char ignored;
EXPECT_TRUE(sscanf(x.ToString().c_str(), "[%i]%c", &val, &ignored) == 1)
<< EscapeString(x);
return val;
}
};
NumberComparator cmp;
Options new_options = CurrentOptions();
new_options.create_if_missing = true;
new_options.comparator = &cmp;
new_options.filter_policy = nullptr;
new_options.write_buffer_size = 1000;
DestroyAndReopen(&new_options);
ASSERT_LEVELDB_OK(Put("[10]", "ten"));
ASSERT_LEVELDB_OK(Put("[0x14]", "twenty"));
for (int i = 0; i < 2; i++) {
ASSERT_EQ("ten", Get("[10]"));
ASSERT_EQ("ten", Get("[0xa]"));
ASSERT_EQ("twenty", Get("[20]"));
ASSERT_EQ("twenty", Get("[0x14]"));
ASSERT_EQ("NOT_FOUND", Get("[15]"));
ASSERT_EQ("NOT_FOUND", Get("[0xf]"));
Compact("[0]", "[9999]");
}
for (int run = 0; run < 2; run++) {
for (int i = 0; i < 1000; i++) {
char buf[100];
std::snprintf(buf, sizeof(buf), "[%d]", i * 10);
ASSERT_LEVELDB_OK(Put(buf, buf));
}
Compact("[0]", "[1000000]");
}
}
TEST_F(DBTest, ManualCompaction) {
ASSERT_EQ(config::kMaxMemCompactLevel, 2)
<< "Need to update this test to match kMaxMemCompactLevel";
MakeTables(3, "p", "q");
ASSERT_EQ("1,1,1", FilesPerLevel());
Compact("", "c");
ASSERT_EQ("1,1,1", FilesPerLevel());
Compact("r", "z");
ASSERT_EQ("1,1,1", FilesPerLevel());
Compact("p1", "p9");
ASSERT_EQ("0,0,1", FilesPerLevel());
MakeTables(3, "c", "e");
ASSERT_EQ("1,1,2", FilesPerLevel());
Compact("b", "f");
ASSERT_EQ("0,0,2", FilesPerLevel());
MakeTables(1, "a", "z");
ASSERT_EQ("0,1,2", FilesPerLevel());
db_->CompactRange(nullptr, nullptr);
ASSERT_EQ("0,0,1", FilesPerLevel());
}
TEST_F(DBTest, DBOpen_Options) {
std::string dbname = testing::TempDir() + "db_options_test";
DestroyDB(dbname, Options());
DB* db = nullptr;
Options opts;
opts.create_if_missing = false;
Status s = DB::Open(opts, dbname, &db);
ASSERT_TRUE(strstr(s.ToString().c_str(), "does not exist") != nullptr);
ASSERT_TRUE(db == nullptr);
opts.create_if_missing = true;
s = DB::Open(opts, dbname, &db);
ASSERT_LEVELDB_OK(s);
ASSERT_TRUE(db != nullptr);
delete db;
db = nullptr;
opts.create_if_missing = false;
opts.error_if_exists = true;
s = DB::Open(opts, dbname, &db);
ASSERT_TRUE(strstr(s.ToString().c_str(), "exists") != nullptr);
ASSERT_TRUE(db == nullptr);
opts.create_if_missing = true;
opts.error_if_exists = false;
s = DB::Open(opts, dbname, &db);
ASSERT_LEVELDB_OK(s);
ASSERT_TRUE(db != nullptr);
delete db;
db = nullptr;
}
TEST_F(DBTest, DestroyEmptyDir) {
std::string dbname = testing::TempDir() + "db_empty_dir";
TestEnv env(Env::Default());
env.RemoveDir(dbname);
ASSERT_TRUE(!env.FileExists(dbname));
Options opts;
opts.env = &env;
ASSERT_LEVELDB_OK(env.CreateDir(dbname));
ASSERT_TRUE(env.FileExists(dbname));
std::vector<std::string> children;
ASSERT_LEVELDB_OK(env.GetChildren(dbname, &children));
#if defined(LEVELDB_PLATFORM_CHROMIUM)
ASSERT_EQ(0, children.size());
#else
ASSERT_EQ(2, children.size());
#endif
ASSERT_LEVELDB_OK(DestroyDB(dbname, opts));
ASSERT_TRUE(!env.FileExists(dbname));
env.SetIgnoreDotFiles(true);
ASSERT_LEVELDB_OK(env.CreateDir(dbname));
ASSERT_TRUE(env.FileExists(dbname));
ASSERT_LEVELDB_OK(env.GetChildren(dbname, &children));
ASSERT_EQ(0, children.size());
ASSERT_LEVELDB_OK(DestroyDB(dbname, opts));
ASSERT_TRUE(!env.FileExists(dbname));
}
TEST_F(DBTest, DestroyOpenDB) {
std::string dbname = testing::TempDir() + "open_db_dir";
env_->RemoveDir(dbname);
ASSERT_TRUE(!env_->FileExists(dbname));
Options opts;
opts.create_if_missing = true;
DB* db = nullptr;
ASSERT_LEVELDB_OK(DB::Open(opts, dbname, &db));
ASSERT_TRUE(db != nullptr);
ASSERT_TRUE(env_->FileExists(dbname));
ASSERT_TRUE(!DestroyDB(dbname, Options()).ok());
ASSERT_TRUE(env_->FileExists(dbname));
delete db;
db = nullptr;
ASSERT_LEVELDB_OK(DestroyDB(dbname, Options()));
ASSERT_TRUE(!env_->FileExists(dbname));
}
TEST_F(DBTest, Locking) {
DB* db2 = nullptr;
Status s = DB::Open(CurrentOptions(), dbname_, &db2);
ASSERT_TRUE(!s.ok()) << "Locking did not prevent re-opening db";
}
TEST_F(DBTest, NoSpace) {
Options options = CurrentOptions();
options.env = env_;
Reopen(&options);
ASSERT_LEVELDB_OK(Put("foo", "v1"));
ASSERT_EQ("v1", Get("foo"));
Compact("a", "z");
const int num_files = CountFiles();
env_->no_space_.store(true, std::memory_order_release);
for (int i = 0; i < 10; i++) {
for (int level = 0; level < config::kNumLevels - 1; level++) {
dbfull()->TEST_CompactRange(level, nullptr, nullptr);
}
}
env_->no_space_.store(false, std::memory_order_release);
ASSERT_LT(CountFiles(), num_files + 3);
}
TEST_F(DBTest, NonWritableFileSystem) {
Options options = CurrentOptions();
options.write_buffer_size = 1000;
options.env = env_;
Reopen(&options);
ASSERT_LEVELDB_OK(Put("foo", "v1"));
env_->non_writable_.store(true, std::memory_order_release);
std::string big(100000, 'x');
int errors = 0;
for (int i = 0; i < 20; i++) {
std::fprintf(stderr, "iter %d; errors %d\n", i, errors);
if (!Put("foo", big).ok()) {
errors++;
DelayMilliseconds(100);
}
}
ASSERT_GT(errors, 0);
env_->non_writable_.store(false, std::memory_order_release);
}
TEST_F(DBTest, WriteSyncError) {
Options options = CurrentOptions();
options.env = env_;
Reopen(&options);
env_->data_sync_error_.store(true, std::memory_order_release);
WriteOptions w;
ASSERT_LEVELDB_OK(db_->Put(w, "k1", "v1"));
ASSERT_EQ("v1", Get("k1"));
w.sync = true;
ASSERT_TRUE(!db_->Put(w, "k2", "v2").ok());
ASSERT_EQ("v1", Get("k1"));
ASSERT_EQ("NOT_FOUND", Get("k2"));
env_->data_sync_error_.store(false, std::memory_order_release);
w.sync = false;
ASSERT_TRUE(!db_->Put(w, "k3", "v3").ok());
ASSERT_EQ("v1", Get("k1"));
ASSERT_EQ("NOT_FOUND", Get("k2"));
ASSERT_EQ("NOT_FOUND", Get("k3"));
}
TEST_F(DBTest, ManifestWriteError) {
for (int iter = 0; iter < 2; iter++) {
std::atomic<bool>* error_type = (iter == 0) ? &env_->manifest_sync_error_
: &env_->manifest_write_error_;
Options options = CurrentOptions();
options.env = env_;
options.create_if_missing = true;
options.error_if_exists = false;
DestroyAndReopen(&options);
ASSERT_LEVELDB_OK(Put("foo", "bar"));
ASSERT_EQ("bar", Get("foo"));
dbfull()->TEST_CompactMemTable();
ASSERT_EQ("bar", Get("foo"));
const int last = config::kMaxMemCompactLevel;
ASSERT_EQ(NumTableFilesAtLevel(last), 1);
error_type->store(true, std::memory_order_release);
dbfull()->TEST_CompactRange(last, nullptr, nullptr);
ASSERT_EQ("bar", Get("foo"));
error_type->store(false, std::memory_order_release);
Reopen(&options);
ASSERT_EQ("bar", Get("foo"));
}
}
TEST_F(DBTest, MissingSSTFile) {
ASSERT_LEVELDB_OK(Put("foo", "bar"));
ASSERT_EQ("bar", Get("foo"));
dbfull()->TEST_CompactMemTable();
ASSERT_EQ("bar", Get("foo"));
Close();
ASSERT_TRUE(DeleteAnSSTFile());
Options options = CurrentOptions();
options.paranoid_checks = true;
Status s = TryReopen(&options);
ASSERT_TRUE(!s.ok());
ASSERT_TRUE(s.ToString().find("issing") != std::string::npos) << s.ToString();
}
TEST_F(DBTest, StillReadSST) {
ASSERT_LEVELDB_OK(Put("foo", "bar"));
ASSERT_EQ("bar", Get("foo"));
dbfull()->TEST_CompactMemTable();
ASSERT_EQ("bar", Get("foo"));
Close();
ASSERT_GT(RenameLDBToSST(), 0);
Options options = CurrentOptions();
options.paranoid_checks = true;
Status s = TryReopen(&options);
ASSERT_TRUE(s.ok());
ASSERT_EQ("bar", Get("foo"));
}
TEST_F(DBTest, FilesDeletedAfterCompaction) {
ASSERT_LEVELDB_OK(Put("foo", "v2"));
Compact("a", "z");
const int num_files = CountFiles();
for (int i = 0; i < 10; i++) {
ASSERT_LEVELDB_OK(Put("foo", "v2"));
Compact("a", "z");
}
ASSERT_EQ(CountFiles(), num_files);
}
TEST_F(DBTest, BloomFilter) {
env_->count_random_reads_ = true;
Options options = CurrentOptions();
options.env = env_;
options.block_cache = NewLRUCache(0);
options.filter_policy = NewBloomFilterPolicy(10);
Reopen(&options);
const int N = 10000;
for (int i = 0; i < N; i++) {
ASSERT_LEVELDB_OK(Put(Key(i), Key(i)));
}
Compact("a", "z");
for (int i = 0; i < N; i += 100) {
ASSERT_LEVELDB_OK(Put(Key(i), Key(i)));
}
dbfull()->TEST_CompactMemTable();
env_->delay_data_sync_.store(true, std::memory_order_release);
env_->random_read_counter_.Reset();
for (int i = 0; i < N; i++) {
ASSERT_EQ(Key(i), Get(Key(i)));
}
int reads = env_->random_read_counter_.Read();
std::fprintf(stderr, "%d present => %d reads\n", N, reads);
ASSERT_GE(reads, N);
ASSERT_LE(reads, N + 2 * N / 100);
env_->random_read_counter_.Reset();
for (int i = 0; i < N; i++) {
ASSERT_EQ("NOT_FOUND", Get(Key(i) + ".missing"));
}
reads = env_->random_read_counter_.Read();
std::fprintf(stderr, "%d missing => %d reads\n", N, reads);
ASSERT_LE(reads, 3 * N / 100);
env_->delay_data_sync_.store(false, std::memory_order_release);
Close();
delete options.block_cache;
delete options.filter_policy;
}
TEST_F(DBTest, LogCloseError) {
const int kValueSize = 20000;
const int kWriteCount = 10;
const int kWriteBufferSize = (kValueSize * kWriteCount) / 2;
Options options = CurrentOptions();
options.env = env_;
options.write_buffer_size = kWriteBufferSize;
Reopen(&options);
env_->log_file_close_.store(true, std::memory_order_release);
std::string value(kValueSize, 'x');
Status s;
for (int i = 0; i < kWriteCount && s.ok(); i++) {
s = Put(Key(i), value);
}
ASSERT_TRUE(!s.ok()) << "succeeded even after log file Close failure";
s = Put("hello", "world");
ASSERT_TRUE(!s.ok()) << "write succeeded after log file Close failure";
env_->log_file_close_.store(false, std::memory_order_release);
}
namespace {
static const int kNumThreads = 4;
static const int kTestSeconds = 10;
static const int kNumKeys = 1000;
struct MTState {
DBTest* test;
std::atomic<bool> stop;
std::atomic<int> counter[kNumThreads];
std::atomic<bool> thread_done[kNumThreads];
};
struct MTThread {
MTState* state;
int id;
};
static void MTThreadBody(void* arg) {
MTThread* t = reinterpret_cast<MTThread*>(arg);
int id = t->id;
DB* db = t->state->test->db_;
int counter = 0;
std::fprintf(stderr, "... starting thread %d\n", id);
Random rnd(1000 + id);
std::string value;
char valbuf[1500];
while (!t->state->stop.load(std::memory_order_acquire)) {
t->state->counter[id].store(counter, std::memory_order_release);
int key = rnd.Uniform(kNumKeys);
char keybuf[20];
std::snprintf(keybuf, sizeof(keybuf), "%016d", key);
if (rnd.OneIn(2)) {
std::snprintf(valbuf, sizeof(valbuf), "%d.%d.%-1000d", key, id,
static_cast<int>(counter));
ASSERT_LEVELDB_OK(db->Put(WriteOptions(), Slice(keybuf), Slice(valbuf)));
} else {
Status s = db->Get(ReadOptions(), Slice(keybuf), &value);
if (s.IsNotFound()) {
} else {
ASSERT_LEVELDB_OK(s);
int k, w, c;
ASSERT_EQ(3, sscanf(value.c_str(), "%d.%d.%d", &k, &w, &c)) << value;
ASSERT_EQ(k, key);
ASSERT_GE(w, 0);
ASSERT_LT(w, kNumThreads);
ASSERT_LE(c, t->state->counter[w].load(std::memory_order_acquire));
}
}
counter++;
}
t->state->thread_done[id].store(true, std::memory_order_release);
std::fprintf(stderr, "... stopping thread %d after %d ops\n", id, counter);
}
}
TEST_F(DBTest, MultiThreaded) {
do {
MTState mt;
mt.test = this;
mt.stop.store(false, std::memory_order_release);
for (int id = 0; id < kNumThreads; id++) {
mt.counter[id].store(false, std::memory_order_release);
mt.thread_done[id].store(false, std::memory_order_release);
}
MTThread thread[kNumThreads];
for (int id = 0; id < kNumThreads; id++) {
thread[id].state = &mt;
thread[id].id = id;
env_->StartThread(MTThreadBody, &thread[id]);
}
DelayMilliseconds(kTestSeconds * 1000);
mt.stop.store(true, std::memory_order_release);
for (int id = 0; id < kNumThreads; id++) {
while (!mt.thread_done[id].load(std::memory_order_acquire)) {
DelayMilliseconds(100);
}
}
} while (ChangeOptions());
}
namespace {
typedef std::map<std::string, std::string> KVMap;
}
class ModelDB : public DB {
public:
class ModelSnapshot : public Snapshot {
public:
KVMap map_;
};
explicit ModelDB(const Options& options) : options_(options) {}
~ModelDB() override = default;
Status Put(const WriteOptions& o, const Slice& k, const Slice& v) override {
return DB::Put(o, k, v);
}
Status Delete(const WriteOptions& o, const Slice& key) override {
return DB::Delete(o, key);
}
Status Get(const ReadOptions& options, const Slice& key,
std::string* value) override {
assert(false);
return Status::NotFound(key);
}
Iterator* NewIterator(const ReadOptions& options) override {
if (options.snapshot == nullptr) {
KVMap* saved = new KVMap;
*saved = map_;
return new ModelIter(saved, true);
} else {
const KVMap* snapshot_state =
&(reinterpret_cast<const ModelSnapshot*>(options.snapshot)->map_);
return new ModelIter(snapshot_state, false);
}
}
const Snapshot* GetSnapshot() override {
ModelSnapshot* snapshot = new ModelSnapshot;
snapshot->map_ = map_;
return snapshot;
}
void ReleaseSnapshot(const Snapshot* snapshot) override {
delete reinterpret_cast<const ModelSnapshot*>(snapshot);
}
Status Write(const WriteOptions& options, WriteBatch* batch) override {
class Handler : public WriteBatch::Handler {
public:
KVMap* map_;
void Put(const Slice& key, const Slice& value) override {
(*map_)[key.ToString()] = value.ToString();
}
void Delete(const Slice& key) override { map_->erase(key.ToString()); }
};
Handler handler;
handler.map_ = &map_;
return batch->Iterate(&handler);
}
bool GetProperty(const Slice& property, std::string* value) override {
return false;
}
void GetApproximateSizes(const Range* r, int n, uint64_t* sizes) override {
for (int i = 0; i < n; i++) {
sizes[i] = 0;
}
}
void CompactRange(const Slice* start, const Slice* end) override {}
private:
class ModelIter : public Iterator {
public:
ModelIter(const KVMap* map, bool owned)
: map_(map), owned_(owned), iter_(map_->end()) {}
~ModelIter() override {
if (owned_) delete map_;
}
bool Valid() const override { return iter_ != map_->end(); }
void SeekToFirst() override { iter_ = map_->begin(); }
void SeekToLast() override {
if (map_->empty()) {
iter_ = map_->end();
} else {
iter_ = map_->find(map_->rbegin()->first);
}
}
void Seek(const Slice& k) override {
iter_ = map_->lower_bound(k.ToString());
}
void Next() override { ++iter_; }
void Prev() override { --iter_; }
Slice key() const override { return iter_->first; }
Slice value() const override { return iter_->second; }
Status status() const override { return Status::OK(); }
private:
const KVMap* const map_;
const bool owned_;
KVMap::const_iterator iter_;
};
const Options options_;
KVMap map_;
};
static bool CompareIterators(int step, DB* model, DB* db,
const Snapshot* model_snap,
const Snapshot* db_snap) {
ReadOptions options;
options.snapshot = model_snap;
Iterator* miter = model->NewIterator(options);
options.snapshot = db_snap;
Iterator* dbiter = db->NewIterator(options);
bool ok = true;
int count = 0;
std::vector<std::string> seek_keys;
for (miter->SeekToFirst(), dbiter->SeekToFirst();
ok && miter->Valid() && dbiter->Valid(); miter->Next(), dbiter->Next()) {
count++;
if (miter->key().compare(dbiter->key()) != 0) {
std::fprintf(stderr, "step %d: Key mismatch: '%s' vs. '%s'\n", step,
EscapeString(miter->key()).c_str(),
EscapeString(dbiter->key()).c_str());
ok = false;
break;
}
if (miter->value().compare(dbiter->value()) != 0) {
std::fprintf(stderr,
"step %d: Value mismatch for key '%s': '%s' vs. '%s'\n",
step, EscapeString(miter->key()).c_str(),
EscapeString(miter->value()).c_str(),
EscapeString(miter->value()).c_str());
ok = false;
break;
}
if (count % 10 == 0) {
seek_keys.push_back(miter->key().ToString());
}
}
if (ok) {
if (miter->Valid() != dbiter->Valid()) {
std::fprintf(stderr, "step %d: Mismatch at end of iterators: %d vs. %d\n",
step, miter->Valid(), dbiter->Valid());
ok = false;
}
}
if (ok) {
for (auto kiter = seek_keys.begin(); ok && kiter != seek_keys.end();
++kiter) {
miter->Seek(*kiter);
dbiter->Seek(*kiter);
if (!miter->Valid() || !dbiter->Valid()) {
std::fprintf(stderr, "step %d: Seek iterators invalid: %d vs. %d\n",
step, miter->Valid(), dbiter->Valid());
ok = false;
}
if (miter->key().compare(dbiter->key()) != 0) {
std::fprintf(stderr, "step %d: Seek key mismatch: '%s' vs. '%s'\n",
step, EscapeString(miter->key()).c_str(),
EscapeString(dbiter->key()).c_str());
ok = false;
break;
}
if (miter->value().compare(dbiter->value()) != 0) {
std::fprintf(
stderr,
"step %d: Seek value mismatch for key '%s': '%s' vs. '%s'\n", step,
EscapeString(miter->key()).c_str(),
EscapeString(miter->value()).c_str(),
EscapeString(miter->value()).c_str());
ok = false;
break;
}
}
}
std::fprintf(stderr, "%d entries compared: ok=%d\n", count, ok);
delete miter;
delete dbiter;
return ok;
}
TEST_F(DBTest, Randomized) {
Random rnd(test::RandomSeed());
do {
ModelDB model(CurrentOptions());
const int N = 10000;
const Snapshot* model_snap = nullptr;
const Snapshot* db_snap = nullptr;
std::string k, v;
for (int step = 0; step < N; step++) {
if (step % 100 == 0) {
std::fprintf(stderr, "Step %d of %d\n", step, N);
}
int p = rnd.Uniform(100);
if (p < 45) {
k = RandomKey(&rnd);
v = RandomString(
&rnd, rnd.OneIn(20) ? 100 + rnd.Uniform(100) : rnd.Uniform(8));
ASSERT_LEVELDB_OK(model.Put(WriteOptions(), k, v));
ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), k, v));
} else if (p < 90) {
k = RandomKey(&rnd);
ASSERT_LEVELDB_OK(model.Delete(WriteOptions(), k));
ASSERT_LEVELDB_OK(db_->Delete(WriteOptions(), k));
} else {
WriteBatch b;
const int num = rnd.Uniform(8);
for (int i = 0; i < num; i++) {
if (i == 0 || !rnd.OneIn(10)) {
k = RandomKey(&rnd);
} else {
}
if (rnd.OneIn(2)) {
v = RandomString(&rnd, rnd.Uniform(10));
b.Put(k, v);
} else {
b.Delete(k);
}
}
ASSERT_LEVELDB_OK(model.Write(WriteOptions(), &b));
ASSERT_LEVELDB_OK(db_->Write(WriteOptions(), &b));
}
if ((step % 100) == 0) {
ASSERT_TRUE(CompareIterators(step, &model, db_, nullptr, nullptr));
ASSERT_TRUE(CompareIterators(step, &model, db_, model_snap, db_snap));
if (model_snap != nullptr) model.ReleaseSnapshot(model_snap);
if (db_snap != nullptr) db_->ReleaseSnapshot(db_snap);
Reopen();
ASSERT_TRUE(CompareIterators(step, &model, db_, nullptr, nullptr));
model_snap = model.GetSnapshot();
db_snap = db_->GetSnapshot();
}
}
if (model_snap != nullptr) model.ReleaseSnapshot(model_snap);
if (db_snap != nullptr) db_->ReleaseSnapshot(db_snap);
} while (ChangeOptions());
}
} | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/include/leveldb/db.h | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/db/db_test.cc | 23e35d792b9154f922b8b575b12596a4d8664c65 |
b716890b-3e9c-405c-930e-85296b06a003 | cpp | google/leveldb | skiplist | db/skiplist.h | db/skiplist_test.cc | #ifndef STORAGE_LEVELDB_DB_SKIPLIST_H_
#define STORAGE_LEVELDB_DB_SKIPLIST_H_
#include <atomic>
#include <cassert>
#include <cstdlib>
#include "util/arena.h"
#include "util/random.h"
namespace leveldb {
template <typename Key, class Comparator>
class SkipList {
private:
struct Node;
public:
explicit SkipList(Comparator cmp, Arena* arena);
SkipList(const SkipList&) = delete;
SkipList& operator=(const SkipList&) = delete;
void Insert(const Key& key);
bool Contains(const Key& key) const;
class Iterator {
public:
explicit Iterator(const SkipList* list);
bool Valid() const;
const Key& key() const;
void Next();
void Prev();
void Seek(const Key& target);
void SeekToFirst();
void SeekToLast();
private:
const SkipList* list_;
Node* node_;
};
private:
enum { kMaxHeight = 12 };
inline int GetMaxHeight() const {
return max_height_.load(std::memory_order_relaxed);
}
Node* NewNode(const Key& key, int height);
int RandomHeight();
bool Equal(const Key& a, const Key& b) const { return (compare_(a, b) == 0); }
bool KeyIsAfterNode(const Key& key, Node* n) const;
Node* FindGreaterOrEqual(const Key& key, Node** prev) const;
Node* FindLessThan(const Key& key) const;
Node* FindLast() const;
Comparator const compare_;
Arena* const arena_;
Node* const head_;
std::atomic<int> max_height_;
Random rnd_;
};
template <typename Key, class Comparator>
struct SkipList<Key, Comparator>::Node {
explicit Node(const Key& k) : key(k) {}
Key const key;
Node* Next(int n) {
assert(n >= 0);
return next_[n].load(std::memory_order_acquire);
}
void SetNext(int n, Node* x) {
assert(n >= 0);
next_[n].store(x, std::memory_order_release);
}
Node* NoBarrier_Next(int n) {
assert(n >= 0);
return next_[n].load(std::memory_order_relaxed);
}
void NoBarrier_SetNext(int n, Node* x) {
assert(n >= 0);
next_[n].store(x, std::memory_order_relaxed);
}
private:
std::atomic<Node*> next_[1];
};
template <typename Key, class Comparator>
typename SkipList<Key, Comparator>::Node* SkipList<Key, Comparator>::NewNode(
const Key& key, int height) {
char* const node_memory = arena_->AllocateAligned(
sizeof(Node) + sizeof(std::atomic<Node*>) * (height - 1));
return new (node_memory) Node(key);
}
template <typename Key, class Comparator>
inline SkipList<Key, Comparator>::Iterator::Iterator(const SkipList* list) {
list_ = list;
node_ = nullptr;
}
template <typename Key, class Comparator>
inline bool SkipList<Key, Comparator>::Iterator::Valid() const {
return node_ != nullptr;
}
template <typename Key, class Comparator>
inline const Key& SkipList<Key, Comparator>::Iterator::key() const {
assert(Valid());
return node_->key;
}
template <typename Key, class Comparator>
inline void SkipList<Key, Comparator>::Iterator::Next() {
assert(Valid());
node_ = node_->Next(0);
}
template <typename Key, class Comparator>
inline void SkipList<Key, Comparator>::Iterator::Prev() {
assert(Valid());
node_ = list_->FindLessThan(node_->key);
if (node_ == list_->head_) {
node_ = nullptr;
}
}
template <typename Key, class Comparator>
inline void SkipList<Key, Comparator>::Iterator::Seek(const Key& target) {
node_ = list_->FindGreaterOrEqual(target, nullptr);
}
template <typename Key, class Comparator>
inline void SkipList<Key, Comparator>::Iterator::SeekToFirst() {
node_ = list_->head_->Next(0);
}
template <typename Key, class Comparator>
inline void SkipList<Key, Comparator>::Iterator::SeekToLast() {
node_ = list_->FindLast();
if (node_ == list_->head_) {
node_ = nullptr;
}
}
template <typename Key, class Comparator>
int SkipList<Key, Comparator>::RandomHeight() {
static const unsigned int kBranching = 4;
int height = 1;
while (height < kMaxHeight && rnd_.OneIn(kBranching)) {
height++;
}
assert(height > 0);
assert(height <= kMaxHeight);
return height;
}
template <typename Key, class Comparator>
bool SkipList<Key, Comparator>::KeyIsAfterNode(const Key& key, Node* n) const {
return (n != nullptr) && (compare_(n->key, key) < 0);
}
template <typename Key, class Comparator>
typename SkipList<Key, Comparator>::Node*
SkipList<Key, Comparator>::FindGreaterOrEqual(const Key& key,
Node** prev) const {
Node* x = head_;
int level = GetMaxHeight() - 1;
while (true) {
Node* next = x->Next(level);
if (KeyIsAfterNode(key, next)) {
x = next;
} else {
if (prev != nullptr) prev[level] = x;
if (level == 0) {
return next;
} else {
level--;
}
}
}
}
template <typename Key, class Comparator>
typename SkipList<Key, Comparator>::Node*
SkipList<Key, Comparator>::FindLessThan(const Key& key) const {
Node* x = head_;
int level = GetMaxHeight() - 1;
while (true) {
assert(x == head_ || compare_(x->key, key) < 0);
Node* next = x->Next(level);
if (next == nullptr || compare_(next->key, key) >= 0) {
if (level == 0) {
return x;
} else {
level--;
}
} else {
x = next;
}
}
}
template <typename Key, class Comparator>
typename SkipList<Key, Comparator>::Node* SkipList<Key, Comparator>::FindLast()
const {
Node* x = head_;
int level = GetMaxHeight() - 1;
while (true) {
Node* next = x->Next(level);
if (next == nullptr) {
if (level == 0) {
return x;
} else {
level--;
}
} else {
x = next;
}
}
}
template <typename Key, class Comparator>
SkipList<Key, Comparator>::SkipList(Comparator cmp, Arena* arena)
: compare_(cmp),
arena_(arena),
head_(NewNode(0 , kMaxHeight)),
max_height_(1),
rnd_(0xdeadbeef) {
for (int i = 0; i < kMaxHeight; i++) {
head_->SetNext(i, nullptr);
}
}
template <typename Key, class Comparator>
void SkipList<Key, Comparator>::Insert(const Key& key) {
Node* prev[kMaxHeight];
Node* x = FindGreaterOrEqual(key, prev);
assert(x == nullptr || !Equal(key, x->key));
int height = RandomHeight();
if (height > GetMaxHeight()) {
for (int i = GetMaxHeight(); i < height; i++) {
prev[i] = head_;
}
max_height_.store(height, std::memory_order_relaxed);
}
x = NewNode(key, height);
for (int i = 0; i < height; i++) {
x->NoBarrier_SetNext(i, prev[i]->NoBarrier_Next(i));
prev[i]->SetNext(i, x);
}
}
template <typename Key, class Comparator>
bool SkipList<Key, Comparator>::Contains(const Key& key) const {
Node* x = FindGreaterOrEqual(key, nullptr);
if (x != nullptr && Equal(key, x->key)) {
return true;
} else {
return false;
}
}
}
#endif | #include "db/skiplist.h"
#include <atomic>
#include <set>
#include "gtest/gtest.h"
#include "leveldb/env.h"
#include "port/port.h"
#include "port/thread_annotations.h"
#include "util/arena.h"
#include "util/hash.h"
#include "util/random.h"
#include "util/testutil.h"
namespace leveldb {
typedef uint64_t Key;
struct Comparator {
int operator()(const Key& a, const Key& b) const {
if (a < b) {
return -1;
} else if (a > b) {
return +1;
} else {
return 0;
}
}
};
TEST(SkipTest, Empty) {
Arena arena;
Comparator cmp;
SkipList<Key, Comparator> list(cmp, &arena);
ASSERT_TRUE(!list.Contains(10));
SkipList<Key, Comparator>::Iterator iter(&list);
ASSERT_TRUE(!iter.Valid());
iter.SeekToFirst();
ASSERT_TRUE(!iter.Valid());
iter.Seek(100);
ASSERT_TRUE(!iter.Valid());
iter.SeekToLast();
ASSERT_TRUE(!iter.Valid());
}
TEST(SkipTest, InsertAndLookup) {
const int N = 2000;
const int R = 5000;
Random rnd(1000);
std::set<Key> keys;
Arena arena;
Comparator cmp;
SkipList<Key, Comparator> list(cmp, &arena);
for (int i = 0; i < N; i++) {
Key key = rnd.Next() % R;
if (keys.insert(key).second) {
list.Insert(key);
}
}
for (int i = 0; i < R; i++) {
if (list.Contains(i)) {
ASSERT_EQ(keys.count(i), 1);
} else {
ASSERT_EQ(keys.count(i), 0);
}
}
{
SkipList<Key, Comparator>::Iterator iter(&list);
ASSERT_TRUE(!iter.Valid());
iter.Seek(0);
ASSERT_TRUE(iter.Valid());
ASSERT_EQ(*(keys.begin()), iter.key());
iter.SeekToFirst();
ASSERT_TRUE(iter.Valid());
ASSERT_EQ(*(keys.begin()), iter.key());
iter.SeekToLast();
ASSERT_TRUE(iter.Valid());
ASSERT_EQ(*(keys.rbegin()), iter.key());
}
for (int i = 0; i < R; i++) {
SkipList<Key, Comparator>::Iterator iter(&list);
iter.Seek(i);
std::set<Key>::iterator model_iter = keys.lower_bound(i);
for (int j = 0; j < 3; j++) {
if (model_iter == keys.end()) {
ASSERT_TRUE(!iter.Valid());
break;
} else {
ASSERT_TRUE(iter.Valid());
ASSERT_EQ(*model_iter, iter.key());
++model_iter;
iter.Next();
}
}
}
{
SkipList<Key, Comparator>::Iterator iter(&list);
iter.SeekToLast();
for (std::set<Key>::reverse_iterator model_iter = keys.rbegin();
model_iter != keys.rend(); ++model_iter) {
ASSERT_TRUE(iter.Valid());
ASSERT_EQ(*model_iter, iter.key());
iter.Prev();
}
ASSERT_TRUE(!iter.Valid());
}
}
class ConcurrentTest {
private:
static constexpr uint32_t K = 4;
static uint64_t key(Key key) { return (key >> 40); }
static uint64_t gen(Key key) { return (key >> 8) & 0xffffffffu; }
static uint64_t hash(Key key) { return key & 0xff; }
static uint64_t HashNumbers(uint64_t k, uint64_t g) {
uint64_t data[2] = {k, g};
return Hash(reinterpret_cast<char*>(data), sizeof(data), 0);
}
static Key MakeKey(uint64_t k, uint64_t g) {
static_assert(sizeof(Key) == sizeof(uint64_t), "");
assert(k <= K);
assert(g <= 0xffffffffu);
return ((k << 40) | (g << 8) | (HashNumbers(k, g) & 0xff));
}
static bool IsValidKey(Key k) {
return hash(k) == (HashNumbers(key(k), gen(k)) & 0xff);
}
static Key RandomTarget(Random* rnd) {
switch (rnd->Next() % 10) {
case 0:
return MakeKey(0, 0);
case 1:
return MakeKey(K, 0);
default:
return MakeKey(rnd->Next() % K, 0);
}
}
struct State {
std::atomic<int> generation[K];
void Set(int k, int v) {
generation[k].store(v, std::memory_order_release);
}
int Get(int k) { return generation[k].load(std::memory_order_acquire); }
State() {
for (int k = 0; k < K; k++) {
Set(k, 0);
}
}
};
State current_;
Arena arena_;
SkipList<Key, Comparator> list_;
public:
ConcurrentTest() : list_(Comparator(), &arena_) {}
void WriteStep(Random* rnd) {
const uint32_t k = rnd->Next() % K;
const intptr_t g = current_.Get(k) + 1;
const Key key = MakeKey(k, g);
list_.Insert(key);
current_.Set(k, g);
}
void ReadStep(Random* rnd) {
State initial_state;
for (int k = 0; k < K; k++) {
initial_state.Set(k, current_.Get(k));
}
Key pos = RandomTarget(rnd);
SkipList<Key, Comparator>::Iterator iter(&list_);
iter.Seek(pos);
while (true) {
Key current;
if (!iter.Valid()) {
current = MakeKey(K, 0);
} else {
current = iter.key();
ASSERT_TRUE(IsValidKey(current)) << current;
}
ASSERT_LE(pos, current) << "should not go backwards";
while (pos < current) {
ASSERT_LT(key(pos), K) << pos;
ASSERT_TRUE((gen(pos) == 0) ||
(gen(pos) > static_cast<Key>(initial_state.Get(key(pos)))))
<< "key: " << key(pos) << "; gen: " << gen(pos)
<< "; initgen: " << initial_state.Get(key(pos));
if (key(pos) < key(current)) {
pos = MakeKey(key(pos) + 1, 0);
} else {
pos = MakeKey(key(pos), gen(pos) + 1);
}
}
if (!iter.Valid()) {
break;
}
if (rnd->Next() % 2) {
iter.Next();
pos = MakeKey(key(pos), gen(pos) + 1);
} else {
Key new_target = RandomTarget(rnd);
if (new_target > pos) {
pos = new_target;
iter.Seek(new_target);
}
}
}
}
};
constexpr uint32_t ConcurrentTest::K;
TEST(SkipTest, ConcurrentWithoutThreads) {
ConcurrentTest test;
Random rnd(test::RandomSeed());
for (int i = 0; i < 10000; i++) {
test.ReadStep(&rnd);
test.WriteStep(&rnd);
}
}
class TestState {
public:
ConcurrentTest t_;
int seed_;
std::atomic<bool> quit_flag_;
enum ReaderState { STARTING, RUNNING, DONE };
explicit TestState(int s)
: seed_(s), quit_flag_(false), state_(STARTING), state_cv_(&mu_) {}
void Wait(ReaderState s) LOCKS_EXCLUDED(mu_) {
mu_.Lock();
while (state_ != s) {
state_cv_.Wait();
}
mu_.Unlock();
}
void Change(ReaderState s) LOCKS_EXCLUDED(mu_) {
mu_.Lock();
state_ = s;
state_cv_.Signal();
mu_.Unlock();
}
private:
port::Mutex mu_;
ReaderState state_ GUARDED_BY(mu_);
port::CondVar state_cv_ GUARDED_BY(mu_);
};
static void ConcurrentReader(void* arg) {
TestState* state = reinterpret_cast<TestState*>(arg);
Random rnd(state->seed_);
int64_t reads = 0;
state->Change(TestState::RUNNING);
while (!state->quit_flag_.load(std::memory_order_acquire)) {
state->t_.ReadStep(&rnd);
++reads;
}
state->Change(TestState::DONE);
}
static void RunConcurrent(int run) {
const int seed = test::RandomSeed() + (run * 100);
Random rnd(seed);
const int N = 1000;
const int kSize = 1000;
for (int i = 0; i < N; i++) {
if ((i % 100) == 0) {
std::fprintf(stderr, "Run %d of %d\n", i, N);
}
TestState state(seed + 1);
Env::Default()->Schedule(ConcurrentReader, &state);
state.Wait(TestState::RUNNING);
for (int i = 0; i < kSize; i++) {
state.t_.WriteStep(&rnd);
}
state.quit_flag_.store(true, std::memory_order_release);
state.Wait(TestState::DONE);
}
}
TEST(SkipTest, Concurrent1) { RunConcurrent(1); }
TEST(SkipTest, Concurrent2) { RunConcurrent(2); }
TEST(SkipTest, Concurrent3) { RunConcurrent(3); }
TEST(SkipTest, Concurrent4) { RunConcurrent(4); }
TEST(SkipTest, Concurrent5) { RunConcurrent(5); }
} | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/db/skiplist.h | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/db/skiplist_test.cc | 23e35d792b9154f922b8b575b12596a4d8664c65 |
8e55df92-c213-4cd5-876f-69f4af4f9644 | cpp | google/tsl | str_util | tsl/platform/str_util.cc | tsl/platform/str_util_test.cc | #include "tsl/platform/str_util.h"
#include <cctype>
#include <cstdint>
#include <string>
#include "absl/strings/ascii.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/stringpiece.h"
namespace tsl {
namespace str_util {
size_t RemoveLeadingWhitespace(absl::string_view* text) {
absl::string_view new_text = absl::StripLeadingAsciiWhitespace(*text);
size_t count = text->size() - new_text.size();
*text = new_text;
return count;
}
size_t RemoveTrailingWhitespace(absl::string_view* text) {
absl::string_view new_text = absl::StripTrailingAsciiWhitespace(*text);
size_t count = text->size() - new_text.size();
*text = new_text;
return count;
}
size_t RemoveWhitespaceContext(absl::string_view* text) {
absl::string_view new_text = absl::StripAsciiWhitespace(*text);
size_t count = text->size() - new_text.size();
*text = new_text;
return count;
}
bool ConsumeLeadingDigits(absl::string_view* s, uint64_t* val) {
const char* p = s->data();
const char* limit = p + s->size();
uint64_t v = 0;
while (p < limit) {
const char c = *p;
if (c < '0' || c > '9') break;
uint64_t new_v = (v * 10) + (c - '0');
if (new_v / 8 < v) {
return false;
}
v = new_v;
p++;
}
if (p > s->data()) {
s->remove_prefix(p - s->data());
*val = v;
return true;
} else {
return false;
}
}
bool ConsumeNonWhitespace(absl::string_view* s, absl::string_view* val) {
const char* p = s->data();
const char* limit = p + s->size();
while (p < limit) {
const char c = *p;
if (isspace(c)) break;
p++;
}
const size_t n = p - s->data();
if (n > 0) {
*val = absl::string_view(s->data(), n);
s->remove_prefix(n);
return true;
} else {
*val = absl::string_view();
return false;
}
}
void TitlecaseString(string* s, absl::string_view delimiters) {
bool upper = true;
for (string::iterator ss = s->begin(); ss != s->end(); ++ss) {
if (upper) {
*ss = toupper(*ss);
}
upper = (delimiters.find(*ss) != absl::string_view::npos);
}
}
string StringReplace(absl::string_view s, absl::string_view oldsub,
absl::string_view newsub, bool replace_all) {
string res(s);
size_t pos = 0;
while ((pos = res.find(oldsub.data(), pos, oldsub.size())) != string::npos) {
res.replace(pos, oldsub.size(), newsub.data(), newsub.size());
pos += newsub.size();
if (oldsub.empty()) {
pos++;
}
if (!replace_all) {
break;
}
}
return res;
}
size_t Strnlen(const char* str, const size_t string_max_len) {
size_t len = 0;
while (len < string_max_len && str[len] != '\0') {
++len;
}
return len;
}
string ArgDefCase(absl::string_view s) {
const size_t n = s.size();
size_t extra_us = 0;
size_t to_skip = 0;
for (size_t i = 0; i < n; ++i) {
if (i == to_skip && !isalpha(s[i])) {
++to_skip;
continue;
}
if (isupper(s[i]) && i != to_skip && i > 0 && isalnum(s[i - 1])) {
++extra_us;
}
}
string result(n + extra_us - to_skip, '_');
for (size_t i = to_skip, j = 0; i < n; ++i, ++j) {
DCHECK_LT(j, result.size());
char c = s[i];
if (isalnum(c)) {
if (isupper(c)) {
if (i != to_skip) {
DCHECK_GT(j, 0);
if (result[j - 1] != '_') ++j;
}
result[j] = tolower(c);
} else {
result[j] = c;
}
}
}
return result;
}
}
} | #include "tsl/platform/str_util.h"
#include <vector>
#include "tsl/platform/test.h"
namespace tsl {
TEST(CEscape, Basic) {
EXPECT_EQ(absl::CEscape("hello"), "hello");
EXPECT_EQ(absl::CEscape("hello\n"), "hello\\n");
EXPECT_EQ(absl::CEscape("hello\r"), "hello\\r");
EXPECT_EQ(absl::CEscape("\t\r\"'"), "\\t\\r\\\"\\'");
EXPECT_EQ(absl::CEscape("\320hi\200"), "\\320hi\\200");
}
string ExpectCUnescapeSuccess(absl::string_view source) {
string dest;
string error;
EXPECT_TRUE(absl::CUnescape(source, &dest, &error)) << error;
return dest;
}
TEST(CUnescape, Basic) {
EXPECT_EQ("hello", ExpectCUnescapeSuccess("hello"));
EXPECT_EQ("hello\n", ExpectCUnescapeSuccess("hello\\n"));
EXPECT_EQ("hello\r", ExpectCUnescapeSuccess("hello\\r"));
EXPECT_EQ("\t\r\"'", ExpectCUnescapeSuccess("\\t\\r\\\"\\'"));
EXPECT_EQ("\320hi\200", ExpectCUnescapeSuccess("\\320hi\\200"));
}
TEST(CUnescape, HandlesCopyOnWriteStrings) {
string dest = "hello";
string read = dest;
string error;
absl::string_view source = "llohe";
EXPECT_TRUE(absl::CUnescape(source, &dest, &error));
EXPECT_EQ("hello", read);
}
TEST(StripTrailingWhitespace, Basic) {
string test;
test = "hello";
absl::StripTrailingAsciiWhitespace(&test);
EXPECT_EQ(test, "hello");
test = "foo ";
absl::StripTrailingAsciiWhitespace(&test);
EXPECT_EQ(test, "foo");
test = " ";
absl::StripTrailingAsciiWhitespace(&test);
EXPECT_EQ(test, "");
test = "";
absl::StripTrailingAsciiWhitespace(&test);
EXPECT_EQ(test, "");
test = " abc\t";
absl::StripTrailingAsciiWhitespace(&test);
EXPECT_EQ(test, " abc");
}
TEST(RemoveLeadingWhitespace, Basic) {
string text = " \t \n \r Quick\t";
absl::string_view data(text);
EXPECT_EQ(str_util::RemoveLeadingWhitespace(&data), 11);
EXPECT_EQ(data, absl::string_view("Quick\t"));
EXPECT_EQ(str_util::RemoveLeadingWhitespace(&data), 0);
EXPECT_EQ(data, absl::string_view("Quick\t"));
}
TEST(RemoveLeadingWhitespace, TerminationHandling) {
string text = "\t";
absl::string_view data(text);
EXPECT_EQ(str_util::RemoveLeadingWhitespace(&data), 1);
EXPECT_EQ(data, absl::string_view(""));
EXPECT_EQ(str_util::RemoveLeadingWhitespace(&data), 0);
EXPECT_EQ(data, absl::string_view(""));
}
TEST(RemoveTrailingWhitespace, Basic) {
string text = " \t \n \r Quick \t";
absl::string_view data(text);
EXPECT_EQ(str_util::RemoveTrailingWhitespace(&data), 2);
EXPECT_EQ(data, absl::string_view(" \t \n \r Quick"));
EXPECT_EQ(str_util::RemoveTrailingWhitespace(&data), 0);
EXPECT_EQ(data, absl::string_view(" \t \n \r Quick"));
}
TEST(RemoveTrailingWhitespace, TerminationHandling) {
string text = "\t";
absl::string_view data(text);
EXPECT_EQ(str_util::RemoveTrailingWhitespace(&data), 1);
EXPECT_EQ(data, absl::string_view(""));
EXPECT_EQ(str_util::RemoveTrailingWhitespace(&data), 0);
EXPECT_EQ(data, absl::string_view(""));
}
TEST(RemoveWhitespaceContext, Basic) {
string text = " \t \n \r Quick \t";
absl::string_view data(text);
EXPECT_EQ(str_util::RemoveWhitespaceContext(&data), 13);
EXPECT_EQ(data, absl::string_view("Quick"));
EXPECT_EQ(str_util::RemoveWhitespaceContext(&data), 0);
EXPECT_EQ(data, absl::string_view("Quick"));
text = "";
data = text;
EXPECT_EQ(str_util::RemoveWhitespaceContext(&data), 0);
EXPECT_EQ(data, absl::string_view(""));
}
void TestConsumeLeadingDigits(absl::string_view s, int64_t expected,
absl::string_view remaining) {
uint64 v;
absl::string_view input(s);
if (str_util::ConsumeLeadingDigits(&input, &v)) {
EXPECT_EQ(v, static_cast<uint64>(expected));
EXPECT_EQ(input, remaining);
} else {
EXPECT_LT(expected, 0);
EXPECT_EQ(input, remaining);
}
}
TEST(ConsumeLeadingDigits, Basic) {
using str_util::ConsumeLeadingDigits;
TestConsumeLeadingDigits("123", 123, "");
TestConsumeLeadingDigits("a123", -1, "a123");
TestConsumeLeadingDigits("9_", 9, "_");
TestConsumeLeadingDigits("11111111111xyz", 11111111111ll, "xyz");
TestConsumeLeadingDigits("1111111111111111111111111111111xyz", -1,
"1111111111111111111111111111111xyz");
TestConsumeLeadingDigits("18446744073709551616xyz", -1,
"18446744073709551616xyz");
TestConsumeLeadingDigits("18446744073709551615xyz", 18446744073709551615ull,
"xyz");
TestConsumeLeadingDigits("184467440737095516159yz", -1,
"184467440737095516159yz");
}
void TestConsumeNonWhitespace(absl::string_view s, absl::string_view expected,
absl::string_view remaining) {
absl::string_view v;
absl::string_view input(s);
if (str_util::ConsumeNonWhitespace(&input, &v)) {
EXPECT_EQ(v, expected);
EXPECT_EQ(input, remaining);
} else {
EXPECT_EQ(expected, "");
EXPECT_EQ(input, remaining);
}
}
TEST(ConsumeNonWhitespace, Basic) {
TestConsumeNonWhitespace("", "", "");
TestConsumeNonWhitespace(" ", "", " ");
TestConsumeNonWhitespace("abc", "abc", "");
TestConsumeNonWhitespace("abc ", "abc", " ");
}
TEST(ConsumePrefix, Basic) {
string s("abcdef");
absl::string_view input(s);
EXPECT_FALSE(absl::ConsumePrefix(&input, "abcdefg"));
EXPECT_EQ(input, "abcdef");
EXPECT_FALSE(absl::ConsumePrefix(&input, "abce"));
EXPECT_EQ(input, "abcdef");
EXPECT_TRUE(absl::ConsumePrefix(&input, ""));
EXPECT_EQ(input, "abcdef");
EXPECT_FALSE(absl::ConsumePrefix(&input, "abcdeg"));
EXPECT_EQ(input, "abcdef");
EXPECT_TRUE(absl::ConsumePrefix(&input, "abcdef"));
EXPECT_EQ(input, "");
input = s;
EXPECT_TRUE(absl::ConsumePrefix(&input, "abcde"));
EXPECT_EQ(input, "f");
}
TEST(StripPrefix, Basic) {
EXPECT_EQ(absl::StripPrefix("abcdef", "abcdefg"), "abcdef");
EXPECT_EQ(absl::StripPrefix("abcdef", "abce"), "abcdef");
EXPECT_EQ(absl::StripPrefix("abcdef", ""), "abcdef");
EXPECT_EQ(absl::StripPrefix("abcdef", "abcdeg"), "abcdef");
EXPECT_EQ(absl::StripPrefix("abcdef", "abcdef"), "");
EXPECT_EQ(absl::StripPrefix("abcdef", "abcde"), "f");
}
TEST(JoinStrings, Basic) {
std::vector<string> s;
s = {"hi"};
EXPECT_EQ(absl::StrJoin(s, " "), "hi");
s = {"hi", "there", "strings"};
EXPECT_EQ(absl::StrJoin(s, " "), "hi there strings");
std::vector<absl::string_view> sp;
sp = {"hi"};
EXPECT_EQ(absl::StrJoin(sp, ",,"), "hi");
sp = {"hi", "there", "strings"};
EXPECT_EQ(absl::StrJoin(sp, "--"), "hi--there--strings");
}
TEST(JoinStrings, Join3) {
std::vector<string> s;
s = {"hi"};
auto l1 = [](string* out, string s) { *out += s; };
EXPECT_EQ(str_util::Join(s, " ", l1), "hi");
s = {"hi", "there", "strings"};
auto l2 = [](string* out, string s) { *out += s[0]; };
EXPECT_EQ(str_util::Join(s, " ", l2), "h t s");
}
TEST(Split, Basic) {
EXPECT_TRUE(str_util::Split("", ',').empty());
EXPECT_EQ(absl::StrJoin(str_util::Split("a", ','), "|"), "a");
EXPECT_EQ(absl::StrJoin(str_util::Split(",", ','), "|"), "|");
EXPECT_EQ(absl::StrJoin(str_util::Split("a,b,c", ','), "|"), "a|b|c");
EXPECT_EQ(absl::StrJoin(str_util::Split("a,,,b,,c,", ','), "|"), "a|||b||c|");
EXPECT_EQ(absl::StrJoin(str_util::Split("a!,!b,!c,", ",!"), "|"),
"a|||b||c|");
EXPECT_EQ(absl::StrJoin(
str_util::Split("a,,,b,,c,", ',', str_util::SkipEmpty()), "|"),
"a|b|c");
EXPECT_EQ(
absl::StrJoin(
str_util::Split("a, ,b,,c,", ',', str_util::SkipWhitespace()), "|"),
"a|b|c");
EXPECT_EQ(absl::StrJoin(str_util::Split("a. !b,;c,", ".,;!",
str_util::SkipWhitespace()),
"|"),
"a|b|c");
}
TEST(Lowercase, Basic) {
EXPECT_EQ("", absl::AsciiStrToLower(""));
EXPECT_EQ("hello", absl::AsciiStrToLower("hello"));
EXPECT_EQ("hello world", absl::AsciiStrToLower("Hello World"));
}
TEST(Uppercase, Basic) {
EXPECT_EQ("", absl::AsciiStrToUpper(""));
EXPECT_EQ("HELLO", absl::AsciiStrToUpper("hello"));
EXPECT_EQ("HELLO WORLD", absl::AsciiStrToUpper("Hello World"));
}
TEST(SnakeCase, Basic) {
EXPECT_EQ("", str_util::ArgDefCase(""));
EXPECT_EQ("", str_util::ArgDefCase("!"));
EXPECT_EQ("", str_util::ArgDefCase("5"));
EXPECT_EQ("", str_util::ArgDefCase("!:"));
EXPECT_EQ("", str_util::ArgDefCase("5-5"));
EXPECT_EQ("", str_util::ArgDefCase("_!"));
EXPECT_EQ("", str_util::ArgDefCase("_5"));
EXPECT_EQ("a", str_util::ArgDefCase("_a"));
EXPECT_EQ("a", str_util::ArgDefCase("_A"));
EXPECT_EQ("i", str_util::ArgDefCase("I"));
EXPECT_EQ("i", str_util::ArgDefCase("i"));
EXPECT_EQ("i_", str_util::ArgDefCase("I%"));
EXPECT_EQ("i_", str_util::ArgDefCase("i%"));
EXPECT_EQ("i", str_util::ArgDefCase("%I"));
EXPECT_EQ("i", str_util::ArgDefCase("-i"));
EXPECT_EQ("i", str_util::ArgDefCase("3i"));
EXPECT_EQ("i", str_util::ArgDefCase("32i"));
EXPECT_EQ("i3", str_util::ArgDefCase("i3"));
EXPECT_EQ("i_a3", str_util::ArgDefCase("i_A3"));
EXPECT_EQ("i_i", str_util::ArgDefCase("II"));
EXPECT_EQ("i_i", str_util::ArgDefCase("I_I"));
EXPECT_EQ("i__i", str_util::ArgDefCase("I__I"));
EXPECT_EQ("i_i_32", str_util::ArgDefCase("II-32"));
EXPECT_EQ("ii_32", str_util::ArgDefCase("Ii-32"));
EXPECT_EQ("hi_there", str_util::ArgDefCase("HiThere"));
EXPECT_EQ("hi_hi", str_util::ArgDefCase("Hi!Hi"));
EXPECT_EQ("hi_hi", str_util::ArgDefCase("HiHi"));
EXPECT_EQ("hihi", str_util::ArgDefCase("Hihi"));
EXPECT_EQ("hi_hi", str_util::ArgDefCase("Hi_Hi"));
}
TEST(TitlecaseString, Basic) {
string s = "sparse_lookup";
str_util::TitlecaseString(&s, "_");
ASSERT_EQ(s, "Sparse_Lookup");
s = "sparse_lookup";
str_util::TitlecaseString(&s, " ");
ASSERT_EQ(s, "Sparse_lookup");
s = "dense";
str_util::TitlecaseString(&s, " ");
ASSERT_EQ(s, "Dense");
}
TEST(StringReplace, Basic) {
EXPECT_EQ("XYZ_XYZ_XYZ", str_util::StringReplace("ABC_ABC_ABC", "ABC", "XYZ",
true));
}
TEST(StringReplace, OnlyFirst) {
EXPECT_EQ("XYZ_ABC_ABC", str_util::StringReplace("ABC_ABC_ABC", "ABC", "XYZ",
false));
}
TEST(StringReplace, IncreaseLength) {
EXPECT_EQ("a b c",
str_util::StringReplace("abc", "b", " b ", true));
}
TEST(StringReplace, IncreaseLengthMultipleMatches) {
EXPECT_EQ("a b b c",
str_util::StringReplace("abbc", "b", " b ", true));
}
TEST(StringReplace, NoChange) {
EXPECT_EQ("abc",
str_util::StringReplace("abc", "d", "X", true));
}
TEST(StringReplace, EmptyStringReplaceFirst) {
EXPECT_EQ("", str_util::StringReplace("", "a", "X", false));
}
TEST(StringReplace, EmptyStringReplaceAll) {
EXPECT_EQ("", str_util::StringReplace("", "a", "X", true));
}
TEST(Strnlen, Basic) {
EXPECT_EQ(0, str_util::Strnlen("ab", 0));
EXPECT_EQ(1, str_util::Strnlen("a", 1));
EXPECT_EQ(2, str_util::Strnlen("abcd", 2));
EXPECT_EQ(3, str_util::Strnlen("abc", 10));
EXPECT_EQ(4, str_util::Strnlen("a \t\n", 10));
}
} | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/str_util.cc | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/str_util_test.cc | 6d708fdcdd4f40537b7fa273371215a6fa3d4423 |
bc18fd1c-491b-4d62-b0f4-3017a34034bf | cpp | google/tsl | errors | tsl/platform/errors.cc | tsl/platform/errors_test.cc | #include "tsl/platform/errors.h"
#include <errno.h>
#include <string.h>
#include "tsl/platform/status.h"
#include "tsl/platform/strcat.h"
namespace tsl {
namespace errors {
namespace {
absl::StatusCode ErrnoToCode(int err_number) {
absl::StatusCode code;
switch (err_number) {
case 0:
code = absl::StatusCode::kOk;
break;
case EINVAL:
case ENAMETOOLONG:
case E2BIG:
case EDESTADDRREQ:
case EDOM:
case EFAULT:
case EILSEQ:
case ENOPROTOOPT:
case ENOSTR:
case ENOTSOCK:
case ENOTTY:
case EPROTOTYPE:
case ESPIPE:
code = absl::StatusCode::kInvalidArgument;
break;
case ETIMEDOUT:
case ETIME:
code = absl::StatusCode::kDeadlineExceeded;
break;
case ENODEV:
case ENOENT:
case ENXIO:
case ESRCH:
code = absl::StatusCode::kNotFound;
break;
case EEXIST:
case EADDRNOTAVAIL:
case EALREADY:
code = absl::StatusCode::kAlreadyExists;
break;
case EPERM:
case EACCES:
case EROFS:
code = absl::StatusCode::kPermissionDenied;
break;
case ENOTEMPTY:
case EISDIR:
case ENOTDIR:
case EADDRINUSE:
case EBADF:
case EBUSY:
case ECHILD:
case EISCONN:
#if !defined(_WIN32) && !defined(__HAIKU__)
case ENOTBLK:
#endif
case ENOTCONN:
case EPIPE:
#if !defined(_WIN32)
case ESHUTDOWN:
#endif
case ETXTBSY:
code = absl::StatusCode::kFailedPrecondition;
break;
case ENOSPC:
#if !defined(_WIN32)
case EDQUOT:
#endif
case EMFILE:
case EMLINK:
case ENFILE:
case ENOBUFS:
case ENODATA:
case ENOMEM:
case ENOSR:
#if !defined(_WIN32) && !defined(__HAIKU__)
case EUSERS:
#endif
code = absl::StatusCode::kResourceExhausted;
break;
case EFBIG:
case EOVERFLOW:
case ERANGE:
code = absl::StatusCode::kOutOfRange;
break;
case ENOSYS:
case ENOTSUP:
case EAFNOSUPPORT:
#if !defined(_WIN32)
case EPFNOSUPPORT:
#endif
case EPROTONOSUPPORT:
#if !defined(_WIN32) && !defined(__HAIKU__)
case ESOCKTNOSUPPORT:
#endif
case EXDEV:
code = absl::StatusCode::kUnimplemented;
break;
case EAGAIN:
case ECONNREFUSED:
case ECONNABORTED:
case ECONNRESET:
case EINTR:
#if !defined(_WIN32)
case EHOSTDOWN:
#endif
case EHOSTUNREACH:
case ENETDOWN:
case ENETRESET:
case ENETUNREACH:
case ENOLCK:
case ENOLINK:
#if !(defined(__APPLE__) || defined(__FreeBSD__) || defined(_WIN32) || \
defined(__HAIKU__))
case ENONET:
#endif
code = absl::StatusCode::kUnavailable;
break;
case EDEADLK:
#if !defined(_WIN32)
case ESTALE:
#endif
code = absl::StatusCode::kAborted;
break;
case ECANCELED:
code = absl::StatusCode::kCancelled;
break;
case EBADMSG:
case EIDRM:
case EINPROGRESS:
case EIO:
case ELOOP:
case ENOEXEC:
case ENOMSG:
case EPROTO:
#if !defined(_WIN32) && !defined(__HAIKU__)
case EREMOTE:
#endif
code = absl::StatusCode::kUnknown;
break;
default: {
code = absl::StatusCode::kUnknown;
break;
}
}
return code;
}
}
absl::Status IOError(const string& context, int err_number) {
auto code = ErrnoToCode(err_number);
return absl::Status(code,
strings::StrCat(context, "; ", strerror(err_number)));
}
bool IsAborted(const absl::Status& status) {
return status.code() == tsl::error::Code::ABORTED;
}
bool IsAlreadyExists(const absl::Status& status) {
return status.code() == tsl::error::Code::ALREADY_EXISTS;
}
bool IsCancelled(const absl::Status& status) {
return status.code() == tsl::error::Code::CANCELLED;
}
bool IsDataLoss(const absl::Status& status) {
return status.code() == tsl::error::Code::DATA_LOSS;
}
bool IsDeadlineExceeded(const absl::Status& status) {
return status.code() == tsl::error::Code::DEADLINE_EXCEEDED;
}
bool IsFailedPrecondition(const absl::Status& status) {
return status.code() == tsl::error::Code::FAILED_PRECONDITION;
}
bool IsInternal(const absl::Status& status) {
return status.code() == tsl::error::Code::INTERNAL;
}
bool IsInvalidArgument(const absl::Status& status) {
return status.code() == tsl::error::Code::INVALID_ARGUMENT;
}
bool IsNotFound(const absl::Status& status) {
return status.code() == tsl::error::Code::NOT_FOUND;
}
bool IsOutOfRange(const absl::Status& status) {
return status.code() == tsl::error::Code::OUT_OF_RANGE;
}
bool IsPermissionDenied(const absl::Status& status) {
return status.code() == tsl::error::Code::PERMISSION_DENIED;
}
bool IsResourceExhausted(const absl::Status& status) {
return status.code() == tsl::error::Code::RESOURCE_EXHAUSTED;
}
bool IsUnauthenticated(const absl::Status& status) {
return status.code() == tsl::error::Code::UNAUTHENTICATED;
}
bool IsUnavailable(const absl::Status& status) {
return status.code() == tsl::error::Code::UNAVAILABLE;
}
bool IsUnimplemented(const absl::Status& status) {
return status.code() == tsl::error::Code::UNIMPLEMENTED;
}
bool IsUnknown(const absl::Status& status) {
return status.code() == tsl::error::Code::UNKNOWN;
}
}
} | #include "tsl/platform/errors.h"
#include "absl/status/status.h"
#include "tsl/platform/test.h"
namespace tsl {
TEST(AppendToMessageTest, PayloadsAreCopied) {
absl::Status status = errors::Aborted("Aborted Error Message");
status.SetPayload("payload_key", absl::Cord("payload_value"));
errors::AppendToMessage(&status, "Appended Message");
EXPECT_EQ(status.message(), "Aborted Error Message\n\tAppended Message");
EXPECT_EQ(status.GetPayload("payload_key"), absl::Cord("payload_value"));
}
TEST(Status, GetAllPayloads) {
absl::Status s_error(absl::StatusCode::kInternal, "Error message");
s_error.SetPayload("Error key", absl::Cord("foo"));
auto payloads_error_status = errors::GetPayloads(s_error);
ASSERT_EQ(payloads_error_status.size(), 1);
ASSERT_EQ(payloads_error_status["Error key"], "foo");
absl::Status s_ok = absl::Status();
auto payloads_ok_status = errors::GetPayloads(s_ok);
ASSERT_TRUE(payloads_ok_status.empty());
}
TEST(Status, OKStatusInsertPayloadsFromErrorStatus) {
absl::Status s_error(absl::StatusCode::kInternal, "Error message");
s_error.SetPayload("Error key", absl::Cord("foo"));
absl::Status s_ok = absl::Status();
errors::InsertPayloads(s_ok, errors::GetPayloads(s_error));
auto payloads_ok_status = errors::GetPayloads(s_ok);
ASSERT_TRUE(payloads_ok_status.empty());
}
TEST(Status, ErrorStatusInsertPayloadsFromOKStatus) {
absl::Status s_error(absl::StatusCode::kInternal, "Error message");
s_error.SetPayload("Error key", absl::Cord("foo"));
absl::Status s_ok = absl::Status();
errors::InsertPayloads(s_error, errors::GetPayloads(s_ok));
ASSERT_EQ(s_error.GetPayload("Error key"), "foo");
}
TEST(Status, ErrorStatusInsertPayloadsFromErrorStatus) {
absl::Status s_error1(absl::StatusCode::kInternal, "Error message");
s_error1.SetPayload("Error key 1", absl::Cord("foo"));
s_error1.SetPayload("Error key 2", absl::Cord("bar"));
absl::Status s_error2(absl::StatusCode::kInternal, "Error message");
s_error2.SetPayload("Error key", absl::Cord("bar"));
ASSERT_EQ(s_error2.GetPayload("Error key"), "bar");
errors::InsertPayloads(s_error2, errors::GetPayloads(s_error1));
ASSERT_EQ(s_error2.GetPayload("Error key 1"), "foo");
ASSERT_EQ(s_error2.GetPayload("Error key 2"), "bar");
auto payloads_error_status = errors::GetPayloads(s_error2);
ASSERT_EQ(payloads_error_status.size(), 3);
}
#if defined(PLATFORM_GOOGLE)
absl::Status GetError() {
return absl::InvalidArgumentError("An invalid argument error");
}
absl::Status PropagateError() {
TF_RETURN_IF_ERROR(GetError());
return absl::OkStatus();
}
absl::Status PropagateError2() {
TF_RETURN_IF_ERROR(PropagateError());
return absl::OkStatus();
}
TEST(Status, StackTracePropagation) {
absl::Status s = PropagateError2();
auto sources = s.GetSourceLocations();
ASSERT_EQ(sources.size(), 3);
for (int i = 0; i < 3; ++i) {
ASSERT_EQ(sources[i].file_name(),
"third_party/tensorflow/tsl/platform/errors_test.cc");
}
}
TEST(Status, SourceLocationsPreservedByAppend) {
absl::Status s = PropagateError2();
ASSERT_EQ(s.GetSourceLocations().size(), 3);
errors::AppendToMessage(&s, "A new message.");
ASSERT_EQ(s.GetSourceLocations().size(), 3);
}
TEST(Status, SourceLocationsPreservedByUpdate) {
absl::Status s = PropagateError2();
ASSERT_EQ(s.GetSourceLocations().size(), 3);
absl::Status s2 = errors::CreateWithUpdatedMessage(s, "New message.");
ASSERT_EQ(s2.GetSourceLocations().size(), 3);
}
#endif
} | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/errors.cc | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/errors_test.cc | 6d708fdcdd4f40537b7fa273371215a6fa3d4423 |
871d31db-5559-4d8e-9619-1a704c808ec6 | cpp | google/tsl | status | tsl/platform/status.cc | tsl/platform/status_test.cc | #include "tsl/platform/status.h"
#include <stdio.h>
#include <deque>
#include <functional>
#include <memory>
#include <ostream>
#include <sstream>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include "absl/base/call_once.h"
#include "absl/functional/function_ref.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/escaping.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/stack_frame.h"
#include "tsl/platform/stacktrace.h"
#include "tsl/platform/str_util.h"
#include "tsl/platform/strcat.h"
#include "tsl/platform/stringprintf.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tsl {
namespace {
class StatusLogSink : public TFLogSink {
public:
static StatusLogSink* GetInstance() {
static StatusLogSink* sink = new StatusLogSink();
return sink;
}
void enable() {
absl::call_once(flag_, [this] {
num_messages_ = 5;
if (const char* num_msgs_str =
getenv("TF_WORKER_NUM_FORWARDED_LOG_MESSAGES")) {
if (!absl::SimpleAtoi(num_msgs_str, &num_messages_)) {
LOG(WARNING) << "Failed to parse env variable "
"TF_WORKER_NUM_WARNING_ERROR_LOG_IN_STATUS="
<< num_msgs_str << " as int. Using the default value "
<< num_messages_ << ".";
}
}
if (num_messages_ > 0) {
TFAddLogSink(this);
}
});
}
void GetMessages(std::vector<std::string>* logs) TF_LOCKS_EXCLUDED(mu_) {
mutex_lock lock(mu_);
for (auto& msg : messages_) {
logs->push_back(msg);
}
}
void Send(const TFLogEntry& entry) override TF_LOCKS_EXCLUDED(mu_) {
if (entry.log_severity() < absl::LogSeverity::kWarning) return;
mutex_lock lock(mu_);
messages_.emplace_back(entry.ToString());
if (messages_.size() > static_cast<size_t>(num_messages_)) {
messages_.pop_front();
}
}
private:
mutex mu_;
absl::once_flag flag_;
int num_messages_ = 0;
std::deque<std::string> messages_ TF_GUARDED_BY(mu_);
};
}
namespace errors {
static constexpr const char kStackTraceProtoUrl[] =
"type.googleapis.com/tensorflow.StackTracePayload";
void SetStackTrace(absl::Status& status, std::vector<StackFrame> stack_trace) {
std::vector<std::string> items;
items.reserve(stack_trace.size());
for (StackFrame& frame : stack_trace) {
items.push_back(
absl::StrCat(absl::StrReplaceAll(frame.file_name, {{"\n", ""}}), "\n",
frame.line_number, "\n",
absl::StrReplaceAll(frame.function_name, {{"\n", ""}})));
}
status.SetPayload(kStackTraceProtoUrl,
absl::Cord(absl::StrJoin(items, "\n")));
}
std::vector<StackFrame> GetStackTrace(const absl::Status& status) {
std::vector<StackFrame> stack_trace;
absl::optional<absl::Cord> maybe_serialized_payload =
status.GetPayload(kStackTraceProtoUrl);
if (maybe_serialized_payload.has_value()) {
std::vector<std::string> split =
absl::StrSplit(maybe_serialized_payload.value().Flatten(), '\n');
assert(split.size() % 3 == 0);
for (int i = 0; i < split.size() / 3; ++i) {
const int idx = 3 * i;
int line_number = -1;
CHECK(absl::SimpleAtoi(split[idx + 1], &line_number));
stack_trace.emplace_back(std::move(split[idx]), line_number,
std::move(split[idx + 2]));
}
}
return stack_trace;
}
}
#ifdef _WIN32
const char* NullTerminatedMessage(const absl::Status& status) {
return absl::StatusMessageAsCStr(status);
}
#endif
std::string* TfCheckOpHelperOutOfLine(const absl::Status& v, const char* msg) {
std::stringstream ss;
ss << "Non-OK-status: " << msg << "\nStatus: " << v;
return new std::string(ss.str());
}
StatusGroup::StatusGroup() {}
StatusGroup::StatusGroup(std::initializer_list<absl::Status> statuses) {
for (const absl::Status& s : statuses) {
Update(s);
}
}
static constexpr const char kDerivedStatusProtoUrl[] =
"type.googleapis.com/tensorflow.DerivedStatus";
absl::Status StatusGroup::MakeDerived(const absl::Status& s) {
if (IsDerived(s)) {
return s;
} else {
absl::Status derived(s);
derived.SetPayload(kDerivedStatusProtoUrl, absl::Cord(""));
return derived;
}
}
bool StatusGroup::IsDerived(const absl::Status& s) {
return s.GetPayload(kDerivedStatusProtoUrl).has_value();
}
void StatusGroup::ConfigureLogHistory() {
StatusLogSink::GetInstance()->enable();
}
void StatusGroup::Update(const absl::Status& s) {
if (s.ok()) {
++num_ok_;
} else {
ok_ = false;
if (IsDerived(s)) {
derived_.insert(s);
} else {
non_derived_.insert(s);
}
}
}
static constexpr int kMaxAggregatedStatusMessageSize = 8 * 1024;
static constexpr int kMaxAttachedLogMessageSize = 512;
std::unordered_map<std::string, absl::Cord> StatusGroup::GetPayloads() const {
std::unordered_map<std::string, absl::Cord> payloads;
auto capture_payload = [&payloads](absl::string_view key,
const absl::Cord& value) {
payloads[std::string(key)] = value;
};
for (const auto& status : derived_) {
status.ForEachPayload(capture_payload);
}
for (const auto& status : non_derived_) {
status.ForEachPayload(capture_payload);
}
payloads.erase(kDerivedStatusProtoUrl);
return payloads;
}
absl::Status MakeStatus(
absl::StatusCode code, absl::string_view message,
const std::unordered_map<std::string, absl::Cord>& payloads) {
absl::Status status(code, message);
for (const auto& payload : payloads) {
status.SetPayload(payload.first, payload.second);
}
return status;
}
std::string MakeString(const absl::Status& status) {
return absl::StrCat(absl::StatusCodeToString(status.code()), ": ",
status.message());
}
absl::Status StatusGroup::as_summary_status() const {
if (ok_) {
return absl::OkStatus();
}
auto get_recent_logs = [this]() -> std::string {
if (!recent_logs_.empty()) {
std::vector<std::string> fmt;
fmt.push_back("\nRecent warning and error logs:");
for (auto& log : recent_logs_) {
fmt.push_back(" " + log.substr(0, kMaxAttachedLogMessageSize));
}
return absl::StrJoin(fmt, "\n");
} else {
return "";
}
};
if (non_derived_.size() == 1) {
return MakeStatus(
non_derived_.begin()->code(),
strings::StrCat(non_derived_.begin()->message(), get_recent_logs()),
GetPayloads());
}
if (!non_derived_.empty()) {
std::vector<std::string> fmt;
fmt.push_back(
strings::Printf("%zu root error(s) found.", non_derived_.size()));
int index = 0;
auto code = absl::StatusCode::kCancelled;
for (const auto& s : non_derived_) {
if (code == absl::StatusCode::kCancelled &&
s.code() != absl::StatusCode::kCancelled) {
code = s.code();
}
fmt.emplace_back(strings::StrCat(" (", index, ") ", MakeString(s)));
++index;
}
fmt.push_back(strings::Printf("%zu successful operations.", num_ok_));
fmt.push_back(
strings::Printf("%zu derived errors ignored.", derived_.size()));
std::string error_msg =
absl::StrJoin(fmt, "\n").substr(0, kMaxAggregatedStatusMessageSize);
return MakeStatus(code, strings::StrCat(error_msg, get_recent_logs()),
GetPayloads());
} else {
return MakeDerived(MakeStatus(derived_.begin()->code(),
derived_.begin()->message(), GetPayloads()));
}
}
absl::Status StatusGroup::as_concatenated_status() const {
if (ok_) {
return absl::OkStatus();
}
if (non_derived_.size() == 1) {
return MakeStatus(non_derived_.begin()->code(),
non_derived_.begin()->message(), GetPayloads());
}
if (!non_derived_.empty()) {
std::vector<string> fmt;
fmt.emplace_back("\n=====================");
for (const auto& s : non_derived_) {
fmt.emplace_back(MakeString(s));
}
fmt.emplace_back("=====================\n");
return MakeStatus(
non_derived_.begin()->code(),
absl::StrJoin(fmt, "\n").substr(0, kMaxAggregatedStatusMessageSize),
GetPayloads());
} else {
return MakeDerived(MakeStatus(derived_.begin()->code(),
derived_.begin()->message(), GetPayloads()));
}
}
void StatusGroup::AttachLogMessages() {
recent_logs_.clear();
StatusLogSink::GetInstance()->GetMessages(&recent_logs_);
}
} | #include "tsl/platform/status.h"
#include <unordered_map>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_format.h"
#include "xla/tsl/protobuf/status.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/stack_frame.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/status_to_from_proto.h"
#include "tsl/platform/test.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tsl {
namespace {
using ::testing::IsEmpty;
using ::tsl::testing::IsOk;
using ::tsl::testing::StatusIs;
TEST(ToStringTest, PayloadsArePrinted) {
absl::Status status = errors::Aborted("Aborted Error Message");
status.SetPayload("payload_key", absl::Cord(absl::StrFormat(
"payload_value %c%c%c", 1, 2, 3)));
EXPECT_EQ(status.ToString(),
"ABORTED: Aborted Error Message [payload_key='payload_value "
"\\x01\\x02\\x03']");
}
TEST(ToStringTest, MatchesAbslStatus) {
absl::Status status = errors::Aborted("Aborted Error Message");
status.SetPayload("payload_key", absl::Cord(absl::StrFormat(
"payload_value %c%c%c", 1, 2, 3)));
absl::Status absl_status =
absl::Status(absl::StatusCode::kAborted, status.message());
absl_status.SetPayload("payload_key", absl::Cord(absl::StrFormat(
"payload_value %c%c%c", 1, 2, 3)));
EXPECT_EQ(status.ToString(), absl_status.ToString());
}
TEST(StackTrace, SerializeAndDeserializeCorrectly) {
absl::Status status = errors::Aborted("Aborted Error Message");
std::vector<StackFrame> stack_trace;
stack_trace.push_back(StackFrame("filename_1", 33, "func_name_1"));
stack_trace.push_back(StackFrame("filename_2", 66, "func_name_2"));
errors::SetStackTrace(status, stack_trace);
std::vector<StackFrame> deserialized = errors::GetStackTrace(status);
EXPECT_EQ(stack_trace.size(), deserialized.size());
for (size_t i = 0; i < stack_trace.size(); ++i) {
EXPECT_EQ(stack_trace[i], deserialized[i]);
}
}
TEST(StatusGroupTest, DeterministicOrderWithoutPayloads) {
absl::Status status_a = errors::Aborted("Status A");
absl::Status status_b = errors::Aborted("Status B");
absl::Status status_c = errors::Aborted("Status C");
absl::Status combined =
StatusGroup({status_a, status_b, status_c}).as_summary_status();
EXPECT_EQ(combined,
StatusGroup({status_a, status_b, status_c}).as_summary_status());
EXPECT_EQ(combined,
StatusGroup({status_a, status_c, status_b}).as_summary_status());
EXPECT_EQ(combined,
StatusGroup({status_b, status_a, status_c}).as_summary_status());
EXPECT_EQ(combined,
StatusGroup({status_b, status_c, status_a}).as_summary_status());
EXPECT_EQ(combined,
StatusGroup({status_c, status_a, status_b}).as_summary_status());
EXPECT_EQ(combined,
StatusGroup({status_c, status_b, status_a}).as_summary_status());
}
TEST(StatusGroupTest, DeterministicOrderWithPayloads) {
absl::Status status_a = errors::Aborted("Status A");
status_a.SetPayload("payload_key", absl::Cord("payload_value_a"));
absl::Status status_b = errors::Aborted("Status B");
status_b.SetPayload("payload_key", absl::Cord("payload_value_b"));
absl::Status status_c = errors::Aborted("Status C");
status_c.SetPayload("payload_key", absl::Cord("payload_value_c"));
absl::Status combined =
StatusGroup({status_a, status_b, status_c}).as_summary_status();
ASSERT_TRUE(combined.GetPayload("payload_key").has_value());
std::string payload(combined.GetPayload("payload_key").value());
EXPECT_EQ(payload, StatusGroup({status_a, status_b, status_c})
.as_summary_status()
.GetPayload("payload_key"));
EXPECT_EQ(payload, StatusGroup({status_a, status_c, status_b})
.as_summary_status()
.GetPayload("payload_key"));
EXPECT_EQ(payload, StatusGroup({status_b, status_a, status_c})
.as_summary_status()
.GetPayload("payload_key"));
EXPECT_EQ(payload, StatusGroup({status_b, status_c, status_a})
.as_summary_status()
.GetPayload("payload_key"));
EXPECT_EQ(payload, StatusGroup({status_c, status_a, status_b})
.as_summary_status()
.GetPayload("payload_key"));
EXPECT_EQ(payload, StatusGroup({status_c, status_b, status_a})
.as_summary_status()
.GetPayload("payload_key"));
}
TEST(StatusGroupTest, PayloadsMergedProperly) {
absl::Status status_a = errors::Aborted("Status A");
status_a.SetPayload("payload_key_a",
absl::Cord(std::string("payload_value_a")));
absl::Status status_b = errors::Aborted("Status B");
status_b.SetPayload("payload_key_b",
absl::Cord(std::string("payload_value_b")));
absl::Status status_c = errors::Aborted("Status C");
status_c.SetPayload("payload_key_c",
absl::Cord(std::string("payload_value_c")));
absl::Status derived_status_c =
StatusGroup::MakeDerived(errors::Aborted("Status C"));
derived_status_c.SetPayload(
"payload_key_c", absl::Cord(std::string("derived_payload_value_c")));
StatusGroup status_group({status_a, status_b, status_c, derived_status_c});
EXPECT_THAT(status_group.GetPayloads(), ::testing::SizeIs(3));
absl::Status combined = status_group.as_summary_status();
EXPECT_EQ(combined.GetPayload("payload_key_a"), "payload_value_a");
EXPECT_EQ(combined.GetPayload("payload_key_b"), "payload_value_b");
EXPECT_EQ(combined.GetPayload("payload_key_c"), "payload_value_c");
}
TEST(Status, ErrorStatusForEachPayloadIteratesOverAll) {
absl::Status s(absl::StatusCode::kInternal, "Error message");
s.SetPayload("key1", absl::Cord("value1"));
s.SetPayload("key2", absl::Cord("value2"));
s.SetPayload("key3", absl::Cord("value3"));
std::unordered_map<std::string, absl::Cord> payloads;
s.ForEachPayload([&payloads](absl::string_view key, const absl::Cord& value) {
payloads[std::string(key)] = value;
});
EXPECT_EQ(payloads.size(), 3);
EXPECT_EQ(payloads["key1"], "value1");
EXPECT_EQ(payloads["key2"], "value2");
EXPECT_EQ(payloads["key3"], "value3");
}
TEST(Status, OkStatusForEachPayloadNoIteration) {
absl::Status s = absl::OkStatus();
s.SetPayload("key1", absl::Cord("value1"));
s.SetPayload("key2", absl::Cord("value2"));
s.SetPayload("key3", absl::Cord("value3"));
std::unordered_map<std::string, absl::Cord> payloads;
s.ForEachPayload([&payloads](absl::string_view key, const absl::Cord& value) {
payloads[std::string(key)] = value;
});
EXPECT_EQ(payloads.size(), 0);
}
TEST(Status, SaveOKStatusToProto) {
tensorflow::StatusProto status_proto = StatusToProto(absl::OkStatus());
EXPECT_EQ(status_proto.code(), error::OK);
EXPECT_THAT(status_proto.message(), IsEmpty());
}
TEST(Status, SaveErrorStatusToProto) {
tensorflow::StatusProto status_proto =
StatusToProto(errors::NotFound("Not found"));
EXPECT_EQ(status_proto.code(), error::NOT_FOUND);
EXPECT_EQ(status_proto.message(), "Not found");
}
TEST(Status, SaveEmptyStatusToProto) {
tensorflow::StatusProto status_proto = StatusToProto(absl::Status());
EXPECT_EQ(status_proto.code(), error::OK);
EXPECT_THAT(status_proto.message(), IsEmpty());
}
TEST(Status, MakeOKStatusFromProto) {
tensorflow::StatusProto status_proto;
status_proto.set_code(error::OK);
EXPECT_THAT(StatusFromProto(status_proto), IsOk());
}
TEST(Status, MakeErrorStatusFromProto) {
tensorflow::StatusProto status_proto;
status_proto.set_code(error::INVALID_ARGUMENT);
status_proto.set_message("Invalid argument");
EXPECT_THAT(StatusFromProto(status_proto),
StatusIs(error::INVALID_ARGUMENT, "Invalid argument"));
}
TEST(Status, MakeStatusFromEmptyProto) {
EXPECT_THAT(StatusFromProto(tensorflow::StatusProto()), IsOk());
}
}
} | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/status.cc | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/status_test.cc | 6d708fdcdd4f40537b7fa273371215a6fa3d4423 |
b2165cf5-6d76-4ef0-ab7b-10c49b938012 | cpp | google/tsl | path | tsl/platform/path.cc | tsl/platform/path_test.cc | #include "tsl/platform/path.h"
#include <errno.h>
#include <fcntl.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <sys/types.h>
#if defined(PLATFORM_WINDOWS)
#include <windows.h>
#else
#include <unistd.h>
#endif
#include <string>
#include <vector>
#include "absl/algorithm/container.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/scanner.h"
#include "tsl/platform/str_util.h"
#include "tsl/platform/strcat.h"
#include "tsl/platform/stringpiece.h"
#include "tsl/platform/types.h"
namespace tsl {
namespace io {
namespace internal {
namespace {
const char kPathSep[] = "/";
}
// Joins path fragments with '/' separators. Empty fragments are skipped; an
// absolute fragment has its leading '/' dropped before appending, and a
// separator is inserted only when the accumulated path does not already end
// with one -- so separators are never doubled.
string JoinPathImpl(std::initializer_list<absl::string_view> paths) {
  string joined;
  for (absl::string_view part : paths) {
    if (part.empty()) continue;
    if (joined.empty()) {
      joined = string(part);
      continue;
    }
    // Strip the fragment's own leading separator; we add ours when needed.
    if (IsAbsolutePath(part)) part = part.substr(1);
    const bool ends_with_sep = joined.back() == kPathSep[0];
    if (ends_with_sep) {
      strings::StrAppend(&joined, part);
    } else {
      strings::StrAppend(&joined, kPathSep, part);
    }
  }
  return joined;
}
// Splits a path (or URI) into {directory, basename} views into `uri`.
// For a URI the scheme and host stay with the directory part. The separator
// itself is kept with the directory only when the directory is the root.
std::pair<absl::string_view, absl::string_view> SplitPath(
    absl::string_view uri) {
  absl::string_view scheme, host, path;
  ParseURI(uri, &scheme, &host, &path);
  auto pos = path.rfind('/');
#ifdef PLATFORM_WINDOWS
  // Fixed: spell npos via absl::string_view for consistency with `path`'s
  // type (StringPiece is only a legacy alias for absl::string_view here).
  if (pos == absl::string_view::npos) pos = path.rfind('\\');
#endif
  // No separator: everything up to the end of the host is the "directory".
  if (pos == absl::string_view::npos)
    return std::make_pair(
        absl::string_view(uri.data(), host.end() - uri.begin()), path);
  // Separator at position 0: the directory is the root; keep the '/'.
  if (pos == 0)
    return std::make_pair(
        absl::string_view(uri.data(), path.begin() + 1 - uri.begin()),
        absl::string_view(path.data() + 1, path.size() - 1));
  return std::make_pair(
      absl::string_view(uri.data(), path.begin() + pos - uri.begin()),
      absl::string_view(path.data() + pos + 1, path.size() - (pos + 1)));
}
// Splits the basename of `path` into {stem, extension} around its last '.'.
// Both views alias `path`'s storage; the '.' itself is in neither part.
std::pair<absl::string_view, absl::string_view> SplitBasename(
    absl::string_view path) {
  path = Basename(path);
  const auto dot = path.rfind('.');
  if (dot == absl::string_view::npos) {
    // No extension: whole basename, plus an empty view anchored at its end.
    return {path, absl::string_view(path.data() + path.size(), 0)};
  }
  return {absl::string_view(path.data(), dot),
          absl::string_view(path.data() + dot + 1, path.size() - (dot + 1))};
}
}
// True iff `path` is non-empty and begins with a '/' separator.
bool IsAbsolutePath(absl::string_view path) {
  if (path.empty()) return false;
  return path.front() == '/';
}
// Directory portion of `path` (everything before the final separator).
absl::string_view Dirname(absl::string_view path) {
  const auto parts = internal::SplitPath(path);
  return parts.first;
}

// Final component of `path` (everything after the last separator).
absl::string_view Basename(absl::string_view path) {
  const auto parts = internal::SplitPath(path);
  return parts.second;
}

// Extension of the basename, i.e. the text after its last '.'.
absl::string_view Extension(absl::string_view path) {
  const auto parts = internal::SplitBasename(path);
  return parts.second;
}

// Basename with its extension (and the '.') removed.
absl::string_view BasenamePrefix(absl::string_view path) {
  const auto parts = internal::SplitBasename(path);
  return parts.first;
}
// Lexically normalizes `unclean_path`: collapses runs of '/', removes "."
// segments, and resolves ".." against preceding segments where possible.
// Purely textual -- the filesystem (and symlinks) are not consulted.
string CleanPath(absl::string_view unclean_path) {
  string path(unclean_path);
  // Two-cursor compaction: `src` reads the original text while `dst` writes
  // the cleaned text into the same buffer (`dst` never passes `src`).
  const char* src = path.c_str();
  string::iterator dst = path.begin();
  // Keep a single leading '/' for absolute paths and skip any duplicates.
  const bool is_absolute_path = *src == '/';
  if (is_absolute_path) {
    *dst++ = *src++;
    while (*src == '/') ++src;
  }
  // Backtracking for ".." must never erase the root, nor any ".." prefixes
  // already emitted for a relative path.
  string::const_iterator backtrack_limit = dst;
  while (*src) {
    bool parsed = false;
    if (src[0] == '.') {
      if (src[1] == '/' || !src[1]) {
        // "./" or a trailing "." -- drop it entirely.
        if (*++src) {
          ++src;
        }
        parsed = true;
      } else if (src[1] == '.' && (src[2] == '/' || !src[2])) {
        // "../" or a trailing "..".
        src += 2;
        if (dst != backtrack_limit) {
          // Pop the previously written segment.
          for (--dst; dst != backtrack_limit && dst[-1] != '/'; --dst) {
          }
        } else if (!is_absolute_path) {
          // Nothing to pop in a relative path: keep the ".." literally and
          // advance the backtrack limit past it.
          src -= 2;
          *dst++ = *src++;
          *dst++ = *src++;
          if (*src) {
            *dst++ = *src;
          }
          backtrack_limit = dst;
        }
        if (*src) {
          ++src;
        }
        parsed = true;
      }
    }
    if (!parsed) {
      // Ordinary segment: copy it (and one trailing separator) verbatim.
      while (*src && *src != '/') {
        *dst++ = *src++;
      }
      if (*src) {
        *dst++ = *src++;
      }
    }
    // Collapse any run of separators before the next segment.
    while (*src == '/') {
      ++src;
    }
  }
  // Trim a trailing separator (but keep a lone "/"), shrink the string to
  // the cleaned length; an empty result means "current directory".
  string::difference_type path_length = dst - path.begin();
  if (path_length != 0) {
    if (path_length > 1 && path[path_length - 1] == '/') {
      --path_length;
    }
    path.resize(path_length);
  } else {
    path.assign(1, '.');
  }
  return path;
}
// Parses `uri` into scheme, host, and path views (all aliasing `uri`).
// A scheme must match [a-zA-Z][0-9a-zA-Z.]* followed by "://"; otherwise the
// whole input is treated as a plain path with empty scheme and host.
void ParseURI(absl::string_view uri, absl::string_view* scheme,
              absl::string_view* host, absl::string_view* path) {
  if (!strings::Scanner(uri)
           .One(strings::Scanner::LETTER)
           .Many(strings::Scanner::LETTER_DIGIT_DOT)
           .StopCapture()
           // Fixed: the "://" literal was truncated to ":" in this copy of
           // the file (text after "//" was stripped); restored.
           .OneLiteral("://")
           .GetResult(&uri, scheme)) {
    // No scheme: the entire string is the path.
    *scheme = absl::string_view(uri.data(), 0);
    *host = absl::string_view(uri.data(), 0);
    *path = uri;
    return;
  }
  // Host runs up to (but not including) the first '/' after the scheme.
  if (!strings::Scanner(uri).ScanUntil('/').GetResult(&uri, host)) {
    // No path: the remainder of the URI is the host.
    *host = uri;
    *path = absl::string_view();
    return;
  }
  // Whatever is left (starting at '/') is the path.
  *path = uri;
}
// Assembles "<scheme>://<host><path>"; with an empty scheme, returns `path`
// unchanged. Inverse of ParseURI.
string CreateURI(absl::string_view scheme, absl::string_view host,
                 absl::string_view path) {
  if (scheme.empty()) {
    return string(path);
  }
  // Fixed: the "://" literal was truncated to ":" in this copy of the file
  // (text after "//" was stripped); restored.
  return strings::StrCat(scheme, "://", host, path);
}
// Returns a process-wide unique, monotonically increasing id (starting at 1).
// Thread-safe via an internal linker-initialized mutex.
int64_t UniqueId() {
  static mutex mu(LINKER_INITIALIZED);
  static int64_t id = 0;
  mutex_lock lock(mu);
  ++id;
  return id;
}
// Returns the longest common directory prefix of `paths`, including its
// trailing '/'. Returns "" when the paths share no directory component
// (the common prefix is only trimmed back to the last path separator).
string CommonPathPrefix(absl::Span<const string> paths) {
  if (paths.empty()) return "";
  size_t min_filename_size =
      absl::c_min_element(paths, [](const string& a, const string& b) {
        return a.size() < b.size();
      })->size();
  if (min_filename_size == 0) return "";
  // Longest common character prefix across all paths.
  size_t common_prefix_size = [&] {
    for (size_t prefix_size = 0; prefix_size < min_filename_size;
         prefix_size++) {
      char c = paths[0][prefix_size];
      // Fixed: loop index was `int`, causing a signed/unsigned comparison
      // against paths.size(); use size_t.
      for (size_t f = 1; f < paths.size(); f++) {
        if (paths[f][prefix_size] != c) {
          return prefix_size;
        }
      }
    }
    return min_filename_size;
  }();
  // Trim back to the last separator so only whole directories are returned.
  size_t rpos = absl::string_view(paths[0])
                    .substr(0, common_prefix_size)
                    .rfind(internal::kPathSep);
  return rpos == std::string::npos
             ? ""
             : std::string(absl::string_view(paths[0]).substr(0, rpos + 1));
}
// Creates a uniquely named temporary file (optionally with `extension`) in
// the first usable temp directory and returns its path. LOG(FATAL)s on
// failure, so it never returns an invalid path.
string GetTempFilename(const string& extension) {
#if defined(__ANDROID__)
  LOG(FATAL) << "GetTempFilename is not implemented in this platform.";
#elif defined(PLATFORM_WINDOWS)
  char temp_dir[_MAX_PATH];
  DWORD retval;
  // Ask Windows for the per-user temp directory.
  retval = GetTempPath(_MAX_PATH, temp_dir);
  if (retval > _MAX_PATH || retval == 0) {
    LOG(FATAL) << "Cannot get the directory for temporary files.";
  }
  char temp_file_name[_MAX_PATH];
  // UniqueId() keeps names distinct within this process.
  retval = GetTempFileName(temp_dir, "", UniqueId(), temp_file_name);
  if (retval > _MAX_PATH || retval == 0) {
    LOG(FATAL) << "Cannot get a temporary file in: " << temp_dir;
  }
  string full_tmp_file_name(temp_file_name);
  full_tmp_file_name.append(extension);
  return full_tmp_file_name;
#else
  // Probe the conventional environment variables in priority order, then
  // fall back to /tmp; the first entry that is an existing directory wins.
  for (const char* dir : std::vector<const char*>(
           {getenv("TEST_TMPDIR"), getenv("TMPDIR"), getenv("TMP"), "/tmp"})) {
    if (!dir || !dir[0]) {
      continue;
    }
    struct stat statbuf;
    if (!stat(dir, &statbuf) && S_ISDIR(statbuf.st_mode)) {
      string tmp_filepath;
      int fd;
      if (extension.length()) {
        // mkstemps preserves the ".<extension>" suffix after the XXXXXX
        // placeholder that it rewrites in place.
        tmp_filepath = io::JoinPath(
            dir, strings::StrCat("tmp_file_tensorflow_", UniqueId(), "_XXXXXX.",
                                 extension));
        fd = mkstemps(&tmp_filepath[0], extension.length() + 1);
      } else {
        tmp_filepath = io::JoinPath(
            dir,
            strings::StrCat("tmp_file_tensorflow_", UniqueId(), "_XXXXXX"));
        fd = mkstemp(&tmp_filepath[0]);
      }
      if (fd < 0) {
        LOG(FATAL) << "Failed to create temp file.";
      } else {
        // Only the name is needed; close the descriptor immediately.
        if (close(fd) < 0) {
          LOG(ERROR) << "close() failed: " << strerror(errno);
        }
        return tmp_filepath;
      }
    }
  }
  LOG(FATAL) << "No temp directory found.";
  std::abort();  // Unreachable; silences missing-return warnings.
#endif
}
namespace {
// True iff `path` begins with `segment` as a complete path component, i.e.
// "FOO" matches "FOO" and "FOO/bar" but not "FOOBAR/...".
bool StartsWithSegment(absl::string_view path, absl::string_view segment) {
  if (!absl::StartsWith(path, segment)) return false;
  // Either the whole path is the segment, or a separator must follow it.
  return path.size() == segment.size() ||
         path.at(segment.size()) == internal::kPathSep[0];
}
}
// Resolves the bazel test workspace directory ($TEST_SRCDIR/$TEST_WORKSPACE).
// Returns false when either variable is unset, leaving *dir untouched; `dir`
// may be null to merely test availability.
bool GetTestWorkspaceDir(string* dir) {
  const char* const srcdir = getenv("TEST_SRCDIR");
  const char* const workspace = getenv("TEST_WORKSPACE");
  // Both variables are required for the workspace path to be well-defined.
  if (srcdir == nullptr || workspace == nullptr) {
    return false;
  }
  if (dir != nullptr) {
    *dir = tsl::io::JoinPath(srcdir, workspace);
  }
  return true;
}
// Reads $TEST_UNDECLARED_OUTPUTS_DIR. Returns false when unset, leaving *dir
// untouched; `dir` may be null to merely test availability.
bool GetTestUndeclaredOutputsDir(string* dir) {
  const char* const outputs_dir = getenv("TEST_UNDECLARED_OUTPUTS_DIR");
  if (outputs_dir == nullptr) return false;
  if (dir != nullptr) *dir = outputs_dir;
  return true;
}
// Expands a leading "TEST_WORKSPACE" or "TEST_UNDECLARED_OUTPUTS_DIR" path
// segment to the corresponding real directory. Paths without a recognized
// prefix pass through unchanged. Returns false (leaving resolved_path
// untouched) only when a recognized prefix cannot be resolved.
bool ResolveTestPrefixes(absl::string_view path, string& resolved_path) {
  constexpr absl::string_view kTestWorkspaceSegment = "TEST_WORKSPACE";
  constexpr absl::string_view kOutputDirSegment = "TEST_UNDECLARED_OUTPUTS_DIR";
  if (StartsWithSegment(path, kTestWorkspaceSegment)) {
    // "TEST_WORKSPACE[/...]" is relative to the bazel test workspace.
    if (!GetTestWorkspaceDir(&resolved_path)) return false;
    resolved_path += path.substr(kTestWorkspaceSegment.size());
    return true;
  }
  if (StartsWithSegment(path, kOutputDirSegment)) {
    // "TEST_UNDECLARED_OUTPUTS_DIR[/...]".
    if (!GetTestUndeclaredOutputsDir(&resolved_path)) return false;
    resolved_path += path.substr(kOutputDirSegment.size());
    return true;
  }
  // No recognized prefix: pass the path through unchanged.
  resolved_path = path;
  return true;
}
// On Windows, appends ".exe" to `path` (executables carry that suffix
// there); a no-op on other platforms. Returns the same string reference so
// calls can be chained.
[[maybe_unused]] std::string& AppendDotExeIfWindows(std::string& path) {
#ifdef PLATFORM_WINDOWS
  path.append(".exe");
#endif
  return path;
}
}
} | #include "tsl/platform/path.h"
#include <string>
#include "tsl/platform/env.h"
#include "tsl/platform/stringpiece.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace io {
TEST(PathTest, JoinPath) {
EXPECT_EQ("/foo/bar", JoinPath("/foo", "bar"));
EXPECT_EQ("foo/bar", JoinPath("foo", "bar"));
EXPECT_EQ("foo/bar", JoinPath("foo", "/bar"));
EXPECT_EQ("/foo/bar", JoinPath("/foo", "/bar"));
EXPECT_EQ("/bar", JoinPath("", "/bar"));
EXPECT_EQ("bar", JoinPath("", "bar"));
EXPECT_EQ("/foo", JoinPath("/foo", ""));
EXPECT_EQ("/foo/bar/baz/blah/blink/biz",
JoinPath("/foo/bar/baz/", "/blah/blink/biz"));
EXPECT_EQ("/foo/bar/baz/blah", JoinPath("/foo", "bar", "baz", "blah"));
}
TEST(PathTest, IsAbsolutePath) {
EXPECT_FALSE(IsAbsolutePath(""));
EXPECT_FALSE(IsAbsolutePath("../foo"));
EXPECT_FALSE(IsAbsolutePath("foo"));
EXPECT_FALSE(IsAbsolutePath("./foo"));
EXPECT_FALSE(IsAbsolutePath("foo/bar/baz/"));
EXPECT_TRUE(IsAbsolutePath("/foo"));
EXPECT_TRUE(IsAbsolutePath("/foo/bar/../baz"));
}
TEST(PathTest, Dirname) {
  // Restored: the "hdfs://..." literals were truncated in this copy of the
  // file (everything following "//" on the line was stripped).
  EXPECT_EQ("hdfs://127.0.0.1:9000/",
            Dirname("hdfs://127.0.0.1:9000/train.txt.tfrecords"));
  EXPECT_EQ("/hello", Dirname("/hello/"));
  EXPECT_EQ("/", Dirname("/hello"));
  EXPECT_EQ("hello", Dirname("hello/world"));
  EXPECT_EQ("hello", Dirname("hello/"));
  EXPECT_EQ("", Dirname("world"));
  EXPECT_EQ("/", Dirname("/"));
  EXPECT_EQ("", Dirname(""));
}
TEST(PathTest, Basename) {
EXPECT_EQ("", Basename("/hello/"));
EXPECT_EQ("hello", Basename("/hello"));
EXPECT_EQ("world", Basename("hello/world"));
EXPECT_EQ("", Basename("hello/"));
EXPECT_EQ("world", Basename("world"));
EXPECT_EQ("", Basename("/"));
EXPECT_EQ("", Basename(""));
}
TEST(PathTest, Extension) {
EXPECT_EQ("gif", Extension("foo.gif"));
EXPECT_EQ("", Extension("foo."));
EXPECT_EQ("", Extension(""));
EXPECT_EQ("", Extension("/"));
EXPECT_EQ("", Extension("foo"));
EXPECT_EQ("", Extension("foo/"));
EXPECT_EQ("gif", Extension("/a/path/to/foo.gif"));
EXPECT_EQ("html", Extension("/a/path.bar/to/foo.html"));
EXPECT_EQ("", Extension("/a/path.bar/to/foo"));
EXPECT_EQ("baz", Extension("/a/path.bar/to/foo.bar.baz"));
}
TEST(PathTest, CleanPath) {
EXPECT_EQ(".", CleanPath(""));
EXPECT_EQ("x", CleanPath("x"));
EXPECT_EQ("/a/b/c/d", CleanPath("/a/b/c/d"));
EXPECT_EQ("/a/b/c/dtrue);
tsl::setenv("TEST_WORKSPACE", "my/workspace", true);
EXPECT_TRUE(GetTestWorkspaceDir(&dir));
EXPECT_EQ(dir, "/repo/src/my/workspace");
EXPECT_TRUE(GetTestWorkspaceDir(nullptr));
dir = kOriginalValue;
tsl::unsetenv("TEST_SRCDIR");
tsl::setenv("TEST_WORKSPACE", "my/workspace", true);
EXPECT_FALSE(GetTestWorkspaceDir(&dir));
EXPECT_EQ(dir, kOriginalValue);
EXPECT_FALSE(GetTestWorkspaceDir(nullptr));
dir = kOriginalValue;
tsl::setenv("TEST_SRCDIR", "/repo/src", true);
tsl::unsetenv("TEST_WORKSPACE");
EXPECT_FALSE(GetTestWorkspaceDir(&dir));
EXPECT_EQ(dir, kOriginalValue);
EXPECT_FALSE(GetTestWorkspaceDir(nullptr));
dir = kOriginalValue;
tsl::unsetenv("TEST_SRCDIR");
tsl::unsetenv("TEST_WORKSPACE");
EXPECT_FALSE(GetTestWorkspaceDir(&dir));
EXPECT_EQ(dir, kOriginalValue);
EXPECT_FALSE(GetTestWorkspaceDir(nullptr));
}
TEST(PathTest, GetTestUndeclaredOutputsDir) {
constexpr absl::string_view kOriginalValue = "original value";
std::string dir;
dir = kOriginalValue;
tsl::setenv("TEST_UNDECLARED_OUTPUTS_DIR", "/test/outputs",
true);
EXPECT_TRUE(GetTestUndeclaredOutputsDir(&dir));
EXPECT_EQ(dir, "/test/outputs");
EXPECT_TRUE(GetTestUndeclaredOutputsDir(nullptr));
dir = kOriginalValue;
tsl::unsetenv("TEST_UNDECLARED_OUTPUTS_DIR");
EXPECT_FALSE(GetTestUndeclaredOutputsDir(&dir));
EXPECT_EQ(dir, kOriginalValue);
EXPECT_FALSE(GetTestUndeclaredOutputsDir(nullptr));
}
TEST(PathTest, ResolveTestPrefixesKeepsThePathUnchanged) {
constexpr absl::string_view kOriginalValue = "original value";
std::string resolved_path;
resolved_path = kOriginalValue;
EXPECT_TRUE(ResolveTestPrefixes("", resolved_path));
EXPECT_EQ(resolved_path, "");
resolved_path = kOriginalValue;
EXPECT_TRUE(ResolveTestPrefixes("/", resolved_path));
EXPECT_EQ(resolved_path, "/");
resolved_path = kOriginalValue;
EXPECT_TRUE(ResolveTestPrefixes("alpha/beta", resolved_path));
EXPECT_EQ(resolved_path, "alpha/beta");
resolved_path = kOriginalValue;
EXPECT_TRUE(ResolveTestPrefixes("/alpha/beta", resolved_path));
EXPECT_EQ(resolved_path, "/alpha/beta");
}
TEST(PathTest, ResolveTestPrefixesCanResolveTestWorkspace) {
constexpr absl::string_view kOriginalValue = "original value";
std::string resolved_path;
tsl::setenv("TEST_SRCDIR", "/repo/src", true);
tsl::setenv("TEST_WORKSPACE", "my/workspace", true);
resolved_path = kOriginalValue;
EXPECT_TRUE(ResolveTestPrefixes("TEST_WORKSPACE", resolved_path));
EXPECT_EQ(resolved_path, "/repo/src/my/workspace");
resolved_path = kOriginalValue;
EXPECT_TRUE(ResolveTestPrefixes("TEST_WORKSPACE/", resolved_path));
EXPECT_EQ(resolved_path, "/repo/src/my/workspace/");
resolved_path = kOriginalValue;
EXPECT_TRUE(ResolveTestPrefixes("TEST_WORKSPACE/a/b", resolved_path));
EXPECT_EQ(resolved_path, "/repo/src/my/workspace/a/b");
resolved_path = kOriginalValue;
EXPECT_TRUE(ResolveTestPrefixes("TEST_WORKSPACEE", resolved_path));
EXPECT_EQ(resolved_path, "TEST_WORKSPACEE");
resolved_path = kOriginalValue;
EXPECT_TRUE(ResolveTestPrefixes("/TEST_WORKSPACE", resolved_path));
EXPECT_EQ(resolved_path, "/TEST_WORKSPACE");
}
TEST(PathTest, ResolveTestPrefixesCannotResolveTestWorkspace) {
constexpr absl::string_view kOriginalValue = "original value";
std::string resolved_path;
tsl::unsetenv("TEST_SRCDIR");
tsl::unsetenv("TEST_WORKSPACE");
resolved_path = kOriginalValue;
EXPECT_FALSE(ResolveTestPrefixes("TEST_WORKSPACE", resolved_path));
EXPECT_EQ(resolved_path, kOriginalValue);
}
TEST(PathTest, ResolveTestPrefixesCanResolveTestUndeclaredOutputsDir) {
constexpr absl::string_view kOriginalValue = "original value";
std::string resolved_path;
tsl::setenv("TEST_UNDECLARED_OUTPUTS_DIR", "/test/outputs",
true);
resolved_path = kOriginalValue;
EXPECT_TRUE(
ResolveTestPrefixes("TEST_UNDECLARED_OUTPUTS_DIR", resolved_path));
EXPECT_EQ(resolved_path, "/test/outputs");
resolved_path = kOriginalValue;
EXPECT_TRUE(
ResolveTestPrefixes("TEST_UNDECLARED_OUTPUTS_DIR/", resolved_path));
EXPECT_EQ(resolved_path, "/test/outputs/");
resolved_path = kOriginalValue;
EXPECT_TRUE(
ResolveTestPrefixes("TEST_UNDECLARED_OUTPUTS_DIR/a/b", resolved_path));
EXPECT_EQ(resolved_path, "/test/outputs/a/b");
resolved_path = kOriginalValue;
EXPECT_TRUE(
ResolveTestPrefixes("TEST_UNDECLARED_OUTPUTS_DIRR", resolved_path));
EXPECT_EQ(resolved_path, "TEST_UNDECLARED_OUTPUTS_DIRR");
resolved_path = kOriginalValue;
EXPECT_TRUE(
ResolveTestPrefixes("/TEST_UNDECLARED_OUTPUTS_DIR", resolved_path));
EXPECT_EQ(resolved_path, "/TEST_UNDECLARED_OUTPUTS_DIR");
}
TEST(PathTest, ResolveTestPrefixesCannotResolveTestUndeclaredOutputsDir) {
constexpr absl::string_view kOriginalValue = "original value";
std::string resolved_path;
tsl::unsetenv("TEST_UNDECLARED_OUTPUTS_DIR");
resolved_path = kOriginalValue;
EXPECT_FALSE(
ResolveTestPrefixes("TEST_UNDECLARED_OUTPUTS_DIR", resolved_path));
EXPECT_EQ(resolved_path, kOriginalValue);
}
}
} | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/path.cc | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/path_test.cc | 6d708fdcdd4f40537b7fa273371215a6fa3d4423 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.