ID | Language | Repository Name | File Name | File Path in Repository | File Path for Unit Test | Code | Unit Test - (Ground Truth) | Code Url | Test Code Url | Commit Hash
---|---|---|---|---|---|---|---|---|---|---|
d13a71f9-9641-49d4-9959-d13d8a3bdbf0 | cpp | google/tensorstore | bfloat16 | tensorstore/util/bfloat16.h | tensorstore/util/bfloat16_test.cc | #ifndef TENSORSTORE_UTIL_BFLOAT16_H_
#define TENSORSTORE_UTIL_BFLOAT16_H_
#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>
#include <limits>
#include <type_traits>
#include "absl/base/casts.h"
#include <nlohmann/json_fwd.hpp>
namespace tensorstore {
class BFloat16;
}
namespace std {
template <>
struct numeric_limits<::tensorstore::BFloat16>;
}
namespace tensorstore {
namespace internal {
BFloat16 NumericFloat32ToBfloat16RoundNearestEven(float v);
BFloat16 Float32ToBfloat16RoundNearestEven(float v);
float Bfloat16ToFloat(BFloat16 v);
}
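// Storage-only 16-bit "brain floating point" type: 1 sign bit, 8 exponent
// bits, and 7 explicit mantissa bits (the upper half of an IEEE float32).
// Arithmetic operators convert to float, compute, and round the result back
// to bfloat16 using round-to-nearest-even.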
class BFloat16 {
public:
constexpr BFloat16() : rep_(0) {}
template <typename T,
typename = std::enable_if_t<std::is_convertible_v<T, float>>>
explicit BFloat16(T x) {
if constexpr (std::is_same_v<T, bool>) {
rep_ = static_cast<uint16_t>(x) * 0x3f80;
} else if constexpr (std::numeric_limits<T>::is_integer) {
*this = internal::NumericFloat32ToBfloat16RoundNearestEven(
static_cast<float>(x));
} else {
*this =
internal::Float32ToBfloat16RoundNearestEven(static_cast<float>(x));
}
}
operator float() const { return internal::Bfloat16ToFloat(*this); }
BFloat16& operator=(float v) { return *this = static_cast<BFloat16>(v); }
BFloat16& operator=(bool v) { return *this = static_cast<BFloat16>(v); }
template <typename T>
std::enable_if_t<std::numeric_limits<T>::is_integer, BFloat16&> operator=(
T v) {
return *this = static_cast<BFloat16>(v);
}
#define TENSORSTORE_INTERNAL_BFLOAT16_ARITHMETIC_OP(OP) \
friend BFloat16 operator OP(BFloat16 a, BFloat16 b) { \
return BFloat16(static_cast<float>(a) OP static_cast<float>(b)); \
} \
template <typename T> \
friend std::enable_if_t<std::numeric_limits<T>::is_integer, BFloat16> \
operator OP(BFloat16 a, T b) { \
return BFloat16(static_cast<float>(a) OP b); \
} \
template <typename T> \
friend std::enable_if_t<std::numeric_limits<T>::is_integer, BFloat16> \
operator OP(T a, BFloat16 b) { \
return BFloat16(a OP static_cast<float>(b)); \
}
#define TENSORSTORE_INTERNAL_BFLOAT16_ARITHMETIC_ASSIGN_OP(OP) \
friend BFloat16& operator OP##=(BFloat16& a, BFloat16 b) { \
return a = BFloat16(static_cast<float>(a) OP static_cast<float>(b)); \
} \
template <typename T> \
friend std::enable_if_t<std::numeric_limits<T>::is_integer, BFloat16&> \
operator OP##=(BFloat16& a, T b) { \
return a = BFloat16(static_cast<float>(a) OP b); \
}
TENSORSTORE_INTERNAL_BFLOAT16_ARITHMETIC_OP(+)
TENSORSTORE_INTERNAL_BFLOAT16_ARITHMETIC_ASSIGN_OP(+)
TENSORSTORE_INTERNAL_BFLOAT16_ARITHMETIC_OP(-)
TENSORSTORE_INTERNAL_BFLOAT16_ARITHMETIC_ASSIGN_OP(-)
TENSORSTORE_INTERNAL_BFLOAT16_ARITHMETIC_OP(*)
TENSORSTORE_INTERNAL_BFLOAT16_ARITHMETIC_ASSIGN_OP(*)
TENSORSTORE_INTERNAL_BFLOAT16_ARITHMETIC_OP(/)
TENSORSTORE_INTERNAL_BFLOAT16_ARITHMETIC_ASSIGN_OP(/)
#undef TENSORSTORE_INTERNAL_BFLOAT16_ARITHMETIC_OP
#undef TENSORSTORE_INTERNAL_BFLOAT16_ARITHMETIC_ASSIGN_OP
friend BFloat16 operator-(BFloat16 a) {
BFloat16 result;
result.rep_ = a.rep_ ^ 0x8000;
return result;
}
friend BFloat16 operator+(BFloat16 a) { return a; }
friend BFloat16 operator++(BFloat16& a) {
a += BFloat16(1);
return a;
}
friend BFloat16 operator--(BFloat16& a) {
a -= BFloat16(1);
return a;
}
friend BFloat16 operator++(BFloat16& a, int) {
BFloat16 original_value = a;
++a;
return original_value;
}
friend BFloat16 operator--(BFloat16& a, int) {
BFloat16 original_value = a;
--a;
return original_value;
}
template <template <typename U, typename V, typename... Args>
class ObjectType ,
template <typename U, typename... Args>
class ArrayType ,
class StringType , class BooleanType ,
class NumberIntegerType ,
class NumberUnsignedType ,
class NumberFloatType ,
template <typename U> class AllocatorType ,
template <typename T, typename SFINAE = void>
class JSONSerializer ,
class BinaryType >
friend void to_json(
::nlohmann::basic_json<ObjectType, ArrayType, StringType, BooleanType,
NumberIntegerType, NumberUnsignedType,
NumberFloatType, AllocatorType, JSONSerializer,
BinaryType>& j,
BFloat16 v) {
j = static_cast<NumberFloatType>(v);
}
struct bitcast_construct_t {};
explicit constexpr BFloat16(bitcast_construct_t, uint16_t rep) : rep_(rep) {}
uint16_t rep_;
};
inline bool isinf(BFloat16 x) { return std::isinf(static_cast<float>(x)); }
inline bool signbit(BFloat16 x) { return std::signbit(static_cast<float>(x)); }
inline bool isnan(BFloat16 x) { return std::isnan(static_cast<float>(x)); }
inline bool isfinite(BFloat16 x) {
return std::isfinite(static_cast<float>(x));
}
inline BFloat16 abs(BFloat16 x) {
x.rep_ &= 0x7fff;
return x;
}
inline BFloat16 exp(BFloat16 x) {
return BFloat16(std::exp(static_cast<float>(x)));
}
inline BFloat16 exp2(BFloat16 x) {
return BFloat16(std::exp2(static_cast<float>(x)));
}
inline BFloat16 expm1(BFloat16 x) {
return BFloat16(std::expm1(static_cast<float>(x)));
}
inline BFloat16 log(BFloat16 x) {
return BFloat16(std::log(static_cast<float>(x)));
}
inline BFloat16 log1p(BFloat16 x) {
return BFloat16(std::log1p(static_cast<float>(x)));
}
inline BFloat16 log10(BFloat16 x) {
return BFloat16(std::log10(static_cast<float>(x)));
}
inline BFloat16 log2(BFloat16 x) {
return BFloat16(std::log2(static_cast<float>(x)));
}
inline BFloat16 sqrt(BFloat16 x) {
return BFloat16(std::sqrt(static_cast<float>(x)));
}
inline BFloat16 pow(BFloat16 x, BFloat16 y) {
return BFloat16(std::pow(static_cast<float>(x), static_cast<float>(y)));
}
inline BFloat16 sin(BFloat16 x) {
return BFloat16(std::sin(static_cast<float>(x)));
}
inline BFloat16 cos(BFloat16 x) {
return BFloat16(std::cos(static_cast<float>(x)));
}
inline BFloat16 tan(BFloat16 x) {
return BFloat16(std::tan(static_cast<float>(x)));
}
inline BFloat16 asin(BFloat16 x) {
return BFloat16(std::asin(static_cast<float>(x)));
}
inline BFloat16 acos(BFloat16 x) {
return BFloat16(std::acos(static_cast<float>(x)));
}
inline BFloat16 atan(BFloat16 x) {
return BFloat16(std::atan(static_cast<float>(x)));
}
inline BFloat16 sinh(BFloat16 x) {
return BFloat16(std::sinh(static_cast<float>(x)));
}
inline BFloat16 cosh(BFloat16 x) {
return BFloat16(std::cosh(static_cast<float>(x)));
}
inline BFloat16 tanh(BFloat16 x) {
return BFloat16(std::tanh(static_cast<float>(x)));
}
inline BFloat16 asinh(BFloat16 x) {
return BFloat16(std::asinh(static_cast<float>(x)));
}
inline BFloat16 acosh(BFloat16 x) {
return BFloat16(std::acosh(static_cast<float>(x)));
}
inline BFloat16 atanh(BFloat16 x) {
return BFloat16(std::atanh(static_cast<float>(x)));
}
inline BFloat16 floor(BFloat16 x) {
return BFloat16(std::floor(static_cast<float>(x)));
}
inline BFloat16 trunc(BFloat16 x) {
return BFloat16(std::trunc(static_cast<float>(x)));
}
inline BFloat16 rint(BFloat16 x) {
return BFloat16(std::rint(static_cast<float>(x)));
}
inline BFloat16 ceil(BFloat16 x) {
return BFloat16(std::ceil(static_cast<float>(x)));
}
inline BFloat16 fmod(BFloat16 x, BFloat16 y) {
return BFloat16(std::fmod(static_cast<float>(x), static_cast<float>(y)));
}
inline BFloat16 fmin(BFloat16 a, BFloat16 b) {
return BFloat16(std::fmin(static_cast<float>(a), static_cast<float>(b)));
}
inline BFloat16 fmax(BFloat16 a, BFloat16 b) {
return BFloat16(std::fmax(static_cast<float>(a), static_cast<float>(b)));
}
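// Returns the next representable bfloat16 value after `from` in the direction
// of `to`, operating on the 16-bit representation: NaN operands produce NaN,
// equal operands return `to`, zero steps to the smallest subnormal with the
// sign of `to`, and otherwise the representation moves by one ulp toward `to`.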
inline BFloat16 nextafter(BFloat16 from, BFloat16 to) {
const uint16_t from_as_int = absl::bit_cast<uint16_t>(from),
to_as_int = absl::bit_cast<uint16_t>(to);
const uint16_t sign_mask = 1 << 15;
float from_as_float(from), to_as_float(to);
if (std::isnan(from_as_float) || std::isnan(to_as_float)) {
return BFloat16(std::numeric_limits<float>::quiet_NaN());
}
if (from_as_int == to_as_int) {
return to;
}
if (from_as_float == 0) {
if (to_as_float == 0) {
return to;
} else {
return absl::bit_cast<BFloat16, uint16_t>((to_as_int & sign_mask) | 1);
}
}
uint16_t from_sign = from_as_int & sign_mask;
uint16_t to_sign = to_as_int & sign_mask;
uint16_t from_abs = from_as_int & ~sign_mask;
uint16_t to_abs = to_as_int & ~sign_mask;
uint16_t magnitude_adjustment =
(from_abs > to_abs || from_sign != to_sign) ? 0xFFFF : 0x0001;
return absl::bit_cast<BFloat16, uint16_t>(from_as_int + magnitude_adjustment);
}
namespace internal {
inline uint16_t GetFloat32High16(float v) {
return static_cast<uint16_t>(absl::bit_cast<uint32_t>(v) >> 16);
}
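// Truncating float32 -> bfloat16 conversion: keeps the high 16 bits of the
// float representation. For NaN inputs, a mantissa bit that survives the
// truncation is set first so that the result remains a NaN.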
inline BFloat16 Float32ToBfloat16Truncate(float v) {
uint32_t bits = absl::bit_cast<uint32_t>(v);
if (std::isnan(v)) {
bits |= (static_cast<uint32_t>(1) << 21);
}
return absl::bit_cast<BFloat16, uint16_t>(bits >> 16);
}
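// Round-to-nearest-even float32 -> bfloat16 conversion for non-NaN inputs:
// adds a bias of 0x7fff plus the least-significant retained mantissa bit,
// then truncates the low 16 bits.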
inline BFloat16 NumericFloat32ToBfloat16RoundNearestEven(float v) {
assert(!std::isnan(v));
uint32_t input = absl::bit_cast<uint32_t>(v);
const uint32_t lsb = (input >> 16) & 1;
const uint32_t rounding_bias = 0x7fff + lsb;
input += rounding_bias;
return absl::bit_cast<BFloat16, uint16_t>(input >> 16);
}
inline BFloat16 Float32ToBfloat16RoundNearestEven(float v) {
if (std::isnan(v)) {
return tensorstore::BFloat16(
tensorstore::BFloat16::bitcast_construct_t{},
static_cast<uint16_t>((absl::bit_cast<uint32_t>(v) | 0x00200000u) >>
16));
}
return NumericFloat32ToBfloat16RoundNearestEven(v);
}
inline float Bfloat16ToFloat(BFloat16 v) {
return absl::bit_cast<float>(
static_cast<uint32_t>(absl::bit_cast<uint16_t>(v)) << 16);
}
}
}
namespace std {
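// `std::numeric_limits` specialization for BFloat16: 8 significand digits
// (7 stored mantissa bits plus the implicit leading bit) and the same exponent
// range as `float`; the special values below are constructed directly from
// their bfloat16 bit patterns.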
template <>
struct numeric_limits<tensorstore::BFloat16> {
static constexpr bool is_specialized = true;
static constexpr bool is_signed = true;
static constexpr bool is_integer = false;
static constexpr bool is_exact = false;
static constexpr bool has_infinity = true;
static constexpr bool has_quiet_NaN = true;
static constexpr bool has_signaling_NaN = true;
static constexpr float_denorm_style has_denorm = std::denorm_present;
static constexpr bool has_denorm_loss = false;
static constexpr std::float_round_style round_style =
numeric_limits<float>::round_style;
static constexpr bool is_iec559 = false;
static constexpr bool is_bounded = true;
static constexpr bool is_modulo = false;
static constexpr int digits = 8;
static constexpr int digits10 = 2;
static constexpr int max_digits10 = 4;
static constexpr int radix = 2;
static constexpr int min_exponent = numeric_limits<float>::min_exponent;
static constexpr int min_exponent10 = numeric_limits<float>::min_exponent10;
static constexpr int max_exponent = numeric_limits<float>::max_exponent;
static constexpr int max_exponent10 = numeric_limits<float>::max_exponent10;
static constexpr bool traps = numeric_limits<float>::traps;
static constexpr bool tinyness_before =
numeric_limits<float>::tinyness_before;
static constexpr tensorstore::BFloat16 min() {
return tensorstore::BFloat16(tensorstore::BFloat16::bitcast_construct_t{},
static_cast<uint16_t>(0x0080));
}
static constexpr tensorstore::BFloat16 lowest() {
return tensorstore::BFloat16(tensorstore::BFloat16::bitcast_construct_t{},
static_cast<uint16_t>(0xff7f));
}
static constexpr tensorstore::BFloat16 max() {
return tensorstore::BFloat16(tensorstore::BFloat16::bitcast_construct_t{},
static_cast<uint16_t>(0x7f7f));
}
static constexpr tensorstore::BFloat16 epsilon() {
return tensorstore::BFloat16(tensorstore::BFloat16::bitcast_construct_t{},
static_cast<uint16_t>(0x3c00));
}
static constexpr tensorstore::BFloat16 round_error() {
return tensorstore::BFloat16(tensorstore::BFloat16::bitcast_construct_t{},
static_cast<uint16_t>(0x3f00));
}
static constexpr tensorstore::BFloat16 infinity() {
return tensorstore::BFloat16(tensorstore::BFloat16::bitcast_construct_t{},
static_cast<uint16_t>(0x7f80));
}
static constexpr tensorstore::BFloat16 quiet_NaN() {
return tensorstore::BFloat16(tensorstore::BFloat16::bitcast_construct_t{},
static_cast<uint16_t>(0x7fc0));
}
static constexpr tensorstore::BFloat16 signaling_NaN() {
return tensorstore::BFloat16(tensorstore::BFloat16::bitcast_construct_t{},
static_cast<uint16_t>(0x7f81));
}
static constexpr tensorstore::BFloat16 denorm_min() {
return tensorstore::BFloat16(tensorstore::BFloat16::bitcast_construct_t{},
static_cast<uint16_t>(0x0001));
}
};
}
#endif | #include "tensorstore/util/bfloat16.h"
#include <cmath>
#include <cstdint>
#include <cstring>
#include <limits>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/base/casts.h"
#include "tensorstore/internal/json_gtest.h"
namespace {
using ::tensorstore::internal::Float32ToBfloat16RoundNearestEven;
using ::tensorstore::internal::Float32ToBfloat16Truncate;
using bfloat16_t = tensorstore::BFloat16;
::testing::Matcher<bfloat16_t> MatchesBits(uint16_t bits) {
return ::testing::ResultOf(
[](bfloat16_t y) { return absl::bit_cast<uint16_t>(y); },
::testing::Eq(bits));
}
::testing::Matcher<float> NearFloat(float x, float relative_error = 1e-3) {
return ::testing::FloatNear(x, std::abs(x) * relative_error);
}
float BinaryToFloat(uint32_t sign, uint32_t exponent, uint32_t high_mantissa,
uint32_t low_mantissa) {
float dest;
uint32_t src =
(sign << 31) + (exponent << 23) + (high_mantissa << 16) + low_mantissa;
memcpy(static_cast<void*>(&dest), static_cast<const void*>(&src),
sizeof(dest));
return dest;
}
void TestTruncate(float input, float expected_truncation,
float expected_rounding) {
bfloat16_t truncated = Float32ToBfloat16Truncate(input);
bfloat16_t rounded = Float32ToBfloat16RoundNearestEven(input);
if (std::isnan(input)) {
EXPECT_TRUE(std::isnan(truncated));
EXPECT_TRUE(std::isnan(rounded));
return;
}
EXPECT_EQ(expected_truncation, static_cast<float>(truncated));
EXPECT_EQ(expected_rounding, static_cast<float>(rounded));
}
template <typename T>
void TestRoundtrips() {
for (T value : {
-std::numeric_limits<T>::infinity(),
std::numeric_limits<T>::infinity(),
T(-1.0),
T(-0.5),
T(-0.0),
T(1.0),
T(0.5),
T(0.0),
}) {
EXPECT_EQ(value, static_cast<T>(static_cast<bfloat16_t>(value)));
}
}
TEST(Bfloat16Test, FloatRoundtrips) { TestRoundtrips<float>(); }
TEST(Bfloat16Test, DoubleRoundtrips) { TestRoundtrips<double>(); }
TEST(Bfloat16Test, Float16Roundtrips) { TestRoundtrips<bfloat16_t>(); }
TEST(Bfloat16Test, ConversionFromFloat) {
EXPECT_THAT(bfloat16_t(1.0f), MatchesBits(0x3f80));
EXPECT_THAT(bfloat16_t(0.5f), MatchesBits(0x3f00));
EXPECT_THAT(bfloat16_t(0.33333f), MatchesBits(0x3eab));
EXPECT_THAT(bfloat16_t(3.38e38f), MatchesBits(0x7f7e));
EXPECT_THAT(bfloat16_t(3.40e38f), MatchesBits(0x7f80));
}
TEST(Bfloat16Test, RoundToNearestEven) {
float val1 = static_cast<float>(absl::bit_cast<bfloat16_t>(uint16_t{0x3c00}));
float val2 = static_cast<float>(absl::bit_cast<bfloat16_t>(uint16_t{0x3c01}));
float val3 = static_cast<float>(absl::bit_cast<bfloat16_t>(uint16_t{0x3c02}));
EXPECT_THAT(bfloat16_t(0.5f * (val1 + val2)), MatchesBits(0x3c00));
EXPECT_THAT(bfloat16_t(0.5f * (val2 + val3)), MatchesBits(0x3c02));
}
TEST(Bfloat16Test, ConversionFromInt) {
EXPECT_THAT(bfloat16_t(-1), MatchesBits(0xbf80));
EXPECT_THAT(bfloat16_t(0), MatchesBits(0x0000));
EXPECT_THAT(bfloat16_t(1), MatchesBits(0x3f80));
EXPECT_THAT(bfloat16_t(2), MatchesBits(0x4000));
EXPECT_THAT(bfloat16_t(3), MatchesBits(0x4040));
EXPECT_THAT(bfloat16_t(12), MatchesBits(0x4140));
}
TEST(Bfloat16Test, ConversionFromBool) {
EXPECT_THAT(bfloat16_t(false), MatchesBits(0x0000));
EXPECT_THAT(bfloat16_t(true), MatchesBits(0x3f80));
}
TEST(Bfloat16Test, ConversionToBool) {
EXPECT_EQ(static_cast<bool>(bfloat16_t(3)), true);
EXPECT_EQ(static_cast<bool>(bfloat16_t(0.33333f)), true);
EXPECT_EQ(bfloat16_t(-0.0), false);
EXPECT_EQ(static_cast<bool>(bfloat16_t(0.0)), false);
}
TEST(Bfloat16Test, ExplicitConversionToFloat) {
EXPECT_EQ(static_cast<float>(absl::bit_cast<bfloat16_t, uint16_t>(0x0000)),
0.0f);
EXPECT_EQ(static_cast<float>(absl::bit_cast<bfloat16_t, uint16_t>(0x3f80)),
1.0f);
}
TEST(Bfloat16Test, ImplicitConversionToFloat) {
EXPECT_EQ((absl::bit_cast<bfloat16_t, uint16_t>(0x0000)), 0.0f);
EXPECT_EQ((absl::bit_cast<bfloat16_t, uint16_t>(0x3f80)), 1.0f);
}
TEST(Bfloat16Test, Zero) {
EXPECT_EQ(bfloat16_t(0.0f), bfloat16_t(0.0f));
EXPECT_EQ(bfloat16_t(-0.0f), bfloat16_t(0.0f));
EXPECT_EQ(bfloat16_t(-0.0f), bfloat16_t(-0.0f));
EXPECT_THAT(bfloat16_t(0.0f), MatchesBits(0x0000));
EXPECT_THAT(bfloat16_t(-0.0f), MatchesBits(0x8000));
}
TEST(Bfloat16Test, DefaultConstruct) {
EXPECT_EQ(static_cast<float>(bfloat16_t()), 0.0f);
}
TEST(Bfloat16Test, Truncate0) {
TestTruncate(BinaryToFloat(0, 0x80, 0x48, 0xf5c3),
BinaryToFloat(0, 0x80, 0x48, 0x0000),
BinaryToFloat(0, 0x80, 0x49, 0x0000));
}
TEST(Bfloat16Test, Truncate1) {
TestTruncate(BinaryToFloat(1, 0x80, 0x48, 0xf5c3),
BinaryToFloat(1, 0x80, 0x48, 0x0000),
BinaryToFloat(1, 0x80, 0x49, 0x0000));
}
TEST(Bfloat16Test, Truncate2) {
TestTruncate(BinaryToFloat(0, 0x80, 0x48, 0x8000),
BinaryToFloat(0, 0x80, 0x48, 0x0000),
BinaryToFloat(0, 0x80, 0x48, 0x0000));
}
TEST(Bfloat16Test, Truncate3) {
TestTruncate(BinaryToFloat(0, 0xff, 0x00, 0x0001),
BinaryToFloat(0, 0xff, 0x40, 0x0000),
BinaryToFloat(0, 0xff, 0x40, 0x0000));
}
TEST(Bfloat16Test, Truncate4) {
TestTruncate(BinaryToFloat(0, 0xff, 0x7f, 0xffff),
BinaryToFloat(0, 0xff, 0x40, 0x0000),
BinaryToFloat(0, 0xff, 0x40, 0x0000));
}
TEST(Bfloat16Test, Truncate5) {
TestTruncate(BinaryToFloat(1, 0x80, 0x48, 0xc000),
BinaryToFloat(1, 0x80, 0x48, 0x0000),
BinaryToFloat(1, 0x80, 0x49, 0x0000));
}
TEST(Bfloat16Test, Truncate6) {
TestTruncate(BinaryToFloat(0, 0x80, 0x48, 0x0000),
BinaryToFloat(0, 0x80, 0x48, 0x0000),
BinaryToFloat(0, 0x80, 0x48, 0x0000));
}
TEST(Bfloat16Test, Truncate7) {
TestTruncate(BinaryToFloat(0, 0x80, 0x48, 0x4000),
BinaryToFloat(0, 0x80, 0x48, 0x0000),
BinaryToFloat(0, 0x80, 0x48, 0x0000));
}
TEST(Bfloat16Test, Truncate8) {
TestTruncate(BinaryToFloat(0, 0x80, 0x48, 0x8000),
BinaryToFloat(0, 0x80, 0x48, 0x0000),
BinaryToFloat(0, 0x80, 0x48, 0x0000));
}
TEST(Bfloat16Test, Truncate9) {
TestTruncate(BinaryToFloat(0, 0x00, 0x48, 0x8000),
BinaryToFloat(0, 0x00, 0x48, 0x0000),
BinaryToFloat(0, 0x00, 0x48, 0x0000));
}
TEST(Bfloat16Test, Truncate10) {
TestTruncate(BinaryToFloat(0, 0x00, 0x7f, 0xc000),
BinaryToFloat(0, 0x00, 0x7f, 0x0000),
BinaryToFloat(0, 0x00, 0x80, 0x0000));
}
TEST(Bfloat16Test, Conversion) {
for (int i = 0; i < 100; ++i) {
float a = i + 1.25;
bfloat16_t b = static_cast<bfloat16_t>(a);
float c = static_cast<float>(b);
EXPECT_LE(std::abs(c - a), a / 128);
}
}
TEST(Bfloat16Test, Epsilon) {
EXPECT_LE(1.0f,
static_cast<float>(std::numeric_limits<bfloat16_t>::epsilon() +
bfloat16_t(1.0f)));
EXPECT_EQ(1.0f,
static_cast<float>(std::numeric_limits<bfloat16_t>::epsilon() /
bfloat16_t(2.0f) +
bfloat16_t(1.0f)));
}
TEST(Bfloat16Test, NextAfter) {
const bfloat16_t one(1), two(2), zero(0),
nan = std::numeric_limits<bfloat16_t>::quiet_NaN(),
epsilon = std::numeric_limits<bfloat16_t>::epsilon(),
denorm_min = std::numeric_limits<bfloat16_t>::denorm_min();
EXPECT_EQ(epsilon, nextafter(one, two) - one);
EXPECT_EQ(-epsilon / 2, nextafter(one, zero) - one);
EXPECT_EQ(one, nextafter(one, one));
EXPECT_EQ(denorm_min, nextafter(zero, one));
EXPECT_EQ(-denorm_min, nextafter(zero, -one));
const bfloat16_t values[] = {zero, -zero, nan};
for (int i = 0; i < 3; ++i) {
auto a = values[i];
for (int j = 0; j < 3; ++j) {
if (i == j) continue;
auto b = values[j];
auto next_float =
std::nextafter(static_cast<float>(a), static_cast<float>(b));
auto next_bfloat16 = nextafter(a, b);
EXPECT_EQ(std::isnan(next_float), isnan(next_bfloat16));
if (!std::isnan(next_float)) {
EXPECT_EQ(next_float, next_bfloat16);
}
}
}
EXPECT_EQ(std::numeric_limits<bfloat16_t>::infinity(),
nextafter(std::numeric_limits<bfloat16_t>::max(),
std::numeric_limits<bfloat16_t>::infinity()));
}
TEST(Bfloat16Test, Negate) {
EXPECT_EQ(static_cast<float>(-bfloat16_t(3.0f)), -3.0f);
EXPECT_EQ(static_cast<float>(-bfloat16_t(-4.5f)), 4.5f);
}
#ifndef _MSC_VER
TEST(Bfloat16Test, DivisionByZero) {
EXPECT_TRUE(std::isnan(static_cast<float>(bfloat16_t(0.0 / 0.0))));
EXPECT_TRUE(std::isinf(static_cast<float>(bfloat16_t(1.0 / 0.0))));
EXPECT_TRUE(std::isinf(static_cast<float>(bfloat16_t(-1.0 / 0.0))));
EXPECT_TRUE(std::isnan(bfloat16_t(0.0 / 0.0)));
EXPECT_TRUE(std::isinf(bfloat16_t(1.0 / 0.0)));
EXPECT_TRUE(std::isinf(bfloat16_t(-1.0 / 0.0)));
}
#endif
TEST(Bfloat16Test, NonFinite) {
EXPECT_FALSE(std::isinf(
static_cast<float>(bfloat16_t(3.38e38f))));
EXPECT_FALSE(std::isnan(static_cast<float>(bfloat16_t(0.0f))));
EXPECT_TRUE(std::isinf(
static_cast<float>(absl::bit_cast<bfloat16_t, uint16_t>(0xff80))));
EXPECT_TRUE(std::isnan(
static_cast<float>(absl::bit_cast<bfloat16_t, uint16_t>(0xffc0))));
EXPECT_TRUE(std::isinf(
static_cast<float>(absl::bit_cast<bfloat16_t, uint16_t>(0x7f80))));
EXPECT_TRUE(std::isnan(
static_cast<float>(absl::bit_cast<bfloat16_t, uint16_t>(0x7fc0))));
EXPECT_FALSE(isinf(absl::bit_cast<bfloat16_t, uint16_t>(0x7bff)));
EXPECT_FALSE(isnan(absl::bit_cast<bfloat16_t, uint16_t>(0x0000)));
EXPECT_TRUE(isinf(absl::bit_cast<bfloat16_t, uint16_t>(0xff80)));
EXPECT_TRUE(isnan(absl::bit_cast<bfloat16_t, uint16_t>(0xffc0)));
EXPECT_TRUE(isinf(absl::bit_cast<bfloat16_t, uint16_t>(0x7f80)));
EXPECT_TRUE(isnan(absl::bit_cast<bfloat16_t, uint16_t>(0x7fc0)));
EXPECT_THAT(bfloat16_t(BinaryToFloat(0x0, 0xff, 0x40, 0x0)),
MatchesBits(0x7fe0));
EXPECT_THAT(bfloat16_t(BinaryToFloat(0x1, 0xff, 0x40, 0x0)),
MatchesBits(0xffe0));
EXPECT_THAT(
Float32ToBfloat16Truncate(BinaryToFloat(0x0, 0xff, 0x40, 0x0)),
MatchesBits(0x7fe0));
EXPECT_THAT(
Float32ToBfloat16Truncate(BinaryToFloat(0x1, 0xff, 0x40, 0x0)),
MatchesBits(0xffe0));
}
TEST(Bfloat16Test, NumericLimits) {
static_assert(std::numeric_limits<bfloat16_t>::is_signed);
EXPECT_EQ(
absl::bit_cast<uint16_t>(std::numeric_limits<bfloat16_t>::infinity()),
absl::bit_cast<uint16_t>(
bfloat16_t(std::numeric_limits<float>::infinity())));
constexpr uint16_t BFLOAT16_QUIET_BIT = 0x0040;
EXPECT_TRUE(isnan(std::numeric_limits<bfloat16_t>::quiet_NaN()));
EXPECT_TRUE(isnan(bfloat16_t(std::numeric_limits<float>::quiet_NaN())));
EXPECT_GT(
(absl::bit_cast<uint16_t>(std::numeric_limits<bfloat16_t>::quiet_NaN()) &
BFLOAT16_QUIET_BIT),
0);
EXPECT_GT((absl::bit_cast<uint16_t>(
bfloat16_t(std::numeric_limits<float>::quiet_NaN())) &
BFLOAT16_QUIET_BIT),
0);
EXPECT_TRUE(isnan(std::numeric_limits<bfloat16_t>::signaling_NaN()));
EXPECT_TRUE(isnan(bfloat16_t(std::numeric_limits<float>::signaling_NaN())));
EXPECT_EQ(0, (absl::bit_cast<uint16_t>(
std::numeric_limits<bfloat16_t>::signaling_NaN()) &
BFLOAT16_QUIET_BIT));
#ifndef _MSC_VER
EXPECT_EQ(0, (absl::bit_cast<uint16_t>(
bfloat16_t(std::numeric_limits<float>::signaling_NaN())) &
BFLOAT16_QUIET_BIT));
#endif
EXPECT_GT(std::numeric_limits<bfloat16_t>::min(), bfloat16_t(0.f));
EXPECT_GT(std::numeric_limits<bfloat16_t>::denorm_min(), bfloat16_t(0.f));
EXPECT_EQ(std::numeric_limits<bfloat16_t>::denorm_min() / bfloat16_t(2),
bfloat16_t(0.f));
}
TEST(Bfloat16Test, Arithmetic) {
EXPECT_EQ(static_cast<float>(bfloat16_t(2) + bfloat16_t(2)), 4);
EXPECT_EQ(static_cast<float>(bfloat16_t(2) + bfloat16_t(-2)), 0);
EXPECT_THAT(static_cast<float>(bfloat16_t(0.33333f) + bfloat16_t(0.66667f)),
NearFloat(1.0f));
EXPECT_EQ(static_cast<float>(bfloat16_t(2.0f) * bfloat16_t(-5.5f)), -11.0f);
EXPECT_THAT(static_cast<float>(bfloat16_t(1.0f) / bfloat16_t(3.0f)),
NearFloat(0.3339f));
EXPECT_EQ(static_cast<float>(-bfloat16_t(4096.0f)), -4096.0f);
EXPECT_EQ(static_cast<float>(-bfloat16_t(-4096.0f)), 4096.0f);
}
TEST(Bfloat16Test, Comparison) {
EXPECT_TRUE(bfloat16_t(1.0f) > bfloat16_t(0.5f));
EXPECT_TRUE(bfloat16_t(0.5f) < bfloat16_t(1.0f));
EXPECT_FALSE((bfloat16_t(1.0f) < bfloat16_t(0.5f)));
EXPECT_FALSE((bfloat16_t(0.5f) > bfloat16_t(1.0f)));
EXPECT_FALSE((bfloat16_t(4.0f) > bfloat16_t(4.0f)));
EXPECT_FALSE((bfloat16_t(4.0f) < bfloat16_t(4.0f)));
EXPECT_FALSE((bfloat16_t(0.0f) < bfloat16_t(-0.0f)));
EXPECT_FALSE((bfloat16_t(-0.0f) < bfloat16_t(0.0f)));
EXPECT_FALSE((bfloat16_t(0.0f) > bfloat16_t(-0.0f)));
EXPECT_FALSE((bfloat16_t(-0.0f) > bfloat16_t(0.0f)));
EXPECT_TRUE(bfloat16_t(0.2f) > bfloat16_t(-1.0f));
EXPECT_TRUE(bfloat16_t(-1.0f) < bfloat16_t(0.2f));
EXPECT_TRUE(bfloat16_t(-16.0f) < bfloat16_t(-15.0f));
EXPECT_TRUE(bfloat16_t(1.0f) == bfloat16_t(1.0f));
EXPECT_TRUE(bfloat16_t(1.0f) != bfloat16_t(2.0f));
#ifndef _MSC_VER
EXPECT_FALSE((bfloat16_t(0.0 / 0.0) == bfloat16_t(0.0 / 0.0)));
EXPECT_TRUE(bfloat16_t(0.0 / 0.0) != bfloat16_t(0.0 / 0.0));
EXPECT_FALSE((bfloat16_t(1.0) == bfloat16_t(0.0 / 0.0)));
EXPECT_FALSE((bfloat16_t(1.0) < bfloat16_t(0.0 / 0.0)));
EXPECT_FALSE((bfloat16_t(1.0) > bfloat16_t(0.0 / 0.0)));
EXPECT_TRUE(bfloat16_t(1.0) != bfloat16_t(0.0 / 0.0));
EXPECT_TRUE(bfloat16_t(1.0) < bfloat16_t(1.0 / 0.0));
EXPECT_TRUE(bfloat16_t(1.0) > bfloat16_t(-1.0 / 0.0));
#endif
}
constexpr float PI = 3.14159265358979323846f;
TEST(Bfloat16Test, BasicFunctions) {
EXPECT_EQ(static_cast<float>(abs(bfloat16_t(3.5f))), 3.5f);
EXPECT_EQ(static_cast<float>(abs(bfloat16_t(3.5f))), 3.5f);
EXPECT_EQ(static_cast<float>(abs(bfloat16_t(-3.5f))), 3.5f);
EXPECT_EQ(static_cast<float>(abs(bfloat16_t(-3.5f))), 3.5f);
EXPECT_EQ(static_cast<float>(floor(bfloat16_t(3.5f))), 3.0f);
EXPECT_EQ(static_cast<float>(floor(bfloat16_t(3.5f))), 3.0f);
EXPECT_EQ(static_cast<float>(floor(bfloat16_t(-3.5f))), -4.0f);
EXPECT_EQ(static_cast<float>(floor(bfloat16_t(-3.5f))), -4.0f);
EXPECT_EQ(static_cast<float>(ceil(bfloat16_t(3.5f))), 4.0f);
EXPECT_EQ(static_cast<float>(ceil(bfloat16_t(3.5f))), 4.0f);
EXPECT_EQ(static_cast<float>(ceil(bfloat16_t(-3.5f))), -3.0f);
EXPECT_EQ(static_cast<float>(ceil(bfloat16_t(-3.5f))), -3.0f);
EXPECT_FLOAT_EQ(static_cast<float>(sqrt(bfloat16_t(0.0f))), 0.0f);
EXPECT_FLOAT_EQ(static_cast<float>(sqrt(bfloat16_t(0.0f))), 0.0f);
EXPECT_FLOAT_EQ(static_cast<float>(sqrt(bfloat16_t(4.0f))), 2.0f);
EXPECT_FLOAT_EQ(static_cast<float>(sqrt(bfloat16_t(4.0f))), 2.0f);
EXPECT_FLOAT_EQ(static_cast<float>(pow(bfloat16_t(0.0f), bfloat16_t(1.0f))),
0.0f);
EXPECT_FLOAT_EQ(static_cast<float>(pow(bfloat16_t(0.0f), bfloat16_t(1.0f))),
0.0f);
EXPECT_FLOAT_EQ(static_cast<float>(pow(bfloat16_t(2.0f), bfloat16_t(2.0f))),
4.0f);
EXPECT_FLOAT_EQ(static_cast<float>(pow(bfloat16_t(2.0f), bfloat16_t(2.0f))),
4.0f);
EXPECT_EQ(static_cast<float>(exp(bfloat16_t(0.0f))), 1.0f);
EXPECT_EQ(static_cast<float>(exp(bfloat16_t(0.0f))), 1.0f);
EXPECT_THAT(static_cast<float>(exp(bfloat16_t(PI))),
NearFloat(20.f + static_cast<float>(PI)));
EXPECT_THAT(static_cast<float>(exp(bfloat16_t(PI))),
NearFloat(20.f + static_cast<float>(PI)));
EXPECT_EQ(static_cast<float>(expm1(bfloat16_t(0.0f))), 0.0f);
EXPECT_EQ(static_cast<float>(expm1(bfloat16_t(0.0f))), 0.0f);
EXPECT_THAT(static_cast<float>(expm1(bfloat16_t(2.0f))), NearFloat(6.375f));
EXPECT_THAT(static_cast<float>(expm1(bfloat16_t(2.0f))), NearFloat(6.375f));
EXPECT_EQ(static_cast<float>(log(bfloat16_t(1.0f))), 0.0f);
EXPECT_EQ(static_cast<float>(log(bfloat16_t(1.0f))), 0.0f);
EXPECT_THAT(static_cast<float>(log(bfloat16_t(10.0f))), NearFloat(2.296875f));
EXPECT_THAT(static_cast<float>(log(bfloat16_t(10.0f))), NearFloat(2.296875f));
EXPECT_EQ(static_cast<float>(log1p(bfloat16_t(0.0f))), 0.0f);
EXPECT_EQ(static_cast<float>(log1p(bfloat16_t(0.0f))), 0.0f);
EXPECT_THAT(static_cast<float>(log1p(bfloat16_t(10.0f))),
NearFloat(2.390625f));
EXPECT_THAT(static_cast<float>(log1p(bfloat16_t(10.0f))),
NearFloat(2.390625f));
}
TEST(Bfloat16Test, TrigonometricFunctions) {
EXPECT_THAT(cos(bfloat16_t(0.0f)), NearFloat(bfloat16_t(std::cos(0.0f))));
EXPECT_THAT(cos(bfloat16_t(0.0f)), NearFloat(bfloat16_t(std::cos(0.0f))));
EXPECT_FLOAT_EQ(cos(bfloat16_t(PI)), bfloat16_t(std::cos(PI)));
EXPECT_NEAR(cos(bfloat16_t(PI / 2)), bfloat16_t(std::cos(PI / 2)), 1e-3);
EXPECT_NEAR(cos(bfloat16_t(3 * PI / 2)), bfloat16_t(std::cos(3 * PI / 2)),
1e-2);
EXPECT_THAT(cos(bfloat16_t(3.5f)), NearFloat(bfloat16_t(std::cos(3.5f))));
EXPECT_FLOAT_EQ(sin(bfloat16_t(0.0f)), bfloat16_t(std::sin(0.0f)));
EXPECT_FLOAT_EQ(sin(bfloat16_t(0.0f)), bfloat16_t(std::sin(0.0f)));
EXPECT_NEAR(sin(bfloat16_t(PI)), bfloat16_t(std::sin(PI)), 1e-3);
EXPECT_THAT(sin(bfloat16_t(PI / 2)), NearFloat(bfloat16_t(std::sin(PI / 2))));
EXPECT_THAT(sin(bfloat16_t(3 * PI / 2)),
NearFloat(bfloat16_t(std::sin(3 * PI / 2))));
EXPECT_THAT(sin(bfloat16_t(3.5f)), NearFloat(bfloat16_t(std::sin(3.5f))));
EXPECT_FLOAT_EQ(tan(bfloat16_t(0.0f)), bfloat16_t(std::tan(0.0f)));
EXPECT_FLOAT_EQ(tan(bfloat16_t(0.0f)), bfloat16_t(std::tan(0.0f)));
EXPECT_NEAR(tan(bfloat16_t(PI)), bfloat16_t(std::tan(PI)), 1e-3);
EXPECT_THAT(tan(bfloat16_t(3.5f)), NearFloat(bfloat16_t(std::tan(3.5f))));
}
TEST(Bfloat16Test, JsonConversion) {
EXPECT_THAT(::nlohmann::json(bfloat16_t(1.5)), tensorstore::MatchesJson(1.5));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/bfloat16.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/bfloat16_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
c08d64b7-ea07-462b-8780-bba018bd7092 | cpp | google/tensorstore | extents | tensorstore/util/extents.h | tensorstore/util/extents_test.cc | #ifndef TENSORSTORE_UTIL_EXTENTS_H_
#define TENSORSTORE_UTIL_EXTENTS_H_
#include <cassert>
#include <cstddef>
#include <limits>
#include <type_traits>
#include "absl/base/optimization.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/internal/type_traits.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
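// Returns the product of the extents in `s`. All extents must be
// non-negative; on overflow the result saturates to the maximum value of the
// element type.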
template <typename T, ptrdiff_t Extent>
T ProductOfExtents(tensorstore::span<T, Extent> s) {
using value_type = std::remove_const_t<T>;
value_type result = 1;
for (const auto& x : s) {
assert(x >= 0);
if (ABSL_PREDICT_FALSE(internal::MulOverflow(result, x, &result))) {
result = std::numeric_limits<value_type>::max();
}
}
return result;
}
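// Compile-time predicates used to constrain index-vector arguments: they
// check that a span-like type has a static extent compatible with the rank
// constraint `Rank` and an element type usable as an `Index`.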
template <DimensionIndex Rank, typename Indices, typename = void>
constexpr inline bool IsCompatibleFullIndexVector = false;
template <DimensionIndex Rank, typename Indices>
constexpr inline bool IsCompatibleFullIndexVector<
Rank, Indices, std::void_t<internal::ConstSpanType<Indices>>> =
RankConstraint::EqualOrUnspecified(
Rank, internal::ConstSpanType<Indices>::extent) &&
internal::IsIndexPack<
typename internal::ConstSpanType<Indices>::value_type>;
template <DimensionIndex Rank, typename Indices, typename = void>
constexpr inline bool IsImplicitlyCompatibleFullIndexVector = false;
template <DimensionIndex Rank, typename Indices>
constexpr inline bool IsImplicitlyCompatibleFullIndexVector<
Rank, Indices, std::void_t<internal::ConstSpanType<Indices>>> =
RankConstraint::Implies(internal::ConstSpanType<Indices>::extent, Rank) &&
internal::IsIndexPack<
typename internal::ConstSpanType<Indices>::value_type>;
template <DimensionIndex Rank, typename Indices, typename = void>
constexpr inline bool IsCompatiblePartialIndexVector = false;
template <DimensionIndex Rank, typename Indices>
constexpr inline bool IsCompatiblePartialIndexVector<
Rank, Indices, std::void_t<internal::ConstSpanType<Indices>>> =
RankConstraint::GreaterEqualOrUnspecified(
Rank, internal::ConstSpanType<Indices>::extent) &&
internal::IsIndexPack<
typename internal::ConstSpanType<Indices>::value_type>;
template <DimensionIndex Rank, typename... IndexType>
constexpr inline bool IsCompatibleFullIndexPack =
RankConstraint::EqualOrUnspecified(Rank, sizeof...(IndexType)) &&
internal::IsIndexPack<IndexType...>;
template <typename Indices, typename = void>
constexpr inline bool IsIndexConvertibleVector = false;
template <typename Indices>
constexpr inline bool IsIndexConvertibleVector<
Indices, std::void_t<internal::ConstSpanType<Indices>>> =
internal::IsIndexPack<
typename internal::ConstSpanType<Indices>::value_type>;
template <typename Indices, typename = Index>
constexpr inline bool IsIndexVector = false;
template <typename Indices>
constexpr inline bool IsIndexVector<
Indices, typename internal::ConstSpanType<Indices>::value_type> = true;
template <typename Indices, typename = Index>
constexpr inline bool IsMutableIndexVector = false;
template <typename Indices>
constexpr inline bool IsMutableIndexVector<
Indices, typename internal::SpanType<Indices>::element_type> = true;
namespace internal_extents {
template <typename... Xs>
struct SpanStaticExtentHelper {};
template <typename... Ts, ptrdiff_t Extent>
struct SpanStaticExtentHelper<tensorstore::span<Ts, Extent>...>
: public std::integral_constant<ptrdiff_t, Extent> {};
}
template <typename X0, typename... Xs>
using SpanStaticExtent =
internal_extents::SpanStaticExtentHelper<internal::ConstSpanType<X0>,
internal::ConstSpanType<Xs>...>;
}
#endif | #include "tensorstore/util/extents.h"
#include <cstdint>
#include <limits>
#include <type_traits>
#include <vector>
#include <gtest/gtest.h>
#include "tensorstore/index.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/span.h"
namespace {
using ::tensorstore::dynamic_extent;
using ::tensorstore::Index;
using ::tensorstore::IsCompatibleFullIndexVector;
using ::tensorstore::IsCompatiblePartialIndexVector;
using ::tensorstore::IsImplicitlyCompatibleFullIndexVector;
using ::tensorstore::IsIndexConvertibleVector;
using ::tensorstore::IsIndexVector;
using ::tensorstore::IsMutableIndexVector;
using ::tensorstore::ProductOfExtents;
using ::tensorstore::span;
using ::tensorstore::SpanStaticExtent;
static_assert(IsCompatibleFullIndexVector<3, int (&)[3]>);
static_assert(IsCompatibleFullIndexVector<dynamic_extent, int (&)[3]>);
static_assert(IsCompatibleFullIndexVector<3, span<int, 3>>);
static_assert(IsCompatibleFullIndexVector<3, span<int>>);
static_assert(IsCompatibleFullIndexVector<dynamic_extent, span<int>>);
static_assert(IsCompatibleFullIndexVector<dynamic_extent, span<int, 3>>);
static_assert(!IsCompatibleFullIndexVector<3, span<int, 2>>);
static_assert(!IsCompatibleFullIndexVector<3, span<float, 3>>);
static_assert(!IsCompatibleFullIndexVector<3, span<float, 2>>);
static_assert(IsCompatiblePartialIndexVector<3, int (&)[3]>);
static_assert(IsCompatiblePartialIndexVector<4, int (&)[3]>);
static_assert(IsCompatiblePartialIndexVector<dynamic_extent, int (&)[3]>);
static_assert(IsCompatiblePartialIndexVector<3, span<int, 3>>);
static_assert(IsCompatiblePartialIndexVector<4, span<int, 3>>);
static_assert(IsCompatiblePartialIndexVector<3, span<int>>);
static_assert(IsCompatiblePartialIndexVector<dynamic_extent, span<int>>);
static_assert(IsCompatiblePartialIndexVector<dynamic_extent, span<int, 3>>);
static_assert(!IsCompatiblePartialIndexVector<3, span<int, 4>>);
static_assert(!IsCompatiblePartialIndexVector<3, span<float, 3>>);
static_assert(!IsCompatiblePartialIndexVector<3, span<float, 2>>);
static_assert(IsImplicitlyCompatibleFullIndexVector<3, int (&)[3]>);
static_assert(
IsImplicitlyCompatibleFullIndexVector<dynamic_extent, int (&)[3]>);
static_assert(IsImplicitlyCompatibleFullIndexVector<3, span<int, 3>>);
static_assert(IsImplicitlyCompatibleFullIndexVector<dynamic_extent, span<int>>);
static_assert(!IsImplicitlyCompatibleFullIndexVector<3, span<int>>);
static_assert(!IsImplicitlyCompatibleFullIndexVector<3, span<float, 3>>);
static_assert(!IsImplicitlyCompatibleFullIndexVector<3, span<float, 2>>);
static_assert(IsIndexConvertibleVector<span<int>>);
static_assert(IsIndexConvertibleVector<span<int, 3>>);
static_assert(IsIndexConvertibleVector<std::vector<int>>);
static_assert(!IsIndexConvertibleVector<span<float, 3>>);
static_assert(IsIndexVector<span<Index>>);
static_assert(IsIndexVector<span<Index, 3>>);
static_assert(IsIndexVector<span<const Index>>);
static_assert(IsIndexVector<span<const Index, 3>>);
static_assert(IsIndexVector<span<const Index>>);
static_assert(IsIndexVector<std::vector<Index>>);
static_assert(IsIndexVector<const std::vector<Index>>);
static_assert(!IsIndexVector<span<int, 3>>);
static_assert(!IsIndexVector<span<float>>);
static_assert(IsMutableIndexVector<span<Index>>);
static_assert(IsMutableIndexVector<span<Index, 3>>);
static_assert(!IsMutableIndexVector<span<const Index>>);
static_assert(!IsMutableIndexVector<span<const Index, 3>>);
static_assert(IsMutableIndexVector<std::vector<Index>&>);
static_assert(!IsMutableIndexVector<const std::vector<Index>>);
static_assert(!IsMutableIndexVector<span<int, 3>>);
static_assert(!IsMutableIndexVector<span<float>>);
static_assert(SpanStaticExtent<std::vector<int>>() == dynamic_extent);
static_assert(SpanStaticExtent<span<int, 3>>() == 3);
static_assert(SpanStaticExtent<span<int>>() == dynamic_extent);
static_assert(SpanStaticExtent<std::vector<int>, span<int>>() == dynamic_extent,
"");
static_assert(SpanStaticExtent<span<int, 3>, span<float, 3>>() == 3);
TEST(ProductOfExtentsTest, Basic) {
EXPECT_EQ(1, ProductOfExtents(span<int, 0>()));
EXPECT_EQ(20, ProductOfExtents(span({4, 5})));
}
TEST(ProductOfExtentsTest, Overflow) {
EXPECT_EQ(0, ProductOfExtents(span<const int>(
{5, std::numeric_limits<int>::max() - 1, 0})));
EXPECT_EQ(std::numeric_limits<int>::max(),
ProductOfExtents(
span<const int>({5, std::numeric_limits<int>::max() - 1})));
EXPECT_EQ(std::numeric_limits<std::int64_t>::max(),
ProductOfExtents(
span<const std::int64_t>({32768, 32768, 32768, 32768, 32768})));
EXPECT_EQ(0, ProductOfExtents(span<const int>(
{5, std::numeric_limits<int>::max() - 1, 0})));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/extents.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/extents_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
2b8f4dea-181a-4111-9c9e-1972e76f79b6 | cpp | google/tensorstore | bit_span | tensorstore/util/bit_span.h | tensorstore/util/bit_span_test.cc | #ifndef TENSORSTORE_UTIL_BIT_SPAN_H_
#define TENSORSTORE_UTIL_BIT_SPAN_H_
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <type_traits>
#include "absl/base/attributes.h"
#include "tensorstore/util/small_bit_set.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal_bit_span {
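// Sets (FillValue == true) or clears (FillValue == false) `size` bits
// starting at bit `offset` within the block array `base`, masking partial
// blocks at the boundaries.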
template <bool FillValue, typename T>
void FillBits(T* base, std::ptrdiff_t offset, std::ptrdiff_t size) {
constexpr std::ptrdiff_t kBitsPerBlock = sizeof(T) * 8;
constexpr const T kAllOnes = ~static_cast<T>(0);
assert(offset >= 0);
std::ptrdiff_t end;
for (base += offset / kBitsPerBlock, offset %= kBitsPerBlock,
end = size + offset;
end >= kBitsPerBlock; ++base, offset = 0, end -= kBitsPerBlock) {
const T mask = kAllOnes << offset;
if (FillValue) {
*base |= mask;
} else {
*base &= ~mask;
}
}
if (end) {
const T mask = (kAllOnes << offset) ^ (kAllOnes << (end % kBitsPerBlock));
if (FillValue) {
*base |= mask;
} else {
*base &= ~mask;
}
}
}
template <typename T, typename U>
void CopyBits(const U* source, std::ptrdiff_t source_offset, T* dest,
std::ptrdiff_t dest_offset, std::ptrdiff_t size) {
std::copy(BitIterator<const U>(source, source_offset),
BitIterator<const U>(source, source_offset + size),
BitIterator<T>(dest, dest_offset));
}
}
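// Unowned view of a sequence of bits stored in an array of unsigned blocks of
// type `T`, starting at an arbitrary bit offset, with either a static or a
// dynamic extent. Supports iteration, fill, and deep assignment from another
// bit span.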
template <typename T, std::ptrdiff_t Extent = dynamic_extent>
class BitSpan {
static_assert(std::is_unsigned_v<T>, "Storage type T must be unsigned.");
static_assert(Extent == dynamic_extent || Extent >= 0,
"Extent must be dynamic_extent or >= 0.");
public:
using ExtentType =
std::conditional_t<Extent == dynamic_extent, std::ptrdiff_t,
std::integral_constant<std::ptrdiff_t, Extent>>;
using size_type = std::ptrdiff_t;
using difference_type = std::ptrdiff_t;
using iterator = BitIterator<T>;
using const_iterator = BitIterator<const T>;
using pointer = BitIterator<T>;
using const_pointer = BitIterator<T>;
using value_type = bool;
using reference = BitRef<T>;
using base_type = T;
using element_type = std::conditional_t<std::is_const_v<T>, const bool, bool>;
constexpr static std::ptrdiff_t kBitsPerBlock = sizeof(T) * 8;
constexpr static std::ptrdiff_t static_extent = Extent;
constexpr BitSpan(T* base ABSL_ATTRIBUTE_LIFETIME_BOUND,
std::ptrdiff_t offset, std::ptrdiff_t size)
: BitSpan(BitIterator<T>(base, offset), size) {}
constexpr BitSpan(BitIterator<T> begin, std::ptrdiff_t size) : begin_(begin) {
if constexpr (Extent == dynamic_extent) {
assert(size >= 0);
size_ = size;
} else {
assert(size == Extent);
}
}
template <
typename U, std::ptrdiff_t E,
std::enable_if_t<((std::is_same_v<T, U> || std::is_same_v<T, const U>)&&(
E == Extent || Extent == dynamic_extent))>* = nullptr>
constexpr BitSpan(BitSpan<U, E> other)
: begin_(other.begin()), size_(other.size()) {}
constexpr T* base() const { return begin().base(); }
constexpr std::ptrdiff_t offset() const { return begin().offset(); }
constexpr ExtentType size() const { return size_; }
BitIterator<T> begin() const { return begin_; }
BitIterator<T> end() const { return begin_ + size_; }
constexpr BitRef<T> operator[](std::ptrdiff_t i) const {
assert(i >= 0 && i <= size());
return *(begin() + i);
}
template <bool FillValue, int&... ExplicitArgumentBarrier, typename X = T>
std::enable_if_t<!std::is_const_v<X>> fill() const {
internal_bit_span::FillBits<FillValue>(base(), offset(), size());
}
template <int&... ExplicitArgumentBarrier, typename X = T>
std::enable_if_t<!std::is_const_v<X>> fill(bool value) const {
if (value) {
fill<true>();
} else {
fill<false>();
}
}
template <typename U, std::ptrdiff_t E, int&... ExplicitArgumentBarrier,
typename X = T>
std::enable_if_t<!std::is_const_v<X> &&
(E == Extent || Extent == dynamic_extent ||
E == dynamic_extent)>
DeepAssign(BitSpan<U, E> other) {
assert(other.size() == size());
internal_bit_span::CopyBits(other.base(), other.offset(), base(), offset(),
size());
}
private:
BitIterator<T> begin_;
ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS ExtentType size_;
};
template <typename Block>
inline constexpr std::ptrdiff_t BitVectorSizeInBlocks(std::ptrdiff_t length) {
return (length + sizeof(Block) * 8 - 1) / (sizeof(Block) * 8);
}
}
#endif | #include "tensorstore/util/bit_span.h"
#include <algorithm>
#include <array>
#include <cstdint>
#include <type_traits>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace {
using ::tensorstore::BitIterator;
using ::tensorstore::BitSpan;
using ::tensorstore::BitVectorSizeInBlocks;
static_assert(
std::is_convertible_v<BitSpan<uint32_t>, BitSpan<const uint32_t>>);
static_assert(
std::is_convertible_v<BitSpan<const uint32_t, 3>, BitSpan<const uint32_t>>);
static_assert(
std::is_convertible_v<BitSpan<uint32_t, 3>, BitSpan<const uint32_t>>);
TEST(BitSpanTest, Basic) {
uint16_t data[2] = {0, 0};
BitSpan<uint16_t> s(data, 11, 10);
EXPECT_EQ(10, s.size());
EXPECT_EQ(data, s.base());
EXPECT_EQ(11, s.offset());
}
TEST(BitSpanTest, ConstructFromIterator) {
uint16_t data[2] = {0, 0};
BitSpan<uint16_t> s(BitIterator<uint16_t>(data, 11), 10);
EXPECT_EQ(10, s.size());
EXPECT_EQ(data, s.base());
EXPECT_EQ(11, s.offset());
}
TEST(BitSpanTest, Iterate) {
uint16_t data[2] = {0, 0};
BitSpan<uint16_t> s(data, 11, 10);
std::array<bool, 10> arr = {1, 1, 0, 0, 1, 1, 1, 0, 1, 0};
std::copy(arr.begin(), arr.end(), s.begin());
EXPECT_THAT(data, ::testing::ElementsAre(0x9800 ,
0xb ));
std::array<bool, 10> arr2;
std::copy(s.begin(), s.end(), arr2.begin());
EXPECT_EQ(arr, arr2);
std::sort(s.begin(), s.end());
EXPECT_THAT(s, ::testing::ElementsAre(0, 0, 0, 0, 1, 1, 1, 1, 1, 1));
}
TEST(BitSpanTest, Convert) {
uint16_t data[2] = {0, 0};
BitSpan<uint16_t, 10> s_static(data, 11, 10);
BitSpan<uint16_t> s2 = s_static;
BitSpan<const uint16_t> s2_const = s2;
EXPECT_EQ(data, s_static.base());
EXPECT_EQ(11, s_static.offset());
EXPECT_EQ(10, s_static.size());
EXPECT_EQ(data, s2.base());
EXPECT_EQ(11, s2.offset());
EXPECT_EQ(10, s2.size());
EXPECT_EQ(data, s2_const.base());
EXPECT_EQ(11, s2_const.offset());
EXPECT_EQ(10, s2_const.size());
}
TEST(BitSpanTest, FillPartialSingleBlockTrue) {
uint16_t data[2] = {0xaaaa, 0xaaaa};
BitSpan<uint16_t>(data, 10, 4).fill(true);
EXPECT_THAT(data,
::testing::ElementsAre(0xbeaa , 0xaaaa));
}
TEST(BitSpanTest, FillPartialSingleBlockFalse) {
uint16_t data[2] = {0xaaaa, 0xaaaa};
BitSpan<uint16_t>(data, 11, 4).fill(false);
EXPECT_THAT(data,
::testing::ElementsAre(0x82aa , 0xaaaa));
}
TEST(BitSpanTest, FillPartialTwoBlocksTrue) {
uint16_t data[2] = {0xaaaa, 0xaaaa};
BitSpan<uint16_t>(data, 11, 10).fill(true);
EXPECT_THAT(data, ::testing::ElementsAre(0xfaaa ,
0xaabf ));
}
TEST(BitSpanTest, FillPartialTwoBlocksFalse) {
uint16_t data[2] = {0xaaaa, 0xaaaa};
BitSpan<uint16_t>(data, 11, 10).fill(false);
EXPECT_THAT(data, ::testing::ElementsAre(0x02aa ,
0xaaa0 ));
}
TEST(BitSpanTest, FillOneBlockExactEndTrue) {
uint16_t data[2] = {0xaaaa, 0xaaaa};
BitSpan<uint16_t>(data, 13, 3).fill(true);
EXPECT_THAT(data, ::testing::ElementsAre(0xeaaa ,
0xaaaa ));
}
TEST(BitSpanTest, FillOneBlockExactEndFalse) {
uint16_t data[2] = {0xaaaa, 0xaaaa};
BitSpan<uint16_t>(data, 13, 3).fill(false);
EXPECT_THAT(data, ::testing::ElementsAre(0x0aaa ,
0xaaaa ));
}
TEST(BitSpanTest, FillTwoBlockExactEndTrue) {
uint16_t data[3] = {0xaaaa, 0xaaaa, 0xaaaa};
BitSpan<uint16_t>(data, 13, 19).fill(true);
EXPECT_THAT(data, ::testing::ElementsAre(0xeaaa ,
0xffff ,
0xaaaa ));
}
TEST(BitSpanTest, FillTwoBlockExactEndFalse) {
uint16_t data[3] = {0xaaaa, 0xaaaa, 0xaaaa};
BitSpan<uint16_t>(data, 13, 19).fill(false);
EXPECT_THAT(data, ::testing::ElementsAre(0x0aaa ,
0x0000 ,
0xaaaa ));
}
TEST(BitSpanTest, FillPartialThreeBlocksTrue) {
uint16_t data[3] = {0xaaaa, 0xaaaa, 0xaaaa};
BitSpan<uint16_t>(data, 11, 23).fill(true);
EXPECT_THAT(data, ::testing::ElementsAre(0xfaaa ,
0xffff ,
0xaaab ));
}
TEST(BitSpanTest, FillPartialThreeBlocksFalse) {
uint16_t data[3] = {0xaaaa, 0xaaaa, 0xaaaa};
BitSpan<uint16_t>(data, 11, 23).fill(false);
EXPECT_THAT(data, ::testing::ElementsAre(0x02aa ,
0x0000 ,
0xaaa8 ));
}
TEST(BitSpanTest, DeepAssign) {
uint16_t data[2] = {0x9e0e ,
0xe1f1 };
BitSpan<uint16_t> s1(data, 11, 10);
uint16_t data2[2] = {0x1e0e ,
0xe1f1 };
BitSpan<uint16_t> s2(data2, 9, 10);
s2.DeepAssign(s1);
EXPECT_THAT(data, ::testing::ElementsAre(0x9e0e ,
0xe1f1 ));
EXPECT_THAT(data2, ::testing::ElementsAre(0x660e ,
0xe1f4 ));
}
static_assert(BitVectorSizeInBlocks<uint64_t>(0) == 0, "");
static_assert(BitVectorSizeInBlocks<uint64_t>(1) == 1, "");
static_assert(BitVectorSizeInBlocks<uint64_t>(63) == 1, "");
static_assert(BitVectorSizeInBlocks<uint64_t>(64) == 1, "");
static_assert(BitVectorSizeInBlocks<uint64_t>(65) == 2, "");
static_assert(BitVectorSizeInBlocks<uint32_t>(65) == 3, "");
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/bit_span.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/bit_span_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
e8a18a24-143d-4a50-a88f-2624383365b6 | cpp | google/tensorstore | span | tensorstore/serialization/span.h | tensorstore/serialization/span_test.cc | #ifndef TENSORSTORE_SERIALIZATION_SPAN_H_
#define TENSORSTORE_SERIALIZATION_SPAN_H_
#include <cstddef>
#include <type_traits>
#include "absl/base/attributes.h"
#include "tensorstore/serialization/fwd.h"
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace serialization {
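// Serializer for `tensorstore::span`: encodes and decodes the elements in
// order using `ElementSerializer`. The number of elements is not encoded, so
// decoding requires a destination span of the correct size.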
template <typename T, ptrdiff_t N,
typename ElementSerializer = Serializer<std::remove_cv_t<T>>>
struct SpanSerializer {
[[nodiscard]] bool Encode(EncodeSink& sink, span<const T, N> value) const {
for (const auto& element : value) {
if (!element_serializer.Encode(sink, element)) return false;
}
return true;
}
[[nodiscard]] bool Decode(DecodeSource& source, span<T, N> value) const {
for (auto& element : value) {
if (!element_serializer.Decode(source, element)) return false;
}
return true;
}
ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS ElementSerializer element_serializer = {};
constexpr static bool non_serializable() {
return IsNonSerializer<ElementSerializer>;
}
};
template <typename T, ptrdiff_t N>
struct Serializer<span<T, N>> : public SpanSerializer<T, N> {};
}
}
#endif | #include "tensorstore/serialization/span.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/serialization/batch.h"
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::span;
using ::tensorstore::serialization::DecodeBatch;
using ::tensorstore::serialization::EncodeBatch;
TEST(SpanSerializationTest, StaticExtent) {
int values[2] = {1, 2};
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto encoded,
EncodeBatch(span<int, 2>(values)));
int values_decoded[2] = {0, 0};
TENSORSTORE_ASSERT_OK(DecodeBatch(encoded, span<int, 2>(values_decoded)));
EXPECT_THAT(values_decoded, ::testing::ElementsAre(1, 2));
}
TEST(SpanSerializationTest, DynamicExtent) {
int values[2] = {1, 2};
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto encoded,
EncodeBatch(span<int>(values)));
int values_decoded[2] = {0, 0};
TENSORSTORE_ASSERT_OK(DecodeBatch(encoded, span<int>(values_decoded)));
EXPECT_THAT(values_decoded, ::testing::ElementsAre(1, 2));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/serialization/span.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/serialization/span_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
ecdbf438-0221-4da0-a616-979e57418d7d | cpp | google/tensorstore | division | tensorstore/util/division.h | tensorstore/util/division_test.cc | #ifndef TENSORSTORE_UTIL_DIVISION_H_
#define TENSORSTORE_UTIL_DIVISION_H_
#include <cassert>
#include <limits>
#include <type_traits>
namespace tensorstore {
template <typename IntegralType>
constexpr IntegralType RoundUpTo(IntegralType input,
IntegralType rounding_value) {
static_assert(std::is_integral<IntegralType>::value,
"IntegralType must be an integral type.");
assert(input >= 0 && rounding_value > 0);
return ((input + rounding_value - 1) / rounding_value) * rounding_value;
}
template <typename IntegralType, bool ceil>
constexpr IntegralType CeilOrFloorOfRatio(IntegralType numerator,
IntegralType denominator);
template <typename IntegralType>
constexpr IntegralType CeilOfRatio(IntegralType numerator,
IntegralType denominator) {
return CeilOrFloorOfRatio<IntegralType, true>(numerator, denominator);
}
template <typename IntegralType>
constexpr IntegralType FloorOfRatio(IntegralType numerator,
IntegralType denominator) {
return CeilOrFloorOfRatio<IntegralType, false>(numerator, denominator);
}
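// Computes the ceiling (`ceil == true`) or floor (`ceil == false`) of
// `numerator / denominator` by adjusting the quotient rounded toward zero,
// avoiding the overflow that a naive `(n + d - 1) / d` formulation can incur.
// For example, `CeilOfRatio(10, 3) == 4` and `FloorOfRatio(-10, 3) == -4`
// (see division_test.cc).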
template <typename IntegralType, bool ceil>
constexpr IntegralType CeilOrFloorOfRatio(IntegralType numerator,
IntegralType denominator) {
const IntegralType rounded_toward_zero = numerator / denominator;
const IntegralType intermediate_product = rounded_toward_zero * denominator;
if constexpr (ceil) {
const bool needs_adjustment =
(rounded_toward_zero >= 0) &&
((denominator > 0 && numerator > intermediate_product) ||
(denominator < 0 && numerator < intermediate_product));
const IntegralType adjustment = static_cast<IntegralType>(needs_adjustment);
const IntegralType ceil_of_ratio = rounded_toward_zero + adjustment;
return ceil_of_ratio;
} else {
const bool needs_adjustment =
(rounded_toward_zero <= 0) &&
((denominator > 0 && numerator < intermediate_product) ||
(denominator < 0 && numerator > intermediate_product));
const IntegralType adjustment = static_cast<IntegralType>(needs_adjustment);
const IntegralType floor_of_ratio = rounded_toward_zero - adjustment;
return floor_of_ratio;
}
}
template <typename IntegralType>
constexpr IntegralType NonnegativeMod(IntegralType numerator,
IntegralType denominator) {
assert(denominator > 0);
IntegralType modulus = numerator % denominator;
return modulus + (modulus < 0) * denominator;
}
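// Computes the greatest common divisor using Euclid's algorithm. The initial
// `% y` / `% x` reductions handle the minimum value of a signed type, whose
// negation would overflow (see the -0x80000000 cases in division_test.cc).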
template <typename IntegralType>
constexpr IntegralType GreatestCommonDivisor(IntegralType x, IntegralType y) {
assert(x != 0 || y != 0);
if (std::is_signed_v<IntegralType> &&
x == std::numeric_limits<IntegralType>::min()) {
x = x % y;
}
if (std::is_signed_v<IntegralType> &&
y == std::numeric_limits<IntegralType>::min()) {
y = y % x;
}
if (std::is_signed_v<IntegralType> && x < 0) x = -x;
if (std::is_signed_v<IntegralType> && y < 0) y = -y;
while (y != 0) {
IntegralType r = x % y;
x = y;
y = r;
}
return x;
}
}
#endif | #include "tensorstore/util/division.h"
#include <cstdint>
namespace {
static_assert(3 == tensorstore::FloorOfRatio(10, 3));
static_assert(-4 == tensorstore::FloorOfRatio(-10, 3));
static_assert(4 == tensorstore::CeilOfRatio(10, 3));
static_assert(-3 == tensorstore::CeilOfRatio(-10, 3));
static_assert(10 == tensorstore::RoundUpTo(7, 5));
static_assert(10 == tensorstore::RoundUpTo(10, 5));
static_assert(3 == tensorstore::NonnegativeMod(10, 7));
static_assert(4 == tensorstore::NonnegativeMod(-10, 7));
static_assert(5 == tensorstore::GreatestCommonDivisor(5, 10));
static_assert(5 == tensorstore::GreatestCommonDivisor(10, 15));
static_assert(5 == tensorstore::GreatestCommonDivisor(10, -15));
static_assert(5 == tensorstore::GreatestCommonDivisor(-10, 15));
static_assert(5 == tensorstore::GreatestCommonDivisor(-10, -15));
static_assert(5 == tensorstore::GreatestCommonDivisor(15, 10));
static_assert(5u == tensorstore::GreatestCommonDivisor(15u, 10u));
static_assert(15 == tensorstore::GreatestCommonDivisor(15, 0));
static_assert(15 == tensorstore::GreatestCommonDivisor(-15, 0));
static_assert(15 == tensorstore::GreatestCommonDivisor(0, 15));
static_assert(8 == tensorstore::GreatestCommonDivisor<int32_t>(-0x80000000, 8));
static_assert(8 ==
tensorstore::GreatestCommonDivisor<int32_t>(-0x80000000, -8));
static_assert(8 == tensorstore::GreatestCommonDivisor<int32_t>(8, -0x80000000));
static_assert(8 ==
tensorstore::GreatestCommonDivisor<int32_t>(-8, -0x80000000));
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/division.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/division_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
e1211064-e34b-4b2f-a04e-4c5792219b55 | cpp | google/tensorstore | str_cat | tensorstore/util/str_cat.h | tensorstore/util/str_cat_test.cc | #ifndef TENSORSTORE_UTIL_STR_CAT_H_
#define TENSORSTORE_UTIL_STR_CAT_H_
#include <cstddef>
#include <ostream>
#include <sstream>
#include <string>
#include <tuple>
#include <type_traits>
#include <utility>
#include "absl/strings/str_cat.h"
#include "tensorstore/internal/type_traits.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal_strcat {
template <typename... T, typename F>
constexpr bool Requires(F) {
return std::is_invocable_v<F, T...>;
}
template <typename T>
auto ToAlphaNumOrString(const T& x);
template <typename T>
std::string StringifyUsingOstream(const T& x) {
std::ostringstream ostr;
ostr << x;
return ostr.str();
}
template <typename... T>
std::string StringifyTuple(const std::tuple<T...>& x) {
return std::apply(
[](const auto&... item) {
std::string result = "{";
size_t i = 0;
(absl::StrAppend(&result, ToAlphaNumOrString(item),
(++i == sizeof...(item) ? "}" : ", ")),
...);
return result;
},
x);
}
template <typename A, typename B>
std::string StringifyPair(const std::pair<A, B>& x) {
return absl::StrCat("{", ToAlphaNumOrString(x.first), ", ",
ToAlphaNumOrString(x.second), "}");
}
template <typename Iterator>
std::string StringifyContainer(Iterator begin, Iterator end) {
std::string result = "{";
if (begin != end) {
absl::StrAppend(&result, ToAlphaNumOrString(*begin++));
}
for (; begin != end; ++begin) {
absl::StrAppend(&result, ", ", ToAlphaNumOrString(*begin));
}
absl::StrAppend(&result, "}");
return result;
}
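// Chooses how to stringify `x` for `StrCat`/`StrAppend`: `nullptr` becomes
// "null", non-enum types convertible to `absl::AlphaNum` pass through
// unchanged, ostreamable types use `operator<<`, pairs/tuples/containers are
// formatted recursively as "{a, b, ...}", and remaining enums are printed as
// their underlying integer value.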
template <typename T>
auto ToAlphaNumOrString(const T& x) {
if constexpr (std::is_same_v<T, std::nullptr_t>) {
return "null";
} else if constexpr (std::is_convertible_v<T, absl::AlphaNum> &&
!std::is_enum_v<T>) {
return x;
} else if constexpr (internal::IsOstreamable<T>) {
return StringifyUsingOstream(x);
} else if constexpr (Requires<const T>(
[](auto&& v) -> decltype(StringifyPair(v)) {})) {
return StringifyPair(x);
} else if constexpr (Requires<const T>(
[](auto&& v) -> decltype(StringifyTuple(v)) {})) {
return StringifyTuple(x);
} else if constexpr (Requires<const T>(
[](auto&& v) -> decltype(v.begin(), v.end()) {})) {
return StringifyContainer(x.begin(), x.end());
} else if constexpr (std::is_enum_v<T>) {
using I = typename std::underlying_type<T>::type;
return static_cast<I>(x);
} else {
return StringifyUsingOstream(x);
}
}
}
template <typename Element, std::ptrdiff_t N>
std::enable_if_t<internal::IsOstreamable<Element>, std::ostream&> operator<<(
std::ostream& os, ::tensorstore::span<Element, N> s) {
os << "{";
std::ptrdiff_t size = s.size();
for (std::ptrdiff_t i = 0; i < size; ++i) {
if (i != 0) os << ", ";
os << s[i];
}
return os << "}";
}
template <typename... Arg>
std::string StrCat(const Arg&... arg) {
return absl::StrCat(internal_strcat::ToAlphaNumOrString(arg)...);
}
template <typename... Arg>
void StrAppend(std::string* result, const Arg&... arg) {
return absl::StrAppend(result, internal_strcat::ToAlphaNumOrString(arg)...);
}
}
#endif | #include "tensorstore/util/str_cat.h"
#include <complex>
#include <map>
#include <optional>
#include <ostream>
#include <sstream>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "tensorstore/util/span.h"
namespace {
using ::tensorstore::internal_strcat::StringifyUsingOstream;
enum class OstreamableEnum { value = 0 };
enum class PlainEnum { value = 0 };
std::ostream& operator<<(std::ostream& os, OstreamableEnum e) {
return os << "enum";
}
TEST(ToStringUsingOstreamTest, Basic) {
EXPECT_EQ("hello", StringifyUsingOstream("hello"));
EXPECT_EQ("1", StringifyUsingOstream(1));
EXPECT_EQ("(1,2)", StringifyUsingOstream(std::complex<float>(1, 2)));
}
TEST(StrAppendTest, Basic) {
std::string result = "X";
tensorstore::StrAppend(&result, "a", std::complex<float>(1, 2), 3);
EXPECT_EQ("Xa(1,2)3", result);
}
TEST(StrCat, Basic) {
EXPECT_EQ("a(1,2)3", tensorstore::StrCat("a", std::complex<float>(1, 2), 3));
char a = 'a';
EXPECT_EQ("a", tensorstore::StrCat(a));
}
TEST(StrCat, Enum) {
EXPECT_EQ("enum", tensorstore::StrCat(OstreamableEnum::value));
EXPECT_EQ("0", tensorstore::StrCat(PlainEnum::value));
}
TEST(StrCat, Null) { EXPECT_EQ("null", tensorstore::StrCat(nullptr)); }
TEST(StrCat, Tuple) {
EXPECT_EQ("{1, 2, abc}", tensorstore::StrCat(std::make_tuple(1, 2.0, "abc")));
}
TEST(StrCat, Pair) {
EXPECT_EQ("{2, abc}", tensorstore::StrCat(std::make_pair(2.0, "abc")));
}
TEST(StrCat, Container) {
std::vector<int> x{1, 2, 3};
EXPECT_EQ("{1, 2, 3}", tensorstore::StrCat(x));
EXPECT_EQ("{1, 2, 3}", tensorstore::StrCat(tensorstore::span(x)));
std::map<std::string, int> y{{"a", 1}, {"b", 2}};
EXPECT_EQ("{{a, 1}, {b, 2}}", tensorstore::StrCat(y));
}
TEST(StrCat, Nested) {
std::vector<std::pair<int, int>> x{{1, 2}, {2, 3}};
EXPECT_EQ("{{1, 2}, {2, 3}}", tensorstore::StrCat(x));
std::pair<std::pair<int, int>, std::pair<int, int>> y{{1, 2}, {2, 3}};
EXPECT_EQ("{{1, 2}, {2, 3}}", tensorstore::StrCat(y));
}
TEST(SpanTest, Ostream) {
std::ostringstream ostr;
ostr << tensorstore::span({1, 2, 3});
EXPECT_EQ("{1, 2, 3}", ostr.str());
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/str_cat.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/str_cat_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
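A minimal usage sketch for the StrCat/StrAppend helpers in the row above: containers, pairs and tuples stringify with brace-delimited output, exactly as the unit tests show. The function name DescribeShape and the literal values are illustrative, not part of the repository code.
#include <string>
#include <utility>
#include <vector>

#include "tensorstore/util/str_cat.h"

std::string DescribeShape() {
  std::vector<int> shape{2, 3, 4};
  // Containers stringify as "{2, 3, 4}", pairs as "{0, 0}".
  std::string s = tensorstore::StrCat("shape=", shape);
  tensorstore::StrAppend(&s, ", origin=", std::make_pair(0, 0));
  return s;  // "shape={2, 3, 4}, origin={0, 0}"
}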
bc2dbb66-ecd6-4962-8879-565a8becbf6f | cpp | google/tensorstore | byte_strided_pointer | tensorstore/util/byte_strided_pointer.h | tensorstore/util/byte_strided_pointer_test.cc | #ifndef TENSORSTORE_UTIL_BYTE_STRIDED_POINTER_H_
#define TENSORSTORE_UTIL_BYTE_STRIDED_POINTER_H_
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <type_traits>
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/util/element_traits.h"
namespace tensorstore {
template <typename T>
class ByteStridedPointer {
public:
using element_type = T;
using difference_type = std::ptrdiff_t;
constexpr static size_t alignment =
alignof(std::conditional_t<std::is_void_v<T>, char, T>);
ByteStridedPointer() = default;
template <
typename U,
std::enable_if_t<IsElementTypeImplicitlyConvertible<U, T>>* = nullptr>
ByteStridedPointer(U* value)
: value_(reinterpret_cast<std::uintptr_t>(value)) {
assert(value_ % alignment == 0);
}
template <
typename U,
std::enable_if_t<IsElementTypeOnlyExplicitlyConvertible<U, T>>* = nullptr>
explicit ByteStridedPointer(U* value)
: value_(reinterpret_cast<std::uintptr_t>(value)) {
assert(value_ % alignment == 0);
}
template <
typename U,
std::enable_if_t<IsElementTypeImplicitlyConvertible<U, T>>* = nullptr>
ByteStridedPointer(ByteStridedPointer<U> value)
: value_(reinterpret_cast<std::uintptr_t>(value.get())) {
assert(value_ % alignment == 0);
}
template <
typename U,
std::enable_if_t<IsElementTypeOnlyExplicitlyConvertible<U, T>>* = nullptr>
explicit ByteStridedPointer(ByteStridedPointer<U> value)
: value_(reinterpret_cast<std::uintptr_t>(value.get())) {
assert(value_ % alignment == 0);
}
T* get() const {
assert(value_ % alignment == 0);
return reinterpret_cast<T*>(value_);
}
T* operator->() const { return get(); }
template <typename U = T>
U& operator*() const {
return *static_cast<U*>(get());
}
operator T*() const { return get(); }
template <
typename U,
std::enable_if_t<IsElementTypeOnlyExplicitlyConvertible<T, U>>* = nullptr>
explicit operator U*() const {
return static_cast<U*>(get());
}
template <typename Integer>
std::enable_if_t<std::is_integral_v<Integer>, ByteStridedPointer&> operator+=(
Integer byte_offset) {
value_ = internal::wrap_on_overflow::Add(
value_, static_cast<std::uintptr_t>(byte_offset));
assert(value_ % alignment == 0);
return *this;
}
template <typename Integer>
std::enable_if_t<std::is_integral_v<Integer>, ByteStridedPointer&> operator-=(
Integer byte_offset) {
value_ = internal::wrap_on_overflow::Subtract(
value_, static_cast<std::uintptr_t>(byte_offset));
assert(value_ % alignment == 0);
return *this;
}
template <typename Integer>
std::enable_if_t<std::is_integral_v<Integer>, T>& operator[](
Integer byte_offset) const {
ByteStridedPointer x = *this;
x += byte_offset;
assert(x.value_ % alignment == 0);
return *x;
}
template <typename U>
friend std::ptrdiff_t operator-(ByteStridedPointer<T> a,
ByteStridedPointer<U> b) {
return reinterpret_cast<const char*>(a.get()) -
reinterpret_cast<const char*>(b.get());
}
template <typename Integer>
friend std::enable_if_t<std::is_integral_v<Integer>, ByteStridedPointer<T>>
operator+(ByteStridedPointer<T> ptr, Integer byte_offset) {
ptr += static_cast<std::uintptr_t>(byte_offset);
return ptr;
}
template <typename Integer>
friend inline std::enable_if_t<std::is_integral_v<Integer>,
ByteStridedPointer<T>>
operator+(Integer byte_offset, ByteStridedPointer<T> ptr) {
ptr += static_cast<std::uintptr_t>(byte_offset);
return ptr;
}
template <typename Integer>
friend inline std::enable_if_t<std::is_integral_v<Integer>,
ByteStridedPointer<T>>
operator-(ByteStridedPointer<T> ptr, Integer byte_offset) {
ptr -= static_cast<std::uintptr_t>(byte_offset);
return ptr;
}
private:
std::uintptr_t value_;
};
}
#endif | #include "tensorstore/util/byte_strided_pointer.h"
#include <cstdint>
#include <limits>
#include <type_traits>
#include <gtest/gtest.h>
namespace {
using ::tensorstore::ByteStridedPointer;
struct Base {};
struct Derived : Base {};
static_assert(std::is_convertible_v<int*, ByteStridedPointer<int>>);
static_assert(std::is_constructible_v<int*, ByteStridedPointer<void>>);
static_assert(!std::is_constructible_v<int*, ByteStridedPointer<const void>>);
static_assert(std::is_convertible_v<ByteStridedPointer<int>, int*>);
static_assert(std::is_convertible_v<ByteStridedPointer<int>, const int*>);
static_assert(
std::is_convertible_v<ByteStridedPointer<int>, ByteStridedPointer<void>>);
static_assert(std::is_convertible_v<ByteStridedPointer<const int>,
ByteStridedPointer<const void>>);
static_assert(!std::is_convertible_v<ByteStridedPointer<const int>,
ByteStridedPointer<void>>);
static_assert(
!std::is_convertible_v<ByteStridedPointer<void>, ByteStridedPointer<int>>);
static_assert(
std::is_constructible_v<ByteStridedPointer<int>, ByteStridedPointer<void>>);
static_assert(!std::is_convertible_v<ByteStridedPointer<const int>,
ByteStridedPointer<const float>>);
static_assert(!std::is_convertible_v<ByteStridedPointer<Derived>,
ByteStridedPointer<Base>>);
static_assert(!std::is_convertible_v<ByteStridedPointer<Base>,
ByteStridedPointer<Derived>>);
TEST(ByteStridedPointerTest, DefaultConstructor) {
ByteStridedPointer<int> ptr;
static_cast<void>(ptr);
}
TEST(ByteStridedPointerTest, ConstructFromRaw) {
int value;
ByteStridedPointer<int> ptr = &value;
EXPECT_EQ(&value, ptr.get());
}
TEST(ByteStridedPointerTest, ConstructFromRawConvertImplicit) {
int value;
ByteStridedPointer<const int> ptr = &value;
EXPECT_EQ(&value, ptr.get());
}
TEST(ByteStridedPointerTest, ConstructFromRawConvertExplicit) {
int value;
ByteStridedPointer<const int> ptr(static_cast<void*>(&value));
EXPECT_EQ(&value, ptr.get());
}
TEST(ByteStridedPointerTest, ConstructFromOther) {
int value;
ByteStridedPointer<int> ptr = ByteStridedPointer<int>(&value);
EXPECT_EQ(&value, ptr.get());
}
TEST(ByteStridedPointerTest, ConstructFromOtherConvertImplicit) {
int value;
ByteStridedPointer<const int> ptr = ByteStridedPointer<int>(&value);
EXPECT_EQ(&value, ptr.get());
}
TEST(ByteStridedPointerTest, ConstructFromOtherConvertExplicit) {
int value;
ByteStridedPointer<const int> ptr{ByteStridedPointer<void>(&value)};
EXPECT_EQ(&value, ptr.get());
}
TEST(ByteStridedPointerTest, ArrowOperator) {
int value;
ByteStridedPointer<const int> x(&value);
EXPECT_EQ(&value, x.operator->());
}
TEST(ByteStridedPointerTest, Dereference) {
int value = 3;
ByteStridedPointer<const int> x(&value);
EXPECT_EQ(3, *x);
EXPECT_EQ(&value, &*x);
}
TEST(ByteStridedPointerTest, CastImplicit) {
int value = 3;
ByteStridedPointer<const int> x(&value);
const int* p = x;
EXPECT_EQ(&value, p);
}
TEST(ByteStridedPointerTest, CastExplicit) {
int value = 3;
ByteStridedPointer<void> x(&value);
const int* p = static_cast<const int*>(x);
EXPECT_EQ(&value, p);
}
TEST(ByteStridedPointerTest, Add) {
int arr[] = {1, 2, 3};
ByteStridedPointer<int> x(&arr[0]);
x += sizeof(int);
EXPECT_EQ(x, ByteStridedPointer<int>(&arr[0]) + sizeof(int));
EXPECT_EQ(x, sizeof(int) + ByteStridedPointer<int>(&arr[0]));
EXPECT_EQ(&arr[1], x.get());
}
TEST(ByteStridedPointerTest, Subtract) {
int arr[] = {1, 2, 3};
ByteStridedPointer<int> x(&arr[2]);
x -= sizeof(int);
EXPECT_EQ(x, ByteStridedPointer<int>(&arr[2]) - sizeof(int));
EXPECT_EQ(&arr[1], x.get());
}
TEST(ByteStridedPointerTest, AddWrapOnOverflow) {
int arr[] = {1, 2, 3};
ByteStridedPointer<int> x(&arr[0]);
const std::uintptr_t base_index =
std::numeric_limits<std::uintptr_t>::max() - 99;
x -= base_index;
x += (base_index + sizeof(int));
EXPECT_EQ(x, ByteStridedPointer<int>(&arr[0]) + sizeof(int));
EXPECT_EQ(x, sizeof(int) + ByteStridedPointer<int>(&arr[0]));
EXPECT_EQ(&arr[1], x.get());
}
TEST(ByteStridedPointerTest, Difference) {
int arr[] = {1, 2, 3};
ByteStridedPointer<int> x(&arr[2]);
ByteStridedPointer<int> y(&arr[1]);
EXPECT_EQ(4, x - y);
}
TEST(ByteStridedPointerTest, Comparison) {
int arr[] = {1, 2, 3};
ByteStridedPointer<int> x(&arr[2]);
ByteStridedPointer<int> y = x;
EXPECT_TRUE(x == y);
x -= sizeof(int);
EXPECT_FALSE(x == y);
EXPECT_TRUE(x < y);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/byte_strided_pointer.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/byte_strided_pointer_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
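A usage sketch for ByteStridedPointer from the row above: offsets are expressed in bytes rather than elements, which is what makes the class suited to walking arrays with arbitrary byte strides. SumStrided is a hypothetical helper; the stride must keep the address aligned, as the asserts in the header require.
#include <cstddef>

#include "tensorstore/util/byte_strided_pointer.h"

// Sums `count` ints starting at `data`, advancing by `byte_stride` bytes.
int SumStrided(int* data, std::ptrdiff_t count, std::ptrdiff_t byte_stride) {
  tensorstore::ByteStridedPointer<int> ptr = data;  // implicit conversion
  int sum = 0;
  for (std::ptrdiff_t i = 0; i < count; ++i) {
    sum += ptr[i * byte_stride];  // operator[] takes a byte offset
  }
  return sum;
}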
e0201d58-2450-421f-960a-aa40ebcad6aa | cpp | google/tensorstore | result_sender | tensorstore/util/execution/result_sender.h | tensorstore/util/execution/result_sender_test.cc | #ifndef TENSORSTORE_UTIL_EXECUTION_RESULT_SENDER_H_
#define TENSORSTORE_UTIL_EXECUTION_RESULT_SENDER_H_
#include <functional>
#include <type_traits>
#include <utility>
#include "absl/status/status.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace internal_result {
template <typename Receiver, typename = void, typename = void, typename = void,
typename = void>
struct IsResultReceiver : public std::false_type {};
template <typename Receiver, typename T>
struct IsResultReceiver<
Receiver, T,
decltype(execution::set_value(std::declval<Receiver&>(),
std::declval<T>())),
decltype(execution::set_error(std::declval<Receiver&>(),
std::declval<absl::Status>())),
decltype(execution::set_cancel(std::declval<Receiver&>()))>
: public std::true_type {};
}
template <typename T, typename... V>
std::enable_if_t<((std::is_same_v<void, T> && sizeof...(V) == 0) ||
std::is_constructible_v<T, V&&...>)>
set_value(Result<T>& r, V&&... v) {
r.emplace(std::forward<V>(v)...);
}
template <typename T, typename... V>
std::enable_if_t<((std::is_same_v<void, T> && sizeof...(V) == 0) ||
std::is_constructible_v<T, V&&...>)>
set_value(std::reference_wrapper<Result<T>> r, V&&... v) {
set_value(r.get(), std::forward<V>(v)...);
}
template <typename T>
void set_error(Result<T>& r, absl::Status status) {
r = std::move(status);
}
template <typename T>
void set_error(std::reference_wrapper<Result<T>> r, absl::Status status) {
set_error(r.get(), std::move(status));
}
template <typename T>
void set_cancel(Result<T>& r) {
r = absl::CancelledError("");
}
template <typename T>
void set_cancel(std::reference_wrapper<Result<T>> r) {
set_cancel(r.get());
}
template <typename T, typename Receiver>
std::enable_if_t<internal_result::IsResultReceiver<Receiver, T>::value>
submit(Result<T>& r, Receiver&& receiver) {
if (r.has_value()) {
execution::set_value(receiver, r.value());
} else {
auto status = r.status();
if (status.code() == absl::StatusCode::kCancelled) {
execution::set_cancel(receiver);
} else {
execution::set_error(receiver, std::move(status));
}
}
}
template <typename T, typename Receiver>
std::enable_if_t<internal_result::IsResultReceiver<Receiver, T>::value>
submit(std::reference_wrapper<Result<T>> r, Receiver&& receiver) {
submit(r.get(), std::forward<Receiver>(receiver));
}
}
#endif | #include "tensorstore/util/execution/result_sender.h"
#include <functional>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/util/execution/any_receiver.h"
#include "tensorstore/util/execution/any_sender.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/execution/sender_testutil.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::Result;
using ::tensorstore::StatusIs;
TEST(ResultReceiverTest, SetCancel) {
Result<int> result = absl::InternalError("");
tensorstore::AnyReceiver<absl::Status, int> receiver{std::ref(result)};
tensorstore::execution::set_cancel(receiver);
EXPECT_THAT(result, StatusIs(absl::StatusCode::kCancelled));
}
TEST(ResultReceiverTest, SetValue) {
Result<int> result = absl::InternalError("");
tensorstore::AnyReceiver<absl::Status, int> receiver{std::ref(result)};
tensorstore::execution::set_value(receiver, 3);
EXPECT_EQ(result, Result<int>(3));
}
TEST(ResultReceiverTest, SetError) {
Result<int> result = absl::InternalError("");
tensorstore::AnyReceiver<absl::Status, int> receiver{std::ref(result)};
tensorstore::execution::set_error(receiver, absl::UnknownError("message"));
EXPECT_THAT(result, StatusIs(absl::StatusCode::kUnknown, "message"));
}
TEST(ResultSenderTest, SetValue) {
Result<int> result(3);
std::vector<std::string> log;
tensorstore::execution::submit(
tensorstore::AnySender<absl::Status, int>(result),
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(log, ::testing::ElementsAre("set_value: 3"));
}
TEST(ResultSenderTest, SetError) {
Result<int> result{absl::UnknownError("")};
std::vector<std::string> log;
tensorstore::execution::submit(
tensorstore::AnySender<absl::Status, int>(result),
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(log, ::testing::ElementsAre("set_error: UNKNOWN: "));
}
TEST(ResultSenderTest, SetCancel) {
Result<int> result{absl::CancelledError("")};
std::vector<std::string> log;
tensorstore::execution::submit(
tensorstore::AnySender<absl::Status, int>(result),
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(log, ::testing::ElementsAre("set_cancel"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/execution/result_sender.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/execution/result_sender_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
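A sketch of the receiver side exercised by the tests above: a Result<T> wrapped in std::ref acts as a receiver, so set_value stores a value and set_cancel stores a CancelledError. The Demo function and the value 42 are illustrative.
#include <functional>

#include "absl/status/status.h"
#include "tensorstore/util/execution/any_receiver.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/execution/result_sender.h"
#include "tensorstore/util/result.h"

tensorstore::Result<int> Demo(bool ok) {
  tensorstore::Result<int> result = absl::InternalError("unset");
  tensorstore::AnyReceiver<absl::Status, int> receiver{std::ref(result)};
  if (ok) {
    tensorstore::execution::set_value(receiver, 42);  // result holds 42
  } else {
    tensorstore::execution::set_cancel(receiver);  // result holds CancelledError
  }
  return result;
}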
915dbca6-57f2-4dc1-bd74-29bf051fe4d0 | cpp | google/tensorstore | collecting_sender | tensorstore/util/execution/collecting_sender.h | tensorstore/util/execution/collecting_sender_test.cc | #ifndef TENSORSTORE_UTIL_EXECUTION_COLLECTING_SENDER_H_
#define TENSORSTORE_UTIL_EXECUTION_COLLECTING_SENDER_H_
#include <utility>
#include "tensorstore/util/execution/execution.h"
namespace tensorstore {
namespace internal {
template <typename Container, typename SingleReceiver>
struct CollectingReceiver {
SingleReceiver receiver;
Container container;
template <typename CancelReceiver>
friend void set_starting(CollectingReceiver& self, CancelReceiver cancel) {
}
template <typename... V>
friend void set_value(CollectingReceiver& self, V... v) {
self.container.emplace_back(std::move(v)...);
}
template <typename E>
friend void set_error(CollectingReceiver& self, E e) {
execution::set_error(self.receiver, std::move(e));
}
friend void set_done(CollectingReceiver& self) {
execution::set_value(self.receiver, std::move(self.container));
}
friend void set_stopping(CollectingReceiver& self) {}
};
template <typename Container, typename Sender>
struct CollectingSender {
Sender sender;
template <typename Receiver>
friend void submit(CollectingSender& self, Receiver receiver) {
execution::submit(self.sender, CollectingReceiver<Container, Receiver>{
std::move(receiver)});
}
};
template <typename Container, typename Sender>
CollectingSender<Container, Sender> MakeCollectingSender(Sender sender) {
return {std::move(sender)};
}
}
}
#endif | #include "tensorstore/util/execution/collecting_sender.h"
#include <ostream>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/str_join.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/execution/sender.h"
#include "tensorstore/util/execution/sender_testutil.h"
#include "tensorstore/util/execution/sender_util.h"
#include "tensorstore/util/span.h"
namespace {
struct X {
explicit X(int value) : value(value) {}
int value;
friend std::ostream& operator<<(std::ostream& os, const std::vector<X>& vec) {
for (auto v : vec) {
os << v.value << ' ';
}
return os;
}
};
TEST(CollectingSenderTest, SuccessX) {
std::vector<std::string> log;
std::vector<int> input{1, 2, 3, 4};
tensorstore::execution::submit(
tensorstore::internal::MakeCollectingSender<std::vector<X>>(
tensorstore::RangeFlowSender<tensorstore::span<int>>{input}),
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(log, ::testing::ElementsAre("set_value: 1 2 3 4 "));
}
struct Y {
explicit Y(int value) : value(value) {}
int value;
template <typename Sink>
friend void AbslStringify(Sink& sink, const Y& x) {
absl::Format(&sink, "%d", x.value);
}
template <typename Sink>
friend void AbslStringify(Sink& sink, const std::vector<Y>& vec) {
sink.Append(absl::StrJoin(vec, " "));
}
};
TEST(CollectingSenderTest, SuccessY) {
std::vector<std::string> log;
std::vector<int> input{1, 2, 3, 4};
tensorstore::execution::submit(
tensorstore::internal::MakeCollectingSender<std::vector<Y>>(
tensorstore::RangeFlowSender<tensorstore::span<int>>{input}),
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(log, ::testing::ElementsAre("set_value: 1 2 3 4"));
}
TEST(CollectingSenderTest, Error) {
std::vector<std::string> log;
tensorstore::execution::submit(
tensorstore::internal::MakeCollectingSender<std::vector<X>>(
tensorstore::FlowSingleSender<tensorstore::ErrorSender<int>>{5}),
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(log, ::testing::ElementsAre("set_error: 5"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/execution/collecting_sender.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/execution/collecting_sender_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
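A sketch of MakeCollectingSender from the row above: each set_value emitted by the upstream flow sender is appended to the container, and the downstream receiver sees a single set_value carrying the whole container once the flow completes. LoggingReceiver comes from the sender_testutil.h header used by the tests; Demo is illustrative.
#include <string>
#include <vector>

#include "tensorstore/util/execution/collecting_sender.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/execution/sender_testutil.h"
#include "tensorstore/util/execution/sender_util.h"
#include "tensorstore/util/span.h"

void Demo() {
  std::vector<int> input{1, 2, 3, 4};
  std::vector<std::string> log;
  tensorstore::execution::submit(
      tensorstore::internal::MakeCollectingSender<std::vector<int>>(
          tensorstore::RangeFlowSender<tensorstore::span<int>>{input}),
      tensorstore::LoggingReceiver{&log});
  // log holds one "set_value: ..." entry describing the collected vector.
}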
a87ce158-3c36-4244-b5ab-8c4260ede5cd | cpp | google/tensorstore | sender_util | tensorstore/util/execution/sender_util.h | tensorstore/util/execution/sender_util_test.cc | #ifndef TENSORSTORE_UTIL_EXECUTION_SENDER_UTIL_H_
#define TENSORSTORE_UTIL_EXECUTION_SENDER_UTIL_H_
#include <atomic>
#include <iterator>
#include <utility>
#include "tensorstore/util/execution/execution.h"
namespace tensorstore {
template <typename FlowReceiver>
struct FlowSingleReceiver {
FlowReceiver receiver;
template <typename... V>
void set_value(V... v) {
execution::set_starting(receiver, [] {});
execution::set_value(receiver, std::move(v)...);
execution::set_done(receiver);
execution::set_stopping(receiver);
}
template <typename E>
void set_error(E e) {
execution::set_starting(receiver, [] {});
execution::set_error(receiver, std::move(e));
execution::set_stopping(receiver);
}
void set_cancel() {
execution::set_starting(receiver, [] {});
execution::set_done(receiver);
execution::set_stopping(receiver);
}
};
template <typename FlowReceiver>
FlowSingleReceiver(FlowReceiver receiver) -> FlowSingleReceiver<FlowReceiver>;
template <typename Sender>
struct FlowSingleSender {
Sender sender;
template <typename Receiver>
void submit(Receiver receiver) {
execution::submit(sender,
FlowSingleReceiver<Receiver>{std::move(receiver)});
}
};
template <typename Sender>
FlowSingleSender(Sender sender) -> FlowSingleSender<Sender>;
template <typename Range>
struct RangeFlowSender {
Range range;
template <typename Receiver>
friend void submit(RangeFlowSender& sender, Receiver receiver) {
std::atomic<bool> cancelled{false};
execution::set_starting(receiver, [&cancelled] { cancelled = true; });
using std::begin;
using std::end;
auto it = begin(sender.range);
auto end_it = end(sender.range);
for (; !cancelled && it != end_it; ++it) {
auto&& value = *it;
execution::set_value(receiver, std::forward<decltype(value)>(value));
}
execution::set_done(receiver);
execution::set_stopping(receiver);
}
};
}
#endif | #include "tensorstore/util/execution/sender_util.h"
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/util/execution/any_receiver.h"
#include "tensorstore/util/execution/any_sender.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/execution/sender.h"
#include "tensorstore/util/execution/sender_testutil.h"
namespace {
TEST(FlowSingleSenderTest, SetValue) {
std::vector<std::string> log;
tensorstore::execution::submit(
tensorstore::FlowSingleSender<tensorstore::ValueSender<int, std::string>>{
{3, "hello"}},
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(log, ::testing::ElementsAre("set_starting", "set_value: 3, hello",
"set_done", "set_stopping"));
}
TEST(FlowSingleSenderTest, AnyFlowSenderSetValue) {
std::vector<std::string> log;
tensorstore::execution::submit(
tensorstore::AnyFlowSender<int, int, std::string>(
tensorstore::FlowSingleSender<
tensorstore::ValueSender<int, std::string>>{{3, "hello"}}),
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(log, ::testing::ElementsAre("set_starting", "set_value: 3, hello",
"set_done", "set_stopping"));
}
TEST(FlowSingleSenderTest, SetError) {
std::vector<std::string> log;
tensorstore::execution::submit(
tensorstore::FlowSingleSender<tensorstore::ErrorSender<int>>{{3}},
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(log, ::testing::ElementsAre("set_starting", "set_error: 3",
"set_stopping"));
}
TEST(FlowSingleSenderTest, AnyFlowSenderSetError) {
std::vector<std::string> log;
tensorstore::execution::submit(
tensorstore::AnyFlowSender<int>(
tensorstore::FlowSingleSender<tensorstore::ErrorSender<int>>{{3}}),
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(log, ::testing::ElementsAre("set_starting", "set_error: 3",
"set_stopping"));
}
TEST(FlowSingleSenderTest, SetCancel) {
std::vector<std::string> log;
tensorstore::execution::submit(
tensorstore::FlowSingleSender<tensorstore::CancelSender>{},
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(
log, ::testing::ElementsAre("set_starting", "set_done", "set_stopping"));
}
TEST(FlowSingleSenderTest, AnyFlowSenderSetCancel) {
std::vector<std::string> log;
tensorstore::execution::submit(
tensorstore::AnyFlowSender<int>(
tensorstore::FlowSingleSender<tensorstore::CancelSender>{}),
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(
log, ::testing::ElementsAre("set_starting", "set_done", "set_stopping"));
}
TEST(RangeFlowSenderTest, Basic) {
std::vector<int> values{1, 2, 3};
std::vector<std::string> log;
tensorstore::execution::submit(
tensorstore::AnyFlowSender<int, int>(
tensorstore::RangeFlowSender<std::vector<int>&>{values}),
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(log, ::testing::ElementsAre("set_starting", "set_value: 1",
"set_value: 2", "set_value: 3",
"set_done", "set_stopping"));
}
TEST(RangeFlowSenderTest, CancelImmediately) {
std::vector<int> values{1, 2, 3};
std::vector<std::string> log;
struct Receiver : public tensorstore::LoggingReceiver {
tensorstore::AnyCancelReceiver cancel;
void set_starting(tensorstore::AnyCancelReceiver cancel) {
this->tensorstore::LoggingReceiver::set_starting({});
cancel();
}
};
tensorstore::execution::submit(
tensorstore::AnyFlowSender<int, int>(
tensorstore::RangeFlowSender<std::vector<int>&>{values}),
Receiver{{&log}});
EXPECT_THAT(
log, ::testing::ElementsAre("set_starting", "set_done", "set_stopping"));
}
TEST(RangeFlowSenderTest, Cancel) {
std::vector<int> values{1, 2, 3};
std::vector<std::string> log;
struct Receiver : public tensorstore::LoggingReceiver {
tensorstore::AnyCancelReceiver cancel;
void set_starting(tensorstore::AnyCancelReceiver cancel) {
this->cancel = std::move(cancel);
this->tensorstore::LoggingReceiver::set_starting({});
}
void set_value(int value) {
this->tensorstore::LoggingReceiver::set_value(value);
if (value == 2) {
this->cancel();
}
}
};
tensorstore::execution::submit(
tensorstore::AnyFlowSender<int, int>(
tensorstore::RangeFlowSender<std::vector<int>&>{values}),
Receiver{{&log}});
EXPECT_THAT(
log, ::testing::ElementsAre("set_starting", "set_value: 1",
"set_value: 2", "set_done", "set_stopping"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/execution/sender_util.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/execution/sender_util_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
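A sketch of FlowSingleSender from the row above: it adapts a single-shot sender to the flow protocol, so the receiver observes set_starting, one set_value, set_done and set_stopping, matching the unit tests. Demo and the value 42 are illustrative.
#include <string>
#include <vector>

#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/execution/sender.h"
#include "tensorstore/util/execution/sender_testutil.h"
#include "tensorstore/util/execution/sender_util.h"

void Demo() {
  std::vector<std::string> log;
  tensorstore::execution::submit(
      tensorstore::FlowSingleSender<tensorstore::ValueSender<int>>{{42}},
      tensorstore::LoggingReceiver{&log});
  // log: "set_starting", "set_value: 42", "set_done", "set_stopping"
}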
58e77cc6-ed69-45d3-b6b2-27bf9c79df8e | cpp | google/tensorstore | future_sender | tensorstore/util/execution/future_sender.h | tensorstore/util/execution/future_sender_test.cc | #ifndef TENSORSTORE_UTIL_EXECUTION_FUTURE_SENDER_H_
#define TENSORSTORE_UTIL_EXECUTION_FUTURE_SENDER_H_
#include <functional>
#include <type_traits>
#include <utility>
#include "absl/status/status.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/future.h"
namespace tensorstore {
namespace internal_future {
template <typename Receiver, typename = void, typename = void, typename = void,
typename = void>
struct IsFutureReceiver : public std::false_type {};
template <typename Receiver, typename T>
struct IsFutureReceiver<
Receiver, T,
decltype(execution::set_value(std::declval<Receiver&>(),
std::declval<T>())),
decltype(execution::set_error(std::declval<Receiver&>(),
std::declval<absl::Status>())),
decltype(execution::set_cancel(std::declval<Receiver&>()))>
: public std::true_type {};
}
template <typename T, typename... V>
std::enable_if_t<(!std::is_const_v<T> &&
std::is_constructible_v<typename Promise<T>::result_type,
std::in_place_t, V...>)>
set_value(const Promise<T>& promise, V&&... v) {
promise.SetResult(std::in_place, std::forward<V>(v)...);
}
template <typename T, typename... V>
std::enable_if_t<(!std::is_const_v<T> &&
std::is_constructible_v<typename Promise<T>::result_type,
std::in_place_t, V...>)>
set_value(std::reference_wrapper<const Promise<T>> promise, V&&... v) {
set_value(promise.get(), std::forward<V>(v)...);
}
template <typename T>
void set_error(const Promise<T>& promise, absl::Status error) {
promise.SetResult(std::move(error));
}
template <typename T>
void set_error(std::reference_wrapper<const Promise<T>> promise,
absl::Status error) {
set_error(promise.get(), std::move(error));
}
template <typename T>
void set_cancel(const Promise<T>& promise) {
promise.SetResult(absl::CancelledError(""));
}
template <typename T>
void set_cancel(std::reference_wrapper<const Promise<T>> promise) {
set_cancel(promise.get());
}
template <typename T, typename Receiver>
std::enable_if_t<internal_future::IsFutureReceiver<Receiver, T>::value>
submit(Future<T>& f, Receiver receiver) {
f.Force();
f.ExecuteWhenReady([r = std::move(receiver)](ReadyFuture<T> ready) mutable {
auto& result = ready.result();
if (result.has_value()) {
execution::set_value(r, result.value());
} else {
auto status = ready.status();
if (status.code() == absl::StatusCode::kCancelled) {
execution::set_cancel(r);
} else {
execution::set_error(r, std::move(status));
}
}
});
}
template <typename T, typename Receiver>
std::enable_if_t<internal_future::IsFutureReceiver<Receiver, T>::value>
submit(std::reference_wrapper<Future<T>> f, Receiver&& receiver) {
submit(f.get(), std::forward<Receiver>(receiver));
}
template <typename T, typename Sender>
Future<T> MakeSenderFuture(Sender sender) {
auto pair = PromiseFuturePair<T>::Make();
struct Callback {
Sender sender;
void operator()(Promise<T> promise) { execution::submit(sender, promise); }
};
pair.promise.ExecuteWhenForced(Callback{std::move(sender)});
return pair.future;
}
}
#endif | #include "tensorstore/util/execution/future_sender.h"
#include <functional>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/util/execution/any_receiver.h"
#include "tensorstore/util/execution/any_sender.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/execution/sender.h"
#include "tensorstore/util/execution/sender_testutil.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/result.h"
namespace {
using ::tensorstore::Promise;
using ::tensorstore::PromiseFuturePair;
using ::tensorstore::Result;
TEST(PromiseReceiverTest, SetCancel) {
auto pair = PromiseFuturePair<int>::Make();
tensorstore::execution::set_cancel(pair.promise);
EXPECT_EQ(pair.future.result(), Result<int>(absl::CancelledError("")));
}
TEST(PromiseReceiverTest, AnyReceiverSetCancel) {
auto pair = PromiseFuturePair<int>::Make();
tensorstore::execution::set_cancel(
tensorstore::AnyReceiver<absl::Status, int>(std::cref(pair.promise)));
EXPECT_EQ(pair.future.result(), Result<int>(absl::CancelledError("")));
}
TEST(PromiseReceiverTest, SetValue) {
auto pair = PromiseFuturePair<int>::Make();
tensorstore::execution::set_value(pair.promise, 3);
EXPECT_EQ(pair.future.result(), Result<int>(3));
}
TEST(PromiseReceiverTest, SetValueThenSetCancel) {
auto pair = PromiseFuturePair<int>::Make();
tensorstore::execution::set_value(pair.promise, 3);
tensorstore::execution::set_cancel(pair.promise);
EXPECT_EQ(pair.future.result(), Result<int>(3));
}
TEST(PromiseReceiverTest, AnyReceiverSetValue) {
auto pair = PromiseFuturePair<int>::Make();
tensorstore::execution::set_value(
tensorstore::AnyReceiver<absl::Status, int>(std::cref(pair.promise)), 3);
EXPECT_EQ(pair.future.result(), Result<int>(3));
}
TEST(PromiseReceiverTest, SetError) {
auto pair = PromiseFuturePair<int>::Make();
tensorstore::execution::set_error(
tensorstore::AnyReceiver<absl::Status, int>(pair.promise),
absl::UnknownError("message"));
EXPECT_EQ(pair.future.result(), Result<int>(absl::UnknownError("message")));
}
TEST(PromiseReceiverTest, AnyReceiverSetError) {
auto pair = PromiseFuturePair<int>::Make();
tensorstore::execution::set_error(std::cref(pair.promise),
absl::UnknownError("message"));
EXPECT_EQ(pair.future.result(), Result<int>(absl::UnknownError("message")));
}
TEST(FutureSenderTest, SetValue) {
auto pair = PromiseFuturePair<int>::Make();
bool forced = false;
pair.promise.ExecuteWhenForced([&](Promise<int>) { forced = true; });
std::vector<std::string> log1, log2;
tensorstore::execution::submit(pair.future,
tensorstore::LoggingReceiver{&log1});
tensorstore::execution::submit(pair.future,
tensorstore::LoggingReceiver{&log2});
EXPECT_THAT(log1, ::testing::ElementsAre());
EXPECT_THAT(log2, ::testing::ElementsAre());
EXPECT_TRUE(forced);
pair.promise.SetResult(3);
EXPECT_THAT(log1, ::testing::ElementsAre("set_value: 3"));
EXPECT_THAT(log2, ::testing::ElementsAre("set_value: 3"));
}
TEST(FutureSenderTest, AnySenderSetValue) {
auto pair = PromiseFuturePair<int>::Make();
bool forced = false;
pair.promise.ExecuteWhenForced([&](Promise<int>) { forced = true; });
std::vector<std::string> log;
tensorstore::execution::submit(
tensorstore::AnySender<absl::Status, int>(pair.future),
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(log, ::testing::ElementsAre());
EXPECT_TRUE(forced);
pair.promise.SetResult(3);
EXPECT_THAT(log, ::testing::ElementsAre("set_value: 3"));
}
TEST(FutureSenderTest, SetError) {
auto pair = PromiseFuturePair<int>::Make();
bool forced = false;
pair.promise.ExecuteWhenForced([&](Promise<int>) { forced = true; });
std::vector<std::string> log;
tensorstore::execution::submit(std::ref(pair.future),
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(log, ::testing::ElementsAre());
EXPECT_TRUE(forced);
pair.promise.SetResult(absl::UnknownError(""));
EXPECT_THAT(log, ::testing::ElementsAre("set_error: UNKNOWN: "));
}
TEST(FutureSenderTest, AnySenderSetError) {
auto pair = PromiseFuturePair<int>::Make();
bool forced = false;
pair.promise.ExecuteWhenForced([&](Promise<int>) { forced = true; });
std::vector<std::string> log;
tensorstore::execution::submit(
tensorstore::AnySender<absl::Status, int>(pair.future),
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(log, ::testing::ElementsAre());
EXPECT_TRUE(forced);
pair.promise.SetResult(absl::UnknownError(""));
EXPECT_THAT(log, ::testing::ElementsAre("set_error: UNKNOWN: "));
}
TEST(FutureSenderTest, SetCancel) {
auto pair = PromiseFuturePair<int>::Make();
bool forced = false;
pair.promise.ExecuteWhenForced([&](Promise<int>) { forced = true; });
std::vector<std::string> log;
tensorstore::execution::submit(pair.future,
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(log, ::testing::ElementsAre());
EXPECT_TRUE(forced);
pair.promise.SetResult(absl::CancelledError(""));
EXPECT_THAT(log, ::testing::ElementsAre("set_cancel"));
}
TEST(FutureSenderTest, AnySenderSetCancel) {
auto pair = PromiseFuturePair<int>::Make();
bool forced = false;
pair.promise.ExecuteWhenForced([&](Promise<int>) { forced = true; });
std::vector<std::string> log;
tensorstore::execution::submit(
tensorstore::AnySender<absl::Status, int>(std::ref(pair.future)),
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(log, ::testing::ElementsAre());
EXPECT_TRUE(forced);
pair.promise.SetResult(absl::CancelledError(""));
EXPECT_THAT(log, ::testing::ElementsAre("set_cancel"));
}
TEST(MakeSenderFutureTest, SetValue) {
auto future =
tensorstore::MakeSenderFuture<int>(tensorstore::ValueSender<int>{3});
EXPECT_FALSE(future.ready());
EXPECT_EQ(future.result(), Result<int>(3));
}
TEST(MakeSenderFutureTest, SetError) {
auto future = tensorstore::MakeSenderFuture<int>(
tensorstore::ErrorSender<absl::Status>{absl::UnknownError("")});
EXPECT_FALSE(future.ready());
EXPECT_EQ(future.result(), Result<int>(absl::UnknownError("")));
}
TEST(MakeSenderFutureTest, SetCancel) {
auto future = tensorstore::MakeSenderFuture<int>(tensorstore::CancelSender{});
EXPECT_FALSE(future.ready());
EXPECT_EQ(future.result(), Result<int>(absl::CancelledError("")));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/execution/future_sender.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/execution/future_sender_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
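A sketch of MakeSenderFuture from the row above: the wrapped sender is only submitted once the future is forced, for example by asking for its result. Demo and the value 7 are illustrative.
#include "tensorstore/util/execution/future_sender.h"
#include "tensorstore/util/execution/sender.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/result.h"

tensorstore::Result<int> Demo() {
  tensorstore::Future<int> future =
      tensorstore::MakeSenderFuture<int>(tensorstore::ValueSender<int>{7});
  // Nothing runs until the future is forced; result() forces it and waits.
  return future.result();  // holds 7
}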
2172c17c-1538-4397-a83d-016d03c34528 | cpp | google/tensorstore | sender | tensorstore/util/execution/sender.h | tensorstore/util/execution/sender_test.cc | #ifndef TENSORSTORE_UTIL_EXECUTION_SENDER_H_
#define TENSORSTORE_UTIL_EXECUTION_SENDER_H_
#include <cstddef>
#include <tuple>
#include <utility>
#include "tensorstore/util/execution/execution.h"
namespace tensorstore {
class NullReceiver {
public:
template <typename CancelReceiver>
friend void set_starting(NullReceiver&, CancelReceiver) {}
template <typename... V>
friend void set_value(NullReceiver&, V...) {}
friend void set_done(NullReceiver&) {}
template <typename E>
friend void set_error(NullReceiver&, E e) {}
friend void set_cancel(NullReceiver&) {}
friend void set_stopping(NullReceiver&) {}
};
class NullSender {
template <typename R>
friend void submit(NullSender&, R&&) {}
};
struct CancelSender {
template <typename Receiver>
friend void submit(CancelSender, Receiver&& receiver) {
execution::set_cancel(receiver);
}
};
template <typename E>
struct ErrorSender {
E error;
template <typename Receiver>
friend void submit(ErrorSender& sender, Receiver&& receiver) {
execution::set_error(receiver, std::move(sender.error));
}
};
template <typename E>
ErrorSender(E error) -> ErrorSender<E>;
template <typename... V>
struct ValueSender {
ValueSender(V... v) : value(std::move(v)...) {}
std::tuple<V...> value;
template <typename Receiver>
friend void submit(ValueSender& sender, Receiver&& receiver) {
sender.SubmitHelper(std::forward<Receiver>(receiver),
std::make_index_sequence<sizeof...(V)>{});
}
private:
template <typename Receiver, size_t... Is>
void SubmitHelper(Receiver&& receiver, std::index_sequence<Is...>) {
execution::set_value(receiver, std::move(std::get<Is>(value))...);
}
};
template <typename... V>
ValueSender(V... v) -> ValueSender<V...>;
}
#endif | #include "tensorstore/util/execution/sender.h"
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/internal/type_traits.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/execution/sender_testutil.h"
#include "tensorstore/util/executor.h"
namespace {
template <typename T, typename... Arg>
using trait_has_submit =
decltype(std::declval<T&&>().submit(std::declval<Arg>()...));
template <typename... Arg>
using trait_has_adl_submit = decltype(submit(std::declval<Arg>()...));
static_assert(!tensorstore::internal::is_detected<
trait_has_submit, tensorstore::NullSender&, int>::value);
static_assert(tensorstore::internal::is_detected<
trait_has_adl_submit, tensorstore::NullSender&, int>::value);
TEST(NullReceiverTest, SetDone) {
tensorstore::NullReceiver receiver;
tensorstore::execution::set_done(receiver);
}
TEST(NullReceiverTest, SetValue) {
tensorstore::NullReceiver receiver;
tensorstore::execution::set_value(receiver, 3, 4);
}
TEST(NullReceiverTest, SetError) {
tensorstore::NullReceiver receiver;
tensorstore::execution::set_error(receiver, 10);
}
TEST(CancelSenderTest, Basic) {
std::vector<std::string> log;
tensorstore::execution::submit(tensorstore::CancelSender{},
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(log, ::testing::ElementsAre("set_cancel"));
}
TEST(ErrorSenderTest, Basic) {
std::vector<std::string> log;
tensorstore::execution::submit(tensorstore::ErrorSender<int>{3},
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(log, ::testing::ElementsAre("set_error: 3"));
}
TEST(ValueSenderTest, Basic) {
std::vector<std::string> log;
tensorstore::execution::submit(
tensorstore::ValueSender<int, std::string>{3, "hello"},
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(log, ::testing::ElementsAre("set_value: 3, hello"));
}
template <typename Sender, typename Executor>
struct SenderWithExecutor {
Executor executor;
Sender sender;
template <typename Receiver>
void submit(Receiver receiver) {
struct Callback {
Sender sender;
Receiver receiver;
void operator()() {
tensorstore::execution::submit(sender, std::move(receiver));
}
};
executor(Callback{std::move(sender), std::move(receiver)});
}
};
struct QueueExecutor {
std::vector<tensorstore::ExecutorTask>* queue;
void operator()(tensorstore::ExecutorTask task) const {
queue->push_back(std::move(task));
}
};
TEST(SenderWithExecutorTest, SetValue) {
std::vector<tensorstore::ExecutorTask> queue;
std::vector<std::string> log;
QueueExecutor executor{&queue};
tensorstore::execution::submit(
SenderWithExecutor<tensorstore::ValueSender<int, std::string>,
tensorstore::Executor>{executor, {3, "hello"}},
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(log, ::testing::ElementsAre());
EXPECT_EQ(1, queue.size());
std::move(queue[0])();
EXPECT_THAT(log, ::testing::ElementsAre("set_value: 3, hello"));
}
TEST(SenderWithExecutorTest, SetError) {
std::vector<tensorstore::ExecutorTask> queue;
std::vector<std::string> log;
QueueExecutor executor{&queue};
tensorstore::execution::submit(
SenderWithExecutor<tensorstore::ErrorSender<int>, tensorstore::Executor>{
executor, {3}},
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(log, ::testing::ElementsAre());
EXPECT_EQ(1, queue.size());
std::move(queue[0])();
EXPECT_THAT(log, ::testing::ElementsAre("set_error: 3"));
}
TEST(SenderWithExecutorTest, SetCancel) {
std::vector<tensorstore::ExecutorTask> queue;
std::vector<std::string> log;
QueueExecutor executor{&queue};
tensorstore::execution::submit(
SenderWithExecutor<tensorstore::CancelSender, tensorstore::Executor>{
executor},
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(log, ::testing::ElementsAre());
EXPECT_EQ(1, queue.size());
std::move(queue[0])();
EXPECT_THAT(log, ::testing::ElementsAre("set_cancel"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/execution/sender.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/execution/sender_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
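A sketch of the three trivial senders defined in the row above: each delivers exactly one signal to the receiver it is submitted to. Demo and the particular values are illustrative; LoggingReceiver is the test utility used throughout these tests.
#include <string>
#include <vector>

#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/execution/sender.h"
#include "tensorstore/util/execution/sender_testutil.h"

void Demo() {
  std::vector<std::string> log;
  tensorstore::execution::submit(
      tensorstore::ValueSender<int, std::string>{3, "hi"},
      tensorstore::LoggingReceiver{&log});
  tensorstore::execution::submit(tensorstore::ErrorSender<int>{-1},
                                 tensorstore::LoggingReceiver{&log});
  tensorstore::execution::submit(tensorstore::CancelSender{},
                                 tensorstore::LoggingReceiver{&log});
  // log: "set_value: 3, hi", "set_error: -1", "set_cancel"
}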
50dc4182-df60-4ae7-a9a7-df5628cbc0a3 | cpp | google/tensorstore | sync_flow_sender | tensorstore/util/execution/sync_flow_sender.h | tensorstore/util/execution/sync_flow_sender_test.cc | #ifndef TENSORSTORE_UTIL_EXECUTION_SYNC_FLOW_SENDER_H_
#define TENSORSTORE_UTIL_EXECUTION_SYNC_FLOW_SENDER_H_
#include <utility>
#include "absl/synchronization/mutex.h"
#include "tensorstore/util/execution/execution.h"
namespace tensorstore {
template <typename Receiver>
struct SyncFlowReceiver {
SyncFlowReceiver() = default;
SyncFlowReceiver(Receiver receiver) : receiver(std::move(receiver)) {}
SyncFlowReceiver(SyncFlowReceiver&& other)
: receiver(std::move(other.receiver)) {}
SyncFlowReceiver& operator=(SyncFlowReceiver&& other) {
receiver = std::move(other.receiver);
return *this;
}
template <typename CancelReceiver>
friend void set_starting(SyncFlowReceiver& self, CancelReceiver cancel) {
execution::set_starting(self.receiver, std::move(cancel));
}
template <typename... V>
friend void set_value(SyncFlowReceiver& self, V... v) {
absl::MutexLock lock(&self.mutex);
execution::set_value(self.receiver, std::move(v)...);
}
friend void set_done(SyncFlowReceiver& self) {
execution::set_done(self.receiver);
}
template <typename E>
friend void set_error(SyncFlowReceiver& self, E e) {
execution::set_error(self.receiver, std::move(e));
}
friend void set_stopping(SyncFlowReceiver& self) {
execution::set_stopping(self.receiver);
}
Receiver receiver;
absl::Mutex mutex;
};
template <typename Sender>
struct SyncFlowSender {
Sender sender;
template <typename Receiver>
friend void submit(SyncFlowSender& self, Receiver receiver) {
execution::submit(self.sender,
SyncFlowReceiver<Receiver>{std::move(receiver)});
}
};
template <typename Sender>
SyncFlowSender<Sender> MakeSyncFlowSender(Sender sender) {
return {std::move(sender)};
}
}
#endif | #include "tensorstore/util/execution/sync_flow_sender.h"
#include <stddef.h>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/internal/thread/thread.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/execution/sender_testutil.h"
namespace {
struct ConcurrentSender {
size_t num_threads;
bool error;
template <typename Receiver>
void submit(Receiver receiver) {
tensorstore::execution::set_starting(receiver, [] {});
std::vector<tensorstore::internal::Thread> threads;
for (size_t i = 0; i < num_threads; ++i) {
threads.emplace_back(tensorstore::internal::Thread(
{"sender"},
[i, &receiver] { tensorstore::execution::set_value(receiver, i); }));
}
for (auto& thread : threads) thread.Join();
if (error) {
tensorstore::execution::set_error(receiver, 3);
} else {
tensorstore::execution::set_done(receiver);
}
tensorstore::execution::set_stopping(receiver);
}
};
TEST(SyncFlowSender, Values) {
std::vector<std::string> log;
const size_t num_threads = 10;
tensorstore::execution::submit(
tensorstore::MakeSyncFlowSender(
          ConcurrentSender{num_threads, /*error=*/false}),
tensorstore::LoggingReceiver{&log});
ASSERT_EQ(num_threads + 3, log.size());
EXPECT_EQ("set_starting", log[0]);
EXPECT_EQ("set_done", log[log.size() - 2]);
EXPECT_EQ("set_stopping", log[log.size() - 1]);
EXPECT_THAT(
log, ::testing::UnorderedElementsAre(
"set_starting", "set_value: 0", "set_value: 1", "set_value: 2",
"set_value: 3", "set_value: 4", "set_value: 5", "set_value: 6",
"set_value: 7", "set_value: 8", "set_value: 9", "set_done",
"set_stopping"));
}
TEST(SyncFlowSender, Error) {
std::vector<std::string> log;
const size_t num_threads = 10;
tensorstore::execution::submit(
tensorstore::MakeSyncFlowSender(
          ConcurrentSender{num_threads, /*error=*/true}),
tensorstore::LoggingReceiver{&log});
ASSERT_EQ(num_threads + 3, log.size());
EXPECT_EQ("set_starting", log[0]);
EXPECT_EQ("set_error: 3", log[log.size() - 2]);
EXPECT_EQ("set_stopping", log[log.size() - 1]);
EXPECT_THAT(
log, ::testing::UnorderedElementsAre(
"set_starting", "set_value: 0", "set_value: 1", "set_value: 2",
"set_value: 3", "set_value: 4", "set_value: 5", "set_value: 6",
"set_value: 7", "set_value: 8", "set_value: 9", "set_error: 3",
"set_stopping"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/execution/sync_flow_sender.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/execution/sync_flow_sender_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
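A sketch of MakeSyncFlowSender from the row above: it interposes a mutex so concurrent set_value calls from the wrapped sender reach the receiver one at a time. The wrapped sender here is a single-threaded RangeFlowSender purely to keep the sketch short; Demo is illustrative.
#include <string>
#include <vector>

#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/execution/sender_testutil.h"
#include "tensorstore/util/execution/sender_util.h"
#include "tensorstore/util/execution/sync_flow_sender.h"

void Demo() {
  std::vector<int> values{1, 2, 3};
  std::vector<std::string> log;
  tensorstore::execution::submit(
      tensorstore::MakeSyncFlowSender(
          tensorstore::RangeFlowSender<std::vector<int>&>{values}),
      tensorstore::LoggingReceiver{&log});
  // Values arrive in order here; with a multi-threaded sender they would
  // still arrive one at a time, guarded by the SyncFlowReceiver mutex.
}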
a2bab1db-6420-4fbc-87b6-1fc3ea4883f7 | cpp | google/tensorstore | future_collecting_receiver | tensorstore/util/execution/future_collecting_receiver.h | tensorstore/util/execution/future_collecting_receiver_test.cc | #ifndef TENSORSTORE_UTIL_EXECUTION_FUTURE_COLLECTING_RECEIVER_H_
#define TENSORSTORE_UTIL_EXECUTION_FUTURE_COLLECTING_RECEIVER_H_
#include <utility>
#include "absl/status/status.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/future.h"
namespace tensorstore {
template <typename Container>
struct FutureCollectingReceiver {
Promise<Container> promise;
Container container;
FutureCallbackRegistration cancel_registration;
template <typename... V>
void set_value(V&&... v) {
container.emplace_back(std::forward<V>(v)...);
}
void set_error(absl::Status status) { promise.SetResult(std::move(status)); }
void set_done() { promise.SetResult(std::move(container)); }
template <typename Cancel>
void set_starting(Cancel cancel) {
cancel_registration = promise.ExecuteWhenNotNeeded(std::move(cancel));
}
void set_stopping() { cancel_registration.Unregister(); }
};
template <typename Container, typename Sender>
Future<Container> CollectFlowSenderIntoFuture(Sender sender) {
auto [promise, future] = PromiseFuturePair<Container>::Make();
execution::submit(std::move(sender),
FutureCollectingReceiver<Container>{std::move(promise)});
return std::move(future);
}
}
#endif | #include "tensorstore/util/execution/future_collecting_receiver.h"
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/execution/sender_testutil.h"
#include "tensorstore/util/execution/sender_util.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::CollectFlowSenderIntoFuture;
using ::tensorstore::MatchesStatus;
TEST(CollectingSenderTest, Success) {
std::vector<int> input{1, 2, 3, 4};
EXPECT_THAT(CollectFlowSenderIntoFuture<std::vector<int>>(
tensorstore::RangeFlowSender<tensorstore::span<int>>{input})
.result(),
::testing::Optional(::testing::ElementsAreArray(input)));
}
TEST(CollectingSenderTest, Error) {
EXPECT_THAT(
CollectFlowSenderIntoFuture<std::vector<int>>(
tensorstore::FlowSingleSender<tensorstore::ErrorSender<absl::Status>>{
absl::UnknownError("abc")})
.result(),
MatchesStatus(absl::StatusCode::kUnknown, "abc"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/execution/future_collecting_receiver.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/execution/future_collecting_receiver_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
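A sketch of CollectFlowSenderIntoFuture from the row above: every value emitted by the flow sender is gathered into the container, which is delivered through the future once set_done is received. Demo and the input values are illustrative.
#include <vector>

#include "tensorstore/util/execution/future_collecting_receiver.h"
#include "tensorstore/util/execution/sender_util.h"
#include "tensorstore/util/span.h"

std::vector<int> Demo() {
  std::vector<int> input{1, 2, 3, 4};
  return tensorstore::CollectFlowSenderIntoFuture<std::vector<int>>(
             tensorstore::RangeFlowSender<tensorstore::span<int>>{input})
      .result()
      .value();  // {1, 2, 3, 4}
}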
f56698df-bac0-4617-9f71-d61e1f535cee | cpp | google/tensorstore | any_sender | tensorstore/util/execution/any_sender.h | tensorstore/util/execution/any_sender_test.cc | #ifndef TENSORSTORE_UTIL_EXECUTION_ANY_SENDER_H_
#define TENSORSTORE_UTIL_EXECUTION_ANY_SENDER_H_
#include <utility>
#include "absl/base/attributes.h"
#include "tensorstore/internal/poly/poly.h"
#include "tensorstore/util/execution/any_receiver.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/execution/sender.h"
namespace tensorstore {
namespace internal_sender {
template <typename E, typename... V>
using SenderPoly =
poly::Poly<(sizeof(V) + ... + 0), false,
void(internal_execution::submit_t, AnyReceiver<E, V...>)>;
template <typename E, typename... V>
using FlowSenderPoly =
poly::Poly<(sizeof(V) + ... + 0), false,
void(internal_execution::submit_t, AnyFlowReceiver<E, V...>)>;
}
template <typename E, typename... V>
class AnySender : public internal_sender::SenderPoly<E, V...> {
using Base = internal_sender::SenderPoly<E, V...>;
public:
using Base::Base;
AnySender() : Base(NullSender{}) {}
ABSL_ATTRIBUTE_ALWAYS_INLINE void submit(AnyReceiver<E, V...> receiver) {
(*this)(internal_execution::submit_t{}, std::move(receiver));
}
};
template <typename E, typename... V>
class AnyFlowSender : public internal_sender::FlowSenderPoly<E, V...> {
using Base = internal_sender::FlowSenderPoly<E, V...>;
public:
using Base::Base;
AnyFlowSender() : Base(NullSender{}) {}
ABSL_ATTRIBUTE_ALWAYS_INLINE void submit(AnyFlowReceiver<E, V...> receiver) {
(*this)(internal_execution::submit_t{}, std::move(receiver));
}
};
}
#endif | #include "tensorstore/util/execution/any_sender.h"
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/execution/sender.h"
#include "tensorstore/util/execution/sender_testutil.h"
#include "tensorstore/util/executor.h"
namespace {
TEST(AnySenderTest, Construct) {
tensorstore::AnySender<int, std::string> sender(tensorstore::CancelSender{});
}
TEST(AnySenderTest, Assignment) {
tensorstore::AnySender<int, std::string> sender;
sender = tensorstore::CancelSender{};
}
TEST(AnySenderTest, Submit) {
tensorstore::AnySender<int, std::string> sender;
tensorstore::execution::submit(
tensorstore::AnySender<int>(tensorstore::NullSender{}),
tensorstore::NullReceiver{});
}
TEST(AnySenderTest, CancelSender) {
std::vector<std::string> log;
tensorstore::execution::submit(
tensorstore::AnySender<int>(tensorstore::CancelSender{}),
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(log, ::testing::ElementsAre("set_cancel"));
}
TEST(AnySenderTest, ErrorSender) {
std::vector<std::string> log;
tensorstore::execution::submit(
tensorstore::AnySender<int>(tensorstore::ErrorSender<int>{3}),
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(log, ::testing::ElementsAre("set_error: 3"));
}
TEST(AnySenderTest, ValueSender) {
std::vector<std::string> log;
tensorstore::execution::submit(
tensorstore::AnySender<int, int, std::string>(
tensorstore::ValueSender<int, std::string>{3, "hello"}),
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(log, ::testing::ElementsAre("set_value: 3, hello"));
}
template <typename Sender, typename Executor>
struct SenderWithExecutor {
Executor executor;
Sender sender;
template <typename Receiver>
void submit(Receiver receiver) {
struct Callback {
Sender sender;
Receiver receiver;
void operator()() {
tensorstore::execution::submit(sender, std::move(receiver));
}
};
executor(Callback{std::move(sender), std::move(receiver)});
}
};
struct QueueExecutor {
std::vector<tensorstore::ExecutorTask>* queue;
void operator()(tensorstore::ExecutorTask task) const {
queue->push_back(std::move(task));
}
};
TEST(AnySenderWithExecutor, SetValue) {
std::vector<tensorstore::ExecutorTask> queue;
std::vector<std::string> log;
QueueExecutor executor{&queue};
tensorstore::execution::submit(
tensorstore::AnySender<int, int, std::string>(
SenderWithExecutor<tensorstore::ValueSender<int, std::string>,
tensorstore::Executor>{executor, {3, "hello"}}),
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(log, ::testing::ElementsAre());
EXPECT_EQ(1, queue.size());
std::move(queue[0])();
EXPECT_THAT(log, ::testing::ElementsAre("set_value: 3, hello"));
}
TEST(AnySenderWithExecutor, SetCancel) {
std::vector<tensorstore::ExecutorTask> queue;
std::vector<std::string> log;
QueueExecutor executor{&queue};
tensorstore::execution::submit(
tensorstore::AnySender<int>(
SenderWithExecutor<tensorstore::CancelSender, tensorstore::Executor>{
executor}),
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(log, ::testing::ElementsAre());
EXPECT_EQ(1, queue.size());
std::move(queue[0])();
EXPECT_THAT(log, ::testing::ElementsAre("set_cancel"));
}
TEST(AnySenderWithExecutor, SetError) {
std::vector<tensorstore::ExecutorTask> queue;
std::vector<std::string> log;
QueueExecutor executor{&queue};
tensorstore::execution::submit(
tensorstore::AnySender<int>(
SenderWithExecutor<tensorstore::ErrorSender<int>,
tensorstore::Executor>{executor, {3}}),
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(log, ::testing::ElementsAre());
EXPECT_EQ(1, queue.size());
std::move(queue[0])();
EXPECT_THAT(log, ::testing::ElementsAre("set_error: 3"));
}
TEST(AnyFlowSenderTest, Construct) {
tensorstore::AnyFlowSender<int, std::string> sender(
tensorstore::NullSender{});
}
TEST(AnyFlowSenderTest, Assignment) {
tensorstore::AnyFlowSender<int, std::string> sender;
sender = tensorstore::NullSender{};
}
TEST(AnyFlowSenderTest, Submit) {
tensorstore::AnyFlowSender<int, std::string> sender;
tensorstore::execution::submit(std::move(sender),
tensorstore::NullReceiver{});
}
TEST(AnyFlowSenderTest, ValueSender) {
std::vector<std::string> log;
tensorstore::AnyFlowSender<int, std::string> sender(
tensorstore::ValueSender("A"));
tensorstore::execution::submit(std::move(sender),
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(log, ::testing::ElementsAre("set_value: A"));
}
TEST(AnyFlowSenderTest, ErrorSender) {
std::vector<std::string> log;
tensorstore::AnyFlowSender<int, std::string> sender(
tensorstore::ErrorSender<int>{4});
tensorstore::execution::submit(std::move(sender),
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(log, ::testing::ElementsAre("set_error: 4"));
}
struct MySender {
template <typename Receiver>
void submit(Receiver receiver) {
tensorstore::execution::set_starting(receiver, []() {});
tensorstore::execution::set_value(receiver, "B");
tensorstore::execution::set_value(receiver, "C");
tensorstore::execution::set_done(receiver);
tensorstore::execution::set_stopping(receiver);
}
};
TEST(AnyFlowSenderTest, MySender) {
std::vector<std::string> log;
tensorstore::AnyFlowSender<int, std::string> sender(MySender{});
tensorstore::execution::submit(std::move(sender),
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(
log, ::testing::ElementsAre("set_starting", "set_value: B",
"set_value: C", "set_done", "set_stopping"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/execution/any_sender.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/execution/any_sender_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
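A sketch of AnySender from the row above: it type-erases any concrete sender with a compatible error/value signature, so senders can be stored or passed around without naming their concrete types. Demo and the string value are illustrative.
#include <string>
#include <utility>
#include <vector>

#include "tensorstore/util/execution/any_sender.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/execution/sender.h"
#include "tensorstore/util/execution/sender_testutil.h"

void Demo() {
  tensorstore::AnySender<int, std::string> sender(
      tensorstore::ValueSender<std::string>{"hello"});
  std::vector<std::string> log;
  tensorstore::execution::submit(std::move(sender),
                                 tensorstore::LoggingReceiver{&log});
  // log: "set_value: hello"
}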
4b03c7db-37a2-41d3-bf1b-d4b8f63cfab0 | cpp | google/tensorstore | any_receiver | tensorstore/util/execution/any_receiver.h | tensorstore/util/execution/any_receiver_test.cc | #ifndef TENSORSTORE_UTIL_EXECUTION_ANY_RECEIVER_H_
#define TENSORSTORE_UTIL_EXECUTION_ANY_RECEIVER_H_
#include <utility>
#include "absl/base/attributes.h"
#include "tensorstore/internal/poly/poly.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/execution/sender.h"
namespace tensorstore {
using AnyCancelReceiver = poly::Poly<0, false, void()>;
namespace internal_sender {
template <typename E, typename... V>
using ReceiverPoly = poly::Poly<sizeof(void*) * 2, false,
void(internal_execution::set_value_t, V...),
void(internal_execution::set_error_t, E),
void(internal_execution::set_cancel_t)>;
template <typename E, typename... V>
using FlowReceiverPoly =
poly::Poly<sizeof(void*) * 2, false,
void(internal_execution::set_starting_t, AnyCancelReceiver up),
void(internal_execution::set_value_t, V...),
void(internal_execution::set_done_t),
void(internal_execution::set_error_t, E),
void(internal_execution::set_stopping_t)>;
}
template <typename E, typename... V>
class AnyReceiver : public internal_sender::ReceiverPoly<E, V...> {
using Base = internal_sender::ReceiverPoly<E, V...>;
public:
using Base::Base;
AnyReceiver() : Base(NullReceiver{}) {}
ABSL_ATTRIBUTE_ALWAYS_INLINE void set_value(V... v) {
(*this)(internal_execution::set_value_t{}, std::forward<V>(v)...);
}
ABSL_ATTRIBUTE_ALWAYS_INLINE void set_error(E e) {
(*this)(internal_execution::set_error_t{}, std::forward<E>(e));
}
ABSL_ATTRIBUTE_ALWAYS_INLINE void set_cancel() {
(*this)(internal_execution::set_cancel_t{});
}
};
template <typename E, typename... V>
class AnyFlowReceiver : public internal_sender::FlowReceiverPoly<E, V...> {
using Base = internal_sender::FlowReceiverPoly<E, V...>;
public:
using Base::Base;
AnyFlowReceiver() : Base(NullReceiver{}) {}
ABSL_ATTRIBUTE_ALWAYS_INLINE void set_starting(AnyCancelReceiver cancel) {
(*this)(internal_execution::set_starting_t{}, std::move(cancel));
}
ABSL_ATTRIBUTE_ALWAYS_INLINE void set_value(V... v) {
(*this)(internal_execution::set_value_t{}, std::forward<V>(v)...);
}
ABSL_ATTRIBUTE_ALWAYS_INLINE void set_done() {
(*this)(internal_execution::set_done_t{});
}
ABSL_ATTRIBUTE_ALWAYS_INLINE void set_error(E e) {
(*this)(internal_execution::set_error_t{}, std::forward<E>(e));
}
ABSL_ATTRIBUTE_ALWAYS_INLINE void set_stopping() {
(*this)(internal_execution::set_stopping_t{});
}
};
}
#endif | #include "tensorstore/util/execution/any_receiver.h"
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/execution/sender.h"
#include "tensorstore/util/execution/sender_testutil.h"
namespace {
TEST(AnyReceiverTest, Construct) {
tensorstore::AnyReceiver<int, std::string> receiver(
tensorstore::NullReceiver{});
}
TEST(AnyReceiverTest, Assignment) {
tensorstore::AnyReceiver<int, std::string> receiver;
receiver = tensorstore::NullReceiver{};
{
tensorstore::NullReceiver tmp{};
receiver = tmp;
}
}
TEST(AnyReceiverTest, NullSetValue) {
tensorstore::AnyReceiver<int, std::string> receiver;
tensorstore::execution::set_value(receiver, "message");
}
TEST(AnyReceiverTest, NullSetError) {
tensorstore::AnyReceiver<int, std::string> receiver;
tensorstore::execution::set_error(receiver, 3);
}
TEST(AnyReceiverTest, NullSetCancel) {
tensorstore::AnyReceiver<int> receiver;
tensorstore::execution::set_cancel(receiver);
}
TEST(AnyReceiverTest, LoggingSetValue) {
std::vector<std::string> log;
tensorstore::AnyReceiver<int, std::string> receiver(
tensorstore::LoggingReceiver{&log});
tensorstore::execution::set_value(receiver, "ok");
EXPECT_THAT(log, ::testing::ElementsAre("set_value: ok"));
}
TEST(AnyReceiverTest, SetErrorInt) {
std::vector<std::string> log;
tensorstore::AnyReceiver<int, std::string> receiver(
tensorstore::LoggingReceiver{&log});
tensorstore::execution::set_error(receiver, 5);
EXPECT_THAT(log, ::testing::ElementsAre("set_error: 5"));
}
TEST(AnyReceiverTest, SetCancel) {
std::vector<std::string> log;
tensorstore::AnyReceiver<int, std::string> receiver(
tensorstore::LoggingReceiver{&log});
tensorstore::execution::set_cancel(receiver);
EXPECT_THAT(log, ::testing::ElementsAre("set_cancel"));
}
TEST(AnyFlowReceiver, Construct) {
tensorstore::AnyFlowReceiver<int, std::string> receiver(
tensorstore::NullReceiver{});
}
TEST(AnyFlowReceiver, Assignment) {
tensorstore::AnyFlowReceiver<int, std::string> receiver;
receiver = tensorstore::NullReceiver{};
{
tensorstore::NullReceiver tmp{};
receiver = tmp;
}
}
TEST(AnyFlowReceiver, NullSetStarting) {
tensorstore::AnyFlowReceiver<int> receiver;
tensorstore::execution::set_starting(receiver, []() {});
}
TEST(AnyFlowReceiver, NullSetValue) {
tensorstore::AnyFlowReceiver<int, std::string> receiver;
tensorstore::execution::set_value(receiver, "messaage");
}
TEST(AnyFlowReceiver, NullSetError) {
tensorstore::AnyFlowReceiver<int, std::string> receiver;
tensorstore::execution::set_error(receiver, 3);
}
TEST(AnyFlowReceiver, NullSetDone) {
tensorstore::AnyFlowReceiver<int> receiver;
tensorstore::execution::set_done(receiver);
}
TEST(AnyFlowReceiver, NullSetStopping) {
tensorstore::AnyFlowReceiver<int> receiver;
tensorstore::execution::set_stopping(receiver);
}
TEST(AnyFlowReceiver, LoggingSetValue) {
std::vector<std::string> log;
tensorstore::AnyFlowReceiver<int, std::string> receiver(
tensorstore::LoggingReceiver{&log});
tensorstore::execution::set_starting(receiver, []() {});
tensorstore::execution::set_value(receiver, "A");
tensorstore::execution::set_value(receiver, "B");
tensorstore::execution::set_done(receiver);
tensorstore::execution::set_stopping(receiver);
EXPECT_THAT(
log, ::testing::ElementsAre("set_starting", "set_value: A",
"set_value: B", "set_done", "set_stopping"));
}
TEST(AnyFlowReceiver, LoggingSetError) {
std::vector<std::string> log;
tensorstore::AnyFlowReceiver<int, std::string> receiver(
tensorstore::LoggingReceiver{&log});
tensorstore::execution::set_starting(receiver, []() {});
tensorstore::execution::set_value(receiver, "A");
tensorstore::execution::set_error(receiver, 5);
tensorstore::execution::set_done(receiver);
tensorstore::execution::set_stopping(receiver);
EXPECT_THAT(
log, ::testing::ElementsAre("set_starting", "set_value: A",
"set_error: 5", "set_done", "set_stopping"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/execution/any_receiver.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/execution/any_receiver_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
c4fda505-d7d6-4f22-b4e5-da8367c55fed | cpp | google/tensorstore | std_array | tensorstore/internal/json_binding/std_array.h | tensorstore/internal/json_binding/std_array_test.cc | #ifndef TENSORSTORE_INTERNAL_JSON_BINDING_STD_ARRAY_H_
#define TENSORSTORE_INTERNAL_JSON_BINDING_STD_ARRAY_H_
#include <stddef.h>
#include <array>
#include <iterator>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/json/array.h"
#include "tensorstore/internal/json/json.h"
#include "tensorstore/internal/json/value_as.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_json_binding {
template <bool kDiscardEmpty, typename GetSize, typename SetSize,
typename GetElement, typename ElementBinder>
struct ArrayBinderImpl {
GetSize get_size;
SetSize set_size;
GetElement get_element;
ElementBinder element_binder;
template <typename Loading, typename Options, typename Obj>
absl::Status operator()(Loading is_loading, const Options& options, Obj* obj,
::nlohmann::json* j) const {
::nlohmann::json::array_t* j_arr;
if constexpr (is_loading) {
if constexpr (kDiscardEmpty) {
if (j->is_discarded()) return absl::OkStatus();
}
j_arr = j->get_ptr<::nlohmann::json::array_t*>();
if (!j_arr) {
return internal_json::ExpectedError(*j, "array");
}
const size_t size = j_arr->size();
TENSORSTORE_RETURN_IF_ERROR(
internal::InvokeForStatus(set_size, *obj, size));
} else {
const auto size = get_size(*obj);
if constexpr (kDiscardEmpty) {
if (size == 0) {
*j = ::nlohmann::json(::nlohmann::json::value_t::discarded);
return absl::OkStatus();
}
}
*j = ::nlohmann::json::array_t(size);
j_arr = j->get_ptr<::nlohmann::json::array_t*>();
}
for (size_t i = 0, size = j_arr->size(); i < size; ++i) {
auto&& element = get_element(*obj, i);
TENSORSTORE_RETURN_IF_ERROR(
element_binder(is_loading, options, &element, &(*j_arr)[i]),
MaybeAnnotateStatus(
_, tensorstore::StrCat("Error ",
is_loading ? "parsing" : "converting",
" value at position ", i)));
}
return absl::OkStatus();
}
};
template <typename GetSize, typename SetSize, typename GetElement,
typename ElementBinder = decltype(DefaultBinder<>)>
constexpr auto Array(GetSize get_size, SetSize set_size, GetElement get_element,
ElementBinder element_binder = DefaultBinder<>) {
return ArrayBinderImpl<false, GetSize, SetSize, GetElement, ElementBinder>{
std::move(get_size), std::move(set_size), std::move(get_element),
std::move(element_binder)};
}
template <typename GetSize, typename SetSize, typename GetElement,
typename ElementBinder = decltype(DefaultBinder<>)>
constexpr auto OptionalArray(GetSize get_size, SetSize set_size,
GetElement get_element,
ElementBinder element_binder = DefaultBinder<>) {
return ArrayBinderImpl<true, GetSize, SetSize, GetElement, ElementBinder>{
std::move(get_size), std::move(set_size), std::move(get_element),
std::move(element_binder)};
}
template <typename ElementBinder = decltype(DefaultBinder<>)>
constexpr auto Array(ElementBinder element_binder = DefaultBinder<>) {
return internal_json_binding::Array(
[](auto& c) { return c.size(); },
[](auto& c, size_t size) { c.resize(size); },
[](auto& c, size_t i) -> decltype(auto) { return c[i]; }, element_binder);
}
template <typename ElementBinder = decltype(DefaultBinder<>)>
constexpr auto OptionalArray(ElementBinder element_binder = DefaultBinder<>) {
return internal_json_binding::OptionalArray(
[](auto& c) { return c.size(); },
[](auto& c, size_t size) { c.resize(size); },
[](auto& c, size_t i) -> decltype(auto) { return c[i]; }, element_binder);
}
template <typename ElementBinder = decltype(DefaultBinder<>)>
constexpr auto FixedSizeArray(ElementBinder element_binder = DefaultBinder<>) {
return internal_json_binding::Array(
[](auto& c) { return std::size(c); },
[](auto& c, size_t new_size) {
return internal_json::JsonValidateArrayLength(new_size, std::size(c));
},
[](auto& c, size_t i) -> decltype(auto) { return c[i]; }, element_binder);
}
namespace array_binder {
inline constexpr auto ArrayBinder = [](auto is_loading, const auto& options,
auto* obj, auto* j) -> absl::Status {
return internal_json_binding::Array()(is_loading, options, obj, j);
};
}
namespace fixed_size_array_binder {
inline constexpr auto FixedSizeArrayBinder = [](auto is_loading,
const auto& options, auto* obj,
auto* j) -> absl::Status {
return internal_json_binding::FixedSizeArray()(is_loading, options, obj, j);
};
}
using array_binder::ArrayBinder;
using fixed_size_array_binder::FixedSizeArrayBinder;
template <typename T, typename Allocator>
constexpr inline auto DefaultBinder<std::vector<T, Allocator>> = ArrayBinder;
template <typename T, size_t N>
constexpr inline auto DefaultBinder<std::array<T, N>> = FixedSizeArrayBinder;
template <typename T, std::ptrdiff_t Extent>
constexpr inline auto DefaultBinder<tensorstore::span<T, Extent>> =
FixedSizeArrayBinder;
}
}
#endif | #include "tensorstore/internal/json_binding/std_array.h"
#include <array>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include <nlohmann/json_fwd.hpp>
#include "tensorstore/internal/json_binding/gtest.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/json_serialization_options_base.h"
#include "tensorstore/util/status_testutil.h"
using ::tensorstore::MatchesStatus;
namespace jb = tensorstore::internal_json_binding;
namespace {
TEST(JsonBindingTest, Array) {
const auto binder = jb::Array();
tensorstore::TestJsonBinderRoundTrip<std::vector<int>>(
{
{{1, 2, 3}, {1, 2, 3}},
},
binder);
tensorstore::TestJsonBinderFromJson<std::vector<int>>(
{
{{1, 2, "a"},
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Error parsing value at position 2: Expected integer .*")},
},
binder);
}
TEST(JsonBindingTest, FixedSizeArray) {
const auto binder = jb::FixedSizeArray();
tensorstore::TestJsonBinderRoundTrip<std::array<int, 3>>(
{
{{{1, 2, 3}}, {1, 2, 3}},
},
binder);
tensorstore::TestJsonBinderFromJson<std::array<int, 3>>(
{
{{1, 2, 3, 4},
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Array has length 4 but should have length 3")},
},
binder);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json_binding/std_array.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json_binding/std_array_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
77850688-3555-4dde-8343-88ffd0e70fbc | cpp | google/tensorstore | std_tuple | tensorstore/internal/json_binding/std_tuple.h | tensorstore/internal/json_binding/std_tuple_test.cc | #ifndef TENSORSTORE_INTERNAL_JSON_BINDING_STD_TUPLE_H_
#define TENSORSTORE_INTERNAL_JSON_BINDING_STD_TUPLE_H_
#include <stddef.h>
#include <tuple>
#include <type_traits>
#include <utility>
#include <vector>
#include "absl/meta/type_traits.h"
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/json/json.h"
#include "tensorstore/internal/json/value_as.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_json_binding {
inline absl::Status MaybeAnnotateTupleElementError(absl::Status status,
size_t i, bool is_loading) {
return status.ok()
? status
: MaybeAnnotateStatus(
status, tensorstore::StrCat(
"Error ", is_loading ? "parsing" : "converting",
" value at position ", i));
}
template <bool IsLoading>
Result<::nlohmann::json::array_t*> EnsureJsonTupleRepresentationImpl(
std::integral_constant<bool, IsLoading> is_loading, ::nlohmann::json* j,
size_t n) {
if constexpr (is_loading) {
auto* array_ptr = j->get_ptr<::nlohmann::json::array_t*>();
if (!array_ptr) return internal_json::ExpectedError(*j, "array");
TENSORSTORE_RETURN_IF_ERROR(
internal_json::JsonValidateArrayLength(array_ptr->size(), n));
return array_ptr;
} else {
*j = ::nlohmann::json::array_t(n);
return j->get_ptr<::nlohmann::json::array_t*>();
}
}
template <size_t... Is, typename... ElementBinder>
constexpr auto TupleJsonBinderImpl(std::index_sequence<Is...>,
ElementBinder... element_binder) {
return [=](auto is_loading, const auto& options, auto* obj,
::nlohmann::json* j) -> absl::Status {
TENSORSTORE_ASSIGN_OR_RETURN(
::nlohmann::json::array_t * array_ptr,
EnsureJsonTupleRepresentationImpl(is_loading, j, sizeof...(Is)));
if (absl::Status status;
        !(((status = element_binder(is_loading, options, &std::get<Is>(*obj),
&(*array_ptr)[Is]))
.ok() ||
((status = MaybeAnnotateTupleElementError(status, Is, is_loading)),
false)) &&
...)) {
return status;
}
return absl::OkStatus();
};
}
template <size_t... Is>
constexpr auto TupleDefaultJsonBinderImpl(std::index_sequence<Is...>) {
return [](auto is_loading, const auto& options, auto* obj,
::nlohmann::json* j) -> absl::Status {
TENSORSTORE_ASSIGN_OR_RETURN(
::nlohmann::json::array_t * array_ptr,
EnsureJsonTupleRepresentationImpl(is_loading, j, sizeof...(Is)));
using std::get;
if (absl::Status status;
        !(((status = DefaultBinder<>(is_loading, options, &get<Is>(*obj),
&(*array_ptr)[Is]))
.ok() ||
((status = MaybeAnnotateTupleElementError(status, Is, is_loading)),
false)) &&
...)) {
return status;
}
return absl::OkStatus();
};
}
template <size_t... Is, typename... ElementBinder>
constexpr auto HeterogeneousArrayJsonBinderImpl(
std::index_sequence<Is...>, ElementBinder... element_binder) {
return [=](auto is_loading, const auto& options, auto* obj,
::nlohmann::json* j) -> absl::Status {
TENSORSTORE_ASSIGN_OR_RETURN(
::nlohmann::json::array_t * array_ptr,
EnsureJsonTupleRepresentationImpl(is_loading, j, sizeof...(Is)));
if (absl::Status status;
        !(((status = element_binder(is_loading, options, obj, &(*array_ptr)[Is]))
.ok() ||
((status = MaybeAnnotateTupleElementError(status, Is, is_loading)),
false)) &&
...)) {
return status;
}
return absl::OkStatus();
};
}
template <typename... ElementBinder>
constexpr auto Tuple(ElementBinder... element_binder) {
return TupleJsonBinderImpl(std::index_sequence_for<ElementBinder...>{},
std::move(element_binder)...);
}
constexpr auto Tuple() {
return [](auto is_loading, const auto& options, auto* obj, auto* j) {
constexpr size_t N =
std::tuple_size_v<absl::remove_cvref_t<decltype(*obj)>>;
return TupleDefaultJsonBinderImpl(std::make_index_sequence<N>{})(
is_loading, options, obj, j);
};
}
template <typename... ElementBinder>
constexpr auto HeterogeneousArray(ElementBinder... element_binder) {
return [=](auto is_loading, const auto& options, auto* obj,
::nlohmann::json* j) {
TENSORSTORE_ASSIGN_OR_RETURN(::nlohmann::json::array_t * array_ptr,
EnsureJsonTupleRepresentationImpl(
is_loading, j, sizeof...(ElementBinder)));
absl::Status status;
size_t i = 0;
[[maybe_unused]] bool ok =
(((status =
element_binder(is_loading, options, obj, &(*array_ptr)[i++]))
.ok() ||
((status = MaybeAnnotateTupleElementError(status, i - 1, is_loading)),
false)) &&
...);
return status;
};
}
template <typename... T>
constexpr inline auto DefaultBinder<std::tuple<T...>> = Tuple();
template <typename T, typename U>
constexpr inline auto DefaultBinder<std::pair<T, U>> = Tuple();
}
}
#endif | #include "tensorstore/internal/json_binding/std_tuple.h"
#include <string>
#include <tuple>
#include <utility>
#include <gtest/gtest.h>
#include <nlohmann/json_fwd.hpp>
#include "tensorstore/internal/json_binding/gtest.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/json_serialization_options_base.h"
#include "tensorstore/util/status_testutil.h"
namespace jb = tensorstore::internal_json_binding;
namespace {
TEST(TupleDefaultJsonBinderTest, RoundTrip) {
tensorstore::TestJsonBinderRoundTrip<std::pair<int, int>>({
{{5, 5}, {5, 5}},
{{5, 3}, {5, 3}},
});
tensorstore::TestJsonBinderRoundTrip<std::tuple<int, int, std::string>>({
{{5, 5, "a"}, {5, 5, "a"}},
{{5, 3, "b"}, {5, 3, "b"}},
});
}
TEST(TupleJsonBinderTest, RoundTrip) {
const auto binder =
jb::Tuple(jb::Integer<int>(0, 9), jb::Integer<int>(10, 19));
tensorstore::TestJsonBinderRoundTrip<std::pair<int, int>>(
{
{{5, 15}, {5, 15}},
{{5, 13}, {5, 13}},
},
binder);
}
TEST(HeterogeneousArrayJsonBinderTest, RoundTrip) {
struct X {
int a;
std::string b;
};
tensorstore::TestJsonBinderRoundTripJsonOnly<X>(
{
{5, "a"},
{5, "b"},
},
jb::HeterogeneousArray(jb::Projection<&X::a>(), jb::Projection<&X::b>()));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json_binding/std_tuple.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json_binding/std_tuple_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
23427dcf-22eb-4675-9011-32fc7381c6eb | cpp | google/tensorstore | apply_members | tensorstore/util/apply_members/apply_members.h | tensorstore/util/apply_members/apply_members_test.cc | #ifndef TENSORSTORE_UTIL_APPLY_MEMBERS_APPLY_MEMBERS_H_
#define TENSORSTORE_UTIL_APPLY_MEMBERS_APPLY_MEMBERS_H_
#include <type_traits>
#include <utility>
#include "absl/base/attributes.h"
namespace half_float {
class half;
}
namespace tensorstore {
class BFloat16;
template <typename T, typename SFINAE = void>
struct ApplyMembers {
using NotSpecialized = void;
};
namespace internal_apply_members {
struct IgnoreMembers {
template <typename... T>
constexpr void operator()(const T&...) const {}
};
template <typename T, typename SFINAE = void>
struct SupportsApplyMembersImpl : public std::true_type {};
template <typename T>
struct SupportsApplyMembersImpl<T, typename ApplyMembers<T>::NotSpecialized>
: public std::false_type {};
template <typename T>
using MemberApplyMembersCallExpr = decltype(T::ApplyMembers(
std::declval<const T&>(), internal_apply_members::IgnoreMembers{}));
}
template <typename T>
struct ApplyMembers<
T,
std::enable_if_t<
!std::is_empty_v<T>,
std::void_t<internal_apply_members::MemberApplyMembersCallExpr<T>>>> {
template <typename X, typename F>
ABSL_ATTRIBUTE_ALWAYS_INLINE static constexpr auto Apply(X&& x, F f) {
return T::ApplyMembers(x, std::move(f));
}
};
template <typename T>
struct ApplyMembers<T, std::enable_if_t<std::is_empty_v<T>>> {
template <typename X, typename F>
ABSL_ATTRIBUTE_ALWAYS_INLINE static constexpr auto Apply(X&& x, F f) {
return f();
}
};
template <typename T>
constexpr inline bool SupportsApplyMembers =
internal_apply_members::SupportsApplyMembersImpl<T>::value;
template <typename T, typename SFINAE = void>
constexpr inline bool SerializeUsingMemcpy =
std::is_integral_v<T> || std::is_floating_point_v<T> || std::is_enum_v<T>;
template <>
constexpr inline bool SerializeUsingMemcpy<BFloat16> = true;
template <>
constexpr inline bool SerializeUsingMemcpy<half_float::half> = true;
}
#endif | #include "tensorstore/util/apply_members/apply_members.h"
#include <array>
#include <complex>
#include <tuple>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/util/apply_members/std_array.h"
#include "tensorstore/util/apply_members/std_complex.h"
#include "tensorstore/util/apply_members/std_pair.h"
#include "tensorstore/util/apply_members/std_tuple.h"
namespace {
struct Foo {
int x, y;
static constexpr auto ApplyMembers = [](auto&& x, auto f) {
return f(x.x, x.y);
};
};
struct Bar {};
struct Baz {
int x, y;
};
[[maybe_unused]] void TestFooApplyMembers() {
Foo value;
tensorstore::ApplyMembers<Foo>::Apply(value, [&](int& x, int& y) {});
}
[[maybe_unused]] void TestBarApplyMembers() {
Bar value;
tensorstore::ApplyMembers<Bar>::Apply(value, [&]() {});
}
[[maybe_unused]] void TestTupleApplyMembers() {
using T = std::tuple<int, double>;
T value;
tensorstore::ApplyMembers<T>::Apply(value, [&](int& x, double& y) {});
}
[[maybe_unused]] void TestStdArrayApplyMembers() {
using T = std::array<int, 3>;
T value;
tensorstore::ApplyMembers<T>::Apply(value, [&](int& x, int& y, int& z) {});
}
[[maybe_unused]] void TestArrayApplyMembers() {
using T = int[3];
T value;
tensorstore::ApplyMembers<T>::Apply(value, [&](int& x, int& y, int& z) {});
}
[[maybe_unused]] void TestPairApplyMembers() {
using T = std::pair<int, double>;
T value;
tensorstore::ApplyMembers<T>::Apply(value, [&](int& x, double& y) {});
}
[[maybe_unused]] void TestComplexApplyMembers() {
using T = std::complex<double>;
T value;
tensorstore::ApplyMembers<T>::Apply(value, [&](double& r, double& i) {});
}
static_assert(tensorstore::SupportsApplyMembers<Foo>);
static_assert(tensorstore::SupportsApplyMembers<Bar>);
static_assert(tensorstore::SupportsApplyMembers<std::complex<float>>);
static_assert(tensorstore::SupportsApplyMembers<std::pair<int, double>>);
static_assert(tensorstore::SupportsApplyMembers<std::tuple<>>);
static_assert(tensorstore::SupportsApplyMembers<std::tuple<int>>);
static_assert(tensorstore::SupportsApplyMembers<std::tuple<int, double>>);
static_assert(tensorstore::SupportsApplyMembers<std::array<int, 3>>);
static_assert(!tensorstore::SupportsApplyMembers<Baz>);
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/apply_members/apply_members.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/apply_members/apply_members_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
12fc993c-78e4-40a0-8d2e-c84b4ff0b025 | cpp | google/tensorstore | std_optional | tensorstore/internal/estimate_heap_usage/std_optional.h | tensorstore/internal/json_binding/std_optional_test.cc | #ifndef TENSORSTORE_INTERNAL_ESTIMATE_HEAP_USAGE_STD_OPTIONAL_H_
#define TENSORSTORE_INTERNAL_ESTIMATE_HEAP_USAGE_STD_OPTIONAL_H_
#include <optional>
#include "tensorstore/internal/estimate_heap_usage/estimate_heap_usage.h"
namespace tensorstore {
namespace internal {
template <typename T>
struct HeapUsageEstimator<std::optional<T>> {
static size_t EstimateHeapUsage(const std::optional<T>& v, size_t max_depth) {
if (!v) return 0;
return internal::EstimateHeapUsage(*v, max_depth);
}
static constexpr bool MayUseHeapMemory() {
return internal::MayUseHeapMemory<T>;
}
};
}
}
#endif | #include "tensorstore/internal/json_binding/std_optional.h"
#include <optional>
#include <string>
#include <type_traits>
#include <utility>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/gtest.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/json_serialization_options_base.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status_testutil.h"
namespace jb = tensorstore::internal_json_binding;
namespace {
TEST(JsonBindingTest, Optional) {
tensorstore::TestJsonBinderRoundTrip<std::optional<int>>({
{3, ::nlohmann::json(3)},
{std::nullopt, ::nlohmann::json(::nlohmann::json::value_t::discarded)},
});
}
TEST(JsonBindingTest, OptionalWithNull) {
auto binder = jb::OptionalWithNull();
tensorstore::TestJsonBinderRoundTrip<std::optional<int>>(
{
{3, ::nlohmann::json(3)},
{std::nullopt, ::nlohmann::json(nullptr)},
},
binder);
}
TEST(JsonBindingTest, OptionalExplicitNullopt) {
const auto binder =
jb::Optional(jb::DefaultBinder<>, [] { return "nullopt"; });
tensorstore::TestJsonBinderRoundTrip<std::optional<int>>(
{
{3, 3},
{std::nullopt, "nullopt"},
},
binder);
}
TEST(JsonBindingTest, OptionalResult) {
::nlohmann::json j;
tensorstore::Result<int> x(absl::UnknownError("x"));
j = 3;
EXPECT_TRUE(jb::Optional()(std::false_type{}, jb::NoOptions{}, &x, &j).ok());
EXPECT_TRUE(j.is_discarded());
j = 4;
EXPECT_TRUE(jb::Optional()(std::true_type{}, jb::NoOptions{}, &x, &j).ok());
EXPECT_TRUE(x.has_value());
EXPECT_EQ(4, x.value());
j = ::nlohmann::json::value_t::discarded;
EXPECT_TRUE(jb::Optional()(std::false_type{}, jb::NoOptions{}, &x, &j).ok());
EXPECT_FALSE(j.is_discarded());
EXPECT_EQ(4, j);
j = ::nlohmann::json::value_t::discarded;
EXPECT_TRUE(jb::Optional()(std::true_type{}, jb::NoOptions{}, &x, &j).ok());
EXPECT_TRUE(x.has_value());
EXPECT_EQ(4, x.value());
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/estimate_heap_usage/std_optional.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json_binding/std_optional_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
53ac2892-65fc-4c7b-870d-4c1419791a50 | cpp | google/tensorstore | driver_impl | tensorstore/driver/zarr/driver_impl.h | tensorstore/driver/zarr/driver_impl_test.cc | #ifndef TENSORSTORE_DRIVER_ZARR_DRIVER_IMPL_H_
#define TENSORSTORE_DRIVER_ZARR_DRIVER_IMPL_H_
#include <stddef.h>
#include <string>
#include <string_view>
#include "tensorstore/driver/kvs_backed_chunk_driver.h"
#include "tensorstore/driver/zarr/metadata.h"
#include "tensorstore/driver/zarr/spec.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/cache/chunk_cache.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/util/garbage_collection/fwd.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal_zarr {
std::string EncodeChunkIndices(span<const Index> indices,
DimensionSeparator dimension_separator);
class MetadataCache : public internal_kvs_backed_chunk_driver::MetadataCache {
using Base = internal_kvs_backed_chunk_driver::MetadataCache;
public:
using Base::Base;
std::string GetMetadataStorageKey(std::string_view entry_key) override;
Result<MetadataPtr> DecodeMetadata(std::string_view entry_key,
absl::Cord encoded_metadata) override;
Result<absl::Cord> EncodeMetadata(std::string_view entry_key,
const void* metadata) override;
};
class ZarrDriverSpec
: public internal::RegisteredDriverSpec<
ZarrDriverSpec,
internal_kvs_backed_chunk_driver::KvsDriverSpec> {
public:
using Base = internal::RegisteredDriverSpec<
ZarrDriverSpec,
internal_kvs_backed_chunk_driver::KvsDriverSpec>;
constexpr static char id[] = "zarr";
ZarrPartialMetadata partial_metadata;
SelectedField selected_field;
std::string metadata_key;
constexpr static auto ApplyMembers = [](auto& x, auto f) {
return f(internal::BaseCast<KvsDriverSpec>(x), x.partial_metadata,
x.selected_field, x.metadata_key);
};
absl::Status ApplyOptions(SpecOptions&& options) override;
Result<SpecRankAndFieldInfo> GetSpecInfo() const;
TENSORSTORE_DECLARE_JSON_DEFAULT_BINDER(ZarrDriverSpec,
JsonSerializationOptions,
JsonSerializationOptions,
::nlohmann::json::object_t)
Result<IndexDomain<>> GetDomain() const override;
Result<CodecSpec> GetCodec() const override;
Result<ChunkLayout> GetChunkLayout() const override;
Result<SharedArray<const void>> GetFillValue(
IndexTransformView<> transform) const override;
Future<internal::Driver::Handle> Open(
DriverOpenRequest request) const override;
};
class DataCache : public internal_kvs_backed_chunk_driver::DataCache {
using Base = internal_kvs_backed_chunk_driver::DataCache;
public:
explicit DataCache(Initializer&& initializer, std::string key_prefix,
DimensionSeparator dimension_separator,
std::string metadata_key);
const ZarrMetadata& metadata() {
return *static_cast<const ZarrMetadata*>(initial_metadata().get());
}
absl::Status ValidateMetadataCompatibility(
const void* existing_metadata_ptr, const void* new_metadata_ptr) override;
void GetChunkGridBounds(const void* metadata_ptr, MutableBoxView<> bounds,
DimensionSet& implicit_lower_bounds,
DimensionSet& implicit_upper_bounds) override;
Result<std::shared_ptr<const void>> GetResizedMetadata(
const void* existing_metadata, span<const Index> new_inclusive_min,
span<const Index> new_exclusive_max) override;
static internal::ChunkGridSpecification GetChunkGridSpecification(
const ZarrMetadata& metadata);
Result<absl::InlinedVector<SharedArray<const void>, 1>> DecodeChunk(
span<const Index> chunk_indices, absl::Cord data) override;
Result<absl::Cord> EncodeChunk(
span<const Index> chunk_indices,
span<const SharedArray<const void>> component_arrays) override;
std::string GetChunkStorageKey(span<const Index> cell_indices) override;
absl::Status GetBoundSpecData(
internal_kvs_backed_chunk_driver::KvsDriverSpec& spec_base,
const void* metadata_ptr, size_t component_index) override;
Result<ChunkLayout> GetChunkLayoutFromMetadata(
const void* metadata_ptr, size_t component_index) override;
std::string GetBaseKvstorePath() override;
std::string key_prefix_;
DimensionSeparator dimension_separator_;
std::string metadata_key_;
};
class ZarrDriver;
using ZarrDriverBase = internal_kvs_backed_chunk_driver::RegisteredKvsDriver<
ZarrDriver, ZarrDriverSpec, DataCache,
internal::ChunkCacheReadWriteDriverMixin<
ZarrDriver, internal_kvs_backed_chunk_driver::KvsChunkedDriverBase>>;
class ZarrDriver : public ZarrDriverBase {
using Base = ZarrDriverBase;
public:
using Base::Base;
class OpenState;
const ZarrMetadata& metadata() const {
return *static_cast<const ZarrMetadata*>(
this->cache()->initial_metadata().get());
}
Result<CodecSpec> GetCodec() override;
Result<SharedArray<const void>> GetFillValue(
IndexTransformView<> transform) override;
Future<ArrayStorageStatistics> GetStorageStatistics(
GetStorageStatisticsRequest request) override;
};
}
}
TENSORSTORE_DECLARE_GARBAGE_COLLECTION_SPECIALIZATION(
tensorstore::internal_zarr::ZarrDriver)
#endif | #include "tensorstore/driver/zarr/driver_impl.h"
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/driver/kvs_backed_chunk_driver_impl.h"
#include "tensorstore/driver/zarr/metadata.h"
#include "tensorstore/driver/zarr/spec.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/open.h"
#include "tensorstore/resize_options.h"
#include "tensorstore/tensorstore.h"
#include "tensorstore/transaction.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::Index;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::kImplicit;
using ::tensorstore::MatchesStatus;
using ::tensorstore::Result;
using ::tensorstore::span;
using ::tensorstore::TransactionMode;
using ::tensorstore::internal_kvs_backed_chunk_driver::ResizeParameters;
using ::tensorstore::internal_zarr::DimensionSeparator;
using ::tensorstore::internal_zarr::ZarrDriver;
using ::tensorstore::internal_zarr::ZarrMetadata;
template <typename... Option>
Result<tensorstore::IndexTransform<>> ResolveBoundsFromMetadata(
const ZarrMetadata& metadata, std::string field,
tensorstore::IndexTransform<> transform,
tensorstore::ResolveBoundsOptions options) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto store,
tensorstore::Open({
{"driver", "zarr"},
{"kvstore", {{"driver", "memory"}}},
{"metadata", ::nlohmann::json(metadata)},
{"field", field},
{"create", true},
})
.result());
return tensorstore::internal::TensorStoreAccess::handle(store)
.driver->ResolveBounds({{}, transform, options})
.result();
}
Result<ResizeParameters> GetResizeParameters(
const ZarrMetadata& metadata, std::string field,
tensorstore::IndexTransformView<> transform,
span<const Index> inclusive_min, span<const Index> exclusive_max,
tensorstore::ResizeOptions options,
TransactionMode transaction_mode = TransactionMode::no_transaction_mode) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto store,
tensorstore::Open({
{"driver", "zarr"},
{"kvstore", {{"driver", "memory"}}},
{"metadata", ::nlohmann::json(metadata)},
{"field", field},
{"create", true},
})
.result());
auto driver = tensorstore::internal::dynamic_pointer_cast<ZarrDriver>(
tensorstore::internal::TensorStoreAccess::handle(store).driver);
return tensorstore::internal_kvs_backed_chunk_driver::GetResizeParameters(
driver->cache(), &metadata, driver->component_index(), transform,
inclusive_min, exclusive_max, options, transaction_mode);
}
TEST(EncodeChunkIndicesTest, DotSeparated) {
EXPECT_EQ("1.2.3", EncodeChunkIndices(span<const Index>({1, 2, 3}),
DimensionSeparator::kDotSeparated));
}
TEST(EncodeChunkIndicesTest, SlashSeparated) {
EXPECT_EQ("1/2/3", EncodeChunkIndices(span<const Index>({1, 2, 3}),
DimensionSeparator::kSlashSeparated));
}
TEST(ResolveBoundsFromMetadataTest, Basic) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto metadata, ZarrMetadata::FromJson({
{"zarr_format", 2},
{"order", "C"},
{"filters", nullptr},
{"fill_value", nullptr},
{"compressor", nullptr},
{"dtype", "<i2"},
{"shape", {100, 100}},
{"chunks", {3, 2}},
}));
EXPECT_THAT(ResolveBoundsFromMetadata(
metadata, "",
tensorstore::IdentityTransform(2),
{}),
(tensorstore::IndexTransformBuilder<>(2, 2)
.input_origin({0, 0})
.input_shape({100, 100})
.implicit_upper_bounds({1, 1})
.output_identity_transform()
.Finalize()
.value()));
}
TEST(ResolveBoundsFromMetadataTest, FixResizableBoundsSuccess) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto metadata, ZarrMetadata::FromJson({
{"zarr_format", 2},
{"order", "C"},
{"filters", nullptr},
{"fill_value", nullptr},
{"compressor", nullptr},
{"dtype", "<i2"},
{"shape", {100, 100}},
{"chunks", {3, 2}},
}));
tensorstore::ResolveBoundsOptions options;
options.Set(tensorstore::fix_resizable_bounds).IgnoreError();
EXPECT_THAT(ResolveBoundsFromMetadata(
metadata, "",
tensorstore::IdentityTransform(2),
options),
(tensorstore::IndexTransformBuilder<>(2, 2)
.input_origin({0, 0})
.input_shape({100, 100})
.output_identity_transform()
.Finalize()
.value()));
}
TEST(ResolveBoundsFromMetadataTest, FixResizableBoundsFailure) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto metadata, ZarrMetadata::FromJson({
{"zarr_format", 2},
{"order", "C"},
{"filters", nullptr},
{"fill_value", nullptr},
{"compressor", nullptr},
{"dtype", "<i2"},
{"shape", {100, 100}},
{"chunks", {3, 2}},
}));
tensorstore::ResolveBoundsOptions options;
options.Set(tensorstore::fix_resizable_bounds).IgnoreError();
EXPECT_THAT(ResolveBoundsFromMetadata(
metadata, "",
tensorstore::IdentityTransform(span<const Index>({200, 100})),
options),
MatchesStatus(absl::StatusCode::kOutOfRange));
}
TEST(ResolveBoundsFromMetadataTest, MultipleFieldsWithFieldShape) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto metadata, ZarrMetadata::FromJson({
{"zarr_format", 2},
{"order", "C"},
{"filters", nullptr},
{"fill_value", nullptr},
{"compressor", nullptr},
{"dtype",
{
{"x", "<i2", {2, 3}},
{"y", "<i4", {4}},
}},
{"shape", {100, 100}},
{"chunks", {3, 2}},
}));
EXPECT_THAT(
ResolveBoundsFromMetadata(
metadata, "x",
tensorstore::IdentityTransform(4), {}),
(tensorstore::IndexTransformBuilder<>(4, 4)
.input_origin({0, 0, 0, 0})
.input_shape({100, 100, 2, 3})
.implicit_upper_bounds({1, 1, 0, 0})
.output_identity_transform()
.Finalize()
.value()));
EXPECT_THAT(
ResolveBoundsFromMetadata(
metadata, "y",
tensorstore::IdentityTransform(3), {}),
(tensorstore::IndexTransformBuilder<>(3, 3)
.input_origin({0, 0, 0})
.input_shape({100, 100, 4})
.implicit_upper_bounds({1, 1, 0})
.output_identity_transform()
.Finalize()
.value()));
}
TEST(GetResizeParametersTest, Basic) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto metadata, ZarrMetadata::FromJson({
{"zarr_format", 2},
{"order", "C"},
{"filters", nullptr},
{"fill_value", nullptr},
{"compressor", nullptr},
{"dtype", "<i2"},
{"shape", {100, 100}},
{"chunks", {3, 2}},
}));
const auto transform = tensorstore::IndexTransformBuilder<>(2, 2)
.input_origin({0, 0})
.input_shape({100, 100})
.implicit_upper_bounds({1, 1})
.output_identity_transform()
.Finalize()
.value();
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto p, GetResizeParameters(metadata,
"", transform,
span<const Index>({kImplicit, kImplicit}),
span<const Index>({kImplicit, 150}), {}));
EXPECT_THAT(p.new_exclusive_max, ::testing::ElementsAre(kImplicit, 150));
EXPECT_THAT(p.exclusive_max_constraint,
::testing::ElementsAre(kImplicit, kImplicit));
EXPECT_FALSE(p.expand_only);
EXPECT_FALSE(p.shrink_only);
}
{
tensorstore::ResizeOptions options;
options.Set(tensorstore::expand_only).IgnoreError();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto p,
GetResizeParameters(metadata,
"", transform,
span<const Index>({kImplicit, kImplicit}),
span<const Index>({kImplicit, 150}), options));
EXPECT_THAT(p.new_exclusive_max, ::testing::ElementsAre(kImplicit, 150));
EXPECT_THAT(p.exclusive_max_constraint,
::testing::ElementsAre(kImplicit, kImplicit));
EXPECT_TRUE(p.expand_only);
EXPECT_FALSE(p.shrink_only);
}
{
tensorstore::ResizeOptions options;
options.Set(tensorstore::shrink_only).IgnoreError();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto p,
GetResizeParameters(metadata,
"", transform,
span<const Index>({kImplicit, kImplicit}),
span<const Index>({kImplicit, 150}), options));
EXPECT_THAT(p.new_exclusive_max, ::testing::ElementsAre(kImplicit, 150));
EXPECT_THAT(p.exclusive_max_constraint,
::testing::ElementsAre(kImplicit, kImplicit));
EXPECT_FALSE(p.expand_only);
EXPECT_TRUE(p.shrink_only);
}
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto p, GetResizeParameters(metadata,
"", transform,
span<const Index>({kImplicit, kImplicit}),
span<const Index>({kImplicit, 150}), {},
TransactionMode::atomic_isolated));
EXPECT_THAT(p.new_exclusive_max, ::testing::ElementsAre(kImplicit, 150));
EXPECT_THAT(p.exclusive_max_constraint, ::testing::ElementsAre(100, 100));
EXPECT_THAT(p.inclusive_min_constraint, ::testing::ElementsAre(0, 0));
EXPECT_FALSE(p.expand_only);
EXPECT_FALSE(p.shrink_only);
}
{
tensorstore::ResizeOptions options;
options.Set(tensorstore::resize_metadata_only).IgnoreError();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto p, GetResizeParameters(metadata,
"", transform,
span<const Index>({kImplicit, kImplicit}),
span<const Index>({kImplicit, 150}),
options, TransactionMode::atomic_isolated));
EXPECT_THAT(p.new_exclusive_max, ::testing::ElementsAre(kImplicit, 150));
EXPECT_THAT(p.exclusive_max_constraint,
::testing::ElementsAre(kImplicit, kImplicit));
EXPECT_FALSE(p.expand_only);
EXPECT_FALSE(p.shrink_only);
}
EXPECT_THAT(
GetResizeParameters(metadata,
"", transform,
span<const Index>({kImplicit, kImplicit}),
span<const Index>({kImplicit, kImplicit}), {}),
MatchesStatus(absl::StatusCode::kAborted));
EXPECT_THAT(
GetResizeParameters(metadata,
"",
tensorstore::IndexTransformBuilder<>(2, 2)
.input_origin({0, 0})
.input_shape({100, 100})
.implicit_lower_bounds({1, 1})
.implicit_upper_bounds({1, 1})
.output_identity_transform()
.Finalize()
.value(),
span<const Index>({2, kImplicit}),
span<const Index>({kImplicit, kImplicit}), {}),
MatchesStatus(absl::StatusCode::kFailedPrecondition));
}
TEST(GetResizeParametersTest, MultipleFields) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto metadata, ZarrMetadata::FromJson({
{"zarr_format", 2},
{"order", "C"},
{"filters", nullptr},
{"fill_value", nullptr},
{"compressor", nullptr},
{"dtype",
{
{"x", "<i2", {2, 3}},
{"y", "<i4", {4}},
}},
{"shape", {100, 100}},
{"chunks", {3, 2}},
}));
const auto transform = tensorstore::IndexTransformBuilder<>(4, 4)
.input_origin({0, 0, 0, 0})
.input_shape({100, 100, 2, 3})
.implicit_lower_bounds({1, 1, 1, 1})
.implicit_upper_bounds({1, 1, 1, 1})
.output_identity_transform()
.Finalize()
.value();
EXPECT_THAT(
GetResizeParameters(
metadata,
"x", transform,
span<const Index>({kImplicit, kImplicit, kImplicit, kImplicit}),
span<const Index>({kImplicit, 150, kImplicit, kImplicit}), {}),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"Resize operation would affect other fields but "
"`resize_tied_bounds` was not specified"));
tensorstore::ResizeOptions options;
options.Set(tensorstore::ResizeMode::resize_tied_bounds).IgnoreError();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto p,
GetResizeParameters(
metadata,
"x", transform,
span<const Index>({kImplicit, kImplicit, kImplicit, kImplicit}),
span<const Index>({kImplicit, 150, kImplicit, kImplicit}), options));
EXPECT_THAT(p.new_exclusive_max, ::testing::ElementsAre(kImplicit, 150));
EXPECT_THAT(p.exclusive_max_constraint,
::testing::ElementsAre(kImplicit, kImplicit));
EXPECT_FALSE(p.expand_only);
EXPECT_FALSE(p.shrink_only);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/zarr/driver_impl.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/zarr/driver_impl_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
850b4321-06d4-460c-9d9b-7b489fe04b50 | cpp | google/tensorstore | container_to_shared | tensorstore/internal/container_to_shared.h | tensorstore/internal/container_to_shared_test.cc | #ifndef TENSORSTORE_INTERNAL_STRING_TO_SHARED_H_
#define TENSORSTORE_INTERNAL_STRING_TO_SHARED_H_
#include <stddef.h>
#include <memory>
#include <utility>
namespace tensorstore {
namespace internal {
template <typename Container>
inline std::shared_ptr<typename Container::value_type>
ContainerToSharedDataPointerWithOffset(Container&& container,
size_t offset = 0) {
auto ptr = std::make_shared<Container>(std::forward<Container>(container));
return std::shared_ptr<typename Container::value_type>(std::move(ptr),
ptr->data() + offset);
}
}
}
#endif | #include "tensorstore/internal/container_to_shared.h"
#include <memory>
#include <string>
#include <string_view>
#include <utility>
#include <gtest/gtest.h>
namespace {
using ::tensorstore::internal::ContainerToSharedDataPointerWithOffset;
TEST(ContainerToSharedDataPointerWithOffsetTest, SmallBuffer) {
std::string small = "hello";
auto ptr = ContainerToSharedDataPointerWithOffset(std::move(small), 2);
small = "aaaaa";
EXPECT_EQ("hello", std::string_view(ptr.get() - 2, 5));
}
TEST(ContainerToSharedDataPointerWithOffsetTest, LargeBuffer) {
std::string large(200, '\0');
for (int i = 0; i < 200; ++i) {
large[i] = i;
}
std::string large_copy = large;
auto* data = large.data();
auto ptr = ContainerToSharedDataPointerWithOffset(std::move(large), 5);
EXPECT_EQ(data + 5, ptr.get());
EXPECT_EQ(large_copy, std::string_view(ptr.get() - 5, 200));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/container_to_shared.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/container_to_shared_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
94567bfa-b355-4273-8c93-21320569b490 | cpp | google/tensorstore | intrusive_ptr | tensorstore/internal/intrusive_ptr.h | tensorstore/internal/intrusive_ptr_test.cc | #ifndef TENSORSTORE_INTERNAL_INTRUSIVE_PTR_H_
#define TENSORSTORE_INTERNAL_INTRUSIVE_PTR_H_
#include <atomic>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <type_traits>
#include <utility>
#include "tensorstore/internal/memory.h"
#include "tensorstore/internal/type_traits.h"
namespace tensorstore {
namespace internal {
template <typename Derived>
class AtomicReferenceCount {
public:
AtomicReferenceCount() = default;
AtomicReferenceCount(size_t initial_ref_count)
: ref_count_(initial_ref_count) {}
AtomicReferenceCount(const AtomicReferenceCount&) noexcept {}
AtomicReferenceCount& operator=(const AtomicReferenceCount&) noexcept {
return *this;
}
uint32_t use_count() const noexcept {
return ref_count_.load(std::memory_order_acquire);
}
template <typename D>
friend bool IncrementReferenceCountIfNonZero(
const AtomicReferenceCount<D>& base);
template <typename D>
friend bool DecrementReferenceCount(const AtomicReferenceCount<D>& base);
friend void intrusive_ptr_increment(const AtomicReferenceCount* p) noexcept {
p->ref_count_.fetch_add(1, std::memory_order_acq_rel);
}
friend void intrusive_ptr_decrement(const AtomicReferenceCount* p) noexcept {
if (DecrementReferenceCount(*p)) {
delete static_cast<const Derived*>(p);
}
}
private:
mutable std::atomic<uint32_t> ref_count_{0};
};
template <typename Derived>
inline bool IncrementReferenceCountIfNonZero(
const AtomicReferenceCount<Derived>& base) {
uint32_t count = base.ref_count_.load(std::memory_order_relaxed);
do {
if (count == 0) return false;
} while (!base.ref_count_.compare_exchange_weak(count, count + 1,
std::memory_order_acq_rel));
return true;
}
template <typename Derived>
inline bool DecrementReferenceCount(const AtomicReferenceCount<Derived>& base) {
return base.ref_count_.fetch_sub(1, std::memory_order_acq_rel) == 1;
}
template <typename T>
bool DecrementReferenceCountIfGreaterThanOne(std::atomic<T>& reference_count) {
auto count = reference_count.load(std::memory_order_relaxed);
while (true) {
if (count == 1) return false;
if (reference_count.compare_exchange_weak(count, count - 1,
std::memory_order_acq_rel)) {
return true;
}
}
}
struct DefaultIntrusivePtrTraits {
template <typename U>
using pointer = U*;
template <typename Pointer>
static void increment(Pointer p) noexcept {
intrusive_ptr_increment(p);
}
template <typename Pointer>
static void decrement(Pointer p) noexcept {
intrusive_ptr_decrement(p);
}
};
struct acquire_object_ref_t {
explicit constexpr acquire_object_ref_t() = default;
};
struct adopt_object_ref_t {
explicit constexpr adopt_object_ref_t() = default;
};
constexpr acquire_object_ref_t acquire_object_ref{};
constexpr adopt_object_ref_t adopt_object_ref{};
template <typename T, typename R>
class IntrusivePtr;
template <typename T>
struct IsIntrusivePtr : public std::false_type {};
template <typename T, typename R>
struct IsIntrusivePtr<IntrusivePtr<T, R>> : public std::true_type {};
template <typename T, typename R = DefaultIntrusivePtrTraits>
class IntrusivePtr {
public:
using element_type = T;
using traits_type = R;
using pointer = typename R::template pointer<T>;
~IntrusivePtr() {
if (pointer p = get()) R::decrement(p);
}
constexpr IntrusivePtr() noexcept : ptr_(nullptr) {}
constexpr IntrusivePtr(std::nullptr_t) noexcept : ptr_(nullptr) {}
explicit IntrusivePtr(pointer p) noexcept : ptr_(p) {
if (ptr_) R::increment(ptr_);
}
explicit IntrusivePtr(pointer p, acquire_object_ref_t) noexcept : ptr_(p) {
if (ptr_) R::increment(ptr_);
}
constexpr explicit IntrusivePtr(pointer p, adopt_object_ref_t) noexcept
: ptr_(p) {}
IntrusivePtr(const IntrusivePtr& rhs) noexcept
: IntrusivePtr(rhs.get(), acquire_object_ref) {}
IntrusivePtr& operator=(const IntrusivePtr& rhs) noexcept {
IntrusivePtr(rhs).swap(*this);
return *this;
}
template <typename U,
std::enable_if_t<std::is_convertible_v<
typename R::template pointer<U>, pointer>>* = nullptr>
IntrusivePtr(const IntrusivePtr<U, R>& rhs) noexcept
: IntrusivePtr(rhs.get(), acquire_object_ref) {}
template <typename U, typename = std::enable_if_t<std::is_convertible_v<
typename R::template pointer<U>, pointer>>>
IntrusivePtr& operator=(const IntrusivePtr<U, R>& rhs) noexcept {
IntrusivePtr(rhs).swap(*this);
return *this;
}
constexpr IntrusivePtr(IntrusivePtr&& rhs) noexcept
: IntrusivePtr(rhs.release(), adopt_object_ref) {}
constexpr IntrusivePtr& operator=(IntrusivePtr&& rhs) noexcept {
IntrusivePtr(std::move(rhs)).swap(*this);
return *this;
}
template <typename U,
std::enable_if_t<std::is_convertible_v<
typename R::template pointer<U>, pointer>>* = nullptr>
constexpr IntrusivePtr(IntrusivePtr<U, R>&& rhs) noexcept
: IntrusivePtr(rhs.release(), adopt_object_ref) {}
template <typename U, typename = std::enable_if_t<std::is_convertible_v<
typename R::template pointer<U>, pointer>>>
constexpr IntrusivePtr& operator=(IntrusivePtr<U, R>&& rhs) noexcept {
IntrusivePtr(std::move(rhs)).swap(*this);
return *this;
}
void reset() noexcept { IntrusivePtr().swap(*this); }
void reset(std::nullptr_t) noexcept { IntrusivePtr().swap(*this); }
void reset(pointer rhs) { IntrusivePtr(rhs, acquire_object_ref).swap(*this); }
void reset(pointer rhs, acquire_object_ref_t) {
IntrusivePtr(rhs, acquire_object_ref).swap(*this);
}
void reset(pointer rhs, adopt_object_ref_t) {
IntrusivePtr(rhs, adopt_object_ref).swap(*this);
}
constexpr explicit operator bool() const { return static_cast<bool>(ptr_); }
constexpr pointer get() const noexcept { return ptr_; }
constexpr pointer operator->() const {
pointer ptr = get();
assert(static_cast<bool>(ptr));
return ptr;
}
constexpr element_type& operator*() const {
pointer ptr = get();
assert(static_cast<bool>(ptr));
return *ptr;
}
constexpr pointer release() noexcept {
pointer ptr = get();
ptr_ = pointer{};
return ptr;
}
void swap(IntrusivePtr& rhs) noexcept {
std::swap(ptr_, rhs.ptr_);
}
template <typename H>
friend H AbslHashValue(H h, const IntrusivePtr& x) {
return H::combine(std::move(h), x.get());
}
friend bool operator==(const IntrusivePtr& p, std::nullptr_t) { return !p; }
friend bool operator!=(const IntrusivePtr& p, std::nullptr_t) {
return static_cast<bool>(p);
}
friend bool operator==(std::nullptr_t, const IntrusivePtr& p) { return !p; }
friend bool operator!=(std::nullptr_t, const IntrusivePtr& p) {
return static_cast<bool>(p);
}
private:
pointer ptr_;
};
template <typename T, typename R>
inline T* to_address(const IntrusivePtr<T, R>& p) {
return to_address(p.get());
}
template <typename T, typename U, typename R>
inline std::enable_if_t<IsEqualityComparable<typename R::template pointer<T>,
typename R::template pointer<U>>,
bool>
operator==(const IntrusivePtr<T, R>& x, const IntrusivePtr<U, R>& y) {
return x.get() == y.get();
}
template <typename T, typename U, typename R>
inline std::enable_if_t<IsEqualityComparable<typename R::template pointer<T>,
typename R::template pointer<U>>,
bool>
operator!=(const IntrusivePtr<T, R>& x, const IntrusivePtr<U, R>& y) {
return x.get() != y.get();
}
template <typename T, typename U, typename R>
inline IntrusivePtr<T, R> static_pointer_cast(IntrusivePtr<U, R> p) {
return IntrusivePtr<T, R>(static_pointer_cast<T>(p.release()),
adopt_object_ref);
}
template <typename T, typename U, typename R>
inline IntrusivePtr<T, R> const_pointer_cast(IntrusivePtr<U, R> p) {
return IntrusivePtr<T, R>(const_pointer_cast<T>(p.release()),
adopt_object_ref);
}
template <typename T, typename U, typename R>
inline IntrusivePtr<T, R> dynamic_pointer_cast(IntrusivePtr<U, R> p) {
if (auto new_pointer = dynamic_pointer_cast<T>(p.get())) {
p.release();
return IntrusivePtr<T, R>(std::move(new_pointer), adopt_object_ref);
} else {
return IntrusivePtr<T, R>(std::move(new_pointer), adopt_object_ref);
}
}
template <typename T, typename Traits>
std::shared_ptr<T> IntrusiveToShared(internal::IntrusivePtr<T, Traits> p) {
auto* ptr = p.get();
return std::shared_ptr<T>(
std::make_shared<internal::IntrusivePtr<T, Traits>>(std::move(p)), ptr);
}
template <typename T, typename R = DefaultIntrusivePtrTraits, typename... Args>
inline IntrusivePtr<T, R> MakeIntrusivePtr(Args&&... args) {
return IntrusivePtr<T, R>(new T(std::forward<Args>(args)...));
}
}
}
#endif | #include "tensorstore/internal/intrusive_ptr.h"
#include <cstdint>
#include <memory>
#include <utility>
#include <gtest/gtest.h>
#include "tensorstore/internal/memory.h"
namespace {
using ::tensorstore::internal::acquire_object_ref;
using ::tensorstore::internal::adopt_object_ref;
using ::tensorstore::internal::AtomicReferenceCount;
using ::tensorstore::internal::const_pointer_cast;
using ::tensorstore::internal::dynamic_pointer_cast;
using ::tensorstore::internal::IntrusivePtr;
using ::tensorstore::internal::static_pointer_cast;
namespace default_behavior {
struct X : public AtomicReferenceCount<X> {
virtual ~X() = default;
};
struct Y : public X {
virtual ~Y() = default;
};
TEST(IntrusivePtrTest, DefaultConstructor) {
IntrusivePtr<X> p;
EXPECT_EQ(p.get(), nullptr);
EXPECT_EQ(p, p);
EXPECT_EQ(p.get(), static_cast<X*>(nullptr));
EXPECT_EQ(p, nullptr);
EXPECT_EQ(nullptr, p);
}
TEST(IntrusivePtrTest, PointerConstructor) {
X* x = new X;
IntrusivePtr<X> p(x, acquire_object_ref);
EXPECT_EQ(p.get(), x);
EXPECT_EQ(1, x->use_count());
EXPECT_EQ(p, p);
EXPECT_NE(p, nullptr);
EXPECT_NE(nullptr, p);
EXPECT_EQ(x, p.operator->());
EXPECT_EQ(x, &*p);
}
TEST(IntrusivePtrTest, ConstructFromDerivedPointer) {
IntrusivePtr<X> p(new Y);
}
TEST(IntrusivePtrTest, PointerConstructorNoAddRef) {
X* x = new X;
intrusive_ptr_increment(x);
EXPECT_EQ(1, x->use_count());
IntrusivePtr<X> p(x, adopt_object_ref);
EXPECT_EQ(p.get(), x);
EXPECT_EQ(1, x->use_count());
}
TEST(IntrusivePtrTest, CopyConstructorNonNull) {
X* x = new X;
IntrusivePtr<X> p(x);
IntrusivePtr<X> p2(p);
EXPECT_EQ(2, x->use_count());
EXPECT_EQ(x, p.get());
EXPECT_EQ(x, p2.get());
}
TEST(IntrusivePtrTest, CopyConstructorNull) {
IntrusivePtr<X> p;
IntrusivePtr<X> p2(p);
EXPECT_EQ(nullptr, p.get());
EXPECT_EQ(nullptr, p2.get());
}
TEST(IntrusivePtrTest, MoveConstructorNonNull) {
X* x = new X;
IntrusivePtr<X> p(x);
IntrusivePtr<X> p2(std::move(p));
EXPECT_EQ(1, x->use_count());
EXPECT_EQ(x, p2.get());
EXPECT_EQ(nullptr, p.get());
}
TEST(IntrusivePtrTest, MoveConstructorNull) {
IntrusivePtr<X> p;
IntrusivePtr<X> p2(std::move(p));
EXPECT_EQ(nullptr, p.get());
EXPECT_EQ(nullptr, p2.get());
}
TEST(IntrusivePtrTest, ConvertingCopyConstructorNonNull) {
Y* y = new Y;
IntrusivePtr<Y> p(y);
IntrusivePtr<X> p2(p);
EXPECT_EQ(2, y->use_count());
EXPECT_EQ(y, p2.get());
EXPECT_EQ(y, p.get());
}
TEST(IntrusivePtrTest, ConvertingMoveConstructorNonNull) {
Y* y = new Y;
IntrusivePtr<Y> p(y);
IntrusivePtr<X> p2(std::move(p));
EXPECT_EQ(1, y->use_count());
EXPECT_EQ(y, p2.get());
EXPECT_EQ(nullptr, p.get());
}
TEST(IntrusivePtrTest, ConvertingCopyConstructorNull) {
IntrusivePtr<Y> p;
IntrusivePtr<X> p2(p);
EXPECT_EQ(nullptr, p2.get());
EXPECT_EQ(nullptr, p.get());
}
TEST(IntrusivePtrTest, ConvertingMoveConstructorNull) {
IntrusivePtr<Y> p;
IntrusivePtr<X> p2(std::move(p));
EXPECT_EQ(nullptr, p2.get());
EXPECT_EQ(nullptr, p.get());
}
TEST(IntrusivePtrTest, CopyAssignment) {
X* x = new X;
IntrusivePtr<X> p(x);
X* x2 = new X;
IntrusivePtr<X> p3(x2);
IntrusivePtr<X> p2(x2);
p2 = p;
EXPECT_EQ(2, x->use_count());
EXPECT_EQ(x, p.get());
EXPECT_EQ(x, p2.get());
EXPECT_EQ(1, x2->use_count());
}
TEST(IntrusivePtrTest, CopyAssignmentSelf) {
X* x = new X;
IntrusivePtr<X> p(x);
auto& p_ref = p;
p = p_ref;
EXPECT_EQ(1, x->use_count());
EXPECT_EQ(x, p.get());
}
TEST(IntrusivePtrTest, MoveAssignment) {
X* x = new X;
IntrusivePtr<X> p(x);
X* x2 = new X;
IntrusivePtr<X> p3(x2);
IntrusivePtr<X> p2(x2);
p2 = std::move(p);
EXPECT_EQ(1, x->use_count());
EXPECT_EQ(nullptr, p.get());
EXPECT_EQ(x, p2.get());
EXPECT_EQ(1, x2->use_count());
}
TEST(IntrusivePtrTest, MoveAssignmentSelf) {
X* x = new X;
IntrusivePtr<X> p(x);
auto& p_ref = p;
p = std::move(p_ref);
EXPECT_EQ(1, x->use_count());
EXPECT_EQ(x, p.get());
}
TEST(IntrusivePtrTest, ConvertingCopyAssignment) {
Y* y = new Y;
IntrusivePtr<Y> p(y);
X* x2 = new X;
IntrusivePtr<X> p3(x2);
IntrusivePtr<X> p2(x2);
p2 = p;
EXPECT_EQ(2, y->use_count());
EXPECT_EQ(y, p.get());
EXPECT_EQ(y, p2.get());
EXPECT_EQ(1, x2->use_count());
}
TEST(IntrusivePtrTest, ConvertingMoveAssignment) {
Y* y = new Y;
IntrusivePtr<Y> p(y);
X* x2 = new X;
IntrusivePtr<X> p3(x2);
IntrusivePtr<X> p2(x2);
p2 = std::move(p);
EXPECT_EQ(1, y->use_count());
EXPECT_EQ(nullptr, p.get());
EXPECT_EQ(y, p2.get());
EXPECT_EQ(1, x2->use_count());
}
TEST(IntrusivePtrTest, Swap) {
X* x = new X;
X* x2 = new X;
IntrusivePtr<X> p(x);
IntrusivePtr<X> p2(x2);
p.swap(p2);
EXPECT_EQ(x, p2.get());
EXPECT_EQ(x2, p.get());
EXPECT_EQ(1, x->use_count());
EXPECT_EQ(1, x2->use_count());
}
TEST(IntrusivePtrTest, BoolConversion) {
IntrusivePtr<X> p;
EXPECT_FALSE(static_cast<bool>(p));
IntrusivePtr<X> p2(new X);
EXPECT_TRUE(static_cast<bool>(p2));
}
TEST(IntrusivePtrTest, Detach) {
X* x = new X;
IntrusivePtr<X> p(x);
IntrusivePtr<X> p2(x);
EXPECT_EQ(2, x->use_count());
EXPECT_EQ(x, p.release());
EXPECT_EQ(nullptr, p.get());
EXPECT_EQ(2, x->use_count());
p.reset(x, adopt_object_ref);
}
TEST(IntrusivePtrTest, ResetNoArg) {
X* x = new X;
IntrusivePtr<X> p(x);
IntrusivePtr<X> p2(x);
EXPECT_EQ(2, x->use_count());
p.reset();
EXPECT_EQ(1, x->use_count());
EXPECT_EQ(nullptr, p.get());
}
TEST(IntrusivePtrTest, ResetNullptr) {
X* x = new X;
IntrusivePtr<X> p(x);
IntrusivePtr<X> p2(x);
EXPECT_EQ(2, x->use_count());
p.reset(static_cast<X*>(nullptr));
EXPECT_EQ(1, x->use_count());
EXPECT_EQ(nullptr, p.get());
}
TEST(IntrusivePtrTest, ResetPointerAddRef) {
X* x = new X;
IntrusivePtr<X> p(x);
IntrusivePtr<X> p2(x);
IntrusivePtr<X> p3(new X);
EXPECT_EQ(2, x->use_count());
EXPECT_EQ(1, p3->use_count());
p.reset(p3.get());
EXPECT_EQ(2, p3->use_count());
EXPECT_EQ(p3.get(), p.get());
EXPECT_EQ(1, x->use_count());
}
TEST(IntrusivePtrTest, ResetPointerNoAddRef) {
X* x = new X;
IntrusivePtr<X> p(x);
IntrusivePtr<X> p2(x);
IntrusivePtr<X> p3(new X);
EXPECT_EQ(2, x->use_count());
EXPECT_EQ(1, p3->use_count());
p.reset(p3.get(), adopt_object_ref);
EXPECT_EQ(1, p3->use_count());
EXPECT_EQ(p3.get(), p.get());
EXPECT_EQ(1, x->use_count());
p.release();
}
TEST(IntrusivePtrTest, Comparison) {
X* x = new X;
X* x2 = new X;
IntrusivePtr<X> p(x);
IntrusivePtr<X> p2(x2);
EXPECT_EQ(p, p);
EXPECT_NE(p, p2);
EXPECT_NE(p, nullptr);
EXPECT_NE(nullptr, p);
}
TEST(IntrusivePtrTest, StaticPointerCast) {
X* x = new Y;
IntrusivePtr<X> p(x);
IntrusivePtr<Y> p2 = static_pointer_cast<Y>(p);
EXPECT_EQ(2, x->use_count());
EXPECT_EQ(x, p2.get());
}
TEST(IntrusivePtrTest, ConstPointerCast) {
X* x = new X;
IntrusivePtr<const X> p(x);
IntrusivePtr<X> p2 = const_pointer_cast<X>(p);
EXPECT_EQ(2, x->use_count());
EXPECT_EQ(x, p2.get());
}
TEST(IntrusivePtrTest, DynamicPointerCastSuccess) {
X* x = new Y;
IntrusivePtr<X> p(x);
IntrusivePtr<Y> p2 = dynamic_pointer_cast<Y>(p);
EXPECT_EQ(2, x->use_count());
EXPECT_EQ(x, p2.get());
}
TEST(IntrusivePtrTest, DynamicPointerCastFailure) {
X* x = new X;
IntrusivePtr<X> p(x);
IntrusivePtr<Y> p2 = dynamic_pointer_cast<Y>(p);
EXPECT_EQ(1, x->use_count());
EXPECT_EQ(nullptr, p2.get());
}
TEST(IntrusivePtrTest, MakeIntrusive) {
auto x = tensorstore::internal::MakeIntrusivePtr<X>();
EXPECT_EQ(1, x->use_count());
EXPECT_NE(nullptr, x.get());
}
}
namespace custom_increment_decrement_functions {
class X {
public:
X(int v) : v_(v) {}
virtual ~X() = default;
friend void intrusive_ptr_increment(X* p) { ++p->ref_count_; }
friend void intrusive_ptr_decrement(X* p) {
if (--p->ref_count_ == 0) {
delete p;
}
}
uint32_t ref_count_{0};
int v_{0};
};
class Y : public X {
public:
using X::X;
};
TEST(IntrusivePtrTest, CustomIncrementDecrementFunctions) {
IntrusivePtr<X> x1(new X(1));
EXPECT_EQ(1, x1->ref_count_);
IntrusivePtr<X> x2 = x1;
EXPECT_EQ(2, x2->ref_count_);
IntrusivePtr<Y> y1(new Y(2));
IntrusivePtr<X> y2 = y1;
IntrusivePtr<Y> y3 = dynamic_pointer_cast<Y>(y2);
EXPECT_EQ(y2, y1);
EXPECT_EQ(y3, y1);
}
TEST(IntrusivePtrTest, MakeIntrusiveWithCustomIncrementDecrement) {
auto x = tensorstore::internal::MakeIntrusivePtr<X>(1);
EXPECT_EQ(1, x->ref_count_);
EXPECT_NE(nullptr, x.get());
EXPECT_EQ(1, x->v_);
auto y = tensorstore::internal::MakeIntrusivePtr<Y>(2);
EXPECT_EQ(1, y->ref_count_);
EXPECT_NE(nullptr, y.get());
EXPECT_EQ(2, y->v_);
}
}
namespace custom_traits {
class X {
public:
X(int v) : v_(v) {}
virtual ~X() = default;
uint32_t ref_count_{0};
int v_{0};
};
class Y : public X {
public:
using X::X;
};
struct XTraits {
template <typename U>
using pointer = U*;
static void increment(X* p) noexcept { ++p->ref_count_; }
static void decrement(X* p) noexcept {
if (--p->ref_count_ == 0) delete p;
}
};
TEST(IntrusivePtrTest, CustomTraits) {
IntrusivePtr<X, XTraits> x1(new X(2));
EXPECT_EQ(1, x1->ref_count_);
IntrusivePtr<X, XTraits> x2 = x1;
EXPECT_EQ(2, x2->ref_count_);
IntrusivePtr<Y, XTraits> y1(new Y(3));
IntrusivePtr<X, XTraits> y2 = y1;
IntrusivePtr<Y, XTraits> y3 = dynamic_pointer_cast<Y>(y2);
EXPECT_EQ(y2, y1);
EXPECT_EQ(y3, y1);
}
TEST(IntrusivePtrTest, MakeIntrusiveWithCustomTraits) {
auto x = tensorstore::internal::MakeIntrusivePtr<X, XTraits>(2);
EXPECT_EQ(1, x->ref_count_);
EXPECT_NE(nullptr, x.get());
EXPECT_EQ(2, x->v_);
auto y = tensorstore::internal::MakeIntrusivePtr<Y, XTraits>(3);
EXPECT_EQ(1, y->ref_count_);
EXPECT_NE(nullptr, y.get());
EXPECT_EQ(3, y->v_);
}
struct InvokeInDestructorType
: public AtomicReferenceCount<InvokeInDestructorType> {
std::function<void()> invoke_in_destructor;
~InvokeInDestructorType() { invoke_in_destructor(); }
};
TEST(AtomicReferenceCountTest, IncrementReferenceCountIfNonZero) {
AtomicReferenceCount<int> x;
EXPECT_FALSE(IncrementReferenceCountIfNonZero(x));
EXPECT_EQ(0, x.use_count());
intrusive_ptr_increment(&x);
EXPECT_TRUE(IncrementReferenceCountIfNonZero(x));
EXPECT_EQ(2, x.use_count());
}
TEST(AtomicReferenceCountTest,
IncrementReferenceCountIfNonZeroDuringDestructor) {
IntrusivePtr<InvokeInDestructorType> ptr(new InvokeInDestructorType);
{
ASSERT_TRUE(tensorstore::internal::IncrementReferenceCountIfNonZero(*ptr));
IntrusivePtr<InvokeInDestructorType> ptr2(ptr.get(), adopt_object_ref);
ASSERT_TRUE(tensorstore::internal::IncrementReferenceCountIfNonZero(*ptr));
IntrusivePtr<InvokeInDestructorType> ptr3(ptr.get(), adopt_object_ref);
}
bool test_ran = false;
bool could_acquire = false;
ptr->invoke_in_destructor = [&, ptr_copy = ptr.get()] {
test_ran = true;
could_acquire =
tensorstore::internal::IncrementReferenceCountIfNonZero(*ptr_copy);
};
ptr.reset();
EXPECT_TRUE(test_ran);
EXPECT_FALSE(could_acquire);
}
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/intrusive_ptr.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/intrusive_ptr_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
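A minimal usage sketch of IntrusivePtr with AtomicReferenceCount, based only on the behavior exercised by the tests above; the type Node and the function name are illustrative and not part of the repository.

#include <cassert>
#include "tensorstore/internal/intrusive_ptr.h"

// Illustrative only: `Node` is a hypothetical reference-counted type.
struct Node : tensorstore::internal::AtomicReferenceCount<Node> {
  int value = 0;
};

void IntrusivePtrUsageSketch() {
  using tensorstore::internal::IntrusivePtr;
  // Constructing from a raw pointer acquires a reference by default.
  IntrusivePtr<Node> a(new Node);
  assert(a->use_count() == 1);
  IntrusivePtr<Node> b = a;  // Copying bumps the shared count.
  assert(b->use_count() == 2);
  b.reset();                 // Drops one reference; `a` still owns the node.
  assert(a->use_count() == 1);
  // MakeIntrusivePtr forwards constructor arguments and returns an owning
  // pointer holding a single reference.
  auto c = tensorstore::internal::MakeIntrusivePtr<Node>();
  assert(c->use_count() == 1);
}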
cd74caa6-384a-4978-adf7-847e8d12598e | cpp | google/tensorstore | multi_vector | tensorstore/internal/multi_vector.h | tensorstore/internal/multi_vector_test.cc | #ifndef TENSORSTORE_INTERNAL_MULTI_VECTOR_H_
#define TENSORSTORE_INTERNAL_MULTI_VECTOR_H_
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstring>
#include <type_traits>
#include <utility>
#include "tensorstore/internal/gdb_scripting.h"
#include "tensorstore/internal/meta.h"
#include "tensorstore/internal/multi_vector_impl.h"
#include "tensorstore/internal/type_traits.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/span.h"
TENSORSTORE_GDB_AUTO_SCRIPT("multi_vector_gdb.py")
namespace tensorstore {
namespace internal {
template <ptrdiff_t Extent, ptrdiff_t InlineSize, typename... Ts>
class MultiVectorStorageImpl;
template <ptrdiff_t Extent, typename... Ts>
using MultiVectorStorage =
MultiVectorStorageImpl<RankConstraint::FromInlineRank(Extent),
InlineRankLimit(Extent), Ts...>;
template <typename StorageT>
class MultiVectorAccess;
template <ptrdiff_t Extent, ptrdiff_t InlineSize, typename... Ts>
class MultiVectorStorageImpl {
private:
static_assert((... && std::is_trivial_v<Ts>),
"Non-trivial types are not currently supported.");
static_assert(InlineSize == 0,
"InlineSize must be 0 if Extent != dynamic_extent.");
using Offsets = internal_multi_vector::PackStorageOffsets<Ts...>;
friend class MultiVectorAccess<MultiVectorStorageImpl>;
void* InternalGetDataPointer(size_t array_i) {
return data_ + Offsets::GetVectorOffset(Extent, array_i);
}
constexpr static StaticRank<Extent> InternalGetExtent() { return {}; }
void InternalResize(StaticRank<Extent>) {}
alignas(Offsets::kAlignment) char data_[Offsets::GetTotalSize(Extent)];
};
template <ptrdiff_t InlineSize, typename... Ts>
class MultiVectorStorageImpl<0, InlineSize, Ts...> {
private:
static_assert(InlineSize == 0,
"InlineSize must be 0 if Extent != dynamic_extent.");
friend class MultiVectorAccess<MultiVectorStorageImpl>;
void* InternalGetDataPointer(size_t array_i) { return nullptr; }
constexpr static StaticRank<0> InternalGetExtent() { return {}; }
void InternalResize(StaticRank<0>) {}
};
template <ptrdiff_t InlineSize, typename... Ts>
class MultiVectorStorageImpl<dynamic_rank, InlineSize, Ts...> {
static_assert((std::is_trivial_v<Ts> && ...),
"Non-trivial types are not currently supported.");
static_assert(InlineSize >= 0, "InlineSize must be non-negative.");
using Offsets = internal_multi_vector::PackStorageOffsets<Ts...>;
public:
explicit constexpr MultiVectorStorageImpl() noexcept {}
MultiVectorStorageImpl(MultiVectorStorageImpl&& other) {
*this = std::move(other);
}
MultiVectorStorageImpl(const MultiVectorStorageImpl& other) { *this = other; }
MultiVectorStorageImpl& operator=(MultiVectorStorageImpl&& other) noexcept {
std::swap(data_, other.data_);
std::swap(extent_, other.extent_);
return *this;
}
MultiVectorStorageImpl& operator=(const MultiVectorStorageImpl& other) {
if (this == &other) return *this;
const ptrdiff_t extent = other.extent_;
InternalResize(extent);
const bool use_inline = InlineSize > 0 && extent <= InlineSize;
std::memcpy(use_inline ? data_.inline_data : data_.pointer,
use_inline ? other.data_.inline_data : other.data_.pointer,
Offsets::GetTotalSize(extent));
return *this;
}
~MultiVectorStorageImpl() {
if (extent_ > InlineSize) {
::operator delete(data_.pointer);
}
}
private:
friend class MultiVectorAccess<MultiVectorStorageImpl>;
ptrdiff_t InternalGetExtent() const { return extent_; }
void* InternalGetDataPointer(ptrdiff_t array_i) {
return (extent_ > InlineSize ? data_.pointer : data_.inline_data) +
Offsets::GetVectorOffset(extent_, array_i);
}
void InternalResize(ptrdiff_t new_extent) {
assert(new_extent >= 0);
if (extent_ == new_extent) return;
if (new_extent > InlineSize) {
void* new_data = ::operator new(Offsets::GetTotalSize(new_extent));
if (extent_ > InlineSize) ::operator delete(data_.pointer);
data_.pointer = static_cast<char*>(new_data);
} else if (extent_ > InlineSize) {
::operator delete(data_.pointer);
}
extent_ = new_extent;
}
constexpr static ptrdiff_t kAlignment =
InlineSize == 0 ? 1 : Offsets::kAlignment;
constexpr static ptrdiff_t kInlineBytes =
InlineSize == 0 ? 1 : Offsets::GetTotalSize(InlineSize);
union Data {
char* pointer;
alignas(kAlignment) char inline_data[kInlineBytes];
};
Data data_;
ptrdiff_t extent_ = 0;
};
template <ptrdiff_t Extent, ptrdiff_t InlineSize, typename... Ts>
class MultiVectorAccess<MultiVectorStorageImpl<Extent, InlineSize, Ts...>> {
public:
using StorageType = MultiVectorStorageImpl<Extent, InlineSize, Ts...>;
using ExtentType = StaticOrDynamicRank<Extent>;
constexpr static ptrdiff_t static_extent = Extent;
constexpr static size_t num_vectors = sizeof...(Ts);
template <size_t I>
using ElementType = TypePackElement<I, Ts...>;
template <size_t I>
using ConstElementType = const TypePackElement<I, Ts...>;
static ExtentType GetExtent(const StorageType& storage) {
return storage.InternalGetExtent();
}
template <size_t I>
static tensorstore::span<ElementType<I>, Extent> get(StorageType* array) {
return {static_cast<ElementType<I>*>(array->InternalGetDataPointer(I)),
GetExtent(*array)};
}
template <size_t I>
static tensorstore::span<ConstElementType<I>, Extent> get(
const StorageType* array) {
return get<I>(const_cast<StorageType*>(array));
}
template <typename... Us>
static void Assign(StorageType* array, ExtentType extent, Us*... pointers) {
static_assert(sizeof...(Us) == sizeof...(Ts));
array->InternalResize(extent);
size_t vector_i = 0;
(std::copy_n(pointers, extent,
static_cast<Ts*>(array->InternalGetDataPointer(vector_i++))),
...);
}
template <typename... Us, ptrdiff_t... Extents>
static void Assign(StorageType* array,
tensorstore::span<Us, Extents>... spans) {
static_assert(sizeof...(Us) == sizeof...(Ts));
const ExtentType extent =
GetFirstArgument(GetStaticOrDynamicExtent(spans)...);
assert(((spans.size() == extent) && ...));
Assign(array, extent, spans.data()...);
}
static void Resize(StorageType* array, ExtentType new_extent) {
array->InternalResize(new_extent);
}
};
}
}
#endif | #include "tensorstore/internal/multi_vector.h"
#include <cstddef>
#include <type_traits>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/rank.h"
#include "tensorstore/util/span.h"
namespace {
using ::tensorstore::dynamic_rank;
using ::tensorstore::internal::MultiVectorAccess;
using ::tensorstore::internal::MultiVectorStorage;
using ::tensorstore::internal::MultiVectorStorageImpl;
using ::tensorstore::internal_multi_vector::GetAlignedOffset;
using ::tensorstore::internal_multi_vector::PackStorageOffsets;
using ::testing::ElementsAre;
static_assert(
MultiVectorAccess<MultiVectorStorage<3, int, float>>::static_extent == 3);
static_assert(
MultiVectorAccess<MultiVectorStorage<3, int, float>>::num_vectors == 2);
static_assert(GetAlignedOffset(0, 4, 4) == 0);
static_assert(GetAlignedOffset(4, 4, 4) == 4);
static_assert(GetAlignedOffset(4, 4, 8) == 8);
static_assert(GetAlignedOffset(4, 4, 8) == 8);
template <size_t Len, size_t Align>
using Aligned = typename std::aligned_storage<Len, Align>::type;
static_assert(PackStorageOffsets<Aligned<4, 4>>::GetVectorOffset(5, 0) == 0);
static_assert(PackStorageOffsets<Aligned<4, 4>>::GetVectorOffset(5, 1) ==
5 * 4);
static_assert(PackStorageOffsets<Aligned<4, 4>>::GetTotalSize(5) == 5 * 4);
static_assert(PackStorageOffsets<Aligned<4, 4>, Aligned<4, 4>>::GetVectorOffset(
5, 0) == 0);
static_assert(PackStorageOffsets<Aligned<4, 4>, Aligned<4, 4>>::GetVectorOffset(
5, 1) == 5 * 4);
static_assert(PackStorageOffsets<Aligned<4, 4>, Aligned<4, 4>>::GetVectorOffset(
5, 2) == 2 * 5 * 4);
static_assert(PackStorageOffsets<Aligned<4, 4>, Aligned<4, 4>>::GetTotalSize(
5) == 2 * 5 * 4);
static_assert(PackStorageOffsets<Aligned<4, 4>, Aligned<4, 4>,
Aligned<4, 4>>::GetVectorOffset(5, 0) == 0);
static_assert(PackStorageOffsets<Aligned<4, 4>, Aligned<4, 4>,
Aligned<4, 4>>::GetVectorOffset(5, 1) ==
5 * 4);
static_assert(PackStorageOffsets<Aligned<4, 4>, Aligned<4, 4>,
Aligned<4, 4>>::GetVectorOffset(5, 2) ==
2 * 5 * 4);
static_assert(PackStorageOffsets<Aligned<4, 4>, Aligned<4, 4>,
Aligned<4, 4>>::GetVectorOffset(5, 3) ==
3 * 5 * 4);
static_assert(PackStorageOffsets<Aligned<4, 4>, Aligned<8, 8>,
Aligned<4, 4>>::GetTotalSize(5) ==
4 * 6 + 8 * 5 + 4 * 5);
static_assert(PackStorageOffsets<Aligned<8, 8>, Aligned<4, 4>>::GetVectorOffset(
5, 0) == 0);
static_assert(PackStorageOffsets<Aligned<8, 8>, Aligned<4, 4>>::GetVectorOffset(
5, 1) == 8 * 5);
static_assert(PackStorageOffsets<Aligned<8, 8>, Aligned<4, 4>>::GetVectorOffset(
5, 2) == 8 * 5 + 4 * 5);
template <typename StorageType>
class MultiVectorDynamicTest : public ::testing::Test {};
using DynamicStorageTypes =
::testing::Types<MultiVectorStorage<dynamic_rank, int, int>,
MultiVectorStorage<dynamic_rank, int, float>,
MultiVectorStorage<dynamic_rank, int, double>,
MultiVectorStorage<dynamic_rank, double, int>,
MultiVectorStorage<dynamic_rank(2), int, int>,
MultiVectorStorage<dynamic_rank(2), int, float>,
MultiVectorStorage<dynamic_rank(2), int, double>,
MultiVectorStorage<dynamic_rank(2), double, int>,
MultiVectorStorage<dynamic_rank(3), int, int>,
MultiVectorStorage<dynamic_rank(3), int, float>,
MultiVectorStorage<dynamic_rank(3), int, double>,
MultiVectorStorage<dynamic_rank(3), double, int>,
MultiVectorStorage<dynamic_rank(4), int, int>,
MultiVectorStorage<dynamic_rank(4), int, float>,
MultiVectorStorage<dynamic_rank(4), int, double>,
MultiVectorStorage<dynamic_rank(4), double, int>>;
TYPED_TEST_SUITE(MultiVectorDynamicTest, DynamicStorageTypes);
template <typename T>
struct Decompose;
template <ptrdiff_t Extent, ptrdiff_t InlineSize, typename T0, typename T1>
struct Decompose<MultiVectorStorageImpl<Extent, InlineSize, T0, T1>> {
constexpr static ptrdiff_t inline_size = InlineSize;
constexpr static ptrdiff_t extent = Extent;
using Element0 = T0;
using Element1 = T1;
};
TYPED_TEST(MultiVectorDynamicTest, Basic) {
using Container = TypeParam;
using D = Decompose<Container>;
using T0 = typename D::Element0;
using T1 = typename D::Element1;
using Access = MultiVectorAccess<Container>;
static_assert(std::is_same_v<T0, typename Access::template ElementType<0>>);
static_assert(std::is_same_v<T1, typename Access::template ElementType<1>>);
static_assert(
std::is_same_v<const T0, typename Access::template ConstElementType<0>>);
static_assert(
std::is_same_v<const T1, typename Access::template ConstElementType<1>>);
Container vec;
EXPECT_EQ(0, Access::GetExtent(vec));
Access::Resize(&vec, 3);
EXPECT_EQ(3, Access::GetExtent(vec));
const T0 a0[] = {1, 2, 3, 4};
const T1 a1[] = {5, 6, 7, 8};
Access::Assign(&vec, 4, a0, a1);
EXPECT_THAT(Access::template get<0>(&vec), ElementsAre(1, 2, 3, 4));
EXPECT_THAT(Access::template get<1>(&vec), ElementsAre(5, 6, 7, 8));
Access::Assign(&vec, tensorstore::span<const T0>({4, 5, 6}),
tensorstore::span<const T1>({7, 8, 9}));
EXPECT_THAT(Access::template get<0>(&vec), ElementsAre(4, 5, 6));
EXPECT_THAT(Access::template get<1>(&vec), ElementsAre(7, 8, 9));
{
Container vec2 = vec;
EXPECT_THAT(Access::template get<0>(&vec2), ElementsAre(4, 5, 6));
EXPECT_THAT(Access::template get<1>(&vec2), ElementsAre(7, 8, 9));
EXPECT_THAT(Access::template get<0>(&vec), ElementsAre(4, 5, 6));
EXPECT_THAT(Access::template get<1>(&vec), ElementsAre(7, 8, 9));
EXPECT_NE(Access::template get<0>(&vec2).data(),
Access::template get<0>(&vec).data());
{
T0* ptr0 = Access::template get<0>(&vec2).data();
T1* ptr1 = Access::template get<1>(&vec2).data();
Container vec3 = std::move(vec2);
EXPECT_THAT(Access::template get<0>(&vec3), ElementsAre(4, 5, 6));
EXPECT_THAT(Access::template get<1>(&vec3), ElementsAre(7, 8, 9));
EXPECT_EQ(0, Access::GetExtent(vec2));
if (D::inline_size < 3) {
EXPECT_EQ(ptr0, Access::template get<0>(&vec3).data());
EXPECT_EQ(ptr1, Access::template get<1>(&vec3).data());
}
}
}
{
Container vec4;
vec4 = vec;
EXPECT_THAT(Access::template get<0>(&vec), ElementsAre(4, 5, 6));
EXPECT_THAT(Access::template get<1>(&vec), ElementsAre(7, 8, 9));
EXPECT_THAT(Access::template get<0>(&vec4), ElementsAre(4, 5, 6));
EXPECT_THAT(Access::template get<1>(&vec4), ElementsAre(7, 8, 9));
EXPECT_NE(Access::template get<0>(&vec).data(),
Access::template get<0>(&vec4).data());
{
T0* ptr0 = Access::template get<0>(&vec4).data();
T1* ptr1 = Access::template get<1>(&vec4).data();
Container vec5;
vec5 = std::move(vec4);
EXPECT_THAT(Access::template get<0>(&vec5), ElementsAre(4, 5, 6));
EXPECT_THAT(Access::template get<1>(&vec5), ElementsAre(7, 8, 9));
EXPECT_EQ(0, Access::GetExtent(vec4));
if (D::inline_size < 3) {
EXPECT_EQ(ptr0, Access::template get<0>(&vec5).data());
EXPECT_EQ(ptr1, Access::template get<1>(&vec5).data());
}
}
}
}
template <typename StorageType>
class MultiVectorStaticTest : public ::testing::Test {};
using StaticStorageTypes = ::testing::Types<
MultiVectorStorage<2, int, int>, MultiVectorStorage<2, int, float>,
MultiVectorStorage<2, int, double>, MultiVectorStorage<2, double, int>>;
TYPED_TEST_SUITE(MultiVectorStaticTest, StaticStorageTypes);
TYPED_TEST(MultiVectorStaticTest, Basic) {
using Container = TypeParam;
using Access = MultiVectorAccess<Container>;
using D = Decompose<Container>;
using T0 = typename D::Element0;
using T1 = typename D::Element1;
static_assert(std::is_same_v<T0, typename Access::template ElementType<0>>);
static_assert(std::is_same_v<T1, typename Access::template ElementType<1>>);
static_assert(
std::is_same_v<const T0, typename Access::template ConstElementType<0>>);
static_assert(
std::is_same_v<const T1, typename Access::template ConstElementType<1>>);
Container vec;
static_assert(std::is_same_v<std::integral_constant<ptrdiff_t, 2>,
decltype(Access::GetExtent(vec))>);
EXPECT_EQ(2, Access::GetExtent(vec));
Access::Resize(&vec, std::integral_constant<ptrdiff_t, 2>());
const T0 a0[] = {1, 2};
const T1 a1[] = {5, 6};
Access::Assign(&vec, std::integral_constant<ptrdiff_t, 2>(), a0, a1);
EXPECT_THAT(Access::template get<0>(&vec), ElementsAre(1, 2));
EXPECT_THAT(Access::template get<1>(&vec), ElementsAre(5, 6));
Access::Assign(&vec, tensorstore::span<const T0, 2>({4, 5}),
tensorstore::span<const T1, 2>({7, 8}));
EXPECT_THAT(Access::template get<0>(&vec), ElementsAre(4, 5));
EXPECT_THAT(Access::template get<1>(&vec), ElementsAre(7, 8));
{
Container vec2 = vec;
EXPECT_THAT(Access::template get<0>(&vec2), ElementsAre(4, 5));
EXPECT_THAT(Access::template get<1>(&vec2), ElementsAre(7, 8));
EXPECT_THAT(Access::template get<0>(&vec), ElementsAre(4, 5));
EXPECT_THAT(Access::template get<1>(&vec), ElementsAre(7, 8));
EXPECT_NE(Access::template get<0>(&vec2).data(),
Access::template get<0>(&vec).data());
{
Container vec3 = std::move(vec2);
EXPECT_THAT(Access::template get<0>(&vec3), ElementsAre(4, 5));
EXPECT_THAT(Access::template get<1>(&vec3), ElementsAre(7, 8));
}
}
{
Container vec4;
vec4 = vec;
EXPECT_THAT(Access::template get<0>(&vec), ElementsAre(4, 5));
EXPECT_THAT(Access::template get<1>(&vec), ElementsAre(7, 8));
EXPECT_THAT(Access::template get<0>(&vec4), ElementsAre(4, 5));
EXPECT_THAT(Access::template get<1>(&vec4), ElementsAre(7, 8));
{
Container vec5;
vec5 = std::move(vec4);
EXPECT_THAT(Access::template get<0>(&vec5), ElementsAre(4, 5));
EXPECT_THAT(Access::template get<1>(&vec5), ElementsAre(7, 8));
}
}
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/multi_vector.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/multi_vector_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
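A minimal sketch of how MultiVectorStorage packs several equal-length vectors into one allocation, using only the MultiVectorAccess API shown in the header above; the names below are illustrative.

#include <cassert>
#include "tensorstore/internal/multi_vector.h"
#include "tensorstore/rank.h"

void MultiVectorUsageSketch() {
  using Container = tensorstore::internal::MultiVectorStorage<
      tensorstore::dynamic_rank, int, double>;
  using Access = tensorstore::internal::MultiVectorAccess<Container>;
  Container vec;
  const int ints[] = {1, 2, 3};
  const double doubles[] = {4.0, 5.0, 6.0};
  // Assign resizes the single allocation and copies both vectors into it.
  Access::Assign(&vec, 3, ints, doubles);
  assert(Access::GetExtent(vec) == 3);
  assert(Access::get<0>(&vec)[1] == 2);    // span over the int vector
  assert(Access::get<1>(&vec)[2] == 6.0);  // span over the double vector
}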
a9f555b1-f12d-4de3-a59e-cd6f28002d77 | cpp | google/tensorstore | tagged_ptr | tensorstore/internal/tagged_ptr.h | tensorstore/internal/tagged_ptr_test.cc | #ifndef TENSORSTORE_INTERNAL_TAGGED_PTR_H_
#define TENSORSTORE_INTERNAL_TAGGED_PTR_H_
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <type_traits>
#include <utility>
namespace tensorstore {
namespace internal {
template <typename T, int TagBits>
class TaggedPtr {
constexpr static std::uintptr_t kTagMask =
(static_cast<std::uintptr_t>(1) << TagBits) - 1;
constexpr static std::uintptr_t kPointerMask = ~kTagMask;
public:
using element_type = T;
template <typename U>
using rebind = TaggedPtr<U, TagBits>;
constexpr TaggedPtr() noexcept : value_(0) {}
constexpr TaggedPtr(std::nullptr_t) noexcept : value_(0) {}
constexpr TaggedPtr(std::nullptr_t, std::uintptr_t tag) noexcept
: value_(tag) {
assert((tag & kPointerMask) == 0);
}
template <typename U,
std::enable_if_t<std::is_convertible_v<U*, T*>>* = nullptr>
TaggedPtr(U* ptr, std::uintptr_t tag = 0) noexcept {
assert((reinterpret_cast<std::uintptr_t>(static_cast<T*>(ptr)) &
kTagMask) == 0 &&
(tag & kPointerMask) == 0);
value_ = reinterpret_cast<std::uintptr_t>(static_cast<T*>(ptr)) | tag;
}
template <typename U,
std::enable_if_t<std::is_convertible_v<U*, T*>>* = nullptr>
TaggedPtr(TaggedPtr<U, TagBits> other) noexcept
: TaggedPtr(other.get(), other.tag()) {}
TaggedPtr& operator=(std::nullptr_t) noexcept {
value_ = 0;
return *this;
}
template <typename U>
std::enable_if_t<std::is_convertible_v<U*, T*>, TaggedPtr&> operator=(
U* ptr) noexcept {
*this = TaggedPtr(ptr);
return *this;
}
explicit operator bool() const noexcept {
return static_cast<bool>(reinterpret_cast<T*>(value_ & kPointerMask));
}
T* get() const noexcept {
static_assert(alignof(T) >= (1 << TagBits),
"Number of TagBits is incompatible with alignment of T.");
return reinterpret_cast<T*>(value_ & kPointerMask);
}
operator T*() const noexcept { return get(); }
std::uintptr_t tag() const noexcept { return value_ & kTagMask; }
template <int Bit>
std::enable_if_t<(Bit >= 0 && Bit < TagBits), bool> tag() const noexcept {
return static_cast<bool>((value_ >> Bit) & 1);
}
template <int Bit>
std::enable_if_t<(Bit >= 0 && Bit < TagBits), void> set_tag(
bool value) noexcept {
constexpr std::uintptr_t mask = (static_cast<std::uintptr_t>(1) << Bit);
value_ = (value_ & ~mask) | (static_cast<std::uintptr_t>(value) << Bit);
}
void set_tag(std::uintptr_t tag) noexcept {
assert((tag & kPointerMask) == 0);
value_ = (value_ & kPointerMask) | tag;
}
T* operator->() const noexcept {
T* ptr = get();
assert(ptr != nullptr);
return ptr;
}
T& operator*() const noexcept {
T* ptr = get();
assert(ptr != nullptr);
return *ptr;
}
friend bool operator==(TaggedPtr x, TaggedPtr y) {
return x.get() == y.get() && x.tag() == y.tag();
}
friend bool operator!=(TaggedPtr x, TaggedPtr y) { return !(x == y); }
template <typename H>
friend H AbslHashValue(H h, TaggedPtr x) {
return H::combine(std::move(h), x.value_);
}
private:
std::uintptr_t value_;
};
template <typename T, int TagBits>
inline T* to_address(TaggedPtr<T, TagBits> p) {
return p.get();
}
template <typename T, typename U, int TagBits>
TaggedPtr<T, TagBits> static_pointer_cast(TaggedPtr<U, TagBits> p) {
return TaggedPtr<T, TagBits>(static_cast<T*>(p.get()), p.tag());
}
template <typename T, typename U, int TagBits>
TaggedPtr<T, TagBits> const_pointer_cast(TaggedPtr<U, TagBits> p) {
return TaggedPtr<T, TagBits>(const_cast<T*>(p.get()), p.tag());
}
template <typename T, typename U, int TagBits>
TaggedPtr<T, TagBits> dynamic_pointer_cast(TaggedPtr<U, TagBits> p) {
return TaggedPtr<T, TagBits>(dynamic_cast<T*>(p.get()), p.tag());
}
}
}
#endif | #include "tensorstore/internal/tagged_ptr.h"
#include <memory>
#include <type_traits>
#include <gtest/gtest.h>
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/memory.h"
namespace {
using ::tensorstore::internal::const_pointer_cast;
using ::tensorstore::internal::dynamic_pointer_cast;
using ::tensorstore::internal::IntrusivePtr;
using ::tensorstore::internal::static_pointer_cast;
using ::tensorstore::internal::TaggedPtr;
struct alignas(8) X {
virtual ~X() = default;
};
struct Y : public X {
virtual ~Y() = default;
};
static_assert(!std::is_convertible_v<TaggedPtr<Y, 1>, TaggedPtr<Y, 2>>);
static_assert(std::is_convertible_v<TaggedPtr<Y, 2>, TaggedPtr<X, 2>>);
static_assert(!std::is_convertible_v<TaggedPtr<Y, 1>, TaggedPtr<X, 2>>);
static_assert(std::is_convertible_v<Y*, TaggedPtr<X, 2>>);
static_assert(!std::is_convertible_v<TaggedPtr<Y, 2>, TaggedPtr<Y, 1>>);
static_assert(!std::is_convertible_v<TaggedPtr<X, 2>, TaggedPtr<Y, 2>>);
static_assert(!std::is_convertible_v<TaggedPtr<X, 2>, TaggedPtr<Y, 1>>);
static_assert(!std::is_convertible_v<X*, TaggedPtr<Y, 2>>);
static_assert(std::is_assignable_v<TaggedPtr<X, 2>, TaggedPtr<Y, 2>>);
static_assert(!std::is_assignable_v<TaggedPtr<X, 2>, TaggedPtr<X, 1>>);
static_assert(!std::is_assignable_v<TaggedPtr<X, 2>, TaggedPtr<Y, 1>>);
static_assert(!std::is_assignable_v<TaggedPtr<Y, 2>, TaggedPtr<Y, 3>>);
static_assert(!std::is_assignable_v<TaggedPtr<Y, 2>, TaggedPtr<X, 2>>);
static_assert(!std::is_assignable_v<TaggedPtr<Y, 2>, TaggedPtr<X, 3>>);
TEST(TaggedPtr, DefaultConstruct) {
TaggedPtr<X, 3> p;
EXPECT_EQ(nullptr, p.get());
EXPECT_EQ(0u, p.tag());
}
TEST(TaggedPtr, Construct) {
X x;
TaggedPtr<X, 3> p(&x, 5);
EXPECT_EQ(&x, p.get());
EXPECT_EQ(5u, p.tag());
}
TEST(TaggedPtr, ConstructNullptr) {
TaggedPtr<X, 3> p(nullptr, 5);
EXPECT_EQ(nullptr, p.get());
EXPECT_EQ(5u, p.tag());
}
TEST(TaggedPtr, CopyConstruct) {
X x;
TaggedPtr<X, 3> p(&x, 5);
TaggedPtr<X, 3> p2(p);
EXPECT_EQ(&x, p2.get());
EXPECT_EQ(&x, p.get());
EXPECT_EQ(5u, p.tag());
EXPECT_EQ(5u, p2.tag());
}
TEST(TaggedPtr, CopyAssignTaggedPtr) {
X x;
TaggedPtr<X, 3> p(&x, 5);
TaggedPtr<X, 3> p2;
p2 = p;
EXPECT_EQ(&x, p2.get());
EXPECT_EQ(&x, p.get());
EXPECT_EQ(5u, p2.tag());
EXPECT_EQ(5u, p.tag());
}
TEST(TaggedPtr, CopyAssignPointer) {
X x;
TaggedPtr<X, 3> p(nullptr, 5);
p = &x;
EXPECT_EQ(&x, p.get());
EXPECT_EQ(0u, p.tag());
}
TEST(TaggedPtr, CopyAssignNullptr) {
X x;
TaggedPtr<X, 3> p(&x, 5);
p = nullptr;
EXPECT_EQ(nullptr, p.get());
EXPECT_EQ(0u, p.tag());
}
TEST(TaggedPtr, GetAndSetTag) {
X x;
TaggedPtr<X, 3> p(&x, 3);
EXPECT_EQ(3u, p.tag());
p.set_tag(4);
EXPECT_EQ(4u, p.tag());
EXPECT_TRUE(p.tag<2>());
EXPECT_FALSE(p.tag<0>());
EXPECT_FALSE(p.tag<1>());
p.set_tag<0>(true);
EXPECT_EQ(5u, p.tag());
p.set_tag<2>(false);
EXPECT_EQ(1u, p.tag());
}
TEST(TaggedPtr, TagComparison) {
X x;
X x2;
TaggedPtr<X, 2> p(&x, 3);
TaggedPtr<X, 2> p2(&x, 1);
TaggedPtr<X, 2> p3(&x2, 3);
EXPECT_EQ(p, p);
EXPECT_NE(p, p2);
EXPECT_NE(p, p3);
}
TEST(TaggedPtr, StaticPointerCast) {
Y y;
TaggedPtr<X, 3> p(&y, 5);
TaggedPtr<Y, 3> p2 = static_pointer_cast<Y>(p);
EXPECT_EQ(&y, p2.get());
EXPECT_EQ(5u, p2.tag());
}
TEST(TaggedPtr, ConstPointerCast) {
X x;
TaggedPtr<const X, 3> p(&x, 5);
TaggedPtr<X, 3> p2 = const_pointer_cast<X>(p);
EXPECT_EQ(&x, p2.get());
EXPECT_EQ(5u, p2.tag());
}
TEST(TaggedPtr, DynamicPointerCastSuccess) {
Y y;
TaggedPtr<X, 3> p(&y, 5);
TaggedPtr<Y, 3> p2 = dynamic_pointer_cast<Y>(p);
EXPECT_EQ(&y, p2.get());
EXPECT_EQ(5u, p2.tag());
}
TEST(TaggedPtr, DynamicPointerCastFailure) {
X x;
TaggedPtr<X, 3> p(&x, 5);
TaggedPtr<Y, 3> p2 = dynamic_pointer_cast<Y>(p);
EXPECT_EQ(nullptr, p2.get());
EXPECT_EQ(5u, p2.tag());
}
struct alignas(8) X2 : public tensorstore::internal::AtomicReferenceCount<X2> {
int value;
virtual ~X2() = default;
};
struct Y2 : public X2 {
virtual ~Y2() = default;
};
template <int TagBits>
struct TaggedIntrusivePtrTraits
: public tensorstore::internal::DefaultIntrusivePtrTraits {
template <typename U>
using pointer = TaggedPtr<U, TagBits>;
};
template <typename T, int TagBits>
using TaggedIntrusivePtr = IntrusivePtr<T, TaggedIntrusivePtrTraits<TagBits>>;
TEST(IntrusivePtrTest, Basic) {
Y2* x = new Y2;
TaggedIntrusivePtr<Y2, 3> p(x);
EXPECT_EQ(1u, p->use_count());
EXPECT_EQ(x, p.get().get());
EXPECT_EQ(0u, p.get().tag());
TaggedIntrusivePtr<Y2, 3> p2({x, 5});
EXPECT_EQ(2u, p2->use_count());
EXPECT_EQ(x, p2.get().get());
EXPECT_EQ(5u, p2.get().tag());
TaggedIntrusivePtr<const X2, 3> p3 = p2;
EXPECT_EQ(3u, p3->use_count());
EXPECT_EQ(x, p3.get().get());
EXPECT_EQ(5u, p3.get().tag());
auto p4 = static_pointer_cast<const Y2>(p3);
static_assert(std::is_same_v<TaggedIntrusivePtr<const Y2, 3>, decltype(p4)>);
EXPECT_EQ(4u, p4->use_count());
EXPECT_EQ(x, p4.get().get());
EXPECT_EQ(5u, p4.get().tag());
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/tagged_ptr.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/tagged_ptr_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
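A minimal sketch of TaggedPtr storing flag bits in the low bits of a sufficiently aligned pointer, assuming only the API in the header above; Sample is an illustrative type.

#include <cassert>
#include "tensorstore/internal/tagged_ptr.h"

// Illustrative only: with 3 tag bits the pointee must be 8-byte aligned.
struct alignas(8) Sample {
  int value = 0;
};

void TaggedPtrUsageSketch() {
  using tensorstore::internal::TaggedPtr;
  Sample s;
  TaggedPtr<Sample, 3> p(&s, 5);  // Pointer plus tag 0b101.
  assert(p.get() == &s);
  assert(p.tag() == 5);
  p.set_tag<0>(false);  // Clear bit 0: tag becomes 0b100.
  assert(p.tag() == 4);
  p->value = 42;        // Dereferencing ignores the tag bits.
  assert(s.value == 42);
}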
3d035e9e-877d-400c-a39b-c8334bb20cfd | cpp | google/tensorstore | void_wrapper | tensorstore/internal/void_wrapper.h | tensorstore/internal/void_wrapper_test.cc | #ifndef TENSORSTORE_INTERNAL_VOID_WRAPPER_H_
#define TENSORSTORE_INTERNAL_VOID_WRAPPER_H_
#include <type_traits>
#include <utility>
#include "absl/status/status.h"
namespace tensorstore {
namespace internal {
struct Void {
explicit operator bool() const { return true; }
static void Unwrap(Void) {}
template <typename T>
static T Unwrap(T x) {
return x;
}
template <typename T>
using WrappedType = std::conditional_t<std::is_void_v<T>, Void, T>;
template <typename T>
using UnwrappedType = std::conditional_t<std::is_same_v<T, Void>, void, T>;
template <typename Func, typename... Args>
static std::enable_if_t<std::is_void_v<std::invoke_result_t<Func, Args...>>,
Void>
CallAndWrap(Func&& func, Args&&... args) {
std::forward<Func>(func)(std::forward<Args>(args)...);
return {};
}
template <typename Func, typename... Args>
static std::enable_if_t<!std::is_void_v<std::invoke_result_t<Func, Args...>>,
std::invoke_result_t<Func, Args...>>
CallAndWrap(Func&& func, Args&&... args) {
return std::forward<Func>(func)(std::forward<Args>(args)...);
}
};
}
}
#endif | #include "tensorstore/internal/void_wrapper.h"
#include <type_traits>
#include <gtest/gtest.h>
#include "tensorstore/internal/type_traits.h"
namespace {
using ::tensorstore::internal::Void;
static_assert(std::is_same_v<Void, Void::WrappedType<void>>);
static_assert(std::is_same_v<int, Void::WrappedType<int>>);
static_assert(std::is_same_v<void, Void::UnwrappedType<Void>>);
static_assert(std::is_same_v<int, Void::UnwrappedType<int>>);
TEST(VoidWrapperTest, BoolConversion) {
EXPECT_EQ(true, static_cast<bool>(Void{}));
}
TEST(VoidWrapperTest, Unwrap) {
EXPECT_EQ(3, Void::Unwrap(3));
Void::Unwrap(Void{});
}
TEST(VoidWrapperTest, CallAndWrap) {
int value;
const auto void_func = [&](int arg) -> void {
value = arg;
return;
};
const auto int_func = [&](int arg) {
value = arg;
return 3;
};
auto result = Void::CallAndWrap(void_func, 4);
static_assert(std::is_same_v<decltype(result), Void>);
EXPECT_EQ(4, value);
EXPECT_EQ(3, Void::CallAndWrap(int_func, 5));
EXPECT_EQ(5, value);
}
template <typename Func, typename... Args,
typename ResultType = std::invoke_result_t<Func, Args...>>
ResultType Repeat(int n, Func func, Args... args) {
Void::WrappedType<ResultType> result = {};
for (int i = 0; i < n; ++i) {
result = Void::CallAndWrap(func, args...);
if (!result) break;
}
return Void::Unwrap(result);
}
TEST(RepeatTest, VoidReturn) {
int num = 0;
Repeat(
3, [&](int k) { num += k; }, 2);
EXPECT_EQ(6, num);
}
TEST(RepeatTest, NonVoidReturn) {
{
int num = 0;
EXPECT_EQ(true, Repeat(
3,
[&](int k) {
num += k;
return true;
},
2));
EXPECT_EQ(6, num);
}
{
int num = 0;
EXPECT_EQ(false, Repeat(
3,
[&](int k) {
num += k;
return num < 4;
},
2));
EXPECT_EQ(4, num);
}
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/void_wrapper.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/void_wrapper_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
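A minimal sketch of the Void wrapper letting one generic driver handle both void- and bool-returning callbacks, assuming only the API in the header above; InvokeTwice is an illustrative helper.

#include <utility>
#include "tensorstore/internal/void_wrapper.h"

// Calls `func` twice, stopping early if it reports failure.  CallAndWrap
// turns a void result into Void, which converts to true, so void-returning
// callbacks are treated as "keep going".
template <typename Func>
bool InvokeTwice(Func&& func) {
  using tensorstore::internal::Void;
  auto first = Void::CallAndWrap(func);
  if (!first) return false;
  return static_cast<bool>(Void::CallAndWrap(func));
}

// Usage: InvokeTwice([] {}) returns true, while
// InvokeTwice([] { return false; }) returns false after a single call.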
22370324-516e-4f49-8766-2c999df7dcb1 | cpp | google/tensorstore | string_like | tensorstore/internal/string_like.h | tensorstore/internal/string_like_test.cc | #ifndef TENSORSTORE_INTERNAL_STRING_LIKE_H_
#define TENSORSTORE_INTERNAL_STRING_LIKE_H_
#include <cassert>
#include <cstddef>
#include <string>
#include <string_view>
#include "absl/base/optimization.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal {
template <typename T>
constexpr inline bool IsStringLike = false;
template <>
constexpr inline bool IsStringLike<std::string_view> = true;
template <>
constexpr inline bool IsStringLike<std::string> = true;
template <>
constexpr inline bool IsStringLike<const char*> = true;
class StringLikeSpan {
public:
StringLikeSpan() = default;
StringLikeSpan(tensorstore::span<const char* const> c_strings)
: c_strings_(c_strings.data()), size_and_tag_(c_strings.size() << 2) {}
StringLikeSpan(tensorstore::span<const std::string> strings)
: strings_(strings.data()), size_and_tag_((strings.size() << 2) | 1) {}
StringLikeSpan(tensorstore::span<const std::string_view> string_views)
: string_views_(string_views.data()),
size_and_tag_((string_views.size() << 2) | 2) {}
std::string_view operator[](ptrdiff_t i) const {
assert(i >= 0 && i < size());
switch (size_and_tag_ & 3) {
case 0:
return c_strings_[i];
case 1:
return strings_[i];
case 2:
return string_views_[i];
default:
ABSL_UNREACHABLE();
}
}
ptrdiff_t size() const { return size_and_tag_ >> 2; }
private:
union {
const char* const* c_strings_;
const std::string* strings_;
const std::string_view* string_views_;
};
ptrdiff_t size_and_tag_ = 0;
};
}
}
#endif | #include "tensorstore/internal/string_like.h"
#include <string>
#include <string_view>
#include <vector>
#include <gtest/gtest.h>
namespace {
using ::tensorstore::internal::StringLikeSpan;
TEST(StringLikeSpan, Default) {
StringLikeSpan x;
EXPECT_EQ(0, x.size());
}
TEST(StringLikeSpan, CStrings) {
std::vector<const char*> c_strings{"a", "b", "c"};
StringLikeSpan x(c_strings);
EXPECT_EQ(3, x.size());
EXPECT_EQ("a", x[0]);
EXPECT_EQ("b", x[1]);
EXPECT_EQ("c", x[2]);
}
TEST(StringLikeSpan, StdStrings) {
std::vector<std::string> std_strings{"a", "b", "c"};
StringLikeSpan x(std_strings);
EXPECT_EQ(3, x.size());
EXPECT_EQ("a", x[0]);
EXPECT_EQ("b", x[1]);
EXPECT_EQ("c", x[2]);
}
TEST(StringLikeSpan, StringViews) {
std::vector<std::string_view> string_views{"a", "b", "c"};
StringLikeSpan x(string_views);
EXPECT_EQ(3, x.size());
EXPECT_EQ("a", x[0]);
EXPECT_EQ("b", x[1]);
EXPECT_EQ("c", x[2]);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/string_like.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/string_like_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
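A minimal sketch of StringLikeSpan as a type-erased view over string-like sequences, assuming only the API in the header above; TotalLength is an illustrative function.

#include <cstddef>
#include <string>
#include <vector>
#include "tensorstore/internal/string_like.h"
#include "tensorstore/util/span.h"

// One non-template function accepts spans of const char*, std::string, or
// std::string_view via the implicit StringLikeSpan conversions.
std::size_t TotalLength(tensorstore::internal::StringLikeSpan strings) {
  std::size_t total = 0;
  for (std::ptrdiff_t i = 0; i < strings.size(); ++i) {
    total += strings[i].size();
  }
  return total;
}

void StringLikeSpanUsageSketch() {
  std::vector<std::string> names{"ab", "cde"};
  std::size_t n = TotalLength(tensorstore::span<const std::string>(names));
  (void)n;  // n == 5
}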
2707fc17-810b-4e82-b824-5dc398f336ce | cpp | google/tensorstore | multi_vector_view | tensorstore/internal/multi_vector_view.h | tensorstore/internal/multi_vector_view_test.cc | #ifndef TENSORSTORE_INTERNAL_MULTI_VECTOR_VIEW_H_
#define TENSORSTORE_INTERNAL_MULTI_VECTOR_VIEW_H_
#include <cassert>
#include <cstddef>
#include "tensorstore/index.h"
#include "tensorstore/internal/gdb_scripting.h"
#include "tensorstore/internal/meta.h"
#include "tensorstore/internal/type_traits.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/span.h"
TENSORSTORE_GDB_AUTO_SCRIPT("multi_vector_gdb.py")
namespace tensorstore {
namespace internal {
template <DimensionIndex Extent, typename... Ts>
class MultiVectorViewStorage;
template <typename StorageT>
class MultiVectorAccess;
template <ptrdiff_t Extent, typename... Ts>
class MultiVectorViewStorage {
private:
friend class MultiVectorAccess<MultiVectorViewStorage>;
constexpr static StaticRank<Extent> InternalGetExtent() { return {}; }
void InternalSetExtent(StaticRank<Extent>) {}
void* InternalGetDataPointer(size_t i) const {
return const_cast<void*>(data_[i]);
}
void InternalSetDataPointer(size_t i, const void* ptr) { data_[i] = ptr; }
const void* data_[sizeof...(Ts)]{};
};
template <typename... Ts>
class MultiVectorViewStorage<0, Ts...> {
private:
friend class MultiVectorAccess<MultiVectorViewStorage>;
constexpr static StaticRank<0> InternalGetExtent() { return {}; }
void InternalSetExtent(StaticRank<0>) {}
void* InternalGetDataPointer(size_t i) const { return nullptr; }
void InternalSetDataPointer(size_t i, const void* ptr) {}
};
template <typename... Ts>
class MultiVectorViewStorage<dynamic_rank, Ts...> {
private:
friend class MultiVectorAccess<MultiVectorViewStorage>;
ptrdiff_t InternalGetExtent() const { return extent_; }
void InternalSetExtent(ptrdiff_t extent) { extent_ = extent; }
void* InternalGetDataPointer(size_t i) const {
return const_cast<void*>(data_[i]);
}
void InternalSetDataPointer(size_t i, const void* ptr) { data_[i] = ptr; }
const void* data_[sizeof...(Ts)]{};
ptrdiff_t extent_ = 0;
};
template <DimensionIndex Extent, typename... Ts>
class MultiVectorAccess<MultiVectorViewStorage<Extent, Ts...>> {
public:
using StorageType = MultiVectorViewStorage<Extent, Ts...>;
using ExtentType = StaticOrDynamicRank<Extent>;
constexpr static ptrdiff_t static_extent = Extent;
constexpr static size_t num_vectors = sizeof...(Ts);
template <size_t I>
using ElementType = TypePackElement<I, Ts...>;
template <size_t I>
using ConstElementType = TypePackElement<I, Ts...>;
static ExtentType GetExtent(const StorageType& storage) {
return storage.InternalGetExtent();
}
template <size_t I>
static tensorstore::span<ElementType<I>, Extent> get(
const StorageType* array) noexcept {
return {static_cast<ElementType<I>*>(array->InternalGetDataPointer(I)),
array->InternalGetExtent()};
}
static void Assign(StorageType* array, ExtentType extent, Ts*... pointers) {
array->InternalSetExtent(extent);
size_t i = 0;
(array->InternalSetDataPointer(i++, pointers), ...);
}
static void Assign(StorageType* array,
tensorstore::span<Ts, Extent>... spans) {
const ExtentType extent =
GetFirstArgument(GetStaticOrDynamicExtent(spans)...);
assert(((spans.size() == extent) && ...));
Assign(array, extent, spans.data()...);
}
};
}
}
#endif | #include "tensorstore/internal/multi_vector_view.h"
#include <cstddef>
#include <type_traits>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/rank.h"
#include "tensorstore/util/span.h"
namespace {
using ::tensorstore::dynamic_rank;
using ::tensorstore::internal::MultiVectorAccess;
using ::tensorstore::internal::MultiVectorViewStorage;
using ::testing::ElementsAre;
static_assert(
MultiVectorAccess<MultiVectorViewStorage<3, int, float>>::static_extent ==
3);
static_assert(
MultiVectorAccess<MultiVectorViewStorage<3, int, float>>::num_vectors == 2);
TEST(MultiVectorViewStorageTest, StaticExtent2) {
using Container = MultiVectorViewStorage<2, float, int>;
using Access = MultiVectorAccess<Container>;
static_assert(
std::is_same_v<float, typename Access::template ElementType<0>>);
static_assert(std::is_same_v<int, typename Access::template ElementType<1>>);
static_assert(
std::is_same_v<float, typename Access::template ConstElementType<0>>);
static_assert(
std::is_same_v<int, typename Access::template ConstElementType<1>>);
Container vec;
static_assert(std::is_same_v<std::integral_constant<ptrdiff_t, 2>,
decltype(Access::GetExtent(vec))>);
EXPECT_EQ(2, Access::GetExtent(vec));
EXPECT_EQ(nullptr, Access::template get<0>(&vec).data());
EXPECT_EQ(nullptr, Access::template get<1>(&vec).data());
float float_arr[] = {1, 2};
int int_arr[] = {3, 4};
Access::Assign(&vec, std::integral_constant<ptrdiff_t, 2>(), float_arr,
int_arr);
EXPECT_THAT(Access::template get<0>(&vec), ElementsAre(1, 2));
EXPECT_THAT(Access::template get<1>(&vec), ElementsAre(3, 4));
EXPECT_EQ(&float_arr[0], Access::template get<0>(&vec).data());
EXPECT_EQ(&int_arr[0], Access::template get<1>(&vec).data());
float float_arr2[] = {5, 6};
int int_arr2[] = {7, 8};
Access::Assign(&vec, tensorstore::span(float_arr2),
tensorstore::span(int_arr2));
EXPECT_EQ(&float_arr2[0], Access::template get<0>(&vec).data());
EXPECT_EQ(&int_arr2[0], Access::template get<1>(&vec).data());
}
TEST(MultiVectorViewStorageTest, StaticExtent0) {
using Container = MultiVectorViewStorage<0, float, int>;
using Access = MultiVectorAccess<Container>;
static_assert(
std::is_same_v<float, typename Access::template ElementType<0>>);
static_assert(std::is_same_v<int, typename Access::template ElementType<1>>);
static_assert(
std::is_same_v<float, typename Access::template ConstElementType<0>>);
static_assert(
std::is_same_v<int, typename Access::template ConstElementType<1>>);
Container vec;
static_assert(std::is_same_v<std::integral_constant<ptrdiff_t, 0>,
decltype(Access::GetExtent(vec))>);
EXPECT_EQ(0, Access::GetExtent(vec));
EXPECT_EQ(nullptr, Access::template get<0>(&vec).data());
EXPECT_EQ(nullptr, Access::template get<1>(&vec).data());
Access::Assign(&vec, std::integral_constant<ptrdiff_t, 0>(), nullptr,
nullptr);
EXPECT_EQ(nullptr, Access::template get<0>(&vec).data());
EXPECT_EQ(nullptr, Access::template get<1>(&vec).data());
Access::Assign(&vec, tensorstore::span<float, 0>{},
tensorstore::span<int, 0>{});
}
TEST(MultiVectorViewStorageTest, DynamicExtent) {
using Container = MultiVectorViewStorage<dynamic_rank, float, int>;
using Access = MultiVectorAccess<Container>;
static_assert(
std::is_same_v<float, typename Access::template ElementType<0>>);
static_assert(std::is_same_v<int, typename Access::template ElementType<1>>);
static_assert(
std::is_same_v<float, typename Access::template ConstElementType<0>>);
static_assert(
std::is_same_v<int, typename Access::template ConstElementType<1>>);
Container vec;
static_assert(std::is_same_v<ptrdiff_t, decltype(Access::GetExtent(vec))>);
EXPECT_EQ(0, Access::GetExtent(vec));
EXPECT_EQ(nullptr, Access::template get<0>(&vec).data());
EXPECT_EQ(nullptr, Access::template get<1>(&vec).data());
float float_arr[] = {1, 2};
int int_arr[] = {3, 4};
Access::Assign(&vec, std::integral_constant<ptrdiff_t, 2>(), float_arr,
int_arr);
EXPECT_EQ(2, Access::GetExtent(vec));
EXPECT_THAT(Access::template get<0>(&vec), ElementsAre(1, 2));
EXPECT_THAT(Access::template get<1>(&vec), ElementsAre(3, 4));
EXPECT_EQ(&float_arr[0], Access::template get<0>(&vec).data());
EXPECT_EQ(&int_arr[0], Access::template get<1>(&vec).data());
float float_arr2[] = {5, 6, 7};
int int_arr2[] = {7, 8, 9};
Access::Assign(&vec, tensorstore::span<float>(float_arr2),
tensorstore::span<int>(int_arr2));
EXPECT_EQ(3, Access::GetExtent(vec));
EXPECT_EQ(&float_arr2[0], Access::template get<0>(&vec).data());
EXPECT_EQ(&int_arr2[0], Access::template get<1>(&vec).data());
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/multi_vector_view.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/multi_vector_view_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
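A minimal sketch of MultiVectorViewStorage as a non-owning view over separately allocated, equal-length arrays, assuming only the API in the header above; the names below are illustrative.

#include <cassert>
#include "tensorstore/internal/multi_vector_view.h"

void MultiVectorViewUsageSketch() {
  using Container = tensorstore::internal::MultiVectorViewStorage<
      tensorstore::dynamic_rank, float, int>;
  using Access = tensorstore::internal::MultiVectorAccess<Container>;
  float floats[] = {1.5f, 2.5f};
  int ints[] = {3, 4};
  Container view;
  // The view only records the pointers and the shared extent.
  Access::Assign(&view, 2, floats, ints);
  assert(Access::GetExtent(view) == 2);
  assert(Access::get<0>(&view)[0] == 1.5f);
  assert(Access::get<1>(&view)[1] == 4);
  floats[0] = 9.5f;  // The view aliases the original arrays.
  assert(Access::get<0>(&view)[0] == 9.5f);
}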
df23d9ae-e864-4c4b-8284-7fedafb1249f | cpp | google/tensorstore | integer_overflow | tensorstore/internal/integer_overflow.h | tensorstore/internal/integer_overflow_test.cc | #ifndef TENSORSTORE_INTERNAL_INTEGER_OVERFLOW_H_
#define TENSORSTORE_INTERNAL_INTEGER_OVERFLOW_H_
#include <limits>
#include <type_traits>
#include "absl/base/attributes.h"
namespace tensorstore {
namespace internal {
namespace wrap_on_overflow {
#if ABSL_HAVE_ATTRIBUTE(no_sanitize) && defined(__clang__)
#define TENSORSTORE_ATTRIBUTE_NO_SANITIZE_UNSIGNED_INTEGER_OVERFLOW \
__attribute__((no_sanitize("unsigned-integer-overflow")))
#else
#define TENSORSTORE_ATTRIBUTE_NO_SANITIZE_UNSIGNED_INTEGER_OVERFLOW
#endif
#define TENSORSTORE_INTERNAL_DEFINE_WRAP_ON_OVERFLOW_OP(OP, NAME) \
template <typename T> \
TENSORSTORE_ATTRIBUTE_NO_SANITIZE_UNSIGNED_INTEGER_OVERFLOW \
std::enable_if_t<std::is_integral<T>::value, T> \
NAME(T a, T b) { \
using UnsignedT = std::make_unsigned_t<T>; \
return static_cast<T>(static_cast<UnsignedT>( \
static_cast<UnsignedT>(a) OP static_cast<UnsignedT>(b))); \
  }
TENSORSTORE_INTERNAL_DEFINE_WRAP_ON_OVERFLOW_OP(+, Add)
TENSORSTORE_INTERNAL_DEFINE_WRAP_ON_OVERFLOW_OP(-, Subtract)
TENSORSTORE_INTERNAL_DEFINE_WRAP_ON_OVERFLOW_OP(*, Multiply)
#undef TENSORSTORE_INTERNAL_DEFINE_WRAP_ON_OVERFLOW_OP
template <typename AccumType, typename T0, typename T1>
inline AccumType InnerProduct(std::ptrdiff_t n, const T0* a, const T1* b) {
AccumType sum = 0;
for (std::ptrdiff_t i = 0; i < n; ++i) {
sum = Add(sum, Multiply(static_cast<AccumType>(a[i]),
static_cast<AccumType>(b[i])));
}
return sum;
}
template <ptrdiff_t N, typename AccumType, typename T0, typename T1>
inline AccumType InnerProduct(const T0* a, const T1* b) {
AccumType sum = 0;
for (std::ptrdiff_t i = 0; i < N; ++i) {
sum = Add(sum, Multiply(static_cast<AccumType>(a[i]),
static_cast<AccumType>(b[i])));
}
return sum;
}
}
template <typename T>
constexpr bool AddOverflow(T a, T b, T* result) {
#if defined(__clang__) || !defined(_MSC_VER)
return __builtin_add_overflow(a, b, result);
#else
*result = wrap_on_overflow::Add(a, b);
return (a > 0 && (b > std::numeric_limits<T>::max() - a)) ||
(a < 0 && (b < std::numeric_limits<T>::min() - a));
#endif
}
template <typename T>
constexpr T AddSaturate(T a, T b) {
T result;
if (AddOverflow(a, b, &result)) {
result = (b >= 0 ? std::numeric_limits<T>::max()
: std::numeric_limits<T>::min());
}
return result;
}
template <typename T>
constexpr bool SubOverflow(T a, T b, T* result) {
#if defined(__clang__) || !defined(_MSC_VER)
return __builtin_sub_overflow(a, b, result);
#else
*result = wrap_on_overflow::Subtract(a, b);
return (b < 0 && (a > std::numeric_limits<T>::max() + b)) ||
(b > 0 && (a < std::numeric_limits<T>::min() + b));
#endif
}
template <typename T>
constexpr bool MulOverflow(T a, T b, T* result) {
#if defined(__clang__) || !defined(_MSC_VER)
return __builtin_mul_overflow(a, b, result);
#else
const T r = *result = wrap_on_overflow::Multiply(a, b);
return b && (r / b) != a;
#endif
}
}
}
#endif | #include "tensorstore/internal/integer_overflow.h"
#include <cstdint>
#include <gtest/gtest.h>
#include "tensorstore/index.h"
namespace {
using ::tensorstore::Index;
using ::tensorstore::internal::AddOverflow;
using ::tensorstore::internal::AddSaturate;
using ::tensorstore::internal::MulOverflow;
using ::tensorstore::internal::SubOverflow;
using ::tensorstore::internal::wrap_on_overflow::Add;
using ::tensorstore::internal::wrap_on_overflow::InnerProduct;
using ::tensorstore::internal::wrap_on_overflow::Multiply;
TEST(AddTest, Overflow) {
EXPECT_EQ(std::int32_t{-0x80000000LL},
Add(std::int32_t{0x40000000}, std::int32_t{0x40000000}));
}
TEST(MultiplyTest, Overflow) {
EXPECT_EQ(std::int32_t{-0x80000000LL},
Multiply(std::int32_t{0x40000000}, std::int32_t{2}));
}
TEST(InnerProductTest, Basic) {
const Index a[] = {1, 2, 3};
const Index b[] = {4, 5, 6};
EXPECT_EQ(1 * 4 + 2 * 5 + 3 * 6, InnerProduct<Index>(3, a, b));
}
TEST(InnerProductTest, Convert) {
const uint32_t a[] = {0x80000000};
const uint32_t b[] = {2};
EXPECT_EQ(Index{0x100000000}, InnerProduct<Index>(1, a, b));
}
TEST(InnerProductTest, WrapOnOverflowMultiply) {
const Index a[] = {Index(1) << 62, 2, 3};
const Index b[] = {4, 5, 6};
EXPECT_EQ(Index{2 * 5 + 3 * 6}, InnerProduct<Index>(3, a, b));
}
TEST(InnerProductTest, WrapOnOverflowAdd) {
const Index a[] = {Index(1) << 62, Index(1) << 62};
const Index b[] = {2, 2};
EXPECT_EQ(Index{0}, InnerProduct<Index>(2, a, b));
}
TEST(MulOverflow, Uint32) {
uint32_t a, b, c;
a = 0x7fffffff;
b = 2;
EXPECT_EQ(false, MulOverflow(a, b, &c));
EXPECT_EQ(uint32_t{0xfffffffe}, c);
EXPECT_EQ(false, MulOverflow(b, a, &c));
EXPECT_EQ(uint32_t{0xfffffffe}, c);
a = 0x80000000;
c = 2;
EXPECT_EQ(true, MulOverflow(a, b, &c));
EXPECT_EQ(uint32_t{0}, c);
EXPECT_EQ(true, MulOverflow(b, a, &c));
EXPECT_EQ(uint32_t{0}, c);
}
TEST(MulOverflow, Int32) {
std::int32_t a, b, c;
a = -0x40000000;
b = 2;
EXPECT_EQ(false, MulOverflow(a, b, &c));
EXPECT_EQ(std::int32_t{-0x80000000LL}, c);
EXPECT_EQ(false, MulOverflow(b, a, &c));
EXPECT_EQ(std::int32_t{-0x80000000LL}, c);
a = 0x40000000;
c = 2;
EXPECT_EQ(true, MulOverflow(a, b, &c));
EXPECT_EQ(std::int32_t{-0x80000000LL}, c);
EXPECT_EQ(true, MulOverflow(b, a, &c));
EXPECT_EQ(std::int32_t{-0x80000000LL}, c);
}
TEST(AddOverflow, Uint32) {
uint32_t a, b, c;
a = 0x7fffffff;
b = 0x80000000;
EXPECT_EQ(false, AddOverflow(a, b, &c));
EXPECT_EQ(uint32_t{0xffffffff}, c);
EXPECT_EQ(false, AddOverflow(b, a, &c));
EXPECT_EQ(uint32_t{0xffffffff}, c);
a = 0x80000000;
c = 0x80000000;
  EXPECT_EQ(true, AddOverflow(a, b, &c));
EXPECT_EQ(uint32_t{0}, c);
}
TEST(AddOverflow, Int32) {
std::int32_t a, b, c;
a = 0x40000000;
b = 0x3fffffff;
EXPECT_EQ(false, AddOverflow(a, b, &c));
EXPECT_EQ(std::int32_t{0x7fffffff}, c);
EXPECT_EQ(false, AddOverflow(b, a, &c));
EXPECT_EQ(std::int32_t{0x7fffffff}, c);
a = -0x40000000;
b = -0x40000000;
EXPECT_EQ(false, AddOverflow(a, b, &c));
EXPECT_EQ(std::int32_t{-0x80000000LL}, c);
a = 0x40000000;
b = 0x40000000;
EXPECT_EQ(true, AddOverflow(a, b, &c));
EXPECT_EQ(std::int32_t{-0x80000000LL}, c);
}
TEST(AddSaturate, Int32) {
EXPECT_EQ(0x7fffffff, AddSaturate<int32_t>(0x40000000, 0x3fffffff));
EXPECT_EQ(0x7fffffff, AddSaturate<int32_t>(0x40000000, 0x40000000));
EXPECT_EQ(-0x80000000, AddSaturate<int32_t>(-0x40000000, -0x40000000));
EXPECT_EQ(-0x80000000, AddSaturate<int32_t>(-0x40000000, -0x41000000));
}
TEST(SubOverflow, Uint32) {
uint32_t a, b, c;
a = 0x80000000;
b = 0x7fffffff;
EXPECT_EQ(false, SubOverflow(a, b, &c));
EXPECT_EQ(uint32_t{1}, c);
a = 0x7fffffff;
b = 0x80000000;
EXPECT_EQ(true, SubOverflow(a, b, &c));
EXPECT_EQ(uint32_t{0xffffffff}, c);
}
TEST(SubOverflow, Int32) {
std::int32_t a, b, c;
a = -0x40000000;
b = 0x40000000;
EXPECT_EQ(false, SubOverflow(a, b, &c));
EXPECT_EQ(std::int32_t{-0x80000000LL}, c);
a = 0x40000000;
b = -0x40000000;
EXPECT_EQ(true, SubOverflow(a, b, &c));
EXPECT_EQ(std::int32_t{-0x80000000LL}, c);
a = -0x40000001;
b = 0x40000000;
EXPECT_EQ(true, SubOverflow(a, b, &c));
EXPECT_EQ(std::int32_t{0x7fffffff}, c);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/integer_overflow.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/integer_overflow_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
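A minimal sketch of the overflow helpers shown above: the *Overflow functions report overflow through their return value while still writing the wrapped result, and AddSaturate clamps instead; the values below are illustrative.

#include <cstdint>
#include "tensorstore/internal/integer_overflow.h"

void OverflowUsageSketch() {
  namespace ti = tensorstore::internal;
  int32_t sum = 0;
  // Overflows: returns true and stores the wrapped value (INT32_MIN).
  bool add_overflowed = ti::AddOverflow(int32_t{0x7fffffff}, int32_t{1}, &sum);
  // Clamps to INT32_MAX instead of wrapping.
  int32_t clamped = ti::AddSaturate(int32_t{0x7fffffff}, int32_t{1});
  int64_t product = 0;
  // 2^40 * 2^40 does not fit in int64_t: returns true.
  bool mul_overflowed =
      ti::MulOverflow(int64_t{1} << 40, int64_t{1} << 40, &product);
  (void)add_overflowed;
  (void)clamped;
  (void)mul_overflowed;
  (void)product;
}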
8eece88e-b26c-48bd-9e50-14e9a4efb88c | cpp | google/tensorstore | unique_with_intrusive_allocator | tensorstore/internal/unique_with_intrusive_allocator.h | tensorstore/internal/unique_with_intrusive_allocator_test.cc | #ifndef TENSORSTORE_INTERNAL_UNIQUE_WITH_INTRUSIVE_ALLOCATOR_H_
#define TENSORSTORE_INTERNAL_UNIQUE_WITH_INTRUSIVE_ALLOCATOR_H_
#include <memory>
#include <new>
#include <utility>
namespace tensorstore {
namespace internal {
template <typename T>
struct IntrusiveAllocatorDeleter {
void operator()(T* p) {
auto allocator = p->get_allocator();
typename std::allocator_traits<decltype(
allocator)>::template rebind_alloc<T>
rebound_allocator(std::move(allocator));
std::allocator_traits<decltype(rebound_allocator)>::destroy(
rebound_allocator, p);
std::allocator_traits<decltype(rebound_allocator)>::deallocate(
rebound_allocator, p, 1);
}
};
template <typename T, typename Allocator, typename... Arg>
std::unique_ptr<T, IntrusiveAllocatorDeleter<T>>
MakeUniqueWithIntrusiveAllocator(Allocator allocator, Arg&&... arg) {
using ReboundAllocator =
typename std::allocator_traits<Allocator>::template rebind_alloc<T>;
ReboundAllocator rebound_allocator(std::move(allocator));
auto temp_deleter = [&rebound_allocator](T* p) {
std::allocator_traits<ReboundAllocator>::deallocate(rebound_allocator, p,
1);
};
std::unique_ptr<T, decltype(temp_deleter)> temp_ptr(
std::allocator_traits<ReboundAllocator>::allocate(rebound_allocator, 1),
temp_deleter);
new (temp_ptr.get())
T(std::forward<Arg>(arg)..., std::move(rebound_allocator));
return std::unique_ptr<T, IntrusiveAllocatorDeleter<T>>(temp_ptr.release());
}
struct VirtualDestroyDeleter {
template <typename T>
void operator()(T* p) const {
p->Destroy();
}
};
template <typename Derived, typename IntrusiveBase>
class IntrusiveAllocatorBase : public IntrusiveBase {
public:
using IntrusiveBase::IntrusiveBase;
void Destroy() override {
IntrusiveAllocatorDeleter<Derived>()(static_cast<Derived*>(this));
}
};
template <typename T, typename Allocator, typename... Arg>
std::unique_ptr<T, VirtualDestroyDeleter>
MakeUniqueWithVirtualIntrusiveAllocator(Allocator allocator, Arg&&... arg) {
return std::unique_ptr<T, VirtualDestroyDeleter>(
MakeUniqueWithIntrusiveAllocator<T>(std::move(allocator),
std::forward<Arg>(arg)...)
.release());
}
}
}
#endif | #include "tensorstore/internal/unique_with_intrusive_allocator.h"
#include <cstddef>
#include <memory>
#include <vector>
#include <gtest/gtest.h>
#include "tensorstore/internal/arena.h"
namespace {
using ::tensorstore::internal::Arena;
using ::tensorstore::internal::ArenaAllocator;
using ::tensorstore::internal::IntrusiveAllocatorBase;
using ::tensorstore::internal::MakeUniqueWithIntrusiveAllocator;
using ::tensorstore::internal::MakeUniqueWithVirtualIntrusiveAllocator;
class Base {
public:
virtual void Destroy() = 0;
virtual ~Base() = default;
};
class Derived : public IntrusiveAllocatorBase<Derived, Base> {
public:
Derived(ArenaAllocator<> allocator)
      : vec(100, allocator) {}
ArenaAllocator<> get_allocator() const { return vec.get_allocator(); }
std::vector<double, ArenaAllocator<double>> vec;
};
TEST(UniqueWithVirtualIntrusiveAllocatorTest, Basic) {
Arena arena;
std::unique_ptr<Base, tensorstore::internal::VirtualDestroyDeleter> ptr =
MakeUniqueWithVirtualIntrusiveAllocator<Derived>(
ArenaAllocator<>(&arena));
}
class Foo {
public:
using allocator_type = ArenaAllocator<int>;
Foo(size_t n, ArenaAllocator<int> allocator) : vec_(n, allocator) {}
allocator_type get_allocator() const { return vec_.get_allocator(); }
int operator()(int x) const { return vec_[x]; }
void operator()(int x, int y) { vec_[x] = y; }
private:
std::vector<int, allocator_type> vec_;
};
TEST(UniqueWithIntrusiveAllocatorTest, Basic) {
unsigned char buffer[200];
Arena arena(buffer);
auto ptr =
MakeUniqueWithIntrusiveAllocator<Foo>(ArenaAllocator<>(&arena), 10);
(*ptr)(2, 3);
EXPECT_EQ(3, (*ptr)(2));
EXPECT_EQ(3, (static_cast<const Foo&>(*ptr)(2)));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/unique_with_intrusive_allocator.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/unique_with_intrusive_allocator_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
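A minimal sketch of MakeUniqueWithIntrusiveAllocator: the object exposes get_allocator(), so the deleter can recover the allocator to destroy and deallocate it without storing extra state. Buffer is an illustrative type that assumes the Arena allocator used by the test above.

#include <cstddef>
#include <vector>
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/unique_with_intrusive_allocator.h"

// Illustrative only: a container-like type that stores its own allocator.
class Buffer {
 public:
  using allocator_type = tensorstore::internal::ArenaAllocator<int>;
  Buffer(std::size_t n, allocator_type allocator) : data_(n, allocator) {}
  allocator_type get_allocator() const { return data_.get_allocator(); }

 private:
  std::vector<int, allocator_type> data_;
};

void UniqueWithIntrusiveAllocatorUsageSketch() {
  tensorstore::internal::Arena arena;
  // Returns std::unique_ptr<Buffer, IntrusiveAllocatorDeleter<Buffer>>; the
  // Buffer is constructed with (16, rebound ArenaAllocator).
  auto ptr = tensorstore::internal::MakeUniqueWithIntrusiveAllocator<Buffer>(
      tensorstore::internal::ArenaAllocator<>(&arena), std::size_t{16});
  (void)ptr;
}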
59593b95-a75a-4b03-b909-6c7098377862 | cpp | google/tensorstore | json_registry | tensorstore/internal/json_registry.h | tensorstore/internal/json_registry_test.cc | #ifndef TENSORSTORE_INTERNAL_JSON_REGISTRY_H_
#define TENSORSTORE_INTERNAL_JSON_REGISTRY_H_
#include <memory>
#include <string>
#include <string_view>
#include <type_traits>
#include <typeindex>
#include <utility>
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/json_registry_fwd.h"
#include "tensorstore/internal/json_registry_impl.h"
#include "tensorstore/json_serialization_options.h"
namespace tensorstore {
namespace internal {
template <typename Base, typename LoadOptions, typename SaveOptions,
typename BasePtr>
class JsonRegistry {
static_assert(std::has_virtual_destructor_v<Base>);
public:
auto KeyBinder() const { return KeyBinderImpl{impl_}; }
constexpr auto RegisteredObjectBinder() const {
return RegisteredObjectBinderImpl{impl_};
}
template <typename MemberName>
auto MemberBinder(MemberName member_name) const {
namespace jb = tensorstore::internal_json_binding;
return jb::Sequence(jb::Member(member_name, this->KeyBinder()),
RegisteredObjectBinder());
}
template <typename T, typename Binder>
void Register(std::string_view id, Binder binder) {
static_assert(std::is_base_of_v<Base, T>);
auto entry =
std::make_unique<internal_json_registry::JsonRegistryImpl::Entry>();
entry->id = std::string(id);
entry->type = &typeid(T);
entry->allocate =
+[](void* obj) { static_cast<BasePtr*>(obj)->reset(new T); };
entry->binder = [binder](
auto is_loading, const void* options, const void* obj,
::nlohmann::json::object_t* j_obj) -> absl::Status {
using Options = std::conditional_t<decltype(is_loading)::value,
LoadOptions, SaveOptions>;
using Obj = std::conditional_t<decltype(is_loading)::value, T, const T>;
return binder(is_loading, *static_cast<const Options*>(options),
const_cast<Obj*>(
static_cast<const Obj*>(static_cast<const Base*>(obj))),
j_obj);
};
impl_.Register(std::move(entry));
}
private:
struct KeyBinderImpl {
const internal_json_registry::JsonRegistryImpl& impl;
template <typename Options>
absl::Status operator()(std::true_type is_loading, const Options& options,
BasePtr* obj, ::nlohmann::json* j) const {
return impl.LoadKey(obj, j);
}
template <typename Ptr, typename Options>
absl::Status operator()(std::false_type is_loading, const Options& options,
const Ptr* obj, ::nlohmann::json* j) const {
static_assert(std::is_convertible_v<decltype(&**obj), const Base*>);
return impl.SaveKey(typeid(**obj), j);
}
};
struct RegisteredObjectBinderImpl {
const internal_json_registry::JsonRegistryImpl& impl;
absl::Status operator()(std::true_type is_loading,
const LoadOptions& options, BasePtr* obj,
::nlohmann::json::object_t* j_obj) const {
if (!*obj) return absl::OkStatus();
return impl.LoadRegisteredObject(typeid(*obj->get()), &options,
static_cast<const Base*>(&**obj), j_obj);
}
template <typename Ptr>
absl::Status operator()(std::false_type is_loading,
const SaveOptions& options, const Ptr* obj,
::nlohmann::json::object_t* j_obj) const {
static_assert(std::is_convertible_v<decltype(&**obj), const Base*>);
if (!*obj) return absl::OkStatus();
return impl.SaveRegisteredObject(typeid(**obj), &options,
static_cast<const Base*>(&**obj), j_obj);
}
};
internal_json_registry::JsonRegistryImpl impl_;
};
}
}
#endif | #include "tensorstore/internal/json_registry.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/base/no_destructor.h"
#include "absl/status/status.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/json_serialization_options.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal::IntrusivePtr;
using ::tensorstore::internal::JsonRegistry;
class MyInterface
: public tensorstore::internal::AtomicReferenceCount<MyInterface> {
public:
virtual int Whatever() const = 0;
virtual ~MyInterface() = default;
};
class MyInterfacePtr : public IntrusivePtr<MyInterface> {
public:
TENSORSTORE_DECLARE_JSON_DEFAULT_BINDER(MyInterfacePtr,
tensorstore::JsonSerializationOptions,
tensorstore::JsonSerializationOptions)
};
using Registry =
JsonRegistry<MyInterface, tensorstore::JsonSerializationOptions,
tensorstore::JsonSerializationOptions>;
Registry& GetRegistry() {
static absl::NoDestructor<Registry> registry;
return *registry;
}
TENSORSTORE_DEFINE_JSON_DEFAULT_BINDER(MyInterfacePtr, [](auto is_loading,
const auto& options,
auto* obj,
::nlohmann::json* j) {
namespace jb = tensorstore::internal_json_binding;
return jb::Object(GetRegistry().MemberBinder("id"))(is_loading, options, obj,
j);
})
class FooImpl : public MyInterface {
public:
int x;
int Whatever() const override { return x; }
};
class BarImpl : public MyInterface {
public:
float y;
int Whatever() const override { return static_cast<int>(y); }
};
struct FooRegistration {
FooRegistration() {
namespace jb = tensorstore::internal_json_binding;
GetRegistry().Register<FooImpl>(
"foo", jb::Object(jb::Member("x", jb::Projection(&FooImpl::x))));
}
} foo_registration;
struct BarRegistration {
BarRegistration() {
namespace jb = tensorstore::internal_json_binding;
GetRegistry().Register<BarImpl>(
"bar", jb::Object(jb::Member("y", jb::Projection(&BarImpl::y))));
}
} bar_registration;
TEST(RegistryTest, Foo) {
const ::nlohmann::json j{{"id", "foo"}, {"x", 10}};
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto obj, MyInterfacePtr::FromJson(j));
EXPECT_EQ(10, obj->Whatever());
EXPECT_EQ(j, obj.ToJson());
}
TEST(RegistryTest, Bar) {
const ::nlohmann::json j{{"id", "bar"}, {"y", 42.5}};
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto obj, MyInterfacePtr::FromJson(j));
EXPECT_EQ(42, obj->Whatever());
EXPECT_EQ(j, obj.ToJson());
}
TEST(RegistryTest, Unknown) {
EXPECT_THAT(MyInterfacePtr::FromJson({{"id", "baz"}, {"y", 42.5}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing object member \"id\": "
"\"baz\" is not registered"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json_registry.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json_registry_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
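A sketch of how a third implementation could be registered alongside FooImpl and BarImpl, if placed in the same anonymous namespace as the test above; `BazImpl` and its "z" member are hypothetical.
class BazImpl : public MyInterface {
 public:
  int z;
  int Whatever() const override { return z; }
};

struct BazRegistration {
  BazRegistration() {
    namespace jb = tensorstore::internal_json_binding;
    GetRegistry().Register<BazImpl>(
        "baz", jb::Object(jb::Member("z", jb::Projection(&BazImpl::z))));
  }
} baz_registration;

// With this registration in place, {"id": "baz", "z": 7} would round-trip
// through MyInterfacePtr::FromJson/ToJson just like the "foo" and "bar"
// cases, and "baz" would no longer trigger the "is not registered" error
// exercised by RegistryTest.Unknown.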
8377d019-df08-42d2-95d1-41c6f8d85eb8 | cpp | google/tensorstore | global_initializer | tensorstore/internal/global_initializer.h | tensorstore/internal/global_initializer_test.cc | #ifndef TENSORSTORE_INTERNAL_GLOBAL_INITIALIZER_H_
#define TENSORSTORE_INTERNAL_GLOBAL_INITIALIZER_H_
#include "tensorstore/internal/preprocessor/cat.h"
#define TENSORSTORE_GLOBAL_INITIALIZER \
namespace { \
const struct TENSORSTORE_PP_CAT(TsGlobalInit, __LINE__) { \
TENSORSTORE_PP_CAT(TsGlobalInit, __LINE__) \
(); \
} TENSORSTORE_PP_CAT(tensorstore_global_init, __LINE__); \
} \
TENSORSTORE_PP_CAT(TsGlobalInit, __LINE__)::TENSORSTORE_PP_CAT( \
TsGlobalInit, __LINE__)()
#endif | #include "tensorstore/internal/global_initializer.h"
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace {
std::vector<int> vec;
TENSORSTORE_GLOBAL_INITIALIZER { vec.push_back(1); }
TENSORSTORE_GLOBAL_INITIALIZER { vec.push_back(2); }
TENSORSTORE_GLOBAL_INITIALIZER { vec.push_back(3); }
TEST(GlobalInitializerTest, Ordering) {
EXPECT_THAT(vec, ::testing::ElementsAre(1, 2, 3));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/global_initializer.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/global_initializer_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
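For reference, a sketch of what one use of the macro expands to; the line number 42 and `RegisterSomething()` are placeholders.
// TENSORSTORE_GLOBAL_INITIALIZER { RegisterSomething(); }   // e.g. on line 42
//
// expands approximately to:
//
//   namespace {
//   const struct TsGlobalInit42 {
//     TsGlobalInit42();
//   } tensorstore_global_init42;
//   }  // namespace
//   TsGlobalInit42::TsGlobalInit42() { RegisterSomething(); }
//
// The braces following the macro become the constructor body, so it runs once
// during static initialization of the translation unit, in definition order
// within that unit (which is what the Ordering test above relies on).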
4de958a3-e1a5-4cae-a5d6-151e97e19ad6 | cpp | google/tensorstore | meta | tensorstore/internal/meta.h | tensorstore/internal/meta_test.cc | #ifndef TENSORSTORE_INTERNAL_META_H_
#define TENSORSTORE_INTERNAL_META_H_
namespace tensorstore {
namespace internal {
template <typename T, typename... Ts>
constexpr T&& GetFirstArgument(T&& t, Ts&&... ts) {
return static_cast<T&&>(t);
}
inline int constexpr_assert_failed() noexcept { return 0; }
#define TENSORSTORE_CONSTEXPR_ASSERT(...) \
(static_cast<void>( \
(__VA_ARGS__) ? 0 \
: tensorstore::internal::constexpr_assert_failed()))
}
}
#endif | #include "tensorstore/internal/meta.h"
#include <type_traits>
#include <utility>
namespace {
using ::tensorstore::internal::GetFirstArgument;
static_assert(
std::is_same_v<int&, decltype(GetFirstArgument(std::declval<int&>(),
std::declval<float&>()))>);
static_assert(std::is_same_v<
const int&, decltype(GetFirstArgument(std::declval<const int&>(),
std::declval<float&>()))>);
static_assert(
std::is_same_v<int&&, decltype(GetFirstArgument(std::declval<int>(),
std::declval<float&>()))>);
static_assert(GetFirstArgument(3, 4) == 3);
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/meta.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/meta_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
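The test above covers GetFirstArgument but not TENSORSTORE_CONSTEXPR_ASSERT; a small sketch of the macro's intent follows (`Square` is hypothetical).
#include "tensorstore/internal/meta.h"

constexpr int Square(int x) {
  // In constant evaluation a failed condition reaches the non-constexpr
  // constexpr_assert_failed() and the program is ill-formed; at runtime the
  // expression is effectively a no-op.
  TENSORSTORE_CONSTEXPR_ASSERT(x >= 0);
  return x * x;
}
static_assert(Square(3) == 9);
// constexpr int bad = Square(-1);  // would not compile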
cee18ae8-9b62-4adf-9023-605307ccd725 | cpp | google/tensorstore | type_traits | tensorstore/internal/type_traits.h | tensorstore/internal/type_traits_test.cc | #ifndef TENSORSTORE_INTERNAL_TYPE_TRAITS_H_
#define TENSORSTORE_INTERNAL_TYPE_TRAITS_H_
#include <cstddef>
#include <initializer_list>
#include <iosfwd>
#include <type_traits>
#include <utility>
#if defined(__has_builtin)
#if __has_builtin(__type_pack_element)
#define TENSORSTORE_HAS_TYPE_PACK_ELEMENT
#endif
#endif
#ifndef TENSORSTORE_HAS_TYPE_PACK_ELEMENT
#include <tuple>
#endif
#include "absl/meta/type_traits.h"
#include "tensorstore/index.h"
namespace tensorstore {
namespace internal {
struct not_detected {
~not_detected() = delete;
not_detected(not_detected const&) = delete;
void operator=(not_detected const&) = delete;
};
template <class AlwaysVoid, template <class...> class Op, class... Args>
struct detector_impl {
using value_t = std::false_type;
using type = not_detected;
};
template <template <class...> class Op, class... Args>
struct detector_impl<std::void_t<Op<Args...>>, Op, Args...> {
using value_t = std::true_type;
using type = Op<Args...>;
};
template <template <class...> class Op, class... Args>
using is_detected = typename detector_impl<void, Op, Args...>::value_t;
template <template <class...> class Op, class... Args>
using detected_t = typename detector_impl<void, Op, Args...>::type;
template <typename T, typename U = T, typename = void>
constexpr inline bool IsEqualityComparable = false;
template <typename T, typename U>
constexpr inline bool IsEqualityComparable<
T, U,
std::enable_if_t<std::is_convertible_v<
decltype(std::declval<T>() == std::declval<U>()), bool>>> = true;
template <typename To, typename, typename... From>
constexpr inline bool IsPackConvertibleWithoutNarrowingHelper = false;
template <typename To, typename... From>
constexpr inline bool IsPackConvertibleWithoutNarrowingHelper<
To,
std::void_t<decltype(std::initializer_list<To>{std::declval<From>()...})>,
From...> = true;
template <typename Source, typename Target>
constexpr inline bool IsOnlyExplicitlyConvertible =
(std::is_constructible_v<Target, Source> &&
!std::is_convertible_v<Source, Target>);
template <typename To, typename... From>
constexpr inline bool IsPackConvertibleWithoutNarrowing =
IsPackConvertibleWithoutNarrowingHelper<To, void, From...>;
template <typename... IndexType>
constexpr inline bool IsIndexPack =
IsPackConvertibleWithoutNarrowing<Index, IndexType...>;
template <typename ASource, typename BSource, typename ADest, typename BDest>
constexpr inline bool IsPairImplicitlyConvertible =
std::is_convertible_v<ASource, ADest> &&
std::is_convertible_v<BSource, BDest>;
template <typename ASource, typename BSource, typename ADest, typename BDest>
constexpr inline bool IsPairExplicitlyConvertible =
std::is_constructible_v<ADest, ASource> &&
std::is_constructible_v<BDest, BSource>;
template <typename ASource, typename BSource, typename ADest, typename BDest>
constexpr inline bool IsPairOnlyExplicitlyConvertible =
IsPairExplicitlyConvertible<ASource, BSource, ADest, BDest> &&
!IsPairImplicitlyConvertible<ASource, BSource, ADest, BDest>;
template <typename ASource, typename BSource, typename ADest, typename BDest>
constexpr inline bool IsPairAssignable =
std::is_assignable_v<ADest&, ASource> &&
std::is_assignable_v<BDest&, BSource>;
template <typename From, typename To>
constexpr inline bool IsConvertibleOrVoid = std::is_convertible_v<From, To>;
template <typename From>
constexpr inline bool IsConvertibleOrVoid<From, void> = true;
template <typename T, typename = void>
constexpr inline bool IsOstreamable = false;
template <typename T>
constexpr inline bool
IsOstreamable<T, std::void_t<decltype(std::declval<std::ostream&>()
<< std::declval<const T&>())>> =
true;
template <typename Qualified, typename T>
struct CopyQualifiersHelper {
using type = T;
};
template <typename Qualified, typename T>
struct CopyQualifiersHelper<const Qualified, T> {
using type = const typename CopyQualifiersHelper<Qualified, T>::type;
};
template <typename Qualified, typename T>
struct CopyQualifiersHelper<volatile Qualified, T> {
using type = volatile typename CopyQualifiersHelper<Qualified, T>::type;
};
template <typename Qualified, typename T>
struct CopyQualifiersHelper<const volatile Qualified, T> {
using type = const volatile typename CopyQualifiersHelper<Qualified, T>::type;
};
template <typename Qualified, typename T>
struct CopyQualifiersHelper<Qualified&, T> {
using type = typename CopyQualifiersHelper<Qualified, T>::type&;
};
template <typename T, typename Qualified>
struct CopyQualifiersHelper<Qualified&&, T> {
using type = typename CopyQualifiersHelper<Qualified, T>::type&&;
};
template <typename Qualified, typename T>
using CopyQualifiers =
typename CopyQualifiersHelper<Qualified, absl::remove_cvref_t<T>>::type;
template <typename T>
inline T& GetLValue(T&& x) {
return x;
}
template <typename T, typename... U>
using FirstType = T;
template <typename Source, typename Dest>
constexpr inline bool IsConstConvertible =
(std::is_same_v<Source, Dest> || std::is_same_v<const Source, Dest>);
template <typename Source, typename Dest>
constexpr inline bool IsConstConvertibleOrVoid =
(std::is_same_v<Source, Dest> || std::is_same_v<const Source, Dest> ||
std::is_void_v<Dest>);
#ifdef TENSORSTORE_HAS_TYPE_PACK_ELEMENT
#if __clang__
template <size_t I, typename... Ts>
using TypePackElement = __type_pack_element<I, Ts...>;
#else
template <std::size_t I, typename... Ts>
struct TypePackElementImpl {
using type = __type_pack_element<I, Ts...>;
};
template <size_t I, typename... Ts>
using TypePackElement = typename TypePackElementImpl<I, Ts...>::type;
#endif
#else
template <size_t I, typename... Ts>
using TypePackElement = typename std::tuple_element<I, std::tuple<Ts...>>::type;
#endif
template <typename T>
class EmptyObject {
static_assert(std::is_empty_v<T>, "T must be an empty type.");
static_assert(std::is_standard_layout_v<T>, "T must be standard layout.");
struct T1 {
char c;
};
struct T2 : T {
char c;
};
union Storage {
constexpr Storage() : t1{} {}
T1 t1;
T2 t2;
};
Storage storage{};
public:
T& get(T* = nullptr) {
char* c = &storage.t2.c;
T2* t2 = reinterpret_cast<T2*>(c);
return *static_cast<T*>(t2);
}
};
class NonEmptyObjectGetter {
public:
template <typename T>
static T& get(T* pointer) {
return *pointer;
}
};
template <typename T>
using PossiblyEmptyObjectGetter =
std::conditional_t<std::is_empty_v<T>, EmptyObject<T>,
NonEmptyObjectGetter>;
template <typename T>
struct DefaultConstructibleFunction {
constexpr DefaultConstructibleFunction() = default;
constexpr DefaultConstructibleFunction(const T&) {}
template <typename... Arg>
constexpr std::invoke_result_t<T&, Arg...> operator()(Arg&&... arg) const {
EmptyObject<T> obj;
return obj.get()(static_cast<Arg&&>(arg)...);
}
};
template <typename T>
using DefaultConstructibleFunctionIfEmpty =
std::conditional_t<(std::is_empty_v<T> &&
!std::is_default_constructible_v<T>),
DefaultConstructibleFunction<T>, T>;
template <typename T>
struct type_identity {
using type = T;
};
template <typename T>
using type_identity_t = typename type_identity<T>::type;
struct identity {
template <typename T>
constexpr T&& operator()(T&& t) const noexcept {
return static_cast<T&&>(t);
}
};
template <typename Base, typename Derived>
Base* BaseCast(Derived* derived) {
return derived;
}
template <typename Base, typename Derived>
const Base* BaseCast(const Derived* derived) {
return derived;
}
template <typename Base, typename Derived>
Base& BaseCast(Derived& derived) {
return derived;
}
template <typename Base, typename Derived>
const Base& BaseCast(const Derived& derived) {
return derived;
}
template <typename T>
using Undocumented = T;
}
}
#endif | #include "tensorstore/internal/type_traits.h"
#include <stddef.h>
#include <tuple>
#include <type_traits>
#include <utility>
#include <gtest/gtest.h>
namespace {
using ::tensorstore::internal::CopyQualifiers;
using ::tensorstore::internal::FirstType;
using ::tensorstore::internal::GetLValue;
using ::tensorstore::internal::IsConstConvertible;
using ::tensorstore::internal::IsConstConvertibleOrVoid;
using ::tensorstore::internal::IsEqualityComparable;
using ::tensorstore::internal::PossiblyEmptyObjectGetter;
using ::tensorstore::internal::type_identity_t;
using ::tensorstore::internal::TypePackElement;
namespace equality_comparable_tests {
struct X {};
static_assert(IsEqualityComparable<float, float>);
static_assert(IsEqualityComparable<float, int>);
static_assert(!IsEqualityComparable<X, X>);
}
static_assert(std::is_same_v<CopyQualifiers<float, int>, int>);
static_assert(std::is_same_v<CopyQualifiers<const float, int>, const int>);
static_assert(std::is_same_v<CopyQualifiers<const float&, int>, const int&>);
static_assert(std::is_same_v<CopyQualifiers<const float, int&>, const int>);
static_assert(std::is_same_v<CopyQualifiers<float&&, const int&>, int&&>);
static_assert(std::is_same_v<int&, decltype(GetLValue(3))>);
static_assert(std::is_same_v<int*, decltype(&GetLValue(3))>);
static_assert(std::is_same_v<FirstType<void>, void>);
static_assert(std::is_same_v<FirstType<int, void>, int>);
static_assert(IsConstConvertible<int, int>);
static_assert(IsConstConvertible<void, void>);
static_assert(IsConstConvertible<void, const void>);
static_assert(IsConstConvertible<int, const int>);
static_assert(!IsConstConvertible<const int, int>);
static_assert(!IsConstConvertible<int, float>);
static_assert(!IsConstConvertible<int, const float>);
static_assert(!IsConstConvertible<int, const void>);
static_assert(!IsConstConvertible<const int, void>);
static_assert(!IsConstConvertible<int, void>);
static_assert(IsConstConvertibleOrVoid<int, int>);
static_assert(IsConstConvertibleOrVoid<int, const int>);
static_assert(IsConstConvertibleOrVoid<int, void>);
static_assert(IsConstConvertibleOrVoid<const int, void>);
static_assert(IsConstConvertibleOrVoid<int, const void>);
static_assert(!IsConstConvertibleOrVoid<const int, int>);
static_assert(!IsConstConvertibleOrVoid<int, float>);
static_assert(!IsConstConvertibleOrVoid<int, const float>);
static_assert(std::is_same_v<TypePackElement<0, int, float>, int>);
static_assert(std::is_same_v<TypePackElement<1, int, float>, float>);
template <size_t I, typename... Ts>
using NonBuiltinTypePackElement =
typename std::tuple_element<I, std::tuple<Ts...>>::type;
static_assert(std::is_same_v<NonBuiltinTypePackElement<0, int, float>, int>);
static_assert(std::is_same_v<NonBuiltinTypePackElement<1, int, float>, float>);
TEST(PossiblyEmptyObjectGetterTest, Basic) {
struct Empty {
Empty() = delete;
int foo() { return 3; }
};
{
PossiblyEmptyObjectGetter<Empty> helper;
Empty& obj = helper.get(nullptr);
EXPECT_EQ(3, obj.foo());
}
{
auto lambda = [](int x, int y) { return x + y; };
using Lambda = decltype(lambda);
PossiblyEmptyObjectGetter<Lambda> helper;
Lambda& obj = helper.get(nullptr);
EXPECT_EQ(3, obj(1, 2));
}
{
int value = 3;
PossiblyEmptyObjectGetter<int> helper;
auto& obj = helper.get(&value);
EXPECT_EQ(&value, &obj);
}
}
static_assert(std::is_same_v<int, type_identity_t<int>>);
namespace explict_conversion_tests {
using ::tensorstore::internal::IsOnlyExplicitlyConvertible;
using ::tensorstore::internal::IsPairExplicitlyConvertible;
using ::tensorstore::internal::IsPairImplicitlyConvertible;
using ::tensorstore::internal::IsPairOnlyExplicitlyConvertible;
struct X {
X(int) {}
explicit X(float*) {}
};
static_assert(IsOnlyExplicitlyConvertible<float*, X>);
static_assert(std::is_convertible_v<int, X>);
static_assert(std::is_constructible_v<X, int>);
static_assert(!IsOnlyExplicitlyConvertible<int, X>);
struct Y {
Y(int*) {}
explicit Y(double*) {}
};
static_assert(IsPairImplicitlyConvertible<int, int*, X, Y>);
static_assert(IsPairExplicitlyConvertible<int, int*, X, Y>);
static_assert(IsPairExplicitlyConvertible<int, double*, X, Y>);
static_assert(IsPairExplicitlyConvertible<float*, int*, X, Y>);
static_assert(IsPairExplicitlyConvertible<float*, double*, X, Y>);
static_assert(!IsPairImplicitlyConvertible<int, double*, X, Y>);
static_assert(!IsPairImplicitlyConvertible<float*, int*, X, Y>);
static_assert(!IsPairImplicitlyConvertible<float*, double*, X, Y>);
static_assert(IsPairOnlyExplicitlyConvertible<int, double*, X, Y>);
static_assert(IsPairOnlyExplicitlyConvertible<float*, int*, X, Y>);
static_assert(IsPairOnlyExplicitlyConvertible<float*, double*, X, Y>);
}
TEST(DefaultConstructibleFunctionIfEmptyTest, Basic) {
auto fn = [](int x) { return x + 1; };
using Wrapper =
tensorstore::internal::DefaultConstructibleFunctionIfEmpty<decltype(fn)>;
static_assert(std::is_default_constructible_v<Wrapper>);
EXPECT_EQ(4, Wrapper()(3));
EXPECT_EQ(4, Wrapper(fn)(3));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/type_traits.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/type_traits_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
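The test above does not exercise the detection idiom (is_detected / detected_t); a short sketch, where `FooReturnType`, `X`, and `Y` are hypothetical.
#include <type_traits>
#include <utility>
#include "tensorstore/internal/type_traits.h"

template <typename T>
using FooReturnType = decltype(std::declval<T&>().Foo());

struct X { int Foo(); };
struct Y {};

static_assert(tensorstore::internal::is_detected<FooReturnType, X>::value);
static_assert(!tensorstore::internal::is_detected<FooReturnType, Y>::value);
static_assert(std::is_same_v<
    tensorstore::internal::detected_t<FooReturnType, X>, int>);
static_assert(std::is_same_v<
    tensorstore::internal::detected_t<FooReturnType, Y>,
    tensorstore::internal::not_detected>);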
3d8edbda-9c2e-45e4-8660-fd15f9471059 | cpp | google/tensorstore | regular_grid | tensorstore/internal/regular_grid.h | tensorstore/internal/regular_grid_test.cc | #ifndef TENSORSTORE_INTERNAL_REGULAR_GRID_H_
#define TENSORSTORE_INTERNAL_REGULAR_GRID_H_
#include <cassert>
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/util/division.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal_grid_partition {
struct RegularGridRef {
tensorstore::span<const Index> grid_cell_shape;
DimensionIndex rank() const { return grid_cell_shape.size(); }
IndexInterval GetCellOutputInterval(DimensionIndex dim,
Index cell_index) const {
assert(dim >= 0 && dim < rank());
return IndexInterval::UncheckedSized(cell_index * grid_cell_shape[dim],
grid_cell_shape[dim]);
}
Index operator()(DimensionIndex dim, Index output_index,
IndexInterval* cell_bounds) const {
assert(dim >= 0 && dim < rank());
Index cell_index = FloorOfRatio(output_index, grid_cell_shape[dim]);
if (cell_bounds) {
*cell_bounds = GetCellOutputInterval(dim, cell_index);
}
return cell_index;
}
};
}
}
#endif | #include "tensorstore/internal/regular_grid.h"
#include <array>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
namespace {
using ::tensorstore::DimensionIndex;
using ::tensorstore::Index;
using ::tensorstore::IndexInterval;
using ::tensorstore::internal_grid_partition::RegularGridRef;
using ::testing::Eq;
TEST(RegularGridTest, Basic) {
std::array<Index, 3> grid_cell_shape = {10, 20, 30};
RegularGridRef regular_grid{grid_cell_shape};
IndexInterval cell_bounds;
for (Index i = 0; i < 10; i++) {
EXPECT_THAT(regular_grid(0, i, &cell_bounds), Eq(0));
EXPECT_THAT(cell_bounds, Eq(IndexInterval::UncheckedSized(0, 10)));
EXPECT_THAT(regular_grid(1, i, &cell_bounds), Eq(0));
EXPECT_THAT(cell_bounds, Eq(IndexInterval::UncheckedSized(0, 20)));
EXPECT_THAT(regular_grid(2, i, &cell_bounds), Eq(0));
EXPECT_THAT(cell_bounds, Eq(IndexInterval::UncheckedSized(0, 30)));
}
for (DimensionIndex i = 0; i < 3; i++) {
Index j = (i + 1) * 10;
EXPECT_THAT(regular_grid(i, j - 1, &cell_bounds), Eq(0));
EXPECT_THAT(cell_bounds, Eq(IndexInterval::UncheckedSized(0, j)));
EXPECT_THAT(regular_grid(i, j, &cell_bounds), Eq(1));
EXPECT_THAT(cell_bounds, Eq(IndexInterval::UncheckedSized(j, j)));
}
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/regular_grid.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/regular_grid_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
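One behavior the test above does not show: because cell indices come from FloorOfRatio, negative output indices map to negative cells. A small sketch (the cell size of 10 is arbitrary):
#include <cassert>
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/internal/regular_grid.h"

void NegativeIndexSketch() {
  const tensorstore::Index cell_shape[] = {10};
  tensorstore::internal_grid_partition::RegularGridRef grid{cell_shape};
  tensorstore::IndexInterval bounds;
  // Output index -1 falls in cell -1, which covers outputs -10..-1.
  assert(grid(0, -1, &bounds) == -1);
  assert(bounds == tensorstore::IndexInterval::UncheckedSized(-10, 10));
}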
eab9cf0e-5b44-4116-a994-bad172501059 | cpp | google/tensorstore | arena | tensorstore/internal/arena.h | tensorstore/internal/arena_test.cc | #ifndef TENSORSTORE_INTERNAL_ARENA_H_
#define TENSORSTORE_INTERNAL_ARENA_H_
#include <stddef.h>
#include <memory>
#include <new>
#include <utility>
#include "tensorstore/internal/exception_macros.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal {
class Arena {
public:
Arena() : remaining_bytes_(0) {}
explicit Arena(tensorstore::span<unsigned char> initial_buffer)
: initial_buffer_(initial_buffer),
remaining_bytes_(initial_buffer.size()) {}
template <typename T = unsigned char>
T* allocate(size_t n, size_t alignment = alignof(T)) {
size_t num_bytes;
if (MulOverflow(n, sizeof(T), &num_bytes)) {
TENSORSTORE_THROW_BAD_ALLOC;
}
void* ptr = static_cast<void*>(initial_buffer_.end() - remaining_bytes_);
if (std::align(alignment, num_bytes, ptr, remaining_bytes_)) {
remaining_bytes_ -= num_bytes;
} else {
ptr = ::operator new(num_bytes, std::align_val_t(alignment));
}
return static_cast<T*>(ptr);
}
template <typename T>
void deallocate(T* p, size_t n, size_t alignment = alignof(T)) {
if (static_cast<void*>(p) >= static_cast<void*>(initial_buffer_.data()) &&
static_cast<void*>(p + n) <=
static_cast<void*>(initial_buffer_.data() +
initial_buffer_.size())) {
return;
}
::operator delete(static_cast<void*>(p), n * sizeof(T),
std::align_val_t(alignment));
}
private:
tensorstore::span<unsigned char> initial_buffer_;
size_t remaining_bytes_;
};
template <typename T = unsigned char>
class ArenaAllocator {
public:
using value_type = T;
using pointer = T*;
using void_pointer = void*;
using const_void_pointer = const void*;
using reference = T&;
using const_pointer = const T*;
using const_reference = const T&;
using size_type = size_t;
using difference_type = ptrdiff_t;
template <typename U>
struct rebind {
using other = ArenaAllocator<U>;
};
ArenaAllocator(Arena* arena) : arena_(arena) {}
template <typename U>
ArenaAllocator(ArenaAllocator<U> other) : arena_(other.arena()) {}
T* allocate(size_t n) const { return arena_->allocate<T>(n); }
void deallocate(T* p, size_t n) const { arena_->deallocate(p, n); }
template <typename... Arg>
void construct(T* p, Arg&&... arg) {
new (p) T(std::forward<Arg>(arg)...);
}
void destroy(T* p) { p->~T(); }
Arena* arena() const { return arena_; }
friend bool operator==(ArenaAllocator a, ArenaAllocator b) {
return a.arena_ == b.arena_;
}
friend bool operator!=(ArenaAllocator a, ArenaAllocator b) {
return a.arena_ != b.arena_;
}
Arena* arena_;
};
}
}
#endif | #include "tensorstore/internal/arena.h"
#include <algorithm>
#include <cstdint>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/util/span.h"
namespace {
using ::tensorstore::internal::Arena;
using ::tensorstore::internal::ArenaAllocator;
bool Contains(tensorstore::span<const unsigned char> buffer, void* ptr) {
return ptr >= buffer.data() && ptr < buffer.data() + buffer.size();
}
TEST(ArenaTest, Small) {
unsigned char buffer[1024];
Arena arena(buffer);
std::vector<int, ArenaAllocator<int>> vec(100, &arena);
EXPECT_EQ(&arena, vec.get_allocator().arena());
std::fill(vec.begin(), vec.end(), 5);
EXPECT_TRUE(Contains(buffer, vec.data()));
}
TEST(ArenaTest, Alignment) {
alignas(16) unsigned char buffer[1024];
for (int x = 1; x <= 16; x *= 2) {
Arena arena(buffer);
unsigned char* ptr1 = arena.allocate(1, 1);
EXPECT_EQ(&buffer[0], ptr1);
unsigned char* ptr2 = arena.allocate(1, x);
EXPECT_EQ(0u, reinterpret_cast<std::uintptr_t>(ptr2) % x);
EXPECT_EQ(&buffer[x], ptr2);
arena.deallocate(ptr1, 1, 1);
arena.deallocate(ptr2, 1, x);
}
{
Arena arena(buffer);
unsigned char* ptr = arena.allocate(2000, 16);
EXPECT_EQ(0u, reinterpret_cast<std::uintptr_t>(ptr) % 16);
arena.deallocate(ptr, 2000, 16);
}
}
TEST(ArenaTest, Large) {
unsigned char buffer[1024];
Arena arena(buffer);
std::vector<int, ArenaAllocator<int>> vec(&arena);
vec.resize(2000);
std::fill(vec.begin(), vec.end(), 7);
EXPECT_FALSE(Contains(buffer, vec.data()));
}
TEST(ArenaTest, MultipleSmall) {
unsigned char buffer[1024];
Arena arena(buffer);
std::vector<std::int32_t, ArenaAllocator<int>> vec(100, &arena);
EXPECT_EQ(&arena, vec.get_allocator().arena());
std::fill(vec.begin(), vec.end(), 5);
EXPECT_TRUE(Contains(buffer, vec.data()));
std::vector<std::int32_t, ArenaAllocator<int>> vec2(100, &arena);
std::fill(vec2.begin(), vec2.end(), 6);
EXPECT_TRUE(Contains(buffer, vec2.data()));
std::vector<std::int32_t, ArenaAllocator<int>> vec3(100, &arena);
std::fill(vec3.begin(), vec3.end(), 7);
EXPECT_FALSE(Contains(buffer, vec3.data()));
std::vector<std::int32_t, ArenaAllocator<int>> vec4(5, &arena);
std::fill(vec4.begin(), vec4.end(), 8);
EXPECT_TRUE(Contains(buffer, vec4.data()));
EXPECT_THAT(vec,
::testing::ElementsAreArray(std::vector<std::int32_t>(100, 5)));
EXPECT_THAT(vec2,
::testing::ElementsAreArray(std::vector<std::int32_t>(100, 6)));
EXPECT_THAT(vec3,
::testing::ElementsAreArray(std::vector<std::int32_t>(100, 7)));
EXPECT_THAT(vec4,
::testing::ElementsAreArray(std::vector<std::int32_t>(5, 8)));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/arena.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/arena_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
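A small sketch of direct use without an initial buffer, in which case every allocation falls through to the heap; the count of 32 is arbitrary.
#include <algorithm>
#include "tensorstore/internal/arena.h"

void HeapOnlyArenaSketch() {
  tensorstore::internal::Arena arena;  // no stack buffer
  int* p = arena.allocate<int>(32);    // falls through to ::operator new
  std::fill_n(p, 32, 0);
  arena.deallocate(p, 32);  // must be called with the same count/alignment
}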
74755980-68bd-4fe2-9dcb-b9323729bddb | cpp | google/tensorstore | source_location | tensorstore/internal/source_location.h | tensorstore/internal/source_location_test.cc | #ifndef TENSORSTORE_INTERNAL_SOURCE_LOCATION_H_
#define TENSORSTORE_INTERNAL_SOURCE_LOCATION_H_
#include <cstdint>
#include <utility>
#include "absl/base/config.h"
namespace tensorstore {
#ifndef TENSORSTORE_HAVE_SOURCE_LOCATION_CURRENT
#if ABSL_HAVE_BUILTIN(__builtin_LINE) && ABSL_HAVE_BUILTIN(__builtin_FILE)
#define TENSORSTORE_HAVE_SOURCE_LOCATION_CURRENT 1
#elif defined(__GNUC__) && __GNUC__ >= 5
#define TENSORSTORE_HAVE_SOURCE_LOCATION_CURRENT 1
#elif defined(_MSC_VER) && _MSC_VER >= 1926
#define TENSORSTORE_HAVE_SOURCE_LOCATION_CURRENT 1
#else
#define TENSORSTORE_HAVE_SOURCE_LOCATION_CURRENT 0
#endif
#endif
class SourceLocation {
struct PrivateTag {
private:
explicit PrivateTag() = default;
friend class SourceLocation;
};
public:
constexpr SourceLocation() : line_(1), file_name_("") {}
#if TENSORSTORE_HAVE_SOURCE_LOCATION_CURRENT
static constexpr SourceLocation current(
PrivateTag = PrivateTag{}, std::uint_least32_t line = __builtin_LINE(),
const char* file_name = __builtin_FILE()) {
return SourceLocation(line, file_name);
}
#else
static constexpr SourceLocation current() {
return SourceLocation(1, "<source_location>");
}
#endif
const char* file_name() const { return file_name_; }
constexpr std::uint_least32_t line() const { return line_; }
private:
constexpr SourceLocation(std::uint_least32_t line, const char* file_name)
: line_(line), file_name_(file_name) {}
std::uint_least32_t line_;
const char* file_name_;
};
}
#endif | #include "tensorstore/internal/source_location.h"
#include <cstdint>
#include <gtest/gtest.h>
namespace {
using ::tensorstore::SourceLocation;
uint64_t TakesSourceLocation(
SourceLocation loc = tensorstore::SourceLocation::current()) {
return loc.line();
}
TEST(SourceLocationTest, Basic) {
constexpr tensorstore::SourceLocation loc =
tensorstore::SourceLocation::current();
EXPECT_NE(0, loc.line());
EXPECT_NE(1, loc.line());
EXPECT_NE(1, TakesSourceLocation(tensorstore::SourceLocation::current()));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/source_location.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/source_location_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
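The usual pattern, as in TakesSourceLocation above, is a defaulted SourceLocation argument so the caller's location is captured; `LogCall` below is a hypothetical helper.
#include <cstdio>
#include "tensorstore/internal/source_location.h"

void LogCall(const char* what,
             tensorstore::SourceLocation loc =
                 tensorstore::SourceLocation::current()) {
  std::printf("%s (%s:%u)\n", what, loc.file_name(),
              static_cast<unsigned>(loc.line()));
}
// LogCall("opening dataset");  // reports the call site, not this definition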
ce08a832-4904-4f05-8fe8-fe63853de4ff | cpp | google/tensorstore | elementwise_function | tensorstore/internal/elementwise_function.h | tensorstore/internal/elementwise_function_test.cc | #ifndef TENSORSTORE_UTIL_ELEMENTWISE_FUNCTION_H_
#define TENSORSTORE_UTIL_ELEMENTWISE_FUNCTION_H_
#include <array>
#include <cstddef>
#include <type_traits>
#include <utility>
#include "absl/meta/type_traits.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/internal/type_traits.h"
#include "tensorstore/internal/void_wrapper.h"
#include "tensorstore/util/byte_strided_pointer.h"
namespace tensorstore {
namespace internal {
enum class IterationBufferKind {
kContiguous,
kStrided,
kIndexed,
};
constexpr size_t kNumIterationBufferKinds = 3;
struct IterationBufferPointer {
IterationBufferPointer() = default;
explicit IterationBufferPointer(ByteStridedPointer<void> pointer,
Index outer_byte_stride,
Index inner_byte_stride)
: pointer(pointer),
outer_byte_stride(outer_byte_stride),
inner_byte_stride(inner_byte_stride) {}
explicit IterationBufferPointer(ByteStridedPointer<void> pointer,
Index byte_offsets_outer_stride,
const Index* byte_offsets)
: pointer(pointer),
byte_offsets_outer_stride(byte_offsets_outer_stride),
byte_offsets(byte_offsets) {}
ByteStridedPointer<void> pointer;
union {
Index outer_byte_stride;
Index byte_offsets_outer_stride;
};
union {
Index inner_byte_stride;
const Index* byte_offsets;
};
void AddElementOffset(IterationBufferKind kind, Index outer_offset,
Index inner_offset) {
if (kind == IterationBufferKind::kIndexed) {
byte_offsets += inner_offset;
byte_offsets +=
wrap_on_overflow::Multiply(byte_offsets_outer_stride, outer_offset);
} else {
pointer += wrap_on_overflow::Multiply(inner_byte_stride, inner_offset);
pointer += wrap_on_overflow::Multiply(outer_byte_stride, outer_offset);
}
}
};
template <IterationBufferKind BufferKind>
struct IterationBufferAccessor;
template <>
struct IterationBufferAccessor<IterationBufferKind::kStrided> {
constexpr static IterationBufferKind buffer_kind =
IterationBufferKind::kStrided;
template <typename Element>
static Element* GetPointerAtPosition(IterationBufferPointer ptr, Index outer,
Index inner) {
return static_cast<Element*>(
ptr.pointer +
internal::wrap_on_overflow::Multiply(ptr.outer_byte_stride, outer) +
internal::wrap_on_overflow::Multiply(ptr.inner_byte_stride, inner));
}
};
template <>
struct IterationBufferAccessor<IterationBufferKind::kContiguous> {
constexpr static IterationBufferKind buffer_kind =
IterationBufferKind::kContiguous;
template <typename Element>
static Element* GetPointerAtPosition(IterationBufferPointer ptr, Index outer,
Index inner) {
return static_cast<Element*>(
ptr.pointer +
internal::wrap_on_overflow::Multiply(ptr.outer_byte_stride, outer) +
internal::wrap_on_overflow::Multiply(
static_cast<Index>(sizeof(Element)), inner));
}
};
template <>
struct IterationBufferAccessor<IterationBufferKind::kIndexed> {
constexpr static IterationBufferKind buffer_kind =
IterationBufferKind::kIndexed;
template <typename Element>
static Element* GetPointerAtPosition(IterationBufferPointer ptr, Index outer,
Index inner) {
return static_cast<Element*>(
ptr.pointer +
ptr.byte_offsets[internal::wrap_on_overflow::Multiply(
ptr.byte_offsets_outer_stride, outer) +
inner]);
}
};
template <size_t Arity, typename... ExtraArg>
class ElementwiseFunction;
using IterationBufferShape = std::array<Index, 2>;
}
namespace internal_elementwise_function {
template <typename SequenceType, typename... ExtraArg>
struct ElementwiseFunctionPointerHelper;
template <size_t I>
using IterationBufferPointerHelper = internal::IterationBufferPointer;
template <size_t... Is, typename... ExtraArg>
struct ElementwiseFunctionPointerHelper<std::index_sequence<Is...>,
ExtraArg...> {
using type = bool (*)(void*, internal::IterationBufferShape,
IterationBufferPointerHelper<Is>..., ExtraArg...);
};
template <typename, typename SFINAE, typename...>
constexpr inline bool HasApplyContiguous = false;
template <typename Func, typename... Element, typename... ExtraArg>
constexpr inline bool HasApplyContiguous<
Func(Element...),
std::void_t<decltype(std::declval<Func>().ApplyContiguous(
std::declval<Index>(), std::declval<Element*>()...,
std::declval<ExtraArg>()...))>,
ExtraArg...> = true;
template <typename, typename...>
struct SimpleLoopTemplate;
template <typename T, typename Func>
struct Stateless {
static_assert(std::is_empty_v<Func>);
using type = Func;
using ContextType = T;
};
template <typename T>
struct StatelessTraits {
constexpr static bool is_stateless = false;
using type = T;
};
template <typename T, typename Func>
struct StatelessTraits<Stateless<T, Func>> {
constexpr static bool is_stateless = true;
using type = Func;
};
template <typename Func, typename... Element, typename... ExtraArg>
struct SimpleLoopTemplate<Func(Element...), ExtraArg...> {
using ElementwiseFunctionType =
internal::ElementwiseFunction<sizeof...(Element), ExtraArg...>;
template <typename ArrayAccessor>
static constexpr auto GetLoopFn() {
if constexpr (ArrayAccessor::buffer_kind ==
internal::IterationBufferKind::kContiguous &&
HasApplyContiguous<Func(Element...), void,
ExtraArg...>) {
return &FastLoop<ArrayAccessor>;
} else {
return &Loop<ArrayAccessor>;
}
}
template <typename ArrayAccessor>
static bool FastLoop(
void* context, internal::IterationBufferShape shape,
internal::FirstType<internal::IterationBufferPointer, Element>... pointer,
ExtraArg... extra_arg) {
using Traits = StatelessTraits<Func>;
using FuncType = typename Traits::type;
static_assert(ArrayAccessor::buffer_kind ==
internal::IterationBufferKind::kContiguous);
static_assert(
HasApplyContiguous<Func(Element...), void, ExtraArg...>);
internal::PossiblyEmptyObjectGetter<FuncType> func_helper;
FuncType& func = func_helper.get(static_cast<FuncType*>(context));
for (Index outer = 0; outer < shape[0]; ++outer) {
if constexpr (StatelessTraits<Func>::is_stateless) {
if (!func.ApplyContiguous(
*static_cast<typename Func::ContextType*>(context), shape[1],
ArrayAccessor::template GetPointerAtPosition<Element>(
pointer, outer, 0)...,
extra_arg...)) {
return false;
}
} else {
if (!func.ApplyContiguous(
shape[1],
ArrayAccessor::template GetPointerAtPosition<Element>(
pointer, outer, 0)...,
extra_arg...)) {
return false;
}
}
}
return true;
}
template <typename ArrayAccessor>
static bool Loop(
void* context, internal::IterationBufferShape shape,
internal::FirstType<internal::IterationBufferPointer, Element>... pointer,
ExtraArg... extra_arg) {
static_assert(
!(ArrayAccessor::buffer_kind ==
internal::IterationBufferKind::kContiguous &&
HasApplyContiguous<Func(Element...), void, ExtraArg...>));
using Traits = StatelessTraits<Func>;
using FuncType = typename Traits::type;
internal::PossiblyEmptyObjectGetter<FuncType> func_helper;
FuncType& func = func_helper.get(static_cast<FuncType*>(context));
for (Index outer = 0; outer < shape[0]; ++outer) {
for (Index inner = 0; inner < shape[1]; ++inner) {
if constexpr (StatelessTraits<Func>::is_stateless) {
if (!static_cast<bool>(internal::Void::CallAndWrap(
func, *static_cast<typename Func::ContextType*>(context),
ArrayAccessor::template GetPointerAtPosition<Element>(
pointer, outer, inner)...,
extra_arg...))) {
return false;
}
} else {
if (!static_cast<bool>(internal::Void::CallAndWrap(
func,
ArrayAccessor::template GetPointerAtPosition<Element>(
pointer, outer, inner)...,
extra_arg...))) {
return false;
}
}
}
}
return true;
}
};
}
namespace internal {
template <size_t Arity, typename... ExtraArg>
using SpecializedElementwiseFunctionPointer =
typename internal_elementwise_function::ElementwiseFunctionPointerHelper<
std::make_index_sequence<Arity>, ExtraArg...>::type;
template <size_t Arity, typename... ExtraArg>
struct ElementwiseClosure {
using Function = ElementwiseFunction<Arity, ExtraArg...>;
constexpr static size_t arity = Arity;
const Function* function;
void* context;
};
template <size_t Arity, typename... ExtraArg>
class ElementwiseFunction {
public:
constexpr static size_t arity = Arity;
using Closure = ElementwiseClosure<Arity, ExtraArg...>;
using SpecializedFunctionPointer =
SpecializedElementwiseFunctionPointer<Arity, ExtraArg...>;
constexpr ElementwiseFunction() = default;
template <typename LoopTemplate,
typename = decltype(LoopTemplate::template GetLoopFn<
IterationBufferAccessor<
IterationBufferKind::kContiguous>>())>
constexpr explicit ElementwiseFunction(LoopTemplate)
: functions_{
LoopTemplate::template GetLoopFn<
IterationBufferAccessor<IterationBufferKind::kContiguous>>(),
LoopTemplate::template GetLoopFn<
IterationBufferAccessor<IterationBufferKind::kStrided>>(),
LoopTemplate::template GetLoopFn<
IterationBufferAccessor<IterationBufferKind::kIndexed>>()} {}
constexpr SpecializedFunctionPointer operator[](
IterationBufferKind buffer_kind) const {
return functions_[static_cast<size_t>(buffer_kind)];
}
constexpr SpecializedFunctionPointer& operator[](
IterationBufferKind buffer_kind) {
return functions_[static_cast<size_t>(buffer_kind)];
}
private:
SpecializedFunctionPointer functions_[kNumIterationBufferKinds];
};
template <typename LoopTemplate>
struct GetElementwiseFunction {
using ElementwiseFunctionType =
typename LoopTemplate::ElementwiseFunctionType;
constexpr static ElementwiseFunctionType function{LoopTemplate{}};
constexpr operator const ElementwiseFunctionType*() const {
return &function;
}
constexpr operator ElementwiseFunctionType() const { return function; }
};
template <typename LoopTemplate>
constexpr typename LoopTemplate::ElementwiseFunctionType
GetElementwiseFunction<LoopTemplate>::function;
template <typename, typename...>
struct SimpleElementwiseFunction;
template <typename Func, typename... Element, typename... ExtraArg>
struct SimpleElementwiseFunction<Func(Element...), ExtraArg...>
: public GetElementwiseFunction<
internal_elementwise_function::SimpleLoopTemplate<
std::remove_reference_t<Func>(Element...), ExtraArg...>> {
using ElementwiseFunctionType =
internal::ElementwiseFunction<sizeof...(Element), ExtraArg...>;
using ClosureType =
internal::ElementwiseClosure<sizeof...(Element), ExtraArg...>;
constexpr static ClosureType Closure(std::remove_reference_t<Func>* func) {
return ClosureType{SimpleElementwiseFunction{},
const_cast<absl::remove_cvref_t<Func>*>(func)};
}
template <int&... ExplicitArgumentBarrier,
std::enable_if_t<
(sizeof...(ExplicitArgumentBarrier) == 0 &&
std::is_empty<absl::remove_cvref_t<Func>>::value)>* = nullptr>
constexpr operator ClosureType() const {
return {SimpleElementwiseFunction{}, nullptr};
}
};
}
namespace internal_elementwise_function {
template <size_t Arity, typename... ExtraArg, typename Pointers, size_t... Is>
inline bool InvokeElementwiseFunctionImpl(
std::index_sequence<Is...>,
internal::SpecializedElementwiseFunctionPointer<Arity, ExtraArg...>
function,
void* context, internal::IterationBufferShape shape,
const Pointers& pointers, ExtraArg... extra_arg) {
using std::get;
return function(context, shape, get<Is>(pointers)...,
std::forward<ExtraArg>(extra_arg)...);
}
}
namespace internal {
template <size_t Arity, typename... ExtraArg, typename Pointers>
inline bool InvokeElementwiseClosure(
ElementwiseClosure<Arity, ExtraArg...> closure,
IterationBufferKind buffer_kind, internal::IterationBufferShape shape,
const Pointers& pointers,
internal::type_identity_t<ExtraArg>... extra_arg) {
return internal_elementwise_function::InvokeElementwiseFunctionImpl<
Arity, ExtraArg...>(
std::make_index_sequence<Arity>{}, (*closure.function)[buffer_kind],
closure.context, shape, pointers, std::forward<ExtraArg>(extra_arg)...);
}
template <size_t Arity, typename... ExtraArg, typename Pointers>
inline bool InvokeElementwiseFunction(
SpecializedElementwiseFunctionPointer<Arity, ExtraArg...> function,
void* context, internal::IterationBufferShape shape,
const Pointers& pointers, ExtraArg... extra_arg) {
return internal_elementwise_function::InvokeElementwiseFunctionImpl<
Arity, ExtraArg...>(std::make_index_sequence<Arity>{}, function, context,
shape, pointers,
std::forward<ExtraArg>(extra_arg)...);
}
}
}
#endif | #include "tensorstore/internal/elementwise_function.h"
#include <functional>
#include <limits>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/base/attributes.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/util/byte_strided_pointer.h"
#include "tensorstore/util/status.h"
namespace {
using ::tensorstore::Index;
using ::tensorstore::internal::ElementwiseClosure;
using ::tensorstore::internal::ElementwiseFunction;
using ::tensorstore::internal::IterationBufferAccessor;
using ::tensorstore::internal::IterationBufferKind;
using ::tensorstore::internal::IterationBufferPointer;
using ::tensorstore::internal::SimpleElementwiseFunction;
using ContiguousAccessor =
IterationBufferAccessor<IterationBufferKind::kContiguous>;
using StridedAccessor = IterationBufferAccessor<IterationBufferKind::kStrided>;
using OffsetArrayAccessor =
IterationBufferAccessor<IterationBufferKind::kIndexed>;
TEST(ContiguousAccessorTest, Basic) {
int arr[3] = {1, 2, 3};
IterationBufferPointer ptr{&arr[0], Index(0), Index(0)};
EXPECT_EQ(&arr[0], ContiguousAccessor::GetPointerAtPosition<int>(ptr, 0, 0));
EXPECT_EQ(&arr[1], ContiguousAccessor::GetPointerAtPosition<int>(ptr, 0, 1));
}
TEST(ContiguousAccessorTest, WrapOnOverflow) {
int arr[3] = {1, 2, 3};
IterationBufferPointer ptr{&arr[0], Index(0), Index(0)};
const Index base_index = std::numeric_limits<Index>::max() - 3;
ptr.pointer -= tensorstore::internal::wrap_on_overflow::Multiply(
base_index, static_cast<Index>(sizeof(int)));
EXPECT_EQ(&arr[0], ContiguousAccessor::GetPointerAtPosition<int>(
ptr, 0, base_index + 0));
EXPECT_EQ(&arr[1], ContiguousAccessor::GetPointerAtPosition<int>(
ptr, 0, base_index + 1));
}
TEST(StridedAccessorTest, Basic) {
int arr[3] = {1, 2, 3};
IterationBufferPointer ptr{&arr[0], Index(0), sizeof(int) * 2};
EXPECT_EQ(&arr[0], StridedAccessor::GetPointerAtPosition<int>(ptr, 0, 0));
EXPECT_EQ(&arr[2], StridedAccessor::GetPointerAtPosition<int>(ptr, 0, 1));
}
TEST(StridedAccessorTest, WrapOnOverflow) {
int arr[3] = {1, 2, 3};
IterationBufferPointer ptr{&arr[0], Index(0), sizeof(int) * 2};
const Index base_index = std::numeric_limits<Index>::max() - 3;
ptr.pointer -= tensorstore::internal::wrap_on_overflow::Multiply(
base_index, ptr.inner_byte_stride);
EXPECT_EQ(&arr[0],
StridedAccessor::GetPointerAtPosition<int>(ptr, 0, base_index + 0));
EXPECT_EQ(&arr[2],
StridedAccessor::GetPointerAtPosition<int>(ptr, 0, base_index + 1));
}
TEST(OffsetArrayAccessorTest, Basic) {
int arr[3] = {1, 2, 3};
Index offsets[] = {0, sizeof(int) * 2};
IterationBufferPointer ptr{&arr[0], Index(0), &offsets[0]};
EXPECT_EQ(&arr[0], OffsetArrayAccessor::GetPointerAtPosition<int>(ptr, 0, 0));
EXPECT_EQ(&arr[2], OffsetArrayAccessor::GetPointerAtPosition<int>(ptr, 0, 1));
}
TEST(OffsetArrayAccessorTest, WrapOnOverflow) {
int arr[3] = {1, 2, 3};
const Index base_index = std::numeric_limits<Index>::max() - 100;
Index offsets[] = {base_index + 0, base_index + sizeof(int) * 2};
IterationBufferPointer ptr{&arr[0], Index(0), &offsets[0]};
ptr.pointer -= base_index;
EXPECT_EQ(&arr[0], OffsetArrayAccessor::GetPointerAtPosition<int>(ptr, 0, 0));
EXPECT_EQ(&arr[2], OffsetArrayAccessor::GetPointerAtPosition<int>(ptr, 0, 1));
}
TEST(SimpleElementwiseFunctionTest, ArityOne) {
struct AddOneB {
AddOneB() = delete;
bool operator()(int* x) const {
if (*x > 0) return false;
*x += 1;
return true;
}
};
ElementwiseFunction<1> function = SimpleElementwiseFunction<AddOneB(int)>();
std::vector<int> arr{-5, -6, 1, 2};
EXPECT_TRUE(function[IterationBufferKind::kContiguous](
nullptr, {1, 2}, IterationBufferPointer{&arr[0], Index(0), sizeof(int)}));
EXPECT_THAT(arr, ::testing::ElementsAre(-4, -5, 1, 2));
EXPECT_FALSE(function[IterationBufferKind::kStrided](
nullptr, {1, 2},
IterationBufferPointer{&arr[0], Index(0), sizeof(int) * 2}));
EXPECT_THAT(arr, ::testing::ElementsAre(-3, -5, 1, 2));
Index offsets[] = {sizeof(int), sizeof(int)};
EXPECT_TRUE(function[IterationBufferKind::kIndexed](
nullptr, {1, 2}, IterationBufferPointer{&arr[0], Index(0), &offsets[0]}));
EXPECT_THAT(arr, ::testing::ElementsAre(-3, -3, 1, 2));
}
TEST(SimpleElementwiseFunctionTest, ArityOneCaptureLessLambda) {
[[maybe_unused]] const auto add_one = [](int* x) {
if (*x > 0) return false;
*x += 1;
return true;
};
ElementwiseFunction<1> function =
SimpleElementwiseFunction<decltype(add_one)(int)>();
std::vector<int> arr{-5, -6, 1, 2};
EXPECT_TRUE(function[IterationBufferKind::kContiguous](
nullptr, {1, 2}, IterationBufferPointer{&arr[0], Index(0), sizeof(int)}));
EXPECT_THAT(arr, ::testing::ElementsAre(-4, -5, 1, 2));
}
TEST(SimpleElementwiseFunctionTest, NonEmptyArityOne) {
struct AddOneC {
int value = 0;
bool operator()(int* x) {
++value;
if (*x > 0) return false;
*x += 1;
return true;
}
};
AddOneC add_one;
ElementwiseClosure<1> closure =
SimpleElementwiseFunction<AddOneC(int)>::Closure(&add_one);
EXPECT_EQ(&add_one, closure.context);
std::vector<int> arr{-5, -6, 1, 2};
EXPECT_TRUE((*closure.function)[IterationBufferKind::kContiguous](
closure.context, {1, 2},
IterationBufferPointer{&arr[0], Index(0), sizeof(int)}));
EXPECT_THAT(arr, ::testing::ElementsAre(-4, -5, 1, 2));
EXPECT_EQ(2, add_one.value);
}
TEST(SimpleElementwiseFunctionTest, NonEmptyArityOneBind) {
struct AddOneD {
bool operator()(int* x, int* counter) {
++*counter;
if (*x > 0) return false;
*x += 1;
return true;
}
};
int counter = 0;
auto add_one = std::bind(AddOneD{}, std::placeholders::_1, &counter);
ElementwiseClosure<1> closure =
SimpleElementwiseFunction<decltype(add_one)(int)>::Closure(&add_one);
std::vector<int> arr{-5, -6, 1, 2};
EXPECT_TRUE((*closure.function)[IterationBufferKind::kContiguous](
&add_one, {1, 2},
IterationBufferPointer{&arr[0], Index(0), sizeof(int)}));
EXPECT_THAT(arr, ::testing::ElementsAre(-4, -5, 1, 2));
EXPECT_EQ(2, counter);
}
TEST(SimpleElementwiseFunctionTest, ArityTwo) {
struct Convert {
bool operator()(int* x, double* y) const {
*x = static_cast<int>(*y);
return (*x < 0);
}
};
ElementwiseFunction<2> function =
SimpleElementwiseFunction<Convert(int, double)>();
std::vector<int> arr{0, 0, 0, 0};
std::vector<double> arr2{-3.5, -2.5, -1.5, 2.5};
EXPECT_TRUE(function[IterationBufferKind::kContiguous](
nullptr, {1, 2}, IterationBufferPointer{&arr[0], Index(0), sizeof(int)},
IterationBufferPointer{&arr2[0], Index(0), sizeof(double)}));
EXPECT_THAT(arr, ::testing::ElementsAre(-3, -2, 0, 0));
EXPECT_TRUE(function[IterationBufferKind::kStrided](
nullptr, {1, 2},
IterationBufferPointer{&arr[0], Index(0), sizeof(int) * 2},
IterationBufferPointer{&arr2[0], Index(0), sizeof(double)}));
EXPECT_THAT(arr, ::testing::ElementsAre(-3, -2, -2, 0));
Index offsets[] = {0, sizeof(int), 2 * sizeof(int)};
Index offsets2[] = {sizeof(double), sizeof(double) * 3, 0};
EXPECT_FALSE(function[IterationBufferKind::kIndexed](
nullptr, {1, 3}, IterationBufferPointer{&arr[0], Index(0), &offsets[0]},
IterationBufferPointer{&arr2[0], Index(0), &offsets2[0]}));
EXPECT_THAT(arr, ::testing::ElementsAre(-2, 2, -2, 0));
}
TEST(SimpleElementwiseFunctionTest, ArityOneExtraArgsIndexReturn) {
struct AddOneA {
bool operator()(int* x, int* sum) const {
if (*x > 0) return false;
*sum += *x;
*x += 1;
return true;
}
};
ElementwiseFunction<1, int*> function =
SimpleElementwiseFunction<AddOneA(int), int*>();
std::vector<int> arr{-5, -6, 1, 2};
{
int sum = 0;
EXPECT_TRUE(function[IterationBufferKind::kContiguous](
nullptr, {1, 2}, IterationBufferPointer{&arr[0], Index(0), sizeof(int)},
&sum));
EXPECT_EQ(-11, sum);
EXPECT_THAT(arr, ::testing::ElementsAre(-4, -5, 1, 2));
}
{
int sum = 0;
EXPECT_FALSE(function[IterationBufferKind::kStrided](
nullptr, {1, 2},
IterationBufferPointer{&arr[0], Index(0), sizeof(int) * 2}, &sum));
EXPECT_THAT(arr, ::testing::ElementsAre(-3, -5, 1, 2));
EXPECT_EQ(-4, sum);
}
{
int sum = 0;
Index offsets[] = {sizeof(int), sizeof(int)};
EXPECT_TRUE(function[IterationBufferKind::kIndexed](
nullptr, {1, 2}, IterationBufferPointer{&arr[0], Index(0), &offsets[0]},
&sum));
EXPECT_THAT(arr, ::testing::ElementsAre(-3, -3, 1, 2));
EXPECT_EQ(-9, sum);
}
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/elementwise_function.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/elementwise_function_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
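None of the functors in the tests above define ApplyContiguous, so the FastLoop path in SimpleLoopTemplate is never selected; a sketch of a functor that would take it (`FillWithOnes` is hypothetical).
#include <algorithm>
#include "tensorstore/index.h"
#include "tensorstore/internal/elementwise_function.h"

struct FillWithOnes {
  // Per-element path, used for strided and indexed buffers.
  bool operator()(int* x) const {
    *x = 1;
    return true;
  }
  // Row-at-a-time path, selected for kContiguous buffers because
  // HasApplyContiguous detects this member.
  bool ApplyContiguous(tensorstore::Index count, int* x) const {
    std::fill_n(x, count, 1);
    return true;
  }
};
// Usage mirrors the tests above:
//   ElementwiseFunction<1> fn = SimpleElementwiseFunction<FillWithOnes(int)>();
//   fn[IterationBufferKind::kContiguous](nullptr, {rows, cols}, pointer);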
cb0e2bda-0e2c-4bc6-b373-3abf2587c040 | cpp | google/tensorstore | image_writer | tensorstore/internal/image/image_writer.h | tensorstore/internal/image/image_writer_test.cc | #ifndef TENSORSTORE_INTERNAL_IMAGE_IMAGE_WRITER_H_
#define TENSORSTORE_INTERNAL_IMAGE_IMAGE_WRITER_H_
#include "absl/status/status.h"
#include "riegeli/bytes/writer.h"
#include "tensorstore/internal/image/image_info.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal_image {
class ImageWriter {
public:
virtual ~ImageWriter() = default;
virtual absl::Status Initialize(riegeli::Writer*) = 0;
virtual absl::Status Encode(
const ImageInfo& image,
tensorstore::span<const unsigned char> source) = 0;
virtual absl::Status Done() = 0;
};
}
}
#endif | #include "tensorstore/internal/image/image_writer.h"
#include <stddef.h>
#include <stdint.h>
#include <any>
#include <cmath>
#include <functional>
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_cat.h"
#include "riegeli/bytes/cord_reader.h"
#include "riegeli/bytes/cord_writer.h"
#include "riegeli/bytes/writer.h"
#include "tensorstore/internal/image/avif_reader.h"
#include "tensorstore/internal/image/avif_writer.h"
#include "tensorstore/internal/image/image_info.h"
#include "tensorstore/internal/image/image_reader.h"
#include "tensorstore/internal/image/image_view.h"
#include "tensorstore/internal/image/jpeg_reader.h"
#include "tensorstore/internal/image/jpeg_writer.h"
#include "tensorstore/internal/image/png_reader.h"
#include "tensorstore/internal/image/png_writer.h"
#include "tensorstore/internal/image/tiff_reader.h"
#include "tensorstore/internal/image/tiff_writer.h"
#include "tensorstore/internal/image/webp_reader.h"
#include "tensorstore/internal/image/webp_writer.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::internal_image::AvifReader;
using ::tensorstore::internal_image::AvifReaderOptions;
using ::tensorstore::internal_image::AvifWriter;
using ::tensorstore::internal_image::AvifWriterOptions;
using ::tensorstore::internal_image::ImageInfo;
using ::tensorstore::internal_image::ImageReader;
using ::tensorstore::internal_image::ImageView;
using ::tensorstore::internal_image::ImageWriter;
using ::tensorstore::internal_image::JpegReader;
using ::tensorstore::internal_image::JpegWriter;
using ::tensorstore::internal_image::JpegWriterOptions;
using ::tensorstore::internal_image::PngReader;
using ::tensorstore::internal_image::PngWriter;
using ::tensorstore::internal_image::PngWriterOptions;
using ::tensorstore::internal_image::TiffReader;
using ::tensorstore::internal_image::TiffWriter;
using ::tensorstore::internal_image::TiffWriterOptions;
using ::tensorstore::internal_image::WebPReader;
using ::tensorstore::internal_image::WebPReaderOptions;
using ::tensorstore::internal_image::WebPWriter;
using ::tensorstore::internal_image::WebPWriterOptions;
template <typename T>
const T* GetPointerFromAny(std::any* any_ptr) {
if (!any_ptr->has_value()) {
return nullptr;
}
if (auto opt = std::any_cast<T>(any_ptr); opt != nullptr) {
return opt;
}
if (auto opt = std::any_cast<std::reference_wrapper<T>>(any_ptr);
opt != nullptr) {
return &(opt->get());
}
if (auto opt = std::any_cast<std::reference_wrapper<const T>>(any_ptr);
opt != nullptr) {
return &(opt->get());
}
return nullptr;
}
double ComputeRMSE(const unsigned char* a, const unsigned char* b, size_t c) {
double squared_error = 0;
for (size_t i = 0; i < c; ++i) {
const int diff = static_cast<double>(a[i]) - static_cast<double>(b[i]);
squared_error += diff * diff;
}
return std::sqrt(squared_error / static_cast<double>(c));
}
void MakeTestImage(const ImageInfo& info,
tensorstore::span<unsigned char> data) {
ImageView image(info, data);
uint64_t lcg = info.width * info.height * info.num_components;
for (size_t y = 0; y < info.height; ++y) {
auto* row = image.data_row(y).data();
for (size_t x = 0; x < info.width; ++x) {
double gradient = static_cast<double>(x + y) /
static_cast<double>(info.width + info.height);
*row++ = static_cast<unsigned char>(gradient * 255);
if (info.num_components > 1) {
lcg = (lcg * 6364136223846793005) + 1;
*row++ = static_cast<unsigned char>(lcg);
}
if (info.num_components > 2) {
*row++ = (y & 1) ? static_cast<unsigned char>((1.0 - gradient) * 255)
: static_cast<unsigned char>(x);
}
if (info.num_components > 3) {
*row++ =
(y & 1)
? static_cast<unsigned char>(x)
: static_cast<unsigned char>(std::abs(128 - gradient * 255));
}
}
}
}
struct TestParam {
std::any options;
ImageInfo image_params;
double rmse_error_limit = 0;
std::any reader_options;
};
[[maybe_unused]] std::string PrintToString(const TestParam& p) {
return absl::StrCat(p.image_params.num_components,
p.rmse_error_limit != 0 ? "_rmse" : "");
}
class WriterTest : public ::testing::TestWithParam<TestParam> {
public:
WriterTest() {
std::any* options = const_cast<std::any*>(&GetParam().options);
if (GetPointerFromAny<TiffWriterOptions>(options)) {
writer = std::make_unique<TiffWriter>();
reader = std::make_unique<TiffReader>();
} else if (GetPointerFromAny<JpegWriterOptions>(options)) {
writer = std::make_unique<JpegWriter>();
reader = std::make_unique<JpegReader>();
} else if (GetPointerFromAny<PngWriterOptions>(options)) {
writer = std::make_unique<PngWriter>();
reader = std::make_unique<PngReader>();
} else if (GetPointerFromAny<AvifWriterOptions>(options)) {
writer = std::make_unique<AvifWriter>();
reader = std::make_unique<AvifReader>();
} else if (GetPointerFromAny<WebPWriterOptions>(options)) {
writer = std::make_unique<WebPWriter>();
reader = std::make_unique<WebPReader>();
}
}
absl::Status InitializeWithOptions(riegeli::Writer* riegeli_writer) {
std::any* options = const_cast<std::any*>(&GetParam().options);
if (auto* ptr = GetPointerFromAny<TiffWriterOptions>(options)) {
return reinterpret_cast<TiffWriter*>(writer.get())
->Initialize(riegeli_writer, *ptr);
} else if (auto* ptr = GetPointerFromAny<JpegWriterOptions>(options)) {
return reinterpret_cast<JpegWriter*>(writer.get())
->Initialize(riegeli_writer, *ptr);
} else if (auto* ptr = GetPointerFromAny<PngWriterOptions>(options)) {
return reinterpret_cast<PngWriter*>(writer.get())
->Initialize(riegeli_writer, *ptr);
} else if (auto* ptr = GetPointerFromAny<AvifWriterOptions>(options)) {
return reinterpret_cast<AvifWriter*>(writer.get())
->Initialize(riegeli_writer, *ptr);
} else if (auto* ptr = GetPointerFromAny<WebPWriterOptions>(options)) {
return reinterpret_cast<WebPWriter*>(writer.get())
->Initialize(riegeli_writer, *ptr);
}
return writer->Initialize(riegeli_writer);
}
absl::Status DecodeWithOptions(tensorstore::span<unsigned char> dest) {
std::any* options = const_cast<std::any*>(&GetParam().reader_options);
if (auto* ptr = GetPointerFromAny<AvifReaderOptions>(options)) {
return reinterpret_cast<AvifReader*>(reader.get())->Decode(dest, *ptr);
}
return reader->Decode(dest);
}
std::unique_ptr<ImageWriter> writer;
std::unique_ptr<ImageReader> reader;
};
TEST_P(WriterTest, RoundTrip) {
ASSERT_FALSE(writer == nullptr);
ASSERT_FALSE(reader.get() == nullptr);
const ImageInfo source_info = GetParam().image_params;
std::vector<unsigned char> source(ImageRequiredBytes(source_info));
MakeTestImage(source_info, source);
absl::Cord encoded;
{
riegeli::CordWriter riegeli_writer(&encoded);
ASSERT_THAT(InitializeWithOptions(&riegeli_writer), ::tensorstore::IsOk());
ASSERT_THAT(writer->Encode(source_info, source), ::tensorstore::IsOk());
ASSERT_THAT(writer->Done(), ::tensorstore::IsOk());
}
ImageInfo decoded_info;
std::vector<unsigned char> decoded(source.size());
{
riegeli::CordReader cord_reader(&encoded);
ASSERT_THAT(reader->Initialize(&cord_reader), ::tensorstore::IsOk());
decoded_info = reader->GetImageInfo();
EXPECT_EQ(decoded_info.width, source_info.width);
EXPECT_EQ(decoded_info.height, source_info.height);
EXPECT_EQ(decoded_info.num_components, source_info.num_components);
EXPECT_THAT(DecodeWithOptions(decoded), ::tensorstore::IsOk());
}
double rmse = ComputeRMSE(decoded.data(), source.data(), source.size());
if (GetParam().rmse_error_limit == 0) {
EXPECT_EQ(0, rmse) << "\nA: " << source_info << " "
<< "\nB: " << decoded_info;
EXPECT_THAT(decoded, testing::Eq(source));
} else {
EXPECT_LT(rmse, GetParam().rmse_error_limit) << decoded_info;
}
}
INSTANTIATE_TEST_SUITE_P(
AvifLossless, WriterTest,
::testing::Values(
TestParam{AvifWriterOptions{}, ImageInfo{33, 100, 1}, 0},
TestParam{AvifWriterOptions{}, ImageInfo{33, 100, 2}, 0},
TestParam{AvifWriterOptions{}, ImageInfo{33, 100, 3}, 0},
TestParam{AvifWriterOptions{}, ImageInfo{33, 100, 4}, 0}));
INSTANTIATE_TEST_SUITE_P(
    AvifLossy, WriterTest,
::testing::Values(
TestParam{AvifWriterOptions{1}, ImageInfo{33, 100, 1}, 0.26},
TestParam{AvifWriterOptions{1}, ImageInfo{33, 100, 2}, 0.5},
TestParam{AvifWriterOptions{1}, ImageInfo{33, 100, 3}, 28.5},
TestParam{AvifWriterOptions{1}, ImageInfo{33, 100, 4}, 24.5}));
INSTANTIATE_TEST_SUITE_P(
    AvifExtended, WriterTest,
::testing::Values(
TestParam{AvifWriterOptions{0, 6, false}, ImageInfo{33, 100, 3}, 0,
AvifReaderOptions{false}},
TestParam{AvifWriterOptions{0, 6, false}, ImageInfo{33, 100, 4}, 0,
AvifReaderOptions{false}},
TestParam{AvifWriterOptions{1, 6, false}, ImageInfo{33, 100, 3}, 0.5,
AvifReaderOptions{false}},
TestParam{AvifWriterOptions{1, 6, false}, ImageInfo{33, 100, 4}, 44,
AvifReaderOptions{false}}));
INSTANTIATE_TEST_SUITE_P(
JpegFiles, WriterTest,
::testing::Values(
TestParam{JpegWriterOptions{100}, ImageInfo{33, 100, 1}, 0.5},
TestParam{JpegWriterOptions{100}, ImageInfo{33, 100, 3}, 48}));
INSTANTIATE_TEST_SUITE_P(
PngFiles, WriterTest,
::testing::Values(
TestParam{PngWriterOptions{}, ImageInfo{33, 100, 1}, 0},
TestParam{PngWriterOptions{}, ImageInfo{33, 100, 2}, 0},
TestParam{PngWriterOptions{}, ImageInfo{33, 100, 3}, 0},
TestParam{PngWriterOptions{}, ImageInfo{33, 100, 4}, 0}));
INSTANTIATE_TEST_SUITE_P(
TiffFiles, WriterTest,
::testing::Values(
TestParam{TiffWriterOptions{}, ImageInfo{33, 100, 1}, 0},
TestParam{TiffWriterOptions{}, ImageInfo{33, 100, 2}, 0},
TestParam{TiffWriterOptions{}, ImageInfo{33, 100, 3}, 0},
TestParam{TiffWriterOptions{}, ImageInfo{33, 100, 4}, 0}));
INSTANTIATE_TEST_SUITE_P(
WebPLossless, WriterTest,
::testing::Values(
TestParam{WebPWriterOptions{true}, ImageInfo{33, 100, 3}, 0},
TestParam{WebPWriterOptions{true}, ImageInfo{33, 100, 4}, 0}));
INSTANTIATE_TEST_SUITE_P(
WebPLossy, WriterTest,
::testing::Values(
TestParam{WebPWriterOptions{false}, ImageInfo{33, 100, 3}, 47},
TestParam{WebPWriterOptions{false}, ImageInfo{33, 100, 4}, 44}));
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/image/image_writer.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/image/image_writer_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
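A minimal usage sketch for the ImageWriter interface in this row, mirroring the round-trip test above; the EncodeToCord helper name and the choice of AvifWriter as the concrete writer are illustrative assumptions, not part of the row.
#include "absl/log/absl_check.h"
#include "absl/strings/cord.h"
#include "riegeli/bytes/cord_writer.h"
#include "tensorstore/internal/image/avif_writer.h"
#include "tensorstore/internal/image/image_info.h"
#include "tensorstore/util/span.h"
// Hypothetical helper: encode one interleaved 8-bit image into an absl::Cord.
absl::Cord EncodeToCord(const tensorstore::internal_image::ImageInfo& info,
                        tensorstore::span<const unsigned char> pixels) {
  absl::Cord encoded;
  riegeli::CordWriter riegeli_writer(&encoded);
  tensorstore::internal_image::AvifWriter writer;      // any ImageWriter subclass
  ABSL_CHECK_OK(writer.Initialize(&riegeli_writer));   // bind the output stream
  ABSL_CHECK_OK(writer.Encode(info, pixels));          // write the pixel data
  ABSL_CHECK_OK(writer.Done());                        // flush and finalize the file
  return encoded;
}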
5eceaa04-6a53-46b1-828b-67ccd30cfabc | cpp | google/tensorstore | image_reader | tensorstore/internal/image/image_reader.h | tensorstore/internal/image/image_reader_test.cc | #ifndef TENSORSTORE_INTERNAL_IMAGE_IMAGE_READER_H_
#define TENSORSTORE_INTERNAL_IMAGE_IMAGE_READER_H_
#include "absl/status/status.h"
#include "riegeli/bytes/reader.h"
#include "tensorstore/internal/image/image_info.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal_image {
class ImageReader {
public:
virtual ~ImageReader() = default;
virtual absl::Status Initialize(riegeli::Reader* reader) = 0;
virtual ImageInfo GetImageInfo() = 0;
virtual absl::Status Decode(tensorstore::span<unsigned char> dest) = 0;
};
}
}
#endif | #include "tensorstore/internal/image/image_reader.h"
#include <stddef.h>
#include <array>
#include <cstdint>
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/flags/flag.h"
#include "absl/log/absl_log.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/match.h"
#include "absl/strings/string_view.h"
#include "riegeli/bytes/cord_reader.h"
#include "riegeli/bytes/fd_reader.h"
#include "riegeli/bytes/read_all.h"
#include "tensorstore/data_type.h"
#include "tensorstore/internal/image/avif_reader.h"
#include "tensorstore/internal/image/bmp_reader.h"
#include "tensorstore/internal/image/image_info.h"
#include "tensorstore/internal/image/jpeg_reader.h"
#include "tensorstore/internal/image/png_reader.h"
#include "tensorstore/internal/image/tiff_reader.h"
#include "tensorstore/internal/image/webp_reader.h"
#include "tensorstore/internal/path.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
ABSL_FLAG(std::string, tensorstore_test_data_dir, ".",
"Path to directory containing test data.");
namespace {
using ::tensorstore::internal_image::AvifReader;
using ::tensorstore::internal_image::BmpReader;
using ::tensorstore::internal_image::ImageInfo;
using ::tensorstore::internal_image::ImageReader;
using ::tensorstore::internal_image::JpegReader;
using ::tensorstore::internal_image::PngReader;
using ::tensorstore::internal_image::TiffReader;
using ::tensorstore::internal_image::WebPReader;
struct V {
std::array<size_t, 2> yx;
std::array<unsigned char, 3> rgb;
};
struct TestParam {
std::string filename;
ImageInfo info;
std::vector<V> values;
};
[[maybe_unused]] std::string PrintToString(const TestParam& p) {
return p.filename;
}
class ReaderTest : public ::testing::TestWithParam<TestParam> {
public:
ReaderTest() {
if (IsTiff()) {
reader = std::make_unique<TiffReader>();
} else if (IsJpeg()) {
reader = std::make_unique<JpegReader>();
} else if (IsPng()) {
reader = std::make_unique<PngReader>();
} else if (IsBmp()) {
reader = std::make_unique<BmpReader>();
} else if (IsAvif()) {
reader = std::make_unique<AvifReader>();
} else if (IsWebP()) {
reader = std::make_unique<WebPReader>();
}
}
bool IsTiff() {
return (absl::EndsWith(GetParam().filename, ".tiff") ||
absl::EndsWith(GetParam().filename, ".tif"));
}
bool IsPng() { return absl::EndsWith(GetParam().filename, ".png"); }
bool IsJpeg() {
return (absl::EndsWith(GetParam().filename, ".jpg") ||
absl::EndsWith(GetParam().filename, ".jpeg"));
}
bool IsAvif() { return absl::EndsWith(GetParam().filename, ".avif"); }
bool IsBmp() { return absl::EndsWith(GetParam().filename, ".bmp"); }
bool IsWebP() { return absl::EndsWith(GetParam().filename, ".webp"); }
bool ReadsEntireFile() { return IsAvif() || IsJpeg(); }
std::string GetFilename() {
return tensorstore::internal::JoinPath(
absl::GetFlag(FLAGS_tensorstore_test_data_dir), GetParam().filename);
}
tensorstore::Result<absl::Cord> ReadEntireFile(std::string filename) {
absl::Cord file_data;
TENSORSTORE_RETURN_IF_ERROR(
riegeli::ReadAll(riegeli::FdReader(filename), file_data));
return file_data;
}
std::unique_ptr<ImageReader> reader;
};
TEST_P(ReaderTest, ReadImage) {
const auto& filename = GetParam().filename;
ASSERT_FALSE(reader.get() == nullptr) << filename;
ABSL_LOG(INFO) << filename;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(absl::Cord file_data,
ReadEntireFile(GetFilename()));
ASSERT_FALSE(file_data.empty());
riegeli::CordReader cord_reader(&file_data);
ASSERT_THAT(reader->Initialize(&cord_reader), ::tensorstore::IsOk())
<< filename;
auto expected_info = GetParam().info;
auto info = reader->GetImageInfo();
EXPECT_EQ(info.width, expected_info.width) << filename;
EXPECT_EQ(info.height, expected_info.height) << filename;
EXPECT_EQ(info.num_components, expected_info.num_components) << filename;
EXPECT_EQ(info.dtype, expected_info.dtype) << filename;
const size_t image_bytes = ImageRequiredBytes(info);
EXPECT_EQ(image_bytes, ImageRequiredBytes(expected_info));
std::unique_ptr<unsigned char[]> image(new unsigned char[image_bytes]());
EXPECT_THAT(reader->Decode(tensorstore::span(image.get(), image_bytes)),
::tensorstore::IsOk());
EXPECT_TRUE(cord_reader.Close()) << cord_reader.status();
for (const V& v : GetParam().values) {
ASSERT_LT(v.yx[0], expected_info.height)
<< " (" << v.yx[0] << "," << v.yx[1] << ")";
ASSERT_LT(v.yx[1], expected_info.width)
<< " (" << v.yx[0] << "," << v.yx[1] << ")";
size_t offset =
expected_info.width * expected_info.num_components * v.yx[0] + v.yx[1];
EXPECT_THAT(tensorstore::span<unsigned char>(image.get() + offset, 3),
::testing::ElementsAreArray(v.rgb))
<< " (" << v.yx[0] << "," << v.yx[1] << ") " << offset;
}
}
TEST_P(ReaderTest, ReadImageTruncated) {
const auto& filename = GetParam().filename;
ASSERT_FALSE(reader.get() == nullptr) << filename;
if (filename == "png/D75_01b.png") return;
if (filename == "tiff/D75_01b.tiff") return;
if (filename == "bmp/D75_08b_grey.bmp") return;
if (IsWebP()) return;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(absl::Cord file_data,
ReadEntireFile(GetFilename()));
absl::Cord partial_file = file_data.Subcord(0, file_data.size() * 0.9);
riegeli::CordReader cord_reader(&partial_file);
absl::Status status = reader->Initialize(&cord_reader);
if (status.ok()) {
auto info = reader->GetImageInfo();
auto expected_info = GetParam().info;
if (info.width == expected_info.width) {
EXPECT_EQ(info.width, expected_info.width) << filename;
EXPECT_EQ(info.height, expected_info.height) << filename;
EXPECT_EQ(info.num_components, expected_info.num_components) << filename;
EXPECT_EQ(info.dtype, expected_info.dtype) << filename;
}
size_t image_bytes = ImageRequiredBytes(expected_info);
std::unique_ptr<unsigned char[]> image(new unsigned char[image_bytes]());
status.Update(reader->Decode(tensorstore::span(image.get(), image_bytes)));
}
if (status.ok()) {
if (!cord_reader.Close()) {
status.Update(cord_reader.status());
}
}
EXPECT_FALSE(status.ok());
}
std::vector<V> GetD75_08_Values() {
return {
V{{0, 0}, {151, 75, 83}},
V{{171, 0}, {255, 250, 251}},
V{{29, 117}, {173, 93, 97}},
};
}
std::vector<V> GetD75_08_Values_JPEG() {
return {
V{{0, 0}, {152, 76, 88}},
V{{171, 0}, {253, 247, 251}},
V{{29, 117}, {174, 93, 99}},
};
}
INSTANTIATE_TEST_SUITE_P(
AvifFiles, ReaderTest,
::testing::Values(
TestParam{"avif/D75_08b.avif", ImageInfo{172, 306, 3},
GetD75_08_Values()},
TestParam{"avif/D75_08b_cq1.avif", ImageInfo{172, 306, 3}},
TestParam{"avif/D75_10b_cq1.avif",
ImageInfo{172, 306, 3, ::tensorstore::dtype_v<uint16_t>}},
TestParam{"avif/D75_08b_grey.avif",
ImageInfo{172, 306, 1},
{V{{29, 117}, {87, 87, 87}}}},
TestParam{"avif/D75_12b_grey.avif",
ImageInfo{172, 306, 1, ::tensorstore::dtype_v<uint16_t>}}
));
INSTANTIATE_TEST_SUITE_P(
BmpFiles, ReaderTest,
::testing::Values(
TestParam{"bmp/D75_08b.bmp", ImageInfo{172, 306, 3},
GetD75_08_Values()},
TestParam{"bmp/D75_08b_grey.bmp",
ImageInfo{172, 306, 1},
{V{{29, 117}, {87, 87, 87}}}}
));
INSTANTIATE_TEST_SUITE_P(
JpegFiles, ReaderTest,
::testing::Values(
TestParam{"jpeg/D75_08b.jpeg", ImageInfo{172, 306, 3},
GetD75_08_Values_JPEG()}
));
INSTANTIATE_TEST_SUITE_P(
PngFiles, ReaderTest,
::testing::Values(
TestParam{"png/D75_08b.png", ImageInfo{172, 306, 3},
GetD75_08_Values()},
TestParam{"png/D75_16b.png",
ImageInfo{172, 306, 3, ::tensorstore::dtype_v<uint16_t>}},
TestParam{"png/D75_04b.png", ImageInfo{172, 306, 3}},
TestParam{"png/D75_08b_grey.png",
ImageInfo{172, 306, 1},
{V{{29, 117}, {87, 87, 87}}}},
TestParam{"png/D75_16b_grey.png",
ImageInfo{172, 306, 1, ::tensorstore::dtype_v<uint16_t>}},
TestParam{"png/D75_01b.png",
ImageInfo{172, 306, 1, ::tensorstore::dtype_v<bool>}}
));
INSTANTIATE_TEST_SUITE_P(
TifFiles, ReaderTest,
::testing::Values(
TestParam{"tiff/D75_08b.tiff", ImageInfo{172, 306, 3},
GetD75_08_Values()},
TestParam{"tiff/D75_08b_tiled.tiff", ImageInfo{172, 306, 3},
GetD75_08_Values()},
TestParam{"tiff/D75_08b_scanline.tiff", ImageInfo{172, 306, 3},
GetD75_08_Values()},
TestParam{"tiff/D75_08b_zip.tiff", ImageInfo{172, 306, 3},
GetD75_08_Values()},
TestParam{"tiff/D75_08b_lzw.tiff", ImageInfo{172, 306, 3},
GetD75_08_Values()},
TestParam{"tiff/D75_16b.tiff",
ImageInfo{172, 306, 3, ::tensorstore::dtype_v<uint16_t>}},
TestParam{"tiff/D75_01b.tiff",
ImageInfo{172, 306, 1, ::tensorstore::dtype_v<bool>}},
TestParam{"tiff/D75_08b_grey.tiff",
ImageInfo{172, 306, 1},
{V{{29, 117}, {87, 87, 87}}}},
TestParam{"tiff/D75_16b_grey.tiff",
ImageInfo{172, 306, 1, ::tensorstore::dtype_v<uint16_t>}}
));
INSTANTIATE_TEST_SUITE_P(
WebPFiles, ReaderTest,
::testing::Values(
TestParam{"webp/D75_08b.webp", ImageInfo{172, 306, 3},
GetD75_08_Values()},
TestParam{"webp/D75_08b_q90.webp",
ImageInfo{172, 306, 3},
{V{{29, 117}, {166, 94, 91}}}}
));
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/image/image_reader.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/image/image_reader_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
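A corresponding sketch for the ImageReader interface, following the pattern of the ReadImage test above; the DecodePngToVector helper name and the PngReader choice are assumptions for illustration.
#include <vector>
#include "absl/log/absl_check.h"
#include "absl/strings/cord.h"
#include "riegeli/bytes/cord_reader.h"
#include "tensorstore/internal/image/image_info.h"
#include "tensorstore/internal/image/png_reader.h"
#include "tensorstore/util/span.h"
// Hypothetical helper: decode an in-memory PNG into an interleaved byte buffer.
std::vector<unsigned char> DecodePngToVector(absl::Cord encoded) {
  riegeli::CordReader cord_reader(&encoded);
  tensorstore::internal_image::PngReader reader;      // any ImageReader subclass
  ABSL_CHECK_OK(reader.Initialize(&cord_reader));     // parses the header
  tensorstore::internal_image::ImageInfo info = reader.GetImageInfo();
  std::vector<unsigned char> pixels(ImageRequiredBytes(info));  // found via ADL
  ABSL_CHECK_OK(reader.Decode(
      tensorstore::span(pixels.data(), pixels.size())));        // fills the buffer
  return pixels;
}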
96eac068-aee0-4173-954e-e2d2797caf66 | cpp | google/tensorstore | single_producer_queue | tensorstore/internal/container/single_producer_queue.h | tensorstore/internal/container/single_producer_queue_test.cc | #ifndef TENSORSTORE_INTERNAL_THREAD_SINGLE_PRODUCER_QUEUE_H_
#define TENSORSTORE_INTERNAL_THREAD_SINGLE_PRODUCER_QUEUE_H_
#include <stdint.h>
#include <algorithm>
#include <atomic>
#include <cassert>
#include <cstddef>
#include <memory>
#include <optional>
#include <type_traits>
#include "absl/base/attributes.h"
#include "absl/base/optimization.h"
#include "absl/log/absl_check.h"
namespace tensorstore {
namespace internal_container {
template <typename T, bool kCanResize = true,
typename Allocator = std::allocator<T>>
class SingleProducerQueue;
template <typename T, typename Allocator>
class SPQArray {
private:
static_assert(std::is_trivially_destructible_v<T>);
using ArrayAllocator = typename std::allocator_traits<
Allocator>::template rebind_alloc<SPQArray>;
using ByteAllocator =
typename std::allocator_traits<Allocator>::template rebind_alloc<char>;
constexpr static ptrdiff_t start_offset() {
struct X {
SPQArray array;
std::atomic<T> item[1];
};
return offsetof(X, item);
}
constexpr static size_t alloc_size(int64_t c) {
struct X {
SPQArray array;
std::atomic<T> item[1];
};
return sizeof(X) + (c - 1) * sizeof(std::atomic<T>);
}
struct private_t {
private:
friend class SPQArray;
private_t() = default;
};
public:
static SPQArray* New(int64_t c, SPQArray* retired, Allocator* alloc) {
size_t allocation_bytes = alloc_size(c);
ByteAllocator byte_alloc(*alloc);
void* mem = std::allocator_traits<ByteAllocator>::allocate(
byte_alloc, allocation_bytes);
auto* as_array = static_cast<SPQArray*>(mem);
ArrayAllocator array_alloc(*alloc);
std::allocator_traits<ArrayAllocator>::construct(array_alloc, as_array,
private_t{}, c, retired);
return as_array;
}
static void Delete(SPQArray* ptr, Allocator* alloc) {
const size_t allocation_bytes = alloc_size(ptr->capacity);
void* mem = ptr;
ByteAllocator byte_alloc(*alloc);
std::allocator_traits<ByteAllocator>::deallocate(
byte_alloc, static_cast<char*>(mem), allocation_bytes);
}
SPQArray(private_t, int64_t c, SPQArray* retired)
: capacity(c), mask(c - 1), retired(retired) {}
SPQArray* resize(int64_t b, int64_t t, Allocator* alloc) {
auto* a = SPQArray::New(2 * capacity, this, alloc);
for (int64_t i = t; i != b; ++i) {
a->item(i).store(
item(i).load(std::memory_order_relaxed), std::memory_order_relaxed);
}
return a;
}
std::atomic<T>* buffer() {
return reinterpret_cast<std::atomic<T>*>(reinterpret_cast<char*>(this) +
start_offset());
}
std::atomic<T>& item(int64_t i) { return buffer()[i & mask]; }
int64_t capacity;
int64_t mask;
SPQArray* retired;
};
template <typename T, bool kCanResize, typename Allocator>
class SingleProducerQueue {
static_assert(std::is_trivially_destructible_v<T>);
std::nullopt_t missing(std::false_type) { return std::nullopt; }
std::nullptr_t missing(std::true_type) { return nullptr; }
using Array = SPQArray<T, Allocator>;
public:
using optional_t =
std::conditional_t<std::is_pointer_v<T>, T, std::optional<T>>;
SingleProducerQueue(int64_t n, Allocator alloc)
: top_(0),
bottom_(0),
allocator_(alloc),
array_(Array::New(n, nullptr, &allocator_)) {
ABSL_CHECK_EQ(n & (n - 1), 0);
}
explicit SingleProducerQueue(int64_t n)
: SingleProducerQueue(n, Allocator()) {}
~SingleProducerQueue() {
Array* a = array_.load(std::memory_order_relaxed);
while (a) {
Array* b = a->retired;
a->retired = nullptr;
Array::Delete(a, &allocator_);
a = b;
}
}
int64_t capacity() const {
return array_.load(std::memory_order_relaxed)->capacity;
}
size_t size() const {
int64_t b = bottom_.load(std::memory_order_relaxed);
int64_t t = top_.load(std::memory_order_relaxed);
return static_cast<size_t>(b > t ? b - t : 0);
}
bool empty() const { return !size(); }
bool push(T x) {
auto b = bottom_.load(std::memory_order_relaxed);
auto t = top_.load(std::memory_order_acquire);
Array* a = array_.load(std::memory_order_relaxed);
if (a->capacity < (b - t) + 1) {
if (!kCanResize) return false;
a = a->resize(b, t, &allocator_);
array_.store(a, std::memory_order_release);
}
a->item(b).store(std::move(x), std::memory_order_relaxed);
std::atomic_thread_fence(std::memory_order_release);
bottom_.store(b + 1, std::memory_order_relaxed);
return true;
}
optional_t try_pop() {
auto b = bottom_.load(std::memory_order_relaxed) - 1;
Array* a = array_.load(std::memory_order_relaxed);
bottom_.store(b, std::memory_order_relaxed);
std::atomic_thread_fence(std::memory_order_seq_cst);
auto t = top_.load(std::memory_order_relaxed);
if (t > b) {
bottom_.store(b + 1, std::memory_order_relaxed);
return missing(std::is_pointer<T>{});
}
if (t == b) {
if (!top_.compare_exchange_strong(t, t + 1, std::memory_order_seq_cst,
std::memory_order_relaxed)) {
bottom_.store(b + 1, std::memory_order_relaxed);
return missing(std::is_pointer<T>{});
}
bottom_.store(b + 1, std::memory_order_relaxed);
}
return a->item(b).load(std::memory_order_relaxed);
}
optional_t try_steal() {
auto t = top_.load(std::memory_order_acquire);
std::atomic_thread_fence(std::memory_order_seq_cst);
auto b = bottom_.load(std::memory_order_acquire);
if (t >= b) {
return missing(std::is_pointer<T>{});
}
Array* a = array_.load(std::memory_order_consume);
T x = a->item(t).load(std::memory_order_relaxed);
if (!top_.compare_exchange_strong(t, t + 1, std::memory_order_seq_cst,
std::memory_order_relaxed)) {
return missing(std::is_pointer<T>{});
}
return x;
}
private:
ABSL_CACHELINE_ALIGNED std::atomic<int64_t> top_;
std::atomic<int64_t> bottom_;
ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS Allocator allocator_;
std::atomic<Array*> array_;
};
}
}
#endif | #include "tensorstore/internal/container/single_producer_queue.h"
#include <stddef.h>
#include <atomic>
#include <optional>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/absl_check.h"
#include "tensorstore/internal/thread/thread.h"
using ::tensorstore::internal_container::SingleProducerQueue;
using ::testing::Eq;
using ::testing::Optional;
namespace {
TEST(SingleProducerQueueTest, Basic) {
SingleProducerQueue<int> q(32);
EXPECT_THAT(q.capacity(), Eq(32));
EXPECT_THAT(q.empty(), true);
EXPECT_THAT(q.size(), Eq(0));
q.push(1);
EXPECT_THAT(q.capacity(), Eq(32));
EXPECT_THAT(q.empty(), false);
EXPECT_THAT(q.size(), Eq(1));
EXPECT_THAT(q.try_pop(), Optional(1));
EXPECT_THAT(q.try_pop(), Eq(std::nullopt));
q.push(2);
EXPECT_THAT(q.try_steal(), Optional(2));
EXPECT_THAT(q.try_pop(), Eq(std::nullopt));
}
TEST(SingleProducerQueueTest, BasicPtr) {
SingleProducerQueue<int*> q(32);
int a[2];
EXPECT_THAT(q.capacity(), Eq(32));
EXPECT_THAT(q.empty(), true);
EXPECT_THAT(q.size(), Eq(0));
q.push(&a[0]);
EXPECT_THAT(q.capacity(), Eq(32));
EXPECT_THAT(q.empty(), false);
EXPECT_THAT(q.size(), Eq(1));
EXPECT_THAT(q.try_pop(), Eq(a));
EXPECT_THAT(q.try_pop(), Eq(nullptr));
q.push(&a[1]);
EXPECT_THAT(q.try_steal(), Eq(a + 1));
EXPECT_THAT(q.try_pop(), Eq(nullptr));
}
TEST(SimpleQueue, PushPop) {
SingleProducerQueue<int, false> q(1);
for (int i = 0; i < 4096; i++) {
if (!q.push(i)) {
q.try_pop();
q.push(i);
if (i & 0x2) q.try_pop();
}
}
}
TEST(SingleProducerQueueTest, ConcurrentSteal) {
static constexpr size_t kNumThreads = 4;
static constexpr int kSize = 10000;
SingleProducerQueue<int> q(32);
std::atomic<int> remaining(kSize);
std::vector<tensorstore::internal::Thread> threads;
threads.reserve(kNumThreads + 1);
for (size_t thread_i = 0; thread_i < kNumThreads; ++thread_i) {
bool c = thread_i & 1;
threads.emplace_back(
tensorstore::internal::Thread({"steal"}, [c, &remaining, &q]() {
while (remaining.load(std::memory_order_seq_cst) > 0) {
if (auto v = q.try_steal(); v.has_value()) {
ABSL_CHECK_EQ(*v, 1);
remaining.fetch_sub(1);
}
if (c) {
q.capacity();
} else {
q.size();
}
}
}));
}
threads.emplace_back(tensorstore::internal::Thread({"create"}, [&q]() {
for (int i = 0; i < kSize; ++i) q.push(1);
}));
for (auto& t : threads) t.Join();
EXPECT_THAT(remaining.load(std::memory_order_seq_cst), 0);
EXPECT_THAT(q.try_steal(), Eq(std::nullopt));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/container/single_producer_queue.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/container/single_producer_queue_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
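A sketch of the intended threading discipline for SingleProducerQueue, inferred from the API and the ConcurrentSteal test: only the owner thread calls push/try_pop, while any thread may call try_steal. Shown single-threaded here for brevity.
#include <optional>
#include "tensorstore/internal/container/single_producer_queue.h"
void SingleProducerQueueSketch() {
  // Capacity must be a power of two (ABSL_CHECK in the constructor).
  tensorstore::internal_container::SingleProducerQueue<int> q(8);
  // Owner thread only: push new work, pop from the same (LIFO) end.
  q.push(1);
  q.push(2);
  std::optional<int> own = q.try_pop();       // yields 2, the most recent push
  // Any other thread: steal from the opposite (FIFO) end.
  std::optional<int> stolen = q.try_steal();  // yields 1 here; nullopt when empty
  (void)own;
  (void)stolen;
}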
33c10622-4577-400a-a33f-12d488fbe8c1 | cpp | google/tensorstore | compressed_tuple | tensorstore/internal/container/compressed_tuple.h | tensorstore/internal/container/compressed_tuple_test.cc | #ifndef TENSORSTORE_INTERNAL_CONTAINER_COMPRESSED_TUPLE_H_
#define TENSORSTORE_INTERNAL_CONTAINER_COMPRESSED_TUPLE_H_
#include <stddef.h>
#include <initializer_list>
#include <tuple>
#include <type_traits>
#include <utility>
#if defined(_MSC_VER) && !defined(__NVCC__)
#define TENSORSTORE_INTERNAL_COMPRESSED_TUPLE_DECLSPEC __declspec(empty_bases)
#else
#define TENSORSTORE_INTERNAL_COMPRESSED_TUPLE_DECLSPEC
#endif
namespace tensorstore {
namespace internal_container {
template <typename... Ts>
class CompressedTuple;
namespace internal_compressed_tuple {
template <typename D, size_t I>
struct Elem;
template <typename... B, size_t I>
struct Elem<CompressedTuple<B...>, I>
: std::tuple_element<I, std::tuple<B...>> {};
template <typename D, size_t I>
using ElemT = typename Elem<D, I>::type;
struct uses_inheritance {};
template <typename T>
constexpr bool ShouldUseBase() {
return std::is_class<T>::value && std::is_empty<T>::value &&
!std::is_final<T>::value &&
!std::is_base_of<uses_inheritance, T>::value;
}
template <typename T, size_t I, bool UseBase = ShouldUseBase<T>()>
struct Storage {
T value;
constexpr Storage() = default;
template <typename V>
explicit constexpr Storage(std::in_place_t, V&& v)
: value(std::forward<V>(v)) {}
constexpr const T& get() const& { return value; }
T& get() & { return value; }
constexpr const T&& get() const&& { return std::move(*this).value; }
T&& get() && { return std::move(*this).value; }
};
template <typename T, size_t I>
struct TENSORSTORE_INTERNAL_COMPRESSED_TUPLE_DECLSPEC Storage<T, I, true> : T {
constexpr Storage() = default;
template <typename V>
explicit constexpr Storage(std::in_place_t, V&& v) : T(std::forward<V>(v)) {}
constexpr const T& get() const& { return *this; }
T& get() & { return *this; }
constexpr const T&& get() const&& { return std::move(*this); }
T&& get() && { return std::move(*this); }
};
template <typename D, typename I, bool ShouldAnyUseBase>
struct TENSORSTORE_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl;
template <typename... Ts, size_t... I, bool ShouldAnyUseBase>
struct TENSORSTORE_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl<
CompressedTuple<Ts...>, std::index_sequence<I...>, ShouldAnyUseBase>
: uses_inheritance,
Storage<Ts, std::integral_constant<size_t, I>::value>... {
constexpr CompressedTupleImpl() = default;
template <typename... Vs>
explicit constexpr CompressedTupleImpl(std::in_place_t, Vs&&... args)
: Storage<Ts, I>(std::in_place, std::forward<Vs>(args))... {}
friend CompressedTuple<Ts...>;
};
template <typename... Ts, size_t... I>
struct TENSORSTORE_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl<
CompressedTuple<Ts...>, std::index_sequence<I...>, false>
: Storage<Ts, std::integral_constant<size_t, I>::value, false>... {
constexpr CompressedTupleImpl() = default;
template <typename... Vs>
explicit constexpr CompressedTupleImpl(std::in_place_t, Vs&&... args)
: Storage<Ts, I, false>(std::in_place, std::forward<Vs>(args))... {}
friend CompressedTuple<Ts...>;
};
std::false_type Or(std::initializer_list<std::false_type>);
std::true_type Or(std::initializer_list<bool>);
template <typename... Ts>
constexpr bool ShouldAnyUseBase() {
return decltype(Or(
{std::integral_constant<bool, ShouldUseBase<Ts>()>()...})){};
}
template <typename T, typename V>
using TupleElementMoveConstructible =
typename std::conditional<std::is_reference<T>::value,
std::is_convertible<V, T>,
std::is_constructible<T, V&&>>::type;
template <bool SizeMatches, class T, class... Vs>
struct TupleMoveConstructible : std::false_type {};
template <class... Ts, class... Vs>
struct TupleMoveConstructible<true, CompressedTuple<Ts...>, Vs...>
: std::integral_constant<
bool,
std::conjunction<TupleElementMoveConstructible<Ts, Vs&&>...>::value> {
};
template <typename T>
struct compressed_tuple_size;
template <typename... Es>
struct compressed_tuple_size<CompressedTuple<Es...>>
: public std::integral_constant<size_t, sizeof...(Es)> {};
template <class T, class... Vs>
struct TupleItemsMoveConstructible
: std::integral_constant<
bool, TupleMoveConstructible<compressed_tuple_size<T>::value ==
sizeof...(Vs),
T, Vs...>::value> {};
}
template <typename... Ts>
class TENSORSTORE_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple
: private internal_compressed_tuple::CompressedTupleImpl<
CompressedTuple<Ts...>, std::index_sequence_for<Ts...>,
internal_compressed_tuple::ShouldAnyUseBase<Ts...>()> {
private:
template <int I>
using ElemT = internal_compressed_tuple::ElemT<CompressedTuple, I>;
template <int I>
using StorageT = internal_compressed_tuple::Storage<ElemT<I>, I>;
public:
#if defined(_MSC_VER)
constexpr CompressedTuple() : CompressedTuple::CompressedTupleImpl() {}
#else
constexpr CompressedTuple() = default;
#endif
explicit constexpr CompressedTuple(const Ts&... base)
: CompressedTuple::CompressedTupleImpl(std::in_place, base...) {}
template <typename First, typename... Vs,
std::enable_if_t<
std::conjunction<
std::negation<std::is_same<void(CompressedTuple),
void(std::decay_t<First>)>>,
internal_compressed_tuple::TupleItemsMoveConstructible<
CompressedTuple<Ts...>, First, Vs...>>::value,
bool> = true>
explicit constexpr CompressedTuple(First&& first, Vs&&... base)
: CompressedTuple::CompressedTupleImpl(std::in_place,
std::forward<First>(first),
std::forward<Vs>(base)...) {}
template <int I>
ElemT<I>& get() & {
return StorageT<I>::get();
}
template <int I>
constexpr const ElemT<I>& get() const& {
return StorageT<I>::get();
}
template <int I>
ElemT<I>&& get() && {
return std::move(*this).StorageT<I>::get();
}
template <int I>
constexpr const ElemT<I>&& get() const&& {
return std::move(*this).StorageT<I>::get();
}
};
template <>
class TENSORSTORE_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple<> {};
}
}
#undef TENSORSTORE_INTERNAL_COMPRESSED_TUPLE_DECLSPEC
#endif | #include "tensorstore/internal/container/compressed_tuple.h"
#include <any>
#include <memory>
#include <optional>
#include <set>
#include <string>
#include <type_traits>
#include <utility>
#include <gtest/gtest.h>
using tensorstore::internal_container::CompressedTuple;
namespace {
struct CopyableMovableInstance {
explicit CopyableMovableInstance(int x) : value_(x) { ++num_instances; }
CopyableMovableInstance(const CopyableMovableInstance& rhs) {
value_ = rhs.value_;
++num_copies;
}
CopyableMovableInstance(CopyableMovableInstance&& rhs) {
value_ = rhs.value_;
++num_moves;
}
CopyableMovableInstance& operator=(const CopyableMovableInstance& rhs) {
value_ = rhs.value_;
++num_copies;
return *this;
}
CopyableMovableInstance& operator=(CopyableMovableInstance&& rhs) {
value_ = rhs.value_;
++num_moves;
return *this;
}
int value() const& { return value_; }
int value() const&& { return value_; }
int value_;
static void Reset() {
num_instances = 0;
num_moves = 0;
num_copies = 0;
num_swaps = 0;
}
static int num_instances;
static int num_moves;
static int num_copies;
static int num_swaps;
};
int CopyableMovableInstance::num_instances{0};
int CopyableMovableInstance::num_moves{0};
int CopyableMovableInstance::num_copies{0};
int CopyableMovableInstance::num_swaps{0};
enum class CallType { kConstRef, kConstMove };
template <int>
struct Empty {
constexpr CallType value() const& { return CallType::kConstRef; }
constexpr CallType value() const&& { return CallType::kConstMove; }
};
template <typename T>
struct NotEmpty {
T value;
};
template <typename T, typename U>
struct TwoValues {
T value1;
U value2;
};
TEST(CompressedTupleTest, Sizeof) {
EXPECT_EQ(sizeof(int), sizeof(CompressedTuple<int>));
EXPECT_EQ(sizeof(int), sizeof(CompressedTuple<int, Empty<0>>));
EXPECT_EQ(sizeof(int), sizeof(CompressedTuple<int, Empty<0>, Empty<1>>));
EXPECT_EQ(sizeof(int),
sizeof(CompressedTuple<int, Empty<0>, Empty<1>, Empty<2>>));
EXPECT_EQ(sizeof(TwoValues<int, double>),
sizeof(CompressedTuple<int, NotEmpty<double>>));
EXPECT_EQ(sizeof(TwoValues<int, double>),
sizeof(CompressedTuple<int, Empty<0>, NotEmpty<double>>));
EXPECT_EQ(sizeof(TwoValues<int, double>),
sizeof(CompressedTuple<int, Empty<0>, NotEmpty<double>, Empty<1>>));
}
TEST(CompressedTupleTest, OneMoveOnRValueConstructionTemp) {
CopyableMovableInstance::Reset();
CompressedTuple<CopyableMovableInstance> x1(CopyableMovableInstance(1));
EXPECT_EQ(CopyableMovableInstance::num_instances, 1);
EXPECT_EQ(CopyableMovableInstance::num_copies, 0);
EXPECT_LE(CopyableMovableInstance::num_moves, 1);
EXPECT_EQ(x1.get<0>().value(), 1);
}
TEST(CompressedTupleTest, OneMoveOnRValueConstructionMove) {
CopyableMovableInstance::Reset();
CopyableMovableInstance i1(1);
CompressedTuple<CopyableMovableInstance> x1(std::move(i1));
EXPECT_EQ(CopyableMovableInstance::num_instances, 1);
EXPECT_EQ(CopyableMovableInstance::num_copies, 0);
EXPECT_LE(CopyableMovableInstance::num_moves, 1);
EXPECT_EQ(x1.get<0>().value(), 1);
}
TEST(CompressedTupleTest, OneMoveOnRValueConstructionMixedTypes) {
CopyableMovableInstance::Reset();
CopyableMovableInstance i1(1);
CopyableMovableInstance i2(2);
Empty<0> empty;
CompressedTuple<CopyableMovableInstance, CopyableMovableInstance&, Empty<0>>
x1(std::move(i1), i2, empty);
EXPECT_EQ(x1.get<0>().value(), 1);
EXPECT_EQ(x1.get<1>().value(), 2);
EXPECT_EQ(CopyableMovableInstance::num_copies, 0);
EXPECT_EQ(CopyableMovableInstance::num_moves, 1);
}
struct IncompleteType;
CompressedTuple<CopyableMovableInstance, IncompleteType&, Empty<0>>
MakeWithIncomplete(CopyableMovableInstance i1,
IncompleteType& t,
Empty<0> empty) {
return CompressedTuple<CopyableMovableInstance, IncompleteType&, Empty<0>>{
std::move(i1), t, empty};
}
struct IncompleteType {};
TEST(CompressedTupleTest, OneMoveOnRValueConstructionWithIncompleteType) {
CopyableMovableInstance::Reset();
CopyableMovableInstance i1(1);
Empty<0> empty;
struct DerivedType : IncompleteType {
int value = 0;
};
DerivedType fd;
fd.value = 7;
CompressedTuple<CopyableMovableInstance, IncompleteType&, Empty<0>> x1 =
MakeWithIncomplete(std::move(i1), fd, empty);
EXPECT_EQ(x1.get<0>().value(), 1);
EXPECT_EQ(static_cast<DerivedType&>(x1.get<1>()).value, 7);
EXPECT_EQ(CopyableMovableInstance::num_copies, 0);
EXPECT_EQ(CopyableMovableInstance::num_moves, 2);
}
TEST(CompressedTupleTest,
OneMoveOnRValueConstructionMixedTypes_BraceInitPoisonPillExpected) {
CopyableMovableInstance::Reset();
CopyableMovableInstance i1(1);
CopyableMovableInstance i2(2);
CompressedTuple<CopyableMovableInstance, CopyableMovableInstance&, Empty<0>>
x1(std::move(i1), i2, {});
EXPECT_EQ(x1.get<0>().value(), 1);
EXPECT_EQ(x1.get<1>().value(), 2);
EXPECT_EQ(CopyableMovableInstance::num_instances, 2);
EXPECT_EQ(CopyableMovableInstance::num_copies, 1);
EXPECT_EQ(CopyableMovableInstance::num_moves, 0);
}
TEST(CompressedTupleTest, OneCopyOnLValueConstruction) {
CopyableMovableInstance::Reset();
CopyableMovableInstance i1(1);
CompressedTuple<CopyableMovableInstance> x1(i1);
EXPECT_EQ(CopyableMovableInstance::num_copies, 1);
EXPECT_EQ(CopyableMovableInstance::num_moves, 0);
CopyableMovableInstance::Reset();
CopyableMovableInstance i2(2);
const CopyableMovableInstance& i2_ref = i2;
CompressedTuple<CopyableMovableInstance> x2(i2_ref);
EXPECT_EQ(CopyableMovableInstance::num_copies, 1);
EXPECT_EQ(CopyableMovableInstance::num_moves, 0);
}
TEST(CompressedTupleTest, OneMoveOnRValueAccess) {
CopyableMovableInstance i1(1);
CompressedTuple<CopyableMovableInstance> x(std::move(i1));
CopyableMovableInstance::Reset();
CopyableMovableInstance i2 = std::move(x).get<0>();
EXPECT_EQ(CopyableMovableInstance::num_copies, 0);
EXPECT_EQ(CopyableMovableInstance::num_moves, 1);
EXPECT_EQ(i2.value(), 1);
}
TEST(CompressedTupleTest, OneCopyOnLValueAccess) {
CopyableMovableInstance::Reset();
CompressedTuple<CopyableMovableInstance> x(CopyableMovableInstance(0));
EXPECT_EQ(CopyableMovableInstance::num_copies, 0);
EXPECT_EQ(CopyableMovableInstance::num_moves, 1);
CopyableMovableInstance t = x.get<0>();
EXPECT_EQ(CopyableMovableInstance::num_copies, 1);
EXPECT_EQ(CopyableMovableInstance::num_moves, 1);
EXPECT_EQ(t.value(), 0);
}
TEST(CompressedTupleTest, ZeroCopyOnRefAccess) {
CopyableMovableInstance::Reset();
CompressedTuple<CopyableMovableInstance> x(CopyableMovableInstance(0));
EXPECT_EQ(CopyableMovableInstance::num_copies, 0);
EXPECT_EQ(CopyableMovableInstance::num_moves, 1);
CopyableMovableInstance& t1 = x.get<0>();
const CopyableMovableInstance& t2 = x.get<0>();
EXPECT_EQ(CopyableMovableInstance::num_copies, 0);
EXPECT_EQ(CopyableMovableInstance::num_moves, 1);
EXPECT_EQ(t1.value(), 0);
EXPECT_EQ(t2.value(), 0);
}
TEST(CompressedTupleTest, Access) {
struct S {
std::string x;
};
CompressedTuple<int, Empty<0>, S> x(7, {}, S{"ABC"});
EXPECT_EQ(sizeof(x), sizeof(TwoValues<int, S>));
EXPECT_EQ(7, x.get<0>());
EXPECT_EQ("ABC", x.get<2>().x);
}
TEST(CompressedTupleTest, NonClasses) {
CompressedTuple<int, const char*> x(7, "ABC");
EXPECT_EQ(7, x.get<0>());
EXPECT_STREQ("ABC", x.get<1>());
}
TEST(CompressedTupleTest, MixClassAndNonClass) {
CompressedTuple<int, const char*, Empty<0>, NotEmpty<double>> x(7, "ABC", {},
{1.25});
struct Mock {
int v;
const char* p;
double d;
};
EXPECT_EQ(sizeof(x), sizeof(Mock));
EXPECT_EQ(7, x.get<0>());
EXPECT_STREQ("ABC", x.get<1>());
EXPECT_EQ(1.25, x.get<3>().value);
}
TEST(CompressedTupleTest, Nested) {
CompressedTuple<int, CompressedTuple<int>,
CompressedTuple<int, CompressedTuple<int>>>
x(1, CompressedTuple<int>(2),
CompressedTuple<int, CompressedTuple<int>>(3, CompressedTuple<int>(4)));
EXPECT_EQ(1, x.get<0>());
EXPECT_EQ(2, x.get<1>().get<0>());
EXPECT_EQ(3, x.get<2>().get<0>());
EXPECT_EQ(4, x.get<2>().get<1>().get<0>());
CompressedTuple<Empty<0>, Empty<0>,
CompressedTuple<Empty<0>, CompressedTuple<Empty<0>>>>
y;
std::set<Empty<0>*> empties{&y.get<0>(), &y.get<1>(), &y.get<2>().get<0>(),
&y.get<2>().get<1>().get<0>()};
#ifdef _MSC_VER
int expected = 1;
#else
int expected = 4;
#endif
EXPECT_EQ(expected, sizeof(y));
EXPECT_EQ(expected, empties.size());
EXPECT_EQ(sizeof(y), sizeof(Empty<0>) * empties.size());
EXPECT_EQ(4 * sizeof(char),
sizeof(CompressedTuple<CompressedTuple<char, char>,
CompressedTuple<char, char>>));
EXPECT_TRUE((std::is_empty<CompressedTuple<Empty<0>, Empty<1>>>::value));
struct CT_Empty : CompressedTuple<Empty<0>> {};
CompressedTuple<Empty<0>, CT_Empty> nested_empty;
auto contained = nested_empty.get<0>();
auto nested = nested_empty.get<1>().get<0>();
EXPECT_TRUE((std::is_same<decltype(contained), decltype(nested)>::value));
}
TEST(CompressedTupleTest, Reference) {
int i = 7;
std::string s = "Very long string that goes in the heap";
CompressedTuple<int, int&, std::string, std::string&> x(i, i, s, s);
EXPECT_EQ(s, "Very long string that goes in the heap");
EXPECT_EQ(x.get<0>(), x.get<1>());
EXPECT_NE(&x.get<0>(), &x.get<1>());
EXPECT_EQ(&x.get<1>(), &i);
EXPECT_EQ(x.get<2>(), x.get<3>());
EXPECT_NE(&x.get<2>(), &x.get<3>());
EXPECT_EQ(&x.get<3>(), &s);
}
TEST(CompressedTupleTest, NoElements) {
CompressedTuple<> x;
static_cast<void>(x);
EXPECT_TRUE(std::is_empty<CompressedTuple<>>::value);
}
TEST(CompressedTupleTest, MoveOnlyElements) {
CompressedTuple<std::unique_ptr<std::string>> str_tup(
std::make_unique<std::string>("str"));
CompressedTuple<CompressedTuple<std::unique_ptr<std::string>>,
std::unique_ptr<int>>
x(std::move(str_tup), std::make_unique<int>(5));
EXPECT_EQ(*x.get<0>().get<0>(), "str");
EXPECT_EQ(*x.get<1>(), 5);
std::unique_ptr<std::string> x0 = std::move(x.get<0>()).get<0>();
std::unique_ptr<int> x1 = std::move(x).get<1>();
EXPECT_EQ(*x0, "str");
EXPECT_EQ(*x1, 5);
}
TEST(CompressedTupleTest, MoveConstructionMoveOnlyElements) {
CompressedTuple<std::unique_ptr<std::string>> base(
std::make_unique<std::string>("str"));
EXPECT_EQ(*base.get<0>(), "str");
CompressedTuple<std::unique_ptr<std::string>> copy(std::move(base));
EXPECT_EQ(*copy.get<0>(), "str");
}
TEST(CompressedTupleTest, AnyElements) {
std::any a(std::string("str"));
CompressedTuple<std::any, std::any&> x(std::any(5), a);
EXPECT_EQ(std::any_cast<int>(x.get<0>()), 5);
EXPECT_EQ(std::any_cast<std::string>(x.get<1>()), "str");
a = 0.5f;
EXPECT_EQ(std::any_cast<float>(x.get<1>()), 0.5);
}
TEST(CompressedTupleTest, Constexpr) {
struct NonTrivialStruct {
constexpr NonTrivialStruct() = default;
constexpr int value() const { return v; }
int v = 5;
};
struct TrivialStruct {
TrivialStruct() = default;
constexpr int value() const { return v; }
int v;
};
constexpr CompressedTuple<int, double, CompressedTuple<int>, Empty<0>> x(
7, 1.25, CompressedTuple<int>(5), {});
constexpr int x0 = x.get<0>();
constexpr double x1 = x.get<1>();
constexpr int x2 = x.get<2>().get<0>();
constexpr CallType x3 = x.get<3>().value();
EXPECT_EQ(x0, 7);
EXPECT_EQ(x1, 1.25);
EXPECT_EQ(x2, 5);
EXPECT_EQ(x3, CallType::kConstRef);
#if !defined(__GNUC__) || defined(__clang__) || __GNUC__ > 4
constexpr CompressedTuple<Empty<0>, TrivialStruct, int> trivial = {};
constexpr CallType trivial0 = trivial.get<0>().value();
constexpr int trivial1 = trivial.get<1>().value();
constexpr int trivial2 = trivial.get<2>();
EXPECT_EQ(trivial0, CallType::kConstRef);
EXPECT_EQ(trivial1, 0);
EXPECT_EQ(trivial2, 0);
#endif
constexpr CompressedTuple<Empty<0>, NonTrivialStruct, std::optional<int>>
non_trivial = {};
constexpr CallType non_trivial0 = non_trivial.get<0>().value();
constexpr int non_trivial1 = non_trivial.get<1>().value();
constexpr std::optional<int> non_trivial2 = non_trivial.get<2>();
EXPECT_EQ(non_trivial0, CallType::kConstRef);
EXPECT_EQ(non_trivial1, 5);
EXPECT_EQ(non_trivial2, std::nullopt);
static constexpr char data[] = "DEF";
constexpr CompressedTuple<const char*> z(data);
constexpr const char* z1 = z.get<0>();
EXPECT_EQ(std::string(z1), std::string(data));
#if defined(__clang__)
constexpr int x2m = std::move(x.get<2>()).get<0>();
constexpr CallType x3m = std::move(x).get<3>().value();
EXPECT_EQ(x2m, 5);
EXPECT_EQ(x3m, CallType::kConstMove);
#endif
}
#if defined(__clang__) || defined(__GNUC__)
TEST(CompressedTupleTest, EmptyFinalClass) {
struct S final {
int f() const { return 5; }
};
CompressedTuple<S> x;
EXPECT_EQ(x.get<0>().f(), 5);
}
#endif
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/container/compressed_tuple.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/container/compressed_tuple_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
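A small sketch of the empty-base compression CompressedTuple provides; the EmptyDeleter functor is a hypothetical stand-in for the kind of stateless member this container is meant to absorb.
#include <string>
#include "tensorstore/internal/container/compressed_tuple.h"
// Hypothetical empty functor: stored via inheritance, so it adds no size.
struct EmptyDeleter {
  void operator()(std::string*) const {}
};
using PtrAndDeleter =
    tensorstore::internal_container::CompressedTuple<std::string*, EmptyDeleter>;
static_assert(sizeof(PtrAndDeleter) == sizeof(std::string*),
              "the empty element is compressed away");
void CompressedTupleSketch() {
  std::string s = "abc";
  PtrAndDeleter p(&s, EmptyDeleter{});
  std::string* ptr = p.get<0>();   // elements are accessed by index
  EmptyDeleter d = p.get<1>();     // the empty element is still retrievable
  (void)ptr;
  (void)d;
}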
553bcb62-c346-40ab-86a4-569637b54c24 | cpp | google/tensorstore | heterogeneous_container | tensorstore/internal/container/heterogeneous_container.h | tensorstore/internal/container/heterogeneous_container_test.cc | #ifndef TENSORSTORE_INTERNAL_CONTAINER_HETEROGENEOUS_CONTAINER_H_
#define TENSORSTORE_INTERNAL_CONTAINER_HETEROGENEOUS_CONTAINER_H_
#include <functional>
#include "absl/container/flat_hash_set.h"
namespace tensorstore {
namespace internal {
template <typename T>
struct SupportsHeterogeneous : public T {
using is_transparent = void;
};
template <typename EntryPointer, typename T, auto Getter>
struct KeyAdapter {
template <typename U,
typename = std::enable_if_t<std::is_convertible_v<U, T>>>
KeyAdapter(U&& key) : value(std::forward<U>(key)) {}
KeyAdapter(const EntryPointer& e) : value(std::invoke(Getter, *e)) {}
template <typename H>
friend H AbslHashValue(H h, const KeyAdapter& key) {
return H::combine(std::move(h), key.value);
}
friend bool operator==(const KeyAdapter& a, const KeyAdapter& b) {
return a.value == b.value;
}
T value;
};
template <typename EntryPointer, typename T, auto Getter>
using HeterogeneousHashSet = absl::flat_hash_set<
EntryPointer,
SupportsHeterogeneous<absl::Hash<KeyAdapter<EntryPointer, T, Getter>>>,
SupportsHeterogeneous<std::equal_to<KeyAdapter<EntryPointer, T, Getter>>>>;
}
}
#endif | #include "tensorstore/internal/container/heterogeneous_container.h"
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace {
using ::tensorstore::internal::HeterogeneousHashSet;
struct Entry {
std::string id;
};
using Set =
HeterogeneousHashSet<std::shared_ptr<Entry>, std::string_view, &Entry::id>;
TEST(HeterogeneousHashSetTest, Basic) {
Set set;
auto a = std::make_shared<Entry>(Entry{"a"});
auto b = std::make_shared<Entry>(Entry{"b"});
EXPECT_TRUE(set.insert(a).second);
EXPECT_TRUE(set.insert(b).second);
{
auto it = set.find("a");
ASSERT_NE(set.end(), it);
EXPECT_EQ(a, *it);
}
{
auto it = set.find(a);
ASSERT_NE(set.end(), it);
EXPECT_EQ(a, *it);
}
{
auto it = set.find("b");
ASSERT_NE(set.end(), it);
EXPECT_EQ(b, *it);
}
{
auto it = set.find(b);
ASSERT_NE(set.end(), it);
EXPECT_EQ(b, *it);
}
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/container/heterogeneous_container.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/container/heterogeneous_container_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
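A usage sketch for HeterogeneousHashSet, modeled on the test above; the Node type and NodeSet alias are illustrative assumptions.
#include <memory>
#include <string>
#include <string_view>
#include "tensorstore/internal/container/heterogeneous_container.h"
// Hypothetical entry type; the set is keyed by the entry's `id` member.
struct Node {
  std::string id;
};
using NodeSet = tensorstore::internal::HeterogeneousHashSet<
    std::shared_ptr<Node>, std::string_view, &Node::id>;
void HeterogeneousHashSetSketch() {
  NodeSet set;
  set.insert(std::make_shared<Node>(Node{"a"}));
  auto it = set.find("a");          // heterogeneous lookup, no shared_ptr key built
  bool found = (it != set.end());   // true
  (void)found;
}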
5cac40c0-8bd8-4c5d-96ca-8c138da0aa8b | cpp | google/tensorstore | intrusive_linked_list | tensorstore/internal/container/intrusive_linked_list.h | tensorstore/internal/container/intrusive_linked_list_test.cc | #ifndef TENSORSTORE_INTERNAL_CONTAINER_INTRUSIVE_LINKED_LIST_H_
#define TENSORSTORE_INTERNAL_CONTAINER_INTRUSIVE_LINKED_LIST_H_
namespace tensorstore {
namespace internal {
namespace intrusive_linked_list {
template <typename T, T* T::*PrevMember = &T::prev,
T* T::*NextMember = &T::next>
struct MemberAccessor {
using Node = T*;
static void SetPrev(T* node, T* prev) { node->*PrevMember = prev; }
static void SetNext(T* node, T* next) { node->*NextMember = next; }
static T* GetPrev(T* node) { return node->*PrevMember; }
static T* GetNext(T* node) { return node->*NextMember; }
};
template <typename Accessor>
void Initialize(Accessor accessor, typename Accessor::Node node) {
accessor.SetPrev(node, node);
accessor.SetNext(node, node);
}
template <typename Accessor>
void InsertBefore(Accessor accessor, typename Accessor::Node existing_node,
typename Accessor::Node new_node) {
accessor.SetPrev(new_node, accessor.GetPrev(existing_node));
accessor.SetNext(new_node, existing_node);
accessor.SetNext(accessor.GetPrev(existing_node), new_node);
accessor.SetPrev(existing_node, new_node);
}
template <typename Accessor>
void Remove(Accessor accessor, typename Accessor::Node node) {
accessor.SetPrev(accessor.GetNext(node), accessor.GetPrev(node));
accessor.SetNext(accessor.GetPrev(node), accessor.GetNext(node));
}
template <typename Accessor>
bool OnlyContainsNode(Accessor accessor, typename Accessor::Node node) {
return accessor.GetNext(node) == node;
}
}
}
}
#endif | #include "tensorstore/internal/container/intrusive_linked_list.h"
#include <gtest/gtest.h>
namespace {
struct Node {
Node* prev;
Node* next;
};
using Accessor =
tensorstore::internal::intrusive_linked_list::MemberAccessor<Node>;
TEST(IntrusiveLinkedListTest, Initialize) {
Node head;
Initialize(Accessor{}, &head);
EXPECT_EQ(&head, head.next);
EXPECT_EQ(&head, head.prev);
EXPECT_TRUE(OnlyContainsNode(Accessor{}, &head));
}
TEST(IntrusiveLinkedListTest, InsertBefore) {
Node head;
Node a;
Node b;
Initialize(Accessor{}, &head);
InsertBefore(Accessor{}, &head, &a);
EXPECT_EQ(&a, head.next);
EXPECT_EQ(&a, head.prev);
EXPECT_EQ(&head, a.next);
EXPECT_EQ(&head, a.prev);
EXPECT_FALSE(OnlyContainsNode(Accessor{}, &head));
InsertBefore(Accessor{}, &head, &b);
EXPECT_EQ(&a, head.next);
EXPECT_EQ(&b, head.prev);
EXPECT_EQ(&head, b.next);
EXPECT_EQ(&a, b.prev);
EXPECT_EQ(&b, a.next);
EXPECT_EQ(&head, a.prev);
}
TEST(IntrusiveLinkedListTest, Remove) {
Node head;
Node a;
Node b;
Initialize(Accessor{}, &head);
InsertBefore(Accessor{}, &head, &a);
InsertBefore(Accessor{}, &head, &b);
Remove(Accessor{}, &b);
EXPECT_EQ(&a, head.next);
EXPECT_EQ(&a, head.prev);
EXPECT_EQ(&head, a.next);
EXPECT_EQ(&head, a.prev);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/container/intrusive_linked_list.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/container/intrusive_linked_list_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
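A usage sketch for the intrusive linked-list helpers; the Item node type is an assumption, and note that Remove only unlinks a node without resetting that node's own pointers.
#include "tensorstore/internal/container/intrusive_linked_list.h"
// Hypothetical node type; prev/next are the default member names the accessor uses.
struct Item {
  Item* prev;
  Item* next;
  int value;
};
using Access =
    tensorstore::internal::intrusive_linked_list::MemberAccessor<Item>;
void IntrusiveListSketch() {
  Item head, a, b;
  Initialize(Access{}, &head);         // circular list containing only the head
  InsertBefore(Access{}, &head, &a);   // head <-> a
  InsertBefore(Access{}, &head, &b);   // head <-> a <-> b
  Remove(Access{}, &a);                // unlinks a; a's own pointers stay stale
  bool only_head = OnlyContainsNode(Access{}, &head);  // false: b is still linked
  (void)only_head;
}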
1455137a-edab-40f9-aa60-81625b5e6bd1 | cpp | google/tensorstore | hash_set_of_any | tensorstore/internal/container/hash_set_of_any.h | tensorstore/internal/container/hash_set_of_any_test.cc | #ifndef TENSORSTORE_INTERNAL_CONTAINER_HASH_SET_OF_ANY_H_
#define TENSORSTORE_INTERNAL_CONTAINER_HASH_SET_OF_ANY_H_
#include <stddef.h>
#include <cassert>
#include <functional>
#include <memory>
#include <type_traits>
#include <typeindex>
#include <typeinfo>
#include <utility>
#include "absl/container/flat_hash_set.h"
namespace tensorstore {
namespace internal {
class HashSetOfAny {
public:
class Entry {
public:
virtual ~Entry() = default;
private:
friend class HashSetOfAny;
size_t hash_;
};
template <typename DerivedEntry, typename MakeEntry>
std::pair<DerivedEntry*, bool> FindOrInsert(
typename DerivedEntry::KeyParam key, MakeEntry make_entry,
const std::type_info& derived_type = typeid(DerivedEntry)) {
static_assert(std::is_base_of_v<Entry, DerivedEntry>);
KeyFor<DerivedEntry> key_wrapper{derived_type, key};
size_t hash = Hash{}(key_wrapper);
auto it = entries_.find(key_wrapper);
if (it != entries_.end()) {
return {static_cast<DerivedEntry*>(*it), false};
}
std::unique_ptr<DerivedEntry> derived_entry = make_entry();
auto* entry = static_cast<Entry*>(derived_entry.get());
assert(derived_type == typeid(*entry));
entry->hash_ = hash;
[[maybe_unused]] auto inserted = entries_.insert(entry).second;
assert(inserted);
return {derived_entry.release(), true};
}
void erase(Entry* entry) { entries_.erase(entry); }
void clear() { entries_.clear(); }
auto begin() { return entries_.begin(); }
auto begin() const { return entries_.begin(); }
auto end() { return entries_.end(); }
auto end() const { return entries_.end(); }
size_t size() const { return entries_.size(); }
bool empty() const { return entries_.empty(); }
private:
template <typename DerivedEntry>
struct KeyFor {
const std::type_info& derived_type;
typename DerivedEntry::KeyParam key;
friend bool operator==(Entry* entry, const KeyFor<DerivedEntry>& other) {
return typeid(*entry) == other.derived_type &&
static_cast<DerivedEntry*>(entry)->key() == other.key;
}
};
struct Hash {
using is_transparent = void;
template <typename DerivedEntry>
size_t operator()(KeyFor<DerivedEntry> key) const {
return absl::HashOf(std::type_index(key.derived_type), key.key);
}
size_t operator()(Entry* entry) const { return entry->hash_; }
};
struct Eq : public std::equal_to<void> {
using is_transparent = void;
};
absl::flat_hash_set<Entry*, Hash, Eq> entries_;
};
}
}
#endif | #include "tensorstore/internal/container/hash_set_of_any.h"
#include <gtest/gtest.h>
namespace {
using ::tensorstore::internal::HashSetOfAny;
template <typename T>
struct Entry : public HashSetOfAny::Entry {
using KeyParam = T;
Entry(T key) : key_(key) {}
static Entry& FindOrInsert(HashSetOfAny& set, T key) {
return *set.FindOrInsert<Entry<T>>(
key, [&] { return std::make_unique<Entry<T>>(key); })
.first;
}
T key_;
T key() const { return key_; }
};
TEST(HashSetOfAnyTest, Basic) {
HashSetOfAny set;
auto& a = Entry<int>::FindOrInsert(set, 5);
auto& b = Entry<int>::FindOrInsert(set, 5);
auto& c = Entry<int>::FindOrInsert(set, 6);
auto& e = Entry<float>::FindOrInsert(set, 1.5);
auto& f = Entry<float>::FindOrInsert(set, 2.5);
EXPECT_EQ(set.size(), 4);
EXPECT_FALSE(set.empty());
EXPECT_EQ(&a, &b);
EXPECT_NE(&a, &c);
EXPECT_NE(&e, &f);
for (auto* entry : set) {
delete entry;
}
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/container/hash_set_of_any.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/container/hash_set_of_any_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
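A usage sketch for HashSetOfAny::FindOrInsert with a hypothetical StringEntry type keyed by std::string_view; note that the set stores raw pointers and never owns the entries.
#include <memory>
#include <string>
#include <string_view>
#include "tensorstore/internal/container/hash_set_of_any.h"
// Hypothetical entry type; KeyParam names the type used for lookups.
struct StringEntry : public tensorstore::internal::HashSetOfAny::Entry {
  using KeyParam = std::string_view;
  explicit StringEntry(std::string_view k) : key_(k) {}
  std::string_view key() const { return key_; }
  std::string key_;
};
void HashSetOfAnySketch() {
  tensorstore::internal::HashSetOfAny set;
  // Creates the entry only if no StringEntry with this key is present yet.
  auto [entry, inserted] = set.FindOrInsert<StringEntry>(
      "config", [&] { return std::make_unique<StringEntry>("config"); });
  (void)inserted;
  // The set does not own entries; erase and delete them explicitly.
  set.erase(entry);
  delete entry;
}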
6deec09b-f59f-458b-9ae1-7ae910be6816 | cpp | google/tensorstore | circular_queue | tensorstore/internal/container/circular_queue.h | tensorstore/internal/container/circular_queue_test.cc | #ifndef TENSORSTORE_INTERNAL_THREAD_CIRCULAR_QUEUE_H_
#define TENSORSTORE_INTERNAL_THREAD_CIRCULAR_QUEUE_H_
#include <stddef.h>
#include <algorithm>
#include <cassert>
#include <memory>
#include <type_traits>
#include "absl/base/attributes.h"
#include "absl/log/absl_check.h"
#include "tensorstore/internal/container/item_traits.h"
namespace tensorstore {
namespace internal_container {
template <typename T, typename Allocator = std::allocator<T>>
class CircularQueue {
using TransferTraits = ItemTraits<T>;
using Storage = typename std::aligned_storage<sizeof(T), alignof(T)>::type;
static_assert(sizeof(T) == sizeof(Storage));
using StorageAllocator =
typename std::allocator_traits<Allocator>::template rebind_alloc<Storage>;
using StorageAllocatorTraits = std::allocator_traits<StorageAllocator>;
static constexpr bool kDestroyIsTrivial =
TransferTraits::template destroy_is_trivial<Allocator>();
public:
explicit CircularQueue(size_t n) : CircularQueue(n, Allocator()) {}
CircularQueue(size_t n, Allocator alloc)
: allocator_(std::move(alloc)),
begin_(0),
end_(0),
mask_(0),
buffer_(nullptr) {
ABSL_CHECK_EQ(n & (n - 1), 0);
internal_resize(n);
}
~CircularQueue() {
clear();
if (buffer_) {
StorageAllocator storage_alloc(allocator_);
StorageAllocatorTraits::deallocate(
storage_alloc, reinterpret_cast<Storage*>(buffer_), mask_ + 1);
}
}
CircularQueue(const CircularQueue&) = delete;
CircularQueue& operator=(const CircularQueue&) = delete;
size_t capacity() const { return mask_ + 1; }
size_t size() const { return end_ - begin_; }
bool empty() const { return !size(); }
T& front() {
ABSL_CHECK(!empty());
return buffer_[begin_ & mask_];
}
const T& front() const {
ABSL_CHECK(!empty());
return buffer_[begin_ & mask_];
}
T& back() {
ABSL_CHECK(!empty());
return buffer_[(end_ - 1) & mask_];
}
const T& back() const {
ABSL_CHECK(!empty());
return buffer_[(end_ - 1) & mask_];
}
T& operator[](size_t i) {
ABSL_CHECK_LT(i, size());
return buffer_[(begin_ + i) & mask_];
}
const T& operator[](size_t i) const {
ABSL_CHECK_LT(i, size());
return buffer_[(begin_ + i) & mask_];
}
void push_back(const T& val) { emplace_back(val); }
void push_back(T&& val) { emplace_back(std::move(val)); }
template <typename... A>
T& emplace_back(A&&... args) {
auto* storage = emplace_back_raw();
TransferTraits::construct(&allocator_, storage, std::forward<A>(args)...);
return *storage;
}
void pop_front() {
ABSL_CHECK(!empty());
auto x = begin_++;
if constexpr (!kDestroyIsTrivial) {
TransferTraits::destroy(&allocator_, buffer_ + (x & mask_));
}
}
void clear() {
if constexpr (!kDestroyIsTrivial) {
for (size_t i = begin_; i < end_; i++) {
TransferTraits::destroy(&allocator_, buffer_ + (i & mask_));
}
}
begin_ = 0;
end_ = 0;
}
private:
T* emplace_back_raw() {
if (size() == capacity()) {
internal_resize((mask_ + 1) * 2);
}
return buffer_ + (end_++ & mask_);
}
void internal_resize(size_t c) {
ABSL_CHECK_EQ(c & (c - 1), 0);
ABSL_CHECK_GT(c, mask_ + 1);
StorageAllocator storage_alloc(allocator_);
T* new_buffer = std::launder(reinterpret_cast<T*>(
StorageAllocatorTraits::allocate(storage_alloc, c)));
size_t j = 0;
for (size_t i = begin_; i < end_; i++) {
auto* storage = buffer_ + (i & mask_);
TransferTraits::transfer(&allocator_, new_buffer + j++, storage);
}
if (buffer_) {
StorageAllocatorTraits::deallocate(
storage_alloc, reinterpret_cast<Storage*>(buffer_), mask_ + 1);
}
begin_ = 0;
end_ = j;
mask_ = c - 1;
buffer_ = new_buffer;
}
ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS Allocator allocator_;
size_t begin_;
size_t end_;
size_t mask_;
T* buffer_;
};
}
}
#endif | #include "tensorstore/internal/container/circular_queue.h"
#include <stdint.h>
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace {
using ::tensorstore::internal_container::CircularQueue;
TEST(CircularQueue, Basic) {
CircularQueue<int64_t> q(2);
EXPECT_TRUE(q.empty());
EXPECT_THAT(q.size(), 0);
q.push_back(10);
EXPECT_FALSE(q.empty());
EXPECT_EQ(q.front(), 10);
EXPECT_EQ(q.back(), 10);
q.pop_front();
EXPECT_TRUE(q.empty());
for (int i = 0; i < 10; ++i) {
q.push_back(i);
}
EXPECT_FALSE(q.empty());
q.clear();
EXPECT_TRUE(q.empty());
}
TEST(CircularQueue, BasicWithSharedPtr) {
CircularQueue<std::shared_ptr<int64_t>> q(2);
EXPECT_TRUE(q.empty());
EXPECT_THAT(q.size(), 0);
q.push_back(std::make_shared<int64_t>(10));
EXPECT_FALSE(q.empty());
EXPECT_EQ(*q.front(), 10);
EXPECT_EQ(*q.back(), 10);
q.pop_front();
EXPECT_TRUE(q.empty());
for (int i = 0; i < 10; ++i) {
q.push_back(std::make_shared<int64_t>(i));
}
EXPECT_FALSE(q.empty());
q.clear();
EXPECT_TRUE(q.empty());
}
TEST(CircularQueue, Resize) {
CircularQueue<int64_t> q(2);
for (int64_t i = 0; i < 1234; ++i) {
q.push_back(i);
}
EXPECT_FALSE(q.empty());
EXPECT_THAT(q.size(), 1234);
EXPECT_THAT(q.capacity(), ::testing::Gt(1234));
for (int64_t i = 0; i < 1234; ++i) {
EXPECT_THAT(q.front(), i);
q.pop_front();
}
EXPECT_THAT(q.size(), ::testing::Eq(0));
}
class OnlyConstructibleByAllocator {
explicit OnlyConstructibleByAllocator(int i) : i_(i) {}
public:
OnlyConstructibleByAllocator(const OnlyConstructibleByAllocator &other)
: i_(other.i_) {}
OnlyConstructibleByAllocator &operator=(
const OnlyConstructibleByAllocator &other) {
i_ = other.i_;
return *this;
}
int Get() const { return i_; }
bool operator==(int i) const { return i_ == i; }
private:
template <typename T>
friend class OnlyConstructibleAllocator;
int i_;
};
template <typename T = OnlyConstructibleByAllocator>
class OnlyConstructibleAllocator : public std::allocator<T> {
public:
OnlyConstructibleAllocator() = default;
template <class U>
explicit OnlyConstructibleAllocator(const OnlyConstructibleAllocator<U> &) {}
void construct(OnlyConstructibleByAllocator *p, int i) {
new (p) OnlyConstructibleByAllocator(i);
}
template <class U>
struct rebind {
using other = OnlyConstructibleAllocator<U>;
};
};
TEST(CircularQueue, OnlyConstructibleByAllocator) {
CircularQueue<OnlyConstructibleByAllocator, OnlyConstructibleAllocator<>> q(
2);
EXPECT_TRUE(q.empty());
EXPECT_THAT(q.size(), 0);
q.emplace_back(10);
EXPECT_FALSE(q.empty());
EXPECT_EQ(q.front(), 10);
EXPECT_EQ(q.back(), 10);
q.pop_front();
EXPECT_TRUE(q.empty());
for (int i = 0; i < 10; ++i) {
q.emplace_back(i);
}
EXPECT_FALSE(q.empty());
q.clear();
EXPECT_TRUE(q.empty());
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/container/circular_queue.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/container/circular_queue_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
8a82f1ac-bf7f-4b52-9cb6-b9539c798c42 | cpp | google/tensorstore | block_queue | tensorstore/internal/container/block_queue.h | tensorstore/internal/container/block_queue_test.cc | #ifndef TENSORSTORE_INTERNAL_THREAD_BLOCK_QUEUE_H_
#define TENSORSTORE_INTERNAL_THREAD_BLOCK_QUEUE_H_
#include <stddef.h>
#include <stdint.h>
#include <algorithm>
#include <cassert>
#include <memory>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/log/absl_check.h"
#include "tensorstore/internal/container/item_traits.h"
namespace tensorstore {
namespace internal_container {
template <typename T, size_t kMin = 1024, size_t kMax = 1024,
typename Allocator = std::allocator<T>>
class BlockQueue;
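// SQBlock is a single node of a BlockQueue: a small header (end pointer and
// next-block pointer) followed, within the same allocation, by an inline
// array of T with the requested capacity. Blocks must be created with New()
// and released with Delete(), which size the underlying byte allocation.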
template <typename T, typename Allocator>
class SQBlock {
private:
using BlockAllocator =
typename std::allocator_traits<Allocator>::template rebind_alloc<SQBlock>;
using ByteAllocator =
typename std::allocator_traits<Allocator>::template rebind_alloc<char>;
constexpr static ptrdiff_t start_offset() {
struct X {
SQBlock array;
T item[1];
};
return offsetof(X, item);
}
constexpr static size_t start_items() {
return (start_offset() + sizeof(T) - 1) / sizeof(T);
}
struct private_t {
private:
friend class SQBlock;
private_t() = default;
};
public:
static SQBlock* New(int64_t c, Allocator* alloc) {
size_t allocation_bytes =
(c <= start_items() + 2)
? (start_offset() + 2 * sizeof(T))
: (c -= start_items(), ((c + start_items()) * sizeof(T)));
ByteAllocator byte_alloc(*alloc);
void* mem = std::allocator_traits<ByteAllocator>::allocate(
byte_alloc, allocation_bytes);
auto* as_array = static_cast<SQBlock*>(mem);
BlockAllocator array_alloc(*alloc);
std::allocator_traits<BlockAllocator>::construct(array_alloc, as_array,
private_t{}, c);
return as_array;
}
static void Delete(SQBlock* ptr, Allocator* alloc) {
const size_t allocation_bytes =
(ptr->capacity() == 2) ? (start_offset() + 2 * sizeof(T))
: (start_items() + ptr->capacity()) * sizeof(T);
BlockAllocator block_alloc(*alloc);
std::allocator_traits<BlockAllocator>::destroy(block_alloc, ptr);
void* mem = ptr;
ByteAllocator byte_alloc(*alloc);
std::allocator_traits<ByteAllocator>::deallocate(
byte_alloc, static_cast<char*>(mem), allocation_bytes);
}
SQBlock(private_t, size_t c) : end_(begin() + c), next_(nullptr) {}
SQBlock* next() const { return next_; }
void set_next(SQBlock* b) { next_ = b; }
T* begin() {
return reinterpret_cast<T*>(reinterpret_cast<char*>(this) + start_offset());
}
T* end() { return end_; }
size_t capacity() { return end() - begin(); }
private:
T* end_;
SQBlock* next_;
};
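// BlockQueue<T> is a FIFO queue backed by a singly-linked list of SQBlock
// nodes. Block capacities start at kMin and double with each new block up to
// kMax; blocks are released as the head cursor moves past them. The queue is
// not thread-safe.
//
// Example (illustrative):
//   BlockQueue<int> q;
//   q.push_back(1);
//   q.emplace_back(2);
//   while (!q.empty()) q.pop_front();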
template <typename T, size_t kMin, size_t kMax, typename Allocator>
class BlockQueue {
using Block = SQBlock<T, Allocator>;
using TransferTraits = ItemTraits<T>;
static constexpr bool kDestroyIsTrivial =
TransferTraits::template destroy_is_trivial<Allocator>();
static_assert(kMin > 0);
static_assert(kMin <= kMax);
struct Cursor {
Cursor(Block* b) : block(b), ptr(b->begin()), end(b->end()) {}
Cursor() : block(nullptr), ptr(nullptr), end(nullptr) {}
Block* block;
T* ptr;
T* end;
};
public:
BlockQueue() : BlockQueue(Allocator()) {}
explicit BlockQueue(Allocator alloc)
: allocator_(std::move(alloc)), head_(), tail_(), size_(0) {}
~BlockQueue() {
Block* b = head_.block;
while (b) {
Block* next = b->next();
ClearBlock(b);
Block::Delete(b, &allocator_);
b = next;
}
}
BlockQueue(const BlockQueue&) = delete;
BlockQueue& operator=(const BlockQueue&) = delete;
size_t size() const { return size_; }
bool empty() const { return !size(); }
T& front() {
ABSL_CHECK(!empty());
return *head_.ptr;
}
const T& front() const {
ABSL_CHECK(!empty());
return *head_.ptr;
}
T& back() {
ABSL_CHECK(!empty());
return *((tail_.ptr) - 1);
}
const T& back() const {
ABSL_CHECK(!empty());
return *((tail_.ptr) - 1);
}
void push_back(const T& val) { emplace_back(val); }
void push_back(T&& val) { emplace_back(std::move(val)); }
template <typename... A>
T& emplace_back(A&&... args) {
auto* storage = emplace_back_raw();
TransferTraits::construct(&allocator_, storage, std::forward<A>(args)...);
return *storage;
}
void pop_front() {
ABSL_CHECK(!empty());
ABSL_CHECK(head_.block);
TransferTraits::destroy(&allocator_, head_.ptr);
++head_.ptr;
--size_;
if (empty()) {
ABSL_CHECK_EQ(head_.block, tail_.block);
head_.ptr = tail_.ptr = head_.block->begin();
return;
}
if (head_.ptr == head_.end) {
Block* n = head_.block->next();
Block::Delete(head_.block, &allocator_);
head_ = Cursor(n);
}
}
void clear() {
Block* b = head_.block;
if (!b) {
ABSL_CHECK(empty());
return;
}
while (b) {
Block* next = b->next();
ClearBlock(b);
if (head_.block != b) {
Block::Delete(b, &allocator_);
}
b = next;
}
b = head_.block;
b->set_next(nullptr);
head_ = tail_ = Cursor(b);
size_ = 0;
}
private:
T* emplace_back_raw() {
if (tail_.ptr == tail_.end) {
size_t capacity = kMin;
if (tail_.block) {
capacity = 2 * tail_.block->capacity();
if (capacity > kMax) capacity = kMax;
}
auto* b = Block::New(capacity, &allocator_);
if (!head_.block) {
ABSL_CHECK(tail_.block == nullptr);
head_ = Cursor(b);
} else {
ABSL_CHECK(head_.block != nullptr);
tail_.block->set_next(b);
}
tail_ = Cursor(b);
}
++size_;
return tail_.ptr++;
}
void ClearBlock(Block* b) {
auto* begin = b == head_.block ? head_.ptr : b->begin();
auto* end = b == tail_.block ? tail_.ptr : b->end();
if constexpr (!kDestroyIsTrivial) {
for (; begin != end; ++begin) {
TransferTraits::destroy(&allocator_, begin);
}
}
}
ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS Allocator allocator_;
Cursor head_;
Cursor tail_;
size_t size_;
};
}
}
#endif | #include "tensorstore/internal/container/block_queue.h"
#include <stdint.h>
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace {
using ::tensorstore::internal_container::BlockQueue;
TEST(BlockQueue, Basic) {
BlockQueue<int64_t> q;
EXPECT_TRUE(q.empty());
EXPECT_THAT(q.size(), 0);
q.push_back(10);
EXPECT_FALSE(q.empty());
EXPECT_EQ(q.front(), 10);
EXPECT_EQ(q.back(), 10);
q.pop_front();
EXPECT_TRUE(q.empty());
q.clear();
EXPECT_TRUE(q.empty());
}
TEST(BlockQueue, PushPop) {
BlockQueue<int64_t> q;
for (int i = 0; i < 4096; i++) {
q.push_back(i);
if (i & 0x08) {
q.pop_front();
}
}
EXPECT_FALSE(q.empty());
q.clear();
EXPECT_TRUE(q.empty());
}
class OnlyConstructibleByAllocator {
explicit OnlyConstructibleByAllocator(int i) : i_(i) {}
public:
OnlyConstructibleByAllocator(const OnlyConstructibleByAllocator &other)
: i_(other.i_) {}
OnlyConstructibleByAllocator &operator=(
const OnlyConstructibleByAllocator &other) {
i_ = other.i_;
return *this;
}
int Get() const { return i_; }
bool operator==(int i) const { return i_ == i; }
private:
template <typename T>
friend class OnlyConstructibleAllocator;
int i_;
};
template <typename T = OnlyConstructibleByAllocator>
class OnlyConstructibleAllocator : public std::allocator<T> {
public:
OnlyConstructibleAllocator() = default;
template <class U>
explicit OnlyConstructibleAllocator(const OnlyConstructibleAllocator<U> &) {}
void construct(OnlyConstructibleByAllocator *p, int i) {
new (p) OnlyConstructibleByAllocator(i);
}
template <class U>
struct rebind {
using other = OnlyConstructibleAllocator<U>;
};
};
TEST(BlockQueue, OnlyConstructibleByAllocator) {
BlockQueue<OnlyConstructibleByAllocator, 1024, 1024,
OnlyConstructibleAllocator<>>
q;
EXPECT_TRUE(q.empty());
EXPECT_THAT(q.size(), 0);
q.emplace_back(10);
EXPECT_FALSE(q.empty());
EXPECT_EQ(q.front().Get(), 10);
EXPECT_EQ(q.back().Get(), 10);
q.pop_front();
EXPECT_TRUE(q.empty());
q.clear();
EXPECT_TRUE(q.empty());
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/container/block_queue.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/container/block_queue_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
3ebcb91f-5f3f-4f11-9ce0-c059d17b31b9 | cpp | google/tensorstore | enum | tensorstore/internal/json_binding/enum.h | tensorstore/internal/json_binding/enum_test.cc | #ifndef TENSORSTORE_INTERNAL_JSON_BINDING_ENUM_H_
#define TENSORSTORE_INTERNAL_JSON_BINDING_ENUM_H_
#include <stddef.h>
#include <string>
#include <utility>
#include <variant>
#include "absl/base/optimization.h"
#include "absl/status/status.h"
#include "absl/strings/str_join.h"
#include <nlohmann/json_fwd.hpp>
#include "tensorstore/internal/json/same.h"
#include "tensorstore/internal/json/value_as.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_json_binding {
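// Enum binds an enum type to JSON using a fixed array of
// (enum value, JSON value) pairs: loading matches the JSON representation and
// assigns the corresponding enum value, saving does the reverse, and an
// unmatched JSON value yields an "expected one of ..." error.
//
// Example (illustrative; `Color` is a hypothetical enum):
//   enum class Color { kRed, kBlue };
//   const auto ColorBinder = Enum<Color, std::string_view>({
//       {Color::kRed, "red"},
//       {Color::kBlue, "blue"},
//   });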
template <typename EnumValue, typename JsonValue, size_t N>
constexpr auto Enum(const std::pair<EnumValue, JsonValue> (&values)[N]) {
return [=](auto is_loading, const auto& options, auto* obj,
auto* j) -> absl::Status {
for (const auto& p : values) {
if constexpr (is_loading) {
if (internal_json::JsonSame(p.second, *j)) {
*obj = p.first;
return absl::OkStatus();
}
} else {
if (p.first == *obj) {
*j = p.second;
return absl::OkStatus();
}
}
}
if constexpr (is_loading) {
return internal_json::ExpectedError(
*j,
tensorstore::StrCat(
"one of ",
absl::StrJoin(values, ", ", [](std::string* out, const auto& p) {
*out += ::nlohmann::json(p.second).dump();
})));
} else {
ABSL_UNREACHABLE();
}
};
}
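// MapValue binds via an explicit list of (value, JSON) pairs, falling back to
// `binder` when nothing matches. Several JSON representations may map to the
// same value; when saving, the first pair containing the value determines the
// JSON form that is emitted.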
template <typename Binder, typename... Value, typename... JsonValue>
constexpr auto MapValue(Binder binder, std::pair<Value, JsonValue>... pairs) {
constexpr size_t N = sizeof...(pairs);
static_assert(N > 0);
return [=](auto is_loading, const auto& options, auto* obj,
auto* j) -> absl::Status {
if constexpr (is_loading) {
if (((internal_json::JsonSame(*j, pairs.second) &&
(static_cast<void>(*obj = pairs.first), true)) ||
...))
return absl::OkStatus();
} else {
if ((((*obj == pairs.first) &&
(static_cast<void>(*j = pairs.second), true)) ||
...))
return absl::OkStatus();
}
return binder(is_loading, options, obj, j);
};
}
}
}
#endif | #include "tensorstore/internal/json_binding/enum.h"
#include <memory>
#include <string_view>
#include <utility>
#include <variant>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include <nlohmann/json_fwd.hpp>
#include "tensorstore/internal/json/json.h"
#include "tensorstore/internal/json_binding/gtest.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/json_serialization_options_base.h"
#include "tensorstore/util/status_testutil.h"
using ::tensorstore::MatchesStatus;
namespace jb = tensorstore::internal_json_binding;
namespace {
TEST(JsonBindingTest, Enum) {
enum class TestEnum { a, b };
const auto binder = jb::Enum<TestEnum, std::string_view>({
{TestEnum::a, "a"},
{TestEnum::b, "b"},
});
tensorstore::TestJsonBinderRoundTrip<TestEnum>(
{
{TestEnum::a, "a"},
{TestEnum::b, "b"},
},
binder);
tensorstore::TestJsonBinderFromJson<TestEnum>(
{
{"c",
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Expected one of \"a\", \"b\", but received: \"c\"")},
},
binder);
}
TEST(JsonBindingTest, MapValue) {
enum class TestMap { a, b };
const auto binder = jb::MapValue(
[](auto...) { return absl::InvalidArgumentError("missing"); },
std::make_pair(TestMap::a, "a"),
std::make_pair(TestMap::b, "b"),
std::make_pair(TestMap::a, 1),
std::make_pair(TestMap::b, 2));
tensorstore::TestJsonBinderRoundTrip<TestMap>(
{
{TestMap::a, "a"},
{TestMap::b, "b"},
},
binder);
tensorstore::TestJsonBinderFromJson<TestMap>(
{
{"a", ::testing::Eq(TestMap::a)},
{"b", ::testing::Eq(TestMap::b)},
{"c",
MatchesStatus(absl::StatusCode::kInvalidArgument, ".*missing.*")},
{1, ::testing::Eq(TestMap::a)},
{2, ::testing::Eq(TestMap::b)},
{3, MatchesStatus(absl::StatusCode::kInvalidArgument, ".*missing.*")},
},
binder);
}
namespace map_variant_test {
struct A {
[[maybe_unused]] friend bool operator==(const A&, const A&) { return true; }
};
struct B {
[[maybe_unused]] friend bool operator==(const B&, const B&) { return true; }
};
struct C {
[[maybe_unused]] friend bool operator==(const C&, const C&) { return true; }
};
}
TEST(JsonBindingTest, MapValueVariant) {
using map_variant_test::A;
using map_variant_test::B;
using map_variant_test::C;
using T = std::variant<A, B, C>;
const auto binder = jb::MapValue(
[](auto...) { return absl::InvalidArgumentError("missing"); },
std::make_pair(T{A{}}, "a"),
std::make_pair(T{B{}}, "b"),
std::make_pair(T{C{}}, 3));
tensorstore::TestJsonBinderRoundTrip<T>(
{
{A{}, "a"},
{B{}, "b"},
{C{}, 3},
},
binder);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json_binding/enum.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json_binding/enum_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
2b7b518f-026f-4331-bdbc-8e050845acbb | cpp | google/tensorstore | json_binding | tensorstore/internal/json_binding/json_binding.h | tensorstore/internal/json_binding/json_binding_test.cc | #ifndef TENSORSTORE_INTERNAL_JSON_BINDING_JSON_H_
#define TENSORSTORE_INTERNAL_JSON_BINDING_JSON_H_
#include <functional>
#include <limits>
#include <map>
#include <string>
#include <string_view>
#include <tuple>
#include <type_traits>
#include <utility>
#include "absl/meta/type_traits.h"
#include "absl/status/status.h"
#include "absl/strings/str_join.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/json/json.h"
#include "tensorstore/internal/json/same.h"
#include "tensorstore/internal/json/value_as.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/type_traits.h"
#include "tensorstore/json_serialization_options_base.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_json_binding {
namespace empty_binder {
constexpr inline auto EmptyBinder = [](auto is_loading, const auto& options,
auto* obj, auto* j) -> absl::Status {
return absl::OkStatus();
};
}
using empty_binder::EmptyBinder;
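// ValueAsBinder binds a JSON value directly to a C++ value of the
// corresponding type via JsonRequireValueAs; LooseValueAsBinder additionally
// permits lossy conversions such as parsing a number or bool from a JSON
// string.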
namespace loose_value_as_binder {
constexpr inline auto LooseValueAsBinder =
[](auto is_loading, const auto& options, auto* obj,
::nlohmann::json* j) -> absl::Status {
if constexpr (is_loading) {
return internal_json::JsonRequireValueAs(*j, obj, false);
} else {
*j = *obj;
return absl::OkStatus();
}
};
}
using loose_value_as_binder::LooseValueAsBinder;
namespace value_as_binder {
constexpr inline auto ValueAsBinder = [](auto is_loading, const auto& options,
auto* obj,
::nlohmann::json* j) -> absl::Status {
if constexpr (is_loading) {
return internal_json::JsonRequireValueAs(*j, obj, true);
} else {
*j = *obj;
return absl::OkStatus();
}
};
}
using value_as_binder::ValueAsBinder;
template <>
constexpr inline auto DefaultBinder<bool> = ValueAsBinder;
template <>
constexpr inline auto DefaultBinder<std::int64_t> = ValueAsBinder;
template <>
constexpr inline auto DefaultBinder<std::string> = ValueAsBinder;
template <>
constexpr inline auto DefaultBinder<uint64_t> = ValueAsBinder;
template <>
constexpr inline auto DefaultBinder<double> = ValueAsBinder;
template <>
constexpr inline auto DefaultBinder<std::nullptr_t> = ValueAsBinder;
namespace loose_float_binder {
constexpr inline auto LooseFloatBinder =
[](auto is_loading, const auto& options, auto* obj,
::nlohmann::json* j) -> absl::Status {
if constexpr (is_loading) {
double x;
auto status = internal_json::JsonRequireValueAs(*j, &x, false);
if (status.ok()) *obj = x;
return status;
} else {
*j = static_cast<double>(*obj);
return absl::OkStatus();
}
};
}
using loose_float_binder::LooseFloatBinder;
namespace float_binder {
constexpr inline auto FloatBinder = [](auto is_loading, const auto& options,
auto* obj,
::nlohmann::json* j) -> absl::Status {
if constexpr (is_loading) {
double x;
auto status = internal_json::JsonRequireValueAs(*j, &x, true);
if (status.ok()) *obj = x;
return status;
} else {
*j = static_cast<double>(*obj);
return absl::OkStatus();
}
};
}
using float_binder::FloatBinder;
template <typename T>
constexpr inline auto
DefaultBinder<T, std::enable_if_t<std::is_floating_point_v<T>>> =
FloatBinder;
template <typename T>
constexpr auto LooseInteger(T min = std::numeric_limits<T>::min(),
T max = std::numeric_limits<T>::max()) {
return [=](auto is_loading, const auto& options, auto* obj,
::nlohmann::json* j) -> absl::Status {
if constexpr (is_loading) {
      return internal_json::JsonRequireInteger(*j, obj, false, min, max);
} else {
*j = *obj;
return absl::OkStatus();
}
};
}
template <typename T>
constexpr auto Integer(T min = std::numeric_limits<T>::min(),
T max = std::numeric_limits<T>::max()) {
return [=](auto is_loading, const auto& options, auto* obj,
::nlohmann::json* j) -> absl::Status {
if constexpr (is_loading) {
      return internal_json::JsonRequireInteger(*j, obj, true, min, max);
} else {
*j = *obj;
return absl::OkStatus();
}
};
}
template <typename T>
constexpr inline auto
DefaultBinder<T, std::enable_if_t<std::numeric_limits<T>::is_integer>> =
Integer<T>();
namespace non_empty_string_binder {
constexpr inline auto NonEmptyStringBinder =
[](auto is_loading, const auto& options, auto* obj,
::nlohmann::json* j) -> absl::Status {
if constexpr (is_loading) {
return internal_json::JsonRequireValueAs(
*j, obj, [](const std::string& value) { return !value.empty(); },
true);
} else {
*j = *obj;
return absl::OkStatus();
}
};
}
using non_empty_string_binder::NonEmptyStringBinder;
namespace copy_binder {
constexpr inline auto CopyJsonBinder = [](auto is_loading, const auto& options,
auto* obj,
::nlohmann::json* j) -> absl::Status {
if constexpr (is_loading) {
*obj = std::move(*j);
} else {
*j = *obj;
}
return absl::OkStatus();
};
}
using copy_binder::CopyJsonBinder;
template <>
constexpr inline auto DefaultBinder<::nlohmann::json> = CopyJsonBinder;
namespace object_binder {
constexpr inline auto CopyJsonObjectBinder = [](auto is_loading,
const auto& options, auto* obj,
auto* j) -> absl::Status {
if constexpr (is_loading) {
if constexpr (std::is_same_v<decltype(j), ::nlohmann::json::object_t*>) {
*obj = std::move(*j);
} else {
if (auto* j_obj = j->template get_ptr<::nlohmann::json::object_t*>()) {
*obj = std::move(*j_obj);
} else {
return internal_json::ExpectedError(*j, "object");
}
}
} else {
*j = *obj;
}
return absl::OkStatus();
};
}
using object_binder::CopyJsonObjectBinder;
template <>
constexpr inline auto DefaultBinder<::nlohmann::json::object_t> =
CopyJsonObjectBinder;
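// Constant(get_value) binds to a fixed JSON value: saving writes get_value(),
// while loading only verifies that the input equals that value and leaves the
// object unchanged.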
template <typename GetValue>
constexpr auto Constant(GetValue get_value) {
return [=](auto is_loading, const auto& options, auto* obj,
auto* j) -> absl::Status {
if constexpr (is_loading) {
const auto& value = get_value();
if (!internal_json::JsonSame(*j, value)) {
return internal_json::ExpectedError(*j, ::nlohmann::json(value).dump());
}
} else {
*j = get_value();
}
return absl::OkStatus();
};
}
template <typename Validator, typename Binder = decltype(DefaultBinder<>)>
constexpr auto Validate(Validator validator, Binder binder = DefaultBinder<>) {
return [=](auto is_loading, const auto& options, auto* obj,
auto* j) -> absl::Status {
if constexpr (is_loading) {
TENSORSTORE_RETURN_IF_ERROR(binder(is_loading, options, obj, j));
return internal::InvokeForStatus(validator, options, obj);
} else {
return binder(is_loading, options, obj, j);
}
};
}
template <typename Initializer>
constexpr auto Initialize(Initializer initializer) {
return [=](auto is_loading, const auto& options, [[maybe_unused]] auto* obj,
auto*) -> absl::Status {
if constexpr (is_loading) {
return internal::InvokeForStatus(initializer, obj);
} else {
return absl::OkStatus();
}
};
}
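// Projection binds a part of the object selected either by a pointer to
// member (the `Proj` template form) or by an invocable returning a reference,
// and forwards that projected reference to `binder`.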
template <auto Proj, typename Binder = decltype(DefaultBinder<>)>
constexpr auto Projection(Binder binder = DefaultBinder<>) {
return [binder = std::move(binder)](auto is_loading, const auto& options,
auto* obj, auto* j) -> absl::Status {
auto&& projected = std::invoke(Proj, *obj);
return binder(is_loading, options, &projected, j);
};
}
template <typename Proj, typename Binder = decltype(DefaultBinder<>)>
constexpr auto Projection(Proj projection, Binder binder = DefaultBinder<>) {
return [projection = std::move(projection), binder = std::move(binder)](
auto is_loading, const auto& options, auto* obj,
auto* j) -> absl::Status {
auto&& projected = std::invoke(projection, *obj);
return binder(is_loading, options, &projected, j);
};
}
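// GetterSetter binds through an accessor pair: when saving it invokes `get`
// and binds the result; when loading it binds into a temporary (of type T, or
// the getter's result type if T is void) and then passes it to `set`.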
template <typename T = void, typename Get, typename Set,
typename Binder = decltype(DefaultBinder<>)>
constexpr auto GetterSetter(Get get, Set set, Binder binder = DefaultBinder<>) {
return [get = std::move(get), set = std::move(set),
binder = std::move(binder)](auto is_loading, const auto& options,
auto* obj, auto* j) -> absl::Status {
if constexpr (is_loading) {
using Projected = std::conditional_t<
std::is_void_v<T>,
absl::remove_cvref_t<std::invoke_result_t<Get, decltype(*obj)>>, T>;
Projected projected;
TENSORSTORE_RETURN_IF_ERROR(binder(is_loading, options, &projected, j));
return internal::InvokeForStatus(set, *obj, std::move(projected));
} else {
auto&& projected = std::invoke(get, *obj);
return binder(is_loading, options, &projected, j);
}
};
}
template <typename LoadBinder = decltype(EmptyBinder),
typename SaveBinder = decltype(EmptyBinder)>
constexpr auto LoadSave(LoadBinder load_binder = EmptyBinder,
SaveBinder save_binder = EmptyBinder) {
return [=](auto is_loading, const auto& options, auto* obj,
auto* j) -> absl::Status {
if constexpr (is_loading) {
return load_binder(is_loading, options, obj, j);
} else {
return save_binder(is_loading, options, obj, j);
}
};
}
enum IncludeDefaultsPolicy {
kMaybeIncludeDefaults,
kNeverIncludeDefaults,
kAlwaysIncludeDefaults,
};
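// DefaultValue supplies a default when loading from a missing ("discarded")
// JSON value. When saving, depending on the policy and the IncludeDefaults
// option, a value equal to the default is written back as a discarded JSON
// value so that the member can be omitted.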
template <IncludeDefaultsPolicy Policy = kMaybeIncludeDefaults,
typename GetDefault, typename Binder = decltype(DefaultBinder<>)>
constexpr auto DefaultValue(GetDefault get_default,
Binder binder = DefaultBinder<>) {
return [=](auto is_loading, const auto& options, auto* obj,
::nlohmann::json* j) -> absl::Status {
using T = std::remove_const_t<std::remove_pointer_t<decltype(obj)>>;
if constexpr (is_loading) {
if (j->is_discarded()) {
return internal::InvokeForStatus(get_default, obj);
}
return binder(is_loading, options, obj, j);
} else {
TENSORSTORE_RETURN_IF_ERROR(binder(is_loading, options, obj, j));
if constexpr (Policy == kAlwaysIncludeDefaults) {
return absl::OkStatus();
}
if constexpr (Policy == kMaybeIncludeDefaults) {
IncludeDefaults include_defaults(options);
if (include_defaults.include_defaults()) {
return absl::OkStatus();
}
}
T default_obj;
::nlohmann::json default_j;
if (internal::InvokeForStatus(get_default, &default_obj).ok() &&
binder(is_loading, options, &default_obj, &default_j).ok() &&
internal_json::JsonSame(default_j, *j)) {
*j = ::nlohmann::json(::nlohmann::json::value_t::discarded);
}
return absl::OkStatus();
}
};
}
template <IncludeDefaultsPolicy DefaultsPolicy = kMaybeIncludeDefaults,
typename Binder = decltype(DefaultBinder<>)>
constexpr auto DefaultInitializedValue(Binder binder = DefaultBinder<>) {
return internal_json_binding::DefaultValue<DefaultsPolicy>(
[](auto* obj) { *obj = absl::remove_cvref_t<decltype(*obj)>{}; },
std::move(binder));
}
template <IncludeDefaultsPolicy Policy = kMaybeIncludeDefaults,
typename GetDefault, typename IsDefault,
typename Binder = decltype(DefaultBinder<>)>
constexpr auto DefaultPredicate(GetDefault get_default, IsDefault is_default,
Binder binder = DefaultBinder<>) {
return [=](auto is_loading, const auto& options, auto* obj,
::nlohmann::json* j) -> absl::Status {
if constexpr (is_loading) {
if (j->is_discarded()) {
return internal::InvokeForStatus(get_default, obj);
}
return binder(is_loading, options, obj, j);
} else {
bool include_defaults_value = Policy == kAlwaysIncludeDefaults;
if constexpr (Policy == kMaybeIncludeDefaults) {
IncludeDefaults include_defaults(options);
include_defaults_value = include_defaults.include_defaults();
}
if (!include_defaults_value && is_default(obj)) {
*j = ::nlohmann::json(::nlohmann::json::value_t::discarded);
return absl::OkStatus();
}
return binder(is_loading, options, obj, j);
}
};
}
template <IncludeDefaultsPolicy Policy = kMaybeIncludeDefaults,
typename IsDefault, typename Binder = decltype(DefaultBinder<>)>
constexpr auto DefaultInitializedPredicate(IsDefault is_default,
Binder binder = DefaultBinder<>) {
return internal_json_binding::DefaultPredicate<Policy>(
[](auto* obj) { *obj = absl::remove_cvref_t<decltype(*obj)>{}; },
std::move(is_default), std::move(binder));
}
template <typename T, typename TransformedValueBinder,
typename OriginalValueBinder = decltype(DefaultBinder<>)>
constexpr auto Compose(
TransformedValueBinder transformed_value_binder,
OriginalValueBinder original_value_binder = DefaultBinder<>) {
return [=](auto is_loading, const auto& options, auto* obj,
auto* j) -> absl::Status {
T value;
if constexpr (is_loading) {
TENSORSTORE_RETURN_IF_ERROR(
original_value_binder(is_loading, options, &value, j));
return transformed_value_binder(is_loading, options, obj, &value);
} else {
TENSORSTORE_RETURN_IF_ERROR(
transformed_value_binder(is_loading, options, obj, &value));
return original_value_binder(is_loading, options, &value, j);
}
};
}
template <typename GetBinder>
constexpr auto Dependent(GetBinder get_binder) {
return [=](auto is_loading, const auto& options, auto* obj,
auto*... j) -> absl::Status {
return get_binder(is_loading, options, obj, j...)(is_loading, options, obj,
j...);
};
}
namespace sequence_impl {
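// invoke_forward applies the binders left to right, stopping at the first
// error; invoke_reverse applies them right to left by folding over the
// assignment operator, whose operands are evaluated right to left.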
template <typename Loading, typename Options, typename Obj, typename J,
typename... Binder>
inline absl::Status invoke_reverse(Loading is_loading, Options& options,
Obj* obj, J* j, Binder... binder) {
absl::Status s;
std::true_type right_to_left;
right_to_left =
(((s.ok() ? (void)(s = binder(is_loading, options, obj, j)) : (void)0),
right_to_left) = ... = right_to_left);
return s;
}
template <typename Loading, typename Options, typename Obj, typename J,
typename... Binder>
inline absl::Status invoke_forward(Loading is_loading, Options& options,
Obj* obj, J* j, Binder... binder) {
absl::Status s;
[[maybe_unused]] bool ok =
(((s = binder(is_loading, options, obj, j)).ok()) && ...);
return s;
}
}
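// Sequence composes several binders over the same object and JSON value.
// They run left to right when loading and right to left when saving, so that
// a sequence of transformations round-trips symmetrically.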
template <typename... Binder>
constexpr auto Sequence(Binder... binder) {
return [=](auto is_loading, const auto& options, auto* obj, auto* j) {
if constexpr (is_loading) {
return sequence_impl::invoke_forward(is_loading, options, obj, j,
binder...);
} else {
return sequence_impl::invoke_reverse(is_loading, options, obj, j,
binder...);
}
};
}
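// Object expects (or produces) a JSON object and applies the given member
// binders to it. When loading, any members not consumed by the binders result
// in an "extra members" error; DiscardExtraMembers can be used to ignore
// them.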
template <typename... MemberBinder>
constexpr auto Object(MemberBinder... member_binder) {
return [=](auto is_loading, const auto& options, auto* obj,
auto* j) -> absl::Status {
::nlohmann::json::object_t* j_obj;
if constexpr (is_loading) {
if constexpr (std::is_same_v<::nlohmann::json*, decltype(j)>) {
j_obj = j->template get_ptr<::nlohmann::json::object_t*>();
if (!j_obj) {
return internal_json::ExpectedError(*j, "object");
}
} else {
j_obj = j;
}
TENSORSTORE_RETURN_IF_ERROR(sequence_impl::invoke_forward(
is_loading, options, obj, j_obj, member_binder...));
if (!j_obj->empty()) {
return internal_json::JsonExtraMembersError(*j_obj);
}
return absl::OkStatus();
} else {
if constexpr (std::is_same_v<::nlohmann::json*, decltype(j)>) {
*j = ::nlohmann::json::object_t();
j_obj = j->template get_ptr<::nlohmann::json::object_t*>();
} else {
j_obj = j;
j_obj->clear();
}
return sequence_impl::invoke_reverse(is_loading, options, obj, j_obj,
member_binder...);
}
};
}
template <bool kDropDiscarded, typename MemberName, typename Binder>
struct MemberBinderImpl {
MemberName name;
Binder binder;
template <typename Options, typename Obj>
absl::Status operator()(std::true_type is_loading, const Options& options,
Obj* obj, ::nlohmann::json::object_t* j_obj) const {
::nlohmann::json j_member = internal_json::JsonExtractMember(j_obj, name);
if constexpr (kDropDiscarded) {
if (j_member.is_discarded()) return absl::OkStatus();
}
auto status = binder(is_loading, options, obj, &j_member);
return status.ok()
? status
: MaybeAnnotateStatus(
status, tensorstore::StrCat("Error parsing object member ",
QuoteString(name)));
}
template <typename Options, typename Obj>
absl::Status operator()(std::false_type is_loading, const Options& options,
Obj* obj, ::nlohmann::json::object_t* j_obj) const {
::nlohmann::json j_member(::nlohmann::json::value_t::discarded);
TENSORSTORE_RETURN_IF_ERROR(
binder(is_loading, options, obj, &j_member),
MaybeAnnotateStatus(
_, tensorstore::StrCat("Error converting object member ",
QuoteString(name))));
if (!j_member.is_discarded()) {
j_obj->emplace(name, std::move(j_member));
}
return absl::OkStatus();
}
};
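// Member binds a single named member of a JSON object; a missing member is
// presented to the nested binder as a discarded JSON value. OptionalMember
// instead skips the nested binder entirely when the member is absent.
//
// Example (illustrative; `Foo` is a hypothetical struct):
//   struct Foo { int x; };
//   const auto FooBinder = Object(Member("x", Projection(&Foo::x)));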
template <typename MemberName, typename Binder = decltype(DefaultBinder<>)>
constexpr auto Member(MemberName name, Binder binder = DefaultBinder<>) {
return MemberBinderImpl<false, MemberName, Binder>{std::move(name),
std::move(binder)};
}
template <typename MemberName, typename Binder = decltype(DefaultBinder<>)>
constexpr auto OptionalMember(MemberName name,
Binder binder = DefaultBinder<>) {
return MemberBinderImpl<true, MemberName, Binder>{std::move(name),
std::move(binder)};
}
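// AtMostOne and AtLeastOne validate, at load time, how many of the named
// members are present in the JSON object; they do not consume or bind the
// members themselves.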
template <typename... MemberName>
constexpr auto AtMostOne(MemberName... names) {
return [=](auto is_loading, const auto& options, auto* obj,
::nlohmann::json::object_t* j) -> absl::Status {
if constexpr (is_loading) {
const auto has_member = [&](auto name) {
return j->find(name) == j->end() ? 0 : 1;
};
if ((has_member(names) + ...) > 1) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"At most one of ",
absl::StrJoin({QuoteString(std::string_view(names))...}, ", "),
" members is allowed"));
}
}
return absl::OkStatus();
};
}
template <typename... MemberName>
constexpr auto AtLeastOne(MemberName... names) {
return [=](auto is_loading, const auto& options, auto* obj,
::nlohmann::json::object_t* j) -> absl::Status {
if constexpr (is_loading) {
const auto has_member = [&](auto name) {
return j->find(name) == j->end() ? 0 : 1;
};
if ((has_member(names) + ...) == 0) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"At least one of ",
absl::StrJoin(
std::make_tuple(QuoteString(std::string_view(names))...), ", "),
" members must be specified"));
}
}
return absl::OkStatus();
};
}
namespace discard_extra_members_binder {
constexpr inline auto DiscardExtraMembers =
[](auto is_loading, const auto& options, auto* obj,
::nlohmann::json::object_t* j_obj) -> absl::Status {
if constexpr (is_loading) {
j_obj->clear();
}
return absl::OkStatus();
};
}
using discard_extra_members_binder::DiscardExtraMembers;
}
}
#endif | #include "tensorstore/internal/json_binding/json_binding.h"
#include <cstdint>
#include <optional>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
#include "tensorstore/box.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/json/json.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/gtest.h"
#include "tensorstore/internal/json_binding/std_array.h"
#include "tensorstore/internal/json_binding/std_optional.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/json_serialization_options_base.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status_testutil.h"
namespace {
namespace jb = tensorstore::internal_json_binding;
using ::nlohmann::json;
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal::ParseJson;
using ::tensorstore::internal_json::JsonParseArray;
using ::tensorstore::internal_json::JsonValidateArrayLength;
TEST(JsonTest, SimpleParse) {
const char kArray[] = R"({ "foo": "bar" })";
auto x = ParseJson("");
EXPECT_TRUE(x.is_discarded());
auto y = ParseJson(kArray);
EXPECT_FALSE(y.is_discarded());
auto one = ParseJson("1");
EXPECT_FALSE(one.is_discarded());
}
TEST(JsonParseArrayTest, Basic) {
bool size_received = false;
std::vector<std::pair<::nlohmann::json, std::ptrdiff_t>> elements;
EXPECT_EQ(absl::OkStatus(),
JsonParseArray(
::nlohmann::json{1, 2, 3},
[&](std::ptrdiff_t s) {
EXPECT_EQ(3, s);
size_received = true;
return JsonValidateArrayLength(s, 3);
},
[&](const ::nlohmann::json& j, std::ptrdiff_t i) {
EXPECT_TRUE(size_received);
elements.emplace_back(j, i);
return absl::OkStatus();
}));
EXPECT_TRUE(size_received);
EXPECT_THAT(elements, ::testing::ElementsAre(::testing::Pair(1, 0),
::testing::Pair(2, 1),
::testing::Pair(3, 2)));
}
TEST(JsonParseArrayTest, NotArray) {
EXPECT_THAT(JsonParseArray(
::nlohmann::json(3),
[&](std::ptrdiff_t s) { return absl::OkStatus(); },
[&](const ::nlohmann::json& j, std::ptrdiff_t i) {
return absl::OkStatus();
}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Expected array, but received: 3"));
}
TEST(JsonValidateArrayLength, Success) {
EXPECT_EQ(absl::OkStatus(), JsonValidateArrayLength(3, 3));
}
TEST(JsonValidateArrayLength, Failure) {
EXPECT_THAT(JsonValidateArrayLength(3, 4),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Array has length 3 but should have length 4"));
}
TEST(JsonParseArrayTest, SizeCallbackError) {
EXPECT_THAT(
JsonParseArray(
::nlohmann::json{1, 2, 3},
[&](std::ptrdiff_t s) { return absl::UnknownError("size_callback"); },
[&](const ::nlohmann::json& j, std::ptrdiff_t i) {
return absl::OkStatus();
}),
MatchesStatus(absl::StatusCode::kUnknown, "size_callback"));
}
TEST(JsonParseArrayTest, ElementCallbackError) {
EXPECT_THAT(JsonParseArray(
::nlohmann::json{1, 2, 3},
[&](std::ptrdiff_t s) { return absl::OkStatus(); },
[&](const ::nlohmann::json& j, std::ptrdiff_t i) {
if (i == 0) return absl::OkStatus();
return absl::UnknownError("element");
}),
MatchesStatus(absl::StatusCode::kUnknown,
"Error parsing value at position 1: element"));
}
TEST(JsonBindingTest, Example) {
struct Foo {
int x;
std::string y;
std::optional<int> z;
};
constexpr auto FooBinder = [] {
return jb::Object(
jb::Member("x", jb::Projection(&Foo::x)),
jb::Member("y", jb::Projection(&Foo::y, jb::DefaultValue([](auto* y) {
*y = "default";
}))),
jb::Member("z", jb::Projection(&Foo::z)));
};
EXPECT_EQ(::nlohmann::json({{"x", 3}}),
jb::ToJson(Foo{3, "default", std::nullopt}, FooBinder(),
tensorstore::IncludeDefaults{false}));
auto value =
jb::FromJson<Foo>({{"x", 3}, {"y", "value"}, {"z", 10}}, FooBinder())
.value();
EXPECT_EQ(3, value.x);
EXPECT_EQ("value", value.y);
EXPECT_EQ(10, value.z);
}
TEST(JsonBindingTest, SequenceOrder) {
auto binder = jb::Sequence(
[](auto is_loading, const auto& options, int* obj, auto* j) {
*obj = 1;
return absl::OkStatus();
},
[](auto is_loading, const auto& options, int* obj, auto* j) {
*obj = 3;
return absl::OkStatus();
});
int x = 0;
::nlohmann::json j({{"x", 3}});
EXPECT_TRUE(binder(std::true_type{}, jb::NoOptions{}, &x, &j).ok());
EXPECT_EQ(3, x);
EXPECT_TRUE(binder(std::false_type{}, jb::NoOptions{}, &x, &j).ok());
EXPECT_EQ(1, x);
}
TEST(JsonBindingTest, ValueAsBinder) {
tensorstore::TestJsonBinderRoundTrip<bool>(
{
{true, ::nlohmann::json(true)},
},
jb::ValueAsBinder);
tensorstore::TestJsonBinderRoundTrip<std::int64_t>(
{
{3, ::nlohmann::json(3)},
},
jb::ValueAsBinder);
tensorstore::TestJsonBinderRoundTrip<uint64_t>(
{
{4, ::nlohmann::json(4)},
},
jb::ValueAsBinder);
tensorstore::TestJsonBinderRoundTrip<double>(
{
{5, ::nlohmann::json(5)},
{5.0, ::nlohmann::json(5.0)},
},
jb::ValueAsBinder);
tensorstore::TestJsonBinderRoundTrip<std::string>(
{
{"a", ::nlohmann::json("a")},
{"", ::nlohmann::json("")},
},
jb::ValueAsBinder);
}
TEST(JsonBindingTest, LooseValueAsBinder) {
using testing::Eq;
tensorstore::TestJsonBinderFromJson<bool>(
{
{::nlohmann::json(true), Eq(true)},
{::nlohmann::json("true"), Eq(true)},
},
jb::LooseValueAsBinder);
tensorstore::TestJsonBinderFromJson<std::int64_t>(
{
{::nlohmann::json(3), Eq(3)},
{::nlohmann::json(3.0), Eq(3)},
{::nlohmann::json("3"), Eq(3)},
},
jb::LooseValueAsBinder);
tensorstore::TestJsonBinderFromJson<uint64_t>(
{
{::nlohmann::json(4), Eq(4)},
{::nlohmann::json(4.0), Eq(4)},
{::nlohmann::json("4"), Eq(4)},
},
jb::LooseValueAsBinder);
tensorstore::TestJsonBinderFromJson<double>(
{
{::nlohmann::json(5.0), Eq(5.0)},
{::nlohmann::json(5), Eq(5.0)},
{::nlohmann::json("5"), Eq(5.0)},
},
jb::LooseValueAsBinder);
tensorstore::TestJsonBinderRoundTrip<std::string>(
{
{"a", ::nlohmann::json("a")},
{"", ::nlohmann::json("")},
},
jb::LooseValueAsBinder);
}
TEST(JsonBindingTest, NonEmptyStringBinder) {
using testing::Eq;
tensorstore::TestJsonBinderRoundTrip<std::string>(
{
{"a", ::nlohmann::json("a")},
},
jb::NonEmptyStringBinder);
tensorstore::TestJsonBinderFromJson<std::string>(
{
{"", MatchesStatus(absl::StatusCode::kInvalidArgument,
"Validation of string failed, received: \"\"")},
},
jb::NonEmptyStringBinder);
}
TEST(JsonBindingTest, FloatBinders) {
using testing::Eq;
tensorstore::TestJsonBinderFromJson<float>(
{
{::nlohmann::json(5.0), Eq(5.0f)},
{::nlohmann::json(5), Eq(5.0f)},
},
jb::FloatBinder);
tensorstore::TestJsonBinderFromJson<double>(
{
{::nlohmann::json(5.0), Eq(5.0)},
{::nlohmann::json(5), Eq(5.0)},
},
jb::FloatBinder);
tensorstore::TestJsonBinderFromJson<float>(
{
{::nlohmann::json(5.0), Eq(5.0f)},
{::nlohmann::json(5), Eq(5.0f)},
{::nlohmann::json("5"), Eq(5.0f)},
},
jb::LooseFloatBinder);
tensorstore::TestJsonBinderFromJson<double>(
{
{::nlohmann::json(5.0), Eq(5.0)},
{::nlohmann::json(5), Eq(5.0)},
{::nlohmann::json("5"), Eq(5.0)},
},
jb::LooseFloatBinder);
}
TEST(JsonBindingTest, DefaultValueDiscarded) {
const auto binder =
jb::DefaultValue([](auto* obj) { *obj = 3; },
jb::DefaultValue([](auto* obj) { *obj = 3; }));
tensorstore::TestJsonBinderRoundTrip<int>(
{
{3, ::nlohmann::json(::nlohmann::json::value_t::discarded)},
{4, 4},
},
binder, tensorstore::IncludeDefaults{false});
tensorstore::TestJsonBinderRoundTrip<int>(
{
{3, 3},
{4, 4},
},
binder, tensorstore::IncludeDefaults{true});
}
TEST(JsonBindingTest, GetterSetter) {
struct Foo {
int x;
int get_x() const { return x; }
void set_x(int value) { this->x = value; }
};
const auto FooBinder =
jb::Object(jb::Member("x", jb::GetterSetter(&Foo::get_x, &Foo::set_x)));
EXPECT_EQ(::nlohmann::json({{"x", 3}}), jb::ToJson(Foo{3}, FooBinder));
auto value = jb::FromJson<Foo>({{"x", 3}}, FooBinder).value();
EXPECT_EQ(3, value.x);
}
TEST(JsonBindingTest, Constant) {
const auto binder = jb::Constant([] { return 3; });
EXPECT_THAT(jb::ToJson("ignored", binder),
::testing::Optional(::nlohmann::json(3)));
EXPECT_THAT(jb::FromJson<std::string>(::nlohmann::json(3), binder),
::testing::Optional(std::string{}));
EXPECT_THAT(jb::FromJson<std::string>(::nlohmann::json(4), binder),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Expected 3, but received: 4"));
}
TEST(JsonBindingTest, ObjectMember) {
tensorstore::TestJsonBinderRoundTrip<int>(
{
{3, ::nlohmann::json({{"x", 3}})},
},
jb::Object(jb::Member("x")));
}
TEST(JsonBindingTest, ObjectOptionalMember) {
struct Foo {
int x = 1;
};
const auto FooBinder =
jb::Object(jb::OptionalMember("x", jb::Projection(&Foo::x)),
jb::DiscardExtraMembers);
EXPECT_EQ(::nlohmann::json({{"x", 3}}), jb::ToJson(Foo{3}, FooBinder));
{
auto value = jb::FromJson<Foo>({{"x", 3}}, FooBinder).value();
EXPECT_EQ(3, value.x);
}
{
auto value = jb::FromJson<Foo>({{"y", 3}}, FooBinder).value();
EXPECT_EQ(1, value.x);
}
}
TEST(JsonBindingTest, StaticRankBox) {
using Value = tensorstore::Box<3>;
const auto binder = jb::Object(
jb::Member("origin", jb::Projection([](auto& x) { return x.origin(); })),
jb::Member("shape", jb::Projection([](auto& x) { return x.shape(); })));
tensorstore::TestJsonBinderRoundTrip<Value>(
{
{Value({1, 2, 3}, {4, 5, 6}),
{{"origin", {1, 2, 3}}, {"shape", {4, 5, 6}}}},
},
binder);
}
TEST(JsonBindingTest, DynamicRankBox) {
using Value = tensorstore::Box<>;
const auto binder = jb::Object(
jb::Member("rank", jb::GetterSetter(
[](auto& x) { return x.rank(); },
[](auto& x, tensorstore::DimensionIndex rank) {
x.set_rank(rank);
},
jb::Integer(0))),
jb::Member("origin", jb::Projection([](auto& x) { return x.origin(); })),
jb::Member("shape", jb::Projection([](auto& x) { return x.shape(); })));
tensorstore::TestJsonBinderRoundTrip<Value>(
{
{Value({1, 2, 3}, {4, 5, 6}),
{{"rank", 3}, {"origin", {1, 2, 3}}, {"shape", {4, 5, 6}}}},
},
binder);
}
TEST(JsonBindingTest, Null) {
tensorstore::TestJsonBinderRoundTrip<std::nullptr_t>({
{nullptr, nullptr},
});
tensorstore::TestJsonBinderFromJson<std::nullptr_t>({
{42, MatchesStatus(absl::StatusCode::kInvalidArgument,
"Expected null, but received: 42")},
});
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json_binding/json_binding.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json_binding/json_binding_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
177c49f7-cafe-4e0f-86a9-c448609d3dd8 | cpp | google/tensorstore | optional_object | tensorstore/internal/json_binding/optional_object.h | tensorstore/internal/json_binding/optional_object_test.cc | #ifndef TENSORSTORE_INTERNAL_JSON_BINDING_OPTIONAL_OBJECT_H_
#define TENSORSTORE_INTERNAL_JSON_BINDING_OPTIONAL_OBJECT_H_
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/json/value_as.h"
#include "tensorstore/internal/json_binding/json_binding.h"
namespace tensorstore {
namespace internal_json_binding {
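// OptionalObject wraps an Object binder so that a missing ("discarded") JSON
// value loads as an empty object, and an object that serializes with no
// members saves as a discarded value instead of an empty "{}".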
template <typename ObjectBinder>
constexpr auto OptionalObject(ObjectBinder binder) {
return [binder = std::move(binder)](auto is_loading, const auto& options,
auto* obj, auto* j) -> absl::Status {
::nlohmann::json::object_t json_obj;
if constexpr (is_loading) {
if (!j->is_discarded()) {
if (auto* ptr = j->template get_ptr<::nlohmann::json::object_t*>();
ptr) {
json_obj = std::move(*ptr);
} else {
return internal_json::ExpectedError(*j, "object");
}
}
}
if (auto status = internal_json_binding::Object(binder)(is_loading, options,
obj, &json_obj);
!status.ok()) {
return status;
}
if constexpr (!is_loading) {
if (!json_obj.empty()) {
*j = std::move(json_obj);
} else {
*j = ::nlohmann::json::value_t::discarded;
}
}
return absl::OkStatus();
};
}
}
}
#endif | #include "tensorstore/internal/json_binding/optional_object.h"
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/json_binding/gtest.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/util/status_testutil.h"
namespace jb = tensorstore::internal_json_binding;
namespace {
using ::tensorstore::MatchesStatus;
TEST(JsonBindingTest, RoundTrip) {
tensorstore::TestJsonBinderRoundTrip<::nlohmann::json::object_t>(
{
{{}, ::nlohmann::json(::nlohmann::json::value_t::discarded)},
{{{"a", 1}, {"b", 2}}, {{"a", 1}, {"b", 2}}},
},
jb::OptionalObject(jb::DefaultBinder<>));
}
TEST(JsonBindingTest, Invalid) {
tensorstore::TestJsonBinderFromJson<::nlohmann::json::object_t>(
{
{"abc", MatchesStatus(absl::StatusCode::kInvalidArgument,
"Expected object, but received: \"abc\"")},
},
jb::OptionalObject(jb::DefaultBinder<>));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json_binding/optional_object.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json_binding/optional_object_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
81a99ee7-1816-4e79-bc07-d34924514471 | cpp | google/tensorstore | cache_key | tensorstore/internal/cache_key/cache_key.h | tensorstore/internal/cache_key/cache_key_test.cc | #ifndef TENSORSTORE_INTERNAL_CACHE_KEY_CACHE_KEY_H_
#define TENSORSTORE_INTERNAL_CACHE_KEY_CACHE_KEY_H_
#include <string>
#include <string_view>
#include <type_traits>
#include <typeinfo>
#include "absl/base/attributes.h"
#include "tensorstore/internal/cache_key/fwd.h"
#include "tensorstore/util/apply_members/apply_members.h"
namespace tensorstore {
namespace internal {
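// CacheKeyExcludes wraps a value that should remain visible to ApplyMembers
// but be excluded from the encoded cache key; its encoder intentionally
// writes nothing.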
template <typename T>
struct CacheKeyExcludes {
T value;
template <typename X, typename F>
static constexpr auto ApplyMembers(X&& x, F f) {
return f(x.value);
}
};
template <typename T>
CacheKeyExcludes(T&& x) -> CacheKeyExcludes<T>;
template <typename... U>
void EncodeCacheKey(std::string* out, const U&... u);
inline void EncodeCacheKeyAdl() {}
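// CacheKeyEncoder dispatches on the value type: trivially serializable types
// are appended as raw bytes, strings are length-prefixed, std::type_info is
// encoded via its name, ApplyMembers-compatible types are encoded member by
// member, and other types fall back to an ADL-found EncodeCacheKeyAdl
// overload.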
template <typename T, typename SFINAE>
struct CacheKeyEncoder {
static void Encode(std::string* out, const T& value) {
EncodeCacheKeyAdl(out, value);
}
};
template <typename T>
struct CacheKeyEncoder<T, std::enable_if_t<SerializeUsingMemcpy<T>>> {
static void Encode(std::string* out, T value) {
out->append(reinterpret_cast<const char*>(&value), sizeof(value));
}
};
template <>
struct CacheKeyEncoder<std::string_view> {
static void Encode(std::string* out, std::string_view k) {
EncodeCacheKey(out, k.size());
out->append(k.data(), k.size());
}
};
template <>
struct CacheKeyEncoder<std::string> : public CacheKeyEncoder<std::string_view> {
};
template <>
struct CacheKeyEncoder<std::type_info> {
static void Encode(std::string* out, const std::type_info& t) {
EncodeCacheKey(out, std::string_view(t.name()));
}
};
template <typename T>
struct CacheKeyEncoder<T*> {
static void Encode(std::string* out, T* value) {
out->append(reinterpret_cast<const char*>(&value), sizeof(value));
}
};
template <typename T>
struct CacheKeyEncoder<CacheKeyExcludes<T>> {
static void Encode(std::string* out, const CacheKeyExcludes<T>& v) {
}
};
template <typename T>
constexpr inline bool IsCacheKeyExcludes = false;
template <typename T>
constexpr inline bool IsCacheKeyExcludes<CacheKeyExcludes<T>> = true;
template <typename T>
struct CacheKeyEncoder<
T, std::enable_if_t<SupportsApplyMembers<T> && !IsCacheKeyExcludes<T> &&
!SerializeUsingMemcpy<T>>> {
static void Encode(std::string* out, const T& v) {
ApplyMembers<T>::Apply(
v, [&out](auto&&... x) { (internal::EncodeCacheKey(out, x), ...); });
}
};
template <typename... U>
ABSL_ATTRIBUTE_ALWAYS_INLINE inline void EncodeCacheKey(std::string* out,
const U&... u) {
(CacheKeyEncoder<U>::Encode(out, u), ...);
}
}
}
#endif | #include "tensorstore/internal/cache_key/cache_key.h"
#include <optional>
#include <variant>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/time/time.h"
#include "tensorstore/internal/cache_key/absl_time.h"
#include "tensorstore/internal/cache_key/std_optional.h"
#include "tensorstore/internal/cache_key/std_variant.h"
namespace {
TEST(CacheKeyTest, CacheKeyTest) {
int x = 1;
float y = 2;
std::string q("q");
absl::Duration d = absl::Seconds(1);
std::optional<int> o = 2;
std::string key;
tensorstore::internal::EncodeCacheKey(&key, x, y, q, d, o);
{
std::string key2;
tensorstore::internal::EncodeCacheKey(&key2, x, y, q, d,
std::optional<int>{});
EXPECT_NE(key, key2);
}
{
std::string key3;
tensorstore::internal::EncodeCacheKey(&key3, x, y, q,
absl::InfiniteDuration(), o);
EXPECT_NE(key, key3);
}
{
std::string key4;
tensorstore::internal::EncodeCacheKey(
&key4, x, y, q, d, tensorstore::internal::CacheKeyExcludes{o});
EXPECT_NE(key, key4);
}
}
TEST(CacheKeyTest, Variant) {
using V = std::variant<int, std::string>;
std::string key;
tensorstore::internal::EncodeCacheKey(&key, V(10));
{
std::string key2;
tensorstore::internal::EncodeCacheKey(&key2, V(11));
EXPECT_NE(key, key2);
}
{
std::string key3;
tensorstore::internal::EncodeCacheKey(&key3, V("abc"));
EXPECT_NE(key, key3);
}
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/cache_key/cache_key.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/cache_key/cache_key_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
815416d7-7bea-456d-bc05-1a7d561ca943 | cpp | google/tensorstore | file_util | tensorstore/internal/os/file_util.h | tensorstore/internal/os/file_util_test.cc | #ifndef TENSORSTORE_INTERNAL_OS_FILE_UTIL_H_
#define TENSORSTORE_INTERNAL_OS_FILE_UTIL_H_
#include <stddef.h>
#include <stdint.h>
#include <string>
#include <string_view>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/time/time.h"
#include "tensorstore/internal/os/unique_handle.h"
#include "tensorstore/util/result.h"
#ifndef _WIN32
#include <fcntl.h>
#include <sys/stat.h>
#include <time.h>
#include <unistd.h>
#endif
#include "tensorstore/internal/os/include_windows.h"
namespace tensorstore {
namespace internal_os {
#ifdef _WIN32
using FileDescriptor = HANDLE;
struct FileDescriptorTraits {
static FileDescriptor Invalid() { return ((FileDescriptor)-1); }
static void Close(FileDescriptor fd);
};
using FileInfo = ::BY_HANDLE_FILE_INFORMATION;
constexpr inline bool IsDirSeparator(char c) { return c == '\\' || c == '/'; }
#else
using FileDescriptor = int;
struct FileDescriptorTraits {
static FileDescriptor Invalid() { return -1; }
static void Close(FileDescriptor fd) { ::close(fd); }
};
typedef struct ::stat FileInfo;
constexpr inline bool IsDirSeparator(char c) { return c == '/'; }
#endif
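// Suffix appended to a path to name the lock file associated with that path.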
inline constexpr std::string_view kLockSuffix = ".__lock";
using UniqueFileDescriptor = UniqueHandle<FileDescriptor, FileDescriptorTraits>;
Result<UniqueFileDescriptor> OpenExistingFileForReading(
const std::string& path);
Result<UniqueFileDescriptor> OpenFileForWriting(const std::string& path);
Result<ptrdiff_t> ReadFromFile(FileDescriptor fd, void* buf, size_t count,
int64_t offset);
Result<ptrdiff_t> WriteToFile(FileDescriptor fd, const void* buf, size_t count);
Result<ptrdiff_t> WriteCordToFile(FileDescriptor fd, absl::Cord value);
absl::Status TruncateFile(FileDescriptor fd);
absl::Status RenameOpenFile(FileDescriptor fd, const std::string& old_name,
const std::string& new_name);
absl::Status DeleteOpenFile(FileDescriptor fd, const std::string& path);
absl::Status DeleteFile(const std::string& path);
absl::Status FsyncFile(FileDescriptor fd);
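// Acquires an exclusive lock on the open file `fd` and returns the function
// to invoke to release it.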
using UnlockFn = void (*)(FileDescriptor fd);
Result<UnlockFn> AcquireFdLock(FileDescriptor fd);
absl::Status GetFileInfo(FileDescriptor fd, FileInfo* info);
absl::Status GetFileInfo(const std::string& path, FileInfo* info);
inline bool IsRegularFile(const FileInfo& info) {
#ifdef _WIN32
return !(info.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY);
#else
return S_ISREG(info.st_mode);
#endif
}
inline bool IsDirectory(const FileInfo& info) {
#ifdef _WIN32
return (info.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY);
#else
return S_ISDIR(info.st_mode);
#endif
}
inline uint64_t GetSize(const FileInfo& info) {
#ifdef _WIN32
return (static_cast<int64_t>(info.nFileSizeHigh) << 32) +
static_cast<int64_t>(info.nFileSizeLow);
#else
return info.st_size;
#endif
}
inline auto GetDeviceId(const FileInfo& info) {
#ifdef _WIN32
return info.dwVolumeSerialNumber;
#else
return info.st_dev;
#endif
}
inline uint64_t GetFileId(const FileInfo& info) {
#ifdef _WIN32
return (static_cast<uint64_t>(info.nFileIndexHigh) << 32) |
static_cast<uint64_t>(info.nFileIndexLow);
#else
return info.st_ino;
#endif
}
inline absl::Time GetMTime(const FileInfo& info) {
#ifdef _WIN32
uint64_t windowsTicks =
(static_cast<uint64_t>(info.ftLastWriteTime.dwHighDateTime) << 32) |
static_cast<uint64_t>(info.ftLastWriteTime.dwLowDateTime);
  // FILETIME stores 100-nanosecond ticks since 1601-01-01, so the sub-second
  // remainder must be scaled from 100ns ticks to nanoseconds.
  return absl::UnixEpoch() +
         absl::Seconds((windowsTicks / 10000000) - 11644473600ULL) +
         absl::Nanoseconds((windowsTicks % 10000000) * 100);
#else
#if defined(__APPLE__)
const struct ::timespec t = info.st_mtimespec;
#else
const struct ::timespec t = info.st_mtim;
#endif
return absl::FromTimeT(t.tv_sec) + absl::Nanoseconds(t.tv_nsec);
#endif
}
Result<UniqueFileDescriptor> OpenDirectoryDescriptor(const std::string& path);
absl::Status MakeDirectory(const std::string& path);
absl::Status FsyncDirectory(FileDescriptor fd);
#ifdef _WIN32
Result<std::string> GetWindowsTempDir();
#endif
}
}
#endif | #include "tensorstore/internal/os/file_util.h"
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_cat.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tensorstore/internal/testing/scoped_directory.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::IsOk;
using ::tensorstore::IsOkAndHolds;
using ::tensorstore::StatusIs;
using ::tensorstore::internal_os::DeleteFile;
using ::tensorstore::internal_os::DeleteOpenFile;
using ::tensorstore::internal_os::FileInfo;
using ::tensorstore::internal_os::FsyncFile;
using ::tensorstore::internal_os::GetDeviceId;
using ::tensorstore::internal_os::GetFileId;
using ::tensorstore::internal_os::GetFileInfo;
using ::tensorstore::internal_os::GetMTime;
using ::tensorstore::internal_os::GetSize;
using ::tensorstore::internal_os::IsDirSeparator;
using ::tensorstore::internal_os::IsRegularFile;
using ::tensorstore::internal_os::OpenExistingFileForReading;
using ::tensorstore::internal_os::OpenFileForWriting;
using ::tensorstore::internal_os::ReadFromFile;
using ::tensorstore::internal_os::RenameOpenFile;
using ::tensorstore::internal_os::TruncateFile;
using ::tensorstore::internal_os::WriteCordToFile;
using ::tensorstore::internal_os::WriteToFile;
TEST(FileUtilTest, Basics) {
tensorstore::internal_testing::ScopedTemporaryDirectory tempdir;
std::string foo_txt = tempdir.path() + "/foo.txt";
std::string renamed_txt = tempdir.path() + "/renamed.txt";
EXPECT_TRUE(IsDirSeparator('/'));
auto now = absl::Now() - absl::Seconds(1);
{
auto f = OpenExistingFileForReading(foo_txt);
EXPECT_THAT(f, StatusIs(absl::StatusCode::kNotFound));
EXPECT_THAT(DeleteFile(foo_txt), StatusIs(absl::StatusCode::kNotFound));
}
{
auto f = OpenFileForWriting(foo_txt);
EXPECT_THAT(f, IsOk());
EXPECT_THAT(TruncateFile(f->get()), IsOk());
EXPECT_THAT(WriteCordToFile(f->get(), absl::Cord("foo")), IsOkAndHolds(3));
EXPECT_THAT(WriteToFile(f->get(), "bar", 3), IsOkAndHolds(3));
EXPECT_THAT(FsyncFile(f->get()), IsOk());
}
{
char buf[16];
auto f = OpenExistingFileForReading(foo_txt);
EXPECT_THAT(f, IsOk());
EXPECT_THAT(ReadFromFile(f->get(), buf, 3, 0), IsOkAndHolds(3));
FileInfo info;
EXPECT_THAT(GetFileInfo(f->get(), &info), IsOk());
EXPECT_TRUE(IsRegularFile(info));
EXPECT_THAT(GetSize(info), 6);
EXPECT_TRUE(IsRegularFile(info));
EXPECT_THAT(GetFileId(info), ::testing::Ne(0));
EXPECT_THAT(GetDeviceId(info), ::testing::Ne(0));
EXPECT_THAT(GetMTime(info), ::testing::Ge(now));
EXPECT_THAT(RenameOpenFile(f->get(), foo_txt, renamed_txt), IsOk());
}
{
auto f = OpenExistingFileForReading(renamed_txt);
EXPECT_THAT(f, IsOk());
EXPECT_THAT(
TruncateFile(f->get()),
::testing::AnyOf(StatusIs(absl::StatusCode::kInvalidArgument),
StatusIs(absl::StatusCode::kPermissionDenied)));
}
{
std::string bar_txt = tempdir.path() + "/bar.txt";
auto f = OpenFileForWriting(bar_txt);
EXPECT_THAT(WriteToFile(f->get(), "bar", 3), IsOkAndHolds(3));
EXPECT_THAT(DeleteOpenFile(f->get(), bar_txt), IsOk());
}
}
TEST(FileUtilTest, LockFile) {
tensorstore::internal_testing::ScopedTemporaryDirectory tempdir;
std::string foo_txt = absl::StrCat(tempdir.path(), "/foo.txt",
tensorstore::internal_os::kLockSuffix);
auto f = OpenFileForWriting(foo_txt);
EXPECT_THAT(f, IsOk());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto lock, tensorstore::internal_os::AcquireFdLock(f->get()));
lock(f->get());
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/os/file_util.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/os/file_util_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
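As a rough usage sketch (not taken from the repository), the FileInfo accessors declared above compose into a small stat-style helper; the function name PrintFileMetadata and the output format are invented here, while OpenExistingFileForReading, GetFileInfo, GetSize, GetMTime and IsRegularFile are used as declared in file_util.h.
#include <iostream>
#include <string>

#include "absl/time/time.h"
#include "tensorstore/internal/os/file_util.h"

namespace {
using ::tensorstore::internal_os::FileInfo;
using ::tensorstore::internal_os::GetFileInfo;
using ::tensorstore::internal_os::GetMTime;
using ::tensorstore::internal_os::GetSize;
using ::tensorstore::internal_os::IsRegularFile;
using ::tensorstore::internal_os::OpenExistingFileForReading;

// Prints the size and modification time of a regular file; errors are
// silently ignored to keep the sketch short.
void PrintFileMetadata(const std::string& path) {
  auto fd = OpenExistingFileForReading(path);
  if (!fd.ok()) return;
  FileInfo info;
  if (!GetFileInfo(fd->get(), &info).ok() || !IsRegularFile(info)) return;
  std::cout << path << ": " << GetSize(info) << " bytes, mtime "
            << absl::FormatTime(GetMTime(info)) << "\n";
}
}  // namespace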
49981549-6092-4e51-8692-efceb12efc11 | cpp | google/tensorstore | file_lister | tensorstore/internal/os/file_lister.h | tensorstore/internal/os/file_lister_test.cc | #ifndef TENSORSTORE_INTERNAL_OS_FILE_LISTER_H_
#define TENSORSTORE_INTERNAL_OS_FILE_LISTER_H_
#include <stddef.h>
#include <stdint.h>
#include <string>
#include <string_view>
#include "absl/functional/function_ref.h"
#include "absl/status/status.h"
namespace tensorstore {
namespace internal_os {
class ListerEntry {
public:
struct Impl;
ListerEntry(Impl* impl) : impl_(impl) {}
bool IsDirectory();
const std::string& GetFullPath();
std::string_view GetPathComponent();
int64_t GetSize();
absl::Status Delete();
private:
Impl* impl_;
};
absl::Status RecursiveFileList(
std::string root_directory,
absl::FunctionRef<bool(std::string_view)> recurse_into,
absl::FunctionRef<absl::Status(ListerEntry)> on_item);
}
}
#endif | #include "tensorstore/internal/os/file_lister.h"
#include <optional>
#include <string>
#include <string_view>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/absl_check.h"
#include "absl/log/absl_log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "tensorstore/internal/os/file_util.h"
#include "tensorstore/internal/testing/scoped_directory.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::IsOk;
using ::tensorstore::IsOkAndHolds;
using ::tensorstore::internal_os::FsyncDirectory;
using ::tensorstore::internal_os::FsyncFile;
using ::tensorstore::internal_os::MakeDirectory;
using ::tensorstore::internal_os::OpenDirectoryDescriptor;
using ::tensorstore::internal_os::OpenExistingFileForReading;
using ::tensorstore::internal_os::OpenFileForWriting;
using ::tensorstore::internal_os::ReadFromFile;
using ::tensorstore::internal_os::RecursiveFileList;
using ::tensorstore::internal_os::WriteToFile;
using ::tensorstore::internal_testing::ScopedTemporaryDirectory;
static std::optional<ScopedTemporaryDirectory> g_scoped_dir;
void AddFiles(std::string_view root) {
ABSL_CHECK(!root.empty());
TENSORSTORE_CHECK_OK(MakeDirectory(absl::StrCat(root, "/xyz")));
TENSORSTORE_CHECK_OK(MakeDirectory(absl::StrCat(root, "/zzq")));
std::string fname = "/a.txt";
for (; fname[1] < 'd'; fname[1] += 1) {
TENSORSTORE_CHECK_OK_AND_ASSIGN(
auto f, OpenFileForWriting(absl::StrCat(root, fname)));
TENSORSTORE_CHECK_OK(FsyncFile(f.get()));
TENSORSTORE_CHECK_OK_AND_ASSIGN(
auto g, OpenFileForWriting(absl::StrCat(root, "/xyz", fname)));
TENSORSTORE_CHECK_OK(FsyncFile(g.get()));
}
for (const auto& suffix : {"/xyz", ""}) {
TENSORSTORE_CHECK_OK_AND_ASSIGN(
auto f, OpenDirectoryDescriptor(absl::StrCat(root, suffix)));
EXPECT_THAT(FsyncDirectory(f.get()), IsOk());
}
}
class RecursiveFileListTest : public testing::Test {
public:
static void SetUpTestSuite() {
g_scoped_dir.emplace();
AddFiles(g_scoped_dir->path());
}
static void TearDownTestSuite() { g_scoped_dir = std::nullopt; }
RecursiveFileListTest() : cwd_(g_scoped_dir->path()) {}
private:
tensorstore::internal_testing::ScopedCurrentWorkingDirectory cwd_;
};
TEST_F(RecursiveFileListTest, MissingIsOk) {
EXPECT_THAT(RecursiveFileList(
g_scoped_dir->path() + "/aax",
[](std::string_view path) { return true; },
[](auto entry) { return absl::OkStatus(); }),
IsOk());
}
TEST_F(RecursiveFileListTest, EmptyIsOk) {
EXPECT_THAT(RecursiveFileList(
g_scoped_dir->path() + "/zzq",
[](std::string_view path) { return true; },
[](auto entry) { return absl::OkStatus(); }),
IsOk());
}
TEST_F(RecursiveFileListTest, FileIsFailure) {
EXPECT_THAT(RecursiveFileList(
g_scoped_dir->path() + "/a.txt",
[](std::string_view path) { return true; },
[](auto entry) { return absl::OkStatus(); }),
::testing::Not(IsOk()));
}
TEST_F(RecursiveFileListTest, FullDirectory) {
for (const std::string& root :
{g_scoped_dir->path(), std::string("."), std::string()}) {
std::vector<std::string> files;
EXPECT_THAT(
RecursiveFileList(
root, [](std::string_view path) { return true; },
[&](auto entry) {
files.push_back(absl::StrCat(entry.IsDirectory() ? "<dir>" : "",
entry.GetPathComponent()));
return absl::OkStatus();
}),
IsOk());
EXPECT_THAT(files, ::testing::UnorderedElementsAre(
"c.txt", "b.txt", "a.txt", "<dir>zzq", "c.txt",
"b.txt", "a.txt", "<dir>xyz", "<dir>"));
}
}
TEST_F(RecursiveFileListTest, SubDirectory) {
std::vector<std::string> files;
EXPECT_THAT(
RecursiveFileList(
"xyz", [](std::string_view path) { return true; },
[&](auto entry) {
files.push_back(absl::StrCat(entry.IsDirectory() ? "<dir>" : "",
entry.GetFullPath()));
return absl::OkStatus();
}),
IsOk());
EXPECT_THAT(files, ::testing::UnorderedElementsAre("xyz/a.txt", "xyz/b.txt",
"xyz/c.txt", "<dir>xyz"));
}
TEST_F(RecursiveFileListTest, NonRecursive) {
std::vector<std::string> files;
EXPECT_THAT(
RecursiveFileList(
"",
[](std::string_view path) {
ABSL_LOG(INFO) << path;
return path.empty();
},
[&](auto entry) {
files.push_back(absl::StrCat(entry.IsDirectory() ? "<dir>" : "",
entry.GetFullPath()));
return absl::OkStatus();
}),
IsOk());
EXPECT_THAT(files,
::testing::UnorderedElementsAre("c.txt", "b.txt", "a.txt",
"<dir>zzq", "<dir>xyz", "<dir>"));
}
TEST(RecursiveFileListEntryTest, DeleteWithOpenFile) {
ScopedTemporaryDirectory tmpdir;
AddFiles(tmpdir.path());
{
auto f = OpenFileForWriting(absl::StrCat(tmpdir.path(), "/read.txt"));
EXPECT_THAT(f, IsOk());
EXPECT_THAT(WriteToFile(f->get(), "bar", 3), IsOkAndHolds(3));
}
{
auto f =
OpenExistingFileForReading(absl::StrCat(tmpdir.path(), "/read.txt"));
EXPECT_THAT(f, IsOk());
std::vector<std::string> files;
EXPECT_THAT(RecursiveFileList(
tmpdir.path(),
[](std::string_view path) { return true; },
[&](auto entry) {
if (entry.GetFullPath() == tmpdir.path()) {
return absl::OkStatus();
}
auto status = entry.Delete();
if (status.ok() || absl::IsNotFound(status))
return absl::OkStatus();
return status;
}),
IsOk());
char buf[16];
EXPECT_THAT(ReadFromFile(f->get(), buf, 3, 0), IsOkAndHolds(3));
}
std::vector<std::string> files;
EXPECT_THAT(
RecursiveFileList(
tmpdir.path(),
[](std::string_view path) { return true; },
[&](auto entry) {
files.push_back(absl::StrCat(entry.IsDirectory() ? "<dir>" : "",
entry.GetPathComponent()));
return absl::OkStatus();
}),
IsOk());
EXPECT_THAT(files, ::testing::UnorderedElementsAre("<dir>"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/os/file_lister.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/os/file_lister_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
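A minimal sketch of a non-test caller of RecursiveFileList, assuming only the API declared in file_lister.h above; CollectRegularFiles is a hypothetical helper name, and the always-true recurse_into callback simply descends into every subdirectory.
#include <string>
#include <string_view>
#include <utility>
#include <vector>

#include "absl/status/status.h"
#include "tensorstore/internal/os/file_lister.h"

namespace {
// Collects the full paths of every non-directory entry under `root`,
// descending into all subdirectories.
absl::Status CollectRegularFiles(std::string root,
                                 std::vector<std::string>& paths) {
  return tensorstore::internal_os::RecursiveFileList(
      std::move(root),
      /*recurse_into=*/[](std::string_view) { return true; },
      /*on_item=*/[&](tensorstore::internal_os::ListerEntry entry) {
        if (!entry.IsDirectory()) paths.push_back(entry.GetFullPath());
        return absl::OkStatus();
      });
}
}  // namespace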
2c1d0483-3721-4fbd-81ae-17a6f8237a77 | cpp | google/tensorstore | subprocess | tensorstore/internal/os/subprocess.h | tensorstore/internal/os/subprocess_test.cc | #ifndef TENSORSTORE_INTERNAL_SUBPROCESS_H_
#define TENSORSTORE_INTERNAL_SUBPROCESS_H_
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace internal {
class Subprocess;
struct SubprocessOptions {
std::string executable;
std::vector<std::string> args;
std::optional<absl::flat_hash_map<std::string, std::string>> env;
struct Inherit {};
struct Ignore {};
struct Redirect {
std::string filename;
};
std::variant<Ignore, Redirect> stdin_action = Ignore{};
std::variant<Inherit, Ignore, Redirect> stdout_action = Inherit{};
std::variant<Inherit, Ignore, Redirect> stderr_action = Inherit{};
};
Result<Subprocess> SpawnSubprocess(const SubprocessOptions& options);
class Subprocess {
public:
Subprocess(const Subprocess&) = default;
Subprocess& operator=(const Subprocess&) = default;
Subprocess(Subprocess&&) = default;
Subprocess& operator=(Subprocess&&) = default;
~Subprocess();
absl::Status Kill(int signal = 9) const;
Result<int> Join(bool block = true) const;
private:
friend Result<Subprocess> SpawnSubprocess(const SubprocessOptions& options);
struct Impl;
Subprocess(std::shared_ptr<Subprocess::Impl> impl) : impl_(std::move(impl)) {}
std::shared_ptr<Subprocess::Impl> impl_;
};
}
}
#endif | #include "tensorstore/internal/os/subprocess.h"
#include <cstdio>
#include <string>
#include <string_view>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "absl/log/absl_log.h"
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "riegeli/bytes/fd_reader.h"
#include "riegeli/bytes/read_all.h"
#include "tensorstore/internal/env.h"
#include "tensorstore/internal/path.h"
#include "tensorstore/internal/testing/scoped_directory.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::internal::JoinPath;
using ::tensorstore::internal::SpawnSubprocess;
using ::tensorstore::internal::SubprocessOptions;
static std::string* program_name = nullptr;
const char kSubprocessArg[] = "--is_subprocess";
const char kSleepArg[] = "--sleep";
TEST(SubprocessTest, Join) {
SubprocessOptions opts;
opts.executable = *program_name;
opts.args = {kSubprocessArg};
auto child = SpawnSubprocess(opts);
TENSORSTORE_ASSERT_OK(child.status());
int exit_code;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(exit_code, child->Join());
EXPECT_EQ(exit_code, 33);
}
TEST(SubprocessTest, Kill) {
SubprocessOptions opts;
opts.executable = *program_name;
opts.args = {kSleepArg, kSubprocessArg};
auto child = SpawnSubprocess(opts);
TENSORSTORE_ASSERT_OK(child.status());
EXPECT_THAT(child->Join(false),
tensorstore::MatchesStatus(absl::StatusCode::kUnavailable));
child->Kill().IgnoreError();
int exit_code;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(exit_code, child->Join());
EXPECT_NE(exit_code, 33);
}
TEST(SubprocessTest, DontInherit) {
SubprocessOptions opts;
opts.executable = *program_name;
opts.args = {kSubprocessArg};
opts.stdout_action = SubprocessOptions::Ignore();
opts.stderr_action = SubprocessOptions::Ignore();
auto child = SpawnSubprocess(opts);
TENSORSTORE_ASSERT_OK(child.status());
int exit_code;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(exit_code, child->Join());
EXPECT_EQ(exit_code, 33);
}
TEST(SubprocessTest, Redirects) {
::tensorstore::internal_testing::ScopedTemporaryDirectory temp_dir;
std::string out_file = JoinPath(temp_dir.path(), "stdout");
SubprocessOptions opts;
opts.executable = *program_name;
opts.args = {kSubprocessArg};
opts.env.emplace(::tensorstore::internal::GetEnvironmentMap());
opts.env->insert_or_assign("SUBPROCESS_TEST_ENV", "1");
opts.stdout_action = SubprocessOptions::Redirect{out_file};
opts.stderr_action = SubprocessOptions::Redirect{out_file};
auto child = SpawnSubprocess(opts);
TENSORSTORE_ASSERT_OK(child.status());
int exit_code;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(exit_code, child->Join());
EXPECT_EQ(exit_code, 33);
std::string filedata;
TENSORSTORE_CHECK_OK(riegeli::ReadAll(riegeli::FdReader(out_file), filedata));
EXPECT_THAT(filedata, ::testing::HasSubstr("PASS"));
}
TEST(SubprocessTest, Drop) {
SubprocessOptions opts;
opts.executable = *program_name;
opts.args = {kSubprocessArg};
auto child = SpawnSubprocess(opts);
TENSORSTORE_ASSERT_OK(child.status());
child->Kill().IgnoreError();
}
TEST(SubprocessTest, Env) {
SubprocessOptions opts;
opts.executable = *program_name;
opts.args = {"--env=SUBPROCESS_TEST_ENV"};
opts.env = absl::flat_hash_map<std::string, std::string>({
#ifdef _WIN32
{"PATH", ::tensorstore::internal::GetEnv("PATH").value_or("")},
#endif
{"SUBPROCESS_TEST_ENV", "1"}});
auto child = SpawnSubprocess(opts);
ASSERT_TRUE(child.ok());
int exit_code;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(exit_code, child->Join());
EXPECT_EQ(exit_code, 41);
}
}
int main(int argc, char* argv[]) {
program_name = new std::string(argv[0]);
ABSL_LOG(INFO) << *program_name;
for (int i = 1; i < argc; i++) {
std::string_view argv_i(argv[i]);
if (argv_i == kSubprocessArg) {
printf("PASS\n");
return 33;
}
if (argv_i == kSleepArg) {
absl::SleepFor(absl::Seconds(1));
}
if (absl::StartsWith(argv_i, "--env=")) {
auto env_str = argv_i.substr(6);
if (env_str.empty()) {
return 40;
}
if (tensorstore::internal::GetEnv(env_str.data()).has_value()) {
return 41;
}
return 42;
}
}
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/os/subprocess.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/os/subprocess_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
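A hedged sketch of typical SpawnSubprocess usage outside the test harness: spawn a child with stdout and stderr redirected to a file, then block on Join for its exit code. RunAndWait, the --version argument and the log path are illustrative placeholders, not part of the library.
#include <string>

#include "tensorstore/internal/os/subprocess.h"
#include "tensorstore/util/result.h"

namespace {
using ::tensorstore::Result;
using ::tensorstore::internal::SpawnSubprocess;
using ::tensorstore::internal::SubprocessOptions;

// Runs `executable --version` with both output streams redirected to
// `log_path`, then waits for the child and returns its exit code.
Result<int> RunAndWait(const std::string& executable,
                       const std::string& log_path) {
  SubprocessOptions options;
  options.executable = executable;
  options.args = {"--version"};
  options.stdout_action = SubprocessOptions::Redirect{log_path};
  options.stderr_action = SubprocessOptions::Redirect{log_path};
  auto child = SpawnSubprocess(options);
  if (!child.ok()) return child.status();
  return child->Join();  // Blocks until the child exits.
}
}  // namespace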
f7259880-9f62-41fd-9467-e1f62aa32ba3 | cpp | google/tensorstore | estimate_heap_usage | tensorstore/internal/estimate_heap_usage/estimate_heap_usage.h | tensorstore/internal/estimate_heap_usage/estimate_heap_usage_test.cc | #ifndef TENSORSTORE_INTERNAL_ESTIMATE_HEAP_USAGE_ESTIMATE_HEAP_USAGE_H_
#define TENSORSTORE_INTERNAL_ESTIMATE_HEAP_USAGE_ESTIMATE_HEAP_USAGE_H_
#include <stddef.h>
#include <memory>
#include <string>
#include <type_traits>
#include "absl/strings/cord.h"
#include "tensorstore/util/apply_members/apply_members.h"
namespace tensorstore {
namespace internal {
template <typename T, typename SFINAE = void>
struct HeapUsageEstimator;
template <typename T, typename SFINAE = void>
constexpr inline bool MayUseHeapMemory = true;
template <typename T>
constexpr inline bool MayUseHeapMemory<
T, std::enable_if_t<
!std::is_trivially_destructible_v<T>,
std::void_t<decltype(&HeapUsageEstimator<T>::MayUseHeapMemory)>>> =
HeapUsageEstimator<T>::MayUseHeapMemory();
template <typename T>
constexpr inline bool
MayUseHeapMemory<T, std::enable_if_t<std::is_trivially_destructible_v<T>>> =
false;
template <typename T>
size_t EstimateHeapUsage(const T& x, size_t max_depth = -1) {
if constexpr (!MayUseHeapMemory<T>) {
return 0;
} else {
return HeapUsageEstimator<T>::EstimateHeapUsage(x, max_depth);
}
}
struct MayAnyUseHeapMemory {
template <typename... T>
constexpr auto operator()(const T&... arg) const {
return std::integral_constant<bool, (MayUseHeapMemory<T> || ...)>{};
}
};
template <typename T>
struct HeapUsageEstimator<T, std::enable_if_t<SupportsApplyMembers<T>>> {
static size_t EstimateHeapUsage(const T& v, size_t max_depth) {
return ApplyMembers<T>::Apply(v, [&](auto&&... x) {
return (internal::EstimateHeapUsage(x, max_depth) + ... +
static_cast<size_t>(0));
});
}
static constexpr bool MayUseHeapMemory() {
return decltype(ApplyMembers<T>::Apply(std::declval<const T&>(),
MayAnyUseHeapMemory{}))::value;
}
};
template <>
struct HeapUsageEstimator<std::string> {
static size_t EstimateHeapUsage(const std::string& x, size_t max_depth) {
return x.capacity();
}
};
template <>
struct HeapUsageEstimator<absl::Cord> {
static size_t EstimateHeapUsage(const absl::Cord& x, size_t max_depth) {
return x.size();
}
};
template <typename T>
struct PointerHeapUsageEstimator {
static size_t EstimateHeapUsage(const T& x, size_t max_depth) {
if (!x) return 0;
size_t total = sizeof(*x);
if (max_depth > 0) {
total += internal::EstimateHeapUsage(*x);
}
return total;
}
};
template <typename T>
struct HeapUsageEstimator<std::shared_ptr<T>>
: public PointerHeapUsageEstimator<std::shared_ptr<T>> {};
template <typename T>
struct HeapUsageEstimator<std::unique_ptr<T>>
: public PointerHeapUsageEstimator<std::unique_ptr<T>> {};
template <typename T, typename R>
class IntrusivePtr;
template <typename T, typename R>
struct HeapUsageEstimator<IntrusivePtr<T, R>>
: public PointerHeapUsageEstimator<IntrusivePtr<T, R>> {};
}
}
#endif | #include "tensorstore/internal/estimate_heap_usage/estimate_heap_usage.h"
#include <optional>
#include <tuple>
#include <variant>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/cord.h"
#include "tensorstore/internal/estimate_heap_usage/std_optional.h"
#include "tensorstore/internal/estimate_heap_usage/std_variant.h"
#include "tensorstore/internal/estimate_heap_usage/std_vector.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/util/apply_members/std_tuple.h"
namespace {
using ::tensorstore::internal::AtomicReferenceCount;
using ::tensorstore::internal::EstimateHeapUsage;
using ::tensorstore::internal::IntrusivePtr;
TEST(EstimateHeapUsageTest, Trivial) {
EXPECT_EQ(0, EstimateHeapUsage(5));
struct Trivial {};
EXPECT_EQ(0, EstimateHeapUsage(Trivial{}));
}
TEST(EstimateHeapUsageTest, String) {
std::string s(1000, 'x');
EXPECT_EQ(s.capacity(), EstimateHeapUsage(s));
}
TEST(EstimateHeapUsageTest, Cord) {
auto cord = absl::Cord(std::string(1000, 'x'));
EXPECT_EQ(cord.size(), EstimateHeapUsage(cord));
}
TEST(EstimateHeapUsageTest, Optional) {
EXPECT_EQ(0, EstimateHeapUsage(std::optional<int>()));
EXPECT_EQ(0, EstimateHeapUsage(std::optional<int>(42)));
EXPECT_EQ(0, EstimateHeapUsage(std::optional<std::string>()));
auto o = std::optional<std::string>(std::in_place, 1000, 'x');
EXPECT_EQ(o->capacity(), EstimateHeapUsage(o));
}
TEST(EstimateHeapUsageTest, UniquePtr) {
std::unique_ptr<int> ptr;
EXPECT_EQ(0, EstimateHeapUsage(ptr));
ptr.reset(new int);
EXPECT_EQ(sizeof(int), EstimateHeapUsage(ptr));
}
TEST(EstimateHeapUsageTest, SharedPtr) {
std::shared_ptr<int> ptr;
EXPECT_EQ(0, EstimateHeapUsage(ptr));
ptr.reset(new int);
EXPECT_EQ(sizeof(int), EstimateHeapUsage(ptr));
}
struct Foo : public AtomicReferenceCount<Foo> {
int x;
constexpr static auto ApplyMembers = [](auto& x, auto f) { return f(x.x); };
};
TEST(EstimateHeapUsageTest, IntrusivePtr) {
IntrusivePtr<Foo> ptr;
EXPECT_EQ(0, EstimateHeapUsage(ptr));
ptr.reset(new Foo);
EXPECT_EQ(sizeof(Foo), EstimateHeapUsage(ptr));
}
TEST(EstimateHeapUsageTest, Vector) {
std::vector<std::string> v;
v.push_back(std::string(1000, 'x'));
v.push_back(std::string(5000, 'x'));
size_t expected =
v[0].capacity() + v[1].capacity() + v.capacity() * sizeof(std::string);
EXPECT_EQ(expected, EstimateHeapUsage(v));
EXPECT_EQ(v.capacity() * sizeof(std::string), EstimateHeapUsage(v, 0));
}
TEST(EstimateHeapUsageTest, Composite) {
std::variant<std::vector<std::string>, std::vector<int>> v;
v = std::vector<std::string>({"a", "b"});
{
auto& string_vec = std::get<std::vector<std::string>>(v);
EXPECT_EQ(string_vec.capacity() * sizeof(std::string) +
string_vec[0].capacity() + string_vec[1].capacity(),
EstimateHeapUsage(v));
EXPECT_EQ(string_vec.capacity() * sizeof(std::string),
EstimateHeapUsage(v, 0));
}
v = std::vector<int>({1, 2, 3});
{
auto& int_vec = std::get<std::vector<int>>(v);
EXPECT_EQ(int_vec.capacity() * sizeof(int), EstimateHeapUsage(v));
}
}
TEST(EstimateHeapUsageTest, Tuple) {
auto t = std::tuple{std::string(1000, 'x'), std::string(5000, 'x')};
auto& [s0, s1] = t;
EXPECT_EQ(s0.capacity() + s1.capacity(), EstimateHeapUsage(t));
}
TEST(EstimateHeapUsageTest, Variant) {
using Variant = std::variant<int, std::string>;
EXPECT_EQ(0, EstimateHeapUsage(Variant(5)));
std::string s(1000, 'x');
size_t capacity = s.capacity();
EXPECT_EQ(capacity, EstimateHeapUsage(Variant(std::move(s))));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/estimate_heap_usage/estimate_heap_usage.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/estimate_heap_usage/estimate_heap_usage_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
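A sketch of how a user-defined aggregate opts into EstimateHeapUsage via the ApplyMembers protocol shown above; ChunkMetadata and MetadataHeapUsage are invented for illustration, and the expected total follows from the std::string and std::vector estimators exercised by the test.
#include <stddef.h>

#include <string>
#include <vector>

#include "tensorstore/internal/estimate_heap_usage/estimate_heap_usage.h"
#include "tensorstore/internal/estimate_heap_usage/std_vector.h"

namespace {
// A hypothetical aggregate: exposing members through ApplyMembers is enough
// for EstimateHeapUsage to sum the heap usage of each field.
struct ChunkMetadata {
  std::string key;
  std::vector<int> shape;
  constexpr static auto ApplyMembers = [](auto& x, auto f) {
    return f(x.key, x.shape);
  };
};

size_t MetadataHeapUsage(const ChunkMetadata& m) {
  // Evaluates to m.key.capacity() + m.shape.capacity() * sizeof(int).
  return tensorstore::internal::EstimateHeapUsage(m);
}
}  // namespace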
3c74241c-37bf-4b8c-8147-6df223966067 | cpp | google/tensorstore | stringify | tensorstore/internal/preprocessor/stringify.h | tensorstore/internal/preprocessor/stringify_test.cc | #ifndef TENSORSTORE_INTERNAL_PREPROCESSOR_STRINGIFY_H_
#define TENSORSTORE_INTERNAL_PREPROCESSOR_STRINGIFY_H_
#define TENSORSTORE_PP_STRINGIFY(...) TENSORSTORE_PP_STRINGIFY_IMPL(__VA_ARGS__)
#define TENSORSTORE_PP_STRINGIFY_IMPL(...) #__VA_ARGS__
#endif | #include "tensorstore/internal/preprocessor/stringify.h"
#include <string_view>
namespace {
inline constexpr bool Equal(std::string_view a, std::string_view b) {
return a == b;
}
#define X abc
#define Y abc, def
static_assert(Equal(TENSORSTORE_PP_STRINGIFY(X), "abc"));
static_assert(Equal(TENSORSTORE_PP_STRINGIFY(Y), "abc, def"));
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/preprocessor/stringify.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/preprocessor/stringify_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
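A small illustration of why the macro stringifies through a second expansion level; the VERSION macro is hypothetical. Applying the # operator directly would produce the spelling of the argument rather than its expansion.
#include <string_view>

#include "tensorstore/internal/preprocessor/stringify.h"

#define VERSION 1.2.3

// Plain `#x` would yield the spelling "VERSION"; the extra expansion step
// inside TENSORSTORE_PP_STRINGIFY yields the macro's replacement text.
static_assert(std::string_view(TENSORSTORE_PP_STRINGIFY(VERSION)) == "1.2.3");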
e2240d47-0a71-4308-980c-dd7f9b170555 | cpp | google/tensorstore | cat | tensorstore/internal/preprocessor/cat.h | tensorstore/internal/preprocessor/cat_test.cc | #ifndef TENSORSTORE_INTERNAL_PREPROCESSOR_CAT_H_
#define TENSORSTORE_INTERNAL_PREPROCESSOR_CAT_H_
#define TENSORSTORE_PP_CAT(a, b) TENSORSTORE_INTERNAL_PP_CAT1(a, b)
#define TENSORSTORE_INTERNAL_PP_CAT1(a, b) a##b
#endif | #include "tensorstore/internal/preprocessor/cat.h"
#include <string_view>
#include "tensorstore/internal/preprocessor/stringify.h"
namespace {
inline constexpr bool Equal(std::string_view a, std::string_view b) {
return a == b;
}
#define X abc
#define Y def
static_assert(Equal(TENSORSTORE_PP_STRINGIFY(TENSORSTORE_PP_CAT(X, Y)),
"abcdef"));
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/preprocessor/cat.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/preprocessor/cat_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
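A sketch of the usual motivation for the two-level concatenation macro: pasting tokens that are themselves macros, such as __LINE__. The UNIQUE_GUARD macro below is invented for illustration; only TENSORSTORE_PP_CAT comes from the header above.
#include "tensorstore/internal/preprocessor/cat.h"

// Declares a uniquely named constant per use site. Without the inner
// expansion step, the pasted token would be `unused_guard___LINE__` instead
// of, e.g., `unused_guard_42`.
#define UNIQUE_GUARD() \
  static const int TENSORSTORE_PP_CAT(unused_guard_, __LINE__) = 0

UNIQUE_GUARD();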
babaf6da-6250-4244-8550-c1c937e9a5b0 | cpp | google/tensorstore | poly | tensorstore/internal/poly/poly.h | tensorstore/internal/poly/poly_test.cc | #ifndef TENSORSTORE_INTERNAL_POLY_POLY_H_
#define TENSORSTORE_INTERNAL_POLY_POLY_H_
#include <cstddef>
#include <type_traits>
#include <typeinfo>
#include <utility>
#include "absl/meta/type_traits.h"
#include "tensorstore/internal/poly/poly_impl.h"
namespace tensorstore {
namespace poly {
template <typename T, typename... Signature>
using SupportsPolySignatures =
std::conjunction<typename internal_poly::SignatureTraits<
Signature>::template IsSupportedBy<T>...>;
template <size_t InlineSize, bool Copyable, typename... Signature>
class Poly;
template <typename T>
struct IsPoly : public std::false_type {};
template <size_t InlineSize, bool Copyable, typename... Signature>
struct IsPoly<Poly<InlineSize, Copyable, Signature...>>
: public std::true_type {};
template <typename T, bool Copyable, typename... Signature>
struct IsCompatibleWithPoly : public SupportsPolySignatures<T, Signature...> {};
template <typename T, typename... Signature>
struct IsCompatibleWithPoly<T, true, Signature...>
: public std::integral_constant<
bool, (std::is_copy_constructible<T>::value &&
SupportsPolySignatures<T, Signature...>::value)> {};
template <size_t InlineSize_, bool Copyable, typename... Signature>
class Poly
: private internal_poly::PolyImpl<Poly<InlineSize_, Copyable, Signature...>,
Signature...> {
template <typename, typename...>
friend class internal_poly::PolyImpl;
template <size_t, bool, typename...>
friend class Poly;
static constexpr size_t InlineSize =
internal_poly_storage::ActualInlineSize(InlineSize_);
using Storage = internal_poly_storage::Storage<InlineSize, Copyable>;
using Base = internal_poly::PolyImpl<Poly, Signature...>;
using VTable = internal_poly::VTableType<Signature...>;
template <typename Self>
using VTInstance =
internal_poly::VTableInstance<typename Storage::template Ops<Self>,
Copyable, Signature...>;
template <typename... S>
using HasConvertibleVTable =
std::is_convertible<internal_poly::VTableType<S...>, VTable>;
public:
template <typename T>
using IsCompatible =
std::disjunction<std::is_same<Poly, T>,
IsCompatibleWithPoly<T, Copyable, Signature...>>;
template <typename T>
using IsCompatibleAndConstructible =
std::disjunction<
std::is_same<Poly, absl::remove_cvref_t<T>>,
std::conjunction<
IsCompatibleWithPoly<absl::remove_cvref_t<T>, Copyable,
Signature...>,
std::is_constructible<absl::remove_cvref_t<T>, T&&>>>;
Poly() = default;
Poly(std::nullptr_t) noexcept {}
template <typename T,
std::enable_if_t<IsCompatibleAndConstructible<T>::value>* = nullptr>
Poly(T&& obj) {
Construct(std::in_place_type_t<absl::remove_cvref_t<T>>{},
std::forward<T>(obj));
}
template <typename T, typename... U,
std::enable_if_t<(IsCompatible<T>::value &&
std::is_constructible_v<T, U&&...>)>* = nullptr>
Poly(std::in_place_type_t<T> in_place, U&&... arg) {
Construct(in_place, std::forward<U>(arg)...);
}
Poly(const Poly&) = default;
Poly(Poly&&) = default;
Poly& operator=(const Poly&) = default;
Poly& operator=(Poly&&) noexcept = default;
template <typename T,
std::enable_if_t<IsCompatibleAndConstructible<T>::value>* = nullptr>
Poly& operator=(T&& obj) {
emplace(std::forward<T>(obj));
return *this;
}
Poly& operator=(std::nullptr_t) noexcept {
storage_.Destroy();
return *this;
}
template <typename T, typename... U,
std::enable_if_t<(IsCompatible<T>::value &&
std::is_constructible_v<T, U&&...>)>* = nullptr>
void emplace(U&&... arg) {
storage_.Destroy();
Construct(std::in_place_type_t<T>{}, std::forward<U>(arg)...);
}
template <typename T,
std::enable_if_t<IsCompatibleAndConstructible<T>::value>* = nullptr>
void emplace(T&& obj) {
storage_.Destroy();
Construct(std::in_place_type_t<absl::remove_cvref_t<T>>{},
std::forward<T>(obj));
}
using Base::operator();
explicit operator bool() const { return !storage_.null(); }
template <typename T>
T* target() {
return storage_.template get_if<T>();
}
template <typename T>
const T* target() const {
return storage_.template get_if<T>();
}
friend bool operator==(std::nullptr_t, const Poly& poly) {
return static_cast<bool>(poly) == false;
}
friend bool operator!=(std::nullptr_t, const Poly& poly) {
return static_cast<bool>(poly) == true;
}
friend bool operator==(const Poly& poly, std::nullptr_t) {
return static_cast<bool>(poly) == false;
}
friend bool operator!=(const Poly& poly, std::nullptr_t) {
return static_cast<bool>(poly) == true;
}
private:
template <typename T, typename... U>
std::enable_if_t<!IsPoly<T>::value> Construct(std::in_place_type_t<T>,
U&&... arg) {
return storage_.template ConstructT<T>(&VTInstance<T>::vtable,
static_cast<U&&>(arg)...);
}
template <size_t ISize, bool C, typename... S, typename T>
void Construct(std::in_place_type_t<Poly<ISize, C, S...>>, T&& poly) {
if constexpr (internal_poly_storage::ActualInlineSize(ISize) <=
InlineSize &&
HasConvertibleVTable<S...>::value) {
if constexpr (std::is_lvalue_reference_v<decltype(poly)>) {
storage_.CopyConstruct(std::forward<T>(poly).storage_);
} else {
storage_.Construct(std::forward<T>(poly).storage_);
}
} else {
storage_.template ConstructT<Poly<ISize, C, S...>>(
&VTInstance<Poly<ISize, C, S...>>::vtable, std::forward<T>(poly));
}
}
Storage storage_;
};
}
}
#endif | #include "tensorstore/internal/poly/poly.h"
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <type_traits>
#include <utility>
#include <gtest/gtest.h>
#include "absl/functional/function_ref.h"
#include "tensorstore/util/result.h"
namespace {
using ::tensorstore::internal_poly::CallPolyApplyResult;
using ::tensorstore::internal_poly::HasPolyApply;
using ::tensorstore::internal_poly::IsCallPolyApplyResultConvertible;
using ::tensorstore::poly::Poly;
struct GetWidth {};
struct GetHeight {};
struct Scale {};
using PolyRectangle = Poly<sizeof(double), true, double(GetWidth) const,
double(GetHeight) const, void(Scale, double scalar)>;
struct Rectangle {
double width;
double height;
double operator()(GetWidth) const { return width; }
double operator()(GetHeight) const { return height; }
void operator()(Scale, double scalar) {
width *= scalar;
height *= scalar;
}
};
struct Square {
double size;
double operator()(GetWidth) const { return size; }
double operator()(GetHeight) const { return size; }
};
void PolyApply(Square& self, Scale, double scalar) { self.size *= scalar; }
template <typename T, typename P>
bool IsStoredInline(P& p) {
auto min = reinterpret_cast<uintptr_t>(&p);
auto t = reinterpret_cast<uintptr_t>(p.template target<T>());
return t >= min && t <= (min + sizeof(p));
}
TEST(PolyTest, Example) {
PolyRectangle square = Square{5};
EXPECT_EQ(5, square(GetWidth{}));
EXPECT_EQ(5, square(GetHeight{}));
square(Scale{}, 2);
EXPECT_EQ(10, square(GetWidth{}));
EXPECT_EQ(10, square(GetHeight{}));
PolyRectangle rect = Rectangle{6, 7};
EXPECT_EQ(6, rect(GetWidth{}));
EXPECT_EQ(7, rect(GetHeight{}));
rect(Scale{}, 2);
EXPECT_EQ(12, rect(GetWidth{}));
EXPECT_EQ(14, rect(GetHeight{}));
}
TEST(PolyTest, Interface) {
class RectangleInterface {
public:
RectangleInterface(PolyRectangle poly) : poly(std::move(poly)) {}
operator PolyRectangle() { return poly; }
double GetHeight() const { return poly(::GetHeight{}); }
double GetWidth() const { return poly(::GetWidth{}); }
double GetArea() const { return GetHeight() * GetWidth(); }
void Scale(double scalar) { poly(::Scale{}, scalar); }
private:
PolyRectangle poly;
};
{
RectangleInterface rect(Square{5});
EXPECT_EQ(5, rect.GetWidth());
EXPECT_EQ(5, rect.GetHeight());
EXPECT_EQ(25, rect.GetArea());
rect.Scale(2);
EXPECT_EQ(10, rect.GetWidth());
EXPECT_EQ(10, rect.GetHeight());
}
{
RectangleInterface rect(Rectangle{6, 7});
EXPECT_EQ(6, rect.GetWidth());
EXPECT_EQ(7, rect.GetHeight());
EXPECT_EQ(42, rect.GetArea());
rect.Scale(2);
EXPECT_EQ(12, rect.GetWidth());
EXPECT_EQ(14, rect.GetHeight());
}
}
std::string Foo(Poly<0, true, std::string()> poly) { return "Text: " + poly(); }
int Foo(Poly<0, true, int()> poly) { return 3 + poly(); }
TEST(PolyTest, ConstructorOverloadResolution) {
EXPECT_EQ(6, Foo([] { return 3; }));
EXPECT_EQ("Text: Message", Foo([] { return "Message"; }));
}
struct Add {
std::shared_ptr<int> value;
Add(std::shared_ptr<int> value) : value(value) {}
template <typename T>
T operator()(T x) const {
return x + *value;
}
};
TEST(PolyTest, DefaultConstruct) {
Poly<1, true, int(int)&, float(float)&> poly;
EXPECT_FALSE(poly);
EXPECT_EQ(nullptr, poly.target<Add>());
const auto& const_poly = poly;
EXPECT_EQ(nullptr, const_poly.target<Add>());
}
TEST(PolyTest, NullptrConstruct) {
Poly<1, true, int(int)&, float(float)&> poly(nullptr);
EXPECT_FALSE(poly);
}
TEST(PolyTest, NullCopy) {
Poly<1, true, int(int)&, float(float)&> poly;
EXPECT_FALSE(poly);
auto poly2 = poly;
EXPECT_FALSE(poly2);
}
TEST(PolyTest, InlineConstruct) {
auto amount = std::make_shared<int>(1);
{
Poly<sizeof(Add), true, int(int)&, double(double)&> poly(Add{amount});
EXPECT_EQ(2, amount.use_count());
EXPECT_TRUE(poly);
EXPECT_TRUE(IsStoredInline<Add>(poly));
auto* contained_obj = poly.target<Add>();
ASSERT_NE(nullptr, contained_obj);
EXPECT_EQ(amount, contained_obj->value);
EXPECT_EQ(3, poly(2));
EXPECT_EQ(3.5, poly(2.5));
}
EXPECT_EQ(1, amount.use_count());
}
TEST(PolyTest, ConstructInplace) {
auto amount = std::make_shared<int>(1);
{
Poly<sizeof(Add), true, int(int)&, double(double)&> poly(
std::in_place_type_t<Add>{}, amount);
EXPECT_EQ(2, amount.use_count());
EXPECT_TRUE(poly);
EXPECT_EQ(3, poly(2));
EXPECT_EQ(3.5, poly(2.5));
}
EXPECT_EQ(1, amount.use_count());
}
TEST(PolyTest, Emplace) {
auto amount = std::make_shared<int>(1);
Poly<sizeof(Add), true, int(int)&, double(double)&> poly;
poly.emplace(Add{amount});
EXPECT_TRUE(poly);
EXPECT_EQ(2, amount.use_count());
EXPECT_EQ(3, poly(2));
EXPECT_EQ(3.5, poly(2.5));
auto amount2 = std::make_shared<int>(2);
poly.emplace(Add{amount2});
EXPECT_TRUE(poly);
EXPECT_EQ(1, amount.use_count());
EXPECT_EQ(2, amount2.use_count());
EXPECT_EQ(4, poly(2));
EXPECT_EQ(4.5, poly(2.5));
}
TEST(PolyTest, EmplaceInplace) {
auto amount = std::make_shared<int>(1);
Poly<sizeof(Add), true, int(int)&, double(double)&> poly;
poly.emplace<Add>(amount);
EXPECT_TRUE(poly);
EXPECT_EQ(2, amount.use_count());
EXPECT_EQ(3, poly(2));
EXPECT_EQ(3.5, poly(2.5));
auto amount2 = std::make_shared<int>(2);
poly.emplace<Add>(amount2);
EXPECT_TRUE(poly);
EXPECT_EQ(1, amount.use_count());
EXPECT_EQ(2, amount2.use_count());
EXPECT_EQ(4, poly(2));
EXPECT_EQ(4.5, poly(2.5));
}
TEST(PolyTest, AssignNullptr) {
auto amount = std::make_shared<int>(1);
Poly<sizeof(Add), true, int(int)&, double(double)&> poly(Add{amount});
EXPECT_EQ(2, amount.use_count());
EXPECT_TRUE(poly);
poly = nullptr;
EXPECT_EQ(1, amount.use_count());
EXPECT_FALSE(poly);
}
TEST(PolyTest, AssignObject) {
auto amount = std::make_shared<int>(1);
Poly<sizeof(Add), true, int(int)&, double(double)&> poly(Add{amount});
EXPECT_TRUE(poly);
EXPECT_EQ(2, amount.use_count());
EXPECT_EQ(3, poly(2));
EXPECT_EQ(3.5, poly(2.5));
auto amount2 = std::make_shared<int>(2);
poly = Add{amount2};
EXPECT_TRUE(poly);
EXPECT_EQ(1, amount.use_count());
EXPECT_EQ(2, amount2.use_count());
EXPECT_EQ(4, poly(2));
EXPECT_EQ(4.5, poly(2.5));
}
TEST(PolyTest, CopyAssign) {
auto amount = std::make_shared<int>(1);
Poly<sizeof(Add), true, int(int)&, double(double)&> poly(Add{amount});
EXPECT_TRUE(poly);
EXPECT_EQ(2, amount.use_count());
auto amount2 = std::make_shared<int>(2);
Poly<sizeof(Add), true, int(int)&, double(double)&> poly2(Add{amount2});
EXPECT_TRUE(poly2);
EXPECT_EQ(2, amount2.use_count());
poly2 =
static_cast<const Poly<sizeof(Add), true, int(int)&, double(double)&>&>(
poly);
EXPECT_EQ(1, amount2.use_count());
EXPECT_EQ(3, amount.use_count());
EXPECT_EQ(3, poly(2));
EXPECT_EQ(3.5, poly(2.5));
}
TEST(PolyTest, InlineMove) {
auto amount = std::make_shared<int>(1);
{
Poly<sizeof(Add), true, int(int)&, double(double)&> poly(Add{amount});
EXPECT_TRUE(poly);
EXPECT_EQ(2, amount.use_count());
auto poly2 = std::move(poly);
EXPECT_TRUE(poly2);
EXPECT_FALSE(poly);
EXPECT_EQ(2, amount.use_count());
EXPECT_EQ(3, poly2(2));
EXPECT_EQ(3.5, poly2(2.5));
}
EXPECT_EQ(1, amount.use_count());
}
TEST(PolyTest, InlineCopy) {
auto amount = std::make_shared<int>(1);
{
Poly<sizeof(Add), true, int(int)&, double(double)&> poly(Add{amount});
EXPECT_TRUE(poly);
EXPECT_EQ(2, amount.use_count());
EXPECT_TRUE(IsStoredInline<Add>(poly));
auto poly2 = poly;
EXPECT_TRUE(poly2);
EXPECT_TRUE(IsStoredInline<Add>(poly2));
EXPECT_TRUE(poly);
EXPECT_EQ(3, amount.use_count());
EXPECT_EQ(3, poly2(2));
EXPECT_EQ(3.5, poly2(2.5));
}
EXPECT_EQ(1, amount.use_count());
}
TEST(PolyTest, HeapConstruct) {
auto amount = std::make_shared<int>(1);
{
Poly<0, true, int(int)&, double(double)&> poly(Add{amount});
EXPECT_TRUE(poly);
EXPECT_TRUE(poly.target<Add>());
EXPECT_FALSE(IsStoredInline<Add>(poly));
EXPECT_EQ(amount, poly.target<Add>()->value);
EXPECT_EQ(2, amount.use_count());
EXPECT_EQ(3, poly(2));
EXPECT_EQ(3.5, poly(2.5));
}
EXPECT_EQ(1, amount.use_count());
}
TEST(PolyTest, HeapMove) {
auto amount = std::make_shared<int>(1);
{
Poly<0, true, int(int)&, double(double)&> poly(Add{amount});
EXPECT_TRUE(poly);
EXPECT_EQ(2, amount.use_count());
EXPECT_FALSE(IsStoredInline<Add>(poly));
auto poly2 = std::move(poly);
EXPECT_TRUE(poly2);
EXPECT_FALSE(poly);
EXPECT_EQ(2, amount.use_count());
EXPECT_EQ(3, poly2(2));
EXPECT_EQ(3.5, poly2(2.5));
}
EXPECT_EQ(1, amount.use_count());
}
TEST(PolyTest, HeapCopy) {
auto amount = std::make_shared<int>(1);
{
Poly<0, true, int(int)&, double(double)&> poly(Add{amount});
EXPECT_TRUE(poly);
EXPECT_EQ(2, amount.use_count());
EXPECT_FALSE(IsStoredInline<Add>(poly));
auto poly2 = poly;
EXPECT_TRUE(poly2);
EXPECT_TRUE(poly);
EXPECT_EQ(3, amount.use_count());
EXPECT_EQ(3, poly2(2));
EXPECT_EQ(3.5, poly2(2.5));
}
EXPECT_EQ(1, amount.use_count());
}
struct AddPolyApply {
std::shared_ptr<int> value;
template <typename T>
friend T PolyApply(const AddPolyApply& self, T x) {
return x + *self.value;
}
};
static_assert(HasPolyApply<AddPolyApply, int>);
static_assert(!HasPolyApply<AddPolyApply, int, int>);
static_assert(!HasPolyApply<Add, int>);
static_assert(!HasPolyApply<Add, int, int>);
static_assert(std::is_same_v<CallPolyApplyResult<AddPolyApply, int>, int>);
static_assert(
std::is_same_v<CallPolyApplyResult<AddPolyApply, double>, double>);
static_assert(std::is_same_v<CallPolyApplyResult<Add, int>, int>);
static_assert(std::is_same_v<CallPolyApplyResult<Add, double>, double>);
static_assert(IsCallPolyApplyResultConvertible<Add, int, double>::value);
static_assert(IsCallPolyApplyResultConvertible<Add, double, double>::value);
static_assert(!IsCallPolyApplyResultConvertible<Add, int*, double>::value);
static_assert(IsCallPolyApplyResultConvertible<Add, void, double>::value);
static_assert(!IsCallPolyApplyResultConvertible<Add, void, int, int>::value);
static_assert(!IsCallPolyApplyResultConvertible<Add, int, int, int>::value);
static_assert(
IsCallPolyApplyResultConvertible<AddPolyApply, int, double>::value);
static_assert(
IsCallPolyApplyResultConvertible<AddPolyApply, double, double>::value);
static_assert(IsCallPolyApplyResultConvertible<AddPolyApply, void, int>::value);
static_assert(
!IsCallPolyApplyResultConvertible<AddPolyApply, int*, int>::value);
static_assert(
!IsCallPolyApplyResultConvertible<AddPolyApply, void, int, int>::value);
TEST(PolyTest, PolyApply) {
auto amount = std::make_shared<int>(1);
{
Poly<sizeof(AddPolyApply), true, int(int)&, double(double)&> poly(
AddPolyApply{amount});
EXPECT_EQ(2, amount.use_count());
EXPECT_TRUE(poly);
EXPECT_EQ(3, poly(2));
EXPECT_EQ(3.5, poly(2.5));
}
EXPECT_EQ(1, amount.use_count());
}
TEST(PolyTest, MoveOnly) {
struct Callable {
std::unique_ptr<int> value;
int operator()() const { return *value; }
};
using PolyT = Poly<sizeof(Callable), false, int() const>;
static_assert(!std::is_constructible_v<Poly<0, true, int() const>, Callable>);
static_assert(std::is_constructible_v<Poly<0, false, int() const>, Callable>);
PolyT poly(Callable{std::unique_ptr<int>(new int(5))});
auto poly2 = std::move(poly);
EXPECT_FALSE(poly);
EXPECT_EQ(5, poly2());
}
struct IntGetterSetter {
int operator()() { return value; }
void operator()(int v) { value = v; }
int value;
};
TEST(PolyTest, CopyConstructFromPolyWithIncompatibleVTable) {
auto amount = std::make_shared<int>(1);
{
using Poly1 = Poly<sizeof(Add), true, int(int)&, double(double)&>;
using Poly2 = Poly<sizeof(Add), true, double(double)&, int(int)&>;
Poly1 poly(Add{amount});
EXPECT_EQ(2, amount.use_count());
EXPECT_TRUE(poly.target<Add>());
Poly2 poly2 = poly;
EXPECT_TRUE(poly2);
EXPECT_FALSE(poly2.target<Add>());
EXPECT_TRUE(poly2.target<Poly1>());
EXPECT_EQ(3, amount.use_count());
EXPECT_EQ(3, poly2(2));
EXPECT_EQ(3.5, poly2(2.5));
}
EXPECT_EQ(1, amount.use_count());
}
TEST(PolyTest, MoveConstructFromPolyWithIncompatibleVTable) {
auto amount = std::make_shared<int>(1);
{
using Poly1 = Poly<sizeof(Add), true, int(int)&, double(double)&>;
using Poly2 = Poly<sizeof(Add), true, double(double)&, int(int)&>;
Poly1 poly(Add{amount});
EXPECT_EQ(2, amount.use_count());
Poly2 poly2 = std::move(poly);
EXPECT_FALSE(poly);
EXPECT_FALSE(poly2.target<Add>());
EXPECT_TRUE(poly2.target<Poly1>());
EXPECT_TRUE(poly2);
EXPECT_EQ(2, amount.use_count());
EXPECT_EQ(3, poly2(2));
EXPECT_EQ(3.5, poly2(2.5));
}
EXPECT_EQ(1, amount.use_count());
}
TEST(PolyTest, EmplaceFromPolyWithIncompatibleVTable) {
auto amount = std::make_shared<int>(1);
{
using Poly1 = Poly<sizeof(Add), true, int(int)&, double(double)&>;
using Poly2 = Poly<sizeof(Add), true, double(double)&, int(int)&>;
Poly1 poly(Add{amount});
EXPECT_EQ(2, amount.use_count());
Poly2 poly2;
poly2.emplace(std::move(poly));
EXPECT_FALSE(poly);
EXPECT_FALSE(poly2.target<Add>());
EXPECT_TRUE(poly2.target<Poly1>());
EXPECT_TRUE(poly2);
EXPECT_EQ(2, amount.use_count());
EXPECT_EQ(3, poly2(2));
EXPECT_EQ(3.5, poly2(2.5));
}
EXPECT_EQ(1, amount.use_count());
}
TEST(PolyTest, CopyConstructFromPolyWithCompatibleVTable) {
Poly<0, true, void(int), int()> poly1 = IntGetterSetter{5};
EXPECT_EQ(5, poly1());
poly1(6);
EXPECT_EQ(6, poly1());
Poly<0, true, int()> poly2{poly1};
EXPECT_TRUE(poly2.target<IntGetterSetter>());
EXPECT_EQ(6, poly2());
}
TEST(PolyTest, MoveConstructFromPolyWithCompatibleVTable) {
Poly<0, true, void(int), int()> poly1 = IntGetterSetter{5};
EXPECT_EQ(5, poly1());
poly1(6);
EXPECT_EQ(6, poly1());
Poly<0, true, int()> poly2{std::move(poly1)};
EXPECT_TRUE(poly2.target<IntGetterSetter>());
EXPECT_EQ(6, poly2());
EXPECT_FALSE(poly1);
}
TEST(PolyTest, EmplacePolyWithCompatibleVTable) {
Poly<0, true, void(int), int()> poly1 = IntGetterSetter{5};
EXPECT_EQ(5, poly1());
poly1(6);
EXPECT_EQ(6, poly1());
Poly<0, true, int()> poly2;
poly2.emplace(std::move(poly1));
EXPECT_TRUE(poly2.target<IntGetterSetter>());
EXPECT_EQ(6, poly2());
EXPECT_FALSE(poly1);
}
template <typename T>
using SinglePoly = Poly<0, false, T>;
template <template <typename> class OptionalLike,
template <typename> class FunctionLike>
void TestAvoidsSfinaeLoop() {
using Poly1 = FunctionLike<void()>;
using Poly2 = FunctionLike<OptionalLike<Poly1>()>;
struct X {
void operator()() const {}
};
struct Y {
OptionalLike<Poly1> operator()() const { return X{}; }
};
auto use_poly2 = [](Poly2) { };
use_poly2(Poly2{Y{}});
}
TEST(PolyTest, AvoidsSfinaeLoop) {
TestAvoidsSfinaeLoop<tensorstore::Result, absl::FunctionRef>();
TestAvoidsSfinaeLoop<tensorstore::Result, std::function>();
TestAvoidsSfinaeLoop<std::optional, SinglePoly>();
TestAvoidsSfinaeLoop<tensorstore::Result, SinglePoly>();
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/poly/poly.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/poly/poly_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
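A sketch of defining a small type-erased interface with Poly outside the test, assuming the poly.h shown above; LogSink, ToBuffer and the 32-byte inline size are illustrative choices, not library names.
#include <iostream>
#include <string>
#include <string_view>

#include "tensorstore/internal/poly/poly.h"

namespace {
// A copyable, type-erased "log sink" supporting one call signature; small
// callables are stored inline, larger ones fall back to the heap.
using LogSink = tensorstore::poly::Poly<32, /*Copyable=*/true,
                                        void(std::string_view) const>;

struct ToBuffer {
  std::string* out;
  void operator()(std::string_view message) const { out->append(message); }
};

void Demo() {
  std::string buffer;
  LogSink sink = ToBuffer{&buffer};
  sink("hello ");
  sink = [](std::string_view m) { std::cerr << m; };  // Rebind to a lambda.
  sink("world\n");
}
}  // namespace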
7f8c9751-eda1-48e8-8c1c-ed96540f619b | cpp | google/tensorstore | key_range_map | tensorstore/kvstore/kvstack/key_range_map.h | tensorstore/kvstore/kvstack/key_range_map_test.cc | #ifndef TENSORSTORE_KVSTORE_KVSTACK_RANGE_MAP_H_
#define TENSORSTORE_KVSTORE_KVSTACK_RANGE_MAP_H_
#include <cassert>
#include <iterator>
#include <string>
#include <string_view>
#include <utility>
#include "absl/container/btree_set.h"
#include "tensorstore/kvstore/key_range.h"
namespace tensorstore {
namespace internal_kvstack {
template <typename V>
class KeyRangeMap {
struct Compare;
public:
struct Value {
KeyRange range;
V value;
};
using value_type = Value;
using const_iterator =
typename absl::btree_set<Value, Compare>::const_iterator;
const_iterator begin() const { return table_.begin(); }
const_iterator end() const { return table_.end(); }
// Returns an iterator to the range containing `key`, or `end()` if no range
// contains it (including when the map is empty).
const_iterator range_containing(std::string_view key) const {
auto it = range_containing_impl(key);
return (it != table_.end() && Contains(it->range, key)) ? it : table_.end();
}
template <typename V2>
void Set(KeyRange range, V2&& value) {
Erase(range);
[[maybe_unused]] auto insert_result =
table_.insert(Value{std::move(range), std::forward<V2>(value)});
assert(insert_result.second);
}
void Erase(const KeyRange& range) {
const_iterator it = range_containing_impl(range.inclusive_min);
if (it != table_.end()) {
if (range.inclusive_min > it->range.inclusive_min) {
std::string tmp = range.inclusive_min;
std::swap(const_cast<KeyRange&>(it->range).exclusive_max, tmp);
it = table_.insert(
it,
Value{KeyRange(range.inclusive_min, std::move(tmp)), it->value});
}
while (it != table_.end() && Contains(range, it->range)) {
it = table_.erase(it);
}
if (it != table_.end() && it->range.inclusive_min < range.exclusive_max) {
#if 0
auto node = table_.extract(it);
node.value().range.inclusive_min = range.exclusive_max;
auto insert_result = table_.insert(std::move(node));
assert(insert_result.inserted);
#endif
const_cast<KeyRange&>(it->range).inclusive_min = range.exclusive_max;
}
}
}
template <typename Fn>
void VisitRange(const KeyRange& range, Fn&& fn) const {
if (range.empty()) return;
auto it = range_containing_impl(range.inclusive_min);
auto end = range.exclusive_max.empty()
? table_.end()
: table_.lower_bound(range.exclusive_max);
for (; it != end; ++it) {
KeyRange intersect = Intersect(range, it->range);
if (!intersect.empty()) {
fn(intersect, it->value);
}
}
}
private:
const_iterator range_containing_impl(std::string_view key) const {
const_iterator it = table_.lower_bound(key);
if (it == table_.end() || it->range.inclusive_min > key) {
if (it != table_.begin() && !table_.empty()) {
return std::prev(it);
}
}
return it;
}
struct Compare {
using is_transparent = void;
bool operator()(const Value& a, const Value& b) const {
return a.range.inclusive_min < b.range.inclusive_min;
}
bool operator()(const Value& a, std::string_view b) const {
return a.range.inclusive_min < b;
}
bool operator()(std::string_view a, const Value& b) const {
return a < b.range.inclusive_min;
}
};
absl::btree_set<Value, Compare> table_;
};
}
}
#endif | #include "tensorstore/kvstore/kvstack/key_range_map.h"
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/absl_log.h"
#include "tensorstore/kvstore/key_range.h"
using ::tensorstore::KeyRange;
using ::tensorstore::internal_kvstack::KeyRangeMap;
namespace tensorstore {
namespace internal_kvstack {
using IntRange = KeyRangeMap<int>::Value;
bool operator==(const IntRange& a, const IntRange& b) {
return a.range == b.range && a.value == b.value;
}
bool operator!=(const IntRange& a, const IntRange& b) { return !(a == b); }
// Lets GoogleTest print IntRange values through the AbslStringify extension.
template <typename Sink>
void AbslStringify(Sink& sink, const IntRange& r) {
absl::Format(&sink, "{%s, %s}", r.range.inclusive_min, r.range.exclusive_max);
}
}
}
namespace {
using IntRange = ::tensorstore::internal_kvstack::IntRange;
TEST(RangeMapTest, Dense) {
KeyRangeMap<int> m;
m.Set(KeyRange({}, {}), 1);
m.Set(KeyRange("a", "z"), 20);
m.Set(KeyRange::Prefix("a"), 30);
m.Set(KeyRange::Singleton("a/b.c"), 40);
m.Set(KeyRange::Prefix("a/b"), 50);
EXPECT_THAT(m, testing::ElementsAre(IntRange{KeyRange("", "a"), 1},
IntRange{KeyRange("a", "a/b"), 30},
IntRange{KeyRange("a/b", "a/c"), 50},
IntRange{KeyRange("a/c", "b"), 30},
IntRange{KeyRange("b", "z"), 20},
IntRange{KeyRange("z", ""), 1}));
ASSERT_NE(m.range_containing(""), m.end());
EXPECT_THAT(*m.range_containing(""), (IntRange{KeyRange("", "a"), 1}));
ASSERT_NE(m.range_containing("a"), m.end());
EXPECT_THAT(*m.range_containing("a"), (IntRange{KeyRange("a", "a/b"), 30}));
ASSERT_NE(m.range_containing("z"), m.end());
EXPECT_THAT(*m.range_containing("z"), (IntRange{KeyRange("z", ""), 1}));
ASSERT_NE(m.range_containing("b"), m.end());
EXPECT_THAT(*m.range_containing("b"), (IntRange{KeyRange("b", "z"), 20}));
ASSERT_NE(m.range_containing("d"), m.end());
EXPECT_THAT(*m.range_containing("d"), (IntRange{KeyRange("b", "z"), 20}));
ASSERT_NE(m.range_containing("a/d"), m.end());
EXPECT_THAT(*m.range_containing("a/d"), (IntRange{KeyRange("a/c", "b"), 30}));
{
std::vector<KeyRange> ranges;
m.VisitRange(KeyRange("", ""),
[&](KeyRange r, auto& value) { ranges.push_back(r); });
EXPECT_THAT(ranges, testing::ElementsAre(
KeyRange("", "a"), KeyRange("a", "a/b"),
KeyRange("a/b", "a/c"), KeyRange("a/c", "b"),
KeyRange("b", "z"), KeyRange("z", "")));
}
{
std::vector<KeyRange> ranges;
m.VisitRange(KeyRange::EmptyRange(),
[&](KeyRange r, auto& value) { ranges.push_back(r); });
EXPECT_TRUE(ranges.empty());
}
{
std::vector<KeyRange> ranges;
m.VisitRange(KeyRange("", "a/z"),
[&](KeyRange r, auto& value) { ranges.push_back(r); });
EXPECT_THAT(ranges, testing::ElementsAre(
KeyRange("", "a"), KeyRange("a", "a/b"),
KeyRange("a/b", "a/c"), KeyRange("a/c", "a/z")));
}
}
TEST(RangeMapTest, Sparse) {
KeyRangeMap<int> m;
m.Set(KeyRange("a", "z"), 20);
m.Set(KeyRange::Prefix("a"), 30);
m.Set(KeyRange::Singleton("a/b.c"), 40);
m.Set(KeyRange::Prefix("a/b"), 50);
for (const auto& v : m) {
ABSL_LOG(INFO) << v.range << ": " << v.value;
}
EXPECT_THAT(m, testing::ElementsAre(IntRange{KeyRange("a", "a/b"), 30},
IntRange{KeyRange("a/b", "a/c"), 50},
IntRange{KeyRange("a/c", "b"), 30},
IntRange{KeyRange("b", "z"), 20}));
ASSERT_EQ(m.range_containing(""), m.end());
ASSERT_EQ(m.range_containing("z"), m.end());
ASSERT_NE(m.range_containing("a"), m.end());
EXPECT_THAT(*m.range_containing("a"), (IntRange{KeyRange("a", "a/b"), 30}));
ASSERT_NE(m.range_containing("b"), m.end());
EXPECT_THAT(*m.range_containing("b"), (IntRange{KeyRange("b", "z"), 20}));
ASSERT_NE(m.range_containing("d"), m.end());
EXPECT_THAT(*m.range_containing("d"), (IntRange{KeyRange("b", "z"), 20}));
ASSERT_NE(m.range_containing("a/d"), m.end());
EXPECT_THAT(*m.range_containing("a/d"), (IntRange{KeyRange("a/c", "b"), 30}));
{
std::vector<KeyRange> ranges;
m.VisitRange(KeyRange("", ""),
[&](KeyRange r, auto& value) { ranges.push_back(r); });
EXPECT_THAT(ranges, testing::ElementsAre(
KeyRange("a", "a/b"), KeyRange("a/b", "a/c"),
KeyRange("a/c", "b"), KeyRange("b", "z")));
}
{
std::vector<KeyRange> ranges;
m.VisitRange(KeyRange::EmptyRange(),
[&](KeyRange r, auto& value) { ranges.push_back(r); });
EXPECT_TRUE(ranges.empty());
}
{
std::vector<KeyRange> ranges;
m.VisitRange(KeyRange("", "a/z"),
[&](KeyRange r, auto& value) { ranges.push_back(r); });
EXPECT_THAT(ranges, testing::ElementsAre(KeyRange("a", "a/b"),
KeyRange("a/b", "a/c"),
KeyRange("a/c", "a/z")));
}
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/kvstack/key_range_map.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/kvstack/key_range_map_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
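A sketch of KeyRangeMap's overwrite behavior, assuming the header above: Set splits any overlapping entries rather than rejecting them, so a narrower mapping can be layered over a broad default. DriverFor and the driver labels are invented for illustration.
#include <string>
#include <string_view>

#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/kvstore/kvstack/key_range_map.h"

namespace {
using ::tensorstore::KeyRange;
using ::tensorstore::internal_kvstack::KeyRangeMap;

// Returns the label whose range contains `key`, or "<none>".
std::string DriverFor(const KeyRangeMap<std::string>& drivers,
                      std::string_view key) {
  auto it = drivers.range_containing(key);
  return it == drivers.end() ? std::string("<none>") : it->value;
}

void Demo() {
  KeyRangeMap<std::string> drivers;
  drivers.Set(KeyRange({}, {}), "base");            // Everything -> "base".
  drivers.Set(KeyRange::Prefix("cache/"), "fast");  // Splits the "base" entry.
  // DriverFor(drivers, "cache/object") == "fast"
  // DriverFor(drivers, "other/object") == "base"
}
}  // namespace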
2b46fc07-d111-4925-b26a-7aae4df750d2 | cpp | google/tensorstore | output_index_map | tensorstore/index_space/output_index_map.h | tensorstore/index_space/output_index_map_test.cc | #ifndef TENSORSTORE_INDEX_SPACE_OUTPUT_INDEX_MAP_H_
#define TENSORSTORE_INDEX_SPACE_OUTPUT_INDEX_MAP_H_
#include <cassert>
#include "tensorstore/array.h"
#include "tensorstore/index_space/internal/transform_rep.h"
#include "tensorstore/index_space/output_index_method.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/element_pointer.h"
namespace tensorstore {
template <DimensionIndex InputRank = dynamic_rank>
class OutputIndexMapRef {
public:
class IndexArrayView {
public:
SharedArrayView<const Index, InputRank, offset_origin> shared_array_ref()
const {
return {element_pointer(), layout()};
}
ArrayView<const Index, InputRank, offset_origin> array_ref() const {
return {element_pointer(), layout()};
}
const SharedElementPointer<const Index>& element_pointer() const {
return index_array_data_->element_pointer;
}
IndexInterval index_range() const { return index_array_data_->index_range; }
StaticOrDynamicRank<InputRank> rank() const {
return StaticRankCast<InputRank, unchecked>(
static_cast<DimensionIndex>(rep_->input_rank));
}
StridedLayoutView<InputRank, offset_origin> layout() const {
return StridedLayoutView<InputRank, offset_origin>(
rank(), rep_->input_origin().data(), rep_->input_shape().data(),
index_array_data_->byte_strides);
}
span<const Index, InputRank> byte_strides() const {
return {index_array_data_->byte_strides, rank()};
}
private:
template <DimensionIndex>
friend class OutputIndexMapRef;
explicit IndexArrayView(
internal_index_space::IndexArrayData* index_array_data,
internal_index_space::TransformRep* rep)
: index_array_data_(index_array_data), rep_(rep) {}
internal_index_space::IndexArrayData* index_array_data_;
internal_index_space::TransformRep* rep_;
};
OutputIndexMapRef() = default;
OutputIndexMapRef& operator=(const OutputIndexMapRef&) = default;
StaticOrDynamicRank<InputRank> input_rank() const {
return StaticRankCast<InputRank, unchecked>(
static_cast<DimensionIndex>(rep_->input_rank));
}
OutputIndexMethod method() const { return map_->method(); }
Index offset() const { return map_->offset(); }
Index stride() const { return map_->stride(); }
DimensionIndex input_dimension() const { return map_->input_dimension(); }
IndexArrayView index_array() const {
return IndexArrayView(&map_->index_array_data(), rep_);
}
private:
template <DimensionIndex, DimensionIndex, ContainerKind>
friend class OutputIndexMapRange;
template <DimensionIndex>
friend class OutputIndexMapIterator;
explicit OutputIndexMapRef(internal_index_space::OutputIndexMap* map,
internal_index_space::TransformRep* rep)
: map_(map), rep_(rep) {}
internal_index_space::OutputIndexMap* map_ = nullptr;
internal_index_space::TransformRep* rep_ = nullptr;
};
template <DimensionIndex InputRank = dynamic_rank>
class OutputIndexMapIterator {
public:
using value_type = OutputIndexMapRef<InputRank>;
using reference = OutputIndexMapRef<InputRank>;
using difference_type = DimensionIndex;
using pointer = value_type*;
using iterator_category = std::random_access_iterator_tag;
OutputIndexMapIterator() = default;
OutputIndexMapRef<InputRank> operator*() const { return ref_; }
const OutputIndexMapRef<InputRank>* operator->() const { return &ref_; }
OutputIndexMapRef<InputRank> operator[](DimensionIndex n) const {
auto new_ref = ref_;
new_ref.map_ += n;
return new_ref;
}
OutputIndexMapIterator& operator+=(DimensionIndex n) {
ref_.map_ += n;
return *this;
}
OutputIndexMapIterator& operator-=(DimensionIndex n) { return *this += (-n); }
OutputIndexMapIterator& operator++() {
++ref_.map_;
return *this;
}
OutputIndexMapIterator& operator--() {
--ref_.map_;
return *this;
}
OutputIndexMapIterator operator++(int) {
auto temp = *this;
++ref_.map_;
return temp;
}
OutputIndexMapIterator operator--(int) {
auto temp = *this;
--ref_.map_;
return temp;
}
friend DimensionIndex operator-(OutputIndexMapIterator a,
OutputIndexMapIterator b) {
return a.map() - b.map();
}
friend OutputIndexMapIterator operator+(OutputIndexMapIterator it,
DimensionIndex n) {
it += n;
return it;
}
friend OutputIndexMapIterator operator+(DimensionIndex n,
OutputIndexMapIterator it) {
it += n;
return it;
}
friend OutputIndexMapIterator operator-(OutputIndexMapIterator it,
DimensionIndex n) {
it -= n;
return it;
}
friend bool operator==(OutputIndexMapIterator a, OutputIndexMapIterator b) {
return a.map() == b.map();
}
friend bool operator!=(OutputIndexMapIterator a, OutputIndexMapIterator b) {
return a.map() != b.map();
}
friend bool operator<(OutputIndexMapIterator a, OutputIndexMapIterator b) {
return a.map() < b.map();
}
friend bool operator<=(OutputIndexMapIterator a, OutputIndexMapIterator b) {
return a.map() <= b.map();
}
friend bool operator>(OutputIndexMapIterator a, OutputIndexMapIterator b) {
return a.map() > b.map();
}
friend bool operator>=(OutputIndexMapIterator a, OutputIndexMapIterator b) {
return a.map() >= b.map();
}
private:
internal_index_space::OutputIndexMap* map() const { return ref_.map_; }
template <DimensionIndex, DimensionIndex, ContainerKind>
friend class OutputIndexMapRange;
OutputIndexMapRef<InputRank> ref_;
explicit OutputIndexMapIterator(internal_index_space::OutputIndexMap* map,
internal_index_space::TransformRep* rep)
: ref_(map, rep) {}
};
template <DimensionIndex InputRank = dynamic_rank,
DimensionIndex OutputRank = dynamic_rank, ContainerKind CKind = view>
class OutputIndexMapRange {
public:
using value_type = OutputIndexMapRef<InputRank>;
using reference = value_type;
using iterator = OutputIndexMapIterator<InputRank>;
using difference_type = DimensionIndex;
constexpr static DimensionIndex extent = OutputRank;
OutputIndexMapRange() = default;
explicit OutputIndexMapRange(
IndexTransform<InputRank, OutputRank, CKind> transform)
: transform_(std::move(transform)) {}
template <DimensionIndex OtherInputRank, DimensionIndex OtherOutputRank,
ContainerKind OtherCKind,
typename = std::enable_if_t<
(RankConstraint::Implies(OtherInputRank, InputRank) &&
RankConstraint::Implies(OtherOutputRank, OutputRank))>>
OutputIndexMapRange(
OutputIndexMapRange<OtherInputRank, OtherOutputRank, OtherCKind> other)
: transform_(std::move(other.transform_)) {}
StaticOrDynamicRank<OutputRank> size() const {
return transform_.output_rank();
}
bool empty() const { return size() == 0; }
iterator begin() const {
return iterator(rep()->output_index_maps().data(), rep());
}
iterator end() const {
return iterator(rep()->output_index_maps().data() + size(), rep());
}
OutputIndexMapRef<InputRank> operator[](DimensionIndex output_dim) const {
assert(output_dim >= 0 && output_dim < size());
return OutputIndexMapRef<InputRank>(
rep()->output_index_maps().data() + output_dim, rep());
}
StaticOrDynamicRank<InputRank> input_rank() const {
return transform_.input_rank();
}
private:
template <DimensionIndex, DimensionIndex, ContainerKind>
friend class OutputIndexMapRange;
internal_index_space::TransformRep* rep() const {
return internal_index_space::TransformAccess::rep(transform_);
}
IndexTransform<InputRank, OutputRank, CKind> transform_;
};
}
#endif | #include "tensorstore/index_space/output_index_map.h"
#include <type_traits>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/array.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::dynamic_rank;
using ::tensorstore::Index;
using ::tensorstore::IndexInterval;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::MakeOffsetArray;
using ::tensorstore::offset_origin;
using ::tensorstore::OutputIndexMapIterator;
using ::tensorstore::OutputIndexMapRange;
using ::tensorstore::OutputIndexMapRef;
using ::tensorstore::OutputIndexMethod;
using ::tensorstore::span;
using ::tensorstore::StaticRank;
using ::tensorstore::StridedLayout;
TEST(OutputIndexMethodTest, Ostream) {
EXPECT_EQ("constant", tensorstore::StrCat(OutputIndexMethod::constant));
EXPECT_EQ("single_input_dimension",
tensorstore::StrCat(OutputIndexMethod::single_input_dimension));
EXPECT_EQ("array", tensorstore::StrCat(OutputIndexMethod::array));
EXPECT_EQ("<unknown>",
tensorstore::StrCat(static_cast<OutputIndexMethod>(-1)));
}
TEST(OutputIndexMapTest, StaticRanks) {
auto index_array = MakeOffsetArray<Index>({1, 2, 3}, {{{5}, {6}, {7}, {8}}});
auto t = IndexTransformBuilder<3, 4>()
.input_origin({1, 2, 3})
.input_shape({4, 4, 3})
.output_constant(0, 10)
.output_single_input_dimension(1, 20, 2, 2)
.output_index_array(2, 30, 3, index_array,
IndexInterval::Closed(3, 10))
.Finalize()
.value();
auto range = t.output_index_maps();
static_assert(std::is_same_v<decltype(range), OutputIndexMapRange<3, 4>>);
static_assert(std::is_same_v<StaticRank<4>, decltype(range.size())>);
static_assert(std::is_same_v<StaticRank<3>, decltype(range.input_rank())>);
EXPECT_EQ(4, range.size());
EXPECT_EQ(3, range.input_rank());
EXPECT_EQ(false, range.empty());
auto it = range.begin();
static_assert(std::is_same_v<OutputIndexMapIterator<3>, decltype(it)>);
EXPECT_EQ(range.begin(), it);
EXPECT_NE(range.end(), it);
EXPECT_EQ(range.end(), range.end());
{
auto output0 = *it;
static_assert(std::is_same_v<OutputIndexMapRef<3>, decltype(output0)>);
EXPECT_EQ(OutputIndexMethod::constant, output0.method());
EXPECT_EQ(10, output0.offset());
}
{
auto it0 = it;
EXPECT_EQ(&++it0, &it0);
EXPECT_EQ(20, it0->offset());
EXPECT_EQ(&--it0, &it0);
EXPECT_EQ(10, it0->offset());
}
{
auto it0 = it + 1;
EXPECT_EQ(20, it0->offset());
it0 = 2 + it;
EXPECT_EQ(30, it0->offset());
it0 = it0 - 2;
EXPECT_EQ(10, it0->offset());
}
{
auto it0 = it + 1;
EXPECT_EQ(1, it0 - it);
EXPECT_EQ(-1, it - it0);
EXPECT_TRUE(it < it0);
EXPECT_TRUE(it <= it0);
EXPECT_TRUE(it != it0);
EXPECT_FALSE(it == it0);
EXPECT_FALSE(it >= it0);
EXPECT_FALSE(it > it0);
EXPECT_FALSE(it0 < it);
EXPECT_FALSE(it0 <= it);
EXPECT_TRUE(it0 != it);
EXPECT_FALSE(it0 == it);
EXPECT_TRUE(it0 >= it);
EXPECT_TRUE(it0 > it);
EXPECT_FALSE(it < it);
EXPECT_TRUE(it <= it);
EXPECT_FALSE(it != it);
EXPECT_TRUE(it == it);
EXPECT_TRUE(it >= it);
EXPECT_FALSE(it > it);
}
{
auto it0 = it;
auto it1 = it0++;
EXPECT_EQ(it1, it);
EXPECT_EQ(it0, it + 1);
EXPECT_EQ(10, it1->offset());
EXPECT_EQ(20, it0->offset());
auto it2 = it0--;
EXPECT_EQ(it2, it + 1);
EXPECT_EQ(it0, it);
}
++it;
{
auto output1 = *it;
EXPECT_EQ(OutputIndexMethod::single_input_dimension, output1.method());
EXPECT_EQ(2, output1.input_dimension());
EXPECT_EQ(20, output1.offset());
EXPECT_EQ(2, output1.stride());
}
{
auto output1a = range.begin()[1];
static_assert(std::is_same_v<OutputIndexMapRef<3>, decltype(output1a)>);
EXPECT_EQ(OutputIndexMethod::single_input_dimension, output1a.method());
EXPECT_EQ(2, output1a.input_dimension());
EXPECT_EQ(20, output1a.offset());
EXPECT_EQ(2, output1a.stride());
}
{
auto output1b = range[1];
static_assert(std::is_same_v<OutputIndexMapRef<3>, decltype(output1b)>);
EXPECT_EQ(OutputIndexMethod::single_input_dimension, output1b.method());
EXPECT_EQ(2, output1b.input_dimension());
EXPECT_EQ(20, output1b.offset());
EXPECT_EQ(2, output1b.stride());
}
{
auto output1c = t.output_index_map(1);
static_assert(std::is_same_v<OutputIndexMapRef<3>, decltype(output1c)>);
EXPECT_EQ(OutputIndexMethod::single_input_dimension, output1c.method());
EXPECT_EQ(2, output1c.input_dimension());
EXPECT_EQ(20, output1c.offset());
EXPECT_EQ(2, output1c.stride());
}
++it;
{
auto output2 = *it;
EXPECT_EQ(OutputIndexMethod::array, output2.method());
EXPECT_EQ(30, output2.offset());
EXPECT_EQ(3, output2.stride());
auto index_array_ref = output2.index_array();
EXPECT_EQ(&index_array(1, 2, 3), &index_array_ref.array_ref()(1, 2, 3));
EXPECT_EQ(IndexInterval::UncheckedClosed(3, 10),
index_array_ref.index_range());
static_assert(
std::is_same_v<StaticRank<3>, decltype(index_array_ref.rank())>);
const StridedLayout<3, offset_origin> expected_layout(
{1, 2, 3}, {4, 4, 3}, {0, sizeof(Index), 0});
EXPECT_EQ(expected_layout, index_array_ref.layout());
EXPECT_EQ(&index_array(1, 2, 3),
&index_array_ref.shared_array_ref()(1, 2, 3));
EXPECT_EQ(expected_layout, index_array_ref.shared_array_ref().layout());
EXPECT_EQ(expected_layout, index_array_ref.array_ref().layout());
EXPECT_THAT(index_array_ref.byte_strides(),
testing::ElementsAreArray(expected_layout.byte_strides()));
EXPECT_EQ(0, index_array_ref.byte_strides()[0]);
EXPECT_EQ(sizeof(Index), index_array_ref.byte_strides()[1]);
EXPECT_EQ(0, index_array_ref.byte_strides()[2]);
}
++it;
{
auto output3 = *it;
EXPECT_EQ(OutputIndexMethod::constant, output3.method());
EXPECT_EQ(0, output3.offset());
}
++it;
EXPECT_EQ(range.end(), it);
}
TEST(OutputIndexMapTest, ZeroRank) {
auto t = IndexTransformBuilder<3, 0>()
.input_origin({1, 2, 3})
.input_shape({4, 4, 3})
.Finalize()
.value();
auto range = t.output_index_maps();
EXPECT_EQ(0, range.size());
EXPECT_EQ(3, range.input_rank());
EXPECT_TRUE(range.empty());
}
TEST(OutputIndexMapTest, DynamicRanks) {
auto index_array = MakeOffsetArray<Index>({1, 2, 3}, {{{5}, {6}, {7}, {8}}});
auto t = IndexTransformBuilder<>(3, 4)
.input_origin({1, 2, 3})
.input_shape({4, 4, 3})
.output_constant(0, 10)
.output_single_input_dimension(1, 20, 2, 2)
.output_index_array(2, 30, 3, index_array,
IndexInterval::Closed(3, 10))
.Finalize()
.value();
auto range = t.output_index_maps();
static_assert(std::is_same_v<decltype(range), OutputIndexMapRange<>>);
EXPECT_EQ(4, range.size());
EXPECT_EQ(3, range.input_rank());
EXPECT_EQ(false, range.empty());
auto it = range.begin();
static_assert(std::is_same_v<OutputIndexMapIterator<>, decltype(it)>);
{
auto output0 = *it;
static_assert(std::is_same_v<OutputIndexMapRef<>, decltype(output0)>);
EXPECT_EQ(OutputIndexMethod::constant, output0.method());
EXPECT_EQ(10, output0.offset());
}
{
auto output2 = range[2];
static_assert(std::is_same_v<OutputIndexMapRef<>, decltype(output2)>);
EXPECT_EQ(OutputIndexMethod::array, output2.method());
EXPECT_EQ(30, output2.offset());
EXPECT_EQ(3, output2.stride());
auto index_array_ref = output2.index_array();
EXPECT_EQ(&index_array(1, 2, 3), &index_array_ref.array_ref()(1, 2, 3));
EXPECT_EQ(IndexInterval::UncheckedClosed(3, 10),
index_array_ref.index_range());
EXPECT_EQ(3, index_array.rank());
const StridedLayout<dynamic_rank, offset_origin> expected_layout(
{1, 2, 3}, {4, 4, 3}, {0, sizeof(Index), 0});
EXPECT_EQ(expected_layout, index_array_ref.layout());
EXPECT_EQ(&index_array(1, 2, 3),
&index_array_ref.shared_array_ref()(1, 2, 3));
EXPECT_EQ(expected_layout, index_array_ref.shared_array_ref().layout());
}
}
TEST(OutputIndexMapTest, Unbroadcast) {
auto index_array = tensorstore::MakeArray<Index>({{{5}, {6}, {7}, {8}}});
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto t, IndexTransformBuilder(3, 4)
.input_origin({1, 2, 3})
.input_shape({4, 4, 3})
.output_constant(0, 10)
.output_single_input_dimension(1, 20, 2, 2)
.output_index_array(2, 30, 3, index_array)
.Finalize());
auto map = t.output_index_maps()[2];
EXPECT_THAT(map.index_array().array_ref(),
MakeOffsetArray<Index>(
{1, 2, 3}, {
{{5, 5, 5}, {6, 6, 6}, {7, 7, 7}, {8, 8, 8}},
{{5, 5, 5}, {6, 6, 6}, {7, 7, 7}, {8, 8, 8}},
{{5, 5, 5}, {6, 6, 6}, {7, 7, 7}, {8, 8, 8}},
{{5, 5, 5}, {6, 6, 6}, {7, 7, 7}, {8, 8, 8}},
}));
EXPECT_THAT(UnbroadcastArrayPreserveRank(map.index_array().array_ref()),
index_array);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/output_index_map.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/output_index_map_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
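Illustrative usage sketch for the output_index_map record above, not taken from the tensorstore sources: it iterates over the output index maps of a small transform using only APIs already exercised by the unit test; the function name PrintOutputIndexMaps and the concrete builder arguments are assumptions chosen for the example.
#include <iostream>
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/output_index_map.h"
#include "tensorstore/util/str_cat.h"
// Builds a rank-2 transform with one constant and one single-input-dimension
// output map, then prints how each output dimension is computed.
void PrintOutputIndexMaps() {
  auto t = tensorstore::IndexTransformBuilder<2, 2>()
               .input_origin({0, 0})
               .input_shape({5, 5})
               .output_constant(0, 10)
               .output_single_input_dimension(1, 20, 2, 1)
               .Finalize()
               .value();
  // output_index_maps() returns an OutputIndexMapRange; each element is an
  // OutputIndexMapRef describing one output dimension.
  for (auto map : t.output_index_maps()) {
    std::cout << tensorstore::StrCat(map.method()) << " offset=" << map.offset()
              << "\n";
  }
}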
ac970f59-4c27-4d55-8616-1699c02adfcc | cpp | google/tensorstore | transform_array_constraints | tensorstore/index_space/transform_array_constraints.h | tensorstore/index_space/transform_array_constraints_test.cc | #ifndef TENSORSTORE_INDEX_SPACE_TRANSFORM_ARRAY_CONSTRAINTS_H_
#define TENSORSTORE_INDEX_SPACE_TRANSFORM_ARRAY_CONSTRAINTS_H_
#include "tensorstore/util/iterate.h"
namespace tensorstore {
enum class MustAllocateConstraint {
may_allocate = 0,
must_allocate = 1
};
constexpr MustAllocateConstraint may_allocate =
MustAllocateConstraint::may_allocate;
constexpr MustAllocateConstraint must_allocate =
MustAllocateConstraint::must_allocate;
class TransformArrayConstraints {
public:
constexpr TransformArrayConstraints(
IterationConstraints iteration_constraint = {},
MustAllocateConstraint allocate_constraint = may_allocate)
: value_(iteration_constraint.value() |
(static_cast<int>(allocate_constraint)
<< IterationConstraints::kNumBits)) {}
constexpr TransformArrayConstraints(
LayoutOrderConstraint order_constraint,
RepeatedElementsConstraint repeat_constraint = include_repeated_elements,
MustAllocateConstraint allocate_constraint = may_allocate)
: TransformArrayConstraints(
IterationConstraints(order_constraint, repeat_constraint),
allocate_constraint) {}
constexpr TransformArrayConstraints(
UnspecifiedLayoutOrder order_constraint,
RepeatedElementsConstraint repeat_constraint = include_repeated_elements,
MustAllocateConstraint allocate_constraint = may_allocate)
: TransformArrayConstraints(
IterationConstraints(order_constraint, repeat_constraint),
allocate_constraint) {}
constexpr TransformArrayConstraints(
ContiguousLayoutOrder order_constraint,
RepeatedElementsConstraint repeat_constraint = include_repeated_elements,
MustAllocateConstraint allocate_constraint = may_allocate)
: TransformArrayConstraints(
IterationConstraints(order_constraint, repeat_constraint),
allocate_constraint) {}
constexpr TransformArrayConstraints(
LayoutOrderConstraint order_constraint,
MustAllocateConstraint allocate_constraint)
: TransformArrayConstraints(IterationConstraints(order_constraint),
allocate_constraint) {}
constexpr TransformArrayConstraints(
UnspecifiedLayoutOrder order_constraint,
MustAllocateConstraint allocate_constraint)
: TransformArrayConstraints(IterationConstraints(order_constraint),
allocate_constraint) {}
constexpr TransformArrayConstraints(
ContiguousLayoutOrder order_constraint,
MustAllocateConstraint allocate_constraint)
: TransformArrayConstraints(IterationConstraints(order_constraint),
allocate_constraint) {}
constexpr TransformArrayConstraints(
RepeatedElementsConstraint repeat_constraint,
MustAllocateConstraint allocate_constraint = may_allocate)
: TransformArrayConstraints(IterationConstraints(repeat_constraint),
allocate_constraint) {}
constexpr TransformArrayConstraints(
MustAllocateConstraint allocate_constraint)
: TransformArrayConstraints(IterationConstraints{}, allocate_constraint) {
}
explicit constexpr TransformArrayConstraints(int value) : value_(value) {}
constexpr IterationConstraints iteration_constraints() const {
return IterationConstraints(value() &
((1 << IterationConstraints::kNumBits) - 1));
}
constexpr LayoutOrderConstraint order_constraint() const {
return iteration_constraints().order_constraint();
}
constexpr RepeatedElementsConstraint repeated_elements_constraint() const {
return iteration_constraints().repeated_elements_constraint();
}
constexpr MustAllocateConstraint allocate_constraint() const {
return static_cast<MustAllocateConstraint>(value_ >>
IterationConstraints::kNumBits);
}
constexpr int value() const { return value_; }
constexpr static int kNumBits = IterationConstraints::kNumBits + 1;
friend constexpr bool operator==(TransformArrayConstraints a,
TransformArrayConstraints b) {
return a.value() == b.value();
}
friend constexpr bool operator!=(TransformArrayConstraints a,
TransformArrayConstraints b) {
return a.value() != b.value();
}
private:
int value_;
};
}
#endif | #include "tensorstore/index_space/transform_array_constraints.h"
#include <gtest/gtest.h>
namespace {
using ::tensorstore::ContiguousLayoutOrder;
using ::tensorstore::IterationConstraints;
using ::tensorstore::TransformArrayConstraints;
TEST(TransformArrayConstraintsTest, Basic) {
EXPECT_TRUE(
TransformArrayConstraints(ContiguousLayoutOrder::c).order_constraint());
EXPECT_EQ(IterationConstraints(ContiguousLayoutOrder::c,
tensorstore::skip_repeated_elements),
TransformArrayConstraints(
IterationConstraints(ContiguousLayoutOrder::c,
tensorstore::skip_repeated_elements))
.iteration_constraints());
EXPECT_FALSE(TransformArrayConstraints(tensorstore::unspecified_order)
.order_constraint());
EXPECT_EQ(tensorstore::skip_repeated_elements,
TransformArrayConstraints(tensorstore::skip_repeated_elements,
tensorstore::may_allocate)
.repeated_elements_constraint());
EXPECT_EQ(tensorstore::may_allocate,
TransformArrayConstraints(tensorstore::skip_repeated_elements,
tensorstore::may_allocate)
.allocate_constraint());
EXPECT_EQ(tensorstore::must_allocate,
TransformArrayConstraints(tensorstore::skip_repeated_elements,
tensorstore::must_allocate)
.allocate_constraint());
EXPECT_EQ(
tensorstore::c_order,
TransformArrayConstraints(tensorstore::c_order, tensorstore::may_allocate)
.order_constraint()
.order());
EXPECT_EQ(tensorstore::fortran_order,
TransformArrayConstraints(tensorstore::fortran_order,
tensorstore::may_allocate)
.order_constraint()
.order());
static_assert(
11 == TransformArrayConstraints(tensorstore::fortran_order,
tensorstore::include_repeated_elements,
tensorstore::must_allocate)
.value(),
"");
static_assert(
3 == TransformArrayConstraints(tensorstore::fortran_order,
tensorstore::include_repeated_elements)
.value(),
"");
static_assert(
10 == TransformArrayConstraints(tensorstore::c_order,
tensorstore::include_repeated_elements,
tensorstore::must_allocate)
.value(),
"");
static_assert(
8 == TransformArrayConstraints(tensorstore::include_repeated_elements,
tensorstore::must_allocate)
.value(),
"");
EXPECT_EQ(tensorstore::fortran_order,
TransformArrayConstraints(tensorstore::fortran_order,
tensorstore::include_repeated_elements,
tensorstore::must_allocate)
.order_constraint()
.order());
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/transform_array_constraints.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/transform_array_constraints_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
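Illustrative usage sketch for the transform_array_constraints record above, not taken from the tensorstore sources: it packs the three independent constraints into one TransformArrayConstraints value and reads them back through the accessors covered by the test; the function name and the particular constraint combination are assumptions for the example.
#include "tensorstore/index_space/transform_array_constraints.h"
// Combines order, repeated-elements, and allocation constraints, then queries
// each component individually from the packed bitfield.
bool NeedsSeparateAllocation() {
  tensorstore::TransformArrayConstraints constraints(
      tensorstore::fortran_order, tensorstore::skip_repeated_elements,
      tensorstore::must_allocate);
  bool ordered = static_cast<bool>(constraints.order_constraint());
  bool allocation_required =
      constraints.allocate_constraint() == tensorstore::must_allocate;
  return ordered && allocation_required;
}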
2dcebcc7-6c69-44ba-9f86-9cae5f936ef9 | cpp | google/tensorstore | deep_copy_transform_rep_ptr | tensorstore/index_space/internal/deep_copy_transform_rep_ptr.h | tensorstore/index_space/deep_copy_transform_rep_ptr_test.cc | #ifndef TENSORSTORE_INDEX_SPACE_INTERNAL_DEEP_COPY_TRANSFORM_REP_PTR_H_
#define TENSORSTORE_INDEX_SPACE_INTERNAL_DEEP_COPY_TRANSFORM_REP_PTR_H_
#include <utility>
#include "tensorstore/index_space/internal/transform_rep.h"
namespace tensorstore {
namespace internal_index_space {
class DeepCopyTransformRepPtr {
public:
DeepCopyTransformRepPtr(std::nullptr_t = nullptr) : ptr_(nullptr) {}
explicit DeepCopyTransformRepPtr(TransformRep* ptr,
internal::adopt_object_ref_t)
: ptr_(ptr) {
assert(ptr == nullptr ||
(ptr->input_rank_capacity == 0 && ptr->output_rank_capacity == 0) ||
ptr->reference_count == 1);
}
explicit DeepCopyTransformRepPtr(TransformRep* ptr,
internal::acquire_object_ref_t) {
if (ptr) {
ptr_ =
TransformRep::Allocate(ptr->input_rank, ptr->output_rank).release();
CopyTransformRep(ptr, ptr_);
} else {
ptr_ = nullptr;
}
}
DeepCopyTransformRepPtr(DeepCopyTransformRepPtr&& other)
: ptr_(std::exchange(other.ptr_, nullptr)) {}
DeepCopyTransformRepPtr(const DeepCopyTransformRepPtr& other)
: DeepCopyTransformRepPtr(other.ptr_, internal::acquire_object_ref) {}
DeepCopyTransformRepPtr& operator=(DeepCopyTransformRepPtr&& other) {
if (ptr_) Free();
ptr_ = std::exchange(other.ptr_, nullptr);
return *this;
}
DeepCopyTransformRepPtr& operator=(const DeepCopyTransformRepPtr& other) {
return *this = DeepCopyTransformRepPtr(other.ptr_,
internal::acquire_object_ref);
}
DeepCopyTransformRepPtr& operator=(std::nullptr_t) {
if (ptr_) Free();
ptr_ = nullptr;
return *this;
}
~DeepCopyTransformRepPtr() {
if (ptr_) Free();
}
explicit operator bool() const { return static_cast<bool>(ptr_); }
TransformRep* get() const { return ptr_; }
TransformRep* operator->() const { return ptr_; }
TransformRep& operator*() const { return *ptr_; }
TransformRep* release() { return std::exchange(ptr_, nullptr); }
private:
void Free() {
TransformRep::Ptr<>(ptr_, internal::adopt_object_ref);
}
TransformRep* ptr_;
};
}
}
#endif | #include "tensorstore/index_space/internal/deep_copy_transform_rep_ptr.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace {
using ::tensorstore::internal::acquire_object_ref;
using ::tensorstore::internal::adopt_object_ref;
using ::tensorstore::internal_index_space::DeepCopyTransformRepPtr;
using ::tensorstore::internal_index_space::TransformRep;
TEST(DeepCopyTransformRepPtr, DefaultConstruct) {
DeepCopyTransformRepPtr ptr;
EXPECT_FALSE(ptr);
EXPECT_EQ(nullptr, ptr.get());
EXPECT_EQ(nullptr, ptr.operator->());
EXPECT_EQ(nullptr, ptr.release());
}
TEST(DeepCopyTransformRepPtr, Nullptr) {
DeepCopyTransformRepPtr ptr = nullptr;
EXPECT_FALSE(ptr);
EXPECT_EQ(nullptr, ptr.get());
EXPECT_EQ(nullptr, ptr.operator->());
EXPECT_EQ(nullptr, ptr.release());
}
TEST(DeepCopyTransformRepPtr, AdoptAllocate) {
auto ptr1 = TransformRep::Allocate(1, 1);
ptr1->input_rank = ptr1->output_rank = 1;
auto ptr = ptr1.get();
DeepCopyTransformRepPtr ptr2(ptr1.release(), adopt_object_ref);
EXPECT_EQ(ptr, ptr2.get());
EXPECT_EQ(ptr, ptr2.operator->());
EXPECT_EQ(ptr, &*ptr2);
}
TEST(DeepCopyTransformRepPtr, AdoptAllocateZero) {
auto ptr1 = TransformRep::Allocate(0, 0);
ptr1->input_rank = ptr1->output_rank = 0;
auto ptr = ptr1.get();
DeepCopyTransformRepPtr ptr2(ptr1.release(), adopt_object_ref);
EXPECT_EQ(ptr, ptr2.get());
EXPECT_EQ(ptr, ptr2.operator->());
EXPECT_EQ(ptr, &*ptr2);
}
TEST(DeepCopyTransformRepPtr, AcquireAllocate) {
auto ptr1 = TransformRep::Allocate(1, 1);
ptr1->input_rank = ptr1->output_rank = 1;
ptr1->input_origin()[0] = 7;
DeepCopyTransformRepPtr ptr2(ptr1.get(), acquire_object_ref);
EXPECT_NE(ptr1.get(), ptr2.get());
EXPECT_EQ(7, ptr2->input_origin()[0]);
}
TEST(DeepCopyTransformRepPtr, Release) {
auto ptr1 = TransformRep::Allocate(1, 1);
ptr1->input_rank = ptr1->output_rank = 1;
auto ptr = ptr1.get();
DeepCopyTransformRepPtr ptr2(ptr1.release(), adopt_object_ref);
auto ptr3 = ptr2.release();
EXPECT_EQ(ptr, ptr3);
TransformRep::Ptr<>(ptr3, adopt_object_ref);
}
TEST(DeepCopyTransformRepPtr, MoveConstruct) {
auto ptr1 = TransformRep::Allocate(1, 1);
ptr1->input_rank = ptr1->output_rank = 1;
auto ptr = ptr1.get();
DeepCopyTransformRepPtr ptr2(ptr1.release(), adopt_object_ref);
EXPECT_EQ(ptr, ptr2.get());
auto ptr3 = std::move(ptr2);
EXPECT_EQ(ptr, ptr3.get());
EXPECT_FALSE(ptr2);
}
TEST(DeepCopyTransformRepPtr, CopyConstruct) {
auto ptr1 = TransformRep::Allocate(1, 1);
ptr1->input_rank = ptr1->output_rank = 1;
auto ptr = ptr1.get();
ptr1->input_origin()[0] = 7;
DeepCopyTransformRepPtr ptr2(ptr1.release(), adopt_object_ref);
EXPECT_EQ(ptr, ptr2.get());
auto ptr3 = ptr2;
EXPECT_NE(ptr, ptr3.get());
EXPECT_TRUE(ptr2);
EXPECT_TRUE(ptr3);
EXPECT_EQ(7, ptr3->input_origin()[0]);
}
TEST(DeepCopyTransformRepPtr, AssignNullptr) {
auto ptr1 = TransformRep::Allocate(1, 1);
ptr1->input_rank = ptr1->output_rank = 1;
DeepCopyTransformRepPtr ptr2(ptr1.release(), adopt_object_ref);
ptr2 = nullptr;
EXPECT_EQ(nullptr, ptr2.get());
}
TEST(DeepCopyTransformRepPtr, MoveAssignNonNullToNull) {
auto ptr1 = TransformRep::Allocate(1, 1);
ptr1->input_rank = ptr1->output_rank = 1;
auto ptr = ptr1.get();
DeepCopyTransformRepPtr ptr2(ptr1.release(), adopt_object_ref);
EXPECT_EQ(ptr, ptr2.get());
DeepCopyTransformRepPtr ptr3;
ptr3 = std::move(ptr2);
EXPECT_EQ(ptr, ptr3.get());
EXPECT_FALSE(ptr2);
}
TEST(DeepCopyTransformRepPtr, MoveAssignNullToNonNull) {
auto ptr1 = TransformRep::Allocate(1, 1);
ptr1->input_rank = ptr1->output_rank = 1;
auto ptr = ptr1.get();
DeepCopyTransformRepPtr ptr2(ptr1.release(), adopt_object_ref);
EXPECT_EQ(ptr, ptr2.get());
DeepCopyTransformRepPtr ptr3;
ptr2 = std::move(ptr3);
EXPECT_FALSE(ptr2);
EXPECT_FALSE(ptr3);
}
TEST(DeepCopyTransformRepPtr, CopyAssignNonNullToNull) {
auto ptr1 = TransformRep::Allocate(1, 1);
ptr1->input_rank = ptr1->output_rank = 1;
auto ptr = ptr1.get();
ptr1->input_origin()[0] = 7;
DeepCopyTransformRepPtr ptr2(ptr1.release(), adopt_object_ref);
EXPECT_EQ(ptr, ptr2.get());
DeepCopyTransformRepPtr ptr3;
ptr3 = ptr2;
EXPECT_TRUE(ptr2);
EXPECT_EQ(ptr, ptr2.get());
EXPECT_NE(ptr, ptr3.get());
EXPECT_EQ(7, ptr3->input_origin()[0]);
}
TEST(DeepCopyTransformRepPtr, CopyAssignNullToNonNull) {
auto ptr1 = TransformRep::Allocate(1, 1);
ptr1->input_rank = ptr1->output_rank = 1;
auto ptr = ptr1.get();
ptr1->input_origin()[0] = 7;
DeepCopyTransformRepPtr ptr2(ptr1.release(), adopt_object_ref);
EXPECT_EQ(ptr, ptr2.get());
DeepCopyTransformRepPtr ptr3;
ptr2 = ptr3;
EXPECT_FALSE(ptr2);
EXPECT_FALSE(ptr3);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/internal/deep_copy_transform_rep_ptr.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/deep_copy_transform_rep_ptr_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
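Illustrative usage sketch for the deep_copy_transform_rep_ptr record above, not taken from the tensorstore sources: it shows that copying a DeepCopyTransformRepPtr yields an independent deep copy of the underlying TransformRep, reusing the allocation pattern of the unit test; the function name is an assumption for the example.
#include <cassert>
#include "tensorstore/index_space/internal/deep_copy_transform_rep_ptr.h"
#include "tensorstore/index_space/internal/transform_rep.h"
void DemonstrateDeepCopy() {
  using ::tensorstore::internal::adopt_object_ref;
  using ::tensorstore::internal_index_space::DeepCopyTransformRepPtr;
  using ::tensorstore::internal_index_space::TransformRep;
  auto rep = TransformRep::Allocate(1, 1);
  rep->input_rank = rep->output_rank = 1;
  rep->input_origin()[0] = 7;
  // Adopt the freshly allocated representation without copying it.
  DeepCopyTransformRepPtr a(rep.release(), adopt_object_ref);
  // Copy construction allocates a new representation and copies the contents.
  DeepCopyTransformRepPtr b = a;
  assert(a.get() != b.get());
  assert(b->input_origin()[0] == 7);
}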
2ba2982c-fc25-44e0-8663-1450c5bfd129 | cpp | google/arolla | my_complex_type | py/arolla/examples/my_complex/my_complex_type.cc | py/arolla/examples/my_complex/my_complex_type_test.cc | #include "py/arolla/examples/my_complex/my_complex_type.h"
#include "absl/strings/str_format.h"
#include "arolla/qtype/simple_qtype.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/repr.h"
namespace arolla {
void FingerprintHasherTraits<my_complex::MyComplex>::operator()(
FingerprintHasher* hasher, const my_complex::MyComplex& value) const {
hasher->Combine(value.im, value.re);
}
ReprToken ReprTraits<my_complex::MyComplex>::operator()(
const my_complex::MyComplex& value) const {
return ReprToken{absl::StrFormat("%v + %vi", value.re, value.im)};
}
AROLLA_DEFINE_SIMPLE_QTYPE(MY_COMPLEX, my_complex::MyComplex);
} | #include "py/arolla/examples/my_complex/my_complex_type.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/repr.h"
namespace my_complex {
namespace {
using ::testing::Eq;
using ::testing::Ne;
TEST(ComplexTest, GetQType) {
EXPECT_THAT(arolla::GetQType<MyComplex>()->name(), Eq("MY_COMPLEX"));
}
TEST(ComplexTest, Fingerprint) {
MyComplex c{.re = 5.7, .im = 0.7};
auto c_fingerprint = arolla::FingerprintHasher("").Combine(c).Finish();
EXPECT_THAT(arolla::FingerprintHasher("").Combine(c).Finish(),
Eq(c_fingerprint));
EXPECT_THAT(arolla::FingerprintHasher("")
.Combine(MyComplex{.re = 0.7, .im = 5.7})
.Finish(),
Ne(c_fingerprint));
}
TEST(ComplexTest, Repr) {
EXPECT_THAT(arolla::Repr(MyComplex{}), Eq("0 + 0i"));
EXPECT_THAT(arolla::Repr(MyComplex{.re = 5.7, .im = 0.7}), Eq("5.7 + 0.7i"));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/py/arolla/examples/my_complex/my_complex_type.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/py/arolla/examples/my_complex/my_complex_type_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
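Illustrative usage sketch for the my_complex_type record above, not taken from the arolla sources: it checks the registered QType name, the repr format, and fingerprint stability for a MyComplex value, relying only on the traits defined in the record; the function name is an assumption for the example.
#include "py/arolla/examples/my_complex/my_complex_type.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/repr.h"
bool MyComplexTraitsWork() {
  my_complex::MyComplex c{.re = 5.7, .im = 0.7};
  // AROLLA_DEFINE_SIMPLE_QTYPE registers the type under the name MY_COMPLEX.
  bool qtype_ok =
      arolla::GetQType<my_complex::MyComplex>()->name() == "MY_COMPLEX";
  // ReprTraits formats the value as "<re> + <im>i".
  bool repr_ok = arolla::Repr(c) == "5.7 + 0.7i";
  // FingerprintHasherTraits combines both components, so hashing is stable.
  auto fp1 = arolla::FingerprintHasher("").Combine(c).Finish();
  auto fp2 = arolla::FingerprintHasher("").Combine(c).Finish();
  return qtype_ok && repr_ok && fp1 == fp2;
}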
0cae6dda-bd73-4939-aa3d-96eb9675ec45 | cpp | google/arolla | decision_forest | arolla/decision_forest/decision_forest.cc | arolla/decision_forest/decision_forest_test.cc | #include "arolla/decision_forest/decision_forest.h"
#include <algorithm>
#include <cstddef>
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/log/check.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "arolla/memory/frame.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/simple_qtype.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla {
using NodeId = DecisionTreeNodeId;
float DecisionForestNaiveEvaluation(const DecisionForest& forest,
const ConstFramePtr ctx,
absl::Span<const TypedSlot> inputs,
const TreeFilter& filter) {
DCHECK_OK(forest.ValidateInputSlots(inputs));
double res = 0;
for (const auto& tree : forest.GetTrees()) {
if (!filter(tree.tag)) continue;
NodeId node_id = GetTreeRootId(tree);
while (!node_id.is_leaf()) {
DCHECK(node_id.split_node_index() >= 0 &&
node_id.split_node_index() < tree.split_nodes.size());
const auto& node = tree.split_nodes[node_id.split_node_index()];
if (node.condition->EvaluateCondition(ctx, inputs)) {
node_id = node.child_if_true;
} else {
node_id = node.child_if_false;
}
}
DCHECK(node_id.adjustment_index() >= 0 &&
node_id.adjustment_index() < tree.adjustments.size());
res += tree.adjustments[node_id.adjustment_index()] * tree.weight;
}
return res;
}
namespace {
std::string NodeIdToString(DecisionTreeNodeId id) {
if (id.is_leaf()) {
return absl::StrFormat("adjustments[%d]", id.adjustment_index());
} else {
return absl::StrFormat("goto %d", id.split_node_index());
}
}
}
std::string ToDebugString(const DecisionTree& tree) {
std::string res = " DecisionTree {\n";
absl::StrAppendFormat(&res, " tag { step: %d submodel_id: %d }\n",
tree.tag.step, tree.tag.submodel_id);
absl::StrAppendFormat(&res, " weight: %f\n", tree.weight);
absl::StrAppend(&res, " split_nodes {\n");
for (size_t i = 0; i < tree.split_nodes.size(); ++i) {
const SplitNode& node = tree.split_nodes[i];
absl::StrAppendFormat(&res, " %d: IF %s THEN %s ELSE %s\n", i,
node.condition->ToString(),
NodeIdToString(node.child_if_true),
NodeIdToString(node.child_if_false));
}
absl::StrAppend(&res, " }\n");
absl::StrAppend(&res, " adjustments:");
for (float adj : tree.adjustments) {
absl::StrAppendFormat(&res, " %f", adj);
}
absl::StrAppend(&res, "\n }");
return res;
}
std::string ToDebugString(const DecisionForest& forest) {
std::string res = "DecisionForest {\n";
auto required_qtypes = forest.GetRequiredQTypes();
for (const auto& [k, v] : std::map<int, QTypePtr>(required_qtypes.begin(),
required_qtypes.end())) {
absl::StrAppendFormat(&res, " input #%d: %s\n", k, v->name());
}
for (const auto& tree : forest.GetTrees()) {
absl::StrAppend(&res, ToDebugString(tree), "\n");
}
absl::StrAppend(&res, "}");
return res;
}
absl::StatusOr<std::unique_ptr<DecisionForest>> DecisionForest::FromTrees(
std::vector<DecisionTree>&& trees) {
auto forest = absl::WrapUnique(new DecisionForest(std::move(trees)));
RETURN_IF_ERROR(forest->Initialize());
return forest;
}
absl::Status DecisionForest::ValidateInputSlots(
absl::Span<const TypedSlot> input_slots) const {
for (const auto& kv : required_qtypes_) {
if (kv.first >= input_slots.size()) {
return absl::InvalidArgumentError("not enough arguments");
}
if (input_slots[kv.first].GetType() != kv.second) {
return absl::InvalidArgumentError("type mismatch");
}
}
return absl::OkStatus();
}
absl::Status DecisionForest::Initialize() {
FingerprintHasher hasher("::arolla::DecisionForest");
hasher.Combine(trees_.size());
submodel_count_ = 0;
step_count_ = 0;
for (const auto& tree : trees_) {
hasher.CombineSpan(tree.split_nodes)
.CombineSpan(tree.adjustments)
.Combine(tree.weight, tree.tag.step, tree.tag.submodel_id);
if (tree.tag.submodel_id < 0) {
return absl::InvalidArgumentError("submodel_id can not be negative");
}
if (tree.tag.step < 0) {
return absl::InvalidArgumentError("step can not be negative");
}
submodel_count_ = std::max(submodel_count_, tree.tag.submodel_id + 1);
step_count_ = std::max(step_count_, tree.tag.step + 1);
if (tree.split_nodes.size() + 1 != tree.adjustments.size()) {
return absl::InvalidArgumentError("incorrect number of regions");
}
for (const auto& node : tree.split_nodes) {
bool is_correct = true;
DecisionTreeNodeId child = node.child_if_false;
if (child.is_leaf()) {
is_correct &= child.adjustment_index() < tree.adjustments.size();
} else {
is_correct &= child.split_node_index() < tree.split_nodes.size();
}
child = node.child_if_true;
if (child.is_leaf()) {
is_correct &= child.adjustment_index() < tree.adjustments.size();
} else {
is_correct &= child.split_node_index() < tree.split_nodes.size();
}
if (!is_correct)
return absl::InvalidArgumentError("incorrect split node");
for (auto id_type : node.condition->GetInputSignatures()) {
auto it = required_qtypes_.emplace(id_type.id, id_type.type);
if (it.first->second != id_type.type) {
return absl::InvalidArgumentError(
"types mismatch in decision forest");
}
}
}
}
fingerprint_ = std::move(hasher).Finish();
return absl::OkStatus();
}
void FingerprintHasherTraits<SplitNode>::operator()(
FingerprintHasher* hasher, const SplitNode& value) const {
hasher->Combine(value.child_if_false.raw_index(),
value.child_if_true.raw_index());
value.condition->CombineToFingerprintHasher(hasher);
}
void FingerprintHasherTraits<TreeFilter>::operator()(
FingerprintHasher* hasher, const TreeFilter& value) const {
std::vector<int> submodels(value.submodels.begin(), value.submodels.end());
absl::c_sort(submodels);
hasher->Combine(value.step_range_from, value.step_range_to)
.CombineSpan(submodels);
}
void FingerprintHasherTraits<DecisionForestPtr>::operator()(
FingerprintHasher* hasher, const DecisionForestPtr& value) const {
hasher->Combine(value->fingerprint());
}
AROLLA_DEFINE_SIMPLE_QTYPE(DECISION_FOREST, DecisionForestPtr);
AROLLA_DEFINE_SIMPLE_QTYPE(TREE_FILTER, TreeFilter);
} | #include "arolla/decision_forest/decision_forest.h"
#include <cmath>
#include <cstdint>
#include <limits>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "arolla/decision_forest/split_conditions/interval_split_condition.h"
#include "arolla/decision_forest/split_conditions/set_of_values_split_condition.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/memory_allocation.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qtype/typed_slot.h"
namespace arolla::testing {
namespace {
using ::absl_testing::StatusIs;
using ::testing::HasSubstr;
using ::testing::Test;
constexpr float inf = std::numeric_limits<float>::infinity();
constexpr auto S = DecisionTreeNodeId::SplitNodeId;
constexpr auto A = DecisionTreeNodeId::AdjustmentId;
TEST(DecisionForestTest, ForestValidation) {
DecisionTree tree1;
tree1.adjustments = {0.5, 1.5, 2.5, 3.5};
tree1.split_nodes = {{S(1), S(2), IntervalSplit(0, 1.5, inf)},
{A(0), A(1), SetOfValuesSplit<int64_t>(1, {5}, false)},
{A(2), A(3), IntervalSplit(0, -inf, 10)}};
DecisionTree tree2;
tree2.adjustments = {1., 2.};
tree2.split_nodes = {{A(0), A(1), IntervalSplit(0, 1.5, inf)}};
DecisionTree tree3;
tree3.adjustments = {1., 2., 3.};
tree3.split_nodes = {{A(0), A(1), IntervalSplit(0, 1.5, inf)}};
DecisionTree tree4;
tree4.adjustments = {1., 2.};
tree4.split_nodes = {
{A(0), A(1), IntervalSplit(1, 1.5, inf)}};
EXPECT_OK(DecisionForest::FromTrees({tree1, tree2}));
EXPECT_THAT(DecisionForest::FromTrees({tree1, tree3}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("incorrect number of regions")));
EXPECT_THAT(DecisionForest::FromTrees({tree1, tree4}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("types mismatch in decision forest")));
}
TEST(DecisionForestTest, Fingerprint) {
DecisionTree tree;
tree.adjustments = {0.5, 1.5, 2.5, 3.5};
tree.split_nodes = {{S(1), S(2), IntervalSplit(0, 1.5, inf)},
{A(0), A(1), SetOfValuesSplit<int64_t>(1, {5}, false)},
{A(2), A(3), IntervalSplit(0, -inf, 10)}};
ASSERT_OK_AND_ASSIGN(auto forest1, DecisionForest::FromTrees({tree}));
ASSERT_OK_AND_ASSIGN(auto forest2, DecisionForest::FromTrees({tree}));
tree.adjustments[1] += 0.1;
ASSERT_OK_AND_ASSIGN(auto forest3, DecisionForest::FromTrees({tree}));
EXPECT_EQ(forest1->fingerprint(), forest2->fingerprint());
EXPECT_NE(forest1->fingerprint(), forest3->fingerprint());
}
TEST(DecisionForestTest, ToDebugString) {
std::vector<DecisionTree> trees(2);
trees[0].adjustments = {0.5, 1.5, 2.5, 3.5};
trees[0].split_nodes = {
{S(1), S(2), IntervalSplit(0, 1.5, inf)},
{A(0), A(1), SetOfValuesSplit<int64_t>(1, {5}, false)},
{A(2), A(3), IntervalSplit(0, -inf, 10)}};
trees[1].adjustments = {5};
trees[1].tag.step = 1;
ASSERT_OK_AND_ASSIGN(auto forest,
DecisionForest::FromTrees(std::move(trees)));
EXPECT_EQ(ToDebugString(*forest),
"DecisionForest {\n"
" input #0: OPTIONAL_FLOAT32\n"
" input #1: OPTIONAL_INT64\n"
" DecisionTree {\n"
" tag { step: 0 submodel_id: 0 }\n"
" weight: 1.000000\n"
" split_nodes {\n"
" 0: IF #0 in range [1.500000 inf] THEN goto 2 ELSE goto 1\n"
" 1: IF #1 in set [5] "
"THEN adjustments[1] ELSE adjustments[0]\n"
" 2: IF #0 in range [-inf 10.000000] "
"THEN adjustments[3] ELSE adjustments[2]\n"
" }\n"
" adjustments: 0.500000 1.500000 2.500000 3.500000\n"
" }\n"
" DecisionTree {\n"
" tag { step: 1 submodel_id: 0 }\n"
" weight: 1.000000\n"
" split_nodes {\n"
" }\n"
" adjustments: 5.000000\n"
" }\n"
"}");
}
TEST(DecisionForestTest, InputsValidation) {
std::vector<DecisionTree> trees(1);
DecisionTree& tree = trees[0];
tree.adjustments = {0.5, 1.5, 2.5, 3.5};
tree.split_nodes = {{S(1), S(2), IntervalSplit(0, 1.5, inf)},
{A(0), A(1), SetOfValuesSplit<int64_t>(1, {5}, false)},
{A(2), A(3), IntervalSplit(0, -inf, 10)}};
FrameLayout::Builder bldr;
auto slot_float = bldr.AddSlot<OptionalValue<float>>();
auto slot_int64 = bldr.AddSlot<OptionalValue<int64_t>>();
FrameLayout layout = std::move(bldr).Build();
ASSERT_OK_AND_ASSIGN(auto forest,
DecisionForest::FromTrees(std::move(trees)));
EXPECT_OK(forest->ValidateInputSlots(
{TypedSlot::FromSlot(slot_float), TypedSlot::FromSlot(slot_int64)}));
EXPECT_THAT(forest->ValidateInputSlots({}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("not enough arguments")));
EXPECT_THAT(
forest->ValidateInputSlots(
{TypedSlot::FromSlot(slot_float), TypedSlot::FromSlot(slot_float)}),
StatusIs(absl::StatusCode::kInvalidArgument, HasSubstr("type mismatch")));
}
TEST(DecisionForestTest, TreeFilter) {
DecisionTree::Tag t0{.step = 0, .submodel_id = 0};
DecisionTree::Tag t1{.step = 1, .submodel_id = 1};
DecisionTree::Tag t2{.step = 2, .submodel_id = 0};
TreeFilter f0{};
TreeFilter f1{.submodels = {0}};
TreeFilter f2{.submodels = {1}};
TreeFilter f3{.submodels = {0, 1}};
TreeFilter f4{.step_range_from = 1};
TreeFilter f5{.step_range_to = 2};
TreeFilter f6{.step_range_from = 1, .step_range_to = 2, .submodels = {0}};
EXPECT_EQ((std::vector<bool>{f0(t0), f0(t1), f0(t2)}),
(std::vector<bool>{true, true, true}));
EXPECT_EQ((std::vector<bool>{f1(t0), f1(t1), f1(t2)}),
(std::vector<bool>{true, false, true}));
EXPECT_EQ((std::vector<bool>{f2(t0), f2(t1), f2(t2)}),
(std::vector<bool>{false, true, false}));
EXPECT_EQ((std::vector<bool>{f3(t0), f3(t1), f3(t2)}),
(std::vector<bool>{true, true, true}));
EXPECT_EQ((std::vector<bool>{f4(t0), f4(t1), f4(t2)}),
(std::vector<bool>{false, true, true}));
EXPECT_EQ((std::vector<bool>{f5(t0), f5(t1), f5(t2)}),
(std::vector<bool>{true, true, false}));
EXPECT_EQ((std::vector<bool>{f6(t0), f6(t1), f6(t2)}),
(std::vector<bool>{false, false, false}));
}
TEST(DecisionForestTest, GetTreeRootId) {
DecisionTree tree1;
tree1.adjustments = {1.0};
EXPECT_TRUE(GetTreeRootId(tree1).is_leaf());
DecisionTree tree2;
tree2.split_nodes = {{A(0), A(1), IntervalSplit(0, 1, 2)}};
tree2.adjustments = {1.0, 2.0};
EXPECT_FALSE(GetTreeRootId(tree2).is_leaf());
}
TEST(DecisionForestTest, NaiveEvaluation) {
std::vector<DecisionTree> trees(3);
trees[0].adjustments = {0.5, 1.5, 2.5, 3.5};
trees[0].split_nodes = {
{S(1), S(2), IntervalSplit(0, 1.5, inf)},
{A(0), A(1), SetOfValuesSplit<int64_t>(1, {5}, false)},
{A(2), A(3), IntervalSplit(0, -inf, 10)}};
trees[1].adjustments = {5.0};
trees[1].tag = {1, 1};
trees[2].adjustments = {2.0};
trees[2].tag = {2, 0};
ASSERT_OK_AND_ASSIGN(auto forest,
DecisionForest::FromTrees(std::move(trees)));
EXPECT_EQ(forest->step_count(), 3);
EXPECT_EQ(forest->submodel_count(), 2);
FrameLayout::Builder bldr;
auto input1_slot = bldr.AddSlot<OptionalValue<float>>();
auto input2_slot = bldr.AddSlot<OptionalValue<int64_t>>();
std::vector<TypedSlot> slots = {TypedSlot::FromSlot(input1_slot),
TypedSlot::FromSlot(input2_slot)};
FrameLayout layout = std::move(bldr).Build();
MemoryAllocation alloc(&layout);
FramePtr frame = alloc.frame();
frame.Set(input1_slot, 1.0f);
frame.Set(input2_slot, 5);
EXPECT_EQ(DecisionForestNaiveEvaluation(*forest, frame, slots), 8.5);
frame.Set(input1_slot, NAN);
frame.Set(input2_slot, {});
EXPECT_EQ(DecisionForestNaiveEvaluation(*forest, frame, slots), 7.5);
frame.Set(input1_slot, 2.0f);
frame.Set(input2_slot, 4);
EXPECT_EQ(DecisionForestNaiveEvaluation(*forest, frame, slots), 10.5);
EXPECT_EQ(DecisionForestNaiveEvaluation(*forest, frame, slots,
TreeFilter{.submodels = {0}}),
5.5);
EXPECT_EQ(DecisionForestNaiveEvaluation(*forest, frame, slots,
TreeFilter{.submodels = {1}}),
5.0);
EXPECT_EQ(DecisionForestNaiveEvaluation(*forest, frame, slots,
TreeFilter{.submodels = {0, 1}}),
10.5);
EXPECT_EQ(DecisionForestNaiveEvaluation(*forest, frame, slots,
TreeFilter{.step_range_from = 1}),
7.0);
EXPECT_EQ(DecisionForestNaiveEvaluation(*forest, frame, slots,
TreeFilter{.step_range_to = 2}),
8.5);
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/decision_forest/decision_forest.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/decision_forest/decision_forest_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
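Illustrative usage sketch for the decision_forest record above, not taken from the arolla sources: it builds a one-split tree, wraps it in a DecisionForest, and evaluates a single row with DecisionForestNaiveEvaluation, reusing the helpers exercised by the test; the function name, interval bounds, and leaf values are assumptions for the example.
#include <utility>
#include <vector>
#include "arolla/decision_forest/decision_forest.h"
#include "arolla/decision_forest/split_conditions/interval_split_condition.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/memory_allocation.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qtype/typed_slot.h"
// Returns +1.0 when x lies in [0, 10] and -1.0 otherwise (including NaN).
float EvalTinyForest(float x) {
  using ::arolla::DecisionTree;
  using ::arolla::DecisionTreeNodeId;
  constexpr auto A = DecisionTreeNodeId::AdjustmentId;
  std::vector<DecisionTree> trees(1);
  trees[0].adjustments = {-1.0, 1.0};
  trees[0].split_nodes = {{A(0), A(1), arolla::IntervalSplit(0, 0.0, 10.0)}};
  auto forest = arolla::DecisionForest::FromTrees(std::move(trees)).value();
  // Input #0 is required as OPTIONAL_FLOAT32 by the interval condition.
  arolla::FrameLayout::Builder bldr;
  auto slot = bldr.AddSlot<arolla::OptionalValue<float>>();
  auto layout = std::move(bldr).Build();
  arolla::MemoryAllocation alloc(&layout);
  alloc.frame().Set(slot, x);
  std::vector<arolla::TypedSlot> slots = {arolla::TypedSlot::FromSlot(slot)};
  return arolla::DecisionForestNaiveEvaluation(*forest, alloc.frame(), slots);
}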
0389467c-ba24-47c6-a136-10cfbb920e4a | cpp | google/arolla | batched_forest_evaluator | arolla/decision_forest/batched_evaluation/batched_forest_evaluator.cc | arolla/decision_forest/batched_evaluation/batched_forest_evaluator_test.cc | #include "arolla/decision_forest/batched_evaluation/batched_forest_evaluator.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/base/no_destructor.h"
#include "absl/log/check.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "arolla/array/array.h"
#include "arolla/array/qtype/types.h"
#include "arolla/decision_forest/decision_forest.h"
#include "arolla/decision_forest/pointwise_evaluation/forest_evaluator.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/memory/buffer.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/raw_buffer_factory.h"
#include "arolla/qtype/array_like/array_like_qtype.h"
#include "arolla/qtype/array_like/frame_iter.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/typed_ref.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/util/threading.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla {
namespace {
absl::StatusOr<TypedValue> AddFullFloatArrays(TypedRef a, TypedRef b) {
if (a.GetType() == GetDenseArrayQType<float>() &&
b.GetType() == GetDenseArrayQType<float>()) {
const auto& va = a.UnsafeAs<DenseArray<float>>();
const auto& vb = b.UnsafeAs<DenseArray<float>>();
DCHECK_EQ(va.size(), vb.size());
DCHECK(va.IsFull() && vb.IsFull());
Buffer<float>::Builder bldr(va.size());
auto sa = va.values.span();
auto sb = vb.values.span();
auto sr = bldr.GetMutableSpan();
for (int64_t i = 0; i < va.size(); ++i) {
sr[i] = sa[i] + sb[i];
}
return TypedValue::FromValue(DenseArray<float>{std::move(bldr).Build()});
} else if (a.GetType() == GetArrayQType<float>() &&
b.GetType() == GetArrayQType<float>()) {
const auto& va = a.UnsafeAs<Array<float>>();
const auto& vb = b.UnsafeAs<Array<float>>();
DCHECK_EQ(va.size(), vb.size());
DCHECK(va.IsFullForm() && vb.IsFullForm());
Buffer<float>::Builder bldr(va.size());
auto sa = va.dense_data().values.span();
auto sb = vb.dense_data().values.span();
auto sr = bldr.GetMutableSpan();
for (int64_t i = 0; i < va.size(); ++i) {
sr[i] = sa[i] + sb[i];
}
return TypedValue::FromValue(Array<float>{std::move(bldr).Build()});
} else {
return absl::InternalError("Invalid type in BatchedForestEvaluator/Add");
}
}
absl::StatusOr<std::vector<ForestEvaluator>> CreatePointwiseEvaluators(
const BatchedForestEvaluator::CompilationParams& params,
const DecisionForest& decision_forest, const std::vector<TypedSlot>& inputs,
const std::vector<ForestEvaluator::Output>& outputs) {
int64_t split_count = 0;
for (const auto& tree : decision_forest.GetTrees()) {
split_count += tree.split_nodes.size();
}
int64_t evaluator_count = std::max<int64_t>(
1, (split_count + params.optimal_splits_per_evaluator - 1) /
params.optimal_splits_per_evaluator);
std::vector<ForestEvaluator> evaluators;
evaluators.reserve(evaluator_count);
if (evaluator_count == 1) {
ASSIGN_OR_RETURN(auto evaluator, ForestEvaluator::Compile(decision_forest,
inputs, outputs));
evaluators.push_back(std::move(evaluator));
return evaluators;
}
int64_t splits_per_evaluator =
(split_count + evaluator_count - 1) / evaluator_count;
int64_t estimated_trees_per_evaluator =
(decision_forest.GetTrees().size() + evaluator_count - 1) /
evaluator_count;
std::vector<DecisionTree> trees;
trees.reserve(estimated_trees_per_evaluator);
int64_t current_split_count = 0;
for (const auto& tree : decision_forest.GetTrees()) {
trees.push_back(tree);
current_split_count += tree.split_nodes.size();
if (current_split_count >= splits_per_evaluator) {
ASSIGN_OR_RETURN(auto partial_forest,
DecisionForest::FromTrees(std::move(trees)));
ASSIGN_OR_RETURN(auto evaluator, ForestEvaluator::Compile(
*partial_forest, inputs, outputs));
evaluators.push_back(std::move(evaluator));
trees.clear();
trees.reserve(estimated_trees_per_evaluator);
current_split_count = 0;
}
}
if (!trees.empty()) {
ASSIGN_OR_RETURN(auto partial_forest,
DecisionForest::FromTrees(std::move(trees)));
ASSIGN_OR_RETURN(auto evaluator, ForestEvaluator::Compile(*partial_forest,
inputs, outputs));
evaluators.push_back(std::move(evaluator));
}
return evaluators;
}
}
absl::NoDestructor<std::unique_ptr<ThreadingInterface>>
BatchedForestEvaluator::threading_;
int64_t BatchedForestEvaluator::min_rows_per_thread_;
absl::StatusOr<std::unique_ptr<BatchedForestEvaluator>>
BatchedForestEvaluator::Compile(const DecisionForest& decision_forest,
absl::Span<const TreeFilter> groups,
const CompilationParams& params) {
FrameLayout::Builder bldr;
std::vector<SlotMapping> input_slots_mapping;
TypedSlot placeholder =
TypedSlot::FromSlot(FrameLayout::Slot<float>::UnsafeUninitializedSlot());
std::vector<TypedSlot> input_pointwise_slots;
for (const auto& kv : decision_forest.GetRequiredQTypes()) {
TypedSlot pointwise_slot = AddSlot(kv.second, &bldr);
while (input_pointwise_slots.size() <= kv.first) {
input_pointwise_slots.push_back(placeholder);
}
input_pointwise_slots[kv.first] = pointwise_slot;
input_slots_mapping.push_back({kv.first, pointwise_slot});
}
std::vector<ForestEvaluator::Output> pointwise_outputs;
std::vector<TypedSlot> output_pointwise_slots;
pointwise_outputs.reserve(groups.size());
output_pointwise_slots.reserve(groups.size());
for (const TreeFilter& filter : groups) {
auto slot = bldr.AddSlot<float>();
pointwise_outputs.push_back({filter, slot});
output_pointwise_slots.push_back(TypedSlot::FromSlot(slot));
}
auto pointwise_layout = std::move(bldr).Build();
ASSIGN_OR_RETURN(
std::vector<ForestEvaluator> pointwise_evaluators,
CreatePointwiseEvaluators(params, decision_forest, input_pointwise_slots,
pointwise_outputs));
return absl::WrapUnique(new BatchedForestEvaluator(
std::move(pointwise_layout), std::move(input_slots_mapping),
std::move(output_pointwise_slots), std::move(pointwise_evaluators)));
}
absl::Status BatchedForestEvaluator::GetInputsFromSlots(
absl::Span<const TypedSlot> input_slots, ConstFramePtr frame,
std::vector<TypedRef>* input_arrays) const {
if (input_slots.size() < input_count_) {
return absl::InvalidArgumentError(
absl::StrFormat("not enough inputs: at least %d expected, %d found",
input_count_, input_slots.size()));
}
for (auto m : input_mapping_) {
input_arrays->push_back(
TypedRef::FromSlot(input_slots[m.input_index], frame));
}
return absl::OkStatus();
}
absl::Status BatchedForestEvaluator::EvalBatch(
absl::Span<const TypedSlot> input_slots,
absl::Span<const TypedSlot> output_slots, FramePtr frame,
RawBufferFactory* buffer_factory, std::optional<int64_t> row_count) const {
std::vector<TypedRef> input_arrays;
input_arrays.reserve(input_mapping_.size());
RETURN_IF_ERROR(GetInputsFromSlots(input_slots, frame, &input_arrays));
if (!row_count.has_value()) {
if (!input_arrays.empty()) {
ASSIGN_OR_RETURN(row_count, GetArraySize(input_arrays[0]));
} else if (!input_slots.empty()) {
ASSIGN_OR_RETURN(row_count,
GetArraySize(TypedRef::FromSlot(input_slots[0], frame)));
}
}
int thread_count = 1;
auto run_evaluator = [&](const ForestEvaluator& eval) -> absl::Status {
ASSIGN_OR_RETURN(
auto frame_iterator,
FrameIterator::Create(
input_arrays, {input_pointwise_slots_.data(), input_arrays.size()},
output_slots, output_pointwise_slots_, &pointwise_layout_,
FrameIterator::Options{.row_count = row_count,
.frame_buffer_count = 64 * thread_count,
.buffer_factory = buffer_factory}));
if (thread_count > 1) {
frame_iterator.ForEachFrame([&eval](FramePtr f) { eval.Eval(f, f); },
**threading_, thread_count);
} else {
frame_iterator.ForEachFrame([&eval](FramePtr f) { eval.Eval(f, f); });
}
return frame_iterator.StoreOutput(frame);
};
if (pointwise_evaluators_.size() == 1) {
return run_evaluator(pointwise_evaluators_.front());
} else {
std::vector<TypedValue> res_sum;
res_sum.reserve(output_slots.size());
RETURN_IF_ERROR(run_evaluator(pointwise_evaluators_.front()));
for (const auto& s : output_slots) {
res_sum.push_back(TypedValue::FromSlot(s, frame));
}
for (int eval_id = 1; eval_id < pointwise_evaluators_.size() - 1;
++eval_id) {
RETURN_IF_ERROR(run_evaluator(pointwise_evaluators_[eval_id]));
for (int i = 0; i < output_slots.size(); ++i) {
ASSIGN_OR_RETURN(
res_sum[i],
AddFullFloatArrays(res_sum[i].AsRef(),
TypedRef::FromSlot(output_slots[i], frame)));
}
}
RETURN_IF_ERROR(run_evaluator(pointwise_evaluators_.back()));
for (int i = 0; i < output_slots.size(); ++i) {
ASSIGN_OR_RETURN(
TypedValue full_sum,
AddFullFloatArrays(res_sum[i].AsRef(),
TypedRef::FromSlot(output_slots[i], frame)));
RETURN_IF_ERROR(full_sum.CopyToSlot(output_slots[i], frame));
}
return absl::OkStatus();
}
}
} | #include "arolla/decision_forest/batched_evaluation/batched_forest_evaluator.h"
#include <cmath>
#include <cstdint>
#include <limits>
#include <memory>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/random/distributions.h"
#include "absl/random/random.h"
#include "absl/status/statusor.h"
#include "arolla/array/array.h"
#include "arolla/array/qtype/types.h"
#include "arolla/decision_forest/decision_forest.h"
#include "arolla/decision_forest/split_conditions/interval_split_condition.h"
#include "arolla/decision_forest/split_conditions/set_of_values_split_condition.h"
#include "arolla/decision_forest/testing/test_util.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/memory_allocation.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/util/threading.h"
namespace arolla {
namespace {
absl::StatusOr<DecisionForestPtr> CreateTestForest() {
constexpr float kInf = std::numeric_limits<float>::infinity();
constexpr auto S = DecisionTreeNodeId::SplitNodeId;
constexpr auto A = DecisionTreeNodeId::AdjustmentId;
std::vector<DecisionTree> trees(2);
trees[0].tag = {.submodel_id = 0};
trees[0].adjustments = {0.5, 1.5, 2.5, 3.5};
trees[0].split_nodes = {
{S(1), S(2), IntervalSplit(0, 1.5, kInf)},
{A(0), A(2), SetOfValuesSplit<int64_t>(1, {1, 2}, false)},
{A(1), A(3), IntervalSplit(0, -kInf, 10)}};
trees[1].tag = {.submodel_id = 1};
trees[1].adjustments = {-1.0, 1.0};
trees[1].split_nodes = {{A(0), A(1), IntervalSplit(0, 1, 5)}};
return DecisionForest::FromTrees(std::move(trees));
}
TEST(BatchedForestEvaluator, EvalBatch) {
ASSERT_OK_AND_ASSIGN(auto forest, CreateTestForest());
std::vector<TreeFilter> groups{{.submodels = {0}}, {.submodels = {1}}};
ASSERT_OK_AND_ASSIGN(auto eval,
BatchedForestEvaluator::Compile(*forest, groups));
FrameLayout::Builder bldr;
auto in1_slot = bldr.AddSlot<DenseArray<float>>();
auto in2_slot = bldr.AddSlot<DenseArray<int64_t>>();
auto out1_slot = bldr.AddSlot<DenseArray<float>>();
auto out2_slot = bldr.AddSlot<DenseArray<float>>();
FrameLayout layout = std::move(bldr).Build();
MemoryAllocation alloc(&layout);
FramePtr frame = alloc.frame();
frame.Set(in1_slot,
CreateDenseArray<float>({0, 0, 1.2, 1.6, 7.0, 13.5, NAN}));
frame.Set(in2_slot, CreateDenseArray<int64_t>({3, 1, 1, 1, 1, 1, {}}));
{
ASSERT_OK(eval->EvalBatch(
{TypedSlot::FromSlot(in1_slot), TypedSlot::FromSlot(in2_slot)},
{TypedSlot::FromSlot(out1_slot), TypedSlot::FromSlot(out2_slot)},
frame));
EXPECT_THAT(frame.Get(out1_slot),
::testing::ElementsAre(0.5, 2.5, 2.5, 3.5, 3.5, 1.5, 0.5));
EXPECT_THAT(frame.Get(out2_slot),
::testing::ElementsAre(-1, -1, 1, 1, -1, -1, -1));
}
frame.Set(out1_slot, DenseArray<float>());
frame.Set(out2_slot, DenseArray<float>());
{
BatchedForestEvaluator::SetThreading(std::make_unique<StdThreading>(2));
ASSERT_OK(eval->EvalBatch(
{TypedSlot::FromSlot(in1_slot), TypedSlot::FromSlot(in2_slot)},
{TypedSlot::FromSlot(out1_slot), TypedSlot::FromSlot(out2_slot)},
frame));
EXPECT_THAT(frame.Get(out1_slot),
::testing::ElementsAre(0.5, 2.5, 2.5, 3.5, 3.5, 1.5, 0.5));
EXPECT_THAT(frame.Get(out2_slot),
::testing::ElementsAre(-1, -1, 1, 1, -1, -1, -1));
BatchedForestEvaluator::SetThreading(nullptr);
}
frame.Set(out1_slot, DenseArray<float>());
frame.Set(out2_slot, DenseArray<float>());
{
BatchedForestEvaluator::SetThreading(std::make_unique<StdThreading>(2),
1);
ASSERT_OK(eval->EvalBatch(
{TypedSlot::FromSlot(in1_slot), TypedSlot::FromSlot(in2_slot)},
{TypedSlot::FromSlot(out1_slot), TypedSlot::FromSlot(out2_slot)},
frame));
EXPECT_THAT(frame.Get(out1_slot),
::testing::ElementsAre(0.5, 2.5, 2.5, 3.5, 3.5, 1.5, 0.5));
EXPECT_THAT(frame.Get(out2_slot),
::testing::ElementsAre(-1, -1, 1, 1, -1, -1, -1));
BatchedForestEvaluator::SetThreading(nullptr);
}
}
TEST(BatchedForestEvaluator, UnusedInputs) {
constexpr auto A = DecisionTreeNodeId::AdjustmentId;
DecisionTree tree;
tree.adjustments = {-1, 1};
tree.split_nodes = {{A(0), A(1), IntervalSplit(2, 0, 1)}};
ASSERT_OK_AND_ASSIGN(auto forest, DecisionForest::FromTrees({tree}));
ASSERT_OK_AND_ASSIGN(auto eval, BatchedForestEvaluator::Compile(*forest));
FrameLayout::Builder bldr;
auto unused1_slot = bldr.AddSlot<DenseArray<int64_t>>();
auto unused2_slot = bldr.AddSlot<DenseArray<int64_t>>();
auto in_slot = bldr.AddSlot<DenseArray<float>>();
auto out_slot = bldr.AddSlot<DenseArray<float>>();
FrameLayout layout = std::move(bldr).Build();
MemoryAllocation alloc(&layout);
FramePtr frame = alloc.frame();
frame.Set(in_slot, CreateDenseArray<float>({-1, 0.5, 2}));
ASSERT_OK(eval->EvalBatch(
{TypedSlot::FromSlot(unused1_slot), TypedSlot::FromSlot(unused2_slot),
TypedSlot::FromSlot(in_slot)},
{TypedSlot::FromSlot(out_slot)}, frame));
EXPECT_THAT(frame.Get(out_slot), ::testing::ElementsAre(-1, 1, -1));
}
TEST(BatchedForestEvaluator, AllInputUnused) {
std::vector<DecisionTree> trees(1);
trees[0].adjustments = {1.5};
ASSERT_OK_AND_ASSIGN(DecisionForestPtr forest,
DecisionForest::FromTrees(std::move(trees)));
std::vector<TreeFilter> groups{{.submodels = {0}}};
ASSERT_OK_AND_ASSIGN(auto eval,
BatchedForestEvaluator::Compile(*forest, groups));
FrameLayout::Builder bldr;
auto in1_slot = bldr.AddSlot<DenseArray<float>>();
auto in2_slot = bldr.AddSlot<DenseArray<int64_t>>();
auto out_slot = bldr.AddSlot<DenseArray<float>>();
FrameLayout layout = std::move(bldr).Build();
MemoryAllocation alloc(&layout);
FramePtr frame = alloc.frame();
frame.Set(in1_slot,
CreateDenseArray<float>({0, 0, 1.2, 1.6, 7.0, 13.5, NAN}));
frame.Set(in2_slot, CreateDenseArray<int64_t>({3, 1, 1, 1, 1, 1, {}}));
ASSERT_OK(eval->EvalBatch(
{TypedSlot::FromSlot(in1_slot), TypedSlot::FromSlot(in2_slot)},
{TypedSlot::FromSlot(out_slot)}, frame));
EXPECT_THAT(frame.Get(out_slot),
::testing::ElementsAre(1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5));
}
TEST(BatchedForestEvaluator, SplitCountPerEvaluator) {
constexpr int64_t min_num_splits = 10;
constexpr int64_t max_num_splits = 30;
constexpr int64_t num_trees = 100;
constexpr int64_t batch_size = 10;
absl::BitGen rnd;
constexpr int64_t min_total_split_count = num_trees * min_num_splits;
int64_t split_count_per_evaluator = absl::Uniform<int64_t>(
rnd, min_total_split_count / 5, min_total_split_count * 4 / 5);
auto forest =
CreateRandomFloatForest(&rnd, 10, true,
min_num_splits, max_num_splits, num_trees);
ASSERT_OK_AND_ASSIGN(auto evaluator,
BatchedForestEvaluator::Compile(*forest));
ASSERT_OK_AND_ASSIGN(
auto subdivided_evaluator,
BatchedForestEvaluator::Compile(*forest, {TreeFilter()},
{split_count_per_evaluator}));
std::vector<TypedSlot> slots;
FrameLayout::Builder layout_builder;
ASSERT_OK(CreateArraySlotsForForest(*forest, &layout_builder, &slots));
auto dense_array_output_slot = layout_builder.AddSlot<DenseArray<float>>();
auto array_output_slot = layout_builder.AddSlot<Array<float>>();
FrameLayout layout = std::move(layout_builder).Build();
MemoryAllocation ctx(&layout);
FramePtr frame = ctx.frame();
for (auto slot : slots) {
ASSERT_OK(FillArrayWithRandomValues(batch_size, slot, frame, &rnd));
}
ASSERT_OK(evaluator->EvalBatch(slots,
{TypedSlot::FromSlot(dense_array_output_slot)},
frame, nullptr, batch_size));
ASSERT_OK(evaluator->EvalBatch(slots,
{TypedSlot::FromSlot(array_output_slot)},
frame, nullptr, batch_size));
DenseArray<float> dense_array1 = frame.Get(dense_array_output_slot);
Array<float> array1 = frame.Get(array_output_slot);
frame.Set(dense_array_output_slot, DenseArray<float>());
frame.Set(array_output_slot, Array<float>());
ASSERT_OK(subdivided_evaluator->EvalBatch(
slots, {TypedSlot::FromSlot(dense_array_output_slot)}, frame, nullptr,
batch_size));
ASSERT_OK(subdivided_evaluator->EvalBatch(
slots, {TypedSlot::FromSlot(array_output_slot)}, frame, nullptr,
batch_size));
DenseArray<float> dense_array2 = frame.Get(dense_array_output_slot);
Array<float> array2 = frame.Get(array_output_slot);
ASSERT_EQ(dense_array1.size(), batch_size);
ASSERT_EQ(array1.size(), batch_size);
ASSERT_EQ(dense_array2.size(), batch_size);
ASSERT_EQ(array2.size(), batch_size);
for (int64_t i = 0; i < batch_size; ++i) {
bool present = array1[i].present;
EXPECT_EQ(array2[i].present, present);
EXPECT_EQ(dense_array1[i].present, present);
EXPECT_EQ(dense_array2[i].present, present);
if (present) {
float value = array1[i].value;
EXPECT_FLOAT_EQ(array2[i].value, value);
EXPECT_FLOAT_EQ(dense_array1[i].value, value);
EXPECT_FLOAT_EQ(dense_array2[i].value, value);
}
}
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/decision_forest/batched_evaluation/batched_forest_evaluator.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/decision_forest/batched_evaluation/batched_forest_evaluator_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
780c0485-dad1-448e-b5ed-a3e5f8d5dfb2 | cpp | google/arolla | forest_evaluator | arolla/decision_forest/pointwise_evaluation/forest_evaluator.cc | arolla/decision_forest/pointwise_evaluation/forest_evaluator_test.cc | #include "arolla/decision_forest/pointwise_evaluation/forest_evaluator.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <map>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/container/inlined_vector.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "arolla/decision_forest/decision_forest.h"
#include "arolla/decision_forest/pointwise_evaluation/bitmask_builder.h"
#include "arolla/decision_forest/pointwise_evaluation/bound_split_conditions.h"
#include "arolla/decision_forest/pointwise_evaluation/oblivious.h"
#include "arolla/decision_forest/pointwise_evaluation/single_input_eval.h"
#include "arolla/decision_forest/split_condition.h"
#include "arolla/decision_forest/split_conditions/interval_split_condition.h"
#include "arolla/memory/frame.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/util/fast_dynamic_downcast_final.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla {
namespace {
bool HasOnlyIntervalSplitConditions(const DecisionTree& tree) {
for (const auto& split_node : tree.split_nodes) {
if (!fast_dynamic_downcast_final<const IntervalSplitCondition*>(
split_node.condition.get()))
return false;
}
return true;
}
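// Maps each tree to the index of the single output whose filter accepts it
// (-1 if no filter matches). Returns an error if `outputs` is empty or if two
// output filters overlap on the same tree.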
absl::StatusOr<std::vector<int>> SplitTreesByGroups(
absl::Span<const DecisionTree> trees,
absl::Span<const ForestEvaluator::Output> outputs) {
if (outputs.empty()) {
return absl::InvalidArgumentError("at least one output is expected");
}
std::vector<int> tree2group(trees.size(), -1);
for (int group_id = 0; group_id < outputs.size(); ++group_id) {
for (int tree_id = 0; tree_id < trees.size(); ++tree_id) {
if (!outputs[group_id].filter(trees[tree_id].tag)) continue;
if (tree2group[tree_id] != -1) {
return absl::InvalidArgumentError(absl::StrFormat(
"intersection of groups for outputs #%d and #%d is not empty",
tree2group[tree_id], group_id));
}
tree2group[tree_id] = group_id;
}
}
return tree2group;
}
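// Returns the input signature if every split condition in the tree depends on
// exactly one and the same input; returns std::nullopt otherwise.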
std::optional<SplitCondition::InputSignature> GetSingleInputSignature(
const DecisionTree& tree) {
std::optional<SplitCondition::InputSignature> input_signature;
for (const auto& node : tree.split_nodes) {
auto signatures = node.condition->GetInputSignatures();
if (signatures.size() != 1 ||
(input_signature && input_signature->id != signatures[0].id)) {
return std::nullopt;
}
input_signature = signatures[0];
}
return input_signature;
}
}
class ForestEvaluator::RegularPredictorsBuilder {
public:
RegularPredictorsBuilder(int group_count,
absl::Span<const TypedSlot> input_slots)
: group_count_(group_count),
input_slots_(input_slots.begin(), input_slots.end()),
universal_compilers_(group_count),
interval_splits_compilers_(group_count) {}
absl::Status AddTree(const DecisionTree& tree, int group_id) {
if (HasOnlyIntervalSplitConditions(tree)) {
return AddTreeToRegularForestCompiler(
tree,
[this](const std::shared_ptr<const SplitCondition>& cond) {
auto interval_cond =
std::static_pointer_cast<const IntervalSplitCondition>(cond);
return IntervalBoundCondition::Create(interval_cond, input_slots_);
},
&interval_splits_compilers_[group_id]);
} else {
return AddTreeToRegularForestCompiler(
tree,
[this](const std::shared_ptr<const SplitCondition>& cond) {
return UniversalBoundCondition::Create(cond, input_slots_);
},
&universal_compilers_[group_id]);
}
}
absl::StatusOr<RegularPredictorsList> Build() && {
RegularPredictorsList res;
res.reserve(group_count_);
for (int i = 0; i < group_count_; ++i) {
ASSIGN_OR_RETURN(auto universal_predictor,
universal_compilers_[i].Compile());
ASSIGN_OR_RETURN(auto interval_splits_predictor,
interval_splits_compilers_[i].Compile());
res.push_back({std::move(universal_predictor),
std::move(interval_splits_predictor)});
}
return res;
}
private:
template <typename ForestCompiler, typename CreateConditionFunc>
absl::Status AddTreeToRegularForestCompiler(const DecisionTree& tree,
CreateConditionFunc create_cond,
ForestCompiler* forest_compiler) {
auto tree_compiler = forest_compiler->AddTree(
tree.split_nodes.size() + tree.adjustments.size(),
tree.tag.submodel_id);
for (int64_t id = 0; id < tree.split_nodes.size(); ++id) {
const auto& split_node = tree.split_nodes[id];
auto child_if_false = split_node.child_if_false.is_leaf()
? split_node.child_if_false.adjustment_index() +
tree.split_nodes.size()
: split_node.child_if_false.split_node_index();
auto child_if_true = split_node.child_if_true.is_leaf()
? split_node.child_if_true.adjustment_index() +
tree.split_nodes.size()
: split_node.child_if_true.split_node_index();
ASSIGN_OR_RETURN(auto cond, create_cond(split_node.condition));
RETURN_IF_ERROR(
tree_compiler.SetNode(id, child_if_true, child_if_false, cond));
}
for (int64_t i = 0; i < tree.adjustments.size(); ++i) {
RETURN_IF_ERROR(tree_compiler.SetLeaf(i + tree.split_nodes.size(),
tree.adjustments[i] * tree.weight));
}
return absl::OkStatus();
}
int group_count_;
std::vector<TypedSlot> input_slots_;
std::vector<PredictorCompiler<UniversalBoundCondition>> universal_compilers_;
std::vector<PredictorCompiler<IntervalBoundCondition>>
interval_splits_compilers_;
};
absl::StatusOr<ForestEvaluator> ForestEvaluator::Compile(
const DecisionForest& decision_forest,
absl::Span<const TypedSlot> input_slots, absl::Span<const Output> outputs,
CompilationParams params) {
ASSIGN_OR_RETURN(auto tree2group,
SplitTreesByGroups(decision_forest.GetTrees(), outputs));
std::vector<FrameLayout::Slot<float>> output_slots;
output_slots.reserve(outputs.size());
for (const auto& output : outputs) {
output_slots.push_back(output.slot);
}
RegularPredictorsBuilder regular_builder(outputs.size(), input_slots);
BitmaskBuilder bitmask_builder(input_slots, output_slots);
SingleInputBuilder single_input_builder(input_slots, output_slots);
std::vector<std::map<int, double>> consts(outputs.size());
if (tree2group.size() != decision_forest.GetTrees().size()) {
return absl::InternalError("size of tree2group doesn't match trees");
}
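  // Dispatch each tree to the cheapest enabled evaluator that supports it:
  // constant trees are folded into per-(output, submodel) sums, trees that
  // read a single input go to the single-input evaluator, small or oblivious
  // trees with supported splits go to the bitmask evaluator, and everything
  // else falls back to the regular evaluator.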
for (size_t i = 0; i < decision_forest.GetTrees().size(); ++i) {
if (tree2group[i] == -1) {
continue;
}
if (tree2group[i] < 0 || tree2group[i] >= outputs.size()) {
return absl::InternalError("invalid tree2group mapping");
}
const DecisionTree& tree = decision_forest.GetTrees()[i];
if (params.enable_regular_eval && tree.split_nodes.empty()) {
consts[tree2group[i]][tree.tag.submodel_id] +=
tree.adjustments[0] * tree.weight;
continue;
}
if (params.enable_single_input_eval) {
if (std::optional<SplitCondition::InputSignature> input_signature =
GetSingleInputSignature(tree)) {
if (single_input_builder.IsInputTypeSupported(input_signature->type)) {
RETURN_IF_ERROR(single_input_builder.AddTree(tree, *input_signature,
tree2group[i]));
continue;
}
}
}
if (params.enable_bitmask_eval &&
std::all_of(tree.split_nodes.begin(), tree.split_nodes.end(),
BitmaskBuilder::IsSplitNodeSupported)) {
auto oblivious = ToObliviousTree(tree);
if (oblivious.has_value() && (oblivious->layer_splits.size() <=
BitmaskBuilder::kMaxRegionsForBitmask)) {
bitmask_builder.AddObliviousTree(*std::move(oblivious), tree2group[i]);
continue;
}
if (tree.adjustments.size() <= BitmaskBuilder::kMaxRegionsForBitmask) {
bitmask_builder.AddSmallTree(tree, tree2group[i]);
continue;
}
}
if (params.enable_regular_eval) {
RETURN_IF_ERROR(regular_builder.AddTree(tree, tree2group[i]));
} else {
return absl::InvalidArgumentError(
"No suitable evaluator. Use enable_regular_eval=true.");
}
}
for (int group_id = 0; group_id < consts.size(); ++group_id) {
for (const auto& [submodel_id, value] : consts[group_id]) {
DecisionTree tree;
tree.adjustments = {static_cast<float>(value)};
tree.tag.submodel_id = submodel_id;
RETURN_IF_ERROR(regular_builder.AddTree(tree, group_id));
}
}
ASSIGN_OR_RETURN(auto regular_predictors, std::move(regular_builder).Build());
ASSIGN_OR_RETURN(auto bitmask_predictor, std::move(bitmask_builder).Build());
ASSIGN_OR_RETURN(auto single_input_predictor,
std::move(single_input_builder).Build());
return ForestEvaluator(std::move(output_slots), std::move(regular_predictors),
std::move(bitmask_predictor),
std::move(single_input_predictor));
}
void ForestEvaluator::Eval(const ConstFramePtr input_ctx,
FramePtr output_ctx) const {
for (size_t i = 0; i < output_slots_.size(); ++i) {
*output_ctx.GetMutable(output_slots_[i]) =
regular_predictors_[i].Predict(input_ctx);
}
if (bitmask_predictor_) {
bitmask_predictor_->IncrementalEval(input_ctx, output_ctx);
}
single_input_predictor_.IncrementalEval(input_ctx, output_ctx);
}
} | #include "arolla/decision_forest/pointwise_evaluation/forest_evaluator.h"
#include <cmath>
#include <cstdint>
#include <limits>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/random/distributions.h"
#include "absl/random/random.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "arolla/decision_forest/decision_forest.h"
#include "arolla/decision_forest/split_condition.h"
#include "arolla/decision_forest/split_conditions/interval_split_condition.h"
#include "arolla/decision_forest/split_conditions/set_of_values_split_condition.h"
#include "arolla/decision_forest/testing/test_util.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/memory_allocation.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qtype/optional_qtype.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/util/bytes.h"
namespace arolla {
namespace {
using ::absl_testing::StatusIs;
using ::testing::HasSubstr;
constexpr float kInf = std::numeric_limits<float>::infinity();
constexpr auto S = DecisionTreeNodeId::SplitNodeId;
constexpr auto A = DecisionTreeNodeId::AdjustmentId;
const ForestEvaluator::CompilationParams kDefaultEval{
.enable_regular_eval = true,
.enable_bitmask_eval = true,
.enable_single_input_eval = true};
const ForestEvaluator::CompilationParams kRegularEval{
.enable_regular_eval = true,
.enable_bitmask_eval = false,
.enable_single_input_eval = false};
const ForestEvaluator::CompilationParams kBitmaskEval{
.enable_regular_eval = false,
.enable_bitmask_eval = true,
.enable_single_input_eval = false};
const ForestEvaluator::CompilationParams kSingleInputEval{
.enable_regular_eval = false,
.enable_bitmask_eval = false,
.enable_single_input_eval = true};
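// Writes the row_id-th element of each input column into the corresponding
// optional-value slot; the overload without input columns terminates the
// recursion.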
void FillArgs(FramePtr ctx, int row_id, absl::Span<const TypedSlot> slots) {}
template <typename T, typename... Tn>
void FillArgs(FramePtr frame, int row_id, absl::Span<const TypedSlot> slots,
const std::vector<OptionalValue<T>>& inputs1,
const std::vector<OptionalValue<Tn>>&... inputsN) {
auto slot = slots[0].ToSlot<OptionalValue<T>>().value();
frame.Set(slot, inputs1[row_id]);
FillArgs(frame, row_id, slots.subspan(1), inputsN...);
}
class SourceLocation {
public:
SourceLocation(int line, const char* filename)
: line_(line), file_name_(filename) {}
  const char* file_name() const { return file_name_.c_str(); }
constexpr int line() const { return line_; }
static SourceLocation current(int line = __builtin_LINE(),
const char* file_name = __builtin_FILE()) {
return SourceLocation(line, file_name);
}
private:
int line_ = 0;
std::string file_name_;
};
std::string ErrFormat(SourceLocation loc,
ForestEvaluator::CompilationParams params,
const std::string& msg, int row_id) {
return absl::StrFormat(
"%s Test at %s:%d, row_id=%d, params = "
"{enable_regular_eval=%d, enable_bitmask_eval=%d, "
"enable_single_input_eval=%d}",
msg, loc.file_name(), loc.line(), row_id, params.enable_regular_eval,
params.enable_bitmask_eval, params.enable_single_input_eval);
}
template <typename... T>
void TestCases(SourceLocation loc, const DecisionForest& forest,
absl::Span<const TreeFilter> groups,
ForestEvaluator::CompilationParams params,
absl::Span<const std::vector<float>> expected_outputs,
const std::vector<OptionalValue<T>>&... inputs) {
ASSERT_TRUE(((expected_outputs.size() == inputs.size()) && ...))
<< absl::StrCat(
"Input and output vector sizes are different: (",
absl::StrJoin({expected_outputs.size(), inputs.size()...}, ", "),
")");
std::vector<TypedSlot> input_slots;
std::vector<ForestEvaluator::Output> outputs;
FrameLayout::Builder layout_builder;
CreateSlotsForForest(forest, &layout_builder, &input_slots);
outputs.reserve(groups.size());
for (const TreeFilter& filter : groups) {
outputs.push_back({filter, layout_builder.AddSlot<float>()});
}
FrameLayout layout = std::move(layout_builder).Build();
ASSERT_OK_AND_ASSIGN(
auto evaluator,
ForestEvaluator::Compile(forest, input_slots, outputs, params));
MemoryAllocation alloc(&layout);
auto frame = alloc.frame();
for (int i = 0; i < expected_outputs.size(); ++i) {
FillArgs(frame, i, input_slots, inputs...);
evaluator.Eval(frame, frame);
for (int j = 0; j < outputs.size(); ++j) {
EXPECT_EQ(frame.Get(outputs[j].slot), expected_outputs[i][j])
<< ErrFormat(loc, params, "Incorrect output.", i);
}
}
}
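// Splits the trees into two submodel groups, compiles an evaluator for every
// set of compilation params, and compares each result against the naive
// reference implementation on random inputs.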
void RandomTestAgainstReferenceImplementation(
SourceLocation loc, std::vector<DecisionTree> trees,
const std::vector<ForestEvaluator::CompilationParams>& params,
absl::BitGen* rnd) {
for (int i = 0; i < trees.size(); ++i) {
trees[i].tag.submodel_id = i % 4;
}
TreeFilter group0{.submodels{0, 3}};
TreeFilter group1{.submodels{1, 2}};
ASSERT_OK_AND_ASSIGN(auto forest,
DecisionForest::FromTrees(std::move(trees)));
std::vector<TypedSlot> input_slots;
std::vector<ForestEvaluator::Output> outputs;
FrameLayout::Builder layout_builder;
CreateSlotsForForest(*forest, &layout_builder, &input_slots);
outputs.push_back({group0, layout_builder.AddSlot<float>()});
outputs.push_back({group1, layout_builder.AddSlot<float>()});
FrameLayout layout = std::move(layout_builder).Build();
std::vector<ForestEvaluator> evaluators;
for (auto p : params) {
ASSERT_OK_AND_ASSIGN(auto evaluator, ForestEvaluator::Compile(
*forest, input_slots, outputs, p));
evaluators.push_back(std::move(evaluator));
}
MemoryAllocation alloc(&layout);
FramePtr frame = alloc.frame();
for (int item_id = 0; item_id < 15; ++item_id) {
for (auto slot : input_slots) {
ASSERT_OK(FillWithRandomValue(slot, frame, rnd,
0.25));
}
float reference_implementation_res0 =
DecisionForestNaiveEvaluation(*forest, frame, input_slots, group0);
float reference_implementation_res1 =
DecisionForestNaiveEvaluation(*forest, frame, input_slots, group1);
for (int eval_id = 0; eval_id < evaluators.size(); ++eval_id) {
const ForestEvaluator& evaluator = evaluators[eval_id];
frame.Set(outputs[0].slot, 0.0f);
frame.Set(outputs[1].slot, 0.0f);
evaluator.Eval(frame, frame);
EXPECT_FLOAT_EQ(reference_implementation_res0, frame.Get(outputs[0].slot))
<< ErrFormat(loc, params[eval_id], "Incorrect output #0 in Eval",
item_id);
EXPECT_FLOAT_EQ(reference_implementation_res1, frame.Get(outputs[1].slot))
<< ErrFormat(loc, params[eval_id], "Incorrect output #1 in Eval",
item_id);
}
}
}
TEST(ForestEvaluator, GroupsValidation) {
std::vector<DecisionTree> trees(3);
trees[0].tag.submodel_id = 3;
trees[0].adjustments = {1.0};
trees[1].tag.submodel_id = 2;
trees[1].adjustments = {1.0};
trees[2].tag.submodel_id = 1;
trees[2].adjustments = {1.0};
ASSERT_OK_AND_ASSIGN(auto forest,
DecisionForest::FromTrees(std::move(trees)));
EXPECT_THAT(ForestEvaluator::Compile(*forest, {}, {}).status(),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("at least one output is expected")));
auto fake_slot = FrameLayout::Slot<float>::UnsafeUninitializedSlot();
EXPECT_THAT(
ForestEvaluator::Compile(
*forest, {},
{ForestEvaluator::Output{{.submodels = {1, 3}}, fake_slot},
ForestEvaluator::Output{{.submodels = {2, 3}}, fake_slot}})
.status(),
StatusIs(
absl::StatusCode::kInvalidArgument,
HasSubstr(
"intersection of groups for outputs #0 and #1 is not empty")));
EXPECT_OK(ForestEvaluator::Compile(
*forest, {},
{ForestEvaluator::Output{{.submodels = {1, 3}}, fake_slot},
ForestEvaluator::Output{{.submodels = {2}}, fake_slot}})
.status());
EXPECT_OK(ForestEvaluator::Compile(
*forest, {},
{ForestEvaluator::Output{{.submodels = {1}}, fake_slot},
ForestEvaluator::Output{{.submodels = {2}}, fake_slot}})
.status());
}
TEST(ForestEvaluator, EmptyForest) {
ASSERT_OK_AND_ASSIGN(auto forest, DecisionForest::FromTrees({}));
for (auto params :
{kDefaultEval, kRegularEval, kBitmaskEval, kSingleInputEval}) {
TestCases<>(SourceLocation::current(), *forest,
{{.submodels = {0}}, {.submodels = {1}}}, params,
{{0.0, 0.0}});
}
}
TEST(ForestEvaluator, Constant) {
std::vector<DecisionTree> trees(2);
trees[0].tag = {.submodel_id = 0};
trees[0].adjustments = {1.5};
trees[1].tag = {.submodel_id = 1};
trees[1].adjustments = {2.5};
ASSERT_OK_AND_ASSIGN(auto forest,
DecisionForest::FromTrees(std::move(trees)));
for (auto params : {kDefaultEval, kRegularEval, kBitmaskEval}) {
TestCases<>(SourceLocation::current(), *forest,
{{.submodels = {0}}, {.submodels = {1}}}, params,
{{1.5, 2.5}});
}
}
TEST(ForestEvaluator, SmallForest) {
std::vector<DecisionTree> trees(2);
trees[0].tag = {.submodel_id = 0};
trees[0].adjustments = {0.5, 1.5, 2.5, 3.5};
trees[0].split_nodes = {
{S(1), S(2), IntervalSplit(0, 1.5, kInf)},
{A(0), A(2), SetOfValuesSplit<int64_t>(1, {1, 2}, false)},
{A(1), A(3), IntervalSplit(0, -kInf, 10)}};
trees[1].tag = {.submodel_id = 1};
trees[1].adjustments = {-1.0, 1.0};
trees[1].split_nodes = {{A(0), A(1), IntervalSplit(0, 1, 5)}};
ASSERT_OK_AND_ASSIGN(auto forest,
DecisionForest::FromTrees(std::move(trees)));
for (auto params : {kDefaultEval, kRegularEval, kBitmaskEval}) {
TestCases<float, int64_t>(
SourceLocation::current(), *forest,
{{.submodels = {0}}, {.submodels = {1}}}, params,
{{0.5, -1},
{2.5, -1},
{2.5, 1},
{3.5, 1},
{3.5, -1},
{1.5, -1},
{2.5, -1},
{0.5, -1}},
{0, 0, 1.2, 1.6, 7.0, 13.5, NAN, {}},
{3, 1, 1, 1, 1, 1, 1, {}});
}
}
TEST(ForestEvaluator, RangesSplits) {
DecisionTree tree;
tree.split_nodes = {{S(2), S(1), IntervalSplit(0, -1.0, 1.0)},
{A(1), A(2), IntervalSplit(0, 0.5, 0.5)},
{A(0), A(3), IntervalSplit(0, 2.5, 3.5)}};
tree.adjustments = {0.0, 1.0, 2.0, 3.0};
ASSERT_OK_AND_ASSIGN(auto forest, DecisionForest::FromTrees({tree}));
for (auto params :
{kDefaultEval, kRegularEval, kBitmaskEval, kSingleInputEval}) {
TestCases<float>(
SourceLocation::current(), *forest, {{}}, params,
{{0}, {0}, {0}, {1}, {2}, {3}, {3}, {3}},
{{}, -5, 5, -1, 0.5, 2.5, 3.0, 3.5});
}
}
TEST(ForestEvaluator, EqualSplits) {
DecisionTree tree;
tree.split_nodes = {{S(2), S(1), IntervalSplit(0, 1.0, 1.0)},
{A(1), A(2), IntervalSplit(1, 5.0, 5.0)},
{A(0), A(3), IntervalSplit(1, -5.0, -5.0)}};
tree.adjustments = {0.0, 1.0, 2.0, 3.0};
ASSERT_OK_AND_ASSIGN(auto forest, DecisionForest::FromTrees({tree}));
for (auto params : {kDefaultEval, kRegularEval, kBitmaskEval}) {
TestCases<float, float>(
SourceLocation::current(), *forest, {{}}, params,
{{0}, {0}, {0}, {1}, {1}, {2}, {3}, {3}},
{{}, 0.0, -5.0, 1.0, 1.0, 1.0, 0.0, {}},
{{}, {}, {}, {}, -5.0, +5.0, -5.0, -5.0});
}
}
TEST(ForestEvaluator, BytesInput) {
DecisionTree tree;
tree.split_nodes = {
{A(0), A(1), SetOfValuesSplit<Bytes>(0, {Bytes("X")}, false)}};
tree.adjustments = {0.0, 1.0};
ASSERT_OK_AND_ASSIGN(auto forest, DecisionForest::FromTrees({tree}));
for (auto params : {kDefaultEval, kRegularEval}) {
TestCases<Bytes>(SourceLocation::current(), *forest, {{}},
params,
{{0}, {1}, {0}},
{{}, Bytes("X"), Bytes("Y")});
}
}
TEST(ForestEvaluator, BitmaskNotPossible) {
absl::BitGen rnd;
auto forest =
CreateRandomForest(&rnd, 10, true,
70, 70,
1);
std::vector<TypedSlot> slots;
FrameLayout::Builder layout_builder;
CreateSlotsForForest(*forest, &layout_builder, &slots);
EXPECT_THAT(
SimpleForestEvaluator::Compile(*forest, slots, kBitmaskEval),
StatusIs(
absl::StatusCode::kInvalidArgument,
HasSubstr("No suitable evaluator. Use enable_regular_eval=true.")));
}
TEST(ForestEvaluator, SingleInputEvalNotPossible) {
DecisionTree tree;
tree.split_nodes = {{S(2), S(1), IntervalSplit(0, 1.0, 1.0)},
{A(1), A(2), IntervalSplit(1, 5.0, 5.0)},
{A(0), A(3), IntervalSplit(1, -5.0, -5.0)}};
tree.adjustments = {0.0, 1.0, 2.0, 3.0};
ASSERT_OK_AND_ASSIGN(auto forest, DecisionForest::FromTrees({tree}));
std::vector<TypedSlot> slots;
FrameLayout::Builder layout_builder;
CreateSlotsForForest(*forest, &layout_builder, &slots);
EXPECT_THAT(
SimpleForestEvaluator::Compile(*forest, slots, kSingleInputEval),
StatusIs(
absl::StatusCode::kInvalidArgument,
HasSubstr("No suitable evaluator. Use enable_regular_eval=true.")));
}
TEST(ForestEvaluator, ObliviousTree) {
DecisionTree tree;
std::vector<std::shared_ptr<SplitCondition>> conditions = {
IntervalSplit(0, -5, 5),
IntervalSplit(1, 0, kInf),
SetOfValuesSplit<int64_t>(2, {1, 2}, false),
IntervalSplit(3, -kInf, 3.0),
SetOfValuesSplit<int64_t>(4, {4, 2}, true),
IntervalSplit(5, -1, 7),
IntervalSplit(6, -kInf, -5)};
int layer_size = 1;
for (int layer = 0; layer < conditions.size(); ++layer) {
int layer_offset = tree.split_nodes.size() + layer_size;
for (int i = 0; i < layer_size; ++i) {
auto left =
(layer == conditions.size() - 1) ? A(i * 2) : S(layer_offset + i * 2);
auto right = (layer == conditions.size() - 1)
? A(i * 2 + 1)
: S(layer_offset + i * 2 + 1);
tree.split_nodes.push_back({left, right, conditions[layer]});
}
layer_size *= 2;
}
for (int i = 0; i < layer_size; ++i) {
tree.adjustments.push_back(i);
}
ASSERT_OK_AND_ASSIGN(auto forest, DecisionForest::FromTrees({tree}));
for (auto params : {kDefaultEval, kRegularEval, kBitmaskEval}) {
TestCases<float, float, int64_t, float, int64_t, float, float>(
SourceLocation::current(), *forest, {{}}, params,
{{58}, {86}, {12}, {39}, {112}},
{{}, 3, -7, 15, -4},
{10, -1, {}, 25, 1},
{2, 1, 3, {}, 1},
{0, {}, -5, 8, 14},
{1, 2, {}, 4, 5},
{0, 4, -3, 7, {}},
{10, 5, -3, -8, {}});
}
}
TEST(ForestEvaluator, TestAgainstReferenceOnSmallTrees) {
absl::BitGen rnd;
std::vector<QTypePtr> types;
for (int input_id = 0; input_id < 10; input_id++) {
types.push_back(GetOptionalQType<float>());
}
for (int input_id = 10; input_id < 15; input_id++) {
types.push_back(GetOptionalQType<int64_t>());
}
for (int iteration = 0; iteration < 10; ++iteration) {
std::vector<DecisionTree> trees;
for (int i = 0; i < 10; ++i) {
int num_splits = absl::Uniform<int32_t>(rnd, 0, 32);
trees.push_back(
CreateRandomTree(&rnd, true, num_splits, &types));
}
RandomTestAgainstReferenceImplementation(
SourceLocation::current(), trees,
{kDefaultEval, kRegularEval, kBitmaskEval}, &rnd);
}
}
TEST(ForestEvaluator, TestAgainstReferenceOnSingleInputTrees) {
absl::BitGen rnd;
std::vector<QTypePtr> types;
for (int input_id = 0; input_id < 10; input_id++) {
types.push_back(GetOptionalQType<float>());
}
for (int input_id = 10; input_id < 15; input_id++) {
types.push_back(GetOptionalQType<int64_t>());
}
for (int iteration = 0; iteration < 10; ++iteration) {
std::vector<DecisionTree> trees;
for (int i = 0; i < 10; ++i) {
int num_splits = absl::Uniform<int32_t>(rnd, 1, 1024);
trees.push_back(
CreateRandomTree(&rnd, false, num_splits, &types));
}
RandomTestAgainstReferenceImplementation(
SourceLocation::current(), trees,
{kDefaultEval, kRegularEval, kSingleInputEval}, &rnd);
}
}
TEST(ForestEvaluator, TestAgainstReference) {
absl::BitGen rnd;
std::vector<QTypePtr> types;
for (int feature_id = 0; feature_id < 10; feature_id++) {
types.push_back(GetOptionalQType<float>());
}
for (int feature_id = 10; feature_id < 15; feature_id++) {
types.push_back(GetOptionalQType<int64_t>());
}
for (int iteration = 0; iteration < 5; ++iteration) {
std::vector<DecisionTree> trees;
for (int i = 0; i < 10; ++i) {
int num_splits = absl::Uniform<int32_t>(rnd, 0, 1024);
trees.push_back(
CreateRandomTree(&rnd, true, num_splits, &types));
}
for (int i = 0; i < 10; ++i) {
int num_splits = absl::Uniform<int32_t>(rnd, 0, 1024);
trees.push_back(
CreateRandomTree(&rnd, false, num_splits, &types));
}
for (int i = 0; i < 10; ++i) {
int num_splits = absl::Uniform<int32_t>(rnd, 0, 1024);
trees.push_back(CreateRandomFloatTree(
&rnd, 10, true, num_splits,
0.4, 0.4));
}
for (int i = 0; i < 10; ++i) {
int num_splits = absl::Uniform<int32_t>(rnd, 0, 32);
trees.push_back(
CreateRandomTree(&rnd, true, num_splits, &types));
}
for (int i = 0; i < 5; ++i) {
int depth = absl::Uniform<int32_t>(rnd, 1, 20);
trees.push_back(CreateRandomObliviousTree(&rnd, depth, &types));
}
RandomTestAgainstReferenceImplementation(
SourceLocation::current(), trees, {kDefaultEval, kRegularEval}, &rnd);
}
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/decision_forest/pointwise_evaluation/forest_evaluator.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/decision_forest/pointwise_evaluation/forest_evaluator_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
7b28c983-e40f-40d6-8d0d-bb13ca2bdef9 | cpp | google/arolla | oblivious | arolla/decision_forest/pointwise_evaluation/oblivious.cc | arolla/decision_forest/pointwise_evaluation/oblivious_test.cc | #include "arolla/decision_forest/pointwise_evaluation/oblivious.h"
#include <cstddef>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "arolla/decision_forest/decision_forest.h"
#include "arolla/decision_forest/split_condition.h"
namespace arolla {
namespace {
bool IsPowerOf2(size_t x) { return (x & (x - 1)) == 0; }
struct StackEntry {
DecisionTreeNodeId node_id;
int depth;
};
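// Iterative depth-first traversal. Invokes callback(node_id, depth) for every
// node and stops early, returning false, as soon as the callback returns
// false.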
template <typename CallbackFn>
bool TraverseTree(const DecisionTree& tree, CallbackFn callback) {
std::vector<StackEntry> stack;
stack.reserve(32);
stack.push_back(StackEntry{GetTreeRootId(tree), 0});
while (!stack.empty()) {
auto [node_id, depth] = stack.back();
stack.pop_back();
if (!callback(node_id, depth)) {
return false;
}
if (!node_id.is_leaf()) {
const auto& node = tree.split_nodes[node_id.split_node_index()];
stack.push_back(StackEntry{node.child_if_true, depth + 1});
stack.push_back(StackEntry{node.child_if_false, depth + 1});
}
}
return true;
}
}
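// Converts a decision tree to the oblivious form: all leaves must be at the
// same depth and all split nodes of one layer must share an identical
// condition; otherwise returns std::nullopt. Leaf adjustments are multiplied
// by the tree weight.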
std::optional<ObliviousDecisionTree> ToObliviousTree(const DecisionTree& tree) {
size_t region_count = tree.adjustments.size();
if (!IsPowerOf2(region_count)) {
return std::nullopt;
}
size_t depth = region_count ? __builtin_ctz(region_count) : 0;
std::vector<std::shared_ptr<const SplitCondition>> layer_splits;
layer_splits.reserve(depth);
std::vector<float> adjustments;
adjustments.reserve(region_count);
auto process_node = [&](DecisionTreeNodeId node_id, int current_depth) {
if (node_id.is_leaf()) {
if (current_depth != depth) {
return false;
}
adjustments.push_back(tree.adjustments[node_id.adjustment_index()] *
tree.weight);
} else {
if (current_depth >= depth) {
return false;
}
const auto& node = tree.split_nodes[node_id.split_node_index()];
if (layer_splits.size() == current_depth) {
layer_splits.push_back(node.condition);
} else {
DCHECK_LT(current_depth, layer_splits.size());
if (*layer_splits[current_depth] != *node.condition) {
return false;
}
}
}
return true;
};
if (!TraverseTree(tree, process_node)) {
return std::nullopt;
}
return ObliviousDecisionTree{tree.tag, std::move(layer_splits),
std::move(adjustments)};
}
} | #include "arolla/decision_forest/pointwise_evaluation/oblivious.h"
#include <limits>
#include <memory>
#include <optional>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "arolla/decision_forest/decision_forest.h"
#include "arolla/decision_forest/split_condition.h"
#include "arolla/decision_forest/split_conditions/interval_split_condition.h"
namespace arolla {
namespace {
using ::testing::ElementsAre;
constexpr auto S = DecisionTreeNodeId::SplitNodeId;
constexpr auto A = DecisionTreeNodeId::AdjustmentId;
constexpr float inf = std::numeric_limits<float>::infinity();
std::shared_ptr<SplitCondition> Cond(int input_id, float left, float right) {
return std::make_shared<IntervalSplitCondition>(input_id, left, right);
}
TEST(ObliviousTest, Errors) {
{
DecisionTree tree;
tree.split_nodes = {{A(0), S(1), Cond(0, -inf, 1.0)},
{A(1), A(2), Cond(0, -1.0, inf)}};
tree.adjustments = {0.0, 1.0, 2.0};
EXPECT_EQ(ToObliviousTree(tree), std::nullopt);
}
{
DecisionTree tree;
tree.split_nodes = {{A(0), S(1), Cond(0, -inf, 1.0)},
{S(2), A(2), Cond(0, -1.0, inf)},
{A(1), A(3), Cond(0, -1.0, inf)}};
tree.adjustments = {0.0, 1.0, 2.0, 3.0};
EXPECT_EQ(ToObliviousTree(tree), std::nullopt);
}
{
DecisionTree tree;
tree.split_nodes = {{S(2), S(1), Cond(0, -inf, 1.0)},
{A(1), A(2), Cond(0, -1.0, inf)},
{A(0), A(3), Cond(0, 1.0, inf)}};
tree.adjustments = {0.0, 1.0, 2.0, 3.0};
EXPECT_EQ(ToObliviousTree(tree), std::nullopt);
}
}
TEST(ObliviousTest, Ok) {
{
DecisionTree tree;
tree.adjustments = {2.0};
tree.weight = 0.5;
auto oblivious_tree = ToObliviousTree(tree);
ASSERT_TRUE(oblivious_tree.has_value());
EXPECT_THAT(oblivious_tree->layer_splits, ElementsAre());
EXPECT_THAT(oblivious_tree->adjustments, ElementsAre(1.0));
}
{
DecisionTree tree;
tree.split_nodes = {{A(0), A(1), Cond(0, -inf, 1.0)}};
tree.adjustments = {7.0, 3.0};
tree.weight = 2.0;
auto oblivious_tree = ToObliviousTree(tree);
ASSERT_TRUE(oblivious_tree.has_value());
EXPECT_EQ(oblivious_tree->layer_splits.size(), 1);
EXPECT_EQ(*oblivious_tree->layer_splits[0], *Cond(0, -inf, 1.0));
EXPECT_THAT(oblivious_tree->adjustments, ElementsAre(14.0, 6.0));
}
{
DecisionTree tree;
tree.split_nodes = {{S(2), S(1), Cond(0, -inf, 1.0)},
{A(1), A(2), Cond(0, -1.0, inf)},
{A(0), A(3), Cond(0, -1.0, inf)}};
tree.adjustments = {0.0, 1.0, 2.0, 3.0};
auto oblivious_tree = ToObliviousTree(tree);
ASSERT_TRUE(oblivious_tree.has_value());
EXPECT_EQ(oblivious_tree->layer_splits.size(), 2);
EXPECT_EQ(*oblivious_tree->layer_splits[0], *Cond(0, -inf, 1.0));
EXPECT_EQ(*oblivious_tree->layer_splits[1], *Cond(0, -1.0, inf));
EXPECT_THAT(oblivious_tree->adjustments, ElementsAre(0.0, 3.0, 1.0, 2.0));
}
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/decision_forest/pointwise_evaluation/oblivious.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/decision_forest/pointwise_evaluation/oblivious_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
534e3bfc-a9e2-4780-8149-1ee9748e5b1d | cpp | google/arolla | test_util | arolla/decision_forest/testing/test_util.cc | arolla/decision_forest/testing/test_util_test.cc | #include "arolla/decision_forest/testing/test_util.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <limits>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/random/distributions.h"
#include "absl/random/random.h"
#include "absl/status/status.h"
#include "absl/types/span.h"
#include "arolla/decision_forest/decision_forest.h"
#include "arolla/decision_forest/split_condition.h"
#include "arolla/decision_forest/split_conditions/interval_split_condition.h"
#include "arolla/decision_forest/split_conditions/set_of_values_split_condition.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/optional_qtype.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/typed_slot.h"
namespace arolla {
namespace {
constexpr int kSetOfValuesSize = 10;
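// Builds a tree with `num_splits` split nodes laid out in level order (the
// children of node i are 2*i+1 and 2*i+2, overflowing into adjustment ids)
// and random leaf adjustments; `condition_factory` produces the condition for
// a given feature id. Without interactions the whole tree uses one feature.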
template <typename ConditionFactoryFn>
DecisionTree CreateRandomTreeImpl(absl::BitGen* rnd, int num_features,
bool interactions, int num_splits,
ConditionFactoryFn condition_factory) {
DecisionTree tree;
tree.adjustments.resize(num_splits + 1);
for (float& val : tree.adjustments) {
val = absl::Uniform<uint8_t>(*rnd);
}
int single_feature_id = absl::Uniform<int32_t>(*rnd, 0, num_features);
for (int i = 0; i < num_splits; ++i) {
auto child1 =
i * 2 + 1 < num_splits
? DecisionTreeNodeId::SplitNodeId(i * 2 + 1)
: DecisionTreeNodeId::AdjustmentId(i * 2 + 1 - num_splits);
auto child2 =
i * 2 + 2 < num_splits
? DecisionTreeNodeId::SplitNodeId(i * 2 + 2)
: DecisionTreeNodeId::AdjustmentId(i * 2 + 2 - num_splits);
int feature_id;
if (interactions) {
feature_id = absl::Uniform<int32_t>(*rnd, 0, num_features);
} else {
feature_id = single_feature_id;
}
tree.split_nodes.push_back({child1, child2, condition_factory(feature_id)});
}
return tree;
}
}
absl::Status FillWithRandomValue(TypedSlot tslot, FramePtr ctx,
absl::BitGen* rnd, double missed_prob) {
if (tslot.byte_offset() == FrameLayout::Slot<float>::kUninitializedOffset) {
return absl::OkStatus();
}
bool missed = (absl::Uniform<float>(*rnd, 0, 1) < missed_prob);
if (tslot.GetType() == GetOptionalQType<float>()) {
auto slot = tslot.ToSlot<OptionalValue<float>>().value();
auto val = OptionalValue<float>(absl::Uniform<float>(*rnd, 0, 1));
ctx.Set(slot, missed ? OptionalValue<float>{} : val);
} else if (tslot.GetType() == GetOptionalQType<int64_t>()) {
auto slot = tslot.ToSlot<OptionalValue<int64_t>>().value();
auto val = OptionalValue<int64_t>(absl::Uniform<int64_t>(*rnd, 0, 1000));
ctx.Set(slot, missed ? OptionalValue<int64_t>{} : val);
} else {
return absl::UnimplementedError(std::string("Unimplemented for type: ") +
std::string(tslot.GetType()->name()));
}
return absl::OkStatus();
}
absl::Status FillArrayWithRandomValues(int64_t size, TypedSlot tslot,
FramePtr ctx, absl::BitGen* rnd,
double missed_prob) {
if (tslot.byte_offset() == FrameLayout::Slot<float>::kUninitializedOffset) {
return absl::OkStatus();
}
if (tslot.GetType() == GetDenseArrayQType<float>()) {
auto slot = tslot.UnsafeToSlot<DenseArray<float>>();
DenseArrayBuilder<float> bldr(size);
for (int64_t i = 0; i < size; ++i) {
bool missed = (absl::Uniform<float>(*rnd, 0, 1) < missed_prob);
if (!missed) {
bldr.Set(i, absl::Uniform<float>(*rnd, 0, 1));
}
}
ctx.Set(slot, std::move(bldr).Build());
} else if (tslot.GetType() == GetDenseArrayQType<int64_t>()) {
auto slot = tslot.UnsafeToSlot<DenseArray<int64_t>>();
DenseArrayBuilder<int64_t> bldr(size);
for (int64_t i = 0; i < size; ++i) {
bool missed = (absl::Uniform<float>(*rnd, 0, 1) < missed_prob);
if (!missed) {
bldr.Set(i, absl::Uniform<int64_t>(*rnd, 0, 1000));
}
}
ctx.Set(slot, std::move(bldr).Build());
} else {
return absl::UnimplementedError(std::string("Unimplemented for type: ") +
std::string(tslot.GetType()->name()));
}
return absl::OkStatus();
}
void CreateSlotsForForest(const DecisionForest& forest,
FrameLayout::Builder* layout_builder,
std::vector<TypedSlot>* slots) {
auto placeholder =
TypedSlot::FromSlot(FrameLayout::Slot<float>::UnsafeUninitializedSlot());
for (auto id_qtype : forest.GetRequiredQTypes()) {
while (slots->size() <= id_qtype.first) {
slots->push_back(placeholder);
}
QTypePtr qtype = id_qtype.second;
(*slots)[id_qtype.first] = AddSlot(qtype, layout_builder);
}
}
absl::Status CreateArraySlotsForForest(const DecisionForest& forest,
FrameLayout::Builder* layout_builder,
std::vector<TypedSlot>* slots) {
auto placeholder =
TypedSlot::FromSlot(FrameLayout::Slot<float>::UnsafeUninitializedSlot());
for (auto id_qtype : forest.GetRequiredQTypes()) {
while (slots->size() <= id_qtype.first) {
slots->push_back(placeholder);
}
QTypePtr qtype = id_qtype.second;
if (qtype == GetOptionalQType<float>()) {
(*slots)[id_qtype.first] =
TypedSlot::FromSlot(layout_builder->AddSlot<DenseArray<float>>());
} else if (qtype == GetOptionalQType<int64_t>()) {
(*slots)[id_qtype.first] =
TypedSlot::FromSlot(layout_builder->AddSlot<DenseArray<int64_t>>());
} else {
return absl::UnimplementedError(std::string("Unimplemented for type: ") +
std::string(qtype->name()));
}
}
if (slots->empty()) {
slots->push_back(
TypedSlot::FromSlot(layout_builder->AddSlot<DenseArray<float>>()));
}
return absl::OkStatus();
}
DecisionTree CreateRandomFloatTree(absl::BitGen* rnd, int num_features,
bool interactions, int num_splits,
double range_split_prob,
double equality_split_prob) {
return CreateRandomTreeImpl(
rnd, num_features, interactions, num_splits, [&](int feature_id) {
float split_type_rnd = absl::Uniform<float>(*rnd, 0, 1);
if (split_type_rnd < range_split_prob + equality_split_prob) {
float sp0 = absl::Uniform<uint8_t>(*rnd) / 256.0;
float sp1 = split_type_rnd < range_split_prob
? absl::Uniform<uint8_t>(*rnd) / 256.0
: sp0;
return IntervalSplit(feature_id, std::min(sp0, sp1),
std::max(sp0, sp1));
} else {
float split_point = absl::Uniform<uint8_t>(*rnd) / 256.0;
if (absl::Bernoulli(*rnd, 0.5)) {
return IntervalSplit(feature_id, -INFINITY, split_point);
} else {
return IntervalSplit(feature_id, split_point, +INFINITY);
}
}
});
}
std::unique_ptr<const DecisionForest> CreateRandomFloatForest(
absl::BitGen* rnd, int num_features, bool interactions, int min_num_splits,
int max_num_splits, int num_trees) {
std::vector<DecisionTree> trees;
trees.reserve(num_trees);
for (int i = 0; i < num_trees; ++i) {
int num_splits =
absl::Uniform<int32_t>(*rnd, min_num_splits, max_num_splits);
trees.push_back(
CreateRandomFloatTree(rnd, num_features, interactions, num_splits));
}
return DecisionForest::FromTrees(std::move(trees)).value();
}
DecisionTree CreateRandomTree(absl::BitGen* rnd, bool interactions,
int num_splits,
std::vector<QTypePtr>* feature_types) {
const float inf = std::numeric_limits<float>::infinity();
return CreateRandomTreeImpl(
rnd, feature_types->size(), interactions, num_splits,
[&](int feature_id) -> std::shared_ptr<SplitCondition> {
QTypePtr& type = (*feature_types)[feature_id];
if (!type) {
type = absl::Bernoulli(*rnd, 0.5) ? GetOptionalQType<float>()
: GetOptionalQType<int64_t>();
}
if (type == GetOptionalQType<float>()) {
float split_point = absl::Uniform<uint8_t>(*rnd) / 256.0;
if (absl::Bernoulli(*rnd, 0.5)) {
return IntervalSplit(feature_id, -inf, split_point);
} else {
return IntervalSplit(feature_id, split_point, +inf);
}
} else {
absl::flat_hash_set<int64_t> values;
for (int i = 0; i < kSetOfValuesSize; ++i) {
values.insert(absl::Uniform<int64_t>(*rnd, 0, 1000));
}
return SetOfValuesSplit<int64_t>(feature_id, values,
absl::Bernoulli(*rnd, 0.5));
}
});
}
DecisionTree CreateRandomObliviousTree(absl::BitGen* rnd, int depth,
std::vector<QTypePtr>* feature_types) {
const float inf = std::numeric_limits<float>::infinity();
std::vector<std::shared_ptr<SplitCondition>> conditions(depth);
for (int i = 0; i < depth; ++i) {
int feature_id = absl::Uniform<int32_t>(*rnd, 0, feature_types->size());
QTypePtr& type = (*feature_types)[feature_id];
if (!type) {
type = absl::Bernoulli(*rnd, 0.5) ? GetOptionalQType<float>()
: GetOptionalQType<int64_t>();
}
if (type == GetOptionalQType<float>()) {
float split_point = absl::Uniform<uint8_t>(*rnd) / 256.0;
if (absl::Bernoulli(*rnd, 0.5)) {
conditions[i] = IntervalSplit(feature_id, -inf, split_point);
} else {
conditions[i] = IntervalSplit(feature_id, split_point, +inf);
}
} else {
absl::flat_hash_set<int64_t> values;
for (int i = 0; i < kSetOfValuesSize; ++i) {
values.insert(absl::Uniform<int64_t>(*rnd, 0, 1000));
}
conditions[i] = SetOfValuesSplit<int64_t>(feature_id, values,
absl::Bernoulli(*rnd, 0.5));
}
}
  // Split nodes are generated in level order, so all nodes of one layer share
  // the same condition; advance to the next condition after emitting the last
  // node of a layer (1-based node ids of the form 2^k - 1).
  int cond_id = 0;
  int node_id = 0;
  return CreateRandomTreeImpl(rnd, feature_types->size(), false,
                              (1 << depth) - 1, [&](int) {
                                node_id++;
                                bool last_in_layer =
                                    (node_id & (node_id + 1)) == 0;
                                if (last_in_layer) {
                                  return conditions[cond_id++];
                                } else {
                                  return conditions[cond_id];
                                }
                              });
}
std::unique_ptr<const DecisionForest> CreateRandomForest(
absl::BitGen* rnd, int num_features, bool interactions, int min_num_splits,
int max_num_splits, int num_trees,
absl::Span<const QTypePtr> feature_types) {
std::vector<QTypePtr> types;
for (int feature_id = 0; feature_id < num_features; feature_id++) {
if (feature_id < feature_types.size() && feature_types[feature_id]) {
types.push_back(feature_types[feature_id]);
} else {
types.push_back(nullptr);
}
}
std::vector<DecisionTree> trees;
trees.reserve(num_trees);
for (int i = 0; i < num_trees; ++i) {
int num_splits =
absl::Uniform<int32_t>(*rnd, min_num_splits, max_num_splits);
trees.push_back(CreateRandomTree(rnd, interactions, num_splits, &types));
}
return DecisionForest::FromTrees(std::move(trees)).value();
}
} | #include "arolla/decision_forest/testing/test_util.h"
#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/log/check.h"
#include "absl/random/random.h"
#include "arolla/decision_forest/decision_forest.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qexpr/eval_context.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/typed_slot.h"
namespace arolla {
namespace {
TEST(TestUtilTest, FillWithRandomValue) {
absl::BitGen rnd;
FrameLayout::Builder bldr;
auto opt_float_slot = bldr.AddSlot<OptionalValue<float>>();
auto opt_int64_slot = bldr.AddSlot<OptionalValue<int64_t>>();
auto layout = std::move(bldr).Build();
RootEvaluationContext ctx(&layout);
ctx.Set(opt_float_slot, OptionalValue<float>(-1.0));
ctx.Set(opt_int64_slot, OptionalValue<int64_t>(-1));
CHECK_OK(FillWithRandomValue(TypedSlot::FromSlot(opt_float_slot), ctx.frame(),
&rnd));
CHECK_OK(FillWithRandomValue(TypedSlot::FromSlot(opt_int64_slot), ctx.frame(),
&rnd));
EXPECT_NE(OptionalValue<float>(-1.0), ctx.Get(opt_float_slot));
EXPECT_NE(OptionalValue<int64_t>(-1), ctx.Get(opt_int64_slot));
}
TEST(TestUtilTest, CreateSlotsForForest) {
absl::BitGen rnd;
auto forest = CreateRandomForest(&rnd, 5, true, 1, 64, 16);
FrameLayout::Builder bldr;
std::vector<TypedSlot> slots;
CreateSlotsForForest(*forest, &bldr, &slots);
EXPECT_OK(forest->ValidateInputSlots(slots));
}
TEST(TestUtilTest, CreateRandomFloatTree) {
absl::BitGen rnd;
for (size_t depth = 0; depth <= 15; ++depth) {
auto tree = CreateRandomFloatTree(&rnd, 5, true, (1 << depth) - 1);
EXPECT_EQ(tree.adjustments.size(), 1 << depth);
EXPECT_EQ(tree.split_nodes.size(), (1 << depth) - 1);
}
}
TEST(TestUtilTest, CreateRandomFloatForest) {
absl::BitGen rnd;
auto forest = CreateRandomFloatForest(&rnd, 5, true, 1, 64, 16);
EXPECT_EQ(forest->GetTrees().size(), 16);
EXPECT_GE(forest->GetRequiredQTypes().size(), 1);
EXPECT_LE(forest->GetRequiredQTypes().size(), 5);
for (const DecisionTree& tree : forest->GetTrees()) {
EXPECT_LE(tree.split_nodes.size(), 64);
}
}
TEST(TestUtilTest, CreateRandomForest) {
absl::BitGen rnd;
auto forest = CreateRandomForest(&rnd, 5, true, 1, 64, 16);
EXPECT_EQ(forest->GetTrees().size(), 16);
EXPECT_GE(forest->GetRequiredQTypes().size(), 1);
EXPECT_LE(forest->GetRequiredQTypes().size(), 5);
for (const DecisionTree& tree : forest->GetTrees()) {
EXPECT_LE(tree.split_nodes.size(), 64);
}
}
TEST(TestUtilTest, CreateRandomObliviousTree) {
absl::BitGen rnd;
std::vector<QTypePtr> types(10);
auto tree = CreateRandomObliviousTree(&rnd, 3, &types);
ASSERT_EQ(tree.split_nodes.size(), 7);
EXPECT_EQ(tree.split_nodes[1].condition, tree.split_nodes[2].condition);
EXPECT_EQ(tree.split_nodes[3].condition, tree.split_nodes[4].condition);
EXPECT_EQ(tree.split_nodes[4].condition, tree.split_nodes[5].condition);
EXPECT_EQ(tree.split_nodes[5].condition, tree.split_nodes[6].condition);
}
TEST(TestUtilTest, CreateRandomForestWithoutInteractions) {
absl::BitGen rnd;
auto forest = CreateRandomForest(&rnd, 5, false, 512, 512, 1);
EXPECT_EQ(forest->GetTrees().size(), 1);
EXPECT_EQ(forest->GetRequiredQTypes().size(), 1);
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/decision_forest/testing/test_util.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/decision_forest/testing/test_util_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
e28284ae-2b25-42a4-a4a2-5aa452414aa7 | cpp | google/arolla | forest_model | arolla/decision_forest/expr_operator/forest_model.cc | arolla/decision_forest/expr_operator/forest_model_test.cc | #include "arolla/decision_forest/expr_operator/forest_model.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "arolla/decision_forest/decision_forest.h"
#include "arolla/decision_forest/expr_operator/decision_forest_operator.h"
#include "arolla/expr/annotation_utils.h"
#include "arolla/expr/basic_expr_operator.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_attributes.h"
#include "arolla/expr/expr_debug_string.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/expr_operator_signature.h"
#include "arolla/expr/expr_visitor.h"
#include "arolla/expr/lambda_expr_operator.h"
#include "arolla/expr/registered_expr_operator.h"
#include "arolla/expr/visitors/substitution.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qtype/array_like/array_like_qtype.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/optional_qtype.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/standard_type_properties/properties.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/text.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla {
namespace {
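// Verifies that the postprocessing expression contains no leaves, that every
// placeholder refers either to an input name or to a key in submodel_ids, and
// that every submodel key is actually used in the expression.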
absl::Status ValidateExpression(
const expr::ExprNodePtr& expression,
const ForestModel::SubmodelIds& submodel_ids,
const absl::flat_hash_set<std::string>& input_names) {
absl::flat_hash_set<std::string> unused_submodels;
for (const auto& [k, _] : submodel_ids) unused_submodels.insert(k);
for (const auto& node : expr::VisitorOrder(expression)) {
if (node->is_leaf()) {
return absl::InvalidArgumentError(
"leaves are not allowed in an expression");
}
if (node->is_placeholder()) {
if (submodel_ids.count(node->placeholder_key()) > 0) {
unused_submodels.erase(node->placeholder_key());
} else if (!input_names.contains(node->placeholder_key())) {
return absl::InvalidArgumentError(absl::StrFormat(
"P.%s doesn't correspond to any input and it is not "
"found in submodel_ids",
node->placeholder_key()));
}
}
}
if (!unused_submodels.empty()) {
std::vector<std::string> unused_vec(unused_submodels.begin(),
unused_submodels.end());
absl::c_sort(unused_vec);
return absl::InvalidArgumentError(
absl::StrFormat("submodels [%s] are not used in the expression, but "
"are mentioned in submodel_ids",
absl::StrJoin(unused_vec, ", ")));
}
return absl::OkStatus();
}
absl::Status ValidateInputs(const DecisionForestPtr& forest,
const ForestModel::SubmodelIds& submodel_ids,
const std::vector<ForestModel::Parameter>& inputs) {
for (const auto& input : inputs) {
if (submodel_ids.count(input.name) > 0) {
return absl::InvalidArgumentError(absl::StrFormat(
"name collision of an input and a submodel: '%s'", input.name));
}
}
for (const auto& [key, unused] : forest->GetRequiredQTypes()) {
if (key >= inputs.size()) {
return absl::InvalidArgumentError(absl::StrFormat(
"not enough args: used_input_index=%d size=%d", key, inputs.size()));
}
}
return absl::OkStatus();
}
absl::Status ValidateOOBFilters(
const std::vector<expr::ExprNodePtr>& oob_filters,
const DecisionForestPtr& forest,
const absl::flat_hash_set<std::string>& input_names) {
for (const expr::ExprNodePtr& filter : oob_filters) {
if (filter == nullptr) {
return absl::InvalidArgumentError("OOB filter can't be nullptr");
}
for (const auto& node : expr::VisitorOrder(filter)) {
if (node->is_leaf()) {
return absl::InvalidArgumentError(
"leaves are not allowed in an OOB filter expressing");
}
if (node->is_placeholder() &&
!input_names.contains(node->placeholder_key())) {
return absl::InvalidArgumentError(
absl::StrCat("no input matches P.", node->placeholder_key(),
" in OOB filter ", expr::ToDebugString(node)));
}
}
}
return absl::OkStatus();
}
absl::StatusOr<expr::ExprNodePtr> AddAll(
const expr::ExprNodePtr& first, absl::Span<const expr::ExprNodePtr> nodes) {
auto res = first;
for (const auto& node : nodes) {
ASSIGN_OR_RETURN(res, expr::CallOp("math.add", {res, node}));
}
return res;
}
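// Counts how many times each subexpression (keyed by fingerprint) occurs in
// the expression, accumulating the counts of the children into every parent.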
using NodeCountMap = absl::flat_hash_map<Fingerprint, int>;
NodeCountMap GetNodeCountMap(const expr::ExprNodePtr& expr) {
return PostOrderTraverse(expr,
[&](const expr::ExprNodePtr& node,
absl::Span<const NodeCountMap* const> visits) {
NodeCountMap res{{node->fingerprint(), 1}};
for (const NodeCountMap* visit : visits) {
for (const auto& [k, v] : *visit) {
if (res.contains(k)) {
res[k] += v;
} else {
res[k] = v;
}
}
}
return res;
});
}
}
absl::StatusOr<ForestModelPtr> ForestModel::Create(
ForestModel::ConstructorArgs args) {
expr::ExprOperatorSignature signature;
signature.parameters.reserve(args.inputs.size());
for (const Parameter& param : args.inputs) {
signature.parameters.push_back({param.name});
}
RETURN_IF_ERROR(expr::ValidateSignature(signature));
RETURN_IF_ERROR(ValidateInputs(args.forest, args.submodel_ids, args.inputs));
absl::flat_hash_set<std::string> input_names;
input_names.reserve(args.inputs.size());
for (const auto& input : args.inputs) {
input_names.insert(input.name);
}
RETURN_IF_ERROR(
ValidateExpression(args.expression, args.submodel_ids, input_names));
if (args.oob_filters.has_value()) {
RETURN_IF_ERROR(
ValidateOOBFilters(*args.oob_filters, args.forest, input_names));
}
FingerprintHasher hasher("d18261c6a5414ee8e5b0af80dc480ea8");
hasher.Combine(args.forest->fingerprint(), args.expression->fingerprint(),
signature);
hasher.Combine(args.submodel_ids.size());
for (const auto& [k, v] : args.submodel_ids) {
hasher.Combine(k).CombineSpan(v);
}
hasher.Combine(args.inputs.size());
for (const auto& input : args.inputs) {
if (input.preprocessing != nullptr) {
hasher.Combine(input.preprocessing->fingerprint());
} else {
hasher.Combine(Fingerprint{});
}
}
if (args.oob_filters.has_value()) {
for (const auto& oob_filter : *args.oob_filters) {
hasher.Combine(oob_filter->fingerprint());
}
} else {
hasher.Combine(Fingerprint{});
}
if (args.truncation_step.has_value()) {
hasher.Combine(*args.truncation_step);
} else {
hasher.Combine(Fingerprint{});
}
std::shared_ptr<ForestModel> model(new ForestModel(
std::move(signature), std::move(hasher).Finish(), std::move(args)));
RETURN_IF_ERROR(model->Initialize());
return model;
}
absl::StatusOr<std::vector<expr::ExprNodePtr>> ForestModel::PreprocessInputs(
const expr::ExprNodePtr& node) const {
RETURN_IF_ERROR(ValidateNodeDepsCount(*node));
std::vector<expr::ExprNodePtr> args(inputs_.size());
for (int i = 0; i < inputs_.size(); ++i) {
expr::ExprNodePtr arg = node->node_deps()[i];
if (inputs_[i].preprocessing != nullptr) {
ASSIGN_OR_RETURN(auto lambda,
expr::LambdaOperator::Make(inputs_[i].preprocessing));
ASSIGN_OR_RETURN(arg, expr::CallOp(lambda, {arg}));
ASSIGN_OR_RETURN(arg,
expr::ToLowerNode(arg));
}
if (arg->qtype() == nullptr) {
return absl::InternalError(
absl::StrFormat("invalid preprocessing for input #%d: QType metadata "
"can not be propagated",
i));
}
ASSIGN_OR_RETURN(args[i], CastAndValidateArgType(i, std::move(arg)));
}
return args;
}
absl::StatusOr<expr::ExprNodePtr> ForestModel::ApplyPostprocessing(
const expr::ExprNodePtr& node, const expr::ExprNodePtr& raw_result) const {
RETURN_IF_ERROR(ValidateNodeDepsCount(*node));
absl::flat_hash_map<std::string, expr::ExprNodePtr> expression_params;
expression_params.reserve(inputs_.size() + 1);
for (int i = 0; i < inputs_.size(); ++i) {
expression_params[inputs_[i].name] = node->node_deps()[i];
}
if (res_tuple_key_) {
if (raw_result == nullptr) {
return absl::InvalidArgumentError(
"raw_result can be nullptr only if expression doesn't use decision "
"forest");
}
expression_params[*res_tuple_key_] = raw_result;
}
ASSIGN_OR_RETURN(auto result, SubstitutePlaceholders(
processed_expression_, expression_params,
true));
if (IsNameAnnotation(node)) {
return expr::CallOp(
"annotation.name",
{result, expr::Literal(Text(expr::ReadNameAnnotation(node)))});
}
return result;
}
absl::StatusOr<expr::ExprNodePtr> ForestModel::ToLowerLevel(
const expr::ExprNodePtr& node) const {
RETURN_IF_ERROR(ValidateNodeDepsCount(*node));
for (size_t i = 0; i < inputs_.size(); ++i) {
if (node->node_deps()[i]->qtype() == nullptr) {
return node;
}
}
if (!res_tuple_key_) {
return ApplyPostprocessing(node, nullptr);
}
ASSIGN_OR_RETURN(std::vector<expr::ExprNodePtr> args, PreprocessInputs(node));
ASSIGN_OR_RETURN(auto op, CreateDecisionForestOperator(tree_filters_));
ASSIGN_OR_RETURN(auto res_tuple, expr::MakeOpNode(op, std::move(args)));
return ApplyPostprocessing(node, res_tuple);
}
absl::StatusOr<expr::ExprNodePtr> ForestModel::CreatePartialEvaluator(
absl::Span<const std::pair<int, int>> step_ranges,
absl::Span<const expr::ExprNodePtr> preprocessed_inputs) const {
std::vector<TreeFilter> filters;
filters.reserve(step_ranges.size() * tree_filters_.size());
for (auto& [from, to] : step_ranges) {
for (const TreeFilter& filter : tree_filters_) {
if ((filter.step_range_from > from) ||
(filter.step_range_to >= 0 && filter.step_range_to < to)) {
return absl::InvalidArgumentError("requested range is not available");
}
filters.push_back({from, to, filter.submodels});
}
}
ASSIGN_OR_RETURN(auto op, CreateDecisionForestOperator(std::move(filters)));
return expr::MakeOpNode(
op, std::vector(preprocessed_inputs.begin(), preprocessed_inputs.end()));
}
absl::StatusOr<QTypePtr>
ForestModel::InferTypeOfFirstForestInputAfterPreprocessing(
absl::Span<const QTypePtr> input_qtypes) const {
if (!first_forest_input_id_.has_value()) {
return absl::FailedPreconditionError("forest has no inputs");
}
QTypePtr in_type = input_qtypes[*first_forest_input_id_];
if (inputs_[*first_forest_input_id_].preprocessing != nullptr) {
ASSIGN_OR_RETURN(auto lambda,
expr::LambdaOperator::Make(
inputs_[*first_forest_input_id_].preprocessing));
ASSIGN_OR_RETURN(expr::ExprAttributes attr,
lambda->InferAttributes({expr::ExprAttributes(in_type)}));
if (attr.qtype() == nullptr) {
return absl::InternalError("can't infer preprocessed input type");
}
return attr.qtype();
} else {
return in_type;
}
}
absl::StatusOr<QTypePtr> ForestModel::GetOutputQType(
absl::Span<const QTypePtr> input_qtypes) const {
QTypePtr out_type = GetQType<float>();
if (first_forest_input_id_.has_value()) {
ASSIGN_OR_RETURN(
QTypePtr in_type,
InferTypeOfFirstForestInputAfterPreprocessing(input_qtypes));
if (IsArrayLikeQType(in_type)) {
ASSIGN_OR_RETURN(const ArrayLikeQType* array_qtype,
ToArrayLikeQType(in_type));
ASSIGN_OR_RETURN(out_type,
array_qtype->WithValueQType(GetQType<float>()));
}
}
ASSIGN_OR_RETURN(auto fake_res,
expr::CallOp("annotation.qtype", {expr::Leaf("fake_res"),
expr::Literal(out_type)}));
ASSIGN_OR_RETURN(
auto fake_res_tuple,
expr::BindOp(
"core.make_tuple",
std::vector<expr::ExprNodePtr>(tree_filters_.size(), fake_res), {}));
absl::flat_hash_map<std::string, expr::ExprNodePtr> expression_params;
if (res_tuple_key_) {
expression_params[*res_tuple_key_] = fake_res_tuple;
}
for (int i = 0; i < inputs_.size(); ++i) {
ASSIGN_OR_RETURN(
expression_params[inputs_[i].name],
expr::CallOp("annotation.qtype", {expr::Leaf("fake_input"),
expr::Literal(input_qtypes[i])}));
}
ASSIGN_OR_RETURN(auto expr, SubstitutePlaceholders(
processed_expression_, expression_params,
true));
const auto result = expr->qtype();
if (result == nullptr) {
return absl::FailedPreconditionError("unable to deduce output qtype");
}
return result;
}
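// Casts `arg` to the type the forest requires for `input_id`: other numeric
// scalars are converted to float32 when a float input is expected, scalars
// are wrapped into optionals when needed, and incompatible value types yield
// an error.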
absl::StatusOr<expr::ExprNodePtr> ForestModel::CastAndValidateArgType(
int input_id, expr::ExprNodePtr arg) const {
const auto& required_qtypes = forest_->GetRequiredQTypes();
auto required_qtype_iter = required_qtypes.find(input_id);
if (required_qtype_iter == required_qtypes.end()) {
return arg;
}
QTypePtr required_qtype = required_qtype_iter->second;
QTypePtr required_scalar_qtype = DecayOptionalQType(required_qtype);
ASSIGN_OR_RETURN(QTypePtr actual_scalar_qtype, GetScalarQType(arg->qtype()));
if (required_scalar_qtype == GetQType<float>() &&
actual_scalar_qtype != GetQType<float>() &&
IsNumericScalarQType(actual_scalar_qtype)) {
ASSIGN_OR_RETURN(arg,
expr::BindOp("core.to_float32", {std::move(arg)}, {}));
} else if (required_scalar_qtype != actual_scalar_qtype) {
return absl::InvalidArgumentError(
absl::StrFormat("value type of input #%d (%s) doesn't match: "
"expected to be compatible with %s, got %s",
input_id, expr::GetDebugSnippet(arg),
required_qtype->name(), arg->qtype()->name()));
}
if (IsScalarQType(arg->qtype()) && IsOptionalQType(required_qtype)) {
ASSIGN_OR_RETURN(arg,
expr::BindOp("core.to_optional", {std::move(arg)}, {}));
}
return arg;
}
absl::StatusOr<ForestModel::ExpressionAnalysisResult>
ForestModel::AnalyzeExpression() const {
ExpressionAnalysisResult res;
ASSIGN_OR_RETURN(auto expression, expr::ToLowest(expression_));
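  // Classify the nodes of the lowered expression: it is a "plain sum" if all
  // operators are math.add; placeholders listed in submodel_ids_ are submodel
  // references, and all remaining nodes are treated as plain summands.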
for (const auto& node : expr::VisitorOrder(expression)) {
if (node->is_op()) {
ASSIGN_OR_RETURN(auto op, expr::DecayRegisteredOperator(node->op()));
res.plain_sum = res.plain_sum && expr::IsBackendOperator(op, "math.add");
} else if (node->is_placeholder() &&
submodel_ids_.count(node->placeholder_key()) > 0) {
res.submodel_nodes.push_back(node);
const auto& submodels = submodel_ids_.at(node->placeholder_key());
if (submodels.empty()) {
return absl::InvalidArgumentError(absl::StrFormat(
"submodel_ids[%s] is empty", node->placeholder_key()));
}
if (res.bag_count != 0 && res.bag_count != submodels.size()) {
return absl::InvalidArgumentError(
"all submodels should have the same number of bags");
}
res.bag_count = submodels.size();
} else {
res.plain_sum_nodes.push_back(node);
}
}
res.bag_count = std::max(res.bag_count, 1);
return res;
}
absl::Status ForestModel::HandlePlainSumExpression(
const std::vector<expr::ExprNodePtr>& submodel_nodes,
std::vector<expr::ExprNodePtr>&& plain_sum_nodes) {
ASSIGN_OR_RETURN(
processed_expression_,
expr::CallOp("core.get_first", {expr::Placeholder(*res_tuple_key_)}));
auto count_map = GetNodeCountMap(expression_);
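  // A node that occurs N times in the sum is added once and multiplied by N
  // instead of being duplicated.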
for (auto& node : plain_sum_nodes) {
int count = count_map[node->fingerprint()];
if (count > 1) {
ASSIGN_OR_RETURN(node, expr::CallOp("math.multiply",
{node, expr::Literal<float>(count)}));
}
}
ASSIGN_OR_RETURN(processed_expression_,
AddAll(processed_expression_, plain_sum_nodes));
TreeFilter used_trees;
for (const auto& node : submodel_nodes) {
int count = count_map[node->fingerprint()];
for (int submodel_id : submodel_ids_[node->placeholder_key()]) {
used_trees.submodels.insert(submodel_id);
if (count > 1) submodel_weight_multipliers_[submodel_id] = count;
}
}
tree_filters_.push_back(used_trees);
return absl::OkStatus();
}
absl::Status ForestModel::HandleExpressionWithoutBags() {
absl::flat_hash_map<std::string, expr::ExprNodePtr> params;
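  // Each submodel group gets its own TreeFilter and is replaced by the
  // corresponding element of the forest result tuple.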
for (const auto& [key, submodels] : submodel_ids_) {
ASSIGN_OR_RETURN(
params[key],
expr::CallOp("core.get_nth",
{expr::Placeholder(*res_tuple_key_),
expr::Literal<int64_t>(tree_filters_.size())}));
TreeFilter filter;
filter.submodels.insert(submodels.begin(), submodels.end());
tree_filters_.push_back(std::move(filter));
}
ASSIGN_OR_RETURN(processed_expression_,
SubstitutePlaceholders(expression_, params));
return absl::OkStatus();
}
absl::StatusOr<expr::ExprNodePtr> ForestModel::UsedBagCountExpr() const {
DCHECK_GT(bag_count_, 0);
if (!oob_filters_.has_value()) {
return expr::Literal<float>(bag_count_);
}
expr::ExprNodePtr used_bag_count = nullptr;
for (int bag_id = 0; bag_id < bag_count_; ++bag_id) {
ASSIGN_OR_RETURN(expr::ExprNodePtr used,
expr::CallOp("core.where", {(*oob_filters_)[bag_id],
expr::Literal<float>(1),
expr::Literal<float>(0)}));
if (used_bag_count != nullptr) {
ASSIGN_OR_RETURN(used_bag_count,
expr::CallOp("math.add", {used_bag_count, used}));
} else {
used_bag_count = used;
}
}
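  // If every bag is filtered out, return missing rather than zero so that the
  // final division produces a missing value instead of dividing by zero.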
ASSIGN_OR_RETURN(
used_bag_count,
expr::CallOp(
"core.where",
{expr::CallOp("core.greater",
{used_bag_count, expr::Literal<float>(0)}),
used_bag_count, expr::Literal<OptionalValue<float>>(std::nullopt)}));
return used_bag_count;
}
absl::Status ForestModel::HandleExpressionWithBags() {
std::vector<expr::ExprNodePtr> bags(bag_count_);
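  // Instantiate the expression once per bag; each copy reads its own elements
  // of the forest result tuple and, if out-of-bag filters are present, is
  // masked by the bag's filter.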
for (int bag_id = 0; bag_id < bag_count_; ++bag_id) {
absl::flat_hash_map<std::string, expr::ExprNodePtr> params;
for (const auto& [key, submodels] : submodel_ids_) {
expr::ExprNodePtr& param = params[key];
ASSIGN_OR_RETURN(
param, expr::CallOp("core.get_nth",
{expr::Placeholder(*res_tuple_key_),
expr::Literal<int64_t>(tree_filters_.size())}));
TreeFilter filter;
if (submodels.size() <= bag_id) {
return absl::InternalError("invalid submodel_ids");
}
filter.submodels.insert(submodels[bag_id]);
tree_filters_.push_back(std::move(filter));
submodel_weight_multipliers_[submodels[bag_id]] = bag_count_;
}
ASSIGN_OR_RETURN(bags[bag_id], SubstitutePlaceholders(expression_, params));
if (oob_filters_.has_value()) {
ASSIGN_OR_RETURN(
bags[bag_id],
expr::CallOp("core.where", {(*oob_filters_)[bag_id], bags[bag_id],
expr::Literal<float>(0)}));
}
}
ASSIGN_OR_RETURN(
auto sum, AddAll(bags[0], absl::Span<expr::ExprNodePtr>(bags.data() + 1,
bag_count_ - 1)));
ASSIGN_OR_RETURN(processed_expression_,
expr::CallOp("math.divide", {sum, UsedBagCountExpr()}));
return absl::OkStatus();
}
absl::Status ForestModel::Initialize() {
if (submodel_ids_.empty()) {
res_tuple_key_ = std::nullopt;
processed_expression_ = expression_;
bag_count_ = 1;
return absl::OkStatus();
} else {
res_tuple_key_ = submodel_ids_.begin()->first;
}
ASSIGN_OR_RETURN(auto info, AnalyzeExpression());
is_plain_sum_ = info.plain_sum;
bag_count_ = info.bag_count;
if (oob_filters_.has_value() && oob_filters_->size() != bag_count_) {
return absl::FailedPreconditionError(
"if oob_filters is present, its size must be equal to bag count");
}
if (info.plain_sum && !oob_filters_) {
RETURN_IF_ERROR(HandlePlainSumExpression(info.submodel_nodes,
std::move(info.plain_sum_nodes)));
} else if (bag_count_ == 1 && !oob_filters_) {
RETURN_IF_ERROR(HandleExpressionWithoutBags());
} else {
RETURN_IF_ERROR(HandleExpressionWithBags());
}
if (truncation_step_.has_value()) {
for (TreeFilter& filter : tree_filters_) {
filter.step_range_to = *truncation_step_;
}
}
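  // Remember the smallest input id used by the forest; it is later used to
  // decide whether the model output is a scalar or an array.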
for (const auto& [id, _] : forest_->GetRequiredQTypes()) {
if (first_forest_input_id_.has_value()) {
first_forest_input_id_ = std::min(*first_forest_input_id_, id);
} else {
first_forest_input_id_ = id;
}
}
return absl::OkStatus();
}
namespace {
std::vector<DecisionTree> GetMaybeUsedTrees(
absl::Span<const DecisionTree> trees,
absl::Span<const TreeFilter> tree_filters) {
if (tree_filters.empty()) {
return {};
}
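  // Merge only the step ranges of all filters (submodel sets are ignored), so
  // the result may contain trees that are not actually used.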
TreeFilter combined_step_filter{
.step_range_from = tree_filters.front().step_range_from,
.step_range_to = tree_filters.front().step_range_to};
for (int i = 1; i < tree_filters.size(); ++i) {
combined_step_filter.step_range_from = std::min(
combined_step_filter.step_range_from, tree_filters[i].step_range_from);
if (tree_filters[i].step_range_to == -1 ||
combined_step_filter.step_range_to == -1) {
combined_step_filter.step_range_to = -1;
} else {
combined_step_filter.step_range_to = std::max(
combined_step_filter.step_range_to, tree_filters[i].step_range_to);
}
}
std::vector<DecisionTree> res;
for (const DecisionTree& tree : trees) {
if (combined_step_filter(tree.tag)) {
res.push_back(tree);
}
}
return res;
}
}
absl::StatusOr<expr::ExprOperatorPtr> ForestModel::CreateDecisionForestOperator(
std::vector<TreeFilter> tree_filters) const {
DecisionForestPtr forest = forest_;
auto required_types = forest->GetRequiredQTypes();
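  // Bag averaging and duplicated submodels are implemented by scaling tree
  // weights, which requires rebuilding the forest from the adjusted trees.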
if (!submodel_weight_multipliers_.empty()) {
std::vector<DecisionTree> trees =
GetMaybeUsedTrees(forest->GetTrees(), tree_filters);
for (DecisionTree& tree : trees) {
auto mult_iter = submodel_weight_multipliers_.find(tree.tag.submodel_id);
if (mult_iter != submodel_weight_multipliers_.end()) {
tree.weight *= mult_iter->second;
}
}
ASSIGN_OR_RETURN(forest, DecisionForest::FromTrees(std::move(trees)));
}
return std::make_shared<DecisionForestOperator>(
std::move(forest), std::move(tree_filters), required_types);
}
ForestModel::ForestModel(expr::ExprOperatorSignature&& signature,
Fingerprint&& fingerprint, ConstructorArgs&& args)
: expr::BasicExprOperator("core.forest_model", signature,
"DecisionForest with pre- and post-processing",
fingerprint),
forest_(std::move(args.forest)),
submodel_ids_(std::move(args.submodel_ids)),
oob_filters_(std::move(args.oob_filters)),
truncation_step_(args.truncation_step),
inputs_(std::move(args.inputs)),
expression_(std::move(args.expression)) {}
absl::string_view ForestModel::py_qvalue_specialization_key() const {
return kForestModelQValueSpecializationKey;
}
} | #include "arolla/decision_forest/expr_operator/forest_model.h"
#include <cstdint>
#include <limits>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/no_destructor.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/status/statusor.h"
#include "arolla/decision_forest/decision_forest.h"
#include "arolla/decision_forest/split_conditions/interval_split_condition.h"
#include "arolla/decision_forest/split_conditions/set_of_values_split_condition.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/expr/annotation_utils.h"
#include "arolla/expr/eval/eval.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/registered_expr_operator.h"
#include "arolla/expr/testing/testing.h"
#include "arolla/expr/tuple_expr_operator.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qexpr/eval_context.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/serving/expr_compiler.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla {
namespace {
using ::absl_testing::IsOk;
using ::absl_testing::StatusIs;
using ::arolla::testing::EqualsExpr;
using ::arolla::testing::WithNameAnnotation;
using ::testing::ElementsAre;
using ::testing::Eq;
using ::testing::HasSubstr;
using ::testing::NotNull;
using ::testing::WhenDynamicCastTo;
constexpr float inf = std::numeric_limits<float>::infinity();
constexpr auto S = DecisionTreeNodeId::SplitNodeId;
constexpr auto A = DecisionTreeNodeId::AdjustmentId;
absl::StatusOr<DecisionForestPtr> CreateForest() {
std::vector<DecisionTree> trees(2);
trees[0].adjustments = {0.5, 1.5, 2.5, 3.5};
trees[0].tag.submodel_id = 0;
trees[0].split_nodes = {
{S(1), S(2), IntervalSplit(0, 1.5, inf)},
{A(0), A(1), SetOfValuesSplit<int64_t>(1, {5}, false)},
{A(2), A(3), IntervalSplit(0, -inf, 10)}};
trees[1].adjustments = {5};
trees[1].tag.submodel_id = 1;
return DecisionForest::FromTrees(std::move(trees));
}
absl::StatusOr<ForestModelPtr> CreateForestModelOp() {
ForestModel::SubmodelIds submodel_ids = {{"X", {0}}, {"Y", {1}}};
ASSIGN_OR_RETURN(auto preprocessing,
expr::CallOp("math.add", {expr::Placeholder("arg"),
expr::Literal<int64_t>(1)}));
ASSIGN_OR_RETURN(auto expression,
expr::CallOp("math.add", {expr::Placeholder("X"),
expr::Placeholder("Y")}));
ASSIGN_OR_RETURN(auto forest, CreateForest());
return ForestModel::Create({.forest = std::move(forest),
.submodel_ids = std::move(submodel_ids),
.inputs = {{"p1"}, {"p2", preprocessing}},
.expression = expression});
}
absl::Status InitAlias() {
static absl::NoDestructor<absl::Status> init_status(
expr::RegisterOperatorAlias("alias_math.add", "math.add").status());
return *init_status;
}
class ForestModelTest : public ::testing::Test {
void SetUp() override { CHECK_OK(InitAlias()); }
};
TEST_F(ForestModelTest, NotEnoughArgs) {
ForestModel::ConstructorArgs model_data;
model_data.submodel_ids = {{"X", {0}}, {"Y", {1}}};
ASSERT_OK_AND_ASSIGN(model_data.expression,
expr::CallOp("math.add", {expr::Placeholder("X"),
expr::Placeholder("Y")}));
ASSERT_OK_AND_ASSIGN(model_data.forest, CreateForest());
model_data.inputs = {{"p1"}};
EXPECT_THAT(ForestModel::Create(model_data),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("not enough args")));
}
TEST_F(ForestModelTest, ParameterNameCollision) {
ForestModel::ConstructorArgs model_data;
model_data.submodel_ids = {{"X", {0}}, {"Y", {1}}};
ASSERT_OK_AND_ASSIGN(
auto preprocessing,
expr::CallOp("math.add", {expr::Placeholder("arg"),
expr::Literal<OptionalValue<int64_t>>(1)}));
ASSERT_OK_AND_ASSIGN(model_data.expression,
expr::CallOp("math.add", {expr::Placeholder("X"),
expr::Placeholder("Y")}));
ASSERT_OK_AND_ASSIGN(model_data.forest, CreateForest());
model_data.inputs = {{"p1"}, {"p1", preprocessing}};
EXPECT_THAT(ForestModel::Create(model_data),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("non-unique parameter name: 'p1'")));
model_data.inputs = {{"X"}, {"p2", preprocessing}};
EXPECT_THAT(
ForestModel::Create(model_data),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("name collision of an input and a submodel: 'X'")));
}
TEST_F(ForestModelTest, IncorrectExpression) {
ASSERT_OK_AND_ASSIGN(DecisionForestPtr forest, DecisionForest::FromTrees({}));
{
ASSERT_OK_AND_ASSIGN(expr::ExprNodePtr expression,
expr::CallOp("math.add", {expr::Placeholder("X"),
expr::Placeholder("Y")}));
EXPECT_THAT(ForestModel::Create({.forest = forest,
.submodel_ids = {{"X", {0}}},
.inputs = {{"p1"}, {"p2"}},
.expression = expression}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("P.Y doesn't correspond to any input and it "
"is not found in submodel_ids")));
}
{
expr::ExprNodePtr expression = expr::Placeholder("X");
EXPECT_THAT(
ForestModel::Create({.forest = forest,
.submodel_ids = {{"X", {0}}, {"Y", {1}}},
.expression = expression}),
StatusIs(
absl::StatusCode::kInvalidArgument,
HasSubstr("submodels [Y] are not used in the expression, but are "
"mentioned in submodel_ids")));
}
{
expr::ExprNodePtr expression = expr::Leaf("X");
EXPECT_THAT(
ForestModel::Create({.forest = forest, .expression = expression}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("leaves are not allowed in an expression")));
}
}
TEST_F(ForestModelTest, UsingInputInExpression) {
ASSERT_OK_AND_ASSIGN(auto expression,
expr::CallOp("math.add", {expr::Placeholder("X"),
expr::Placeholder("p1")}));
auto f1 = expr::Literal<float>(1.0);
auto i5 = expr::Literal<int64_t>(5);
ASSERT_OK_AND_ASSIGN(auto forest, CreateForest());
ASSERT_OK_AND_ASSIGN(auto model_op,
ForestModel::Create({.forest = forest,
.submodel_ids = {{"X", {0}}},
.inputs = {{"p1"}, {"p2"}},
.expression = expression}));
ASSERT_OK_AND_ASSIGN(auto model, expr::CallOp(model_op, {f1, i5}));
ASSERT_OK_AND_ASSIGN(auto expanded_model, expr::ToLowest(model));
ASSERT_TRUE(expanded_model->is_op());
EXPECT_TRUE(IsRegisteredOperator(expanded_model->op()));
EXPECT_TRUE(IsBackendOperator(*DecayRegisteredOperator(expanded_model->op()),
"math.add"));
EXPECT_THAT(expanded_model->node_deps()[1], EqualsExpr(f1));
}
TEST_F(ForestModelTest, QTypePropagation) {
ASSERT_OK_AND_ASSIGN(auto model_op, CreateForestModelOp());
ASSERT_OK_AND_ASSIGN(
auto model,
expr::CallOp(model_op, {expr::Literal<OptionalValue<float>>(1.0),
expr::Literal<int64_t>(5)}));
EXPECT_EQ(model->qtype(), GetQType<float>());
}
TEST_F(ForestModelTest, QTypePropagationUsesPreprocessing) {
ForestModel::ConstructorArgs model_data;
model_data.submodel_ids = {{"X", {0, 1}}};
ASSERT_OK_AND_ASSIGN(auto preprocessing,
expr::CallOp("core.const_with_shape",
{expr::Literal<DenseArrayShape>({5}),
expr::Placeholder("arg")}));
model_data.expression = expr::Placeholder("X");
ASSERT_OK_AND_ASSIGN(model_data.forest, CreateForest());
model_data.inputs = {{"p1", preprocessing}, {"p2", preprocessing}};
ASSERT_OK_AND_ASSIGN(auto model_op, ForestModel::Create(model_data));
ASSERT_OK_AND_ASSIGN(
auto model,
expr::CallOp(model_op, {expr::Literal<OptionalValue<float>>(1.0),
expr::Literal<int64_t>(5)}));
EXPECT_EQ(model->qtype(), GetDenseArrayQType<float>());
}
TEST_F(ForestModelTest, QTypePropagationPlainSumWithBroadcasting) {
ForestModel::ConstructorArgs model_data;
model_data.submodel_ids = {{"X", {0, 1}}};
ASSERT_OK_AND_ASSIGN(
model_data.expression,
expr::CallOp("math.add",
{expr::Literal(CreateDenseArray<float>({1., 2., 3.})),
expr::Placeholder("X")}));
ASSERT_OK_AND_ASSIGN(model_data.forest, CreateForest());
model_data.inputs = {{"p1"}, {"p2"}};
ASSERT_OK_AND_ASSIGN(auto model_op, ForestModel::Create(model_data));
ASSERT_OK_AND_ASSIGN(
auto model,
expr::CallOp(model_op, {expr::Literal<OptionalValue<float>>(1.0),
expr::Literal<int64_t>(5)}));
EXPECT_EQ(model->qtype(), GetDenseArrayQType<float>());
ASSERT_OK_AND_ASSIGN(auto lowered, expr::ToLowest(model));
EXPECT_EQ(lowered->qtype(), GetDenseArrayQType<float>());
}
TEST_F(ForestModelTest, EmptyForest) {
ASSERT_OK_AND_ASSIGN(auto forest, DecisionForest::FromTrees({}));
expr::ExprNodePtr expression = expr::Literal<float>(0.5);
ASSERT_OK_AND_ASSIGN(auto model_op,
ForestModel::Create({.forest = std::move(forest),
.expression = expression}));
ASSERT_OK_AND_ASSIGN(auto model, expr::CallOp(model_op, {}));
ASSERT_OK_AND_ASSIGN(auto expanded_model, expr::ToLowest(model));
EXPECT_THAT(expanded_model, EqualsExpr(expression));
}
TEST_F(ForestModelTest, ToLower) {
ASSERT_OK_AND_ASSIGN(auto model_op, CreateForestModelOp());
{
ASSERT_OK_AND_ASSIGN(auto model,
expr::CallOp(model_op, {expr::Literal<float>(1.0),
expr::Literal<int64_t>(5)}));
ASSERT_OK_AND_ASSIGN(model, WithNameAnnotation(model, "forest"));
ASSERT_OK_AND_ASSIGN(auto expanded_model, expr::ToLowest(model));
EXPECT_NE(model->fingerprint(), expanded_model->fingerprint());
EXPECT_EQ(ReadNameAnnotation(expanded_model), "forest");
}
{
ASSERT_OK_AND_ASSIGN(
auto model, expr::CallOp(model_op, {expr::Leaf("f"), expr::Leaf("i")}));
ASSERT_OK_AND_ASSIGN(auto expanded_model, expr::ToLowest(model));
EXPECT_EQ(model->fingerprint(), expanded_model->fingerprint());
}
}
absl::StatusOr<expr::ExprNodePtr> GetExpressionForTest(std::string A,
std::string B,
std::string C,
std::string op) {
return expr::CallOp(
"alias_math.add",
{expr::Placeholder(A),
expr::CallOp(op, {expr::Placeholder(B), expr::Placeholder(C)})});
}
TEST_F(ForestModelTest, ToLowerMergeSubmodels) {
ASSERT_OK_AND_ASSIGN(auto forest, DecisionForest::FromTrees({}));
ASSERT_OK_AND_ASSIGN(auto expression,
GetExpressionForTest("input", "X", "Y", "math.add"));
ASSERT_OK_AND_ASSIGN(
auto model_op,
ForestModel::Create({.forest = std::move(forest),
.submodel_ids = {{"X", {0, 2}}, {"Y", {1, 3}}},
.inputs = {{"input"}},
.expression = expression}));
ASSERT_OK_AND_ASSIGN(auto model,
expr::CallOp(model_op, {expr::Literal<float>(1.0)}));
ASSERT_OK_AND_ASSIGN(auto expanded_model, expr::ToLowest(model));
EXPECT_TRUE(IsRegisteredOperator(expanded_model->op()));
EXPECT_TRUE(IsBackendOperator(*DecayRegisteredOperator(expanded_model->op()),
"math.add"));
EXPECT_THAT(expanded_model->node_deps()[0]->op().get(),
WhenDynamicCastTo<const expr::GetNthOperator*>(NotNull()));
}
TEST_F(ForestModelTest, MergeDuplicatedSubmodels) {
std::vector<DecisionTree> trees(2);
trees[0].adjustments = {1.0};
trees[0].tag.submodel_id = 0;
trees[1].adjustments = {3.0};
trees[1].tag.submodel_id = 1;
ASSERT_OK_AND_ASSIGN(auto forest,
DecisionForest::FromTrees({std::move(trees)}));
ASSERT_OK_AND_ASSIGN(auto expression,
GetExpressionForTest("X", "Y", "X", "math.add"));
ASSERT_OK_AND_ASSIGN(
auto model_op,
ForestModel::Create({.forest = std::move(forest),
.submodel_ids = {{"X", {0}}, {"Y", {1}}},
.expression = expression}));
ASSERT_OK_AND_ASSIGN(auto model, expr::CallOp(model_op, {}));
FrameLayout::Builder layout_builder;
ASSERT_OK_AND_ASSIGN(
auto executable_model,
CompileAndBindForDynamicEvaluation(expr::DynamicEvaluationEngineOptions(),
&layout_builder, model, {}));
FrameLayout layout = std::move(layout_builder).Build();
ASSERT_OK_AND_ASSIGN(const FrameLayout::Slot<float> output,
executable_model->output_slot().ToSlot<float>());
RootEvaluationContext ctx(&layout);
EXPECT_OK(executable_model->InitializeLiterals(&ctx));
EXPECT_THAT(executable_model->Execute(&ctx), IsOk());
EXPECT_THAT(ctx.Get(output), Eq(5.0f));
}
TEST_F(ForestModelTest, DuplicatedNodes) {
ASSERT_OK_AND_ASSIGN(auto forest, DecisionForest::FromTrees({}));
ASSERT_OK_AND_ASSIGN(auto expression,
GetExpressionForTest("input", "X", "input", "math.add"));
ASSERT_OK_AND_ASSIGN(auto model_op,
ForestModel::Create({.forest = std::move(forest),
.submodel_ids = {{"X", {0}}},
.inputs = {{"input"}},
.expression = expression}));
ASSERT_OK_AND_ASSIGN(auto model,
expr::CallOp(model_op, {expr::Leaf("input")}));
FrameLayout::Builder layout_builder;
auto input_slot = layout_builder.AddSlot<float>();
ASSERT_OK_AND_ASSIGN(
auto executable_model,
CompileAndBindForDynamicEvaluation(
expr::DynamicEvaluationEngineOptions(), &layout_builder, model,
{{"input", TypedSlot::FromSlot(input_slot)}}));
FrameLayout layout = std::move(layout_builder).Build();
ASSERT_OK_AND_ASSIGN(const FrameLayout::Slot<float> output,
executable_model->output_slot().ToSlot<float>());
RootEvaluationContext ctx(&layout);
EXPECT_OK(executable_model->InitializeLiterals(&ctx));
ctx.Set(input_slot, 3.1f);
EXPECT_THAT(executable_model->Execute(&ctx), IsOk());
EXPECT_FLOAT_EQ(ctx.Get(output), 6.2f);
}
TEST_F(ForestModelTest, ToLowerSingleBag) {
ASSERT_OK_AND_ASSIGN(auto forest, DecisionForest::FromTrees({}));
ASSERT_OK_AND_ASSIGN(
auto expression,
GetExpressionForTest("input", "X", "Y", "math.multiply"));
ASSERT_OK_AND_ASSIGN(
auto model_op,
ForestModel::Create({.forest = std::move(forest),
.submodel_ids = {{"X", {0}}, {"Y", {1}}},
.inputs = {{"input"}},
.expression = expression}));
ASSERT_OK_AND_ASSIGN(auto model,
expr::CallOp(model_op, {expr::Literal<float>(1.0)}));
ASSERT_OK_AND_ASSIGN(auto expanded_model, expr::ToLowest(model));
EXPECT_TRUE(IsRegisteredOperator(expanded_model->op()));
EXPECT_TRUE(IsBackendOperator(*DecayRegisteredOperator(expanded_model->op()),
"math.add"));
EXPECT_TRUE(expanded_model->node_deps()[0]->is_literal());
EXPECT_TRUE(IsRegisteredOperator(expanded_model->node_deps()[1]->op()));
EXPECT_TRUE(IsBackendOperator(
*DecayRegisteredOperator(expanded_model->node_deps()[1]->op()),
"math.multiply"));
}
TEST_F(ForestModelTest, ToLowerExpandBags) {
std::vector<DecisionTree> trees(4);
trees[0].adjustments = {1.0};
trees[0].tag.submodel_id = 0;
trees[1].adjustments = {2.0};
trees[1].tag.submodel_id = 1;
trees[2].adjustments = {4.0};
trees[2].tag.submodel_id = 2;
trees[3].adjustments = {8.0};
trees[3].tag.submodel_id = 3;
ASSERT_OK_AND_ASSIGN(auto forest,
DecisionForest::FromTrees({std::move(trees)}));
ASSERT_OK_AND_ASSIGN(
auto expression,
GetExpressionForTest("input", "X", "Y", "math.multiply"));
ASSERT_OK_AND_ASSIGN(
auto model_op,
ForestModel::Create({.forest = std::move(forest),
.submodel_ids = {{"X", {0, 2}}, {"Y", {1, 3}}},
.inputs = {{"input"}},
.expression = expression}));
ASSERT_OK_AND_ASSIGN(auto model,
expr::CallOp(model_op, {expr::Literal<float>(1.2)}));
ASSERT_OK_AND_ASSIGN(auto expanded_model, expr::ToLowest(model));
EXPECT_TRUE(IsBackendOperator(*DecayRegisteredOperator(expanded_model->op()),
"math.divide"));
ASSERT_OK_AND_ASSIGN(
auto model_fn,
(ExprCompiler<std::tuple<float>, float>()).CompileOperator(model_op));
ASSERT_OK_AND_ASSIGN(float res, model_fn(1.2));
EXPECT_FLOAT_EQ(res, 69.2f);
}
TEST_F(ForestModelTest, OutOfBagFilters) {
std::vector<DecisionTree> trees(4);
trees[0].adjustments = {1.0};
trees[0].tag.submodel_id = 0;
trees[1].adjustments = {2.0};
trees[1].tag.submodel_id = 1;
trees[2].adjustments = {4.0};
trees[2].tag.submodel_id = 2;
trees[3].adjustments = {8.0};
trees[3].tag.submodel_id = 3;
ASSERT_OK_AND_ASSIGN(auto forest,
DecisionForest::FromTrees({std::move(trees)}));
ASSERT_OK_AND_ASSIGN(
auto expression,
GetExpressionForTest("input", "X", "Y", "math.multiply"));
ASSERT_OK_AND_ASSIGN(auto filter0,
expr::CallOp("core.less", {expr::Placeholder("input"),
expr::Literal(2.0f)}));
ASSERT_OK_AND_ASSIGN(auto filter1,
expr::CallOp("core.less", {expr::Literal(2.0f),
expr::Placeholder("input")}));
ASSERT_OK_AND_ASSIGN(
auto model_op,
ForestModel::Create({.forest = std::move(forest),
.submodel_ids = {{"X", {0, 2}}, {"Y", {1, 3}}},
.inputs = {{"input"}},
.expression = expression,
.oob_filters = std::vector{filter0, filter1}}));
ASSERT_OK_AND_ASSIGN(auto model,
expr::CallOp(model_op, {expr::Literal<float>(1.2)}));
ASSERT_OK_AND_ASSIGN(auto expanded_model, expr::ToLowest(model));
EXPECT_TRUE(IsBackendOperator(*DecayRegisteredOperator(expanded_model->op()),
"math.divide"));
ASSERT_OK_AND_ASSIGN(auto model_fn,
(ExprCompiler<std::tuple<float>, OptionalValue<float>>())
.CompileOperator(model_op));
{
ASSERT_OK_AND_ASSIGN(OptionalValue<float> res, model_fn(1));
EXPECT_EQ(res, 9.0f);
}
{
ASSERT_OK_AND_ASSIGN(OptionalValue<float> res, model_fn(2));
EXPECT_EQ(res, OptionalValue<float>{});
}
{
ASSERT_OK_AND_ASSIGN(OptionalValue<float> res, model_fn(3));
EXPECT_EQ(res, 131.0f);
}
}
TEST_F(ForestModelTest, BagsAndTruncation) {
std::vector<DecisionTree> trees(4);
trees[0].adjustments = {1.0};
trees[0].tag = {.step = 0, .submodel_id = 0};
trees[1].adjustments = {2.0};
trees[1].tag = {.step = 0, .submodel_id = 1};
trees[2].adjustments = {4.0};
trees[2].tag = {.step = 1, .submodel_id = 2};
trees[3].adjustments = {8.0};
trees[3].tag = {.step = 1, .submodel_id = 3};
ASSERT_OK_AND_ASSIGN(auto forest,
DecisionForest::FromTrees({std::move(trees)}));
ASSERT_OK_AND_ASSIGN(
auto expression,
GetExpressionForTest("input", "X", "Y", "math.multiply"));
ASSERT_OK_AND_ASSIGN(
auto model_op,
ForestModel::Create({.forest = std::move(forest),
.submodel_ids = {{"X", {0, 2}}, {"Y", {1, 3}}},
.inputs = {{"input"}},
.expression = expression,
.truncation_step = 1}));
ASSERT_OK_AND_ASSIGN(
auto model_fn,
(ExprCompiler<std::tuple<float>, float>()).CompileOperator(model_op));
ASSERT_OK_AND_ASSIGN(float res, model_fn(1.2));
EXPECT_FLOAT_EQ(res, 5.2f);
}
TEST_F(ForestModelTest, ConversionToOptional) {
ASSERT_OK_AND_ASSIGN(const auto model_op, CreateForestModelOp());
const auto input = expr::Literal<float>(1.0);
ASSERT_OK_AND_ASSIGN(const auto converted_input,
expr::CallOp("core.to_optional", {input}));
ASSERT_OK_AND_ASSIGN(
auto model, expr::CallOp(model_op, {input, expr::Literal<int64_t>(5)}));
ASSERT_OK_AND_ASSIGN(auto expanded_model, expr::ToLowest(model));
ASSERT_OK_AND_ASSIGN(
auto model_with_converted_input,
expr::CallOp(model_op, {converted_input, expr::Literal<int64_t>(5)}));
ASSERT_OK_AND_ASSIGN(auto expanded_model_with_converted_input,
expr::ToLowest(model_with_converted_input));
EXPECT_THAT(expanded_model_with_converted_input, EqualsExpr(expanded_model));
}
TEST_F(ForestModelTest, ConversionFromDouble) {
ASSERT_OK_AND_ASSIGN(const auto model_op, CreateForestModelOp());
const auto input = expr::Literal<double>(1.0);
ASSERT_OK_AND_ASSIGN(const auto converted_input,
expr::CallOp("core.to_float32", {input}));
ASSERT_OK_AND_ASSIGN(
auto model, expr::CallOp(model_op, {input, expr::Literal<int64_t>(5)}));
ASSERT_OK_AND_ASSIGN(auto expanded_model, expr::ToLowest(model));
ASSERT_OK_AND_ASSIGN(
auto model_with_converted_input,
expr::CallOp(model_op, {converted_input, expr::Literal<int64_t>(5)}));
ASSERT_OK_AND_ASSIGN(auto expanded_model_with_converted_input,
expr::ToLowest(model_with_converted_input));
EXPECT_THAT(expanded_model_with_converted_input, EqualsExpr(expanded_model));
}
TEST_F(ForestModelTest, ConversionFromInteger) {
ASSERT_OK_AND_ASSIGN(const auto model_op, CreateForestModelOp());
const auto input = expr::Literal<int>(1);
ASSERT_OK_AND_ASSIGN(const auto converted_input,
expr::CallOp("core.to_float32", {input}));
ASSERT_OK_AND_ASSIGN(
auto model, expr::CallOp(model_op, {input, expr::Literal<int64_t>(5)}));
ASSERT_OK_AND_ASSIGN(auto expanded_model, expr::ToLowest(model));
ASSERT_OK_AND_ASSIGN(
auto model_with_converted_input,
expr::CallOp(model_op, {converted_input, expr::Literal<int64_t>(5)}));
ASSERT_OK_AND_ASSIGN(auto expanded_model_with_converted_input,
expr::ToLowest(model_with_converted_input));
EXPECT_THAT(expanded_model_with_converted_input, EqualsExpr(expanded_model));
}
TEST_F(ForestModelTest, EvaluateOnScalars) {
ASSERT_OK_AND_ASSIGN(auto forest_model, CreateForestModelOp());
ASSERT_OK_AND_ASSIGN(
auto model,
expr::CallOp(forest_model, {expr::Leaf("f"), expr::Leaf("i")}));
FrameLayout::Builder layout_builder;
auto f_slot = layout_builder.AddSlot<float>();
auto i_slot = layout_builder.AddSlot<int64_t>();
ASSERT_OK_AND_ASSIGN(
auto executable_model,
CompileAndBindForDynamicEvaluation(expr::DynamicEvaluationEngineOptions(),
&layout_builder, model,
{{"f", TypedSlot::FromSlot(f_slot)},
{"i", TypedSlot::FromSlot(i_slot)}}));
FrameLayout layout = std::move(layout_builder).Build();
ASSERT_OK_AND_ASSIGN(const FrameLayout::Slot<float> output,
executable_model->output_slot().ToSlot<float>());
RootEvaluationContext ctx(&layout);
EXPECT_OK(executable_model->InitializeLiterals(&ctx));
ctx.Set(f_slot, 1.0f);
ctx.Set(i_slot, 5);
EXPECT_THAT(executable_model->Execute(&ctx), IsOk());
EXPECT_FLOAT_EQ(ctx.Get(output), 5.5f);
ctx.Set(f_slot, 3.0f);
ctx.Set(i_slot, 0);
EXPECT_THAT(executable_model->Execute(&ctx), IsOk());
EXPECT_FLOAT_EQ(ctx.Get(output), 8.5f);
}
TEST_F(ForestModelTest, EvaluateOnScalarAndArray) {
ASSERT_OK_AND_ASSIGN(auto forest_model, CreateForestModelOp());
ASSERT_OK_AND_ASSIGN(
auto model,
expr::CallOp(forest_model, {expr::Leaf("f"), expr::Leaf("i")}));
FrameLayout::Builder layout_builder;
auto f_slot = layout_builder.AddSlot<DenseArray<float>>();
auto i_slot = layout_builder.AddSlot<int64_t>();
EXPECT_THAT(
CompileAndBindForDynamicEvaluation(expr::DynamicEvaluationEngineOptions(),
&layout_builder, model,
{{"f", TypedSlot::FromSlot(f_slot)},
{"i", TypedSlot::FromSlot(i_slot)}}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("either all forest inputs must be scalars or all "
"forest inputs must be arrays, but arg[0] is "
"DENSE_ARRAY_FLOAT32 and "
"arg[1] is OPTIONAL_INT64")));
}
TEST_F(ForestModelTest, EvaluateOnDenseArrays) {
ASSERT_OK_AND_ASSIGN(const auto model_op, CreateForestModelOp());
ASSERT_OK_AND_ASSIGN(
auto model, expr::CallOp(model_op, {expr::Leaf("f"), expr::Leaf("i")}));
FrameLayout::Builder layout_builder;
auto f_slot = layout_builder.AddSlot<DenseArray<float>>();
auto i_slot = layout_builder.AddSlot<DenseArray<int64_t>>();
ASSERT_OK_AND_ASSIGN(
auto executable_model,
CompileAndBindForDynamicEvaluation(expr::DynamicEvaluationEngineOptions(),
&layout_builder, model,
{{"f", TypedSlot::FromSlot(f_slot)},
{"i", TypedSlot::FromSlot(i_slot)}}));
FrameLayout layout = std::move(layout_builder).Build();
ASSERT_OK_AND_ASSIGN(
const FrameLayout::Slot<DenseArray<float>> output,
executable_model->output_slot().ToSlot<DenseArray<float>>());
RootEvaluationContext ctx(&layout);
EXPECT_OK(executable_model->InitializeLiterals(&ctx));
ctx.Set(f_slot, CreateDenseArray<float>({1.0f, 3.0f}));
ctx.Set(i_slot, CreateDenseArray<int64_t>({5, 0}));
EXPECT_THAT(executable_model->Execute(&ctx), IsOk());
EXPECT_THAT(ctx.Get(output), ElementsAre(5.5f, 8.5f));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/decision_forest/expr_operator/forest_model.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/decision_forest/expr_operator/forest_model_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
f478e72d-13d7-41bc-b552-c5d252f61f35 | cpp | google/arolla | decision_forest_operator | arolla/decision_forest/expr_operator/decision_forest_operator.cc | arolla/decision_forest/expr_operator/decision_forest_operator_test.cc | #include "arolla/decision_forest/expr_operator/decision_forest_operator.h"
#include <algorithm>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "arolla/decision_forest/decision_forest.h"
#include "arolla/expr/basic_expr_operator.h"
#include "arolla/expr/expr_operator_signature.h"
#include "arolla/qtype/array_like/array_like_qtype.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/tuple_qtype.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla {
namespace {
std::vector<int> GetRequiredInputIds(
const absl::flat_hash_map<int, QTypePtr>& required_types) {
std::vector<int> result;
result.reserve(required_types.size());
for (const auto& [id, _] : required_types) {
result.push_back(id);
}
return result;
}
}
DecisionForestOperator::DecisionForestOperator(
DecisionForestPtr forest, std::vector<TreeFilter> tree_filters)
: DecisionForestOperator(GetRequiredInputIds(forest->GetRequiredQTypes()),
forest, std::move(tree_filters)) {}
DecisionForestOperator::DecisionForestOperator(
DecisionForestPtr forest, std::vector<TreeFilter> tree_filters,
const absl::flat_hash_map<int, QTypePtr>& required_types)
: DecisionForestOperator(GetRequiredInputIds(required_types),
std::move(forest), std::move(tree_filters)) {}
DecisionForestOperator::DecisionForestOperator(
std::vector<int> required_input_ids, DecisionForestPtr forest,
std::vector<TreeFilter> tree_filters)
: BasicExprOperator(
"anonymous.decision_forest_operator",
expr::ExprOperatorSignature::MakeVariadicArgs(),
"Evaluates decision forest stored in the operator state.",
FingerprintHasher("::arolla::DecisionForestOperator")
.Combine(forest->fingerprint())
.CombineSpan(tree_filters)
.Finish()),
forest_(std::move(forest)),
tree_filters_(std::move(tree_filters)),
required_input_ids_(std::move(required_input_ids)) {
std::sort(required_input_ids_.begin(), required_input_ids_.end());
}
absl::StatusOr<QTypePtr> DecisionForestOperator::GetOutputQType(
absl::Span<const QTypePtr> input_qtypes) const {
int last_forest_input_id =
required_input_ids_.empty() ? -1 : required_input_ids_.back();
if (last_forest_input_id >= static_cast<int>(input_qtypes.size())) {
return absl::InvalidArgumentError(absl::StrFormat(
"not enough arguments for the decision forest: expected at least %d, "
"got %d",
last_forest_input_id + 1, input_qtypes.size()));
}
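  // Evaluation is batched iff the forest inputs are array-like; mixing scalar
  // and array forest inputs is not allowed.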
bool batched = !input_qtypes.empty() && !required_input_ids_.empty() &&
IsArrayLikeQType(input_qtypes[required_input_ids_[0]]);
for (int id : required_input_ids_) {
if (IsArrayLikeQType(input_qtypes[id]) != batched) {
DCHECK(!required_input_ids_.empty());
return absl::InvalidArgumentError(absl::StrFormat(
"either all forest inputs must be scalars or all forest inputs "
"must be arrays, but arg[%d] is %s and arg[%d] is %s",
required_input_ids_[0], input_qtypes[required_input_ids_[0]]->name(),
id, input_qtypes[id]->name()));
}
}
QTypePtr output_type;
if (batched) {
DCHECK(!required_input_ids_.empty());
ASSIGN_OR_RETURN(const ArrayLikeQType* array_type,
ToArrayLikeQType(input_qtypes[required_input_ids_[0]]));
ASSIGN_OR_RETURN(output_type,
array_type->WithValueQType(GetQType<float>()));
} else {
output_type = GetQType<float>();
}
return MakeTupleQType(
std::vector<QTypePtr>(tree_filters_.size(), output_type));
}
} | #include "arolla/decision_forest/expr_operator/decision_forest_operator.h"
#include <cstdint>
#include <limits>
#include <memory>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/status/statusor.h"
#include "arolla/decision_forest/decision_forest.h"
#include "arolla/decision_forest/split_conditions/interval_split_condition.h"
#include "arolla/decision_forest/split_conditions/set_of_values_split_condition.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/tuple_qtype.h"
namespace arolla {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::testing::HasSubstr;
constexpr float inf = std::numeric_limits<float>::infinity();
constexpr auto S = DecisionTreeNodeId::SplitNodeId;
constexpr auto A = DecisionTreeNodeId::AdjustmentId;
absl::StatusOr<DecisionForestPtr> CreateForest() {
std::vector<DecisionTree> trees(2);
trees[0].adjustments = {0.5, 1.5, 2.5, 3.5};
trees[0].tag.submodel_id = 0;
trees[0].split_nodes = {
{S(1), S(2), IntervalSplit(0, 1.5, inf)},
{A(0), A(1), SetOfValuesSplit<int64_t>(1, {5}, false)},
{A(2), A(3), IntervalSplit(0, -inf, 10)}};
trees[1].adjustments = {5};
trees[1].tag.submodel_id = 1;
return DecisionForest::FromTrees(std::move(trees));
}
TEST(DecisionForestOperatorTest, GetOutputQType) {
ASSERT_OK_AND_ASSIGN(const DecisionForestPtr forest, CreateForest());
{
auto forest_op = std::make_shared<DecisionForestOperator>(
forest, std::vector<TreeFilter>{});
EXPECT_THAT(forest_op->GetOutputQType({GetQType<float>()}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("not enough arguments for the decision "
"forest: expected at least 2, got 1")));
EXPECT_THAT(
forest_op->GetOutputQType(
{GetQType<float>(), GetDenseArrayQType<float>()}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("either all forest inputs must be scalars or all "
"forest inputs must be arrays, but arg[0] is "
"FLOAT32 and arg[1] is DENSE_ARRAY_FLOAT32")));
EXPECT_THAT(
forest_op->GetOutputQType({GetQType<float>(), GetQType<float>()}),
IsOkAndHolds(MakeTupleQType({})));
}
{
auto forest_op = std::make_shared<DecisionForestOperator>(
forest, std::vector<TreeFilter>{TreeFilter{.submodels = {0}},
TreeFilter{.submodels = {1, 2}}});
EXPECT_THAT(forest_op->GetOutputQType({GetQType<float>()}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("not enough arguments for the decision "
"forest: expected at least 2, got 1")));
EXPECT_THAT(
forest_op->GetOutputQType(
{GetQType<float>(), GetDenseArrayQType<float>()}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("either all forest inputs must be scalars or all "
"forest inputs must be arrays, but arg[0] is "
"FLOAT32 and arg[1] is DENSE_ARRAY_FLOAT32")));
EXPECT_THAT(
forest_op->GetOutputQType({GetQType<float>(), GetQType<float>()}),
IsOkAndHolds(MakeTupleQType({GetQType<float>(), GetQType<float>()})));
}
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/decision_forest/expr_operator/decision_forest_operator.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/decision_forest/expr_operator/decision_forest_operator_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
f465a0e8-f7b0-4785-8631-df1b0ea858b0 | cpp | google/arolla | array_encoder | arolla/serialization_codecs/array/encoders/array_encoder.cc | arolla/serialization_codecs/array/encoders/array_encoder_test.cc | #include <cstdint>
#include "absl/base/no_destructor.h"
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "arolla/array/array.h"
#include "arolla/array/edge.h"
#include "arolla/array/id_filter.h"
#include "arolla/array/qtype/types.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/typed_ref.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/serialization_base/encoder.h"
#include "arolla/serialization_codecs/array/array_codec.pb.h"
#include "arolla/serialization_codecs/array/codec_name.h"
#include "arolla/serialization_codecs/registry.h"
#include "arolla/util/bytes.h"
#include "arolla/util/init_arolla.h"
#include "arolla/util/meta.h"
#include "arolla/util/text.h"
#include "arolla/util/unit.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::serialization_codecs {
namespace {
using ::arolla::serialization_base::Encoder;
using ::arolla::serialization_base::ValueProto;
absl::StatusOr<ValueProto> GenValueProto(Encoder& encoder) {
ASSIGN_OR_RETURN(auto codec_index, encoder.EncodeCodec(kArrayV1Codec));
ValueProto value_proto;
value_proto.set_codec_index(codec_index);
return value_proto;
}
template <typename T>
absl::Status EncodeArrayValueImpl(ArrayV1Proto::ArrayProto& array_proto,
TypedRef value, Encoder& encoder,
ValueProto& value_proto) {
const auto& array = value.UnsafeAs<Array<T>>();
array_proto.set_size(array.size());
if (array.size() > 0) {
ASSIGN_OR_RETURN(
auto dense_data_value_index,
encoder.EncodeValue(TypedValue::FromValue(array.dense_data())));
value_proto.add_input_value_indices(dense_data_value_index);
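    // Full form: dense_data covers every element and no ids are stored.
    // Sparse form: store ids (relative to ids_offset) and the missing_id_value.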
if (array.dense_data().size() == array.size()) {
DCHECK_EQ(array.id_filter().type(), IdFilter::Type::kFull);
} else {
DCHECK_EQ(array.id_filter().ids().size(), array.dense_data().size());
array_proto.mutable_ids()->Add(array.id_filter().ids().begin(),
array.id_filter().ids().end());
for (auto& id : *array_proto.mutable_ids()) {
id -= array.id_filter().ids_offset();
}
ASSIGN_OR_RETURN(
auto missing_id_value_index,
encoder.EncodeValue(TypedValue::FromValue(array.missing_id_value())));
value_proto.add_input_value_indices(missing_id_value_index);
}
}
return absl::OkStatus();
}
#define GEN_ENCODE_ARRAY(NAME, T, FIELD) \
absl::StatusOr<ValueProto> EncodeArray##NAME##QType(Encoder& encoder) { \
ASSIGN_OR_RETURN(auto value_proto, GenValueProto(encoder)); \
value_proto.MutableExtension(ArrayV1Proto::extension) \
->set_##FIELD##_qtype(true); \
return value_proto; \
} \
\
absl::StatusOr<ValueProto> EncodeArray##NAME##Value(TypedRef value, \
Encoder& encoder) { \
ASSIGN_OR_RETURN(auto value_proto, GenValueProto(encoder)); \
auto* array_proto = value_proto.MutableExtension(ArrayV1Proto::extension) \
->mutable_##FIELD##_value(); \
RETURN_IF_ERROR( \
EncodeArrayValueImpl<T>(*array_proto, value, encoder, value_proto)); \
return value_proto; \
}
GEN_ENCODE_ARRAY(Unit, Unit, array_unit)
GEN_ENCODE_ARRAY(Bytes, Bytes, array_bytes)
GEN_ENCODE_ARRAY(Text, Text, array_text)
GEN_ENCODE_ARRAY(Boolean, bool, array_boolean)
GEN_ENCODE_ARRAY(Int32, int32_t, array_int32)
GEN_ENCODE_ARRAY(Int64, int64_t, array_int64)
GEN_ENCODE_ARRAY(UInt64, uint64_t, array_uint64)
GEN_ENCODE_ARRAY(Float32, float, array_float32)
GEN_ENCODE_ARRAY(Float64, double, array_float64)
#undef GEN_ENCODE_ARRAY
absl::StatusOr<ValueProto> EncodeArrayEdgeQType(Encoder& encoder) {
ASSIGN_OR_RETURN(auto value_proto, GenValueProto(encoder));
value_proto.MutableExtension(ArrayV1Proto::extension)
->set_array_edge_qtype(true);
return value_proto;
}
absl::StatusOr<ValueProto> EncodeArrayEdgeValue(TypedRef value,
Encoder& encoder) {
ASSIGN_OR_RETURN(auto value_proto, GenValueProto(encoder));
auto* array_edge_proto =
value_proto.MutableExtension(ArrayV1Proto::extension)
->mutable_array_edge_value();
const auto& array_edge = value.UnsafeAs<ArrayEdge>();
ASSIGN_OR_RETURN(
auto array_value_index,
encoder.EncodeValue(TypedValue::FromValue(array_edge.edge_values())));
value_proto.add_input_value_indices(array_value_index);
switch (array_edge.edge_type()) {
case ArrayEdge::EdgeType::MAPPING:
array_edge_proto->set_edge_type(ArrayV1Proto::ArrayEdgeProto::MAPPING);
array_edge_proto->set_parent_size(array_edge.parent_size());
return value_proto;
case ArrayEdge::EdgeType::SPLIT_POINTS:
array_edge_proto->set_edge_type(
ArrayV1Proto::ArrayEdgeProto::SPLIT_POINTS);
return value_proto;
}
return absl::InternalError(
absl::StrCat("unknown ArrayEdge edge type: ", array_edge.edge_type()));
}
absl::StatusOr<ValueProto> EncodeArrayToScalarEdgeQType(Encoder& encoder) {
ASSIGN_OR_RETURN(auto value_proto, GenValueProto(encoder));
value_proto.MutableExtension(ArrayV1Proto::extension)
->set_array_to_scalar_edge_qtype(true);
return value_proto;
}
absl::StatusOr<ValueProto> EncodeArrayToScalarEdgeValue(TypedRef value,
Encoder& encoder) {
const auto& array_to_scalar_edge = value.UnsafeAs<ArrayGroupScalarEdge>();
ASSIGN_OR_RETURN(auto value_proto, GenValueProto(encoder));
value_proto.MutableExtension(ArrayV1Proto::extension)
->set_array_to_scalar_edge_value(array_to_scalar_edge.child_size());
return value_proto;
}
absl::StatusOr<ValueProto> EncodeArrayShapeQType(Encoder& encoder) {
ASSIGN_OR_RETURN(auto value_proto, GenValueProto(encoder));
value_proto.MutableExtension(ArrayV1Proto::extension)
->set_array_shape_qtype(true);
return value_proto;
}
absl::StatusOr<ValueProto> EncodeArrayShapeValue(TypedRef value,
Encoder& encoder) {
const auto& array_shape = value.UnsafeAs<ArrayShape>();
ASSIGN_OR_RETURN(auto value_proto, GenValueProto(encoder));
value_proto.MutableExtension(ArrayV1Proto::extension)
->set_array_shape_value(array_shape.size);
return value_proto;
}
}
absl::StatusOr<ValueProto> EncodeArray(TypedRef value, Encoder& encoder) {
using QTypeEncoder = absl::StatusOr<ValueProto> (*)(Encoder&);
using ValueEncoder = absl::StatusOr<ValueProto> (*)(TypedRef, Encoder&);
using QTypeEncoders = absl::flat_hash_map<QTypePtr, QTypeEncoder>;
using ValueEncoders = absl::flat_hash_map<QTypePtr, ValueEncoder>;
static const absl::NoDestructor<QTypeEncoders> kQTypeEncoders(QTypeEncoders{
{GetArrayQType<Unit>(), &EncodeArrayUnitQType},
{GetArrayQType<bool>(), &EncodeArrayBooleanQType},
{GetArrayQType<Bytes>(), &EncodeArrayBytesQType},
{GetArrayQType<Text>(), &EncodeArrayTextQType},
{GetArrayQType<int32_t>(), &EncodeArrayInt32QType},
{GetArrayQType<int64_t>(), &EncodeArrayInt64QType},
{GetArrayQType<uint64_t>(), &EncodeArrayUInt64QType},
{GetArrayQType<float>(), &EncodeArrayFloat32QType},
{GetArrayQType<double>(), &EncodeArrayFloat64QType},
{GetQType<ArrayEdge>(), &EncodeArrayEdgeQType},
{GetQType<ArrayGroupScalarEdge>(), &EncodeArrayToScalarEdgeQType},
{GetQType<ArrayShape>(), &EncodeArrayShapeQType},
});
static const absl::NoDestructor<ValueEncoders> kValueEncoders(ValueEncoders{
{GetArrayQType<Unit>(), &EncodeArrayUnitValue},
{GetArrayQType<bool>(), &EncodeArrayBooleanValue},
{GetArrayQType<Bytes>(), &EncodeArrayBytesValue},
{GetArrayQType<Text>(), &EncodeArrayTextValue},
{GetArrayQType<int32_t>(), &EncodeArrayInt32Value},
{GetArrayQType<int64_t>(), &EncodeArrayInt64Value},
{GetArrayQType<uint64_t>(), &EncodeArrayUInt64Value},
{GetArrayQType<float>(), &EncodeArrayFloat32Value},
{GetArrayQType<double>(), &EncodeArrayFloat64Value},
{GetQType<ArrayEdge>(), &EncodeArrayEdgeValue},
{GetQType<ArrayGroupScalarEdge>(), &EncodeArrayToScalarEdgeValue},
{GetQType<ArrayShape>(), &EncodeArrayShapeValue},
});
if (value.GetType() == GetQType<QTypePtr>()) {
const auto& qtype_value = value.UnsafeAs<QTypePtr>();
auto it = kQTypeEncoders->find(qtype_value);
if (it != kQTypeEncoders->end()) {
return it->second(encoder);
}
} else {
auto it = kValueEncoders->find(value.GetType());
if (it != kValueEncoders->end()) {
return it->second(value, encoder);
}
}
return absl::UnimplementedError(absl::StrFormat(
"%s does not support serialization of %s: %s; this may indicate a "
"missing BUILD dependency on the encoder for this qtype",
kArrayV1Codec, value.GetType()->name(), value.Repr()));
}
AROLLA_INITIALIZER(
.reverse_deps = {arolla::initializer_dep::kS11n},
.init_fn = []() -> absl::Status {
RETURN_IF_ERROR(
RegisterValueEncoderByQType(GetQType<ArrayEdge>(), EncodeArray));
RETURN_IF_ERROR(RegisterValueEncoderByQType(
GetQType<ArrayGroupScalarEdge>(), EncodeArray));
RETURN_IF_ERROR(
RegisterValueEncoderByQType(GetQType<ArrayShape>(), EncodeArray));
absl::Status status;
arolla::meta::foreach_type<ScalarTypes>([&](auto meta_type) {
if (status.ok()) {
status = RegisterValueEncoderByQType(
GetArrayQType<typename decltype(meta_type)::type>(),
EncodeArray);
}
});
return status;
})
} | #include <cstdint>
#include <optional>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/log/check.h"
#include "absl/status/statusor.h"
#include "arolla/array/array.h"
#include "arolla/array/edge.h"
#include "arolla/array/qtype/types.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/serialization/encode.h"
#include "arolla/serialization_base/base.pb.h"
#include "arolla/serialization_codecs/array/array_codec.pb.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::serialization_codecs {
namespace {
using ::arolla::serialization::Encode;
using ::arolla::serialization_base::ValueProto;
template <typename T>
absl::StatusOr<ValueProto> GenValueProto(const T& value) {
ASSIGN_OR_RETURN(auto container_proto,
Encode({TypedValue::FromValue(value)}, {}));
CHECK_GT(container_proto.decoding_steps_size(), 1);
CHECK(container_proto.decoding_steps().rbegin()[1].has_value());
return container_proto.decoding_steps().rbegin()[1].value();
}
TEST(EncodeArrayTest, IdsOffset) {
auto array = CreateArray<float>({5, std::nullopt, 3, std::nullopt, 1})
.ToSparseForm()
.Slice(1, 3);
ASSERT_EQ(array.id_filter().ids_offset(), 1);
ASSERT_OK_AND_ASSIGN(auto value_proto, GenValueProto(array));
EXPECT_THAT(value_proto.input_value_indices(), testing::ElementsAre(2, 4));
ASSERT_TRUE(value_proto.HasExtension(ArrayV1Proto::extension));
const auto& array_proto = value_proto.GetExtension(ArrayV1Proto::extension);
ASSERT_EQ(array_proto.value_case(), ArrayV1Proto::kArrayFloat32Value);
const auto& array_float32_proto = array_proto.array_float32_value();
EXPECT_EQ(array_float32_proto.size(), 3);
EXPECT_THAT(array_float32_proto.ids(), testing::ElementsAre(1));
}
TEST(EncodeArrayTest, EdgeRoundTrips) {
const auto splits = CreateArray<int64_t>({0, 2, 5});
ASSERT_OK_AND_ASSIGN(auto array_edge, ArrayEdge::FromSplitPoints(splits));
ASSERT_OK_AND_ASSIGN(auto value_proto, GenValueProto(array_edge));
ASSERT_TRUE(value_proto.HasExtension(ArrayV1Proto::extension));
const auto& array_proto = value_proto.GetExtension(ArrayV1Proto::extension);
ASSERT_EQ(array_proto.value_case(), ArrayV1Proto::kArrayEdgeValue);
const auto& array_edge_proto = array_proto.array_edge_value();
EXPECT_EQ(array_edge_proto.parent_size(), 0);
EXPECT_THAT(array_edge_proto.edge_type(),
ArrayV1Proto::ArrayEdgeProto::SPLIT_POINTS);
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/serialization_codecs/array/encoders/array_encoder.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/serialization_codecs/array/encoders/array_encoder_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
4f331c16-80de-4a7d-8bc0-99e3a571617b | cpp | google/arolla | dense_array_encoder | arolla/serialization_codecs/dense_array/encoders/dense_array_encoder.cc | arolla/serialization_codecs/dense_array/encoders/dense_array_encoder_test.cc | #include <cstddef>
#include <cstdint>
#include <type_traits>
#include <utility>
#include "absl/base/no_destructor.h"
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "arolla/dense_array/bitmap.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/dense_array/edge.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/typed_ref.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/serialization_base/encoder.h"
#include "arolla/serialization_codecs/dense_array/codec_name.h"
#include "arolla/serialization_codecs/dense_array/dense_array_codec.pb.h"
#include "arolla/serialization_codecs/registry.h"
#include "arolla/util/bytes.h"
#include "arolla/util/init_arolla.h"
#include "arolla/util/meta.h"
#include "arolla/util/text.h"
#include "arolla/util/unit.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::serialization_codecs {
namespace {
namespace bm = ::arolla::bitmap;
using ::arolla::serialization_base::Encoder;
using ::arolla::serialization_base::ValueProto;
using BitmapProto = std::decay_t<std::remove_const_t<
decltype(std::declval<DenseArrayV1Proto::DenseArrayUnitProto>().bitmap())>>;
absl::StatusOr<ValueProto> GenValueProto(Encoder& encoder) {
ASSIGN_OR_RETURN(auto codec_index, encoder.EncodeCodec(kDenseArrayV1Codec));
ValueProto value_proto;
value_proto.set_codec_index(codec_index);
return value_proto;
}
BitmapProto GenBitmapProto(const bm::Bitmap& bitmap, int offset, int64_t size) {
BitmapProto result;
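  // An empty bitmap in the proto means that all values are present.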
if (bm::CountBits(bitmap, offset, size) == size) {
return result;
}
const int64_t bitmapSize = bm::BitmapSize(size);
result.Resize(bitmapSize, 0);
for (int64_t i = 0; i < bitmapSize; ++i) {
result[i] = bm::GetWordWithOffset(bitmap, i, offset);
}
if (int last_word_usage = size % bm::kWordBitCount) {
result[bitmapSize - 1] &= (1U << last_word_usage) - 1;
}
return result;
}
absl::StatusOr<ValueProto> EncodeDenseArrayUnitValue(TypedRef value,
Encoder& encoder) {
DCHECK(value.GetType() == GetQType<DenseArray<Unit>>());
const auto& dense_array = value.UnsafeAs<DenseArray<Unit>>();
ASSIGN_OR_RETURN(auto value_proto, GenValueProto(encoder));
auto* dense_array_unit_proto =
value_proto.MutableExtension(DenseArrayV1Proto::extension)
->mutable_dense_array_unit_value();
dense_array_unit_proto->set_size(dense_array.size());
*dense_array_unit_proto->mutable_bitmap() = GenBitmapProto(
dense_array.bitmap, dense_array.bitmap_bit_offset, dense_array.size());
return value_proto;
}
#define GEN_ENCODE_DENSE_ARRAY_QTYPE(NAME, FIELD) \
absl::StatusOr<ValueProto> EncodeDenseArray##NAME##QType(Encoder& encoder) { \
ASSIGN_OR_RETURN(auto value_proto, GenValueProto(encoder)); \
value_proto.MutableExtension(DenseArrayV1Proto::extension) \
->set_##FIELD##_qtype(true); \
return value_proto; \
}
GEN_ENCODE_DENSE_ARRAY_QTYPE(Unit, dense_array_unit)
#define GEN_ENCODE_DENSE_ARRAY_VALUE(NAME, T, FIELD) \
absl::StatusOr<ValueProto> EncodeDenseArray##NAME##Value(TypedRef value, \
Encoder& encoder) { \
\
const auto& dense_array = value.UnsafeAs<DenseArray<T>>(); \
ASSIGN_OR_RETURN(auto value_proto, GenValueProto(encoder)); \
auto* dense_array_value_proto = \
value_proto.MutableExtension(DenseArrayV1Proto::extension) \
->mutable_##FIELD##_value(); \
dense_array_value_proto->set_size(dense_array.size()); \
*dense_array_value_proto->mutable_bitmap() = \
GenBitmapProto(dense_array.bitmap, dense_array.bitmap_bit_offset, \
dense_array.size()); \
dense_array.ForEach([&](int64_t, bool present, const T& value) { \
if (present) { \
dense_array_value_proto->add_values(value); \
} \
}); \
return value_proto; \
} \
GEN_ENCODE_DENSE_ARRAY_QTYPE(NAME, FIELD)
GEN_ENCODE_DENSE_ARRAY_VALUE(Boolean, bool, dense_array_boolean)
GEN_ENCODE_DENSE_ARRAY_VALUE(Int32, int32_t, dense_array_int32)
GEN_ENCODE_DENSE_ARRAY_VALUE(Int64, int64_t, dense_array_int64)
GEN_ENCODE_DENSE_ARRAY_VALUE(UInt64, uint64_t, dense_array_uint64)
GEN_ENCODE_DENSE_ARRAY_VALUE(Float32, float, dense_array_float32)
GEN_ENCODE_DENSE_ARRAY_VALUE(Float64, double, dense_array_float64)
#undef GEN_ENCODE_DENSE_ARRAY_VALUE
#define GEN_ENCODE_DENSE_ARRAY_STRING_VALUE(NAME, T, FIELD) \
absl::StatusOr<ValueProto> EncodeDenseArray##NAME##Value(TypedRef value, \
Encoder& encoder) { \
\
const auto& dense_array = value.UnsafeAs<DenseArray<T>>(); \
ASSIGN_OR_RETURN(auto value_proto, GenValueProto(encoder)); \
auto* dense_array_value_proto = \
value_proto.MutableExtension(DenseArrayV1Proto::extension) \
->mutable_##FIELD##_value(); \
dense_array_value_proto->set_size(dense_array.size()); \
*dense_array_value_proto->mutable_bitmap() = \
GenBitmapProto(dense_array.bitmap, dense_array.bitmap_bit_offset, \
dense_array.size()); \
dense_array_value_proto->set_characters( \
dense_array.values.characters().span().data(), \
dense_array.values.characters().span().size()); \
for (size_t i = 0; i < dense_array.size(); ++i) { \
if (dense_array.present(i)) { \
const auto& offset = dense_array.values.offsets()[i]; \
dense_array_value_proto->add_value_offset_starts( \
offset.start - dense_array.values.base_offset()); \
dense_array_value_proto->add_value_offset_ends( \
offset.end - dense_array.values.base_offset()); \
} \
} \
return value_proto; \
} \
GEN_ENCODE_DENSE_ARRAY_QTYPE(NAME, FIELD)
GEN_ENCODE_DENSE_ARRAY_STRING_VALUE(Bytes, Bytes, dense_array_bytes)
GEN_ENCODE_DENSE_ARRAY_STRING_VALUE(Text, Text, dense_array_text)
#undef GEN_ENCODE_DENSE_ARRAY_STRING_VALUE
#undef GEN_ENCODE_DENSE_ARRAY_QTYPE
absl::StatusOr<ValueProto> EncodeDenseArrayEdgeQType(Encoder& encoder) {
ASSIGN_OR_RETURN(auto value_proto, GenValueProto(encoder));
value_proto.MutableExtension(DenseArrayV1Proto::extension)
->set_dense_array_edge_qtype(true);
return value_proto;
}
absl::StatusOr<ValueProto> EncodeDenseArrayEdgeValue(TypedRef value,
Encoder& encoder) {
ASSIGN_OR_RETURN(auto value_proto, GenValueProto(encoder));
auto* dense_array_edge_proto =
value_proto.MutableExtension(DenseArrayV1Proto::extension)
->mutable_dense_array_edge_value();
const auto& dense_array_edge = value.UnsafeAs<DenseArrayEdge>();
ASSIGN_OR_RETURN(auto dense_array_value_index,
encoder.EncodeValue(
TypedValue::FromValue(dense_array_edge.edge_values())));
value_proto.add_input_value_indices(dense_array_value_index);
switch (dense_array_edge.edge_type()) {
case DenseArrayEdge::EdgeType::MAPPING:
dense_array_edge_proto->set_edge_type(
DenseArrayV1Proto::DenseArrayEdgeProto::MAPPING);
dense_array_edge_proto->set_parent_size(dense_array_edge.parent_size());
return value_proto;
case DenseArrayEdge::EdgeType::SPLIT_POINTS:
dense_array_edge_proto->set_edge_type(
DenseArrayV1Proto::DenseArrayEdgeProto::SPLIT_POINTS);
return value_proto;
}
  return absl::InternalError(absl::StrCat("unknown DenseArrayEdge edge type: ",
                                          dense_array_edge.edge_type()));
}
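// Editorial note: the edge's underlying dense array is serialized as a
// separate value via encoder.EncodeValue() and referenced through
// add_input_value_indices(); only the edge type and, for MAPPING edges, the
// parent size are stored inline in the edge proto.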
absl::StatusOr<ValueProto> EncodeDenseArrayToScalarEdgeQType(Encoder& encoder) {
ASSIGN_OR_RETURN(auto value_proto, GenValueProto(encoder));
value_proto.MutableExtension(DenseArrayV1Proto::extension)
->set_dense_array_to_scalar_edge_qtype(true);
return value_proto;
}
absl::StatusOr<ValueProto> EncodeDenseArrayToScalarEdgeValue(TypedRef value,
Encoder& encoder) {
const auto& dense_array_to_scalar_edge =
value.UnsafeAs<DenseArrayGroupScalarEdge>();
ASSIGN_OR_RETURN(auto value_proto, GenValueProto(encoder));
value_proto.MutableExtension(DenseArrayV1Proto::extension)
->set_dense_array_to_scalar_edge_value(
dense_array_to_scalar_edge.child_size());
return value_proto;
}
absl::StatusOr<ValueProto> EncodeDenseArrayShapeQType(Encoder& encoder) {
ASSIGN_OR_RETURN(auto value_proto, GenValueProto(encoder));
value_proto.MutableExtension(DenseArrayV1Proto::extension)
->set_dense_array_shape_qtype(true);
return value_proto;
}
absl::StatusOr<ValueProto> EncodeDenseArrayShapeValue(TypedRef value,
Encoder& encoder) {
const auto& dense_array_shape = value.UnsafeAs<DenseArrayShape>();
ASSIGN_OR_RETURN(auto value_proto, GenValueProto(encoder));
value_proto.MutableExtension(DenseArrayV1Proto::extension)
->set_dense_array_shape_value(dense_array_shape.size);
return value_proto;
}
absl::StatusOr<ValueProto> EncodeDenseArray(TypedRef value, Encoder& encoder) {
using QTypeEncoder = absl::StatusOr<ValueProto> (*)(Encoder&);
using ValueEncoder = absl::StatusOr<ValueProto> (*)(TypedRef, Encoder&);
using QTypeEncoders = absl::flat_hash_map<QTypePtr, QTypeEncoder>;
using ValueEncoders = absl::flat_hash_map<QTypePtr, ValueEncoder>;
static const absl::NoDestructor<QTypeEncoders> kQTypeEncoders(QTypeEncoders{
{GetDenseArrayQType<Unit>(), &EncodeDenseArrayUnitQType},
{GetDenseArrayQType<bool>(), &EncodeDenseArrayBooleanQType},
{GetDenseArrayQType<Bytes>(), &EncodeDenseArrayBytesQType},
{GetDenseArrayQType<Text>(), &EncodeDenseArrayTextQType},
{GetDenseArrayQType<int32_t>(), &EncodeDenseArrayInt32QType},
{GetDenseArrayQType<int64_t>(), &EncodeDenseArrayInt64QType},
{GetDenseArrayQType<uint64_t>(), &EncodeDenseArrayUInt64QType},
{GetDenseArrayQType<float>(), &EncodeDenseArrayFloat32QType},
{GetDenseArrayQType<double>(), &EncodeDenseArrayFloat64QType},
{GetQType<DenseArrayEdge>(), &EncodeDenseArrayEdgeQType},
{GetQType<DenseArrayGroupScalarEdge>(),
&EncodeDenseArrayToScalarEdgeQType},
{GetQType<DenseArrayShape>(), &EncodeDenseArrayShapeQType},
});
static const absl::NoDestructor<ValueEncoders> kValueEncoders(ValueEncoders{
{GetDenseArrayQType<Unit>(), &EncodeDenseArrayUnitValue},
{GetDenseArrayQType<bool>(), &EncodeDenseArrayBooleanValue},
{GetDenseArrayQType<Bytes>(), &EncodeDenseArrayBytesValue},
{GetDenseArrayQType<Text>(), &EncodeDenseArrayTextValue},
{GetDenseArrayQType<int32_t>(), &EncodeDenseArrayInt32Value},
{GetDenseArrayQType<int64_t>(), &EncodeDenseArrayInt64Value},
{GetDenseArrayQType<uint64_t>(), &EncodeDenseArrayUInt64Value},
{GetDenseArrayQType<float>(), &EncodeDenseArrayFloat32Value},
{GetDenseArrayQType<double>(), &EncodeDenseArrayFloat64Value},
{GetQType<DenseArrayEdge>(), &EncodeDenseArrayEdgeValue},
{GetQType<DenseArrayGroupScalarEdge>(),
&EncodeDenseArrayToScalarEdgeValue},
{GetQType<DenseArrayShape>(), &EncodeDenseArrayShapeValue},
});
if (value.GetType() == GetQType<QTypePtr>()) {
const auto& qtype_value = value.UnsafeAs<QTypePtr>();
auto it = kQTypeEncoders->find(qtype_value);
if (it != kQTypeEncoders->end()) {
return it->second(encoder);
}
} else {
auto it = kValueEncoders->find(value.GetType());
if (it != kValueEncoders->end()) {
return it->second(value, encoder);
}
}
return absl::UnimplementedError(absl::StrFormat(
"%s does not support serialization of %s: %s; this may indicate a "
"missing BUILD dependency on the encoder for this qtype",
kDenseArrayV1Codec, value.GetType()->name(), value.Repr()));
}
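// Editorial note: QTYPE values (QTypePtr) are dispatched through
// kQTypeEncoders, concrete values through kValueEncoders, and anything else
// yields UnimplementedError. A minimal caller sketch, assuming an Encoder
// instance named `encoder` (hypothetical) and the CreateDenseArray helper
// from dense_array.h:
//
//   auto array = TypedValue::FromValue(CreateDenseArray<float>({1.0f, 2.0f}));
//   absl::StatusOr<ValueProto> proto = EncodeDenseArray(array.AsRef(), encoder);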
AROLLA_INITIALIZER(
.reverse_deps = {arolla::initializer_dep::kS11n},
.init_fn = []() -> absl::Status {
RETURN_IF_ERROR(RegisterValueEncoderByQType(
GetQType<DenseArrayEdge>(), EncodeDenseArray));
RETURN_IF_ERROR(RegisterValueEncoderByQType(
GetQType<DenseArrayGroupScalarEdge>(), EncodeDenseArray));
RETURN_IF_ERROR(RegisterValueEncoderByQType(
GetQType<DenseArrayShape>(), EncodeDenseArray));
absl::Status status;
arolla::meta::foreach_type<ScalarTypes>([&](auto meta_type) {
if (status.ok()) {
status = RegisterValueEncoderByQType(
GetDenseArrayQType<typename decltype(meta_type)::type>(),
EncodeDenseArray);
}
});
return status;
})
}
} | #include <cstdint>
#include <optional>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/log/check.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/memory/buffer.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/serialization/encode.h"
#include "arolla/serialization_base/base.pb.h"
#include "arolla/serialization_codecs/dense_array/dense_array_codec.pb.h"
#include "arolla/util/text.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::serialization_codecs {
namespace {
using ::arolla::serialization::Encode;
using ::arolla::serialization_base::ValueProto;
template <typename T>
absl::StatusOr<ValueProto> GenValueProto(const T& value) {
ASSIGN_OR_RETURN(auto container_proto,
Encode({TypedValue::FromValue(value)}, {}));
CHECK_GT(container_proto.decoding_steps_size(), 1);
CHECK(container_proto.decoding_steps().rbegin()[1].has_value());
return container_proto.decoding_steps().rbegin()[1].value();
}
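// Editorial note: this helper encodes a single TypedValue and returns the
// second-to-last decoding step of the container, which is expected to hold
// the ValueProto for the value itself (the final step presumably records the
// output reference); the tests below then inspect its DenseArrayV1Proto
// extension.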
TEST(EncodeDenseArrayTest, BitmapWithBitOffset) {
DenseArray<float> arr;
arr.values = CreateBuffer<float>({-1.0f, 1.0f, -1.0f, 3.0f, -1.0f});
arr.bitmap = CreateBuffer<uint32_t>({0b1111111111111111010100});
arr.bitmap_bit_offset = 1;
ASSERT_OK_AND_ASSIGN(auto value_proto, GenValueProto(arr));
ASSERT_TRUE(value_proto.HasExtension(DenseArrayV1Proto::extension));
const auto& dense_array_proto =
value_proto.GetExtension(DenseArrayV1Proto::extension);
ASSERT_EQ(dense_array_proto.value_case(),
DenseArrayV1Proto::kDenseArrayFloat32Value);
const auto& dense_array_float32_proto =
dense_array_proto.dense_array_float32_value();
ASSERT_EQ(dense_array_float32_proto.size(), 5);
ASSERT_THAT(dense_array_float32_proto.bitmap(),
testing::ElementsAre(0b1010U));
ASSERT_THAT(dense_array_float32_proto.values(),
testing::ElementsAre(1.0f, 3.0f));
}
TEST(EncodeDenseArrayTest, StringBufferBaseOffset) {
constexpr absl::string_view characters = "abracadabra";
DenseArray<Text> arr;
arr.values = StringsBuffer(
CreateBuffer<StringsBuffer::Offsets>({{1, 3}, {4, 5}, {8, 10}, {8, 11}}),
Buffer<char>::Create(characters.begin(), characters.end()), 1);
arr.bitmap = CreateBuffer<uint32_t>({0b0101});
ASSERT_THAT(arr,
testing::ElementsAre("ab", std::nullopt, "ab", std::nullopt));
ASSERT_OK_AND_ASSIGN(auto value_proto, GenValueProto(arr));
ASSERT_TRUE(value_proto.HasExtension(DenseArrayV1Proto::extension));
const auto& dense_array_proto =
value_proto.GetExtension(DenseArrayV1Proto::extension);
ASSERT_EQ(dense_array_proto.value_case(),
DenseArrayV1Proto::kDenseArrayTextValue);
const auto& dense_array_string_proto =
dense_array_proto.dense_array_text_value();
ASSERT_EQ(dense_array_string_proto.size(), 4);
ASSERT_THAT(dense_array_string_proto.bitmap(), testing::ElementsAre(0b101U));
ASSERT_EQ(dense_array_string_proto.characters(), characters);
ASSERT_THAT(dense_array_string_proto.value_offset_starts(),
testing::ElementsAre(0, 7));
ASSERT_THAT(dense_array_string_proto.value_offset_ends(),
testing::ElementsAre(2, 9));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/serialization_codecs/dense_array/encoders/dense_array_encoder.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/serialization_codecs/dense_array/encoders/dense_array_encoder_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
d805fca5-8a9a-4455-a3fa-300e6ce5fe81 | cpp | google/arolla | expr_compiler | arolla/serving/expr_compiler.cc | arolla/serving/expr_compiler_test.cc | #include "arolla/serving/expr_compiler.h"
#include <optional>
#include "absl/base/no_destructor.h"
#include "arolla/expr/optimization/optimizer.h"
namespace arolla::serving_impl {
absl::NoDestructor<std::optional<expr::Optimizer>>
ExprCompilerDefaultOptimizer::optimizer_;
} | #include "arolla/serving/expr_compiler.h"
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <tuple>
#include <type_traits>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "arolla/expr/eval/eval.h"
#include "arolla/expr/eval/thread_safe_model_executor.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/registered_expr_operator.h"
#include "arolla/expr/testing/testing.h"
#include "arolla/io/accessors_input_loader.h"
#include "arolla/io/accessors_slot_listener.h"
#include "arolla/io/input_loader.h"
#include "arolla/io/slot_listener.h"
#include "arolla/io/tuple_input_loader.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qexpr/evaluation_engine.h"
#include "arolla/qexpr/operators.h"
#include "arolla/qexpr/simple_executable.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/typed_ref.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/util/status_macros_backport.h"
namespace {
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::arolla::CompiledExpr;
using ::arolla::GetQType;
using ::arolla::InputLoaderPtr;
using ::arolla::SlotListener;
using ::arolla::expr::CallOp;
using ::arolla::expr::ExprNodePtr;
using ::arolla::expr::Leaf;
using ::arolla::testing::WithExportValueAnnotation;
using ::testing::_;
using ::testing::Eq;
using ::testing::HasSubstr;
using ::testing::IsNull;
using ::testing::NotNull;
using ::testing::Pair;
using ::testing::UnorderedElementsAre;
struct TestInput {
float x;
float y;
};
struct TestSideOutput {
std::optional<float> subtract;
};
absl::StatusOr<std::unique_ptr<arolla::InputLoader<TestInput>>>
CreateInputLoader() {
return ::arolla::CreateAccessorsInputLoader<TestInput>(
"x", [](const auto& x) { return x.x; },
"y", [](const auto& x) { return x.y; });
}
absl::StatusOr<std::unique_ptr<SlotListener<TestSideOutput>>>
CreateSlotListener() {
return ::arolla::CreateAccessorsSlotListener<TestSideOutput>(
"subtract", [](float x, TestSideOutput* out) { out->subtract = x; });
}
absl::StatusOr<ExprNodePtr> CreateExpr() {
ASSIGN_OR_RETURN(auto add_expr, CallOp("math.add", {Leaf("x"), Leaf("y")}));
ASSIGN_OR_RETURN(auto subtract_expr,
CallOp("math.subtract", {Leaf("x"), Leaf("y")}));
return WithExportValueAnnotation(add_expr, "subtract", subtract_expr);
}
absl::StatusOr<std::unique_ptr<CompiledExpr>> CreateCompiledExpr() {
ASSIGN_OR_RETURN(auto add_expr, CallOp("math.add", {Leaf("x"), Leaf("y")}));
ASSIGN_OR_RETURN(auto subtract_expr,
CallOp("math.subtract", {Leaf("x"), Leaf("y")}));
return ::arolla::expr::CompileForDynamicEvaluation(
::arolla::expr::DynamicEvaluationEngineOptions(), add_expr,
{{"x", GetQType<float>()}, {"y", GetQType<float>()}},
{{"subtract", subtract_expr}});
}
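// Editorial note: both helpers describe the same model: the main output is
// x + y, and x - y is exported under the side-output name "subtract", which
// the SlotListener writes into TestSideOutput::subtract. CreateExpr() yields
// the dynamic expression; CreateCompiledExpr() yields a pre-compiled
// CompiledExpr used by the codegen-style test cases below.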
}
namespace arolla {
namespace {
class TestInplaceCompiledExpr : public InplaceCompiledExpr {
public:
TestInplaceCompiledExpr()
: InplaceCompiledExpr(
{}, GetQType<float>(),
{}) {}
absl::StatusOr<std::unique_ptr<BoundExpr>> InplaceBind(
const absl::flat_hash_map<std::string, TypedSlot>& input_slots,
TypedSlot output_slot,
const absl::flat_hash_map<std::string, TypedSlot>& named_output_slots)
const final {
return std::make_unique<SimpleBoundExpr>(
input_slots, output_slot,
std::vector<std::unique_ptr<BoundOperator>>{},
std::vector<std::unique_ptr<BoundOperator>>{},
named_output_slots);
}
};
class ExprCompilerTest : public ::testing::Test {
public:
void SetUp() override {
ASSERT_OK_AND_ASSIGN(auto add_expr,
expr::CallOp("math.add", {Leaf("x"), Leaf("y")}));
ASSERT_OK_AND_ASSIGN(auto subtract_expr,
expr::CallOp("math.subtract", {Leaf("x"), Leaf("y")}));
ASSERT_OK_AND_ASSIGN(expr_, CreateExpr());
ASSERT_OK_AND_ASSIGN(compiled_expr_, CreateCompiledExpr());
}
expr::ExprNodePtr expr_;
std::unique_ptr<const CompiledExpr> compiled_expr_;
};
TEST_F(ExprCompilerTest, CompileExprNodePtr) {
ASSERT_OK_AND_ASSIGN(auto model,
(ExprCompiler<TestInput, std::optional<float>>())
.SetInputLoader(CreateInputLoader())
.AllowOutputCasting()
.Compile(expr_));
ASSERT_OK_AND_ASSIGN(
auto model_with_options,
(ExprCompiler<TestInput, std::optional<float>>())
.SetInputLoader(CreateInputLoader())
.AllowOutputCasting()
.Compile<ExprCompilerFlags::kEvalWithOptions>(expr_));
static_assert(
std::is_same_v<decltype(model),
std::function<absl::StatusOr<std::optional<float>>(
const TestInput&)>>);
static_assert(
std::is_same_v<decltype(model_with_options),
std::function<absl::StatusOr<std::optional<float>>(
const ModelFunctionOptions&, const TestInput&)>>);
TestInput input{.x = 28, .y = 29};
EXPECT_THAT(model(input), IsOkAndHolds(57));
EXPECT_THAT(model_with_options({}, input), IsOkAndHolds(57));
}
TEST_F(ExprCompilerTest, CompileExprNodePtrWithSideOutput) {
ASSERT_OK_AND_ASSIGN(
auto model,
(ExprCompiler<TestInput, std::optional<float>, TestSideOutput>())
.SetInputLoader(CreateInputLoader())
.SetSlotListener(CreateSlotListener())
.AllowOutputCasting()
.Compile(expr_));
static_assert(
std::is_same_v<decltype(model),
std::function<absl::StatusOr<std::optional<float>>(
const TestInput&, TestSideOutput*)>>);
TestInput input{.x = 28, .y = 29};
EXPECT_THAT(model(input, nullptr), IsOkAndHolds(57));
TestSideOutput side_output;
EXPECT_THAT(model(input, &side_output), IsOkAndHolds(57));
EXPECT_THAT(side_output.subtract, Eq(-1));
}
TEST_F(ExprCompilerTest, CompileCompiledExpr) {
ASSERT_OK_AND_ASSIGN(auto model,
(ExprCompiler<TestInput, std::optional<float>>())
.SetInputLoader(CreateInputLoader())
.AllowOutputCasting()
.Compile(*compiled_expr_));
static_assert(
std::is_same_v<decltype(model),
std::function<absl::StatusOr<std::optional<float>>(
const TestInput&)>>);
TestInput input{.x = 28, .y = 29};
EXPECT_THAT(model(input), IsOkAndHolds(57));
}
TEST_F(ExprCompilerTest, CompileCompiledExprForceNonOptionalOutput) {
ASSERT_OK_AND_ASSIGN(auto model, (ExprCompiler<TestInput, float>())
.SetInputLoader(CreateInputLoader())
.ForceNonOptionalOutput()
.Compile(*compiled_expr_));
static_assert(
std::is_same_v<decltype(model),
std::function<absl::StatusOr<float>(const TestInput&)>>);
TestInput input{.x = 28, .y = 29};
EXPECT_THAT(model(input), IsOkAndHolds(57));
}
TEST_F(ExprCompilerTest, CompileCompiledExprWithSideOutput) {
ASSERT_OK_AND_ASSIGN(
auto model,
(ExprCompiler<TestInput, std::optional<float>, TestSideOutput>())
.SetInputLoader(CreateInputLoader())
.SetSlotListener(CreateSlotListener())
.AllowOutputCasting()
.Compile(*compiled_expr_));
static_assert(
std::is_same_v<decltype(model),
std::function<absl::StatusOr<std::optional<float>>(
const TestInput&, TestSideOutput*)>>);
TestInput input{.x = 28, .y = 29};
EXPECT_THAT(model(input, nullptr), IsOkAndHolds(57));
TestSideOutput side_output;
EXPECT_THAT(model(input, &side_output), IsOkAndHolds(57));
EXPECT_THAT(side_output.subtract, Eq(-1));
}
TEST_F(ExprCompilerTest, CompileExprOperatorWithTuple) {
ASSERT_OK_AND_ASSIGN(auto model,
(ExprCompiler<std::tuple<float, float>, float>())
.CompileOperator(expr::LookupOperator("math.add")));
static_assert(
std::is_same_v<decltype(model), std::function<absl::StatusOr<float>(
const std::tuple<float, float>&)>>);
EXPECT_THAT(model({28, 29}), IsOkAndHolds(57));
}
TEST_F(ExprCompilerTest, CompileExprOperatorWithTypedRefs) {
ASSERT_OK_AND_ASSIGN(
auto model, (ExprCompiler<absl::Span<const TypedRef>, TypedValue>())
.CompileOperator(expr::LookupOperator("math.add"),
{GetQType<float>(), GetQType<float>()}));
static_assert(
std::is_same_v<decltype(model), std::function<absl::StatusOr<TypedValue>(
const absl::Span<const TypedRef>&)>>);
auto a = TypedValue::FromValue<float>(28);
auto b = TypedValue::FromValue<float>(29);
std::vector<TypedRef> args{a.AsRef(), b.AsRef()};
ASSERT_OK_AND_ASSIGN(TypedValue res, model(args));
EXPECT_THAT(res.As<float>(), IsOkAndHolds(57));
}
TEST_F(ExprCompilerTest, Ownership) {
ExprCompiler<TestInput, std::optional<float>, TestSideOutput> mc;
mc.SetInputLoader(CreateInputLoader());
ExprCompiler<TestInput, std::optional<float>, TestSideOutput> other_mc =
std::move(mc);
other_mc.SetSlotListener(CreateSlotListener());
mc = std::move(other_mc);
mc.AllowOutputCasting();
ASSERT_OK(mc.Compile(expr_));
}
TEST_F(ExprCompilerTest, Move) {
auto set_input_loader = [](auto mc) {
return std::move(mc).SetInputLoader(CreateInputLoader());
};
ASSERT_OK_AND_ASSIGN(
auto model,
set_input_loader(
ExprCompiler<TestInput, std::optional<float>, TestSideOutput>()
.SetSlotListener(CreateSlotListener()))
.SetExperimentalArenaAllocator()
.AllowOutputCasting()
.Compile(expr_));
TestInput input{.x = 28, .y = 29};
EXPECT_THAT(model(input, nullptr), IsOkAndHolds(57));
}
TEST_F(ExprCompilerTest, Optimizer) {
auto replace_add_with_subtract =
[](expr::ExprNodePtr x) -> absl::StatusOr<expr::ExprNodePtr> {
if (expr::IsBackendOperator(*expr::DecayRegisteredOperator(x->op()),
"math.add")) {
return expr::WithNewOperator(x, *expr::LookupOperator("math.subtract"));
}
return x;
};
ASSERT_OK_AND_ASSIGN(auto model,
(ExprCompiler<TestInput, std::optional<float>>())
.SetInputLoader(CreateInputLoader())
.SetExprOptimizer(replace_add_with_subtract)
.AllowOutputCasting()
.Compile(expr_));
TestInput input{.x = 28, .y = 29};
EXPECT_THAT(model(input), IsOkAndHolds(-1));
}
TEST_F(ExprCompilerTest, OtherOptionsSmokeTest) {
ASSERT_OK_AND_ASSIGN(
auto model,
(ExprCompiler<TestInput, std::optional<float>, TestSideOutput>())
.SetInputLoader(CreateInputLoader())
.SetSlotListener(CreateSlotListener())
.SetExperimentalArenaAllocator()
.SetAlwaysCloneThreadSafetyPolicy()
.AllowOutputCasting()
.Compile(expr_));
TestInput input{.x = 28, .y = 29};
EXPECT_THAT(model(input, nullptr), IsOkAndHolds(57));
TestSideOutput side_output;
EXPECT_THAT(model(input, &side_output), IsOkAndHolds(57));
EXPECT_THAT(side_output.subtract, Eq(-1));
}
TEST_F(ExprCompilerTest, DefaultThreadSafetyPolicy) {
ASSERT_OK_AND_ASSIGN(
auto model,
(ExprCompiler<TestInput, std::optional<float>, TestSideOutput>())
.SetInputLoader(CreateInputLoader())
.SetSlotListener(CreateSlotListener())
.AllowOutputCasting()
.Compile(expr_));
TestInput input{.x = 28, .y = 29};
TestSideOutput side_output;
EXPECT_THAT(model(input, &side_output), IsOkAndHolds(57));
EXPECT_THAT(side_output.subtract, Eq(-1));
EXPECT_THAT((model.target<expr::ThreadSafePoolModelExecutor<
TestInput, std::optional<float>, TestSideOutput>>()),
NotNull());
}
TEST_F(ExprCompilerTest, DefaultThreadSafetyPolicy_Codegen) {
ASSERT_OK_AND_ASSIGN(auto eval_model, (ExprCompiler<TestInput, float>())
.SetInputLoader(CreateInputLoader())
.Compile(*compiled_expr_));
ASSERT_OK_AND_ASSIGN(auto codegen_model,
(ExprCompiler<TestInput, float>())
.SetInputLoader(CreateInputLoader())
.Compile(TestInplaceCompiledExpr()));
EXPECT_THAT(
(eval_model
.target<expr::ThreadSafePoolModelExecutor<TestInput, float>>()),
NotNull());
EXPECT_THAT(
(codegen_model
.target<expr::ThreadSafePoolModelExecutor<TestInput, float>>()),
IsNull());
}
TEST_F(ExprCompilerTest, PoolThreadSafetyPolicy) {
ASSERT_OK_AND_ASSIGN(
auto model,
(ExprCompiler<TestInput, std::optional<float>, TestSideOutput>())
.SetInputLoader(CreateInputLoader())
.SetSlotListener(CreateSlotListener())
.SetPoolThreadSafetyPolicy()
.AllowOutputCasting()
.Compile(expr_));
TestInput input{.x = 28, .y = 29};
TestSideOutput side_output;
EXPECT_THAT(model(input, &side_output), IsOkAndHolds(57));
EXPECT_THAT(side_output.subtract, Eq(-1));
EXPECT_THAT((model.target<expr::ThreadSafePoolModelExecutor<
TestInput, std::optional<float>, TestSideOutput>>()),
NotNull());
}
TEST_F(ExprCompilerTest, AlwaysCloneThreadSafetyPolicy) {
ASSERT_OK_AND_ASSIGN(
auto model,
(ExprCompiler<TestInput, std::optional<float>, TestSideOutput>())
.SetInputLoader(CreateInputLoader())
.SetSlotListener(CreateSlotListener())
.SetAlwaysCloneThreadSafetyPolicy()
.AllowOutputCasting()
.Compile(expr_));
TestInput input{.x = 28, .y = 29};
TestSideOutput side_output;
EXPECT_THAT(model(input, &side_output), IsOkAndHolds(57));
EXPECT_THAT(side_output.subtract, Eq(-1));
}
TEST_F(ExprCompilerTest, ThreadUnsafe) {
ASSERT_OK_AND_ASSIGN(
auto model,
(ExprCompiler<TestInput, std::optional<float>, TestSideOutput>())
.SetInputLoader(CreateInputLoader())
.SetSlotListener(CreateSlotListener())
.SetThreadUnsafe_I_SWEAR_TO_COPY_MODEL_FUNCTION_BEFORE_CALL()
.AllowOutputCasting()
.Compile(expr_));
TestInput input{.x = 28, .y = 29};
TestSideOutput side_output;
EXPECT_THAT(model(input, &side_output), IsOkAndHolds(57));
EXPECT_THAT(side_output.subtract, Eq(-1));
}
TEST_F(ExprCompilerTest, ForceNonOptionalOutput) {
ASSERT_OK_AND_ASSIGN(auto expr, CallOp("math.neg", {Leaf("x")}));
ASSERT_OK_AND_ASSIGN(
auto input_loader,
::arolla::CreateAccessorsInputLoader<std::optional<float>>(
"x", [](const auto& x) { return OptionalValue<float>(x); }));
ASSERT_OK_AND_ASSIGN(
auto model,
(ExprCompiler<std::optional<float>, std::optional<float>>())
.SetInputLoader(MakeNotOwningInputLoader(input_loader.get()))
.Compile(expr));
EXPECT_THAT(model(std::nullopt), IsOkAndHolds(std::nullopt));
EXPECT_THAT((ExprCompiler<std::optional<float>, float>())
.SetInputLoader(MakeNotOwningInputLoader(input_loader.get()))
.AllowOutputCasting()
.Compile(expr),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("model output is deduced to optional, while "
"non-optional is requested")));
ASSERT_OK_AND_ASSIGN(
auto full_model,
(ExprCompiler<std::optional<float>, float>())
.SetInputLoader(MakeNotOwningInputLoader(input_loader.get()))
.ForceNonOptionalOutput()
.Compile(expr));
EXPECT_THAT(full_model(-57), IsOkAndHolds(57));
EXPECT_THAT(full_model(std::nullopt),
StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr("expects a present value, got missing")));
}
class VoidSlotListener : public StaticSlotListener<void> {
public:
VoidSlotListener() : StaticSlotListener<void>({}) {}
absl::StatusOr<BoundSlotListener<Output>> BindImpl(
const absl::flat_hash_map<std::string, TypedSlot>& input_slots)
const final {
return absl::UnimplementedError("unimplemented");
}
private:
};
TEST_F(ExprCompilerTest, Errors) {
EXPECT_THAT((ExprCompiler<TestInput, std::optional<float>, TestSideOutput>())
.SetSlotListener(CreateSlotListener())
.Compile(expr_),
StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr("InputLoader is not specified, use "
"ExprCompiler::SetInputLoader()")));
EXPECT_THAT(
(ExprCompiler<TestInput, std::optional<float>, TestSideOutput>())
.SetInputLoader(CreateInputLoader())
.Compile(expr_),
StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr("SlotListener is not specified, use "
"ExprCompiler::SetSlotListener() or ExprCompiler<...> "
"without SideOutput template parameter")));
EXPECT_THAT((ExprCompiler<TestInput, std::optional<float>, void>())
.SetInputLoader(CreateInputLoader())
.SetSlotListener(std::unique_ptr<SlotListener<void>>(
new VoidSlotListener()))
.Compile(expr_),
StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr("SlotListener with SideOutput==void is not "
"supported by ExprCompiler")));
EXPECT_THAT(
(ExprCompiler<std::tuple<float, float>, std::optional<float>>())
.SetInputLoader(
TupleInputLoader<std::tuple<float, float>>::Create({"x", "y"}))
.CompileOperator(nullptr),
StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr("InputLoader is specified, but not needed for "
"ExprCompiler::CompilerOperator")));
}
TEST_F(ExprCompilerTest, CompileExprSet) {
ASSERT_OK_AND_ASSIGN(
auto models,
CompileExprSet(
ExprCompiler<TestInput, std::optional<float>>()
.SetInputLoader(CreateInputLoader())
.AllowOutputCasting(),
absl::flat_hash_map<std::string, absl::StatusOr<expr::ExprNodePtr>>{
{"first", expr_}, {"second", expr_}}));
ASSERT_THAT(models,
UnorderedElementsAre(Pair("first", _), Pair("second", _)));
static_assert(
std::is_same_v<std::decay_t<decltype(models[""])>,
std::function<absl::StatusOr<std::optional<float>>(
const TestInput&)>>);
TestInput input{.x = 28, .y = 29};
EXPECT_THAT(models["first"](input), IsOkAndHolds(57));
}
TEST_F(ExprCompilerTest, CompileExprSet_Errors) {
EXPECT_THAT(
CompileExprSet(
ExprCompiler<TestInput, std::optional<float>>()
.SetInputLoader(CreateInputLoader())
.AllowOutputCasting(),
absl::flat_hash_map<std::string, absl::StatusOr<expr::ExprNodePtr>>{
{"first", expr_},
{"bad_model", absl::FailedPreconditionError("very bad model")}}),
StatusIs(absl::StatusCode::kFailedPrecondition,
"very bad model; while initializing model \"bad_model\""));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/serving/expr_compiler.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/serving/expr_compiler_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
9ec08230-232b-4d04-9482-3ba951dfa44e | cpp | google/arolla | inplace_expr_compiler | arolla/serving/inplace_expr_compiler.cc | arolla/serving/inplace_expr_compiler_test.cc | #include "arolla/serving/inplace_expr_compiler.h"
#include <cstddef>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "arolla/naming/table.h"
#include "arolla/qexpr/evaluation_engine.h"
#include "arolla/qtype/named_field_qtype.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::inplace_expr_compiler_impl {
TypedSlotMap CollectInternalSlots(TypedSlot root_slot) {
TypedSlotMap result;
if (GetFieldNames(root_slot.GetType()).empty()) {
return result;
}
std::vector<std::pair<TypedSlot, naming::TablePath>> stack{{root_slot, {}}};
while (!stack.empty()) {
auto [slot, table] = stack.back();
stack.pop_back();
auto field_names = GetFieldNames(slot.GetType());
for (size_t i = 0; i < field_names.size(); ++i) {
const auto& field_name = field_names[i];
const TypedSlot& field_slot = slot.SubSlot(i);
result.emplace(table.Column(naming::FieldAccess(field_name)).FullName(),
field_slot);
if (!GetFieldNames(field_slot.GetType()).empty()) {
stack.emplace_back(field_slot,
table.Child(naming::FieldAccess(field_name)));
}
}
}
return result;
}
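// Editorial note: the keys follow naming::TablePath conventions, so for a
// struct like TestStruct from the companion test (fields x, y and nested
// side_outputs) the map would contain entries such as "/x", "/y",
// "/side_outputs/x_plus_y" and "/side_outputs/x_times_y", each mapped to the
// TypedSlot of the corresponding field inside the struct's layout.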
namespace {
absl::Status CheckField(QTypePtr qtype, const TypedSlotMap& slot_map,
QTypePtr field_qtype, absl::string_view field_name) {
if (GetFieldNames(qtype).empty()) {
return absl::FailedPreconditionError(
absl::StrCat("no registered field names for ", qtype->name(),
" in Compile.*ExprOnStructInput"));
}
if (!slot_map.contains(field_name)) {
return absl::FailedPreconditionError(
absl::StrCat("input `", field_name, "` not found in ", qtype->name(),
" in Compile.*ExprOnStructInput"));
}
QTypePtr result_type = slot_map.at(field_name).GetType();
if (result_type != field_qtype) {
return absl::FailedPreconditionError(absl::StrCat(
"input `", field_name, "` type mismatch for ", qtype->name(),
" in Compile.*ExprOnStructInput, expected in struct: ",
result_type->name(), ", found in expr: ", field_qtype->name()));
}
return absl::OkStatus();
}
absl::StatusOr<TypedSlotMap> CollectInputSlots(
QTypePtr qtype, const TypedSlotMap& struct_slot_map,
const CompiledExpr& compiled_expr) {
TypedSlotMap input_slots;
input_slots.reserve(compiled_expr.input_types().size());
for (const auto& [name, field_qtype] : compiled_expr.input_types()) {
RETURN_IF_ERROR(CheckField(qtype, struct_slot_map, field_qtype, name));
input_slots.emplace(name, struct_slot_map.at(name));
}
return input_slots;
}
}
absl::StatusOr<IoSlots> CollectIoSlots(QTypePtr qtype,
const CompiledExpr& compiled_expr,
absl::string_view final_output_name) {
TypedSlotMap struct_slot_map =
CollectInternalSlots(TypedSlot::UnsafeFromOffset(qtype, 0));
ASSIGN_OR_RETURN(TypedSlotMap input_slots,
CollectInputSlots(qtype, struct_slot_map, compiled_expr));
RETURN_IF_ERROR(CheckField(qtype, struct_slot_map,
compiled_expr.output_type(), final_output_name));
if (compiled_expr.input_types().contains(final_output_name)) {
return absl::FailedPreconditionError(absl::StrCat(
final_output_name, " present both as an input and as final output"));
}
if (compiled_expr.named_output_types().contains(final_output_name)) {
return absl::FailedPreconditionError(
absl::StrCat(final_output_name,
" present both as final output and as named output"));
}
for (const auto& [name, field_qtype] : compiled_expr.input_types()) {
if (compiled_expr.named_output_types().contains(name)) {
return absl::FailedPreconditionError(
absl::StrCat(name, " present both as an input and as named output"));
}
}
for (const auto& [name, field_qtype] : compiled_expr.named_output_types()) {
RETURN_IF_ERROR(CheckField(qtype, struct_slot_map, field_qtype, name));
}
absl::flat_hash_map<std::string, TypedSlot> named_output_slots;
named_output_slots.reserve(compiled_expr.named_output_types().size());
for (const auto& [name, _] : compiled_expr.named_output_types()) {
named_output_slots.emplace(name, struct_slot_map.at(name));
}
return IoSlots{.input_slots = input_slots,
.output_slot = struct_slot_map.at(final_output_name),
.named_output_slots = named_output_slots};
}
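// Editorial note: CollectIoSlots is the validation core behind
// CompileInplaceExprOnStruct<T>(): every expression input and named output
// must exist in the struct with the exact type, and no name may serve two
// roles (input vs. final output vs. named output). A minimal usage sketch,
// mirroring the companion tests (TestCompiledExpr and TestStruct are defined
// there):
//
//   TestCompiledExpr compiled_expr;
//   ASSIGN_OR_RETURN(auto eval_fn,
//                    CompileInplaceExprOnStruct<TestStruct>(
//                        compiled_expr, "/side_outputs/x_plus_y"));
//   TestStruct s{.x = 5.f, .y = 7.};
//   RETURN_IF_ERROR(eval_fn(s));  // fills s.side_outputs in place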
} | #include "arolla/serving/inplace_expr_compiler.h"
#include <array>
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <tuple>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_node.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qexpr/eval_context.h"
#include "arolla/qexpr/evaluation_engine.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/simple_qtype.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/serving/expr_compiler.h"
#include "arolla/util/bytes.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/struct_field.h"
#include "arolla/util/text.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::testing::HasSubstr;
using ::testing::MatchesRegex;
struct UnsupportedType {};
struct TestOutputStruct {
double x_plus_y;
double x_times_y;
UnsupportedType unsupported_type_field;
double unused;
static auto ArollaStructFields() {
using CppType = TestOutputStruct;
return std::tuple{
AROLLA_DECLARE_STRUCT_FIELD(x_plus_y),
AROLLA_DECLARE_STRUCT_FIELD(x_times_y),
AROLLA_SKIP_STRUCT_FIELD(unsupported_type_field),
AROLLA_DECLARE_STRUCT_FIELD(unused),
};
}
void ArollaFingerprint(FingerprintHasher* hasher) const {
CombineStructFields(hasher, *this);
}
};
struct TestStruct {
float x;
double y;
void* unsupported_field;
TestOutputStruct side_outputs;
static auto ArollaStructFields() {
using CppType = TestStruct;
return std::tuple{
AROLLA_DECLARE_STRUCT_FIELD(x),
AROLLA_DECLARE_STRUCT_FIELD(y),
AROLLA_SKIP_STRUCT_FIELD(unsupported_field),
AROLLA_DECLARE_STRUCT_FIELD(side_outputs),
};
}
void ArollaFingerprint(FingerprintHasher* hasher) const {
CombineStructFields(hasher, *this);
}
};
struct TestStructWithOptional {
OptionalValue<float> x;
OptionalValue<double> y;
std::array<int, 6> skip_me;
OptionalValue<double> x_plus_y;
constexpr static auto ArollaStructFields() {
using CppType = TestStructWithOptional;
return std::tuple{
AROLLA_DECLARE_STRUCT_FIELD(x),
AROLLA_DECLARE_STRUCT_FIELD(y),
AROLLA_SKIP_STRUCT_FIELD(skip_me),
AROLLA_DECLARE_STRUCT_FIELD(x_plus_y),
};
}
void ArollaFingerprint(FingerprintHasher* hasher) const {
CombineStructFields(hasher, *this);
}
};
struct TestStructWithString {
std::string title;
UnsupportedType it_is_not_supported;
OptionalValue<::arolla::Bytes> name;
UnsupportedType not_supported_sorry;
std::string full_name;
static auto ArollaStructFields() {
using CppType = TestStructWithString;
return std::tuple{
AROLLA_DECLARE_STRUCT_FIELD(title),
AROLLA_SKIP_STRUCT_FIELD(it_is_not_supported),
AROLLA_DECLARE_STRUCT_FIELD(name),
AROLLA_SKIP_STRUCT_FIELD(not_supported_sorry),
AROLLA_DECLARE_STRUCT_FIELD(full_name),
};
}
void ArollaFingerprint(FingerprintHasher* hasher) const {
CombineStructFields(hasher, *this);
}
};
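// Editorial note: ArollaStructFields() is what exposes these structs to
// CreateStructInputLoader/CreateStructSlotListener and to
// CompileInplaceExprOnStruct: declared fields become leaves addressed by
// their field path (e.g. "/x", "/side_outputs/x_plus_y"), while
// AROLLA_SKIP_STRUCT_FIELD keeps unsupported members out of the frame layout.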
}
AROLLA_DECLARE_SIMPLE_QTYPE(TEST_OUTPUT_STRUCT, TestOutputStruct);
AROLLA_DEFINE_SIMPLE_QTYPE(TEST_OUTPUT_STRUCT, TestOutputStruct);
AROLLA_DECLARE_SIMPLE_QTYPE(TEST_STRUCT, TestStruct);
AROLLA_DEFINE_SIMPLE_QTYPE(TEST_STRUCT, TestStruct);
AROLLA_DECLARE_SIMPLE_QTYPE(TEST_STRUCT_WITH_OPTIONAL, TestStructWithOptional);
AROLLA_DEFINE_SIMPLE_QTYPE(TEST_STRUCT_WITH_OPTIONAL, TestStructWithOptional);
AROLLA_DECLARE_SIMPLE_QTYPE(TEST_STRUCT_WITH_STRING, TestStructWithString);
AROLLA_DEFINE_SIMPLE_QTYPE(TEST_STRUCT_WITH_STRING, TestStructWithString);
namespace {
class FailingCompiledExpr : public InplaceCompiledExpr {
public:
using InplaceCompiledExpr::InplaceCompiledExpr;
absl::StatusOr<std::unique_ptr<BoundExpr>> InplaceBind(
const absl::flat_hash_map<std::string, TypedSlot>& slots,
TypedSlot output_slot,
const absl::flat_hash_map<std::string, TypedSlot>& )
const final {
return absl::InternalError("Fake:(");
}
};
TEST(CompileInplaceExprOnStruct, NoFieldNames) {
FailingCompiledExpr compiled_expr({}, GetQType<double>(), {});
EXPECT_THAT(
CompileInplaceExprOnStruct<int32_t>(compiled_expr, "/final_output"),
StatusIs(absl::StatusCode::kFailedPrecondition,
MatchesRegex(".*registered field.*INT32.*")));
}
TEST(CompileInplaceExprOnStruct, NoFinalOutputName) {
FailingCompiledExpr compiled_expr({}, GetQType<double>(), {});
EXPECT_THAT(
CompileInplaceExprOnStruct<TestStruct>(compiled_expr, "/final_output"),
StatusIs(absl::StatusCode::kFailedPrecondition,
MatchesRegex(".*input.*/final_output.*TEST_STRUCT.*")));
}
TEST(CompileInplaceExprOnStruct, InputTypeMismatch) {
FailingCompiledExpr compiled_expr({}, GetQType<double>(), {});
EXPECT_THAT(
CompileInplaceExprOnStruct<TestStruct>(compiled_expr, "/x"),
StatusIs(absl::StatusCode::kFailedPrecondition,
MatchesRegex(
".*/x.*TEST_STRUCT.*expected.*FLOAT32.*found.*FLOAT64")));
}
TEST(CompileInplaceExprOnStruct, InputTypeUnknown) {
FailingCompiledExpr compiled_expr({}, GetQType<double>(), {});
EXPECT_THAT(CompileInplaceExprOnStruct<TestStruct>(compiled_expr, "/qq"),
StatusIs(absl::StatusCode::kFailedPrecondition,
MatchesRegex(".*input.*/qq.*TEST_STRUCT.*")));
}
TEST(CompileInplaceExprOnStruct, FinalOutputTypeMismatch) {
FailingCompiledExpr compiled_expr({{"/x", GetQType<double>()}},
GetQType<double>(), {});
EXPECT_THAT(
CompileInplaceExprOnStruct<TestStruct>(compiled_expr,
"/side_outputs/x_plus_y"),
StatusIs(absl::StatusCode::kFailedPrecondition,
MatchesRegex(
".*/x.*TEST_STRUCT.*expected.*FLOAT32.*found.*FLOAT64")));
}
TEST(CompileInplaceExprOnStruct, SideOutputTypeMismatch) {
FailingCompiledExpr compiled_expr(
{{"/x", GetQType<float>()}}, GetQType<double>(),
{{"/side_outputs/x_times_y", GetQType<float>()}});
EXPECT_THAT(
CompileInplaceExprOnStruct<TestStruct>(compiled_expr,
"/side_outputs/x_plus_y"),
StatusIs(
absl::StatusCode::kFailedPrecondition,
MatchesRegex(
".*/side_outputs/"
"x_times_y.*TEST_STRUCT.*expected.*FLOAT64.*found.*FLOAT32")));
}
TEST(CompileInplaceExprOnStruct, SideOutputUnknown) {
FailingCompiledExpr compiled_expr(
{{"/x", GetQType<float>()}}, GetQType<double>(),
{{"/side_outputs/x_power_y", GetQType<double>()}});
EXPECT_THAT(
CompileInplaceExprOnStruct<TestStruct>(compiled_expr,
"/side_outputs/x_plus_y"),
StatusIs(
absl::StatusCode::kFailedPrecondition,
MatchesRegex(".*/side_outputs/x_power_y.*not found.*TEST_STRUCT.*")));
}
TEST(CompileInplaceExprOnStruct, CompiledExprBindingFailure) {
FailingCompiledExpr compiled_expr({{"/x", GetQType<float>()}},
GetQType<double>(), {});
EXPECT_THAT(CompileInplaceExprOnStruct<TestStruct>(compiled_expr,
"/side_outputs/x_plus_y"),
StatusIs(absl::StatusCode::kInternal, "Fake:("));
}
TEST(CompileInplaceExprOnStruct, InputSideOutputCollision) {
FailingCompiledExpr compiled_expr({{"/y", GetQType<double>()}},
GetQType<double>(),
{{"/y", GetQType<double>()}});
EXPECT_THAT(CompileInplaceExprOnStruct<TestStruct>(compiled_expr,
"/side_outputs/x_plus_y"),
StatusIs(absl::StatusCode::kFailedPrecondition,
MatchesRegex(".*/y.*input.*named output.*")));
}
TEST(CompileInplaceExprOnStruct, InputFinalOutputCollision) {
FailingCompiledExpr compiled_expr(
{{"/y", GetQType<double>()}}, GetQType<double>(),
{{"/side_outputs/x_plus_y", GetQType<double>()}});
EXPECT_THAT(CompileInplaceExprOnStruct<TestStruct>(compiled_expr, "/y"),
StatusIs(absl::StatusCode::kFailedPrecondition,
MatchesRegex(".*/y.*input.*final output.*")));
}
TEST(CompileInplaceExprOnStruct, SideOutputFinalOutputCollision) {
FailingCompiledExpr compiled_expr(
{{"/y", GetQType<double>()}}, GetQType<double>(),
{{"/side_outputs/x_plus_y", GetQType<double>()}});
EXPECT_THAT(
CompileInplaceExprOnStruct<TestStruct>(compiled_expr,
"/side_outputs/x_plus_y"),
StatusIs(absl::StatusCode::kFailedPrecondition,
MatchesRegex(
".*/side_outputs/x_plus_y.*final output.*named output.*")));
}
class TestBoundExpr final : public BoundExpr {
public:
TestBoundExpr(FrameLayout::Slot<float> x, FrameLayout::Slot<double> y,
FrameLayout::Slot<double> x_plus_y,
FrameLayout::Slot<double> x_times_y)
: BoundExpr(
{{"/x", TypedSlot::FromSlot(x)}, {"/y", TypedSlot::FromSlot(y)}},
TypedSlot::FromSlot(x_plus_y),
{{"/side_outputs/x_times_y", TypedSlot::FromSlot(x_times_y)}}),
x_(x),
y_(y),
x_plus_y_(x_plus_y),
x_times_y_(x_times_y) {}
void InitializeLiterals(EvaluationContext*, FramePtr) const final {}
void Execute(EvaluationContext*, FramePtr frame) const final {
frame.Set(x_plus_y_, frame.Get(x_) + frame.Get(y_));
frame.Set(x_times_y_, frame.Get(x_) * frame.Get(y_));
}
private:
FrameLayout::Slot<float> x_;
FrameLayout::Slot<double> y_;
FrameLayout::Slot<double> x_plus_y_;
FrameLayout::Slot<double> x_times_y_;
};
class TestCompiledExpr : public InplaceCompiledExpr {
public:
TestCompiledExpr()
: InplaceCompiledExpr(
{{"/x", GetQType<float>()}, {"/y", GetQType<double>()}},
GetQType<double>(),
{{"/side_outputs/x_times_y", GetQType<double>()}}) {}
absl::StatusOr<std::unique_ptr<BoundExpr>> InplaceBind(
const absl::flat_hash_map<std::string, TypedSlot>& slots,
TypedSlot output_slot,
const absl::flat_hash_map<std::string, TypedSlot>& named_output_slots)
const final {
RETURN_IF_ERROR(VerifySlotTypes(input_types(), slots));
return std::make_unique<TestBoundExpr>(
slots.at("/x").ToSlot<float>().value(),
slots.at("/y").ToSlot<double>().value(),
output_slot.ToSlot<double>().value(),
named_output_slots.at("/side_outputs/x_times_y")
.ToSlot<double>()
.value());
}
};
TEST(CompileInplaceExprOnStructTest, SuccessXPlusY) {
TestCompiledExpr compiled_expr;
ASSERT_OK_AND_ASSIGN(std::function<absl::Status(TestStruct&)> eval_fn,
CompileInplaceExprOnStruct<TestStruct>(
compiled_expr, "/side_outputs/x_plus_y"));
TestStruct input{
.x = 5.f,
.y = 7.,
.side_outputs = {.x_plus_y = -1, .x_times_y = -1, .unused = -1}};
ASSERT_OK(eval_fn(input));
EXPECT_EQ(input.side_outputs.x_plus_y, 12);
EXPECT_EQ(input.side_outputs.x_times_y, 35.);
EXPECT_EQ(input.x, 5);
EXPECT_EQ(input.y, 7);
EXPECT_EQ(input.side_outputs.unused, -1.);
}
class TestBoundExprWithOptionals final : public BoundExpr {
public:
TestBoundExprWithOptionals(FrameLayout::Slot<OptionalValue<float>> x,
FrameLayout::Slot<OptionalValue<double>> y,
FrameLayout::Slot<OptionalValue<double>> x_plus_y)
: BoundExpr(
{{"/x", TypedSlot::FromSlot(x)}, {"/y", TypedSlot::FromSlot(y)}},
TypedSlot::FromSlot(x_plus_y), {}),
x_(x),
y_(y),
x_plus_y_(x_plus_y) {}
void InitializeLiterals(EvaluationContext*, FramePtr) const final {}
void Execute(EvaluationContext*, FramePtr frame) const final {
if (frame.Get(x_).present && frame.Get(y_).present) {
frame.Set(x_plus_y_, frame.Get(x_).value + frame.Get(y_).value);
} else {
frame.Set(x_plus_y_, std::nullopt);
}
}
private:
FrameLayout::Slot<OptionalValue<float>> x_;
FrameLayout::Slot<OptionalValue<double>> y_;
FrameLayout::Slot<OptionalValue<double>> x_plus_y_;
};
class TestCompiledExprWithOptionals : public InplaceCompiledExpr {
public:
TestCompiledExprWithOptionals()
: InplaceCompiledExpr({{"/x", GetQType<OptionalValue<float>>()},
{"/y", GetQType<OptionalValue<double>>()}},
GetQType<OptionalValue<double>>(), {}) {}
absl::StatusOr<std::unique_ptr<BoundExpr>> InplaceBind(
const absl::flat_hash_map<std::string, TypedSlot>& slots,
TypedSlot output_slot,
const absl::flat_hash_map<std::string, TypedSlot>& )
const final {
RETURN_IF_ERROR(VerifySlotTypes(input_types(), slots));
return std::make_unique<TestBoundExprWithOptionals>(
slots.at("/x").ToSlot<OptionalValue<float>>().value(),
slots.at("/y").ToSlot<OptionalValue<double>>().value(),
output_slot.ToSlot<OptionalValue<double>>().value());
}
};
TEST(CompileInplaceExprOnStructTest, SuccessXPlusYWithOptionals) {
TestCompiledExprWithOptionals compiled_expr;
ASSERT_OK_AND_ASSIGN(
std::function<absl::Status(TestStructWithOptional&)> eval_fn,
CompileInplaceExprOnStruct<TestStructWithOptional>(compiled_expr,
"/x_plus_y"));
TestStructWithOptional input{.x = 5.f, .y = 7., .x_plus_y = -1};
ASSERT_OK(eval_fn(input));
EXPECT_EQ(input.x_plus_y, 12.);
EXPECT_EQ(input.x, 5.f);
EXPECT_EQ(input.y, 7.);
}
class TestBoundExprWithStrings final : public BoundExpr {
public:
TestBoundExprWithStrings(FrameLayout::Slot<arolla::Bytes> title,
FrameLayout::Slot<OptionalValue<arolla::Bytes>> name,
FrameLayout::Slot<arolla::Bytes> output)
: BoundExpr({{"/title", TypedSlot::FromSlot(title)},
{"/name", TypedSlot::FromSlot(name)}},
TypedSlot::FromSlot(output), {}),
title_(title),
name_(name),
output_(output) {}
void InitializeLiterals(EvaluationContext*, FramePtr) const final {}
void Execute(EvaluationContext*, FramePtr frame) const final {
if (!frame.Get(name_).present) {
frame.Set(output_, "UNKNOWN");
return;
}
frame.Set(output_,
absl::StrCat(frame.Get(title_), " ", frame.Get(name_).value));
}
private:
FrameLayout::Slot<arolla::Bytes> title_;
FrameLayout::Slot<OptionalValue<arolla::Bytes>> name_;
FrameLayout::Slot<arolla::Bytes> output_;
};
class TestCompiledExprWithStrings : public InplaceCompiledExpr {
public:
TestCompiledExprWithStrings()
: InplaceCompiledExpr(
{{"/title", GetQType<arolla::Bytes>()},
{"/name", GetQType<OptionalValue<arolla::Bytes>>()}},
GetQType<arolla::Bytes>(), {}) {}
absl::StatusOr<std::unique_ptr<BoundExpr>> InplaceBind(
const absl::flat_hash_map<std::string, TypedSlot>& slots,
TypedSlot output_slot,
const absl::flat_hash_map<std::string, TypedSlot>& )
const final {
RETURN_IF_ERROR(VerifySlotTypes(input_types(), slots));
return std::make_unique<TestBoundExprWithStrings>(
slots.at("/title").ToSlot<arolla::Bytes>().value(),
slots.at("/name").ToSlot<OptionalValue<arolla::Bytes>>().value(),
output_slot.ToSlot<arolla::Bytes>().value());
}
};
TEST(CompileInplaceExprOnStructTest, SuccessStringsIO) {
TestCompiledExprWithStrings compiled_expr;
ASSERT_OK_AND_ASSIGN(
std::function<absl::Status(TestStructWithString&)> eval_fn,
CompileInplaceExprOnStruct<TestStructWithString>(compiled_expr,
"/full_name"));
TestStructWithString input{
.title = "Mr.", .name = arolla::Bytes("Abc"), .full_name = "????"};
ASSERT_OK(eval_fn(input));
EXPECT_EQ(input.full_name, "Mr. Abc");
input.name = std::nullopt;
ASSERT_OK(eval_fn(input));
EXPECT_EQ(input.full_name, "UNKNOWN");
}
TEST(CompileDynamicExprOnStructInputTest, TypeError) {
ASSERT_OK_AND_ASSIGN(
expr::ExprNodePtr expr,
expr::CallOp("annotation.qtype",
{expr::Leaf("/x"), expr::Literal(GetQType<int>())}));
EXPECT_THAT((ExprCompiler<TestStruct, std::optional<double>>())
.SetInputLoader(CreateStructInputLoader<TestStruct>())
.Compile(expr)
.status(),
StatusIs(absl::StatusCode::kFailedPrecondition,
MatchesRegex(".*inconsistent.*qtype.*INT32.*")));
}
TEST(CompileDynamicExprOnStructInputTest, UnknownLeaf) {
expr::ExprNodePtr expr = expr::Leaf("/unknown");
EXPECT_THAT((ExprCompiler<TestStruct, std::optional<double>>())
.SetInputLoader(CreateStructInputLoader<TestStruct>())
.Compile(expr)
.status(),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("unknown inputs: /unknown")));
}
TEST(CompileDynamicExprOnStructInputTest, TypeErrorOnCodegenModel) {
TestCompiledExprWithOptionals compiled_expr;
EXPECT_THAT((ExprCompiler<TestStruct, std::optional<double>>())
.SetInputLoader(CreateStructInputLoader<TestStruct>())
.Compile(compiled_expr)
.status(),
StatusIs(absl::StatusCode::kFailedPrecondition,
MatchesRegex(".*slot types mismatch.*")));
}
TEST(CompileDynamicExprOnStructInputTest, Nested) {
ASSERT_OK_AND_ASSIGN(
expr::ExprNodePtr expr,
expr::CallOp("math.add",
{expr::Leaf("/x"), expr::Leaf("/side_outputs/x_plus_y")}));
ASSERT_OK_AND_ASSIGN(
std::function<absl::StatusOr<double>(const TestStruct&)> eval_fn,
(ExprCompiler<TestStruct, double>())
.SetInputLoader(CreateStructInputLoader<TestStruct>())
.Compile(expr));
TestStruct input{
.x = 5.f,
.y = -1.,
.side_outputs = {.x_plus_y = 7., .x_times_y = -1, .unused = -1}};
EXPECT_THAT(eval_fn(input), IsOkAndHolds(12.));
}
TEST(CompileDynamicExprOnStructInputTest, SuccessXPlusYWithOptionals) {
ASSERT_OK_AND_ASSIGN(
expr::ExprNodePtr expr,
expr::CallOp("math.add", {expr::Leaf("/x"), expr::Leaf("/y")}));
ASSERT_OK_AND_ASSIGN(
std::function<absl::StatusOr<std::optional<double>>(
const TestStructWithOptional&)>
eval_fn,
(ExprCompiler<TestStructWithOptional, std::optional<double>>())
.SetInputLoader(CreateStructInputLoader<TestStructWithOptional>())
.Compile(expr));
TestStructWithOptional input{.x = 5.f, .y = 7., .x_plus_y = -1};
EXPECT_THAT(eval_fn(input), IsOkAndHolds(12.));
input.x = std::nullopt;
EXPECT_THAT(eval_fn(input), IsOkAndHolds(std::nullopt));
}
TEST(CompileDynamicExprOnStructInputTest, ErrorStatus) {
absl::StatusOr<expr::ExprNodePtr> status_or_expr =
absl::InternalError("input error");
auto result =
ExprCompiler<TestStructWithOptional, std::optional<double>>()
.SetInputLoader(CreateStructInputLoader<TestStructWithOptional>())
.Compile(status_or_expr);
EXPECT_THAT(result, StatusIs(absl::StatusCode::kInternal,
MatchesRegex("input error")));
}
TEST(CompileDynamicExprOnStructInputTest, SuccessXPlusYOnCodegenModel) {
TestCompiledExpr compiled_expr;
ASSERT_OK_AND_ASSIGN(
std::function<absl::StatusOr<double>(const TestStruct&)> eval_fn,
(ExprCompiler<TestStruct, double>())
.SetInputLoader(CreateStructInputLoader<TestStruct>())
.Compile(compiled_expr));
TestStruct input{.x = 5.f, .y = 7.};
EXPECT_THAT(eval_fn(input), IsOkAndHolds(12.));
}
TEST(CompileDynamicExprOnStructInputTest, SuccessSideOutputOnCodegenModel) {
TestCompiledExpr compiled_expr;
ASSERT_OK_AND_ASSIGN(
std::function<absl::StatusOr<double>(const TestStruct&, TestStruct*)>
eval_fn,
(ExprCompiler<TestStruct, double, TestStruct>())
.SetInputLoader(CreateStructInputLoader<TestStruct>())
.SetSlotListener(CreateStructSlotListener<TestStruct>())
.Compile(compiled_expr));
TestStruct input{.x = 5.f, .y = 7.};
EXPECT_THAT(eval_fn(input, nullptr), IsOkAndHolds(12.));
EXPECT_THAT(eval_fn(input, &input), IsOkAndHolds(12.));
EXPECT_EQ(input.side_outputs.x_times_y, 35);
}
TEST(CompileDynamicExprOnStructWithBytesInputTest, SuccessUpper) {
ASSERT_OK_AND_ASSIGN(expr::ExprNodePtr title,
expr::CallOp("strings.decode", {expr::Leaf("/title")}));
ASSERT_OK_AND_ASSIGN(
expr::ExprNodePtr name,
expr::CallOp("strings.upper",
{expr::CallOp("strings.decode", {expr::Leaf("/name")})}));
ASSERT_OK_AND_ASSIGN(
expr::ExprNodePtr expr,
expr::CallOp("strings.join", {title, expr::Literal(Text(" ")), name}));
ASSERT_OK_AND_ASSIGN(expr,
expr::CallOp("core.get_optional_value",
{expr::CallOp("strings.encode", {expr})}));
ASSERT_OK_AND_ASSIGN(
std::function<absl::StatusOr<arolla::Bytes>(const TestStructWithString&)>
eval_fn,
(ExprCompiler<TestStructWithString, arolla::Bytes>())
.SetInputLoader(CreateStructInputLoader<TestStructWithString>())
.Compile(expr));
TestStructWithString input{.title = "Mr.", .name = Bytes("abc")};
EXPECT_THAT(eval_fn(input), IsOkAndHolds(Bytes("Mr. ABC")));
input.name = std::nullopt;
EXPECT_THAT(eval_fn(input), StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr("expects present value")));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/serving/inplace_expr_compiler.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/serving/inplace_expr_compiler_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
9d49cf0e-1088-4c54-b0ca-15e68adff234 | cpp | google/arolla | raw_buffer_factory | arolla/memory/raw_buffer_factory.cc | arolla/memory/raw_buffer_factory_test.cc | #include "arolla/memory/raw_buffer_factory.h"
#include <algorithm>
#include <cstddef>
#include <cstdlib>
#include <cstring>
#include <memory>
#include <tuple>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/base/dynamic_annotations.h"
#include "absl/base/optimization.h"
#include "absl/log/check.h"
namespace arolla {
namespace {
void noop_free(void*) noexcept {}
void AnnotateMemoryIsInitialized(void* data, size_t size) {
ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(data, size);
}
}
std::tuple<RawBufferPtr, void*> HeapBufferFactory::CreateRawBuffer(
size_t nbytes) {
if (ABSL_PREDICT_FALSE(nbytes == 0)) return {nullptr, nullptr};
void* data = malloc(nbytes);
AnnotateMemoryIsInitialized(data, nbytes);
return {std::shared_ptr<void>(data, free), data};
}
std::tuple<RawBufferPtr, void*> HeapBufferFactory::ReallocRawBuffer(
RawBufferPtr&& old_buffer, void* old_data, size_t old_size,
size_t new_size) {
if (new_size == 0) return {nullptr, nullptr};
if (old_size == 0) return CreateRawBuffer(new_size);
DCHECK_EQ(old_buffer.use_count(), 1);
void* new_data = realloc(old_data, new_size);
if (new_size > old_size) {
AnnotateMemoryIsInitialized(static_cast<char*>(new_data) + old_size,
new_size - old_size);
}
*std::get_deleter<decltype(&free)>(old_buffer) = &noop_free;
old_buffer.reset(new_data, free);
return {std::move(old_buffer), new_data};
}
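// Editorial note on the deleter swap above: realloc() has already released or
// moved the original allocation, so the old shared_ptr's deleter is replaced
// with noop_free before reset() to avoid a double free; the returned buffer
// then owns new_data with the regular free() deleter.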
std::tuple<RawBufferPtr, void*> ProtobufArenaBufferFactory::CreateRawBuffer(
size_t nbytes) {
char* data = arena_.CreateArray<char>(&arena_, nbytes);
AnnotateMemoryIsInitialized(data, nbytes);
return {nullptr, data};
}
std::tuple<RawBufferPtr, void*> ProtobufArenaBufferFactory::ReallocRawBuffer(
RawBufferPtr&& old_buffer, void* data, size_t old_size, size_t new_size) {
if (old_size >= new_size) return {nullptr, data};
char* new_data = arena_.CreateArray<char>(&arena_, new_size);
memcpy(new_data, data, std::min(old_size, new_size));
AnnotateMemoryIsInitialized(new_data + old_size, new_size - old_size);
return {nullptr, new_data};
}
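// Editorial note: a protobuf arena cannot free or grow individual arrays, so
// ReallocRawBuffer reuses `data` only when shrinking; growing always creates
// a fresh array on the arena and copies the old contents.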
std::tuple<RawBufferPtr, void*> UnsafeArenaBufferFactory::CreateRawBuffer(
size_t nbytes) {
auto last_alloc =
reinterpret_cast<char*>(reinterpret_cast<size_t>(current_ + 7) & ~7ull);
if (ABSL_PREDICT_FALSE(last_alloc + nbytes > end_)) {
return {nullptr, SlowAlloc(nbytes)};
}
current_ = last_alloc + nbytes;
return {nullptr, last_alloc};
}
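// Editorial note: the (current_ + 7) & ~7 computation rounds the bump pointer
// up to an 8-byte boundary, which is what the tests assert via
// reinterpret_cast<size_t>(data) & 7 == 0.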
std::tuple<RawBufferPtr, void*> UnsafeArenaBufferFactory::ReallocRawBuffer(
RawBufferPtr&& old_buffer, void* data, size_t old_size, size_t new_size) {
char* last_alloc = current_ - old_size;
if ((data != last_alloc) || last_alloc + new_size > end_) {
if (old_size >= new_size) return {nullptr, data};
if (data == last_alloc) current_ = last_alloc;
void* new_data = SlowAlloc(new_size);
memcpy(new_data, data, std::min(old_size, new_size));
AnnotateMemoryIsInitialized(data, old_size);
return {nullptr, new_data};
}
current_ = last_alloc + new_size;
if (new_size < old_size) {
AnnotateMemoryIsInitialized(current_, old_size - new_size);
}
return {nullptr, last_alloc};
}
void UnsafeArenaBufferFactory::Reset() {
if (page_id_ >= 0) {
page_id_ = 0;
current_ = reinterpret_cast<char*>(std::get<1>(pages_[0]));
AnnotateMemoryIsInitialized(current_, page_size_);
end_ = current_ + page_size_;
}
big_allocs_.clear();
}
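// Editorial note: Reset() rewinds to the first page and drops the standalone
// "big" allocations, so the arena is typically reused across iterations:
//
//   UnsafeArenaBufferFactory arena(/*page_size=*/64 * 1024);
//   for (...) {
//     auto [holder, data] = arena.CreateRawBuffer(nbytes);
//     // ... use data ...
//     arena.Reset();  // memory is recycled on the next iteration
//   }
//
// (page size and loop shape are illustrative, not prescribed by this file).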
ABSL_ATTRIBUTE_NOINLINE void* UnsafeArenaBufferFactory::SlowAlloc(
size_t nbytes) {
if (ABSL_PREDICT_FALSE(nbytes > page_size_ ||
end_ - current_ >= page_size_ / 2)) {
auto [holder, memory] = base_factory_.CreateRawBuffer(nbytes);
AnnotateMemoryIsInitialized(memory, nbytes);
big_allocs_.emplace_back(std::move(holder), memory);
return memory;
}
NextPage();
auto last_alloc = current_;
current_ += nbytes;
return last_alloc;
}
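// Editorial note: SlowAlloc takes a standalone allocation from base_factory_
// when the request is larger than a page, or when moving on would abandon at
// least half a page of free space; otherwise it advances to the next
// (possibly recycled) page via NextPage().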
void UnsafeArenaBufferFactory::NextPage() {
++page_id_;
if (ABSL_PREDICT_FALSE(page_id_ == pages_.size())) {
auto [holder, page] = base_factory_.CreateRawBuffer(page_size_);
current_ = reinterpret_cast<char*>(page);
pages_.emplace_back(std::move(holder), page);
} else {
current_ = reinterpret_cast<char*>(std::get<1>(pages_[page_id_]));
}
AnnotateMemoryIsInitialized(current_, page_size_);
end_ = current_ + page_size_;
}
} | #include "arolla/memory/raw_buffer_factory.h"
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <utility>
#include <vector>
#include "benchmark/benchmark.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "google/protobuf/arena.h"
namespace arolla {
namespace {
using ::testing::AnyOf;
using ::testing::Eq;
using ::testing::Ge;
using ::testing::Le;
void VerifyCanReadUninitialized(const void* ptr, size_t size) {
const char* char_ptr = static_cast<const char*>(ptr);
for (size_t i = 0; i != size; ++i) {
char c = *(char_ptr + i);
benchmark::DoNotOptimize(c);
}
}
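// Editorial note: this helper deliberately reads bytes the factories never
// wrote; it only passes under MSAN because the factories call
// ABSL_ANNOTATE_MEMORY_IS_INITIALIZED on freshly created or recycled regions.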
TEST(HeapBufferFactory, CreateEmptyBuffer) {
auto [buf, data] = GetHeapBufferFactory()->CreateRawBuffer(0);
EXPECT_EQ(buf, nullptr);
EXPECT_EQ(data, nullptr);
}
TEST(HeapBufferFactory, CreateRawBuffer) {
const size_t size = 13;
auto [buf, data] = GetHeapBufferFactory()->CreateRawBuffer(size);
EXPECT_NE(buf, nullptr);
VerifyCanReadUninitialized(data, size);
EXPECT_EQ(reinterpret_cast<size_t>(data) & 7, 0);
memset(data, 0, size);
}
TEST(HeapBufferFactory, ReallocRawBuffer) {
size_t size = 13;
RawBufferPtr buf;
char* data;
{
auto res = GetHeapBufferFactory()->CreateRawBuffer(size);
buf = std::get<0>(res);
data = reinterpret_cast<char*>(std::get<1>(res));
VerifyCanReadUninitialized(data, size);
}
auto resize_fn = [&](size_t new_size) {
auto res = GetHeapBufferFactory()->ReallocRawBuffer(std::move(buf), data,
size, new_size);
buf = std::get<0>(res);
data = reinterpret_cast<char*>(std::get<1>(res));
size = new_size;
};
data[0] = 5;
resize_fn(4);
EXPECT_EQ(data[0], 5);
VerifyCanReadUninitialized(data + 1, size - 1);
resize_fn(145);
EXPECT_EQ(data[0], 5);
VerifyCanReadUninitialized(data + 1, 144);
}
TEST(ProtobufArenaBufferFactory, CreateAndResize) {
google::protobuf::Arena arena;
ProtobufArenaBufferFactory buf_factory(arena);
auto [buf1, data1] = buf_factory.CreateRawBuffer(2);
VerifyCanReadUninitialized(data1, 2);
char* d = reinterpret_cast<char*>(data1);
d[0] = 'A';
d[1] = 'B';
auto [buf2, data2] =
buf_factory.ReallocRawBuffer(std::move(buf1), data1, 2, 1);
EXPECT_EQ(data1, data2);
auto [buf3, data3] =
buf_factory.ReallocRawBuffer(std::move(buf2), data2, 1, 3);
EXPECT_NE(data2, data3);
d = reinterpret_cast<char*>(data3);
EXPECT_EQ(d[0], 'A');
VerifyCanReadUninitialized(d + 1, 2);
}
TEST(UnsafeArenaBufferFactory, CreateEmptyBuffer) {
UnsafeArenaBufferFactory arena(25);
auto [buf1, data1] = arena.CreateRawBuffer(0);
auto [buf2, data2] = arena.CreateRawBuffer(0);
auto [buf3, data3] = arena.CreateRawBuffer(1);
VerifyCanReadUninitialized(data3, 1);
auto [buf4, data4] = arena.CreateRawBuffer(0);
auto [buf5, data5] = arena.CreateRawBuffer(0);
EXPECT_EQ(data1, data2);
EXPECT_NE(data3, nullptr);
EXPECT_NE(data2, data4);
EXPECT_NE(data3, data4);
EXPECT_EQ(data4, data5);
}
TEST(UnsafeArenaBufferFactory, CreateRawBuffer) {
std::vector<int64_t> sizes = {17, 1, 15, 1, 10};
std::vector<RawBufferPtr> bufs;
std::vector<char*> ptrs;
bufs.reserve(sizes.size());
ptrs.reserve(sizes.size());
UnsafeArenaBufferFactory arena1(25);
google::protobuf::Arena proto_arena;
ProtobufArenaBufferFactory proto_buf_factory(proto_arena);
UnsafeArenaBufferFactory arena2(25, proto_buf_factory);
for (UnsafeArenaBufferFactory* arena_ptr : {&arena1, &arena2}) {
UnsafeArenaBufferFactory& arena = *arena_ptr;
for (size_t i = 0; i < sizes.size(); ++i) {
auto [buf, data] = arena.CreateRawBuffer(sizes[i]);
VerifyCanReadUninitialized(data, sizes[i]);
EXPECT_EQ(reinterpret_cast<size_t>(data) & 7, 0);
memset(data, i, sizes[i]);
bufs.push_back(buf);
ptrs.push_back(reinterpret_cast<char*>(data));
}
EXPECT_EQ(ptrs[0] + 24, ptrs[1]);
EXPECT_EQ(ptrs[2] + 16, ptrs[3]);
for (size_t i = 0; i < sizes.size(); ++i) {
for (int64_t j = 0; j < sizes[i]; ++j) {
EXPECT_EQ(ptrs[i][j], i);
}
}
}
}
TEST(UnsafeArenaBufferFactory, ReallocRawBuffer) {
UnsafeArenaBufferFactory arena1(25);
google::protobuf::Arena proto_arena;
ProtobufArenaBufferFactory proto_buf_factory(proto_arena);
UnsafeArenaBufferFactory arena2(25, proto_buf_factory);
for (UnsafeArenaBufferFactory* arena_ptr : {&arena1, &arena2}) {
UnsafeArenaBufferFactory& arena = *arena_ptr;
auto [buf1, data1] = arena.CreateRawBuffer(10);
VerifyCanReadUninitialized(data1, 10);
EXPECT_EQ(buf1, nullptr);
reinterpret_cast<char*>(data1)[0] = 7;
auto [buf2, data2] = arena.ReallocRawBuffer(std::move(buf1), data1, 10, 25);
reinterpret_cast<char*>(data1)[24] = -1;
EXPECT_EQ(reinterpret_cast<char*>(data2)[0], 7);
EXPECT_EQ(data1, data2);
auto [buf3, data3] = arena.ReallocRawBuffer(std::move(buf2), data2, 25, 26);
VerifyCanReadUninitialized(data2, 25);
EXPECT_NE(data1, data3);
EXPECT_EQ(reinterpret_cast<char*>(data3)[0], 7);
auto [buf4, data4] = arena.ReallocRawBuffer(std::move(buf3), data3, 26, 10);
EXPECT_NE(data1, data4);
EXPECT_EQ(reinterpret_cast<char*>(data4)[0], 7);
auto [buf5, data5] = arena.CreateRawBuffer(20);
VerifyCanReadUninitialized(data5, 20);
auto [buf6, data6] = arena.ReallocRawBuffer(std::move(buf5), data5, 20, 15);
VerifyCanReadUninitialized(static_cast<const char*>(data6) + 15, 5);
EXPECT_EQ(data1, data5);
EXPECT_EQ(data1, data6);
auto [buf7, data7] = arena.CreateRawBuffer(8);
VerifyCanReadUninitialized(data7, 8);
EXPECT_EQ(reinterpret_cast<char*>(data1) + 16,
reinterpret_cast<char*>(data7));
reinterpret_cast<char*>(data7)[0] = 3;
auto [buf8, data8] = arena.ReallocRawBuffer(std::move(buf7), data7, 8, 20);
EXPECT_EQ(reinterpret_cast<char*>(data8)[0], 3);
auto [buf9, data9] = arena.CreateRawBuffer(1);
VerifyCanReadUninitialized(data9, 1);
EXPECT_EQ(reinterpret_cast<char*>(data8) + 24,
reinterpret_cast<char*>(data9));
}
}
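// Requests larger than the arena page size (32 bytes here) must still succeed;
// the assertions below check that such a big allocation lands outside the page
// used for the small allocations.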
TEST(UnsafeArenaBufferFactory, BigAlloc) {
UnsafeArenaBufferFactory arena1(32);
google::protobuf::Arena proto_arena;
ProtobufArenaBufferFactory proto_buf_factory(proto_arena);
UnsafeArenaBufferFactory arena2(32, proto_buf_factory);
for (UnsafeArenaBufferFactory* arena_ptr : {&arena1, &arena2}) {
UnsafeArenaBufferFactory& arena = *arena_ptr;
auto [buf1, data1] = arena.CreateRawBuffer(16);
VerifyCanReadUninitialized(data1, 16);
auto [buf2, data2] = arena.CreateRawBuffer(64);
VerifyCanReadUninitialized(data2, 64);
auto [buf3, data3] = arena.CreateRawBuffer(16);
VerifyCanReadUninitialized(data3, 16);
EXPECT_THAT(reinterpret_cast<char*>(data3),
Eq(reinterpret_cast<char*>(data1) + 16));
EXPECT_THAT(reinterpret_cast<char*>(data2) - reinterpret_cast<char*>(data1),
AnyOf(Le(-64), Ge(32)));
memset(data2, 0, 64);
EXPECT_THAT(reinterpret_cast<int64_t*>(data2)[0], Eq(0));
}
}
TEST(UnsafeArenaBufferFactory, Reset) {
UnsafeArenaBufferFactory arena1(32);
google::protobuf::Arena proto_arena;
ProtobufArenaBufferFactory proto_buf_factory(proto_arena);
UnsafeArenaBufferFactory arena2(32, proto_buf_factory);
for (UnsafeArenaBufferFactory* arena_ptr : {&arena1, &arena2}) {
UnsafeArenaBufferFactory& arena = *arena_ptr;
arena.Reset();
auto [buf1, data1] = arena.CreateRawBuffer(16);
VerifyCanReadUninitialized(data1, 16);
auto [buf2, data2] = arena.CreateRawBuffer(16);
VerifyCanReadUninitialized(data2, 16);
auto [buf3, data3] = arena.CreateRawBuffer(16);
VerifyCanReadUninitialized(data3, 16);
std::memset(data1, 255, 16);
std::memset(data2, 255, 16);
std::memset(data3, 255, 16);
arena.Reset();
auto [buf4, data4] = arena.CreateRawBuffer(8);
VerifyCanReadUninitialized(data4, 16);
auto [buf5, data5] = arena.CreateRawBuffer(16);
VerifyCanReadUninitialized(data5, 16);
auto [buf6, data6] = arena.CreateRawBuffer(24);
VerifyCanReadUninitialized(data6, 16);
EXPECT_EQ(data1, data4);
EXPECT_EQ(reinterpret_cast<char*>(data2),
reinterpret_cast<char*>(data5) + 8);
EXPECT_EQ(data3, data6);
}
}
TEST(UnsafeArenaBufferFactory, BaseFactory) {
UnsafeArenaBufferFactory arena1(1024);
auto [buf_before, ptr_before] = arena1.CreateRawBuffer(1);
UnsafeArenaBufferFactory arena2(32, arena1);
auto [buf_small, ptr_small] = arena2.CreateRawBuffer(8);
auto [buf_big, ptr_big] = arena2.CreateRawBuffer(128);
auto [buf_after, ptr_after] = arena1.CreateRawBuffer(1);
EXPECT_LT(ptr_before, ptr_small);
EXPECT_LT(ptr_before, ptr_big);
EXPECT_GT(ptr_after, ptr_small);
EXPECT_GT(ptr_after, ptr_big);
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/memory/raw_buffer_factory.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/memory/raw_buffer_factory_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
5326e467-00f3-4f31-a980-31e0e3f00c84 | cpp | google/arolla | optional_value | arolla/memory/optional_value.cc | arolla/memory/optional_value_test.cc | #include "arolla/memory/optional_value.h"
#include <cstdint>
#include "absl/strings/str_cat.h"
#include "arolla/util/bytes.h"
#include "arolla/util/repr.h"
#include "arolla/util/text.h"
namespace arolla {
ReprToken ReprTraits<OptionalValue<bool>>::operator()(
const OptionalValue<bool>& value) const {
return ReprToken{
value.present ? absl::StrCat("optional_boolean{", Repr(value.value), "}")
: "optional_boolean{NA}"};
}
ReprToken ReprTraits<OptionalValue<int32_t>>::operator()(
const OptionalValue<int32_t>& value) const {
return ReprToken{value.present
? absl::StrCat("optional_int32{", Repr(value.value), "}")
: "optional_int32{NA}"};
}
ReprToken ReprTraits<OptionalValue<int64_t>>::operator()(
const OptionalValue<int64_t>& value) const {
return ReprToken{value.present ? absl::StrCat("optional_", Repr(value.value))
: "optional_int64{NA}"};
}
ReprToken ReprTraits<OptionalValue<uint64_t>>::operator()(
const OptionalValue<uint64_t>& value) const {
return ReprToken{value.present ? absl::StrCat("optional_", Repr(value.value))
: "optional_uint64{NA}"};
}
ReprToken ReprTraits<OptionalValue<float>>::operator()(
const OptionalValue<float>& value) const {
return ReprToken{
value.present ? absl::StrCat("optional_float32{", Repr(value.value), "}")
: "optional_float32{NA}"};
}
ReprToken ReprTraits<OptionalValue<double>>::operator()(
const OptionalValue<double>& value) const {
return ReprToken{value.present ? absl::StrCat("optional_", Repr(value.value))
: "optional_float64{NA}"};
}
ReprToken ReprTraits<OptionalValue<Bytes>>::operator()(
const OptionalValue<Bytes>& value) const {
return ReprToken{value.present
? absl::StrCat("optional_bytes{", Repr(value.value), "}")
: "optional_bytes{NA}"};
}
ReprToken ReprTraits<OptionalValue<Text>>::operator()(
const OptionalValue<Text>& value) const {
return ReprToken{value.present
? absl::StrCat("optional_text{", Repr(value.value), "}")
: "optional_text{NA}"};
}
ReprToken ReprTraits<OptionalUnit>::operator()(
const OptionalUnit& value) const {
return ReprToken{value.present ? "present" : "missing"};
}
} | #include "arolla/memory/optional_value.h"
#include <cstdint>
#include <cstring>
#include <memory>
#include <new>
#include <optional>
#include <sstream>
#include <string>
#include <type_traits>
#include <utility>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/memory_allocation.h"
#include "arolla/util/bytes.h"
#include "arolla/util/repr.h"
#include "arolla/util/text.h"
#include "arolla/util/view_types.h"
namespace arolla {
namespace testing {
namespace {
using absl_testing::IsOkAndHolds;
using absl_testing::StatusIs;
using ::testing::HasSubstr;
using ::testing::Test;
TEST(OptionalValueTest, TestEmptyValues) {
OptionalValue<float> v1;
EXPECT_FALSE(v1.present);
OptionalValue<float> v2(std::optional<float>{});
EXPECT_FALSE(v2.present);
OptionalValue<float> v3(std::nullopt);
EXPECT_FALSE(v3.present);
EXPECT_EQ(v1, v2);
EXPECT_EQ(v1, v3);
v1.value = 1.0f;
v2.value = 2.0f;
EXPECT_EQ(v1, v2);
auto absl_v = v2.AsOptional();
EXPECT_FALSE(absl_v.has_value());
}
TEST(OptionalValueTest, TestConstExpr) {
static_assert(!OptionalValue<int>().present);
static_assert(OptionalValue<int>(5).present);
static_assert(OptionalValue<int>(5).value == 5);
static_assert(MakeOptionalValue(5).present);
static_assert(MakeOptionalValue(5).value == 5);
}
TEST(OptionalValueTest, TestPresentValues) {
OptionalValue<float> v1(1.0f);
EXPECT_TRUE(v1.present);
EXPECT_EQ(1.0f, v1.value);
EXPECT_EQ(Repr(v1), "optional_float32{1.}");
auto v_auto = MakeOptionalValue(1.0f);
EXPECT_TRUE(v_auto.present);
EXPECT_EQ(1.0f, v_auto.value);
EXPECT_EQ(Repr(v_auto), "optional_float32{1.}");
OptionalValue<float> v2(std::optional<float>{2.0f});
EXPECT_TRUE(v2.present);
EXPECT_EQ(2.0f, v2.value);
EXPECT_EQ(Repr(v2), "optional_float32{2.}");
EXPECT_NE(v1, v2);
v1.value = 2.0f;
EXPECT_EQ(v1, v2);
}
TEST(OptionalValueTest, TestAssignment) {
OptionalValue<float> v1;
v1 = 1.0f;
EXPECT_TRUE(v1.present);
EXPECT_EQ(v1.value, 1.0f);
v1 = std::nullopt;
EXPECT_FALSE(v1.present);
}
TEST(OptionalValueTest, MakeStatusOrOptionalValue) {
absl::StatusOr<OptionalValue<float>> v =
MakeStatusOrOptionalValue(absl::StatusOr<float>(1.0f));
ASSERT_OK(v.status());
EXPECT_TRUE(v.value().present);
EXPECT_EQ(v.value().value, 1.0f);
absl::StatusOr<OptionalValue<float>> v_error = MakeStatusOrOptionalValue(
absl::StatusOr<float>(absl::InternalError("fake")));
EXPECT_THAT(v_error.status(),
StatusIs(absl::StatusCode::kInternal, HasSubstr("fake")));
}
TEST(OptionalValueTest, OptionalUnit) {
EXPECT_EQ(OptionalUnit(), kMissing);
EXPECT_EQ(OptionalUnit(false), kMissing);
EXPECT_FALSE(kMissing);
EXPECT_FALSE(kMissing.present);
EXPECT_EQ(Repr(kMissing), "missing");
EXPECT_EQ(OptionalUnit(true), kPresent);
EXPECT_TRUE(kPresent);
EXPECT_TRUE(kPresent.present);
EXPECT_EQ(Repr(kPresent), "present");
}
TEST(OptionalValueTest, Comparison) {
OptionalValue<float> v0;
v0.value = 1.0f;
OptionalValue<float> v1(1.0f);
OptionalValue<float> v2(2.0f);
{
EXPECT_TRUE(v1 == v1);
EXPECT_TRUE(v0 == v0);
EXPECT_FALSE(v1 == v2);
EXPECT_FALSE(v1 == v0);
EXPECT_FALSE(v1 != v1);
EXPECT_FALSE(v0 != v0);
EXPECT_TRUE(v1 != v2);
EXPECT_TRUE(v1 != v0);
OptionalValue<float> v0_2;
v0_2.value = 2.0f;
EXPECT_TRUE(v0 == v0_2);
EXPECT_FALSE(v0 != v0_2);
}
{
EXPECT_TRUE(v1 == 1.0f);
EXPECT_TRUE(1.0f == v1);
EXPECT_FALSE(v1 != 1.0f);
EXPECT_FALSE(1.0f != v1);
EXPECT_FALSE(v1 == 2.0f);
EXPECT_FALSE(2.0f == v1);
EXPECT_TRUE(v1 != 2.0f);
EXPECT_TRUE(2.0f != v1);
}
{
EXPECT_FALSE(v1 == std::nullopt);
EXPECT_FALSE(std::nullopt == v1);
EXPECT_TRUE(v0 == std::nullopt);
EXPECT_TRUE(std::nullopt == v0);
EXPECT_TRUE(v1 != std::nullopt);
EXPECT_TRUE(std::nullopt != v1);
EXPECT_FALSE(v0 != std::nullopt);
EXPECT_FALSE(std::nullopt != v0);
}
}
TEST(OptionalValueTest, TestImplicitConstructors) {
OptionalValue<float> v = {};
EXPECT_EQ(v, OptionalValue<float>());
v = 3.5;
EXPECT_EQ(v, OptionalValue<float>(3.5));
v = std::optional<float>(2.5);
EXPECT_EQ(v, OptionalValue<float>(2.5));
}
TEST(OptionalValueTest, TestMoves) {
auto ptr = std::make_unique<std::string>("Hello!");
OptionalValue<std::unique_ptr<std::string>> v1(std::move(ptr));
EXPECT_TRUE(v1.present);
EXPECT_EQ("Hello!", *(v1.value));
std::optional<std::unique_ptr<std::string>> v2(std::move(v1).AsOptional());
EXPECT_TRUE(v2.has_value());
EXPECT_EQ("Hello!", **v2);
}
template <typename T>
using Slot = FrameLayout::Slot<T>;
TEST(OptionalValueTest, TestFrameLayout) {
FrameLayout::Builder builder;
builder.AddSlot<double>();
builder.AddSlot<int32_t>();
auto optional_slot = builder.AddSlot<OptionalValue<float>>();
Slot<bool> presence_slot = optional_slot.GetSubslot<0>();
Slot<float> value_slot = optional_slot.GetSubslot<1>();
FrameLayout layout = std::move(builder).Build();
MemoryAllocation alloc(&layout);
FramePtr frame = alloc.frame();
frame.Set(optional_slot, OptionalValue<float>{1.0f});
EXPECT_EQ(true, frame.Get(presence_slot));
EXPECT_EQ(1.0f, frame.Get(value_slot));
frame.Set(value_slot, 2.0f);
EXPECT_EQ(2.0, frame.Get(optional_slot).value);
}
TEST(OptionalValue, IsBZeroConstructible) {
EXPECT_TRUE(is_bzero_constructible<OptionalValue<float>>());
EXPECT_TRUE(is_bzero_constructible<OptionalValue<int>>());
EXPECT_FALSE(is_bzero_constructible<OptionalValue<std::string>>());
}
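// Zero-filled storage must be interpretable as a valid OptionalValue in the
// "missing" state.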
TEST(OptionalValue, BZeroStateIsEmptyValue) {
using T = OptionalValue<float>;
std::aligned_storage_t<sizeof(T), alignof(T)> storage;
memset(&storage, 0, sizeof(storage));
EXPECT_FALSE(std::launder(reinterpret_cast<const T*>(&storage))->present);
}
TEST(OptionalValue, StructuredBindings) {
{
OptionalValue<float> f;
auto [present, value] = f;
EXPECT_FALSE(present);
}
{
OptionalValue<float> f = 17.0;
auto [present, value] = f;
EXPECT_TRUE(present);
EXPECT_EQ(value, 17.0);
}
}
TEST(OptionalValue, ViewType) {
static_assert(std::is_same_v<view_type_t<OptionalValue<int64_t>>,
OptionalValue<int64_t>>);
static_assert(std::is_same_v<view_type_t<OptionalValue<Bytes>>,
OptionalValue<absl::string_view>>);
auto fn = [](OptionalValue<absl::string_view> v) -> char {
return (v.present && !v.value.empty()) ? v.value[0] : 'X';
};
EXPECT_EQ(fn(OptionalValue<Text>(Text("Hello"))), 'H');
EXPECT_EQ(fn(std::nullopt), 'X');
}
TEST(OptionalValue, WrapFnToAcceptOptionalArgs) {
{
auto fn = [](int a, OptionalValue<int64_t> b, int64_t c) -> int {
return a + c + (b.present ? b.value : 10);
};
auto opt_fn = WrapFnToAcceptOptionalArgs(fn);
EXPECT_EQ(opt_fn(1, 2, 3), OptionalValue<int>(6));
EXPECT_EQ(opt_fn(std::nullopt, 2, 3), OptionalValue<int>());
EXPECT_EQ(opt_fn(1, std::nullopt, 3), OptionalValue<int>(14));
EXPECT_EQ(opt_fn(1, 2, std::nullopt), OptionalValue<int>());
}
{
auto fn = [](const Bytes& v) -> const Bytes& { return v; };
auto opt_fn = WrapFnToAcceptOptionalArgs(fn);
EXPECT_EQ(opt_fn(Bytes("123")), OptionalValue<Bytes>("123"));
}
{
auto fn = [](absl::string_view v) { return v; };
auto opt_fn = WrapFnToAcceptOptionalArgs(fn);
EXPECT_EQ(opt_fn(MakeOptionalValue(Bytes("123"))),
MakeOptionalValue(absl::string_view("123")));
}
{
auto fn = [](int a, OptionalValue<int64_t> b,
int64_t c) -> absl::StatusOr<int> {
if (c < 0) {
return absl::InvalidArgumentError("c < 0");
} else {
return a + c + (b.present ? b.value : 10);
}
};
auto opt_fn = WrapFnToAcceptOptionalArgs(fn);
EXPECT_THAT(opt_fn(1, 2, 3), IsOkAndHolds(OptionalValue<int>(6)));
EXPECT_THAT(opt_fn(1, 2, -3),
StatusIs(absl::StatusCode::kInvalidArgument, "c < 0"));
EXPECT_THAT(opt_fn(std::nullopt, 2, -3),
IsOkAndHolds(OptionalValue<int>()));
}
}
TEST(OptionalValueReprTest, bool) {
EXPECT_EQ(Repr(OptionalValue<bool>(true)), "optional_boolean{true}");
EXPECT_EQ(Repr(OptionalValue<bool>()), "optional_boolean{NA}");
}
TEST(OptionalValueReprTest, int32_t) {
EXPECT_EQ(Repr(OptionalValue<int32_t>(1)), "optional_int32{1}");
EXPECT_EQ(Repr(OptionalValue<int32_t>()), "optional_int32{NA}");
}
TEST(OptionalValueReprTest, int64_t) {
EXPECT_EQ(Repr(OptionalValue<int64_t>(1)), "optional_int64{1}");
EXPECT_EQ(Repr(OptionalValue<int64_t>()), "optional_int64{NA}");
}
TEST(OptionalValueReprTest, uint64_t) {
EXPECT_EQ(Repr(OptionalValue<uint64_t>(1)), "optional_uint64{1}");
EXPECT_EQ(Repr(OptionalValue<uint64_t>()), "optional_uint64{NA}");
}
TEST(OptionalValueReprTest, float) {
EXPECT_EQ(Repr(OptionalValue<float>(1.5)), "optional_float32{1.5}");
EXPECT_EQ(Repr(OptionalValue<float>()), "optional_float32{NA}");
}
TEST(OptionalValueReprTest, double) {
EXPECT_EQ(Repr(OptionalValue<double>(1.5)), "optional_float64{1.5}");
EXPECT_EQ(Repr(OptionalValue<double>()), "optional_float64{NA}");
}
TEST(OptionalValueReprTest, Bytes) {
EXPECT_EQ(Repr(OptionalValue<Bytes>("abc")), "optional_bytes{b'abc'}");
EXPECT_EQ(Repr(OptionalValue<Bytes>()), "optional_bytes{NA}");
}
TEST(OptionalValueReprTest, Text) {
EXPECT_EQ(Repr(OptionalValue<Text>("abc")), "optional_text{'abc'}");
EXPECT_EQ(Repr(OptionalValue<Text>()), "optional_text{NA}");
}
TEST(OptionalValueReprTest, StreamOp) {
{
std::ostringstream oss;
oss << OptionalValue<float>(1.5);
EXPECT_EQ(oss.str(), "optional_float32{1.5}");
}
{
std::ostringstream oss;
oss << OptionalValue<float>();
EXPECT_EQ(oss.str(), "optional_float32{NA}");
}
}
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/memory/optional_value.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/memory/optional_value_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
0f77a7a9-b07e-42da-9f2a-f6b620f364f6 | cpp | google/arolla | frame | arolla/memory/frame.cc | arolla/memory/frame_test.cc | #include "arolla/memory/frame.h"
#include <algorithm>
#include <cstddef>
#include <cstring>
#include <tuple>
#include <typeindex>
#include <typeinfo>
#include <utility>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "arolla/util/algorithms.h"
#include "arolla/util/memory.h"
namespace arolla {
std::type_index FrameLayout::FieldFactory::type_index() const { return type_; }
void FrameLayout::FieldFactory::Add(size_t offset) {
offsets_.push_back(offset);
}
void FrameLayout::FieldFactory::AddDerived(
const FieldFactory& derived_factory) {
DCHECK(type_index() == derived_factory.type_index());
for (size_t cur_offset : derived_factory.offsets_) {
offsets_.push_back(cur_offset);
}
}
FrameLayout::FieldFactory FrameLayout::FieldFactory::Derive(
size_t offset) const {
FieldFactory res = *this;
for (size_t& cur_offset : res.offsets_) {
cur_offset += offset;
}
return res;
}
void FrameLayout::FieldInitializers::AddOffsetToFactory(
size_t offset, FieldFactory empty_factory) {
auto it = type2factory.find(empty_factory.type_index());
if (it == type2factory.end()) {
bool inserted;
std::tie(it, inserted) =
type2factory.emplace(empty_factory.type_index(), factories.size());
factories.push_back(std::move(empty_factory));
}
DCHECK_LT(it->second, factories.size());
if (it->second < factories.size()) {
factories[it->second].Add(offset);
}
}
void FrameLayout::FieldInitializers::AddDerived(
size_t offset, const FieldInitializers& derived_initializers) {
for (const auto& [derived_tpe, derived_id] :
derived_initializers.type2factory) {
const auto& derived_factory = derived_initializers.factories[derived_id];
if (auto it = type2factory.find(derived_tpe); it != type2factory.end()) {
factories[it->second].AddDerived(derived_factory.Derive(offset));
} else {
type2factory.emplace(derived_tpe, factories.size());
factories.push_back(derived_factory.Derive(offset));
}
}
}
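// Embeds `subframe` into the layout under construction: rounds the current
// size up to the subframe's alignment, reserves its bytes, and re-registers
// its field initializers (and, in debug builds, its registered fields) at the
// new base offset.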
FrameLayout::Slot<void> FrameLayout::Builder::AddSubFrame(
const FrameLayout& subframe) {
alloc_size_ = RoundUp(alloc_size_, subframe.AllocAlignment().value);
size_t offset = alloc_size_;
alloc_size_ += subframe.AllocSize();
alloc_alignment_ =
std::max(alloc_alignment_, subframe.AllocAlignment().value);
initializers_.AddDerived(offset, subframe.initializers_);
#ifndef NDEBUG
for (const auto& [field_offset, field_type] : subframe.registered_fields_) {
registered_fields_.emplace(offset + field_offset, field_type);
}
#endif
return FrameLayout::Slot<void>(offset);
}
absl::Status FrameLayout::Builder::RegisterUnsafeSlot(
size_t byte_offset, size_t byte_size, const std::type_info& type) {
return RegisterSlot(byte_offset, byte_size, type);
}
absl::Status FrameLayout::Builder::RegisterSlot(size_t byte_offset,
size_t byte_size,
const std::type_info& type,
bool allow_duplicates) {
if (byte_offset == FrameLayout::Slot<float>::kUninitializedOffset) {
return absl::FailedPreconditionError(
"unable to register uninitialized slot");
}
if (byte_offset > alloc_size_ || byte_size > alloc_size_ - byte_offset) {
return absl::FailedPreconditionError(absl::StrCat(
"unable to register slot after the end of alloc, offset: ", byte_offset,
", size: ", byte_size, ", alloc size: ", alloc_size_));
}
#ifndef NDEBUG
if (!registered_fields_.emplace(byte_offset, std::type_index(type)).second &&
!allow_duplicates) {
return absl::FailedPreconditionError(absl::StrCat(
"slot is already registered ", byte_offset, " ", type.name()));
}
#endif
return absl::OkStatus();
}
} | #include "arolla/memory/frame.h"
#include <array>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <sstream>
#include <string>
#include <type_traits>
#include <utility>
#include <variant>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/dynamic_annotations.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "arolla/memory/memory_allocation.h"
#include "arolla/util/demangle.h"
#include "arolla/util/is_bzero_constructible.h"
#include "arolla/util/memory.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::testing {
namespace {
using ::absl_testing::IsOk;
using ::absl_testing::StatusIs;
using ::testing::ElementsAre;
using ::testing::Eq;
using ::testing::HasSubstr;
using ::testing::IsEmpty;
struct SimpleStruct {
int a;
float b;
};
struct InitializedStruct {
int a = 1;
float b = 2.0;
};
TEST(FrameLayoutTest, SlotOutput) {
FrameLayout::Builder builder;
auto slot = builder.AddSlot<int>();
std::ostringstream ss;
ss << slot;
EXPECT_EQ(ss.str(), std::string("Slot<") + TypeName<int>() + ">(0)");
}
TEST(FrameLayoutTest, SimpleFields) {
FrameLayout::Builder builder;
auto slot1 = builder.AddSlot<int>();
auto slot2 = builder.AddSlot<float>();
auto slot3 = builder.AddSlot<double>();
auto layout = std::move(builder).Build();
MemoryAllocation alloc(&layout);
FramePtr frame = alloc.frame();
EXPECT_THAT(frame.Get(slot1), Eq(0));
EXPECT_THAT(frame.Get(slot2), Eq(0.0f));
EXPECT_THAT(frame.Get(slot3), Eq(0.0));
frame.Set(slot1, 1);
frame.Set(slot2, 2.0f);
frame.Set(slot3, M_PI);
EXPECT_THAT(frame.Get(slot1), Eq(1));
EXPECT_THAT(frame.Get(slot2), Eq(2.0f));
EXPECT_THAT(frame.Get(slot3), Eq(M_PI));
}
TEST(FrameLayoutTest, SimpleArrays) {
FrameLayout::Builder builder;
auto slot1 = builder.AddSlot<std::array<int, 4>>();
auto slot2 = builder.AddSlot<std::array<float, 4>>();
auto slot3 = builder.AddSlot<std::array<char, 4>>();
auto layout = std::move(builder).Build();
MemoryAllocation alloc(&layout);
FramePtr frame = alloc.frame();
EXPECT_THAT(frame.Get(slot1), ElementsAre(0, 0, 0, 0));
EXPECT_THAT(frame.Get(slot2), ElementsAre(0.0f, 0.0f, 0.0f, 0.0f));
EXPECT_THAT(frame.Get(slot3), ElementsAre(0, 0, 0, 0));
frame.Set(slot1, std::array<int, 4>{1, 2, 3, 4});
frame.Set(slot2, std::array<float, 4>{1.0f, 2.0f, 3.0f, 4.0f});
frame.Set(slot3, std::array<char, 4>{'a', 'b', 'c', 'd'});
EXPECT_THAT(frame.Get(slot1), ElementsAre(1, 2, 3, 4));
EXPECT_THAT(frame.Get(slot2), ElementsAre(1.0f, 2.0f, 3.0f, 4.0f));
EXPECT_THAT(frame.Get(slot3), ElementsAre('a', 'b', 'c', 'd'));
}
TEST(FrameLayoutTest, SimplePointers) {
FrameLayout::Builder builder;
auto slot1 = builder.AddSlot<int*>();
auto slot2 = builder.AddSlot<char*>();
auto layout = std::move(builder).Build();
MemoryAllocation alloc(&layout);
FramePtr frame = alloc.frame();
EXPECT_THAT(frame.Get(slot1), Eq(nullptr));
EXPECT_THAT(frame.Get(slot2), Eq(nullptr));
int int_values[] = {1, 2, 3, 4};
char text[] = "It was a dark and stormy night.";
frame.Set(slot1, int_values);
frame.Set(slot2, text);
EXPECT_THAT(frame.Get(slot1), Eq(int_values));
EXPECT_THAT(frame.Get(slot2), Eq(text));
}
TEST(FrameLayoutTest, SmartPointers) {
FrameLayout::Builder builder;
auto slot1 = builder.AddSlot<std::unique_ptr<int>>();
auto slot2 = builder.AddSlot<std::unique_ptr<std::string>>();
auto layout = std::move(builder).Build();
MemoryAllocation alloc(&layout);
FramePtr frame = alloc.frame();
EXPECT_THAT(frame.Get(slot1), Eq(nullptr));
EXPECT_THAT(frame.Get(slot2), Eq(nullptr));
frame.Set(slot1, std::make_unique<int>(12));
frame.Set(slot2,
std::make_unique<std::string>("It was a dark and stormy night."));
EXPECT_THAT(*frame.Get(slot1), Eq(12));
EXPECT_THAT(*frame.Get(slot2), Eq("It was a dark and stormy night."));
}
TEST(FrameLayoutTest, Vector) {
FrameLayout::Builder builder;
auto slot1 = builder.AddSlot<std::vector<int>>();
auto slot2 = builder.AddSlot<std::vector<std::string>>();
auto layout = std::move(builder).Build();
MemoryAllocation alloc(&layout);
FramePtr frame = alloc.frame();
EXPECT_THAT(frame.Get(slot1), IsEmpty());
EXPECT_THAT(frame.Get(slot2), IsEmpty());
auto* int_vector = frame.GetMutable(slot1);
int_vector->push_back(1);
int_vector->push_back(2);
int_vector->push_back(3);
auto* string_vector = frame.GetMutable(slot2);
string_vector->push_back("How");
string_vector->push_back("now");
string_vector->push_back("brown");
string_vector->push_back("cow?");
EXPECT_THAT(frame.Get(slot1), ElementsAre(1, 2, 3));
EXPECT_THAT(frame.Get(slot2), ElementsAre("How", "now", "brown", "cow?"));
}
TEST(FrameLayoutTest, Structs) {
FrameLayout::Builder builder;
auto slot1 = builder.AddSlot<SimpleStruct>();
auto slot2 = builder.AddSlot<InitializedStruct>();
auto layout = std::move(builder).Build();
MemoryAllocation alloc(&layout);
FramePtr frame = alloc.frame();
const SimpleStruct& s1 = frame.Get(slot1);
EXPECT_THAT(s1.a, Eq(0));
EXPECT_THAT(s1.b, Eq(0.0f));
const InitializedStruct& s2 = frame.Get(slot2);
EXPECT_THAT(s2.a, Eq(1));
EXPECT_THAT(s2.b, Eq(2.0f));
}
TEST(FrameLayoutTest, AFewDifferentTypesWellInitialized) {
FrameLayout::Builder builder;
auto slot1 = builder.AddSlot<std::vector<int>>();
auto slot2 = builder.AddSlot<std::vector<std::string>>();
auto slot3 = builder.AddSlot<std::vector<int>>();
auto slot4 = builder.AddSlot<SimpleStruct>();
auto slot5 = builder.AddSlot<InitializedStruct>();
auto slot6 = builder.AddSlot<std::vector<int>>();
auto slot7 = builder.AddSlot<std::vector<std::string>>();
auto slot8 = builder.AddSlot<std::vector<double>>();
auto slot9 = builder.AddSlot<InitializedStruct>();
auto layout = std::move(builder).Build();
MemoryAllocation alloc(&layout);
FramePtr frame = alloc.frame();
EXPECT_THAT(frame.Get(slot1), IsEmpty());
EXPECT_THAT(frame.Get(slot2), IsEmpty());
EXPECT_THAT(frame.Get(slot3), IsEmpty());
EXPECT_THAT(frame.Get(slot6), IsEmpty());
EXPECT_THAT(frame.Get(slot7), IsEmpty());
EXPECT_THAT(frame.Get(slot8), IsEmpty());
const SimpleStruct& simple = frame.Get(slot4);
EXPECT_THAT(simple.a, Eq(0));
EXPECT_THAT(simple.b, Eq(0.0f));
for (const InitializedStruct& init : {frame.Get(slot5), frame.Get(slot9)}) {
EXPECT_THAT(init.a, Eq(1));
EXPECT_THAT(init.b, Eq(2.0f));
}
}
TEST(FrameLayoutTest, HasField) {
FrameLayout::Builder builder;
auto slot1 = builder.AddSlot<int>();
auto slot2 = builder.AddSlot<std::vector<int>>();
auto slot3 = builder.AddSlot<SimpleStruct>();
auto slot4 = builder.AddSlot<std::array<SimpleStruct, 4>>();
auto slot5 = builder.AddSlot<InitializedStruct>();
auto slot6 = builder.AddSlot<std::array<InitializedStruct, 4>>();
auto layout = std::move(builder).Build();
EXPECT_TRUE(layout.HasField(slot1.byte_offset(), typeid(int)));
EXPECT_TRUE(layout.HasField(slot2.byte_offset(), typeid(std::vector<int>)));
EXPECT_TRUE(layout.HasField(slot3.byte_offset(), typeid(SimpleStruct)));
EXPECT_TRUE(layout.HasField(slot4.byte_offset(),
typeid(std::array<SimpleStruct, 4>)));
EXPECT_TRUE(layout.HasField(slot5.byte_offset(), typeid(InitializedStruct)));
EXPECT_TRUE(layout.HasField(slot6.byte_offset(),
typeid(std::array<InitializedStruct, 4>)));
}
TEST(FrameLayoutTest, RegisterUnsafeSlotWithEmptyField) {
FrameLayout::Builder builder;
ASSERT_TRUE(builder.RegisterUnsafeSlot(0, 0, typeid(std::monostate())).ok());
auto layout = std::move(builder).Build();
EXPECT_TRUE(layout.HasField(0, typeid(std::monostate())));
}
TEST(FrameLayoutTest, FieldDescriptorsRegisterUnsafe) {
FrameLayout::Builder builder;
auto slot = builder.AddSlot<int32_t>();
auto slot_1part =
FrameLayout::Slot<int16_t>::UnsafeSlotFromOffset(slot.byte_offset());
auto slot_2part =
FrameLayout::Slot<int16_t>::UnsafeSlotFromOffset(slot.byte_offset() + 2);
ASSERT_THAT(builder.RegisterUnsafeSlot(slot_1part), IsOk());
ASSERT_THAT(builder.RegisterUnsafeSlot(slot_2part), IsOk());
ASSERT_THAT(builder.RegisterUnsafeSlot(slot.byte_offset() + 2, sizeof(int8_t),
typeid(int8_t)),
IsOk());
#ifndef NDEBUG
EXPECT_THAT(builder.RegisterUnsafeSlot(slot_2part),
StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr("slot is already registered")));
EXPECT_THAT(builder.RegisterUnsafeSlot(slot_2part, true),
IsOk());
#endif
auto layout = std::move(builder).Build();
EXPECT_TRUE(layout.HasField(slot.byte_offset(), typeid(int32_t)));
EXPECT_TRUE(layout.HasField(slot.byte_offset(), typeid(int16_t)));
EXPECT_TRUE(layout.HasField(slot.byte_offset() + 2, typeid(int16_t)));
EXPECT_TRUE(layout.HasField(slot.byte_offset() + 2, typeid(int8_t)));
#ifndef NDEBUG
EXPECT_FALSE(layout.HasField(slot.byte_offset() + 2, typeid(float)));
EXPECT_FALSE(layout.HasField(slot.byte_offset() + 1, typeid(int8_t)));
#endif
}
TEST(FrameLayoutTest, FieldDescriptorsRegisterUnsafeErrors) {
FrameLayout::Builder builder;
auto slot = builder.AddSlot<int32_t>();
auto slot_1part =
FrameLayout::Slot<int16_t>::UnsafeSlotFromOffset(slot.byte_offset());
auto slot_after_end =
FrameLayout::Slot<int16_t>::UnsafeSlotFromOffset(slot.byte_offset() + 4);
auto uninitialized_slot =
FrameLayout::Slot<int16_t>::UnsafeUninitializedSlot();
auto status = builder.RegisterUnsafeSlot(slot_1part);
ASSERT_OK(status);
#ifndef NDEBUG
status = builder.RegisterUnsafeSlot(slot);
ASSERT_FALSE(status.ok());
ASSERT_EQ(status.code(), absl::StatusCode::kFailedPrecondition);
EXPECT_THAT(status.message(), HasSubstr("slot is already registered"));
status = builder.RegisterUnsafeSlot(slot_1part);
ASSERT_FALSE(status.ok());
ASSERT_EQ(status.code(), absl::StatusCode::kFailedPrecondition);
EXPECT_THAT(status.message(), HasSubstr("slot is already registered"));
#endif
status = builder.RegisterUnsafeSlot(slot_after_end);
ASSERT_FALSE(status.ok());
ASSERT_EQ(status.code(), absl::StatusCode::kFailedPrecondition);
EXPECT_THAT(status.message(),
HasSubstr("unable to register slot after the end of alloc"));
status = builder.RegisterUnsafeSlot(100, sizeof(int), typeid(int));
ASSERT_FALSE(status.ok());
ASSERT_EQ(status.code(), absl::StatusCode::kFailedPrecondition);
EXPECT_THAT(status.message(),
HasSubstr("unable to register slot after the end of alloc, "
"offset: 100, size: 4, alloc size: 4"));
status = builder.RegisterUnsafeSlot(uninitialized_slot);
ASSERT_FALSE(status.ok());
ASSERT_EQ(status.code(), absl::StatusCode::kFailedPrecondition);
EXPECT_THAT(status.message(),
HasSubstr("unable to register uninitialized slot"));
}
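// Test type whose constructor stores a pointer to itself and whose destructor
// clears it, letting the tests observe that subframe fields are constructed
// and destroyed in place.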
struct SelfReference {
const SelfReference* self;
SelfReference() : self(this) {}
SelfReference(const SelfReference&) = delete;
SelfReference& operator=(const SelfReference&) = delete;
~SelfReference() {
volatile auto secure_ptr = &self;
*secure_ptr = nullptr;
}
};
TEST(FrameLayoutTest, AddSubFrame) {
FrameLayout subframe_layout;
std::vector<FrameLayout::Slot<SelfReference>> field_slots;
{
FrameLayout::Builder builder;
for (int i = 0; i < 2; ++i) {
field_slots.push_back(builder.AddSlot<SelfReference>());
}
subframe_layout = std::move(builder).Build();
}
FrameLayout frame_layout;
std::vector<FrameLayout::Slot<void>> subframe_slots;
{
FrameLayout::Builder builder;
builder.AddSlot<float>();
for (int j = 0; j < 3; ++j) {
subframe_slots.push_back(builder.AddSubFrame(subframe_layout));
builder.AddSlot<double>();
}
frame_layout = std::move(builder).Build();
}
for (const auto& subframe_slot : subframe_slots) {
for (const auto& field_slot : field_slots) {
EXPECT_TRUE(frame_layout.HasField(
subframe_slot.byte_offset() + field_slot.byte_offset(),
typeid(SelfReference)));
}
}
const auto alloc =
AlignedAlloc(frame_layout.AllocAlignment(), frame_layout.AllocSize());
frame_layout.InitializeAlignedAlloc(alloc.get());
FramePtr frame(alloc.get(), &frame_layout);
for (const auto& subframe_slot : subframe_slots) {
for (const auto& field_slot : field_slots) {
const void* subframe_ptr =
frame.GetRawPointer(subframe_slot.byte_offset());
ConstFramePtr subframe(subframe_ptr, &subframe_layout);
const SelfReference& field = subframe.Get(field_slot);
EXPECT_TRUE(field.self == &field);
}
}
frame_layout.DestroyAlloc(alloc.get());
ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(alloc.get(), frame_layout.AllocSize());
for (const auto& subframe_slot : subframe_slots) {
for (const auto& field_slot : field_slots) {
const void* subframe_ptr =
frame.GetRawPointer(subframe_slot.byte_offset());
ConstFramePtr subframe(subframe_ptr, &subframe_layout);
const SelfReference& field = subframe.Get(field_slot);
EXPECT_TRUE(field.self == nullptr);
}
}
}
TEST(FrameLayoutTest, AddSubFrameAllocAlignment) {
FrameLayout::Builder builder;
builder.AddSubFrame(MakeTypeLayout<std::aligned_storage_t<16, 16>>());
builder.AddSubFrame(MakeTypeLayout<std::aligned_storage_t<16, 16>>());
auto frame_layout = std::move(builder).Build();
EXPECT_EQ(frame_layout.AllocSize(), 32);
EXPECT_EQ(frame_layout.AllocAlignment().value, 16);
}
TEST(FrameLayoutTest, ArrayCompatibility) {
FrameLayout::Builder builder;
builder.AddSlot<std::aligned_storage_t<16, 16>>();
builder.AddSlot<std::aligned_storage_t<1, 1>>();
auto frame_layout = std::move(builder).Build();
EXPECT_EQ(frame_layout.AllocSize(), 32);
EXPECT_EQ(frame_layout.AllocAlignment().value, 16);
}
TEST(FrameLayoutTest, InitDestroyAllocN) {
static int instance_counter = 0;
struct InstanceCounted {
InstanceCounted() { ++instance_counter; }
~InstanceCounted() { --instance_counter; }
};
struct SelfReferenced {
SelfReferenced() : self(this) {}
SelfReferenced* self;
};
FrameLayout::Builder builder;
auto int_slot = builder.AddSlot<int>();
auto self_ref_slot = builder.AddSlot<SelfReferenced>();
builder.AddSlot<InstanceCounted>();
auto layout = std::move(builder).Build();
const int n = 10;
const auto alloc =
AlignedAlloc(layout.AllocAlignment(), layout.AllocSize() * n);
layout.InitializeAlignedAllocN(alloc.get(), n);
EXPECT_EQ(instance_counter, n);
for (int i = 0; i < n; ++i) {
ConstFramePtr ith_frame(
static_cast<const std::byte*>(alloc.get()) + i * layout.AllocSize(),
&layout);
EXPECT_EQ(ith_frame.Get(int_slot), 0);
EXPECT_EQ(ith_frame.Get(self_ref_slot).self, &ith_frame.Get(self_ref_slot));
}
layout.DestroyAllocN(alloc.get(), n);
EXPECT_EQ(instance_counter, 0);
}
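// Type with an observable constructor and destructor. It is declared
// is_bzero_constructible below, so the layout is expected to skip the
// constructor while still running the destructor.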
struct IsBZeroConstructible {
static bool ctor_called;
static bool dtor_called;
IsBZeroConstructible() { ctor_called = true; }
~IsBZeroConstructible() { dtor_called = true; }
};
bool IsBZeroConstructible::ctor_called;
bool IsBZeroConstructible::dtor_called;
}
}
namespace arolla {
template <>
struct is_bzero_constructible<::arolla::testing::IsBZeroConstructible>
: std::true_type {};
}
namespace arolla::testing {
namespace {
TEST(FrameLayoutTest, IsBZeroConstructibleHandling) {
ASSERT_FALSE(IsBZeroConstructible::ctor_called);
ASSERT_FALSE(IsBZeroConstructible::dtor_called);
{
auto layout = MakeTypeLayout<IsBZeroConstructible>();
MemoryAllocation alloc(&layout);
}
EXPECT_FALSE(IsBZeroConstructible::ctor_called);
EXPECT_TRUE(IsBZeroConstructible::dtor_called);
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/memory/frame.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/memory/frame_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
b556b66a-2f27-4a98-b434-a184b2ab59b1 | cpp | google/arolla | strings_buffer | arolla/memory/strings_buffer.cc | arolla/memory/strings_buffer_test.cc | #include "arolla/memory/strings_buffer.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <limits>
#include <tuple>
#include <utility>
#include "absl/log/check.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "arolla/memory/optional_value.h"
#include "arolla/memory/raw_buffer_factory.h"
#include "arolla/memory/simple_buffer.h"
#include "arolla/util/fingerprint.h"
namespace arolla {
StringsBuffer::Builder::Builder(int64_t max_size, RawBufferFactory* factory)
: factory_(factory) {
size_t initial_char_buffer_size = max_size * 16;
DCHECK_LT(initial_char_buffer_size, std::numeric_limits<offset_type>::max());
size_t offsets_size = max_size * sizeof(Offsets);
InitDataPointers(
factory->CreateRawBuffer(offsets_size + initial_char_buffer_size),
max_size, initial_char_buffer_size);
std::memset(offsets_.data(), 0, offsets_size);
}
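// Pre-fills all rows: if a non-empty default value is given, it is appended to
// the character buffer and every offset initially points at it; otherwise all
// offsets start out as empty strings.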
StringsBuffer::ReshuffleBuilder::ReshuffleBuilder(
int64_t max_size, const StringsBuffer& buffer,
const OptionalValue<absl::string_view>& default_value,
RawBufferFactory* buf_factory)
: offsets_bldr_(max_size, buf_factory),
old_offsets_(buffer.offsets()),
characters_(buffer.characters()),
base_offset_(buffer.base_offset()) {
if (default_value.present && !default_value.value.empty()) {
int64_t def_value_size = default_value.value.size();
offsets_bldr_.SetNConst(
0, max_size, {characters_.size(), def_value_size + characters_.size()});
SimpleBuffer<char>::Builder chars_bldr(characters_.size() + def_value_size,
buf_factory);
char* data = chars_bldr.GetMutableSpan().data();
std::memcpy(data, characters_.begin(), characters_.size());
std::memcpy(data + characters_.size(), default_value.value.begin(),
def_value_size);
characters_ = std::move(chars_bldr).Build();
} else {
std::memset(offsets_bldr_.GetMutableSpan().begin(), 0,
max_size * sizeof(Offsets));
}
}
StringsBuffer StringsBuffer::Builder::Build(int64_t size) && {
DCHECK_LE(size, offsets_.size());
if (num_chars_ != characters_.size()) {
ResizeCharacters(num_chars_);
}
SimpleBuffer<Offsets> offsets(buf_, offsets_.subspan(0, size));
SimpleBuffer<char> characters(std::move(buf_),
characters_.subspan(0, num_chars_));
return StringsBuffer(std::move(offsets), std::move(characters));
}
void StringsBuffer::Builder::ResizeCharacters(size_t new_size) {
DCHECK_LT(new_size, std::numeric_limits<offset_type>::max());
size_t offsets_size = offsets_.size() * sizeof(Offsets);
InitDataPointers(factory_->ReallocRawBuffer(std::move(buf_), offsets_.begin(),
offsets_size + characters_.size(),
offsets_size + new_size),
offsets_.size(), new_size);
}
void StringsBuffer::Builder::InitDataPointers(
std::tuple<RawBufferPtr, void*>&& buf, int64_t offsets_count,
int64_t characters_size) {
buf_ = std::move(std::get<0>(buf));
void* data = std::get<1>(buf);
offsets_ =
absl::Span<Offsets>(reinterpret_cast<Offsets*>(data), offsets_count);
characters_ = absl::Span<char>(
reinterpret_cast<char*>(data) + offsets_count * sizeof(Offsets),
characters_size);
}
StringsBuffer::StringsBuffer(SimpleBuffer<StringsBuffer::Offsets> offsets,
SimpleBuffer<char> characters,
offset_type base_offset)
: offsets_(std::move(offsets)),
characters_(std::move(characters)),
base_offset_(base_offset) {
for (int64_t i = 0; i < offsets_.size(); ++i) {
DCHECK_LE(base_offset_, offsets_[i].start);
DCHECK_LE(offsets_[i].start, offsets_[i].end);
DCHECK_LE(offsets_[i].end, base_offset_ + characters_.size());
}
}
bool StringsBuffer::operator==(const StringsBuffer& other) const {
if (this == &other) {
return true;
}
if (size() != other.size()) {
return false;
}
return std::equal(begin(), end(), other.begin());
}
StringsBuffer StringsBuffer::Slice(int64_t offset, int64_t count) const& {
if (count == 0) {
return StringsBuffer{};
}
return StringsBuffer{offsets_.Slice(offset, count), characters_,
base_offset_};
}
StringsBuffer StringsBuffer::Slice(int64_t offset, int64_t count) && {
if (count == 0) {
return StringsBuffer{};
}
return StringsBuffer{std::move(offsets_).Slice(offset, count),
std::move(characters_), base_offset_};
}
StringsBuffer StringsBuffer::ShallowCopy() const {
return StringsBuffer(offsets_.ShallowCopy(), characters_.ShallowCopy(),
base_offset_);
}
StringsBuffer StringsBuffer::DeepCopy(RawBufferFactory* buffer_factory) const {
if (size() == 0) {
return StringsBuffer{};
}
offset_type min_offset = offsets_[0].start;
offset_type max_offset = offsets_[0].end;
for (int64_t i = 1; i < size(); ++i) {
min_offset = std::min(min_offset, offsets_[i].start);
max_offset = std::max(max_offset, offsets_[i].end);
}
auto characters_slice =
characters_.Slice(min_offset - base_offset_, max_offset - min_offset);
return StringsBuffer(offsets_.DeepCopy(buffer_factory),
characters_slice.DeepCopy(buffer_factory), min_offset);
}
void FingerprintHasherTraits<StringsBuffer>::operator()(
FingerprintHasher* hasher, const StringsBuffer& value) const {
hasher->Combine(value.size());
if (!value.empty()) {
auto offsets_span = value.offsets().span();
hasher->CombineRawBytes(offsets_span.data(),
offsets_span.size() * sizeof(offsets_span[0]));
hasher->CombineSpan(value.characters().span());
}
}
} | #include <array>
#include <cstddef>
#include <initializer_list>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/hash/hash_testing.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "arolla/memory/buffer.h"
#include "arolla/util/fingerprint.h"
namespace arolla {
namespace {
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
using ::testing::IsEmpty;
using ::testing::Not;
class StringsBufferTest : public ::testing::Test {
public:
Buffer<std::string> CreateTestBuffer(int num_rows) {
std::vector<std::string> values(num_rows);
for (int i = 0; i < num_rows; i++) {
values[i] = absl::StrFormat("str%d", i);
}
return Buffer<std::string>::Create(values.begin(), values.end());
}
template <typename T>
Buffer<std::string> CreateTestBuffer(std::initializer_list<T> values) {
return Buffer<std::string>::Create(values.begin(), values.end());
}
};
TEST_F(StringsBufferTest, Simple) {
Buffer<std::string> buffer = CreateTestBuffer(4);
EXPECT_TRUE(buffer.is_owner());
EXPECT_THAT(buffer, ElementsAre("str0", "str1", "str2", "str3"));
EXPECT_EQ(buffer[0], "str0");
EXPECT_EQ(buffer[3], "str3");
}
TEST_F(StringsBufferTest, Empty) {
Buffer<std::string> buffer1 = CreateTestBuffer(0);
EXPECT_THAT(buffer1, IsEmpty());
Buffer<std::string> buffer2 = buffer1.DeepCopy();
EXPECT_THAT(buffer2, IsEmpty());
Buffer<std::string> buffer3;
EXPECT_THAT(buffer3, IsEmpty());
}
TEST_F(StringsBufferTest, Move) {
size_t num_rows = 4;
Buffer<std::string> buffer = CreateTestBuffer(num_rows);
EXPECT_TRUE(buffer.is_owner());
Buffer<std::string> buffer2 = std::move(buffer);
EXPECT_TRUE(buffer2.is_owner());
EXPECT_FALSE(buffer.is_owner());
EXPECT_THAT(buffer2, ElementsAre("str0", "str1", "str2", "str3"));
Buffer<std::string> buffer3;
EXPECT_TRUE(buffer3.is_owner());
buffer3 = std::move(buffer2);
EXPECT_TRUE(buffer3.is_owner());
EXPECT_FALSE(buffer2.is_owner());
EXPECT_THAT(buffer3, ElementsAre("str0", "str1", "str2", "str3"));
}
TEST_F(StringsBufferTest, MemoryUsage) {
EXPECT_EQ(sizeof(Buffer<StringsBuffer::Offsets>), 4 * sizeof(void*));
EXPECT_EQ(sizeof(Buffer<char>), 4 * sizeof(void*));
EXPECT_EQ(sizeof(Buffer<std::string>),
sizeof(Buffer<StringsBuffer::Offsets>) + sizeof(Buffer<char>) + 8);
for (size_t sz = 0; sz < 10; sz += 1) {
const size_t chars = sz * 4;
const size_t offsets = sz * sizeof(StringsBuffer::Offsets);
Buffer<std::string> buffer = CreateTestBuffer(sz);
EXPECT_EQ(chars + offsets, buffer.memory_usage());
}
}
TEST_F(StringsBufferTest, MoveSlice) {
size_t num_rows = 10;
Buffer<std::string> buffer = CreateTestBuffer(num_rows);
EXPECT_TRUE(buffer.is_owner());
buffer = std::move(buffer).Slice(0, 5);
EXPECT_TRUE(buffer.is_owner());
EXPECT_THAT(buffer, ElementsAre("str0", "str1", "str2", "str3", "str4"));
Buffer<std::string> buffer2 = std::move(buffer).Slice(2, 3);
EXPECT_TRUE(buffer2.is_owner());
EXPECT_FALSE(buffer.is_owner());
EXPECT_THAT(buffer2, ElementsAre("str2", "str3", "str4"));
}
TEST_F(StringsBufferTest, ShallowCopy) {
size_t num_rows = 10;
Buffer<std::string> buffer = CreateTestBuffer(num_rows);
Buffer<std::string> buffer_copy1 = buffer.ShallowCopy();
EXPECT_FALSE(buffer_copy1.is_owner());
EXPECT_EQ(buffer.begin(), buffer_copy1.begin());
EXPECT_EQ(buffer.end(), buffer_copy1.end());
EXPECT_THAT(buffer, ElementsAreArray(buffer_copy1));
Buffer<std::string> buffer_copy2 = buffer.Slice(5, 5);
EXPECT_THAT(buffer, Not(ElementsAreArray(buffer_copy2)));
EXPECT_TRUE(buffer_copy2.is_owner());
EXPECT_EQ(buffer[5], buffer_copy2[0]);
}
TEST_F(StringsBufferTest, DeepCopy) {
size_t num_rows = 5;
Buffer<std::string> buffer = CreateTestBuffer(num_rows);
Buffer<std::string> buffer_copy = buffer.DeepCopy();
Buffer<std::string> buffer_slice_copy = buffer.Slice(1, 3).DeepCopy();
buffer = Buffer<std::string>();
EXPECT_TRUE(buffer_copy.is_owner());
EXPECT_THAT(buffer_copy, ElementsAre("str0", "str1", "str2", "str3", "str4"));
EXPECT_TRUE(buffer_slice_copy.is_owner());
EXPECT_THAT(buffer_slice_copy, ElementsAre("str1", "str2", "str3"));
buffer_copy = buffer.DeepCopy();
EXPECT_THAT(buffer_copy, IsEmpty());
}
TEST_F(StringsBufferTest, EmptySlice) {
size_t num_rows = 10;
Buffer<std::string> buffer = CreateTestBuffer(num_rows);
Buffer<std::string> copy = buffer.Slice(3, 0);
EXPECT_THAT(copy, IsEmpty());
buffer = std::move(buffer).Slice(3, 0);
EXPECT_THAT(buffer, IsEmpty());
copy = buffer.Slice(0, 0);
EXPECT_THAT(copy, IsEmpty());
}
TEST_F(StringsBufferTest, HugeString) {
StringsBuffer::Builder builder(2);
builder.Set(0, "small string");
std::string huge_string;
for (int i = 0; i < 1000; ++i) huge_string.append("huge string; ");
builder.Set(1, huge_string);
StringsBuffer buffer = std::move(builder).Build(2);
EXPECT_EQ(buffer.size(), 2);
EXPECT_EQ(buffer[0], "small string");
EXPECT_EQ(buffer[1], huge_string);
}
TEST_F(StringsBufferTest, SupportsAbslHash) {
StringsBuffer empty;
std::array<absl::string_view, 5> values = {"one", "two", "three", "four",
"five"};
StringsBuffer test1 = StringsBuffer::Create(values.begin(), values.end());
StringsBuffer test2 = StringsBuffer::Create(values.rbegin(), values.rend());
EXPECT_TRUE(
absl::VerifyTypeImplementsAbslHashCorrectly({empty, test1, test2}));
}
TEST_F(StringsBufferTest, Fingerprint) {
std::array<absl::string_view, 5> values = {"one", "two", "three", "four",
"five"};
StringsBuffer test1 = StringsBuffer::Create(values.begin(), values.end());
StringsBuffer test2 = StringsBuffer::Create(values.begin(), values.end());
StringsBuffer test3 = StringsBuffer::Create(values.rbegin(), values.rend());
Fingerprint f1 = FingerprintHasher("salt").Combine(test1).Finish();
Fingerprint f2 = FingerprintHasher("salt").Combine(test2).Finish();
Fingerprint f3 = FingerprintHasher("salt").Combine(test3).Finish();
EXPECT_EQ(f1, f2);
EXPECT_NE(f1, f3);
}
TEST(StringsBufferBuilder, Inserter) {
Buffer<std::string>::Builder builder(10);
auto inserter = builder.GetInserter(1);
for (int i = 0; i < 4; ++i) inserter.Add(absl::StrFormat("str%d", i));
builder.Set(0, "aba");
auto buffer = std::move(builder).Build(inserter);
EXPECT_THAT(buffer, ElementsAre("aba", "str0", "str1", "str2", "str3"));
}
TEST(StringsBufferBuilder, InserterCord) {
Buffer<std::string>::Builder builder(10);
auto inserter = builder.GetInserter(1);
for (int i = 0; i < 4; ++i) {
inserter.Add(absl::Cord(absl::StrFormat("str%d", i)));
}
builder.Set(0, "aba");
auto buffer = std::move(builder).Build(inserter);
EXPECT_THAT(buffer, ElementsAre("aba", "str0", "str1", "str2", "str3"));
}
TEST(StringsBufferBuilder, Generator) {
Buffer<std::string>::Builder builder(10);
builder.SetNConst(0, 10, "default");
int i = 0;
builder.SetN(2, 3, [&]() { return absl::StrFormat("str%d", ++i); });
auto buffer = std::move(builder).Build(6);
EXPECT_THAT(buffer, ElementsAre("default", "default", "str1", "str2", "str3",
"default"));
}
TEST(StringsBufferBuilder, RandomAccess) {
Buffer<std::string>::Builder builder(10);
builder.Set(4, "s1");
builder.Set(2, "s2");
builder.Set(1, "s3");
builder.Set(0, "s4");
builder.Set(3, "s5");
builder.Set(1, "s6");
auto buffer = std::move(builder).Build(5);
EXPECT_THAT(buffer, ElementsAre("s4", "s6", "s2", "s5", "s1"));
}
TEST(StringsBufferBuilder, RandomAccessCord) {
Buffer<std::string>::Builder builder(10);
builder.Set(4, absl::Cord("s1"));
builder.Set(2, absl::Cord("s2"));
builder.Set(1, absl::Cord("s3"));
builder.Set(0, absl::Cord("s4"));
builder.Set(3, absl::Cord("s5"));
builder.Set(1, absl::Cord("s6"));
auto buffer = std::move(builder).Build(5);
EXPECT_THAT(buffer, ElementsAre("s4", "s6", "s2", "s5", "s1"));
}
TEST(StringsBufferBuilder, ReshuffleBuilder) {
auto buf = CreateBuffer<std::string>({"5v", "4ab", "3", "2", "1"});
{
Buffer<std::string>::ReshuffleBuilder bldr(7, buf, std::nullopt);
bldr.CopyValue(3, 1);
bldr.CopyValue(1, 2);
bldr.CopyValue(2, 0);
bldr.CopyValueToRange(4, 7, 0);
auto res = std::move(bldr).Build();
EXPECT_THAT(res, ElementsAre("", "3", "5v", "4ab", "5v", "5v", "5v"));
EXPECT_EQ(res.characters().begin(), buf.characters().begin());
}
{
Buffer<std::string>::ReshuffleBuilder bldr(4, buf, {true, ""});
bldr.CopyValue(3, 1);
bldr.CopyValue(1, 2);
bldr.CopyValue(2, 0);
auto res = std::move(bldr).Build();
EXPECT_THAT(res, ElementsAre("", "3", "5v", "4ab"));
EXPECT_EQ(res.characters().begin(), buf.characters().begin());
}
{
Buffer<std::string>::ReshuffleBuilder bldr(4, buf, {true, "0abc"});
bldr.CopyValue(3, 1);
bldr.CopyValue(1, 2);
bldr.CopyValue(2, 0);
auto res = std::move(bldr).Build();
EXPECT_THAT(res, ElementsAre("0abc", "3", "5v", "4ab"));
}
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/memory/strings_buffer.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/memory/strings_buffer_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
0fbf4176-79d2-4fec-be4d-07982b247e44 | cpp | google/arolla | optools | arolla/optools/optools.cc | arolla/qexpr/optools_test.cc | #include "arolla/optools/optools.h"
#include <cstddef>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "arolla/expr/basic_expr_operator.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/expr_operator_signature.h"
#include "arolla/expr/registered_expr_operator.h"
#include "arolla/qexpr/operators.h"
#include "arolla/qtype/qtype.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/string.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::optools::optools_impl {
namespace {
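// Expr-level operator wrapping a set of QExpr overloads; the output type is
// resolved by matching the input QTypes exactly against each overload's
// signature.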
class QExprWrappingOperator final : public expr::BackendExprOperatorTag,
public expr::BasicExprOperator {
public:
QExprWrappingOperator(absl::string_view name,
std::vector<OperatorPtr> qexpr_ops,
expr::ExprOperatorSignature signature,
absl::string_view description)
: expr::BasicExprOperator(
name, signature, description,
FingerprintHasher("arolla::optools_impl::QExprWrappingOperator")
.Combine(name, signature)
.Finish()),
qexpr_ops_(std::move(qexpr_ops)) {}
absl::StatusOr<QTypePtr> GetOutputQType(
absl::Span<const QTypePtr> input_qtypes) const override {
for (const OperatorPtr& op : qexpr_ops_) {
absl::Span<const QTypePtr> required_qtypes =
op->signature()->input_types();
bool match = true;
for (size_t i = 0; i < input_qtypes.size(); ++i) {
if (input_qtypes[i] != required_qtypes[i]) {
match = false;
break;
}
}
if (match) {
return op->signature()->output_type();
}
}
std::string msg = "no such overload; available signatures: ";
bool is_first = true;
for (const auto& op : qexpr_ops_) {
      absl::StrAppend(&msg, NonFirstComma(is_first, ", "), op->signature());
}
return absl::InvalidArgumentError(msg);
}
private:
std::vector<OperatorPtr> qexpr_ops_;
};
}
absl::Status RegisterFunctionAsOperatorImpl(
absl::string_view name, std::vector<OperatorPtr> qexpr_ops,
expr::ExprOperatorSignature signature, absl::string_view description) {
RETURN_IF_ERROR(expr::ValidateSignature(signature));
if (expr::HasVariadicParameter(signature)) {
return absl::InvalidArgumentError(
"incorrect operator signature: RegisterFunctionAsOperator doesn't "
"support variadic args");
}
if (qexpr_ops.empty()) {
return absl::InvalidArgumentError(
"at least one qexpr operator is required");
}
size_t arg_count = qexpr_ops[0]->signature()->input_types().size();
for (const OperatorPtr& op : qexpr_ops) {
if (op->signature()->input_types().size() != arg_count) {
return absl::InvalidArgumentError(
"arg count must be the same for all overloads");
}
RETURN_IF_ERROR(
::arolla::OperatorRegistry::GetInstance()->RegisterOperator(name, op));
}
if (signature.parameters.empty()) {
signature = expr::ExprOperatorSignature::MakeArgsN(arg_count);
} else if (signature.parameters.size() != arg_count) {
return absl::InvalidArgumentError(
"operator signature doesn't match the function");
}
return expr::RegisterOperator<QExprWrappingOperator>(
name, name, std::move(qexpr_ops), signature, description)
.status();
}
} | #include "arolla/qexpr/optools.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "arolla/qexpr/operator_factory.h"
#include "arolla/qexpr/operators.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
namespace test_namespace {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::testing::Eq;
AROLLA_REGISTER_QEXPR_OPERATOR("optools_test.to_string_from_lambda",
[](int32_t x) {
std::pair<int32_t, int32_t> p(x, x);
return absl::StrCat(p.first);
});
std::string ToString(int32_t x) { return absl::StrCat(x); }
AROLLA_REGISTER_QEXPR_OPERATOR("optools_test.to_string_from_function",
ToString);
template <typename T>
struct ToStringOp {
std::string operator()(T x) const { return absl::StrCat(x); }
};
AROLLA_REGISTER_QEXPR_OPERATOR("optools_test.to_string_from_functor",
ToStringOp<int32_t>());
template <typename T, typename U>
class ToStringOperatorFamily : public arolla::OperatorFamily {
public:
absl::StatusOr<arolla::OperatorPtr> DoGetOperator(
absl::Span<const arolla::QTypePtr> input_types,
arolla::QTypePtr output_type) const final {
if (input_types.size() != 1 || input_types[0] != arolla::GetQType<T>()) {
return absl::InvalidArgumentError(
"the only supported input type is int32");
}
if (output_type != arolla::GetQType<U>()) {
return absl::InvalidArgumentError(
"the only supported output type is string");
}
return arolla::QExprOperatorFromFunction(ToString);
}
};
AROLLA_REGISTER_QEXPR_OPERATOR_FAMILY(
"optools_test.to_string_from_family",
std::make_unique<ToStringOperatorFamily<int32_t, std::string>>());
TEST(OptoolsTest, FromFunction) {
EXPECT_THAT(arolla::InvokeOperator<std::string>(
"optools_test.to_string_from_function", 57),
IsOkAndHolds(Eq("57")));
}
TEST(OptoolsTest, FromLambda) {
EXPECT_THAT(arolla::InvokeOperator<std::string>(
"optools_test.to_string_from_lambda", 57),
IsOkAndHolds(Eq("57")));
}
TEST(OptoolsTest, FromFunctor) {
EXPECT_THAT(arolla::InvokeOperator<std::string>(
"optools_test.to_string_from_functor", 57),
IsOkAndHolds(Eq("57")));
}
TEST(OptoolsTest, FromFamily) {
EXPECT_THAT(arolla::InvokeOperator<std::string>(
"optools_test.to_string_from_family", 57),
IsOkAndHolds(Eq("57")));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/optools/optools.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qexpr/optools_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
802b3b11-0f4c-4890-b4d4-7fec4156a47f | cpp | google/arolla | qtype | arolla/jagged_shape/dense_array/qtype/qtype.cc | arolla/jagged_shape/dense_array/qtype/qtype_test.cc | #include "arolla/jagged_shape/dense_array/qtype/qtype.h"
#include "absl/base/no_destructor.h"
#include "arolla/dense_array/edge.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/jagged_shape/dense_array/jagged_shape.h"
#include "arolla/jagged_shape/qtype/qtype.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/util/init_arolla.h"
#include "arolla/util/meta.h"
namespace arolla {
namespace {
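// Singleton QType for JaggedDenseArrayShape; its edge type is DenseArrayEdge.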
class JaggedDenseArrayShapeQType final : public JaggedShapeQType {
public:
static const JaggedDenseArrayShapeQType* GetInstance() {
static absl::NoDestructor<JaggedDenseArrayShapeQType> result;
return result.get();
}
JaggedDenseArrayShapeQType()
: JaggedShapeQType(meta::type<JaggedDenseArrayShape>(),
"JAGGED_DENSE_ARRAY_SHAPE") {}
  QTypePtr edge_qtype() const override { return GetQType<DenseArrayEdge>(); }
};
}
QTypePtr QTypeTraits<JaggedDenseArrayShape>::type() {
return JaggedDenseArrayShapeQType::GetInstance();
}
AROLLA_INITIALIZER(
.reverse_deps = {arolla::initializer_dep::kQTypes}, .init_fn = [] {
return SetEdgeQTypeToJaggedShapeQType(
GetQType<DenseArrayEdge>(), GetQType<JaggedDenseArrayShape>());
})
} | #include "arolla/jagged_shape/dense_array/qtype/qtype.h"
#include <cstdint>
#include <string>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/dense_array/edge.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/jagged_shape/array/jagged_shape.h"
#include "arolla/jagged_shape/array/qtype/qtype.h"
#include "arolla/jagged_shape/dense_array/jagged_shape.h"
#include "arolla/jagged_shape/qtype/qtype.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/util/testing/repr_token_eq.h"
namespace arolla {
namespace {
using ::absl_testing::StatusIs;
using ::arolla::testing::ReprTokenEq;
TEST(QTypeTest, TypedValueRepr) {
ASSERT_OK_AND_ASSIGN(auto edge, DenseArrayEdge::FromSplitPoints(
CreateDenseArray<int64_t>({0, 2})));
ASSERT_OK_AND_ASSIGN(auto shape, JaggedDenseArrayShape::FromEdges({edge}));
auto tv = TypedValue::FromValue(shape);
EXPECT_THAT(tv.GenReprToken(), ReprTokenEq("JaggedShape(2)"));
}
TEST(QTypeTest, JaggedDenseArrayShapeQType) {
QTypePtr type = GetQType<JaggedDenseArrayShape>();
EXPECT_NE(type, nullptr);
EXPECT_EQ(type->name(), "JAGGED_DENSE_ARRAY_SHAPE");
EXPECT_EQ(type->type_info(), typeid(JaggedDenseArrayShape));
EXPECT_EQ(type->value_qtype(), nullptr);
EXPECT_TRUE(IsJaggedShapeQType(type));
EXPECT_EQ(type, GetQType<JaggedDenseArrayShape>());
EXPECT_NE(type, GetQType<JaggedArrayShape>());
}
TEST(QTypeTest, JaggedDenseArrayShapeFingerprint) {
ASSERT_OK_AND_ASSIGN(auto edge1, DenseArrayEdge::FromSplitPoints(
CreateDenseArray<int64_t>({0, 2})));
ASSERT_OK_AND_ASSIGN(auto edge2, DenseArrayEdge::FromSplitPoints(
CreateDenseArray<int64_t>({0, 1, 3})));
ASSERT_OK_AND_ASSIGN(auto shape1,
JaggedDenseArrayShape::FromEdges({edge1, edge2}));
ASSERT_OK_AND_ASSIGN(auto shape2,
JaggedDenseArrayShape::FromEdges({edge1, edge2}));
auto tv1 = TypedValue::FromValue(shape1);
auto tv2 = TypedValue::FromValue(shape2);
EXPECT_EQ(tv1.GetFingerprint(), tv2.GetFingerprint());
ASSERT_OK_AND_ASSIGN(auto edge3, DenseArrayEdge::FromSplitPoints(
CreateDenseArray<int64_t>({0, 1, 4})));
ASSERT_OK_AND_ASSIGN(auto shape3,
JaggedDenseArrayShape::FromEdges({edge1, edge3}));
auto tv3 = TypedValue::FromValue(shape3);
EXPECT_NE(tv1.GetFingerprint(), tv3.GetFingerprint());
}
TEST(QTypeTest, CopyTo) {
ASSERT_OK_AND_ASSIGN(auto edge1, DenseArrayEdge::FromSplitPoints(
CreateDenseArray<int64_t>({0, 2})));
ASSERT_OK_AND_ASSIGN(auto edge2, DenseArrayEdge::FromSplitPoints(
CreateDenseArray<int64_t>({0, 1, 3})));
ASSERT_OK_AND_ASSIGN(auto shape,
JaggedDenseArrayShape::FromEdges({edge1, edge2}));
auto tv = TypedValue::FromValue(shape);
auto tv_copy = TypedValue(tv.AsRef());
EXPECT_EQ(tv.GetFingerprint(), tv_copy.GetFingerprint());
}
TEST(QTypeTest, JaggedShapeQTypeFromEdgeQType) {
{
ASSERT_OK_AND_ASSIGN(auto shape_qtype, GetJaggedShapeQTypeFromEdgeQType(
GetQType<DenseArrayEdge>()));
EXPECT_EQ(shape_qtype, GetQType<JaggedDenseArrayShape>());
}
{
EXPECT_THAT(
GetJaggedShapeQTypeFromEdgeQType(GetQType<DenseArrayGroupScalarEdge>()),
StatusIs(absl::StatusCode::kInvalidArgument,
"DENSE_ARRAY_TO_SCALAR_EDGE key is not registered"));
}
{
EXPECT_THAT(
SetEdgeQTypeToJaggedShapeQType(GetQType<DenseArrayEdge>(),
GetQType<DenseArrayGroupScalarEdge>()),
StatusIs(absl::StatusCode::kInvalidArgument,
"DENSE_ARRAY_EDGE key is already registered"));
}
}
TEST(QTypeTest, EdgeQType) {
auto type = GetQType<JaggedDenseArrayShape>();
auto shape_qtype = dynamic_cast<const JaggedShapeQType*>(type);
EXPECT_EQ(shape_qtype->edge_qtype(), GetQType<DenseArrayEdge>());
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/jagged_shape/dense_array/qtype/qtype.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/jagged_shape/dense_array/qtype/qtype_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
d5a011d6-a644-480a-9873-2b3562db0ce6 | cpp | google/arolla | slice_qtype | arolla/qtype/slice_qtype.cc | arolla/qtype/slice_qtype_test.cc | #include "arolla/qtype/slice_qtype.h"
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include "absl/base/no_destructor.h"
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "arolla/qtype/derived_qtype.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/tuple_qtype.h"
#include "arolla/util/fast_dynamic_downcast_final.h"
#include "arolla/util/repr.h"
namespace arolla {
namespace {
std::string SliceQTypeName(QTypePtr start, QTypePtr stop, QTypePtr step) {
return absl::StrCat("slice<", JoinTypeNames({start, stop, step}), ">");
}
class SliceQType final : public BasicDerivedQType {
public:
SliceQType(QTypePtr start, QTypePtr stop, QTypePtr step)
: BasicDerivedQType(ConstructorArgs{
.name = SliceQTypeName(start, stop, step),
.base_qtype = MakeTupleQType({start, stop, step}),
.qtype_specialization_key =
std::string(GetSliceQTypeSpecializationKey()),
}) {}
ReprToken UnsafeReprToken(const void* source) const override {
return ReprToken{
absl::StrCat("slice", GetBaseQType()->UnsafeReprToken(source).str)};
}
};
class SliceQTypeRegistry {
public:
static SliceQTypeRegistry* instance() {
static absl::NoDestructor<SliceQTypeRegistry> result;
return result.get();
}
QTypePtr GetQType(QTypePtr start, QTypePtr stop, QTypePtr step)
ABSL_LOCKS_EXCLUDED(lock_) {
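    // Fast path: look up the qtype under a shared lock. On a miss, build the
    // SliceQType outside the lock and insert it under an exclusive lock;
    // try_emplace keeps the first instance if another thread raced us.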
{
absl::ReaderMutexLock guard(&lock_);
if (const auto it = registry_.find({start, stop, step});
it != registry_.end()) {
return it->second.get();
}
}
auto slice_qtype = std::make_unique<SliceQType>(start, stop, step);
absl::MutexLock guard(&lock_);
return registry_.try_emplace({start, stop, step}, std::move(slice_qtype))
.first->second.get();
}
private:
using RegistryKey = std::tuple<QTypePtr, QTypePtr, QTypePtr>;
absl::Mutex lock_;
absl::flat_hash_map<RegistryKey, std::unique_ptr<SliceQType>> registry_
ABSL_GUARDED_BY(lock_);
};
}
bool IsSliceQType(const QType* qtype) {
return fast_dynamic_downcast_final<const SliceQType*>(qtype) != nullptr;
}
QTypePtr MakeSliceQType(QTypePtr start, QTypePtr stop, QTypePtr step) {
return SliceQTypeRegistry::instance()->GetQType(start, stop, step);
}
absl::string_view GetSliceQTypeSpecializationKey() {
return "::arolla::SliceQType";
}
} | #include "arolla/qtype/slice_qtype.h"
#include <cstdint>
#include "gtest/gtest.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/derived_qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/tuple_qtype.h"
#include "arolla/util/bytes.h"
namespace arolla::testing {
namespace {
TEST(SliceQType, MakeSliceQType) {
auto start = GetQType<int32_t>();
auto stop = GetQType<double>();
auto step = GetQType<Bytes>();
auto qtype = MakeSliceQType(start, stop, step);
EXPECT_EQ(qtype->name(), "slice<INT32,FLOAT64,BYTES>");
auto derived_qtype_interface =
dynamic_cast<const DerivedQTypeInterface*>(qtype);
ASSERT_NE(derived_qtype_interface, nullptr);
auto tuple_qtype = MakeTupleQType({start, stop, step});
EXPECT_EQ(derived_qtype_interface->GetBaseQType(), tuple_qtype);
{
auto qtype2 = MakeSliceQType(start, stop, step);
EXPECT_EQ(qtype, qtype2);
}
{
auto qtype2 = MakeSliceQType(start, stop, start);
EXPECT_EQ(qtype2->name(), "slice<INT32,FLOAT64,INT32>");
EXPECT_NE(qtype, qtype2);
}
}
TEST(SliceQType, IsSliceQType) {
auto start = GetQType<int32_t>();
auto stop = GetQType<double>();
auto step = GetQType<Bytes>();
auto tuple_qtype = MakeTupleQType({start, stop, step});
EXPECT_FALSE(IsSliceQType(tuple_qtype));
auto qtype = MakeSliceQType(start, stop, step);
EXPECT_TRUE(IsSliceQType(qtype));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/slice_qtype.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/slice_qtype_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
395cc3d9-d03c-4ef5-b9c9-47134def4148 | cpp | google/arolla | optional_qtype | arolla/qtype/optional_qtype.cc | arolla/qtype/optional_qtype_test.cc | #include "arolla/qtype/optional_qtype.h"
#include "absl/base/no_destructor.h"
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/synchronization/mutex.h"
#include "arolla/memory/frame.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/util/unit.h"
namespace arolla {
namespace {
class OptionalQTypeMaps {
public:
void Register(QTypePtr qtype, QTypePtr optional_qtype) {
absl::MutexLock l(&lock_);
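    // Map both the value qtype and its optional qtype to the optional qtype,
    // so the lookup is idempotent and the same table answers IsOptionalQType.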
to_optional_[qtype] = optional_qtype;
to_optional_[optional_qtype] = optional_qtype;
}
absl::StatusOr<QTypePtr> ToOptionalQType(QTypePtr qtype) {
absl::ReaderMutexLock l(&lock_);
auto iter = to_optional_.find(qtype);
if (iter == to_optional_.end()) {
return absl::InvalidArgumentError(
absl::StrCat("no optional qtype for ", qtype->name()));
}
return iter->second;
}
bool IsOptionalQType(QTypePtr qtype) {
absl::ReaderMutexLock l(&lock_);
auto iter = to_optional_.find(qtype);
return iter != to_optional_.end() && iter->second == qtype;
}
private:
absl::Mutex lock_;
absl::flat_hash_map<QTypePtr, QTypePtr> to_optional_ ABSL_GUARDED_BY(lock_);
};
OptionalQTypeMaps* GetOptionalQTypeMaps() {
static absl::NoDestructor<OptionalQTypeMaps> instance;
return instance.get();
}
}
void RegisterOptionalQType(QTypePtr optional_qtype) {
const auto* value_qtype = optional_qtype->value_qtype();
DCHECK(value_qtype != nullptr);
const auto& sub_slots = optional_qtype->type_fields();
DCHECK_GT(sub_slots.size(), 0);
DCHECK(sub_slots[0].GetType()->type_info() == typeid(bool));
DCHECK_EQ(sub_slots[0].byte_offset(), 0);
if (sub_slots.size() == 1) {
DCHECK_EQ(value_qtype, GetQType<Unit>());
} else if (sub_slots.size() == 2) {
DCHECK_EQ(sub_slots[1].GetType(), value_qtype);
} else {
LOG(FATAL) << "Unexpected number of subslots in optional: "
<< sub_slots.size();
}
GetOptionalQTypeMaps()->Register(value_qtype, optional_qtype);
}
absl::StatusOr<QTypePtr> ToOptionalQType(QTypePtr qtype) {
return GetOptionalQTypeMaps()->ToOptionalQType(qtype);
}
const QType* DecayOptionalQType(const QType* qtype) {
return IsOptionalQType(qtype) ? qtype->value_qtype() : qtype;
}
bool IsOptionalQType(const QType* qtype) {
return (qtype != nullptr && qtype->value_qtype() != nullptr &&
!qtype->type_fields().empty() &&
GetOptionalQTypeMaps()->IsOptionalQType(qtype));
}
absl::StatusOr<FrameLayout::Slot<bool>> GetPresenceSubslotFromOptional(
TypedSlot slot) {
if (!IsOptionalQType(slot.GetType())) {
return absl::InvalidArgumentError(
absl::StrCat("'", slot.GetType()->name(), "' is not optional qtype."));
}
if (slot.SubSlotCount() == 0) {
return absl::InternalError("optional value has no subslots.");
}
return slot.SubSlot(0).ToSlot<bool>();
}
absl::StatusOr<TypedSlot> GetValueSubslotFromOptional(TypedSlot slot) {
if (!IsOptionalQType(slot.GetType())) {
return absl::InvalidArgumentError(
absl::StrCat("'", slot.GetType()->name(), "' is not optional qtype."));
}
if (slot.SubSlotCount() != 2) {
return absl::InvalidArgumentError(absl::StrCat(
"'", slot.GetType()->name(), "' does not have a value subslot."));
}
return slot.SubSlot(1);
}
absl::StatusOr<TypedValue> CreateMissingValue(QTypePtr optional_qtype) {
if (!IsOptionalQType(optional_qtype)) {
return absl::InvalidArgumentError(absl::StrFormat(
"cannot create a missing value for non-optional qtype `%s`",
optional_qtype->name()));
}
return TypedValue::UnsafeFromTypeDefaultConstructed(optional_qtype);
}
} | #include "arolla/qtype/optional_qtype.h"
#include <cstdint>
#include <optional>
#include <utility>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/memory_allocation.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/testing/qtype.h"
#include "arolla/qtype/typed_ref.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/qtype/typed_value.h"
namespace arolla {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::arolla::testing::TypedValueWith;
using ::testing::FloatEq;
using ::testing::IsFalse;
using ::testing::IsTrue;
template <typename T>
using Slot = FrameLayout::Slot<T>;
TEST(OptionalQType, SplitOptionalUnitSlot) {
FrameLayout::Builder layout_builder;
auto slot = layout_builder.AddSlot<OptionalUnit>();
auto layout = std::move(layout_builder).Build();
Slot<bool> presence_slot1 = GetPresenceSubslotFromOptional(slot);
auto typed_slot = TypedSlot::FromSlot(slot);
ASSERT_OK_AND_ASSIGN(Slot<bool> presence_slot2,
GetPresenceSubslotFromOptional(typed_slot));
Slot<bool> presence_slot3 = UnsafePresenceSubslotFromOptional(typed_slot);
EXPECT_THAT(GetValueSubslotFromOptional(typed_slot),
StatusIs(absl::StatusCode::kInvalidArgument));
MemoryAllocation alloc(&layout);
FramePtr frame = alloc.frame();
frame.Set(slot, kPresent);
EXPECT_TRUE(frame.Get(presence_slot1));
EXPECT_TRUE(frame.Get(presence_slot2));
EXPECT_TRUE(frame.Get(presence_slot3));
frame.Set(slot, kMissing);
EXPECT_FALSE(frame.Get(presence_slot1));
EXPECT_FALSE(frame.Get(presence_slot2));
EXPECT_FALSE(frame.Get(presence_slot3));
}
TEST(OptionalQType, SplitOptionalFloatSlot) {
FrameLayout::Builder layout_builder;
auto slot = layout_builder.AddSlot<OptionalValue<float>>();
auto layout = std::move(layout_builder).Build();
Slot<bool> presence_slot1 = GetPresenceSubslotFromOptional(slot);
Slot<float> value_slot1 = GetValueSubslotFromOptional(slot);
auto typed_slot = TypedSlot::FromSlot(slot);
ASSERT_OK_AND_ASSIGN(Slot<bool> presence_slot2,
GetPresenceSubslotFromOptional(typed_slot));
ASSERT_OK_AND_ASSIGN(TypedSlot typed_value_slot2,
GetValueSubslotFromOptional(typed_slot));
ASSERT_OK_AND_ASSIGN(Slot<float> value_slot2,
typed_value_slot2.ToSlot<float>());
Slot<bool> presence_slot3 = UnsafePresenceSubslotFromOptional(typed_slot);
Slot<float> value_slot3 =
UnsafeValueSubslotFromOptional(typed_slot).UnsafeToSlot<float>();
MemoryAllocation alloc(&layout);
FramePtr frame = alloc.frame();
frame.Set(slot, 17.5f);
EXPECT_TRUE(frame.Get(presence_slot1));
EXPECT_TRUE(frame.Get(presence_slot2));
EXPECT_TRUE(frame.Get(presence_slot3));
EXPECT_THAT(frame.Get(value_slot1), FloatEq(17.5f));
EXPECT_THAT(frame.Get(value_slot2), FloatEq(17.5f));
EXPECT_THAT(frame.Get(value_slot3), FloatEq(17.5f));
frame.Set(slot, std::nullopt);
EXPECT_FALSE(frame.Get(presence_slot1));
EXPECT_FALSE(frame.Get(presence_slot2));
EXPECT_FALSE(frame.Get(presence_slot3));
}
TEST(OptionalQType, CreateMissingValue) {
EXPECT_THAT(
CreateMissingValue(GetOptionalQType<int64_t>()),
IsOkAndHolds(TypedValueWith<OptionalValue<int64_t>>(std::nullopt)));
EXPECT_THAT(
CreateMissingValue(GetQType<int64_t>()),
StatusIs(absl::StatusCode::kInvalidArgument,
"cannot create a missing value for non-optional qtype `INT64`"));
}
TEST(OptionalQType, UnsafeIsPresent) {
EXPECT_THAT(UnsafeIsPresent(TypedRef::FromValue(kPresent)), IsTrue());
EXPECT_THAT(UnsafeIsPresent(TypedRef::FromValue(kMissing)), IsFalse());
OptionalValue<float> present_float = 1;
EXPECT_THAT(UnsafeIsPresent(TypedRef::FromValue(present_float)), IsTrue());
OptionalValue<float> missing_float = std::nullopt;
EXPECT_THAT(UnsafeIsPresent(TypedRef::FromValue(missing_float)), IsFalse());
ASSERT_OK_AND_ASSIGN(TypedValue typed_missing_float,
CreateMissingValue(GetOptionalQType<float>()));
EXPECT_THAT(UnsafeIsPresent(typed_missing_float.AsRef()), IsFalse());
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/optional_qtype.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/optional_qtype_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
7337069a-bced-4adb-9f27-e26dd49f1d5e | cpp | google/arolla | typed_value | arolla/qtype/typed_value.cc | arolla/qtype/typed_value_test.cc | #include "arolla/qtype/typed_value.h"
#include <cstddef>
#include <memory>
#include <new>
#include <utility>
#include "absl/base/call_once.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "arolla/memory/frame.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/typed_ref.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/memory.h"
namespace arolla {
namespace {
template <typename TypedRef>
absl::Status CheckPreconditionsForInitCompound(
QTypePtr compound_qtype, absl::Span<const TypedRef> field_refs) {
const auto& field_slots = compound_qtype->type_fields();
if (field_slots.size() != field_refs.size()) {
return absl::InvalidArgumentError(absl::StrFormat(
"expected %d values, got %d; compound_qtype=%s", field_slots.size(),
field_refs.size(), compound_qtype->name()));
}
for (size_t i = 0; i < field_refs.size(); ++i) {
if (field_refs[i].GetType() != field_slots[i].GetType()) {
return absl::InvalidArgumentError(absl::StrFormat(
"expected fields[%d]: %s, got %s; compound_qtype=%s", i,
field_slots[i].GetType()->name(), field_refs[i].GetType()->name(),
compound_qtype->name()));
}
}
return absl::OkStatus();
}
template <typename TypedRef>
void InitCompound(QTypePtr compound_qtype,
absl::Span<const TypedRef> field_refs, void* destination) {
compound_qtype->type_layout().InitializeAlignedAlloc(destination);
const auto& field_slots = compound_qtype->type_fields();
FramePtr frame(destination, &compound_qtype->type_layout());
for (size_t i = 0; i < field_refs.size(); ++i) {
const auto& field_ref = field_refs[i];
field_ref.GetType()->UnsafeCopy(
field_ref.GetRawPointer(),
frame.GetRawPointer(field_slots[i].byte_offset()));
}
}
}
TypedValue::Impl* TypedValue::AllocRawImpl(QTypePtr qtype) {
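  // Reserve a single buffer for the control block plus the value storage,
  // then align the data pointer within the extra space that follows Impl.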
const auto& type_layout = qtype->type_layout();
const size_t alignment = type_layout.AllocAlignment().value;
size_t extra_space = type_layout.AllocSize() + alignment;
void* buffer = ::operator new(sizeof(Impl) + extra_space);
Impl* impl = new (buffer) Impl;
impl->qtype = qtype;
impl->data = static_cast<char*>(buffer) + sizeof(Impl);
void* tmp = std::align(alignment, extra_space, impl->data, extra_space);
DCHECK_NE(tmp, nullptr);
return impl;
}
TypedValue::Impl* TypedValue::AllocImpl(QTypePtr qtype, const void* value) {
auto* impl = AllocRawImpl(qtype);
qtype->type_layout().InitializeAlignedAlloc(impl->data);
qtype->UnsafeCopy(value, impl->data);
return impl;
}
TypedValue TypedValue::UnsafeFromTypeDefaultConstructed(QTypePtr qtype) {
auto* impl = AllocRawImpl(qtype);
qtype->type_layout().InitializeAlignedAlloc(impl->data);
return TypedValue(impl);
}
absl::StatusOr<TypedValue> TypedValue::FromFields(
QTypePtr compound_qtype, absl::Span<const TypedRef> fields) {
if (auto status = CheckPreconditionsForInitCompound(compound_qtype, fields);
!status.ok()) {
return status;
}
auto* impl = AllocRawImpl(compound_qtype);
InitCompound(compound_qtype, fields, impl->data);
return TypedValue(impl);
}
absl::StatusOr<TypedValue> TypedValue::FromFields(
QTypePtr compound_qtype, absl::Span<const TypedValue> fields) {
if (auto status = CheckPreconditionsForInitCompound(compound_qtype, fields);
!status.ok()) {
return status;
}
auto* impl = AllocRawImpl(compound_qtype);
InitCompound(compound_qtype, fields, impl->data);
return TypedValue(impl);
}
const Fingerprint& TypedValue::GetFingerprint() const {
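  // The fingerprint is computed lazily and at most once per Impl; concurrent
  // callers synchronize on fingerprint_once.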
absl::call_once(impl_->fingerprint_once, [impl = impl_] {
FingerprintHasher hasher("TypedValue");
hasher.Combine(impl->qtype);
impl->qtype->UnsafeCombineToFingerprintHasher(impl->data, &hasher);
impl->fingerprint = std::move(hasher).Finish();
});
return impl_->fingerprint;
}
} | #include "arolla/qtype/typed_value.h"
#include <cstdint>
#include <utility>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/status/statusor.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/memory_allocation.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/util/bytes.h"
#include "arolla/util/fingerprint.h"
struct WithoutQTypeTraits {};
namespace arolla {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::testing::Eq;
using ::testing::HasSubstr;
TEST(TypedValueTest, ReprBasic) {
EXPECT_EQ(TypedValue::FromValue<bool>(true).Repr(), "true");
EXPECT_EQ(TypedValue::FromValue<int32_t>(5).Repr(), "5");
EXPECT_EQ(TypedValue::FromValue<int64_t>(5).Repr(), "int64{5}");
EXPECT_EQ(TypedValue::FromValue<uint64_t>(5).Repr(), "uint64{5}");
EXPECT_EQ(TypedValue::FromValue<float>(5.0f).Repr(), "5.");
EXPECT_EQ(TypedValue::FromValue<double>(5.0).Repr(), "float64{5}");
}
TEST(TypedValueTest, FromValue) {
auto tval = TypedValue::FromValue<int64_t>(1);
EXPECT_THAT(tval.GetType(), Eq(GetQType<int64_t>()));
EXPECT_THAT(tval.As<int64_t>(), IsOkAndHolds(int64_t{1}));
EXPECT_THAT(tval.As<float>().status(),
StatusIs(absl::StatusCode::kFailedPrecondition));
}
TEST(TypedValueTest, As) {
auto int_value = TypedValue::FromValue<double>(1.0);
EXPECT_THAT(int_value.As<double>(), IsOkAndHolds(1.0));
EXPECT_THAT(int_value.As<float>().status(),
StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr("type mismatch: expected C++ type `double` "
"(FLOAT64), got `float`")));
EXPECT_THAT(int_value.As<WithoutQTypeTraits>().status(),
StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr("type mismatch: expected C++ type `double` "
"(FLOAT64), got `WithoutQTypeTraits`")));
}
TEST(TypedValueTest, FromValueWithQType) {
auto f64 = GetQType<double>();
absl::StatusOr<TypedValue> tmp = TypedValue::FromValueWithQType(1.0, f64);
auto tval = std::move(tmp).value();
EXPECT_THAT(tval.GetType(), Eq(f64));
EXPECT_THAT(tval.As<double>(), IsOkAndHolds(1.0));
EXPECT_THAT(tval.As<float>(),
StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr("type mismatch: expected C++ type `double` "
"(FLOAT64), got `float`")));
EXPECT_THAT(TypedValue::FromValueWithQType(1.0f, f64).status(),
StatusIs(absl::StatusCode::kFailedPrecondition));
}
TEST(TypedValueTest, UnsafeFromTypeDefaultConstructed) {
{
auto f64 = GetQType<double>();
auto tval = TypedValue::UnsafeFromTypeDefaultConstructed(f64);
EXPECT_THAT(tval.GetType(), Eq(GetQType<double>()));
EXPECT_THAT(tval.As<double>(), IsOkAndHolds(double{0.}));
EXPECT_THAT(tval.As<float>().status(),
StatusIs(absl::StatusCode::kFailedPrecondition));
}
{
auto bytes = GetQType<Bytes>();
auto tval = TypedValue::UnsafeFromTypeDefaultConstructed(bytes);
EXPECT_THAT(tval.GetType(), Eq(GetQType<Bytes>()));
ASSERT_OK_AND_ASSIGN(auto val_ref, tval.As<Bytes>());
EXPECT_THAT(val_ref.get(), Eq(Bytes()));
EXPECT_THAT(tval.As<float>().status(),
StatusIs(absl::StatusCode::kFailedPrecondition));
}
{
auto of64 = GetQType<OptionalValue<double>>();
auto tval = TypedValue::UnsafeFromTypeDefaultConstructed(of64);
EXPECT_THAT(tval.GetType(), Eq(GetQType<OptionalValue<double>>()));
ASSERT_OK_AND_ASSIGN(auto val_ref, tval.As<OptionalValue<double>>());
EXPECT_THAT(val_ref.get(), Eq(OptionalValue<double>()));
EXPECT_THAT(tval.As<OptionalValue<float>>().status(),
StatusIs(absl::StatusCode::kFailedPrecondition));
}
}
TEST(TypedValueTest, FromSlot) {
FrameLayout::Builder builder;
QTypePtr f32 = GetQType<float>();
TypedSlot tslot = AddSlot(f32, &builder);
auto layout = std::move(builder).Build();
MemoryAllocation alloc(&layout);
FramePtr ptr = alloc.frame();
EXPECT_OK(TypedValue::FromValue(7.5f).CopyToSlot(tslot, ptr));
auto tval = TypedValue::FromSlot(tslot, ptr);
EXPECT_THAT(tval.GetType(), Eq(f32));
EXPECT_THAT(tval.As<float>(), IsOkAndHolds(7.5f));
}
TEST(TypedValueTest, ToSlot) {
FrameLayout::Builder builder;
QTypePtr f64 = GetQType<double>();
TypedSlot tslot = AddSlot(f64, &builder);
auto layout = std::move(builder).Build();
MemoryAllocation alloc(&layout);
FramePtr ptr = alloc.frame();
auto tval = TypedValue::FromValue(double{1.0});
EXPECT_OK(tval.CopyToSlot(tslot, ptr));
auto slot = tslot.ToSlot<double>().value();
EXPECT_THAT(ptr.Get(slot), Eq(1.0));
}
TEST(TypedValueTest, Copy) {
auto tval = TypedValue::FromValue(double{1.0});
auto tval_copy = tval;
EXPECT_THAT(tval_copy.As<double>(), IsOkAndHolds(1.0));
}
TEST(TypedValueTest, FingerprintUniqueness) {
absl::flat_hash_set<Fingerprint> fingerprints;
EXPECT_TRUE(
fingerprints.insert(TypedValue::FromValue(int32_t{0}).GetFingerprint())
.second);
EXPECT_TRUE(
fingerprints.insert(TypedValue::FromValue(int64_t{0}).GetFingerprint())
.second);
EXPECT_TRUE(
fingerprints.insert(TypedValue::FromValue(uint64_t{0}).GetFingerprint())
.second);
EXPECT_TRUE(
fingerprints.insert(TypedValue::FromValue(double{0}).GetFingerprint())
.second);
EXPECT_TRUE(
fingerprints.insert(TypedValue::FromValue(float{0}).GetFingerprint())
.second);
}
TEST(TypedValueTest, FingerprintReproducibility) {
EXPECT_EQ(TypedValue::FromValue(int32_t{0}).GetFingerprint(),
TypedValue::FromValue(int32_t{0}).GetFingerprint());
EXPECT_EQ(TypedValue::FromValue(int64_t{0}).GetFingerprint(),
TypedValue::FromValue(int64_t{0}).GetFingerprint());
EXPECT_EQ(TypedValue::FromValue(uint64_t{0}).GetFingerprint(),
TypedValue::FromValue(uint64_t{0}).GetFingerprint());
EXPECT_EQ(TypedValue::FromValue(float{0}).GetFingerprint(),
TypedValue::FromValue(float{0}).GetFingerprint());
EXPECT_EQ(TypedValue::FromValue(double{0}).GetFingerprint(),
TypedValue::FromValue(double{0}).GetFingerprint());
}
TEST(TypedValueTest, UnsafeAs) {
auto tval = TypedValue::FromValue<int64_t>(1);
ASSERT_THAT(tval.GetType(), Eq(GetQType<int64_t>()));
EXPECT_THAT(tval.UnsafeAs<int64_t>(), Eq(int64_t{1}));
}
TEST(TypedValueTest, CopyConstructor) {
TypedValue x = TypedValue::FromValue<int64_t>(1);
TypedValue y = x;
EXPECT_EQ(x.GetType(), y.GetType());
EXPECT_EQ(x.GetRawPointer(), y.GetRawPointer());
}
TEST(TypedValueTest, CopyOperator) {
TypedValue x = TypedValue::FromValue<int64_t>(1);
TypedValue y = TypedValue::FromValue<int64_t>(2);
y = x;
EXPECT_EQ(x.GetType(), y.GetType());
EXPECT_EQ(x.GetRawPointer(), y.GetRawPointer());
}
TEST(TypedValueTest, MoveConstructor) {
TypedValue x = TypedValue::FromValue<int64_t>(1);
auto* x_type = x.GetType();
auto* x_raw_ptr = x.GetRawPointer();
TypedValue y = std::move(x);
EXPECT_EQ(y.GetType(), x_type);
EXPECT_EQ(y.GetRawPointer(), x_raw_ptr);
}
TEST(TypedValueTest, MoveOperator) {
TypedValue x = TypedValue::FromValue<int64_t>(1);
TypedValue y = TypedValue::FromValue<int64_t>(2);
auto* x_type = x.GetType();
auto* x_raw_ptr = x.GetRawPointer();
y = std::move(x);
EXPECT_EQ(y.GetType(), x_type);
EXPECT_EQ(y.GetRawPointer(), x_raw_ptr);
}
TEST(TypedValueTest, CopyFromValue) {
const Bytes bytes("data");
TypedValue x = TypedValue::FromValue(bytes);
ASSERT_OK_AND_ASSIGN(Bytes x_bytes, x.As<Bytes>());
EXPECT_THAT(x_bytes, Eq(bytes));
}
TEST(TypedValueTest, CopyFromValueT) {
const Bytes bytes("data");
TypedValue x = TypedValue::FromValue<Bytes>(bytes);
ASSERT_OK_AND_ASSIGN(Bytes x_bytes, x.As<Bytes>());
EXPECT_THAT(x_bytes, Eq(bytes));
}
TEST(TypedValueTest, MoveFromValueT) {
Bytes bytes("a long string literal to ensure memory allocation");
auto* data_raw_ptr = bytes.data();
TypedValue x = TypedValue::FromValue<Bytes>(std::move(bytes));
EXPECT_EQ(x.UnsafeAs<Bytes>().data(), data_raw_ptr);
}
TEST(TypedValueTest, CopyFromValueWithQType) {
const Bytes bytes("data");
ASSERT_OK_AND_ASSIGN(
TypedValue x, TypedValue::FromValueWithQType(bytes, GetQType<Bytes>()));
ASSERT_OK_AND_ASSIGN(Bytes x_bytes, x.As<Bytes>());
EXPECT_THAT(x_bytes, Eq(bytes));
}
TEST(TypedValueTest, MoveFromValueWithQType) {
Bytes bytes("a long string literal to ensure memory allocation");
auto* data_raw_ptr = bytes.data();
ASSERT_OK_AND_ASSIGN(TypedValue x, TypedValue::FromValueWithQType(
std::move(bytes), GetQType<Bytes>()));
EXPECT_EQ(x.UnsafeAs<Bytes>().data(), data_raw_ptr);
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/typed_value.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/typed_value_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
5f5ef3e3-bb1a-46e0-b920-a077c2c24a8f | cpp | google/arolla | weak_qtype | arolla/qtype/weak_qtype.cc | arolla/qtype/weak_qtype_test.cc | #include "arolla/qtype/weak_qtype.h"
#include <utility>
#include <vector>
#include "absl/base/no_destructor.h"
#include "absl/log/check.h"
#include "absl/strings/str_cat.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/derived_qtype.h"
#include "arolla/qtype/optional_qtype.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/repr.h"
namespace arolla {
namespace {
class WeakFloatQType final : public BasicDerivedQType {
public:
explicit WeakFloatQType()
: BasicDerivedQType(ConstructorArgs{
.name = "WEAK_FLOAT",
.base_qtype = GetQType<double>(),
}) {
CHECK_OK(VerifyDerivedQType(this));
}
static QTypePtr get() {
static const absl::NoDestructor<WeakFloatQType> result;
return result.get();
}
ReprToken UnsafeReprToken(const void* source) const override {
return GenReprTokenWeakFloat(*static_cast<const double*>(source));
}
};
class OptionalWeakFloatQType final : public QType,
public DerivedQTypeInterface {
public:
OptionalWeakFloatQType() : QType(MakeConstructorArgs()) {
CHECK_OK(VerifyDerivedQType(this));
}
static QTypePtr get() {
static const absl::NoDestructor<OptionalWeakFloatQType> result;
return result.get();
}
QTypePtr GetBaseQType() const final { return GetOptionalQType<double>(); }
ReprToken UnsafeReprToken(const void* source) const final {
const auto& value = *static_cast<const OptionalValue<double>*>(source);
if (value.present) {
return ReprToken{
absl::StrCat("optional_", GenReprTokenWeakFloat(value.value).str)};
}
return ReprToken{"optional_weak_float{NA}"};
}
void UnsafeCopy(const void* source, void* destination) const final {
if (source != destination) {
*static_cast<OptionalValue<double>*>(destination) =
*static_cast<const OptionalValue<double>*>(source);
}
}
void UnsafeCombineToFingerprintHasher(const void* source,
FingerprintHasher* hasher) const final {
hasher->Combine(*static_cast<const OptionalValue<double>*>(source));
}
private:
static ConstructorArgs MakeConstructorArgs() {
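    // Reuse OPTIONAL_FLOAT64's layout and fields, relabeling the value
    // subslot as WEAK_FLOAT so the optional reports the correct value_qtype.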
auto base_qtype = GetOptionalQType<double>();
std::vector<TypedSlot> fields = base_qtype->type_fields();
DCHECK_EQ(fields.size(), 2);
fields[1] = TypedSlot::UnsafeFromOffset(WeakFloatQType::get(),
fields[1].byte_offset());
return ConstructorArgs{
.name = "OPTIONAL_WEAK_FLOAT",
.type_info = base_qtype->type_info(),
.type_layout = base_qtype->type_layout(),
.type_fields = std::move(fields),
.value_qtype = WeakFloatQType::get(),
};
}
};
}
QTypePtr GetWeakFloatQType() { return WeakFloatQType::get(); }
QTypePtr GetOptionalWeakFloatQType() { return OptionalWeakFloatQType::get(); }
namespace {
static const int optional_weak_float_registered =
(RegisterOptionalQType(GetOptionalWeakFloatQType()), 1);
}
} | #include "arolla/qtype/weak_qtype.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "arolla/array/qtype/types.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/optional_qtype.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/shape_qtype.h"
#include "arolla/qtype/standard_type_properties/properties.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/util/testing/repr_token_eq.h"
#include "arolla/util/unit.h"
namespace arolla {
namespace {
using ::absl_testing::StatusIs;
using ::arolla::testing::ReprTokenEq;
using ::testing::MatchesRegex;
TEST(WeakQTypeTest, Smoke) {
auto qtype = GetWeakFloatQType();
EXPECT_EQ(qtype->name(), "WEAK_FLOAT");
auto optional_qtype = GetOptionalWeakFloatQType();
EXPECT_EQ(optional_qtype->name(), "OPTIONAL_WEAK_FLOAT");
}
TEST(WeakQTypeTest, Optional) {
QTypePtr qtype = GetWeakFloatQType();
QTypePtr optional_qtype = GetOptionalWeakFloatQType();
EXPECT_EQ(optional_qtype->value_qtype(), qtype);
EXPECT_TRUE(IsOptionalQType(optional_qtype));
ASSERT_OK_AND_ASSIGN(QTypePtr to_optional_res, ToOptionalQType(qtype));
EXPECT_EQ(to_optional_res, optional_qtype);
EXPECT_EQ(DecayOptionalQType(optional_qtype), qtype);
ASSERT_OK_AND_ASSIGN(TypedValue v, CreateMissingValue(optional_qtype));
ASSERT_EQ(v.GetType(), optional_qtype);
}
TEST(WeakQTypeTest, IsScalarQType) {
EXPECT_TRUE(IsScalarQType(GetWeakFloatQType()));
EXPECT_FALSE(IsScalarQType(GetOptionalWeakFloatQType()));
}
TEST(WeakQTypeTest, GetScalarQType) {
{
ASSERT_OK_AND_ASSIGN(QTypePtr scalar_qtype,
GetScalarQType(GetWeakFloatQType()));
EXPECT_EQ(scalar_qtype, GetWeakFloatQType());
}
{
ASSERT_OK_AND_ASSIGN(QTypePtr scalar_qtype,
GetScalarQType(GetOptionalWeakFloatQType()));
EXPECT_EQ(scalar_qtype, GetWeakFloatQType());
}
}
TEST(WeakQTypeTest, WithScalarQType) {
{
ASSERT_OK_AND_ASSIGN(
QTypePtr res_qtype,
WithScalarQType(GetQType<float>(), GetWeakFloatQType()));
EXPECT_EQ(res_qtype, GetWeakFloatQType());
}
{
ASSERT_OK_AND_ASSIGN(
QTypePtr res_qtype,
WithScalarQType(GetOptionalQType<double>(), GetWeakFloatQType()));
EXPECT_EQ(res_qtype, GetOptionalWeakFloatQType());
}
EXPECT_THAT(WithScalarQType(GetArrayQType<float>(), GetWeakFloatQType()),
StatusIs(absl::StatusCode::kInvalidArgument,
MatchesRegex("Array type with elements of type "
"WEAK_FLOAT is not registered.")));
{
ASSERT_OK_AND_ASSIGN(
QTypePtr res_qtype,
WithScalarQType(GetWeakFloatQType(), GetQType<double>()));
EXPECT_EQ(res_qtype, GetQType<double>());
}
{
ASSERT_OK_AND_ASSIGN(
QTypePtr res_qtype,
WithScalarQType(GetOptionalWeakFloatQType(), GetQType<float>()));
EXPECT_EQ(res_qtype, GetOptionalQType<float>());
}
}
TEST(WeakQTypeTest, DecayContainerQType) {
EXPECT_EQ(DecayContainerQType(GetWeakFloatQType()), GetWeakFloatQType());
EXPECT_EQ(DecayContainerQType(GetOptionalWeakFloatQType()),
GetWeakFloatQType());
}
TEST(WeakQTypeTest, GetShapeQType) {
{
ASSERT_OK_AND_ASSIGN(QTypePtr shape_qtype,
GetShapeQType(GetWeakFloatQType()));
EXPECT_EQ(shape_qtype, GetQType<ScalarShape>());
}
{
ASSERT_OK_AND_ASSIGN(QTypePtr shape_qtype,
GetShapeQType(GetOptionalWeakFloatQType()));
EXPECT_EQ(shape_qtype, GetQType<OptionalScalarShape>());
}
}
TEST(WeakQTypeTest, GetPresenceQType) {
{
ASSERT_OK_AND_ASSIGN(QTypePtr presence_qtype,
GetPresenceQType(GetWeakFloatQType()));
EXPECT_EQ(presence_qtype, GetQType<Unit>());
}
{
ASSERT_OK_AND_ASSIGN(QTypePtr presence_qtype,
GetPresenceQType(GetOptionalWeakFloatQType()));
EXPECT_EQ(presence_qtype, GetOptionalQType<Unit>());
}
}
TEST(WeakQTypeTest, OptionalLike) {
EXPECT_FALSE(IsOptionalLikeQType(GetWeakFloatQType()));
EXPECT_TRUE(IsOptionalLikeQType(GetOptionalWeakFloatQType()));
{
ASSERT_OK_AND_ASSIGN(QTypePtr optional_like_qtype,
ToOptionalLikeQType(GetWeakFloatQType()));
EXPECT_EQ(optional_like_qtype, GetOptionalWeakFloatQType());
}
{
ASSERT_OK_AND_ASSIGN(QTypePtr optional_like_qtype,
ToOptionalLikeQType(GetOptionalWeakFloatQType()));
EXPECT_EQ(optional_like_qtype, GetOptionalWeakFloatQType());
}
}
TEST(WeakQTypeTest, WeakFloatFingerprint) {
const double value_a = 1.5;
const double value_b = 2.5;
const auto float64_qvalue_a = TypedValue::FromValue(value_a);
ASSERT_OK_AND_ASSIGN(
const auto weak_float_qvalue_a1,
TypedValue::FromValueWithQType(value_a, GetWeakFloatQType()));
ASSERT_OK_AND_ASSIGN(
const auto weak_float_qvalue_a2,
TypedValue::FromValueWithQType(value_a, GetWeakFloatQType()));
ASSERT_OK_AND_ASSIGN(
const auto weak_float_qvalue_b,
TypedValue::FromValueWithQType(value_b, GetWeakFloatQType()));
EXPECT_NE(float64_qvalue_a.GetFingerprint(),
weak_float_qvalue_a1.GetFingerprint());
EXPECT_EQ(weak_float_qvalue_a1.GetFingerprint(),
weak_float_qvalue_a2.GetFingerprint());
EXPECT_NE(weak_float_qvalue_a1.GetFingerprint(),
weak_float_qvalue_b.GetFingerprint());
}
TEST(WeakQTypeTest, OptionalWeakFloatFingerprint) {
const OptionalValue<double> value_a(1.5);
const OptionalValue<double> value_b(2.5);
const auto optional_float64_qvalue_a = TypedValue::FromValue(value_a);
ASSERT_OK_AND_ASSIGN(
const auto optional_weak_float_qvalue_a1,
TypedValue::FromValueWithQType(value_a, GetOptionalWeakFloatQType()));
ASSERT_OK_AND_ASSIGN(
const auto optional_weak_float_qvalue_a2,
TypedValue::FromValueWithQType(value_a, GetOptionalWeakFloatQType()));
ASSERT_OK_AND_ASSIGN(
const auto optional_weak_float_qvalue_b,
TypedValue::FromValueWithQType(value_b, GetOptionalWeakFloatQType()));
EXPECT_NE(optional_float64_qvalue_a.GetFingerprint(),
optional_weak_float_qvalue_a1.GetFingerprint());
EXPECT_EQ(optional_weak_float_qvalue_a1.GetFingerprint(),
optional_weak_float_qvalue_a2.GetFingerprint());
EXPECT_NE(optional_weak_float_qvalue_a1.GetFingerprint(),
optional_weak_float_qvalue_b.GetFingerprint());
}
TEST(WeakQTypeTest, WeakFloatRepr) {
ASSERT_OK_AND_ASSIGN(auto qvalue, TypedValue::FromValueWithQType(
double{1.5}, GetWeakFloatQType()));
EXPECT_THAT(qvalue.GenReprToken(), ReprTokenEq("weak_float{1.5}"));
}
TEST(WeakQTypeTest, OptionalWeakFloatRepr) {
ASSERT_OK_AND_ASSIGN(
auto qvalue, TypedValue::FromValueWithQType(OptionalValue<double>(1.5),
GetOptionalWeakFloatQType()));
EXPECT_THAT(qvalue.GenReprToken(), ReprTokenEq("optional_weak_float{1.5}"));
}
TEST(WeakQTypeTest, OptionalWeakFloatMissingValueRepr) {
ASSERT_OK_AND_ASSIGN(
auto qvalue, TypedValue::FromValueWithQType(OptionalValue<double>(),
GetOptionalWeakFloatQType()));
EXPECT_THAT(qvalue.GenReprToken(), ReprTokenEq("optional_weak_float{NA}"));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/weak_qtype.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/weak_qtype_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
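A minimal usage sketch distilled from the weak_qtype files above; the test name and the literal 2.5 are hypothetical, and only calls already shown in those files (GetWeakFloatQType, TypedValue::FromValueWithQType, GetType, Repr) are used, with the expected repr following the float64 conventions visible above.

#include "gtest/gtest.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/qtype/weak_qtype.h"

namespace arolla {
namespace {

// Sketch: a weak float keeps FLOAT64 storage but reports its own qtype and
// repr.
TEST(WeakQTypeUsageSketch, ConstructAndInspect) {
  auto qvalue =
      TypedValue::FromValueWithQType(double{2.5}, GetWeakFloatQType()).value();
  EXPECT_EQ(qvalue.GetType(), GetWeakFloatQType());
  EXPECT_EQ(qvalue.Repr(), "weak_float{2.5}");
}

}  // namespace
}  // namespace arolla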
a755e12a-a649-46e9-8e77-9ef89b24815e | cpp | google/arolla | simple_qtype | arolla/qtype/simple_qtype.cc | arolla/qtype/simple_qtype_test.cc | #include "arolla/qtype/simple_qtype.h"
#include <cstdint>
#include <optional>
#include <string>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
namespace arolla {
absl::Status SimpleQType::InitNameMap() {
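  // Build the field-name -> index map once; a duplicated field name is
  // reported as a failed precondition.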
name2index_.reserve(field_names_.size());
for (const auto& field_name : field_names_) {
if (bool inserted =
name2index_.emplace(field_name, name2index_.size()).second;
!inserted) {
return absl::FailedPreconditionError(absl::StrCat(
"duplicated name field for QType ", name(), ": ", field_name));
}
}
return absl::OkStatus();
}
absl::Span<const std::string> SimpleQType::GetFieldNames() const {
return field_names_;
}
std::optional<int64_t> SimpleQType::GetFieldIndexByName(
absl::string_view field_name) const {
if (auto it = name2index_.find(field_name); it != name2index_.end()) {
return it->second;
}
return std::nullopt;
}
} | #include "arolla/qtype/simple_qtype.h"
#include <cstdint>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/no_destructor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/named_field_qtype.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/meta.h"
#include "arolla/util/repr.h"
#include "arolla/util/struct_field.h"
#include "arolla/util/testing/repr_token_eq.h"
namespace arolla {
namespace {
using ::arolla::testing::ReprTokenEq;
using ::testing::ElementsAre;
using ::testing::IsEmpty;
using ::testing::MatchesRegex;
struct TypeWithRepr {};
struct TypeWithoutRepr {};
struct FullFeaturedType {
int32_t state;
};
struct TypeWithNamedFields {
float x;
double y;
constexpr static auto ArollaStructFields() {
using CppType = TypeWithNamedFields;
return std::tuple{
AROLLA_DECLARE_STRUCT_FIELD(x),
AROLLA_DECLARE_STRUCT_FIELD(y),
};
}
void ArollaFingerprint(FingerprintHasher* hasher) const {
CombineStructFields(hasher, *this);
}
};
}
AROLLA_DECLARE_QTYPE(TypeWithRepr);
AROLLA_DECLARE_QTYPE(TypeWithoutRepr);
AROLLA_DECLARE_QTYPE(FullFeaturedType);
AROLLA_DECLARE_QTYPE(TypeWithNamedFields);
AROLLA_DECLARE_FINGERPRINT_HASHER_TRAITS(TypeWithRepr);
void FingerprintHasherTraits<TypeWithRepr>::operator()(
FingerprintHasher*, const TypeWithRepr&) const {}
AROLLA_DECLARE_FINGERPRINT_HASHER_TRAITS(TypeWithoutRepr);
void FingerprintHasherTraits<TypeWithoutRepr>::operator()(
FingerprintHasher*, const TypeWithoutRepr&) const {}
AROLLA_DECLARE_FINGERPRINT_HASHER_TRAITS(FullFeaturedType);
void FingerprintHasherTraits<FullFeaturedType>::operator()(
FingerprintHasher* hasher, const FullFeaturedType& value) const {
hasher->Combine(value.state);
}
AROLLA_DECLARE_REPR(TypeWithRepr);
ReprToken ReprTraits<TypeWithRepr>::operator()(const TypeWithRepr&) const {
return ReprToken{"type_with_repr", {10, 50}};
}
AROLLA_DECLARE_REPR(FullFeaturedType);
ReprToken ReprTraits<FullFeaturedType>::operator()(
const FullFeaturedType& value) const {
return ReprToken{absl::StrFormat("FullFeaturedType{%d}", value.state),
{31, 27}};
}
AROLLA_DEFINE_SIMPLE_QTYPE(TYPE_WITH_REPR, TypeWithRepr);
AROLLA_DEFINE_SIMPLE_QTYPE(TYPE_WITHOUT_REPR, TypeWithoutRepr);
AROLLA_DEFINE_SIMPLE_QTYPE(TYPE_WITH_NAMED_FIELDS, TypeWithNamedFields);
QTypePtr QTypeTraits<FullFeaturedType>::type() {
struct FullFeaturedTypeQType final : SimpleQType {
FullFeaturedTypeQType()
: SimpleQType(
meta::type<FullFeaturedType>(), "FullFeaturedType",
GetQType<TypeWithoutRepr>(),
"::arolla::FullFeaturedQType") {}
absl::string_view UnsafePyQValueSpecializationKey(
        const void*) const final {
return "::arolla::FullFeaturedQValue";
}
};
static const absl::NoDestructor<FullFeaturedTypeQType> result;
return result.get();
}
namespace {
TEST(SimpleQType, TypeWithRepr) {
TypeWithRepr x;
EXPECT_THAT(GetQType<TypeWithRepr>()->UnsafeReprToken(&x),
ReprTokenEq("type_with_repr", {10, 50}));
}
TEST(SimpleQType, TypeWithoutRepr) {
TypeWithoutRepr x;
const auto repr_result = GetQType<TypeWithoutRepr>()->UnsafeReprToken(&x);
EXPECT_THAT(repr_result.str,
MatchesRegex("<value of TYPE_WITHOUT_REPR at 0x[0-9a-f]+>"));
EXPECT_THAT(repr_result.precedence.left, -1);
EXPECT_THAT(repr_result.precedence.right, -1);
}
TEST(SimpleQType, FullFeaturedQType) {
auto qtype = GetQType<FullFeaturedType>();
const FullFeaturedType x{4};
EXPECT_EQ(qtype->value_qtype(), GetQType<TypeWithoutRepr>());
EXPECT_EQ(qtype->qtype_specialization_key(), "::arolla::FullFeaturedQType");
EXPECT_THAT(qtype->UnsafeReprToken(&x),
ReprTokenEq("FullFeaturedType{4}", {31, 27}));
EXPECT_EQ(qtype->UnsafePyQValueSpecializationKey(&x),
"::arolla::FullFeaturedQValue");
FingerprintHasher hx("salt");
FingerprintHasher hy("salt");
const FullFeaturedType y{3};
qtype->UnsafeCombineToFingerprintHasher(&x, &hx);
qtype->UnsafeCombineToFingerprintHasher(&y, &hy);
EXPECT_NE(std::move(hx).Finish(), std::move(hy).Finish());
}
TEST(SimpleQType, TypeWithNames) {
QTypePtr qtype = GetQType<TypeWithNamedFields>();
EXPECT_THAT(GetFieldNames(qtype), ElementsAre("x", "y"));
EXPECT_EQ(GetFieldIndexByName(qtype, "x"), 0);
EXPECT_EQ(GetFieldIndexByName(qtype, "y"), 1);
EXPECT_EQ(GetFieldIndexByName(qtype, "z"), std::nullopt);
}
TEST(SimpleQType, TypeWithNamesErrors) {
QTypePtr qtype = GetQType<int>();
EXPECT_THAT(GetFieldNames(qtype), IsEmpty());
EXPECT_EQ(GetFieldIndexByName(qtype, "y"), std::nullopt);
EXPECT_EQ(GetFieldIndexByName(qtype, "x"), std::nullopt);
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/simple_qtype.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/simple_qtype_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
6ec690be-11c7-4341-96de-6b3f76ee661a | cpp | google/arolla | derived_qtype | arolla/qtype/derived_qtype.cc | arolla/qtype/derived_qtype_test.cc | #include "arolla/qtype/derived_qtype.h"
#include <cstddef>
#include <utility>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/typed_ref.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/repr.h"
namespace arolla {
BasicDerivedQType::BasicDerivedQType(ConstructorArgs args)
: QType(QType::ConstructorArgs{
.name = std::move(args.name),
.type_info = args.base_qtype->type_info(),
.type_layout = args.base_qtype->type_layout(),
.type_fields = args.base_qtype->type_fields(),
.value_qtype = args.value_qtype,
.qtype_specialization_key = std::move(args.qtype_specialization_key),
}),
base_qtype_(args.base_qtype) {
CHECK_OK(VerifyDerivedQType(this));
}
ReprToken BasicDerivedQType::UnsafeReprToken(const void* source) const {
return ReprToken{
absl::StrCat(name(), "{", base_qtype_->UnsafeReprToken(source).str, "}")};
}
void BasicDerivedQType::UnsafeCopy(const void* source,
void* destination) const {
base_qtype_->UnsafeCopy(source, destination);
}
void BasicDerivedQType::UnsafeCombineToFingerprintHasher(
const void* source, FingerprintHasher* hasher) const {
base_qtype_->UnsafeCombineToFingerprintHasher(source, hasher);
}
const QType* DecayDerivedQType(const QType* qtype) {
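  // Returns the base qtype for derived qtypes; any other qtype (including
  // nullptr) is returned unchanged.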
if (auto* derived_qtype_interface =
dynamic_cast<const DerivedQTypeInterface*>(qtype)) {
return derived_qtype_interface->GetBaseQType();
}
return qtype;
}
absl::Status VerifyDerivedQType(QTypePtr qtype) {
const auto* derived_qtype_interface =
dynamic_cast<const DerivedQTypeInterface*>(qtype);
if (derived_qtype_interface == nullptr) {
return absl::InvalidArgumentError(
absl::StrFormat("%s is not a derived qtype", qtype->name()));
}
const auto* base_qtype = derived_qtype_interface->GetBaseQType();
if (base_qtype == nullptr) {
return absl::FailedPreconditionError(absl::StrFormat(
"invalid derived_qtype=%s: base_qtype=nullptr", qtype->name()));
}
if (dynamic_cast<const DerivedQTypeInterface*>(base_qtype) != nullptr) {
return absl::FailedPreconditionError(absl::StrFormat(
"base_qtype=%s cannot be a derived qtype", base_qtype->name()));
}
const bool type_info_ok = (qtype->type_info() == base_qtype->type_info());
if (!type_info_ok) {
return absl::FailedPreconditionError(absl::StrFormat(
"invalid derived_qtype=%s: base_qtype=%s: incompatible type_info",
qtype->name(), base_qtype->name()));
}
const bool type_layout_ok =
(qtype->type_layout().AllocSize() ==
base_qtype->type_layout().AllocSize() &&
qtype->type_layout().AllocAlignment().value ==
base_qtype->type_layout().AllocAlignment().value);
if (!type_layout_ok) {
return absl::FailedPreconditionError(absl::StrFormat(
"invalid derived_qtype=%s: base_qtype=%s: incompatible type_layout",
qtype->name(), base_qtype->name()));
}
bool type_fields_ok =
(qtype->type_fields().empty() ||
qtype->type_fields().size() == base_qtype->type_fields().size());
for (size_t i = 0; type_fields_ok && i < qtype->type_fields().size() &&
i < base_qtype->type_fields().size();
++i) {
const auto& derived_field = qtype->type_fields()[i];
const auto& base_field = base_qtype->type_fields()[i];
type_fields_ok = type_fields_ok &&
(derived_field.byte_offset() == base_field.byte_offset() &&
DecayDerivedQType(derived_field.GetType()) ==
DecayDerivedQType(base_field.GetType()));
}
  if (!type_fields_ok) {
return absl::FailedPreconditionError(absl::StrFormat(
"invalid derived_qtype=%s: base_qtype=%s: incompatible type_fields",
qtype->name(), base_qtype->name()));
}
const bool value_qtype_ok =
(qtype->value_qtype() == nullptr ||
base_qtype->value_qtype() == nullptr ||
DecayDerivedQType(qtype->value_qtype()) ==
DecayDerivedQType(base_qtype->value_qtype()));
if (!value_qtype_ok) {
return absl::FailedPreconditionError(absl::StrFormat(
"invalid derived_qtype=%s: base_qtype=%s: incompatible value_qtype",
qtype->name(), base_qtype->name()));
}
return absl::OkStatus();
}
TypedRef DecayDerivedQValue(TypedRef qvalue) {
return TypedRef::UnsafeFromRawPointer(DecayDerivedQType(qvalue.GetType()),
qvalue.GetRawPointer());
}
TypedValue DecayDerivedQValue(const TypedValue& qvalue) {
return TypedValue(DecayDerivedQValue(qvalue.AsRef()));
}
TypedRef UnsafeDowncastDerivedQValue(QTypePtr derived_qtype, TypedRef qvalue) {
DCHECK_NE(derived_qtype, nullptr);
auto* base_qtype = DecayDerivedQType(derived_qtype);
DCHECK_EQ(qvalue.GetType(), base_qtype);
return TypedRef::UnsafeFromRawPointer(derived_qtype, qvalue.GetRawPointer());
}
TypedValue UnsafeDowncastDerivedQValue(QTypePtr derived_qtype,
const TypedValue& qvalue) {
return TypedValue(UnsafeDowncastDerivedQValue(derived_qtype, qvalue.AsRef()));
}
} | #include "arolla/qtype/derived_qtype.h"
#include <utility>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/no_destructor.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/tuple_qtype.h"
#include "arolla/qtype/typed_ref.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/testing/repr_token_eq.h"
namespace arolla {
namespace {
using ::arolla::testing::ReprTokenEq;
struct PointQType final : BasicDerivedQType {
PointQType()
: BasicDerivedQType(ConstructorArgs{
.name = "POINT",
.base_qtype =
MakeTupleQType({GetQType<double>(), GetQType<double>()}),
.value_qtype = GetQType<double>(),
.qtype_specialization_key = "::arolla::PointQType",
}) {}
static QTypePtr get() {
static const absl::NoDestructor<PointQType> result;
return result.get();
}
};
TEST(BasicDerivedQTypeTest, QTypeProperties) {
const auto point_qtype = PointQType::get();
EXPECT_EQ(point_qtype->name(), "POINT");
EXPECT_EQ(point_qtype->value_qtype(), GetQType<double>());
EXPECT_EQ(point_qtype->qtype_specialization_key(), "::arolla::PointQType");
const auto tuple_qtype =
MakeTupleQType({GetQType<double>(), GetQType<double>()});
EXPECT_EQ(point_qtype->type_info(), tuple_qtype->type_info());
EXPECT_EQ(point_qtype->type_layout().AllocSize(),
tuple_qtype->type_layout().AllocSize());
EXPECT_EQ(point_qtype->type_layout().AllocAlignment().value,
tuple_qtype->type_layout().AllocAlignment().value);
EXPECT_EQ(point_qtype->type_fields().size(), 2);
}
TEST(BasicDerivedQTypeTest, DefaultRepr) {
const auto tuple_qvalue = MakeTupleFromFields(1., 2.);
const auto point_qvalue =
UnsafeDowncastDerivedQValue(PointQType::get(), tuple_qvalue.AsRef());
EXPECT_THAT(point_qvalue.GenReprToken(),
ReprTokenEq("POINT{(float64{1}, float64{2})}"));
}
TEST(BasicDerivedQTypeTest, UnsafeCombineToFingerprintHasher) {
const auto tuple_qvalue = MakeTupleFromFields(1., 2.);
const auto* tuple_qtype = tuple_qvalue.GetType();
const auto* point_qtype = PointQType::get();
FingerprintHasher hasher1("seed");
FingerprintHasher hasher2("seed");
tuple_qtype->UnsafeCombineToFingerprintHasher(tuple_qvalue.GetRawPointer(),
&hasher1);
point_qtype->UnsafeCombineToFingerprintHasher(tuple_qvalue.GetRawPointer(),
&hasher2);
EXPECT_EQ(std::move(hasher1).Finish(), std::move(hasher2).Finish());
}
TEST(BasicDerivedQTypeTest, DecayDerivedQType) {
const auto point_qtype = PointQType::get();
const auto tuple_qtype =
MakeTupleQType({GetQType<double>(), GetQType<double>()});
EXPECT_NE(point_qtype, tuple_qtype);
EXPECT_EQ(DecayDerivedQType(point_qtype), tuple_qtype);
EXPECT_EQ(DecayDerivedQType(tuple_qtype), tuple_qtype);
EXPECT_EQ(DecayDerivedQType(nullptr), nullptr);
}
TEST(BasicDerivedQTypeTest, UnsafeDowncastDerivedQRef) {
const auto tuple_qvalue = MakeTupleFromFields(1., 2.);
const auto point_qvalue = TypedValue(
UnsafeDowncastDerivedQValue(PointQType::get(), tuple_qvalue.AsRef()));
EXPECT_EQ(point_qvalue.GetType(), PointQType::get());
EXPECT_NE(point_qvalue.GetFingerprint(), tuple_qvalue.GetFingerprint());
}
TEST(BasicDerivedQTypeTest, UnsafeDowncastDerivedQValue) {
const auto tuple_qvalue = MakeTupleFromFields(1., 2.);
const auto point_qvalue =
UnsafeDowncastDerivedQValue(PointQType::get(), tuple_qvalue);
EXPECT_EQ(point_qvalue.GetType(), PointQType::get());
EXPECT_NE(point_qvalue.GetFingerprint(), tuple_qvalue.GetFingerprint());
}
TEST(BasicDerivedQTypeTest, DecayDerivedQRef) {
const auto tuple_qvalue = MakeTupleFromFields(1., 2.);
const auto point_qvalue = TypedValue(
UnsafeDowncastDerivedQValue(PointQType::get(), tuple_qvalue.AsRef()));
EXPECT_NE(point_qvalue.GetFingerprint(), tuple_qvalue.GetFingerprint());
EXPECT_EQ(
TypedValue(DecayDerivedQValue(point_qvalue.AsRef())).GetFingerprint(),
tuple_qvalue.GetFingerprint());
EXPECT_EQ(
TypedValue(DecayDerivedQValue(tuple_qvalue.AsRef())).GetFingerprint(),
tuple_qvalue.GetFingerprint());
}
TEST(BasicDerivedQTypeTest, DecayDerivedQValue) {
const auto tuple_qvalue = MakeTupleFromFields(1., 2.);
const auto point_qvalue =
UnsafeDowncastDerivedQValue(PointQType::get(), tuple_qvalue);
EXPECT_NE(point_qvalue.GetFingerprint(), tuple_qvalue.GetFingerprint());
EXPECT_EQ(DecayDerivedQValue(point_qvalue).GetFingerprint(),
tuple_qvalue.GetFingerprint());
EXPECT_EQ(TypedValue(DecayDerivedQValue(tuple_qvalue)).GetFingerprint(),
tuple_qvalue.GetFingerprint());
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/derived_qtype.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/derived_qtype_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
cfe580f4-0120-4a4a-afd0-a78f383ab876 | cpp | google/arolla | any_qtype | arolla/qtype/any_qtype.cc | arolla/qtype/any_qtype_test.cc | #include "arolla/qtype/any_qtype.h"
#include <typeinfo>
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include "arolla/qtype/simple_qtype.h"
#include "arolla/util/demangle.h"
namespace arolla {
absl::Status Any::InvalidCast(const std::type_info& t) const {
if (value_.has_value()) {
return absl::FailedPreconditionError(absl::StrFormat(
"can not cast Any(%s) to %s", TypeName(value_.type()), TypeName(t)));
} else {
return absl::FailedPreconditionError("can not cast an empty ::arolla::Any");
}
}
AROLLA_DEFINE_SIMPLE_QTYPE(ANY, Any);
} | #include "arolla/qtype/any_qtype.h"
#include <string>
#include <utility>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "arolla/qtype/typed_value.h"
namespace arolla {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::testing::HasSubstr;
TEST(AnyQType, AnyConstructorRegression) {
Any any;
Any copy_1 = any;
Any copy_2(any);
Any copy_3 = std::move(any);
Any copy_4(std::move(copy_2));
}
TEST(AnyQType, Any) {
int v1 = 5;
std::string v2 = "string";
TypedValue tv1 = TypedValue::FromValue(Any(v1));
TypedValue tv2 = TypedValue::FromValue(Any(v2));
TypedValue tv3 = TypedValue::FromValue(Any());
ASSERT_OK_AND_ASSIGN(const Any& a1, tv1.As<Any>());
ASSERT_OK_AND_ASSIGN(const Any& a2, tv2.As<Any>());
ASSERT_OK_AND_ASSIGN(const Any& a3, tv3.As<Any>());
EXPECT_THAT(a1.As<int>(), IsOkAndHolds(v1));
EXPECT_THAT(a1.As<double>(), StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr("can not cast Any")));
ASSERT_OK_AND_ASSIGN(const std::string& v2_res, a2.As<std::string>());
EXPECT_EQ(v2, v2_res);
EXPECT_THAT(a2.As<double>(), StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr("can not cast Any")));
EXPECT_THAT(a3.As<double>(),
StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr("can not cast an empty ::arolla::Any")));
}
TEST(AnyQType, Fingerprint) {
Any a = Any(1);
Any b = Any(1);
Any a_copy = a;
EXPECT_NE(TypedValue::FromValue(a).GetFingerprint(),
TypedValue::FromValue(b).GetFingerprint());
EXPECT_EQ(TypedValue::FromValue(a).GetFingerprint(),
TypedValue::FromValue(a_copy).GetFingerprint());
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/any_qtype.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/any_qtype_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
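A short sketch of boxing a user-defined payload in Any, modeled on the test above; UserPayload and the test name are hypothetical, while the As<>() and ASSERT_OK_AND_ASSIGN usage mirrors the std::string case shown there.

#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "arolla/qtype/any_qtype.h"
#include "arolla/qtype/typed_value.h"

namespace arolla {
namespace {

// Hypothetical payload type; Any does not require QTypeTraits for it.
struct UserPayload {
  int id;
};

TEST(AnyQTypeUsageSketch, WrapsArbitraryPayload) {
  TypedValue boxed = TypedValue::FromValue(Any(UserPayload{42}));
  ASSERT_OK_AND_ASSIGN(const Any& any, boxed.As<Any>());
  ASSERT_OK_AND_ASSIGN(const UserPayload& payload, any.As<UserPayload>());
  EXPECT_EQ(payload.id, 42);
}

}  // namespace
}  // namespace arolla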
a6871c12-fea8-48ff-a88a-4a630c10dcb0 | cpp | google/arolla | shape_qtype | arolla/qtype/shape_qtype.cc | arolla/qtype/shape_qtype_test.cc | #include "arolla/qtype/shape_qtype.h"
#include <string>
#include "absl/base/no_destructor.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/optional_qtype.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/meta.h"
#include "arolla/util/repr.h"
#include "arolla/util/unit.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla {
namespace {
absl::Status EnsureIsBaseType(QTypePtr qtype) {
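  // Shape qtypes may only be parameterized with scalar or optional scalar
  // value qtypes; anything else is rejected here.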
return IsScalarQType(qtype) || IsOptionalQType(qtype)
? absl::OkStatus()
: absl::InvalidArgumentError(absl::StrFormat(
"Shape::WithValueQType supports only scalar and "
"optional values, got %s",
qtype->name()));
}
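// Shape qtype of a plain scalar: attaching a value qtype yields that value
// qtype unchanged, and presence is represented by Unit.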
class ScalarShapeQType final : public ShapeQType {
public:
ScalarShapeQType() : ShapeQType(meta::type<ScalarShape>(), "SCALAR_SHAPE") {}
absl::StatusOr<QTypePtr> WithValueQType(QTypePtr value_qtype) const final {
RETURN_IF_ERROR(EnsureIsBaseType(value_qtype));
return value_qtype;
}
QTypePtr presence_qtype() const final { return GetQType<Unit>(); }
};
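// Shape qtype of an optional scalar: attaching a value qtype yields the
// corresponding optional qtype, and presence is represented by optional Unit.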
class OptionalScalarShapeQType final : public ShapeQType {
public:
OptionalScalarShapeQType()
: ShapeQType(meta::type<OptionalScalarShape>(), "OPTIONAL_SCALAR_SHAPE") {
}
absl::StatusOr<QTypePtr> WithValueQType(QTypePtr value_qtype) const final {
RETURN_IF_ERROR(EnsureIsBaseType(value_qtype));
return ToOptionalQType(value_qtype);
}
QTypePtr presence_qtype() const final { return GetOptionalQType<Unit>(); }
};
}
QTypePtr QTypeTraits<ScalarShape>::type() {
static const absl::NoDestructor<ScalarShapeQType> shape_qtype;
return shape_qtype.get();
}
QTypePtr QTypeTraits<OptionalScalarShape>::type() {
static const absl::NoDestructor<OptionalScalarShapeQType> shape_qtype;
return shape_qtype.get();
}
ReprToken ReprTraits<ScalarShape>::operator()(
    const ScalarShape&) const {
return ReprToken{"scalar_shape"};
}
void FingerprintHasherTraits<ScalarShape>::operator()(
    FingerprintHasher* hasher, const ScalarShape&) const {
hasher->Combine(absl::string_view("scalar_shape"));
}
ReprToken ReprTraits<OptionalScalarShape>::operator()(
    const OptionalScalarShape&) const {
return ReprToken{"optional_scalar_shape"};
}
void FingerprintHasherTraits<OptionalScalarShape>::operator()(
    FingerprintHasher* hasher, const OptionalScalarShape&) const {
hasher->Combine(absl::string_view("optional_scalar_shape"));
}
} | #include "arolla/qtype/shape_qtype.h"
#include <cstdint>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status_matchers.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/optional_qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/util/bytes.h"
#include "arolla/util/repr.h"
#include "arolla/util/testing/repr_token_eq.h"
namespace arolla {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::arolla::testing::ReprTokenEq;
using ::testing::Eq;
using ::testing::NotNull;
TEST(ShapeQType, ScalarShape) {
auto scalar_shape = dynamic_cast<const ShapeQType*>(GetQType<ScalarShape>());
ASSERT_THAT(scalar_shape, NotNull());
EXPECT_THAT(scalar_shape->WithValueQType(GetQType<int64_t>()),
IsOkAndHolds(Eq(GetQType<int64_t>())));
EXPECT_THAT(scalar_shape->WithValueQType(GetQType<Bytes>()),
IsOkAndHolds(Eq(GetQType<Bytes>())));
EXPECT_THAT(scalar_shape->WithValueQType(GetOptionalQType<int64_t>()),
IsOkAndHolds(Eq(GetOptionalQType<int64_t>())));
}
TEST(ShapeQType, OptionalScalarShape) {
auto optional_shape =
dynamic_cast<const ShapeQType*>(GetQType<OptionalScalarShape>());
ASSERT_THAT(optional_shape, NotNull());
EXPECT_THAT(optional_shape->WithValueQType(GetQType<int64_t>()),
IsOkAndHolds(Eq(GetOptionalQType<int64_t>())));
EXPECT_THAT(optional_shape->WithValueQType(GetQType<Bytes>()),
IsOkAndHolds(Eq(GetOptionalQType<Bytes>())));
EXPECT_THAT(optional_shape->WithValueQType(GetOptionalQType<int64_t>()),
IsOkAndHolds(Eq(GetOptionalQType<int64_t>())));
}
TEST(ShapeQType, ScalarShapeRepr) {
EXPECT_THAT(GenReprToken(ScalarShape{}), ReprTokenEq("scalar_shape"));
}
TEST(ShapeQType, OptionalScalarShapeRepr) {
EXPECT_THAT(GenReprToken(OptionalScalarShape{}),
ReprTokenEq("optional_scalar_shape"));
}
TEST(ShapeQType, TypedValueScalarShapeRepr) {
EXPECT_THAT(TypedValue::FromValue(ScalarShape{}).GenReprToken(),
ReprTokenEq("scalar_shape"));
}
TEST(ShapeQType, TypedValueOptionalScalarShapeRepr) {
EXPECT_THAT(TypedValue::FromValue(OptionalScalarShape{}).GenReprToken(),
ReprTokenEq("optional_scalar_shape"));
}
TEST(ShapeQType, ScalarShapeFingerprint) {
EXPECT_THAT(TypedValue::FromValue(ScalarShape{}).GetFingerprint(),
Eq(TypedValue::FromValue(ScalarShape{}).GetFingerprint()));
}
TEST(ShapeQType, OptionalScalarShapeFingerprint) {
EXPECT_THAT(
TypedValue::FromValue(OptionalScalarShape{}).GetFingerprint(),
Eq(TypedValue::FromValue(OptionalScalarShape{}).GetFingerprint()));
}
TEST(ShapeQType, IsShapeQType) {
EXPECT_TRUE(IsShapeQType(GetQType<OptionalScalarShape>()));
EXPECT_FALSE(IsShapeQType(GetQType<int32_t>()));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/shape_qtype.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/shape_qtype_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
15822a77-db68-4c49-8b8f-b70bcaebfe2c | cpp | google/arolla | typed_slot | arolla/qtype/typed_slot.cc | arolla/qtype/typed_slot_test.cc | #include "arolla/qtype/typed_slot.h"
#include <algorithm>
#include <optional>
#include <string>
#include <typeinfo>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "arolla/memory/frame.h"
#include "arolla/qtype/qtype.h"
#include "arolla/util/demangle.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla {
namespace {
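// Formats a single "name{expected:..., actual:...}" entry for error messages.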
std::string TypeMismatchError(absl::string_view name, QTypePtr expected_type,
QTypePtr actual_type) {
return absl::StrFormat("%s{expected:%s, actual:%s}", name,
expected_type->name(), actual_type->name());
}
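// Combines the collected slot problems into a single FailedPrecondition error;
// returns OkStatus when all three lists are empty.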
absl::Status SlotTypesError(std::vector<std::string> missed_slots,
std::vector<std::string> type_mismatch,
std::vector<std::string> unwanted_slots) {
if (missed_slots.empty() && type_mismatch.empty() && unwanted_slots.empty()) {
return absl::OkStatus();
}
std::string msg = "slots/types match errors:";
if (!missed_slots.empty()) {
std::sort(missed_slots.begin(), missed_slots.end());
msg +=
absl::StrFormat("missed slots: %s;", absl::StrJoin(missed_slots, ","));
}
if (!type_mismatch.empty()) {
std::sort(type_mismatch.begin(), type_mismatch.end());
msg += absl::StrFormat("slot types mismatch: %s;",
absl::StrJoin(type_mismatch, ","));
}
if (!unwanted_slots.empty()) {
std::sort(unwanted_slots.begin(), unwanted_slots.end());
msg += absl::StrFormat("unwanted slots: %s;",
absl::StrJoin(unwanted_slots, ","));
}
return absl::FailedPreconditionError(msg);
}
}
std::vector<QTypePtr> SlotsToTypes(absl::Span<const TypedSlot> slots) {
std::vector<QTypePtr> types;
types.reserve(slots.size());
for (const auto& slot : slots) {
types.push_back(slot.GetType());
}
return types;
}
absl::Status TypedSlot::VerifyType(const std::type_info& tpe) const {
if (GetType()->type_info() != tpe) {
return absl::InvalidArgumentError(absl::StrFormat(
"slot type does not match C++ type: expected %s, got %s", TypeName(tpe),
TypeName(GetType()->type_info())));
}
return absl::OkStatus();
}
absl::flat_hash_map<std::string, QTypePtr> SlotsToTypes(
const absl::flat_hash_map<std::string, TypedSlot>& slots) {
absl::flat_hash_map<std::string, QTypePtr> types;
types.reserve(slots.size());
for (const auto& kv : slots) {
types[kv.first] = kv.second.GetType();
}
return types;
}
std::vector<TypedSlot> AddSlots(absl::Span<const QTypePtr> types,
FrameLayout::Builder* layout_builder) {
std::vector<TypedSlot> slots;
slots.reserve(types.size());
for (const auto* type : types) {
slots.push_back(AddSlot(type, layout_builder));
}
return slots;
}
std::vector<std::pair<std::string, TypedSlot>> AddNamedSlots(
absl::Span<const std::pair<std::string, QTypePtr>> types,
FrameLayout::Builder* layout_builder) {
std::vector<std::pair<std::string, TypedSlot>> slots;
slots.reserve(types.size());
for (const auto& [name, type] : types) {
slots.emplace_back(name, AddSlot(type, layout_builder));
}
return slots;
}
absl::flat_hash_map<std::string, TypedSlot> AddSlotsMap(
const absl::flat_hash_map<std::string, const QType*>& types,
FrameLayout::Builder* layout_builder) {
absl::flat_hash_map<std::string, TypedSlot> slots;
slots.reserve(types.size());
for (const auto& name_type : types) {
slots.insert({name_type.first, AddSlot(name_type.second, layout_builder)});
}
return slots;
}
absl::Status RegisterUnsafeSlots(absl::Span<const TypedSlot> slots,
FrameLayout::Builder* layout_builder) {
for (const auto& slot : slots) {
RETURN_IF_ERROR(layout_builder->RegisterUnsafeSlot(
slot.byte_offset(), slot.GetType()->type_layout().AllocSize(),
slot.GetType()->type_info()));
}
return absl::OkStatus();
}
absl::Status RegisterUnsafeSlotsMap(
const absl::flat_hash_map<std::string, TypedSlot>& slots,
FrameLayout::Builder* layout_builder) {
for (const auto& name_slot : slots) {
const auto& slot = name_slot.second;
RETURN_IF_ERROR(layout_builder->RegisterUnsafeSlot(
slot.byte_offset(), slot.GetType()->type_layout().AllocSize(),
slot.GetType()->type_info()));
}
return absl::OkStatus();
}
absl::StatusOr<std::vector<std::optional<TypedSlot>>>
MaybeFindSlotsAndVerifyTypes(
absl::Span<const std::pair<std::string, QTypePtr>> types_in_order,
const absl::flat_hash_map<std::string, TypedSlot>& slots) {
std::vector<std::string> type_mismatch;
std::vector<std::optional<TypedSlot>> res_slots;
res_slots.reserve(types_in_order.size());
for (const auto& [name, type] : types_in_order) {
auto it = slots.find(name);
if (it == slots.end()) {
res_slots.push_back(std::nullopt);
continue;
}
res_slots.push_back({it->second});
if (it->second.GetType() != type) {
type_mismatch.push_back(
TypeMismatchError(name, type, it->second.GetType()));
}
}
  RETURN_IF_ERROR(SlotTypesError(/*missed_slots=*/{}, std::move(type_mismatch),
                                 /*unwanted_slots=*/{}));
return {std::move(res_slots)};
}
absl::StatusOr<std::vector<TypedSlot>> FindSlotsAndVerifyTypes(
absl::Span<const std::pair<std::string, QTypePtr>> types_in_order,
const absl::flat_hash_map<std::string, TypedSlot>& slots) {
std::vector<std::string> missed_slots;
std::vector<std::string> type_mismatch;
std::vector<TypedSlot> res_slots;
res_slots.reserve(types_in_order.size());
for (const auto& [name, type] : types_in_order) {
auto it = slots.find(name);
if (it == slots.end()) {
missed_slots.push_back(name);
continue;
}
res_slots.push_back({it->second});
if (it->second.GetType() != type) {
type_mismatch.push_back(
TypeMismatchError(name, type, it->second.GetType()));
}
}
RETURN_IF_ERROR(SlotTypesError(std::move(missed_slots),
std::move(type_mismatch),
                                 /*unwanted_slots=*/{}));
return {std::move(res_slots)};
}
absl::Status VerifySlotTypes(
const absl::flat_hash_map<std::string, QTypePtr>& types,
const absl::flat_hash_map<std::string, TypedSlot>& slots,
bool verify_unwanted_slots, bool verify_missed_slots) {
std::vector<std::string> missed_slots;
std::vector<std::string> type_mismatch;
std::vector<std::string> unwanted_slots;
for (const auto& [name, type] : types) {
auto it = slots.find(name);
if (it == slots.end()) {
if (verify_missed_slots) {
missed_slots.push_back(name);
}
continue;
}
if (it->second.GetType() != type) {
type_mismatch.push_back(
TypeMismatchError(name, type, it->second.GetType()));
}
}
if (verify_unwanted_slots) {
for (const auto& [name, _] : slots) {
if (!types.contains(name)) {
unwanted_slots.push_back(name);
}
}
}
return SlotTypesError(std::move(missed_slots), std::move(type_mismatch),
std::move(unwanted_slots));
}
} | #include "arolla/qtype/typed_slot.h"
#include <cstdint>
#include <optional>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/memory_allocation.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/util/bytes.h"
namespace arolla {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::testing::ElementsAre;
using ::testing::Eq;
using ::testing::HasSubstr;
using ::testing::MatchesRegex;
using ::testing::Pair;
using ::testing::StrEq;
using ::testing::UnorderedElementsAre;
TEST(TypedSlotTest, Copy) {
FrameLayout::Builder layout_builder;
auto slot = layout_builder.AddSlot<int>();
auto slot2 = layout_builder.AddSlot<float>();
auto typed_slot = TypedSlot::FromSlot(slot);
auto typed_slot2 = TypedSlot::FromSlot(slot2);
auto typed_slot_copy = typed_slot;
EXPECT_EQ(typed_slot.GetType(), typed_slot_copy.GetType());
EXPECT_EQ(typed_slot, typed_slot_copy);
typed_slot_copy = typed_slot2;
EXPECT_EQ(typed_slot2.GetType(), typed_slot_copy.GetType());
EXPECT_EQ(typed_slot2, typed_slot_copy);
}
TEST(TypedSlotTest, PrimitiveTypes) {
FrameLayout::Builder layout_builder;
auto slot = layout_builder.AddSlot<int32_t>();
auto typed_slot = TypedSlot::FromSlot(slot);
EXPECT_EQ(typed_slot.GetType(), GetQType<int32_t>());
FrameLayout::Slot<int32_t> new_slot = typed_slot.ToSlot<int32_t>().value();
EXPECT_EQ(slot.byte_offset(), new_slot.byte_offset());
EXPECT_THAT(typed_slot.ToSlot<int64_t>().status(),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(TypedSlotTest, SlotsToTypes) {
FrameLayout::Builder layout_builder;
auto slot1 = layout_builder.AddSlot<int32_t>();
auto slot2 = layout_builder.AddSlot<float>();
auto typed_slot1 = TypedSlot::FromSlot(slot1);
auto typed_slot2 = TypedSlot::FromSlot(slot2);
EXPECT_THAT(SlotsToTypes(std::vector<TypedSlot>{typed_slot1, typed_slot2}),
ElementsAre(GetQType<int32_t>(), GetQType<float>()));
EXPECT_THAT(SlotsToTypes(absl::flat_hash_map<std::string, TypedSlot>{
{"X", typed_slot1}, {"Y", typed_slot2}}),
UnorderedElementsAre(Pair("X", GetQType<int32_t>()),
Pair("Y", GetQType<float>())));
}
TEST(TypedSlotTest, UnsafeFromOffset) {
const QType* i32 = GetQType<int32_t>();
auto typed_slot = TypedSlot::UnsafeFromOffset(i32, 10);
EXPECT_EQ(typed_slot.byte_offset(), 10);
EXPECT_EQ(typed_slot.GetType(), i32);
}
TEST(TypedSlotTest, AddSlots) {
FrameLayout::Builder layout_builder;
const QType* i32 = GetQType<int32_t>();
const QType* i64 = GetQType<int64_t>();
std::vector<TypedSlot> slots = AddSlots({i32, i64}, &layout_builder);
ASSERT_EQ(slots.size(), 2);
EXPECT_EQ(i32, slots[0].GetType());
EXPECT_EQ(i64, slots[1].GetType());
}
TEST(TypedSlotTest, AddNamedSlots) {
FrameLayout::Builder layout_builder;
const QType* i32 = GetQType<int32_t>();
const QType* i64 = GetQType<int64_t>();
std::vector<std::pair<std::string, TypedSlot>> slots =
AddNamedSlots({{"c", i32}, {"b", i64}}, &layout_builder);
ASSERT_EQ(slots.size(), 2);
EXPECT_EQ("c", slots[0].first);
EXPECT_EQ(i32, slots[0].second.GetType());
EXPECT_EQ("b", slots[1].first);
EXPECT_EQ(i64, slots[1].second.GetType());
}
TEST(TypedSlotTest, AddSlotsMap) {
FrameLayout::Builder layout_builder;
const QType* i32 = GetQType<int32_t>();
const QType* i64 = GetQType<int64_t>();
absl::flat_hash_map<std::string, TypedSlot> slots =
AddSlotsMap({{"a", i32}, {"b", i64}}, &layout_builder);
ASSERT_EQ(slots.size(), 2);
EXPECT_EQ(i32, slots.at("a").GetType());
EXPECT_EQ(i64, slots.at("b").GetType());
}
TEST(TypedSlotTest, RegisterUnsafeSlots) {
FrameLayout::Builder layout_builder;
layout_builder.AddSlot<int64_t>();
const QType* i32 = GetQType<int32_t>();
const QType* f32 = GetQType<float>();
auto slot_i32 = TypedSlot::UnsafeFromOffset(i32, 0);
auto slot_f32 = TypedSlot::UnsafeFromOffset(f32, 4);
ASSERT_OK(RegisterUnsafeSlots({slot_i32, slot_f32}, &layout_builder));
#ifndef NDEBUG
ASSERT_FALSE(RegisterUnsafeSlots({slot_i32}, &layout_builder).ok());
#endif
auto layout = std::move(layout_builder).Build();
layout.HasField(0, typeid(int32_t));
layout.HasField(4, typeid(float));
}
TEST(TypedSlotTest, RegisterUnsafeSlotsMap) {
FrameLayout::Builder layout_builder;
layout_builder.AddSlot<int64_t>();
const QType* i32 = GetQType<int32_t>();
const QType* f32 = GetQType<float>();
auto slot_i32 = TypedSlot::UnsafeFromOffset(i32, 0);
auto slot_f32 = TypedSlot::UnsafeFromOffset(f32, 4);
ASSERT_OK(RegisterUnsafeSlotsMap({{"a", slot_i32}, {"b", slot_f32}},
&layout_builder));
#ifndef NDEBUG
ASSERT_FALSE(RegisterUnsafeSlotsMap({{"a", slot_i32}}, &layout_builder).ok());
#endif
auto layout = std::move(layout_builder).Build();
layout.HasField(0, typeid(int32_t));
layout.HasField(4, typeid(float));
}
TEST(TypedSlotTest, GetSubslots) {
FrameLayout::Builder layout_builder;
auto opt_float_slot = layout_builder.AddSlot<OptionalValue<float>>();
auto opt_int32_slot = layout_builder.AddSlot<OptionalValue<int32_t>>();
auto float64_slot = layout_builder.AddSlot<double>();
FrameLayout layout = std::move(layout_builder).Build();
TypedSlot opt_float_tslot = TypedSlot::FromSlot(opt_float_slot);
TypedSlot opt_int32_tslot = TypedSlot::FromSlot(opt_int32_slot);
TypedSlot float64_tslot = TypedSlot::FromSlot(float64_slot);
EXPECT_EQ(opt_float_tslot.SubSlotCount(), 2);
EXPECT_EQ(opt_int32_tslot.SubSlotCount(), 2);
EXPECT_EQ(float64_tslot.SubSlotCount(), 0);
EXPECT_EQ(opt_float_tslot.SubSlot(0),
TypedSlot::FromSlot(opt_float_slot.GetSubslot<0>()));
EXPECT_EQ(opt_float_tslot.SubSlot(1),
TypedSlot::FromSlot(opt_float_slot.GetSubslot<1>()));
EXPECT_EQ(opt_int32_tslot.SubSlot(0),
TypedSlot::FromSlot(opt_int32_slot.GetSubslot<0>()));
EXPECT_EQ(opt_int32_tslot.SubSlot(1),
TypedSlot::FromSlot(opt_int32_slot.GetSubslot<1>()));
MemoryAllocation alloc_holder(&layout);
FramePtr frame = alloc_holder.frame();
frame.Set(opt_float_slot, OptionalValue<float>(1.0));
frame.Set(opt_int32_slot, OptionalValue<int32_t>());
auto float_present_slot = opt_float_tslot.SubSlot(0).ToSlot<bool>().value();
auto int32_present_slot = opt_int32_tslot.SubSlot(0).ToSlot<bool>().value();
EXPECT_EQ(frame.Get(float_present_slot), true);
EXPECT_EQ(frame.Get(int32_present_slot), false);
auto int32_value_slot = opt_int32_tslot.SubSlot(1).ToSlot<int32_t>().value();
frame.Set(int32_present_slot, true);
frame.Set(int32_value_slot, 2);
EXPECT_EQ(frame.Get(opt_int32_slot), OptionalValue<int32_t>(2));
}
TEST(TypedSlotTest, DebugPrintTypedSlot) {
FrameLayout::Builder layout_builder;
auto slot1 = layout_builder.AddSlot<int32_t>();
auto slot2 = layout_builder.AddSlot<float>();
auto slot3 = layout_builder.AddSlot<Bytes>();
auto typed_slot1 = TypedSlot::FromSlot(slot1);
auto typed_slot2 = TypedSlot::FromSlot(slot2);
auto typed_slot3 = TypedSlot::FromSlot(slot3);
std::stringstream buffer;
buffer << "typed_slot1 is: " << typed_slot1 << ", ";
buffer << "typed_slot2 is: " << typed_slot2 << ", ";
buffer << "typed_slot3 is: " << typed_slot3 << ".";
EXPECT_THAT(buffer.str(), StrEq("typed_slot1 is: TypedSlot<INT32>@0, "
"typed_slot2 is: TypedSlot<FLOAT32>@4, "
"typed_slot3 is: TypedSlot<BYTES>@8."));
}
TEST(TypedSlotTest, ToSlots) {
FrameLayout::Builder layout_builder;
auto slot1 = layout_builder.AddSlot<int32_t>();
auto slot2 = layout_builder.AddSlot<float>();
ASSERT_OK_AND_ASSIGN(
auto slots_tuple,
(TypedSlot::ToSlots<int32_t, float>(
{TypedSlot::FromSlot(slot1), TypedSlot::FromSlot(slot2)})));
EXPECT_THAT(std::get<0>(slots_tuple).byte_offset(), Eq(slot1.byte_offset()));
EXPECT_THAT(std::get<1>(slots_tuple).byte_offset(), Eq(slot2.byte_offset()));
EXPECT_THAT(TypedSlot::ToSlots<float>({TypedSlot::FromSlot(slot1)}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("slot type does not match C++ type")));
EXPECT_THAT(
(TypedSlot::ToSlots<int32_t, float>({TypedSlot::FromSlot(slot1)})),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("wrong number of slots: expected 2, got 1")));
}
TEST(TypedSlotTest, MaybeFindSlotsAndVerifyTypesErrors) {
FrameLayout::Builder layout_builder;
auto float_slot = layout_builder.AddSlot<float>();
EXPECT_THAT(
MaybeFindSlotsAndVerifyTypes({{"a", GetQType<int>()}},
{{"a", TypedSlot::FromSlot(float_slot)}}),
StatusIs(absl::StatusCode::kFailedPrecondition,
MatchesRegex(".*slot types "
"mismatch.*a.*expected:INT32.*actual:FLOAT32.*")));
}
TEST(TypedSlotTest, MaybeFindSlotsAndVerifyTypes) {
FrameLayout::Builder layout_builder;
auto int_slot = layout_builder.AddSlot<int>();
auto float_slot = layout_builder.AddSlot<float>();
EXPECT_THAT(
MaybeFindSlotsAndVerifyTypes(
{{"a", GetQType<int>()}, {"c", GetQType<float>()}},
{{"b", TypedSlot::FromSlot(float_slot)},
{"a", TypedSlot::FromSlot(int_slot)}}),
IsOkAndHolds(ElementsAre(TypedSlot::FromSlot(int_slot), std::nullopt)));
}
TEST(TypedSlotTest, FindSlotsAndVerifyTypesErrors) {
FrameLayout::Builder layout_builder;
auto float_slot = layout_builder.AddSlot<float>();
EXPECT_THAT(
FindSlotsAndVerifyTypes({{"NAME", GetQType<int>()}},
{{"NAME", TypedSlot::FromSlot(float_slot)}}),
StatusIs(
absl::StatusCode::kFailedPrecondition,
MatchesRegex(".*slot types "
"mismatch.*NAME.*expected:INT32.*actual:FLOAT32.*")));
EXPECT_THAT(FindSlotsAndVerifyTypes({{"FAKE", GetQType<int>()}},
{{"b", TypedSlot::FromSlot(float_slot)}}),
StatusIs(absl::StatusCode::kFailedPrecondition,
MatchesRegex(".*missed slots:.*FAKE.*")));
EXPECT_THAT(
FindSlotsAndVerifyTypes(
{{"NAME", GetQType<int>()}, {"FAKE", GetQType<int>()}},
{{"NAME", TypedSlot::FromSlot(float_slot)}}),
StatusIs(
absl::StatusCode::kFailedPrecondition,
MatchesRegex(".*missed slots:.*FAKE.*slot types mismatch:.*NAME.*")));
}
TEST(TypedSlotTest, FindSlotsAndVerifyTypes) {
FrameLayout::Builder layout_builder;
auto int_slot = layout_builder.AddSlot<int>();
auto float_slot = layout_builder.AddSlot<float>();
  auto int32_slot = layout_builder.AddSlot<int32_t>();
EXPECT_THAT(FindSlotsAndVerifyTypes(
{{"c", GetQType<float>()}, {"a", GetQType<int>()}},
{{"c", TypedSlot::FromSlot(float_slot)},
{"b", TypedSlot::FromSlot(int8_slot)},
{"a", TypedSlot::FromSlot(int_slot)}}),
IsOkAndHolds(ElementsAre(TypedSlot::FromSlot(float_slot),
TypedSlot::FromSlot(int_slot))));
}
TEST(TypedSlotTest, VerifySlotTypes) {
FrameLayout::Builder layout_builder;
auto int_slot = layout_builder.AddSlot<int>();
auto float_slot = layout_builder.AddSlot<float>();
EXPECT_OK(VerifySlotTypes({{"a", GetQType<int>()}, {"c", GetQType<float>()}},
{{"c", TypedSlot::FromSlot(float_slot)},
{"a", TypedSlot::FromSlot(int_slot)}}));
EXPECT_OK(VerifySlotTypes({{"a", GetQType<int>()}, {"c", GetQType<float>()}},
{{"c", TypedSlot::FromSlot(float_slot)}},
                            /*verify_unwanted_slots=*/true,
                            /*verify_missed_slots=*/false));
EXPECT_OK(VerifySlotTypes({{"a", GetQType<int>()}},
{{"c", TypedSlot::FromSlot(float_slot)},
{"a", TypedSlot::FromSlot(int_slot)}},
                            /*verify_unwanted_slots=*/false));
EXPECT_THAT(
VerifySlotTypes({{"a", GetQType<int>()}},
{{"a", TypedSlot::FromSlot(float_slot)}}),
StatusIs(absl::StatusCode::kFailedPrecondition,
MatchesRegex(".*slot types "
"mismatch.*a.*expected:INT32.*actual:FLOAT32.*")));
EXPECT_THAT(
VerifySlotTypes({{"a", GetQType<int>()}, {"c", GetQType<float>()}},
{{"a", TypedSlot::FromSlot(float_slot)}}),
StatusIs(absl::StatusCode::kFailedPrecondition,
MatchesRegex(".*missed slots:.*c.*slot types mismatch:.*a.*")));
EXPECT_THAT(
VerifySlotTypes({{"d", GetQType<int>()}},
{{"a", TypedSlot::FromSlot(float_slot)}}),
StatusIs(absl::StatusCode::kFailedPrecondition,
MatchesRegex(".*missed slots:.*d.*unwanted slots:.*a.*")));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/typed_slot.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/typed_slot_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
df710905-4c94-4794-b5f8-5d19921229f4 | cpp | google/arolla | unspecified_qtype | arolla/qtype/unspecified_qtype.cc | arolla/qtype/unspecified_qtype_test.cc | #include "arolla/qtype/unspecified_qtype.h"
#include "absl/base/no_destructor.h"
#include "absl/strings/string_view.h"
#include "arolla/memory/frame.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/repr.h"
namespace arolla {
namespace {
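// Empty value type backing the UNSPECIFIED qtype.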
struct Unspecified {};
class UnspecifiedQType final : public QType {
public:
UnspecifiedQType()
: QType(ConstructorArgs{.name = "UNSPECIFIED",
.type_info = typeid(Unspecified),
.type_layout = MakeTypeLayout<Unspecified>()}) {}
ReprToken UnsafeReprToken(const void* source) const override {
return ReprToken{"unspecified"};
}
  void UnsafeCopy(const void*, void*) const override {}
void UnsafeCombineToFingerprintHasher(
      const void*, FingerprintHasher* hasher) const override {
hasher->Combine(absl::string_view("::arolla::UnspecifiedQValue"));
}
};
}
QTypePtr GetUnspecifiedQType() {
static const absl::NoDestructor<UnspecifiedQType> result;
return result.get();
}
const TypedValue& GetUnspecifiedQValue() {
static const absl::NoDestructor<TypedValue> result(
TypedValue::UnsafeFromTypeDefaultConstructed(GetUnspecifiedQType()));
return *result;
}
} | #include "arolla/qtype/unspecified_qtype.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/util/testing/repr_token_eq.h"
namespace arolla {
namespace {
using ::arolla::testing::ReprTokenEq;
TEST(UnspecifiedQTypeTest, UnspecifiedQType) {
const auto unspecified_qtype = GetUnspecifiedQType();
EXPECT_EQ(unspecified_qtype->name(), "UNSPECIFIED");
EXPECT_EQ(unspecified_qtype->type_layout().AllocSize(), 1);
EXPECT_EQ(unspecified_qtype->type_layout().AllocAlignment().value, 1);
EXPECT_TRUE(unspecified_qtype->type_fields().empty());
EXPECT_EQ(unspecified_qtype->value_qtype(), nullptr);
}
TEST(UnspecifiedQTypeTest, UnspecifiedQValue) {
const auto unspecified_qvalue = GetUnspecifiedQValue();
EXPECT_EQ(unspecified_qvalue.GetType(), GetUnspecifiedQType());
EXPECT_THAT(unspecified_qvalue.GenReprToken(), ReprTokenEq("unspecified"));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/unspecified_qtype.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/unspecified_qtype_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
4ee02b11-1f7f-48c0-be99-6a067fa5e82a | cpp | google/arolla | tuple_qtype | arolla/qtype/tuple_qtype.cc | arolla/qtype/tuple_qtype_test.cc | #include "arolla/qtype/tuple_qtype.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <optional>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/no_destructor.h"
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "arolla/memory/frame.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/derived_qtype.h"
#include "arolla/qtype/named_field_qtype.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/typed_ref.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/util/fast_dynamic_downcast_final.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/repr.h"
#include "arolla/util/string.h"
#include "arolla/util/unit.h"
namespace arolla {
namespace {
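// Marker type used as the C++ type_info of every tuple qtype; tuples have no
// dedicated C++ value struct, their fields live directly in the frame layout.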
class Tuple {};
class TupleQType final : public QType {
public:
static std::unique_ptr<TupleQType> Make(
absl::Span<const QTypePtr> field_qtypes) {
FrameLayout::Builder layout_builder;
std::vector<TypedSlot> fields;
fields.reserve(field_qtypes.size());
for (auto field_qtype : field_qtypes) {
fields.push_back(AddSlot(field_qtype, &layout_builder));
}
bool needTupleTag = true;
for (const auto& field : fields) {
if (field.byte_offset() == 0 &&
field.GetType()->type_layout().HasField(0, typeid(Tuple))) {
needTupleTag = false;
break;
}
}
if (needTupleTag) {
auto status = layout_builder.RegisterUnsafeSlot(0, 0, typeid(Tuple));
if (!status.ok()) {
LOG(FATAL) << status;
}
}
return std::make_unique<TupleQType>(
field_qtypes, std::move(layout_builder).Build(), std::move(fields));
}
TupleQType(absl::Span<const QTypePtr> field_qtypes, FrameLayout&& layout,
std::vector<TypedSlot>&& fields)
: QType(ConstructorArgs{
.name = absl::StrCat("tuple<", JoinTypeNames(field_qtypes), ">"),
.type_info = typeid(Tuple),
.type_layout = std::move(layout),
.type_fields = std::move(fields),
.qtype_specialization_key = "::arolla::TupleQType",
}),
field_qtypes_(field_qtypes.begin(), field_qtypes.end()) {}
absl::Span<const QTypePtr> field_qtypes() const { return field_qtypes_; }
void UnsafeCopy(const void* source, void* destination) const override {
ConstFramePtr source_frame(source, &type_layout());
FramePtr destination_frame(destination, &type_layout());
for (const auto& field : type_fields()) {
field.CopyTo(source_frame, field, destination_frame);
}
}
void UnsafeCombineToFingerprintHasher(
const void* source, FingerprintHasher* hasher) const override {
hasher->Combine(type_fields().size());
for (const auto& field : type_fields()) {
field.GetType()->UnsafeCombineToFingerprintHasher(
static_cast<const char*>(source) + field.byte_offset(), hasher);
}
}
ReprToken UnsafeReprToken(const void* source) const override {
ConstFramePtr frame_ptr(source, &type_layout());
std::ostringstream result;
result << "(";
bool first = true;
for (const auto& field : type_fields()) {
result << NonFirstComma(first)
<< TypedRef::FromSlot(field, frame_ptr).Repr();
}
result << ")";
return ReprToken{std::move(result).str()};
}
private:
std::vector<QTypePtr> field_qtypes_;
};
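// Global registry that interns TupleQType instances, so equal field qtype
// lists always map to the same QTypePtr.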
class TupleQTypeRegistry {
public:
static TupleQTypeRegistry* instance() {
static absl::NoDestructor<TupleQTypeRegistry> result;
return result.get();
}
QTypePtr GetQType(absl::Span<const QTypePtr> field_qtypes)
ABSL_LOCKS_EXCLUDED(lock_) {
{
absl::ReaderMutexLock guard(&lock_);
if (const auto it = registry_.find(field_qtypes); it != registry_.end()) {
return it->second.get();
}
}
auto tuple_qtype = TupleQType::Make(field_qtypes);
absl::MutexLock guard(&lock_);
return registry_
.try_emplace(tuple_qtype->field_qtypes(), std::move(tuple_qtype))
.first->second.get();
}
private:
absl::Mutex lock_;
absl::flat_hash_map<absl::Span<const QTypePtr>, std::unique_ptr<TupleQType>>
registry_ ABSL_GUARDED_BY(lock_);
};
template <typename T>
TypedValue MakeTupleImpl(absl::Span<const T> fields) {
std::vector<QTypePtr> field_types;
field_types.reserve(fields.size());
for (const auto& field : fields) {
field_types.push_back(field.GetType());
}
auto status_or_result =
TypedValue::FromFields(MakeTupleQType(field_types), fields);
DCHECK_OK(status_or_result.status());
return status_or_result.value_or(TypedValue::FromValue(Unit{}));
}
template <typename T>
absl::StatusOr<TypedValue> MakeNamedTupleImpl(
absl::Span<const std::string> field_names, absl::Span<const T> fields) {
std::vector<QTypePtr> field_qtypes;
field_qtypes.reserve(fields.size());
for (const auto& field : fields) {
field_qtypes.push_back(field.GetType());
}
ASSIGN_OR_RETURN(
auto named_tuple_qtype,
MakeNamedTupleQType(field_names, MakeTupleQType(field_qtypes)));
absl::StatusOr<TypedValue> result =
TypedValue::FromFields(named_tuple_qtype, fields);
DCHECK_OK(result.status());
return std::move(result).value_or(TypedValue::FromValue(Unit{}));
}
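// Builds the namedtuple qtype name, listing at most the first five
// "name=TYPE" pairs and summarizing the remainder as "[N fields]".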
std::string NamedTupleQTypeName(absl::Span<const std::string> field_names,
QTypePtr tuple_qtype) {
constexpr size_t kMaxFieldNames = 5;
std::ostringstream o;
o << "namedtuple<";
size_t fields_to_report = std::min(field_names.size(), kMaxFieldNames);
for (size_t i = 0; i != fields_to_report; ++i) {
if (i != 0) {
o << ",";
}
o << field_names[i] << "="
<< tuple_qtype->type_fields()[i].GetType()->name();
}
if (fields_to_report < field_names.size()) {
o << ", [" << field_names.size() - fields_to_report << " fields]";
}
o << ">";
return o.str();
}
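// Derived qtype that attaches field names to a tuple qtype and exposes
// name-based field lookup through NamedFieldQTypeInterface.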
class NamedTupleQType final : public BasicDerivedQType,
public NamedFieldQTypeInterface {
public:
NamedTupleQType(absl::Span<const std::string> field_names,
QTypePtr tuple_qtype)
: BasicDerivedQType(ConstructorArgs{
.name = NamedTupleQTypeName(field_names, tuple_qtype),
.base_qtype = tuple_qtype,
.qtype_specialization_key = "::arolla::NamedTupleQType",
}),
field_names_(field_names.begin(), field_names.end()) {
name2index_.reserve(field_names.size());
int64_t id = 0;
for (const std::string& name : field_names_) {
name2index_.emplace(name, id++);
}
}
absl::Span<const std::string> GetFieldNames() const final {
return field_names_;
}
std::optional<int64_t> GetFieldIndexByName(
absl::string_view field_name) const final {
if (auto it = name2index_.find(field_name); it != name2index_.end()) {
return it->second;
}
return std::nullopt;
}
private:
absl::flat_hash_map<absl::string_view, int64_t> name2index_;
std::vector<std::string> field_names_;
};
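// Global registry that interns NamedTupleQType instances keyed by
// (field names, base tuple qtype).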
class NamedTupleQTypeRegistry {
public:
static NamedTupleQTypeRegistry* instance() {
static absl::NoDestructor<NamedTupleQTypeRegistry> result;
return result.get();
}
QTypePtr GetQType(absl::Span<const std::string> field_names,
QTypePtr tuple_qtype) ABSL_LOCKS_EXCLUDED(lock_) {
{
absl::ReaderMutexLock guard(&lock_);
if (const auto it = registry_.find({field_names, tuple_qtype});
it != registry_.end()) {
return it->second.get();
}
}
auto named_tuple_qtype =
std::make_unique<NamedTupleQType>(field_names, tuple_qtype);
absl::MutexLock guard(&lock_);
return registry_
.try_emplace({named_tuple_qtype->GetFieldNames(), tuple_qtype},
std::move(named_tuple_qtype))
.first->second.get();
}
private:
using RegistryKey = std::pair<absl::Span<const std::string>, QTypePtr>;
absl::Mutex lock_;
absl::flat_hash_map<RegistryKey, std::unique_ptr<NamedTupleQType>> registry_
ABSL_GUARDED_BY(lock_);
};
}
bool IsTupleQType(const QType* qtype) {
return fast_dynamic_downcast_final<const TupleQType*>(qtype) != nullptr;
}
QTypePtr MakeTupleQType(absl::Span<const QTypePtr> field_qtypes) {
return TupleQTypeRegistry::instance()->GetQType(field_qtypes);
}
TypedValue MakeTuple(absl::Span<const TypedRef> fields) {
return MakeTupleImpl(fields);
}
TypedValue MakeTuple(absl::Span<const TypedValue> fields) {
return MakeTupleImpl(fields);
}
absl::StatusOr<TypedValue> MakeNamedTuple(
absl::Span<const std::string> field_names,
absl::Span<const TypedRef> fields) {
return MakeNamedTupleImpl(field_names, fields);
}
absl::StatusOr<TypedValue> MakeNamedTuple(
absl::Span<const std::string> field_names,
absl::Span<const TypedValue> fields) {
return MakeNamedTupleImpl(field_names, fields);
}
bool IsNamedTupleQType(const QType* qtype) {
return fast_dynamic_downcast_final<const NamedTupleQType*>(qtype) != nullptr;
}
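// Example of constructing a namedtuple qtype (mirrors tuple_qtype_test.cc):
//   auto tuple_qtype = MakeTupleQType({GetQType<float>(), GetQType<int>()});
//   ASSERT_OK_AND_ASSIGN(auto named_qtype,
//                        MakeNamedTupleQType({"a", "b"}, tuple_qtype));
//   // named_qtype->name() == "namedtuple<a=FLOAT32,b=INT32>"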
absl::StatusOr<QTypePtr> MakeNamedTupleQType(
absl::Span<const std::string> field_names, QTypePtr tuple_qtype) {
if (!IsTupleQType(tuple_qtype)) {
return absl::InvalidArgumentError(absl::StrFormat(
"incorrect NamedTupleQType: expected tuple, found %s",
tuple_qtype != nullptr ? tuple_qtype->name() : std::string("nullptr")));
}
if (field_names.size() != tuple_qtype->type_fields().size()) {
return absl::InvalidArgumentError(absl::StrFormat(
"incorrect NamedTupleQType #field_names != #fields: %d vs %d",
field_names.size(), tuple_qtype->type_fields().size()));
}
absl::flat_hash_set<absl::string_view> name_set;
for (const std::string& name : field_names) {
if (!name_set.insert(name).second) {
return absl::InvalidArgumentError(absl::StrFormat(
"incorrect NamedTupleQType: field name %s is duplicated", name));
}
}
return NamedTupleQTypeRegistry::instance()->GetQType(field_names,
tuple_qtype);
}
} | #include "arolla/qtype/tuple_qtype.h"
#include <cstddef>
#include <cstdint>
#include <optional>
#include <string>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/derived_qtype.h"
#include "arolla/qtype/named_field_qtype.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/typed_ref.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/util/bytes.h"
#include "arolla/util/testing/repr_token_eq.h"
namespace arolla::testing {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::arolla::testing::ReprTokenEq;
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
using ::testing::Eq;
using ::testing::HasSubstr;
using ::testing::IsEmpty;
using ::testing::MatchesRegex;
TEST(TupleQType, Empty) {
auto qtype = MakeTupleQType({});
EXPECT_TRUE(IsTupleQType(qtype));
EXPECT_EQ(qtype->name(), "tuple<>");
EXPECT_EQ(qtype->type_layout().AllocSize(), 0);
EXPECT_EQ(qtype->type_layout().AllocAlignment().value, 1);
auto value = MakeTupleFromFields();
EXPECT_EQ(value.GetType(), qtype);
EXPECT_EQ(value.GetFieldCount(), 0);
EXPECT_THAT(value.GenReprToken(), ReprTokenEq("()"));
}
TEST(TupleQType, EmptyRegression) {
auto qtype_0 = MakeTupleQType({});
auto qtype_1 = MakeTupleQType({qtype_0, qtype_0});
EXPECT_TRUE(IsTupleQType(qtype_1));
EXPECT_EQ(qtype_1->name(), "tuple<tuple<>,tuple<>>");
EXPECT_EQ(qtype_1->type_layout().AllocSize(), 0);
EXPECT_EQ(qtype_1->type_layout().AllocAlignment().value, 1);
auto value_0 = MakeTupleFromFields();
auto value_1 = MakeTupleFromFields(value_0, value_0);
EXPECT_EQ(value_1.GetType(), qtype_1);
auto copy_1 = TypedValue(value_1.AsRef());
EXPECT_EQ(value_1.GetFingerprint(), copy_1.GetFingerprint());
EXPECT_THAT(value_1.GenReprToken(), ReprTokenEq("((), ())"));
}
TEST(TupleQType, Trivial) {
auto qtype = MakeTupleQType(
{GetQType<int32_t>(), GetQType<double>(), GetQType<Bytes>()});
EXPECT_TRUE(IsTupleQType(qtype));
EXPECT_EQ(qtype->name(), "tuple<INT32,FLOAT64,BYTES>");
auto value = MakeTupleFromFields(int32_t{34}, double{17}, Bytes("Hello"));
EXPECT_EQ(value.GetType(), qtype);
EXPECT_EQ(value.GetFieldCount(), 3);
EXPECT_THAT(value.GetField(0).As<int32_t>(), IsOkAndHolds(int32_t{34}));
EXPECT_THAT(value.GetField(1).As<double>(), IsOkAndHolds(double{17.}));
ASSERT_OK_AND_ASSIGN(Bytes bytes, value.GetField(2).As<Bytes>());
EXPECT_THAT(bytes, Eq(Bytes("Hello")));
EXPECT_THAT(value.GenReprToken(), ReprTokenEq("(34, float64{17}, b'Hello')"));
}
TEST(TupleQType, CopyTo) {
auto qtype = MakeTupleQType(
{GetQType<int32_t>(), GetQType<double>(), GetQType<Bytes>()});
EXPECT_TRUE(IsTupleQType(qtype));
EXPECT_EQ(qtype->name(), "tuple<INT32,FLOAT64,BYTES>");
auto value = MakeTupleFromFields(int32_t{34}, double{17}, Bytes("Hello"));
EXPECT_THAT(value.GetField(0).As<int32_t>(), IsOkAndHolds(int32_t{34}));
EXPECT_THAT(value.GetField(1).As<double>(), IsOkAndHolds(double{17.}));
auto copy = TypedValue(value.AsRef());
EXPECT_EQ(value.GetFingerprint(), copy.GetFingerprint());
EXPECT_THAT(copy.GenReprToken(), ReprTokenEq("(34, float64{17}, b'Hello')"));
}
TEST(TupleQType, QValueFromFields) {
auto qtype = MakeTupleQType({GetQType<int>(), GetQType<float>()});
{
ASSERT_OK_AND_ASSIGN(auto qvalue, TypedValue::FromFields(
qtype, {TypedRef::FromValue(2),
TypedRef::FromValue(3.14f)}));
EXPECT_THAT(qvalue.GetField(0).As<int>(), IsOkAndHolds(2));
EXPECT_THAT(qvalue.GetField(1).As<float>(), IsOkAndHolds(3.14f));
}
{
ASSERT_OK_AND_ASSIGN(
auto qvalue,
TypedValue::FromFields(
qtype, {TypedValue::FromValue(2), TypedValue::FromValue(3.14f)}));
EXPECT_THAT(qvalue.GetField(0).As<int>(), IsOkAndHolds(2));
EXPECT_THAT(qvalue.GetField(1).As<float>(), IsOkAndHolds(3.14f));
}
{
EXPECT_THAT(TypedValue::FromFields(qtype, {TypedValue::FromValue(2)}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("expected 2 values, got 1; "
"compound_qtype=tuple<INT32,FLOAT32>")));
}
{
EXPECT_THAT(TypedValue::FromFields(qtype, {TypedValue::FromValue(2),
TypedValue::FromValue(3)}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("expected fields[1]: FLOAT32, got INT32; "
"compound_qtype=tuple<INT32,FLOAT32>")));
}
}
TEST(NamedTupleQType, Empty) {
auto tuple_qtype = MakeTupleQType({});
ASSERT_OK_AND_ASSIGN(auto qtype, MakeNamedTupleQType({}, tuple_qtype));
EXPECT_TRUE(IsNamedTupleQType(qtype));
EXPECT_THAT(GetFieldNames(qtype), IsEmpty());
}
TEST(NamedTupleQType, Trivial) {
auto tuple_qtype = MakeTupleQType(
{GetQType<int32_t>(), GetQType<double>(), GetQType<Bytes>()});
ASSERT_OK_AND_ASSIGN(auto qtype,
MakeNamedTupleQType({"a", "b", "c"}, tuple_qtype));
EXPECT_TRUE(IsNamedTupleQType(qtype));
EXPECT_EQ(qtype->name(), "namedtuple<a=INT32,b=FLOAT64,c=BYTES>");
EXPECT_EQ(GetFieldIndexByName(nullptr, "a"), std::nullopt);
EXPECT_EQ(GetFieldIndexByName(qtype, "a"), 0);
EXPECT_EQ(GetFieldIndexByName(qtype, "b"), 1);
EXPECT_EQ(GetFieldIndexByName(qtype, "c"), 2);
EXPECT_EQ(GetFieldIndexByName(qtype, "d"), std::nullopt);
EXPECT_THAT(GetFieldNames(qtype), ElementsAre("a", "b", "c"));
EXPECT_EQ(GetFieldQTypeByName(nullptr, "a"), nullptr);
EXPECT_EQ(GetFieldQTypeByName(qtype, "a"), GetQType<int32_t>());
EXPECT_EQ(GetFieldQTypeByName(qtype, "b"), GetQType<double>());
EXPECT_EQ(GetFieldQTypeByName(qtype, "c"), GetQType<Bytes>());
EXPECT_EQ(GetFieldQTypeByName(qtype, "d"), nullptr);
auto derived_qtype_interface =
dynamic_cast<const DerivedQTypeInterface*>(qtype);
ASSERT_NE(derived_qtype_interface, nullptr);
EXPECT_EQ(derived_qtype_interface->GetBaseQType(), tuple_qtype);
{
ASSERT_OK_AND_ASSIGN(auto qtype2,
MakeNamedTupleQType({"a", "b", "c"}, tuple_qtype));
EXPECT_EQ(qtype, qtype2);
EXPECT_THAT(GetFieldNames(qtype2), ElementsAre("a", "b", "c"));
}
{
ASSERT_OK_AND_ASSIGN(auto qtype2,
MakeNamedTupleQType({"c", "b", "a"}, tuple_qtype));
EXPECT_EQ(qtype2->name(), "namedtuple<c=INT32,b=FLOAT64,a=BYTES>");
EXPECT_EQ(GetFieldIndexByName(qtype2, "c"), 0);
EXPECT_NE(qtype, qtype2);
EXPECT_THAT(GetFieldNames(qtype2), ElementsAre("c", "b", "a"));
}
{
auto tuple_qtype2 = MakeTupleQType(
{GetQType<int32_t>(), GetQType<double>(), GetQType<int32_t>()});
ASSERT_OK_AND_ASSIGN(auto qtype2,
MakeNamedTupleQType({"a", "b", "c"}, tuple_qtype2));
EXPECT_EQ(qtype2->name(), "namedtuple<a=INT32,b=FLOAT64,c=INT32>");
EXPECT_NE(qtype, qtype2);
EXPECT_THAT(GetFieldNames(qtype2), ElementsAre("a", "b", "c"));
}
}
TEST(NamedTupleQType, QValueFromFields) {
auto tuple_qtype = MakeTupleQType({GetQType<int>(), GetQType<float>()});
ASSERT_OK_AND_ASSIGN(auto qtype,
MakeNamedTupleQType({"a", "b"}, tuple_qtype));
{
ASSERT_OK_AND_ASSIGN(auto qvalue, TypedValue::FromFields(
qtype, {TypedRef::FromValue(2),
TypedRef::FromValue(3.14f)}));
EXPECT_TRUE(IsNamedTupleQType(qvalue.GetType()));
EXPECT_EQ(qvalue.GetType(), qtype);
EXPECT_THAT(qvalue.GetField(0).As<int>(), IsOkAndHolds(2));
EXPECT_THAT(qvalue.GetField(1).As<float>(), IsOkAndHolds(3.14f));
}
{
ASSERT_OK_AND_ASSIGN(
auto qvalue,
TypedValue::FromFields(
qtype, {TypedValue::FromValue(2), TypedValue::FromValue(3.14f)}));
EXPECT_TRUE(IsNamedTupleQType(qvalue.GetType()));
EXPECT_EQ(qvalue.GetType(), qtype);
EXPECT_THAT(qvalue.GetField(0).As<int>(), IsOkAndHolds(2));
EXPECT_THAT(qvalue.GetField(1).As<float>(), IsOkAndHolds(3.14f));
}
{
EXPECT_THAT(
TypedValue::FromFields(qtype, {TypedValue::FromValue(2)}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("expected 2 values, got 1; "
"compound_qtype=namedtuple<a=INT32,b=FLOAT32>")));
}
{
EXPECT_THAT(
TypedValue::FromFields(qtype, {TypedValue::FromValue(2),
TypedValue::FromValue(3)}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("expected fields[1]: FLOAT32, got INT32; "
"compound_qtype=namedtuple<a=INT32,b=FLOAT32>")));
}
}
TEST(NamedTupleQType, BigTuple) {
constexpr size_t kFieldCount = 100;
QTypePtr field_qtype = GetQType<int32_t>();
auto tuple_qtype =
MakeTupleQType(std::vector<QTypePtr>{kFieldCount, field_qtype});
std::vector<std::string> names;
for (size_t i = 0; i != kFieldCount; ++i) {
names.push_back(std::to_string(i));
}
ASSERT_OK_AND_ASSIGN(auto qtype, MakeNamedTupleQType(names, tuple_qtype));
EXPECT_TRUE(IsNamedTupleQType(qtype));
EXPECT_THAT(GetFieldNames(qtype), ElementsAreArray(names));
EXPECT_EQ(qtype->name(),
"namedtuple<0=INT32,1=INT32,2=INT32,3=INT32,4=INT32, [95 fields]>");
}
TEST(NamedTupleQType, Errors) {
EXPECT_THAT(
MakeNamedTupleQType({"a", "b"}, nullptr).status(),
StatusIs(absl::StatusCode::kInvalidArgument,
MatchesRegex(".*NamedTupleQType.*tuple.*found.*nullptr.*")));
EXPECT_THAT(
MakeNamedTupleQType({"a", "b"}, GetQType<int32_t>()).status(),
StatusIs(absl::StatusCode::kInvalidArgument,
MatchesRegex(".*NamedTupleQType.*tuple.*found.*INT32.*")));
auto tuple_qtype = MakeTupleQType(
{GetQType<int32_t>(), GetQType<double>(), GetQType<Bytes>()});
EXPECT_THAT(MakeNamedTupleQType({"a", "b"}, tuple_qtype).status(),
StatusIs(absl::StatusCode::kInvalidArgument,
MatchesRegex(".*NamedTupleQType.*2 vs 3.*")));
EXPECT_THAT(MakeNamedTupleQType({"a", "b", "a"}, tuple_qtype).status(),
StatusIs(absl::StatusCode::kInvalidArgument,
MatchesRegex(".*NamedTupleQType.*a.*duplicate.*")));
EXPECT_THAT(GetFieldNames(nullptr), IsEmpty());
EXPECT_THAT(GetFieldNames(GetQType<int32_t>()), IsEmpty());
}
TEST(NamedTupleQType, GetFieldByNameAs) {
ASSERT_OK_AND_ASSIGN(auto named_tuple, MakeNamedTuple(
{"a", "b"}, {TypedRef::FromValue(2.0f), TypedRef::FromValue(3)}));
EXPECT_THAT(GetFieldByNameAs<float>(named_tuple.AsRef(), "a"),
IsOkAndHolds(2.0f));
EXPECT_THAT(GetFieldByNameAs<float>(named_tuple.AsRef(), "c").status(),
StatusIs(absl::StatusCode::kInvalidArgument,
MatchesRegex(".*no field named \"c\".*")));
EXPECT_THAT(
GetFieldByNameAs<Bytes>(named_tuple.AsRef(), "a").status(),
StatusIs(
absl::StatusCode::kFailedPrecondition,
HasSubstr("type mismatch: expected C++ type `float` (FLOAT32), got "
"`arolla::Bytes`; while accessing field \"a\"")));
}
TEST(NamedTupleQType, MakeNamedTuple) {
ASSERT_OK_AND_ASSIGN(auto named_tuple,
MakeNamedTuple({"a", "b"}, {TypedRef::FromValue(2.0f),
TypedRef::FromValue(3)}));
ASSERT_OK_AND_ASSIGN(
auto named_tuple_qtype,
MakeNamedTupleQType(
{"a", "b"}, MakeTupleQType({GetQType<float>(), GetQType<int>()})));
EXPECT_EQ(named_tuple.GetType(), named_tuple_qtype);
EXPECT_THAT(named_tuple.GenReprToken(),
ReprTokenEq("namedtuple<a=FLOAT32,b=INT32>{(2., 3)}"));
EXPECT_EQ(named_tuple.GetFieldCount(), 2);
}
TEST(NamedTupleQType, MakeEmptyNamedTuple) {
ASSERT_OK_AND_ASSIGN(auto named_tuple,
MakeNamedTuple({}, absl::Span<const TypedRef>{}));
ASSERT_OK_AND_ASSIGN(auto named_tuple_qtype,
MakeNamedTupleQType({}, MakeTupleQType({})));
EXPECT_EQ(named_tuple.GetType(), named_tuple_qtype);
EXPECT_THAT(named_tuple.GenReprToken(), ReprTokenEq("namedtuple<>{()}"));
EXPECT_EQ(named_tuple.GetFieldCount(), 0);
}
TEST(NamedTupleQType, MakeNamedTuple_SameFromTypedValueAndTypedRef) {
ASSERT_OK_AND_ASSIGN(TypedValue named_tuple_from_values,
MakeNamedTuple({"a", "b"}, {TypedValue::FromValue(2.0f),
TypedValue::FromValue(3)}));
ASSERT_OK_AND_ASSIGN(auto named_tuple_from_refs,
MakeNamedTuple({"a", "b"}, {TypedRef::FromValue(2.0f),
TypedRef::FromValue(3)}));
EXPECT_EQ(named_tuple_from_values.GetFingerprint(),
named_tuple_from_refs.GetFingerprint());
}
TEST(NamedTupleQType, MakeNamedTuple_Error) {
EXPECT_THAT(
MakeNamedTuple({"a"},
{TypedValue::FromValue(2.0f), TypedValue::FromValue(3)}),
StatusIs(
absl::StatusCode::kInvalidArgument,
MatchesRegex(
"incorrect NamedTupleQType #field_names != #fields: 1 vs 2")));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/tuple_qtype.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/tuple_qtype_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
74a88cec-4e14-40d9-854d-624c46d9da7a | cpp | google/arolla | regex | arolla/qtype/strings/regex.cc | arolla/qtype/strings/regex_test.cc | #include "arolla/qtype/strings/regex.h"
#include <memory>
#include <string>
#include "absl/base/nullability.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "arolla/qtype/simple_qtype.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/repr.h"
#include "re2/re2.h"
namespace arolla {
namespace {
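// RE2-backed implementation of the Regex interface.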
class RE2Regex final : public Regex {
public:
explicit RE2Regex(absl::string_view pattern) : re2_(pattern, RE2::Quiet) {}
bool ok() const { return re2_.ok(); }
absl::string_view error() const { return re2_.error(); }
absl::string_view pattern() const final { return re2_.pattern(); }
int NumberOfCapturingGroups() const final {
return re2_.NumberOfCapturingGroups();
}
bool PartialMatch(absl::string_view text) const final {
    return RE2::PartialMatch(text, re2_);
}
bool PartialMatch(absl::string_view text, std::string* match) const final {
return RE2::PartialMatch(text, re2_, match);
}
private:
RE2 re2_;
};
}
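// Example usage (mirrors regex_test.cc):
//   ASSERT_OK_AND_ASSIGN(auto regex, CompileRegex("(\\d+) bottles of beer"));
//   std::string match;
//   regex->PartialMatch("100 bottles of beer", &match);  // match == "100"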
absl::StatusOr<absl::Nonnull<RegexPtr>> CompileRegex(
absl::string_view pattern) {
auto result = std::make_shared<RE2Regex>(pattern);
if (result->ok()) {
return result;
}
return absl::InvalidArgumentError(absl::StrCat(
"invalid regular expression: `", pattern, "`; ", result->error()));
}
void FingerprintHasherTraits<RegexPtr>::operator()(
FingerprintHasher* hasher, const RegexPtr& value) const {
if (value != nullptr) {
hasher->Combine(value->pattern());
}
}
ReprToken ReprTraits<RegexPtr>::operator()(const RegexPtr& value) const {
if (value == nullptr) {
return {"regex{}"};
}
return {absl::StrCat("regex{`", value->pattern(), "`}")};
}
AROLLA_DEFINE_SIMPLE_QTYPE(REGEX, RegexPtr)
} | #include "arolla/qtype/strings/regex.h"
#include <string>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/repr.h"
using ::absl_testing::StatusIs;
using ::testing::HasSubstr;
namespace arolla {
namespace {
TEST(Regex, NoCapturingGroups) {
ASSERT_OK_AND_ASSIGN(auto regex, CompileRegex("\\d+ bottles of beer"));
ASSERT_NE(regex, nullptr);
EXPECT_EQ(regex->NumberOfCapturingGroups(), 0);
EXPECT_TRUE(regex->PartialMatch("100 bottles of beer"));
std::string match;
EXPECT_FALSE(regex->PartialMatch("100 bottles of beer", &match));
}
TEST(Regex, OneCapturingGroup) {
ASSERT_OK_AND_ASSIGN(auto regex, CompileRegex("(\\d+) bottles of beer"));
ASSERT_NE(regex, nullptr);
EXPECT_EQ(regex->NumberOfCapturingGroups(), 1);
EXPECT_TRUE(regex->PartialMatch("100 bottles of beer"));
std::string match;
EXPECT_TRUE(regex->PartialMatch("100 bottles of beer", &match));
EXPECT_EQ(match, "100");
}
TEST(Regex, ManyCapturingGroup) {
ASSERT_OK_AND_ASSIGN(auto regex, CompileRegex("(\\d+) (bottles) (of) beer"));
ASSERT_NE(regex, nullptr);
EXPECT_EQ(regex->NumberOfCapturingGroups(), 3);
EXPECT_TRUE(regex->PartialMatch("100 bottles of beer"));
std::string match;
EXPECT_TRUE(regex->PartialMatch("100 bottles of beer", &match));
EXPECT_EQ(match, "100");
}
TEST(Regex, Repr) {
ASSERT_OK_AND_ASSIGN(auto regex1, CompileRegex("abc"));
ASSERT_OK_AND_ASSIGN(auto regex2, CompileRegex("a.c"));
EXPECT_EQ(regex1->pattern(), "abc");
EXPECT_EQ(regex2->pattern(), "a.c");
EXPECT_EQ(Repr(RegexPtr{}), "regex{}");
EXPECT_EQ(Repr(regex1), "regex{`abc`}");
EXPECT_EQ(Repr(regex2), "regex{`a.c`}");
}
TEST(Regex, Fingerprint) {
ASSERT_OK_AND_ASSIGN(auto regex1_1, CompileRegex("abc"));
ASSERT_OK_AND_ASSIGN(auto regex1_2, CompileRegex("abc"));
ASSERT_OK_AND_ASSIGN(auto regex2_1, CompileRegex("a.c"));
ASSERT_OK_AND_ASSIGN(auto regex2_2, CompileRegex("a.c"));
auto fingerprint0_1 = FingerprintHasher("salt").Combine(RegexPtr{}).Finish();
auto fingerprint0_2 = FingerprintHasher("salt").Combine(RegexPtr{}).Finish();
auto fingerprint1_1 = FingerprintHasher("salt").Combine(regex1_1).Finish();
auto fingerprint1_2 = FingerprintHasher("salt").Combine(regex1_2).Finish();
auto fingerprint2_1 = FingerprintHasher("salt").Combine(regex2_1).Finish();
auto fingerprint2_2 = FingerprintHasher("salt").Combine(regex2_2).Finish();
EXPECT_EQ(fingerprint0_1, fingerprint0_2);
EXPECT_EQ(fingerprint1_1, fingerprint1_2);
EXPECT_EQ(fingerprint2_1, fingerprint2_2);
EXPECT_NE(fingerprint0_1, fingerprint1_1);
EXPECT_NE(fingerprint1_1, fingerprint2_1);
EXPECT_NE(fingerprint2_1, fingerprint0_1);
}
TEST(Regex, QType) {
EXPECT_EQ(GetQType<RegexPtr>()->name(), "REGEX");
EXPECT_EQ(GetQType<RegexPtr>()->type_info(), typeid(RegexPtr));
ASSERT_OK_AND_ASSIGN(auto regex, CompileRegex("a.c"));
auto qvalue = TypedValue::FromValue(regex);
EXPECT_EQ(qvalue.Repr(), "regex{`a.c`}");
}
TEST(Regex, CompilationError) {
EXPECT_THAT(CompileRegex("ab\\αcd"),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("invalid regular expression: `ab\\αcd`;")));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/strings/regex.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/strings/regex_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
fc5aac15-bf0a-45c4-8f6a-280c6aa505ab | cpp | google/arolla | common_qtype | arolla/qtype/standard_type_properties/common_qtype.cc | arolla/qtype/standard_type_properties/common_qtype_test.cc | #include "arolla/qtype/standard_type_properties/common_qtype.h"
#include <algorithm>
#include <array>
#include <cstdint>
#include "absl/algorithm/container.h"
#include "absl/types/span.h"
#include "arolla/qtype/array_like/array_like_qtype.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/shape_qtype.h"
#include "arolla/qtype/standard_type_properties/properties.h"
namespace arolla {
namespace {
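// Returns the common scalar qtype of two scalar qtypes, or nullptr if there is
// none. Weak floats decay to float32; numeric qtypes promote along
// int32 -> int64 -> float32 -> float64.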
const QType* CommonScalarQType(const QType* lhs_qtype, const QType* rhs_qtype) {
if (lhs_qtype == rhs_qtype) {
return lhs_qtype;
}
if (lhs_qtype == GetWeakFloatQType()) {
lhs_qtype = GetQType<float>();
}
if (rhs_qtype == GetWeakFloatQType()) {
rhs_qtype = GetQType<float>();
}
static const std::array numeric_types = {
GetQType<double>(), GetQType<float>(), GetQType<int64_t>(),
GetQType<int32_t>()};
auto lhs_it = absl::c_find(numeric_types, lhs_qtype);
auto rhs_it = absl::c_find(numeric_types, rhs_qtype);
if (lhs_it != numeric_types.end() && rhs_it != numeric_types.end()) {
return *std::min(lhs_it, rhs_it);
}
return nullptr;
}
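// Returns the common shape qtype, or nullptr if there is none. Scalar and
// optional-scalar shapes upgrade to the other side's shape; array shapes
// combine with non-array shapes only when broadcasting is enabled.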
const ShapeQType* CommonShapeQType(const ShapeQType* lhs_qtype,
const ShapeQType* rhs_qtype,
bool enable_broadcasting) {
if (lhs_qtype == rhs_qtype) {
return rhs_qtype;
}
if (!enable_broadcasting &&
(IsArrayLikeShapeQType(lhs_qtype) || IsArrayLikeShapeQType(rhs_qtype))) {
return nullptr;
}
if (lhs_qtype == GetQType<ScalarShape>()) {
return rhs_qtype;
}
if (rhs_qtype == GetQType<ScalarShape>()) {
return lhs_qtype;
}
if (lhs_qtype == GetQType<OptionalScalarShape>()) {
return rhs_qtype;
}
if (rhs_qtype == GetQType<OptionalScalarShape>()) {
return lhs_qtype;
}
return nullptr;
}
}
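// Example (derived from common_qtype_test.cc): the common qtype of INT32 and
// OPTIONAL_FLOAT32 is OPTIONAL_FLOAT32, since int32 promotes to float32 and
// the scalar shape combines with the optional shape.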
const QType* CommonQType(const QType* lhs_qtype, const QType* rhs_qtype,
bool enable_broadcasting) {
if (lhs_qtype == nullptr || rhs_qtype == nullptr) {
return nullptr;
}
if (lhs_qtype == rhs_qtype) {
return lhs_qtype;
}
const QType* scalar_qtype;
{
auto lhs_scalar_qtype = GetScalarQTypeOrNull(lhs_qtype);
if (lhs_scalar_qtype == nullptr) {
return nullptr;
}
auto rhs_scalar_qtype = GetScalarQTypeOrNull(rhs_qtype);
if (rhs_scalar_qtype == nullptr) {
return nullptr;
}
scalar_qtype = CommonScalarQType(lhs_scalar_qtype, rhs_scalar_qtype);
if (scalar_qtype == nullptr) {
return nullptr;
}
}
const ShapeQType* shape_qtype =
CommonShapeQType(GetShapeQTypeOrNull(lhs_qtype),
GetShapeQTypeOrNull(rhs_qtype), enable_broadcasting);
if (shape_qtype == nullptr) {
return nullptr;
}
return shape_qtype->WithValueQType(scalar_qtype).value_or(nullptr);
}
bool CanCastImplicitly(QTypePtr from_qtype, QTypePtr to_qtype,
bool enable_broadcasting) {
return to_qtype != nullptr &&
CommonQType(from_qtype, to_qtype, enable_broadcasting) == to_qtype;
}
const QType* CommonQType(absl::Span<const QType* const> qtypes,
bool enable_broadcasting) {
if (qtypes.empty()) {
return nullptr;
}
const QType* result = qtypes[0];
for (const QType* qtype : qtypes.subspan(1)) {
result = CommonQType(result, qtype, enable_broadcasting);
}
return result;
}
const QType* BroadcastQType(absl::Span<QType const* const> target_qtypes,
const QType* qtype) {
if (absl::c_any_of(target_qtypes,
[](auto* qtype) { return qtype == nullptr; }) ||
qtype == nullptr) {
return nullptr;
}
const ShapeQType* shape_qtype = GetShapeQTypeOrNull(qtype);
for (const auto* target_qtype : target_qtypes) {
shape_qtype =
CommonShapeQType(shape_qtype, GetShapeQTypeOrNull(target_qtype),
                         /*enable_broadcasting=*/true);
}
if (shape_qtype == nullptr) {
return nullptr;
}
auto* scalar_qtype = GetScalarQTypeOrNull(qtype);
if (scalar_qtype == nullptr) {
return nullptr;
}
return shape_qtype->WithValueQType(scalar_qtype).value_or(nullptr);
}
} | #include "arolla/qtype/standard_type_properties/common_qtype.h"
#include <algorithm>
#include <cstdint>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "arolla/array/qtype/types.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/optional_qtype.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/tuple_qtype.h"
#include "arolla/qtype/weak_qtype.h"
#include "arolla/util/bytes.h"
#include "arolla/util/meta.h"
#include "arolla/util/unit.h"
namespace arolla {
namespace {
using ::testing::IsFalse;
using ::testing::IsNull;
using ::testing::IsTrue;
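// Brute-force reference implementation of CommonQType: enumerates scalar,
// optional, DenseArray and Array wrappings of the known scalar, numeric and
// weak-float combinations and records the expected result for (arg0, arg1).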
const QType* ReferenceCommonQType(const QType* arg0, const QType* arg1,
bool enable_broadcasting_) {
if (arg0 == arg1) {
return arg0;
}
const QType* result = nullptr;
const auto gen_result = [&](QTypePtr a0, QTypePtr a1, QTypePtr r) {
if (a0 == arg0 && a1 == arg1) {
result = r;
}
};
const auto gen_results = [&](QTypePtr a0, QTypePtr a1, QTypePtr r) {
ASSERT_OK_AND_ASSIGN(auto a0_optional, ToOptionalQType(a0));
ASSERT_OK_AND_ASSIGN(auto a0_dense_array,
GetDenseArrayQTypeByValueQType(a0));
ASSERT_OK_AND_ASSIGN(auto a0_array, GetArrayQTypeByValueQType(a0));
ASSERT_OK_AND_ASSIGN(auto a1_optional, ToOptionalQType(a1));
ASSERT_OK_AND_ASSIGN(auto a1_dense_array,
GetDenseArrayQTypeByValueQType(a1));
ASSERT_OK_AND_ASSIGN(auto a1_array, GetArrayQTypeByValueQType(a1));
ASSERT_OK_AND_ASSIGN(auto r_optional, ToOptionalQType(r));
ASSERT_OK_AND_ASSIGN(auto r_dense_array, GetDenseArrayQTypeByValueQType(r));
ASSERT_OK_AND_ASSIGN(auto r_array, GetArrayQTypeByValueQType(r));
gen_result(a0, a1, r);
gen_result(a0, a1_optional, r_optional);
gen_result(a0_optional, a1_optional, r_optional);
gen_result(a0_optional, a1, r_optional);
gen_result(a0_dense_array, a1_dense_array, r_dense_array);
gen_result(a0_array, a1_array, r_array);
if (enable_broadcasting_) {
gen_result(a0, a1_dense_array, r_dense_array);
gen_result(a0_optional, a1_dense_array, r_dense_array);
gen_result(a0, a1_array, r_array);
gen_result(a0_optional, a1_array, r_array);
gen_result(a0_dense_array, a1_optional, r_dense_array);
gen_result(a0_dense_array, a1, r_dense_array);
gen_result(a0_array, a1_optional, r_array);
gen_result(a0_array, a1, r_array);
}
};
meta::foreach_type<ScalarTypes>([&](auto meta_type) {
auto x = GetQType<typename decltype(meta_type)::type>();
gen_results(x, x, x);
});
static const auto numeric_qtypes = {
GetQType<int32_t>(),
GetQType<int64_t>(),
GetQType<float>(),
GetQType<double>(),
};
for (auto it = numeric_qtypes.begin();
result == nullptr && it != numeric_qtypes.end(); ++it) {
for (auto jt = numeric_qtypes.begin();
result == nullptr && jt != numeric_qtypes.end(); ++jt) {
gen_results(*it, *jt, *std::max(it, jt));
}
}
gen_results(GetWeakFloatQType(), GetWeakFloatQType(), GetWeakFloatQType());
gen_results(GetWeakFloatQType(), GetQType<int32_t>(), GetQType<float>());
gen_results(GetQType<int32_t>(), GetWeakFloatQType(), GetQType<float>());
gen_results(GetWeakFloatQType(), GetQType<int64_t>(), GetQType<float>());
gen_results(GetQType<int64_t>(), GetWeakFloatQType(), GetQType<float>());
gen_results(GetWeakFloatQType(), GetQType<float>(), GetQType<float>());
gen_results(GetQType<float>(), GetWeakFloatQType(), GetQType<float>());
gen_results(GetWeakFloatQType(), GetQType<double>(), GetQType<double>());
gen_results(GetQType<double>(), GetWeakFloatQType(), GetQType<double>());
return result;
}
class CommonQTypeMultipleParametersTests
: public ::testing::TestWithParam<bool> {
protected:
CommonQTypeMultipleParametersTests() {
meta::foreach_type<ScalarTypes>([&](auto meta_type) {
using T = typename decltype(meta_type)::type;
known_qtypes_.push_back(GetQType<T>());
known_qtypes_.push_back(GetOptionalQType<T>());
known_qtypes_.push_back(GetDenseArrayQType<T>());
known_qtypes_.push_back(GetArrayQType<T>());
});
known_qtypes_.push_back(nullptr);
known_qtypes_.push_back(GetDenseArrayWeakFloatQType());
known_qtypes_.push_back(GetArrayWeakFloatQType());
known_qtypes_.push_back(MakeTupleQType({}));
enable_broadcasting_ = GetParam();
}
std::vector<const QType*> known_qtypes_;
bool enable_broadcasting_;
};
TEST_P(CommonQTypeMultipleParametersTests, VsReferenceImplementation) {
for (auto lhs : known_qtypes_) {
for (auto rhs : known_qtypes_) {
EXPECT_EQ(CommonQType(lhs, rhs, enable_broadcasting_),
ReferenceCommonQType(lhs, rhs, enable_broadcasting_))
<< "lhs=" << (lhs ? lhs->name() : "nullptr")
<< ", rhs=" << (rhs ? rhs->name() : "nullptr");
}
}
}
TEST_P(CommonQTypeMultipleParametersTests, SemiLatticeProperties) {
for (auto arg_0 : known_qtypes_) {
EXPECT_EQ(
CommonQType(arg_0, arg_0, enable_broadcasting_), arg_0);
for (auto arg_1 : known_qtypes_) {
EXPECT_EQ(
CommonQType(arg_0, arg_1, enable_broadcasting_),
CommonQType(arg_1, arg_0, enable_broadcasting_));
for (auto arg_2 : known_qtypes_) {
EXPECT_EQ(
CommonQType(CommonQType(arg_0, arg_1, enable_broadcasting_), arg_2,
enable_broadcasting_),
CommonQType(arg_0, CommonQType(arg_1, arg_2, enable_broadcasting_),
enable_broadcasting_))
<< arg_0->name() << " " << arg_1->name() << " " << arg_2->name();
}
}
}
}
INSTANTIATE_TEST_SUITE_P(CommonQTypeTests, CommonQTypeMultipleParametersTests,
::testing::Values(false, true));
class CommonQTypeTest : public ::testing::Test {
protected:
static void SetUpTestCase() {
meta::foreach_type<ScalarTypes>([&](auto meta_type) {
GetQType<typename decltype(meta_type)::type>();
GetOptionalQType<typename decltype(meta_type)::type>();
GetDenseArrayQType<typename decltype(meta_type)::type>();
});
}
};
TEST_F(CommonQTypeTest, OnSpans) {
EXPECT_THAT(CommonQType({}, true), IsNull());
EXPECT_EQ(CommonQType({GetQType<int64_t>()}, true),
GetQType<int64_t>());
EXPECT_THAT(
CommonQType({nullptr, GetQType<int64_t>()}, true),
IsNull());
EXPECT_THAT(
CommonQType({GetQType<int64_t>(), nullptr}, true),
IsNull());
EXPECT_EQ(CommonQType({GetQType<int64_t>(), GetOptionalQType<int32_t>()},
true),
GetOptionalQType<int64_t>());
EXPECT_EQ(CommonQType({GetQType<int64_t>(), GetOptionalQType<int32_t>(),
GetDenseArrayQType<int32_t>()},
true),
GetDenseArrayQType<int64_t>());
EXPECT_EQ(
CommonQType(GetDenseArrayQType<int32_t>(), GetOptionalQType<int64_t>(),
true),
GetDenseArrayQType<int64_t>());
EXPECT_THAT(CommonQType({GetQType<int64_t>(), GetOptionalQType<int32_t>(),
GetDenseArrayQType<int32_t>()},
false),
IsNull());
}
TEST_F(CommonQTypeTest, WeakQType) {
EXPECT_EQ(CommonQType(GetQType<double>(), GetWeakFloatQType(),
true),
GetQType<double>());
EXPECT_EQ(CommonQType(GetQType<float>(), GetWeakFloatQType(),
true),
GetQType<float>());
EXPECT_EQ(CommonQType(GetWeakFloatQType(), GetWeakFloatQType(),
true),
GetWeakFloatQType());
EXPECT_EQ(CommonQType(GetOptionalWeakFloatQType(), GetWeakFloatQType(),
true),
GetOptionalWeakFloatQType());
EXPECT_EQ(CommonQType(GetOptionalWeakFloatQType(), GetQType<double>(),
true),
GetOptionalQType<double>());
EXPECT_EQ(CommonQType(GetOptionalWeakFloatQType(), GetQType<float>(),
true),
GetOptionalQType<float>());
EXPECT_EQ(CommonQType(GetWeakFloatQType(), GetOptionalQType<double>(),
true),
GetOptionalQType<double>());
EXPECT_EQ(CommonQType(GetWeakFloatQType(), GetOptionalQType<float>(),
true),
GetOptionalQType<float>());
EXPECT_EQ(CommonQType(GetOptionalWeakFloatQType(), GetOptionalQType<double>(),
true),
GetOptionalQType<double>());
EXPECT_EQ(CommonQType(GetOptionalWeakFloatQType(), GetOptionalQType<float>(),
true),
GetOptionalQType<float>());
EXPECT_EQ(CommonQType(GetWeakFloatQType(), GetArrayQType<double>(),
true),
GetArrayQType<double>());
EXPECT_EQ(CommonQType(GetWeakFloatQType(), GetArrayQType<float>(),
true),
GetArrayQType<float>());
EXPECT_EQ(CommonQType(GetOptionalWeakFloatQType(), GetArrayQType<double>(),
true),
GetArrayQType<double>());
EXPECT_EQ(CommonQType(GetOptionalWeakFloatQType(), GetArrayQType<float>(),
true),
GetArrayQType<float>());
}
class CanCastImplicitlyTest : public ::testing::Test {
protected:
static void SetUpTestCase() {
meta::foreach_type<ScalarTypes>([&](auto meta_type) {
GetQType<typename decltype(meta_type)::type>();
GetOptionalQType<typename decltype(meta_type)::type>();
GetDenseArrayQType<typename decltype(meta_type)::type>();
});
}
};
TEST_F(CanCastImplicitlyTest, OnScalars) {
EXPECT_THAT(CanCastImplicitly(GetQType<int32_t>(), GetQType<int64_t>(),
false),
IsTrue());
EXPECT_THAT(CanCastImplicitly(GetQType<float>(), GetQType<double>(),
false),
IsTrue());
EXPECT_THAT(CanCastImplicitly(GetQType<float>(), GetOptionalQType<float>(),
false),
IsTrue());
EXPECT_THAT(CanCastImplicitly(GetQType<float>(), GetOptionalQType<double>(),
false),
IsTrue());
EXPECT_THAT(CanCastImplicitly(GetQType<int64_t>(), GetQType<int32_t>(),
false),
IsFalse());
EXPECT_THAT(CanCastImplicitly(GetQType<int32_t>(), GetQType<float>(),
false),
IsTrue());
EXPECT_THAT(CanCastImplicitly(GetQType<int32_t>(), GetQType<uint64_t>(),
false),
IsFalse());
EXPECT_THAT(CanCastImplicitly(GetQType<int32_t>(), nullptr,
false),
IsFalse());
EXPECT_THAT(CanCastImplicitly(nullptr, GetQType<int32_t>(),
false),
IsFalse());
EXPECT_THAT(CanCastImplicitly(nullptr, nullptr,
false),
IsFalse());
}
TEST_F(CanCastImplicitlyTest, WithBroadcasting) {
EXPECT_THAT(
CanCastImplicitly(GetQType<int32_t>(), GetDenseArrayQType<int32_t>(),
false),
IsFalse());
EXPECT_THAT(CanCastImplicitly(GetOptionalQType<int32_t>(),
GetDenseArrayQType<int32_t>(),
false),
IsFalse());
EXPECT_THAT(
CanCastImplicitly(GetQType<int32_t>(), GetDenseArrayQType<int32_t>(),
true),
IsTrue());
EXPECT_THAT(CanCastImplicitly(GetOptionalQType<int32_t>(),
GetDenseArrayQType<int32_t>(),
true),
IsTrue());
}
TEST_F(CanCastImplicitlyTest, WeakQType) {
EXPECT_THAT(CanCastImplicitly(GetQType<float>(), GetWeakFloatQType(), false),
IsFalse());
EXPECT_THAT(CanCastImplicitly(GetQType<double>(), GetWeakFloatQType(), false),
IsFalse());
EXPECT_THAT(
CanCastImplicitly(GetQType<int32_t>(), GetWeakFloatQType(), false),
IsFalse());
EXPECT_THAT(CanCastImplicitly(GetWeakFloatQType(), GetQType<float>(), false),
IsTrue());
EXPECT_THAT(CanCastImplicitly(GetWeakFloatQType(), GetQType<double>(), false),
IsTrue());
EXPECT_THAT(
CanCastImplicitly(GetWeakFloatQType(), GetQType<int32_t>(), false),
IsFalse());
EXPECT_THAT(
CanCastImplicitly(GetWeakFloatQType(), GetOptionalQType<float>(), false),
IsTrue());
EXPECT_THAT(CanCastImplicitly(GetOptionalWeakFloatQType(),
GetOptionalQType<double>(), false),
IsTrue());
EXPECT_THAT(CanCastImplicitly(GetWeakFloatQType(), GetArrayWeakFloatQType(),
false),
IsFalse());
EXPECT_THAT(CanCastImplicitly(GetWeakFloatQType(), GetArrayWeakFloatQType(),
true),
IsTrue());
EXPECT_THAT(
CanCastImplicitly(GetOptionalWeakFloatQType(), GetArrayWeakFloatQType(),
true),
IsTrue());
EXPECT_THAT(
CanCastImplicitly(GetWeakFloatQType(), GetDenseArrayQType<float>(),
true),
IsTrue());
EXPECT_THAT(
CanCastImplicitly(GetWeakFloatQType(), GetDenseArrayQType<double>(),
true),
IsTrue());
}
class BroadcastQTypeTests : public ::testing::Test {
protected:
static void SetUpTestCase() {
meta::foreach_type<ScalarTypes>([&](auto meta_type) {
using T = typename decltype(meta_type)::type;
GetQType<T>();
GetOptionalQType<T>();
GetDenseArrayQType<T>();
GetArrayQType<T>();
});
GetDenseArrayWeakFloatQType();
GetArrayWeakFloatQType();
}
};
TEST_F(BroadcastQTypeTests, Empty) {
ASSERT_THAT(BroadcastQType({}, nullptr), IsNull());
}
TEST_F(BroadcastQTypeTests, SingleScalarType) {
ASSERT_EQ(BroadcastQType({}, GetQType<int32_t>()), GetQType<int32_t>());
}
TEST_F(BroadcastQTypeTests, NullHandling) {
ASSERT_THAT(BroadcastQType({nullptr}, GetQType<int32_t>()), IsNull());
ASSERT_THAT(BroadcastQType({GetQType<int32_t>()}, nullptr), IsNull());
ASSERT_THAT(
BroadcastQType({GetQType<int32_t>(), nullptr}, GetQType<int32_t>()),
IsNull());
}
TEST_F(BroadcastQTypeTests, ScalarAndOptional) {
ASSERT_EQ(BroadcastQType({GetOptionalQType<int32_t>()}, GetQType<int64_t>()),
GetOptionalQType<int64_t>());
ASSERT_EQ(BroadcastQType({GetQType<int64_t>()}, GetOptionalQType<int32_t>()),
GetOptionalQType<int32_t>());
}
TEST_F(BroadcastQTypeTests, ArrayAndDenseArray) {
EXPECT_THAT(
BroadcastQType({GetArrayQType<float>()}, GetDenseArrayQType<float>()),
IsNull());
EXPECT_THAT(
BroadcastQType({GetArrayQType<float>(), GetDenseArrayQType<float>()},
GetQType<float>()),
IsNull());
}
TEST_F(BroadcastQTypeTests, Basic) {
ASSERT_EQ(
BroadcastQType({GetOptionalQType<float>(), GetDenseArrayQType<Bytes>()},
GetQType<int32_t>()),
GetDenseArrayQType<int32_t>());
}
TEST_F(BroadcastQTypeTests, WeakFloat) {
ASSERT_EQ(BroadcastQType({GetDenseArrayQType<Unit>()}, GetWeakFloatQType()),
GetDenseArrayWeakFloatQType());
ASSERT_EQ(
BroadcastQType({GetDenseArrayQType<Unit>()}, GetOptionalWeakFloatQType()),
GetDenseArrayWeakFloatQType());
ASSERT_EQ(BroadcastQType({GetArrayQType<Unit>()}, GetWeakFloatQType()),
GetArrayWeakFloatQType());
ASSERT_EQ(
BroadcastQType({GetArrayQType<Unit>()}, GetOptionalWeakFloatQType()),
GetArrayWeakFloatQType());
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/standard_type_properties/common_qtype.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/standard_type_properties/common_qtype_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
135e2997-7d12-4167-bdc6-65a292190b26 | cpp | google/arolla | properties | arolla/qtype/standard_type_properties/properties.cc | arolla/qtype/standard_type_properties/properties_test.cc | #include "arolla/qtype/standard_type_properties/properties.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "arolla/qtype/array_like/array_like_qtype.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/optional_qtype.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/shape_qtype.h"
namespace arolla {
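// Returns the scalar qtype corresponding to `qtype`: its value qtype for
// optional and array qtypes, the qtype itself if it is already a scalar,
// and nullptr otherwise.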
const QType* GetScalarQTypeOrNull(
const QType* qtype) {
if (qtype != nullptr) {
if (auto* value_qtype = qtype->value_qtype()) {
return value_qtype;
}
if (IsScalarQType(qtype)) {
return qtype;
}
}
return nullptr;
}
absl::StatusOr<QTypePtr> GetScalarQType(QTypePtr qtype) {
DCHECK(qtype);
if (auto* result = GetScalarQTypeOrNull(qtype)) {
return result;
}
return absl::InvalidArgumentError(absl::StrFormat(
"there is no corresponding scalar type for %s", qtype->name()));
}
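// Maps a qtype to the qtype describing its "shape": plain scalars map to
// ScalarShape, optionals to OptionalScalarShape, array-like qtypes report
// their own shape qtype, and anything else (e.g. tuples) yields nullptr.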
const ShapeQType* GetShapeQTypeOrNull(
const QType* qtype) {
if (qtype != nullptr) {
if (qtype->value_qtype() == nullptr) {
if (IsScalarQType(qtype)) {
return static_cast<const ShapeQType*>(GetQType<ScalarShape>());
}
} else {
if (IsOptionalQType(qtype)) {
return static_cast<const ShapeQType*>(GetQType<OptionalScalarShape>());
}
if (auto* array_qtype = dynamic_cast<const ArrayLikeQType*>(qtype)) {
return array_qtype->shape_qtype();
}
}
}
return nullptr;
}
absl::StatusOr<const ShapeQType*> GetShapeQType(QTypePtr qtype) {
DCHECK(qtype);
if (auto* result = GetShapeQTypeOrNull(qtype)) {
return result;
}
return absl::InvalidArgumentError(
absl::StrFormat("no shape type for %s", qtype->name()));
}
QTypePtr DecayContainerQType(QTypePtr qtype) {
DCHECK(qtype);
auto* value_qtype = qtype->value_qtype();
if (value_qtype != nullptr) {
return value_qtype;
}
return qtype;
}
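// Rebuilds `qtype` with its scalar qtype replaced by `new_scalar_qtype`
// (per the tests, e.g. OPTIONAL_INT64 with FLOAT32 becomes OPTIONAL_FLOAT32)
// by routing through the corresponding shape qtype.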
absl::StatusOr<QTypePtr> WithScalarQType(QTypePtr qtype,
QTypePtr new_scalar_qtype) {
DCHECK(qtype);
DCHECK(new_scalar_qtype);
if (!IsScalarQType(new_scalar_qtype)) {
return absl::InvalidArgumentError(absl::StrFormat(
"unable to replace scalar type in %s with a non-scalar type %s",
qtype->name(), new_scalar_qtype->name()));
}
if (auto shape_qtype = GetShapeQType(qtype); shape_qtype.ok()) {
return (**shape_qtype).WithValueQType(new_scalar_qtype);
}
return absl::InvalidArgumentError(
absl::StrFormat("unable to replace scalar type in %s", qtype->name()));
}
absl::StatusOr<QTypePtr> GetPresenceQType(QTypePtr qtype) {
DCHECK(qtype);
if (auto shape_qtype = GetShapeQType(qtype); shape_qtype.ok()) {
return (**shape_qtype).presence_qtype();
}
return absl::InvalidArgumentError(
absl::StrFormat("no type to represent presence in %s", qtype->name()));
}
bool IsOptionalLikeQType(const QType* qtype) {
return qtype != nullptr && qtype->value_qtype() != nullptr &&
(IsOptionalQType(qtype) || IsArrayLikeQType(qtype));
}
absl::StatusOr<QTypePtr> ToOptionalLikeQType(QTypePtr qtype) {
DCHECK(qtype);
if (qtype->value_qtype() == nullptr) {
if (IsScalarQType(qtype)) {
return ToOptionalQType(qtype);
}
} else if (IsOptionalLikeQType(qtype)) {
return qtype;
}
return absl::InvalidArgumentError(
absl::StrFormat("no optional-like qtype for %s", qtype->name()));
}
} | #include "arolla/qtype/standard_type_properties/properties.h"
#include <cstdint>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "arolla/array/array.h"
#include "arolla/array/qtype/types.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/optional_qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/shape_qtype.h"
#include "arolla/qtype/tuple_qtype.h"
#include "arolla/util/unit.h"
namespace arolla {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::testing::MatchesRegex;
TEST(TypeProperties, GetScalarQType) {
EXPECT_THAT(GetScalarQType(GetQType<int64_t>()),
IsOkAndHolds(GetQType<int64_t>()));
EXPECT_THAT(GetScalarQType(GetOptionalQType<int64_t>()),
IsOkAndHolds(GetQType<int64_t>()));
EXPECT_THAT(GetScalarQType(GetDenseArrayQType<int64_t>()),
IsOkAndHolds(GetQType<int64_t>()));
EXPECT_THAT(GetScalarQType(GetDenseArrayWeakFloatQType()),
IsOkAndHolds(GetWeakFloatQType()));
EXPECT_THAT(GetScalarQType(GetArrayWeakFloatQType()),
IsOkAndHolds(GetWeakFloatQType()));
EXPECT_THAT(
GetScalarQType(MakeTupleQType({GetQType<int64_t>()})),
StatusIs(absl::StatusCode::kInvalidArgument,
MatchesRegex("there is no corresponding scalar type for .*")));
}
TEST(TypeProperties, GetShapeQType) {
EXPECT_THAT(GetShapeQType(GetQType<int64_t>()),
IsOkAndHolds(GetQType<ScalarShape>()));
EXPECT_THAT(GetShapeQType(GetOptionalQType<int64_t>()),
IsOkAndHolds(GetQType<OptionalScalarShape>()));
EXPECT_THAT(GetShapeQType(GetDenseArrayQType<int64_t>()),
IsOkAndHolds(GetQType<DenseArrayShape>()));
EXPECT_THAT(GetShapeQType(GetDenseArrayWeakFloatQType()),
IsOkAndHolds(GetQType<DenseArrayShape>()));
EXPECT_THAT(GetShapeQType(GetArrayWeakFloatQType()),
IsOkAndHolds(GetQType<ArrayShape>()));
EXPECT_THAT(GetShapeQType(MakeTupleQType({GetQType<int64_t>()})),
StatusIs(absl::StatusCode::kInvalidArgument,
MatchesRegex("no shape type for .*")));
}
TEST(TypeProperties, WithScalarQType) {
EXPECT_THAT(WithScalarQType(GetQType<int64_t>(), GetQType<float>()),
IsOkAndHolds(GetQType<float>()));
EXPECT_THAT(WithScalarQType(GetOptionalQType<int64_t>(), GetQType<float>()),
IsOkAndHolds(GetOptionalQType<float>()));
EXPECT_THAT(WithScalarQType(GetDenseArrayQType<int64_t>(), GetQType<float>()),
IsOkAndHolds(GetDenseArrayQType<float>()));
EXPECT_THAT(
WithScalarQType(GetDenseArrayQType<int64_t>(), GetWeakFloatQType()),
IsOkAndHolds(GetDenseArrayWeakFloatQType()));
EXPECT_THAT(WithScalarQType(GetArrayQType<int64_t>(), GetWeakFloatQType()),
IsOkAndHolds(GetArrayWeakFloatQType()));
EXPECT_THAT(WithScalarQType(MakeTupleQType({GetQType<int64_t>()}),
GetOptionalQType<float>()),
StatusIs(absl::StatusCode::kInvalidArgument,
MatchesRegex("unable to replace scalar type in .* with "
"a non-scalar type .*")));
EXPECT_THAT(
WithScalarQType(MakeTupleQType({GetQType<int64_t>()}), GetQType<float>()),
StatusIs(absl::StatusCode::kInvalidArgument,
MatchesRegex("unable to replace scalar type in .*")));
}
TEST(TypeProperties, GetPresenceQType) {
EXPECT_THAT(GetPresenceQType(GetQType<int64_t>()),
IsOkAndHolds(GetQType<Unit>()));
EXPECT_THAT(GetPresenceQType(GetOptionalQType<int64_t>()),
IsOkAndHolds(GetQType<OptionalUnit>()));
EXPECT_THAT(GetPresenceQType(GetDenseArrayQType<int64_t>()),
IsOkAndHolds(GetDenseArrayQType<Unit>()));
EXPECT_THAT(GetPresenceQType(GetDenseArrayWeakFloatQType()),
IsOkAndHolds(GetDenseArrayQType<Unit>()));
EXPECT_THAT(GetPresenceQType(GetArrayWeakFloatQType()),
IsOkAndHolds(GetArrayQType<Unit>()));
EXPECT_THAT(GetPresenceQType(MakeTupleQType({GetQType<int64_t>()})),
StatusIs(absl::StatusCode::kInvalidArgument,
MatchesRegex("no type to represent presence in .*")));
}
TEST(TypeProperties, IsOptionalLikeQType) {
EXPECT_FALSE(IsOptionalLikeQType(GetQType<int64_t>()));
EXPECT_TRUE(IsOptionalLikeQType(GetOptionalQType<int64_t>()));
EXPECT_TRUE(IsOptionalLikeQType(GetDenseArrayQType<int64_t>()));
EXPECT_TRUE(IsOptionalLikeQType(GetDenseArrayWeakFloatQType()));
EXPECT_TRUE(IsOptionalLikeQType(GetArrayWeakFloatQType()));
}
TEST(TypeProperties, ToOptionalLikeQType) {
EXPECT_THAT(ToOptionalLikeQType(GetQType<int64_t>()),
IsOkAndHolds(GetOptionalQType<int64_t>()));
EXPECT_THAT(ToOptionalLikeQType(GetOptionalQType<int64_t>()),
IsOkAndHolds(GetOptionalQType<int64_t>()));
EXPECT_THAT(ToOptionalLikeQType(GetDenseArrayQType<int64_t>()),
IsOkAndHolds(GetDenseArrayQType<int64_t>()));
EXPECT_THAT(ToOptionalLikeQType(GetDenseArrayWeakFloatQType()),
IsOkAndHolds(GetDenseArrayWeakFloatQType()));
EXPECT_THAT(ToOptionalLikeQType(GetArrayWeakFloatQType()),
IsOkAndHolds(GetArrayWeakFloatQType()));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/standard_type_properties/properties.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/standard_type_properties/properties_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
e97785da-47c3-461f-9298-9579bfd4fba6 | cpp | google/arolla | frame_iter | arolla/qtype/array_like/frame_iter.cc | arolla/qtype/array_like/frame_iter_test.cc | #include "arolla/qtype/array_like/frame_iter.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/raw_buffer_factory.h"
#include "arolla/qtype/array_like/array_like_qtype.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/typed_ref.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla {
namespace {
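// Groups the input arrays by their array qtype and creates one
// BatchToFramesCopier per qtype, registering each array -> scalar-slot
// mapping on it; CreateOutputCopiers below mirrors this for the outputs.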
absl::StatusOr<std::vector<std::unique_ptr<BatchToFramesCopier>>>
CreateInputCopiers(absl::Span<const TypedRef> input_arrays,
absl::Span<const TypedSlot> input_scalar_slots) {
if (input_arrays.size() != input_scalar_slots.size()) {
return absl::InvalidArgumentError(
absl::StrFormat("size of input_arrays and input_scalar_slots should be "
"the same: %d vs %d",
input_arrays.size(), input_scalar_slots.size()));
}
absl::flat_hash_map<QTypePtr, std::unique_ptr<BatchToFramesCopier>>
input_copiers;
for (size_t i = 0; i < input_arrays.size(); ++i) {
QTypePtr array_type = input_arrays[i].GetType();
if (!input_copiers.contains(array_type)) {
ASSIGN_OR_RETURN(input_copiers[array_type],
CreateBatchToFramesCopier(array_type));
}
RETURN_IF_ERROR(input_copiers[array_type]->AddMapping(
input_arrays[i], input_scalar_slots[i]));
}
std::vector<std::unique_ptr<BatchToFramesCopier>> input_copiers_vector;
for (auto& [_, v] : input_copiers)
input_copiers_vector.push_back(std::move(v));
return input_copiers_vector;
}
absl::StatusOr<std::vector<std::unique_ptr<BatchFromFramesCopier>>>
CreateOutputCopiers(absl::Span<const TypedSlot> output_array_slots,
absl::Span<const TypedSlot> output_scalar_slots,
RawBufferFactory* buffer_factory) {
if (output_array_slots.size() != output_scalar_slots.size()) {
return absl::InvalidArgumentError(absl::StrFormat(
"size of output_array_slots and output_scalar_slots should be "
"the same: %d vs %d",
output_array_slots.size(), output_scalar_slots.size()));
}
absl::flat_hash_map<QTypePtr, std::unique_ptr<BatchFromFramesCopier>>
output_copiers;
for (size_t i = 0; i < output_array_slots.size(); ++i) {
QTypePtr array_type = output_array_slots[i].GetType();
if (!output_copiers.contains(array_type)) {
ASSIGN_OR_RETURN(output_copiers[array_type],
CreateBatchFromFramesCopier(array_type, buffer_factory));
}
RETURN_IF_ERROR(output_copiers[array_type]->AddMapping(
output_scalar_slots[i], output_array_slots[i]));
}
std::vector<std::unique_ptr<BatchFromFramesCopier>> output_copiers_vector;
for (auto& [_, v] : output_copiers)
output_copiers_vector.push_back(std::move(v));
return output_copiers_vector;
}
}
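// Builds the per-qtype input and output copiers and verifies that every
// input array reports the same row count (or that options.row_count
// supplies one) before constructing the iterator.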
absl::StatusOr<FrameIterator> FrameIterator::Create(
absl::Span<const TypedRef> input_arrays,
absl::Span<const TypedSlot> input_scalar_slots,
absl::Span<const TypedSlot> output_array_slots,
absl::Span<const TypedSlot> output_scalar_slots,
const FrameLayout* scalar_layout, FrameIterator::Options options) {
ASSIGN_OR_RETURN(
std::vector<std::unique_ptr<BatchToFramesCopier>> input_copiers,
CreateInputCopiers(input_arrays, input_scalar_slots));
RawBufferFactory* buf_factory = options.buffer_factory;
if (!buf_factory) buf_factory = GetHeapBufferFactory();
ASSIGN_OR_RETURN(
std::vector<std::unique_ptr<BatchFromFramesCopier>> output_copiers,
CreateOutputCopiers(output_array_slots, output_scalar_slots,
buf_factory));
std::optional<int64_t> row_count = std::nullopt;
for (const auto& copier : input_copiers) {
if (!copier->row_count() ||
(row_count && *row_count != *copier->row_count())) {
return absl::InvalidArgumentError(
absl::StrFormat("input arrays have different sizes: %d vs %d",
*row_count, *copier->row_count()));
}
row_count = copier->row_count();
}
if (!row_count.has_value()) {
if (!options.row_count.has_value()) {
return absl::InvalidArgumentError(
"options.row_count can not be missed if there is no input arrays");
}
row_count = options.row_count;
} else if (options.row_count.has_value() &&
*options.row_count != *row_count) {
return absl::InvalidArgumentError(
absl::StrFormat("sizes of input arrays don't correspond "
"to options.row_count: %d vs %d",
*row_count, *options.row_count));
}
return FrameIterator(std::move(input_copiers), std::move(output_copiers),
*row_count, options.frame_buffer_count, scalar_layout);
}
FrameIterator::FrameIterator(
std::vector<std::unique_ptr<BatchToFramesCopier>>&& input_copiers,
std::vector<std::unique_ptr<BatchFromFramesCopier>>&& output_copiers,
size_t row_count, size_t frame_buffer_count,
const FrameLayout* scalar_layout)
: row_count_(row_count),
input_copiers_(std::move(input_copiers)),
output_copiers_(std::move(output_copiers)),
scalar_layout_(scalar_layout) {
frame_buffer_count = std::min(row_count, frame_buffer_count);
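  // Round the per-frame allocation size up to a multiple of 8 bytes so that
  // each frame placed into the shared buffer stays 8-byte aligned.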
dense_scalar_layout_size_ = (scalar_layout_->AllocSize() + 7) & ~7;
buffer_.resize(dense_scalar_layout_size_ * frame_buffer_count);
for (size_t i = 0; i < frame_buffer_count; ++i) {
void* alloc_ptr = GetAllocByIndex(i);
scalar_layout->InitializeAlignedAlloc(alloc_ptr);
frames_.emplace_back(alloc_ptr, scalar_layout);
const_frames_.emplace_back(alloc_ptr, scalar_layout);
}
for (auto& copier : input_copiers_) copier->Start();
for (auto& copier : output_copiers_) copier->Start(row_count);
}
FrameIterator::~FrameIterator() {
for (size_t i = 0; i < frames_.size(); ++i) {
scalar_layout_->DestroyAlloc(GetAllocByIndex(i));
}
}
absl::Status FrameIterator::StoreOutput(FramePtr output_frame) {
for (std::unique_ptr<BatchFromFramesCopier>& copier : output_copiers_) {
RETURN_IF_ERROR(copier->Finalize(output_frame));
}
return absl::OkStatus();
}
void FrameIterator::PreloadFrames(size_t frames_count) {
for (auto& copier : input_copiers_) {
copier->CopyNextBatch({frames_.data(), frames_count});
}
}
void FrameIterator::SaveOutputsOfProcessedFrames(size_t frames_count) {
for (auto& copier : output_copiers_) {
absl::Status status =
copier->CopyNextBatch({const_frames_.data(), frames_count});
DCHECK_OK(status);
}
}
} | #include "arolla/qtype/array_like/frame_iter.h"
#include <cstdint>
#include <optional>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/memory_allocation.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qtype/typed_ref.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/util/threading.h"
namespace arolla {
namespace {
using ::absl_testing::StatusIs;
using ::testing::ElementsAre;
using ::testing::HasSubstr;
using ::testing::Test;
TEST(FrameIterator, Iterate) {
FrameLayout::Builder scalar_bldr;
auto scalar_f_slot1 = scalar_bldr.AddSlot<OptionalValue<float>>();
auto scalar_i_slot1 = scalar_bldr.AddSlot<OptionalValue<int64_t>>();
auto scalar_i_slot2 = scalar_bldr.AddSlot<OptionalValue<int64_t>>();
auto scalar_f_slot2 = scalar_bldr.AddSlot<OptionalValue<float>>();
auto scalar_layout = std::move(scalar_bldr).Build();
std::vector<TypedSlot> scalar_slots = {
TypedSlot::FromSlot(scalar_f_slot1), TypedSlot::FromSlot(scalar_i_slot1),
TypedSlot::FromSlot(scalar_i_slot2), TypedSlot::FromSlot(scalar_f_slot2)};
DenseArray<float> arr_f1 =
CreateDenseArray<float>({1.5, std::nullopt, 2.5, 3.5});
DenseArray<int64_t> arr_i1 = CreateDenseArray<int64_t>({3, 4, 5, 6});
DenseArray<int64_t> arr_i2 =
CreateDenseArray<int64_t>({2, std::nullopt, 0, std::nullopt});
DenseArray<float> arr_f2 =
CreateDenseArray<float>({3.2, 2.2, std::nullopt, 1.2});
FrameLayout::Builder vector_bldr;
auto arr_output_f1 = vector_bldr.AddSlot<DenseArray<float>>();
auto arr_output_i1 = vector_bldr.AddSlot<DenseArray<int64_t>>();
auto arr_output_i2 = vector_bldr.AddSlot<DenseArray<int64_t>>();
auto arr_output_f2 = vector_bldr.AddSlot<DenseArray<float>>();
auto output_vector_layout = std::move(vector_bldr).Build();
std::vector<TypedRef> input_refs = {
TypedRef::FromValue(arr_f1), TypedRef::FromValue(arr_i1),
TypedRef::FromValue(arr_i2), TypedRef::FromValue(arr_f2)};
std::vector<TypedSlot> output_slots = {
TypedSlot::FromSlot(arr_output_f1), TypedSlot::FromSlot(arr_output_i1),
TypedSlot::FromSlot(arr_output_i2), TypedSlot::FromSlot(arr_output_f2)};
auto scalar_processing_fn = [&](FramePtr frame) {
OptionalValue<float> f1 = frame.Get(scalar_f_slot1);
OptionalValue<float> f2 = frame.Get(scalar_f_slot2);
if (f1.present) frame.Set(scalar_f_slot1, f1.value + 1.0);
if (f2.present) frame.Set(scalar_f_slot2, f2.value + 2.0);
OptionalValue<int64_t> i1 = frame.Get(scalar_i_slot1);
OptionalValue<int64_t> i2 = frame.Get(scalar_i_slot2);
if (i1.present) frame.Set(scalar_i_slot1, i1.value + 3);
if (i2.present) frame.Set(scalar_i_slot2, i2.value + 4);
};
auto check_output_fn = [&](FrameIterator& frame_iterator) {
MemoryAllocation alloc(&output_vector_layout);
FramePtr output_frame = alloc.frame();
EXPECT_OK(frame_iterator.StoreOutput(output_frame));
EXPECT_THAT(output_frame.Get(arr_output_f1),
ElementsAre(2.5, std::nullopt, 3.5, 4.5));
EXPECT_THAT(output_frame.Get(arr_output_f2),
ElementsAre(5.2, 4.2, std::nullopt, 3.2));
EXPECT_THAT(output_frame.Get(arr_output_i1), ElementsAre(6, 7, 8, 9));
EXPECT_THAT(output_frame.Get(arr_output_i2),
ElementsAre(6, std::nullopt, 4, std::nullopt));
};
{
ASSERT_OK_AND_ASSIGN(
auto frame_iterator,
FrameIterator::Create(input_refs, scalar_slots, output_slots,
scalar_slots, &scalar_layout,
{.frame_buffer_count = 2}));
frame_iterator.ForEachFrame(scalar_processing_fn);
check_output_fn(frame_iterator);
}
StdThreading threading(4);
for (int threads = 1; threads <= 4; ++threads) {
ASSERT_OK_AND_ASSIGN(
auto frame_iterator,
FrameIterator::Create(input_refs, scalar_slots, output_slots,
scalar_slots, &scalar_layout,
{.frame_buffer_count = 3}));
frame_iterator.ForEachFrame(scalar_processing_fn, threading, threads);
check_output_fn(frame_iterator);
}
}
TEST(FrameIterator, EmptyArrays) {
FrameLayout::Builder scalar_bldr;
auto scalar_slot = scalar_bldr.AddSlot<OptionalValue<float>>();
auto scalar_layout = std::move(scalar_bldr).Build();
std::vector<TypedSlot> scalar_slots = {TypedSlot::FromSlot(scalar_slot)};
FrameLayout::Builder arrays_layout_bldr;
auto arr_output = arrays_layout_bldr.AddSlot<DenseArray<float>>();
auto output_arrays_layout = std::move(arrays_layout_bldr).Build();
DenseArray<float> arr;
std::vector<TypedRef> input_refs = {TypedRef::FromValue(arr)};
std::vector<TypedSlot> output_slots = {TypedSlot::FromSlot(arr_output)};
auto scalar_processing_fn = [&](FramePtr frame) { ADD_FAILURE(); };
ASSERT_OK_AND_ASSIGN(auto frame_iterator,
FrameIterator::Create(
input_refs, scalar_slots, output_slots, scalar_slots,
&scalar_layout, {.frame_buffer_count = 2}));
frame_iterator.ForEachFrame(scalar_processing_fn);
MemoryAllocation alloc(&output_arrays_layout);
FramePtr output_frame = alloc.frame();
EXPECT_OK(frame_iterator.StoreOutput(output_frame));
EXPECT_EQ(output_frame.Get(arr_output).size(), 0);
}
TEST(FrameIterator, EmptyInputAndOutput) {
FrameLayout::Builder scalar_bldr;
auto scalar_layout = std::move(scalar_bldr).Build();
{
auto frame_iterator_or_status =
FrameIterator::Create({}, {}, {}, {}, &scalar_layout);
EXPECT_THAT(
frame_iterator_or_status,
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("options.row_count can not be missed if there "
"is no input arrays")));
}
{
ASSERT_OK_AND_ASSIGN(auto frame_iterator,
FrameIterator::Create({}, {}, {}, {}, &scalar_layout,
{.row_count = 4}));
EXPECT_EQ(frame_iterator.row_count(), 4);
}
}
TEST(FrameIterator, IncorrectInputType) {
FrameLayout::Builder scalar_bldr;
auto scalar_slot = scalar_bldr.AddSlot<float>();
auto scalar_layout = std::move(scalar_bldr).Build();
std::vector<TypedSlot> scalar_slots = {TypedSlot::FromSlot(scalar_slot)};
DenseArray<int64_t> arr = CreateDenseArray<int64_t>({1, std::nullopt, 2, 3});
auto frame_iterator_or_status = FrameIterator::Create(
{TypedRef::FromValue(arr)}, scalar_slots, {}, {}, &scalar_layout);
EXPECT_THAT(frame_iterator_or_status,
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("slot type does not match")));
}
TEST(FrameIterator, IncorrectOutputType) {
FrameLayout::Builder vector_bldr;
auto vector_slot = vector_bldr.AddSlot<DenseArray<float>>();
auto vector_layout = std::move(vector_bldr).Build();
FrameLayout::Builder scalar_bldr;
auto scalar_slot = scalar_bldr.AddSlot<int64_t>();
auto scalar_layout = std::move(scalar_bldr).Build();
auto frame_iterator_or_status =
FrameIterator::Create({}, {}, {TypedSlot::FromSlot(vector_slot)},
{TypedSlot::FromSlot(scalar_slot)}, &scalar_layout);
EXPECT_THAT(frame_iterator_or_status,
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("slot type does not match")));
}
TEST(FrameIterator, WrongSize) {
FrameLayout::Builder scalar_bldr;
auto scalar_f_slot1 = scalar_bldr.AddSlot<OptionalValue<float>>();
auto scalar_i_slot1 = scalar_bldr.AddSlot<OptionalValue<int64_t>>();
auto scalar_layout = std::move(scalar_bldr).Build();
std::vector<TypedSlot> scalar_slots = {TypedSlot::FromSlot(scalar_f_slot1),
TypedSlot::FromSlot(scalar_i_slot1)};
DenseArray<float> arr_f1 =
CreateDenseArray<float>({1.5, std::nullopt, 2.5, 3.5});
DenseArray<int64_t> arr_i1 = CreateDenseArray<int64_t>({3, 4, 5});
auto frame_iterator_or_status = FrameIterator::Create(
{TypedRef::FromValue(arr_f1), TypedRef::FromValue(arr_i1)}, scalar_slots,
{}, {}, &scalar_layout);
EXPECT_THAT(frame_iterator_or_status,
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("input arrays have different sizes")));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/array_like/frame_iter.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/array_like/frame_iter_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
483b923a-2dce-43d2-aaf9-c1676862cd7b | cpp | google/arolla | dict_types | arolla/qtype/dict/dict_types.cc | arolla/qtype/dict/dict_types_test.cc | #include "arolla/qtype/dict/dict_types.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include "absl/base/no_destructor.h"
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/synchronization/mutex.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/qtype/derived_qtype.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/tuple_qtype.h"
#include "arolla/util/bytes.h"
#include "arolla/util/fast_dynamic_downcast_final.h"
#include "arolla/util/text.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla {
namespace {
class KeyToRowDictTypeRegistry {
public:
static KeyToRowDictTypeRegistry& instance() {
static absl::NoDestructor<KeyToRowDictTypeRegistry> result;
return *result;
}
absl::Status Register(QTypePtr key_qtype, QTypePtr dict_qtype) {
absl::MutexLock l(&lock_);
auto [iter, inserted] = dict_types_.emplace(key_qtype, dict_qtype);
if (!inserted) {
return absl::FailedPreconditionError(absl::StrFormat(
"attempt to register %s dict twice", dict_qtype->name()));
}
return absl::OkStatus();
}
absl::StatusOr<QTypePtr> Get(QTypePtr qtype) {
absl::ReaderMutexLock l(&lock_);
auto iter = dict_types_.find(qtype);
if (iter == dict_types_.end()) {
return absl::NotFoundError(
absl::StrFormat("no dict with %s keys found", qtype->name()));
}
return iter->second;
}
private:
absl::Mutex lock_;
absl::flat_hash_map<QTypePtr, QTypePtr> dict_types_ ABSL_GUARDED_BY(lock_);
};
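// Qtype of a Dict<key, value>: a derived qtype whose base qtype is the
// tuple (KeyToRowDict<key>, DenseArray<value>).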
class DictQType final : public BasicDerivedQType {
public:
DictQType(std::string name, QTypePtr dict_type, QTypePtr values_array_type)
: BasicDerivedQType(ConstructorArgs{
.name = std::move(name),
.base_qtype = MakeTupleQType({dict_type, values_array_type}),
.qtype_specialization_key = "::arolla::DictQType",
}) {}
};
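// Lazily constructs and caches a single DictQType instance per
// (key qtype, value qtype) pair.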
class DictQTypeRegistry {
public:
static DictQTypeRegistry& instance() {
static absl::NoDestructor<DictQTypeRegistry> result;
return *result;
}
absl::StatusOr<QTypePtr> GetQType(QTypePtr key_type, QTypePtr value_type) {
{
absl::ReaderMutexLock guard(&lock_);
if (const auto it = registry_.find({key_type, value_type});
it != registry_.end()) {
return it->second.get();
}
}
ASSIGN_OR_RETURN(QTypePtr dict_type, GetKeyToRowDictQType(key_type));
ASSIGN_OR_RETURN(QTypePtr values_array_type,
GetDenseArrayQTypeByValueQType(value_type));
auto kv_dict_type = std::make_unique<DictQType>(
absl::StrFormat("Dict<%s,%s>", key_type->name(), value_type->name()),
dict_type, values_array_type);
absl::MutexLock guard(&lock_);
return registry_
.emplace(std::make_pair(key_type, value_type), std::move(kv_dict_type))
.first->second.get();
}
private:
absl::Mutex lock_;
absl::flat_hash_map<std::pair<QTypePtr, QTypePtr>, std::unique_ptr<QType>>
registry_ ABSL_GUARDED_BY(lock_);
};
}
namespace dict_impl {
void RegisterKeyToRowDictQType(QTypePtr key_type, QTypePtr dict_type) {
auto status =
KeyToRowDictTypeRegistry::instance().Register(key_type, dict_type);
DCHECK_OK(status);
}
}
absl::StatusOr<QTypePtr> GetKeyToRowDictQType(QTypePtr key_type) {
return KeyToRowDictTypeRegistry::instance().Get(key_type);
}
bool IsKeyToRowDictQType(QTypePtr type) {
if (type->value_qtype() == nullptr) {
return false;
}
ASSIGN_OR_RETURN(QTypePtr dict_type,
GetKeyToRowDictQType(type->value_qtype()), false);
return dict_type == type;
}
absl::StatusOr<QTypePtr> GetDictQType(QTypePtr key_type, QTypePtr value_type) {
return DictQTypeRegistry::instance().GetQType(key_type, value_type);
}
const QType* GetDictKeyQTypeOrNull(QTypePtr dict_type) {
auto d = fast_dynamic_downcast_final<const DictQType*>(dict_type);
return d != nullptr ? d->type_fields()[0].GetType()->value_qtype() : nullptr;
}
const QType* GetDictValueQTypeOrNull(QTypePtr dict_type) {
auto d = fast_dynamic_downcast_final<const DictQType*>(dict_type);
return d != nullptr ? d->type_fields()[1].GetType()->value_qtype() : nullptr;
}
bool IsDictQType(const QType* qtype) {
return fast_dynamic_downcast_final<const DictQType*>(qtype) != nullptr;
}
template struct QTypeTraits<KeyToRowDict<bool>>;
template struct QTypeTraits<KeyToRowDict<int32_t>>;
template struct QTypeTraits<KeyToRowDict<int64_t>>;
template struct QTypeTraits<KeyToRowDict<Bytes>>;
template struct QTypeTraits<KeyToRowDict<Text>>;
} | #include "arolla/qtype/dict/dict_types.h"
#include <cstdint>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/util/bytes.h"
#include "arolla/util/repr.h"
#include "arolla/util/unit.h"
namespace arolla {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::testing::ElementsAre;
using ::testing::Eq;
using ::testing::HasSubstr;
using ::testing::Ne;
using ::testing::Property;
TEST(DictTypes, GetKeyToRowDictQType) {
GetKeyToRowDictQType<int64_t>();
EXPECT_THAT(GetKeyToRowDictQType<int64_t>()->value_qtype(),
Eq(GetQType<int64_t>()));
EXPECT_THAT(GetKeyToRowDictQType(GetQType<int64_t>()),
IsOkAndHolds(GetQType<KeyToRowDict<int64_t>>()));
EXPECT_THAT(GetKeyToRowDictQType(GetQType<int64_t>()),
IsOkAndHolds(GetKeyToRowDictQType<int64_t>()));
EXPECT_THAT(GetKeyToRowDictQType(GetQType<KeyToRowDict<int64_t>>()),
StatusIs(absl::StatusCode::kNotFound,
HasSubstr("no dict with DICT_INT64 keys found")));
}
TEST(DictTypes, GetDictQType) {
GetKeyToRowDictQType<int64_t>();
GetDenseArrayQType<float>();
GetDenseArrayQType<double>();
ASSERT_OK_AND_ASSIGN(QTypePtr int_to_float_dict,
GetDictQType(GetQType<int64_t>(), GetQType<float>()));
EXPECT_THAT(int_to_float_dict->name(), Eq("Dict<INT64,FLOAT32>"));
EXPECT_THAT(GetDictKeyQTypeOrNull(int_to_float_dict),
Eq(GetQType<int64_t>()));
EXPECT_THAT(GetDictValueQTypeOrNull(int_to_float_dict),
Eq(GetQType<float>()));
EXPECT_THAT(
int_to_float_dict->type_fields(),
ElementsAre(
Property(&TypedSlot::GetType, Eq(GetKeyToRowDictQType<int64_t>())),
Property(&TypedSlot::GetType, Eq(GetDenseArrayQType<float>()))));
EXPECT_THAT(GetDictQType(GetQType<int64_t>(), GetQType<float>()),
IsOkAndHolds(Eq(int_to_float_dict)));
EXPECT_THAT(GetDictQType(GetQType<int64_t>(), GetQType<double>()),
IsOkAndHolds(Ne(int_to_float_dict)));
}
TEST(DictTypes, IsDictQType) {
GetKeyToRowDictQType<int64_t>();
GetDenseArrayQType<float>();
GetDenseArrayQType<Unit>();
{
ASSERT_OK_AND_ASSIGN(QTypePtr int_to_float_dict,
GetDictQType(GetQType<int64_t>(), GetQType<float>()));
ASSERT_TRUE(IsDictQType(int_to_float_dict));
}
{
ASSERT_OK_AND_ASSIGN(QTypePtr int_to_unit_dict,
GetDictQType(GetQType<int64_t>(), GetQType<Unit>()));
ASSERT_TRUE(IsDictQType(int_to_unit_dict));
}
{
EXPECT_THAT(GetDictQType(GetQType<Unit>(), GetQType<float>()),
StatusIs(absl::StatusCode::kNotFound,
HasSubstr("no dict with UNIT keys found")));
}
{
EXPECT_THAT(GetDictQType(GetQType<float>(), GetQType<float>()),
StatusIs(absl::StatusCode::kNotFound,
HasSubstr("no dict with FLOAT32 keys found")));
}
}
TEST(DictTypes, ReprTraits) {
EXPECT_EQ(Repr(KeyToRowDict<float>{}), "dict{}");
EXPECT_EQ(Repr(KeyToRowDict<float>{{{0.5, 1}}}), "dict{0.5:int64{1},}");
EXPECT_EQ(Repr(KeyToRowDict<float>{{{0.5, 1}, {2.5, 3}}}),
"dict{0.5:int64{1},2.5:int64{3},}");
EXPECT_EQ(Repr(KeyToRowDict<Bytes>{{{Bytes("key"), 2}}}),
"dict{b'key':int64{2},}");
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/dict/dict_types.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/dict/dict_types_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
13692b9c-e863-4fa0-8eed-e55cea52b26f | cpp | google/arolla | bytes | arolla/util/bytes.cc | arolla/util/bytes_test.cc | #include "arolla/util/bytes.h"
#include <cstddef>
#include <string>
#include "absl/strings/escaping.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "arolla/util/repr.h"
namespace arolla {
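// Renders Bytes as b'...' with C-style hex escaping; values longer than
// 120 bytes are abbreviated and annotated with their total size.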
ReprToken ReprTraits<Bytes>::operator()(const Bytes& value) const {
constexpr size_t kBytesAbbrevLimit = 120;
ReprToken result;
absl::string_view bytes = value;
if (bytes.size() <= kBytesAbbrevLimit) {
result.str = absl::StrCat("b'", absl::CHexEscape(bytes), "'");
} else {
result.str =
absl::StrCat("b'", absl::CHexEscape(bytes.substr(0, kBytesAbbrevLimit)),
"... (", bytes.size(), " bytes total)'");
}
return result;
}
} | #include "arolla/util/bytes.h"
#include <string>
#include <type_traits>
#include <utility>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/strings/string_view.h"
#include "arolla/util/repr.h"
#include "arolla/util/testing/repr_token_eq.h"
namespace arolla {
namespace {
using ::arolla::testing::ReprTokenEq;
using ::testing::Eq;
using ::testing::MatchesRegex;
TEST(BytesTest, Constructor) {
EXPECT_THAT(Bytes("Hello"), Eq("Hello"));
std::string hello = "Hello";
EXPECT_THAT(Bytes(hello), Eq("Hello"));
absl::string_view hello_view = hello;
EXPECT_THAT(Bytes(hello_view), Eq("Hello"));
}
TEST(BytesTest, CopyAndMoveConstructors) {
static_assert(std::is_nothrow_move_constructible<Bytes>::value);
Bytes src("Google");
Bytes copied(src);
EXPECT_THAT(copied, Eq(src));
Bytes moved(std::move(src));
EXPECT_THAT(moved, Eq(copied));
}
TEST(BytesTest, CopyAndMoveAssignment) {
static_assert(std::is_nothrow_move_assignable<Bytes>::value);
Bytes src("Google");
Bytes copied = src;
EXPECT_THAT(copied, Eq(src));
Bytes moved = std::move(src);
EXPECT_THAT(moved, Eq(copied));
}
TEST(BytesTest, AssignmentFromString) {
std::string google = "Google";
{
Bytes val("x");
val = "Google";
EXPECT_THAT(val, Eq(google));
}
{
Bytes val("x");
val = google;
EXPECT_THAT(val, Eq(google));
}
{
absl::string_view google_view = google;
Bytes val("x");
val = google_view;
EXPECT_THAT(val, Eq("Google"));
}
{
Bytes val("x");
val = std::move(google);
EXPECT_THAT(val, Eq("Google"));
}
}
TEST(BytesTest, Repr) {
EXPECT_THAT(GenReprToken(Bytes("G'\"\t\xff")),
ReprTokenEq(R"(b'G\'\"\t\xff')"));
EXPECT_THAT(Repr(Bytes(std::string(1024, 'x'))),
MatchesRegex(R"(b'x{120}[.]{3} \(1024 bytes total\)')"));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/util/bytes.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/util/bytes_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
9f1540d2-c47f-4247-95d8-8101f4e8582d | cpp | google/arolla | string | arolla/util/string.cc | arolla/util/string_test.cc | #include "arolla/util/string.h"
#include <cstddef>
#include <string>
#include "absl/log/check.h"
namespace arolla {
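// Truncates `str` to at most `max_length` characters, replacing the tail
// with "..." when it is too long (per the tests, Truncate("fifty seven", 7)
// yields "fift..."); `max_length` must be greater than 3.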
std::string Truncate(std::string str, size_t max_length) {
DCHECK_GT(max_length, 3);
if (str.size() > max_length) {
str.resize(max_length);
str.replace(max_length - 3, 3, "...");
}
return str;
}
} | #include "arolla/util/string.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
namespace arolla {
namespace {
using ::testing::Eq;
TEST(StringTest, Truncate) {
EXPECT_THAT(Truncate("", 7), Eq(""));
EXPECT_THAT(Truncate("fifty seven", 7), Eq("fift..."));
EXPECT_THAT(Truncate("fifty seven", 10), Eq("fifty s..."));
EXPECT_THAT(Truncate("fifty seven", 11), Eq("fifty seven"));
EXPECT_THAT(Truncate("fifty seven", 20), Eq("fifty seven"));
}
TEST(StringTest, IsQualifiedIdentifier) {
static_assert(IsQualifiedIdentifier("foo"));
static_assert(!IsQualifiedIdentifier(".bar"));
static_assert(!IsQualifiedIdentifier("0.bar"));
static_assert(!IsQualifiedIdentifier("9.bar"));
static_assert(!IsQualifiedIdentifier("-.bar"));
static_assert(IsQualifiedIdentifier("_.bar"));
static_assert(IsQualifiedIdentifier("A.bar"));
static_assert(IsQualifiedIdentifier("Z.bar"));
static_assert(IsQualifiedIdentifier("a.bar"));
static_assert(IsQualifiedIdentifier("z.bar"));
static_assert(IsQualifiedIdentifier("_0.bar"));
static_assert(IsQualifiedIdentifier("_9.bar"));
static_assert(!IsQualifiedIdentifier("_-.bar"));
static_assert(IsQualifiedIdentifier("__.bar"));
static_assert(IsQualifiedIdentifier("_A.bar"));
static_assert(IsQualifiedIdentifier("_Z.bar"));
static_assert(IsQualifiedIdentifier("_a.bar"));
static_assert(IsQualifiedIdentifier("_z.bar"));
static_assert(!IsQualifiedIdentifier("foo..bar"));
static_assert(!IsQualifiedIdentifier("foo.0.bar"));
static_assert(!IsQualifiedIdentifier("foo.9.bar"));
static_assert(!IsQualifiedIdentifier("foo.-.bar"));
static_assert(IsQualifiedIdentifier("foo._.bar"));
static_assert(IsQualifiedIdentifier("foo.A.bar"));
static_assert(IsQualifiedIdentifier("foo.Z.bar"));
static_assert(IsQualifiedIdentifier("foo.a.bar"));
static_assert(IsQualifiedIdentifier("foo.z.bar"));
static_assert(IsQualifiedIdentifier("foo._0.bar"));
static_assert(IsQualifiedIdentifier("foo._9.bar"));
static_assert(!IsQualifiedIdentifier("foo._-.bar"));
static_assert(IsQualifiedIdentifier("foo.__.bar"));
static_assert(IsQualifiedIdentifier("foo._A.bar"));
static_assert(IsQualifiedIdentifier("foo._Z.bar"));
static_assert(IsQualifiedIdentifier("foo._a.bar"));
static_assert(IsQualifiedIdentifier("foo._z.bar"));
static_assert(!IsQualifiedIdentifier("foo.bar."));
static_assert(IsQualifiedIdentifier("test.add"));
static_assert(IsQualifiedIdentifier("test.subtest.add"));
}
TEST(StringTest, NonFirstComma) {
bool first_call = true;
EXPECT_EQ(NonFirstComma(first_call), "");
EXPECT_FALSE(first_call);
EXPECT_EQ(NonFirstComma(first_call), ", ");
EXPECT_FALSE(first_call);
}
TEST(StringTest, ContainerAccessString) {
EXPECT_EQ(ContainerAccessString("bar"), ".bar");
EXPECT_EQ(ContainerAccessString("bar.baz"), "['bar.baz']");
EXPECT_EQ(ContainerAccessString(""), "['']");
}
TEST(StringTest, starts_with) {
constexpr bool compile_time_true = starts_with("", "");
EXPECT_TRUE(compile_time_true);
constexpr bool compile_time_false = starts_with("foo", "bar");
EXPECT_FALSE(compile_time_false);
EXPECT_TRUE(starts_with("", ""));
EXPECT_TRUE(starts_with("Hello, World!", "Hello"));
EXPECT_TRUE(starts_with("Hello, World!", "Hello, World!"));
EXPECT_FALSE(starts_with("Hello, World!", "Hello, World! "));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/util/string.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/util/string_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
f25e9d48-d814-44db-83e3-84a6b1b9ef2d | cpp | google/arolla | status | arolla/util/status.cc | arolla/util/status_test.cc | #include "absl/status/status.h"
#include <cstdint>
#include <initializer_list>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
namespace arolla {
absl::Status SizeMismatchError(std::initializer_list<int64_t> sizes) {
return absl::InvalidArgumentError(absl::StrCat(
"argument sizes mismatch: (", absl::StrJoin(sizes, ", "), ")"));
}
} | #include "arolla/util/status.h"
#include <initializer_list>
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
namespace arolla {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::testing::AnyOf;
using ::testing::Eq;
using ::testing::Test;
TEST(StatusTest, CheckInputStatus) {
EXPECT_OK(CheckInputStatus());
EXPECT_OK(CheckInputStatus(13));
EXPECT_OK(CheckInputStatus(13, "a"));
EXPECT_OK(CheckInputStatus(absl::StatusOr<int>(13), "a"));
EXPECT_OK(CheckInputStatus(13, absl::StatusOr<std::string>("a")));
EXPECT_OK(CheckInputStatus(absl::StatusOr<int>(13),
absl::StatusOr<std::string>("a")));
absl::Status bad_status1{absl::StatusCode::kInvalidArgument, "bad 1"};
absl::Status bad_status2{absl::StatusCode::kDataLoss, "bad 2"};
EXPECT_EQ(CheckInputStatus(absl::StatusOr<int>(bad_status1)), bad_status1);
EXPECT_EQ(CheckInputStatus(13, absl::StatusOr<int>(bad_status1)),
bad_status1);
EXPECT_EQ(CheckInputStatus(absl::StatusOr<int>(13),
absl::StatusOr<int>(bad_status1)),
bad_status1);
EXPECT_EQ(CheckInputStatus(absl::StatusOr<int>(bad_status1),
absl::StatusOr<int>(bad_status2)),
bad_status1);
}
TEST(StatusTest, LiftStatusUpSuccess) {
std::tuple<> empty = LiftStatusUp().value();
EXPECT_THAT(empty, Eq(std::make_tuple()));
std::tuple<int> one = LiftStatusUp(absl::StatusOr<int>(1)).value();
EXPECT_THAT(one, Eq(std::make_tuple(1)));
std::tuple<std::string, int> two =
LiftStatusUp(absl::StatusOr<std::string>("one"), absl::StatusOr<int>(2))
.value();
EXPECT_THAT(two, Eq(std::make_tuple(std::string("one"), 2)));
ASSERT_OK_AND_ASSIGN(
std::vector<int> vec,
LiftStatusUp(absl::Span<const absl::StatusOr<int>>{1, 2}));
EXPECT_THAT(vec, ::testing::ElementsAre(1, 2));
absl::flat_hash_map<int, int> fhm =
LiftStatusUp(std::initializer_list<std::pair<int, absl::StatusOr<int>>>{
{1, 2}, {3, 4}})
.value();
EXPECT_THAT(fhm, Eq(absl::flat_hash_map<int, int>{{1, 2}, {3, 4}}));
absl::flat_hash_map<int, int> fhm1 =
LiftStatusUp(std::initializer_list<
std::pair<absl::StatusOr<int>, absl::StatusOr<int>>>{
{1, 2}, {3, 4}})
.value();
EXPECT_THAT(fhm, Eq(absl::flat_hash_map<int, int>{{1, 2}, {3, 4}}));
}
TEST(StatusTest, LiftStatusUpErrors) {
absl::Status bad_status1{absl::StatusCode::kInvalidArgument, "bad 1"};
absl::Status bad_status2{absl::StatusCode::kDataLoss, "bad 2"};
EXPECT_EQ(LiftStatusUp(absl::StatusOr<int>(bad_status1)).status(),
bad_status1);
EXPECT_EQ(LiftStatusUp(absl::StatusOr<std::string>("one"),
absl::StatusOr<int>(bad_status2))
.status(),
bad_status2);
EXPECT_EQ(LiftStatusUp(absl::StatusOr<std::string>("one"),
absl::StatusOr<int>(bad_status1),
absl::StatusOr<float>(bad_status2))
.status(),
bad_status1);
EXPECT_EQ(LiftStatusUp(absl::StatusOr<float>(bad_status2),
absl::StatusOr<std::string>("one"),
absl::StatusOr<int>(bad_status1))
.status(),
bad_status2);
EXPECT_THAT(LiftStatusUp(absl::Span<const absl::StatusOr<int>>{bad_status1,
bad_status2})
.status(),
bad_status1);
EXPECT_THAT(
LiftStatusUp(std::initializer_list<std::pair<int, absl::StatusOr<int>>>{
{1, bad_status1}, {2, 3}, {4, bad_status2}})
.status(),
AnyOf(bad_status1, bad_status2));
EXPECT_THAT(
LiftStatusUp(std::initializer_list<
std::pair<absl::StatusOr<int>, absl::StatusOr<int>>>{
{bad_status1, 1}, {2, 3}, {4, bad_status2}})
.status(),
AnyOf(bad_status1, bad_status2));
EXPECT_THAT(
LiftStatusUp(std::initializer_list<
std::pair<absl::StatusOr<int>, absl::StatusOr<int>>>{
{1, bad_status1}, {2, 3}, {4, bad_status2}})
.status(),
AnyOf(bad_status1, bad_status2));
}
TEST(StatusTest, UnStatus) {
using T = std::unique_ptr<int>;
using StatusOrT = absl::StatusOr<T>;
{
StatusOrT status_or_t = std::make_unique<int>(1);
const T& value = UnStatus(status_or_t);
EXPECT_EQ(*value, 1);
EXPECT_EQ(value.get(), status_or_t.value().get());
}
{
StatusOrT status_or_t = std::make_unique<int>(1);
T value = UnStatus(std::move(status_or_t));
EXPECT_EQ(*value, 1);
}
{
T original_value = std::make_unique<int>(1);
const T& value = UnStatus(original_value);
EXPECT_EQ(*value, 1);
EXPECT_EQ(value.get(), original_value.get());
}
{
T original_value = std::make_unique<int>(1);
T value = UnStatus(std::move(original_value));
EXPECT_EQ(*value, 1);
}
}
TEST(StatusTest, FirstErrorStatus) {
absl::Status ok_status = absl::OkStatus();
absl::Status failed_precondition = absl::FailedPreconditionError("msg1");
absl::Status internal_error = absl::InternalError("msg2");
EXPECT_OK(FirstErrorStatus({}));
EXPECT_OK(FirstErrorStatus({ok_status, ok_status}));
EXPECT_EQ(FirstErrorStatus({failed_precondition}), failed_precondition);
EXPECT_EQ(FirstErrorStatus({ok_status, failed_precondition}),
failed_precondition);
EXPECT_EQ(FirstErrorStatus({failed_precondition, ok_status}),
failed_precondition);
EXPECT_EQ(FirstErrorStatus({failed_precondition, internal_error}),
failed_precondition);
EXPECT_EQ(FirstErrorStatus({internal_error, failed_precondition}),
internal_error);
}
TEST(StatusTest, GetStatusOrOk) {
absl::Status ok_status = absl::OkStatus();
EXPECT_OK(GetStatusOrOk(5));
EXPECT_OK(GetStatusOrOk(ok_status));
EXPECT_OK(GetStatusOrOk(absl::StatusOr<int>(5)));
absl::Status failed_precondition = absl::FailedPreconditionError("msg1");
EXPECT_EQ(GetStatusOrOk(failed_precondition), failed_precondition);
EXPECT_EQ(GetStatusOrOk(absl::StatusOr<int>(failed_precondition)),
failed_precondition);
}
TEST(StatusTest, IsOkStatus) {
absl::Status ok_status = absl::OkStatus();
EXPECT_TRUE(IsOkStatus(5));
EXPECT_TRUE(IsOkStatus(ok_status));
EXPECT_TRUE(IsOkStatus(absl::StatusOr<int>(5)));
absl::Status failed_precondition = absl::FailedPreconditionError("msg1");
EXPECT_FALSE(IsOkStatus(failed_precondition));
EXPECT_FALSE(IsOkStatus(absl::StatusOr<int>(failed_precondition)));
}
TEST(StatusTest, UnStatusCaller) {
absl::Status failed_precondition = absl::FailedPreconditionError("msg1");
absl::Status failed_precondition2 = absl::FailedPreconditionError("msg2");
auto add_op = [](int a, int b) { return a + b; };
UnStatusCaller<decltype(add_op)> add_op_wrap{add_op};
auto add_op_with_status = [](int a, int b) -> absl::StatusOr<int> {
return a + b;
};
auto add_op_with_status_wrap = MakeUnStatusCaller(add_op_with_status);
auto add_op_always_error = [&](int a, int b) -> absl::StatusOr<int> {
return failed_precondition;
};
auto add_op_always_error_wrap = MakeUnStatusCaller(add_op_always_error);
EXPECT_THAT(add_op_wrap(5, 7), IsOkAndHolds(12));
EXPECT_THAT(add_op_with_status_wrap(5, 7), IsOkAndHolds(12));
EXPECT_EQ(add_op_always_error_wrap(5, 7).status(), failed_precondition);
EXPECT_EQ(add_op_wrap(5, absl::StatusOr<int>(failed_precondition)).status(),
failed_precondition);
EXPECT_EQ(add_op_wrap(absl::StatusOr<int>(failed_precondition),
absl::StatusOr<int>(failed_precondition2))
.status(),
failed_precondition);
EXPECT_EQ(
add_op_always_error_wrap(5, absl::StatusOr<int>(failed_precondition2))
.status(),
failed_precondition2);
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/util/status.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/util/status_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
4801e1ab-188b-4751-9e34-c3067036a3bb | cpp | google/arolla | init_arolla_internal | arolla/util/init_arolla_internal.cc | arolla/util/init_arolla_internal_test.cc | #include "arolla/util/init_arolla_internal.h"
#include <algorithm>
#include <cstddef>
#include <deque>
#include <sstream>
#include <stack>
#include <utility>
#include <variant>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/escaping.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "arolla/util/init_arolla.h"
#include "arolla/util/string.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::init_arolla_internal {
namespace {
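// Each registered initializer becomes a Node in a dependency digraph. A node
// records the initializer it runs (if any), the nodes it must run after, and
// an execution state used for cycle detection during the depth-first run.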
struct Node {
const Initializer* initializer = nullptr;
absl::string_view name;
std::vector<Node*> deps;
enum { kPending, kExecuting, kDone } execution_state = kPending;
};
struct Digraph {
std::deque<Node> nodes;
absl::flat_hash_map<absl::string_view, Node*> node_index;
};
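// Returns the node registered under `name`, creating it on first use. An
// empty name always yields a fresh anonymous node that is not indexed.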
Node* GetNode(Digraph& digraph, absl::string_view name) {
if (name.empty()) {
return &digraph.nodes.emplace_back();
}
auto& result = digraph.node_index[name];
if (result == nullptr) {
result = &digraph.nodes.emplace_back();
result->name = name;
}
return result;
}
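// Creates the graph node for `initializer`, wiring up its `deps` and
// `reverse_deps` edges. Rejects names with the reserved phony prefix, name
// collisions with already-registered or previously executed initializers, and
// reverse dependencies on initializers that have already run.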
absl::StatusOr<Node*> InitNode(
Digraph& digraph,
const absl::flat_hash_set<absl::string_view>& previously_completed,
const Initializer& initializer) {
if (starts_with(initializer.name, kPhonyNamePrefix)) {
return absl::FailedPreconditionError(absl::StrFormat(
"an initializer name may not start with `%s` prefix: '%s'",
kPhonyNamePrefix, absl::CHexEscape(initializer.name)));
}
Node* result = GetNode(digraph, initializer.name);
if (result->initializer != nullptr ||
previously_completed.contains(initializer.name)) {
return absl::FailedPreconditionError(
absl::StrFormat("name collision between arolla initializers: '%s'",
absl::CHexEscape(initializer.name)));
}
result->initializer = &initializer;
for (auto dep : initializer.deps) {
if (!previously_completed.contains(dep)) {
result->deps.push_back(GetNode(digraph, dep));
}
}
for (auto reverse_dep : initializer.reverse_deps) {
if (previously_completed.contains(reverse_dep)) {
return absl::FailedPreconditionError(absl::StrFormat(
"the newly registered initializer '%s' expects to be executed "
"before the previously registered and executed initializer '%s'. "
"This is likely due to a missing linkage dependency between the "
"library providing '%s' and the library providing '%s'",
absl::CHexEscape(initializer.name), absl::CHexEscape(reverse_dep),
absl::CHexEscape(initializer.name), absl::CHexEscape(reverse_dep)));
}
GetNode(digraph, reverse_dep)->deps.push_back(result);
}
return result;
}
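// Runs `node` after recursively running its dependencies (sorted by name for
// a deterministic order). The kExecuting state marks nodes on the current DFS
// path, so revisiting one reports a dependency cycle; a named, non-phony node
// without an initializer reports a missing definition.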
absl::Status ExecuteNode(Node& node, std::stack<Node*>& dependency_stack) {
if (node.execution_state == Node::kDone) {
return absl::OkStatus();
}
if (node.execution_state == Node::kExecuting) {
std::ostringstream message;
message << "a circular dependency between initializers: '"
<< absl::CHexEscape(node.name) << "'";
for (;; dependency_stack.pop()) {
message << " <- '" << absl::CHexEscape(dependency_stack.top()->name)
<< "'";
if (dependency_stack.top() == &node) {
break;
}
}
return absl::FailedPreconditionError(std::move(message).str());
}
DCHECK_EQ(node.execution_state, Node::kPending);
node.execution_state = Node::kExecuting;
std::stable_sort(node.deps.begin(), node.deps.end(),
[](auto* lhs, auto* rhs) { return lhs->name < rhs->name; });
dependency_stack.push(&node);
for (auto* dep : node.deps) {
RETURN_IF_ERROR(ExecuteNode(*dep, dependency_stack));
}
dependency_stack.pop();
node.execution_state = Node::kDone;
if (node.initializer != nullptr) {
if (auto* init_fn =
std::get_if<Initializer::VoidInitFn>(&node.initializer->init_fn)) {
(*init_fn)();
} else if (auto* init_fn = std::get_if<Initializer::StatusInitFn>(
&node.initializer->init_fn)) {
RETURN_IF_ERROR((*init_fn)()) << "while executing initializer '"
<< absl::CHexEscape(node.name) << "'";
}
} else if (!starts_with(node.name, kPhonyNamePrefix)) {
return absl::FailedPreconditionError(absl::StrFormat(
"the initializer '%s' expects to be executed after the initializer "
"'%s', which has not been defined yet. This is likely due to a "
"missing linkage dependency between the library providing '%s' "
"and the library providing '%s'",
absl::CHexEscape(dependency_stack.top()->name),
absl::CHexEscape(node.name),
absl::CHexEscape(dependency_stack.top()->name),
absl::CHexEscape(node.name)));
}
return absl::OkStatus();
}
}
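// Builds the dependency graph for this batch of initializers and executes
// them in name-sorted order, remembering completed (named, non-phony)
// initializers so that later batches can depend on them.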
absl::Status Coordinator::Run(
absl::Span<const Initializer* const> initializers) {
Digraph digraph;
std::vector<Node*> nodes;
nodes.reserve(initializers.size());
for (auto& initializer : initializers) {
ASSIGN_OR_RETURN(nodes.emplace_back(),
InitNode(digraph, previously_completed_, *initializer));
}
std::stable_sort(nodes.begin(), nodes.end(),
[](auto* lhs, auto* rhs) { return lhs->name < rhs->name; });
std::stack<Node*> dependency_stack;
for (auto* node : nodes) {
RETURN_IF_ERROR(ExecuteNode(*node, dependency_stack));
if (!node->name.empty() && !starts_with(node->name, kPhonyNamePrefix)) {
previously_completed_.emplace(node->name);
}
}
return absl::OkStatus();
}
} | #include "arolla/util/init_arolla_internal.h"
#include <algorithm>
#include <array>
#include <string>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/no_destructor.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "arolla/util/init_arolla.h"
namespace arolla::init_arolla_internal {
namespace {
using ::absl_testing::StatusIs;
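// The `deps` edges alone must yield the execution order D/E -> C -> A/B,
// regardless of the order in which the initializers are passed in (checked
// over every permutation of the input array).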
TEST(InitArollaInternalTest, Dependencies) {
static absl::NoDestructor<std::string> result;
Initializer a{.name = "A", .deps = {"C"}, .init_fn = [] { *result += "A"; }};
Initializer b{.name = "B", .deps = {"C"}, .init_fn = [] { *result += "B"; }};
Initializer c{
.name = "C", .deps = {"D", "E"}, .init_fn = [] { *result += "C"; }};
Initializer d{.name = "D", .init_fn = [] { *result += "D"; }};
Initializer e{.name = "E", .init_fn = [] { *result += "E"; }};
std::array initializers = {&a, &b, &c, &d, &e};
std::sort(initializers.begin(), initializers.end());
do {
result->clear();
Coordinator coordinator;
EXPECT_OK(coordinator.Run(initializers));
EXPECT_EQ(*result, "DECAB");
} while (std::next_permutation(initializers.begin(), initializers.end()));
}
TEST(InitArollaInternalTest, ReverseDependencies) {
static absl::NoDestructor<std::string> result;
Initializer a{.name = "A", .init_fn = [] { *result += "A"; }};
Initializer b{.name = "B", .init_fn = [] { *result += "B"; }};
Initializer c{.name = "C", .reverse_deps = {"A", "B"}, .init_fn = [] {
*result += "C";
}};
Initializer d{
.name = "D", .reverse_deps = {"C"}, .init_fn = [] { *result += "D"; }};
Initializer e{
.name = "E", .reverse_deps = {"C"}, .init_fn = [] { *result += "E"; }};
std::array initializers = {&a, &b, &c, &d, &e};
std::sort(initializers.begin(), initializers.end());
do {
result->clear();
Coordinator coordinator;
EXPECT_OK(coordinator.Run(initializers));
EXPECT_EQ(*result, "DECAB");
} while (std::next_permutation(initializers.begin(), initializers.end()));
}
TEST(InitArollaInternalTest, MixedDependencies) {
static absl::NoDestructor<std::string> result;
Initializer a{.name = "A", .init_fn = [] { *result += "A"; }};
Initializer b{.name = "B", .init_fn = [] { *result += "B"; }};
Initializer c{.name = "C",
.deps = {"D", "E"},
.reverse_deps = {"A", "B"},
.init_fn = [] { *result += "C"; }};
Initializer d{.name = "D", .init_fn = [] { *result += "D"; }};
Initializer e{.name = "E", .init_fn = [] { *result += "E"; }};
std::array initializers = {&a, &b, &c, &d, &e};
std::sort(initializers.begin(), initializers.end());
do {
result->clear();
Coordinator coordinator;
EXPECT_OK(coordinator.Run(initializers));
EXPECT_EQ(*result, "DECAB");
} while (std::next_permutation(initializers.begin(), initializers.end()));
}
TEST(InitArollaInternalTest, TwoPhases) {
static absl::NoDestructor<std::string> result;
Initializer a{.name = "A", .deps = {"C"}, .init_fn = [] { *result += "A"; }};
Initializer b{.name = "B", .deps = {"C"}, .init_fn = [] { *result += "B"; }};
Initializer c{
.name = "C", .deps = {"D", "E"}, .init_fn = [] { *result += "C"; }};
Initializer d{.name = "D", .init_fn = [] { *result += "D"; }};
Initializer e{.name = "E", .init_fn = [] { *result += "E"; }};
Coordinator coordinator;
EXPECT_OK(coordinator.Run({&c, &d, &e}));
EXPECT_EQ(*result, "DEC");
EXPECT_OK(coordinator.Run({&a, &b}));
EXPECT_EQ(*result, "DECAB");
}
TEST(InitArollaInternalTest, AnonymousInitializers) {
static absl::NoDestructor<std::string> result;
Initializer x{.name = "X", .init_fn = [] { *result += "X"; }};
Initializer y{.name = "Y", .init_fn = [] { *result += "Y"; }};
Initializer a0{
.deps = {"Y"}, .reverse_deps = {"X"}, .init_fn = [] { *result += "0"; }};
Initializer a1{
.deps = {"Y"}, .reverse_deps = {"X"}, .init_fn = [] { *result += "1"; }};
Initializer a2{
.deps = {"Y"}, .reverse_deps = {"X"}, .init_fn = [] { *result += "2"; }};
Coordinator coordinator;
EXPECT_OK(coordinator.Run({&x, &y, &a0, &a1, &a2}));
EXPECT_EQ(*result, "Y012X");
}
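// Phony names (prefixed with "@phony") only link initializers together; they
// never require a definition of their own and may be referenced again in
// later batches.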
TEST(InitArollaInternalTest, PhonyInitializers) {
static absl::NoDestructor<std::string> result;
Initializer x{
.name = "X", .deps = {"@phony/name"}, .init_fn = [] { *result += "X"; }};
Initializer y{.name = "Y", .reverse_deps = {"@phony/name"}, .init_fn = [] {
*result += "Y";
}};
Initializer a{
.name = "A", .deps = {"@phony/name"}, .init_fn = [] { *result += "A"; }};
Initializer b{.name = "B", .reverse_deps = {"@phony/name"}, .init_fn = [] {
*result += "B";
}};
Coordinator coordinator;
EXPECT_OK(coordinator.Run({&x, &y}));
EXPECT_EQ(*result, "YX");
EXPECT_OK(coordinator.Run({&a, &b}));
EXPECT_EQ(*result, "YXBA");
}
TEST(InitArollaInternalTest, DanglingReverseDependency) {
static absl::NoDestructor<std::string> result;
Initializer x{.name = "X", .reverse_deps = {"undefined_dep"}, .init_fn = [] {
*result += "X";
}};
Coordinator coordinator;
EXPECT_OK(coordinator.Run({&x}));
EXPECT_EQ(*result, "X");
}
TEST(InitArollaInternalTest, InitFn) {
static absl::NoDestructor<std::string> result;
Initializer x{
.name = "X",
.init_fn = [] { *result += "X"; },
};
Initializer y{
.name = "Y",
.init_fn =
[] {
*result += "Y";
return absl::OkStatus();
},
};
Coordinator coordinator;
EXPECT_OK(coordinator.Run({&x, &y}));
EXPECT_EQ(*result, "XY");
}
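// Registering two initializers with the same name must fail, whether the
// duplicate appears in the same batch or in a later one.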
TEST(InitArollaInternalTest, Error_NameCollision) {
Initializer a1{.name = "A"};
Initializer a2{.name = "A"};
{
Coordinator coordinator;
EXPECT_THAT(coordinator.Run({&a1, &a2}),
StatusIs(absl::StatusCode::kFailedPrecondition,
"name collision between arolla initializers: 'A'"));
}
{
Coordinator coordinator;
EXPECT_OK(coordinator.Run({&a1}));
EXPECT_THAT(coordinator.Run({&a2}),
StatusIs(absl::StatusCode::kFailedPrecondition,
"name collision between arolla initializers: 'A'"));
}
}
TEST(InitArollaInternalTest, Error_PhonyName) {
Initializer phony{.name = "@phony/name"};
Coordinator coordinator;
EXPECT_THAT(coordinator.Run({&phony}),
StatusIs(absl::StatusCode::kFailedPrecondition,
"an initializer name may not start with `@phony` "
"prefix: '@phony/name'"));
}
TEST(InitArollaInternalTest, Error_LateReverseDependency) {
Initializer x{.name = "X"};
Initializer y{.name = "Y", .reverse_deps = {"X"}};
Coordinator coordinator;
EXPECT_OK(coordinator.Run({&x}));
EXPECT_THAT(
coordinator.Run({&y}),
StatusIs(absl::StatusCode::kFailedPrecondition,
"the newly registered initializer 'Y' expects to be executed "
"before the previously registered and executed initializer 'X'. "
"This is likely due to a missing linkage dependency between the "
"library providing 'Y' and the library providing 'X'"));
}
TEST(InitArollaInternalTest, Error_UndefinedDependency) {
Initializer x{.name = "X", .deps = {"Y"}};
Coordinator coordinator;
EXPECT_THAT(
coordinator.Run({&x}),
StatusIs(absl::StatusCode::kFailedPrecondition,
"the initializer 'X' expects to be executed after the "
"initializer 'Y', which has not been defined yet. This is "
"likely due to a missing linkage dependency between the library "
"providing 'X' and the library providing 'Y'"));
}
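// A cycle among reverse dependencies is reported with the full chain of
// initializer names on the DFS path.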
TEST(InitArollaInternalTest, Error_CircularDependency) {
Initializer a{.name = "A", .reverse_deps = {"X"}};
Initializer x{.name = "X", .reverse_deps = {"Y"}};
Initializer y{.name = "Y", .reverse_deps = {"Z"}};
Initializer z{.name = "Z", .reverse_deps = {"X"}};
Coordinator coordinator;
EXPECT_THAT(coordinator.Run({&a, &x, &y, &z}),
StatusIs(absl::StatusCode::kFailedPrecondition,
"a circular dependency between initializers: 'X' <- 'Y' "
"<- 'Z' <- 'X'"));
}
TEST(InitArollaInternalTest, Error_InitFnFails) {
Initializer x{.name = "X",
.init_fn = [] { return absl::InvalidArgumentError("error"); }};
Coordinator coordinator;
EXPECT_THAT(coordinator.Run({&x}),
StatusIs(absl::StatusCode::kInvalidArgument,
"error; while executing initializer 'X'"));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/util/init_arolla_internal.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/util/init_arolla_internal_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |